-rw-r--r--Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget13
-rw-r--r--Documentation/ABI/testing/sysfs-devices-platform-ab5500-core-adc20
-rw-r--r--Documentation/ABI/testing/sysfs-socinfo16
-rw-r--r--Documentation/DocBook/Makefile8
-rw-r--r--Documentation/DocBook/cg2900.tmpl1381
-rw-r--r--Documentation/DocBook/cg2900_fm_radio.tmpl2025
-rw-r--r--Documentation/DocBook/db5500_keypad.tmpl91
-rw-r--r--Documentation/DocBook/device-drivers.tmpl17
-rw-r--r--Documentation/DocBook/gpio.tmpl112
-rw-r--r--Documentation/DocBook/i2c.tmpl116
-rw-r--r--Documentation/DocBook/i2s.tmpl97
-rw-r--r--Documentation/DocBook/lps001wp_prs.tmpl89
-rw-r--r--Documentation/DocBook/lsm303dlh.tmpl91
-rw-r--r--Documentation/DocBook/msp.tmpl104
-rw-r--r--Documentation/DocBook/prcmu-fw-api.tmpl109
-rw-r--r--Documentation/DocBook/shrm.tmpl139
-rw-r--r--Documentation/DocBook/ske_keypad.tmpl89
-rw-r--r--Documentation/DocBook/ste_ff_vibra.tmpl217
-rw-r--r--Documentation/DocBook/stmpe.tmpl115
-rwxr-xr-x[-rw-r--r--]Documentation/DocBook/stylesheet.xsl28
-rw-r--r--Documentation/DocBook/synaptics_rmi4_touchp.tmpl106
-rw-r--r--Documentation/DocBook/tc_keypad.tmpl113
-rw-r--r--Documentation/DocBook/touchp.tmpl104
-rw-r--r--Documentation/DocBook/u5500_LogicalMailbox.tmpl114
-rw-r--r--Documentation/DocBook/ux500_usb.tmpl151
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/trace/stm-trace.txt193
-rw-r--r--Makefile6
-rw-r--r--arch/arm/Kconfig17
-rw-r--r--arch/arm/Kconfig.debug9
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/boottime.c46
-rw-r--r--arch/arm/configs/u8500_android_defconfig333
-rw-r--r--arch/arm/configs/u8500_defconfig250
-rw-r--r--arch/arm/include/asm/cacheflush.h15
-rw-r--r--arch/arm/include/asm/delay.h11
-rw-r--r--arch/arm/include/asm/elf.h4
-rw-r--r--arch/arm/include/asm/io.h6
-rw-r--r--arch/arm/include/asm/outercache.h14
-rw-r--r--arch/arm/include/asm/setup.h21
-rw-r--r--arch/arm/include/asm/smp_twd.h8
-rw-r--r--arch/arm/include/asm/system.h2
-rw-r--r--arch/arm/kernel/armksyms.c4
-rw-r--r--arch/arm/kernel/elf.c6
-rw-r--r--arch/arm/kernel/hw_breakpoint.c25
-rw-r--r--arch/arm/kernel/machine_kexec.c11
-rw-r--r--arch/arm/kernel/process.c11
-rw-r--r--arch/arm/kernel/return_address.c4
-rw-r--r--arch/arm/kernel/smp.c4
-rw-r--r--arch/arm/kernel/smp_twd.c24
-rw-r--r--arch/arm/lib/delay.S69
-rw-r--r--arch/arm/lib/delay.c81
-rw-r--r--arch/arm/mach-omap2/Makefile3
-rw-r--r--arch/arm/mach-omap2/ssi.c134
-rw-r--r--arch/arm/mach-ux500/Kconfig134
-rw-r--r--arch/arm/mach-ux500/Kconfig-arch85
-rw-r--r--arch/arm/mach-ux500/Makefile66
-rw-r--r--arch/arm/mach-ux500/board-mop500-bm.c489
-rw-r--r--arch/arm/mach-ux500/board-mop500-bm.h24
-rwxr-xr-xarch/arm/mach-ux500/board-mop500-cyttsp.c227
-rw-r--r--arch/arm/mach-ux500/board-mop500-mcde.c517
-rw-r--r--arch/arm/mach-ux500/board-mop500-mmio.c514
-rw-r--r--arch/arm/mach-ux500/board-mop500-msp.c193
-rw-r--r--arch/arm/mach-ux500/board-mop500-pins.c1021
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.c357
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.h8
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c161
-rw-r--r--arch/arm/mach-ux500/board-mop500-sensors.c239
-rw-r--r--arch/arm/mach-ux500/board-mop500-stm.c441
-rw-r--r--arch/arm/mach-ux500/board-mop500-stuib.c119
-rw-r--r--arch/arm/mach-ux500/board-mop500-u8500uib.c40
-rw-r--r--arch/arm/mach-ux500/board-mop500-uib.c178
-rw-r--r--arch/arm/mach-ux500/board-mop500-wlan.c230
-rw-r--r--arch/arm/mach-ux500/board-mop500-wlan.h17
-rw-r--r--arch/arm/mach-ux500/board-mop500.c932
-rw-r--r--arch/arm/mach-ux500/board-mop500.h20
-rw-r--r--arch/arm/mach-ux500/board-pins-sleep-force.c269
-rw-r--r--arch/arm/mach-ux500/board-pins-sleep-force.h38
-rw-r--r--arch/arm/mach-ux500/board-u5500-bm.c496
-rw-r--r--arch/arm/mach-ux500/board-u5500-bm.h26
-rwxr-xr-xarch/arm/mach-ux500/board-u5500-cyttsp.c144
-rw-r--r--arch/arm/mach-ux500/board-u5500-mcde.c231
-rw-r--r--arch/arm/mach-ux500/board-u5500-mmio.c415
-rw-r--r--arch/arm/mach-ux500/board-u5500-pins.c225
-rw-r--r--arch/arm/mach-ux500/board-u5500-regulators.c221
-rw-r--r--arch/arm/mach-ux500/board-u5500-sdi.c232
-rw-r--r--arch/arm/mach-ux500/board-u5500-wlan.c89
-rw-r--r--arch/arm/mach-ux500/board-u5500-wlan.h18
-rw-r--r--arch/arm/mach-ux500/board-u5500.c686
-rw-r--r--arch/arm/mach-ux500/board-u5500.h35
-rw-r--r--arch/arm/mach-ux500/board-ux500-usb.h13
-rw-r--r--arch/arm/mach-ux500/clock-db5500.c743
-rw-r--r--arch/arm/mach-ux500/clock-db8500.c1162
-rw-r--r--arch/arm/mach-ux500/clock-debug.c237
-rw-r--r--arch/arm/mach-ux500/clock.c949
-rw-r--r--arch/arm/mach-ux500/clock.h261
-rw-r--r--arch/arm/mach-ux500/cpu-db5500.c88
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c65
-rw-r--r--arch/arm/mach-ux500/cpu-db9500.c28
-rw-r--r--arch/arm/mach-ux500/cpu.c114
-rw-r--r--arch/arm/mach-ux500/dbx500_dump.c161
-rw-r--r--arch/arm/mach-ux500/dcache.c254
-rw-r--r--arch/arm/mach-ux500/devices-common.c26
-rw-r--r--arch/arm/mach-ux500/devices-common.h35
-rw-r--r--arch/arm/mach-ux500/devices-db5500.c282
-rw-r--r--arch/arm/mach-ux500/devices-db5500.h45
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c521
-rw-r--r--arch/arm/mach-ux500/devices-db8500.h5
-rw-r--r--arch/arm/mach-ux500/devices.c45
-rw-r--r--arch/arm/mach-ux500/dma-db5500.c147
-rw-r--r--arch/arm/mach-ux500/dma-db8500.c314
-rw-r--r--arch/arm/mach-ux500/hotplug.c32
-rw-r--r--arch/arm/mach-ux500/hwmem-int.c199
-rw-r--r--arch/arm/mach-ux500/hwreg.c651
-rw-r--r--arch/arm/mach-ux500/include/mach/ab8500_gpadc.h36
-rw-r--r--arch/arm/mach-ux500/include/mach/abx500-accdet.h360
-rw-r--r--arch/arm/mach-ux500/include/mach/context.h86
-rw-r--r--arch/arm/mach-ux500/include/mach/crypto-ux500.h21
-rw-r--r--arch/arm/mach-ux500/include/mach/cw1200_plat.h28
-rw-r--r--arch/arm/mach-ux500/include/mach/db5500-keypad.h42
-rw-r--r--arch/arm/mach-ux500/include/mach/db5500-regs.h1
-rw-r--r--arch/arm/mach-ux500/include/mach/dcache.h26
-rw-r--r--arch/arm/mach-ux500/include/mach/devices.h20
-rw-r--r--arch/arm/mach-ux500/include/mach/gpio.h24
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h21
-rw-r--r--arch/arm/mach-ux500/include/mach/hsi.h124
-rw-r--r--arch/arm/mach-ux500/include/mach/id.h56
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-board-mop500.h4
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-board-u5500.h11
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-db5500.h31
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs.h29
-rw-r--r--arch/arm/mach-ux500/include/mach/isa_ioctl.h50
-rw-r--r--arch/arm/mach-ux500/include/mach/mbox-db5500.h7
-rw-r--r--arch/arm/mach-ux500/include/mach/mbox_channels-db5500.h82
-rw-r--r--arch/arm/mach-ux500/include/mach/mloader-dbx500.h48
-rw-r--r--arch/arm/mach-ux500/include/mach/msp.h1023
-rw-r--r--arch/arm/mach-ux500/include/mach/pm-timer.h30
-rw-r--r--arch/arm/mach-ux500/include/mach/pm.h109
-rw-r--r--arch/arm/mach-ux500/include/mach/prcmu-debug.h23
-rw-r--r--arch/arm/mach-ux500/include/mach/reboot_reasons.h48
-rw-r--r--arch/arm/mach-ux500/include/mach/setup.h3
-rw-r--r--arch/arm/mach-ux500/include/mach/sim_detect.h15
-rw-r--r--arch/arm/mach-ux500/include/mach/ste-dma40-db5500.h (renamed from arch/arm/mach-ux500/ste-dma40-db5500.h)12
-rw-r--r--arch/arm/mach-ux500/include/mach/ste-dma40-db8500.h (renamed from arch/arm/mach-ux500/ste-dma40-db8500.h)21
-rw-r--r--arch/arm/mach-ux500/include/mach/suspend.h20
-rw-r--r--arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h47
-rw-r--r--arch/arm/mach-ux500/include/mach/timex.h1
-rw-r--r--arch/arm/mach-ux500/include/mach/usb.h10
-rw-r--r--arch/arm/mach-ux500/l2x0-prefetch.c160
-rw-r--r--arch/arm/mach-ux500/mloader-db5500.c202
-rw-r--r--arch/arm/mach-ux500/mloader-db8500.c81
-rw-r--r--arch/arm/mach-ux500/modem-irq-db5500.c6
-rw-r--r--arch/arm/mach-ux500/pins-db8500.h72
-rw-r--r--arch/arm/mach-ux500/pins.c252
-rw-r--r--arch/arm/mach-ux500/pins.h46
-rw-r--r--arch/arm/mach-ux500/pm/Kconfig70
-rw-r--r--arch/arm/mach-ux500/pm/Makefile12
-rw-r--r--arch/arm/mach-ux500/pm/context-db5500.c407
-rw-r--r--arch/arm/mach-ux500/pm/context-db8500.c456
-rw-r--r--arch/arm/mach-ux500/pm/context.c962
-rw-r--r--arch/arm/mach-ux500/pm/context_arm.S409
-rw-r--r--arch/arm/mach-ux500/pm/performance.c224
-rw-r--r--arch/arm/mach-ux500/pm/pm.c221
-rw-r--r--arch/arm/mach-ux500/pm/prcmu-qos-power.c722
-rw-r--r--arch/arm/mach-ux500/pm/runtime.c509
-rw-r--r--arch/arm/mach-ux500/pm/scu.h25
-rw-r--r--arch/arm/mach-ux500/pm/suspend.c273
-rw-r--r--arch/arm/mach-ux500/pm/suspend_dbg.c165
-rw-r--r--arch/arm/mach-ux500/pm/suspend_dbg.h63
-rw-r--r--arch/arm/mach-ux500/pm/timer.c193
-rw-r--r--arch/arm/mach-ux500/pm/usecase_gov.c973
-rw-r--r--arch/arm/mach-ux500/prcc.h20
-rw-r--r--arch/arm/mach-ux500/prcmu-debug.c563
-rw-r--r--arch/arm/mach-ux500/product.c133
-rw-r--r--arch/arm/mach-ux500/product.h26
-rw-r--r--arch/arm/mach-ux500/reboot_reasons.c78
-rw-r--r--arch/arm/mach-ux500/regulator-u5500.h20
-rw-r--r--arch/arm/mach-ux500/tee_service_svp.c66
-rw-r--r--arch/arm/mach-ux500/tee_ta_start_modem_svp.c56
-rw-r--r--arch/arm/mach-ux500/tee_ux500.c95
-rw-r--r--arch/arm/mach-ux500/timer.c30
-rw-r--r--arch/arm/mach-ux500/uart-db8500.c225
-rw-r--r--arch/arm/mach-ux500/usb.c82
-rw-r--r--arch/arm/mm/cache-fa.S18
-rw-r--r--arch/arm/mm/cache-l2x0.c13
-rw-r--r--arch/arm/mm/cache-v3.S18
-rw-r--r--arch/arm/mm/cache-v4.S18
-rw-r--r--arch/arm/mm/cache-v4wb.S18
-rw-r--r--arch/arm/mm/cache-v4wt.S18
-rw-r--r--arch/arm/mm/cache-v6.S18
-rw-r--r--arch/arm/mm/cache-v7.S94
-rw-r--r--arch/arm/mm/mmu.c14
-rw-r--r--arch/arm/mm/proc-macros.S2
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/plat-nomadik/include/plat/mtu.h2
-rw-r--r--arch/arm/plat-nomadik/include/plat/pincfg.h19
-rw-r--r--arch/arm/plat-nomadik/include/plat/ske.h9
-rw-r--r--arch/arm/plat-nomadik/include/plat/ste_dma40.h29
-rw-r--r--arch/arm/plat-nomadik/timer.c32
-rw-r--r--arch/arm/plat-omap/include/plat/ssi.h204
-rw-r--r--block/partitions/Kconfig19
-rw-r--r--block/partitions/Makefile1
-rwxr-xr-xblock/partitions/blkdev_parts.c127
-rwxr-xr-xblock/partitions/blkdev_parts.h14
-rw-r--r--block/partitions/check.c4
-rw-r--r--drivers/Kconfig7
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/base/Kconfig3
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/soc.c79
-rw-r--r--drivers/char/Makefile5
-rw-r--r--drivers/char/m6718_modem_char.c722
-rw-r--r--drivers/char/shrm_char.c897
-rw-r--r--drivers/clocksource/Kconfig19
-rw-r--r--drivers/clocksource/Makefile3
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c23
-rw-r--r--drivers/clocksource/db5500-mtimer.c67
-rw-r--r--drivers/cpufreq/Makefile3
-rw-r--r--drivers/cpufreq/cpufreq.c21
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c173
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c324
-rw-r--r--drivers/crypto/Kconfig11
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/ux500/Kconfig29
-rw-r--r--drivers/crypto/ux500/Makefile8
-rw-r--r--drivers/crypto/ux500/cryp/Makefile13
-rw-r--r--drivers/crypto/ux500/cryp/cryp.c418
-rw-r--r--drivers/crypto/ux500/cryp/cryp.h308
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c2313
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irq.c45
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irq.h31
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irqp.h125
-rw-r--r--drivers/crypto/ux500/cryp/cryp_p.h124
-rw-r--r--drivers/crypto/ux500/hash/Makefile11
-rw-r--r--drivers/crypto/ux500/hash/hash_alg.h387
-rwxr-xr-xdrivers/crypto/ux500/hash/hash_alg_p.h26
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c2080
-rw-r--r--drivers/dma/ste_dma40.c68
-rw-r--r--drivers/dma/ste_dma40_ll.c26
-rw-r--r--drivers/dma/ste_dma40_ll.h4
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-ab8500.c168
-rw-r--r--drivers/gpio/gpio-nomadik.c112
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/mali/Kconfig15
-rw-r--r--drivers/gpu/mali/Makefile10
-rw-r--r--drivers/gpu/mali/mali400ko/.gitignore32
-rw-r--r--drivers/gpu/mali/mali400ko/Makefile102
-rw-r--r--drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h236
-rw-r--r--drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h31
-rw-r--r--drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h48
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile346
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common56
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform45
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h87
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h94
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h87
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h79
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h93
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h107
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h121
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h109
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c370
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h18
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c1418
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c1187
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h171
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c892
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h134
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c183
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h99
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h21
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c515
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h25
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h17
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c1425
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c2924
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h66
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c309
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h37
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c348
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h145
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h21
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c240
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h46
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c2032
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h355
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h19
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h107
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c207
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h44
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c29
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h1620
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h166
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h184
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h252
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h1148
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h710
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c921
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h323
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c243
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h155
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c81
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h62
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c461
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h80
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c718
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h296
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h66
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h31
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c82
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h19
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h77
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c565
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h29
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c786
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h19
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c72
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h57
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h37
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c55
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c86
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h48
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c228
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c271
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c578
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c98
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c22
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c50
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c51
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c191
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c195
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h32
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c51
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c65
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c142
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c128
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c336
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c103
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c135
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c41
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h70
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c50
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c61
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c388
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h100
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c190
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c159
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt28
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h170
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h219
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c13
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h48
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c13
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h26
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile115
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common19
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h18
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c329
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c387
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h126
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c166
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h91
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h49
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c194
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h35
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h48
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h141
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h51
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h31
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h49
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c409
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h18
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c273
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h23
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c245
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h23
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c70
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c27
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c243
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c37
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c76
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h35
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c173
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h41
-rw-r--r--drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt17
-rw-r--r--drivers/gpu/mali/mali400ko/mali.spec57
-rw-r--r--drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt24
-rw-r--r--drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile17
-rw-r--r--drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c158
-rw-r--r--drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h25
-rw-r--r--drivers/hsi/Kconfig20
-rw-r--r--drivers/hsi/Makefile6
-rw-r--r--drivers/hsi/clients/Kconfig19
-rw-r--r--drivers/hsi/clients/Makefile6
-rw-r--r--drivers/hsi/clients/cfhsi.c318
-rw-r--r--drivers/hsi/clients/hsi_char.c1118
-rw-r--r--drivers/hsi/controllers/Kconfig33
-rw-r--r--drivers/hsi/controllers/Makefile6
-rw-r--r--drivers/hsi/controllers/omap_ssi.c1853
-rw-r--r--drivers/hsi/controllers/ste_hsi.c1843
-rw-r--r--drivers/hsi/hsi.c496
-rw-r--r--drivers/hsi/hsi_boardinfo.c64
-rw-r--r--drivers/hsi/hsi_core.h37
-rw-r--r--drivers/hwmon/Kconfig86
-rw-r--r--drivers/hwmon/Makefile6
-rw-r--r--drivers/hwmon/ab5500.c212
-rw-r--r--drivers/hwmon/ab8500.c184
-rw-r--r--drivers/hwmon/abx500.c698
-rw-r--r--drivers/hwmon/abx500.h95
-rw-r--r--drivers/hwmon/dbx500.c402
-rw-r--r--drivers/hwmon/hwmon.c21
-rw-r--r--drivers/hwmon/l3g4200d.c717
-rw-r--r--drivers/hwmon/lsm303dlh_a.c1371
-rw-r--r--drivers/hwmon/lsm303dlh_m.c924
-rw-r--r--drivers/hwmon/lsm303dlhc_a.c704
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c57
-rw-r--r--drivers/input/keyboard/Kconfig12
-rw-r--r--drivers/input/keyboard/Makefile3
-rw-r--r--drivers/input/keyboard/db5500_keypad.c799
-rw-r--r--drivers/input/keyboard/gpio_keys.c21
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c664
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c89
-rw-r--r--drivers/input/misc/Kconfig48
-rw-r--r--drivers/input/misc/Makefile4
-rw-r--r--drivers/input/misc/ab5500-accdet.c284
-rw-r--r--drivers/input/misc/ab8500-accdet.c451
-rw-r--r--drivers/input/misc/ab8500-ponkey.c213
-rw-r--r--drivers/input/misc/abx500-accdet.c1011
-rw-r--r--drivers/input/misc/lps001wp_prs.c1453
-rw-r--r--drivers/input/misc/ste_ff_vibra.c234
-rw-r--r--drivers/input/touchscreen/Kconfig17
-rw-r--r--drivers/input/touchscreen/Makefile4
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c499
-rwxr-xr-xdrivers/input/touchscreen/cyttsp_core.c2247
-rwxr-xr-xdrivers/input/touchscreen/cyttsp_core.h44
-rwxr-xr-xdrivers/input/touchscreen/cyttsp_ldr.h333
-rwxr-xr-xdrivers/input/touchscreen/cyttsp_spi.c302
-rw-r--r--drivers/input/touchscreen/synaptics_i2c_rmi.c675
-rw-r--r--drivers/leds/Kconfig8
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-ab5500.c811
-rw-r--r--drivers/leds/leds-lm3530.c25
-rw-r--r--drivers/leds/leds-lp5521.c8
-rw-r--r--drivers/leds/leds-pwm.c8
-rwxr-xr-xdrivers/media/radio/CG2900/Makefile12
-rw-r--r--drivers/media/radio/CG2900/cg2900_fm_api.c3205
-rw-r--r--drivers/media/radio/CG2900/cg2900_fm_api.h1077
-rw-r--r--drivers/media/radio/CG2900/cg2900_fm_driver.c4922
-rw-r--r--drivers/media/radio/CG2900/cg2900_fm_driver.h1793
-rw-r--r--drivers/media/radio/CG2900/radio-cg2900.c3024
-rw-r--r--drivers/media/radio/Kconfig16
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/mfd/Kconfig46
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/ab5500-core.c72
-rw-r--r--drivers/mfd/ab5500-gpadc.c1224
-rw-r--r--drivers/mfd/ab5500-power.c96
-rw-r--r--drivers/mfd/ab8500-core.c63
-rw-r--r--drivers/mfd/ab8500-debugfs.c1057
-rw-r--r--drivers/mfd/ab8500-denc.c539
-rw-r--r--drivers/mfd/ab8500-gpadc.c12
-rw-r--r--drivers/mfd/ab8500-i2c.c1
-rw-r--r--drivers/mfd/ab8500-sysctrl.c135
-rw-r--r--drivers/mfd/db5500-prcmu-regs.h141
-rw-r--r--drivers/mfd/db5500-prcmu.c2050
-rw-r--r--drivers/mfd/db8500-prcmu.c925
-rw-r--r--drivers/mfd/dbx500-prcmu-regs.h131
-rw-r--r--drivers/mfd/stmpe.c18
-rw-r--r--drivers/mfd/tc35892.c503
-rw-r--r--drivers/mfd/tc3589x.c131
-rw-r--r--drivers/mfd/tps6105x.c1
-rw-r--r--drivers/misc/Kconfig58
-rw-r--r--drivers/misc/Kconfig.stm120
-rw-r--r--drivers/misc/Makefile9
-rw-r--r--drivers/misc/ab8500-pwm.c45
-rw-r--r--drivers/misc/bh1780gli.c194
-rw-r--r--drivers/misc/compdev/Makefile1
-rw-r--r--drivers/misc/compdev/compdev.c539
-rw-r--r--drivers/misc/db8500-modem-trace.c273
-rw-r--r--drivers/misc/dbx500-mloader.c269
-rw-r--r--drivers/misc/dispdev/Makefile1
-rw-r--r--drivers/misc/dispdev/dispdev.c658
-rw-r--r--drivers/misc/hwmem/Makefile3
-rw-r--r--drivers/misc/hwmem/cache_handler.c510
-rw-r--r--drivers/misc/hwmem/cache_handler.h61
-rw-r--r--drivers/misc/hwmem/contig_alloc.c571
-rw-r--r--drivers/misc/hwmem/hwmem-ioctl.c532
-rw-r--r--drivers/misc/hwmem/hwmem-main.c726
-rw-r--r--drivers/misc/mbox.c (renamed from arch/arm/mach-ux500/mbox-db5500.c)368
-rw-r--r--drivers/misc/mbox_channels-db5500.c1273
-rw-r--r--drivers/misc/modem_audio/Kconfig6
-rw-r--r--drivers/misc/modem_audio/Makefile2
-rw-r--r--drivers/misc/modem_audio/mad.c506
-rw-r--r--drivers/misc/sim_detect.c304
-rw-r--r--drivers/misc/stm.c850
-rw-r--r--drivers/mmc/card/block.c137
-rw-r--r--drivers/mmc/card/mmc_test.c180
-rw-r--r--drivers/mmc/core/core.c71
-rw-r--r--drivers/mmc/core/core.h1
-rw-r--r--drivers/mmc/core/host.c1
-rw-r--r--drivers/mmc/host/mmci.c623
-rw-r--r--drivers/mmc/host/mmci.h24
-rw-r--r--drivers/modem/Kconfig44
-rw-r--r--drivers/modem/Makefile6
-rw-r--r--drivers/modem/m6718_spi/Kconfig83
-rw-r--r--drivers/modem/m6718_spi/Makefile15
-rw-r--r--drivers/modem/m6718_spi/debug.c490
-rw-r--r--drivers/modem/m6718_spi/modem_debug.h36
-rw-r--r--drivers/modem/m6718_spi/modem_driver.c292
-rw-r--r--drivers/modem/m6718_spi/modem_netlink.h20
-rw-r--r--drivers/modem/m6718_spi/modem_private.h106
-rw-r--r--drivers/modem/m6718_spi/modem_protocol.h24
-rw-r--r--drivers/modem/m6718_spi/modem_queue.h24
-rw-r--r--drivers/modem/m6718_spi/modem_state.c1300
-rw-r--r--drivers/modem/m6718_spi/modem_state.h36
-rw-r--r--drivers/modem/m6718_spi/modem_statemachine.h81
-rw-r--r--drivers/modem/m6718_spi/modem_util.h57
-rw-r--r--drivers/modem/m6718_spi/netlink.c182
-rw-r--r--drivers/modem/m6718_spi/protocol.c431
-rw-r--r--drivers/modem/m6718_spi/queue.c183
-rw-r--r--drivers/modem/m6718_spi/statemachine.c1406
-rw-r--r--drivers/modem/m6718_spi/util.c282
-rw-r--r--drivers/modem/mcdd.c190
-rw-r--r--drivers/modem/modem_access.c417
-rw-r--r--drivers/modem/modem_m6718.c95
-rw-r--r--drivers/modem/modem_u8500.c95
-rw-r--r--drivers/modem/shrm/Kconfig43
-rw-r--r--drivers/modem/shrm/Makefile11
-rw-r--r--drivers/modem/shrm/modem_shrm_driver.c670
-rw-r--r--drivers/modem/shrm/shrm_driver.c1439
-rw-r--r--drivers/modem/shrm/shrm_fifo.c837
-rw-r--r--drivers/modem/shrm/shrm_protocol.c1262
-rw-r--r--drivers/net/Makefile5
-rw-r--r--drivers/net/caif/caif_serial.c1
-rw-r--r--drivers/net/caif/caif_shmcore.c52
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c51
-rw-r--r--drivers/net/m6718_modem_net.c333
-rw-r--r--drivers/net/u8500_shrm.c318
-rw-r--r--drivers/power/Kconfig32
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/ab5500_btemp.c923
-rw-r--r--drivers/power/ab5500_charger.c1820
-rw-r--r--drivers/power/ab5500_fg.c1954
-rw-r--r--drivers/power/ab8500_btemp.c1152
-rw-r--r--drivers/power/ab8500_chargalg.c1989
-rw-r--r--drivers/power/ab8500_charger.c2818
-rw-r--r--drivers/power/ab8500_fg.c2498
-rw-r--r--drivers/power/abx500_chargalg.c1920
-rw-r--r--drivers/regulator/Kconfig35
-rw-r--r--drivers/regulator/Makefile5
-rw-r--r--drivers/regulator/ab5500.c625
-rw-r--r--drivers/regulator/ab8500-debug.c1777
-rw-r--r--drivers/regulator/ab8500-ext.c451
-rw-r--r--drivers/regulator/ab8500.c446
-rw-r--r--drivers/regulator/core.c48
-rw-r--r--drivers/regulator/db5500-prcmu.c334
-rw-r--r--drivers/regulator/db8500-prcmu.c118
-rw-r--r--drivers/regulator/dbx500-prcmu.c342
-rw-r--r--drivers/regulator/dbx500-prcmu.h56
-rw-r--r--drivers/rtc/Kconfig8
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/interface.c44
-rw-r--r--drivers/rtc/rtc-ab.c483
-rw-r--r--drivers/rtc/rtc-ab8500.c26
-rw-r--r--drivers/spi/Kconfig14
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-pl022.c17
-rw-r--r--drivers/spi/stm_msp.c1929
-rw-r--r--drivers/staging/Kconfig20
-rw-r--r--drivers/staging/Makefile6
-rw-r--r--drivers/staging/ab5500_sim/Makefile1
-rw-r--r--drivers/staging/ab5500_sim/ab5500-sim.c306
-rw-r--r--drivers/staging/ab5500_sim/sysfs-sim83
-rw-r--r--drivers/staging/camera_flash/Kconfig7
-rw-r--r--drivers/staging/camera_flash/Makefile5
-rw-r--r--drivers/staging/camera_flash/adp1653.c537
-rwxr-xr-xdrivers/staging/camera_flash/adp1653.h82
-rwxr-xr-xdrivers/staging/camera_flash/adp1653_plat.h24
-rw-r--r--drivers/staging/camera_flash/camera_flash.h74
-rw-r--r--drivers/staging/camera_flash/camera_flash_bitfields.h83
-rw-r--r--drivers/staging/camera_flash/flash_common.c460
-rwxr-xr-xdrivers/staging/camera_flash/flash_common.h57
-rw-r--r--drivers/staging/cg2900/Kconfig73
-rw-r--r--drivers/staging/cg2900/Makefile15
-rw-r--r--drivers/staging/cg2900/TODO23
-rw-r--r--drivers/staging/cg2900/bluetooth/Makefile9
-rw-r--r--drivers/staging/cg2900/bluetooth/btcg2900.c1198
-rw-r--r--drivers/staging/cg2900/bluetooth/cg2900_uart.c2169
-rw-r--r--drivers/staging/cg2900/bluetooth/hci_ldisc.c657
-rw-r--r--drivers/staging/cg2900/bluetooth/hci_uart.h105
-rw-r--r--drivers/staging/cg2900/board-ux500-cg2900.c366
-rw-r--r--drivers/staging/cg2900/clock-cg2900.c147
-rw-r--r--drivers/staging/cg2900/devices-cg2900-ux500.c219
-rw-r--r--drivers/staging/cg2900/devices-cg2900.c299
-rw-r--r--drivers/staging/cg2900/devices-cg2900.h59
-rw-r--r--drivers/staging/cg2900/include/cg2900.h280
-rw-r--r--drivers/staging/cg2900/include/cg2900_audio.h473
-rw-r--r--drivers/staging/cg2900/include/cg2900_hci.h19
-rw-r--r--drivers/staging/cg2900/mfd/Makefile18
-rw-r--r--drivers/staging/cg2900/mfd/cg2900_audio.c3486
-rw-r--r--drivers/staging/cg2900/mfd/cg2900_char_devices.c719
-rw-r--r--drivers/staging/cg2900/mfd/cg2900_chip.c3618
-rw-r--r--drivers/staging/cg2900/mfd/cg2900_chip.h611
-rw-r--r--drivers/staging/cg2900/mfd/cg2900_core.c715
-rw-r--r--drivers/staging/cg2900/mfd/cg2900_core.h51
-rw-r--r--drivers/staging/cg2900/mfd/cg2900_lib.c284
-rw-r--r--drivers/staging/cg2900/mfd/cg2900_lib.h61
-rw-r--r--drivers/staging/cg2900/mfd/cg2900_test.c402
-rw-r--r--drivers/staging/cg2900/mfd/stlc2690_chip.c1671
-rw-r--r--drivers/staging/cg2900/mfd/stlc2690_chip.h47
-rw-r--r--drivers/staging/cw1200/.gitignore10
-rw-r--r--drivers/staging/cw1200/Kconfig105
-rw-r--r--drivers/staging/cw1200/Makefile20
-rw-r--r--drivers/staging/cw1200/TODO10
-rw-r--r--drivers/staging/cw1200/ap.c1149
-rw-r--r--drivers/staging/cw1200/ap.h49
-rw-r--r--drivers/staging/cw1200/bh.c622
-rw-r--r--drivers/staging/cw1200/bh.h30
-rw-r--r--drivers/staging/cw1200/cw1200.h313
-rw-r--r--drivers/staging/cw1200/cw1200_plat.h28
-rw-r--r--drivers/staging/cw1200/cw1200_sdio.c469
-rw-r--r--drivers/staging/cw1200/debug.c611
-rw-r--r--drivers/staging/cw1200/debug.h168
-rw-r--r--drivers/staging/cw1200/fwio.c594
-rw-r--r--drivers/staging/cw1200/fwio.h36
-rw-r--r--drivers/staging/cw1200/ht.h43
-rw-r--r--drivers/staging/cw1200/hwio.c287
-rw-r--r--drivers/staging/cw1200/hwio.h243
-rw-r--r--drivers/staging/cw1200/itp.c739
-rw-r--r--drivers/staging/cw1200/itp.h151
-rw-r--r--drivers/staging/cw1200/main.c567
-rw-r--r--drivers/staging/cw1200/pm.c459
-rw-r--r--drivers/staging/cw1200/pm.h49
-rw-r--r--drivers/staging/cw1200/queue.c584
-rw-r--r--drivers/staging/cw1200/queue.h116
-rw-r--r--drivers/staging/cw1200/sbus.h39
-rw-r--r--drivers/staging/cw1200/scan.c446
-rw-r--r--drivers/staging/cw1200/scan.h54
-rw-r--r--drivers/staging/cw1200/sta.c1638
-rw-r--r--drivers/staging/cw1200/sta.h87
-rw-r--r--drivers/staging/cw1200/txrx.c1372
-rw-r--r--drivers/staging/cw1200/txrx.h95
-rw-r--r--drivers/staging/cw1200/wsm.c1836
-rw-r--r--drivers/staging/cw1200/wsm.h1833
-rw-r--r--drivers/staging/mmio/Kconfig11
-rw-r--r--drivers/staging/mmio/Makefile1
-rw-r--r--drivers/staging/mmio/mmio.h176
-rw-r--r--drivers/staging/mmio/st_mmio.c1173
-rw-r--r--drivers/staging/nmf-cm/Kconfig12
-rw-r--r--drivers/staging/nmf-cm/Make.config8
-rw-r--r--drivers/staging/nmf-cm/Makefile99
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/channel_engine.h101
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/cm_engine.h48
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/communication_engine.h73
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/component_engine.h403
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/configuration_engine.h28
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/control/configuration_engine.h193
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/control/control_engine.h26
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/control/irq_engine.h120
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/domain_engine.h108
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/executive_engine_mgt_engine.h28
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/memory_engine.h93
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/migration_engine.h16
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/perfmeter_engine.h41
-rw-r--r--drivers/staging/nmf-cm/cm/engine/api/repository_mgt_engine.h93
-rw-r--r--drivers/staging/nmf-cm/cm/engine/communication/fifo/inc/nmf_fifo_arm.h55
-rw-r--r--drivers/staging/nmf-cm/cm/engine/communication/fifo/src/nmf_fifo_arm.c241
-rw-r--r--drivers/staging/nmf-cm/cm/engine/communication/inc/communication.h44
-rw-r--r--drivers/staging/nmf-cm/cm/engine/communication/inc/communication_type.h60
-rw-r--r--drivers/staging/nmf-cm/cm/engine/communication/src/communication.c328
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/inc/bind.h443
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/inc/component_type.h64
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/inc/description.h108
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/inc/dspevent.h28
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/inc/initializer.h30
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/inc/instance.h222
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/inc/introspection.h109
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/inc/nmfheaderabi.h154
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/inc/template.h110
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/src/binder.c1313
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/src/binder_check.c205
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/src/component_wrapper.c1298
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/src/dspevent.c78
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/src/initializer.c383
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/src/instantiater.c829
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/src/introspection.c327
-rw-r--r--drivers/staging/nmf-cm/cm/engine/component/src/loader.c384
-rw-r--r--drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration.h37
-rw-r--r--drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration_status.h45
-rw-r--r--drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration_type.h81
-rw-r--r--drivers/staging/nmf-cm/cm/engine/configuration/src/configuration.c172
-rw-r--r--drivers/staging/nmf-cm/cm/engine/configuration/src/configuration_wrapper.c301
-rw-r--r--drivers/staging/nmf-cm/cm/engine/dsp/inc/dsp.h453
-rw-r--r--drivers/staging/nmf-cm/cm/engine/dsp/inc/semaphores_dsp.h22
-rw-r--r--drivers/staging/nmf-cm/cm/engine/dsp/mmdsp/inc/mmdsp_hwp.h959
-rw-r--r--drivers/staging/nmf-cm/cm/engine/dsp/mmdsp/inc/mmdsp_macros.h86
-rw-r--r--drivers/staging/nmf-cm/cm/engine/dsp/src/dsp.c1083
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/inc/bfd.h123
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/inc/common.h125
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/inc/elfabi.h539
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/inc/elfapi.h125
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/inc/memory.h77
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/inc/mmdsp-loadmap.h47
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/inc/mmdsp.h31
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/inc/mpcal.h20
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/inc/reloc.h35
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/src/elf64.c47
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/src/elfload.c773
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/src/elfmmdsp.c575
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/src/elfrelocate.c79
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/src/elfxx.c591
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/src/mmdsp-debug.c435
-rw-r--r--drivers/staging/nmf-cm/cm/engine/elf/src/mpcal.c6
-rw-r--r--drivers/staging/nmf-cm/cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h52
-rw-r--r--drivers/staging/nmf-cm/cm/engine/executive_engine_mgt/src/executive_engine_mgt.c405
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/inc/chunk_mgr.h41
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/inc/domain.h163
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/inc/domain_type.h59
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/inc/memory.h19
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/inc/memory_type.h151
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/inc/migration.h32
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator.h275
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator_utils.h32
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/src/chunk_mgr.c97
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/src/domain.c608
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/src/domain_wrapper.c95
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/src/memory_wrapper.c222
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/src/migration.c392
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator.c656
-rw-r--r--drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator_utils.c250
-rw-r--r--drivers/staging/nmf-cm/cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h498
-rw-r--r--drivers/staging/nmf-cm/cm/engine/os_adaptation_layer/src/os_adaptation_layer.c44
-rw-r--r--drivers/staging/nmf-cm/cm/engine/perfmeter/inc/mpcload.h21
-rw-r--r--drivers/staging/nmf-cm/cm/engine/perfmeter/inc/perfmeter_type.h33
-rw-r--r--drivers/staging/nmf-cm/cm/engine/perfmeter/src/mpcload.c119
-rw-r--r--drivers/staging/nmf-cm/cm/engine/power_mgt/inc/power.h61
-rw-r--r--drivers/staging/nmf-cm/cm/engine/power_mgt/src/cmpower.c244
-rw-r--r--drivers/staging/nmf-cm/cm/engine/repository_mgt/inc/repository_mgt.h55
-rw-r--r--drivers/staging/nmf-cm/cm/engine/repository_mgt/inc/repository_type.h26
-rw-r--r--drivers/staging/nmf-cm/cm/engine/repository_mgt/src/repository_mgt.c322
-rw-r--r--drivers/staging/nmf-cm/cm/engine/semaphores/hw_semaphores/inc/hw_semaphores.h32
-rw-r--r--drivers/staging/nmf-cm/cm/engine/semaphores/hw_semaphores/src/hw_semaphores.c171
-rw-r--r--drivers/staging/nmf-cm/cm/engine/semaphores/inc/semaphores.h25
-rw-r--r--drivers/staging/nmf-cm/cm/engine/semaphores/src/semaphores.c94
-rw-r--r--drivers/staging/nmf-cm/cm/engine/trace/inc/trace.h48
-rw-r--r--drivers/staging/nmf-cm/cm/engine/trace/inc/xtitrace.h50
-rw-r--r--drivers/staging/nmf-cm/cm/engine/trace/src/panic.c331
-rw-r--r--drivers/staging/nmf-cm/cm/engine/trace/src/trace.c221
-rw-r--r--drivers/staging/nmf-cm/cm/engine/utils/inc/convert.h21
-rw-r--r--drivers/staging/nmf-cm/cm/engine/utils/inc/mem.h19
-rw-r--r--drivers/staging/nmf-cm/cm/engine/utils/inc/string.h33
-rw-r--r--drivers/staging/nmf-cm/cm/engine/utils/inc/swap.h23
-rw-r--r--drivers/staging/nmf-cm/cm/engine/utils/inc/table.h59
-rw-r--r--drivers/staging/nmf-cm/cm/engine/utils/src/convert.c20
-rw-r--r--drivers/staging/nmf-cm/cm/engine/utils/src/mem.c27
-rw-r--r--drivers/staging/nmf-cm/cm/engine/utils/src/string.c231
-rw-r--r--drivers/staging/nmf-cm/cm/engine/utils/src/swap.c156
-rw-r--r--drivers/staging/nmf-cm/cm/engine/utils/src/table.c155
-rw-r--r--drivers/staging/nmf-cm/cm/inc/cm.h22
-rw-r--r--drivers/staging/nmf-cm/cm/inc/cm_def.h50
-rw-r--r--drivers/staging/nmf-cm/cm/inc/cm_macros.h148
-rw-r--r--drivers/staging/nmf-cm/cm/inc/cm_type.h147
-rw-r--r--drivers/staging/nmf-cm/cm_debug.c840
-rw-r--r--drivers/staging/nmf-cm/cm_debug.h28
-rw-r--r--drivers/staging/nmf-cm/cm_dma.c226
-rw-r--r--drivers/staging/nmf-cm/cm_dma.h23
-rw-r--r--drivers/staging/nmf-cm/cm_service.c129
-rw-r--r--drivers/staging/nmf-cm/cm_service.h22
-rw-r--r--drivers/staging/nmf-cm/cm_syscall.c1413
-rw-r--r--drivers/staging/nmf-cm/cmioctl.h604
-rw-r--r--drivers/staging/nmf-cm/cmld.c1403
-rw-r--r--drivers/staging/nmf-cm/cmld.h189
-rw-r--r--drivers/staging/nmf-cm/configuration.c146
-rw-r--r--drivers/staging/nmf-cm/configuration.h75
-rw-r--r--drivers/staging/nmf-cm/ee/api/panic.idt74
-rw-r--r--drivers/staging/nmf-cm/ee/api/trace.idt30
-rw-r--r--drivers/staging/nmf-cm/inc/nmf-def.h42
-rw-r--r--drivers/staging/nmf-cm/inc/nmf-limits.h106
-rw-r--r--drivers/staging/nmf-cm/inc/nmf-tracedescription.h323
-rw-r--r--drivers/staging/nmf-cm/inc/nmf_type.idt63
-rw-r--r--drivers/staging/nmf-cm/inc/type.h35
-rw-r--r--drivers/staging/nmf-cm/inc/typedef.h192
-rw-r--r--drivers/staging/nmf-cm/nmf/inc/channel_type.h40
-rw-r--r--drivers/staging/nmf-cm/nmf/inc/component_type.h26
-rw-r--r--drivers/staging/nmf-cm/nmf/inc/service_type.h93
-rw-r--r--drivers/staging/nmf-cm/osal-kernel.c1223
-rw-r--r--drivers/staging/nmf-cm/osal-kernel.h168
-rw-r--r--drivers/staging/nmf-cm/share/communication/inc/communication_fifo.h19
-rw-r--r--drivers/staging/nmf-cm/share/communication/inc/initializer.h38
-rw-r--r--drivers/staging/nmf-cm/share/communication/inc/nmf_fifo_desc.h36
-rw-r--r--drivers/staging/nmf-cm/share/communication/inc/nmf_service.h17
-rw-r--r--drivers/staging/nmf-cm/share/inc/macros.h213
-rw-r--r--drivers/staging/nmf-cm/share/inc/nmf.h46
-rw-r--r--drivers/staging/nmf-cm/share/inc/nomadik_mapping.h98
-rw-r--r--drivers/staging/nmf-cm/share/semaphores/inc/hwsem_hwp.h85
-rw-r--r--drivers/staging/nmf-cm/share/semaphores/inc/semaphores.h43
-rw-r--r--drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c1
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c316
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h1
-rw-r--r--drivers/tee/Kconfig13
-rw-r--r--drivers/tee/Makefile8
-rw-r--r--drivers/tee/tee_driver.c692
-rw-r--r--drivers/tee/tee_service.c17
-rw-r--r--drivers/tty/serial/Kconfig8
-rw-r--r--drivers/tty/serial/amba-pl011.c484
-rw-r--r--drivers/usb/core/hub.c39
-rw-r--r--drivers/usb/core/notify.c7
-rw-r--r--drivers/usb/core/usb.h4
-rw-r--r--drivers/usb/gadget/epautoconf.c6
-rw-r--r--drivers/usb/gadget/f_ecm.c24
-rw-r--r--drivers/usb/gadget/f_rndis.c4
-rw-r--r--drivers/usb/musb/Kconfig13
-rw-r--r--drivers/usb/musb/musb_core.c14
-rw-r--r--drivers/usb/musb/musb_core.h11
-rw-r--r--drivers/usb/musb/musb_gadget.c31
-rw-r--r--drivers/usb/musb/musb_host.c102
-rw-r--r--drivers/usb/musb/ux500.c408
-rw-r--r--drivers/usb/musb/ux500_dma.c125
-rw-r--r--drivers/usb/otg/Kconfig9
-rw-r--r--drivers/usb/otg/Makefile1
-rw-r--r--drivers/usb/otg/ab5500-usb.c802
-rw-r--r--drivers/usb/otg/ab8500-usb.c730
-rw-r--r--drivers/video/Kconfig6
-rw-r--r--drivers/video/Makefile3
-rw-r--r--drivers/video/av8100/Kconfig48
-rw-r--r--drivers/video/av8100/Makefile10
-rw-r--r--drivers/video/av8100/av8100.c4166
-rw-r--r--drivers/video/av8100/av8100_regs.h346
-rw-r--r--drivers/video/av8100/hdmi.c2479
-rw-r--r--drivers/video/av8100/hdmi_loc.h75
-rw-r--r--drivers/video/b2r2/Kconfig134
-rw-r--r--drivers/video/b2r2/Makefile15
-rw-r--r--drivers/video/b2r2/b2r2_blt_main.c3363
-rw-r--r--drivers/video/b2r2/b2r2_core.c2819
-rw-r--r--drivers/video/b2r2/b2r2_core.h108
-rw-r--r--drivers/video/b2r2/b2r2_debug.c338
-rw-r--r--drivers/video/b2r2/b2r2_debug.h102
-rw-r--r--drivers/video/b2r2/b2r2_filters.c376
-rw-r--r--drivers/video/b2r2/b2r2_filters.h73
-rw-r--r--drivers/video/b2r2/b2r2_generic.c3334
-rw-r--r--drivers/video/b2r2/b2r2_generic.h51
-rw-r--r--drivers/video/b2r2/b2r2_global.h119
-rw-r--r--drivers/video/b2r2/b2r2_hw.h707
-rw-r--r--drivers/video/b2r2/b2r2_input_validation.c496
-rw-r--r--drivers/video/b2r2/b2r2_input_validation.h31
-rw-r--r--drivers/video/b2r2/b2r2_internal.h590
-rw-r--r--drivers/video/b2r2/b2r2_kernel_if.c37
-rw-r--r--drivers/video/b2r2/b2r2_mem_alloc.c668
-rw-r--r--drivers/video/b2r2/b2r2_mem_alloc.h161
-rw-r--r--drivers/video/b2r2/b2r2_node_gen.c83
-rw-r--r--drivers/video/b2r2/b2r2_node_split.c3734
-rw-r--r--drivers/video/b2r2/b2r2_node_split.h124
-rw-r--r--drivers/video/b2r2/b2r2_profiler/Makefile3
-rw-r--r--drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c270
-rw-r--r--drivers/video/b2r2/b2r2_profiler_api.h66
-rw-r--r--drivers/video/b2r2/b2r2_profiler_socket.c106
-rw-r--r--drivers/video/b2r2/b2r2_profiler_socket.h22
-rw-r--r--drivers/video/b2r2/b2r2_structures.h226
-rw-r--r--drivers/video/b2r2/b2r2_timing.c22
-rw-r--r--drivers/video/b2r2/b2r2_timing.h22
-rw-r--r--drivers/video/b2r2/b2r2_utils.c633
-rw-r--r--drivers/video/b2r2/b2r2_utils.h66
-rw-r--r--drivers/video/mcde/Kconfig96
-rw-r--r--drivers/video/mcde/Makefile23
-rw-r--r--drivers/video/mcde/display-ab8500.c494
-rw-r--r--drivers/video/mcde/display-av8100.c1610
-rw-r--r--drivers/video/mcde/display-fictive.c63
-rw-r--r--drivers/video/mcde/display-generic_dsi.c309
-rw-r--r--drivers/video/mcde/display-samsung_s6d16d0.c224
-rw-r--r--drivers/video/mcde/display-sony_acx424akp_dsi.c412
-rw-r--r--drivers/video/mcde/display-vuib500-dpi.c215
-rw-r--r--drivers/video/mcde/dsilink_regs.h2037
-rw-r--r--drivers/video/mcde/mcde_bus.c274
-rw-r--r--drivers/video/mcde/mcde_debugfs.c207
-rw-r--r--drivers/video/mcde/mcde_debugfs.h25
-rw-r--r--drivers/video/mcde/mcde_display.c416
-rw-r--r--drivers/video/mcde/mcde_dss.c479
-rw-r--r--drivers/video/mcde/mcde_fb.c898
-rw-r--r--drivers/video/mcde/mcde_hw.c3709
-rw-r--r--drivers/video/mcde/mcde_mod.c69
-rw-r--r--drivers/video/mcde/mcde_regs.h5096
-rw-r--r--drivers/watchdog/Kconfig16
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/mpcore_wdt.c101
-rw-r--r--drivers/watchdog/ux500_wdt.c451
-rw-r--r--firmware/Makefile1
-rw-r--r--firmware/av8100.fw.ihex1281
-rw-r--r--include/Kbuild1
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/amba/mmci.h22
-rw-r--r--include/linux/boottime.h89
-rw-r--r--include/linux/clksrc-db5500-mtimer.h17
-rw-r--r--include/linux/compdev.h106
-rw-r--r--include/linux/cpufreq-dbx500.h16
-rw-r--r--include/linux/cpufreq.h2
-rwxr-xr-xinclude/linux/cyttsp.h114
-rw-r--r--include/linux/db8500-modem-trace.h24
-rw-r--r--include/linux/dispdev.h66
-rw-r--r--include/linux/gpio/nomadik.h (renamed from arch/arm/plat-nomadik/include/plat/gpio-nomadik.h)1
-rw-r--r--include/linux/hsi/Kbuild1
-rw-r--r--include/linux/hsi/hsi.h391
-rw-r--r--include/linux/hsi/hsi_char.h66
-rw-r--r--include/linux/hwmem.h597
-rw-r--r--include/linux/hwmon.h5
-rw-r--r--include/linux/input/bu21013.h26
-rw-r--r--include/linux/input/lps001wp.h87
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/l3g4200d.h27
-rw-r--r--include/linux/led-lm3530.h9
-rw-r--r--include/linux/leds-ab5500.h35
-rw-r--r--include/linux/leds_pwm.h1
-rw-r--r--include/linux/lsm303dlh.h63
-rw-r--r--include/linux/mfd/ab8500/bm.h547
-rw-r--r--include/linux/mfd/ab8500/denc-regs.h357
-rw-r--r--include/linux/mfd/ab8500/denc.h82
-rw-r--r--include/linux/mfd/ab8500/ux500_chargalg.h38
-rw-r--r--include/linux/mfd/abx500.h258
-rw-r--r--include/linux/mfd/abx500/ab5500-bm.h116
-rw-r--r--include/linux/mfd/abx500/ab5500-gpadc.h70
-rw-r--r--include/linux/mfd/abx500/ab5500.h27
-rw-r--r--include/linux/mfd/abx500/ab8500-gpadc.h2
-rw-r--r--include/linux/mfd/abx500/ab8500-gpio.h58
-rw-r--r--include/linux/mfd/abx500/ab8500-sysctrl.h5
-rw-r--r--include/linux/mfd/abx500/ab8500.h38
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h38
-rw-r--r--include/linux/mfd/db5500-prcmu.h149
-rw-r--r--include/linux/mfd/db8500-prcmu.h233
-rw-r--r--include/linux/mfd/dbx500-prcmu.h372
-rw-r--r--include/linux/mfd/stmpe.h2
-rw-r--r--include/linux/mfd/tc35892.h146
-rw-r--r--include/linux/mfd/tc3589x.h61
-rw-r--r--include/linux/mloader.h25
-rw-r--r--include/linux/mmc/host.h17
-rw-r--r--include/linux/modem/m6718_spi/modem_char.h26
-rw-r--r--include/linux/modem/m6718_spi/modem_driver.h165
-rw-r--r--include/linux/modem/m6718_spi/modem_net.h50
-rw-r--r--include/linux/modem/modem.h63
-rw-r--r--include/linux/modem/modem_client.h53
-rw-r--r--include/linux/modem/shrm/shrm.h23
-rw-r--r--include/linux/modem/shrm/shrm_config.h111
-rw-r--r--include/linux/modem/shrm/shrm_driver.h216
-rw-r--r--include/linux/modem/shrm/shrm_net.h44
-rw-r--r--include/linux/modem/shrm/shrm_private.h183
-rw-r--r--include/linux/moduleparam.h2
-rw-r--r--include/linux/regulator/ab5500.h33
-rw-r--r--include/linux/regulator/ab8500-debug.h21
-rw-r--r--include/linux/regulator/ab8500.h47
-rw-r--r--include/linux/regulator/db5500-prcmu.h27
-rw-r--r--include/linux/regulator/dbx500-prcmu.h92
-rw-r--r--include/linux/spi/stm_msp.h126
-rw-r--r--include/linux/sys_soc.h50
-rw-r--r--include/linux/tee.h314
-rw-r--r--include/linux/usb.h8
-rw-r--r--include/linux/usb/gadget.h2
-rw-r--r--include/linux/usb/otg.h3
-rw-r--r--include/linux/videodev2.h58
-rw-r--r--include/net/bluetooth/bluetooth.h12
-rw-r--r--include/net/bluetooth/hci.h18
-rw-r--r--include/net/bluetooth/hci_core.h9
-rw-r--r--include/net/bluetooth/sco.h4
-rw-r--r--include/sound/ux500_ab8500.h36
-rw-r--r--include/sound/ux500_ab8500_ext.h22
-rw-r--r--include/trace/Kbuild1
-rw-r--r--include/trace/stm.h228
-rw-r--r--include/video/Kbuild1
-rw-r--r--include/video/av8100.h549
-rw-r--r--include/video/b2r2_blt.h638
-rw-r--r--include/video/hdmi.h205
-rw-r--r--include/video/mcde.h391
-rw-r--r--include/video/mcde_display-ab8500.h24
-rw-r--r--include/video/mcde_display-av8100.h53
-rw-r--r--include/video/mcde_display-generic_dsi.h35
-rw-r--r--include/video/mcde_display-sony_acx424akp_dsi.h27
-rw-r--r--include/video/mcde_display-vuib500-dpi.h31
-rw-r--r--include/video/mcde_display.h149
-rw-r--r--include/video/mcde_dss.h81
-rw-r--r--include/video/mcde_fb.h65
-rw-r--r--init/Kconfig9
-rw-r--r--init/Makefile1
-rw-r--r--init/boottime.c475
-rw-r--r--init/main.c6
-rw-r--r--kernel/irq/chip.c8
-rw-r--r--kernel/kexec.c12
-rw-r--r--kernel/power/suspend.c22
-rw-r--r--kernel/printk.c12
-rw-r--r--kernel/trace/trace.c11
-rw-r--r--kernel/trace/trace_events.c3
-rw-r--r--kernel/trace/trace_sched_switch.c10
-rw-r--r--net/bluetooth/bnep/bnep.h2
-rw-r--r--net/bluetooth/bnep/core.c53
-rw-r--r--net/bluetooth/hci_conn.c51
-rw-r--r--net/bluetooth/hci_event.c6
-rw-r--r--net/bluetooth/l2cap_core.c4
-rw-r--r--net/bluetooth/lib.c2
-rw-r--r--net/bluetooth/mgmt.c4
-rw-r--r--net/bluetooth/sco.c70
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/mlme.c1
-rw-r--r--net/mac80211/work.c24
-rw-r--r--net/rfkill/core.c2
-rwxr-xr-xscripts/setlocalversion42
-rw-r--r--sound/arm/Kconfig12
-rw-r--r--sound/arm/Makefile4
-rw-r--r--sound/arm/u8500_alsa_ab8500.c2691
-rw-r--r--sound/arm/u8500_alsa_ab8500.h165
-rw-r--r--sound/arm/u8500_alsa_hdmi.c936
-rw-r--r--sound/core/pcm_lib.c2
-rw-r--r--sound/soc/Kconfig3
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/codecs/Kconfig22
-rw-r--r--sound/soc/codecs/Makefile16
-rw-r--r--sound/soc/codecs/ab3550.c1429
-rw-r--r--sound/soc/codecs/ab3550.h333
-rwxr-xr-xsound/soc/codecs/ab5500.c1805
-rw-r--r--sound/soc/codecs/ab5500.h408
-rw-r--r--sound/soc/codecs/ab8500_audio.c2960
-rw-r--r--sound/soc/codecs/ab8500_audio.h676
-rw-r--r--sound/soc/codecs/av8100_audio.c526
-rw-r--r--sound/soc/codecs/av8100_audio.h163
-rw-r--r--sound/soc/codecs/cg29xx.c772
-rw-r--r--sound/soc/codecs/cg29xx.h41
-rw-r--r--sound/soc/ux500/Kconfig67
-rw-r--r--sound/soc/ux500/Makefile46
-rwxr-xr-xsound/soc/ux500/u5500.c195
-rw-r--r--sound/soc/ux500/u8500.c245
-rw-r--r--sound/soc/ux500/ux500_ab3550.c76
-rw-r--r--sound/soc/ux500/ux500_ab3550.h19
-rwxr-xr-xsound/soc/ux500/ux500_ab5500.c132
-rwxr-xr-xsound/soc/ux500/ux500_ab5500.h28
-rw-r--r--sound/soc/ux500/ux500_ab8500.c966
-rw-r--r--sound/soc/ux500/ux500_av8100.c167
-rw-r--r--sound/soc/ux500/ux500_av8100.h19
-rw-r--r--sound/soc/ux500/ux500_cg29xx.c227
-rw-r--r--sound/soc/ux500/ux500_cg29xx.h20
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c1007
-rw-r--r--sound/soc/ux500/ux500_msp_dai.h83
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.c1019
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.h41
-rw-r--r--sound/soc/ux500/ux500_pcm.c430
-rw-r--r--sound/soc/ux500/ux500_pcm.h44
1041 files changed, 297218 insertions, 3729 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget b/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget
index d548eaac230..3bf505e7227 100644
--- a/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget
+++ b/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget
@@ -19,3 +19,16 @@ Description:
Possible values are:
1 -> ignore the FUA flag
0 -> obey the FUA flag
+
+What: /sys/devices/platform/_UDC_/gadget/host_request
+Date: December 2010
+Contact: Pavan Kondeti <pkondeti@...>
+Description:
+		An OTG 2.0 compliant host keeps polling the OTG 2.0 peripheral
+		for a host role request. Setting the host_request flag tells
+		the host to give up the host role to the peripheral.
+
+ 1 -> host role is requested
+ 0 -> no effect (automatically cleared upon reset/disconnect)
+
+ (_UDC_ is the name of the USB Device Controller driver)
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-ab5500-core-adc b/Documentation/ABI/testing/sysfs-devices-platform-ab5500-core-adc
new file mode 100644
index 00000000000..fcfc0ed26fb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-platform-ab5500-core-adc
@@ -0,0 +1,20 @@
+What: /sys/devices/platform/ab5500-core.0/ab5500-adc.0/adc0volt
+Date: Nov 2011
+KernelVersion: 3.0
+Contact: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
+Description:	The adc0volt attribute allows userspace to read the
+		voltage of the device connected to the General Purpose
+		Analog to Digital Converter (GPADC) channel-0. Voltage
+		conversion from analog to digital happens only when this
+		attribute is read. The GPADC block is present in the AB5500
+		chip and has an input voltage range of 0-1.8 V for GPADC
+		Ch-0. It provides the result of the converted voltage in
+		10 bits. Attributes for other GPADC channels may appear in
+		this path later. For the minimum and maximum input voltage
+		range of each channel, please refer to the ST-Ericsson
+		AB5500 datasheet. An example usage of the GPADC is an ALS
+		device connected to the channel, where user space adapts
+		the LCD backlight brightness based on the ambient light
+		value read from the attribute.
+Users: HAL.
+
diff --git a/Documentation/ABI/testing/sysfs-socinfo b/Documentation/ABI/testing/sysfs-socinfo
new file mode 100644
index 00000000000..afd9da2fa76
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-socinfo
@@ -0,0 +1,16 @@
+What: /sys/socinfo
+Date: March 2011
+Contact:	Maxime Coquelin <maxime.coquelin-nonst@stericsson.com>
+Description:
+ The /sys/socinfo directory contains information about the
+		System-on-Chip. It is only available if the platform implements it.
+		This directory contains two kinds of attributes:
+ - common attributes:
+ * machine: the name of the machine.
+ * family: the family name of the SoC
+ - SoC-specific attributes: The SoC vendor can declare attributes
+ to export some strings to user-space, like the serial-number for
+ example.
+
+Users:
+	User-space applications which need these kinds of attributes.
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 66725a3d30d..023616787da 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -14,7 +14,13 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
80211.xml debugobjects.xml sh.xml regulator.xml \
alsa-driver-api.xml writing-an-alsa-driver.xml \
- tracepoint.xml drm.xml media_api.xml
+ tracepoint.xml drm.xml media_api.xml \
+ shrm.xml touchp.xml \
+ tc_keypad.xml prcmu-fw-api.xml cg2900_fm_radio.xml \
+ synaptics_rmi4_touchp.xml db5500_keypad.xml \
+ u5500_LogicalMailbox.xml cg2900.xml \
+ lsm303dlh.xml ske_keypad.xml ste_ff_vibra.xml ux500_usb.xml \
+ lps001wp_prs.xml
include $(srctree)/Documentation/DocBook/media/Makefile
diff --git a/Documentation/DocBook/cg2900.tmpl b/Documentation/DocBook/cg2900.tmpl
new file mode 100644
index 00000000000..34667f1b1d9
--- /dev/null
+++ b/Documentation/DocBook/cg2900.tmpl
@@ -0,0 +1,1381 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="STE-Connectivity-template">
+ <bookinfo>
+ <title>CG2900 Driver</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Henrik</firstname>
+ <surname>Possung</surname>
+ <affiliation>
+ <address>
+ <email>henrik.possung@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Par-Gunnar</firstname>
+ <surname>Hjalmdahl</surname>
+ <affiliation>
+ <address>
+ <email>par-gunnar.p.hjalmdahl@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson SA</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Connectivity</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+ <toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+ This documentation describes the functions provided by the ST-Ericsson CG2900 Driver for enabling
+ ST-Ericsson CG2900 Combo Controller Hardware.
+
+ </para>
+ </chapter>
+
+ <chapter id="gettingstarted">
+ <title>Getting Started</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+ There are no special compilation flags needed to build the CG2900 driver.
+ </para>
+ <para>
+ There must be patch and settings files that match the used chip version inside the firmware folder.
+ The files:
+ <itemizedlist>
+ <listitem><para>CG2900_XXXX_YYYY_patch.fw</para></listitem>
+ <listitem><para>CG2900_XXXX_YYYY_settings.fw</para></listitem>
+ </itemizedlist>
+      where XXXX is the chip revision and YYYY is the chip sub-version returned by the HCI Read Local Version command.
+ </para>
+
+ <!-- TODO: If the driver needs preparations to be used
+ (special compilation flags, files in the file system,
+ knowledge about a specific domain etc), specify it here.
+ Remove this chapter completely if there is nothing
+ to mention and there is no tutorial needed.
+ Do NOT change the chapter id or title! -->
+ <!-- TODO: This guideline for this chapter may be extended
+ during the user-guide guidelines drop. -->
+
+ <section id="basic-tutorial">
+ <title>Basic Tutorial</title>
+ <para>
+ To enable the ST-Ericsson CG2900 driver using KConfig go to <constant>Device Drivers -> Staging Drivers</constant>
+        and enable the CG2900 Driver. If BlueZ shall be used as the Bluetooth stack, also enable the CG2900 Bluetooth driver.
+        Depending on this choice, the driver will either be included as an LKM or built into the kernel.
+        If building as an LKM, several files will be generated:
+ <itemizedlist>
+ <listitem><para>cg2900.ko which contains the main driver</para></listitem>
+ <listitem><para>cg2900_char_devices.ko which contains the character devices</para></listitem>
+ <listitem><para>cg2900_uart.ko which contains the UART driver</para></listitem>
+ <listitem><para>cg2900_chip.ko which contains the CG2900 chip specific driver</para></listitem>
+ <listitem><para>stlc2690_chip.ko which contains the STLC2690 chip specific driver</para></listitem>
+ <listitem><para>cg2900_audio.ko which contains the CG2900 audio driver</para></listitem>
+ <listitem><para>btcg2900.ko which contains the registration and mapping towards the BlueZ Bluetooth stack</para></listitem>
+ </itemizedlist>
+
+ <!-- TODO: Provide a basic tutorial, outlining how
+ to test the presence of the driver,
+ for example how to configure, compile and run the
+ example(s).
+ Several sections with different tutorials,
+ all located within the Getting Started
+ chapter may be provided. -->
+ </para>
+
+ <para>
+ <!-- TODO: This guideline for this section may be extended
+ during the user-guide guidelines drop. -->
+ </para>
+ </section>
+
+ </chapter>
+
+ <chapter id="concepts">
+ <title>Concepts</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+    The ST-Ericsson CG2900 driver works as a multiplexer between different users, such as a Bluetooth stack and an FM driver,
+ and the connectivity chip. The driver supports multiple physical transports, although currently only UART is implemented.
+ Apart from just transporting data between stacks and the chip, the ST-Ericsson CG2900 driver also deals with power handling,
+ powering up and down the chip and also downloading necessary patches and settings for the chip to start up properly.
+ <!-- TODO: A brief introduction about the concepts
+ which are introduced by the driver.
+ Remove this chapter completely if there are no
+ special concepts introduced by this driver.
+ Do NOT change the chapter id or title! -->
+ <!-- TODO: This guideline for this chapter may be extended
+ during the user-guide guidelines drop. -->
+ </para>
+ </chapter>
+
+ <chapter id="tasks">
+ <title>Tasks</title>
+ <!-- Do NOT change the chapter id or title! -->
+
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Platform device handling</term>
+ <listitem>
+ <para>
+ Each H:4 channel is created as a multifunction device. The driver for each channel must register as a platform driver for this channel,
+ supplying <function>probe</function> and <function>remove</function> functions.
+ When a transport is opened to the chip, the CG2900 chip driver will allocate and instantiate a platform device for each channel. This
+ means that at this point the device framework in the Kernel will call the <function>probe</function> function for the platform driver. It is then the responsibility for the platform driver
+ to register its callback functions and save its device to the platform data structure inside the probed device.
+ </para><para>
+            When the transport is removed, the CG2900 chip driver will free each platform device and the platform driver's <function>remove</function> function will then be called.
+ </para><para>
+ For user space, the user space character device will not exist until the character device driver has been probed.
+ The character devices will be removed when the character device driver is removed, i.e. when the transport is removed.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Opening a channel</term>
+ <listitem>
+ <para>
+            In order to be able to send and receive data on an H:4 channel, the user (i.e. the respective stack) must open the channel.
+            Opening a channel will make it possible to send data to and receive data from the connectivity controller.
+            If the controller was previously powered down, opening a channel will also cause the controller to be powered up.
+            When the chip is powered up, patches and settings for the ARM subsystem will be downloaded as well.
+            Other IPs within the controller must, however, download their respective patches and settings.
+            If the chip was already powered up when the channel was opened, no patches will be downloaded automatically.
+
+ <variablelist>
+ <varlistentry>
+ <term>Opening a channel from Kernel space</term>
+ <listitem>
+ <para>
+ When a stack is placed in Kernel space, it shall open a channel by calling the API function <function>open</function> inside the platform data of the device.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ <variablelist>
+ <varlistentry>
+ <term>Opening a channel from User space</term>
+ <listitem>
+ <para>
+ When a stack is placed in User space, it shall open a channel by calling the syscall function <function>open</function> on the corresponding file.
+ The files are located in folder <filename>/dev/</filename> and are named <filename>cg2900_gnss</filename> and similar. Each file
+ corresponds to one H:4 channel.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Closing a channel</term>
+ <listitem>
+ <para>
+            When a user, i.e. a stack, no longer needs a functionality, it should close the corresponding H:4 channel.
+            This is usually done when a user disables a certain feature, for example Bluetooth. The reason why the channels
+            need to be closed is that the ST-E CG2900 driver will free the resources and also shut down the controller if there are
+            no more active users of the chip. This lowers the power consumption, thereby increasing battery life.
+
+ <variablelist>
+ <varlistentry>
+ <term>Closing a channel from Kernel space</term>
+ <listitem>
+ <para>
+ When a stack is placed in Kernel space, it shall close a channel by calling the API function
+ <function>close</function> inside the platform data of the device.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ <variablelist>
+ <varlistentry>
+ <term>Closing a channel from User space</term>
+ <listitem>
+ <para>
+ When a stack is placed in User space, it shall close a channel by calling the syscall function
+ <function>close</function> on the corresponding file.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Writing to a channel</term>
+ <listitem>
+ <para>
+ When a stack (Bluetooth, FM, or GNSS) wants to send a packet it shall perform a write operation.
+ The packet shall not contain the H:4 header since this is added by the CG2900 driver.
+            All other data shall, however, be present in the packet in the correct format for that HCI channel.
+            The CG2900 users need to perform flow control over the channels, so any ticket handling
+            or similar must be handled by the respective stack.
+
+ <variablelist>
+ <varlistentry>
+ <term>Writing to a channel from Kernel space</term>
+ <listitem>
+ <para>
+                    When a stack is placed in Kernel space, it shall start by allocating a packet of the correct size using
+                    <function>alloc_skb</function> inside the platform data of the device. This function will return an sk_buff (socket buffer) structure that
+                    has the necessary space reserved for CG2900 driver operation.
+                    The stack shall then copy the data, preferably using <function>skb_put</function>, and then call
+                    <function>write</function> inside the platform data of the device to perform the write operation. When the function returns, the buffer has
+                    been transferred and there is no need for the calling function to free the buffer. If the operation fails, i.e.
+                    an error code is returned, the caller must however free the buffer.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ <variablelist>
+ <varlistentry>
+ <term>Writing to a channel from User space</term>
+ <listitem>
+ <para>
+ When a stack is placed in User space, it shall call the <function>write</function> function on
+                    the corresponding file to perform a transmit operation. After the function returns, the data has been
+                    copied and is considered sent.
+ The caller does not need to preserve the data.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ <variablelist>
+ <varlistentry>
+ <term>Writing to FM_Radio and FM_Audio channel</term>
+ <listitem>
+ <para>
+                    The CG2900 driver only supports FM legacy commands. The reason is that FM_Radio and FM_Audio use the same H:4 channel towards the chip;
+                    in order to multiplex the FM user commands, the data packets are parsed by the CG2900 driver.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Reading from a channel</term>
+ <listitem>
+ <para>
+ When a stack (Bluetooth, FM, or GNSS) wants to receive a packet it shall perform a receive operation.
+ The packet returned does not contain the H:4 header since this is removed by the CG2900 driver.
+            All other data in the packet is in the correct format for that HCI channel.
+            The CG2900 driver does not perform any flow control over the H:4 channel, so any ticket handling
+            or similar must be handled by the respective stack.
+
+ <variablelist>
+ <varlistentry>
+ <term>Reading from a channel from Kernel space</term>
+ <listitem>
+ <para>
+ When a stack is placed in Kernel space, it has to supply a callback function for the receive functionality when being probed.
+ This callback function will be called when the ST-E CG2900 driver has received a packet.
+                    The packet received will always be a complete HCI packet, i.e. there is no fragmentation on the HCI layer.
+                    When the packet has been received it is the responsibility of the receiver to ensure that the packet is freed using
+                    <function>kfree_skb</function> when it is no longer needed.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ <variablelist>
+ <varlistentry>
+ <term>Reading from a channel from User space</term>
+ <listitem>
+ <para>
+ When a stack is placed in User space, it shall call the <function>read</function> function on
+                    the corresponding file to perform a receive operation. This function will read as many bytes as are present,
+                    up to the size of the supplied buffer. If no data is available the function will block until data becomes available, a reset
+                    occurs, or the channel is closed.
+                    For smooth operation it is recommended to use the <function>poll</function> functionality on the file, preferably
+                    from a dedicated thread. This way read and reset operations can be monitored in one common thread while transmit
+                    operations may continue unblocked in a separate thread. A sketch of such a poll-based loop is shown below.
+ </para>
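+                  <para>
+                    A minimal sketch of such a poll-based receive loop, assuming the GNSS channel file has already been opened
+                    as in the User space example later in this chapter. Which revents bits signal a reset is an assumption of
+                    this sketch, and handle_packet() is a hypothetical helper in the stack:
+                    <programlisting>
+  struct pollfd p;
+  uint8_t buf[100];
+  int bytes;
+
+  p.fd = fd; /* fd returned by open("/dev/cg2900_gnss", O_RDWR) */
+  p.events = POLLIN;
+
+  for (;;) {
+          if (poll(&amp;p, 1, -1) &lt; 0)
+                  break; /* poll error */
+          if (p.revents &amp; (POLLERR | POLLHUP))
+                  break; /* assumed to indicate reset or closed channel; re-open the channel */
+          if (p.revents &amp; POLLIN) {
+                  bytes = read(fd, buf, sizeof(buf));
+                  if (bytes &gt; 0)
+                          handle_packet(buf, bytes); /* hypothetical packet handler */
+          }
+  }
+                    </programlisting>
+                  </para>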
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Reset handling</term>
+ <listitem>
+ <para>
+ The stacks shall always try to avoid performing Reset operations. The Reset will result in a hardware reset of the controller
+ and will therefore cause all existing links and settings to be lost. All stacks using the controller must also be informed
+ about the reset and handle it in a proper way.
+ The reset operation should only be used when there is no other option to get the controller into a working state, for example
+ if the controller has stopped answering to commands.
+            After the hardware reset, the ST-E CG2900 driver will automatically deregister the channel, so it has to be reopened.
+
+ <variablelist>
+ <varlistentry>
+ <term>Reset handling from Kernel space</term>
+ <listitem>
+ <para>
+ When a stack is placed in Kernel space, it initiates a Reset operation by calling <function>reset</function> inside the platform data of the device.
+ This will trigger a hardware reset of the controller. When the hardware reset is finished all registered users will be called
+ through respective reset callback. When the callback function is finished the registered device will be closed and when all
+ opened users have been informed and closed, the chip is shutdown. This is similar to a closure of all opened channels.
+ The stack will then have to open the channel in order to use the channel once again.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ <variablelist>
+ <varlistentry>
+ <term>Reset handling from User space</term>
+ <listitem>
+ <para>
+ When a stack is placed in User space, it shall call the <function>ioctl</function> function on
+ the corresponding file to perform a reset operation. The command parameter <constant>CG2900_CHAR_DEV_IOCTL_RESET</constant>
+ shall be used when calling <function>ioctl</function>.
+ When the <function>ioctl</function> returns, the stack shall close the channel and then re-open it again. This must be done so
+ the channel is registered correctly in Kernel space.
+                    For smooth operation it is recommended to use the <function>poll</function> functionality on the file, preferably
+                    from a dedicated thread. This way read and reset operations can be monitored in one common thread while transmit
+                    operations may continue unblocked in a separate thread.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Example code Kernel space</term>
+ <listitem>
+ <para>
+ This example will open the FM channel, write a packet, read a packet and then close the channel.
+
+ <programlisting>
+ bool event_received;
+
+ void read_cb(struct cg2900_user_data *dev, struct sk_buff *skb)
+ {
+ event_received = true;
+ kfree_skb(skb);
+ }
+
+ void reset_cb(struct cg2900_user_data *dev)
+ {
+ /* Handle reset. Device will be automatically closed by the CG2900 driver */
+ }
+
+ void example_open(struct my_info *info)
+ {
+ struct cg2900_user_data *user = dev_get_platdata(info->dev);
+ int err;
+
+ if (user->opened) {
+ dev_err(info->dev, "Error! Channel already opened!\n");
+ return;
+ }
+
+ err = user->open(user);
+ if (err) {
+ dev_err(info->dev, "Error (%d)! Couldn't register!\n", err);
+ }
+ }
+
+ void example_close(struct my_info *info)
+ {
+ struct cg2900_user_data *user = dev_get_platdata(info->dev);
+
+ if (user->opened)
+ user->close(user);
+ }
+
+ void example_write_and_read(struct my_info *info, uint8_t *data, int len)
+ {
+ int err;
+ struct cg2900_user_data *user = dev_get_platdata(info->dev);
+ struct sk_buff *skb = user->alloc_skb(len, GFP_KERNEL);
+
+ if (skb) {
+ memcpy(skb_put(skb, len), data, len);
+ err = user->write(user, skb);
+ if (!err) {
+ event_received = false;
+
+ while (!event_received) {
+ /* Wait for ack event. Received in read_cb() above */
+                        schedule_timeout_interruptible(50); /* sleep 50 jiffies between checks */
+ }
+ } else {
+ dev_err(info->dev, "Couldn't write to controller (%d)\n", err);
+ kfree_skb(skb);
+ }
+ }
+ }
+
+ static int __devinit my_probe(struct platform_device *pdev)
+ {
+ struct my_info *info;
+ struct cg2900_user_data *pf_data;
+
+            info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ dev_set_drvdata(&amp;pdev->dev, info);
+ info->dev = &amp;pdev->dev;
+
+ pf_data = dev_get_platdata(&amp;pdev->dev);
+ pf_data->dev = &amp;pdev->dev;
+ pf_data->read_cb = read_cb;
+ pf_data->reset_cb = reset_cb;
+
+ /*
+ * Alert my user that we are ready to start and give
+ * it my info pointer.
+ */
+ return my_user_start(info);
+ }
+
+ static int __devexit my_remove(struct platform_device *pdev)
+ {
+ struct my_info *info;
+
+ info = dev_get_drvdata(&amp;pdev->dev);
+ my_user_stop(info);
+ kfree(info);
+ return 0;
+ }
+
+ static struct platform_driver my_driver = {
+ .driver = {
+ .name = "cg2900-fm",
+ .owner = THIS_MODULE,
+ },
+ .probe = my_probe,
+ .remove = __devexit_p(my_remove),
+ };
+
+ static int __init my_init(void)
+ {
+ return platform_driver_register(&amp;my_driver);
+ }
+
+ static void __exit my_exit(void)
+ {
+ platform_driver_unregister(&amp;my_driver);
+ }
+
+ module_init(my_init);
+ module_exit(my_exit);
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Example code User space</term>
+ <listitem>
+ <para>
+ This example will open the GNSS channel, write a packet, read a packet and then close the channel.
+ In this example all functions are performed in the same thread.
+            It is however advised to perform <function>read</function> and <function>ioctl</function> operations in a separate thread,
+            preferably using <function>poll</function>.
+
+ <programlisting>
+ struct my_info_t {
+ int fd;
+ };
+
+ static struct my_info_t my_info;
+
+ /* This is a fake command and has nothing to do with real GNSS commands.
+ * Note that the command does NOT contain the H:4 header.
+ * The header is added by the ST-E CG2900 driver.
+ */
+ static const uint8_t tx_cmd[] = {0x12, 0x34, 0x56};
+
+ int main(int argc, char **argv)
+ {
+ uint8_t rx_buffer[100];
+ int rx_bytes = 0;
+ int err;
+
+ my_info.fd = open("/dev/cg2900_gnss", O_RDWR);
+ if (my_info.fd &lt; 0) {
+ printf("Error on open file: %d (%s)\n", errno, strerror(errno));
+ return errno;
+ }
+ if (0 &gt; write(my_info.fd, tx_cmd, sizeof(tx_cmd))) {
+ printf("Error on write file: %d (%s)\n", errno, strerror(errno));
+ return errno;
+ }
+ /* Read will sleep until there is data available */
+ rx_bytes = read(my_info.fd, rx_buffer, 100);
+ if (rx_bytes &gt;= 0) {
+ printf("Received %d bytes\n", rx_bytes);
+ } else {
+ printf("Error on read file: %d (%s)\n", errno, strerror(errno));
+ return errno;
+ }
+ err = close(my_info.fd);
+ if (err) {
+ printf("Error on close file: %d (%s)\n", errno, strerror(errno));
+ return errno;
+ }
+ return 0;
+ }
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ <!-- TODO: Task descriptions are step by step instructions
+ for performing specific actions and tasks.
+ Each task is typically one scenario.
+ Each task is described in a separate (section).
+ (section) tags can be nested, which is
+ especially recommended if
+ the task consists of several scenarios.
+ Remove this chapter completely if there are no
+ tasks to mention and there is no tutorial needed.
+ Do NOT change the chapter id or title! -->
+ <!-- TODO: This guideline for this chapter may be extended
+ during the user-guide guidelines drop. -->
+ </para>
+ </chapter>
+
+ <chapter id="driver-configuration">
+ <title>Driver Configuration and Interaction</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+ N/A
+ <!-- TODO: Use this paragraph as an introduction to driver
+ configuration and interaction. Describe the big picture. -->
+ <!-- TODO: This chapter contains driver specific way to perform
+ configuration and interaction. The chapter includes a
+ number of sections. They should not be removed and if
+ the driver does not have the specific support for
+ configuration or interaction should the text "not
+ applicable" be inserted. Do NOT change the chapter id
+ or title! -->
+ <!-- TODO: This guideline for this chapter may be extended
+ during the user-guide guidelines drop. -->
+ </para>
+
+ <section id="driver-implemented-operations">
+ <title>Implemented operations in driver</title>
+ <para>
+ <!-- TODO: Describe the actual usage of the driver. Specify the actual
+ implemented operations in struct <structname>file_operations</structname>
+ and any other set of operations. Create a table with two columns
+ (see example in intro chapter how to create a table).
+ Column one list all operations supported (read,
+ write, open, close, ioctl etc) and column two a description of the
+ semantics of the operations in the specific context of the device
+ driver from the users perspective. Document the operations in a way
+ that a user of the driver can be helped. -->
+ </para>
+ <para>
+ <table>
+ <title> Supported device driver operations when using character device </title>
+ <tgroup cols="2"><tbody>
+ <row><entry> open </entry> <entry> Opening a character device will register the caller to that HCI channel.</entry> </row>
+ <row><entry> release </entry> <entry> Releasing a character device will deregister the caller from that HCI channel</entry> </row>
+ <row><entry> poll </entry> <entry> Polling a character device will check if there is data to read on that HCI channel</entry> </row>
+ <row><entry> read </entry> <entry> Reading from a character device reads from that HCI channel</entry> </row>
+ <row><entry> write </entry> <entry> Writing to a character device writes to that HCI channel</entry> </row>
+ <row><entry> unlocked_ioctl </entry> <entry> Performing IO control on a character device will perform special operations such as reset on that HCI channel</entry> </row>
+ </tbody></tgroup>
+ </table>
+ </para>
+ </section>
+
+ <section id="driver-loading">
+ <title>Driver loading parameters</title>
+ <para>
+ <!-- TODO: Describe parameters that can be specified at kernel
+ driver loading with insmod or modprobe. If the driver
+ has no parameters to be specified at load time, replace this
+ text with "Not Applicable". -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>uart_default_baud</term>
+ <listitem>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Parameter type</term>
+ <listitem><synopsis><type>int</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Default value</term>
+ <listitem><para>115200</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Runtime readable/modifiable</term>
+ <listitem><para>Readable</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The parameter uart_default_baud in cg2900_uart.c defines the baud rate used after a chip has just been powered up.
+ It shall be set to the default baud rate of the controller.
+ For ST-Ericsson controllers STLC2690 and CG2900 this value shall be 115200.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>uart_high_baud</term>
+ <listitem>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Parameter type</term>
+ <listitem><synopsis><type>int</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Default value</term>
+ <listitem><para>3000000</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Runtime readable/modifiable</term>
+ <listitem><para>Modifiable</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The parameter uart_high_baud in cg2900_uart.c defines the baud rate to use for normal data transfer.
+ This should normally be the highest allowed by the system with regards to flow control, clocks, etc.
+ For ST-Ericsson controllers STLC2690 and CG2900 the following values are supported:
+ <itemizedlist>
+ <listitem><para>57600</para></listitem>
+ <listitem><para>115200</para></listitem>
+ <listitem><para>230400</para></listitem>
+ <listitem><para>460800</para></listitem>
+ <listitem><para>921600</para></listitem>
+ <listitem><para>2000000</para></listitem>
+ <listitem><para>3000000</para></listitem>
+ <listitem><para>4000000</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>uart_debug</term>
+ <listitem>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Parameter type</term>
+ <listitem><synopsis><type>int</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Default value</term>
+ <listitem><para>0</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Runtime readable/modifiable</term>
+ <listitem><para>Modifiable</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The parameter uart_debug in cg2900_uart.c enables or disables dumping of all
+ data transmitted and received through the UART.
+ 0 means disabled and non-zero value means enabled.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>bd_address</term>
+ <listitem>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Parameter type</term>
+ <listitem><synopsis><type>array (Entered as comma separated value)</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Default value</term>
+ <listitem><para>0x00 0x80 0xDE 0xAD 0xBE 0x00</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Runtime readable/modifiable</term>
+ <listitem><para>Modifiable</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The parameter bd_address in cg2900_core.c defines the Bluetooth device address to use for the current device.
+ The value is an array of 6 bytes and shall be entered as a comma separated value.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>sleep_timeout_ms</term>
+ <listitem>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Parameter type</term>
+ <listitem><synopsis><type>int</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Default value</term>
+ <listitem><para>10000</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Runtime readable/modifiable</term>
+ <listitem><para>Modifiable</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The parameter sleep_timeout_ms in cg2900_core.c defines the sleep timeout for data transmission in milliseconds.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ <!-- TODO: This guideline for this section may be extended
+ during the user-guide guidelines drop. -->
+ </para>
+ </section>
+
+ <section id="driver-ioctl">
+ <title>Driver IO Control</title>
+ <para>
+ <!-- TODO: Describe driver parameters that can be modified
+ in runtime. Make a list of all device-dependent request code with
+ description of arguments, meaning etc. If the driver has no IO control
+ interface, replace this text with "Not Applicable". -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term><constant>CG2900_CHAR_DEV_IOCTL_RESET</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Set</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>int</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>CG2900_CHAR_DEV_IOCTL_RESET</constant> IOCTL starts a reset
+ of the connectivity chip. This will affect the current open channel and
+ all other open channels as well.
+ </para><para>
+ IOCTL value created using <constant>_IOW('U', 210, int)</constant>.
+ </para><para>
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If reset is performed without errors the IOCTL function will return 0.</para></listitem>
+ <listitem><para>A negative value will indicate error.</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><constant>CG2900_CHAR_DEV_IOCTL_CHECK4RESET</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Query</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>int</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>CG2900_CHAR_DEV_IOCTL_CHECK4RESET</constant> IOCTL checks if a reset
+ has been performed on a device.
+ </para><para>
+ IOCTL value created using <constant>_IOR('U', 212, int)</constant>.
+ </para><para>
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If device is still open the IOCTL function will return 0.</para></listitem>
+ <listitem><para>If reset has occurred the IOCTL function will return 1.</para></listitem>
+ <listitem><para>If device has been closed the IOCTL function will return 2.</para></listitem>
+ <listitem><para>A negative value will indicate error.</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><constant>CG2900_CHAR_DEV_IOCTL_GET_REVISION</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Query</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>struct cg2900_rev_data</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>CG2900_CHAR_DEV_IOCTL_GET_REVISION</constant> IOCTL returns the revision value
+ and the sub-version value of the local connectivity controller if such information is available.
+ </para><para>
+ IOCTL value created using <constant>_IOR('U', 213, struct cg2900_rev_data)</constant>.
+ </para><para>
+              The returned values are according to information that may be retrieved from the chip manufacturer.
+              One example is ST-Ericsson CG2900 PG2.0, which has revision 0x0200 and sub-version 0x0000.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
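+      <para>
+        A minimal user space sketch of the reset IOCTLs described above. The IOCTL values are taken from the
+        descriptions in this section and are normally provided by the driver header; passing NULL as the third
+        <function>ioctl</function> argument is an assumption of this sketch:
+        <programlisting>
+  /* Values as documented above; normally taken from the driver header. */
+  #define CG2900_CHAR_DEV_IOCTL_RESET       _IOW('U', 210, int)
+  #define CG2900_CHAR_DEV_IOCTL_CHECK4RESET _IOR('U', 212, int)
+
+  int reset_channel(int fd)
+  {
+          if (ioctl(fd, CG2900_CHAR_DEV_IOCTL_RESET, NULL) &lt; 0)
+                  return -1; /* reset failed */
+
+          /* 0 = still open, 1 = reset has occurred, 2 = closed, negative = error */
+          return ioctl(fd, CG2900_CHAR_DEV_IOCTL_CHECK4RESET, NULL);
+  }
+        </programlisting>
+      </para>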
+ </section>
+
+ <section id="driver-sysfs">
+ <title>Driver Interaction with Sysfs</title>
+ <para>
+ <!-- TODO: Describe data available for read and write on the drivers
+ Sysfs entry. Specify where the entry for the device is located in
+ Sysfs such as <filename>/sys/devices/*</filename>, <filename>/sys/devices/*</filename>
+ , etc.
+ Specify the data types for the attributes. Specify if the
+ attributes are read-only or write-only. If the driver has no Sysfs
+ interface, replace this text with "Not Applicable". -->
+ Not Applicable
+ </para>
+ </section>
+
+ <section id="driver-proc">
+ <title>Driver Interaction using /proc filesystem</title>
+ <para>
+ Not Applicable
+ <!-- TODO: Describe data available for read and write on the drivers
+ /proc entry. Specify where the entry for the device is located.
+ Specify the data types for the attributes. Specify if the
+ attributes are read-only or writeonly. If the driver has no /proc
+ interface, replace this text with "Not Applicable". -->
+ </para>
+ </section>
+
+ <section id="driver-other">
+ <title>Other means for Driver Interaction</title>
+ <para>
+ <!-- TODO: Does the driver have any configurations files? Describe other means
+ for driver status access or configuration. If the driver has no other
+ means (besides the one in already described in this chapter), replace
+ this text with "Not Applicable". -->
+ Not Applicable
+ </para>
+ </section>
+
+ <section id="driver-node">
+ <title>Driver Node File</title>
+ <variablelist>
+ <varlistentry>
+ <term>CG2900 main device</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_driver0</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The cg2900_driver represents the main parent node for all other character devices supplied in the ST-Ericsson CG2900 driver except for the Test device. It does not support any operations such as read or write.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>BT Command</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_bt_cmd</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The cg2900_bt_cmd is the device for the HCI Bluetooth command channel.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>BT ACL</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_bt_acl</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The cg2900_bt_acl is the device for the HCI Bluetooth ACL channel.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>BT Event</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_bt_evt</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The cg2900_bt_evt is the device for the HCI Bluetooth event channel.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>FM Radio</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_fm_radio</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The cg2900_fm_radio is the device for the HCI FM Radio channel.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>GNSS</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_gnss</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The cg2900_gnss is the device for the HCI GNSS channel.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Debug</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_debug</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The cg2900_debug is the device for the HCI Debug channel.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ST-Ericsson Tools</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_ste_tools</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The cg2900_ste_tools is the device for the HCI ST-Ericsson tools channel.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>HCI Logger</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_hci_logger</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The cg2900_hci_logger is the device for the HCI logger channel.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Test stub</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_test</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+            <para>The cg2900_test is the device for performing module tests of the ST-Ericsson CG2900 driver. It acts as a stub replacing the transport towards the chip. It is a misc device.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>BT Audio</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_bt_audio</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+            <para>The cg2900_bt_audio is the device for sending HCI BT Audio control commands to the chip.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>FM Audio</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_fm_audio</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+            <para>The cg2900_fm_audio is the device for sending HCI FM Audio control commands to the chip.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>Core</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_core</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+            <para>The cg2900_core is a device for turning the chip on and off. Note that other devices will also turn the chip on/off if needed.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>CG2900 Audio</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_audio</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The cg2900_audio is a device for testing the CG2900 Audio driver from User space.
+ It replicates the normal CG2900 Audio interface through <constant>write/read</constant> operations.
+                The <constant>write</constant> command is used as follows:
+ <itemizedlist>
+ <listitem><para>4 byte op code (see below)</para></listitem>
+ <listitem><para>Data field according to respective CG2900 Audio function (no session ID needed)</para></listitem>
+ </itemizedlist>
+                If the operation fails, the <constant>write</constant> operation will return the error.
+                The op codes (4 bytes in size) are:
+ <itemizedlist>
+ <listitem><para>0x00000001 = CHAR_DEV_OP_CODE_SET_DAI_CONF</para></listitem>
+ <listitem><para>0x00000002 = CHAR_DEV_OP_CODE_GET_DAI_CONF</para></listitem>
+ <listitem><para>0x00000003 = CHAR_DEV_OP_CODE_CONFIGURE_ENDPOINT</para></listitem>
+ <listitem><para>0x00000004 = CHAR_DEV_OP_CODE_CONNECT_AND_START_STREAM</para></listitem>
+ <listitem><para>0x00000005 = CHAR_DEV_OP_CODE_STOP_STREAM</para></listitem>
+ </itemizedlist>
+
+ The <constant>read</constant> command is used for the commands <constant>CHAR_DEV_OP_CODE_GET_DAI_CONF</constant>
+ and <constant>CHAR_DEV_OP_CODE_CONNECT_AND_START_STREAM</constant> if the corresponding commands are successful.
+ The returned data will be formatted accordingly:
+ <itemizedlist>
+ <listitem><para>4 byte op code (see below)</para></listitem>
+ <listitem><para>Data field according to normal CG2900 Audio functions, e.g. stream handle or configuration</para></listitem>
+ </itemizedlist>
+                The <constant>CHAR_DEV_OP_CODE_GET_DAI_CONF</constant> is a bit special since it requires an endpoint in-parameter
+                (when calling <constant>write</constant>) to return the corresponding DAI configuration when calling <constant>read</constant>.
+ </para>
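+            <para>
+              A hedged sketch of the write/read framing only, using the CHAR_DEV_OP_CODE_GET_DAI_CONF op code listed above.
+              The size and layout of the data field (the endpoint in-parameter and the returned DAI configuration) follow the
+              CG2900 Audio functions and are assumed here as DATA_LEN; audio_fd is assumed to be an open file descriptor for
+              <filename>/dev/cg2900_audio</filename>:
+              <programlisting>
+  uint32_t op_code = 0x00000002; /* CHAR_DEV_OP_CODE_GET_DAI_CONF */
+  uint8_t msg[4 + DATA_LEN];     /* DATA_LEN: size of the endpoint in-parameter (assumed) */
+
+  memcpy(msg, &amp;op_code, sizeof(op_code)); /* 4 byte op code first */
+  /* fill msg + 4 with the endpoint in-parameter here */
+
+  if (write(audio_fd, msg, sizeof(msg)) &lt; 0)
+          return errno; /* write returns the error if the operation fails */
+
+  /* on success, read back the op code followed by the DAI configuration */
+  if (read(audio_fd, msg, sizeof(msg)) &lt; 0)
+          return errno;
+              </programlisting>
+            </para>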
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>HCI Raw</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/cg2900_hci_raw</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The cg2900_hci_raw is the device for raw access to HCI interface.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+ </section>
+
+
+ </chapter>
+
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Driver supports only one user per HCI channel.</term>
+ <listitem>
+ <para>
+          To simplify the design and keep the API simple and reliable, the driver only supports
+          one user per HCI channel.
+ <!-- TODO: Briefly describe the limitation, unless all
+ information is already present in the title.
+ Use full english sentences.
+ Repeat the varlistentry for each limitation.
+ If none are known, replace this varlistentry
+ with the one below. -->
+ <!-- TODO: This guideline for this chapter may be extended
+ during the user-guide guidelines drop. -->
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>HCI Raw channel requires exclusive access to chip.</term>
+ <listitem>
+ <para>
+          The cg2900_hci_raw channel cannot be opened if any other channel except the hci_logger channel is already opened.
+          Likewise, no other channel except the hci_logger channel can be opened when cg2900_hci_raw is already opened.
+          This guarantees that different users do not interfere with each other and prevents flow control issues.
+ <!-- TODO: Briefly describe the limitation, unless all
+ information is already present in the title.
+ Use full english sentences.
+ Repeat the varlistentry for each limitation.
+ If none are known, replace this varlistentry
+ with the one below. -->
+ <!-- TODO: This guideline for this chapter may be extended
+ during the user-guide guidelines drop. -->
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+<chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ List of public functions.
+ </para>
+ <!-- Do NOT change the chapter id or title! -->
+ <!-- TODO: Replace with link to appropriate headerfile(s).
+ One per row, ensure the
+ exclamation mark is on the first column! If no
+ appropriate header file exist describing a public interface,
+ replace the inclusion with a paragraph containing the text
+ "Not Applicable" -->
+ <section id="cg2900.h">
+ <title>cg2900.h</title>
+!Edrivers/staging/cg2900/mfd/cg2900_core.c
+!Idrivers/staging/cg2900/include/cg2900.h
+ </section>
+ <section id="cg2900_audio.h">
+ <title>cg2900_audio.h</title>
+!Edrivers/staging/cg2900/mfd/cg2900_audio.c
+ </section>
+
+</chapter>
+
+<chapter id="internal-functions">
+ <title>Internal Functions Provided</title>
+ <para>
+ List of internal functions.
+ </para>
+ <!-- Do NOT change the chapter id or title! -->
+ <!-- TODO: Replace with link to appropriate headerfile(s),
+ source file(s), or both. One per row, ensure the
+ exclamation mark is on the first column! If no
+ appropriate header or source file exist describing a public interface,
+ replace the inclusion with a paragraph containing the text
+ "Not Applicable"-->
+ <section id="cg2900_lib.h">
+ <title>cg2900_lib.h</title>
+!Edrivers/staging/cg2900/mfd/cg2900_lib.c
+ </section>
+</chapter>
+</book>
diff --git a/Documentation/DocBook/cg2900_fm_radio.tmpl b/Documentation/DocBook/cg2900_fm_radio.tmpl
new file mode 100644
index 00000000000..2cdbf146ff4
--- /dev/null
+++ b/Documentation/DocBook/cg2900_fm_radio.tmpl
@@ -0,0 +1,2025 @@
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="STE-CG2900-fm-driver-template">
+ <bookinfo>
+ <title>V4L FM Radio Driver for CG2900</title>
+ <authorgroup>
+ <author>
+ <firstname>Hemant</firstname>
+ <surname>Gupta</surname>
+ <affiliation>
+ <address>
+ <email>hemant.gupta@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+ <subjectset>
+ <subject>
+ <subjectterm>Connectivity</subjectterm>
+ </subject>
+ </subjectset>
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+ <toc></toc>
+ <chapter id="intro">
+ <title>Introduction</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+ This documentation describes the functions provided by the CG2900 FM Driver.
+ </para>
+ </chapter>
+ <chapter id="gettingstarted">
+ <title>Getting Started</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+ There are no special compilation flags needed to build the CG2900
+ FM Driver.
+ </para>
+ <para>
+      There must be coefficient and firmware files that match the used chip version inside the firmware folder.
+ The files:
+ <itemizedlist>
+ <listitem><para>cg2900_fm_bt_src_coeff_info.fw.org</para></listitem>
+ <listitem><para>cg2900_fm_ext_src_coeff_info.fw.org</para></listitem>
+ <listitem><para>cg2900_fm_fm_coeff_info.fw.org</para></listitem>
+ <listitem><para>cg2900_fm_fm_prog_info.fw.org</para></listitem>
+ </itemizedlist>
+      handle the mapping between the chip version and the correct firmware files (firmware and coefficient files).
+      The necessary firmware and coefficient files should be placed with the extension <constant>.fw.org</constant>.
+      Note that there is a limitation in the kernel firmware system regarding the name length of a file.
+ </para>
+ <section id="basic-tutorial">
+ <title>Basic Tutorial</title>
+ <para>
+ To enable the CG2900 FM Driver using KConfig go to <constant>Device Drivers -> Multimedia devices </constant>
+ and enable the following:
+ <itemizedlist>
+ <listitem><para>Video For Linux</para></listitem>
+ <listitem><para>Enable Video For Linux API 1 compatible Layer</para></listitem>
+ <listitem><para>Radio Adapters</para></listitem>
+ <listitem><para>Radio Adapter -> ST-Ericsson CG2900 FM Radio support</para></listitem>
+ </itemizedlist>
+      Select the driver as a built-in kernel object.
+ </para>
+ </section>
+ </chapter>
+ <chapter id="concepts">
+ <title>Concepts</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+      The CG2900 FM driver acts as an interface between Video4Linux and the CG2900 Protocol Driver. It configures the FM chip in FM Rx or FM Tx mode. It also sends the unformatted RDS data to the application for decoding while in FM Rx mode, and sends the formatted RDS data to the FM chip while in Tx mode.
+ </para>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>FM Driver Working</term>
+ <listitem>
+ <para>
+          In order to send and receive data on an H:4 channel, the FM Driver opens the channel by registering with the CG2900 Protocol driver. After this, the FM driver encapsulates the user operation into specific HCI commands, sends that data to the CG2900 Connectivity Controller and waits until the response for the previous command is received. In this way the FM Driver maintains flow control.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+ <chapter id="Tasks">
+ <title>Tasks</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <section id="Switching-On-FM">
+ <title>Switch On FM</title>
+ <para>
+ FM specific tasks
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Switching On FM</term>
+ <listitem>
+ <para>
+          For switching on FM, the character device /dev/radio0 should be opened from user space. This sets the FM Radio in Idle mode. For configuring the FM Radio in Rx or Tx mode, the IOCTLs VIDIOC_S_TUNER and VIDIOC_S_MODULATOR should be used, respectively.
+ <programlisting>
+ int fd;
+ fd = open("/dev/radio0", O_RDONLY);
+ if(fd &lt; 0) {
+ printf("open:error!!!\n");
+ goto err;
+ }
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="Switching-Off-FM">
+ <title>Switch Off FM</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Switching Off FM</term>
+ <listitem>
+ <para>
+ For switching OFF FM the character device /dev/radio0 should be closed from user space.
+ <programlisting>
+ if(fd &gt;= 0)
+ close(fd);
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="Rx-Mode">
+ <title>Switching To FM Rx Mode</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Switching To FM Rx Mode</term>
+ <listitem>
+ <para>
+ For switching on FM Rx mode the IOCTL VIDIOC_S_TUNER should be called with appropriate parameters.
+ <programlisting>
+ memset(&amp;tuner, 0, sizeof(tuner));
+ tuner.index = 0;
+ tuner.rxsubchans |= V4L2_TUNER_SUB_STEREO;
+ if (ioctl(fd, VIDIOC_S_TUNER, &amp;tuner) &lt; 0) {
+ printf("VIDIOC_S_TUNER:error!!\n");
+              return;
+          }
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="Tx-Mode">
+ <title>Switching To FM Tx Mode</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Switching To FM Tx Mode</term>
+ <listitem>
+ <para>
+ For switching on FM Tx mode the IOCTL VIDIOC_S_MODULATOR should be
+ called with appropriate parameters.
+ <programlisting>
+ memset(&amp;modulator, 0, sizeof(modulator));
+ modulator.index = 0;
+ modulator.txsubchans |= V4L2_TUNER_SUB_STEREO;
+ if (ioctl(fd, VIDIOC_S_MODULATOR, &amp;modulator) &lt; 0) {
+ printf("VIDIOC_S_MODULATOR:error!!\n");
+              return;
+          }
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="FM-Standby">
+ <title>Standby</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Making the FM Radio go in Standby Mode</term>
+ <listitem>
+ <para>
+ For making the FM Radio go in Standby mode, the IOCTL VIDIOC_S_CTRL should be used. The id of the v4l2_control structure should be set to V4L2_CID_CG2900_RADIO_CHIP_STATE and the value of v4l2_control structure should be set as V4L2_CG2900_RADIO_STANDBY.
+ <programlisting>
+ struct v4l2_control sctrl;
+ int ret;
+ sctrl.id = V4L2_CID_CG2900_RADIO_CHIP_STATE;
+ sctrl.value = V4L2_CG2900_RADIO_STANDBY;
+ ret = ioctl(fd, VIDIOC_S_CTRL, &amp;sctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_CTRL:error!!\n");
+ }
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="Powerup-from-standby">
+ <title>Powering Up FM From Standby Mode</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Powering Up FM Radio from Standby Mode</term>
+ <listitem>
+ <para>
+ To power up the FM Radio again from Standby mode, the IOCTL VIDIOC_S_CTRL should be used. The id of the v4l2_control structure should be set to V4L2_CID_CG2900_RADIO_CHIP_STATE and the value should be set to V4L2_CG2900_RADIO_POWERUP.
+ <programlisting>
+ struct v4l2_control sctrl;
+ int ret;
+ sctrl.id = V4L2_CID_CG2900_RADIO_CHIP_STATE;
+ sctrl.value = V4L2_CG2900_RADIO_POWERUP;
+ ret = ioctl(fd, VIDIOC_S_CTRL, &amp;sctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_CTRL:error!!\n");
+ }
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="tune-frequency">
+ <title>Tune Channel</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Tune to a particular station</term>
+ <listitem>
+ <para>
+ For tuning to a particular station, the IOCTL VIDIOC_S_FREQUENCY should be used. The frequency field of the v4l2_frequency structure must be given in V4L2 format (the example below converts from Hz).
+ <programlisting>
+ struct v4l2_frequency freq;
+ int ret;
+ /* Convert frequency in Hz to V4L2 Format */
+ freq.frequency = (frequency * 2)/ 125;
+ ret = ioctl(fd, VIDIOC_S_FREQUENCY, &amp;freq);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_FREQUENCY:error!!\n");
+ }
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="get-frequency">
+ <title>Get Tuned Channel</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Get the Currently Tuned Station Frequency</term>
+ <listitem>
+ <para>
+ For retrieving the currently tuned station, the IOCTL VIDIOC_G_FREQUENCY should be used. The frequency returned in the v4l2_frequency structure is in V4L2 format (the example below converts it back to Hz).
+ <programlisting>
+ struct v4l2_frequency freq;
+ int ret;
+ ret = ioctl(fd, VIDIOC_G_FREQUENCY, &amp;freq);
+ if (ret &lt; 0) {
+ printf("VIDIOC_G_FREQUENCY:error!!\n");
+ *frequency = 0;
+ return;
+ }
+ /* Convert frequency to Hz from V4L2 Format */
+ *frequency = (freq.frequency * 125)/2;
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="get-signal-strength">
+ <title>Retreive Signal Strength</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Retrieve Signal Strength</term>
+ <listitem>
+ <para>
+ For retrieving the signal strength of the currently tuned channel in FM Rx mode, the IOCTL VIDIOC_G_TUNER should be called. The current signal strength is represented by the signal field of the v4l2_tuner structure.
+ <programlisting>
+ void get_signal_strength(int *rssi)
+ {
+ struct v4l2_tuner tuner;
+ int ret;
+ memset(&amp;tuner, 0, sizeof(tuner));
+ tuner.index = 0;
+ ret = ioctl(fd, VIDIOC_G_TUNER, &amp;tuner);
+ if (ret &lt; 0) {
+ printf("VIDIOC_G_TUNER:error!!\n");
+ *rssi = 0;
+ return;
+ }
+ *rssi = tuner.signal;
+ }
+ </programlisting>
+Note: Currently the retrieved signal strength is a raw decimal value and not in "dBuV"; an appropriate external conversion is required.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="band-scan">
+ <title>Band Scan</title>
+ <para>
+ <!-- Driver loading Parameters:Not Applicable -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Band Scan</term>
+ <listitem>
+ <para>
+ For doing a band scan, i.e. searching for all available stations in the entire FM band, the IOCTL VIDIOC_S_CTRL should be used with the id of the v4l2_control structure set to V4L2_CID_CG2900_RADIO_BANDSCAN and the value set to V4L2_CG2900_RADIO_BANDSCAN_START. If the IOCTL returns successfully, a common thread (which should have been created at the start of the application and already polls the FM driver for multiple other interrupts, including events related to the Block Scan and Search Frequency operations) polls with an infinite timeout, iteratively until the end of the user-space application. When a poll completes, the thread makes an IOCTL call with VIDIOC_G_EXT_CTRLS with the parameter id set to V4L2_CID_CG2900_RADIO_GET_INTERRUPT; the data structure FmInterrupt.controls-&gt;string associated with V4L2_CID_CG2900_RADIO_GET_INTERRUPT will then contain the interrupt retrieved from FMD. Interrupts received in this manner from FMD can be any of the following.
+ <itemizedlist>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_UNKNOWN</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_SEARCH_COMPLETED</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_BAND_SCAN_COMPLETED</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_BLOCK_SCAN_COMPLETED</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_SCAN_CANCELLED</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_MONO_STEREO_TRANSITION</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_DEVICE_RESET</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_RDS_RECEIVED</para></listitem>
+ </itemizedlist>
+
+For Band Scan the interrupt shall be V4L2_CG2900_RADIO_INTERRUPT_BAND_SCAN_COMPLETED; an appropriate handler should then be called from the thread to retrieve the found stations along with their RSSI using the IOCTL VIDIOC_G_EXT_CTRLS, with parameters as described in the example code. Note that the common thread used here for capturing synchronous as well as asynchronous events is FmInterruptMonitoringThread. It is also used in the other sections, i.e. Block Scan, Cancel Scan, Search Frequency and Mono Stereo Transition.
+ <programlisting>
+ void Band_Scan()
+ {
+ struct v4l2_control sctrl;
+ int ret;
+ sctrl.id = V4L2_CID_CG2900_RADIO_BANDSCAN;
+ sctrl.value = V4L2_CG2900_RADIO_BANDSCAN_START;
+ ret = ioctl(fd, VIDIOC_S_CTRL, &amp;sctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_CTRL:error!!\n");
+ }
+ /* the monitoring thread is normally created once at application start */
+ pthread_create(&amp;fmScanThread, NULL, FmInterruptMonitoringThread, NULL);
+ }
+
+ static void *FmInterruptMonitoringThread(void *param)
+ {
+ struct v4l2_ext_controls FmInterrupt;
+ struct pollfd pollFd;
+ int *interrupt_buffer_pointer = NULL;
+ int index, ret, count = 0;
+ int interrupt, interrupt_reason;
+ int ret_val = 0, timeout;
+ int err;
+
+ while(closeApp) {
+ pollFd.fd = fd;
+ pollFd.events = POLLRDNORM;
+ /* wait infinitely for interrupt */
+ timeout = -1;
+ ret = poll(&amp;pollFd, 1, timeout);
+ if(!closeApp)
+ break;
+ if(ret) {
+ if(pollFd.revents &amp; POLLRDNORM)
+ {
+ /* Get the interrupt */
+ FmInterrupt.count = 0;
+ FmInterrupt.ctrl_class = V4L2_CTRL_CLASS_USER;
+ FmInterrupt.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ if(!FmInterrupt.controls)
+ goto error;
+ FmInterrupt.controls-&gt;id = V4L2_CID_CG2900_RADIO_GET_INTERRUPT;
+ FmInterrupt.controls-&gt;size = 2;
+ FmInterrupt.controls-&gt;string = (int *)malloc(sizeof(int) * FmInterrupt.controls-&gt;size);
+ interrupt_buffer_pointer = FmInterrupt.controls-&gt;string;
+ if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;FmInterrupt) &lt; 0) {
+ printf("VIDIOC_G_EXT_CTRLS:error!!\n");
+ ret_val = -1;
+ goto error_free_ext_control_string;
+ }
+
+ if(!ret_val) {
+ interrupt = *interrupt_buffer_pointer;
+ interrupt_reason = *(interrupt_buffer_pointer + 1);
+ printf("Interrupt = %d, Result = %d\n", interrupt, interrupt_reason);
+ if(interrupt_reason == 0) {
+ switch(interrupt)
+ {
+ case V4L2_CG2900_RADIO_INTERRUPT_BAND_SCAN_COMPLETED:
+ /* Band Scan Completed */
+ HandleBandScanCompletion();
+ otherOperationInProgress = 0;
+ break;
+
+ }
+ }
+ }
+error_free_ext_control_string:
+ free(FmInterrupt.controls-&gt;string);
+error_free_ext_control_control:
+ free(FmInterrupt.controls);
+error:
+ otherOperationInProgress = 0;
+ } else {
+ printf ("FmInterruptMonitoringThread : poll returned = %d\n", ret);
+ }
+ }
+ }
+ return 0;
+ }
+
+ static void HandleBandScanCompletion()
+ {
+ struct v4l2_ext_controls scanResult;
+ long * band_scan_pointer = NULL;
+ int err;
+ int index, ret, count = 0;
+
+ /* Get the Number Of Channels */
+ scanResult.count = 0;
+ scanResult.ctrl_class = V4L2_CTRL_CLASS_USER;
+ scanResult.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ if(!scanResult.controls)
+ goto done;
+ scanResult.controls-&gt;id = V4L2_CID_CG2900_RADIO_BANDSCAN_GET_RESULTS;
+ scanResult.controls-&gt;size = 0;
+ scanResult.controls-&gt;string = NULL;
+ err = ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp; scanResult);
+
+ if (err &lt; 0 &amp;&amp; errno != ENOSPC) {
+ printf("VIDIOC_G_EXT_CTRLS:error!!\n");
+ goto error_free_ext_control_control;
+ }
+
+ if(scanResult.controls-&gt;size &gt; 0 )
+ {
+ scanResult.controls-&gt;string = (long *)malloc(sizeof(long) * 2 * scanResult.controls-&gt;size );
+ band_scan_pointer = scanResult.controls-&gt;string;
+ printf("\n\n\n==================================\n");
+ printf("\nNumber of Channels Found = %d \n", scanResult.controls-&gt;size);
+ printf("\n==================================\n");
+ if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;scanResult) &lt; 0) {
+ printf("VIDIOC_G_EXT_CTRLS:error!!\n");
+ goto error_free_ext_control_string;
+ }
+ printf("\n================================\n");
+ printf("\nSNo. Frequency(MHz) RSSI\n");
+ printf("\n================================\n");
+ for (index = 0, count = 0; index &lt; scanResult.controls-&gt;size; index ++, count +=2) {
+ printf("%d %d.%d %d\n", index + 1,
+ MEGAHRTZ((*(band_scan_pointer +count + 0) * 125) / 2),
+ *(band_scan_pointer + count + 1));
+ }
+ printf("\n================================\n");
+ error_free_ext_control_string:
+ free(band_scan_pointer);
+ }
+ else if(scanResult.controls-&gt;size == 0)
+ {
+ printf("\nNo channels found during scanning!!\n");
+ }
+ error_free_ext_control_control:
+ free(scanResult.controls);
+ done:
+ otherOperationInProgress = 0;
+ }
+
+ </programlisting>
+ Note: Currently the retrieved signal strength is a raw decimal value and not in "dBuV"; an appropriate external conversion is required.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="block-scan">
+ <title>Block Scan</title>
+ <para>
+ <!-- Driver loading Parameters:Not Applicable -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Block Scan</term>
+ <listitem>
+ <para>
+ The Block Scan functionality takes two inputs, the start and stop frequency (V4L2 compliant), and enables the host to scan all channels within that range for RSSI values. The measured channels are stored in a list in order of channel number; the block scan feature can thus be used to identify "empty" channels for transmission. For doing a block scan, the IOCTL VIDIOC_S_EXT_CTRLS should be used with the parameter id set to V4L2_CID_CG2900_RADIO_BLOCKSCAN_START. If the IOCTL returns successfully, a common thread (which should have been created at the start of the application and already polls the FM driver for multiple other interrupts, including events related to the Band Scan and Search Frequency operations) polls with an infinite timeout, iteratively until the end of the user-space application. When a poll completes, the thread makes an IOCTL call with VIDIOC_G_EXT_CTRLS with the parameter id set to V4L2_CID_CG2900_RADIO_GET_INTERRUPT; the data structure FmInterrupt.controls-&gt;string associated with V4L2_CID_CG2900_RADIO_GET_INTERRUPT will then contain the interrupt retrieved from FMD. Interrupts received in this manner from FMD can be any of the following.
+ <itemizedlist>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_UNKNOWN</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_SEARCH_COMPLETED</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_BAND_SCAN_COMPLETED</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_BLOCK_SCAN_COMPLETED</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_SCAN_CANCELLED</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_MONO_STEREO_TRANSITION</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_DEVICE_RESET</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_INTERRUPT_RDS_RECEIVED</para></listitem>
+ </itemizedlist>
+
+For Block Scan the interrupt shall be V4L2_CG2900_RADIO_INTERRUPT_BLOCK_SCAN_COMPLETED; an appropriate handler should then be called from the thread to retrieve the scanned channels along with their RSSI using the IOCTL VIDIOC_G_EXT_CTRLS, with parameters as described in the example code. Note that the common thread used here for capturing synchronous as well as asynchronous events is FmInterruptMonitoringThread. It is also used in the other sections, i.e. Band Scan, Cancel Scan, Search Frequency and Mono Stereo Transition.
+ <programlisting>
+ void Block_Scan()
+ {
+ struct v4l2_ext_controls ext_ctrl;
+ long *p = NULL;
+ int index;
+ int ret_val;
+ if(1 == mode) {
+ otherOperationInProgress = 1;
+ ext_ctrl.ctrl_class = V4L2_CTRL_CLASS_USER;
+ ext_ctrl.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ ext_ctrl.count = 0;
+ ext_ctrl.controls-&gt;id = V4L2_CID_CG2900_RADIO_BLOCKSCAN_START;
+ ext_ctrl.controls-&gt;size = 2;
+ ext_ctrl.controls-&gt;string = (long *)malloc(sizeof(long) * ext_ctrl.controls-&gt;size);
+ p = ext_ctrl.controls-&gt;string;
+ *p = (StartFreq * 2)/ 125;
+ *(p + 1) = (EndFreq * 2)/ 125;
+ if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ext_ctrl) &lt; 0)
+ printf("APP_BlockScanStart:VIDIOC_S_EXT_CTRLS:error!!\n");
+ free(ext_ctrl.controls-&gt;string);
+ free(ext_ctrl.controls);
+ /* the monitoring thread is normally created once at application start */
+ pthread_create(&amp;fmBlockScanThread, NULL, FmInterruptMonitoringThread, NULL);
+ }
+ }
+
+ static void *FmInterruptMonitoringThread(void *param)
+ {
+ struct v4l2_ext_controls FmInterrupt;
+ struct pollfd pollFd;
+ int *interrupt_buffer_pointer = NULL;
+ int index, ret, count = 0;
+ int interrupt, interrupt_reason;
+ int ret_val = 0, timeout;
+ int err;
+
+ while(closeApp) {
+ pollFd.fd = fd;
+ pollFd.events = POLLRDNORM;
+ /* wait infinitely for interrupt */
+ timeout = -1;
+ ret = poll(&amp;pollFd, 1, timeout);
+ if(!closeApp)
+ break;
+ if(ret) {
+ if(pollFd.revents &amp; POLLRDNORM)
+ {
+ /* Get the interrupt */
+ FmInterrupt.count = 0;
+ FmInterrupt.ctrl_class = V4L2_CTRL_CLASS_USER;
+ FmInterrupt.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ if(!FmInterrupt.controls)
+ goto error;
+ FmInterrupt.controls-&gt;id = V4L2_CID_CG2900_RADIO_GET_INTERRUPT;
+ FmInterrupt.controls-&gt;size = 2;
+ FmInterrupt.controls-&gt;string = (int *)malloc(sizeof(int) * FmInterrupt.controls-&gt;size);
+ interrupt_buffer_pointer = FmInterrupt.controls-&gt;string;
+ if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;FmInterrupt) &lt; 0) {
+ printf("VIDIOC_G_EXT_CTRLS:error!!\n");
+ ret_val = -1;
+ goto error_free_ext_control_string;
+ }
+
+ if(!ret_val) {
+ interrupt = *interrupt_buffer_pointer;
+ interrupt_reason = *(interrupt_buffer_pointer + 1);
+ printf("Interrupt = %d, Result = %d\n", interrupt, interrupt_reason);
+ if(interrupt_reason == 0) {
+ switch(interrupt)
+ {
+ case V4L2_CG2900_RADIO_INTERRUPT_BLOCK_SCAN_COMPLETED:
+ /* Block Scan Completed */
+ HandleBlockScanCompletion();
+ otherOperationInProgress = 0;
+ break;
+ }
+ }
+ }
+error_free_ext_control_string:
+ free(FmInterrupt.controls-&gt;string);
+error_free_ext_control_control:
+ free(FmInterrupt.controls);
+error:
+ otherOperationInProgress = 0;
+ } else {
+ printf ("FmInterruptMonitoringThread : poll returned = %d\n", ret);
+ }
+ }
+ }
+ return 0;
+ }
+
+ static void HandleBlockScanCompletion()
+ {
+ struct v4l2_ext_controls blockscanResult;
+ long * block_scan_pointer = NULL;
+ int index, ret;
+ int err;
+ int current_grid = -1;
+ FILE *fp;
+ long start_freq = StartFreq;
+ long next_freq_offset = 0;
+
+ fp = fopen("/sys/module/radio_cg2900/parameters/grid", "r");
+ if(fp != NULL)
+ {
+ /* Retrieve the currently set grid to determine whether the next channel is 50 kHz, 100 kHz or 200 kHz apart */
+ fscanf(fp, "%d", &amp;current_grid);
+ fclose(fp);
+ }
+
+ if(current_grid == 0) {
+ next_freq_offset = 50000;
+ } else if (current_grid == 1) {
+ next_freq_offset = 100000;
+ } else if (current_grid == 2) {
+ next_freq_offset = 200000;
+ }
+
+ /* Get the Number Of Channels */
+ blockscanResult.count = 0;
+ blockscanResult.ctrl_class = V4L2_CTRL_CLASS_USER;
+ blockscanResult.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ if(!blockscanResult.controls)
+ goto done;
+ blockscanResult.controls-&gt;id = V4L2_CID_CG2900_RADIO_BLOCKSCAN_GET_RESULTS;
+ blockscanResult.controls-&gt;size = 0;
+
+ blockscanResult.controls-&gt;string = NULL;
+ err = ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;blockscanResult);
+
+ if (err &lt; 0 &amp;&amp; errno != ENOSPC) {
+ printf("VIDIOC_G_EXT_CTRLS:error!!\n");
+ goto error_free_ext_control_control;
+ }
+
+ if(blockscanResult.controls-&gt;size &gt; 0)
+ {
+ blockscanResult.controls-&gt;string = (long *)malloc(sizeof(long) * blockscanResult.controls-&gt;size );
+ block_scan_pointer = blockscanResult.controls-&gt;string;
+ if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;blockscanResult) &lt; 0) {
+ printf("VIDIOC_G_EXT_CTRLS:error!!\n");
+ goto error_free_ext_control_string;
+ }
+ printf("\n================================\n");
+ printf("\nMHz. RSSI\n");
+ printf("\n================================\n");
+ for (index = 0; index &lt; blockscanResult.controls-&gt;size; index ++) {
+ printf("%d.%d %d\n", MEGAHRTZ(start_freq), *(block_scan_pointer + index));
+ start_freq += next_freq_offset;
+ }
+ printf("\n================================\n");
+ error_free_ext_control_string:
+ free(block_scan_pointer);
+ }
+ else if(blockscanResult.controls-&gt;size == 0)
+ {
+ printf("\nNo channels found during Block Scan!!\n");
+ }
+ error_free_ext_control_control:
+ free(blockscanResult.controls);
+ done:
+ otherOperationInProgress = 0;
+ }
+
+
+ </programlisting>
+ Note: Currently the retrieved signal strength is a raw decimal value and not in "dBuV"; an appropriate external conversion is required.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="cancel-scan-seek">
+ <title>Cancel Scan</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Cancel Scan/Seek</term>
+ <listitem>
+ <para>
+ This is used for stopping an active Band Scan, Seek operation or Block Scan. The IOCTL VIDIOC_S_CTRL should be used with the parameter id of the v4l2_control structure. For example, in case of Band Scan the parameter id should be set to V4L2_CID_CG2900_RADIO_BANDSCAN and the value of the v4l2_control structure should be set to V4L2_CG2900_RADIO_BANDSCAN_STOP. The example thread FmInterruptMonitoringThread shown in the following code snippet shall have already been started as mentioned in the Band Scan and Block Scan sections; it will receive the asynchronous event V4L2_CG2900_RADIO_INTERRUPT_SCAN_CANCELLED.
+ <programlisting>
+ struct v4l2_control sctrl;
+ int ret;
+ sctrl.id = V4L2_CID_CG2900_RADIO_BANDSCAN;
+ sctrl.value = V4L2_CG2900_RADIO_BANDSCAN_STOP;
+ ret = ioctl(fd, VIDIOC_S_CTRL, &amp;sctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_CTRL:error!!\n");
+ }
+
+ static void *FmInterruptMonitoringThread(void *param)
+ {
+ struct v4l2_ext_controls FmInterrupt;
+ struct pollfd pollFd;
+ int *interrupt_buffer_pointer = NULL;
+ int index, ret, count = 0;
+ int interrupt, interrupt_reason;
+ int ret_val = 0, timeout;
+ int err;
+
+ while(closeApp) {
+ pollFd.fd = fd;
+ pollFd.events = POLLRDNORM;
+ /* wait infinitely for interrupt */
+ timeout = -1;
+ ret = poll(&amp;pollFd, 1, timeout);
+ if(!closeApp)
+ break;
+ if(ret) {
+ if(pollFd.revents &amp; POLLRDNORM)
+ {
+ /* Get the interrupt */
+ FmInterrupt.count = 0;
+ FmInterrupt.ctrl_class = V4L2_CTRL_CLASS_USER;
+ FmInterrupt.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ if(!FmInterrupt.controls)
+ goto error;
+ FmInterrupt.controls-&gt;id = V4L2_CID_CG2900_RADIO_GET_INTERRUPT;
+ FmInterrupt.controls-&gt;size = 2;
+ FmInterrupt.controls-&gt;string = (int *)malloc(sizeof(int) * FmInterrupt.controls-&gt;size);
+ interrupt_buffer_pointer = FmInterrupt.controls-&gt;string;
+ if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;FmInterrupt) &lt; 0) {
+ printf("VIDIOC_G_EXT_CTRLS:error!!\n");
+ ret_val = -1;
+ goto error_free_ext_control_string;
+ }
+
+ if(!ret_val) {
+ interrupt = *interrupt_buffer_pointer;
+ interrupt_reason = *(interrupt_buffer_pointer + 1);
+ printf("Interrupt = %d, Result = %d\n", interrupt, interrupt_reason);
+ if(interrupt_reason == 0) {
+ switch(interrupt)
+ {
+ case V4L2_CG2900_RADIO_INTERRUPT_SCAN_CANCELLED:
+ /* Scan/Search/Block Scan Cancelled */
+ printf(" Scan cancelled by user\n");
+ otherOperationInProgress = 0;
+ break;
+ }
+ }
+ }
+error_free_ext_control_string:
+ free(FmInterrupt.controls-&gt;string);
+error_free_ext_control_control:
+ free(FmInterrupt.controls);
+error:
+ otherOperationInProgress = 0;
+ } else {
+ printf ("FmInterruptMonitoringThread : poll returned = %d\n", ret);
+ }
+ }
+ }
+ return 0;
+ }
+
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="rds-receive">
+ <title>RDS Receive</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>RDS Receive</term>
+ <listitem>
+ <para>
+ For enabling/disabling RDS for FM Rx, the IOCTL VIDIOC_S_TUNER should be used with the rxsubchans parameter of the v4l2_tuner structure: V4L2_TUNER_SUB_RDS should be set to enable RDS and must not be set to disable RDS. Once RDS data is available, the application waiting on poll() is signalled. If the interrupt retrieved using V4L2_CID_CG2900_RADIO_GET_INTERRUPT is V4L2_CG2900_RADIO_INTERRUPT_RDS_RECEIVED, RDS data can be retrieved using the read() functionality of the CG2900 FM driver; an illustrative sketch of such a read follows the example below. The RDS data received from the FM Driver should be parsed in user space to retrieve RDS information, i.e. Radio Text, Programme Service Name, Programme Identification, Programme Type, Alternate Frequency, etc.
+ <programlisting>
+ void rds_rx_set(bool enable_rds)
+ {
+ struct v4l2_tuner tuner;
+ int ret;
+ memset(&amp;tuner, 0, sizeof(tuner));
+ tuner.index = 0;
+ if(enable_rds)
+ tuner.rxsubchans |= V4L2_TUNER_SUB_RDS;
+ else
+ tuner.rxsubchans &amp;= ~V4L2_TUNER_SUB_RDS;
+ ret = ioctl(fd, VIDIOC_S_TUNER, &amp;tuner);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_TUNER:error!!\n");
+ }
+ }
+ </programlisting>
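+ The sketch below is illustrative only and not taken from the driver sources; it assumes the data returned by read() is laid out as standard V4L2 RDS blocks (struct v4l2_rds_data with lsb, msb and block fields) and that fd is the already opened FM character device.
+ <programlisting>
+ void rds_read(void)
+ {
+ struct v4l2_rds_data rds[16];
+ ssize_t bytes;
+ int blocks, i;
+ /* read() returns raw RDS blocks after the RDS_RECEIVED interrupt */
+ bytes = read(fd, rds, sizeof(rds));
+ if (bytes &lt;= 0) {
+ printf("read:error!!\n");
+ return;
+ }
+ blocks = bytes / sizeof(struct v4l2_rds_data);
+ for (i = 0; i &lt; blocks; i++)
+ printf("block %d: 0x%02x%02x\n", rds[i].block, rds[i].msb, rds[i].lsb);
+ /* group assembly (PI, PS name, RT, AF, etc.) is done in user space */
+ }
+ </programlisting>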
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="af-update_switch">
+ <title>AF Update and Switching</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>AF Update &amp; Switching</term>
+ <listitem>
+ <para>Alternate Frequency (AF) Handling needs to be done in user space.</para>
+ <para>The application should use the AF RDS group data to compose a list of AFs when tuned to a new channel.</para>
+ <para>When the reception of the currently tuned frequency falls below a set threshold, it can decide to switch to one of the alternative frequencies for this channel.
+ </para>
+ <para>The application can perform an AF Update, which returns the RSSI value for all or some of the channel's AFs, thus allowing a switch to the AF with the highest RSSI. The AF Update could be designed to stop as soon as it finds an AF with an acceptable RSSI level. In the event that all the AF RSSI values are lower than that of the base channel, the AF Switch would not be necessary.
+ </para>
+ <para>To know the RSSI of the alternative frequencies (V4L2 compliant), the application can use the IOCTL VIDIOC_S_CTRL with the parameter id set to V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_START and the parameter value set to the frequency in Hz of a channel from the AF list. If this call returns successfully, the RSSI of the frequency can then be retrieved using the IOCTL VIDIOC_G_CTRL with the parameter id set to V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_GET_RESULT; the output parameter value will contain the RSSI of the AF frequency.
+ </para>
+ <para>If it is still deemed necessary to switch channels, the next step is then to switch to an alternative frequency in the AF list. This can be done using the IOCTL VIDIOC_S_EXT_CTRLS, with:
+ </para>
+ <itemizedlist>
+ <listitem><para>Parameter id set to V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_START</para></listitem>
+ <listitem><para>Parameter size set to 2</para></listitem>
+ <listitem><para>Parameters filled as below (string field of the parameter) </para></listitem>
+ <listitem><para>Control class parameter set to V4L2_CTRL_CLASS_USER</para></listitem>
+ <listitem><para>The AF switch frequency in Hz</para></listitem>
+ <listitem><para>Expected PI code </para></listitem>
+ </itemizedlist>
+ <para>The application can check if the AF switch succeeded or not using the IOCTL VIDIOC_G_CTRL, with parameter id set to V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_GET_RESULT, and the output parameter value will contain the AF switch conclusion.
+ </para>
+ <para> The example code below illustrates both the aforementioned functionalities.</para>
+ <para>
+ <programlisting>
+ void PerformAFUpdate(long AF_Frequency, int *AF_Rssi)
+ {
+ struct v4l2_control sctrl, gctrl;
+ int ret;
+ sctrl.id = V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_START;
+ sctrl.value = AF_Frequency;
+ ret = ioctl(fd, VIDIOC_S_CTRL, &amp; sctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_CTRL:error!!\n");
+ }
+ gctrl.id = V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_GET_RESULT;
+ ret = ioctl(fd, VIDIOC_G_CTRL, &amp; gctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_G_CTRL:error!!\n");
+ }
+ *AF_Rssi = gctrl.value;
+ }
+ void PerformAFSwitch(long AF_BestFrequency, int AF_ExpectedPI, int *AF_SwitchConclusion)
+ {
+ struct v4l2_control gctrl;
+ struct v4l2_ext_controls ext_ctrl;
+ int ret;
+ int conclusion;
+ long freq;
+ long *p = NULL;
+ ext_ctrl.ctrl_class = V4L2_CTRL_CLASS_USER;
+ ext_ctrl.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ ext_ctrl.count = 0;
+ ext_ctrl.controls->id = V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_START;
+ ext_ctrl.controls->size = 2;
+ ext_ctrl.controls->string = (long *)malloc(sizeof(long) * ext_ctrl.controls->size);
+ p = ext_ctrl.controls->string;
+ *p = (AF_BestFrequency * 2)/ 125;
+ *(p+1) = (long)AF_ExpectedPI;
+ ret = ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp; ext_ctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_EXT_CTRLS:error!!\n");
+ }
+ free(ext_ctrl.controls->string);
+ free(ext_ctrl.controls);
+ gctrl.id = V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_GET_RESULT;
+ ret = ioctl(fd, VIDIOC_G_CTRL, &amp; gctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_G_CTRL:error!!\n");
+ }
+ *AF_SwitchConclusion = gctrl.value;
+ }
+ </programlisting>
+ Note: For V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_GET_RESULT returned values are:
+ <itemizedlist>
+ <listitem><para> -1 AF Switch failed, the AF-RSSI was too low.</para></listitem>
+ <listitem><para> -2 AF Switch failed, the AF-PI Doesn't correlate.</para></listitem>
+ <listitem><para> -3 AF Switch failed, the AF-RDS SYNC Lost.</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="rds-transmit">
+ <title>RDS Transmit</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>RDS Transmit</term>
+ <listitem>
+ <para>
+ For enabling/disabling RDS for FM Tx, the IOCTL VIDIOC_S_MODULATOR should be used with the txsubchans parameter of the v4l2_modulator structure: V4L2_TUNER_SUB_RDS should be set to enable RDS and must not be set to disable RDS. For transmitting RDS data like PI, PTY, PSN and RT, the VIDIOC_S_EXT_CTRLS IOCTL should be used with the id set to V4L2_CID_RDS_TX_PI, V4L2_CID_RDS_TX_PTY, V4L2_CID_RDS_TX_PS_NAME and V4L2_CID_RDS_TX_RADIO_TEXT respectively. The example below shows how to set the various RDS transmit fields.
+ <programlisting>
+ void rds_tx_set(bool enable_rds)
+ {
+ struct v4l2_modulator modulator;
+ int ret;
+ memset(&amp;modulator, 0, sizeof(modulator));
+ modulator.index = 0;
+ if(enable_rds)
+ modulator.txsubchans |= V4L2_TUNER_SUB_RDS;
+ else
+ modulator.txsubchans &amp;= ~V4L2_TUNER_SUB_RDS;
+ ret = ioctl(fd, VIDIOC_S_MODULATOR, &amp; modulator);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_MODULATOR:error!!\n");
+ }
+ }
+ void rds_tx_PI(void *value)
+ {
+ struct v4l2_ext_controls ext_ctrl;
+ int ret;
+ unsigned short *pi_code = (unsigned short *)value;
+ ext_ctrl.ctrl_class = V4L2_CTRL_CLASS_FM_TX;
+ ext_ctrl.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ ext_ctrl.count = 0;
+ ext_ctrl.controls->id = V4L2_CID_RDS_TX_PI;
+ ext_ctrl.controls->size = 0;
+ ext_ctrl.controls->string = NULL;
+ ext_ctrl.controls->value = *pi_code;
+ ret = ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp; ext_ctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_EXT_CTRLS:error!!\n");
+ }
+ free(ext_ctrl.controls);
+ }
+ void rds_tx_PTY(void *value)
+ {
+ struct v4l2_ext_controls ext_ctrl;
+ int ret;
+ unsigned short *pty_code = (unsigned short *)value;
+ ext_ctrl.ctrl_class = V4L2_CTRL_CLASS_FM_TX;
+ ext_ctrl.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ ext_ctrl.count = 0;
+ ext_ctrl.controls->id = V4L2_CID_RDS_TX_PTY;
+ ext_ctrl.controls->size = 0;
+ ext_ctrl.controls->string = NULL;
+ ext_ctrl.controls->value = *pty_code;
+ ret = ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp; ext_ctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_EXT_CTRLS:error!!\n");
+ }
+ free(ext_ctrl.controls);
+ }
+ void rds_tx_PSN(void *value)
+ {
+ struct v4l2_ext_controls ext_ctrl;
+ int ret;
+ char *psn = (char *)value;
+ ext_ctrl.ctrl_class = V4L2_CTRL_CLASS_FM_TX;
+ ext_ctrl.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ ext_ctrl.count = 0;
+ ext_ctrl.controls->id = V4L2_CID_RDS_TX_PS_NAME;
+ ext_ctrl.controls->size = strlen(psn);
+ ext_ctrl.controls->value = 0;
+ ext_ctrl.controls->string = (char *)malloc(ext_ctrl.controls->size);
+ memcpy(ext_ctrl.controls->string, psn, ext_ctrl.controls->size);
+ ret = ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp; ext_ctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_EXT_CTRLS:error!!\n");
+ }
+ free(ext_ctrl.controls->string);
+ free(ext_ctrl.controls);
+ }
+ void rds_tx_RT(void *value)
+ {
+ struct v4l2_ext_controls ext_ctrl;
+ int ret;
+ char *radio_text = (char *)value;
+ ext_ctrl.ctrl_class = V4L2_CTRL_CLASS_FM_TX;
+ ext_ctrl.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ ext_ctrl.count = 0;
+ ext_ctrl.controls->id = V4L2_CID_RDS_TX_RADIO_TEXT;
+ ext_ctrl.controls->size = strlen(radio_text);
+ ext_ctrl.controls->value = 0;
+ ext_ctrl.controls->string = (char *)malloc(ext_ctrl.controls->size);
+ memcpy(ext_ctrl.controls->string, radio_text, ext_ctrl.controls->size);
+ ret = ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp; ext_ctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_EXT_CTRLS:error!!\n");
+ }
+ free(ext_ctrl.controls->string);
+ free(ext_ctrl.controls);
+ }
+ </programlisting>
+ Note: RDS default parameters
+ <itemizedlist>
+ <listitem><para>Programme Identification code[PI]: Default value -> 0x1234</para></listitem>
+ <listitem><para>Programme Type[PTY]: Default value -> OTHER_MUSIC</para></listitem>
+ <listitem><para>Music/Speech switch[M/S]: Default value -> Music</para></listitem>
+ <listitem><para>Programme Service name[PS]: Default value -> FM Xmit</para></listitem>
+ <listitem><para>Radio text[RT]: Default value -> Default Radio Text</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="internal-test-tone-generator">
+ <title>Test Tone Generation</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>1. Enabling-Disabling test tone</term>
+ <listitem>
+ <para>
+ For setting the test tone status, the application should use the IOCTL VIDIOC_S_CTRL with its parameter id set to V4L2_CID_CG2900_RADIO_TEST_TONE_GENERATOR_SET_STATUS and the parameter value set to any of the following:
+ <itemizedlist>
+ <listitem><para>Turn off test tone - use V4L2_CG2900_RADIO_TEST_TONE_GEN_OFF</para></listitem>
+ <listitem><para>Turn on test tone - use V4L2_CG2900_RADIO_TEST_TONE_GEN_ON_WO_SRC</para></listitem>
+ <listitem><para>Turn on with sample rate correction - use V4L2_CG2900_RADIO_TEST_TONE_GEN_ON_W_SRC</para></listitem>
+ </itemizedlist>
+ <programlisting>
+ void SetTestToneStatus(int state)
+ {
+ struct v4l2_control sctrl;
+ int ret;
+ sctrl.id = V4L2_CID_CG2900_RADIO_TEST_TONE_GENERATOR_SET_STATUS;
+ sctrl.value = state;
+ ret = ioctl(fd, VIDIOC_S_CTRL, &amp; sctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_CTRL:error!!\n");
+ }
+ }
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>2. Test Tone Connect</term>
+ <listitem>
+ <para>
+ The internal test tone can be used in the following modes: FMR (receiver mode) and FMT (transmitter mode). Each of the waves, or the sum of them, can be used as the audio source for the receiver audio outputs, or for the transmitter audio input. This is done by means of the command "Test Tone Connect". For the receiver, all available audio outputs will be connected to the tone generator. After switching FM to another mode, the command "Test Tone Connect" must be executed again to set up the connection in the new mode.
+ </para>
+
+ <para>
+ The IOCTL VIDIOC_S_EXT_CTRLS is used to perform the Test Tone Connect operation, with the following parameters:
+ </para>
+ <itemizedlist>
+ <listitem><para>Parameter id set to V4L2_CID_CG2900_RADIO_TEST_TONE_CONNECT</para></listitem>
+ <listitem><para>Parameter size set to 2</para></listitem>
+ <listitem><para>Control class parameter set to V4L2_CTRL_CLASS_USER</para></listitem>
+ <listitem><para>Parameter values filled as in the code snippet below (string field of the parameter)</para></listitem>
+ <listitem><para>First byte of the parameter value shall contain the connect parameter for the left audio output</para></listitem>
+ <listitem><para>Second byte of the parameter value shall contain the connect parameter for the right audio output</para></listitem>
+ </itemizedlist>
+
+ <para>
+ Either of the parameter values (for the left or right audio output) can assume the following values:
+ </para>
+ <itemizedlist>
+ <listitem><para>V4L2_CG2900_RADIO_TEST_TONE_NORMAL_AUDIO - Normal Audio</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_TEST_TONE_ZERO - Zero</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_TEST_TONE_TONE_1 - Tone_1</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_TEST_TONE_TONE_2 - Tone_2</para></listitem>
+ <listitem><para>V4L2_CG2900_RADIO_TEST_TONE_TONE_SUM - Tone_Sum</para></listitem>
+ </itemizedlist>
+
+ <programlisting>
+ void TestToneConnect(u8 left_audio_mode, u8 right_audio_mode)
+ {
+ struct v4l2_ext_controls ext_ctrl;
+ u8 *test_tone_connect_ptr = NULL;
+ int ret;
+ ext_ctrl.ctrl_class = V4L2_CTRL_CLASS_USER;
+ ext_ctrl.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ ext_ctrl.count = 0;
+ ext_ctrl.controls-&gt;id = V4L2_CID_CG2900_RADIO_TEST_TONE_CONNECT;
+ ext_ctrl.controls-&gt;size = 2;
+ ext_ctrl.controls-&gt;string = (u8 *)malloc(sizeof(u8) * ext_ctrl.controls-&gt;size);
+ test_tone_connect_ptr = ext_ctrl.controls-&gt;string;
+ *(test_tone_connect_ptr) = left_audio_mode;
+ *(test_tone_connect_ptr + 1) = right_audio_mode;
+
+ ret = ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp; ext_ctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_EXT_CTRL:error!!\n");
+ }
+ }
+ </programlisting>
+
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>3. Test Tone Set Parameters</term>
+ <listitem>
+ <para>
+ The tone generator is capable of generating two different sine waves with adjustable offset, amplitude, phase offset and frequency. These properties can be changed with the command "Test Tone Set Params".
+ </para>
+
+ <para>
+ The IOCTL VIDIOC_S_EXT_CTRLS is used to perform the Test Tone Set Parameters operation, with the following parameters:
+ </para>
+ <itemizedlist>
+ <listitem><para>Parameter id set to V4L2_CID_CG2900_RADIO_TEST_TONE_SET_PARAMS</para></listitem>
+ <listitem><para>Parameter size set to 6</para></listitem>
+ <listitem><para>Control class parameter set to V4L2_CTRL_CLASS_USER</para></listitem>
+ <listitem><para>Parameter values filled as in the code snippet below (string field of the parameter)</para></listitem>
+ <listitem><para>First word of Parameter value shall contain tone_gen (0: tone_1, 1:tone_2)</para></listitem>
+ <listitem><para>Second word of Parameter value shall contain frequency ([0x0000..0x7FFF], (default = 0x064D))</para></listitem>
+ <listitem><para>Third word of Parameter value shall contain volume ([0x0000..0x7FFF], (default = 0x0CCD))</para></listitem>
+ <listitem><para>Fourth word of Parameter value shall contain phase offset([0x8000..0x7FFF], (default = 0x0000))</para></listitem>
+ <listitem><para>Fifth word of Parameter value shall contain DC to add to tone([0x8000..0x7FFF], (default = 0x0000))</para></listitem>
+ <listitem><para>Sixth word of Parameter value shall contain waveform type(0=sine shaped, 1=Pulse shaped)</para></listitem>
+ </itemizedlist>
+
+ <programlisting>
+ void Sample_TestToneSetParams()
+ {
+ struct v4l2_ext_controls ext_ctrl;
+ u16 *test_tone_connect_ptr = NULL;
+ u16 tone_gen = 0, waveform = 1; /* Tone_Gen = Tone_1, waveform type = pulse shaped */
+ u16 frequency = 0x064D, volume = 0x0CCD, phase_offset = 0x0000, dc = 0x0000;
+ int ret;
+
+ ext_ctrl.ctrl_class = V4L2_CTRL_CLASS_USER;
+ ext_ctrl.controls = (struct v4l2_ext_control *) malloc(sizeof(struct v4l2_ext_control));
+ ext_ctrl.count = 0;
+ ext_ctrl.controls-&gt;id = V4L2_CID_CG2900_RADIO_TEST_TONE_SET_PARAMS;
+ ext_ctrl.controls-&gt;size = 6;
+ ext_ctrl.controls-&gt;string = (u16 *)malloc(sizeof(u16) * ext_ctrl.controls-&gt;size);
+ test_tone_connect_ptr = ext_ctrl.controls-&gt;string;
+ *(test_tone_connect_ptr) = tone_gen;
+ *(test_tone_connect_ptr + 1) = frequency;
+ *(test_tone_connect_ptr + 2) = volume;
+ *(test_tone_connect_ptr + 3) = phase_offset;
+ *(test_tone_connect_ptr + 4) = dc;
+ *(test_tone_connect_ptr + 5) = waveform;
+
+ ret = ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp; ext_ctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_EXT_CTRL:error!!\n");
+ }
+ }
+ </programlisting>
+
+ </listitem>
+
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="de-emphasis-filter">
+ <title>De-Emphasis Filter</title>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>Set De-Emphasis Filter</term>
+ <listitem>
+ <para>
+ A de-emphasis filter is applied to the received FM signal to compensate for the
+ pre-emphasis that has been applied to the signal by the FM transmitter. The IOCTL VIDIOC_S_CTRL is used with the parameter id set to V4L2_CID_CG2900_RADIO_TUNE_DEEMPHASIS and the parameter value can take the following values:
+ <itemizedlist>
+ <listitem><para>Disable de-emphasis - use V4L2_CG2900_RADIO_DEEMPHASIS_DISABLED</para></listitem>
+ <listitem><para>De-emphasis with 50 micro seconds - use V4L2_CG2900_RADIO_DEEMPHASIS_50_uS</para></listitem>
+ <listitem><para>De-emphasis filter with 75 micro seconds - use V4L2_CG2900_RADIO_DEEMPHASIS_75_uS</para></listitem>
+ </itemizedlist>
+
+ <programlisting>
+ void SetDeemphasisLevel(int deemphasis_level)
+ {
+ struct v4l2_control sctrl;
+ int ret;
+ sctrl.id = V4L2_CID_CG2900_RADIO_TUNE_DEEMPHASIS ;
+ sctrl.value = deemphasis_level;
+ ret = ioctl(fd, VIDIOC_S_CTRL, &amp; sctrl);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_CTRL:error!!\n");
+ }
+ }
+ </programlisting>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+
+ </chapter>
+ <chapter id="driver-configuration">
+ <title>Driver Configuration and Interaction</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+ For debug purposes the variable cg2900_fm_debug_level in the file cg2900_fm_driver.c can be changed to control how many debug printouts
+ are generated.
+ <itemizedlist>
+ <listitem><para>1 = Error logs</para></listitem>
+ <listitem><para>2 = Info logs, e.g. function entries</para></listitem>
+ <listitem><para>3 = Debug logs</para></listitem>
+ <listitem><para>4 = HCI logs, i.e. contents of the transferred data</para></listitem>
+ </itemizedlist>
+ </para>
+ <section id="driver-implemented-operations">
+ <title>Implemented operations in driver</title>
+ <para>
+ </para>
+ <para>
+ <table>
+ <title> Supported device driver operations when using character device </title>
+ <tgroup cols="2"><tbody>
+ <row><entry> open </entry> <entry> Opening a character device will Initialize the FM Chip and download the firmware files.</entry> </row>
+ <row><entry> close </entry> <entry> Closing a character device will deinitialize the FM Chip.</entry> </row>
+ <row><entry> poll </entry> <entry> Polling a character device will check whether the requested data is available.</entry> </row>
+ <row><entry> read </entry> <entry> Reading from a character device reads RDS data from the Chip.</entry> </row>
+ </tbody></tgroup>
+ </table>
+ </para>
+ </section>
+ <section id="driver-loading">
+ <title>Driver loading parameters</title>
+ <para>
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>radio_nr</term>
+ <listitem>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Parameter type</term>
+ <listitem><synopsis><type>int</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Default value</term>
+ <listitem><para>0</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Runtime readable/modifiable</term>
+ <listitem><para>Readable</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The parameter radio_nr in radio-cg2900.c can be set to register a particular minor number with Video4Linux. Currently this parameter is set to 0 by default, signifying that "/dev/radio0" is the character device assigned to the CG2900 FM Driver in Video4Linux.
+ If the platform has more than one radio driver, the radio_nr parameter should be changed in the file radio-cg2900.c.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Checking the Radio Number</term>
+ <listitem>
+ <para>
+ cat /sys/module/radio_cg2900/parameters/radio_nr
+ </para>
+ <para>
+ The above command gets the radio number registered with
+ Video4Linux. This is used for opening the FM Radio
+ character device from user space.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>grid</term>
+ <listitem>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Parameter type</term>
+ <listitem><synopsis><type>int</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Default value</term>
+ <listitem><para>1</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Runtime readable/modifiable</term>
+ <listitem><para>Readable</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The parameter grid in radio-cg2900.c defines the channel spacing in kHz to be used while switching on the FM Radio.
+ <itemizedlist>
+ <listitem><para>0: 50 kHz (China)</para></listitem>
+ <listitem><para>1: 100 kHz (Europe, Japan)</para></listitem>
+ <listitem><para>2: 200 kHz (USA)</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Changing the Grid</term>
+ <listitem>
+ <para>
+ echo 1 &gt; /sys/module/radio_cg2900/parameters/grid
+ </para>
+ <para>
+ The above command sets the radio band spacing between
+ two adjacent radio channels, in this case to 100 kHz,
+ suitable for Europe. The change is applicable before
+ switching on the FM Radio; otherwise it takes effect
+ from the next FM switch-on.
+ </para>
+ <para>
+ Note: The grid parameter cannot be changed while the FM radio is operational.
+ </para>
+ <para>
+ The user must change the grid value and restart the FM radio when moving into a different radio region.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Checking the current Grid Value</term>
+ <listitem>
+ <para>
+ cat /sys/module/radio_cg2900/parameters/grid
+ </para>
+ <para>
+ The above command gets the radio band spacing
+ between two adjacent radio channels currently set.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>band</term>
+ <listitem>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Parameter type</term>
+ <listitem><synopsis><type>int</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Default value</term>
+ <listitem><para>0</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Runtime readable/modifiable</term>
+ <listitem><para>Readable</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The parameter band in radio-cg2900.c defines the band to be used while switching on FM Radio.
+ <itemizedlist>
+ <listitem><para>0: 87.5 - 108 MHz (USA, Europe)</para></listitem>
+ <listitem><para>1: 76 - 90 MHz (Japan)</para></listitem>
+ <listitem><para>2: 70 - 108 MHz (China wide band)</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Changing the Band</term>
+ <listitem>
+ <para>
+ echo 0 &gt; /sys/module/radio_cg2900/parameters/band
+ </para>
+ <para>
+ The above command sets the FM band to be used.
+ In this case, it sets the FM band 87.5 - 108 MHz.
+ The change is applicable before switching on the FM Radio;
+ otherwise it takes effect from the next FM switch-on.
+ </para>
+ <para>
+ Note: The band parameter cannot be changed while the FM radio is operational.
+ </para>
+ <para>
+ The user must change the band value and restart the FM radio when moving into a different radio region.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Checking the current Band Value</term>
+ <listitem>
+ <para>
+ cat /sys/module/radio_cg2900/parameters/band
+ </para>
+ <para>
+ The above command gets the current radio band set.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>cg2900_fm_debug_level</term>
+ <listitem>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>Parameter type</term>
+ <listitem><synopsis><type>int</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Default value</term>
+ <listitem><para>1</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Runtime readable/modifiable</term>
+ <listitem><para>Readable</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The parameter cg2900_fm_debug_level in platformosapi.c defines the debug level that is currently used.
+ The higher the debug level the more print-outs are received in the terminal window.
+ The following values are supported:
+ <itemizedlist>
+ <listitem><para>1 = Error logs</para></listitem>
+ <listitem><para>2 = Info logs, e.g. function entries</para></listitem>
+ <listitem><para>3 = Debug logs</para></listitem>
+ <listitem><para>4 = HCI logs, i.e. contents of the transferred data</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Changing the Log Level</term>
+ <listitem>
+ <para>
+ echo 3 &gt; /sys/module/radio_cg2900/parameters/cg2900_fm_debug_level
+ </para>
+ <para>
+ The above command sets the logging level of the FM Driver.
+ In this case, it will print all debug messages
+ except the HCI commands exchanged with the FM Chip.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Checking the current Log Level</term>
+ <listitem>
+ <para>
+ cat /sys/module/radio_cg2900/parameters/cg2900_fm_debug_level
+ </para>
+ <para>
+ The above command gets the current debug log level set.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ </para>
+ </section>
+ <section id="driver-ioctl">
+ <title>Driver IO Control</title>
+ <para>
+ Describes the FM driver IO control parameters
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term><constant>VIDIOC_QUERYCAP</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Get</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_capability</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_QUERYCAP</constant> IOCTL is used to query the capabilities supported by the FM Driver. If the FM Driver supports FM Rx, the capabilities field is bitwise OR'd with V4L2_CAP_TUNER; if it supports FM Tx, the capabilities field is bitwise OR'd with V4L2_CAP_MODULATOR. A minimal usage sketch is shown after the list below.
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If IOCTL is able to retrieve the capabilities successfully without errors the IOCTL function will return 0.</para></listitem>
+ <listitem><para>A negative value will indicate error.</para></listitem>
+ </itemizedlist>
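+ A minimal usage sketch (illustrative only, not taken from the driver sources) is shown below; it assumes the FM character device has already been opened as fd, as described in the Tasks chapter.
+ <programlisting>
+ struct v4l2_capability cap;
+ int ret;
+ memset(&amp;cap, 0, sizeof(cap));
+ ret = ioctl(fd, VIDIOC_QUERYCAP, &amp;cap);
+ if (ret &lt; 0) {
+ printf("VIDIOC_QUERYCAP:error!!\n");
+ return;
+ }
+ /* check which of the FM Rx/Tx capabilities the driver reports */
+ if (cap.capabilities &amp; V4L2_CAP_TUNER)
+ printf("FM Rx supported\n");
+ if (cap.capabilities &amp; V4L2_CAP_MODULATOR)
+ printf("FM Tx supported\n");
+ </programlisting>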
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_G_TUNER</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Get</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_tuner</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_G_TUNER</constant> IOCTL gets the FM Radio Tuner properties supported by the FM Radio. It is also used to retrieve the RDS status, mono/stereo status and signal strength of the tuned channel. These values are valid when FM is configured using the IOCTL VIDIOC_S_TUNER, i.e. in FM Rx mode.
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If IOCTL is able to retrieve the tuner properties successfully without errors the IOCTL function will return 0.</para></listitem>
+ <listitem><para>A negative value will indicate error.</para></listitem>
+ </itemizedlist>
+ Note: Currently the retrieved signal strength is a raw decimal value and not in "dBuV"; an appropriate external conversion is required.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_S_TUNER</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Set</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_tuner</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_S_TUNER</constant> IOCTL configures the FM radio in Rx mode. Only 1 FM Tuner is supported by FM Driver.
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If IOCTL is able to set the tuner properties successfully without errors the IOCTL function will return 0.</para></listitem>
+ <listitem><para>A negative value will indicate error.</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_G_MODULATOR</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Get</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_modulator</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_G_MODULATOR</constant> IOCTL gets the FM Radio Modulator properties supported by the FM Radio. It is also used to retrieve the RDS status and mono/stereo status. These values are valid when FM is configured using the IOCTL VIDIOC_S_MODULATOR, i.e. in FM Tx mode.
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If IOCTL is able to retrieve the modulator properties successfully without errors the IOCTL function will return 0.</para></listitem>
+ <listitem><para>A negative value will indicate error.</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_S_MODULATOR</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Set</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_modulator</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_S_MODULATOR</constant> IOCTL configures the FM radio in Tx mode. Only 1 FM Modulator is supported by FM Driver.
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If IOCTL is able to set the modulator properties successfully without errors the IOCTL function will return 0.</para></listitem>
+ <listitem><para>A negative value will indicate error.</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_S_FREQUENCY</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Set</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_frequency</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_S_FREQUENCY</constant> IOCTL sets the frequency on FM radio in Rx or Tx mode. The frequency parameter passed is in V4L2 format.
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If IOCTL is able to set the frequency successfully without errors the IOCTL function will return 0.</para></listitem>
+ <listitem><para>A negative value will indicate error.</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_G_FREQUENCY</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Get</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_frequency</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_G_FREQUENCY</constant> IOCTL retrieves the currently set frequency on the FM Radio in Rx or Tx mode.
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If IOCTL is able to get the frequency successfully without errors the IOCTL function will return 0.</para></listitem>
+ <listitem><para>A negative value will indicate error.</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_S_HW_FREQ_SEEK</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Set</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_hw_freq_seek</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_S_HW_FREQ_SEEK</constant> IOCTL starts the seek operation when the FM Radio is configured in Rx mode. The direction parameter indicates the direction of seeking from the current station. At present the FM Driver ignores the wrap_around parameter and unconditional wrap around is supported. If the operation is started successfully, the application should use poll() to identify when the seek is over. A minimal sketch is shown after the list below.
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If IOCTL is able to start the seek successfully without errors the IOCTL function will return 0.</para></listitem>
+ <listitem><para>A negative value will indicate error.</para></listitem>
+ </itemizedlist>
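+ The following is a minimal sketch (illustrative only); it assumes the standard v4l2_hw_freq_seek field names (tuner, type, seek_upward, wrap_around) and the device already opened as fd.
+ <programlisting>
+ struct v4l2_hw_freq_seek seek;
+ int ret;
+ memset(&amp;seek, 0, sizeof(seek));
+ seek.tuner = 0;
+ seek.type = V4L2_TUNER_RADIO;
+ seek.seek_upward = 1; /* seek towards higher frequencies */
+ seek.wrap_around = 1; /* ignored by the driver; wrap around is unconditional */
+ ret = ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &amp;seek);
+ if (ret &lt; 0) {
+ printf("VIDIOC_S_HW_FREQ_SEEK:error!!\n");
+ return;
+ }
+ /* completion is signalled via poll() and V4L2_CID_CG2900_RADIO_GET_INTERRUPT */
+ </programlisting>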
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_G_CTRL</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Get</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_control</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_G_CTRL</constant> IOCTL retrieves the value of a particular control. The following controls are supported by the FM Driver:
+ <itemizedlist>
+ <listitem><para>
+ V4L2_CID_AUDIO_VOLUME
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_AUDIO_MUTE
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_AUDIO_BALANCE
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_RSSI_THRESHOLD
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_SELECT_ANTENNA
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_GET_RESULT
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_GET_RESULT
+ </para></listitem>
+ </itemizedlist>
+ Generic returned values are:
+ <itemizedlist>
+ <listitem><para>If the IOCTL is able to retrieve the value of the control successfully, it returns 0.</para></listitem>
+ <listitem><para>A negative value indicates an error.</para></listitem>
+ </itemizedlist>
+ Note: for V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_GET_RESULT the returned values are:
+ <itemizedlist>
+ <listitem><para>-1: AF switch failed, the AF-RSSI was too low.</para></listitem>
+ <listitem><para>-2: AF switch failed, the AF-PI does not correlate.</para></listitem>
+ <listitem><para>-3: AF switch failed, the AF-RDS sync was lost.</para></listitem>
+ </itemizedlist>
+ </para>
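+ <para>
+ A minimal user-space sketch of reading one of the supported controls:
+ </para>
+ <programlisting>
+#include &lt;string.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/videodev2.h&gt;
+
+/* Read back the current audio volume; returns 0 on success. */
+int fm_get_volume(int fd, int *volume)
+{
+        struct v4l2_control ctrl;
+
+        memset(&amp;ctrl, 0, sizeof(ctrl));
+        ctrl.id = V4L2_CID_AUDIO_VOLUME;
+
+        if (ioctl(fd, VIDIOC_G_CTRL, &amp;ctrl) &lt; 0)
+                return -1;
+
+        *volume = ctrl.value;
+        return 0;
+}
+ </programlisting>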
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_S_CTRL</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Set</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_control</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_S_CTRL</constant> IOCTL sets the value of a particular control. The following controls are supported by the FM Driver:
+ <itemizedlist>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_CHIP_STATE
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_BANDSCAN
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_BLOCKSCAN_START
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_SELECT_ANTENNA
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_RSSI_THRESHOLD
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_START
+ </para></listitem>
+ </itemizedlist>
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If the IOCTL is able to set the value of the control successfully, it returns 0.</para></listitem>
+ <listitem><para>A negative value indicates an error.</para></listitem>
+ </itemizedlist>
+ </para>
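+ <para>
+ A minimal user-space sketch of setting one of the driver-private controls; the V4L2_CID_CG2900_RADIO_RSSI_THRESHOLD definition is assumed to come from the CG2900 FM driver headers, as it is not part of the standard V4L2 control set:
+ </para>
+ <programlisting>
+#include &lt;string.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/videodev2.h&gt;
+/* V4L2_CID_CG2900_RADIO_RSSI_THRESHOLD is assumed to be provided by the
+ * CG2900 FM driver headers. */
+
+int fm_set_rssi_threshold(int fd, int threshold)
+{
+        struct v4l2_control ctrl;
+
+        memset(&amp;ctrl, 0, sizeof(ctrl));
+        ctrl.id = V4L2_CID_CG2900_RADIO_RSSI_THRESHOLD;
+        ctrl.value = threshold;
+
+        return ioctl(fd, VIDIOC_S_CTRL, &amp;ctrl);
+}
+ </programlisting>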
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_G_EXT_CTRLS</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Get</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_ext_controls</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_G_EXT_CTRLS</constant> IOCTL retrieves the value of a particular control. It is used when a control class is defined or when more than one parameter has to be retrieved. Only the V4L2_CTRL_CLASS_FM_TX class is supported for this IOCTL in the FM Driver. The following controls are supported by the FM Driver:
+ <itemizedlist>
+ <listitem><para>
+ V4L2_CID_RDS_TX_DEVIATION
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_PILOT_TONE_ENABLED
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_PILOT_TONE_DEVIATION
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_TUNE_PREEMPHASIS
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_TUNE_POWER_LEVEL
+ </para></listitem>
+ </itemizedlist>
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If the IOCTL is able to retrieve the value(s) of the control successfully, it returns 0.</para></listitem>
+ <listitem><para>A negative value indicates an error.</para></listitem>
+ </itemizedlist>
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>VIDIOC_S_EXT_CTRLS</constant></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>Direction</term>
+ <listitem><para>Set</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Parameter</term>
+ <listitem><synopsis><type>v4l2_ext_controls</type></synopsis></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>
+ The <constant>VIDIOC_S_EXT_CTRLS</constant> IOCTL sets the value of a particular control when more than one parameter has to be set or when a control class is defined. At present only the V4L2_CTRL_CLASS_FM_TX and V4L2_CTRL_CLASS_USER control classes are supported by the FM Driver. The following controls are supported by the FM Driver:
+ <itemizedlist>
+ <listitem><para>
+ V4L2_CID_RDS_TX_DEVIATION
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_RDS_TX_PI
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_RDS_TX_PTY
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_RDS_TX_PS_NAME
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_RDS_TX_RADIO_TEXT
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_PILOT_TONE_ENABLED
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_PILOT_TONE_DEVIATION
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_TUNE_PREEMPHASIS
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_TUNE_POWER_LEVEL
+ </para></listitem>
+ <listitem><para>
+ V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_START
+ </para></listitem>
+ </itemizedlist>
+ Returned values are:
+ <itemizedlist>
+ <listitem><para>If the IOCTL is able to set the value of the control successfully, it returns 0.</para></listitem>
+ <listitem><para>A negative value indicates an error.</para></listitem>
+ </itemizedlist>
+ </para>
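+ <para>
+ A minimal user-space sketch of programming the RDS Programme Service name through the FM Tx control class (string length limits are not checked here):
+ </para>
+ <programlisting>
+#include &lt;string.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/videodev2.h&gt;
+
+/* Program the RDS PS name while the radio is configured in Tx mode. */
+int fm_set_rds_ps_name(int fd, char *ps_name)
+{
+        struct v4l2_ext_control ctrl;
+        struct v4l2_ext_controls ctrls;
+
+        memset(&amp;ctrl, 0, sizeof(ctrl));
+        ctrl.id = V4L2_CID_RDS_TX_PS_NAME;
+        ctrl.size = strlen(ps_name) + 1;
+        ctrl.string = ps_name;
+
+        memset(&amp;ctrls, 0, sizeof(ctrls));
+        ctrls.ctrl_class = V4L2_CTRL_CLASS_FM_TX;
+        ctrls.count = 1;
+        ctrls.controls = &amp;ctrl;
+
+        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls);
+}
+ </programlisting>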
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ <section id="driver-sysfs">
+ <title>Driver Interaction with Sysfs</title>
+ <para>
+ Not Applicable
+ </para>
+ </section>
+ <section id="driver-proc">
+ <title>Driver Interaction using /proc filesystem</title>
+ <para>
+ Not Applicable
+ </para>
+ </section>
+ <section id="driver-other">
+ <title>Other means for Driver Interaction</title>
+ <para>
+ Not Applicable
+ </para>
+ </section>
+ <section id="driver-node">
+ <title>Driver Node File</title>
+ <variablelist>
+ <varlistentry>
+ <term>FM Radio Device</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/radio0</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>The radio device for FM Radio.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </section>
+ </chapter>
+ <chapter id="bugs">
+ <title>Known Bugs And Limitations</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>No known issues.</term>
+ <listitem>
+ <para>
+ <!-- Do NOT change the chapter id or title! -->
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+ <chapter id="internal-functions">
+ <title>Internal Functions Provided</title>
+ <para>
+ List of internal functions used in the FM Driver.
+ </para>
+ <!-- Do NOT change the chapter id or title! -->
+ <section id="radio-cg2900.c">
+ <title>radio-cg2900.c</title>
+!Idrivers/media/radio/CG2900/radio-cg2900.c
+ </section>
+ <section id="cg2900_fm_api.h">
+ <title>cg2900_fm_api.h</title>
+!Idrivers/media/radio/CG2900/cg2900_fm_api.h
+ </section>
+ <section id="cg2900_fm_api.c">
+ <title>cg2900_fm_api.c</title>
+!Idrivers/media/radio/CG2900/cg2900_fm_api.c
+ </section>
+ <section id="cg2900_fm_driver.h">
+ <title>cg2900_fm_driver.h</title>
+!Idrivers/media/radio/CG2900/cg2900_fm_driver.h
+ </section>
+ <section id="cg2900_fm_driver.c">
+ <title>cg2900_fm_driver.c</title>
+!Idrivers/media/radio/CG2900/cg2900_fm_driver.c
+ </section>
+ </chapter>
+</book>
diff --git a/Documentation/DocBook/db5500_keypad.tmpl b/Documentation/DocBook/db5500_keypad.tmpl
new file mode 100644
index 00000000000..a25fc990516
--- /dev/null
+++ b/Documentation/DocBook/db5500_keypad.tmpl
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="keypad-API-Guide">
+ <bookinfo>
+ <title>DB5500 Keypad</title>
+
+ <authorgroup>
+ <author>
+ <firstname>NaveenKumar</firstname>
+ <surname>Gaddipati</surname>
+ <affiliation>
+ <address>
+ <email>naveen.gaddipati@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <para>
+ License terms: GNU General Public License (GPL) version 2.
+ </para>
+
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the API provided by the keypad
+ driver for the DB5500 internal keypad.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ The db5500-keypad driver does not export any functions.
+ </para>
+ </chapter>
+
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the
+ structures which are used in the keypad driver.
+ </para>
+!Iarch/arm/mach-ux500/include/mach/db5500-keypad.h
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the
+ internal functions.
+ </para>
+!Idrivers/input/keyboard/db5500_keypad.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 9c27e5125dd..7514dbf0a67 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -446,4 +446,21 @@ X!Idrivers/video/console/fonts.c
!Edrivers/i2c/i2c-core.c
</chapter>
+ <chapter id="hsi">
+ <title>High Speed Synchronous Serial Interface (HSI)</title>
+
+ <para>
+ High Speed Synchronous Serial Interface (HSI) is a
+ serial interface mainly used for connecting application
+ engines (APE) with cellular modem engines (CMT) in cellular
+ handsets.
+
+ HSI provides multiplexing for up to 16 logical channels,
+ low-latency and full duplex communication.
+ </para>
+
+!Iinclude/linux/hsi/hsi.h
+!Edrivers/hsi/hsi.c
+ </chapter>
+
</book>
diff --git a/Documentation/DocBook/gpio.tmpl b/Documentation/DocBook/gpio.tmpl
new file mode 100644
index 00000000000..b69c2770210
--- /dev/null
+++ b/Documentation/DocBook/gpio.tmpl
@@ -0,0 +1,112 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="GPIO">
+ <bookinfo>
+ <title>GPIO1B</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Alessandro</firstname>
+ <surname>Rubini</surname>
+ <affiliation>
+ <address>
+ <email>rubini@unipv.it</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Prafulla</firstname>
+ <surname>WADASKAR</surname>
+ <affiliation>
+ <address>
+ <email>prafulla.wadaskar@st.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2008-2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the APIs provided by the GPIO controller driver.
+ </para>
+ <para>
+ Only the API specific to the Ux500 platform is listed here. For the generic GPIO
+ API, see <filename>Documentation/gpio.txt</filename> in the kernel source tree.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title> Public Interface </title>
+ <para>
+ This section lists the APIs provided by the GPIO controller driver to client drivers.
+ </para>
+ <para>
+ Only the API specific to the Ux500 platform is listed here. For the generic GPIO
+ API, see <filename>Documentation/gpio.txt</filename> in the kernel source tree.
+ </para>
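+ <para>
+ A minimal sketch of how a client driver typically uses the generic GPIO calls described in <filename>Documentation/gpio.txt</filename>; the GPIO number and label below are purely illustrative:
+ </para>
+ <programlisting>
+#include &lt;linux/gpio.h&gt;
+
+/* Request an (illustrative) GPIO and drive it high. */
+static int example_gpio_set_high(unsigned gpio)
+{
+        int err;
+
+        err = gpio_request(gpio, "example-gpio");
+        if (err)
+                return err;
+
+        return gpio_direction_output(gpio, 1);
+}
+ </programlisting>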
+!Earch/arm/plat-nomadik/gpio.c
+ </chapter>
+</book>
diff --git a/Documentation/DocBook/i2c.tmpl b/Documentation/DocBook/i2c.tmpl
new file mode 100644
index 00000000000..8a4cb49204e
--- /dev/null
+++ b/Documentation/DocBook/i2c.tmpl
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="I2C">
+ <bookinfo>
+ <title>I2C</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Srinidhi</firstname>
+ <surname>Kasagar</surname>
+ <affiliation>
+ <address>
+ <email>srinidhi.kasagar@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Sachin</firstname>
+ <surname>Verma</surname>
+ <affiliation>
+ <address>
+ <email>sachin.verma@st.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2009-2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the APIs provided by the I2C controller driver.
+ Since this driver registers its transfer function with the kernel I2C framework,
+ there are only private functions in this I2C bus driver. The driver currently
+ works only in master mode and supports 7-bit addressing only; there is no
+ support for 10-bit addressing. The driver supports Standard-mode (100 kHz)
+ and Fast-mode (400 kHz) operation.
+ </para>
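+ <para>
+ Client devices reach this controller only through the I2C core. The sketch below shows a typical register write from a hypothetical client driver; the register layout is illustrative:
+ </para>
+ <programlisting>
+#include &lt;linux/i2c.h&gt;
+
+/* Write one byte-sized register of a 7-bit addressed client device. */
+static int example_write_reg(struct i2c_client *client, u8 reg, u8 val)
+{
+        u8 buf[2] = { reg, val };
+
+        /* The I2C core routes this to the controller's transfer function. */
+        return i2c_master_send(client, buf, sizeof(buf));
+}
+ </programlisting>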
+ </chapter>
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable
+ </para>
+ </chapter>
+
+ <chapter id="private">
+ <title>Private Functions</title>
+ <para>
+ This Section lists the functions used internally by the I2C controller driver.
+ </para>
+!Idrivers/i2c/busses/i2c-nomadik.c
+ </chapter>
+
+</book>
diff --git a/Documentation/DocBook/i2s.tmpl b/Documentation/DocBook/i2s.tmpl
new file mode 100644
index 00000000000..6b6c50572e2
--- /dev/null
+++ b/Documentation/DocBook/i2s.tmpl
@@ -0,0 +1,97 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="I2S">
+ <bookinfo>
+ <title>I2S</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Sandeep</firstname>
+ <surname>Kaushik</surname>
+ <affiliation>
+ <address>
+ <email>sandeep.kaushik@st.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2008-2009</year>
+ <holder>STMicroelectronics Pvt Ltd</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the APIs provided by the I2S bus driver. The I2S bus supports
+ different protocols such as I2S, PCM and SPI.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ This section lists the functions exported by the I2S bus driver. These functions cater to all the
+ supported protocols, namely I2S, PCM and SPI.
+ </para>
+!Edrivers/misc/i2s/i2s.c
+ </chapter>
+</book>
diff --git a/Documentation/DocBook/lps001wp_prs.tmpl b/Documentation/DocBook/lps001wp_prs.tmpl
new file mode 100644
index 00000000000..4b3f69ab967
--- /dev/null
+++ b/Documentation/DocBook/lps001wp_prs.tmpl
@@ -0,0 +1,89 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="LPS001WP-API-Guide">
+ <bookinfo>
+ <title>LPS001WP Pressure and temperature</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Matteo</firstname>
+ <surname>Dameno</surname>
+ <affiliation>
+ <address>
+ <email>matteo.dameno@st.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Carmine</firstname>
+ <surname>Iascone</surname>
+ <affiliation>
+ <address>
+ <email>carmine.iascone@st.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2011</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <para>
+ License terms: GNU General Public License (GPL) version 2.
+ </para>
+
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the pressure and temperature sensor driver for the LPS001WP chip.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures which are
+ used in the pressure/temperature sensor driver.
+ </para>
+!Iinclude/linux/input/lps001wp.h
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ The pressure/temperature driver does not export any functions.
+ </para>
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal functions.
+ </para>
+!Idrivers/input/misc/lps001wp_prs.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/lsm303dlh.tmpl b/Documentation/DocBook/lsm303dlh.tmpl
new file mode 100644
index 00000000000..1000481e205
--- /dev/null
+++ b/Documentation/DocBook/lsm303dlh.tmpl
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="LSM303DLH-API-Guide">
+ <bookinfo>
+ <title>LSM303DLH Accelerometer and Magnetometer</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Chethan Krishna</firstname>
+ <surname>N</surname>
+ <affiliation>
+ <address>
+ <email>chethan.krishna@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <para>
+ License terms: GNU General Public License (GPL) version 2.
+ </para>
+
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the accelerometer and magnetometer drivers for the LSM303DLH sensor chip.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ The accelerometer/magnetometer drivers do not export any functions.
+ </para>
+ </chapter>
+
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures which are
+ used in the accelerometer/magnetometer drivers.
+ </para>
+!Iinclude/linux/lsm303dlh.h
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal functions.
+ </para>
+!Idrivers/hwmon/lsm303dlh_a.c
+!Idrivers/hwmon/lsm303dlhc_a.c
+!Idrivers/hwmon/lsm303dlh_m.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/msp.tmpl b/Documentation/DocBook/msp.tmpl
new file mode 100644
index 00000000000..55cec352a76
--- /dev/null
+++ b/Documentation/DocBook/msp.tmpl
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="MSP">
+ <bookinfo>
+ <title>MSP</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Sandeep</firstname>
+ <surname>Kaushik</surname>
+ <affiliation>
+ <address>
+ <email>sandeep.kaushik@st.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2008-2009</year>
+ <holder>STMicroelectronics Pvt Ltd</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the APIs provided by the MSP controller driver.
+ The MSP controller supports different protocols such as I2S, PCM and SPI.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable.
+ </para>
+ </chapter>
+
+ <chapter id="private">
+ <title>Private Functions</title>
+ <para>
+ This section lists the functions used by the MSP controller driver.
+ These functions cater to all the supported protocols, namely I2S, PCM and SPI.
+ </para>
+!Idrivers/misc/i2s/msp_i2s.c
+ </chapter>
+</book>
diff --git a/Documentation/DocBook/prcmu-fw-api.tmpl b/Documentation/DocBook/prcmu-fw-api.tmpl
new file mode 100644
index 00000000000..445a277933c
--- /dev/null
+++ b/Documentation/DocBook/prcmu-fw-api.tmpl
@@ -0,0 +1,109 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="STw4500">
+ <bookinfo>
+ <title>PRCMU Driver</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Sudeep Karkada</firstname>
+ <surname>Nagesha</surname>
+ <affiliation>
+ <address>
+ <email>sudeepkarkada.nagesha@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2009-2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the API provided by the PRCMU firmware interface driver.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="enum">
+ <title>Enumerations</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures
+ and enumerations which are used in the PRCMU firmware interface driver.
+ It is also required by the client drivers.
+ </para>
+!Iinclude/linux/mfd/dbx500-prcmu.h
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the kernel
+ API functions which are exported to the client drivers.
+ </para>
+!Edrivers/mfd/db8500-prcmu.c
+ </chapter>
+
+
+ </book>
diff --git a/Documentation/DocBook/shrm.tmpl b/Documentation/DocBook/shrm.tmpl
new file mode 100644
index 00000000000..b93bb065172
--- /dev/null
+++ b/Documentation/DocBook/shrm.tmpl
@@ -0,0 +1,139 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="SHRM">
+ <bookinfo>
+ <title>Shared Memory</title>
+ <authorgroup>
+ <author>
+ <firstname>Biju</firstname>
+ <surname>Das</surname>
+ <affiliation>
+ <address>
+ <email>biju.das@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Kumar</firstname>
+ <surname>Sanghvi</surname>
+ <affiliation>
+ <address>
+ <email>kumar.sanghvi@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Arun</firstname>
+ <surname>Murthy</surname>
+ <affiliation>
+ <address>
+ <email>arun.murthy@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2009-2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+ <para>
+ Licence terms: GNU General Public Licence (GPL) version 2.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes ST-Ericsson's adaptation of the protocol used for CMT/APE communication when SHared Memory (SHRM) is used as the IPC link.
+ </para>
+ </chapter>
+
+ <chapter id="design">
+ <title>Design</title>
+ <para>
+ The APE consists of a dual-core Cortex-A9 SMP cluster, a multimedia DSP and the PRCMU. The modem (CMT, Cellular Mobile Terminal) consists of two Cortex-R4 ARM processors.
+ Messages are exchanged between CMT and APE by copying the data into a shared region of DDR that is accessible to both sides. The design uses two channels, common and audio. The common channel is used for exchanging ISI, RPC and SECURITY messages; the audio channel is used for exchanging AUDIO messages. Each channel consists of two FIFOs, one for sending messages from CMT to APE and one from APE to CMT. Each FIFO has a write pointer and a read pointer shared between APE and CMT: the write pointer is updated when a message is copied into the FIFO, and the reader consumes messages from the read pointer up to the write pointer. Writer and reader notifications (separate for APE and CMT) signal the completion of the read/write operations, as sketched after this chapter's text. The driver includes four queues: a message sent from CMT to APE first resides in the FIFO, is then copied to one of the four queues based on its type (ISI, RPC, AUDIO, SECURITY), and the net/char device interface finally fetches it from the queue and copies it to the user-space buffer.
+ </para>
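+ <para>
+ The following conceptual sketch illustrates the read/write-pointer handling described above. It does not use the driver's real data structures, and the message_length() helper is hypothetical:
+ </para>
+ <programlisting>
+#include &lt;linux/types.h&gt;
+
+/* Conceptual sketch only: not the driver's actual structures. */
+struct shm_fifo {
+        u8  *base;           /* start of the FIFO area in shared DDR         */
+        u32  size;           /* FIFO length in bytes                         */
+        u32 *writer_index;   /* shared write pointer, updated by the sender  */
+        u32 *reader_index;   /* shared read pointer, updated by the reader   */
+};
+
+/* The reader consumes everything between the read and write pointers. */
+static void shm_fifo_drain(struct shm_fifo *fifo,
+                           void (*deliver)(u8 *msg, u32 len))
+{
+        u32 rd = *fifo-&gt;reader_index;
+        u32 wr = *fifo-&gt;writer_index;
+
+        while (rd != wr) {
+                u32 len = message_length(fifo-&gt;base + rd); /* hypothetical */
+
+                deliver(fifo-&gt;base + rd, len);
+                rd = (rd + len) % fifo-&gt;size;  /* unconditional wrap around */
+        }
+        *fifo-&gt;reader_index = rd;  /* followed by a reader notification */
+}
+ </programlisting>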
+ </chapter>
+
+ <chapter id="concepts">
+ <title>Concepts</title>
+ <para>
+ The user-space application sends ISI, RPC, AUDIO and SECURITY messages. ISI messages are sent through phonet to the SHRM driver. For this purpose there are two interfaces to the SHRM driver: a net interface used for exchanging ISI messages and a char interface for RPC, AUDIO and SECURITY messages. On receiving any of these messages from the user-space application, the driver copies it to a buffer in kernel space and from there to the respective FIFO, from where the CMT reads the message.
+ The CMT (Cellular Mobile Terminal) writes messages to the respective FIFO, from where they are moved to the respective queue. The net/char device copies the message from the queue to the user-space buffer.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ Assumptions:
+ 1. ApeShmFifo#0 is 128 kB in size. It is used for all transmission except CS audio call data. The expected message size is 1.5 kB, with a maximum of 16 kB.
+ 2. ApeShmFifo#1 is 4 kB in size. It is used for transmission of CS audio call data. The expected message size is 24 kb.
+ 3. CmtShmFifo#0 is 128 kB in size. It is used for all transmission except CS audio call data. The expected message size is 1.5 kB, with a maximum of 16 kB.
+ 4. CmtShmFifo#1 is 4 kB in size. It is used for transmission of CS audio call data. The expected message size is 24 kb.
+ The total size of the FIFOs is 264 kB.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ This section lists the APIs provided by the SHRM driver to phonet drivers.
+ </para>
+!Edrivers/modem/shrm/shrm_fifo.c
+ <para>
+ This section lists the APIs provided by the SHRM driver for the transmission of RPC, AUDIO and SECURITY messages.
+ </para>
+!Edrivers/char/shrm_char.c
+
+ </chapter>
+
+ <chapter id="private">
+ <title>Private Functions</title>
+ <para>
+ This Section lists the functions used internally by the SHRM driver to implement FIFO management. It physically reads/writes data to/from memory.
+ </para>
+!Idrivers/modem/shrm/shrm_fifo.c
+ <para>
+ This Section lists the functions used internally by the SHRM driver to implement the SHM protocol and handle all interrupt callback.
+ </para>
+!Idrivers/modem/shrm/shrm_protocol.c
+ <para>
+ This Section lists the functions used internally by the SHRM driver to implement Modem-Host communication L1 interface specifications.
+ </para>
+!Idrivers/modem/shrm/modem_shrm_driver.c
+ </chapter>
+
+ <chapter id="Other">
+ <title>Other Data Structures</title>
+ <para>
+ This section lists some of the data structures used by the SHRM driver.
+ </para>
+!Iinclude/linux/modem/shrm/shrm_driver.h
+!Iinclude/linux/modem/shrm/shrm_private.h
+ </chapter>
+</book>
diff --git a/Documentation/DocBook/ske_keypad.tmpl b/Documentation/DocBook/ske_keypad.tmpl
new file mode 100644
index 00000000000..030d5201990
--- /dev/null
+++ b/Documentation/DocBook/ske_keypad.tmpl
@@ -0,0 +1,89 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="keypad-API-Guide">
+ <bookinfo>
+ <title>SKE Keypad</title>
+
+ <authorgroup>
+ <author>
+ <firstname>NaveenKumar</firstname>
+ <surname>Gaddipati</surname>
+ <affiliation>
+ <address>
+ <email>naveen.gaddipati@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <para>
+ License terms: GNU General Public License (GPL) version 2.
+ </para>
+
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the API provided by the keypad driver for the internal keypad (SKE).
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures which are
+ used in the keypad driver.
+ </para>
+!Iarch/arm/plat-nomadik/include/plat/ske.h
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ The SKE keypad driver does not export any functions.
+ </para>
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal functions.
+ </para>
+!Idrivers/input/keyboard/nomadik-ske-keypad.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/ste_ff_vibra.tmpl b/Documentation/DocBook/ste_ff_vibra.tmpl
new file mode 100644
index 00000000000..cf5f159bed0
--- /dev/null
+++ b/Documentation/DocBook/ste_ff_vibra.tmpl
@@ -0,0 +1,217 @@
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="STE-Force-Feedback-Vibrator-API-Guide">
+ <bookinfo>
+ <title>Force Feedback Vibrator Driver</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Marcin</firstname>
+ <surname>Mielczarczyk</surname>
+ <affiliation>
+ <address>
+ <email>marcin.mielczarczyk@tieto.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+ <toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the implementation of ST-Ericsson's
+ Force Feedback Vibrator driver for the ST-Ericsson Linux platforms.
+ </para>
+ </chapter>
+
+ <chapter id="gettingstarted">
+ <title>Getting Started</title>
+ <para>
+ There are no special compilation flags needed to build the
+ Force Feedback Vibrator driver.
+ </para>
+
+ <section id="basic-tutorial">
+ <title>Basic Tutorial</title>
+ <para>
+ To enable the Force Feedback Vibrator driver using Kconfig, go to
+ <constant> Device Drivers -&gt; Input Device Support -&gt; Miscellaneous devices </constant>
+ and enable the following:
+ <itemizedlist>
+ <listitem><para>ST-Ericsson Force Feedback Vibrator</para></listitem>
+ </itemizedlist>
+ </para>
+ </section>
+
+ </chapter>
+
+ <chapter id="concepts">
+ <title>Concepts</title>
+ <para>
+ The vibrator driver registers itself as a memless force-feedback input device.
+ </para>
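+ <para>
+ Since the driver exposes the standard memless force-feedback interface, a user-space application drives it through the generic input force-feedback API. A minimal sketch follows; error paths are shortened and the magnitude and duration are illustrative:
+ </para>
+ <programlisting>
+#include &lt;fcntl.h&gt;
+#include &lt;string.h&gt;
+#include &lt;unistd.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/input.h&gt;
+
+/* Upload a rumble effect to /dev/input/eventX and play it once. */
+int vibrate(const char *evdev_path)
+{
+        struct ff_effect effect;
+        struct input_event play;
+        int fd = open(evdev_path, O_RDWR);
+
+        if (fd &lt; 0)
+                return -1;
+
+        memset(&amp;effect, 0, sizeof(effect));
+        effect.type = FF_RUMBLE;
+        effect.id = -1;                           /* let the kernel assign an id */
+        effect.u.rumble.strong_magnitude = 0x8000;
+        effect.replay.length = 500;               /* milliseconds */
+
+        if (ioctl(fd, EVIOCSFF, &amp;effect) &lt; 0)
+                return -1;
+
+        memset(&amp;play, 0, sizeof(play));
+        play.type = EV_FF;
+        play.code = effect.id;
+        play.value = 1;
+
+        return write(fd, &amp;play, sizeof(play)) == sizeof(play) ? 0 : -1;
+}
+ </programlisting>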
+ </chapter>
+
+ <chapter id="driver-configuration">
+ <title>Driver Configuration and Interaction</title>
+ <para>
+ There are no configuration parameters for the Force Feedback Vibrator driver.
+ </para>
+ <section id="driver-implemented-operations">
+ <title>Implemented operations in driver</title>
+ <para>
+ All available operations are provided by Memless Input Device class driver.
+ </para>
+ <para>
+ <table>
+ <title> Supported device driver operations </title>
+ <tgroup cols="2"><tbody>
+ <row><entry> open </entry> <entry> Calls the ste_ff_vibra_open() function, which initializes the workqueue </entry> </row>
+ <row><entry> close </entry> <entry> Calls the ste_ff_vibra_close() function, which cancels and destroys the workqueue </entry> </row>
+ </tbody></tgroup>
+ </table>
+ </para>
+ </section>
+ <section id="driver-loading">
+ <title>Driver loading parameters</title>
+ <para>
+ Not Applicable.
+ </para>
+ </section>
+ <section id="driver-ioctl">
+ <title>Driver IO Control</title>
+ <para>
+ Not Applicable.
+ </para>
+ </section>
+
+ <section id="driver-sysfs">
+ <title>Driver Interaction with Sysfs</title>
+ <para>
+ Not Applicable.
+ </para>
+ </section>
+ <section id="driver-proc">
+ <title>Driver Interaction using /proc filesystem</title>
+ <para>
+ Not Applicable.
+ </para>
+ </section>
+
+ <section id="driver-other">
+ <title>Other means for Driver Interaction</title>
+ <para>
+ Not Applicable.
+ </para>
+ </section>
+
+ <section id="driver-node">
+ <title>Driver Node File</title>
+ <para>
+ The Force Feedback Vibrator driver provides the following node file:
+ </para>
+ <variablelist>
+ <varlistentry>
+ <term>eventX - Force Feedback Vibrator node file</term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>File</term>
+ <listitem><para><filename>/dev/input/eventX</filename></para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>Description</term>
+ <listitem>
+ <para>Node file of Force Feedback Vibrator driver</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+ </section>
+
+
+ </chapter>
+
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None.</term>
+ <listitem>
+ <para>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </para>
+ </chapter>
+
+<chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable.
+ </para>
+
+</chapter>
+
+<chapter id="internal-functions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal functions.
+ </para>
+!Edrivers/input/misc/ste_ff_vibra.c
+</chapter>
+
+</book>
diff --git a/Documentation/DocBook/stmpe.tmpl b/Documentation/DocBook/stmpe.tmpl
new file mode 100644
index 00000000000..9e64a00f6b3
--- /dev/null
+++ b/Documentation/DocBook/stmpe.tmpl
@@ -0,0 +1,115 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="STMPE MFD devices">
+ <bookinfo>
+ <title>STMPE IO-Port Expander guide</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Rabin</firstname>
+ <surname>Vincent</surname>
+ <affiliation>
+ <address>
+ <email>rabin.vincent@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License version 2 as published by the Free Software Foundation.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the driver for STMicroelectronics
+ STMPExxxx port expander devices.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None.</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ List of public interfaces in the STMPE driver.
+ </para>
+!Edrivers/mfd/stmpe.c
+ </chapter>
+
+ <chapter id="private">
+ <title>Private Functions</title>
+ <para>
+ <itemizedlist>
+ <listitem><para>STMPE Keypad driver</para></listitem>
+ <listitem><para>STMPE GPIO driver</para></listitem>
+ </itemizedlist>
+ </para>
+ <section id="stmpe-keypad.c">
+ <title>stmpe-keypad.c</title>
+!Idrivers/input/keyboard/stmpe-keypad.c
+ </section>
+ </chapter>
+
+ <chapter id="Other">
+ <title>Other Data Structures</title>
+ <para>
+ This section lists some of the data structures used by the STMPE driver and client drivers.
+ </para>
+!Iinclude/linux/mfd/stmpe.h
+!Idrivers/mfd/stmpe.h
+</chapter>
+</book>
diff --git a/Documentation/DocBook/stylesheet.xsl b/Documentation/DocBook/stylesheet.xsl
index 85b25275196..b2769ce5c8f 100644..100755
--- a/Documentation/DocBook/stylesheet.xsl
+++ b/Documentation/DocBook/stylesheet.xsl
@@ -1,10 +1,18 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<stylesheet xmlns="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<param name="chunk.quietly">1</param>
-<param name="funcsynopsis.style">ansi</param>
-<param name="funcsynopsis.tabular.threshold">80</param>
-<param name="callout.graphics">0</param>
-<!-- <param name="paper.type">A4</param> -->
-<param name="generate.section.toc.level">2</param>
-<param name="use.id.as.filename">1</param>
-</stylesheet>
+<?xml version='1.0'?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ xmlns:fo="http://www.w3.org/1999/XSL/Format"
+ version="1.0">
+ <xsl:param name="use.id.as.filename" select="'1'"/>
+ <xsl:param name="admon.graphics" select="'1'"/>
+ <xsl:param name="admon.graphics.path"></xsl:param>
+ <xsl:param name="chunk.section.depth" select="2"></xsl:param>
+ <xsl:param name="chunk.quietly">1</xsl:param>
+ <xsl:param name="html.stylesheet"
+ select="'style.css'"/>
+ <xsl:param name="section.autolabel" select="1"/>
+ <xsl:param name="table.section.depth" select="1"/>
+ <xsl:param name="toc.section.depth" select="5"/>
+ <xsl:template name="user.header.content">
+ <link href="../style.css" title="walsh" rel="stylesheet" type="text/css"/>
+ </xsl:template>
+</xsl:stylesheet> \ No newline at end of file
diff --git a/Documentation/DocBook/synaptics_rmi4_touchp.tmpl b/Documentation/DocBook/synaptics_rmi4_touchp.tmpl
new file mode 100644
index 00000000000..bc104eb4840
--- /dev/null
+++ b/Documentation/DocBook/synaptics_rmi4_touchp.tmpl
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="Synaptics RMI4 API Guide">
+ <bookinfo>
+ <title>Synaptics RMI4 Touch screen</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Naveen Kumar</firstname>
+ <surname>Gaddipati</surname>
+ <affiliation>
+ <address>
+ <email>naveen.gaddipati@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the functions provided by the
+ touch panel driver for the Synaptics RMI4 controller.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable.
+ </para>
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal
+ functions of the touch panel driver.
+ </para>
+!Idrivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/tc_keypad.tmpl b/Documentation/DocBook/tc_keypad.tmpl
new file mode 100644
index 00000000000..3f2630d9d6c
--- /dev/null
+++ b/Documentation/DocBook/tc_keypad.tmpl
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="TC35893">
+ <bookinfo>
+ <title>TC35893 Keypad</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Jayeeta</firstname>
+ <surname>Banerjee</surname>
+ <affiliation>
+ <address>
+ <email>jayeeta.banerjee@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2010</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the API provided by the keypad driver for the TC35893 controller.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures which are
+ used in the keypad driver.
+ </para>
+!Iinclude/linux/mfd/tc3589x.h
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable.
+ </para>
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal functions.
+ </para>
+!Idrivers/input/keyboard/tc3589x-keypad.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/touchp.tmpl b/Documentation/DocBook/touchp.tmpl
new file mode 100644
index 00000000000..4301b23bfc0
--- /dev/null
+++ b/Documentation/DocBook/touchp.tmpl
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="bu21013_ts">
+ <bookinfo>
+ <title>Touch screen ROHM BU21013MWV</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Naveen Kumar</firstname>
+ <surname>Gaddipati</surname>
+ <affiliation>
+ <address>
+ <email>naveen.gaddipati@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2009</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the functions provided by the touch panel driver for the BU21013 controller.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ Not Applicable.
+ </para>
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal functions of the touch panel driver.
+ </para>
+!Idrivers/input/touchscreen/bu21013_ts.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/u5500_LogicalMailbox.tmpl b/Documentation/DocBook/u5500_LogicalMailbox.tmpl
new file mode 100644
index 00000000000..71a5d6c7c28
--- /dev/null
+++ b/Documentation/DocBook/u5500_LogicalMailbox.tmpl
@@ -0,0 +1,114 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="Mailbox LD">
+ <bookinfo>
+ <title>u5500 Mailbox Logical Driver</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Bibek</firstname>
+ <surname>Basu</surname>
+ <affiliation>
+ <address>
+ <email>bibek.basu@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2011</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Linux standard functions</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <!-- Do NOT remove the legal notice below -->
+
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This documentation describes the API provided by the U5500 Mailbox Logical Driver.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ <variablelist>
+ <varlistentry>
+ <term>None</term>
+ <listitem>
+ <para>
+ None.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures which are
+ used in the U5500 Mailbox Logical Driver.
+ </para>
+!Iarch/arm/mach-ux500/include/mach/mbox_channels-db5500.h
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+	List of public interfaces in the U5500 Mailbox Logical Driver
+ </para>
+!Edrivers/misc/mbox_channels-db5500.c
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the internal functions.
+ </para>
+!Idrivers/misc/mbox_channels-db5500.c
+ </chapter>
+
+ </book>
diff --git a/Documentation/DocBook/ux500_usb.tmpl b/Documentation/DocBook/ux500_usb.tmpl
new file mode 100644
index 00000000000..71b744386d4
--- /dev/null
+++ b/Documentation/DocBook/ux500_usb.tmpl
@@ -0,0 +1,151 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="USB-FUNCTION-Guide">
+ <bookinfo>
+ <title>USB Driver Function guide</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Praveena</firstname>
+ <surname>Nadahally</surname>
+ <affiliation>
+ <address>
+ <email>praveen.nadahally@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>Rajaram</firstname>
+ <surname>Ragupathy</surname>
+ <affiliation>
+ <address>
+ <email>ragupathy.rajaram@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ <author>
+ <firstname>SakethRam</firstname>
+ <surname>Bommisetti</surname>
+ <affiliation>
+ <address>
+ <email>sakethram.bommisetti@stericsson.com</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2011</year>
+ <holder>ST-Ericsson</holder>
+ </copyright>
+
+ <subjectset>
+ <subject>
+ <subjectterm>Connectivity</subjectterm>
+ </subject>
+ </subjectset>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License version 2 as published by the Free Software Foundation.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+	This documentation describes ST-Ericsson's adaptation of external DMA for USB and the communication between the Mentor USB IP controller and the USB
+	transceiver.
+ </para>
+ </chapter>
+
+ <chapter id="concepts">
+ <title>Concepts</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+	ST-Ericsson's USB driver uses the open source Linux gadget stack and the Mentor USB 2.0 IP driver. Since the USB transceiver and the Mentor USB IP controller are separate hardware blocks, APIs are defined for the communication between them. These APIs are available in the ux500.c file.
+	The driver does not have internal DMA dedicated to USB, so the external system DMA is used. The integration of the external DMA with the Mentor controller is available in the ux500_dma.c file.
+	Changes have been made in the musb_core.c file, where the endpoints are configured per platform, and DMA-specific changes have been integrated in the musb_gadget.c file.
+ <!-- TODO: A brief introduction about the concepts
+ which are introduced by the driver.
+ Remove this chapter completely if there are no
+ special concepts introduced by this driver.
+ Do NOT change the chapter id or title! -->
+ <!-- TODO: This guideline for this chapter may be extended
+ during the user-guide guidelines drop. -->
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <!-- Do NOT change the chapter id or title! -->
+ <para>
+ <variablelist>
+
+ <varlistentry>
+ <term>None.</term>
+ <listitem>
+ <para>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </para>
+ </chapter>
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ The musb driver doesn't export any functions.
+ </para>
+ </chapter>
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ List of internal functions
+ </para>
+ <!-- Do NOT change the chapter id or title! -->
+ <!-- TODO: Replace with link to appropriate headerfile(s),
+ source file(s), or both. One per row, ensure the
+ exclamation mark is on the first column! If no
+ appropriate header or source file exist describing a public interface,
+ replace the inclusion with a paragraph containing the text
+ "Not Applicable"-->
+ <section id="ux500_dma.c">
+ <title>ux500_dma.c</title>
+!Idrivers/usb/musb/ux500_dma.c
+ </section>
+ <section id="ux500.c">
+ <title>ux500.c</title>
+!Idrivers/usb/musb/ux500.c
+ </section>
+</chapter>
+</book>
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 4840334ea97..2550754994b 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -224,6 +224,7 @@ Code Seq#(hex) Include File Comments
'j' 00-3F linux/joystick.h
'k' 00-0F linux/spi/spidev.h conflict!
'k' 00-05 video/kyro.h conflict!
+'k' 10-17 linux/hsi/hsi_char.h HSI character device
'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system
<http://web.archive.org/web/*/http://mikonos.dia.unisa.it/tcfs>
'l' 40-7F linux/udf_fs_i.h in development:
diff --git a/Documentation/trace/stm-trace.txt b/Documentation/trace/stm-trace.txt
new file mode 100644
index 00000000000..cd73c2b87b7
--- /dev/null
+++ b/Documentation/trace/stm-trace.txt
@@ -0,0 +1,193 @@
+ MIPI System Trace Module driver
+ ===============================
+
+Copyright (C) ST-Ericsson SA 2011
+ Authors: Pierre Peiffer <pierre dot peiffer at stericsson dot com>
+ Philippe Langlais <philippe dot langlais at linaro dot org>
+ License: The GNU Free Documentation License, Version 1.2
+ (dual licensed under the GPL v2)
+
+Hardware overview
+=================
+ This hardware collects and provides simple tracepoints,
+ so a system processor (in our case the main ARM CPU,
+ or some small CPUs and DSPs) can write some data,
+ up to 8 bytes, into a register and out comes a log entry
+ with a time stamp (20 ns resolution) on one of 256 channels.
+ Hardware tracepoints are also supported.
+
+ The module's external interface is a pad on the chip
+ which complies with the MIPI System Trace Protocol v1.0
+ (see http://www.mipi.org/specifications/debug).
+ The actual trace output can only be read by an
+ electronic probe, not by software, so it cannot be intercepted by
+ the CPU or reach Linux userspace.
+
+ Bandwidth depends on the number of lines & the bus frequency (for example, on the
+ ux500 SoC: 4 lines at max 100MHz, i.e. max 400Mbit/s, shared between 7 cores).
+ Transmit FIFO size: 256 samples of up to 8 bytes.
+ On the ux500 platform there are 2 contiguous STM blocks (i.e. 512 channels).
+
+Software Overview
+=================
+ Write atomicity and write order on STM trace channels are ensured by
+ trying to allocate one channel per execution thread (no concurrent access).
+ There are 2 modes: one lossless but intrusive, aka Software mode, and
+ another lossy but less intrusive, aka Hardware mode. By default
+ all sources are configured in Hardware mode and enabled.
+ The end of a data packet is marked by a time stamp on the latest byte(s) only.
+
+Kernel API
+----------
+ Configuration functions:
+ output trace clock frequency, trace mode, output port configuration
+ and enable/disable STM trace sources
+ A debugfs interface is also exposed for STM trace control
+
+ Alloc/free STM trace channel functions
+
+ Set of low level atomic trace functions for 1, 2, 4 or 8 bytes
+ with & w/o time stamp
+
+ Higher level lockless trace functions:
+ stm_trace_buffer:
+ allocate a channel among the 128 highest channels available
+ output the trace buffer with arbitrary length
+ (latest byte(s) automatically time stamped) then free the channel
+ stm_trace_buffer_onchannel:
+ use given channel to output the trace buffer
+ with arbitrary length (latest byte(s) automatically time stamped)
+
+ File I/O console-like output interface (open, close, write)
+
+ See <trace/stm.h> & drivers/misc/stm.c for more detail
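+
+ A minimal kernel-side sketch (illustrative only: it assumes
+ stm_trace_buffer() takes a (buffer, length) pair as described above,
+ and the helper name and message format are hypothetical; see
+ <trace/stm.h> for the real prototypes):
+
+ #include <linux/kernel.h>
+ #include <trace/stm.h>
+
+ static void my_driver_trace(u32 seq)
+ {
+     char msg[32];
+     int len;
+
+     len = snprintf(msg, sizeof(msg), "my_driver: seq=%u", seq);
+     /* a channel is picked from the 128 highest ones, then freed */
+     stm_trace_buffer(msg, len);
+ }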
+
+debugfs API
+-----------
+clockdiv:
+ This is used to set or display the current clock divisor
+ that is configured
+
+connection:
+ This is used to set or display the current output connector
+ that is configured (common values: 0 = not connected, 1 = default
+ connection, 3 = APE MIPI34 connection on Ux500)
+
+free_channels:
+ This is used to display the total number of free channels
+
+masters_enable:
+ This sets or displays whether the STM trace sources
+ are activated. Each bit represents the state of the corresponding source:
+ 0 to disable it, 1 to enable it.
+
+masters_modes:
+ This sets or displays the STM trace source modes.
+ Each bit represents the mode of the corresponding source:
+ 0 for Software (lossless) mode or 1 for Hardware (lossy) mode.
+
+User API
+--------
+ IOCTLs or debugfs for controls
+ 2-level API for tracing:
+ - Standard write function: a channel is automatically
+ allocated at the first write; afterwards you can get the channel number
+ with the IOCTL STM_GET_CHANNEL_NO
+ - mmap for direct access to all STM trace channel ports, plus
+ a set of IOCTLs to alloc/free channels; in this case you can
+ write your own library to ease its usage
+
+Examples of using the STM
+=========================
+First mount debugfs with:
+mount -t debugfs none /sys/kernel/debug
+
+In a shell script
+----------------
+It's as easy as:
+ echo "My trace point" > /dev/stm
+
+To avoid trace overflow, increase the STM clock by decreasing the clockdiv with:
+ echo 1 >/sys/kernel/debug/stm/clockdiv # now use DIV2 instead of the default DIV8
+If that is not enough you can disable some sources with:
+ echo YourEnableSources > /sys/kernel/debug/stm/masters_enable
+If that is still not enough, the ultimate (more intrusive) way is to change the source
+modes and set the corresponding sources in Software mode (set the corresponding
+source bit to 0) with:
+ echo YourModeSources > /sys/kernel/debug/stm/masters_modes
+ (be aware that some sources don't support Software mode => keep them in HW mode)
+
+NB: on the Ux500 platform, you first have to configure the STM output port to switch
+APE tracing to the MIPI34 connector with:
+ echo 3 > /sys/kernel/debug/stm/connection
+
+In C language
+-------------
+
+The easy but more intrusive way (with an STM buffer copy):
+
+#include <trace/stm.h>
+
+int fd, i;
+char buf[1024]; // Try to align this buffer on 64 bits if possible
+
+ fd = open("/dev/stm", O_WRONLY);
+ snprintf(buf, 1024, "STM0 Hello world\n");
+ write(fd, buf, strlen(buf));
+ ioctl(fd, STM_GET_CHANNEL_NO, &i);
+ snprintf(buf, 1024, "Use channel #%d\n", i);
+ write(fd, buf, strlen(buf));
+ close(fd);
+
+NB: You can call open("/dev/stm", O_WRONLY) as many times as necessary
+to allocate different channels and avoid concurrent accesses in your
+multithreaded application.
+
+The more efficient way: use the mmap'ed STM channel memory (suitable for a library):
+
+#include <trace/stm.h>
+
+int fd, i, c, l, maxChannels;
+char buf[1024]; // Try to align this buffer on 64 bits if possible
+volatile struct stm_channel *channels; // mmap'ed channels area
+
+ fd = open("/dev/stm", O_RDWR);
+ ioctl(fd, STM_GET_NB_MAX_CHANNELS, &maxChannels);
+ channels = (struct stm_channel *)mmap(0, maxChannels*sizeof(*channels),
+ PROT_WRITE, MAP_SHARED, fd, 0);
+ assert(channels != MAP_FAILED);
+
+ if (!ioctl(fd, STM_GET_FREE_CHANNEL, &c)) {
+ l = snprintf(buf, 1024, "STM0 Hello world on channel #%d\n", c);
+ // Lazy implementation: send the buffer 8 bytes at a time when possible,
+ // and make sure you don't share this channel with other threads.
+ for (i=0; i<l; i++) {
+ channels[c].stamp8 = buf[i];
+ }
+ ioctl(fd, STM_RELEASE_CHANNEL, c);
+ }
+ munmap((void *)channels, maxChannels*sizeof(*channels));
+ close(fd);
+
+Kernel Internal usages
+======================
+Channels dedicated to the kernel are allocated dynamically
+among the 128 highest ones.
+
+Via menuconfig you can:
+- Duplicate printk output on a dedicated STM channel (255)
+- Have realtime ftrace output on a dedicated STM channel (254),
+ if the corresponding TRACER is enabled
+- Have realtime sched context switch & sched wakeup output on dedicated channels
+ (253, 252), if the corresponding TRACER is enabled
+- Have stack traces on a dedicated channel (251)
+- Duplicate trace_printk output on dedicated channels (250 & 249)
+
+
+And in the future:
+------------------
+- Use it in the standard kernel tracing infrastructure,
+ possibilities:
+ - Insert other STM trace calls before the trace ring buffer write
+ - Replace the time stamping & trace ring buffer with STM
+
diff --git a/Makefile b/Makefile
index 1932984478c..aba19d81650 100644
--- a/Makefile
+++ b/Makefile
@@ -443,7 +443,7 @@ asm-generic:
no-dot-config-targets := clean mrproper distclean \
cscope gtags TAGS tags help %docs check% coccicheck \
include/linux/version.h headers_% archheaders \
- kernelversion %src-pkg
+ kernelrelease kernelversion %src-pkg
config-targets := 0
mixed-targets := 0
@@ -947,7 +947,7 @@ $(vmlinux-dirs): prepare scripts
# Store (new) KERNELRELASE string in include/config/kernel.release
include/config/kernel.release: include/config/auto.conf FORCE
$(Q)rm -f $@
- $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))" > $@
+ $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion -s $(srctree) -t v$(KERNELVERSION))" > $@
# Things we need to do before we recursively start building the kernel
@@ -1463,7 +1463,7 @@ checkstack:
$(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
kernelrelease:
- @echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
+ @echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion -s $(srctree) -t v$(KERNELVERSION))"
kernelversion:
@echo $(KERNELVERSION)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index dfb0312f4e7..46ad3ebd89b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -74,6 +74,10 @@ config KTIME_SCALAR
bool
default y
+config KTIME_SCALAR
+ bool
+ default y
+
config HAVE_TCM
bool
select GENERIC_ALLOCATOR
@@ -906,9 +910,11 @@ config ARCH_U8500
select GENERIC_CLOCKEVENTS
select CLKDEV_LOOKUP
select ARCH_REQUIRE_GPIOLIB
+ select HAVE_CLK
select ARCH_HAS_CPUFREQ
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
+ select NOMADIK_GPIO
help
Support for ST-Ericsson's Ux500 architecture
@@ -1577,7 +1583,7 @@ config LOCAL_TIMERS
config ARCH_NR_GPIO
int
default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
- default 350 if ARCH_U8500
+ default 355 if ARCH_U8500
default 0
help
Maximum number of GPIOs in the system.
@@ -1723,7 +1729,9 @@ source "mm/Kconfig"
config FORCE_MAX_ZONEORDER
int "Maximum zone order" if ARCH_SHMOBILE
range 11 64 if ARCH_SHMOBILE
+ depends on SA1111 || UX500_SOC_DB8500
default "9" if SA1111
+ default "12" if UX500_SOC_DB8500
default "11"
help
The kernel memory allocator divides physically contiguous memory
@@ -2038,6 +2046,13 @@ config KEXEC
initially work for you. It may help to enable device hotplugging
support.
+config CRASH_SWRESET
+ bool "Perform a software reset at a panic (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on KEXEC
+ help
+ If no crash kernel has been loaded, perform a SW reset as plan B.
+
config ATAGS_PROC
bool "Export atags in procfs"
depends on KEXEC
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index e0d236d7ff7..4edabb439ac 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -302,6 +302,15 @@ config EARLY_PRINTK
kernel low-level debugging functions. Add earlyprintk to your
kernel parameters to enable this console.
+config PRINTK_LL
+ bool "Use printascii in printk"
+ depends on DEBUG_LL
+ help
+ Say Y here if you want to have printk send its output via the
+ kernel low-level debugging functions. This is useful if you
+ are debugging code that executes before the earlyprintk console
+ is initialized.
+
config OC_ETM
bool "On-chip ETM and ETB"
depends on ARM_AMBA
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 6ea9b6f3607..86d67017c1e 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_ARCH_IXP2000) += uengine.o
obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+obj-$(CONFIG_BOOTTIME) += boottime.o
diff --git a/arch/arm/common/boottime.c b/arch/arm/common/boottime.c
new file mode 100644
index 00000000000..73e9e04ed37
--- /dev/null
+++ b/arch/arm/common/boottime.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2009-2010
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Store boot times measured during, for example, u-boot startup.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/boottime.h>
+#include <linux/string.h>
+#include <asm/setup.h>
+
+static u32 bootloader_idle;
+static u32 bootloader_total;
+
+static int __init boottime_parse_tag(const struct tag *tag)
+{
+ int i;
+ char buff[BOOTTIME_MAX_NAME_LEN];
+
+ bootloader_idle = tag->u.boottime.idle;
+ bootloader_total = tag->u.boottime.total;
+
+ for (i = 0; i < tag->u.boottime.num; i++) {
+ snprintf(buff, BOOTTIME_MAX_NAME_LEN, "%s+0x0/0x0",
+ tag->u.boottime.entry[i].name);
+ buff[BOOTTIME_MAX_NAME_LEN - 1] = '\0';
+ boottime_mark_wtime(buff, tag->u.boottime.entry[i].time);
+ }
+
+ return 0;
+}
+
+__tagtable(ATAG_BOOTTIME, boottime_parse_tag);
+
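+/*
+ * Return the bootloader idle time as a percentage (0-100) of the total
+ * bootloader time, or 0 if no boot time information was provided by the
+ * bootloader.
+ */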
+int boottime_bootloader_idle(void)
+{
+ if (bootloader_total == 0)
+ return 0;
+
+ return (int) ((bootloader_idle) / (bootloader_total / 100));
+}
diff --git a/arch/arm/configs/u8500_android_defconfig b/arch/arm/configs/u8500_android_defconfig
new file mode 100644
index 00000000000..7a7fd9e28f9
--- /dev/null
+++ b/arch/arm/configs/u8500_android_defconfig
@@ -0,0 +1,333 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_BOOTTIME=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BLKDEV_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_ARCH_U8500=y
+CONFIG_UX500_SOC_DB8500=y
+CONFIG_MACH_HREFV60=y
+CONFIG_MACH_SNOWBALL=y
+CONFIG_DBX500_PRCMU_DEBUG=y
+# CONFIG_UX500_SUSPEND is not set
+CONFIG_UX500_SUSPEND_STANDBY=y
+CONFIG_UX500_SUSPEND_MEM=y
+CONFIG_UX500_SUSPEND_DBG=y
+CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART=y
+# CONFIG_UX500_USECASE_GOVERNOR is not set
+CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_VSYNC=y
+CONFIG_DB8500_MLOADER=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_CMDLINE="root=/dev/ram0 init=init rw console=ttyAMA2,115200n8 mem=256M initrd=0x800000,72M"
+CONFIG_KEXEC=y
+CONFIG_CRASH_SWRESET=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_U8500_CPUIDLE_DEEPEST_STATE=2
+CONFIG_UX500_CPUIDLE_DEBUG=y
+CONFIG_FPE_NWFPE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_WAKELOCK=y
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_IPV6_SIT is not set
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_PHONET=y
+CONFIG_NET_SCHED=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_REG_DEBUG=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_PM=y
+CONFIG_RFKILL_LEDS=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_REGULATOR=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_NET_9P=y
+CONFIG_CAIF=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=73728
+CONFIG_AB8500_PWM=y
+CONFIG_SENSORS_BH1780=y
+CONFIG_STE_TRACE_MODEM=y
+CONFIG_DISPDEV=y
+CONFIG_U8500_SIM_DETECT=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_CAIF_TTY=m
+CONFIG_CAIF_HSI=m
+CONFIG_SMSC911X=y
+CONFIG_SMSC_PHY=y
+CONFIG_PPP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_ASYNC=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_STMPE=y
+CONFIG_KEYBOARD_TC3589X=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_BU21013=y
+CONFIG_TOUCHSCREEN_CYTTSP_CORE=y
+CONFIG_TOUCHSCREEN_CYTTSP_SPI=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_AB8500_ACCDET=y
+CONFIG_INPUT_AB8500_PONKEY=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_NOMADIK=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_NOMADIK=y
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_TC3589X=y
+CONFIG_GPIO_AB8500=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_AB8500_BM=y
+CONFIG_SENSORS_AB8500=y
+CONFIG_SENSORS_LSM303DLH=y
+CONFIG_SENSORS_LSM303DLHC=y
+CONFIG_SENSORS_L3G4200D=y
+CONFIG_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_U8500_WATCHDOG_DEBUG=y
+CONFIG_TPS6105X=y
+CONFIG_MFD_STMPE=y
+CONFIG_MFD_TC3589X=y
+CONFIG_AB5500_CORE=y
+CONFIG_AB8500_CORE=y
+CONFIG_MFD_DB8500_PRCMU=y
+CONFIG_REGULATOR_DEBUG=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+CONFIG_REGULATOR_AB8500=y
+CONFIG_REGULATOR_DB8500_PRCMU=y
+CONFIG_REGULATOR_AB8500_DEBUG=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+# CONFIG_VIDEO_CAPTURE_DRIVERS is not set
+CONFIG_RADIO_CG2900=y
+CONFIG_GPU_MALI=y
+CONFIG_FB=y
+CONFIG_FB_MCDE=y
+CONFIG_MCDE_FB_AVOID_REALLOC=y
+CONFIG_MCDE_DISPLAY_GENERIC_DSI=y
+CONFIG_MCDE_DISPLAY_SAMSUNG_S6D16D0=y
+CONFIG_MCDE_DISPLAY_SONY_ACX424AKP_DSI=y
+CONFIG_MCDE_DISPLAY_AV8100=y
+CONFIG_MCDE_DISPLAY_HDMI_FB_AUTO_CREATE=y
+CONFIG_AV8100_HWTRIG_I2SDAT3=y
+CONFIG_FB_B2R2=y
+CONFIG_B2R2_PLUG_CONF=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_UX500=y
+CONFIG_SND_SOC_UX500_AB5500=y
+CONFIG_SND_SOC_UX500_AB8500=y
+CONFIG_SND_SOC_UX500_CG29XX=y
+CONFIG_SND_SOC_UX500_AV8100=y
+CONFIG_USB=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_MON=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_UX500=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_AB8500_USB=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_CLKGATE=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+CONFIG_MMC_ARMMMCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_LM3530=y
+CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AB=y
+CONFIG_RTC_DRV_AB8500=y
+CONFIG_DMADEVICES=y
+CONFIG_STE_DMA40=y
+CONFIG_STAGING=y
+CONFIG_AB5500_SIM=y
+CONFIG_CG2900=y
+CONFIG_CG2900_CHIP=y
+CONFIG_STLC2690_CHIP=y
+CONFIG_CG2900_UART=y
+CONFIG_CG2900_AUDIO=y
+CONFIG_CG2900_TEST=y
+CONFIG_BT_CG2900=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_CW1200=m
+CONFIG_CW1200_USE_GPIO_IRQ=y
+CONFIG_CW1200_DEBUGFS=y
+CONFIG_U8500_MMIO=y
+CONFIG_U8500_CM=y
+CONFIG_U8500_FLASH=y
+CONFIG_MODEM_U8500=y
+CONFIG_U8500_SHRM=y
+CONFIG_U8500_SHRM_MODEM_SILENT_RESET=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HFS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_USER=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_UX500=y
+CONFIG_CRYPTO_DEV_UX500_HASH=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=m
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 2d7b6e7b727..2511cad8617 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -1,117 +1,313 @@
CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_BOOTTIME=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_DEFAULT_DEADLINE=y
CONFIG_ARCH_U8500=y
-CONFIG_UX500_SOC_DB5500=y
CONFIG_UX500_SOC_DB8500=y
CONFIG_MACH_HREFV60=y
CONFIG_MACH_SNOWBALL=y
-CONFIG_MACH_U5500=y
+CONFIG_UX500_GPIO_KEYS=y
+CONFIG_DBX500_PRCMU_DEBUG=y
+# CONFIG_UX500_SUSPEND is not set
+CONFIG_UX500_SUSPEND_STANDBY=y
+CONFIG_UX500_SUSPEND_MEM=y
+CONFIG_UX500_SUSPEND_DBG=y
+CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART=y
+CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_VSYNC=y
+CONFIG_DB8500_MLOADER=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_PREEMPT=y
CONFIG_AEABI=y
-CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8"
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_CMDLINE="root=/dev/ram0 init=init rw console=ttyAMA2,115200n8 mem=256M initrd=0x800000,72M"
+CONFIG_KEXEC=y
+CONFIG_CRASH_SWRESET=y
+CONFIG_CRASH_DUMP=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
+CONFIG_NET_KEY=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_IPV6_SIT is not set
CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_PHONET=y
-# CONFIG_WIRELESS is not set
+CONFIG_NET_SCHED=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_REG_DEBUG=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_NET_9P=y
CONFIG_CAIF=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_BLK_DEV_RAM_SIZE=73728
CONFIG_MISC_DEVICES=y
CONFIG_AB8500_PWM=y
CONFIG_SENSORS_BH1780=y
+CONFIG_STE_TRACE_MODEM=y
+CONFIG_DISPDEV=y
+CONFIG_U8500_SIM_DETECT=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_CAIF_TTY=m
+CONFIG_CAIF_HSI=m
CONFIG_SMSC911X=y
CONFIG_SMSC_PHY=y
+CONFIG_PPP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_ASYNC=y
# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_NOMADIK=y
-CONFIG_KEYBOARD_STMPE=y
+CONFIG_KEYBOARD_NOMADIK_SKE=y
CONFIG_KEYBOARD_TC3589X=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_BU21013=y
+CONFIG_TOUCHSCREEN_CYTTSP_CORE=y
+CONFIG_TOUCHSCREEN_CYTTSP_SPI=y
CONFIG_INPUT_MISC=y
+CONFIG_INPUT_AB8500_ACCDET=y
CONFIG_INPUT_AB8500_PONKEY=y
-# CONFIG_SERIO is not set
+CONFIG_INPUT_UINPUT=y
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_NOMADIK=y
-CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
CONFIG_I2C_NOMADIK=y
CONFIG_SPI=y
+CONFIG_STM_MSP_SPI=y
CONFIG_SPI_PL022=y
-CONFIG_GPIO_STMPE=y
+CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_TC3589X=y
-CONFIG_MFD_STMPE=y
+CONFIG_GPIO_AB8500=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_AB8500_BM=y
+CONFIG_SENSORS_AB8500=y
+CONFIG_SENSORS_DBX500=y
+CONFIG_SENSORS_LSM303DLH=y
+CONFIG_SENSORS_LSM303DLHC=y
+CONFIG_SENSORS_L3G4200D=y
+CONFIG_WATCHDOG=y
+CONFIG_UX500_WATCHDOG_DEBUG=y
CONFIG_MFD_TC3589X=y
CONFIG_AB5500_CORE=y
CONFIG_AB8500_CORE=y
+CONFIG_MFD_DB8500_PRCMU=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_DEBUG=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
CONFIG_REGULATOR_AB8500=y
-# CONFIG_HID_SUPPORT is not set
+CONFIG_REGULATOR_DB8500_PRCMU=y
+CONFIG_REGULATOR_AB8500_DEBUG=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+# CONFIG_VIDEO_CAPTURE_DRIVERS is not set
+CONFIG_RADIO_CG2900=y
+CONFIG_DRM=y
+CONFIG_GPU_MALI=y
+CONFIG_FB=y
+CONFIG_FB_MCDE=y
+CONFIG_MCDE_FB_AVOID_REALLOC=y
+CONFIG_MCDE_DISPLAY_SAMSUNG_S6D16D0=y
+CONFIG_MCDE_DISPLAY_SONY_ACX424AKP_DSI=y
+CONFIG_MCDE_DISPLAY_AV8100=y
+# CONFIG_MCDE_DISPLAY_HDMI_FB_AUTO_CREATE is not set
+CONFIG_AV8100_HWTRIG_I2SDAT3=y
+CONFIG_FB_B2R2=y
+CONFIG_B2R2_PLUG_CONF=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_UX500=y
+CONFIG_SND_SOC_UX500_AB5500=y
+CONFIG_SND_SOC_UX500_AB8500=y
+CONFIG_SND_SOC_UX500_CG29XX=y
+CONFIG_SND_SOC_UX500_AV8100=y
+CONFIG_USB=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+CONFIG_USB_MON=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_UX500=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_LIBUSUAL=y
CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_CDC_COMPOSITE=m
+CONFIG_USB_G_MULTI=m
+# CONFIG_USB_G_MULTI_RNDIS is not set
+CONFIG_USB_G_HID=m
CONFIG_AB8500_USB=y
CONFIG_MMC=y
-CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_BLOCK_MINORS=32
+# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_LM3530=y
-CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AB=y
CONFIG_RTC_DRV_AB8500=y
-CONFIG_RTC_DRV_PL031=y
CONFIG_DMADEVICES=y
CONFIG_STE_DMA40=y
CONFIG_STAGING=y
+CONFIG_AB5500_SIM=y
+CONFIG_CG2900=y
+CONFIG_CG2900_CHIP=y
+CONFIG_STLC2690_CHIP=y
+CONFIG_CG2900_UART=y
+CONFIG_CG2900_AUDIO=y
+CONFIG_CG2900_TEST=y
+CONFIG_BT_CG2900=y
CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_CW1200=m
+CONFIG_CW1200_USE_GPIO_IRQ=y
+CONFIG_CW1200_DEBUGFS=y
+CONFIG_U8500_MMIO=y
+CONFIG_U8500_CM=y
+CONFIG_U8500_FLASH=y
CONFIG_HSEM_U8500=y
+CONFIG_MODEM_U8500=y
+CONFIG_U8500_SHRM=y
+CONFIG_U8500_SHRM_MODEM_SILENT_RESET=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CONFIGFS_FS=m
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BLKDEV_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_FTRACE is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_USER=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_UX500=y
+CONFIG_CRYPTO_DEV_UX500_HASH=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=m
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5d8d5c7268..a4bf3199819 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -84,6 +84,14 @@
* - kaddr - page address
* - size - region size
*
+ * clean_dcache_all()
+ *
+ * Cleans the entire d-cache.
+ *
+ * flush_dcache_all()
+ *
+ * Flushes the entire d-cache.
+ *
* DMA Cache Coherency
* ===================
*
@@ -104,6 +112,9 @@ struct cpu_cache_fns {
void (*coherent_user_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_area)(void *, size_t);
+ void (*clean_dcache_all)(void);
+ void (*flush_dcache_all)(void);
+
void (*dma_map_area)(const void *, size_t, int);
void (*dma_unmap_area)(const void *, size_t, int);
@@ -124,6 +135,8 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area
+#define __cpuc_clean_dcache_all cpu_cache.clean_dcache_all
+#define __cpuc_flush_dcache_all cpu_cache.flush_dcache_all
/*
* These are private to the dma-mapping API. Do not use directly.
@@ -144,6 +157,8 @@ extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);
+extern void __cpuc_clean_dcache_all(void);
+extern void __cpuc_flush_dcache_all(void);
/*
* These are private to the dma-mapping API. Do not use directly.
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b2deda18154..91063a3976f 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -8,7 +8,7 @@
#include <asm/param.h> /* HZ */
-extern void __delay(int loops);
+extern void __delay(unsigned long loops);
/*
* This function intentionally does not exist; if you see references to
@@ -40,5 +40,14 @@ extern void __const_udelay(unsigned long);
__const_udelay((n) * ((2199023U*HZ)>>11))) : \
__udelay(n))
+extern void (*delay_fn)(unsigned long);
+
+static inline void set_delay_fn(void (*fn)(unsigned long))
+{
+ delay_fn = fn;
+}
+
+extern void read_current_timer_delay_loop(unsigned long loops);
+
#endif /* defined(_ARM_DELAY_H) */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 0e9ce8d9686..85cf3655914 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -96,8 +96,8 @@ struct elf32_hdr;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-extern int elf_check_arch(const struct elf32_hdr *);
-#define elf_check_arch elf_check_arch
+extern int arm_elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch(x) arm_elf_check_arch((const struct elf32_hdr *)(x))
#define vmcore_elf64_check_arch(x) (0)
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 9275828feb3..77432b1cacf 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -64,6 +64,12 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#define MT_DEVICE_CACHED 2
#define MT_DEVICE_WC 3
/*
+ * NOTE : U8500 v1.0/ED cut specific hack.
+ * look at the commit message for more details
+ */
+#define MT_BACKUP_RAM 4
+
+/*
* types 4 onwards can be found in asm/mach/map.h and are undefined
* for ioremap
*/
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352..e76b9eae8e4 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -33,6 +33,8 @@ struct outer_cache_fns {
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
+ void (*prefetch_enable)(void);
+ void (*prefetch_disable)(void);
void (*set_debug)(unsigned long);
void (*resume)(void);
};
@@ -81,6 +83,18 @@ static inline void outer_resume(void)
outer_cache.resume();
}
+static inline void outer_prefetch_enable(void)
+{
+ if (outer_cache.prefetch_enable)
+ outer_cache.prefetch_enable();
+}
+
+static inline void outer_prefetch_disable(void)
+{
+ if (outer_cache.prefetch_disable)
+ outer_cache.prefetch_disable();
+}
+
#else
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 23ebc0c82a3..ea1384fea05 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -143,6 +143,23 @@ struct tag_memclk {
__u32 fmemclk;
};
+/* for automatic boot timing testcases */
+#define ATAG_BOOTTIME 0x41000403
+#define BOOTTIME_MAX_NAME_LEN 64
+#define BOOTTIME_MAX 10
+
+struct boottime_entry {
+ u32 time; /* in us */
+ u8 name[BOOTTIME_MAX_NAME_LEN];
+};
+
+struct tag_boottime {
+ struct boottime_entry entry[BOOTTIME_MAX];
+ u32 idle; /* in us */
+ u32 total; /* in us */
+ u8 num;
+};
+
struct tag {
struct tag_header hdr;
union {
@@ -165,6 +182,10 @@ struct tag {
* DC21285 specific
*/
struct tag_memclk memclk;
+ /*
+ * Boot time
+ */
+ struct tag_boottime boottime;
} u;
};
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index ef9ffba97ad..7ea2460d9b6 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -25,4 +25,12 @@ extern void __iomem *twd_base;
void twd_timer_setup(struct clock_event_device *);
void twd_timer_stop(struct clock_event_device *);
+#if defined(CONFIG_HOTPLUG) || defined(CONFIG_CPU_IDLE)
+void twd_save(void);
+void twd_restore(void);
+#else
+static inline void twd_save(void) { }
+static inline void twd_restore(void) { }
+#endif
+
#endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index e4c96cc6ec0..6ce949d7d17 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -111,6 +111,8 @@ extern void cpu_init(void);
void soft_restart(unsigned long);
extern void (*arm_pm_restart)(char str, const char *cmd);
+void cpu_idle_wait(void);
+
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5b0bce61eb6..f0c41295216 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -49,10 +49,6 @@ extern void __aeabi_ulcmp(void);
extern void fpundefinstr(void);
- /* platform dependent support */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__const_udelay);
-
/* networking */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index ddba41d1fcf..cac241a8415 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -4,11 +4,13 @@
#include <linux/binfmts.h>
#include <linux/elf.h>
-int elf_check_arch(const struct elf32_hdr *x)
+int arm_elf_check_arch(const struct elf32_hdr *x)
{
unsigned int eflags;
/* Make sure it's an ARM executable */
+ if (x->e_ident[EI_CLASS] != ELF_CLASS)
+ return 0;
if (x->e_machine != EM_ARM)
return 0;
@@ -35,7 +37,7 @@ int elf_check_arch(const struct elf32_hdr *x)
}
return 1;
}
-EXPORT_SYMBOL(elf_check_arch);
+EXPORT_SYMBOL(arm_elf_check_arch);
void elf_set_personality(const struct elf32_hdr *x)
{
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index d6a95ef9131..39fb65fda43 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -854,6 +854,25 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
return ret;
}
+static int hw_breakpoint_undef(struct pt_regs *regs, unsigned int instr)
+{
+ int reg = (instr >> 12) & 15;
+
+ /* Fake sticky power-down cleared */
+ regs->uregs[reg] = 0;
+ regs->ARM_pc += 4;
+
+ return 0;
+}
+
+static struct undef_hook hw_breakpoint_hook = {
+ .instr_mask = 0xffff0fff,
+ .instr_val = 0xee110e95,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = SVC_MODE,
+ .fn = hw_breakpoint_undef,
+};
+
/*
* One-time initialisation.
*/
@@ -900,6 +919,10 @@ static void reset_ctrl_regs(void *unused)
/*
* Ensure sticky power-down is clear (i.e. debug logic is
* powered up).
+ *
+ * This could raise an undefined instruction exception. If it
+ * does, it is fixed up with an undef hook which constructs
+ * a fake value with the sticky power-down bit cleared.
*/
asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
if ((dbg_power & 0x1) == 0)
@@ -987,6 +1010,8 @@ static int __init arch_hw_breakpoint_init(void)
*/
register_undef_hook(&debug_reg_hook);
+ register_undef_hook(&hw_breakpoint_hook);
+
/*
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 764bd456d84..4fde2abd12b 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -46,6 +46,7 @@ void machine_crash_nonpanic_core(void *unused)
printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
smp_processor_id());
crash_save_cpu(&regs, smp_processor_id());
+ atomic_notifier_call_chain(&crash_percpu_notifier_list, 0, NULL);
flush_cache_all();
atomic_dec(&waiting_for_crash_ipi);
@@ -113,3 +114,13 @@ void machine_kexec(struct kimage *image)
soft_restart(reboot_code_buffer_phys);
}
+
+void machine_crash_swreset(void)
+{
+ printk(KERN_INFO "Software reset on panic!\n");
+
+ flush_cache_all();
+ outer_flush_all();
+ outer_disable();
+ arm_pm_restart(0, NULL);
+}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 971d65c253a..14b20051939 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -211,8 +211,17 @@ void cpu_idle(void)
leds_event(led_idle_start);
while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_is_offline(smp_processor_id()))
+ if (cpu_is_offline(smp_processor_id())) {
+
+ /* NOTE : preempt_count() should be 0 for dying CPU
+ * as the CPU will use this very thread when
+ * it is alive
+ */
+ if (preempt_count())
+ preempt_enable_no_resched();
+
cpu_die();
+ }
#endif
local_irq_disable();
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 8085417555d..0697db65efa 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -58,10 +58,6 @@ void *return_address(unsigned int level)
#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
-#if defined(CONFIG_ARM_UNWIND)
-#warning "TODO: return_address should use unwind tables"
-#endif
-
void *return_address(unsigned int level)
{
return NULL;
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index cdeb727527d..712b1681f48 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -279,8 +279,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
notify_cpu_starting(cpu);
- calibrate_delay();
-
smp_store_cpu_info(cpu);
/*
@@ -433,7 +431,7 @@ static void ipi_timer(void)
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void smp_timer_broadcast(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_TIMER);
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 7a79b24597b..c731ac2f0bd 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -31,6 +31,9 @@ void __iomem *twd_base;
static struct clk *twd_clk;
static unsigned long twd_timer_rate;
+static DEFINE_PER_CPU(u32, twd_ctrl);
+static DEFINE_PER_CPU(u32, twd_load);
+
static struct clock_event_device __percpu **twd_evt;
static void twd_set_mode(enum clock_event_mode mode,
@@ -268,3 +271,24 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
0xf, 0xffffffff);
enable_percpu_irq(clk->irq, 0);
}
+
+#if defined(CONFIG_HOTPLUG) || defined(CONFIG_CPU_IDLE)
+void twd_save(void)
+{
+ int this_cpu = smp_processor_id();
+
+ per_cpu(twd_ctrl, this_cpu) = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+ per_cpu(twd_load, this_cpu) = __raw_readl(twd_base + TWD_TIMER_LOAD);
+
+}
+
+void twd_restore(void)
+{
+ int this_cpu = smp_processor_id();
+
+ __raw_writel(per_cpu(twd_ctrl, this_cpu),
+ twd_base + TWD_TIMER_CONTROL);
+ __raw_writel(per_cpu(twd_load, this_cpu),
+ twd_base + TWD_TIMER_LOAD);
+}
+#endif
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
deleted file mode 100644
index 3c9a05c8d20..00000000000
--- a/arch/arm/lib/delay.S
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * linux/arch/arm/lib/delay.S
- *
- * Copyright (C) 1995, 1996 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/param.h>
- .text
-
-.LC0: .word loops_per_jiffy
-.LC1: .word (2199023*HZ)>>11
-
-/*
- * r0 <= 2000
- * lpj <= 0x01ffffff (max. 3355 bogomips)
- * HZ <= 1000
- */
-
-ENTRY(__udelay)
- ldr r2, .LC1
- mul r0, r2, r0
-ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
- mov r1, #-1
- ldr r2, .LC0
- ldr r2, [r2] @ max = 0x01ffffff
- add r0, r0, r1, lsr #32-14
- mov r0, r0, lsr #14 @ max = 0x0001ffff
- add r2, r2, r1, lsr #32-10
- mov r2, r2, lsr #10 @ max = 0x00007fff
- mul r0, r2, r0 @ max = 2^32-1
- add r0, r0, r1, lsr #32-6
- movs r0, r0, lsr #6
- moveq pc, lr
-
-/*
- * loops = r0 * HZ * loops_per_jiffy / 1000000
- *
- * Oh, if only we had a cycle counter...
- */
-
-@ Delay routine
-ENTRY(__delay)
- subs r0, r0, #1
-#if 0
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
- movls pc, lr
- subs r0, r0, #1
-#endif
- bhi __delay
- mov pc, lr
-ENDPROC(__udelay)
-ENDPROC(__const_udelay)
-ENDPROC(__delay)
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
new file mode 100644
index 00000000000..b8d636e8ef8
--- /dev/null
+++ b/arch/arm/lib/delay.c
@@ -0,0 +1,81 @@
+/*
+ * Originally from linux/arch/arm/lib/delay.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
+
+/*
+ * Oh, if only we had a cycle counter...
+ */
+static void delay_loop(unsigned long loops)
+{
+ asm volatile(
+ "1: subs %0, %0, #1 \n"
+ " bhi 1b \n"
+ : /* No output */
+ : "r" (loops)
+ );
+}
+
+#ifdef ARCH_HAS_READ_CURRENT_TIMER
+/*
+ * Assumes read_current_timer() is monotonically increasing
+ * across calls and wraps at most once within MAX_UDELAY_MS.
+ */
+void read_current_timer_delay_loop(unsigned long loops)
+{
+ unsigned long bclock, now;
+
+ read_current_timer(&bclock);
+ do {
+ read_current_timer(&now);
+ } while ((now - bclock) < loops);
+}
+#endif
+
+void (*delay_fn)(unsigned long) = delay_loop;
+
+/*
+ * loops = usecs * HZ * loops_per_jiffy / 1000000
+ */
+void __delay(unsigned long loops)
+{
+ delay_fn(loops);
+}
+EXPORT_SYMBOL(__delay);
+
+/*
+ * 0 <= xloops <= 0x7fffff06
+ * loops_per_jiffy <= 0x01ffffff (max. 3355 bogomips)
+ */
+void __const_udelay(unsigned long xloops)
+{
+ unsigned long lpj;
+ unsigned long loops;
+
+ xloops >>= 14; /* max = 0x01ffffff */
+ lpj = loops_per_jiffy >> 10; /* max = 0x0001ffff */
+ loops = lpj * xloops; /* max = 0x00007fff */
+ loops >>= 6; /* max = 2^32-1 */
+
+ if (likely(loops))
+ __delay(loops);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+/*
+ * usecs <= 2000
+ * HZ <= 1000
+ */
+void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs * ((2199023UL*HZ)>>11));
+}
+EXPORT_SYMBOL(__udelay);
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index bd76394ccaf..b4b94ad97d2 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -189,6 +189,9 @@ ifneq ($(CONFIG_TIDSPBRIDGE),)
obj-y += dsp.o
endif
+omap-ssi-$(CONFIG_OMAP_SSI) := ssi.o
+obj-y += $(omap-ssi-m) $(omap-ssi-y)
+
# Specific board support
obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o
obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o
diff --git a/arch/arm/mach-omap2/ssi.c b/arch/arm/mach-omap2/ssi.c
new file mode 100644
index 00000000000..e822a77f5ca
--- /dev/null
+++ b/arch/arm/mach-omap2/ssi.c
@@ -0,0 +1,134 @@
+/*
+ * linux/arch/arm/mach-omap2/ssi.c
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <plat/omap-pm.h>
+#include <plat/ssi.h>
+
+static struct omap_ssi_platform_data ssi_pdata = {
+ .num_ports = SSI_NUM_PORTS,
+ .get_dev_context_loss_count = omap_pm_get_dev_context_loss_count,
+};
+
+static struct resource ssi_resources[] = {
+ /* SSI controller */
+ [0] = {
+ .start = 0x48058000,
+ .end = 0x48058fff,
+ .name = "omap_ssi_sys",
+ .flags = IORESOURCE_MEM,
+ },
+ /* GDD */
+ [1] = {
+ .start = 0x48059000,
+ .end = 0x48059fff,
+ .name = "omap_ssi_gdd",
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = 71,
+ .end = 71,
+ .name = "ssi_gdd",
+ .flags = IORESOURCE_IRQ,
+ },
+ /* SSI port 1 */
+ [3] = {
+ .start = 0x4805a000,
+ .end = 0x4805a7ff,
+ .name = "omap_ssi_sst1",
+ .flags = IORESOURCE_MEM,
+ },
+ [4] = {
+ .start = 0x4805a800,
+ .end = 0x4805afff,
+ .name = "omap_ssi_ssr1",
+ .flags = IORESOURCE_MEM,
+ },
+ [5] = {
+ .start = 67,
+ .end = 67,
+ .name = "ssi_p1_mpu_irq0",
+ .flags = IORESOURCE_IRQ,
+ },
+ [6] = {
+ .start = 68,
+ .end = 68,
+ .name = "ssi_p1_mpu_irq1",
+ .flags = IORESOURCE_IRQ,
+ },
+ [7] = {
+ .start = 0,
+ .end = 0,
+ .name = "ssi_p1_cawake",
+ .flags = IORESOURCE_IRQ | IORESOURCE_UNSET,
+ },
+};
+
+static struct platform_device ssi_pdev = {
+ .name = "omap_ssi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(ssi_resources),
+ .resource = ssi_resources,
+ .dev = {
+ .platform_data = &ssi_pdata,
+ },
+};
+
+int __init omap_ssi_config(struct omap_ssi_board_config *ssi_config)
+{
+ unsigned int port, offset, cawake_gpio;
+ int err;
+
+ ssi_pdata.num_ports = ssi_config->num_ports;
+ for (port = 0, offset = 7; port < ssi_config->num_ports;
+ port++, offset += 5) {
+ cawake_gpio = ssi_config->cawake_gpio[port];
+ if (!cawake_gpio)
+ continue; /* Nothing to do */
+ err = gpio_request(cawake_gpio, "cawake");
+ if (err < 0)
+ goto rback;
+ gpio_direction_input(cawake_gpio);
+ ssi_resources[offset].start = gpio_to_irq(cawake_gpio);
+ ssi_resources[offset].flags &= ~IORESOURCE_UNSET;
+ ssi_resources[offset].flags |= IORESOURCE_IRQ_HIGHEDGE |
+ IORESOURCE_IRQ_LOWEDGE;
+ }
+
+ return 0;
+rback:
+ dev_err(&ssi_pdev.dev, "Request cawake (gpio%d) failed\n", cawake_gpio);
+ while (port > 0)
+ gpio_free(ssi_config->cawake_gpio[--port]);
+
+ return err;
+}
+
+static int __init ssi_init(void)
+{
+ return platform_device_register(&ssi_pdev);
+}
+subsys_initcall(ssi_init);
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index c59e8b892d6..501821704f6 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -4,21 +4,26 @@ config UX500_SOC_COMMON
bool
default y
select ARM_GIC
- select HAS_MTU
+ select NOMADIK_GPIO
select PL310_ERRATA_753970
select ARM_ERRATA_754322
select ARM_ERRATA_764369
+ select SYS_SOC
+ select HAS_MTU
+
+config UX500_SOC_DBX500
+ depends on UX500_SOC_DB5500 || UX500_SOC_DB8500
+ bool
menu "Ux500 SoC"
config UX500_SOC_DB5500
bool "DB5500"
- select MFD_DB5500_PRCMU
+ select UX500_SOC_DBX500
config UX500_SOC_DB8500
bool "DB8500"
- select MFD_DB8500_PRCMU
- select REGULATOR_DB8500_PRCMU
+ select UX500_SOC_DBX500
endmenu
@@ -27,13 +32,13 @@ menu "Ux500 target platform (boards)"
config MACH_U8500
bool "U8500 Development platform"
depends on UX500_SOC_DB8500
- select TPS6105X
help
Include support for the mop500 development platform.
config MACH_HREFV60
bool "U85000 Development platform, HREFv60 version"
depends on UX500_SOC_DB8500
+ select MACH_U8500
help
Include support for the HREFv60 new development platform.
@@ -51,6 +56,50 @@ config MACH_U5500
Include support for the U5500 development platform.
endmenu
+choice
+ prompt "Ux500 UIB Keylayout"
+ default KEYLAYOUT_LAYOUT1
+
+config KEYLAYOUT_LAYOUT1
+ bool "UIB Keylayout 1; for generic users"
+ help
+ Supported key layout with numeric keys, power/call buttons,
+ volume control, etc.
+
+config KEYLAYOUT_LAYOUT2
+ bool "UIB Keylayout 2; for connectivity users"
+ help
+ Supports a key layout with numerics 0-9, left/right/up/down/back/
+ enter keys and the special character "." (dot).
+
+endchoice
+
+choice
+ prompt "DBx500 sched_clock"
+
+config DBX500_SCHED_CLOCK_PRCMU
+ bool "PRCMU Timer sched_clock"
+ depends on CLKSRC_DBX500_PRCMU
+ select CLKSRC_DBX500_PRCMU_SCHED_CLOCK
+ help
+ Use the always-on PRCMU Timer as sched_clock
+
+config DB5500_MTIMER_SCHED_CLOCK
+ bool "MTIMER sched_clock"
+ depends on CLKSRC_DB5500_MTIMER
+ select CLKSRC_DB5500_MTIMER_SCHED_CLOCK
+ help
+ Use the always-on MTIMER as sched_clock
+
+config DBX500_MTU_SCHED_CLOCK
+ bool "MTU sched_clock"
+ depends on HAS_MTU
+ select NOMADIK_MTU_SCHED_CLOCK
+ help
+ Use the Multi Timer Unit as the sched_clock.
+
+endchoice
+
config UX500_DEBUG_UART
int "Ux500 UART to use for low-level debug"
default 2
@@ -58,6 +107,12 @@ config UX500_DEBUG_UART
Choose the UART on which kernel low-level debug messages should be
output.
+config UX500_GPIO_KEYS
+ bool "Use gpio-keys for proximity and hal sensors"
+ depends on KEYBOARD_GPIO
+ help
+ Add proximity and hall sensors as a gpio keyboard.
+
config U5500_MODEM_IRQ
bool "Modem IRQ support"
depends on UX500_SOC_DB5500
@@ -65,11 +120,72 @@ config U5500_MODEM_IRQ
help
 Add support for handling IRQs from the modem side
-config U5500_MBOX
- bool "Mailbox support"
- depends on U5500_MODEM_IRQ
+config DBX500_PRCMU_DEBUG
+ bool "DBX500 PRCMU debug"
+ depends on ((MFD_DB5500_PRCMU || MFD_DB8500_PRCMU) && DEBUG_FS)
+ help
+ Add support for PRCMU debug
+
+config TEE_UX500
+ bool "Trusted Execution Environment (TEE) ux500 hardware support"
+ depends on TEE_SUPPORT
default y
help
- Add support for U5500 mailbox communication with modem side
+ Adds TEE hardware support for ux500 platforms.
+
+config TEE_SVP
+ bool "Trusted Execution Environment (TEE) ux500 SVP support"
+ depends on TEE_SUPPORT && UX500_SVP
+ default y
+ help
+ Adds TEE support for SVP in ux500 platforms.
+
+config UX500_DEBUG_HWREG
+ bool "Debug hardware registers from userspace"
+ depends on (DEBUG_FS && UX500_SOC_DB8500)
+ help
+ Adds various debug files to access registers.
+ This should never be used for anything other than debugging.
+
+config UX500_DEBUG_NO_LAUTERBACH
+ bool "Disable clocks needed for Lauterbach debugging"
+ help
+ Disable, at boot, the clocks needed for Lauterbach debugging.
+ Saying yes here reduces power consumption.
+
+config UX500_L2X0_PREFETCH_CTRL
+ bool "PL310 prefetch control"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && \
+ (TEE_UX500 && CACHE_L2X0)
+ default y
+ help
+ Adds interface to control instruction and data prefetch.
+ Communication with Trustzone is done through TEE driver.
+
+config UX500_DB_DUMP
+ bool "DBx500 register dump on crash"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500)
+ default y
+ help
+ Reads specific DBx500 registers in case of a kernel crash
+ and saves them.
+
+source "arch/arm/mach-ux500/pm/Kconfig"
+
+source "arch/arm/mach-ux500/Kconfig-arch"
+
+config DB8500_MLOADER
+ bool "Modem firmware upload/download support"
+ depends on UX500_SOC_DB8500
+ select DBX500_MLOADER
+ help
+ Adds Modem firmware upload/download support to DB8500.
+
+config U5500_MLOADER
+ bool "mLoader, mem config from kernel boot args exported to sysfs"
+ depends on UX500_SOC_DB5500
+ help
+ Link between boot args and the user space program that loads the modem ELF.
+ This is used to expose the modem parameters through a sysfs interface.
endif
diff --git a/arch/arm/mach-ux500/Kconfig-arch b/arch/arm/mach-ux500/Kconfig-arch
new file mode 100644
index 00000000000..c3e0fa583a1
--- /dev/null
+++ b/arch/arm/mach-ux500/Kconfig-arch
@@ -0,0 +1,85 @@
+config U8500_SECURE
+ bool "Support for running in Secure mode"
+ default n
+ help
+ Build the kernel to run in Secure mode.
+
+#Configuration for MCDE setup
+
+if FB_MCDE
+
+menu "Display setup"
+
+choice
+ prompt "TV output type"
+ default U8500_TV_OUTPUT_AV8100
+ help
+ Select the source of TV output to use
+
+config U8500_TV_OUTPUT_AV8100
+ bool "AV8100 (HDMI/CVBS)"
+ depends on MCDE_DISPLAY_AV8100
+
+config U8500_TV_OUTPUT_AB8500
+ bool "AB8500 (CVBS)"
+ depends on MCDE_DISPLAY_AB8500_DENC
+
+endchoice
+
+choice
+ prompt "Color depth"
+ depends on DISPLAY_GENERIC_PRIMARY
+ default MCDE_DISPLAY_PRIMARY_16BPP
+ help
+ Select color depth for primary display
+
+config MCDE_DISPLAY_PRIMARY_16BPP
+ bool "16 bpp"
+ help
+ 16 bpp color depth
+
+config MCDE_DISPLAY_PRIMARY_32BPP
+ bool "32 bpp"
+ help
+ 32 bpp color depth
+
+endchoice
+
+choice DISPLAY_GENERIC_DSI_PRIMARY_ROTATION
+ prompt "Enable main display rotation"
+ default DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_90
+ help
+ Set rotation of main display
+
+config DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_0
+ bool "0 degrees"
+config DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_90
+ bool "90 degrees"
+config DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_180
+ bool "180 degrees"
+config DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_270
+ bool "270 degrees"
+endchoice
+
+config DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE
+ int
+ default "0" if DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_0
+ default "90" if DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_90
+ default "180" if DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_180
+ default "270" if DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_270
+
+config DISPLAY_GENERIC_DSI_PRIMARY_VSYNC
+ bool "Enable v-sync for primary display"
+ default n
+ help
+ Say yes to enable v-sync for primary display
+
+config DISPLAY_AV8100_TRIPPLE_BUFFER
+ bool "Enable tripple buffer for HDMI display"
+ depends on MCDE_DISPLAY_AV8100
+ help
+ Say yes to enable triple buffering. Otherwise you get double buffering.
+
+endmenu
+
+endif
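
(Editorial sketch, not part of this patch.) The int symbol above surfaces in C as CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE, so display code can read the selected angle directly; the helper below is invented for illustration:

/* Hypothetical helper; the CONFIG_ macro comes from the Kconfig entry above. */
static inline int primary_display_rotation(void)
{
#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE
	return CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE;
#else
	return 0;
#endif
}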
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index 6bd2f451c18..c3a5e7621b2 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -1,21 +1,69 @@
#
-# Makefile for the linux kernel, U8500 machine.
+# Makefile for the linux kernel, UX500 machine.
#
-obj-y := clock.o cpu.o devices.o devices-common.o \
- id.o usb.o timer.o
+obj-y := clock.o cpu.o devices.o \
+ devices-common.o id.o pins.o \
+ usb.o reboot_reasons.o timer.o \
+ uart-db8500.o clock-debug.o
+obj-y += pm/
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
-obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o dma-db5500.o
-obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
+
+
+ifeq ($(CONFIG_UX500_SOC_DB5500), y)
+obj-$(CONFIG_UX500_SOC_DBX500) += cpu-db5500.o dma-db5500.o \
+ devices-db5500.o clock-db5500.o
+board-mcde-objs += board-u5500-mcde.o
+endif
+ifeq ($(CONFIG_UX500_SOC_DB8500), y)
+obj-$(CONFIG_UX500_SOC_DBX500) += cpu-db8500.o devices-db8500.o \
+ clock-db8500.o dma-db8500.o cpu-db9500.o
+board-mcde-objs += board-mop500-mcde.o
+endif
obj-$(CONFIG_MACH_U8500) += board-mop500.o board-mop500-sdi.o \
board-mop500-regulators.o \
board-mop500-uib.o board-mop500-stuib.o \
- board-mop500-u8500uib.o \
- board-mop500-pins.o
-obj-$(CONFIG_MACH_U5500) += board-u5500.o board-u5500-sdi.o
+ board-mop500-u8500uib.o board-mop500-pins.o \
+ board-mop500-bm.o \
+ board-pins-sleep-force.o
+obj-$(CONFIG_MACH_U5500) += board-u5500.o board-u5500-sdi.o \
+ board-u5500-regulators.o \
+ board-u5500-pins.o
+obj-$(CONFIG_U5500_MMIO) += board-u5500-mmio.o
+obj-$(CONFIG_U8500_MMIO) += board-mop500-mmio.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
obj-$(CONFIG_U5500_MODEM_IRQ) += modem-irq-db5500.o
-obj-$(CONFIG_U5500_MBOX) += mbox-db5500.o
+obj-$(CONFIG_TEE_UX500) += tee_ux500.o product.o
+obj-$(CONFIG_TEE_SVP) += tee_service_svp.o
+obj-$(CONFIG_TEE_SVP) += tee_ta_start_modem_svp.o
+obj-$(CONFIG_DB8500_MLOADER) += mloader-db8500.o
+obj-$(CONFIG_U5500_MLOADER) += mloader-db5500.o
+obj-$(CONFIG_UX500_DEBUG_HWREG) += hwreg.o
+obj-$(CONFIG_HWMEM) += hwmem-int.o
+obj-$(CONFIG_UX500_L2X0_PREFETCH_CTRL) += l2x0-prefetch.o
+obj-$(CONFIG_AB5500_BM) += board-u5500-bm.o
+obj-$(CONFIG_DBX500_PRCMU_DEBUG) += prcmu-debug.o
+obj-$(CONFIG_UX500_DB_DUMP) += dbx500_dump.o
+
+obj-$(CONFIG_HWMEM) += dcache.o
+ifdef CONFIG_STM_TRACE
+obj-$(CONFIG_MACH_U8500) += board-mop500-stm.o
+endif
+ifdef CONFIG_SENSORS_LSM303DLH
+obj-$(CONFIG_MACH_U8500) += board-mop500-sensors.o
+endif
+obj-$(CONFIG_FB_MCDE) += board-mcde.o
+ifdef CONFIG_STM_MSP_SPI
+obj-$(CONFIG_MACH_U8500) += board-mop500-msp.o
+endif
+ifdef CONFIG_CW1200
+obj-$(CONFIG_MACH_U8500) += board-mop500-wlan.o
+obj-$(CONFIG_MACH_U5500) += board-u5500-wlan.o
+endif
+ifdef CONFIG_TOUCHSCREEN_CYTTSP_SPI
+obj-$(CONFIG_MACH_U8500) += board-mop500-cyttsp.o
+obj-$(CONFIG_MACH_U5500) += board-u5500-cyttsp.o
+endif
diff --git a/arch/arm/mach-ux500/board-mop500-bm.c b/arch/arm/mach-ux500/board-mop500-bm.c
new file mode 100644
index 00000000000..66a8c55a0c6
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-bm.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * U8500 board specific charger and battery initialization parameters.
+ *
+ * Author: Johan Palsson <johan.palsson@stericsson.com> for ST-Ericsson.
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ *
+ */
+
+#include <linux/power_supply.h>
+#include <linux/mfd/ab8500/bm.h>
+#include "board-mop500-bm.h"
+
+#ifdef CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL
+/*
+ * These are the defined batteries that use an NTC and an ID resistor placed
+ * inside the battery pack.
+ * Note that the res_to_temp table must be strictly sorted by falling resistance
+ * values to work.
+ */
+static struct res_to_temp temp_tbl_A[] = {
+ {-5, 53407},
+ { 0, 48594},
+ { 5, 43804},
+ {10, 39188},
+ {15, 34870},
+ {20, 30933},
+ {25, 27422},
+ {30, 24347},
+ {35, 21694},
+ {40, 19431},
+ {45, 17517},
+ {50, 15908},
+ {55, 14561},
+ {60, 13437},
+ {65, 12500},
+};
+static struct res_to_temp temp_tbl_B[] = {
+ {-5, 165418},
+ { 0, 159024},
+ { 5, 151921},
+ {10, 144300},
+ {15, 136424},
+ {20, 128565},
+ {25, 120978},
+ {30, 113875},
+ {35, 107397},
+ {40, 101629},
+ {45, 96592},
+ {50, 92253},
+ {55, 88569},
+ {60, 85461},
+ {65, 82869},
+};
+static struct v_to_cap cap_tbl_A[] = {
+ {4171, 100},
+ {4114, 95},
+ {4009, 83},
+ {3947, 74},
+ {3907, 67},
+ {3863, 59},
+ {3830, 56},
+ {3813, 53},
+ {3791, 46},
+ {3771, 33},
+ {3754, 25},
+ {3735, 20},
+ {3717, 17},
+ {3681, 13},
+ {3664, 8},
+ {3651, 6},
+ {3635, 5},
+ {3560, 3},
+ {3408, 1},
+ {3247, 0},
+};
+static struct v_to_cap cap_tbl_B[] = {
+ {4161, 100},
+ {4124, 98},
+ {4044, 90},
+ {4003, 85},
+ {3966, 80},
+ {3933, 75},
+ {3888, 67},
+ {3849, 60},
+ {3813, 55},
+ {3787, 47},
+ {3772, 30},
+ {3751, 25},
+ {3718, 20},
+ {3681, 16},
+ {3660, 14},
+ {3589, 10},
+ {3546, 7},
+ {3495, 4},
+ {3404, 2},
+ {3250, 0},
+};
+#endif
+static struct v_to_cap cap_tbl[] = {
+ {4186, 100},
+ {4163, 99},
+ {4114, 95},
+ {4068, 90},
+ {3990, 80},
+ {3926, 70},
+ {3898, 65},
+ {3866, 60},
+ {3833, 55},
+ {3812, 50},
+ {3787, 40},
+ {3768, 30},
+ {3747, 25},
+ {3730, 20},
+ {3705, 15},
+ {3699, 14},
+ {3684, 12},
+ {3672, 9},
+ {3657, 7},
+ {3638, 6},
+ {3556, 4},
+ {3424, 2},
+ {3317, 1},
+ {3094, 0},
+};
+
+/*
+ * Note that the res_to_temp table must be strictly sorted by falling
+ * resistance values to work.
+ */
+static struct res_to_temp temp_tbl[] = {
+ {-5, 214834},
+ { 0, 162943},
+ { 5, 124820},
+ {10, 96520},
+ {15, 75306},
+ {20, 59254},
+ {25, 47000},
+ {30, 37566},
+ {35, 30245},
+ {40, 24520},
+ {45, 20010},
+ {50, 16432},
+ {55, 13576},
+ {60, 11280},
+ {65, 9425},
+};
+
+#ifdef CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL
+/*
+ * Note that the batres_vs_temp table must be strictly sorted by falling
+ * temperature values to work.
+ */
+static struct batres_vs_temp temp_to_batres_tbl[] = {
+ { 40, 120},
+ { 30, 135},
+ { 20, 165},
+ { 10, 230},
+ { 00, 325},
+ {-10, 445},
+ {-20, 595},
+};
+#else
+/*
+ * Note that the batres_vs_temp table must be strictly sorted by falling
+ * temperature values to work.
+ */
+#ifdef CONFIG_AB8500_9100_LI_ION_BATTERY
+#define BATRES 180
+#else
+#define BATRES 300
+#endif
+static struct batres_vs_temp temp_to_batres_tbl[] = {
+ { 60, BATRES},
+ { 30, BATRES},
+ { 20, BATRES},
+ { 10, BATRES},
+ { 00, BATRES},
+ {-10, BATRES},
+ {-20, BATRES},
+};
+#endif
+static const struct battery_type bat_type[] = {
+ [BATTERY_UNKNOWN] = {
+ /* First element always represent the UNKNOWN battery */
+ .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
+ .resis_high = 0,
+ .resis_low = 0,
+#ifdef CONFIG_AB8500_9100_LI_ION_BATTERY
+ .charge_full_design = 2600,
+#else
+ .charge_full_design = 612,
+#endif
+ .nominal_voltage = 3700,
+#ifdef CONFIG_AB8500_9100_LI_ION_BATTERY
+ .termination_vol = 4150,
+#else
+ .termination_vol = 4050,
+#endif
+ .termination_curr = 200,
+#ifdef CONFIG_AB8500_9100_LI_ION_BATTERY
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 520,
+ .normal_vol_lvl = 4200,
+#else
+ .recharge_vol = 3990,
+ .normal_cur_lvl = 400,
+ .normal_vol_lvl = 4100,
+#endif
+ .maint_a_cur_lvl = 400,
+ .maint_a_vol_lvl = 4050,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 400,
+ .maint_b_vol_lvl = 4000,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl),
+ .batres_tbl = temp_to_batres_tbl,
+ },
+
+#ifdef CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 53407,
+ .resis_low = 12500,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_A),
+ .r_to_t_tbl = temp_tbl_A,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_A),
+ .v_to_cap_tbl = cap_tbl_A,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl),
+ .batres_tbl = temp_to_batres_tbl,
+
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 165418,
+ .resis_low = 82869,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B),
+ .r_to_t_tbl = temp_tbl_B,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B),
+ .v_to_cap_tbl = cap_tbl_B,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl),
+ .batres_tbl = temp_to_batres_tbl,
+ },
+#else
+/*
+ * These are the batteries that don't have an internal NTC resistor to measure
+ * their temperature. The temperature in this case is measured with an NTC
+ * placed near the battery, on the PCB.
+ */
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 76000,
+ .resis_low = 53000,
+ .charge_full_design = 900,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl),
+ .batres_tbl = temp_to_batres_tbl,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 30000,
+ .resis_low = 10000,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl),
+ .batres_tbl = temp_to_batres_tbl,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 95000,
+ .resis_low = 76001,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl),
+ .batres_tbl = temp_to_batres_tbl,
+ },
+#endif
+};
+
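+/*
+ * The supplied_to arrays below list the supplies to be notified when each
+ * supply changes state.
+ */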
+static char *ab8500_charger_supplied_to[] = {
+ "ab8500_chargalg",
+ "ab8500_fg",
+ "ab8500_btemp",
+};
+
+static char *ab8500_btemp_supplied_to[] = {
+ "ab8500_chargalg",
+ "ab8500_fg",
+};
+
+static char *ab8500_fg_supplied_to[] = {
+ "ab8500_chargalg",
+ "ab8500_usb",
+};
+
+static char *ab8500_chargalg_supplied_to[] = {
+ "ab8500_fg",
+};
+
+struct ab8500_charger_platform_data ab8500_charger_plat_data = {
+ .supplied_to = ab8500_charger_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab8500_charger_supplied_to),
+ .autopower_cfg = false,
+};
+
+struct ab8500_btemp_platform_data ab8500_btemp_plat_data = {
+ .supplied_to = ab8500_btemp_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab8500_btemp_supplied_to),
+};
+
+struct ab8500_fg_platform_data ab8500_fg_plat_data = {
+ .supplied_to = ab8500_fg_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab8500_fg_supplied_to),
+};
+
+struct ab8500_chargalg_platform_data ab8500_chargalg_plat_data = {
+ .supplied_to = ab8500_chargalg_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab8500_chargalg_supplied_to),
+};
+
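+/* Battery capacity thresholds, in percent of full charge. */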
+static const struct ab8500_bm_capacity_levels cap_levels = {
+ .critical = 2,
+ .low = 10,
+ .normal = 70,
+ .high = 95,
+ .full = 100,
+};
+
+static const struct ab8500_fg_parameters fg = {
+ .recovery_sleep_timer = 10,
+ .recovery_total_time = 100,
+ .init_timer = 1,
+ .init_discard_time = 5,
+ .init_total_time = 40,
+ .high_curr_time = 60,
+ .accu_charging = 30,
+ .accu_high_curr = 30,
+ .high_curr_threshold = 50,
+ .lowbat_threshold = 3100,
+ .battok_falling_th_sel0 = 2860,
+ .battok_raising_th_sel1 = 2860,
+ .user_cap_limit = 15,
+ .maint_thres = 97,
+};
+
+static const struct ab8500_maxim_parameters maxi_params = {
+ .ena_maxi = true,
+ .chg_curr = 910,
+ .wait_cycles = 10,
+ .charger_curr_step = 100,
+};
+
+static const struct ab8500_bm_charger_parameters chg = {
+ .usb_volt_max = 5500,
+ .usb_curr_max = 1500,
+ .ac_volt_max = 7500,
+ .ac_curr_max = 1500,
+};
+
+struct ab8500_bm_data ab8500_bm_data = {
+ .temp_under = 3,
+ .temp_low = 8,
+ .temp_high = 43,
+ .temp_over = 48,
+ .main_safety_tmr_h = 4,
+ .temp_interval_chg = 20,
+ .temp_interval_nochg = 120,
+ .usb_safety_tmr_h = 4,
+ .bkup_bat_v = BUP_VCH_SEL_2P6V,
+ .bkup_bat_i = BUP_ICH_SEL_150UA,
+#ifdef CONFIG_AB8500_9100_LI_ION_BATTERY
+ .no_maintenance = true,
+#else
+ .no_maintenance = false,
+#endif
+#ifdef CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL
+ .adc_therm = ADC_THERM_BATCTRL,
+#else
+ .adc_therm = ADC_THERM_BATTEMP,
+#endif
+#ifdef CONFIG_AB8500_9100_LI_ION_BATTERY
+ .chg_unknown_bat = true,
+#else
+ .chg_unknown_bat = false,
+#endif
+ .enable_overshoot = false,
+ .fg_res = 100,
+ .cap_levels = &cap_levels,
+ .bat_type = bat_type,
+ .n_btypes = ARRAY_SIZE(bat_type),
+ .batt_id = 0,
+ .interval_charging = 5,
+ .interval_not_charging = 120,
+ .temp_hysteresis = 3,
+ .gnd_lift_resistance = 34,
+ .maxi = &maxi_params,
+ .chg_params = &chg,
+ .fg_params = &fg,
+};
diff --git a/arch/arm/mach-ux500/board-mop500-bm.h b/arch/arm/mach-ux500/board-mop500-bm.h
new file mode 100644
index 00000000000..eb2450f1ab5
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-bm.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * U8500 board specific charger and battery initialization parameters.
+ *
+ * Author: Johan Palsson <johan.palsson@stericsson.com> for ST-Ericsson.
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ *
+ */
+
+#ifndef __BOARD_MOP500_BM_H
+#define __BOARD_MOP500_BM_H
+
+#include <linux/mfd/ab8500/bm.h>
+
+extern struct ab8500_charger_platform_data ab8500_charger_plat_data;
+extern struct ab8500_btemp_platform_data ab8500_btemp_plat_data;
+extern struct ab8500_fg_platform_data ab8500_fg_plat_data;
+extern struct ab8500_chargalg_platform_data ab8500_chargalg_plat_data;
+extern struct ab8500_bm_data ab8500_bm_data;
+
+#endif
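
(Editorial sketch, not from this patch.) A board file would typically hand these exported tables to the AB8500 MFD core through its platform data; the header path and field names below are assumptions for illustration only:

#include <linux/mfd/ab8500.h>	/* assumed header path */
#include "board-mop500-bm.h"

static struct ab8500_platform_data ab8500_platdata = {
	/* Field names are assumptions, not taken from this patch. */
	.bm       = &ab8500_bm_data,
	.charger  = &ab8500_charger_plat_data,
	.btemp    = &ab8500_btemp_plat_data,
	.fg       = &ab8500_fg_plat_data,
	.chargalg = &ab8500_chargalg_plat_data,
};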
diff --git a/arch/arm/mach-ux500/board-mop500-cyttsp.c b/arch/arm/mach-ux500/board-mop500-cyttsp.c
new file mode 100755
index 00000000000..2bfa9fb17fa
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-cyttsp.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Author: Avinash A <avinash.a@stericsson.com> for ST-Ericsson
+ * License terms:GNU General Public License (GPL) version 2
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/cyttsp.h>
+#include <linux/delay.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/i2c.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/mfd/tc3589x.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/amba/pl022.h>
+#include <plat/pincfg.h>
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/irqs-db8500.h>
+#include <asm/mach-types.h>
+#include "pins-db8500.h"
+#include "board-mop500.h"
+#include "devices-db8500.h"
+
+#define NUM_SSP_CLIENTS 10
+
+/* cyttsp_plat_init(): configures the touch panel slave-select GPIO. */
+static int cyttsp_plat_init(int on)
+{
+ int ret;
+
+ ret = gpio_direction_output(CYPRESS_SLAVE_SELECT_GPIO, 1);
+ if (ret < 0) {
+ pr_err("slave select gpio direction failed\n");
+ gpio_free(CYPRESS_SLAVE_SELECT_GPIO);
+ return ret;
+ }
+ return 0;
+}
+
+static struct pl022_ssp_controller mop500_spi2_data = {
+ .bus_id = SPI023_2_CONTROLLER,
+ .num_chipselect = NUM_SSP_CLIENTS,
+};
+
+static int cyttsp_wakeup(void)
+{
+ int ret;
+
+ ret = gpio_request(CYPRESS_TOUCH_INT_PIN, "Wakeup_pin");
+ if (ret < 0) {
+ pr_err("touch gpio failed\n");
+ return ret;
+ }
+ ret = gpio_direction_output(CYPRESS_TOUCH_INT_PIN, 1);
+ if (ret < 0) {
+ pr_err("touch gpio direction failed\n");
+ goto out;
+ }
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 0);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 1);
+ /*
+ * To wake the controller from its sleep state, the interrupt pin
+ * needs to be pulsed twice with a delay greater than 2 microseconds
+ * between the pulses.
+ */
+ udelay(3);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 0);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 1);
+ ret = gpio_direction_input(CYPRESS_TOUCH_INT_PIN);
+ if (ret < 0) {
+ pr_err("touch gpio direction IN config failed\n");
+ goto out;
+ }
+out:
+ gpio_free(CYPRESS_TOUCH_INT_PIN);
+ return ret;
+}
+struct cyttsp_platform_data cyttsp_platdata = {
+ .maxx = 480,
+ .maxy = 854,
+ .flags = 0,
+ .gen = CY_GEN3,
+ .use_st = 0,
+ .use_mt = 1,
+ .use_trk_id = 0,
+ .use_hndshk = 0,
+ .use_sleep = 1,
+ .use_gestures = 0,
+ .use_load_file = 0,
+ .use_force_fw_update = 0,
+ .use_virtual_keys = 0,
+ /* activate up to 4 groups and set active distance */
+ .gest_set = CY_GEST_GRP_NONE | CY_ACT_DIST,
+ /* change scn_type to enable finger and/or stylus detection */
+ .scn_typ = 0xA5, /* autodetect finger+stylus; balanced mutual scan */
+ .act_intrvl = CY_ACT_INTRVL_DFLT, /* Active refresh interval; ms */
+ .tch_tmout = CY_TCH_TMOUT_DFLT, /* Active touch timeout; ms */
+ .lp_intrvl = CY_LP_INTRVL_DFLT, /* Low power refresh interval; ms */
+ .init = cyttsp_plat_init,
+ .mt_sync = input_mt_sync,
+ .wakeup = cyttsp_wakeup,
+ .name = CY_SPI_NAME,
+ .irq_gpio = CYPRESS_TOUCH_INT_PIN,
+ .rst_gpio = CYPRESS_TOUCH_RST_GPIO,
+};
+
+static void cyttsp_spi_cs_control(u32 command)
+{
+ if (command == SSP_CHIP_SELECT)
+ gpio_set_value(CYPRESS_SLAVE_SELECT_GPIO, 0);
+ else if (command == SSP_CHIP_DESELECT)
+ gpio_set_value(CYPRESS_SLAVE_SELECT_GPIO, 1);
+}
+
+static struct pl022_config_chip cyttsp_ssp_config_chip = {
+ .com_mode = INTERRUPT_TRANSFER,
+ .iface = SSP_INTERFACE_MOTOROLA_SPI,
+ /* we can act as master only */
+ .hierarchy = SSP_MASTER,
+ .slave_tx_disable = 0,
+ .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
+ .tx_lev_trig = SSP_TX_16_OR_MORE_EMPTY_LOC,
+ .ctrl_len = SSP_BITS_16,
+ .wait_state = SSP_MWIRE_WAIT_ZERO,
+ .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+ .cs_control = cyttsp_spi_cs_control,
+};
+
+static struct spi_board_info cypress_spi_devices[] = {
+ {
+ .modalias = CY_SPI_NAME,
+ .controller_data = &cyttsp_ssp_config_chip,
+ .platform_data = &cyttsp_platdata,
+ .max_speed_hz = 1000000,
+ .bus_num = SPI023_2_CONTROLLER,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ }
+};
+
+/*
+ * TC35893
+ */
+static const unsigned int sony_keymap[] = {
+ KEY(3, 1, KEY_END),
+ KEY(4, 1, KEY_HOME),
+ KEY(6, 4, KEY_VOLUMEDOWN),
+ KEY(4, 2, KEY_EMAIL),
+ KEY(3, 3, KEY_RIGHT),
+ KEY(2, 5, KEY_BACKSPACE),
+
+ KEY(6, 7, KEY_MENU),
+ KEY(5, 0, KEY_ENTER),
+ KEY(4, 3, KEY_0),
+ KEY(3, 4, KEY_DOT),
+ KEY(5, 2, KEY_UP),
+ KEY(3, 5, KEY_DOWN),
+
+ KEY(4, 5, KEY_SEND),
+ KEY(0, 5, KEY_BACK),
+ KEY(6, 2, KEY_VOLUMEUP),
+ KEY(1, 3, KEY_SPACE),
+ KEY(7, 6, KEY_LEFT),
+ KEY(5, 5, KEY_SEARCH),
+};
+
+static struct matrix_keymap_data sony_keymap_data = {
+ .keymap = sony_keymap,
+ .keymap_size = ARRAY_SIZE(sony_keymap),
+};
+
+static struct tc3589x_keypad_platform_data tc35893_data = {
+ .krow = TC_KPD_ROWS,
+ .kcol = TC_KPD_COLUMNS,
+ .debounce_period = TC_KPD_DEBOUNCE_PERIOD,
+ .settle_time = TC_KPD_SETTLE_TIME,
+ .irqtype = IRQF_TRIGGER_FALLING,
+ .enable_wakeup = true,
+ .keymap_data = &sony_keymap_data,
+ .no_autorepeat = true,
+};
+
+static struct tc3589x_platform_data tc3589x_keypad_data = {
+ .block = TC3589x_BLOCK_KEYPAD,
+ .keypad = &tc35893_data,
+ .irq_base = MOP500_EGPIO_IRQ_BASE,
+};
+
+static struct i2c_board_info __initdata mop500_i2c0_devices_u8500[] = {
+ {
+ I2C_BOARD_INFO("tc3589x", 0x44),
+ .platform_data = &tc3589x_keypad_data,
+ .irq = NOMADIK_GPIO_TO_IRQ(64),
+ .flags = I2C_CLIENT_WAKE,
+ },
+};
+
+void mop500_cyttsp_init(void)
+{
+ int ret = 0;
+
+ /*
+ * Enable the alternative C function
+ * in the PRCMU register
+ */
+ prcmu_enable_spi2();
+ ret = gpio_request(CYPRESS_SLAVE_SELECT_GPIO, "slave_select_gpio");
+ if (ret < 0)
+ pr_err("slave select gpio failed\n");
+ spi_register_board_info(cypress_spi_devices,
+ ARRAY_SIZE(cypress_spi_devices));
+}
+
+void __init mop500_u8500uib_r3_init(void)
+{
+ mop500_cyttsp_init();
+ db8500_add_spi2(&mop500_spi2_data);
+ nmk_config_pin((GPIO64_GPIO | PIN_INPUT_PULLUP), false);
+ mop500_uib_i2c_add(0, mop500_i2c0_devices_u8500,
+ ARRAY_SIZE(mop500_i2c0_devices_u8500));
+}
diff --git a/arch/arm/mach-ux500/board-mop500-mcde.c b/arch/arm/mach-ux500/board-mop500-mcde.c
new file mode 100644
index 00000000000..23da62638f8
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-mcde.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/mfd/ab8500/denc.h>
+#include <linux/workqueue.h>
+#include <linux/dispdev.h>
+#include <linux/compdev.h>
+#include <asm/mach-types.h>
+#include <linux/clk.h>
+#include <mach/devices.h>
+#include <video/av8100.h>
+#include <video/mcde_display.h>
+#include <video/mcde_display-vuib500-dpi.h>
+#include <video/mcde_display-sony_acx424akp_dsi.h>
+#include <video/mcde_display-av8100.h>
+#include <video/mcde_display-ab8500.h>
+#include <video/mcde_fb.h>
+#include <video/mcde_dss.h>
+#include <plat/pincfg.h>
+#include "pins-db8500.h"
+#include "pins.h"
+#include "board-mop500.h"
+
+#define DSI_UNIT_INTERVAL_0 0x9
+#define DSI_UNIT_INTERVAL_1 0x9
+#define DSI_UNIT_INTERVAL_2 0x5
+
+#define DSI_PLL_FREQ_HZ 840320000
+/* Based on PLL DDR frequency of 798.72 MHz */
+#define HDMI_FREQ_HZ 33280000
+#define TV_FREQ_HZ 38400000
+
+#ifdef CONFIG_U8500_TV_OUTPUT_AV8100
+/* The initialization of the HDMI display driver must be delayed to
+ * ensure that inputclk is available (needed by the HDMI hardware). */
+static struct delayed_work work_dispreg_hdmi;
+#define DISPREG_HDMI_DELAY 6000
+#endif
+
+enum {
+ PRIMARY_DISPLAY_ID,
+ SECONDARY_DISPLAY_ID,
+ FICTIVE_DISPLAY_ID,
+ AV8100_DISPLAY_ID,
+ AB8500_DISPLAY_ID,
+ MCDE_NR_OF_DISPLAYS
+};
+
+static int display_initialized_during_boot;
+
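+/*
+ * Parses "startup_graphics=<n>" from the kernel command line; 1 means the
+ * display was already initialized during boot (startup graphics).
+ */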
+static int __init startup_graphics_setup(char *str)
+{
+
+ if (get_option(&str, &display_initialized_during_boot) != 1)
+ display_initialized_during_boot = 0;
+
+ switch (display_initialized_during_boot) {
+ case 1:
+ pr_info("Startup graphics support\n");
+ break;
+ case 0:
+ default:
+ pr_info("No startup graphics supported\n");
+ break;
+ }
+
+ return 1;
+}
+__setup("startup_graphics=", startup_graphics_setup);
+
+#if defined(CONFIG_U8500_TV_OUTPUT_AV8100) || \
+ defined(CONFIG_U8500_TV_OUTPUT_AB8500)
+static struct mcde_col_transform rgb_2_yCbCr_transform = {
+ .matrix = {
+ {0x0042, 0x0081, 0x0019},
+ {0xffda, 0xffb6, 0x0070},
+ {0x0070, 0xffa2, 0xffee},
+ },
+ .offset = {0x10, 0x80, 0x80},
+};
+#endif
+
+static struct mcde_display_dsi_platform_data samsung_s6d16d0_pdata0 = {
+ .link = 0,
+};
+
+static struct mcde_display_device samsung_s6d16d0_display0 = {
+ .name = "samsung_s6d16d0",
+ .id = PRIMARY_DISPLAY_ID,
+ .chnl_id = MCDE_CHNL_A,
+ .fifo = MCDE_FIFO_A,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGBA8888,
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_VSYNC
+ .synchronized_update = true,
+#else
+ .synchronized_update = false,
+#endif
+ /* TODO: Remove rotation buffers once ESRAM driver is completed */
+ .rotbuf1 = U8500_ESRAM_BASE + 0x20000 * 4 + 0x2000,
+ .rotbuf2 = U8500_ESRAM_BASE + 0x20000 * 4 + 0x10000,
+ .dev = {
+ .platform_data = &samsung_s6d16d0_pdata0,
+ },
+};
+
+static struct mcde_port sony_port0 = {
+ .link = 0,
+};
+
+static struct mcde_display_sony_acx424akp_platform_data
+ sony_acx424akp_display0_pdata = {
+ .reset_gpio = HREFV60_DISP1_RST_GPIO,
+};
+
+static struct mcde_display_device sony_acx424akp_display0 = {
+ .name = "mcde_disp_sony_acx424akp",
+ .id = PRIMARY_DISPLAY_ID,
+ .port = &sony_port0,
+ .chnl_id = MCDE_CHNL_A,
+ .fifo = MCDE_FIFO_A,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGBA8888,
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_VSYNC
+ .synchronized_update = true,
+#else
+ .synchronized_update = false,
+#endif
+ .rotbuf1 = U8500_ESRAM_BASE + 0x20000 * 4 + 0x2000,
+ .rotbuf2 = U8500_ESRAM_BASE + 0x20000 * 4 + 0x10000,
+ .dev = {
+ .platform_data = &sony_acx424akp_display0_pdata,
+ },
+};
+
+static struct mcde_display_dsi_platform_data samsung_s6d16d0_pdata1 = {
+ .link = 1,
+};
+
+static struct mcde_display_device samsung_s6d16d0_display1 = {
+ .name = "samsung_s6d16d0",
+ .id = SECONDARY_DISPLAY_ID,
+ .chnl_id = MCDE_CHNL_C1,
+ .fifo = MCDE_FIFO_C1,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+ .synchronized_update = false,
+ .dev = {
+ .platform_data = &samsung_s6d16d0_pdata1,
+ },
+};
+
+#ifdef CONFIG_U8500_TV_OUTPUT_AB8500
+static struct mcde_port port_tvout1 = {
+ .type = MCDE_PORTTYPE_DPI,
+ .pixel_format = MCDE_PORTPIXFMT_DPI_24BPP,
+ .link = 1, /* channel B */
+ .sync_src = MCDE_SYNCSRC_OFF,
+ .update_auto_trig = true,
+ .phy = {
+ .dpi = {
+ .bus_width = 4, /* DDR mode */
+ .tv_mode = true,
+ .clock_div = MCDE_PORT_DPI_NO_CLOCK_DIV,
+ },
+ },
+};
+
+static struct ab8500_display_platform_data ab8500_display_pdata = {
+ .nr_regulators = 2,
+ .regulator_id = {"vtvout", "vcc-N2158"},
+ .rgb_2_yCbCr_transform = &rgb_2_yCbCr_transform,
+};
+
+static struct ux500_pins *tvout_pins;
+
+static int ab8500_platform_enable(struct mcde_display_device *ddev)
+{
+ int res = 0;
+
+ if (!tvout_pins) {
+ tvout_pins = ux500_pins_get("mcde-tvout");
+ if (!tvout_pins)
+ return -EINVAL;
+ }
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ res = ux500_pins_enable(tvout_pins);
+ if (res != 0)
+ goto failed;
+
+ return res;
+
+failed:
+ dev_warn(&ddev->dev, "Failure during %s\n", __func__);
+ return res;
+}
+
+static int ab8500_platform_disable(struct mcde_display_device *ddev)
+{
+ int res;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+
+ res = ux500_pins_disable(tvout_pins);
+ if (res != 0)
+ goto failed;
+ return res;
+
+failed:
+ dev_warn(&ddev->dev, "Failure during %s\n", __func__);
+ return res;
+}
+
+static struct mcde_display_device tvout_ab8500_display = {
+ .name = "mcde_tv_ab8500",
+ .id = AB8500_DISPLAY_ID,
+ .port = &port_tvout1,
+ .chnl_id = MCDE_CHNL_B,
+ .fifo = MCDE_FIFO_B,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+ .native_x_res = 720,
+ .native_y_res = 576,
+ .dev = {
+ .platform_data = &ab8500_display_pdata,
+ },
+
+ /*
+ * We might need to describe the std here:
+ * - there are different PAL / NTSC formats (do they require MCDE
+ * settings?)
+ */
+ .platform_enable = ab8500_platform_enable,
+ .platform_disable = ab8500_platform_disable,
+};
+#endif
+
+#ifdef CONFIG_U8500_TV_OUTPUT_AV8100
+
+#if defined(CONFIG_AV8100_HWTRIG_INT)
+ #define AV8100_SYNC_SRC MCDE_SYNCSRC_TE0
+#elif defined(CONFIG_AV8100_HWTRIG_I2SDAT3)
+ #define AV8100_SYNC_SRC MCDE_SYNCSRC_TE1
+#elif defined(CONFIG_AV8100_HWTRIG_DSI_TE)
+ #define AV8100_SYNC_SRC MCDE_SYNCSRC_TE_POLLING
+#else
+ #define AV8100_SYNC_SRC MCDE_SYNCSRC_OFF
+#endif
+static struct mcde_port av8100_port2 = {
+ .type = MCDE_PORTTYPE_DSI,
+ .mode = MCDE_PORTMODE_CMD,
+ .pixel_format = MCDE_PORTPIXFMT_DSI_24BPP,
+ .ifc = 1,
+ .link = 2,
+ .sync_src = AV8100_SYNC_SRC,
+ .update_auto_trig = true,
+ .phy = {
+ .dsi = {
+ .num_data_lanes = 2,
+ .ui = DSI_UNIT_INTERVAL_2,
+ },
+ },
+ .hdmi_sdtv_switch = HDMI_SWITCH,
+};
+
+static struct mcde_display_hdmi_platform_data av8100_hdmi_pdata = {
+ .cvbs_regulator_id = "vcc-N2158",
+ .rgb_2_yCbCr_transform = &rgb_2_yCbCr_transform,
+};
+
+static struct ux500_pins *av8100_pins;
+static int av8100_platform_enable(struct mcde_display_device *ddev)
+{
+ int res;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ if (!av8100_pins) {
+ av8100_pins = ux500_pins_get("av8100-hdmi");
+ if (!av8100_pins) {
+ res = -EINVAL;
+ goto failed;
+ }
+ }
+
+ res = ux500_pins_enable(av8100_pins);
+ if (res != 0)
+ goto failed;
+
+ return res;
+
+failed:
+ dev_warn(&ddev->dev, "Failure during %s\n", __func__);
+ return res;
+}
+
+static int av8100_platform_disable(struct mcde_display_device *ddev)
+{
+ int res;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+
+ res = ux500_pins_disable(av8100_pins);
+ if (res != 0)
+ goto failed;
+ return res;
+
+failed:
+ dev_warn(&ddev->dev, "Failure during %s\n", __func__);
+ return res;
+}
+
+static struct mcde_display_device av8100_hdmi = {
+ .name = "av8100_hdmi",
+ .id = AV8100_DISPLAY_ID,
+ .port = &av8100_port2,
+ .chnl_id = MCDE_CHNL_B,
+ .fifo = MCDE_FIFO_B,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB565,
+ .native_x_res = 1280,
+ .native_y_res = 720,
+ .dev = {
+ .platform_data = &av8100_hdmi_pdata,
+ },
+ .platform_enable = av8100_platform_enable,
+ .platform_disable = av8100_platform_disable,
+};
+
+static void delayed_work_dispreg_hdmi(struct work_struct *ptr)
+{
+ if (mcde_display_device_register(&av8100_hdmi))
+ pr_warning("Failed to register av8100_hdmi\n");
+}
+#endif /* CONFIG_U8500_TV_OUTPUT_AV8100 */
+
+/*
+ * This function creates the framebuffer for each display that gets registered.
+ */
+static int display_postregistered_callback(struct notifier_block *nb,
+ unsigned long event, void *dev)
+{
+ struct mcde_display_device *ddev = dev;
+ u16 width, height;
+ u16 virtual_height;
+ u32 rotate = FB_ROTATE_UR;
+ struct fb_info *fbi;
+#if defined(CONFIG_DISPDEV) || defined(CONFIG_COMPDEV)
+ struct mcde_fb *mfb;
+#endif
+
+ if (event != MCDE_DSS_EVENT_DISPLAY_REGISTERED)
+ return 0;
+
+ if (ddev->id < 0 || ddev->id >= MCDE_NR_OF_DISPLAYS)
+ return 0;
+
+ mcde_dss_get_native_resolution(ddev, &width, &height);
+
+ if ((uib_is_u8500uib() || uib_is_stuib()) &&
+ ddev->id == PRIMARY_DISPLAY_ID) {
+ rotate = FB_ROTATE_CW;
+ swap(width, height);
+ }
+
+ virtual_height = height * 2;
+
+#ifndef CONFIG_MCDE_DISPLAY_HDMI_FB_AUTO_CREATE
+ if (ddev->id == AV8100_DISPLAY_ID)
+ goto out;
+#endif
+
+ /* Create frame buffer */
+ fbi = mcde_fb_create(ddev, width, height, width, virtual_height,
+ ddev->default_pixel_format, rotate);
+ if (IS_ERR(fbi)) {
+ dev_warn(&ddev->dev,
+ "Failed to create fb for display %s\n", ddev->name);
+ goto display_postregistered_callback_err;
+ } else {
+ dev_info(&ddev->dev, "Framebuffer created (%s)\n", ddev->name);
+ }
+
+#ifdef CONFIG_DISPDEV
+ mfb = to_mcde_fb(fbi);
+
+ /* Create a dispdev overlay for this display */
+ if (dispdev_create(ddev, true, mfb->ovlys[0]) < 0) {
+ dev_warn(&ddev->dev,
+ "Failed to create disp for display %s\n", ddev->name);
+ goto display_postregistered_callback_err;
+ } else {
+ dev_info(&ddev->dev, "Disp dev created for (%s)\n", ddev->name);
+ }
+#endif
+
+#ifdef CONFIG_COMPDEV
+ mfb = to_mcde_fb(fbi);
+ /* Create a compdev overlay for this display */
+ if (compdev_create(ddev, mfb->ovlys[0]) < 0) {
+ dev_warn(&ddev->dev,
+ "Failed to create compdev for display %s\n",
+ ddev->name);
+ goto display_postregistered_callback_err;
+ } else {
+ dev_info(&ddev->dev, "compdev created for (%s)\n",
+ ddev->name);
+ }
+#endif
+
+out:
+ return 0;
+
+display_postregistered_callback_err:
+ return -1;
+}
+
+static struct notifier_block display_nb = {
+ .notifier_call = display_postregistered_callback,
+};
+
+static int __init init_display_devices(void)
+{
+ if (!cpu_is_u8500())
+ return 0;
+
+ (void)mcde_dss_register_notifier(&display_nb);
+
+ /* Set powermode to STANDBY if startup graphics is executed */
+ if (display_initialized_during_boot) {
+ samsung_s6d16d0_display0.power_mode = MCDE_DISPLAY_PM_STANDBY;
+ sony_acx424akp_display0.power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* Display reset GPIO is different depending on reference boards */
+ if (machine_is_hrefv60()) {
+ samsung_s6d16d0_pdata0.reset_gpio = HREFV60_DISP1_RST_GPIO;
+ samsung_s6d16d0_pdata1.reset_gpio = HREFV60_DISP2_RST_GPIO;
+ } else {
+ samsung_s6d16d0_pdata0.reset_gpio = MOP500_DISP1_RST_GPIO;
+ samsung_s6d16d0_pdata1.reset_gpio = MOP500_DISP2_RST_GPIO;
+ }
+
+ /* Not all STUIBs support VSYNC, so disable vsync for the STUIB */
+ if (uib_is_stuib())
+ samsung_s6d16d0_display0.synchronized_update = false;
+
+ /* Initialize all needed clocks */
+ if (!display_initialized_during_boot) {
+ struct clk *clk_dsi_pll;
+ struct clk *clk_hdmi;
+ struct clk *clk_tv;
+
+ /*
+ * The TV CLK is used as parent for the
+ * DSI LP clock.
+ */
+ clk_tv = clk_get(&u8500_mcde_device.dev, "tv");
+ if (TV_FREQ_HZ != clk_round_rate(clk_tv, TV_FREQ_HZ))
+ pr_warning("%s: TV_CLK freq differs %ld\n", __func__,
+ clk_round_rate(clk_tv, TV_FREQ_HZ));
+ clk_set_rate(clk_tv, TV_FREQ_HZ);
+ clk_put(clk_tv);
+
+ /*
+ * The HDMI CLK is used as parent for the
+ * DSI HS clock.
+ */
+ clk_hdmi = clk_get(&u8500_mcde_device.dev, "hdmi");
+ if (HDMI_FREQ_HZ != clk_round_rate(clk_hdmi, HDMI_FREQ_HZ))
+ pr_warning("%s: HDMI freq differs %ld\n", __func__,
+ clk_round_rate(clk_hdmi, HDMI_FREQ_HZ));
+ clk_set_rate(clk_hdmi, HDMI_FREQ_HZ);
+ clk_put(clk_hdmi);
+
+ /*
+ * The DSI PLL CLK is used as DSI PLL for direct freq for
+ * link 2. Link 0/1 is then divided with 1/2/4 from this freq.
+ */
+ clk_dsi_pll = clk_get(&u8500_mcde_device.dev, "dsihs2");
+ if (DSI_PLL_FREQ_HZ != clk_round_rate(clk_dsi_pll,
+ DSI_PLL_FREQ_HZ))
+ pr_warning("%s: DSI_PLL freq differs %ld\n", __func__,
+ clk_round_rate(clk_dsi_pll, DSI_PLL_FREQ_HZ));
+ clk_set_rate(clk_dsi_pll, DSI_PLL_FREQ_HZ);
+ clk_put(clk_dsi_pll);
+ }
+
+ if (uib_is_u8500uib() || uib_is_stuib())
+ /* Samsung display on U8500 and ST UIB */
+ (void)mcde_display_device_register(&samsung_s6d16d0_display0);
+ else if (uib_is_u8500uibr3())
+ /* Sony display on U8500UIBV3 */
+ (void)mcde_display_device_register(&sony_acx424akp_display0);
+ else
+ pr_warning("Unknown UI board\n");
+
+ /* Only the ST UIB carries the secondary display */
+ if (uib_is_stuib())
+ (void)mcde_display_device_register(&samsung_s6d16d0_display1);
+
+#if defined(CONFIG_U8500_TV_OUTPUT_AV8100)
+ INIT_DELAYED_WORK_DEFERRABLE(&work_dispreg_hdmi,
+ delayed_work_dispreg_hdmi);
+ schedule_delayed_work(&work_dispreg_hdmi,
+ msecs_to_jiffies(DISPREG_HDMI_DELAY));
+#elif defined(CONFIG_U8500_TV_OUTPUT_AB8500)
+ (void)mcde_display_device_register(&tvout_ab8500_display);
+#endif
+
+ return 0;
+}
+module_init(init_display_devices);
diff --git a/arch/arm/mach-ux500/board-mop500-mmio.c b/arch/arm/mach-ux500/board-mop500-mmio.c
new file mode 100644
index 00000000000..355f835caa1
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-mmio.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson
+ * Author: Joakim Axelsson <joakim.axelsson@stericsson.com> for ST-Ericsson
+ * Author: Rajat Verma <rajat.verma@stericsson.com> for ST-Ericsson.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/vmalloc.h>
+#include <asm/mach-types.h>
+#include <plat/pincfg.h>
+#include <mach/gpio.h>
+#include <mach/devices.h>
+#include <mach/hardware.h>
+
+#include "pins-db8500.h"
+#include "pins.h"
+#include "board-mop500.h"
+#include "../drivers/staging/mmio/mmio.h"
+
+static pin_cfg_t i2c2_pins[] = {
+ GPIO8_I2C2_SDA,
+ GPIO9_I2C2_SCL
+};
+static pin_cfg_t ipi2c_pins[] = {
+ GPIO8_IPI2C_SDA,
+ GPIO9_IPI2C_SCL
+};
+static pin_cfg_t i2c_disable_pins[] = {
+ GPIO8_GPIO,
+ GPIO9_GPIO
+};
+static pin_cfg_t xshutdown_host[] = {
+ GPIO141_GPIO,
+ GPIO142_GPIO
+};
+static pin_cfg_t xshutdown_fw[] = {
+ GPIO141_IP_GPIO2,
+ GPIO142_IP_GPIO3
+};
+static pin_cfg_t xshutdown_disable[] = {
+ GPIO141_GPIO | PIN_OUTPUT_LOW,
+ GPIO142_GPIO | PIN_OUTPUT_LOW
+};
+
+struct mmio_board_data {
+ int number_of_regulators;
+ struct regulator **mmio_regulators;
+ /* Pin configs */
+ int xenon_charge;
+ struct mmio_gpio xshutdown_pins[CAMERA_SLOT_END];
+ /* Internal clocks */
+ struct clk *clk_ptr_bml;
+ struct clk *clk_ptr_ipi2c;
+ /* External clocks */
+ struct clk *clk_ptr_ext[CAMERA_SLOT_END];
+};
+
+/* Names of the regulators required to power up the
+ * camera sensors */
+static char *regulator_names[] = {"vaux12v5" , "vddcsi1v2"};
+
+/* Translate the physical reset GPIO number to the logical IPGPIO number that
+ * is communicated to the firmware, so that the firmware can control the
+ * reset GPIO of a raw Bayer sensor */
+static int mmio_get_ipgpio(struct mmio_platform_data *pdata, int gpio,
+ int *ip_gpio)
+{
+ int err = 0;
+ dev_dbg(pdata->dev, "%s() : IPGPIO requested for %d", __func__, gpio);
+ switch (gpio) {
+ case 67:
+ case 140:
+ *ip_gpio = 7;
+ break;
+ case 5:
+ case 66:
+ *ip_gpio = 6;
+ break;
+ case 81:
+ case 65:
+ *ip_gpio = 5;
+ break;
+ case 80:
+ case 64:
+ *ip_gpio = 4;
+ break;
+ case 10:
+ case 79:
+ case 142:
+ *ip_gpio = 3;
+ break;
+ case 11:
+ case 78:
+ case 141:
+ *ip_gpio = 2;
+ break;
+ case 7:
+ case 150:
+ *ip_gpio = 1;
+ break;
+ case 6:
+ case 149:
+ *ip_gpio = 0;
+ break;
+ default:
+ *ip_gpio = -1;
+ err = -1;
+ break;
+ }
+ return err;
+}
+
+static int mmio_clock_init(struct mmio_platform_data *pdata)
+{
+ int err;
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+
+ extra->clk_ptr_bml = clk_get_sys("bml", NULL);
+ if (IS_ERR(extra->clk_ptr_bml)) {
+ err = PTR_ERR(extra->clk_ptr_bml);
+ dev_err(pdata->dev, "Error %d getting clock 'bml'\n", err);
+ goto err_bml_clk;
+ }
+ extra->clk_ptr_ipi2c = clk_get_sys("ipi2", NULL);
+ if (IS_ERR(extra->clk_ptr_ipi2c)) {
+ err = PTR_ERR(extra->clk_ptr_ipi2c);
+ dev_err(pdata->dev, "Error %d getting clock 'ipi2'\n", err);
+ goto err_ipi2c_clk;
+ }
+ extra->clk_ptr_ext[PRIMARY_CAMERA] = clk_get_sys("pri-cam", NULL);
+ if (IS_ERR(extra->clk_ptr_ext[PRIMARY_CAMERA])) {
+ err = PTR_ERR(extra->clk_ptr_ext[PRIMARY_CAMERA]);
+ dev_err(pdata->dev, "Error %d getting clock 'pri-cam'\n", err);
+ goto err_pri_ext_clk;
+ }
+ extra->clk_ptr_ext[SECONDARY_CAMERA] = clk_get_sys("sec-cam", NULL);
+ if (IS_ERR(extra->clk_ptr_ext[SECONDARY_CAMERA])) {
+ err = PTR_ERR(extra->clk_ptr_ext[SECONDARY_CAMERA]);
+ dev_err(pdata->dev, "Error %d getting clock 'sec-cam'\n", err);
+ goto err_sec_ext_clk;
+ }
+ dev_dbg(pdata->dev , "Board %s() Exit\n", __func__);
+ return 0;
+err_sec_ext_clk:
+ clk_put(extra->clk_ptr_ext[PRIMARY_CAMERA]);
+err_pri_ext_clk:
+ clk_put(extra->clk_ptr_ipi2c);
+err_ipi2c_clk:
+ clk_put(extra->clk_ptr_bml);
+err_bml_clk:
+ return err;
+}
+static void mmio_clock_exit(struct mmio_platform_data *pdata)
+{
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ clk_put(extra->clk_ptr_bml);
+ clk_put(extra->clk_ptr_ipi2c);
+ clk_put(extra->clk_ptr_ext[PRIMARY_CAMERA]);
+ clk_put(extra->clk_ptr_ext[SECONDARY_CAMERA]);
+}
+
+
+static int mmio_pin_cfg_init(struct mmio_platform_data *pdata)
+{
+ int err;
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+
+ extra->xshutdown_pins[PRIMARY_CAMERA].gpio = XSHUTDOWN_PRIMARY_SENSOR;
+ extra->xshutdown_pins[PRIMARY_CAMERA].active_high = 0;
+ extra->xshutdown_pins[PRIMARY_CAMERA].udelay = 500;
+
+ extra->xshutdown_pins[SECONDARY_CAMERA].active_high = 0;
+ extra->xshutdown_pins[SECONDARY_CAMERA].udelay = 500;
+
+ /* Update GPIO mappings according to board */
+ if (machine_is_hrefv60()) {
+ extra->xenon_charge = HREFV60_MMIO_XENON_CHARGE;
+ xshutdown_host[SECONDARY_CAMERA] = GPIO140_GPIO;
+ xshutdown_fw[SECONDARY_CAMERA] = GPIO140_IP_GPIO7;
+ xshutdown_disable[SECONDARY_CAMERA] =
+ GPIO140_GPIO | PIN_OUTPUT_LOW;
+ extra->xshutdown_pins[SECONDARY_CAMERA].gpio = 140;
+ } else {
+ extra->xenon_charge = GPIO_MMIO_XENON_CHARGE;
+ xshutdown_host[SECONDARY_CAMERA] = GPIO142_GPIO;
+ xshutdown_fw[SECONDARY_CAMERA] = GPIO142_IP_GPIO3;
+ xshutdown_disable[SECONDARY_CAMERA] =
+ GPIO142_GPIO | PIN_OUTPUT_LOW;
+ extra->xshutdown_pins[SECONDARY_CAMERA].gpio = 142;
+ }
+ /* Setup Xenon Charge */
+ err = gpio_request(extra->xenon_charge, "xenon charge");
+ if (err) {
+ dev_err(pdata->dev, "Error %d while requesting xenon charge\n",
+ err);
+ goto err_xenon_gpio_req;
+ }
+ err = gpio_direction_output(extra->xenon_charge, 0);
+ if (err) {
+ dev_err(pdata->dev, "Error %d while setting xenon charge in"
+ "output mode\n", err);
+ goto err_xenon_gpio_set_dir;
+ }
+ dev_dbg(pdata->dev , "Board %s() Exit\n", __func__);
+ return 0;
+err_xenon_gpio_set_dir:
+ gpio_free(extra->xenon_charge);
+err_xenon_gpio_req:
+ return err;
+}
+
+static void mmio_pin_cfg_exit(struct mmio_platform_data *pdata)
+{
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ gpio_free(extra->xenon_charge);
+}
+
+/* For now, both sensors on HREF share the same power-up sequence. If different
+ * sequences are needed for the primary and secondary sensors, this can be
+ * implemented easily: use the camera_slot field of mmio_platform_data
+ * to determine which camera needs to be powered up */
+static int mmio_power_init(struct mmio_platform_data *pdata)
+{
+ int err = 0, i = 0;
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ extra->number_of_regulators = ARRAY_SIZE(regulator_names);
+ extra->mmio_regulators =
+ kzalloc(sizeof(struct regulator *) * extra->number_of_regulators,
+ GFP_KERNEL);
+ if (!extra->mmio_regulators) {
+ dev_err(pdata->dev , "Error while allocating memory for mmio"
+ "regulators\n");
+ err = -ENOMEM;
+ goto err_no_mem_reg;
+ }
+ for (i = 0; i < extra->number_of_regulators; i++) {
+ extra->mmio_regulators[i] =
+ regulator_get(pdata->dev, regulator_names[i]);
+ if (IS_ERR(extra->mmio_regulators[i])) {
+ err = PTR_ERR(extra->mmio_regulators[i]);
+ dev_err(pdata->dev , "Error %d getting regulator '%s'"
+ "\n", err, regulator_names[i]);
+ goto err_regulator;
+ }
+ }
+ dev_dbg(pdata->dev , "Board %s() Exit\n", __func__);
+ return 0;
+err_regulator:
+ /* Return regulators we have already requested */
+ while (i--)
+ regulator_put(extra->mmio_regulators[i]);
+ kfree(extra->mmio_regulators);
+err_no_mem_reg:
+ return err;
+}
+
+static void mmio_power_exit(struct mmio_platform_data *pdata)
+{
+ int i = 0;
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ for (i = 0; i < extra->number_of_regulators; i++)
+ regulator_put(extra->mmio_regulators[i]);
+ kfree(extra->mmio_regulators);
+}
+
+static int mmio_platform_init(struct mmio_platform_data *pdata)
+{
+ int err = 0;
+ struct mmio_board_data *extra = NULL;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ /* Alloc memory for our own extra data */
+ extra = kzalloc(sizeof(struct mmio_board_data), GFP_KERNEL);
+ if (!extra) {
+ dev_err(pdata->dev, "%s: memory alloc failed for "
+ "mmio_board_data\n", __func__);
+ err = -ENOMEM;
+ goto err_no_mem_extra;
+ }
+ /* Hook the data for other callbacks to use */
+ pdata->extra = extra;
+
+ pdata->camera_slot = -1;
+
+ err = mmio_power_init(pdata);
+ if (err)
+ goto err_regulator;
+ err = mmio_clock_init(pdata);
+ if (err)
+ goto err_clock;
+ err = mmio_pin_cfg_init(pdata);
+ if (err)
+ goto err_pin_cfg;
+ /* Store logical IPGPIO for physical reset GPIOs used */
+ err = mmio_get_ipgpio(pdata,
+ extra->xshutdown_pins[PRIMARY_CAMERA].gpio,
+ &(pdata->reset_ipgpio[PRIMARY_CAMERA]));
+ if (err) {
+ dev_err(pdata->dev, "Error getting ipgpio for pri cam\n");
+ goto err_ipgpio;
+ }
+ err = mmio_get_ipgpio(pdata,
+ extra->xshutdown_pins[SECONDARY_CAMERA].gpio,
+ &(pdata->reset_ipgpio[SECONDARY_CAMERA]));
+ if (err) {
+ dev_err(pdata->dev, "Error getting ipgpio for sec cam\n");
+ goto err_ipgpio;
+ }
+ dev_dbg(pdata->dev , "Board %s() Exit\n", __func__);
+ return 0;
+err_ipgpio:
+ mmio_pin_cfg_exit(pdata);
+err_pin_cfg:
+ mmio_clock_exit(pdata);
+err_clock:
+ mmio_power_exit(pdata);
+err_regulator:
+ kfree(extra);
+err_no_mem_extra:
+ return err;
+}
+static void mmio_platform_exit(struct mmio_platform_data *pdata)
+{
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ mmio_power_exit(pdata);
+ mmio_clock_exit(pdata);
+ mmio_pin_cfg_exit(pdata);
+ kfree(extra);
+ pdata->extra = NULL;
+}
+
+static int mmio_power_enable(struct mmio_platform_data *pdata)
+{
+ int err = 0, i = 0;
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ /* Enable the regulators */
+ for (i = 0; i < extra->number_of_regulators; i++) {
+ err = regulator_enable(extra->mmio_regulators[i]);
+ if (err) {
+ dev_err(pdata->dev, "Error %d enabling regulator '%s'\n",
+ err, regulator_names[i]);
+ goto err_regulator;
+ }
+ }
+ /* Set Xenon Charge */
+ gpio_set_value_cansleep(extra->xenon_charge, 1);
+ dev_dbg(pdata->dev , "Board %s() Exit\n", __func__);
+ return 0;
+err_regulator:
+ /* Disable regulators we already enabled */
+ while (i--)
+ regulator_disable(extra->mmio_regulators[i]);
+ return err;
+}
+
+static void mmio_power_disable(struct mmio_platform_data *pdata)
+{
+ int i;
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ /* Disable the regulators */
+ for (i = 0; i < extra->number_of_regulators; i++)
+ regulator_disable(extra->mmio_regulators[i]);
+ /* Disable Xenon Charge */
+ gpio_set_value_cansleep(extra->xenon_charge, 0);
+}
+static int mmio_clock_enable(struct mmio_platform_data *pdata)
+{
+ int err = 0;
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ /* Enable internal clocks */
+ err = clk_enable(extra->clk_ptr_bml);
+ if (err) {
+ dev_err(pdata->dev, "Error activating bml clock %d\n", err);
+ goto err_bml_clk;
+ }
+ err = clk_enable(extra->clk_ptr_ipi2c);
+ if (err) {
+ dev_err(pdata->dev, "Error activating i2c2 clock %d\n", err);
+ goto err_ipi2c_clk;
+ }
+ /* Enable appropriate external clock */
+ err = clk_enable(extra->clk_ptr_ext[pdata->camera_slot]);
+ if (err) {
+ dev_err(pdata->dev, "Error activating clock for sensor %d, err"
+ "%d\n", pdata->camera_slot, err);
+ goto err_ext_clk;
+ }
+ dev_dbg(pdata->dev , "Board %s() Exit\n", __func__);
+ return 0;
+err_ext_clk:
+ clk_disable(extra->clk_ptr_ipi2c);
+err_ipi2c_clk:
+ clk_disable(extra->clk_ptr_bml);
+err_bml_clk:
+ return err;
+}
+
+static void mmio_clock_disable(struct mmio_platform_data *pdata)
+{
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ clk_disable(extra->clk_ptr_bml);
+ clk_disable(extra->clk_ptr_ipi2c);
+ clk_disable(extra->clk_ptr_ext[pdata->camera_slot]);
+}
+
+
+static int mmio_config_xshutdown_pins(struct mmio_platform_data *pdata,
+ enum mmio_select_xshutdown_t select,
+ int is_active_high)
+{
+ int err = 0;
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ switch (select) {
+ case MMIO_ENABLE_XSHUTDOWN_HOST:
+ extra->xshutdown_pins[pdata->camera_slot].active_high =
+ is_active_high;
+ err = nmk_config_pin(xshutdown_host[pdata->camera_slot] |
+ (is_active_high ? PIN_OUTPUT_LOW : PIN_OUTPUT_HIGH),
+ 0);
+ break;
+ case MMIO_ENABLE_XSHUTDOWN_FW:
+ err = nmk_config_pin(xshutdown_fw[pdata->camera_slot], 0);
+ break;
+ case MMIO_DISABLE_XSHUTDOWN:
+ err = nmk_config_pin(xshutdown_disable[pdata->camera_slot],
+ 0);
+ break;
+ default:
+ break;
+ }
+ if (err)
+ dev_dbg(pdata->dev , "Error configuring xshutdown, err = %d\n",
+ err);
+ return err;
+}
+static void mmio_set_xshutdown(struct mmio_platform_data *pdata)
+{
+ struct mmio_board_data *extra = pdata->extra;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ gpio_set_value(extra->xshutdown_pins[pdata->camera_slot].gpio ,
+ (extra->xshutdown_pins[pdata->camera_slot].active_high ? 1 :
+ 0));
+ udelay(extra->xshutdown_pins[pdata->camera_slot].udelay);
+}
+static int mmio_config_i2c_pins(struct mmio_platform_data *pdata,
+ enum mmio_select_i2c_t select)
+{
+ int err = 0;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ switch (select) {
+ case MMIO_ACTIVATE_I2C_HOST:
+ err = nmk_config_pins(i2c2_pins, ARRAY_SIZE(i2c2_pins));
+ break;
+ case MMIO_ACTIVATE_IPI2C2:
+ err = nmk_config_pins(ipi2c_pins, ARRAY_SIZE(ipi2c_pins));
+ break;
+ case MMIO_DEACTIVATE_I2C:
+ err = nmk_config_pins(i2c_disable_pins,
+ ARRAY_SIZE(i2c_disable_pins));
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+static struct mmio_platform_data mmio_config = {
+ .platform_init = mmio_platform_init,
+ .platform_exit = mmio_platform_exit,
+ .power_enable = mmio_power_enable,
+ .power_disable = mmio_power_disable,
+ .clock_enable = mmio_clock_enable,
+ .clock_disable = mmio_clock_disable,
+ .config_i2c_pins = mmio_config_i2c_pins,
+ .config_xshutdown_pins = mmio_config_xshutdown_pins,
+ .set_xshutdown = mmio_set_xshutdown,
+ .sia_base = U8500_SIA_BASE,
+ .cr_base = U8500_CR_BASE
+};
+
+struct platform_device ux500_mmio_device = {
+ .name = MMIO_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &mmio_config,
+ }
+};
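The structure above only wires the board callbacks into platform data; a camera host driver bound to ux500_mmio_device is expected to pull them back out and run the power/clock sequence around sensor use. The probe function below is a hedged sketch of that consumption, not the actual ux500 MMIO driver: the function name, error labels, and the assumption that platform_init()/platform_exit() take the platform data pointer like the callbacks shown here are all illustrative, and the header providing struct mmio_platform_data is assumed.

#include <linux/platform_device.h>
/* header providing struct mmio_platform_data is assumed, e.g. <mach/mmio.h> */

static int example_mmio_probe(struct platform_device *pdev)
{
	struct mmio_platform_data *pdata = pdev->dev.platform_data;
	int err;

	err = pdata->platform_init(pdata);	/* board hook: GPIOs, regulators, clocks */
	if (err)
		return err;

	err = pdata->power_enable(pdata);	/* regulators + xenon charge */
	if (err)
		goto out_exit;

	err = pdata->clock_enable(pdata);	/* bml, ipi2c and the sensor clock */
	if (err)
		goto out_power;

	return 0;

out_power:
	pdata->power_disable(pdata);
out_exit:
	pdata->platform_exit(pdata);
	return err;
}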
diff --git a/arch/arm/mach-ux500/board-mop500-msp.c b/arch/arm/mach-ux500/board-mop500-msp.c
new file mode 100644
index 00000000000..7a5a23baf87
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-msp.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+
+#include <plat/ste_dma40.h>
+#include <plat/pincfg.h>
+
+#include <mach/devices.h>
+#include <mach/ste-dma40-db8500.h>
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/msp.h>
+
+#include "board-mop500.h"
+#include "devices-db8500.h"
+#include "pins-db8500.h"
+
+/* MSP1/3 Tx/Rx usage protection */
+static DEFINE_SPINLOCK(msp_rxtx_lock);
+
+/* Reference Count */
+static int msp_rxtx_ref;
+
+static pin_cfg_t mop500_msp1_pins_init[] = {
+ GPIO33_MSP1_TXD | PIN_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE,
+ GPIO34_MSP1_TFS | PIN_INPUT_NOPULL | PIN_SLPM_WAKEUP_DISABLE,
+ GPIO35_MSP1_TCK | PIN_INPUT_NOPULL | PIN_SLPM_WAKEUP_DISABLE,
+ GPIO36_MSP1_RXD | PIN_INPUT_NOPULL | PIN_SLPM_WAKEUP_DISABLE,
+};
+
+static pin_cfg_t mop500_msp1_pins_exit[] = {
+ GPIO33_MSP1_TXD | PIN_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE,
+ GPIO34_MSP1_TFS | PIN_INPUT_NOPULL | PIN_SLPM_WAKEUP_ENABLE,
+ GPIO35_MSP1_TCK | PIN_INPUT_NOPULL | PIN_SLPM_WAKEUP_ENABLE,
+ GPIO36_MSP1_RXD | PIN_INPUT_NOPULL | PIN_SLPM_WAKEUP_ENABLE,
+};
+
+int msp13_i2s_init(void)
+{
+ int retval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msp_rxtx_lock, flags);
+ if (msp_rxtx_ref == 0)
+ retval = nmk_config_pins(
+ ARRAY_AND_SIZE(mop500_msp1_pins_init));
+ if (!retval)
+ msp_rxtx_ref++;
+ spin_unlock_irqrestore(&msp_rxtx_lock, flags);
+
+ return retval;
+}
+
+int msp13_i2s_exit(void)
+{
+ int retval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msp_rxtx_lock, flags);
+ WARN_ON(!msp_rxtx_ref);
+ msp_rxtx_ref--;
+ if (msp_rxtx_ref == 0)
+ retval = nmk_config_pins_sleep(
+ ARRAY_AND_SIZE(mop500_msp1_pins_exit));
+ spin_unlock_irqrestore(&msp_rxtx_lock, flags);
+
+ return retval;
+}
+
+static struct stedma40_chan_cfg msp0_dma_rx = {
+ .high_priority = true,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+
+ .src_dev_type = DB8500_DMA_DEV31_MSP0_RX_SLIM0_CH0_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+
+ /* data_width is set during configuration */
+};
+
+static struct stedma40_chan_cfg msp0_dma_tx = {
+ .high_priority = true,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+
+ .src_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV31_MSP0_TX_SLIM0_CH0_TX,
+
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+
+ /* data_width is set during configuration */
+};
+
+static struct msp_i2s_platform_data msp0_platform_data = {
+ .id = MSP_0_I2S_CONTROLLER,
+ .msp_i2s_dma_rx = &msp0_dma_rx,
+ .msp_i2s_dma_tx = &msp0_dma_tx,
+};
+
+static struct stedma40_chan_cfg msp1_dma_rx = {
+ .high_priority = true,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+
+ .src_dev_type = DB8500_DMA_DEV30_MSP3_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+
+ /* data_width is set during configuration */
+};
+
+static struct stedma40_chan_cfg msp1_dma_tx = {
+ .high_priority = true,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+
+ .src_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV30_MSP1_TX,
+
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+
+ /* data_width is set during configuration */
+};
+
+static struct msp_i2s_platform_data msp1_platform_data = {
+ .id = MSP_1_I2S_CONTROLLER,
+ .msp_i2s_dma_rx = NULL,
+ .msp_i2s_dma_tx = &msp1_dma_tx,
+ .msp_i2s_init = msp13_i2s_init,
+ .msp_i2s_exit = msp13_i2s_exit,
+};
+
+static struct stedma40_chan_cfg msp2_dma_rx = {
+ .high_priority = true,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+
+ .src_dev_type = DB8500_DMA_DEV14_MSP2_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+
+ /* MSP2 DMA doesn't work with PSIZE == 4 on DB8500v2 */
+ .src_info.psize = STEDMA40_PSIZE_LOG_1,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_1,
+
+ /* data_width is set during configuration */
+};
+
+static struct stedma40_chan_cfg msp2_dma_tx = {
+ .high_priority = true,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+
+ .src_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV14_MSP2_TX,
+
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+
+ .use_fixed_channel = true,
+ .phy_channel = 1,
+
+ /* data_width is set during configuration */
+};
+
+static struct msp_i2s_platform_data msp2_platform_data = {
+ .id = MSP_2_I2S_CONTROLLER,
+ .msp_i2s_dma_rx = &msp2_dma_rx,
+ .msp_i2s_dma_tx = &msp2_dma_tx,
+};
+
+static struct msp_i2s_platform_data msp3_platform_data = {
+ .id = MSP_3_I2S_CONTROLLER,
+ .msp_i2s_dma_rx = &msp1_dma_rx,
+ .msp_i2s_dma_tx = NULL,
+ .msp_i2s_init = msp13_i2s_init,
+ .msp_i2s_exit = msp13_i2s_exit,
+};
+
+void __init mop500_msp_init(void)
+{
+ db8500_add_msp0_i2s(&msp0_platform_data);
+ db8500_add_msp1_i2s(&msp1_platform_data);
+ db8500_add_msp2_i2s(&msp2_platform_data);
+ db8500_add_msp3_i2s(&msp3_platform_data);
+}
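msp13_i2s_init()/msp13_i2s_exit() reference-count the shared MSP1/MSP3 pin group: the first caller muxes the pins in, and the last caller returns them to their wakeup-enabled sleep configuration. They reach the MSP driver through the msp_i2s_init/msp_i2s_exit hooks in the platform data above. The two helpers below are only an illustrative sketch of that call pattern under those assumptions, not code from the real MSP driver.

static int example_msp_startup(struct msp_i2s_platform_data *pd)
{
	/* first opener configures the shared MSP1/MSP3 pins */
	return pd->msp_i2s_init ? pd->msp_i2s_init() : 0;
}

static void example_msp_shutdown(struct msp_i2s_platform_data *pd)
{
	/* last closer returns the pins to their sleep configuration */
	if (pd->msp_i2s_exit)
		pd->msp_i2s_exit();
}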
diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c
index 74bfcff2bdf..43d87c1f4df 100644
--- a/arch/arm/mach-ux500/board-mop500-pins.c
+++ b/arch/arm/mach-ux500/board-mop500-pins.c
@@ -6,109 +6,50 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/string.h>
#include <asm/mach-types.h>
#include <plat/pincfg.h>
-#include <plat/gpio-nomadik.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/mfd/abx500/ab8500-gpio.h>
+
#include <mach/hardware.h>
+#include <mach/suspend.h>
#include "pins-db8500.h"
+#include "pins.h"
+#include "board-mop500.h"
+#include "board-pins-sleep-force.h"
+
+enum custom_pin_cfg_t {
+ PINS_FOR_DEFAULT,
+ PINS_FOR_U9500,
+};
+
+static enum custom_pin_cfg_t pinsfor;
static pin_cfg_t mop500_pins_common[] = {
- /* I2C */
- GPIO147_I2C0_SCL,
- GPIO148_I2C0_SDA,
- GPIO16_I2C1_SCL,
- GPIO17_I2C1_SDA,
- GPIO10_I2C2_SDA,
- GPIO11_I2C2_SCL,
- GPIO229_I2C3_SDA,
- GPIO230_I2C3_SCL,
-
- /* MSP0 */
+ /* MSP0 */
GPIO12_MSP0_TXD,
GPIO13_MSP0_TFS,
GPIO14_MSP0_TCK,
GPIO15_MSP0_RXD,
/* MSP2: HDMI */
- GPIO193_MSP2_TXD,
- GPIO194_MSP2_TCK,
- GPIO195_MSP2_TFS,
+ GPIO193_MSP2_TXD | PIN_INPUT_PULLDOWN,
+ GPIO194_MSP2_TCK | PIN_INPUT_PULLDOWN,
+ GPIO195_MSP2_TFS | PIN_INPUT_PULLDOWN,
GPIO196_MSP2_RXD | PIN_OUTPUT_LOW,
+ /* LCD TE0 */
+ GPIO68_LCD_VSI0 | PIN_INPUT_PULLUP,
+
/* Touch screen INTERFACE */
GPIO84_GPIO | PIN_INPUT_PULLUP, /* TOUCH_INT1 */
/* STMPE1601/tc35893 keypad IRQ */
GPIO218_GPIO | PIN_INPUT_PULLUP,
- /* MMC0 (MicroSD card) */
- GPIO18_MC0_CMDDIR | PIN_OUTPUT_HIGH,
- GPIO19_MC0_DAT0DIR | PIN_OUTPUT_HIGH,
- GPIO20_MC0_DAT2DIR | PIN_OUTPUT_HIGH,
-
- GPIO22_MC0_FBCLK | PIN_INPUT_NOPULL,
- GPIO23_MC0_CLK | PIN_OUTPUT_LOW,
- GPIO24_MC0_CMD | PIN_INPUT_PULLUP,
- GPIO25_MC0_DAT0 | PIN_INPUT_PULLUP,
- GPIO26_MC0_DAT1 | PIN_INPUT_PULLUP,
- GPIO27_MC0_DAT2 | PIN_INPUT_PULLUP,
- GPIO28_MC0_DAT3 | PIN_INPUT_PULLUP,
-
- /* SDI1 (SDIO) */
- GPIO208_MC1_CLK | PIN_OUTPUT_LOW,
- GPIO209_MC1_FBCLK | PIN_INPUT_NOPULL,
- GPIO210_MC1_CMD | PIN_INPUT_PULLUP,
- GPIO211_MC1_DAT0 | PIN_INPUT_PULLUP,
- GPIO212_MC1_DAT1 | PIN_INPUT_PULLUP,
- GPIO213_MC1_DAT2 | PIN_INPUT_PULLUP,
- GPIO214_MC1_DAT3 | PIN_INPUT_PULLUP,
-
- /* MMC2 (On-board DATA INTERFACE eMMC) */
- GPIO128_MC2_CLK | PIN_OUTPUT_LOW,
- GPIO129_MC2_CMD | PIN_INPUT_PULLUP,
- GPIO130_MC2_FBCLK | PIN_INPUT_NOPULL,
- GPIO131_MC2_DAT0 | PIN_INPUT_PULLUP,
- GPIO132_MC2_DAT1 | PIN_INPUT_PULLUP,
- GPIO133_MC2_DAT2 | PIN_INPUT_PULLUP,
- GPIO134_MC2_DAT3 | PIN_INPUT_PULLUP,
- GPIO135_MC2_DAT4 | PIN_INPUT_PULLUP,
- GPIO136_MC2_DAT5 | PIN_INPUT_PULLUP,
- GPIO137_MC2_DAT6 | PIN_INPUT_PULLUP,
- GPIO138_MC2_DAT7 | PIN_INPUT_PULLUP,
-
- /* MMC4 (On-board STORAGE INTERFACE eMMC) */
- GPIO197_MC4_DAT3 | PIN_INPUT_PULLUP,
- GPIO198_MC4_DAT2 | PIN_INPUT_PULLUP,
- GPIO199_MC4_DAT1 | PIN_INPUT_PULLUP,
- GPIO200_MC4_DAT0 | PIN_INPUT_PULLUP,
- GPIO201_MC4_CMD | PIN_INPUT_PULLUP,
- GPIO202_MC4_FBCLK | PIN_INPUT_NOPULL,
- GPIO203_MC4_CLK | PIN_OUTPUT_LOW,
- GPIO204_MC4_DAT7 | PIN_INPUT_PULLUP,
- GPIO205_MC4_DAT6 | PIN_INPUT_PULLUP,
- GPIO206_MC4_DAT5 | PIN_INPUT_PULLUP,
- GPIO207_MC4_DAT4 | PIN_INPUT_PULLUP,
-
- /* SKE keypad */
- GPIO153_KP_I7,
- GPIO154_KP_I6,
- GPIO155_KP_I5,
- GPIO156_KP_I4,
- GPIO157_KP_O7,
- GPIO158_KP_O6,
- GPIO159_KP_O5,
- GPIO160_KP_O4,
- GPIO161_KP_I3,
- GPIO162_KP_I2,
- GPIO163_KP_I1,
- GPIO164_KP_I0,
- GPIO165_KP_O3,
- GPIO166_KP_O2,
- GPIO167_KP_O1,
- GPIO168_KP_O0,
-
/* UART */
/* uart-0 pins gpio configuration should be
* kept intact to prevent glitch in tx line
@@ -127,10 +68,6 @@ static pin_cfg_t mop500_pins_common[] = {
GPIO30_U2_TXD | PIN_OUTPUT_HIGH,
GPIO31_U2_CTSn | PIN_INPUT_PULLUP,
GPIO32_U2_RTSn | PIN_OUTPUT_HIGH,
-
- /* Display & HDMI HW sync */
- GPIO68_LCD_VSI0 | PIN_INPUT_PULLUP,
- GPIO69_LCD_VSI1 | PIN_INPUT_PULLUP,
};
static pin_cfg_t mop500_pins_default[] = {
@@ -140,10 +77,13 @@ static pin_cfg_t mop500_pins_default[] = {
GPIO145_SSP0_RXD | PIN_PULL_DOWN,
GPIO146_SSP0_TXD,
+ /* XENON Flashgun INTERFACE */
+ GPIO6_IP_GPIO0 | PIN_INPUT_PULLUP,/* XENON_FLASH_ID */
+ GPIO7_IP_GPIO1 | PIN_INPUT_PULLUP,/* XENON_READY */
GPIO217_GPIO | PIN_INPUT_PULLUP, /* TC35892 IRQ */
- /* SDI0 (MicroSD card) */
+ /* sdi0 (removable MMC/SD/SDIO cards) not handled by pm_runtime */
GPIO21_MC0_DAT31DIR | PIN_OUTPUT_HIGH,
/* UART */
@@ -155,13 +95,11 @@ static pin_cfg_t mop500_pins_default[] = {
static pin_cfg_t hrefv60_pins[] = {
/* WLAN */
- GPIO4_GPIO | PIN_INPUT_PULLUP,/* WLAN_IRQ */
GPIO85_GPIO | PIN_OUTPUT_LOW,/* WLAN_ENA */
/* XENON Flashgun INTERFACE */
GPIO6_IP_GPIO0 | PIN_INPUT_PULLUP,/* XENON_FLASH_ID */
GPIO7_IP_GPIO1 | PIN_INPUT_PULLUP,/* XENON_READY */
- GPIO170_GPIO | PIN_OUTPUT_LOW, /* XENON_CHARGE */
/* Assistant LED INTERFACE */
GPIO21_GPIO | PIN_OUTPUT_LOW, /* XENON_EN1 */
@@ -172,7 +110,7 @@ static pin_cfg_t hrefv60_pins[] = {
GPIO32_GPIO | PIN_INPUT_PULLDOWN, /* Magnetometer DRDY */
/* Display Interface */
- GPIO65_GPIO | PIN_OUTPUT_LOW, /* DISP1 RST */
+ GPIO65_GPIO | PIN_OUTPUT_HIGH, /* DISP1 NO RST */
GPIO66_GPIO | PIN_OUTPUT_LOW, /* DISP2 RST */
/* Touch screen INTERFACE */
@@ -214,11 +152,8 @@ static pin_cfg_t hrefv60_pins[] = {
/* DiPro Sensor Interface */
GPIO139_GPIO | PIN_INPUT_PULLUP, /* DIPRO_INT */
- /* HAL SWITCH INTERFACE */
- GPIO145_GPIO | PIN_INPUT_PULLDOWN,/* HAL_SW */
-
/* Audio Amplifier Interface */
- GPIO149_GPIO | PIN_OUTPUT_LOW, /* VAUDIO_HF_EN */
+ GPIO149_GPIO | PIN_OUTPUT_HIGH, /* VAUDIO_HF_EN, enable MAX8968 */
/* GBF INTERFACE */
GPIO171_GPIO | PIN_OUTPUT_LOW, /* GBF_ENA_RESET */
@@ -230,10 +165,29 @@ static pin_cfg_t hrefv60_pins[] = {
GPIO82_GPIO | PIN_INPUT_PULLUP, /* ACC_INT1 */
GPIO83_GPIO | PIN_INPUT_PULLUP, /* ACC_INT2 */
- /* Proximity Sensor */
- GPIO217_GPIO | PIN_INPUT_PULLUP,
+ /* SD card detect */
+ GPIO95_GPIO | PIN_INPUT_PULLUP,
+};
+static pin_cfg_t u9500_pins[] = {
+ GPIO4_U1_RXD | PIN_INPUT_PULLUP,
+ GPIO5_U1_TXD | PIN_OUTPUT_HIGH,
+ GPIO144_GPIO | PIN_INPUT_PULLUP,/* WLAN_IRQ */
+ /* HSI */
+ GPIO219_HSIR_FLA0 | PIN_INPUT_PULLDOWN,
+ GPIO220_HSIR_DAT0 | PIN_INPUT_PULLDOWN,
+ GPIO221_HSIR_RDY0 | PIN_OUTPUT_LOW,
+ GPIO222_HSIT_FLA0 | PIN_OUTPUT_LOW,
+ GPIO223_HSIT_DAT0 | PIN_OUTPUT_LOW,
+ GPIO224_HSIT_RDY0 | PIN_INPUT_PULLDOWN,
+ GPIO225_HSIT_CAWAKE0 | PIN_INPUT_PULLDOWN, /* CA_WAKE0 */
+ GPIO226_GPIO | PIN_OUTPUT_HIGH, /* AC_WAKE0 */
+};
+
+static pin_cfg_t u8500_pins[] = {
+ GPIO226_GPIO | PIN_OUTPUT_LOW, /* WLAN_PMU_EN */
+ GPIO4_GPIO | PIN_INPUT_PULLUP,/* WLAN_IRQ */
};
static pin_cfg_t snowball_pins[] = {
@@ -274,15 +228,853 @@ static pin_cfg_t snowball_pins[] = {
/* RSTn_LAN */
GPIO141_GPIO | PIN_OUTPUT_HIGH,
+
+ /* Accelerometer/Magnetometer */
+ GPIO163_GPIO | PIN_INPUT_PULLUP, /* ACCEL_IRQ1 */
+ GPIO164_GPIO | PIN_INPUT_PULLUP, /* ACCEL_IRQ2 */
+ GPIO165_GPIO | PIN_INPUT_PULLUP, /* MAG_DRDY */
+
+ /* WLAN/GBF */
+ GPIO161_GPIO | PIN_OUTPUT_LOW, /* WLAN_PMU_EN */
+ GPIO171_GPIO | PIN_OUTPUT_HIGH,/* GBF_ENA */
+ GPIO215_GPIO | PIN_OUTPUT_LOW,/* WLAN_ENA */
+ GPIO216_GPIO | PIN_INPUT_PULLUP,/* WLAN_IRQ */
+};
+
+/*
+ * I2C
+ */
+
+static UX500_PINS(mop500_pins_i2c0,
+ GPIO147_I2C0_SCL |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+ GPIO148_I2C0_SDA |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+);
+
+static UX500_PINS(mop500_pins_i2c1,
+ GPIO16_I2C1_SCL |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+ GPIO17_I2C1_SDA |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+);
+
+static UX500_PINS(mop500_pins_i2c2,
+ GPIO10_I2C2_SDA |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+ GPIO11_I2C2_SCL |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+);
+
+static UX500_PINS(mop500_pins_i2c3,
+ GPIO229_I2C3_SDA |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+ GPIO230_I2C3_SCL |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+);
+
+static UX500_PINS(mop500_pins_mcde_tvout,
+ GPIO78_LCD_D8,
+ GPIO79_LCD_D9,
+ GPIO80_LCD_D10,
+ GPIO81_LCD_D11,
+ GPIO150_LCDA_CLK,
+);
+
+static UX500_PINS(mop500_pins_mcde_hdmi,
+ GPIO69_LCD_VSI1 | PIN_INPUT_PULLUP,
+);
+
+static UX500_PINS(mop500_pins_ske,
+ GPIO153_KP_I7 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO154_KP_I6 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO155_KP_I5 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO156_KP_I4 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO161_KP_I3 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO162_KP_I2 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO163_KP_I1 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO164_KP_I0 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO157_KP_O7 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO158_KP_O6 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO159_KP_O5 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO160_KP_O4 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO165_KP_O3 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO166_KP_O2 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO167_KP_O1 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO168_KP_O0 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+);
+
+/* sdi0 (removable MMC/SD/SDIO cards) */
+static UX500_PINS(mop500_pins_sdi0,
+ GPIO18_MC0_CMDDIR | PIN_OUTPUT_HIGH,
+ GPIO19_MC0_DAT0DIR | PIN_OUTPUT_HIGH,
+ GPIO20_MC0_DAT2DIR | PIN_OUTPUT_HIGH,
+
+ GPIO22_MC0_FBCLK | PIN_INPUT_NOPULL,
+ GPIO23_MC0_CLK | PIN_OUTPUT_LOW,
+ GPIO24_MC0_CMD | PIN_INPUT_PULLUP,
+ GPIO25_MC0_DAT0 | PIN_INPUT_PULLUP,
+ GPIO26_MC0_DAT1 | PIN_INPUT_PULLUP,
+ GPIO27_MC0_DAT2 | PIN_INPUT_PULLUP,
+ GPIO28_MC0_DAT3 | PIN_INPUT_PULLUP,
+);
+
+/* sdi1 (WLAN CW1200) */
+static UX500_PINS(mop500_pins_sdi1,
+ GPIO208_MC1_CLK | PIN_OUTPUT_LOW,
+ GPIO209_MC1_FBCLK | PIN_INPUT_NOPULL,
+ GPIO210_MC1_CMD | PIN_INPUT_PULLUP,
+ GPIO211_MC1_DAT0 | PIN_INPUT_PULLUP,
+ GPIO212_MC1_DAT1 | PIN_INPUT_PULLUP,
+ GPIO213_MC1_DAT2 | PIN_INPUT_PULLUP,
+ GPIO214_MC1_DAT3 | PIN_INPUT_PULLUP,
+);
+
+/* sdi2 (POP eMMC) */
+static UX500_PINS(mop500_pins_sdi2,
+ GPIO128_MC2_CLK | PIN_OUTPUT_LOW,
+ GPIO129_MC2_CMD | PIN_INPUT_PULLUP,
+ GPIO130_MC2_FBCLK | PIN_INPUT_NOPULL,
+ GPIO131_MC2_DAT0 | PIN_INPUT_PULLUP,
+ GPIO132_MC2_DAT1 | PIN_INPUT_PULLUP,
+ GPIO133_MC2_DAT2 | PIN_INPUT_PULLUP,
+ GPIO134_MC2_DAT3 | PIN_INPUT_PULLUP,
+ GPIO135_MC2_DAT4 | PIN_INPUT_PULLUP,
+ GPIO136_MC2_DAT5 | PIN_INPUT_PULLUP,
+ GPIO137_MC2_DAT6 | PIN_INPUT_PULLUP,
+ GPIO138_MC2_DAT7 | PIN_INPUT_PULLUP,
+);
+
+/* sdi4 (PCB eMMC) */
+static UX500_PINS(mop500_pins_sdi4,
+ GPIO197_MC4_DAT3 | PIN_INPUT_PULLUP,
+ GPIO198_MC4_DAT2 | PIN_INPUT_PULLUP,
+ GPIO199_MC4_DAT1 | PIN_INPUT_PULLUP,
+ GPIO200_MC4_DAT0 | PIN_INPUT_PULLUP,
+ GPIO201_MC4_CMD | PIN_INPUT_PULLUP,
+ GPIO202_MC4_FBCLK | PIN_INPUT_NOPULL,
+ GPIO203_MC4_CLK | PIN_OUTPUT_LOW,
+ GPIO204_MC4_DAT7 | PIN_INPUT_PULLUP,
+ GPIO205_MC4_DAT6 | PIN_INPUT_PULLUP,
+ GPIO206_MC4_DAT5 | PIN_INPUT_PULLUP,
+ GPIO207_MC4_DAT4 | PIN_INPUT_PULLUP,
+);
+
+/* USB */
+static UX500_PINS(mop500_pins_usb,
+ GPIO256_USB_NXT,
+ GPIO257_USB_STP | PIN_OUTPUT_HIGH,
+ GPIO258_USB_XCLK,
+ GPIO259_USB_DIR,
+ GPIO260_USB_DAT7,
+ GPIO261_USB_DAT6,
+ GPIO262_USB_DAT5,
+ GPIO263_USB_DAT4,
+ GPIO264_USB_DAT3,
+ GPIO265_USB_DAT2,
+ GPIO266_USB_DAT1,
+ GPIO267_USB_DAT0,
+);
+
+/* SPI2 */
+static UX500_PINS(mop500_pins_spi2,
+ GPIO216_GPIO | PIN_OUTPUT_HIGH,
+ GPIO218_SPI2_RXD | PIN_INPUT_PULLDOWN,
+ GPIO215_SPI2_TXD | PIN_OUTPUT_LOW,
+ GPIO217_SPI2_CLK | PIN_OUTPUT_LOW,
+);
+
+static UX500_PINS(mop500_pins_sensors1p_v60,
+ GPIO217_GPIO | PIN_INPUT_PULLUP |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+ GPIO145_GPIO | PIN_INPUT_PULLDOWN |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+ GPIO139_GPIO | PIN_INPUT_PULLUP |
+ PIN_SLPM_GPIO | PIN_SLPM_INPUT_NOPULL,
+);
+
+static UX500_PINS(mop500_pins_sensors1p,
+ PIN_CFG_INPUT(GPIO_PROX_SENSOR, GPIO, NOPULL),
+ PIN_CFG_INPUT(GPIO_HAL_SENSOR, GPIO, NOPULL),
+);
+
+static struct ux500_pin_lookup mop500_runtime_pins[] = {
+ PIN_LOOKUP("mcde-tvout", &mop500_pins_mcde_tvout),
+ PIN_LOOKUP("av8100-hdmi", &mop500_pins_mcde_hdmi),
+ PIN_LOOKUP("nmk-i2c.0", &mop500_pins_i2c0),
+ PIN_LOOKUP("nmk-i2c.1", &mop500_pins_i2c1),
+ PIN_LOOKUP("nmk-i2c.2", &mop500_pins_i2c2),
+ PIN_LOOKUP("nmk-i2c.3", &mop500_pins_i2c3),
+ PIN_LOOKUP("sdi0", &mop500_pins_sdi0),
+ PIN_LOOKUP("sdi1", &mop500_pins_sdi1),
+ PIN_LOOKUP("sdi2", &mop500_pins_sdi2),
+ PIN_LOOKUP("sdi4", &mop500_pins_sdi4),
+ PIN_LOOKUP("ab8500-usb.0", &mop500_pins_usb),
+ PIN_LOOKUP("spi2", &mop500_pins_spi2),
+};
+
+static struct ux500_pin_lookup mop500_runtime_pins_v60[] = {
+ PIN_LOOKUP("ske", &mop500_pins_ske),
+ PIN_LOOKUP("gpio-keys.0", &mop500_pins_sensors1p_v60),
};
+static struct ux500_pin_lookup mop500_runtime_pins_pre_v60[] = {
+ PIN_LOOKUP("ske", &mop500_pins_ske),
+ PIN_LOOKUP("gpio-keys.0", &mop500_pins_sensors1p),
+};
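The PIN_LOOKUP() tables above tie named pin groups to device names so they can be claimed and applied at run time instead of being muxed unconditionally at boot. The snippet below is a sketch of how a driver might claim its group; the ux500_pins_get()/ux500_pins_enable() accessors are assumed to come from pins.h and are not part of this patch.

static struct ux500_pins *sdi0_pins;

static int example_claim_sdi0_pins(void)
{
	sdi0_pins = ux500_pins_get("sdi0");	/* matches PIN_LOOKUP("sdi0", ...) */
	if (!sdi0_pins)
		return -ENODEV;

	return ux500_pins_enable(sdi0_pins);	/* apply the run-time configuration */
}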
+
+/*
+ * Sleep pin configuration for the u8500 platform.
+ * If other hardware is used, the GPIOs must be configured
+ * correctly when entering sleep for optimal power
+ * consumption.
+ */
+static pin_cfg_t mop500_pins_common_power_save_bank0[] = {
+ GPIO0_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO1_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO2_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO3_GPIO | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO4_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO5_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO6_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO7_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO8_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO9_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ /* 10-11 - I2C2 */
+ GPIO10_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO11_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO12_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO13_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO14_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO15_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ /* 16-17 - I2C1 */
+ GPIO16_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO17_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO18_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO19_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO20_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO21_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO22_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO23_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO24_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO25_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO26_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO27_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO28_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO29_U2_RXD | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO30_U2_TXD | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO31_U2_CTSn | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank0_href60[] = {
+ GPIO0_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO1_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO2_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO3_GPIO | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO4_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO5_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO6_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO7_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO8_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO9_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ /* 10-11 - I2C2 */
+ GPIO10_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO11_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO12_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO13_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO14_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO15_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ /* 16-17 - I2C1 */
+ GPIO16_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO17_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO18_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO19_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO20_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO21_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO22_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO23_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO24_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO25_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO26_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO27_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO28_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO29_U2_RXD | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO30_U2_TXD | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO31_U2_CTSn | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank1[] = {
+ GPIO32_U2_RTSn | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO33_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO34_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO35_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO36_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank2[] = {
+ GPIO64_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO65_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO66_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO67_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO68_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO69_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO70_STMAPE_CLK | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO71_STMAPE_DAT3 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO72_STMAPE_DAT2 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO73_STMAPE_DAT1 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO74_STMAPE_DAT0 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO75_U2_RXD | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO76_U2_TXD | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO77_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO78_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO79_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO80_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO81_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO82_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO83_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO84_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO85_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO86_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO87_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO88_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO89_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO90_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO91_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO92_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO93_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO94_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO95_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank2_href60[] = {
+ GPIO64_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO65_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO66_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO67_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO68_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO69_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO70_STMAPE_CLK | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO71_STMAPE_DAT3 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO72_STMAPE_DAT2 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO73_STMAPE_DAT1 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO74_STMAPE_DAT0 | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_DISABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO75_U2_RXD | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO76_U2_TXD | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO77_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO78_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO79_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO80_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO81_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO82_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO83_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO84_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO85_GPIO,
+ GPIO86_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO87_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO88_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO89_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO90_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO91_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO92_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO93_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO94_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO95_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank3[] = {
+ GPIO96_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO97_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank4[] = {
+ GPIO128_MC2_CLK | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO129_MC2_CMD | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO130_MC2_FBCLK | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO131_MC2_DAT0 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO132_MC2_DAT1 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO133_MC2_DAT2 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO134_MC2_DAT3 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO135_MC2_DAT4 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO136_MC2_DAT5 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO137_MC2_DAT6 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO138_MC2_DAT7 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO139_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO140_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO141_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO142_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO143_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO144_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ /* 145 - HAL sensor (on v60 and later) */
+ GPIO145_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO146_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ /* 147-148 - I2C0 */
+ GPIO147_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO148_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO149_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO150_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO151_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO152_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO153_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO154_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO155_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO156_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO157_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO158_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO159_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank4_u9500_uibr3[] = {
+ GPIO128_MC2_CLK | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO129_MC2_CMD | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO130_MC2_FBCLK | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO131_MC2_DAT0 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO132_MC2_DAT1 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO133_MC2_DAT2 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO134_MC2_DAT3 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO135_MC2_DAT4 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO136_MC2_DAT5 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO137_MC2_DAT6 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO138_MC2_DAT7 | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO139_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO140_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO141_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO142_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO143_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO144_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ /* 145 - HAL sensor (on v60 and later) */
+ GPIO145_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ /* 147-148 - I2C0 */
+ GPIO147_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO148_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO149_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO150_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO151_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO152_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO153_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO154_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO155_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO156_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO157_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO158_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO159_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank5[] = {
+ GPIO160_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO161_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO162_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO163_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO164_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO165_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO166_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO167_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO168_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO169_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO170_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO171_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank5_href60[] = {
+ GPIO160_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO161_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO162_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO163_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO164_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO165_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO166_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO167_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO168_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO169_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO170_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO171_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank6[] = {
+ GPIO192_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO193_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO194_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO195_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO196_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO197_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO198_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO199_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO200_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO201_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO202_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO203_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO204_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO205_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO206_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO207_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO208_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO209_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO210_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO211_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO212_GPIO,
+ GPIO213_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO214_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO215_GPIO,
+
+ GPIO216_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO217_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO218_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO219_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO220_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO221_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO222_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO223_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank6_href60[] = {
+ GPIO192_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO193_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO194_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO195_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO196_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO197_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO198_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO199_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO200_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO201_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO202_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO203_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO204_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO205_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO206_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO207_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO208_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO209_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO210_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO211_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO212_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO213_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO214_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO215_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO216_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ /* 217 - Proximity */
+ GPIO217_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO218_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO219_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO220_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO221_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO222_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO223_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank6_u9500[] = {
+ GPIO192_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO193_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO194_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO195_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO196_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO197_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO198_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO199_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO200_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO201_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO202_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO203_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO204_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO205_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO206_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO207_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO208_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO209_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO210_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO211_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO212_GPIO,
+ GPIO213_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO214_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO215_GPIO,
+
+ GPIO216_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO217_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO218_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO219_GPIO | PIN_SLPM_INPUT_PULLDOWN,
+
+ GPIO220_GPIO | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO221_GPIO | PIN_SLPM_OUTPUT_LOW,
+ GPIO222_GPIO | PIN_SLPM_OUTPUT_LOW,
+ GPIO223_GPIO | PIN_SLPM_OUTPUT_LOW,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank7[] = {
+ GPIO224_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO225_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO226_GPIO | PIN_SLPM_DIR_OUTPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO227_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO228_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ /* 229-230 - I2C3 */
+ GPIO229_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO230_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank7_href60[] = {
+ GPIO224_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO225_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO226_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO227_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO228_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ /* 229-230 - I2C3 */
+ GPIO229_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO230_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank7_u9500[] = {
+ GPIO224_GPIO | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO225_GPIO | PIN_SLPM_INPUT_PULLDOWN | PIN_SLPM_WAKEUP_ENABLE,
+ GPIO226_GPIO | PIN_SLPM_OUTPUT_LOW,
+ GPIO227_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+
+ GPIO228_GPIO | PIN_SLPM_OUTPUT_LOW | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO229_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO230_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+};
+
+static pin_cfg_t mop500_pins_common_power_save_bank8[] = {
+ GPIO256_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO257_GPIO | PIN_SLPM_OUTPUT_HIGH | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_DISABLED,
+ GPIO258_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO259_GPIO | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+
+ GPIO260_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO261_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO262_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO263_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+
+ GPIO264_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO265_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO266_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+ GPIO267_GPIO | PIN_SLPM_DIR_INPUT | PIN_SLPM_WAKEUP_ENABLE | PIN_SLPM_PDIS_ENABLED,
+};
+
+static void mop500_pins_suspend_force(void)
+{
+ if (machine_is_hrefv60())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank0_href60,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank0_href60));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank0,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank0));
+
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank1,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank1));
+
+ if (machine_is_hrefv60())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank2_href60,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank2_href60));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank2,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank2));
+
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank3,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank3));
+
+
+ if (pins_for_u9500() && uib_is_u8500uibr3())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank4_u9500_uibr3,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank4_u9500_uibr3));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank4,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank4));
+
+ if (machine_is_hrefv60())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank5_href60,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank5_href60));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank5,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank5));
+
+ if (pins_for_u9500())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank6_u9500,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank6_u9500));
+ else if (machine_is_hrefv60())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank6_href60,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank6_href60));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank6,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank6));
+
+ if (pins_for_u9500())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank7_u9500,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank7_u9500));
+ else if (machine_is_hrefv60())
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank7_href60,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank7_href60));
+ else
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank7,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank7));
+
+ sleep_pins_config_pm(mop500_pins_common_power_save_bank8,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank8));
+}
+
+/*
+ * This function is called to force GPIO power-save
+ * mux settings during suspend.
+ * It is a temporary solution until all drivers control
+ * their own pin settings when inactive.
+ */
+static void mop500_pins_suspend_force_mux(void)
+{
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank0,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank0));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank1,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank1));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank2,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank2));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank3,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank3));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank4,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank4));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank5,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank5));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank6,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank6));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank7,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank7));
+
+ sleep_pins_config_pm_mux(mop500_pins_common_power_save_bank8,
+ ARRAY_SIZE(mop500_pins_common_power_save_bank8));
+}
+
+/*
+ * Passing "pinsfor=" on the kernel command line allows custom
+ * configuration of GPIOs on u8500-derived boards.
+ */
+static int __init early_pinsfor(char *p)
+{
+ pinsfor = PINS_FOR_DEFAULT;
+
+ if (strcmp(p, "u9500-21") == 0)
+ pinsfor = PINS_FOR_U9500;
+
+ return 0;
+}
+early_param("pinsfor", early_pinsfor);
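As a usage note, the U9500 pin set is selected purely from the boot loader by appending the parameter to the kernel command line, for example (the other arguments here are placeholders only):

	console=ttyAMA2,115200n8 root=/dev/ram0 pinsfor=u9500-21

Any other value, or omitting the parameter, leaves pinsfor at PINS_FOR_DEFAULT so the u8500_pins table is applied instead.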
+
+int pins_for_u9500(void)
+{
+ if (pinsfor == PINS_FOR_U9500)
+ return 1;
+
+ return 0;
+}
+
+static UX500_PINS(mop500_offchip_gpio_cfg,
+ /*
+ * Workaround for auto shutdown of 3.2MHz oscillator during
+ * deep sleep. APESPICSn/GPIO37 must be floating on the board
+ * to use this fix.
+ */
+ AB8500_PIN_GPIO37 | PIN_OUTPUT_HIGH,
+);
+
void __init mop500_pins_init(void)
{
nmk_config_pins(mop500_pins_common,
ARRAY_SIZE(mop500_pins_common));
+ ux500_pins_add(mop500_runtime_pins, ARRAY_SIZE(mop500_runtime_pins));
+
+ ux500_pins_add(mop500_runtime_pins_pre_v60,
+ ARRAY_SIZE(mop500_runtime_pins_pre_v60));
+
+ switch (pinsfor) {
+ case PINS_FOR_U9500:
+ nmk_config_pins(u9500_pins, ARRAY_SIZE(u9500_pins));
+ break;
+
+ case PINS_FOR_DEFAULT:
+ nmk_config_pins(u8500_pins, ARRAY_SIZE(u8500_pins));
+ default:
+ break;
+ }
+
nmk_config_pins(mop500_pins_default,
ARRAY_SIZE(mop500_pins_default));
+
+ suspend_set_pins_force_fn(mop500_pins_suspend_force,
+ mop500_pins_suspend_force_mux);
}
void __init snowball_pins_init(void)
@@ -290,8 +1082,14 @@ void __init snowball_pins_init(void)
nmk_config_pins(mop500_pins_common,
ARRAY_SIZE(mop500_pins_common));
- nmk_config_pins(snowball_pins,
- ARRAY_SIZE(snowball_pins));
+ ux500_pins_add(mop500_runtime_pins, ARRAY_SIZE(mop500_runtime_pins));
+
+ nmk_config_pins(u8500_pins, ARRAY_SIZE(u8500_pins));
+
+ nmk_config_pins(snowball_pins, ARRAY_SIZE(snowball_pins));
+
+ suspend_set_pins_force_fn(mop500_pins_suspend_force,
+ mop500_pins_suspend_force_mux);
}
void __init hrefv60_pins_init(void)
@@ -299,6 +1097,35 @@ void __init hrefv60_pins_init(void)
nmk_config_pins(mop500_pins_common,
ARRAY_SIZE(mop500_pins_common));
+ ux500_pins_add(mop500_runtime_pins, ARRAY_SIZE(mop500_runtime_pins));
+
+ ux500_pins_add(mop500_runtime_pins_v60,
+ ARRAY_SIZE(mop500_runtime_pins_v60));
+
nmk_config_pins(hrefv60_pins,
ARRAY_SIZE(hrefv60_pins));
+
+ switch (pinsfor) {
+ case PINS_FOR_U9500:
+ nmk_config_pins(u9500_pins, ARRAY_SIZE(u9500_pins));
+ break;
+
+ case PINS_FOR_DEFAULT:
+ nmk_config_pins(u8500_pins, ARRAY_SIZE(u8500_pins));
+ default:
+ break;
+ }
+
+ suspend_set_pins_force_fn(mop500_pins_suspend_force,
+ mop500_pins_suspend_force_mux);
+}
+
+static int __init mop500_offchip_gpio_init(void)
+{
+ if (machine_is_hrefv60())
+ ux500_offchip_gpio_init(&mop500_offchip_gpio_cfg);
+
+ return 0;
}
+/* Let gpio chip drivers initialize. */
+late_initcall(mop500_offchip_gpio_init);
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 2735d03996c..e7b095be633 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -11,8 +11,51 @@
#include <linux/kernel.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
+#include <mach/id.h> /* to identify older boards for fixes */
#include "board-mop500-regulators.h"
+#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
+/*
+ * GPIO regulator controlled by the ab8500 GPIO16
+ */
+static struct regulator_consumer_supply gpio_wlan_vbat_consumers[] = {
+ /* for cg2900 chip */
+ REGULATOR_SUPPLY("vdd", "cg2900-uart.0"),
+ /* for cw1200 chip */
+ REGULATOR_SUPPLY("vdd", "cw1200_wlan"),
+};
+
+struct regulator_init_data gpio_wlan_vbat_regulator = {
+ .constraints = {
+ .name = "WLAN-VBAT",
+ .min_uV = 3600000,
+ .max_uV = 3600000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(gpio_wlan_vbat_consumers),
+ .consumer_supplies = gpio_wlan_vbat_consumers,
+};
+
+/*
+ * GPIO regulator controlled by the ab8500 GPIO26
+ */
+static struct regulator_consumer_supply gpio_en_3v3_consumers[] = {
+ /* for LAN chip */
+ REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+};
+
+struct regulator_init_data gpio_en_3v3_regulator = {
+ .constraints = {
+ .name = "EN-3V3",
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(gpio_en_3v3_consumers),
+ .consumer_supplies = gpio_en_3v3_consumers,
+};
+#endif
+
/*
* TPS61052 regulator
*/
@@ -38,21 +81,37 @@ struct regulator_init_data tps61052_regulator = {
};
static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
- /* External displays, connector on board 2v5 power supply */
- REGULATOR_SUPPLY("vaux12v5", "mcde.0"),
+ /* Main display, u8500 R3 uib */
+ REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
+ /* Main display, u8500 uib and ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"),
+ /* Secondary display, ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"),
/* SFH7741 proximity sensor */
REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
/* BH1780GLS ambient light sensor */
REGULATOR_SUPPLY("vcc", "2-0029"),
/* lsm303dlh accelerometer */
- REGULATOR_SUPPLY("vdd", "3-0018"),
+ REGULATOR_SUPPLY("vdd", "lsm303dlh.0"),
/* lsm303dlh magnetometer */
- REGULATOR_SUPPLY("vdd", "3-001e"),
+ REGULATOR_SUPPLY("vdd", "lsm303dlh.1"),
/* Rohm BU21013 Touchscreen devices */
REGULATOR_SUPPLY("avdd", "3-005c"),
REGULATOR_SUPPLY("avdd", "3-005d"),
/* Synaptics RMI4 Touchscreen device */
REGULATOR_SUPPLY("vdd", "3-004b"),
+ /* L3G4200D Gyroscope device */
+ REGULATOR_SUPPLY("vdd", "l3g4200d"),
+ /* Proximity and Hall sensor device */
+ REGULATOR_SUPPLY("vdd", "sensor1p.0"),
+ /* Ambient light sensor device */
+ REGULATOR_SUPPLY("vdd", "3-0029"),
+ /* Pressure sensor device */
+ REGULATOR_SUPPLY("vdd", "2-005c"),
+ /* Cypress TrueTouch Touchscreen device */
+ REGULATOR_SUPPLY("vcpin", "spi8.0"),
+ /* Camera device */
+ REGULATOR_SUPPLY("vaux12v5", "mmio_camera"),
};
static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
@@ -60,9 +119,16 @@ static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
REGULATOR_SUPPLY("vmmc", "sdi4"),
/* AB8500 audio codec */
REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
+ /* AB8500 accessory detect 1 */
+ REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"),
+ /* AV8100 HDMI device */
+ REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"),
};
static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
+ REGULATOR_SUPPLY("v-SD-STM", "stm"),
/* External MMC slot power */
REGULATOR_SUPPLY("vmmc", "sdi0"),
};
@@ -72,6 +138,30 @@ static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
/* Internal general-purpose ADC */
REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
+ /* ADC for charger */
+ REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"),
+};
+
+static struct regulator_consumer_supply ab8500_vaudio_consumers[] = {
+ /* AB8500 audio codec device */
+ REGULATOR_SUPPLY("v-audio", NULL),
+};
+
+static struct regulator_consumer_supply ab8500_vamic1_consumers[] = {
+ /* AB8500 audio codec device */
+ REGULATOR_SUPPLY("v-amic1", NULL),
+};
+
+static struct regulator_consumer_supply ab8500_vamic2_consumers[] = {
+ /* AB8500 audio codec device */
+ REGULATOR_SUPPLY("v-amic2", NULL),
+};
+
+static struct regulator_consumer_supply ab8500_vdmic_consumers[] = {
+ /* AB8500 audio codec device */
+ REGULATOR_SUPPLY("v-dmic", NULL),
};
static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
@@ -82,74 +172,85 @@ static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
};
static struct regulator_consumer_supply ab8500_vana_consumers[] = {
- /* External displays, connector on board, 1v8 power supply */
- REGULATOR_SUPPLY("vsmps2", "mcde.0"),
+ /* DB8500 DSI */
+ REGULATOR_SUPPLY("vdddsi1v2", "mcde"),
+ /* DB8500 CSI */
+ REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply ab8500_sysclkreq_2_consumers[] = {
+ /* CG2900 device */
+ REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
+};
+
+static struct regulator_consumer_supply ab8500_sysclkreq_4_consumers[] = {
+ /* CW1200 device */
+ REGULATOR_SUPPLY("wlan_1v8", "cw1200_wlan.0"),
};
/* ab8500 regulator register initialization */
-struct ab8500_regulator_reg_init
-ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
+static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
/*
* VanaRequestCtrl = HP/LP depending on VxRequest
* VextSupply1RequestCtrl = HP/LP depending on VxRequest
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00),
/*
* VextSupply2RequestCtrl = HP/LP depending on VxRequest
* VextSupply3RequestCtrl = HP/LP depending on VxRequest
* Vaux1RequestCtrl = HP/LP depending on VxRequest
* Vaux2RequestCtrl = HP/LP depending on VxRequest
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00),
/*
* Vaux3RequestCtrl = HP/LP depending on VxRequest
* SwHPReq = Control through SWValid disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00),
/*
* VanaSysClkReq1HPValid = disabled
* Vaux1SysClkReq1HPValid = disabled
* Vaux2SysClkReq1HPValid = disabled
* Vaux3SysClkReq1HPValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
/*
* VextSupply1SysClkReq1HPValid = disabled
* VextSupply2SysClkReq1HPValid = disabled
* VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x40),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40),
/*
* VanaHwHPReq1Valid = disabled
* Vaux1HwHPreq1Valid = disabled
* Vaux2HwHPReq1Valid = disabled
* Vaux3HwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00),
/*
* VextSupply1HwHPReq1Valid = disabled
* VextSupply2HwHPReq1Valid = disabled
* VextSupply3HwHPReq1Valid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00),
/*
* VanaHwHPReq2Valid = disabled
* Vaux1HwHPReq2Valid = disabled
* Vaux2HwHPReq2Valid = disabled
* Vaux3HwHPReq2Valid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00),
/*
* VextSupply1HwHPReq2Valid = disabled
* VextSupply2HwHPReq2Valid = disabled
* VextSupply3HwHPReq2Valid = HWReq2 controlled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x04),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04),
/*
* VanaSwHPReqValid = disabled
* Vaux1SwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00),
/*
* Vaux2SwHPReqValid = disabled
* Vaux3SwHPReqValid = disabled
@@ -157,7 +258,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* VextSupply2SwHPReqValid = disabled
* VextSupply3SwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00),
/*
* SysClkReq2Valid1 = SysClkReq2 controlled
* SysClkReq3Valid1 = disabled
@@ -167,7 +268,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* SysClkReq7Valid1 = disabled
* SysClkReq8Valid1 = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0x2a),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a),
/*
* SysClkReq2Valid2 = disabled
* SysClkReq3Valid2 = disabled
@@ -177,7 +278,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* SysClkReq7Valid2 = disabled
* SysClkReq8Valid2 = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0x20),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20),
/*
* VTVoutEna = disabled
* Vintcore12Ena = disabled
@@ -185,66 +286,62 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* Vintcore12LP = inactive (HP)
* VTVoutLP = inactive (HP)
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0x10),
+ INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10),
/*
* VaudioEna = disabled
* VdmicEna = disabled
* Vamic1Ena = disabled
* Vamic2Ena = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00),
/*
* Vamic1_dzout = high-Z when Vamic1 is disabled
* Vamic2_dzout = high-Z when Vamic2 is disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00),
/*
- * VPll = Hw controlled
+ * VPll = Hw controlled (NOTE! PRCMU bits)
* VanaRegu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x02),
+ INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02),
/*
* VrefDDREna = disabled
* VrefDDRSleepMode = inactive (no pulldown)
*/
- INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00),
/*
- * VextSupply1Regu = HW control
- * VextSupply2Regu = HW control
- * VextSupply3Regu = HW control
+ * VextSupply1Regu = force LP
+ * VextSupply2Regu = force OFF
+ * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP)
* ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
* ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
*/
- INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0x2a),
+ INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13),
/*
* Vaux1Regu = force HP
* Vaux2Regu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x01),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01),
/*
- * Vaux3regu = force off
+ * Vaux3Regu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00),
/*
- * Vsmps1 = 1.15V
+ * Vaux1Sel = 2.8 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VSMPS1SEL1, 0x24),
- /*
- * Vaux1Sel = 2.5 V
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x08),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C),
/*
* Vaux2Sel = 2.9 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0d),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d),
/*
* Vaux3Sel = 2.91 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07),
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07),
/*
* VextSupply12LP = disabled (no LP)
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00),
/*
* Vaux1Disch = short discharge time
* Vaux2Disch = short discharge time
@@ -253,23 +350,24 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* VTVoutDisch = short discharge time
* VaudioDisch = short discharge time
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00),
/*
* VanaDisch = short discharge time
* VdmicPullDownEna = pulldown disabled when Vdmic is disabled
* VdmicDisch = short discharge time
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00),
};
/* AB8500 regulators */
-struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
+static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
/* supplies to the display/camera */
[AB8500_LDO_AUX1] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-DISPLAY",
- .min_uV = 2500000,
- .max_uV = 2900000,
+ .min_uV = 2800000,
+ .max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
.boot_on = 1, /* display is on at boot */
@@ -286,24 +384,32 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
},
/* supplies to the on-board eMMC */
[AB8500_LDO_AUX2] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-eMMC1",
.min_uV = 1100000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
.consumer_supplies = ab8500_vaux2_consumers,
},
/* supply for VAUX3, supplies to SDcard slots */
[AB8500_LDO_AUX3] = {
+ .supply_regulator = "ab8500-ext-supply3",
.constraints = {
.name = "V-MMC-SD",
.min_uV = 1100000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
.consumer_supplies = ab8500_vaux3_consumers,
@@ -323,6 +429,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-AUD",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaudio_consumers),
+ .consumer_supplies = ab8500_vaudio_consumers,
},
/* supply for v-anamic1 VAMic1-LDO */
[AB8500_LDO_ANAMIC1] = {
@@ -330,6 +438,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-AMIC1",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
+ .consumer_supplies = ab8500_vamic1_consumers,
},
/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
[AB8500_LDO_ANAMIC2] = {
@@ -337,6 +447,8 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-AMIC2",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
+ .consumer_supplies = ab8500_vamic2_consumers,
},
/* supply for v-dmic, VDMIC LDO */
[AB8500_LDO_DMIC] = {
@@ -344,23 +456,162 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.name = "V-DMIC",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vdmic_consumers),
+ .consumer_supplies = ab8500_vdmic_consumers,
},
/* supply for v-intcore12, VINTCORE12 LDO */
[AB8500_LDO_INTCORE] = {
.constraints = {
.name = "V-INTCORE",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .min_uV = 1250000,
+ .max_uV = 1350000,
+ .input_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE |
+ REGULATOR_CHANGE_DRMS,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
.consumer_supplies = ab8500_vintcore_consumers,
},
- /* supply for U8500 CSI/DSI, VANA LDO */
+ /* supply for U8500 CSI-DSI, VANA LDO */
[AB8500_LDO_ANA] = {
.constraints = {
- .name = "V-CSI/DSI",
+ .name = "V-CSI-DSI",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
.consumer_supplies = ab8500_vana_consumers,
},
+ /* sysclkreq 2 pin */
+ [AB8500_SYSCLKREQ_2] = {
+ .constraints = {
+ .name = "V-SYSCLKREQ-2",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab8500_sysclkreq_2_consumers),
+ .consumer_supplies = ab8500_sysclkreq_2_consumers,
+ },
+ /* sysclkreq 4 pin */
+ [AB8500_SYSCLKREQ_4] = {
+ .constraints = {
+ .name = "V-SYSCLKREQ-4",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab8500_sysclkreq_4_consumers),
+ .consumer_supplies = ab8500_sysclkreq_4_consumers,
+ },
};
+
+/* supply for VextSupply3 */
+static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = {
+ /* SIM supply for 3 V SIM cards */
+ REGULATOR_SUPPLY("vinvsim", "sim-detect.0"),
+};
+
+/* extended configuration for VextSupply2, only used for HREFP_V20 boards */
+static struct ab8500_ext_regulator_cfg ab8500_ext_supply2 = {
+ .hwreq = true,
+};
+
+/*
+ * AB8500 external regulators
+ */
+static struct regulator_init_data ab8500_ext_regulators[] = {
+ /* fixed Vbat supplies VSMPS1_EXT_1V8 */
+ [AB8500_EXT_SUPPLY1] = {
+ .constraints = {
+ .name = "ab8500-ext-supply1",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .initial_mode = REGULATOR_MODE_IDLE,
+ .boot_on = 1,
+ .always_on = 1,
+ },
+ },
+ /* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */
+ [AB8500_EXT_SUPPLY2] = {
+ .constraints = {
+ .name = "ab8500-ext-supply2",
+ .min_uV = 1360000,
+ .max_uV = 1360000,
+ },
+ },
+ /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */
+ [AB8500_EXT_SUPPLY3] = {
+ .constraints = {
+ .name = "ab8500-ext-supply3",
+ .min_uV = 3400000,
+ .max_uV = 3400000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab8500_ext_supply3_consumers),
+ .consumer_supplies = ab8500_ext_supply3_consumers,
+ },
+};
+
+struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
+ .reg_init = ab8500_reg_init,
+ .num_reg_init = ARRAY_SIZE(ab8500_reg_init),
+ .regulator = ab8500_regulators,
+ .num_regulator = ARRAY_SIZE(ab8500_regulators),
+ .ext_regulator = ab8500_ext_regulators,
+ .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators),
+};
+
+static void ab8500_modify_reg_init(int id, u8 mask, u8 value)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(ab8500_reg_init) - 1; i >= 0; i--) {
+ if (ab8500_reg_init[i].id == id) {
+ u8 initval = ab8500_reg_init[i].value;
+ initval = (initval & ~mask) | (value & mask);
+ ab8500_reg_init[i].value = initval;
+
+ BUG_ON(mask & ~ab8500_reg_init[i].mask);
+ return;
+ }
+ }
+
+ BUG_ON(1);
+}
+
+void mop500_regulator_init(void)
+{
+ struct regulator_init_data *regulator;
+
+ /*
+ * Handle AB8500_EXT_SUPPLY2 on HREFP_V20_V50 boards (do it for
+ * all HREFP_V20 boards)
+ */
+ if (cpu_is_u8500v20()) {
+ /* VextSupply2RequestCtrl = HP/OFF depending on VxRequest */
+ ab8500_modify_reg_init(AB8500_REGUREQUESTCTRL3, 0x01, 0x01);
+
+ /* VextSupply2SysClkReq1HPValid = SysClkReq1 controlled */
+ ab8500_modify_reg_init(AB8500_REGUSYSCLKREQ1HPVALID2,
+ 0x20, 0x20);
+
+ /* VextSupply2 = force HP at initialization */
+ ab8500_modify_reg_init(AB8500_EXTSUPPLYREGU, 0x0c, 0x04);
+
+ /* enable VextSupply2 during platform active */
+ regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
+ regulator->constraints.always_on = 1;
+
+ /* disable VextSupply2 in suspend */
+ regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
+ regulator->constraints.state_mem.disabled = 1;
+ regulator->constraints.state_standby.disabled = 1;
+
+ /* enable VextSupply2 HW control (used in suspend) */
+ regulator->driver_data = (void *)&ab8500_ext_supply2;
+ }
+}
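The INIT_REGULATOR_REGISTER() entries now carry a mask in addition to the value, and mop500_regulator_init() patches individual fields per board through ab8500_modify_reg_init(). Here is a minimal stand-alone sketch of that masked read-modify-write (plain C, hypothetical register ids; a model of the pattern, not the kernel code itself).

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One entry per register: which bits may be touched and their initial value. */
struct reg_init {
	int id;
	uint8_t mask;
	uint8_t value;
};

static struct reg_init table[] = {
	{ .id = 1, .mask = 0xf0, .value = 0x00 },
	{ .id = 2, .mask = 0x0f, .value = 0x02 },
};

/* Same read-modify-write pattern as ab8500_modify_reg_init() above. */
static void modify_reg_init(int id, uint8_t mask, uint8_t value)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].id == id) {
			/* a board fix may only touch bits the entry declares */
			assert((mask & ~table[i].mask) == 0);
			table[i].value = (table[i].value & ~mask) | (value & mask);
			return;
		}
	}
	assert(0 && "unknown register id");
}

int main(void)
{
	modify_reg_init(2, 0x0c, 0x04);	/* e.g. force one regulator field to HP */
	printf("reg %d: value 0x%02x within mask 0x%02x\n",
	       table[1].id, table[1].value, table[1].mask);
	return 0;
}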
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
index 94992158d96..b5fc81a3649 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.h
+++ b/arch/arm/mach-ux500/board-mop500-regulators.h
@@ -14,9 +14,11 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
-extern struct ab8500_regulator_reg_init
-ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS];
-extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS];
+extern struct ab8500_regulator_platform_data ab8500_regulator_plat_data;
extern struct regulator_init_data tps61052_regulator;
+extern struct regulator_init_data gpio_wlan_vbat_regulator;
+extern struct regulator_init_data gpio_en_3v3_regulator;
+
+void mop500_regulator_init(void);
#endif
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index 5dde4d4ebe8..118d29b863f 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -11,15 +11,16 @@
#include <linux/amba/mmci.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
+#include <linux/delay.h>
#include <asm/mach-types.h>
#include <plat/ste_dma40.h>
#include <mach/devices.h>
#include <mach/hardware.h>
+#include <mach/ste-dma40-db8500.h>
#include "devices-db8500.h"
#include "board-mop500.h"
-#include "ste-dma40-db8500.h"
/*
* v2 has a new version of this block that need to be forced, the number found
@@ -31,72 +32,101 @@
* SDI 0 (MicroSD slot)
*/
-/* MMCIPOWER bits */
-#define MCI_DATA2DIREN (1 << 2)
-#define MCI_CMDDIREN (1 << 3)
-#define MCI_DATA0DIREN (1 << 4)
-#define MCI_DATA31DIREN (1 << 5)
-#define MCI_FBCLKEN (1 << 7)
-
/* GPIO pins used by the sdi0 level shifter */
static int sdi0_en = -1;
static int sdi0_vsel = -1;
-static u32 mop500_sdi0_vdd_handler(struct device *dev, unsigned int vdd,
- unsigned char power_mode)
+static int mop500_sdi0_ios_handler(struct device *dev, struct mmc_ios *ios)
{
- switch (power_mode) {
+ static unsigned char power_mode = MMC_POWER_ON;
+ static unsigned char signal_voltage = MMC_SIGNAL_VOLTAGE_330;
+
+ if (signal_voltage == ios->signal_voltage)
+ goto do_power;
+
+ /*
+ * We need to re-init the level shifter when switching I/O voltage levels.
+ * Max discharge time according to ST6G3244ME spec is 1 ms.
+ */
+ if (power_mode == MMC_POWER_ON) {
+ power_mode = MMC_POWER_OFF;
+ gpio_direction_output(sdi0_en, 0);
+ msleep(1);
+ }
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ gpio_direction_output(sdi0_vsel, 0);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ gpio_direction_output(sdi0_vsel, 1);
+ break;
+ default:
+ pr_warning("Non supported signal voltage for levelshifter.\n");
+ break;
+ }
+
+ signal_voltage = ios->signal_voltage;
+
+do_power:
+ if (power_mode == ios->power_mode)
+ return 0;
+
+ switch (ios->power_mode) {
case MMC_POWER_UP:
+ break;
case MMC_POWER_ON:
- /*
- * Level shifter voltage should depend on vdd to when deciding
- * on either 1.8V or 2.9V. Once the decision has been made the
- * level shifter must be disabled and re-enabled with a changed
- * select signal in order to switch the voltage. Since there is
- * no framework support yet for indicating 1.8V in vdd, use the
- * default 2.9V.
- */
- gpio_direction_output(sdi0_vsel, 0);
gpio_direction_output(sdi0_en, 1);
+ /* Max settling time according to ST6G3244ME spec is 100 us. */
+ udelay(100);
break;
case MMC_POWER_OFF:
- gpio_direction_output(sdi0_vsel, 0);
gpio_direction_output(sdi0_en, 0);
break;
}
- return MCI_FBCLKEN | MCI_CMDDIREN | MCI_DATA0DIREN |
- MCI_DATA2DIREN | MCI_DATA31DIREN;
+ power_mode = ios->power_mode;
+
+ return 0;
}
#ifdef CONFIG_STE_DMA40
struct stedma40_chan_cfg mop500_sdi0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
- .src_dev_type = DB8500_DMA_DEV29_SD_MM0_RX,
+ .src_dev_type = DB8500_DMA_DEV1_SD_MMC0_RX,
.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
.src_info.data_width = STEDMA40_WORD_WIDTH,
.dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .use_fixed_channel = true,
+ .phy_channel = 0,
};
static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.src_dev_type = STEDMA40_DEV_SRC_MEMORY,
- .dst_dev_type = DB8500_DMA_DEV29_SD_MM0_TX,
+ .dst_dev_type = DB8500_DMA_DEV1_SD_MMC0_TX,
.src_info.data_width = STEDMA40_WORD_WIDTH,
.dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .use_fixed_channel = true,
+ .phy_channel = 0,
};
#endif
static struct mmci_platform_data mop500_sdi0_data = {
- .vdd_handler = mop500_sdi0_vdd_handler,
- .ocr_mask = MMC_VDD_29_30,
+ .ios_handler = mop500_sdi0_ios_handler,
.f_max = 50000000,
.capabilities = MMC_CAP_4_BIT_DATA |
MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_MMC_HIGHSPEED,
+ MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_UHS_SDR12 |
+ MMC_CAP_UHS_SDR25,
.gpio_wp = -1,
+ .sigdir = MCI_ST_FBCLKEN |
+ MCI_ST_CMDDIREN |
+ MCI_ST_DATA0DIREN |
+ MCI_ST_DATA2DIREN,
#ifdef CONFIG_STE_DMA40
.dma_filter = stedma40_filter,
.dma_rx_param = &mop500_sdi0_dma_cfg_rx,
@@ -104,36 +134,6 @@ static struct mmci_platform_data mop500_sdi0_data = {
#endif
};
-static void sdi0_configure(void)
-{
- int ret;
-
- ret = gpio_request(sdi0_en, "level shifter enable");
- if (!ret)
- ret = gpio_request(sdi0_vsel,
- "level shifter 1v8-3v select");
-
- if (ret) {
- pr_warning("unable to config sdi0 gpios for level shifter.\n");
- return;
- }
-
- /* Select the default 2.9V and enable level shifter */
- gpio_direction_output(sdi0_vsel, 0);
- gpio_direction_output(sdi0_en, 1);
-
- /* Add the device, force v2 to subrevision 1 */
- db8500_add_sdi0(&mop500_sdi0_data, U8500_SDI_V2_PERIPHID);
-}
-
-void mop500_sdi_tc35892_init(void)
-{
- mop500_sdi0_data.gpio_cd = GPIO_SDMMC_CD;
- sdi0_en = GPIO_SDMMC_EN;
- sdi0_vsel = GPIO_SDMMC_1V8_3V_SEL;
- sdi0_configure();
-}
-
/*
* SDI1 (SDIO WLAN)
*/
@@ -170,10 +170,40 @@ static struct mmci_platform_data mop500_sdi1_data = {
#endif
};
+static void sdi0_sdi1_configure(void)
+{
+ int ret;
+
+ ret = gpio_request(sdi0_en, "level shifter enable");
+ if (!ret)
+ ret = gpio_request(sdi0_vsel,
+ "level shifter 1v8-3v select");
+
+ if (ret) {
+ pr_warning("unable to config sdi0 gpios for level shifter.\n");
+ return;
+ }
+
+ /* Select the default 2.9V and enable level shifter */
+ gpio_direction_output(sdi0_vsel, 0);
+ gpio_direction_output(sdi0_en, 1);
+
+ /* Add the device, force v2 to subrevision 1 */
+ db8500_add_sdi0(&mop500_sdi0_data, U8500_SDI_V2_PERIPHID);
+ db8500_add_sdi1(&mop500_sdi1_data, U8500_SDI_V2_PERIPHID);
+}
+
+void mop500_sdi_tc35892_init(void)
+{
+ mop500_sdi0_data.gpio_cd = GPIO_SDMMC_CD;
+ sdi0_en = GPIO_SDMMC_EN;
+ sdi0_vsel = GPIO_SDMMC_1V8_3V_SEL;
+ sdi0_sdi1_configure();
+}
+
/*
* SDI 2 (POP eMMC, not on DB8500ed)
*/
-
#ifdef CONFIG_STE_DMA40
struct stedma40_chan_cfg mop500_sdi2_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
@@ -198,7 +228,9 @@ static struct mmci_platform_data mop500_sdi2_data = {
.ocr_mask = MMC_VDD_165_195,
.f_max = 50000000,
.capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA |
- MMC_CAP_MMC_HIGHSPEED,
+ MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_ERASE,
+ .capabilities2 = MMC_CAP2_NO_SLEEP_CMD,
.gpio_cd = -1,
.gpio_wp = -1,
#ifdef CONFIG_STE_DMA40
@@ -233,7 +265,6 @@ static struct stedma40_chan_cfg mop500_sdi4_dma_cfg_tx = {
#endif
static struct mmci_platform_data mop500_sdi4_data = {
- .ocr_mask = MMC_VDD_29_30,
.f_max = 50000000,
.capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA |
MMC_CAP_MMC_HIGHSPEED,
@@ -253,7 +284,7 @@ void __init mop500_sdi_init(void)
/* On-board eMMC */
db8500_add_sdi4(&mop500_sdi4_data, U8500_SDI_V2_PERIPHID);
/*
- * On boards with the TC35892 GPIO expander, sdi0 will finally
+ * On boards with the TC35892 GPIO expander, sdi0 and sdi1 will finally
* be added when the TC35892 initializes and calls
* mop500_sdi_tc35892_init() above.
*/
@@ -270,7 +301,7 @@ void __init snowball_sdi_init(void)
mop500_sdi0_data.cd_invert = true;
sdi0_en = SNOWBALL_SDMMC_EN_GPIO;
sdi0_vsel = SNOWBALL_SDMMC_1V8_3V_GPIO;
- sdi0_configure();
+ sdi0_sdi1_configure();
}
void __init hrefv60_sdi_init(void)
@@ -283,7 +314,5 @@ void __init hrefv60_sdi_init(void)
mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO;
sdi0_en = HREFV60_SDMMC_EN_GPIO;
sdi0_vsel = HREFV60_SDMMC_1V8_3V_GPIO;
- sdi0_configure();
- /* WLAN SDIO channel */
- db8500_add_sdi1(&mop500_sdi1_data, U8500_SDI_V2_PERIPHID);
+ sdi0_sdi1_configure();
}
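The vdd_handler is replaced above by an ios_handler that also re-initialises the external level shifter when the card switches between 3.3 V and 1.8 V signalling, which the new UHS SDR12/SDR25 capabilities require. A simplified, stand-alone model of that sequencing follows (GPIO writes and delays replaced by printf(), timings taken from the comments in the patch; an illustration, not the driver code).

#include <stdbool.h>
#include <stdio.h>

enum sig_volt { VOLT_330, VOLT_180 };

static enum sig_volt cur_volt = VOLT_330;
static bool shifter_enabled = true;

static void set_signal_voltage(enum sig_volt v)
{
	if (v == cur_volt)
		return;

	/* The ST6G3244ME must be powered down before changing VSEL. */
	if (shifter_enabled) {
		printf("sdi0_en = 0, wait 1 ms for the shifter to discharge\n");
		shifter_enabled = false;
	}

	printf("sdi0_vsel = %d (%s signalling)\n",
	       v == VOLT_180 ? 1 : 0, v == VOLT_180 ? "1.8 V" : "3.3 V");
	cur_volt = v;

	printf("sdi0_en = 1, wait 100 us to settle\n");
	shifter_enabled = true;
}

int main(void)
{
	set_signal_voltage(VOLT_180);	/* switch for UHS signalling */
	set_signal_voltage(VOLT_180);	/* no change, nothing to do */
	set_signal_voltage(VOLT_330);	/* back to 3.3 V */
	return 0;
}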
diff --git a/arch/arm/mach-ux500/board-mop500-sensors.c b/arch/arm/mach-ux500/board-mop500-sensors.c
new file mode 100644
index 00000000000..e62112de204
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-sensors.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/gpio.h>
+#include <linux/lsm303dlh.h>
+#include <linux/l3g4200d.h>
+#include <linux/i2c.h>
+#include <linux/input/lps001wp.h>
+#include <asm/mach-types.h>
+#include <mach/id.h>
+
+#include "board-mop500.h"
+
+/*
+ * LSM303DLH accelerometer + magnetometer & L3G4200D Gyroscope sensors
+ */
+static struct lsm303dlh_platform_data __initdata lsm303dlh_pdata = {
+ .name_a = "lsm303dlh.0",
+ .name_m = "lsm303dlh.1",
+ .axis_map_x = 0,
+ .axis_map_y = 1,
+ .axis_map_z = 2,
+ .negative_x = 1,
+ .negative_y = 1,
+ .negative_z = 0,
+};
+
+static struct l3g4200d_gyr_platform_data __initdata l3g4200d_pdata = {
+ .name_gyr = "l3g4200d",
+ .axis_map_x = 1,
+ .axis_map_y = 0,
+ .axis_map_z = 2,
+ .negative_x = 0,
+ .negative_y = 0,
+ .negative_z = 1,
+};
+
+/*
+ * Platform data for the pressure sensor;
+ * poll interval and min interval are in milliseconds.
+ */
+static struct lps001wp_prs_platform_data __initdata lps001wp_pdata = {
+ .poll_interval = 1000,
+ .min_interval = 10,
+};
+
+static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
+ {
+ /* LSM303DLH Magnetometer */
+ I2C_BOARD_INFO("lsm303dlh_m", 0x1E),
+ .platform_data = &lsm303dlh_pdata,
+ },
+ {
+ /* L3G4200D Gyroscope */
+ I2C_BOARD_INFO("l3g4200d", 0x68),
+ .platform_data = &l3g4200d_pdata,
+ },
+ {
+ /* LSP001WM Barometer */
+ I2C_BOARD_INFO("lps001wp_prs", 0x5C),
+ .platform_data = &lps001wp_pdata,
+ },
+};
+
+/*
+ * Broken out because this device has a different i2c address on Snowball.
+ */
+static struct i2c_board_info __initdata mop500_2_i2c2_devices[] = {
+ {
+ /* LSM303DLH Accelerometer */
+ I2C_BOARD_INFO("lsm303dlh_a", 0x18),
+ .platform_data = &lsm303dlh_pdata,
+ },
+};
+
+/*
+ * This is needed because the i2c address changed in V7 and later, and
+ * there is no way of knowing whether the HW is V7 or higher, so we
+ * just have to try and fall back on failure.
+ */
+static struct i2c_board_info __initdata snowball_i2c2_devices[] = {
+ {
+ /* LSM303DLH Accelerometer */
+ I2C_BOARD_INFO("lsm303dlhc_a", 0x19),
+ .platform_data = &lsm303dlh_pdata,
+ },
+};
+
+
+/*
+ * Register/Add i2c sensors
+ */
+void mop500_sensors_i2c_add(int busnum, struct i2c_board_info const *info,
+ unsigned n)
+{
+ struct i2c_adapter *adap;
+ struct i2c_client *client;
+ int i;
+
+ adap = i2c_get_adapter(busnum);
+ if (!adap) {
+ /* No i2c adapter yet; queue the devices as board info instead. */
+ pr_info(__FILE__ ": Creating i2c adapter %d\n", busnum);
+ i2c_register_board_info(busnum, info, n);
+ return;
+ }
+
+ for (i = 0; i < n; i++) {
+ client = i2c_new_device(adap, &info[i]);
+ if (!client)
+ pr_err(__FILE__ ": failed to register %s to i2c%d\n",
+ info[i].type,
+ busnum);
+ }
+
+ i2c_put_adapter(adap);
+}
+
+/*
+ * Register/Add i2c sensors
+ */
+void mop500_sensors_probe_add_lsm303dlh_a(void)
+{
+ static const int busnum = 2;
+ struct i2c_adapter *adap;
+ struct i2c_client *client;
+ static const unsigned short i2c_addr_list[] = {
+ 0x18, 0x19, I2C_CLIENT_END };
+ struct i2c_board_info i2c_info = {
+ /* LSM303DLH Accelerometer */
+ I2C_BOARD_INFO("lsm303dlh_a", 0),
+ .platform_data = &lsm303dlh_pdata,
+ };
+
+ adap = i2c_get_adapter(busnum);
+ if (!adap) {
+ /* No i2c adapter available; nothing to probe against. */
+ pr_err(__FILE__ ": Could not get adapter %d\n", busnum);
+ return;
+ }
+ client = i2c_new_probed_device(adap, &i2c_info,
+ i2c_addr_list, NULL);
+ if (!client)
+ pr_err(__FILE__ ": failed to register %s to i2c%d\n",
+ i2c_info.type,
+ busnum);
+ i2c_put_adapter(adap);
+}
+
+/*
+ * Check which accelerometer chip is mounted on UIB and
+ * read the chip ID to detect whether chip is LSM303DHL/LSM303DHLC.
+ */
+static int mop500_get_acc_id(void)
+{
+ int status;
+ union i2c_smbus_data data;
+ struct i2c_adapter *i2c2;
+
+ i2c2 = i2c_get_adapter(2);
+ if (!i2c2) {
+ pr_err("failed to get i2c adapter\n");
+ return -1;
+ }
+ status = i2c_smbus_xfer(i2c2, 0x18 , 0 ,
+ I2C_SMBUS_READ, 0x0F ,
+ I2C_SMBUS_BYTE_DATA, &data);
+ if (status < 0) {
+ status = i2c_smbus_xfer(i2c2, 0x19 , 0 ,
+ I2C_SMBUS_READ, 0x0F ,
+ I2C_SMBUS_BYTE_DATA, &data);
+ }
+ i2c_put_adapter(i2c2);
+ return (status < 0) ? status : data.byte;
+}
+
+static int __init mop500_sensors_init(void)
+{
+ int ret;
+
+ if (!machine_is_snowball() && !uib_is_stuib() &&
+ !uib_is_u8500uib() && !uib_is_u8500uibr3())
+ return 0;
+
+ if (machine_is_hrefv60()) {
+ lsm303dlh_pdata.irq_a1 = HREFV60_ACCEL_INT1_GPIO;
+ lsm303dlh_pdata.irq_a2 = HREFV60_ACCEL_INT2_GPIO;
+ lsm303dlh_pdata.irq_m = HREFV60_MAGNET_DRDY_GPIO;
+ } else if (machine_is_snowball()) {
+ lsm303dlh_pdata.irq_a1 = SNOWBALL_ACCEL_INT1_GPIO;
+ lsm303dlh_pdata.irq_a2 = SNOWBALL_ACCEL_INT2_GPIO;
+ lsm303dlh_pdata.irq_m = SNOWBALL_MAGNET_DRDY_GPIO;
+ } else {
+ lsm303dlh_pdata.irq_a1 = GPIO_ACCEL_INT1;
+ lsm303dlh_pdata.irq_a2 = GPIO_ACCEL_INT2;
+ lsm303dlh_pdata.irq_m = GPIO_MAGNET_DRDY;
+ }
+
+ /* Special sensors data for 8500 UIBs */
+ if (uib_is_u8500uib() || uib_is_u8500uibr3()) {
+ lsm303dlh_pdata.axis_map_x = 1;
+ lsm303dlh_pdata.axis_map_y = 0;
+ lsm303dlh_pdata.negative_x = 0;
+ lsm303dlh_pdata.negative_y = 0;
+ lsm303dlh_pdata.negative_z = 1;
+
+ l3g4200d_pdata.axis_map_x = 0;
+ l3g4200d_pdata.axis_map_y = 1;
+ l3g4200d_pdata.negative_x = 1;
+ l3g4200d_pdata.negative_y = 0;
+ l3g4200d_pdata.negative_z = 1;
+ }
+
+ ret = mop500_get_acc_id();
+ if (ret < 0)
+ printk(KERN_ERR "Failed to get accelerometer chip ID\n");
+ else
+ lsm303dlh_pdata.chip_id = ret;
+
+ if (machine_is_snowball()) {
+ if (cpu_is_u8500v21())
+ /* This is ugly, but we can't know which address to use */
+ mop500_sensors_probe_add_lsm303dlh_a();
+ else /* Add the accelerometer with new addr */
+ mop500_sensors_i2c_add(2, snowball_i2c2_devices,
+ ARRAY_SIZE(snowball_i2c2_devices));
+ } else /* non-Snowball boards have the old address */
+ mop500_sensors_i2c_add(2, mop500_2_i2c2_devices,
+ ARRAY_SIZE(mop500_2_i2c2_devices));
+
+ mop500_sensors_i2c_add(2, mop500_i2c2_devices,
+ ARRAY_SIZE(mop500_i2c2_devices));
+ return 0;
+}
+
+module_init(mop500_sensors_init);
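Because the accelerometer moved from i2c address 0x18 to 0x19 on later UIB revisions, the init code above reads the chip ID at both addresses before registering the device. A small stand-alone sketch of that try-then-fall-back probing (plain C; read_who_am_i() is a hypothetical stand-in for the I2C_SMBUS_BYTE_DATA read of register 0x0F).

#include <stdio.h>
#include <stddef.h>

static int read_who_am_i(unsigned int addr)
{
	/* pretend only the device at 0x19 acknowledges */
	return addr == 0x19 ? 0x33 : -1;
}

static int probe_accelerometer(void)
{
	static const unsigned int addrs[] = { 0x18, 0x19 };

	for (size_t i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		int id = read_who_am_i(addrs[i]);

		if (id >= 0) {
			printf("accelerometer at 0x%02x, chip id 0x%02x\n",
			       addrs[i], id);
			return id;
		}
	}
	return -1;
}

int main(void)
{
	return probe_accelerometer() < 0;
}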
diff --git a/arch/arm/mach-ux500/board-mop500-stm.c b/arch/arm/mach-ux500/board-mop500-stm.c
new file mode 100644
index 00000000000..d38595b11b2
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-stm.c
@@ -0,0 +1,441 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson
+ *
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * Author: Olivier Germain <olivier.germain@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/regulator/consumer.h>
+
+#include <asm/mach-types.h>
+#include <plat/pincfg.h>
+#include <mach/devices.h>
+#include <asm/io.h>
+#include <trace/stm.h>
+#include "pins-db8500.h"
+
+#define HREFV60_SDMMC_EN_GPIO 169
+#define HREFV60_SDMMC_1V8_3V_GPIO 5
+
+#define STM_DEVICE (&u8500_stm_device.dev)
+#define STM_ERR(msg) dev_err(STM_DEVICE, msg)
+#define STM_WARN(msg) dev_warn(STM_DEVICE, msg)
+
+static struct regulator *regulator_aux3;
+static enum stm_connection_type
+ stm_current_connection = STM_STE_INVALID_CONNECTION;
+
+static pin_cfg_t mop500_stm_mipi34_pins[] = {
+ GPIO70_STMAPE_CLK | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO71_STMAPE_DAT3 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO72_STMAPE_DAT2 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO73_STMAPE_DAT1 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO74_STMAPE_DAT0 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO75_U2_RXD | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO76_U2_TXD | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+};
+
+static pin_cfg_t mop500_stm_mipi60_pins[] = {
+ GPIO153_U2_RXD,
+ GPIO154_U2_TXD,
+ GPIO155_STMAPE_CLK,
+ GPIO156_STMAPE_DAT3,
+ GPIO157_STMAPE_DAT2,
+ GPIO158_STMAPE_DAT1,
+ GPIO159_STMAPE_DAT0,
+};
+
+static pin_cfg_t mop500_stm_ape_microsd_pins[] = {
+ GPIO23_MS_CLK | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO24_MS_BS | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO25_MS_DAT0 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO26_MS_DAT1 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO27_MS_DAT2 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO28_MS_DAT3 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+};
+
+static pin_cfg_t mop500_ske_pins[] = {
+ GPIO153_KP_I7 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO154_KP_I6 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO155_KP_I5 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO156_KP_I4 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO161_KP_I3 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO162_KP_I2 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO163_KP_I1 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO164_KP_I0 | PIN_INPUT_PULLDOWN | PIN_SLPM_INPUT_PULLUP,
+ GPIO157_KP_O7 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO158_KP_O6 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO159_KP_O5 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO160_KP_O4 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO165_KP_O3 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO166_KP_O2 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO167_KP_O1 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO168_KP_O0 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+};
+
+static pin_cfg_t mop500_stm_modem_microsd_pins[] = {
+ GPIO18_GPIO | PIN_OUTPUT_LOW,
+ GPIO19_GPIO | PIN_OUTPUT_HIGH,
+ GPIO20_GPIO | PIN_OUTPUT_HIGH,
+ GPIO22_GPIO | PIN_INPUT_PULLUP,
+ GPIO23_STMMOD_CLK | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO24_UARTMOD_RXD | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO25_STMMOD_DAT0 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO26_STMMOD_DAT1 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO27_STMMOD_DAT2 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+ GPIO28_STMMOD_DAT3 | PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP,
+};
+
+/* sdi0 (removable MMC/SD/SDIO cards) */
+static pin_cfg_t mop500_sdi0_pins[] = {
+ GPIO18_MC0_CMDDIR | PIN_OUTPUT_HIGH,
+ GPIO19_MC0_DAT0DIR | PIN_OUTPUT_HIGH,
+ GPIO20_MC0_DAT2DIR | PIN_OUTPUT_HIGH,
+
+ GPIO22_MC0_FBCLK | PIN_INPUT_NOPULL,
+ GPIO23_MC0_CLK | PIN_OUTPUT_LOW,
+ GPIO24_MC0_CMD | PIN_INPUT_PULLUP,
+ GPIO25_MC0_DAT0 | PIN_INPUT_PULLUP,
+ GPIO26_MC0_DAT1 | PIN_INPUT_PULLUP,
+ GPIO27_MC0_DAT2 | PIN_INPUT_PULLUP,
+ GPIO28_MC0_DAT3 | PIN_INPUT_PULLUP,
+};
+
+static int stm_ste_disable_ape_on_mipi60(void)
+{
+ int retval;
+
+ retval = nmk_config_pins_sleep(ARRAY_AND_SIZE(mop500_stm_mipi60_pins));
+ if (retval)
+ STM_ERR("Failed to disable MIPI60\n");
+ else {
+ retval = nmk_config_pins(ARRAY_AND_SIZE(mop500_ske_pins));
+ if (retval)
+ STM_ERR("Failed to enable SKE gpio\n");
+ }
+ return retval;
+}
+
+static int stm_enable_ape_microsd(void)
+{
+ int retval;
+
+ /*
+ * Configure STM APE on GPIO23, GPIO25, GPIO26, GPIO27 and GPIO28.
+ * On HREF boards an external SD buffer (ST6G3244ME) performs level
+ * conversion from 1.8 V to 3.3 V on the SD card signals. When STM is
+ * redirected to the micro SD connector, GPIO18, GPIO19 and GPIO20 are
+ * configured in standard GPIO mode and are used to set the direction
+ * of the external SD buffer ST6G3244ME.
+ */
+
+ retval = nmk_config_pins(ARRAY_AND_SIZE(mop500_stm_ape_microsd_pins));
+ if (retval)
+ STM_ERR("Failed to enable STM APE on MICRO SD\n");
+
+ /* Enable altC1 on GPIO23-28 (STMAPE) */
+ prcmu_enable_stm_ape();
+
+ return retval;
+}
+
+static int stm_disable_ape_microsd(void)
+{
+ int retval;
+
+ /* Disable altC1 on GPIO23-28 (STMAPE) */
+ prcmu_disable_stm_ape();
+
+ /* Reconfigure GPIO for SD */
+ retval = nmk_config_pins_sleep(ARRAY_AND_SIZE(mop500_sdi0_pins));
+ if (retval)
+ STM_ERR("Failed to disable STM APE on MICRO SD "
+ "and to reconfigure GPIO for SD\n");
+
+ return retval;
+}
+
+static int stm_enable_modem_microsd(void)
+{
+ int retval;
+
+ /*
+ * Configure STM MODEM on GPIO23, GPIO25, GPIO26, GPIO27 and GPIO28.
+ * On HREF boards an external SD buffer (ST6G3244ME) performs level
+ * conversion from 1.8 V to 3.3 V on the SD card signals. When STM is
+ * redirected to the micro SD connector, GPIO18, GPIO19 and GPIO20 are
+ * configured in standard GPIO mode and are used to set the direction
+ * of the external SD buffer ST6G3244ME.
+ */
+
+ retval = nmk_config_pins(ARRAY_AND_SIZE(mop500_stm_modem_microsd_pins));
+ if (retval)
+ STM_ERR("Failed to enable STM MODEM on MICRO SD\n");
+
+ return retval;
+}
+
+static int stm_disable_modem_microsd(void)
+{
+ int retval;
+
+ /* Reconfigure GPIO for SD */
+ retval = nmk_config_pins_sleep(ARRAY_AND_SIZE(mop500_sdi0_pins));
+ if (retval)
+ STM_ERR("Failed to disable STM MODEM on MICRO SD "
+ "and to reconfigure GPIO for SD\n");
+
+ return retval;
+}
+
+/* Enable or disable micro sd card buffers on HREF */
+static void control_level_shifter_for_microsd(int gpio_dir)
+{
+ int gpio[2];
+
+ if (machine_is_hrefv60()) {
+ gpio[0] = HREFV60_SDMMC_EN_GPIO;
+ gpio[1] = HREFV60_SDMMC_1V8_3V_GPIO;
+ } else {
+ gpio[0] = MOP500_EGPIO(17);
+ gpio[1] = MOP500_EGPIO(18);
+ }
+
+ /* Select the default 2.9V and enable / disable level shifter */
+ gpio_direction_output(gpio[1], 0);
+ gpio_direction_output(gpio[0], gpio_dir);
+}
+
+/* Enable micro sd card buffers on HREF */
+static int enable_level_shifter_for_microsd(void)
+{
+ control_level_shifter_for_microsd(1);
+ STM_WARN("Level Shifter for SD card connector on.\n");
+ return 0;
+}
+
+/* Disable micro sd card buffers on HREF */
+static int disable_level_shifter_for_microsd(void)
+{
+ control_level_shifter_for_microsd(0);
+ STM_WARN("Level Shifter for SD card connector off.\n");
+ return 0;
+}
+
+/* Enable VAUX3 to power on buffer on STM MICRO SD cable */
+static int enable_vaux3_for_microsd_cable(void)
+{
+ int error;
+
+ regulator_aux3 = regulator_get(&u8500_stm_device.dev, "v-SD-STM");
+
+ if (IS_ERR(regulator_aux3)) {
+ error = PTR_ERR(regulator_aux3);
+ STM_ERR("Failed to get regulator, supply: v-SD-STM\n");
+ return error;
+ }
+
+ error = regulator_enable(regulator_aux3);
+
+ if (error) {
+ STM_ERR("Unable to enable regulator on SD card connector\n");
+ return error;
+ }
+
+ STM_WARN("Regulator on SD card connector power on.\n");
+ return error;
+}
+
+/* Disable VAUX3 to power off buffer on STM MICRO SD cable */
+static int disable_vaux3_for_microsd_cable(void)
+{
+ int error = 0;
+
+ error = regulator_disable(regulator_aux3);
+
+ if (regulator_aux3)
+ regulator_put(regulator_aux3);
+
+ STM_WARN("Regulator for stm on SD card connector power off.\n");
+
+ return error;
+
+}
+
+static int stm_ste_connection(enum stm_connection_type con_type)
+{
+ int retval = -EINVAL;
+
+ /* Check if connection type has been changed */
+ if (con_type == stm_current_connection)
+ return 0;
+
+ if (con_type != STM_DISCONNECT) {
+ /* Always enable MIPI34 GPIO pins */
+ retval = nmk_config_pins(
+ ARRAY_AND_SIZE(mop500_stm_mipi34_pins));
+ if (retval) {
+ STM_ERR("Failed to enable MIPI34\n");
+ goto stm_ste_connection_error;
+ }
+ }
+
+ switch (con_type) {
+ case STM_DEFAULT_CONNECTION:
+ case STM_STE_MODEM_ON_MIPI34_NONE_ON_MIPI60:
+ /* Enable altC3 on GPIO70-74 (STMMOD) & GPIO75-76 (UARTMOD) */
+ prcmu_enable_stm_mod_uart();
+ retval = stm_ste_disable_ape_on_mipi60();
+ break;
+
+ case STM_STE_APE_ON_MIPI34_NONE_ON_MIPI60:
+ /* Disable altC3 on GPIO70-74 (STMMOD) & GPIO75-76 (UARTMOD) */
+ prcmu_disable_stm_mod_uart();
+ retval = stm_ste_disable_ape_on_mipi60();
+ break;
+
+ case STM_STE_MODEM_ON_MIPI34_APE_ON_MIPI60:
+ /* Enable altC3 on GPIO70-74 (STMMOD) and GPIO75-76 (UARTMOD) */
+ prcmu_enable_stm_mod_uart();
+ /* Enable APE on MIPI60 */
+ retval = nmk_config_pins_sleep(ARRAY_AND_SIZE(mop500_ske_pins));
+ if (retval)
+ STM_ERR("Failed to disable SKE GPIO\n");
+ else {
+ retval = nmk_config_pins(
+ ARRAY_AND_SIZE(mop500_stm_mipi60_pins));
+ if (retval)
+ STM_ERR("Failed to enable MIPI60\n");
+ }
+ break;
+
+ case STM_STE_MODEM_ON_MICROSD:
+ /* Disable APE on micro SD */
+ retval = stm_disable_ape_microsd();
+ /* Enable modem on micro SD */
+ if (!retval)
+ retval = stm_enable_modem_microsd();
+ /* Enable SD card buffer and regulator on href */
+ if (!retval && (stm_current_connection
+ != STM_STE_APE_ON_MICROSD)) {
+ enable_level_shifter_for_microsd();
+ enable_vaux3_for_microsd_cable();
+ }
+ break;
+
+ case STM_STE_APE_ON_MICROSD:
+ /* Disable modem on micro SD */
+ retval = stm_disable_modem_microsd();
+ /* Enable ape on micro SD */
+ if (!retval)
+ retval = stm_enable_ape_microsd();
+ /* Enable SD card buffer and regulator on href */
+ if (!retval && (stm_current_connection
+ != STM_STE_MODEM_ON_MICROSD)) {
+ enable_level_shifter_for_microsd();
+ enable_vaux3_for_microsd_cable();
+ }
+ break;
+
+ case STM_DISCONNECT:
+ retval = nmk_config_pins_sleep(
+ ARRAY_AND_SIZE(mop500_stm_mipi34_pins));
+ if (retval)
+ STM_ERR("Failed to disable MIPI34\n");
+
+ retval = stm_ste_disable_ape_on_mipi60();
+ if (retval)
+ STM_ERR("Failed to disable MIPI60\n");
+
+ retval = stm_disable_modem_microsd();
+ if (retval)
+ STM_ERR("Failed to disable modem on microsd\n");
+
+ retval = stm_disable_ape_microsd();
+ if (retval)
+ STM_ERR("Failed to disable ape on microsd\n");
+ break;
+
+ default:
+ STM_ERR("Bad connection type\n");
+ goto stm_ste_connection_error;
+ }
+
+ /* Disable power for microsd */
+ if ((stm_current_connection == STM_STE_MODEM_ON_MICROSD)
+ || (stm_current_connection == STM_STE_APE_ON_MICROSD)) {
+ if ((con_type != STM_STE_MODEM_ON_MICROSD)
+ && (con_type != STM_STE_APE_ON_MICROSD)) {
+ disable_vaux3_for_microsd_cable();
+ disable_level_shifter_for_microsd();
+ }
+ }
+
+ stm_current_connection = con_type;
+
+stm_ste_connection_error:
+ return retval;
+}
+
+/* Possible STM sources (masters) on ux500 */
+enum stm_master {
+ STM_ARM0 = 0,
+ STM_ARM1 = 1,
+ STM_SVA = 2,
+ STM_SIA = 3,
+ STM_SIA_XP70 = 4,
+ STM_PRCMU = 5,
+ STM_MCSBAG = 9
+};
+
+#define STM_ENABLE_ARM0 BIT(STM_ARM0)
+#define STM_ENABLE_ARM1 BIT(STM_ARM1)
+#define STM_ENABLE_SVA BIT(STM_SVA)
+#define STM_ENABLE_SIA BIT(STM_SIA)
+#define STM_ENABLE_SIA_XP70 BIT(STM_SIA_XP70)
+#define STM_ENABLE_PRCMU BIT(STM_PRCMU)
+#define STM_ENABLE_MCSBAG BIT(STM_MCSBAG)
+
+/*
+ * These are the channels used by NMF, and some external software
+ * expects the NMF traces to be output on these channels.
+ * For legacy reasons, we need to reserve them.
+ */
+static const s16 stm_channels_reserved[] = {
+ 100, /* NMF MPCEE channel */
+ 101, /* NMF CM channel */
+ 151, /* NMF HOSTEE channel */
+};
+
+/* On Ux500 we have 2 consecutive STMs, hence 512 channels are available */
+static struct stm_platform_data stm_pdata = {
+ .regs_phys_base = U8500_STM_REG_BASE,
+ .channels_phys_base = U8500_STM_BASE,
+ .id_mask = 0x000fffff, /* Ignore revisions differences */
+ .channels_reserved = stm_channels_reserved,
+ .channels_reserved_sz = ARRAY_SIZE(stm_channels_reserved),
+ /* Enable all except MCSBAG */
+ .masters_enabled = STM_ENABLE_ARM0 | STM_ENABLE_ARM1 |
+ STM_ENABLE_SVA | STM_ENABLE_PRCMU |
+ STM_ENABLE_SIA | STM_ENABLE_SIA_XP70,
+ /* Provide function for MIPI34/MIPI60 STM connection */
+ .stm_connection = stm_ste_connection,
+};
+
+struct platform_device u8500_stm_device = {
+ .name = "stm",
+ .id = -1,
+ .dev = {
+ .platform_data = &stm_pdata,
+ },
+};
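The masters_enabled field is a plain bitmask keyed by the enum stm_master values defined just above it. A short stand-alone example of building and decoding that mask (the BIT() macro is written out here so it compiles outside the kernel; illustrative only).

#include <stdio.h>
#include <stddef.h>

#define BIT(n)	(1u << (n))

enum stm_master { ARM0 = 0, ARM1 = 1, SVA = 2, SIA = 3, SIA_XP70 = 4,
		  PRCMU = 5, MCSBAG = 9 };

static const char *const names[] = {
	[ARM0] = "ARM0", [ARM1] = "ARM1", [SVA] = "SVA", [SIA] = "SIA",
	[SIA_XP70] = "SIA_XP70", [PRCMU] = "PRCMU", [MCSBAG] = "MCSBAG",
};

int main(void)
{
	/* everything except MCSBAG, as in the platform data above */
	unsigned int masters = BIT(ARM0) | BIT(ARM1) | BIT(SVA) | BIT(SIA) |
			       BIT(SIA_XP70) | BIT(PRCMU);

	for (size_t m = 0; m < sizeof(names) / sizeof(names[0]); m++)
		if (names[m] && (masters & BIT(m)))
			printf("STM master enabled: %s\n", names[m]);

	return 0;
}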
diff --git a/arch/arm/mach-ux500/board-mop500-stuib.c b/arch/arm/mach-ux500/board-mop500-stuib.c
index 8c979770d87..3a01b119511 100644
--- a/arch/arm/mach-ux500/board-mop500-stuib.c
+++ b/arch/arm/mach-ux500/board-mop500-stuib.c
@@ -11,33 +11,70 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#ifdef CONFIG_U8500_FLASH
+#include <../drivers/staging/camera_flash/adp1653_plat.h>
+#endif
#include <linux/input/matrix_keypad.h>
#include <asm/mach-types.h>
#include "board-mop500.h"
-/* STMPE/SKE keypad use this key layout */
+/*
+ * ux500 keymaps
+ *
+ * Organized row-wise as on the UIB, starting at the top-left
+ *
+ * We support two key layouts, selected at build time. The first layout
+ * includes controls for power/volume and a few generic keys; the second
+ * contains the full numeric layout and enter/back/left buttons along
+ * with a "." (dot), specifically for connectivity testing.
+ */
static const unsigned int mop500_keymap[] = {
+#if defined(CONFIG_KEYLAYOUT_LAYOUT1)
KEY(2, 5, KEY_END),
- KEY(4, 1, KEY_POWER),
+ KEY(4, 1, KEY_HOME),
KEY(3, 5, KEY_VOLUMEDOWN),
- KEY(1, 3, KEY_3),
+ KEY(1, 3, KEY_EMAIL),
KEY(5, 2, KEY_RIGHT),
- KEY(5, 0, KEY_9),
+ KEY(5, 0, KEY_BACKSPACE),
KEY(0, 5, KEY_MENU),
KEY(7, 6, KEY_ENTER),
KEY(4, 5, KEY_0),
- KEY(6, 7, KEY_2),
+ KEY(6, 7, KEY_DOT),
KEY(3, 4, KEY_UP),
KEY(3, 3, KEY_DOWN),
KEY(6, 4, KEY_SEND),
KEY(6, 2, KEY_BACK),
KEY(4, 2, KEY_VOLUMEUP),
- KEY(5, 5, KEY_1),
+ KEY(5, 5, KEY_SPACE),
KEY(4, 3, KEY_LEFT),
+ KEY(3, 2, KEY_SEARCH),
+#elif defined(CONFIG_KEYLAYOUT_LAYOUT2)
+ KEY(2, 5, KEY_RIGHT),
+ KEY(4, 1, KEY_ENTER),
+ KEY(3, 5, KEY_MENU),
+ KEY(1, 3, KEY_3),
+ KEY(5, 2, KEY_6),
+ KEY(5, 0, KEY_9),
+
+ KEY(0, 5, KEY_UP),
+ KEY(7, 6, KEY_DOWN),
+ KEY(4, 5, KEY_0),
+ KEY(6, 7, KEY_2),
+ KEY(3, 4, KEY_5),
+ KEY(3, 3, KEY_8),
+
+ KEY(6, 4, KEY_LEFT),
+ KEY(6, 2, KEY_BACK),
+ KEY(4, 2, KEY_KPDOT),
+ KEY(5, 5, KEY_1),
+ KEY(4, 3, KEY_4),
KEY(3, 2, KEY_7),
+#else
+#warning "No keypad layout defined."
+#endif
};
static const struct matrix_keymap_data mop500_keymap_data = {
@@ -73,6 +110,24 @@ static struct i2c_board_info __initdata mop500_i2c0_devices_stuib[] = {
},
};
+#ifdef CONFIG_U8500_FLASH
+/*
+ * Config data for the flash
+ */
+static struct adp1653_platform_data __initdata adp1653_pdata_u8500_uib = {
+ .irq_no = CAMERA_FLASH_INT_PIN
+};
+#endif
+
+static struct i2c_board_info __initdata mop500_i2c2_devices_stuib[] = {
+#ifdef CONFIG_U8500_FLASH
+ {
+ I2C_BOARD_INFO("adp1653", 0x30),
+ .platform_data = &adp1653_pdata_u8500_uib
+ }
+#endif
+};
+
/*
* BU21013 ROHM touchscreen interface on the STUIBs
*/
@@ -111,6 +166,7 @@ static int bu21013_gpio_board_init(int reset_pin)
__func__);
return retval;
}
+ gpio_set_value_cansleep(reset_pin, 1);
}
return retval;
@@ -133,7 +189,8 @@ static int bu21013_gpio_board_exit(int reset_pin)
__func__);
return retval;
}
- gpio_set_value(reset_pin, 0);
+ gpio_set_value_cansleep(reset_pin, 0);
+ gpio_free(reset_pin);
}
bu21013_devices--;
@@ -157,9 +214,19 @@ static struct bu21013_platform_device tsc_plat_device = {
.irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
.touch_x_max = TOUCH_XMAX,
.touch_y_max = TOUCH_YMAX,
- .ext_clk = false,
- .x_flip = false,
- .y_flip = true,
+ .x_max_res = 480,
+ .y_max_res = 864,
+ .portrait = true,
+ .has_ext_clk = true,
+ .enable_ext_clk = false,
+#if defined(CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE) && \
+ CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE == 270
+ .x_flip = true,
+ .y_flip = false,
+#else
+ .x_flip = false,
+ .y_flip = true,
+#endif
};
static struct bu21013_platform_device tsc_plat2_device = {
@@ -169,18 +236,28 @@ static struct bu21013_platform_device tsc_plat2_device = {
.irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
.touch_x_max = TOUCH_XMAX,
.touch_y_max = TOUCH_YMAX,
- .ext_clk = false,
- .x_flip = false,
- .y_flip = true,
+ .x_max_res = 480,
+ .y_max_res = 864,
+ .portrait = true,
+ .has_ext_clk = true,
+ .enable_ext_clk = false,
+#if defined(CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE) && \
+ CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE == 270
+ .x_flip = true,
+ .y_flip = false,
+#else
+ .x_flip = false,
+ .y_flip = true,
+#endif
};
static struct i2c_board_info __initdata u8500_i2c3_devices_stuib[] = {
{
- I2C_BOARD_INFO("bu21013_tp", 0x5C),
+ I2C_BOARD_INFO("bu21013_ts", 0x5C),
.platform_data = &tsc_plat_device,
},
{
- I2C_BOARD_INFO("bu21013_tp", 0x5D),
+ I2C_BOARD_INFO("bu21013_ts", 0x5D),
.platform_data = &tsc_plat2_device,
},
@@ -191,15 +268,25 @@ void __init mop500_stuib_init(void)
if (machine_is_hrefv60()) {
tsc_plat_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
tsc_plat2_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
+#ifdef CONFIG_U8500_FLASH
+ adp1653_pdata_u8500_uib.enable_gpio =
+ HREFV60_CAMERA_FLASH_ENABLE;
+#endif
} else {
tsc_plat_device.cs_pin = GPIO_BU21013_CS;
tsc_plat2_device.cs_pin = GPIO_BU21013_CS;
-
+#ifdef CONFIG_U8500_FLASH
+ adp1653_pdata_u8500_uib.enable_gpio =
+ GPIO_CAMERA_FLASH_ENABLE;
+#endif
}
mop500_uib_i2c_add(0, mop500_i2c0_devices_stuib,
ARRAY_SIZE(mop500_i2c0_devices_stuib));
+ mop500_uib_i2c_add(2, mop500_i2c2_devices_stuib,
+ ARRAY_SIZE(mop500_i2c2_devices_stuib));
+
mop500_uib_i2c_add(3, u8500_i2c3_devices_stuib,
ARRAY_SIZE(u8500_i2c3_devices_stuib));
}
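For reference, the mop500_keymap entries above are just (row, column, keycode) triplets packed into one unsigned int each and consumed through matrix_keymap_data. Below is a stand-alone sketch of that packing, simplified relative to the in-kernel KEY() macro (which also masks the row and column against the matrix limits); keycode value taken from linux/input.h.

#include <stdio.h>

#define KEY(row, col, code)	(((row) << 24) | ((col) << 16) | (code))
#define KEY_ROW(k)		(((k) >> 24) & 0xff)
#define KEY_COL(k)		(((k) >> 16) & 0xff)
#define KEY_CODE(k)		((k) & 0xffff)

#define KEY_VOLUMEDOWN	114	/* from linux/input.h */

int main(void)
{
	unsigned int k = KEY(3, 5, KEY_VOLUMEDOWN);

	printf("row %u, col %u, keycode %u\n",
	       KEY_ROW(k), KEY_COL(k), KEY_CODE(k));
	return 0;
}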
diff --git a/arch/arm/mach-ux500/board-mop500-u8500uib.c b/arch/arm/mach-ux500/board-mop500-u8500uib.c
index feb5744d98b..d0631303a0b 100644
--- a/arch/arm/mach-ux500/board-mop500-u8500uib.c
+++ b/arch/arm/mach-ux500/board-mop500-u8500uib.c
@@ -8,11 +8,22 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#ifdef CONFIG_U8500_FLASH
+#include <../drivers/staging/camera_flash/adp1653_plat.h>
+#endif
#include <linux/gpio.h>
#include <linux/interrupt.h>
+#ifdef CONFIG_SENSORS_LSM303DLH
+#include <linux/lsm303dlh.h>
+#endif
+#ifdef CONFIG_SENSORS_L3G4200D
+#include <linux/l3g4200d.h>
+#endif
#include <linux/mfd/tc3589x.h>
#include <linux/input/matrix_keypad.h>
+#include <asm/mach-types.h>
+#include <linux/gpio.h>
#include <mach/irqs.h>
#include "board-mop500.h"
@@ -21,12 +32,28 @@
struct i2c_board_info __initdata __weak mop500_i2c3_devices_u8500[] = {
};
+#ifdef CONFIG_U8500_FLASH
+static struct adp1653_platform_data __initdata adp1653_pdata_u8500_uib = {
+ .irq_no = CAMERA_FLASH_INT_PIN
+};
+#endif
+
+static struct i2c_board_info __initdata mop500_i2c2_devices_u8500[] = {
+#ifdef CONFIG_U8500_FLASH
+ {
+ I2C_BOARD_INFO("adp1653", 0x30),
+ .platform_data = &adp1653_pdata_u8500_uib
+ }
+#endif
+};
+
+
/*
* TC35893
*/
static const unsigned int u8500_keymap[] = {
KEY(3, 1, KEY_END),
- KEY(4, 1, KEY_POWER),
+ KEY(4, 1, KEY_HOME),
KEY(6, 4, KEY_VOLUMEDOWN),
KEY(4, 2, KEY_EMAIL),
KEY(3, 3, KEY_RIGHT),
@@ -87,4 +114,15 @@ void __init mop500_u8500uib_init(void)
mop500_uib_i2c_add(0, mop500_i2c0_devices_u8500,
ARRAY_SIZE(mop500_i2c0_devices_u8500));
+#ifdef CONFIG_U8500_FLASH
+ if (machine_is_hrefv60())
+ adp1653_pdata_u8500_uib.enable_gpio =
+ HREFV60_CAMERA_FLASH_ENABLE;
+ else
+ adp1653_pdata_u8500_uib.enable_gpio =
+ GPIO_CAMERA_FLASH_ENABLE;
+#endif
+
+ mop500_uib_i2c_add(2, mop500_i2c2_devices_u8500,
+ ARRAY_SIZE(mop500_i2c2_devices_u8500));
}
diff --git a/arch/arm/mach-ux500/board-mop500-uib.c b/arch/arm/mach-ux500/board-mop500-uib.c
index 5af36aa56c0..908c5d973da 100644
--- a/arch/arm/mach-ux500/board-mop500-uib.c
+++ b/arch/arm/mach-ux500/board-mop500-uib.c
@@ -1,4 +1,5 @@
/*
+
* Copyright (C) ST-Ericsson SA 2010
*
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
@@ -10,13 +11,22 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+#include <linux/regulator/consumer.h>
#include <mach/hardware.h>
+#include <asm/mach-types.h>
+
+#include "pins.h"
#include "board-mop500.h"
enum mop500_uib {
STUIB,
U8500UIB,
+ U8500UIB_R3,
+ NO_UIB,
};
struct uib {
@@ -25,6 +35,8 @@ struct uib {
void (*init)(void);
};
+static u8 type_of_uib = NO_UIB;
+
static struct uib __initdata mop500_uibs[] = {
[STUIB] = {
.name = "ST-UIB",
@@ -36,9 +48,16 @@ static struct uib __initdata mop500_uibs[] = {
.option = "u8500uib",
.init = mop500_u8500uib_init,
},
+#ifdef CONFIG_TOUCHSCREEN_CYTTSP_SPI
+ [U8500UIB_R3] = {
+ .name = "U8500-UIBR3",
+ .option = "u8500uibr3",
+ .init = mop500_u8500uib_r3_init,
+ },
+#endif
};
-static struct uib *mop500_uib;
+static struct uib __initdata *mop500_uib;
static int __init mop500_uib_setup(char *str)
{
@@ -64,7 +83,7 @@ __setup("uib=", mop500_uib_setup);
* The UIBs are detected after the I2C host controllers are registered, so
* i2c_register_board_info() can't be used.
*/
-void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
+void mop500_uib_i2c_add(int busnum, struct i2c_board_info const *info,
unsigned n)
{
struct i2c_adapter *adap;
@@ -90,26 +109,137 @@ void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
static void __init __mop500_uib_init(struct uib *uib, const char *why)
{
pr_info("%s (%s)\n", uib->name, why);
+
+ if (strcmp("stuib", uib->option) == 0)
+ type_of_uib = STUIB;
+ else if (strcmp("u8500uib", uib->option) == 0)
+ type_of_uib = U8500UIB;
+ else if (strcmp("u8500uibr3", uib->option) == 0)
+ type_of_uib = U8500UIB_R3;
+
uib->init();
}
+int uib_is_stuib(void)
+{
+ return (type_of_uib == STUIB);
+}
+
+int uib_is_u8500uib(void)
+{
+ return (type_of_uib == U8500UIB);
+}
+
+int uib_is_u8500uibr3(void)
+{
+ return (type_of_uib == U8500UIB_R3);
+}
+
+
+#ifdef CONFIG_UX500_GPIO_KEYS
+static struct gpio_keys_button mop500_gpio_keys[] = {
+ {
+ .desc = "SFH7741 Proximity Sensor",
+ .type = EV_SW,
+ .code = SW_FRONT_PROXIMITY,
+ .active_low = 0,
+ .can_disable = 1,
+ },
+ {
+ .desc = "HED54XXU11 Hall Effect Sensor",
+ .type = EV_SW,
+ .code = SW_LID, /* FIXME arbitrary usage */
+ .active_low = 0,
+ .can_disable = 1,
+ }
+};
+
+static struct regulator *gpio_keys_regulator;
+static int mop500_gpio_keys_activate(struct device *dev);
+static void mop500_gpio_keys_deactivate(struct device *dev);
+
+static struct gpio_keys_platform_data mop500_gpio_keys_data = {
+ .buttons = mop500_gpio_keys,
+ .nbuttons = ARRAY_SIZE(mop500_gpio_keys),
+ .enable = mop500_gpio_keys_activate,
+ .disable = mop500_gpio_keys_deactivate,
+};
+
+static struct platform_device mop500_gpio_keys_device = {
+ .name = "gpio-keys",
+ .id = 0,
+ .dev = {
+ .platform_data = &mop500_gpio_keys_data,
+ },
+};
+
+static int mop500_gpio_keys_activate(struct device *dev)
+{
+ gpio_keys_regulator = regulator_get(&mop500_gpio_keys_device.dev,
+ "vcc");
+ if (IS_ERR(gpio_keys_regulator)) {
+ dev_err(&mop500_gpio_keys_device.dev, "no regulator\n");
+ return PTR_ERR(gpio_keys_regulator);
+ }
+ regulator_enable(gpio_keys_regulator);
+
+ /*
+ * Please be aware that the start-up time of the SFH7741 is
+ * 120 ms and during that time the output is undefined.
+ */
+
+ return 0;
+}
+
+static void mop500_gpio_keys_deactivate(struct device *dev)
+{
+ if (!IS_ERR(gpio_keys_regulator)) {
+ regulator_disable(gpio_keys_regulator);
+ regulator_put(gpio_keys_regulator);
+ }
+}
+
+static __init void mop500_gpio_keys_init(void)
+{
+ struct ux500_pins *gpio_keys_pins = ux500_pins_get("gpio-keys.0");
+
+ if (gpio_keys_pins == NULL) {
+ pr_err("gpio_keys: Fail to get pins\n");
+ return;
+ }
+
+ ux500_pins_enable(gpio_keys_pins);
+ if (type_of_uib == U8500UIB_R3)
+ mop500_gpio_keys[0].gpio = PIN_NUM(gpio_keys_pins->cfg[2]);
+ else
+ mop500_gpio_keys[0].gpio = PIN_NUM(gpio_keys_pins->cfg[0]);
+ mop500_gpio_keys[1].gpio = PIN_NUM(gpio_keys_pins->cfg[1]);
+}
+#else
+static inline void mop500_gpio_keys_init(void) { }
+#endif
+
+/* add any platform devices here - TODO */
+static struct platform_device *mop500_uib_platform_devs[] __initdata = {
+#ifdef CONFIG_UX500_GPIO_KEYS
+ &mop500_gpio_keys_device,
+#endif
+};
+
/*
* Detect the UIB attached based on the presence or absence of i2c devices.
*/
static int __init mop500_uib_init(void)
{
- struct uib *uib = mop500_uib;
+ struct uib *uib = mop500_uibs;
struct i2c_adapter *i2c0;
+ struct i2c_adapter *i2c3;
int ret;
- if (!cpu_is_u8500())
+ /* Snowball and non-U8500 CPUs don't have a UIB */
+ if (!cpu_is_u8500() || machine_is_snowball())
return -ENODEV;
- if (uib) {
- __mop500_uib_init(uib, "from uib= boot argument");
- return 0;
- }
-
i2c0 = i2c_get_adapter(0);
if (!i2c0) {
__mop500_uib_init(&mop500_uibs[STUIB],
@@ -121,14 +251,32 @@ static int __init mop500_uib_init(void)
ret = i2c_smbus_xfer(i2c0, 0x44, 0, I2C_SMBUS_WRITE, 0,
I2C_SMBUS_QUICK, NULL);
i2c_put_adapter(i2c0);
+ i2c3 = i2c_get_adapter(3);
+ if (!i2c3) {
+ __mop500_uib_init(&mop500_uibs[STUIB],
+ "fallback, could not get i2c3");
+ return -ENODEV;
+ }
- if (ret == 0)
- uib = &mop500_uibs[U8500UIB];
- else
- uib = &mop500_uibs[STUIB];
-
+ if (ret == 0) {
+ ret = i2c_smbus_xfer(i2c3, 0x4B, 0, I2C_SMBUS_WRITE, 0,
+ I2C_SMBUS_QUICK, NULL);
+ i2c_put_adapter(i2c3);
+ if (ret == 0)
+ uib = &mop500_uibs[U8500UIB];
+ else
+ uib = &mop500_uibs[U8500UIB_R3];
+ } else {
+ ret = i2c_smbus_xfer(i2c3, 0x5C, 0, I2C_SMBUS_WRITE, 0,
+ I2C_SMBUS_QUICK, NULL);
+ i2c_put_adapter(i2c3);
+ if (ret == 0)
+ uib = &mop500_uibs[STUIB];
+ }
__mop500_uib_init(uib, "detected");
-
+ mop500_gpio_keys_init();
+ platform_add_devices(mop500_uib_platform_devs,
+ ARRAY_SIZE(mop500_uib_platform_devs));
return 0;
}
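The detection above relies on SMBus quick-write probes: an address is considered present on a bus if a zero-length write is ACKed (0x44 on i2c0, then 0x4B or 0x5C on i2c3 to tell the UIB revisions apart). A minimal sketch of such a probe, using only the standard i2c_smbus_xfer() API (the helper name is illustrative):

#include <linux/errno.h>
#include <linux/i2c.h>

/* Return 1 if a device ACKs at @addr on bus @busnum, 0 if not, negative errno on error. */
static int __init probe_i2c_addr(int busnum, u16 addr)
{
	struct i2c_adapter *adap;
	int ret;

	adap = i2c_get_adapter(busnum);
	if (!adap)
		return -ENODEV;

	/* An SMBus quick write transfers no data; it only checks for an ACK. */
	ret = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, 0,
			     I2C_SMBUS_QUICK, NULL);
	i2c_put_adapter(adap);

	return ret == 0;
}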
diff --git a/arch/arm/mach-ux500/board-mop500-wlan.c b/arch/arm/mach-ux500/board-mop500-wlan.c
new file mode 100644
index 00000000000..46037ca76ae
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-wlan.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <plat/pincfg.h>
+#include <linux/clk.h>
+#include <mach/cw1200_plat.h>
+
+#include "pins.h"
+
+static void cw1200_release(struct device *dev);
+static int cw1200_power_ctrl(const struct cw1200_platform_data *pdata,
+ bool enable);
+static int cw1200_clk_ctrl(const struct cw1200_platform_data *pdata,
+ bool enable);
+
+static struct resource cw1200_href_resources[] = {
+ {
+ .start = 215,
+ .end = 215,
+ .flags = IORESOURCE_IO,
+ .name = "cw1200_reset",
+ },
+ {
+ .start = NOMADIK_GPIO_TO_IRQ(216),
+ .end = NOMADIK_GPIO_TO_IRQ(216),
+ .flags = IORESOURCE_IRQ,
+ .name = "cw1200_irq",
+ },
+};
+
+static struct resource cw1200_href60_resources[] = {
+ {
+ .start = 85,
+ .end = 85,
+ .flags = IORESOURCE_IO,
+ .name = "cw1200_reset",
+ },
+ {
+ .start = NOMADIK_GPIO_TO_IRQ(4),
+ .end = NOMADIK_GPIO_TO_IRQ(4),
+ .flags = IORESOURCE_IRQ,
+ .name = "cw1200_irq",
+ },
+};
+
+static struct resource cw1200_u9500_resources[] = {
+ {
+ .start = 85,
+ .end = 85,
+ .flags = IORESOURCE_IO,
+ .name = "cw1200_reset",
+ },
+ {
+ .start = NOMADIK_GPIO_TO_IRQ(144),
+ .end = NOMADIK_GPIO_TO_IRQ(144),
+ .flags = IORESOURCE_IRQ,
+ .name = "cw1200_irq",
+ },
+};
+
+static struct cw1200_platform_data cw1200_platform_data = {
+ .clk_ctrl = cw1200_clk_ctrl,
+};
+
+static struct platform_device cw1200_device = {
+ .name = "cw1200_wlan",
+ .dev = {
+ .platform_data = &cw1200_platform_data,
+ .release = cw1200_release,
+ .init_name = "cw1200_wlan",
+ },
+};
+
+const struct cw1200_platform_data *cw1200_get_platform_data(void)
+{
+ return &cw1200_platform_data;
+}
+EXPORT_SYMBOL_GPL(cw1200_get_platform_data);
+
+static int cw1200_pins_enable(bool enable)
+{
+ struct ux500_pins *pins = NULL;
+ int ret = 0;
+
+ pins = ux500_pins_get("sdi1");
+
+ if (!pins) {
+ printk(KERN_ERR "cw1200: Pins are not found. "
+ "Check platform data.\n");
+ return -ENOENT;
+ }
+
+ if (enable)
+ ret = ux500_pins_enable(pins);
+ else
+ ret = ux500_pins_disable(pins);
+
+ if (ret)
+ printk(KERN_ERR "cw1200: Pins can not be %s: %d.\n",
+ enable ? "enabled" : "disabled",
+ ret);
+
+ ux500_pins_put(pins);
+
+ return ret;
+}
+
+static int cw1200_power_ctrl(const struct cw1200_platform_data *pdata,
+ bool enable)
+{
+ static const char *vdd_name = "vdd";
+ struct regulator *vdd;
+ int ret = 0;
+
+ vdd = regulator_get(&cw1200_device.dev, vdd_name);
+ if (IS_ERR(vdd)) {
+ ret = PTR_ERR(vdd);
+ dev_warn(&cw1200_device.dev,
+ "%s: Failed to get regulator '%s': %d\n",
+ __func__, vdd_name, ret);
+ } else {
+ if (enable)
+ ret = regulator_enable(vdd);
+ else
+ ret = regulator_disable(vdd);
+
+ if (ret) {
+ dev_warn(&cw1200_device.dev,
+ "%s: Failed to %s regulator '%s': %d\n",
+ __func__, enable ? "enable" : "disable",
+ vdd_name, ret);
+ }
+ regulator_put(vdd);
+ }
+ return ret;
+}
+
+static int cw1200_clk_ctrl(const struct cw1200_platform_data *pdata,
+ bool enable)
+{
+ static const char *clock_name = "sys_clk_out";
+ struct clk *clk_dev;
+ int ret = 0;
+
+ clk_dev = clk_get(&cw1200_device.dev, clock_name);
+
+ if (IS_ERR(clk_dev)) {
+ ret = PTR_ERR(clk_dev);
+ dev_warn(&cw1200_device.dev,
+ "%s: Failed to get clk '%s': %d\n",
+ __func__, clock_name, ret);
+
+ } else {
+
+ if (enable)
+ ret = clk_enable(clk_dev);
+ else
+ clk_disable(clk_dev);
+
+ if (ret) {
+ dev_warn(&cw1200_device.dev,
+ "%s: Failed to %s clk enable: %d\n",
+ __func__, clock_name, ret);
+ }
+ }
+
+ return ret;
+}
+
+int __init mop500_wlan_init(void)
+{
+ int ret;
+
+ if (pins_for_u9500()) {
+ cw1200_device.num_resources = ARRAY_SIZE(cw1200_u9500_resources);
+ cw1200_device.resource = cw1200_u9500_resources;
+ } else if (machine_is_u8500() || machine_is_nomadik() || machine_is_snowball()) {
+ cw1200_device.num_resources = ARRAY_SIZE(cw1200_href_resources);
+ cw1200_device.resource = cw1200_href_resources;
+ } else if (machine_is_hrefv60()) {
+ cw1200_device.num_resources =
+ ARRAY_SIZE(cw1200_href60_resources);
+ cw1200_device.resource = cw1200_href60_resources;
+ } else {
+ dev_err(&cw1200_device.dev,
+ "Unsupported mach type %d "
+ "(check mach-types.h)\n",
+ __machine_arch_type);
+ return -ENOTSUPP;
+ }
+
+ if (machine_is_snowball())
+ cw1200_platform_data.mmc_id = "mmc2";
+ else
+ cw1200_platform_data.mmc_id = "mmc3";
+
+ cw1200_platform_data.reset = &cw1200_device.resource[0];
+ cw1200_platform_data.irq = &cw1200_device.resource[1];
+
+ cw1200_device.dev.release = cw1200_release;
+
+ if (machine_is_snowball())
+ cw1200_platform_data.power_ctrl = cw1200_power_ctrl;
+
+ ret = cw1200_pins_enable(true);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = platform_device_register(&cw1200_device);
+ if (ret)
+ cw1200_pins_enable(false);
+
+ return ret;
+}
+
+static void cw1200_release(struct device *dev)
+{
+ cw1200_pins_enable(false);
+}
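For reference, the WLAN driver side would fetch this board data through cw1200_get_platform_data() and drive the optional callbacks before touching the hardware. A minimal sketch under that assumption (the function below is hypothetical; the real cw1200 driver may sequence power and clock differently):

static int example_cw1200_powerup(void)
{
	const struct cw1200_platform_data *pdata = cw1200_get_platform_data();
	int ret = 0;

	/* power_ctrl is only filled in on Snowball, where VDD is gated. */
	if (pdata->power_ctrl)
		ret = pdata->power_ctrl(pdata, true);
	if (ret)
		return ret;

	/* Start the external sys_clk_out feeding the chip. */
	if (pdata->clk_ctrl)
		ret = pdata->clk_ctrl(pdata, true);

	return ret;
}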
diff --git a/arch/arm/mach-ux500/board-mop500-wlan.h b/arch/arm/mach-ux500/board-mop500-wlan.h
new file mode 100644
index 00000000000..c6788adc46f
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-wlan.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * U8500 board specific cw1200 (WLAN device) initialization.
+ *
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ */
+
+#ifndef __BOARD_MOP500_WLAN_H
+#define __BOARD_MOP500_WLAN_H
+
+int mop500_wlan_init(void);
+
+#endif
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 5c00712907d..269cb976d9e 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -15,22 +15,34 @@
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
#include <linux/amba/serial.h>
#include <linux/spi/spi.h>
+#ifdef CONFIG_HSI
+#include <linux/hsi/hsi.h>
+#endif
#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/ab8500.h>
#include <linux/mfd/tc3589x.h>
-#include <linux/mfd/tps6105x.h>
#include <linux/mfd/abx500/ab8500-gpio.h>
+#include <linux/regulator/fixed.h>
#include <linux/leds-lp5521.h>
#include <linux/input.h>
#include <linux/smsc911x.h>
#include <linux/gpio_keys.h>
#include <linux/delay.h>
-
+#include <linux/mfd/ab8500/denc.h>
+#ifdef CONFIG_STM_MSP_SPI
+#include <linux/spi/stm_msp.h>
+#endif
+#include <linux/leds_pwm.h>
+#include <linux/pwm_backlight.h>
+#include <linux/gpio/nomadik.h>
#include <linux/leds.h>
+#include <linux/mfd/abx500/ab8500-sysctrl.h>
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
@@ -38,23 +50,52 @@
#include <plat/i2c.h>
#include <plat/ste_dma40.h>
#include <plat/pincfg.h>
-#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+#include <mach/abx500-accdet.h>
+#endif
#include <mach/irqs.h>
+#include <mach/ste-dma40-db8500.h>
+#ifdef CONFIG_U8500_SIM_DETECT
+#include <mach/sim_detect.h>
+#endif
+#ifdef CONFIG_CRYPTO_DEV_UX500
+#include <mach/crypto-ux500.h>
+#endif
+#include <mach/pm.h>
+#ifdef CONFIG_AV8100
+#include <video/av8100.h>
+#endif
+
+#ifdef CONFIG_KEYBOARD_NOMADIK_SKE
+#include <plat/ske.h>
+#include "pins.h"
+#endif
#include "pins-db8500.h"
-#include "ste-dma40-db8500.h"
#include "devices-db8500.h"
#include "board-mop500.h"
#include "board-mop500-regulators.h"
+#include "board-ux500-usb.h"
+#include "board-mop500-bm.h"
+#if defined(CONFIG_CW1200) || defined(CONFIG_CW1200_MODULE)
+#include "board-mop500-wlan.h"
+#endif
+
+#ifdef CONFIG_AB8500_DENC
+static struct ab8500_denc_platform_data ab8500_denc_pdata = {
+ .ddr_enable = true,
+ .ddr_little_endian = false,
+};
+#endif
static struct gpio_led snowball_led_array[] = {
{
.name = "user_led",
- .default_trigger = "none",
+ .default_trigger = "heartbeat",
.gpio = 142,
},
};
@@ -72,85 +113,114 @@ static struct platform_device snowball_led_dev = {
};
static struct ab8500_gpio_platform_data ab8500_gpio_pdata = {
- .gpio_base = MOP500_AB8500_GPIO(0),
+ .gpio_base = AB8500_PIN_GPIO1,
.irq_base = MOP500_AB8500_VIR_GPIO_IRQ_BASE,
- /* config_reg is the initial configuration of ab8500 pins.
+ /*
+ * config_reg is the initial configuration of ab8500 pins.
* The pins can be configured as GPIO or alt functions based
* on value present in GpioSel1 to GpioSel6 and AlternatFunction
* register. This is the array of 7 configuration settings.
* These settings have to be decided at compile time. Below is the
* explanation of these settings:
- * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO
- * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO
- * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO
- * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO
- * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO
- * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO
+ * GpioSel1 = 0x0F => Pin GPIO1 (SysClkReq2)
+ * Pin GPIO2 (SysClkReq3)
+ * Pin GPIO3 (SysClkReq4)
+ * Pin GPIO4 (SysClkReq6) are configured as GPIO
+ * GpioSel2 = 0x9E => Pins GPIO10 to GPIO13 are configured as GPIO
+ * GpioSel3 = 0x80 => Pin GPIO24 (SysClkReq7) is configured as GPIO
+ * GpioSel4 = 0x01 => Pin GPIO25 (SysClkReq8) is configured as GPIO
+ * GpioSel5 = 0x78 => Pin GPIO36 (ApeSpiClk)
+ * Pin GPIO37 (ApeSpiCSn)
+ * Pin GPIO38 (ApeSpiDout)
+ * Pin GPIO39 (ApeSpiDin) are configured as GPIO
+ * GpioSel6 = 0x02 => Pin GPIO42 (SysClkReq5) is configured as GPIO
* AlternatFunction = 0x00 => If Pins GPIO10 to 13 are not configured
* as GPIO then this register selects the alternate functions
*/
- .config_reg = {0x00, 0x1E, 0x80, 0x01,
- 0x7A, 0x00, 0x00},
+ .config_reg = {0x0F, 0x9E, 0x80, 0x01, 0x78, 0x02, 0x00},
+
+ /*
+ * config_direction allows for the initial GPIO direction to
+ * be set. For Snowball we set GPIO26 to output.
+ */
+ .config_direction = {0x00, 0x00, 0x00, 0x02, 0x00, 0x00},
+
+ /*
+ * config_pullups allows for the initial configuration of the
+ * GPIO pullup/pulldown configuration.
+ */
+ .config_pullups = {0xE0, 0x01, 0x00, 0x00, 0x00, 0x00},
};
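/*
 * Illustrative sketch (not an existing API): the GpioSel values above are
 * plain bitmasks, where AB8500 GPIO n is controlled by bit (n - 1) % 8 of
 * config_reg[(n - 1) / 8] (i.e. GpioSel1..GpioSel7). For example, 0x9E in
 * GpioSel2 sets bits 1-4, which correspond to GPIO10-GPIO13 as described
 * in the comment.
 */
static inline void ab8500_gpiosel_pos(unsigned int pin,	/* 1-based GPIO number */
				      unsigned int *reg, unsigned int *bit)
{
	*reg = (pin - 1) / 8;	/* index into config_reg[] */
	*bit = (pin - 1) % 8;	/* bit within that GpioSel register */
}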
+static struct ab8500_sysctrl_platform_data ab8500_sysctrl_pdata = {
+ /*
+ * SysClkReq1RfClkBuf - SysClkReq8RfClkBuf
+ * The initial values should not be changed because of the way
+ * the system works today
+ */
+ .initial_req_buf_config
+ = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+};
+
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+static struct abx500_accdet_platform_data ab8500_accdet_pdata = {
+ .btn_keycode = KEY_MEDIA,
+ .accdet1_dbth = ACCDET1_TH_1200mV | ACCDET1_DB_70ms,
+ .accdet2122_th = ACCDET21_TH_1000mV | ACCDET22_TH_1000mV,
+ .video_ctrl_gpio = AB8500_PIN_GPIO35,
+};
+#endif
+
static struct gpio_keys_button snowball_key_array[] = {
{
- .gpio = 32,
- .type = EV_KEY,
- .code = KEY_1,
- .desc = "userpb",
+ .gpio = 32,
+ .type = EV_KEY,
+ .code = KEY_1,
+ .desc = "userpb",
.active_low = 1,
.debounce_interval = 50,
- .wakeup = 1,
+ .wakeup = 1,
},
{
- .gpio = 151,
- .type = EV_KEY,
- .code = KEY_2,
- .desc = "extkb1",
+ .gpio = 151,
+ .type = EV_KEY,
+ .code = KEY_2,
+ .desc = "extkb1",
.active_low = 1,
.debounce_interval = 50,
- .wakeup = 1,
+ .wakeup = 1,
},
{
- .gpio = 152,
- .type = EV_KEY,
- .code = KEY_3,
- .desc = "extkb2",
+ .gpio = 152,
+ .type = EV_KEY,
+ .code = KEY_3,
+ .desc = "extkb2",
.active_low = 1,
.debounce_interval = 50,
- .wakeup = 1,
+ .wakeup = 1,
},
{
- .gpio = 161,
- .type = EV_KEY,
- .code = KEY_4,
- .desc = "extkb3",
+ .gpio = 162,
+ .type = EV_KEY,
+ .code = KEY_5,
+ .desc = "extkb4",
.active_low = 1,
.debounce_interval = 50,
- .wakeup = 1,
- },
- {
- .gpio = 162,
- .type = EV_KEY,
- .code = KEY_5,
- .desc = "extkb4",
- .active_low = 1,
- .debounce_interval = 50,
- .wakeup = 1,
+ .wakeup = 1,
},
};
static struct gpio_keys_platform_data snowball_key_data = {
- .buttons = snowball_key_array,
+ .buttons = snowball_key_array,
.nbuttons = ARRAY_SIZE(snowball_key_array),
};
static struct platform_device snowball_key_dev = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
.platform_data = &snowball_key_data,
+ .pm_domain = &ux500_dev_power_domain,
}
};
@@ -176,21 +246,45 @@ static struct resource sbnet_res[] = {
};
static struct platform_device snowball_sbnet_dev = {
- .name = "smsc911x",
+ .name = "smsc911x",
.num_resources = ARRAY_SIZE(sbnet_res),
.resource = sbnet_res,
- .dev = {
+ .dev = {
.platform_data = &snowball_sbnet_cfg,
},
};
+#ifdef CONFIG_MODEM_U8500
+static struct platform_device u8500_modem_dev = {
+ .name = "u8500-modem",
+ .id = 0,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+#endif
+
static struct ab8500_platform_data ab8500_platdata = {
.irq_base = MOP500_AB8500_IRQ_BASE,
- .regulator_reg_init = ab8500_regulator_reg_init,
- .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init),
- .regulator = ab8500_regulators,
- .num_regulator = ARRAY_SIZE(ab8500_regulators),
+ .regulator = &ab8500_regulator_plat_data,
+#ifdef CONFIG_AB8500_DENC
+ .denc = &ab8500_denc_pdata,
+#endif
+ .battery = &ab8500_bm_data,
+ .charger = &ab8500_charger_plat_data,
+ .btemp = &ab8500_btemp_plat_data,
+ .fg = &ab8500_fg_plat_data,
+ .chargalg = &ab8500_chargalg_plat_data,
.gpio = &ab8500_gpio_pdata,
+ .usb = &abx500_usbgpio_plat_data,
+ .sysctrl = &ab8500_sysctrl_pdata,
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+ .accdet = &ab8500_accdet_pdata,
+#endif
+#ifdef CONFIG_PM
+ .pm_power_off = true,
+#endif
+ .thermal_time_out = 20, /* seconds */
};
static struct resource ab8500_resources[] = {
@@ -211,15 +305,196 @@ struct platform_device ab8500_device = {
.resource = ab8500_resources,
};
+#ifdef CONFIG_KEYBOARD_NOMADIK_SKE
+
/*
- * TPS61052
+ * Nomadik SKE keypad
*/
+#define ROW_PIN_I0 164
+#define ROW_PIN_I1 163
+#define ROW_PIN_I2 162
+#define ROW_PIN_I3 161
+#define ROW_PIN_I4 156
+#define ROW_PIN_I5 155
+#define ROW_PIN_I6 154
+#define ROW_PIN_I7 153
+#define COL_PIN_O0 168
+#define COL_PIN_O1 167
+#define COL_PIN_O2 166
+#define COL_PIN_O3 165
+#define COL_PIN_O4 160
+#define COL_PIN_O5 159
+#define COL_PIN_O6 158
+#define COL_PIN_O7 157
+
+static int ske_kp_rows[] = {
+ ROW_PIN_I0, ROW_PIN_I1, ROW_PIN_I2, ROW_PIN_I3,
+ ROW_PIN_I4, ROW_PIN_I5, ROW_PIN_I6, ROW_PIN_I7,
+};
+static int ske_kp_cols[] = {
+ COL_PIN_O0, COL_PIN_O1, COL_PIN_O2, COL_PIN_O3,
+ COL_PIN_O4, COL_PIN_O5, COL_PIN_O6, COL_PIN_O7,
+};
-static struct tps6105x_platform_data mop500_tps61052_data = {
- .mode = TPS6105X_MODE_VOLTAGE,
- .regulator_data = &tps61052_regulator,
+static bool ske_config;
+/*
+ * ske_set_gpio_row: request and set gpio rows
+ */
+static int ske_set_gpio_row(int gpio)
+{
+ int ret;
+
+ if (!ske_config) {
+ ret = gpio_request(gpio, "ske-kp");
+ if (ret < 0) {
+ pr_err("ske_set_gpio_row: gpio request failed\n");
+ return ret;
+ }
+ }
+
+ ret = gpio_direction_output(gpio, 1);
+ if (ret < 0) {
+ pr_err("ske_set_gpio_row: gpio direction failed\n");
+ gpio_free(gpio);
+ }
+
+ return ret;
+}
+
+/*
+ * ske_kp_init - enable the gpio configuration
+ */
+static int ske_kp_init(void)
+{
+ struct ux500_pins *pins;
+ int ret, i;
+
+ pins = ux500_pins_get("ske");
+ if (pins)
+ ux500_pins_enable(pins);
+
+ for (i = 0; i < SKE_KPD_MAX_ROWS; i++) {
+ ret = ske_set_gpio_row(ske_kp_rows[i]);
+ if (ret < 0) {
+ pr_err("ske_kp_init: failed init\n");
+ return ret;
+ }
+ }
+ if (!ske_config)
+ ske_config = true;
+
+ return 0;
+}
+
+static int ske_kp_exit(void)
+{
+ struct ux500_pins *pins;
+
+ pins = ux500_pins_get("ske");
+ if (pins)
+ ux500_pins_disable(pins);
+
+ return 0;
+}
+
+static const unsigned int mop500_ske_keymap[] = {
+#if defined(CONFIG_KEYLAYOUT_LAYOUT1)
+ KEY(2, 5, KEY_END),
+ KEY(4, 1, KEY_HOME),
+ KEY(3, 5, KEY_VOLUMEDOWN),
+ KEY(1, 3, KEY_EMAIL),
+ KEY(5, 2, KEY_RIGHT),
+ KEY(5, 0, KEY_BACKSPACE),
+
+ KEY(0, 5, KEY_MENU),
+ KEY(7, 6, KEY_ENTER),
+ KEY(4, 5, KEY_0),
+ KEY(6, 7, KEY_DOT),
+ KEY(3, 4, KEY_UP),
+ KEY(3, 3, KEY_DOWN),
+
+ KEY(6, 4, KEY_SEND),
+ KEY(6, 2, KEY_BACK),
+ KEY(4, 2, KEY_VOLUMEUP),
+ KEY(5, 5, KEY_SPACE),
+ KEY(4, 3, KEY_LEFT),
+ KEY(3, 2, KEY_SEARCH),
+#elif defined(CONFIG_KEYLAYOUT_LAYOUT2)
+ KEY(2, 5, KEY_RIGHT),
+ KEY(4, 1, KEY_ENTER),
+ KEY(3, 5, KEY_MENU),
+ KEY(1, 3, KEY_3),
+ KEY(5, 2, KEY_6),
+ KEY(5, 0, KEY_9),
+
+ KEY(0, 5, KEY_UP),
+ KEY(7, 6, KEY_DOWN),
+ KEY(4, 5, KEY_0),
+ KEY(6, 7, KEY_2),
+ KEY(3, 4, KEY_5),
+ KEY(3, 3, KEY_8),
+
+ KEY(6, 4, KEY_LEFT),
+ KEY(6, 2, KEY_BACK),
+ KEY(4, 2, KEY_KPDOT),
+ KEY(5, 5, KEY_1),
+ KEY(4, 3, KEY_4),
+ KEY(3, 2, KEY_7),
+#else
+#warning "No keypad layout defined."
+#endif
+};
+
+static struct matrix_keymap_data mop500_ske_keymap_data = {
+ .keymap = mop500_ske_keymap,
+ .keymap_size = ARRAY_SIZE(mop500_ske_keymap),
};
+
+
+static struct ske_keypad_platform_data mop500_ske_keypad_data = {
+ .init = ske_kp_init,
+ .exit = ske_kp_exit,
+ .gpio_input_pins = ske_kp_rows,
+ .gpio_output_pins = ske_kp_cols,
+ .keymap_data = &mop500_ske_keymap_data,
+ .no_autorepeat = true,
+ .krow = SKE_KPD_MAX_ROWS, /* 8x8 matrix */
+ .kcol = SKE_KPD_MAX_COLS,
+ .debounce_ms = 20, /* in timeout period */
+ .switch_delay = 200, /* in jiffies */
+};
+
+#endif
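/*
 * Each keymap entry above comes from the matrix-keypad KEY(row, col, keycode)
 * macro, which packs the three values into one 32-bit word (row and column in
 * the upper bytes, keycode in the low 16 bits). The decoder below is only a
 * sketch of that packing, assuming the standard layout.
 */
static void decode_keymap_entry(unsigned int entry, unsigned int *row,
				unsigned int *col, unsigned short *keycode)
{
	*row = (entry >> 24) & 0xff;
	*col = (entry >> 16) & 0xff;
	*keycode = entry & 0xffff;
}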
+
+
+#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
+/*
+ * GPIO-regulator wlan vbat data
+ */
+static struct fixed_voltage_config snowball_gpio_wlan_vbat_data = {
+ .supply_name = "WLAN-VBAT",
+ .gpio = SNOWBALL_EN_3V6_GPIO,
+ .microvolts = 3600000,
+ .enable_high = 1,
+ .init_data = &gpio_wlan_vbat_regulator,
+ .startup_delay = 3500, /* Startup time */
+};
+
+/*
+ * GPIO-regulator en 3v3 vbat data
+ */
+
+static struct fixed_voltage_config snowball_gpio_en_3v3_data = {
+ .supply_name = "EN-3V3",
+ .gpio = SNOWBALL_EN_3V3_ETH_GPIO,
+ .microvolts = 3300000,
+ .enable_high = 1,
+ .init_data = &gpio_en_3v3_regulator,
+ .startup_delay = 5000, /* 1200us according to data sheet */
+};
+#endif
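/*
 * The init_data objects referenced above (gpio_wlan_vbat_regulator and
 * gpio_en_3v3_regulator) live in board-mop500-regulators.c, which is not part
 * of this hunk. As a rough sketch, such init data would typically look like
 * the following; the consumer mapping shown is an assumption, not the board
 * file's actual contents.
 */
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply example_en_3v3_consumers[] = {
	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),	/* hypothetical consumer */
};

static struct regulator_init_data example_gpio_en_3v3_regulator = {
	.constraints = {
		.min_uV		= 3300000,
		.max_uV		= 3300000,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(example_en_3v3_consumers),
	.consumer_supplies	= example_en_3v3_consumers,
};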
+
/*
* TC35892
*/
@@ -241,53 +516,64 @@ static struct tc3589x_platform_data mop500_tc35892_data = {
};
static struct lp5521_led_config lp5521_pri_led[] = {
- [0] = {
- .chan_nr = 0,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [1] = {
- .chan_nr = 1,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [2] = {
- .chan_nr = 2,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
+ [0] = {
+ .chan_nr = 0,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [1] = {
+ .chan_nr = 1,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [2] = {
+ .chan_nr = 2,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
};
+#ifdef CONFIG_AV8100
+static struct av8100_platform_data av8100_plat_data = {
+ .irq = NOMADIK_GPIO_TO_IRQ(192),
+ .reset = MOP500_HDMI_RST_GPIO,
+ .inputclk_id = "sysclk2",
+ .regulator_pwr_id = "hdmi_1v8",
+ .alt_powerupseq = true,
+ .mclk_freq = 3, /* MCLK_RNG_31_38 */
+};
+#endif
+
static struct lp5521_platform_data __initdata lp5521_pri_data = {
- .label = "lp5521_pri",
- .led_config = &lp5521_pri_led[0],
- .num_channels = 3,
- .clock_mode = LP5521_CLOCK_EXT,
+ .label = "lp5521_pri",
+ .led_config = &lp5521_pri_led[0],
+ .num_channels = 3,
+ .clock_mode = LP5521_CLOCK_EXT,
};
static struct lp5521_led_config lp5521_sec_led[] = {
- [0] = {
- .chan_nr = 0,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [1] = {
- .chan_nr = 1,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
- [2] = {
- .chan_nr = 2,
- .led_current = 0x2f,
- .max_current = 0x5f,
- },
+ [0] = {
+ .chan_nr = 0,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [1] = {
+ .chan_nr = 1,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
+ [2] = {
+ .chan_nr = 2,
+ .led_current = 0x2f,
+ .max_current = 0x5f,
+ },
};
static struct lp5521_platform_data __initdata lp5521_sec_data = {
- .label = "lp5521_sec",
- .led_config = &lp5521_sec_led[0],
- .num_channels = 3,
- .clock_mode = LP5521_CLOCK_EXT,
+ .label = "lp5521_sec",
+ .led_config = &lp5521_sec_led[0],
+ .num_channels = 3,
+ .clock_mode = LP5521_CLOCK_EXT,
};
static struct i2c_board_info __initdata mop500_i2c0_devices[] = {
@@ -296,15 +582,26 @@ static struct i2c_board_info __initdata mop500_i2c0_devices[] = {
.irq = NOMADIK_GPIO_TO_IRQ(217),
.platform_data = &mop500_tc35892_data,
},
- /* I2C0 devices only available prior to HREFv60 */
+#ifdef CONFIG_AV8100
{
- I2C_BOARD_INFO("tps61052", 0x33),
- .platform_data = &mop500_tps61052_data,
+ I2C_BOARD_INFO("av8100", 0x70),
+ .platform_data = &av8100_plat_data,
},
+#endif
+ /* I2C0 devices only available prior to HREFv60 */
};
#define NUM_PRE_V60_I2C0_DEVICES 1
+static struct i2c_board_info __initdata snowball_i2c0_devices[] = {
+#ifdef CONFIG_AV8100
+ {
+ I2C_BOARD_INFO("av8100", 0x70),
+ .platform_data = &av8100_plat_data,
+ },
+#endif
+};
+
static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
{
/* lp5521 LED driver, 1st device */
@@ -345,13 +642,13 @@ static struct nmk_i2c_controller u8500_i2c##id##_data = { \
/*
* The board uses 4 i2c controllers, initialize all of
* them with slave data setup time of 250 ns,
- * Tx & Rx FIFO threshold values as 8 and standard
+ * Tx & Rx FIFO threshold values as 1 and standard
* mode of operation
*/
-U8500_I2C_CONTROLLER(0, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
-U8500_I2C_CONTROLLER(1, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
-U8500_I2C_CONTROLLER(2, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
-U8500_I2C_CONTROLLER(3, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(0, 0xe, 1, 8, 400000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(1, 0xe, 1, 8, 400000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(2, 0xe, 1, 8, 400000, 200, I2C_FREQ_MODE_FAST);
+U8500_I2C_CONTROLLER(3, 0xe, 1, 8, 400000, 200, I2C_FREQ_MODE_FAST);
static void __init mop500_i2c_init(void)
{
@@ -361,60 +658,269 @@ static void __init mop500_i2c_init(void)
db8500_add_i2c3(&u8500_i2c3_data);
}
-static struct gpio_keys_button mop500_gpio_keys[] = {
- {
- .desc = "SFH7741 Proximity Sensor",
- .type = EV_SW,
- .code = SW_FRONT_PROXIMITY,
- .active_low = 0,
- .can_disable = 1,
- }
+#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
+static struct platform_device snowball_gpio_wlan_vbat_regulator_device = {
+ .name = "reg-fixed-voltage",
+ .id = 0,
+ .dev = {
+ .platform_data = &snowball_gpio_wlan_vbat_data,
+ },
+};
+
+static struct platform_device snowball_gpio_en_3v3_regulator_device = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &snowball_gpio_en_3v3_data,
+ },
+};
+#endif
+
+#ifdef CONFIG_LEDS_PWM
+static struct led_pwm pwm_leds_data[] = {
+ [0] = {
+ .name = "lcd-backlight",
+ .pwm_id = 1,
+ .max_brightness = 255,
+ .lth_brightness = 90,
+ .pwm_period_ns = 1023,
+ },
+ [1] = {
+ .name = "sec-lcd-backlight",
+ .pwm_id = 2,
+ .max_brightness = 255,
+ .lth_brightness = 90,
+ .pwm_period_ns = 1023,
+ },
+};
+
+static struct led_pwm_platform_data u8500_leds_data = {
+ .num_leds = 1,
+ .leds = pwm_leds_data,
+};
+
+static struct platform_device ux500_leds_device = {
+ .name = "leds_pwm",
+ .dev = {
+ .platform_data = &u8500_leds_data,
+ },
+};
+#endif
+
+#ifdef CONFIG_BACKLIGHT_PWM
+static struct platform_pwm_backlight_data u8500_backlight_data[] = {
+ [0] = {
+ .pwm_id = 1,
+ .max_brightness = 255,
+ .dft_brightness = 200,
+ .lth_brightness = 90,
+ .pwm_period_ns = 1023,
+ },
+ [1] = {
+ .pwm_id = 2,
+ .max_brightness = 255,
+ .dft_brightness = 200,
+ .lth_brightness = 90,
+ .pwm_period_ns = 1023,
+ },
};
-static struct regulator *prox_regulator;
-static int mop500_prox_activate(struct device *dev);
-static void mop500_prox_deactivate(struct device *dev);
+static struct platform_device ux500_backlight_device[] = {
+ [0] = {
+ .name = "pwm-backlight",
+ .id = 0,
+ .dev = {
+ .platform_data = &u8500_backlight_data[0],
+ },
+ },
+ [1] = {
+ .name = "pwm-backlight",
+ .id = 1,
+ .dev = {
+ .platform_data = &u8500_backlight_data[1],
+ },
+ },
+};
+#endif
-static struct gpio_keys_platform_data mop500_gpio_keys_data = {
- .buttons = mop500_gpio_keys,
- .nbuttons = ARRAY_SIZE(mop500_gpio_keys),
- .enable = mop500_prox_activate,
- .disable = mop500_prox_deactivate,
+/* Force feedback vibrator device */
+static struct platform_device ste_ff_vibra_device = {
+ .name = "ste_ff_vibra"
};
-static struct platform_device mop500_gpio_keys_device = {
- .name = "gpio-keys",
+#ifdef CONFIG_HSI
+static struct hsi_board_info __initdata u8500_hsi_devices[] = {
+ {
+ .name = "hsi_char",
+ .hsi_id = 0,
+ .port = 0,
+ .tx_cfg = {
+ .mode = HSI_MODE_FRAME,
+ .channels = 1,
+ .speed = 200000,
+ .ch_prio = {},
+ {.arb_mode = HSI_ARB_RR},
+ },
+ .rx_cfg = {
+ .mode = HSI_MODE_FRAME,
+ .channels = 1,
+ .speed = 200000,
+ .ch_prio = {},
+ {.flow = HSI_FLOW_SYNC},
+ },
+ },
+ {
+ .name = "hsi_test",
+ .hsi_id = 0,
+ .port = 0,
+ .tx_cfg = {
+ .mode = HSI_MODE_FRAME,
+ .channels = 2,
+ .speed = 100000,
+ .ch_prio = {},
+ {.arb_mode = HSI_ARB_RR},
+ },
+ .rx_cfg = {
+ .mode = HSI_MODE_FRAME,
+ .channels = 2,
+ .speed = 200000,
+ .ch_prio = {},
+ {.flow = HSI_FLOW_SYNC},
+ },
+ },
+ {
+ .name = "cfhsi_v3_driver",
+ .hsi_id = 0,
+ .port = 0,
+ .tx_cfg = {
+ .mode = HSI_MODE_STREAM,
+ .channels = 2,
+ .speed = 20000,
+ .ch_prio = {},
+ {.arb_mode = HSI_ARB_RR},
+ },
+ .rx_cfg = {
+ .mode = HSI_MODE_STREAM,
+ .channels = 2,
+ .speed = 200000,
+ .ch_prio = {},
+ {.flow = HSI_FLOW_SYNC},
+ },
+ },
+};
+#endif
+
+#ifdef CONFIG_U8500_SIM_DETECT
+static struct sim_detect_platform_data sim_detect_pdata = {
+ .irq_num = MOP500_AB8500_VIR_GPIO_IRQ(6),
+};
+struct platform_device u8500_sim_detect_device = {
+ .name = "sim-detect",
.id = 0,
.dev = {
- .platform_data = &mop500_gpio_keys_data,
+ .platform_data = &sim_detect_pdata,
},
};
+#endif
-static int mop500_prox_activate(struct device *dev)
-{
- prox_regulator = regulator_get(&mop500_gpio_keys_device.dev,
- "vcc");
- if (IS_ERR(prox_regulator)) {
- dev_err(&mop500_gpio_keys_device.dev,
- "no regulator\n");
- return PTR_ERR(prox_regulator);
+#ifdef CONFIG_CRYPTO_DEV_UX500
+static struct cryp_platform_data u8500_cryp1_platform_data = {
+ .mem_to_engine = {
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV48_CAC1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+ },
+ .engine_to_mem = {
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV48_CAC1_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
}
- regulator_enable(prox_regulator);
- return 0;
-}
+};
-static void mop500_prox_deactivate(struct device *dev)
-{
- regulator_disable(prox_regulator);
- regulator_put(prox_regulator);
-}
+static struct stedma40_chan_cfg u8500_hash_dma_cfg_tx = {
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV50_HAC1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_16,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_16,
+};
+
+static struct hash_platform_data u8500_hash1_platform_data = {
+ .mem_to_engine = &u8500_hash_dma_cfg_tx,
+ .dma_filter = stedma40_filter,
+};
+#endif
/* add any platform devices here - TODO */
static struct platform_device *mop500_platform_devs[] __initdata = {
- &mop500_gpio_keys_device,
- &ab8500_device,
+#ifdef CONFIG_U8500_SIM_DETECT
+ &u8500_sim_detect_device,
+#endif
+ &u8500_shrm_device,
+ &ste_ff_vibra_device,
+#ifdef CONFIG_U8500_MMIO
+ &ux500_mmio_device,
+#endif
+ &ux500_hwmem_device,
+#ifdef CONFIG_FB_MCDE
+ &u8500_mcde_device,
+#endif
+ &u8500_b2r2_device,
+ &u8500_thsens_device,
+#ifdef CONFIG_STE_TRACE_MODEM
+ &u8500_trace_modem,
+#endif
+#ifdef CONFIG_LEDS_PWM
+ &ux500_leds_device,
+#endif
+#ifdef CONFIG_BACKLIGHT_PWM
+ &ux500_backlight_device[0],
+ &ux500_backlight_device[1],
+#endif
+#ifdef CONFIG_DB8500_MLOADER
+ &mloader_fw_device,
+#endif
+#ifdef CONFIG_HSI
+ &u8500_hsi_device,
+#endif
+#ifdef CONFIG_MODEM_U8500
+ &u8500_modem_dev,
+#endif
};
+#ifdef CONFIG_STM_MSP_SPI
+/*
+ * MSP-SPI
+ */
+
+#define NUM_MSP_CLIENTS 10
+
+static struct stm_msp_controller mop500_msp2_spi_data = {
+ .id = 2,
+ .num_chipselect = NUM_MSP_CLIENTS,
+ .base_addr = U8500_MSP2_BASE,
+ .device_name = "msp2",
+};
+
+/*
+ * SSP
+ */
+
+#define NUM_SSP_CLIENTS 10
+
#ifdef CONFIG_STE_DMA40
static struct stedma40_chan_cfg ssp0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
@@ -436,27 +942,33 @@ static struct stedma40_chan_cfg ssp0_dma_cfg_tx = {
#endif
static struct pl022_ssp_controller ssp0_platform_data = {
- .bus_id = 0,
+ .bus_id = 4,
#ifdef CONFIG_STE_DMA40
.enable_dma = 1,
.dma_filter = stedma40_filter,
.dma_rx_param = &ssp0_dma_cfg_rx,
.dma_tx_param = &ssp0_dma_cfg_tx,
-#else
- .enable_dma = 0,
#endif
/* on this platform, gpio 31,142,144,214 &
* 224 are connected as chip selects
*/
- .num_chipselect = 5,
+ .num_chipselect = NUM_SSP_CLIENTS,
};
+
static void __init mop500_spi_init(void)
{
db8500_add_ssp0(&ssp0_platform_data);
+ if (!machine_is_snowball())
+ db8500_add_msp2_spi(&mop500_msp2_spi_data);
}
+#else
+static void __init mop500_spi_init(void)
+{
+}
+#endif /* CONFIG_STM_MSP_SPI */
-#ifdef CONFIG_STE_DMA40
+#ifdef CONFIG_STE_DMA40_REMOVE
static struct stedma40_chan_cfg uart0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
@@ -522,6 +1034,7 @@ static pin_cfg_t mop500_pins_uart0[] = {
#define PRCC_K_SOFTRST_SET 0x18
#define PRCC_K_SOFTRST_CLEAR 0x1C
+/* pl011 reset */
static void ux500_uart0_reset(void)
{
void __iomem *prcc_rst_set, *prcc_rst_clr;
@@ -560,8 +1073,10 @@ static void ux500_uart0_exit(void)
pr_err("pl011: uart pins_disable failed\n");
}
+
+
static struct amba_pl011_data uart0_plat = {
-#ifdef CONFIG_STE_DMA40
+#ifdef CONFIG_STE_DMA40_REMOVE
.dma_filter = stedma40_filter,
.dma_rx_param = &uart0_dma_cfg_rx,
.dma_tx_param = &uart0_dma_cfg_tx,
@@ -572,7 +1087,7 @@ static struct amba_pl011_data uart0_plat = {
};
static struct amba_pl011_data uart1_plat = {
-#ifdef CONFIG_STE_DMA40
+#ifdef CONFIG_STE_DMA40_REMOVE
.dma_filter = stedma40_filter,
.dma_rx_param = &uart1_dma_cfg_rx,
.dma_tx_param = &uart1_dma_cfg_tx,
@@ -580,7 +1095,7 @@ static struct amba_pl011_data uart1_plat = {
};
static struct amba_pl011_data uart2_plat = {
-#ifdef CONFIG_STE_DMA40
+#ifdef CONFIG_STE_DMA40_REMOVE
.dma_filter = stedma40_filter,
.dma_rx_param = &uart2_dma_cfg_rx,
.dma_tx_param = &uart2_dma_cfg_tx,
@@ -594,23 +1109,44 @@ static void __init mop500_uart_init(void)
db8500_add_uart2(&uart2_plat);
}
+static void __init u8500_cryp1_hash1_init(void)
+{
+#ifdef CONFIG_CRYPTO_DEV_UX500
+ db8500_add_cryp1(&u8500_cryp1_platform_data);
+ db8500_add_hash1(&u8500_hash1_platform_data);
+#endif
+}
+
static struct platform_device *snowball_platform_devs[] __initdata = {
+ &ux500_hwmem_device,
&snowball_led_dev,
&snowball_key_dev,
+#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
+ &snowball_gpio_en_3v3_regulator_device,
+ &snowball_gpio_wlan_vbat_regulator_device,
+#endif
&snowball_sbnet_dev,
- &ab8500_device,
+#ifdef CONFIG_FB_MCDE
+ &u8500_mcde_device,
+#endif
+ &u8500_b2r2_device,
};
static void __init mop500_init_machine(void)
{
- int i2c0_devs;
-
- mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR;
-
u8500_init_devices();
mop500_pins_init();
+ mop500_regulator_init();
+
+ u8500_cryp1_hash1_init();
+
+#ifdef CONFIG_HSI
+ hsi_register_board_info(u8500_hsi_devices,
+ ARRAY_SIZE(u8500_hsi_devices));
+#endif
+
platform_add_devices(mop500_platform_devs,
ARRAY_SIZE(mop500_platform_devs));
@@ -618,12 +1154,23 @@ static void __init mop500_init_machine(void)
mop500_sdi_init();
mop500_spi_init();
mop500_uart_init();
+#ifdef CONFIG_STM_MSP_SPI
+ mop500_msp_init();
+#endif
+#if defined(CONFIG_CW1200) || defined(CONFIG_CW1200_MODULE)
+ mop500_wlan_init();
+#endif
+
+#ifdef CONFIG_KEYBOARD_NOMADIK_SKE
+ db8500_add_ske_keypad(&mop500_ske_keypad_data);
+#endif
- i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
+ platform_device_register(&ab8500_device);
- i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
+ i2c_register_board_info(0, mop500_i2c0_devices,
+ ARRAY_SIZE(mop500_i2c0_devices));
i2c_register_board_info(2, mop500_i2c2_devices,
- ARRAY_SIZE(mop500_i2c2_devices));
+ ARRAY_SIZE(mop500_i2c2_devices));
/* This board has full regulator constraints */
regulator_has_full_constraints();
@@ -631,12 +1178,19 @@ static void __init mop500_init_machine(void)
static void __init snowball_init_machine(void)
{
- int i2c0_devs;
-
u8500_init_devices();
snowball_pins_init();
+ mop500_regulator_init();
+
+ u8500_cryp1_hash1_init();
+
+#ifdef CONFIG_HSI
+ hsi_register_board_info(u8500_hsi_devices,
+ ARRAY_SIZE(u8500_hsi_devices));
+#endif
+
platform_add_devices(snowball_platform_devs,
ARRAY_SIZE(snowball_platform_devs));
@@ -644,11 +1198,17 @@ static void __init snowball_init_machine(void)
snowball_sdi_init();
mop500_spi_init();
mop500_uart_init();
+#ifdef CONFIG_STM_MSP_SPI
+ mop500_msp_init();
+#endif
+#if defined(CONFIG_CW1200) || defined(CONFIG_CW1200_MODULE)
+ mop500_wlan_init();
+#endif
- i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
- i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
- i2c_register_board_info(2, mop500_i2c2_devices,
- ARRAY_SIZE(mop500_i2c2_devices));
+ platform_device_register(&ab8500_device);
+
+ i2c_register_board_info(0, snowball_i2c0_devices,
+ ARRAY_SIZE(snowball_i2c0_devices));
/* This board has full regulator constraints */
regulator_has_full_constraints();
@@ -656,19 +1216,37 @@ static void __init snowball_init_machine(void)
static void __init hrefv60_init_machine(void)
{
- int i2c0_devs;
-
/*
* The HREFv60 board removed a GPIO expander and routed
* all these GPIO pins to the internal GPIO controller
* instead.
*/
- mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO;
+
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+ /*
+ * On boards hrefpv60 and later, the accessory insertion/removal,
+ * button press/release are inverted.
+ */
+ ab8500_accdet_pdata.is_detection_inverted = true;
+#endif
u8500_init_devices();
hrefv60_pins_init();
+ mop500_regulator_init();
+
+ u8500_cryp1_hash1_init();
+
+#ifdef CONFIG_HSI
+ hsi_register_board_info(u8500_hsi_devices,
+ ARRAY_SIZE(u8500_hsi_devices));
+#endif
+#ifdef CONFIG_LEDS_PWM
+ if (uib_is_stuib())
+ u8500_leds_data.num_leds = 2;
+#endif
+
platform_add_devices(mop500_platform_devs,
ARRAY_SIZE(mop500_platform_devs));
@@ -676,14 +1254,23 @@ static void __init hrefv60_init_machine(void)
hrefv60_sdi_init();
mop500_spi_init();
mop500_uart_init();
+#ifdef CONFIG_STM_MSP_SPI
+ mop500_msp_init();
+#endif
+#if defined(CONFIG_CW1200) || defined(CONFIG_CW1200_MODULE)
+ mop500_wlan_init();
+#endif
- i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
+#ifdef CONFIG_KEYBOARD_NOMADIK_SKE
+ db8500_add_ske_keypad(&mop500_ske_keypad_data);
+#endif
- i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
+ platform_device_register(&ab8500_device);
- i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
+ i2c_register_board_info(0, mop500_i2c0_devices,
+ ARRAY_SIZE(mop500_i2c0_devices));
i2c_register_board_info(2, mop500_i2c2_devices,
- ARRAY_SIZE(mop500_i2c2_devices));
+ ARRAY_SIZE(mop500_i2c2_devices));
/* This board has full regulator constraints */
regulator_has_full_constraints();
@@ -698,6 +1285,7 @@ MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = mop500_init_machine,
+ .restart = ux500_restart,
MACHINE_END
MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
@@ -707,9 +1295,10 @@ MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = hrefv60_init_machine,
+ .restart = ux500_restart,
MACHINE_END
-MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
+MACHINE_START(SNOWBALL, "ST-Ericsson Snowball platform")
.atag_offset = 0x100,
.map_io = u8500_map_io,
.init_irq = ux500_init_irq,
@@ -717,4 +1306,5 @@ MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = snowball_init_machine,
+ .restart = ux500_restart,
MACHINE_END
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index f926d3db620..5786f49fbf8 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -7,6 +7,10 @@
#ifndef __BOARD_MOP500_H
#define __BOARD_MOP500_H
+/* This defines the NOMADIK_NR_GPIO */
+#include <linux/mfd/abx500/ab8500-gpio.h>
+#include <mach/gpio.h>
+
/* Snowball specific GPIO assignments, this board has no GPIO expander */
#define SNOWBALL_ACCEL_INT1_GPIO 163
#define SNOWBALL_ACCEL_INT2_GPIO 164
@@ -41,7 +45,6 @@
#define CYPRESS_SLAVE_SELECT_GPIO 216
/* GPIOs on the TC35892 expander */
-#define MOP500_EGPIO(x) (NOMADIK_NR_GPIO + (x))
#define GPIO_MAGNET_DRDY MOP500_EGPIO(1)
#define GPIO_SDMMC_CD MOP500_EGPIO(3)
#define GPIO_CAMERA_FLASH_ENABLE MOP500_EGPIO(4)
@@ -55,7 +58,6 @@
#define MOP500_DISP1_RST_GPIO MOP500_EGPIO(15)
#define GPIO_SDMMC_EN MOP500_EGPIO(17)
#define GPIO_SDMMC_1V8_3V_SEL MOP500_EGPIO(18)
-#define MOP500_EGPIO_END MOP500_EGPIO(24)
/*
* GPIOs on the AB8500 mixed-signals circuit
@@ -63,7 +65,7 @@
* because the AB8500 GPIO pins are numbered starting from 1, so the value in
* parens matches the GPIO pin number in the data sheet.
*/
-#define MOP500_AB8500_GPIO(x) (MOP500_EGPIO_END + (x) - 1)
+#define MOP500_AB8500_PIN_GPIO(x) (MOP500_EGPIO_END + (x) - 1)
/*Snowball AB8500 GPIO */
#define SNOWBALL_VSMPS2_1V8_GPIO MOP500_AB8500_PIN_GPIO(1) /* SYSCLKREQ2/GPIO1 */
#define SNOWBALL_PM_GPIO1_GPIO MOP500_AB8500_PIN_GPIO(2) /* SYSCLKREQ3/GPIO2 */
@@ -81,11 +83,21 @@ extern void hrefv60_sdi_init(void);
extern void mop500_sdi_tc35892_init(void);
void __init mop500_u8500uib_init(void);
void __init mop500_stuib_init(void);
+void __init mop500_msp_init(void);
void __init mop500_pins_init(void);
void __init hrefv60_pins_init(void);
void __init snowball_pins_init(void);
+void mop500_cyttsp_init(void);
+void __init mop500_u8500uib_r3_init(void);
-void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
+void mop500_uib_i2c_add(int busnum, struct i2c_board_info const *info,
unsigned n);
+int msp13_i2s_init(void);
+int msp13_i2s_exit(void);
+
+int uib_is_stuib(void);
+int uib_is_u8500uib(void);
+int uib_is_u8500uibr3(void);
+
#endif
diff --git a/arch/arm/mach-ux500/board-pins-sleep-force.c b/arch/arm/mach-ux500/board-pins-sleep-force.c
new file mode 100644
index 00000000000..91eb1cefc22
--- /dev/null
+++ b/arch/arm/mach-ux500/board-pins-sleep-force.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/string.h>
+
+#include <linux/gpio/nomadik.h>
+#include <mach/hardware.h>
+
+#include "board-pins-sleep-force.h"
+#include "pins-db8500.h"
+#include "pins.h"
+
+static u32 u8500_gpio_banks[] = {U8500_GPIOBANK0_BASE,
+ U8500_GPIOBANK1_BASE,
+ U8500_GPIOBANK2_BASE,
+ U8500_GPIOBANK3_BASE,
+ U8500_GPIOBANK4_BASE,
+ U8500_GPIOBANK5_BASE,
+ U8500_GPIOBANK6_BASE,
+ U8500_GPIOBANK7_BASE,
+ U8500_GPIOBANK8_BASE};
+
+/*
+ * This function is called to force gpio power save
+ * settings during suspend.
+ */
+void sleep_pins_config_pm(pin_cfg_t *cfgs, int num)
+{
+ int i = 0;
+ int gpio = 0;
+ u32 w_imsc = 0;
+ u32 imsc = 0;
+ u32 offset;
+ u32 bitmask = 1;
+ u32 dirs_register = 0;
+ u32 dirc_register = 0;
+ u32 dats_register = 0;
+ u32 datc_register = 0;
+ u32 pdis_register_disable = 0;
+ u32 pdis_register_enabled = 0;
+ u32 slpm_register_disabled = 0;
+ u32 slpm_register_enabled = 0;
+ u32 bankaddr = 0;
+
+ gpio = PIN_NUM(cfgs[i]);
+
+ /* Get the bank number the pin is mapped to */
+ bankaddr = IO_ADDRESS(u8500_gpio_banks[(gpio >> GPIO_BLOCK_SHIFT)]);
+
+ w_imsc = readl(bankaddr + NMK_GPIO_RWIMSC) |
+ readl(bankaddr + NMK_GPIO_FWIMSC);
+
+ imsc = readl(bankaddr + NMK_GPIO_RIMSC) |
+ readl(bankaddr + NMK_GPIO_FIMSC);
+
+ for (i = 0; i < num; i++) {
+ /* Get the pin number */
+ gpio = PIN_NUM(cfgs[i]);
+
+ /* get the offset into the register */
+ offset = gpio % NMK_GPIO_PER_CHIP;
+ /* Set the bit to toggle */
+ bitmask = 1 << offset;
+
+ /* Next we check for direction (INPUT/OUTPUT) */
+ switch (PIN_SLPM_DIR(cfgs[i])) {
+ case GPIO_IS_INPUT:
+ /* GPIO is set to input */
+ dirc_register |= bitmask;
+
+ /*
+ * Next check for pull (PULLUP/PULLDOWN)
+ * and configure accordingly.
+ */
+ switch (PIN_SLPM_PULL(cfgs[i])) {
+ case GPIO_PULL_UPDOWN_DISABLED:
+ pdis_register_disable |= bitmask;
+ break;
+
+ case GPIO_IS_PULLUP:
+ dats_register |= bitmask;
+ pdis_register_enabled |= bitmask;
+ break;
+
+ case GPIO_IS_PULLDOWN:
+ datc_register |= bitmask;
+ pdis_register_enabled |= bitmask;
+ break;
+
+ case GPIO_PULL_NO_CHANGE:
+ break;
+
+ default:
+ BUG();
+ break;
+
+ }
+ break;
+
+ case GPIO_IS_OUTPUT:
+ /* GPIO is set to output */
+ dirs_register |= bitmask;
+
+ /*
+ * Since it's an output there should not
+ * be a need to disable PULL UP/DOWN
+ * but better safe than sorry.
+ */
+ pdis_register_disable |= bitmask;
+ /* Next we check for setting GPIO HIGH/LOW */
+ switch (PIN_SLPM_VAL(cfgs[i])) {
+ case GPIO_IS_OUTPUT_LOW:
+ /* GPIO is set to LOW */
+ datc_register |= bitmask;
+ break;
+
+ case GPIO_IS_OUTPUT_HIGH:
+ /* GPIO is set to high */
+ dats_register |= bitmask;
+ break;
+
+ case GPIO_IS_NO_CHANGE:
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ break;
+ case GPIO_IS_NOT_CHANGED:
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ /* Next check for Sleep Power Management (SLPM) */
+ switch (PIN_SLPM(cfgs[i])) {
+ case GPIO_WAKEUP_IS_ENABLED:
+ slpm_register_enabled |= bitmask;
+ break;
+
+ case GPIO_WAKEUP_IS_DISBLED:
+ slpm_register_disabled |= bitmask;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ /* Next check for pull up/down disable (PDIS) */
+ switch (PIN_SLPM_PDIS(cfgs[i])) {
+ case GPIO_PDIS_NO_CHANGE:
+ break;
+
+ case GPIO_PDIS_DISABLED:
+ pdis_register_disable |= bitmask;
+ break;
+
+ case GPIO_PDIS_ENABLED:
+ pdis_register_enabled |= bitmask;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ }
+
+ /* Write the register settings GPIO direction */
+ writel(dirs_register & ~w_imsc, bankaddr + NMK_GPIO_DIRS);
+ writel(dirc_register, bankaddr + NMK_GPIO_DIRC);
+
+ writel(datc_register & ~w_imsc, bankaddr + NMK_GPIO_DATC);
+ writel(dats_register & ~w_imsc, bankaddr + NMK_GPIO_DATS);
+
+ /* Write the PDIS enable/disable */
+ writel(readl(bankaddr + NMK_GPIO_PDIS)
+ | (pdis_register_disable & ~w_imsc & ~imsc),
+ bankaddr + NMK_GPIO_PDIS);
+ writel(readl(bankaddr + NMK_GPIO_PDIS)
+ & (~pdis_register_enabled & ~w_imsc & ~imsc),
+ bankaddr + NMK_GPIO_PDIS);
+
+ /* Write the SLPM enable/disable */
+ writel(readl(bankaddr + NMK_GPIO_SLPC) | slpm_register_disabled,
+ bankaddr + NMK_GPIO_SLPC);
+ writel(readl(bankaddr + NMK_GPIO_SLPC) & ~slpm_register_enabled,
+ bankaddr + NMK_GPIO_SLPC);
+}
+
+void sleep_pins_config_pm_mux(pin_cfg_t *cfgs, int num)
+{
+ int i = 0;
+ int gpio = 0;
+ u32 offset;
+ u32 bitmask = 1;
+ u32 gpio_afsla_register_set = 0;
+ u32 gpio_afslb_register_set = 0;
+ u32 gpio_afsla_register_clear = 0;
+ u32 gpio_afslb_register_clear = 0;
+ u32 bankaddr = 0;
+
+ gpio = PIN_NUM(cfgs[i]);
+
+ /* Get the bank number the pin is mapped to */
+ bankaddr = IO_ADDRESS(u8500_gpio_banks[(gpio >> GPIO_BLOCK_SHIFT)]);
+
+ for (i = 0; i < num; i++) {
+ /* Get the pin number */
+ gpio = PIN_NUM(cfgs[i]);
+
+ /* get the offset into the register */
+ offset = gpio % NMK_GPIO_PER_CHIP;
+ /* Set the bit to toggle */
+ bitmask = 1 << offset;
+
+ /* First check for ALT pin configuration */
+ switch (PIN_ALT(cfgs[i])) {
+ case NMK_GPIO_ALT_GPIO:
+ /* Set bit to configured as GPIO */
+ gpio_afsla_register_clear |= bitmask;
+ gpio_afslb_register_clear |= bitmask;
+ break;
+
+ case NMK_GPIO_ALT_A:
+ /* ALT A setting so set corresponding bit */
+ gpio_afsla_register_set |= bitmask;
+ break;
+
+ case NMK_GPIO_ALT_B:
+ /* ALT B setting so set corresponding bit */
+ gpio_afslb_register_set |= bitmask;
+ break;
+
+ case NMK_GPIO_ALT_C:
+ /* ALT C setting so set corresponding bits */
+ gpio_afsla_register_set |= bitmask;
+ gpio_afslb_register_set |= bitmask;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+ }
+ /* Clear the AFSLA/AFSLB bits for pins configured as GPIO */
+ writel(readl(bankaddr + NMK_GPIO_AFSLA)
+ & ~gpio_afsla_register_clear, bankaddr + NMK_GPIO_AFSLA);
+ writel(readl(bankaddr + NMK_GPIO_AFSLB)
+ & ~gpio_afslb_register_clear, bankaddr + NMK_GPIO_AFSLB);
+
+ /* Set the AFSLA/AFSLB bits for pins configured as ALT_A/B/C */
+ writel(readl(bankaddr + NMK_GPIO_AFSLA)
+ | gpio_afsla_register_set, bankaddr + NMK_GPIO_AFSLA);
+ writel(readl(bankaddr + NMK_GPIO_AFSLB)
+ | gpio_afslb_register_set, bankaddr + NMK_GPIO_AFSLB);
+}
diff --git a/arch/arm/mach-ux500/board-pins-sleep-force.h b/arch/arm/mach-ux500/board-pins-sleep-force.h
new file mode 100644
index 00000000000..0949c9bfcda
--- /dev/null
+++ b/arch/arm/mach-ux500/board-pins-sleep-force.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __BOARD_PINS_SLEEP_FORCE_H
+#define __BOARD_PINS_SLEEP_FORCE_H
+
+#include <plat/pincfg.h>
+
+#define NMK_GPIO_PER_CHIP 32
+#define GPIO_BLOCK_SHIFT 5
+
+#define GPIO_IS_NOT_CHANGED 0
+#define GPIO_IS_INPUT 1
+#define GPIO_IS_OUTPUT 2
+
+#define GPIO_WAKEUP_IS_ENABLED 0
+#define GPIO_WAKEUP_IS_DISBLED 1
+
+#define GPIO_IS_NO_CHANGE 0
+#define GPIO_IS_OUTPUT_LOW 1
+#define GPIO_IS_OUTPUT_HIGH 2
+
+#define GPIO_PULL_NO_CHANGE 0
+#define GPIO_PULL_UPDOWN_DISABLED 1
+#define GPIO_IS_PULLUP 2
+#define GPIO_IS_PULLDOWN 3
+
+#define GPIO_PDIS_NO_CHANGE 0
+#define GPIO_PDIS_DISABLED 1
+#define GPIO_PDIS_ENABLED 2
+
+void sleep_pins_config_pm_mux(pin_cfg_t *cfgs, int num);
+void sleep_pins_config_pm(pin_cfg_t *cfgs, int num);
+
+#endif
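Both helpers derive the target GPIO bank from the first entry only, so every call must pass pins that all belong to the same 32-pin bank. A usage sketch under that constraint (the caller below is hypothetical; the pin_cfg_t table itself would be built with the platform's PIN_CFG-style macros elsewhere):

static void example_force_bank_sleep_state(pin_cfg_t *cfgs, int num)
{
	/* Mux first (AFSLA/AFSLB), then direction, pull, data and SLPM state. */
	sleep_pins_config_pm_mux(cfgs, num);
	sleep_pins_config_pm(cfgs, num);
}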
diff --git a/arch/arm/mach-ux500/board-u5500-bm.c b/arch/arm/mach-ux500/board-u5500-bm.c
new file mode 100644
index 00000000000..f7ca803da42
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-bm.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * U5500 board specific charger and battery initialization parameters.
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/power_supply.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include "board-u5500-bm.h"
+
+#ifdef CONFIG_AB5500_BATTERY_THERM_ON_BATCTRL
+/*
+ * These are the defined batteries that use an NTC and ID resistor placed
+ * inside of the battery pack.
+ * Note that the abx500_res_to_temp table must be strictly sorted by falling
+ * resistance values to work.
+ */
+static struct abx500_res_to_temp temp_tbl_type1[] = {
+ {-20, 67400},
+ { 0, 49200},
+ { 5, 44200},
+ { 10, 39400},
+ { 15, 35000},
+ { 20, 31000},
+ { 25, 27400},
+ { 30, 24300},
+ { 35, 21700},
+ { 40, 19400},
+ { 45, 17500},
+ { 50, 15900},
+ { 55, 14600},
+ { 60, 13500},
+ { 65, 12500},
+ { 70, 11800},
+ {100, 9200},
+};
+
+static struct abx500_res_to_temp temp_tbl_type2[] = {
+ {-20, 180700},
+ { 0, 160000},
+ { 5, 152700},
+ { 10, 144900},
+ { 15, 136800},
+ { 20, 128700},
+ { 25, 121000},
+ { 30, 113800},
+ { 35, 107300},
+ { 40, 101500},
+ { 45, 96500},
+ { 50, 92200},
+ { 55, 88600},
+ { 60, 85600},
+ { 65, 83000},
+ { 70, 80900},
+ {100, 73900},
+};
+
+static struct abx500_res_to_temp temp_tbl_A[] = {
+ {-5, 53407},
+ { 0, 48594},
+ { 5, 43804},
+ {10, 39188},
+ {15, 34870},
+ {20, 30933},
+ {25, 27422},
+ {30, 24347},
+ {35, 21694},
+ {40, 19431},
+ {45, 17517},
+ {50, 15908},
+ {55, 14561},
+ {60, 13437},
+ {65, 12500},
+};
+
+static struct abx500_res_to_temp temp_tbl_B[] = {
+ {-5, 165418},
+ { 0, 159024},
+ { 5, 151921},
+ {10, 144300},
+ {15, 136424},
+ {20, 128565},
+ {25, 120978},
+ {30, 113875},
+ {35, 107397},
+ {40, 101629},
+ {45, 96592},
+ {50, 92253},
+ {55, 88569},
+ {60, 85461},
+ {65, 82869},
+};
+
+static struct abx500_v_to_cap cap_tbl_type1[] = {
+ {4171, 100},
+ {4114, 95},
+ {4009, 83},
+ {3947, 74},
+ {3907, 67},
+ {3863, 59},
+ {3830, 56},
+ {3813, 53},
+ {3791, 46},
+ {3771, 33},
+ {3754, 25},
+ {3735, 20},
+ {3717, 17},
+ {3681, 13},
+ {3664, 8},
+ {3651, 6},
+ {3635, 5},
+ {3560, 3},
+ {3408, 1},
+ {3247, 0},
+};
+
+static struct abx500_v_to_cap cap_tbl_A[] = {
+ {4171, 100},
+ {4114, 95},
+ {4009, 83},
+ {3947, 74},
+ {3907, 67},
+ {3863, 59},
+ {3830, 56},
+ {3813, 53},
+ {3791, 46},
+ {3771, 33},
+ {3754, 25},
+ {3735, 20},
+ {3717, 17},
+ {3681, 13},
+ {3664, 8},
+ {3651, 6},
+ {3635, 5},
+ {3560, 3},
+ {3408, 1},
+ {3247, 0},
+};
+static struct abx500_v_to_cap cap_tbl_B[] = {
+ {4161, 100},
+ {4124, 98},
+ {4044, 90},
+ {4003, 85},
+ {3966, 80},
+ {3933, 75},
+ {3888, 67},
+ {3849, 60},
+ {3813, 55},
+ {3787, 47},
+ {3772, 30},
+ {3751, 25},
+ {3718, 20},
+ {3681, 16},
+ {3660, 14},
+ {3589, 10},
+ {3546, 7},
+ {3495, 4},
+ {3404, 2},
+ {3250, 0},
+};
+#endif
+static struct abx500_v_to_cap cap_tbl[] = {
+ {4186, 100},
+ {4163, 99},
+ {4114, 95},
+ {4068, 90},
+ {3990, 80},
+ {3926, 70},
+ {3898, 65},
+ {3866, 60},
+ {3833, 55},
+ {3812, 50},
+ {3787, 40},
+ {3768, 30},
+ {3747, 25},
+ {3730, 20},
+ {3705, 15},
+ {3699, 14},
+ {3684, 12},
+ {3672, 9},
+ {3657, 7},
+ {3638, 6},
+ {3556, 4},
+ {3424, 2},
+ {3317, 1},
+ {3094, 0},
+};
+
+/*
+ * Note that the abx500_res_to_temp table must be strictly sorted by falling
+ * resistance values to work.
+ */
+static struct abx500_res_to_temp temp_tbl[] = {
+ {-5, 214834},
+ { 0, 162943},
+ { 5, 124820},
+ {10, 96520},
+ {15, 75306},
+ {20, 59254},
+ {25, 47000},
+ {30, 37566},
+ {35, 30245},
+ {40, 24520},
+ {45, 20010},
+ {50, 16432},
+ {55, 13576},
+ {60, 11280},
+ {65, 9425},
+};
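/*
 * As noted above, the resistance tables must be sorted by strictly falling
 * resistance: a lookup can then find the first entry below the measured value
 * and interpolate linearly between neighbours. The helper below is only a
 * sketch of that idea (the real abx500 helper may differ); it assumes the
 * { temp, resist } field layout implied by the initializers above.
 */
static int example_res_to_temp(const struct abx500_res_to_temp *tbl, int n,
			       int res)
{
	int i;

	if (res >= tbl[0].resist)
		return tbl[0].temp;
	if (res <= tbl[n - 1].resist)
		return tbl[n - 1].temp;

	for (i = 1; i < n; i++)
		if (res > tbl[i].resist)
			break;

	/* Linear interpolation between tbl[i - 1] and tbl[i]. */
	return tbl[i - 1].temp +
	       (tbl[i].temp - tbl[i - 1].temp) *
	       (res - tbl[i - 1].resist) /
	       (tbl[i].resist - tbl[i - 1].resist);
}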
+
+static const struct abx500_battery_type bat_type[] = {
+ [BATTERY_UNKNOWN] = {
+ /* First element always represent the UNKNOWN battery */
+ .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
+ .resis_high = 0,
+ .resis_low = 0,
+ .battery_resistance = 300,
+ .charge_full_design = 612,
+ .nominal_voltage = 3700,
+ .termination_vol = 4050,
+ .termination_curr = 200,
+ .recharge_vol = 3990,
+ .normal_cur_lvl = 400,
+ .normal_vol_lvl = 4100,
+ .maint_a_cur_lvl = 400,
+ .maint_a_vol_lvl = 4050,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 400,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+
+#ifdef CONFIG_AB5500_BATTERY_THERM_ON_BATCTRL
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 70000,
+ .resis_low = 8200,
+ .battery_resistance = 300,
+ .charge_full_design = 1500,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_vol = 4025,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_type1),
+ .r_to_t_tbl = temp_tbl_type1,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_type1),
+ .v_to_cap_tbl = cap_tbl_type1,
+
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 165418,
+ .resis_low = 82869,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_vol = 4025,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B),
+ .r_to_t_tbl = temp_tbl_B,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B),
+ .v_to_cap_tbl = cap_tbl_B,
+ },
+#else
+/*
+ * These are the batteries that don't have an internal NTC resistor to measure
+ * their temperature. The temperature in this case is measured with an NTC placed
+ * near the battery, but on the PCB.
+ */
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 76000,
+ .resis_low = 53000,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4025,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 30000,
+ .resis_low = 10000,
+ .battery_resistance = 300,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4025,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+ {
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 95000,
+ .resis_low = 76001,
+ .battery_resistance = 300,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4025,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4025,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ },
+#endif
+};
+
+static char *ab5500_charger_supplied_to[] = {
+ "abx500_chargalg",
+ "ab5500_fg",
+ "ab5500_btemp",
+};
+
+static char *ab5500_btemp_supplied_to[] = {
+ "abx500_chargalg",
+ "ab5500_fg",
+};
+
+static char *ab5500_fg_supplied_to[] = {
+ "abx500_chargalg",
+};
+
+static char *abx500_chargalg_supplied_to[] = {
+ "ab5500_fg",
+};
+
+struct abx500_charger_platform_data ab5500_charger_plat_data = {
+ .supplied_to = ab5500_charger_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab5500_charger_supplied_to),
+};
+
+struct abx500_btemp_platform_data ab5500_btemp_plat_data = {
+ .supplied_to = ab5500_btemp_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab5500_btemp_supplied_to),
+};
+
+struct abx500_fg_platform_data ab5500_fg_plat_data = {
+ .supplied_to = ab5500_fg_supplied_to,
+ .num_supplicants = ARRAY_SIZE(ab5500_fg_supplied_to),
+};
+
+struct abx500_chargalg_platform_data abx500_chargalg_plat_data = {
+ .supplied_to = abx500_chargalg_supplied_to,
+ .num_supplicants = ARRAY_SIZE(abx500_chargalg_supplied_to),
+};
+
+static const struct abx500_bm_capacity_levels cap_levels = {
+ .critical = 2,
+ .low = 10,
+ .normal = 70,
+ .high = 95,
+ .full = 100,
+};
+
+static const struct abx500_fg_parameters fg = {
+ .recovery_sleep_timer = 10,
+ .recovery_total_time = 100,
+ .init_timer = 1,
+ .init_discard_time = 5,
+ .init_total_time = 40,
+ .high_curr_time = 60,
+ .accu_charging = 30,
+ .accu_high_curr = 30,
+ .high_curr_threshold = 50,
+ .lowbat_threshold = 3560,
+ .overbat_threshold = 4400,
+};
+
+static const struct abx500_maxim_parameters maxi_params = {
+ .ena_maxi = true,
+ .chg_curr = 910,
+ .wait_cycles = 10,
+ .charger_curr_step = 100,
+};
+
+static const struct abx500_bm_charger_parameters chg = {
+ .usb_volt_max = 5500,
+ .usb_curr_max = 1500,
+ .ac_volt_max = 7500,
+ .ac_curr_max = 1500,
+};
+
+struct abx500_bm_data ab5500_bm_data = {
+ .temp_under = 3,
+ .temp_low = 8,
+ /* TODO: Need to verify the temp values */
+ .temp_high = 155,
+ .temp_over = 160,
+ .main_safety_tmr_h = 4,
+ .usb_safety_tmr_h = 4,
+ .bkup_bat_v = 0x00,
+ .bkup_bat_i = 0x00,
+ .no_maintenance = true,
+#ifdef CONFIG_AB5500_BATTERY_THERM_ON_BATCTRL
+ .adc_therm = ABx500_ADC_THERM_BATCTRL,
+#else
+ .adc_therm = ABx500_ADC_THERM_BATTEMP,
+#endif
+ .chg_unknown_bat = false,
+ .enable_overshoot = false,
+ .fg_res = 200,
+ .cap_levels = &cap_levels,
+ .bat_type = bat_type,
+ .n_btypes = ARRAY_SIZE(bat_type),
+ .batt_id = 0,
+ .interval_charging = 5,
+ .interval_not_charging = 120,
+ .temp_hysteresis = 3,
+ .maxi = &maxi_params,
+ .chg_params = &chg,
+ .fg_params = &fg,
+};
+
+/* ab5500 energy management platform data */
+struct abx500_bm_plat_data abx500_bm_pt_data = {
+ .battery = &ab5500_bm_data,
+ .charger = &ab5500_charger_plat_data,
+ .btemp = &ab5500_btemp_plat_data,
+ .fg = &ab5500_fg_plat_data,
+ .chargalg = &abx500_chargalg_plat_data,
+};
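Each entry in the bat_type table above pairs a battery-identification resistance window (resis_low/resis_high, in ohms) with a full set of charging parameters, and n_btypes/ARRAY_SIZE keeps the count in sync. A minimal sketch of how such a table can be searched for a measured identification resistance follows; it is a hypothetical helper (the element type struct abx500_battery_type is assumed from ab5500-bm.h), not the actual ab5500_btemp lookup.

	#include <linux/kernel.h>
	#include <linux/mfd/abx500/ab5500-bm.h>

	/*
	 * Hypothetical helper: return the index of the first battery type whose
	 * identification-resistance window contains res_ohm, or -1 if none match.
	 */
	static int bm_match_battery(const struct abx500_battery_type *tbl, int n,
				    int res_ohm)
	{
		int i;

		for (i = 0; i < n; i++)
			if (res_ohm >= tbl[i].resis_low &&
			    res_ohm <= tbl[i].resis_high)
				return i;

		return -1;	/* callers typically fall back to an "unknown" entry */
	}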
diff --git a/arch/arm/mach-ux500/board-u5500-bm.h b/arch/arm/mach-ux500/board-u5500-bm.h
new file mode 100644
index 00000000000..a6346905911
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-bm.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * U5500 board specific charger and battery initialization parameters.
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#ifndef __BOARD_U5500_BM_H
+#define __BOARD_U5500_BM_H
+
+#include <linux/mfd/abx500/ab5500-bm.h>
+
+extern struct abx500_charger_platform_data ab5500_charger_plat_data;
+extern struct abx500_btemp_platform_data ab5500_btemp_plat_data;
+extern struct abx500_fg_platform_data ab5500_fg_plat_data;
+extern struct abx500_chargalg_platform_data abx500_chargalg_plat_data;
+extern struct abx500_bm_data ab5500_bm_data;
+extern struct abx500_bm_plat_data abx500_bm_pt_data;
+
+#endif
diff --git a/arch/arm/mach-ux500/board-u5500-cyttsp.c b/arch/arm/mach-ux500/board-u5500-cyttsp.c
new file mode 100755
index 00000000000..47eaa36c030
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-cyttsp.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Author: Avinash A <avinash.a@stericsson.com> for ST-Ericsson
+ * License terms:GNU General Public License (GPL) version 2
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/cyttsp.h>
+#include <linux/delay.h>
+#include <linux/amba/pl022.h>
+#include <plat/pincfg.h>
+#include <mach/hardware.h>
+
+#include "pins-db5500.h"
+#include "board-u5500.h"
+
+/* cyttsp_plat_init: configures the touch panel. */
+static int cyttsp_plat_init(int on)
+{
+ int ret;
+
+ ret = gpio_direction_output(CYPRESS_SLAVE_SELECT_GPIO, 1);
+ if (ret < 0) {
+ pr_err("slave select gpio direction failed\n");
+ gpio_free(CYPRESS_SLAVE_SELECT_GPIO);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cyttsp_wakeup(void)
+{
+ int ret;
+
+ ret = gpio_request(CYPRESS_TOUCH_INT_PIN, "Wakeup_pin");
+ if (ret < 0) {
+ pr_err("touch gpio failed\n");
+ return ret;
+ }
+ ret = gpio_direction_output(CYPRESS_TOUCH_INT_PIN, 1);
+ if (ret < 0) {
+ pr_err("touch gpio direction failed\n");
+ goto out;
+ }
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 0);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 1);
+	/*
+	 * To wake up the controller from the sleep state, the interrupt
+	 * pin needs to be pulsed twice with a delay greater than
+	 * 2 microseconds between the pulses.
+	 */
+ udelay(3);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 0);
+ gpio_set_value(CYPRESS_TOUCH_INT_PIN, 1);
+ ret = gpio_direction_input(CYPRESS_TOUCH_INT_PIN);
+ if (ret < 0) {
+ pr_err("touch gpio direction IN config failed\n");
+ goto out;
+ }
+out:
+ gpio_free(CYPRESS_TOUCH_INT_PIN);
+	return ret;
+}
+static struct cyttsp_platform_data cyttsp_spi_platdata = {
+ .maxx = 480,
+ .maxy = 854,
+ .flags = 0,
+ .gen = CY_GEN3,
+ .use_st = 0,
+ .use_mt = 1,
+ .use_trk_id = 0,
+ .use_hndshk = 0,
+ .use_sleep = 1,
+ .use_gestures = 0,
+ .use_load_file = 0,
+ .use_force_fw_update = 0,
+ .use_virtual_keys = 0,
+ /* activate up to 4 groups and set active distance */
+ .gest_set = CY_GEST_GRP_NONE | CY_ACT_DIST,
+ /* change scn_type to enable finger and/or stylus detection */
+ .scn_typ = 0xA5, /* autodetect finger+stylus; balanced mutual scan */
+ .act_intrvl = CY_ACT_INTRVL_DFLT, /* Active refresh interval; ms */
+ .tch_tmout = CY_TCH_TMOUT_DFLT, /* Active touch timeout; ms */
+ .lp_intrvl = CY_LP_INTRVL_DFLT, /* Low power refresh interval; ms */
+ .init = cyttsp_plat_init,
+ .mt_sync = input_mt_sync,
+ .wakeup = cyttsp_wakeup,
+ .name = CY_SPI_NAME,
+ .irq_gpio = CYPRESS_TOUCH_INT_PIN,
+ .rst_gpio = CYPRESS_TOUCH_RST_GPIO,
+};
+
+static void cyttsp_spi_cs_control(u32 command)
+{
+ if (command == SSP_CHIP_SELECT)
+ gpio_set_value(CYPRESS_SLAVE_SELECT_GPIO, 0);
+ else if (command == SSP_CHIP_DESELECT)
+ gpio_set_value(CYPRESS_SLAVE_SELECT_GPIO, 1);
+}
+
+static struct pl022_config_chip cyttsp_ssp_config_chip = {
+ .com_mode = INTERRUPT_TRANSFER,
+ .iface = SSP_INTERFACE_MOTOROLA_SPI,
+ /* we can act as master only */
+ .hierarchy = SSP_MASTER,
+ .slave_tx_disable = 0,
+ .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
+ .tx_lev_trig = SSP_TX_16_OR_MORE_EMPTY_LOC,
+ .ctrl_len = SSP_BITS_16,
+ .wait_state = SSP_MWIRE_WAIT_ZERO,
+ .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+ .cs_control = cyttsp_spi_cs_control,
+};
+
+static struct spi_board_info cypress_spi_devices[] = {
+ {
+ .modalias = CY_SPI_NAME,
+ .controller_data = &cyttsp_ssp_config_chip,
+ .platform_data = &cyttsp_spi_platdata,
+ .max_speed_hz = 1000000,
+ .bus_num = 1,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ }
+};
+
+void u5500_cyttsp_init(void)
+{
+ int ret = 0;
+
+ ret = gpio_request(CYPRESS_SLAVE_SELECT_GPIO, "slave_select_gpio");
+ if (ret < 0) {
+ pr_err("slave select gpio failed\n");
+ return;
+ }
+ if (cpu_is_u5500v2())
+ cyttsp_spi_platdata.invert = true;
+ spi_register_board_info(cypress_spi_devices,
+ ARRAY_SIZE(cypress_spi_devices));
+}
diff --git a/arch/arm/mach-ux500/board-u5500-mcde.c b/arch/arm/mach-ux500/board-u5500-mcde.c
new file mode 100644
index 00000000000..5c4ba76055d
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-mcde.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/dispdev.h>
+#include <video/av8100.h>
+#include <asm/mach-types.h>
+#include <video/mcde_display.h>
+#include <video/mcde_display-generic_dsi.h>
+#include <video/mcde_display-sony_acx424akp_dsi.h>
+#include <video/mcde_display-av8100.h>
+#include <video/mcde_fb.h>
+#include <video/mcde_dss.h>
+
+#define DSI_UNIT_INTERVAL_0 0xA
+#define DSI_UNIT_INTERVAL_2 0x5
+
+/* The initialization of the HDMI display driver must be delayed in order
+ * to ensure that inputclk will be available (needed by the HDMI hardware). */
+static struct delayed_work work_dispreg_hdmi;
+#define DISPREG_HDMI_DELAY 6000
+
+enum {
+ PRIMARY_DISPLAY_ID,
+ AV8100_DISPLAY_ID,
+ MCDE_NR_OF_DISPLAYS
+};
+
+static int display_initialized_during_boot;
+
+static int __init startup_graphics_setup(char *str)
+{
+
+ if (get_option(&str, &display_initialized_during_boot) != 1)
+ display_initialized_during_boot = 0;
+
+ switch (display_initialized_during_boot) {
+ case 1:
+ pr_info("Startup graphics support\n");
+ break;
+ case 0:
+ default:
+ pr_info("No startup graphics supported\n");
+ break;
+ };
+
+ return 1;
+}
+__setup("startup_graphics=", startup_graphics_setup);
+
+static struct mcde_col_transform rgb_2_yCbCr_transform = {
+ .matrix = {
+ {0x0042, 0x0081, 0x0019},
+ {0xffda, 0xffb6, 0x0070},
+ {0x0070, 0xffa2, 0xffee},
+ },
+ .offset = {0x10, 0x80, 0x80},
+};
+
+static struct mcde_port sony_port0 = {
+ .link = 0,
+};
+
+static struct mcde_display_sony_acx424akp_platform_data \
+ sony_acx424akp_display0_pdata = {
+ .reset_gpio = 226,
+};
+
+static struct mcde_display_device sony_acx424akp_display0 = {
+ .name = "mcde_disp_sony_acx424akp",
+ .id = PRIMARY_DISPLAY_ID,
+ .port = &sony_port0,
+ .chnl_id = MCDE_CHNL_A,
+ .fifo = MCDE_FIFO_A,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGBA8888,
+#ifdef CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_VSYNC
+ .synchronized_update = true,
+#else
+ .synchronized_update = false,
+#endif
+ .rotbuf1 = U5500_ESRAM_BASE + 0x20000 * 2,
+ .rotbuf2 = U5500_ESRAM_BASE + 0x20000 * 2 + 0x10000,
+ .dev = {
+ .platform_data = &sony_acx424akp_display0_pdata,
+ },
+};
+
+#if defined(CONFIG_AV8100_HWTRIG_INT)
+ #define AV8100_SYNC_SRC MCDE_SYNCSRC_TE0
+#elif defined(CONFIG_AV8100_HWTRIG_I2SDAT3)
+ #define AV8100_SYNC_SRC MCDE_SYNCSRC_TE1
+#elif defined(CONFIG_AV8100_HWTRIG_DSI_TE)
+ #define AV8100_SYNC_SRC MCDE_SYNCSRC_TE_POLLING
+#else
+ #define AV8100_SYNC_SRC MCDE_SYNCSRC_OFF
+#endif
+static struct mcde_port av8100_port2 = {
+ .type = MCDE_PORTTYPE_DSI,
+ .mode = MCDE_PORTMODE_CMD,
+ .pixel_format = MCDE_PORTPIXFMT_DSI_24BPP,
+ .link = 1,
+ .sync_src = AV8100_SYNC_SRC,
+ .update_auto_trig = true,
+ .phy = {
+ .dsi = {
+ .num_data_lanes = 2,
+ .ui = DSI_UNIT_INTERVAL_2,
+ },
+ },
+ .hdmi_sdtv_switch = HDMI_SWITCH,
+};
+
+static struct mcde_display_hdmi_platform_data av8100_hdmi_pdata = {
+ .rgb_2_yCbCr_transform = &rgb_2_yCbCr_transform,
+};
+
+static struct mcde_display_device av8100_hdmi = {
+ .name = "av8100_hdmi",
+ .id = AV8100_DISPLAY_ID,
+ .port = &av8100_port2,
+ .chnl_id = MCDE_CHNL_B,
+ .fifo = MCDE_FIFO_B,
+ .default_pixel_format = MCDE_OVLYPIXFMT_RGB888,
+ .native_x_res = 1280,
+ .native_y_res = 720,
+ .dev = {
+ .platform_data = &av8100_hdmi_pdata,
+ },
+};
+
+static void delayed_work_dispreg_hdmi(struct work_struct *ptr)
+{
+ if (mcde_display_device_register(&av8100_hdmi))
+ pr_warning("Failed to register av8100_hdmi\n");
+}
+
+/*
+ * This function will create the framebuffer for the display that is
+ * registered.
+ */
+static int display_postregistered_callback(struct notifier_block *nb,
+ unsigned long event, void *dev)
+{
+ struct mcde_display_device *ddev = dev;
+ u16 width, height;
+ u16 virtual_height;
+ u32 rotate = FB_ROTATE_UR;
+ struct fb_info *fbi;
+#ifdef CONFIG_DISPDEV
+ struct mcde_fb *mfb;
+#endif
+
+ if (event != MCDE_DSS_EVENT_DISPLAY_REGISTERED)
+ return 0;
+
+ if (ddev->id < PRIMARY_DISPLAY_ID || ddev->id >= MCDE_NR_OF_DISPLAYS)
+ return 0;
+
+ mcde_dss_get_native_resolution(ddev, &width, &height);
+
+ virtual_height = height * 2;
+
+#ifndef CONFIG_MCDE_DISPLAY_HDMI_FB_AUTO_CREATE
+ if (ddev->id == AV8100_DISPLAY_ID)
+ goto out;
+#endif
+
+ /* Create frame buffer */
+ fbi = mcde_fb_create(ddev, width, height, width, virtual_height,
+ ddev->default_pixel_format, rotate);
+ if (IS_ERR(fbi)) {
+ dev_warn(&ddev->dev,
+ "Failed to create fb for display %s\n", ddev->name);
+ goto display_postregistered_callback_err;
+ } else {
+ dev_info(&ddev->dev, "Framebuffer created (%s)\n", ddev->name);
+ }
+
+#ifdef CONFIG_DISPDEV
+ mfb = to_mcde_fb(fbi);
+
+ /* Create a dispdev overlay for this display */
+ if (dispdev_create(ddev, true, mfb->ovlys[0]) < 0) {
+ dev_warn(&ddev->dev,
+ "Failed to create disp for display %s\n", ddev->name);
+ goto display_postregistered_callback_err;
+ } else {
+ dev_info(&ddev->dev, "Disp dev created for (%s)\n", ddev->name);
+ }
+#endif
+
+out:
+ return 0;
+
+display_postregistered_callback_err:
+ return -1;
+}
+
+static struct notifier_block display_nb = {
+ .notifier_call = display_postregistered_callback,
+};
+
+int __init init_u5500_display_devices(void)
+{
+ if (!cpu_is_u5500())
+ return 0;
+
+ (void)mcde_dss_register_notifier(&display_nb);
+
+ if (display_initialized_during_boot)
+ sony_acx424akp_display0.power_mode = MCDE_DISPLAY_PM_STANDBY;
+
+ (void)mcde_display_device_register(&sony_acx424akp_display0);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&work_dispreg_hdmi,
+ delayed_work_dispreg_hdmi);
+ schedule_delayed_work(&work_dispreg_hdmi,
+ msecs_to_jiffies(DISPREG_HDMI_DELAY));
+
+ return 0;
+}
+module_init(init_u5500_display_devices);
+
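The HDMI display above is registered from a delayed work item so that inputclk has had time to become available. A stripped-down sketch of that deferral pattern, with hypothetical names (the board code itself uses INIT_DELAYED_WORK_DEFERRABLE and a 6000 ms delay):

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	#define REG_DELAY_MS	6000	/* mirrors DISPREG_HDMI_DELAY above */

	static struct delayed_work deferred_reg_work;

	static void deferred_reg_fn(struct work_struct *work)
	{
		/* the clock the device depends on is expected to be up by now */
		pr_info("deferred device registration running\n");
	}

	static int __init deferred_reg_init(void)
	{
		INIT_DELAYED_WORK(&deferred_reg_work, deferred_reg_fn);
		schedule_delayed_work(&deferred_reg_work,
				      msecs_to_jiffies(REG_DELAY_MS));
		return 0;
	}
	late_initcall(deferred_reg_init);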
diff --git a/arch/arm/mach-ux500/board-u5500-mmio.c b/arch/arm/mach-ux500/board-u5500-mmio.c
new file mode 100644
index 00000000000..ded7cb94c1f
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-mmio.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson
+ * Author: Joakim Axelsson <joakim.axelsson@stericsson.com> for ST-Ericsson
+ * Author: Rajat Verma <rajat.verma@stericsson.com> for ST-Ericsson.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/vmalloc.h>
+#include <plat/pincfg.h>
+#include <mach/gpio.h>
+#include <mach/devices.h>
+#include "board-u5500.h"
+#include <linux/mmio.h>
+
+struct mmio_board_data {
+ int number_of_regulators;
+ struct regulator **mmio_regulators;
+	/* Pin configs */
+ struct mmio_gpio xshutdown_pins[CAMERA_SLOT_END];
+	/* Internal clocks */
+ struct clk *clk_ptr_bml;
+ struct clk *clk_ptr_ipi2c;
+	/* External clocks */
+ struct clk *clk_ptr_ext[CAMERA_SLOT_END];
+};
+
+/*
+ * List the names of the regulators required to power up the
+ * camera sensor in the array below.
+ */
+static char *regulator_names[] = {"v-mmio-camera", "v-ana"};
+
+static int mmio_clock_init(struct mmio_platform_data *pdata)
+{
+ int err;
+ struct mmio_board_data *extra = pdata->extra;
+
+ extra->clk_ptr_ext[PRIMARY_CAMERA] =
+ clk_get(pdata->dev, "primary-cam");
+ if (IS_ERR(extra->clk_ptr_ext[PRIMARY_CAMERA])) {
+ err = PTR_ERR(extra->clk_ptr_ext[PRIMARY_CAMERA]);
+ dev_err(pdata->dev,
+ "Error %d clock 'primary-cam'\n", err);
+ goto err_pri_ext_clk;
+ }
+ extra->clk_ptr_ext[SECONDARY_CAMERA] =
+ clk_get(pdata->dev, "secondary-cam");
+ if (IS_ERR(extra->clk_ptr_ext[SECONDARY_CAMERA])) {
+ err = PTR_ERR(extra->clk_ptr_ext[SECONDARY_CAMERA]);
+ dev_err(pdata->dev,
+ "Error %d clock 'secondary-cam'\n", err);
+ goto err_sec_ext_clk;
+ }
+
+ return 0;
+err_sec_ext_clk:
+ clk_put(extra->clk_ptr_ext[PRIMARY_CAMERA]);
+err_pri_ext_clk:
+ return err;
+}
+
+static void mmio_clock_exit(struct mmio_platform_data *pdata)
+{
+ struct mmio_board_data *extra = pdata->extra;
+
+ clk_put(extra->clk_ptr_ext[PRIMARY_CAMERA]);
+ clk_put(extra->clk_ptr_ext[SECONDARY_CAMERA]);
+}
+
+static int mmio_pin_cfg_init(struct mmio_platform_data *pdata)
+{
+ struct mmio_board_data *extra = pdata->extra;
+
+ extra->xshutdown_pins[PRIMARY_CAMERA].gpio =
+ GPIO_PRIMARY_CAM_XSHUTDOWN;
+ extra->xshutdown_pins[PRIMARY_CAMERA].active_high = 0;
+ extra->xshutdown_pins[PRIMARY_CAMERA].udelay = 250;
+
+ extra->xshutdown_pins[SECONDARY_CAMERA].gpio =
+ GPIO_SECONDARY_CAM_XSHUTDOWN;
+ extra->xshutdown_pins[SECONDARY_CAMERA].active_high = 0;
+ extra->xshutdown_pins[SECONDARY_CAMERA].udelay = 250;
+
+ return 0;
+}
+
+static void mmio_pin_cfg_exit(struct mmio_platform_data *pdata)
+{
+}
+
+/*
+ * For now, both sensors on B5500/S5500 share the same power-up sequence. If
+ * different sequences are needed for the primary and secondary sensors, this
+ * can be implemented easily: use the camera_slot field of mmio_platform_data
+ * to determine which camera needs to be powered up.
+ */
+static int mmio_power_init(struct mmio_platform_data *pdata)
+{
+ int err = 0, i = 0;
+ struct mmio_board_data *extra = pdata->extra;
+
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ extra->number_of_regulators = ARRAY_SIZE(regulator_names);
+ extra->mmio_regulators =
+ kzalloc(sizeof(struct regulator *) * extra->number_of_regulators,
+ GFP_KERNEL);
+ if (!extra->mmio_regulators) {
+		dev_err(pdata->dev,
+			"Error allocating memory for mmio regulators\n");
+ err = -ENOMEM;
+ goto err_no_mem_reg;
+ }
+	for (i = 0; i < extra->number_of_regulators; i++) {
+ extra->mmio_regulators[i] =
+ regulator_get(pdata->dev, regulator_names[i]);
+ if (IS_ERR(extra->mmio_regulators[i])) {
+ err = PTR_ERR(extra->mmio_regulators[i]);
+			dev_err(pdata->dev, "Error %d getting regulator '%s'\n",
+				err, regulator_names[i]);
+ goto err_regulator;
+ }
+ }
+ dev_dbg(pdata->dev , "Board %s() Exit\n", __func__);
+ return 0;
+err_regulator:
+	/*
+	 * Release the regulators we have already requested.
+	 */
+ while (i--)
+ regulator_put(extra->mmio_regulators[i]);
+ kfree(extra->mmio_regulators);
+err_no_mem_reg:
+ return err;
+}
+
+static void mmio_power_exit(struct mmio_platform_data *pdata)
+{
+ int i = 0;
+ struct mmio_board_data *extra = pdata->extra;
+
+ for (i = 0; i < extra->number_of_regulators; i++)
+ regulator_put(extra->mmio_regulators[i]);
+ kfree(extra->mmio_regulators);
+}
+
+static int mmio_platform_init(struct mmio_platform_data *pdata)
+{
+ int err = 0;
+ struct mmio_board_data *extra = NULL;
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ /*
+ * Alloc memory for our own extra data
+ */
+ extra = kzalloc(sizeof(struct mmio_board_data), GFP_KERNEL);
+ if (!extra) {
+ dev_err(pdata->dev, "%s: memory alloc failed for "
+ "mmio_board_data\n", __func__);
+ err = -ENOMEM;
+ goto err_no_mem_extra;
+ }
+ /*
+ * Hook the data for other callbacks to use
+ */
+ pdata->extra = extra;
+
+ pdata->camera_slot = -1;
+
+ err = mmio_power_init(pdata);
+ if (err)
+ goto err_regulator;
+ err = mmio_clock_init(pdata);
+ if (err)
+ goto err_clock;
+ err = mmio_pin_cfg_init(pdata);
+ if (err)
+ goto err_pin_cfg;
+
+ dev_dbg(pdata->dev , "Board %s() Exit\n", __func__);
+ return 0;
+
+err_pin_cfg:
+ mmio_clock_exit(pdata);
+err_clock:
+ mmio_power_exit(pdata);
+err_regulator:
+ kfree(extra);
+err_no_mem_extra:
+ return err;
+}
+
+static void mmio_platform_exit(struct mmio_platform_data *pdata)
+{
+ struct mmio_board_data *extra = pdata->extra;
+
+ mmio_power_exit(pdata);
+ mmio_clock_exit(pdata);
+ mmio_pin_cfg_exit(pdata);
+ kfree(extra);
+ pdata->extra = NULL;
+}
+
+static int mmio_power_enable(struct mmio_platform_data *pdata)
+{
+ int err = 0, i = 0;
+ struct mmio_board_data *extra = pdata->extra;
+
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ /*
+ * Enable the regulators
+ */
+ for (i = 0; i < extra->number_of_regulators; i++) {
+		err = regulator_enable(extra->mmio_regulators[i]);
+		if (err) {
+			dev_err(pdata->dev, "Error %d enabling regulator '%s'\n",
+				err, regulator_names[i]);
+			goto err_regulator;
+		}
+ }
+
+ err = gpio_request(GPIO_CAMERA_PMIC_EN, "Camera PMIC GPIO");
+ if (err) {
+		dev_err(pdata->dev,
+			"Error %d while requesting Camera PMIC GPIO\n", err);
+ return err;
+ }
+
+ err = gpio_direction_output(GPIO_CAMERA_PMIC_EN, 0);
+ if (err) {
+		dev_err(pdata->dev,
+			"Error %d while setting Camera PMIC GPIO output mode\n",
+			err);
+ return err;
+ }
+
+ if (!(u5500_board_is_s5500()))
+ gpio_set_value(GPIO_CAMERA_PMIC_EN, 1);
+ else
+ gpio_set_value(GPIO_CAMERA_PMIC_EN, 0);
+
+ dev_dbg(pdata->dev , "Board %s() Exit\n", __func__);
+ return 0;
+err_regulator:
+ /*
+ * Disable regulators we already enabled
+ */
+ while (i--)
+ regulator_disable(extra->mmio_regulators[i]);
+ return err;
+}
+
+static void mmio_power_disable(struct mmio_platform_data *pdata)
+{
+ int i;
+ struct mmio_board_data *extra = pdata->extra;
+ /*
+ * Disable the regulators
+ */
+ for (i = 0; i < extra->number_of_regulators; i++)
+ regulator_disable(extra->mmio_regulators[i]);
+
+ if (!(u5500_board_is_s5500()))
+ gpio_set_value(GPIO_CAMERA_PMIC_EN, 0);
+ else
+ gpio_set_value(GPIO_CAMERA_PMIC_EN, 1);
+
+ gpio_free(GPIO_CAMERA_PMIC_EN);
+}
+
+static int mmio_clock_enable(struct mmio_platform_data *pdata)
+{
+ int err = 0;
+ struct mmio_board_data *extra = pdata->extra;
+
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+
+ /*
+ * Enable appropriate external clock
+ */
+ err = clk_enable(extra->clk_ptr_ext[pdata->camera_slot]);
+ if (err) {
+		dev_err(pdata->dev, "Error activating clock for sensor %d, err %d\n",
+			pdata->camera_slot, err);
+ goto err_ext_clk;
+ }
+ dev_dbg(pdata->dev , "Board %s() Exit\n", __func__);
+ return 0;
+err_ext_clk:
+ return err;
+}
+
+static void mmio_clock_disable(struct mmio_platform_data *pdata)
+{
+ struct mmio_board_data *extra = pdata->extra;
+
+ clk_disable(extra->clk_ptr_ext[pdata->camera_slot]);
+}
+
+static int mmio_config_xshutdown_pins(struct mmio_platform_data *pdata,
+ enum mmio_select_xshutdown_t select,
+ int is_active_high)
+{
+ int err = 0;
+ struct mmio_board_data *extra = pdata->extra;
+
+ dev_dbg(pdata->dev , "Board %s() Enter\n", __func__);
+ switch (select) {
+ case MMIO_ENABLE_XSHUTDOWN_HOST:
+ extra->xshutdown_pins[pdata->camera_slot].active_high =
+ is_active_high;
+ dev_dbg(pdata->dev , "Enabling Xshutdown GPIO PIN = %d",
+ extra->xshutdown_pins[pdata->camera_slot].gpio);
+
+ err = gpio_request
+ (extra->xshutdown_pins[pdata->camera_slot].gpio,
+ "MMIO GPIO");
+ if (err) {
+			dev_err(pdata->dev,
+				"Error %d while requesting Xshutdown MMIO GPIO\n",
+				err);
+ return err;
+ }
+
+ err = gpio_direction_output
+ (extra->xshutdown_pins[pdata->camera_slot].gpio,
+ 0);
+ if (err) {
+			dev_err(pdata->dev,
+				"Error %d while setting Xshutdown MMIO GPIO output mode\n",
+				err);
+ return err;
+ }
+ break;
+ case MMIO_DISABLE_XSHUTDOWN:
+ dev_dbg(pdata->dev , "Disabling Xshutdown GPIO PIN = %d",
+ extra->xshutdown_pins[pdata->camera_slot].gpio);
+ gpio_free(extra->xshutdown_pins[pdata->camera_slot].gpio);
+ break;
+ default:
+ break;
+ }
+ if (err)
+ dev_err(pdata->dev , "Error configuring xshutdown, err = %d\n",
+ err);
+ return err;
+}
+
+static void mmio_set_xshutdown(struct mmio_platform_data *pdata)
+{
+ struct mmio_board_data *extra = pdata->extra;
+
+ gpio_set_value(extra->xshutdown_pins[pdata->camera_slot].gpio ,
+ (extra->xshutdown_pins[pdata->camera_slot].active_high
+ ? 1 : 0));
+ udelay(extra->xshutdown_pins[pdata->camera_slot].udelay);
+}
+
+/*
+ * TODO: This function will be removed in the future. Since it is called
+ * frequently from the HSM camera code, it is kept for legacy reasons.
+ */
+static int mmio_config_i2c_pins(struct mmio_platform_data *pdata,
+ enum mmio_select_i2c_t select)
+{
+ int err = 0;
+
+ switch (select) {
+ case MMIO_ACTIVATE_I2C_HOST:
+ dev_dbg(pdata->dev , "Activate I2C from Host called\n");
+ break;
+ case MMIO_DEACTIVATE_I2C:
+ dev_dbg(pdata->dev , "DeActivate I2C from Host called\n");
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static struct mmio_platform_data mmio_config = {
+ .platform_init = mmio_platform_init,
+ .platform_exit = mmio_platform_exit,
+ .power_enable = mmio_power_enable,
+ .power_disable = mmio_power_disable,
+ .clock_enable = mmio_clock_enable,
+ .clock_disable = mmio_clock_disable,
+ .config_i2c_pins = mmio_config_i2c_pins,
+ .config_xshutdown_pins = mmio_config_xshutdown_pins,
+ .set_xshutdown = mmio_set_xshutdown
+};
+
+struct platform_device u5500_mmio_device = {
+ .name = MMIO_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &mmio_config,
+ }
+};
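mmio_power_init() above grabs a variable-length list of regulators and, on failure, releases the ones already taken with a while (i--) rollback. The same pattern in isolation, as a minimal sketch with hypothetical names (regulator_get()/regulator_put() are the standard consumer API):

	#include <linux/err.h>
	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	/* Get every supply in names[]; on error, put back the ones already taken. */
	static int get_supplies(struct device *dev, const char * const *names,
				struct regulator **regs, int n)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			regs[i] = regulator_get(dev, names[i]);
			if (IS_ERR(regs[i])) {
				err = PTR_ERR(regs[i]);
				while (i--)
					regulator_put(regs[i]);
				return err;
			}
		}

		return 0;
	}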
diff --git a/arch/arm/mach-ux500/board-u5500-pins.c b/arch/arm/mach-ux500/board-u5500-pins.c
new file mode 100644
index 00000000000..a6b63ca3ead
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-pins.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+#include <plat/pincfg.h>
+
+#include "pins-db5500.h"
+#include "pins.h"
+
+static pin_cfg_t u5500_pins_default[] = {
+ GPIO77_ACCTX_ON | PIN_SLPM_ALTFUNC,
+ GPIO79_ACCSIM_Clk | PIN_SLPM_ALTFUNC,
+ GPIO80_ACCSIM_Da | PIN_SLPM_ALTFUNC,
+ GPIO81_ACCSIM_Reset | PIN_SLPM_ALTFUNC,
+ GPIO82_ACCSIM_DDir | PIN_SLPM_ALTFUNC,
+
+ /* MSP */
+ GPIO32_MSP0_TCK | PIN_INPUT_PULLDOWN,
+ GPIO33_MSP0_TFS | PIN_INPUT_PULLDOWN,
+ GPIO34_MSP0_TXD | PIN_INPUT_PULLDOWN,
+ GPIO35_MSP0_RXD | PIN_INPUT_PULLDOWN,
+ GPIO96_MSP1_TCK | PIN_INPUT_PULLDOWN,
+ GPIO97_MSP1_TFS | PIN_INPUT_PULLDOWN,
+ GPIO98_MSP1_TXD | PIN_INPUT_PULLDOWN,
+ GPIO99_MSP1_RXD | PIN_INPUT_PULLDOWN,
+ GPIO220_MSP2_TCK | PIN_OUTPUT_LOW,
+ GPIO221_MSP2_TFS | PIN_OUTPUT_LOW,
+ GPIO222_MSP2_TXD | PIN_OUTPUT_LOW,
+
+ /* DISPLAY_ENABLE */
+ GPIO226_GPIO | PIN_OUTPUT_HIGH,
+
+ /* Backlight Enable */
+ GPIO224_GPIO | PIN_OUTPUT_HIGH,
+
+ /* UART0 */
+ GPIO28_U0_TXD | PIN_OUTPUT_HIGH,
+ GPIO29_U0_RXD | PIN_INPUT_PULLUP,
+
+ /* UART3 */
+ GPIO165_U3_RXD | PIN_INPUT_PULLUP,
+ GPIO166_U3_TXD | PIN_OUTPUT_HIGH | PIN_LOWEMI_ENABLED,
+ GPIO167_U3_RTSn | PIN_OUTPUT_HIGH | PIN_LOWEMI_ENABLED,
+ GPIO168_U3_CTSn | PIN_INPUT_PULLUP,
+
+ /* AB5500 */
+ GPIO78_IRQn | PIN_SLPM_NOCHANGE,
+ GPIO100_I2C0_SCL | PIN_INPUT_PULLUP | PIN_SLPM_NOCHANGE,
+ GPIO101_I2C0_SDA | PIN_SLPM_NOCHANGE,
+
+ /* TOUCH_IRQ */
+ GPIO179_GPIO | PIN_INPUT_PULLUP,
+
+ /* SD-CARD detect/levelshifter pins */
+ GPIO180_GPIO | PIN_INPUT_NOPULL, /* SD_CARD_DETn */
+ GPIO227_GPIO | PIN_OUTPUT_LOW, /* SD_CARD_CTRL */
+ GPIO185_GPIO | PIN_OUTPUT_LOW, /* SD_CARD_VSEL */
+
+ /* Display & HDMI HW sync */
+ GPIO204_LCD_VSI1 | PIN_INPUT_PULLUP,
+
+ /* TVOUT (connected, but unused) */
+ GPIO205_GPIO | PIN_OUTPUT_LOW,
+ GPIO206_GPIO | PIN_OUTPUT_LOW,
+ GPIO207_GPIO | PIN_OUTPUT_LOW,
+ GPIO208_GPIO | PIN_OUTPUT_LOW,
+ GPIO209_GPIO | PIN_INPUT_PULLDOWN,
+
+ /* Display (connected to NT35560 / TE, but unused) */
+ GPIO211_GPIO | PIN_INPUT_PULLDOWN,
+
+ /* Camera & MMIO XshutDown*/
+ GPIO1_GPIO | PIN_OUTPUT_LOW,
+ GPIO2_GPIO | PIN_OUTPUT_LOW,
+
+ /* USB chip select */
+ GPIO76_GPIO | PIN_OUTPUT_LOW,
+
+ GPIO202_ACCU0_RXD | PIN_INPUT_PULLUP | PIN_SLPM_NOCHANGE,
+ GPIO203_ACCU0_TXD | PIN_OUTPUT_HIGH | PIN_SLPM_NOCHANGE,
+
+ /* Board Id Identification B5500 or S5500 */
+ GPIO0_GPIO | PIN_INPUT_PULLUP,
+ GPIO214_GPIO | PIN_OUTPUT_LOW, /* SW_CRASH_INDICATOR */
+
+ /* Touchscreen chip select */
+ GPIO186_GPIO | PIN_OUTPUT_HIGH | PIN_LOWEMI_ENABLED,
+
+ GPIO133_GPIO | PIN_OUTPUT_LOW, /* DUALSIMRESETn */
+ GPIO187_GPIO | PIN_OUTPUT_HIGH, /* Dual SIM CS */
+
+ GPIO163_GPIO | PIN_INPUT_PULLUP, /* SERVICEn */
+
+ GPIO223_GPIO | PIN_INPUT_PULLDOWN, /* HDMI_INT */
+ GPIO225_GPIO | PIN_OUTPUT_LOW, /* HDMI_ENABLE */
+};
+
+static UX500_PINS(db5500_kp_pins,
+ /* Keypad */
+ GPIO128_KP_I0 | PIN_INPUT_PULLUP | PIN_SLPM_INPUT_PULLUP,
+ GPIO130_KP_I1 | PIN_INPUT_PULLUP | PIN_SLPM_INPUT_PULLUP,
+ GPIO132_KP_I2 | PIN_INPUT_PULLUP | PIN_SLPM_INPUT_PULLUP,
+ GPIO134_KP_I3 | PIN_INPUT_PULLUP | PIN_SLPM_INPUT_PULLUP,
+ GPIO137_KP_O4 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+ GPIO139_KP_O5 | PIN_INPUT_PULLUP | PIN_SLPM_OUTPUT_LOW,
+);
+
+static UX500_PINS(db5500_pins_sdi0,
+ /* SDI0 (eMMC) */
+ GPIO5_MC0_DAT0 | PIN_INPUT_PULLUP,
+ GPIO6_MC0_DAT1 | PIN_INPUT_PULLUP,
+ GPIO7_MC0_DAT2 | PIN_INPUT_PULLUP,
+ GPIO8_MC0_DAT3 | PIN_INPUT_PULLUP,
+ GPIO9_MC0_DAT4 | PIN_INPUT_PULLUP,
+ GPIO10_MC0_DAT5 | PIN_INPUT_PULLUP,
+ GPIO11_MC0_DAT6 | PIN_INPUT_PULLUP,
+ GPIO12_MC0_DAT7 | PIN_INPUT_PULLUP,
+ GPIO13_MC0_CMD | PIN_INPUT_PULLUP,
+ GPIO14_MC0_CLK | PIN_OUTPUT_LOW,
+);
+
+static UX500_PINS(db5500_pins_sdi1,
+ /* SDI1 (SD-CARD) */
+ GPIO191_MC1_DAT0 | PIN_INPUT_PULLUP,
+ GPIO192_MC1_DAT1 | PIN_INPUT_PULLUP,
+ GPIO193_MC1_DAT2 | PIN_INPUT_PULLUP,
+ GPIO194_MC1_DAT3 | PIN_INPUT_PULLUP,
+ GPIO195_MC1_CLK | PIN_OUTPUT_LOW,
+ GPIO196_MC1_CMD | PIN_INPUT_PULLUP,
+ GPIO197_MC1_CMDDIR | PIN_OUTPUT_HIGH,
+ GPIO198_MC1_FBCLK | PIN_INPUT_PULLDOWN,
+ GPIO199_MC1_DAT0DIR | PIN_OUTPUT_HIGH,
+);
+
+static UX500_PINS(db5500_pins_sdi2,
+ /* SDI2 (eMMC) */
+ GPIO16_MC2_CMD | PIN_INPUT_PULLUP,
+ GPIO17_MC2_CLK | PIN_OUTPUT_LOW,
+ GPIO23_MC2_DAT0 | PIN_INPUT_PULLUP,
+ GPIO19_MC2_DAT1 | PIN_INPUT_PULLUP,
+ GPIO24_MC2_DAT2 | PIN_INPUT_PULLUP,
+ GPIO20_MC2_DAT3 | PIN_INPUT_PULLUP,
+ GPIO25_MC2_DAT4 | PIN_INPUT_PULLUP,
+ GPIO21_MC2_DAT5 | PIN_INPUT_PULLUP,
+ GPIO26_MC2_DAT6 | PIN_INPUT_PULLUP,
+ GPIO22_MC2_DAT7 | PIN_INPUT_PULLUP
+);
+
+static UX500_PINS(db5500_pins_sdi3,
+ /* SDI3 (SDIO) */
+ GPIO171_MC3_DAT0 | PIN_INPUT_PULLUP | PIN_LOWEMI_ENABLED,
+ GPIO172_MC3_DAT1 | PIN_INPUT_PULLUP | PIN_LOWEMI_ENABLED,
+ GPIO173_MC3_DAT2 | PIN_INPUT_PULLUP | PIN_LOWEMI_ENABLED,
+ GPIO174_MC3_DAT3 | PIN_INPUT_PULLUP | PIN_LOWEMI_ENABLED,
+ GPIO175_MC3_CMD | PIN_INPUT_PULLUP | PIN_LOWEMI_ENABLED,
+ GPIO176_MC3_CLK | PIN_OUTPUT_LOW,
+);
+
+static UX500_PINS(u5500_pins_i2c1,
+ GPIO3_I2C1_SCL | PIN_INPUT_NOPULL | PIN_LOWEMI_ENABLED,
+ GPIO4_I2C1_SDA | PIN_INPUT_NOPULL | PIN_LOWEMI_ENABLED,
+);
+
+static UX500_PINS(u5500_pins_i2c2,
+ GPIO218_I2C2_SCL | PIN_INPUT_NOPULL | PIN_LOWEMI_ENABLED,
+ GPIO219_I2C2_SDA | PIN_INPUT_NOPULL | PIN_LOWEMI_ENABLED,
+);
+
+static UX500_PINS(u5500_pins_i2c3,
+ GPIO177_I2C3_SCL | PIN_INPUT_NOPULL | PIN_LOWEMI_ENABLED,
+ GPIO178_I2C3_SDA | PIN_INPUT_NOPULL | PIN_LOWEMI_ENABLED,
+);
+
+static UX500_PINS(u5500_pins_spi3,
+ GPIO188_SPI3_RXD | PIN_INPUT_PULLUP,
+ GPIO189_SPI3_TXD | PIN_OUTPUT_LOW | PIN_LOWEMI_ENABLED,
+ GPIO190_SPI3_CLK | PIN_OUTPUT_LOW | PIN_LOWEMI_ENABLED,
+);
+
+/* USB */
+static UX500_PINS(u5500_pins_usb,
+ GPIO74_USB_NXT | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO72_USB_STP | PIN_OUTPUT_HIGH | PIN_SLPM_OUTPUT_HIGH,
+ GPIO75_USB_XCLK | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO73_USB_DIR | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO71_USB_DAT7 | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO70_USB_DAT6 | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO69_USB_DAT5 | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO68_USB_DAT4 | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO67_USB_DAT3 | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO66_USB_DAT2 | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO65_USB_DAT1 | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+ GPIO64_USB_DAT0 | PIN_INPUT_NOPULL | PIN_SLPM_INPUT_PULLDOWN,
+);
+
+static struct ux500_pin_lookup u5500_pins[] = {
+ PIN_LOOKUP("nmk-i2c.1", &u5500_pins_i2c1),
+ PIN_LOOKUP("nmk-i2c.2", &u5500_pins_i2c2),
+ PIN_LOOKUP("nmk-i2c.3", &u5500_pins_i2c3),
+ PIN_LOOKUP("spi3", &u5500_pins_spi3),
+ PIN_LOOKUP("db5500_kp", &db5500_kp_pins),
+ PIN_LOOKUP("ab5500-usb.0", &u5500_pins_usb),
+ PIN_LOOKUP("sdi0", &db5500_pins_sdi0),
+ PIN_LOOKUP("sdi1", &db5500_pins_sdi1),
+ PIN_LOOKUP("sdi2", &db5500_pins_sdi2),
+ PIN_LOOKUP("sdi3", &db5500_pins_sdi3),
+};
+
+void __init u5500_pins_init(void)
+{
+ nmk_config_pins(u5500_pins_default, ARRAY_SIZE(u5500_pins_default));
+ ux500_pins_add(u5500_pins, ARRAY_SIZE(u5500_pins));
+}
+
+/* Stub function to make board-ux500-cg2900.c compile within a U5500 configuration */
+int pins_for_u9500(void)
+{
+ return 0;
+}
diff --git a/arch/arm/mach-ux500/board-u5500-regulators.c b/arch/arm/mach-ux500/board-u5500-regulators.c
new file mode 100644
index 00000000000..9e343259e53
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-regulators.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/ab5500.h>
+
+#include "regulator-u5500.h"
+#include "board-u5500.h"
+
+/*
+ * AB5500
+ */
+
+static struct regulator_consumer_supply ab5500_ldo_g_consumers[] = {
+ REGULATOR_SUPPLY("vmmc", "sdi1"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_h_consumers[] = {
+ REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
+ REGULATOR_SUPPLY("vdd", "1-004b"), /* Synaptics */
+ REGULATOR_SUPPLY("vin", "2-0036"), /* LM3530 */
+ REGULATOR_SUPPLY("vcpin", "spi1.0"),
+ REGULATOR_SUPPLY("v-ana", "mmio_camera"),
+ REGULATOR_SUPPLY("vdd", "lsm303dlh.0"),
+ REGULATOR_SUPPLY("vdd", "lsm303dlh.1"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_k_consumers[] = {
+ REGULATOR_SUPPLY("v-mmio-camera", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_h_consumers_pre_r3a[] = {
+ REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
+ REGULATOR_SUPPLY("vdd", "1-004b"), /* Synaptics */
+ REGULATOR_SUPPLY("vin", "2-0036"), /* LM3530 */
+ REGULATOR_SUPPLY("vcpin", "spi1.0"),
+ REGULATOR_SUPPLY("v-ana", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_k_consumers_pre_r3a[] = {
+ REGULATOR_SUPPLY("vdd", "lsm303dlh.0"),
+ REGULATOR_SUPPLY("vdd", "lsm303dlh.1"),
+ REGULATOR_SUPPLY("v-mmio-camera", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_l_consumers[] = {
+ REGULATOR_SUPPLY("vmmc", "sdi0"),
+ REGULATOR_SUPPLY("vmmc", "sdi2"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_vdigmic_consumers[] = {
+ REGULATOR_SUPPLY("vdigmic", "ab5500-codec.0"),
+};
+
+static struct regulator_consumer_supply ab5500_ldo_sim_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.5"),
+};
+
+static struct regulator_consumer_supply ab5500_bias2_consumers[] = {
+ REGULATOR_SUPPLY("v-amic", NULL),
+};
+
+static struct regulator_init_data
+ab5500_regulator_init_data[AB5500_NUM_REGULATORS] = {
+ /* SD Card */
+ [AB5500_LDO_G] = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 2910000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .consumer_supplies = ab5500_ldo_g_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_ldo_g_consumers),
+ },
+ /* Display */
+ [AB5500_LDO_H] = {
+ .constraints = {
+ .min_uV = 2790000,
+ .max_uV = 2790000,
+ .apply_uV = 1,
+ .boot_on = 1, /* display on during boot */
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = ab5500_ldo_h_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_ldo_h_consumers),
+ },
+ /* Camera */
+ [AB5500_LDO_K] = {
+ .constraints = {
+ .min_uV = 2790000,
+ .max_uV = 2790000,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = ab5500_ldo_k_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_ldo_k_consumers),
+ },
+ /* External eMMC */
+ [AB5500_LDO_L] = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 2910000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .consumer_supplies = ab5500_ldo_l_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_ldo_l_consumers),
+ },
+ [AB5500_LDO_VDIGMIC] = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = ab5500_ldo_vdigmic_consumers,
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab5500_ldo_vdigmic_consumers),
+ },
+ [AB5500_LDO_SIM] = {
+ .constraints = {
+ .min_uV = 1875000,
+ .max_uV = 2900000,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = ab5500_ldo_sim_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_ldo_sim_consumers),
+ },
+ [AB5500_BIAS2] = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = ab5500_bias2_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(ab5500_bias2_consumers),
+ },
+};
+
+static struct ab5500_regulator_data
+ab5500_regulator_data[AB5500_NUM_REGULATORS] = {
+ [AB5500_LDO_H] = {
+ /*
+ * The sub camera on the dev boards needs both supplies to be
+ * on to avoid high leakage.
+ */
+ .off_is_lowpower = true,
+ },
+};
+
+struct ab5500_regulator_platform_data u5500_ab5500_regulator_data = {
+ .regulator = ab5500_regulator_init_data,
+ .data = ab5500_regulator_data,
+ .num_regulator = ARRAY_SIZE(ab5500_regulator_init_data),
+};
+
+
+static void __init u5500_regulators_init_debug(void)
+{
+ const char data[] = "debug";
+ int i;
+
+ for (i = 0; i < 6; i++)
+ platform_device_register_data(NULL, "reg-virt-consumer", i,
+ data, sizeof(data));
+}
+
+static struct regulator_consumer_supply u5500_vio_consumers[] = {
+ REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
+};
+
+static struct regulator_init_data u5500_vio_init_data = {
+ .constraints.always_on = 1,
+ .consumer_supplies = u5500_vio_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(u5500_vio_consumers),
+};
+
+static struct fixed_voltage_config u5500_vio_pdata __initdata = {
+ .supply_name = "vio_1v8",
+ .microvolts = 1800000,
+ .init_data = &u5500_vio_init_data,
+ .gpio = -EINVAL,
+};
+
+void __init u5500_regulators_init(void)
+{
+ if (u5500_board_is_pre_r3a()) {
+ struct regulator_init_data *rid = ab5500_regulator_init_data;
+
+ rid[AB5500_LDO_K].consumer_supplies
+ = ab5500_ldo_k_consumers_pre_r3a;
+ rid[AB5500_LDO_K].num_consumer_supplies
+ = ARRAY_SIZE(ab5500_ldo_k_consumers_pre_r3a);
+
+ rid[AB5500_LDO_H].consumer_supplies
+ = ab5500_ldo_h_consumers_pre_r3a;
+ rid[AB5500_LDO_H].num_consumer_supplies
+ = ARRAY_SIZE(ab5500_ldo_h_consumers_pre_r3a);
+ }
+
+ u5500_regulators_init_debug();
+
+ platform_device_register_data(NULL, "reg-fixed-voltage", -1,
+ &u5500_vio_pdata,
+ sizeof(u5500_vio_pdata));
+
+ regulator_has_full_constraints();
+}
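Each REGULATOR_SUPPLY() entry above binds a supply name to a consumer device, e.g. "vmmc" on "sdi1" resolves to AB5500 LDO G. A sketch of the consumer side this serves (hypothetical driver code; regulator_get()/regulator_set_voltage()/regulator_enable() are the standard API, and 2.9 V lies within the 1.2-2.91 V constraints set above):

	#include <linux/err.h>
	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	/* In an "sdi1" host driver this "vmmc" request resolves to LDO G. */
	static int example_power_card(struct device *dev)
	{
		struct regulator *vmmc;
		int err;

		vmmc = regulator_get(dev, "vmmc");
		if (IS_ERR(vmmc))
			return PTR_ERR(vmmc);

		err = regulator_set_voltage(vmmc, 2900000, 2900000);
		if (!err)
			err = regulator_enable(vmmc);
		if (err)
			regulator_put(vmmc);

		return err;
	}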
diff --git a/arch/arm/mach-ux500/board-u5500-sdi.c b/arch/arm/mach-ux500/board-u5500-sdi.c
index 63c3f8058ff..101a69f2fbc 100644
--- a/arch/arm/mach-ux500/board-u5500-sdi.c
+++ b/arch/arm/mach-ux500/board-u5500-sdi.c
@@ -5,34 +5,28 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/amba/bus.h>
#include <linux/amba/mmci.h>
#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
-#include <plat/pincfg.h>
-#include <plat/gpio-nomadik.h>
-#include <mach/db5500-regs.h>
+#include <asm/mach-types.h>
#include <plat/ste_dma40.h>
+#include <mach/devices.h>
+#include <mach/hardware.h>
+#include <mach/ste-dma40-db5500.h>
-#include "pins-db5500.h"
#include "devices-db5500.h"
-#include "ste-dma40-db5500.h"
-
-static pin_cfg_t u5500_sdi_pins[] = {
- /* SDI0 (POP eMMC) */
- GPIO5_MC0_DAT0 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO6_MC0_DAT1 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO7_MC0_DAT2 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO8_MC0_DAT3 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO9_MC0_DAT4 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO10_MC0_DAT5 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO11_MC0_DAT6 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO12_MC0_DAT7 | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO13_MC0_CMD | PIN_DIR_INPUT | PIN_PULL_UP,
- GPIO14_MC0_CLK | PIN_DIR_OUTPUT | PIN_VAL_LOW,
-};
+#include "board-u5500.h"
+/*
+ * SDI 0 (eMMC)
+ */
#ifdef CONFIG_STE_DMA40
-struct stedma40_chan_cfg u5500_sdi0_dma_cfg_rx = {
+static struct stedma40_chan_cfg sdi0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
.src_dev_type = DB5500_DMA_DEV24_SDMMC0_RX,
@@ -41,7 +35,7 @@ struct stedma40_chan_cfg u5500_sdi0_dma_cfg_rx = {
.dst_info.data_width = STEDMA40_WORD_WIDTH,
};
-static struct stedma40_chan_cfg u5500_sdi0_dma_cfg_tx = {
+static struct stedma40_chan_cfg sdi0_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.src_dev_type = STEDMA40_DEV_SRC_MEMORY,
@@ -57,18 +51,206 @@ static struct mmci_platform_data u5500_sdi0_data = {
.capabilities = MMC_CAP_4_BIT_DATA |
MMC_CAP_8_BIT_DATA |
MMC_CAP_MMC_HIGHSPEED,
+ .capabilities2 = MMC_CAP2_NO_SLEEP_CMD,
.gpio_cd = -1,
.gpio_wp = -1,
#ifdef CONFIG_STE_DMA40
.dma_filter = stedma40_filter,
- .dma_rx_param = &u5500_sdi0_dma_cfg_rx,
- .dma_tx_param = &u5500_sdi0_dma_cfg_tx,
+ .dma_rx_param = &sdi0_dma_cfg_rx,
+ .dma_tx_param = &sdi0_dma_cfg_tx,
+#endif
+};
+
+/*
+ * SDI 1 (MicroSD slot)
+ */
+
+static int u5500_sdi1_ios_handler(struct device *dev, struct mmc_ios *ios)
+{
+ static int power_mode = -1;
+
+ if (power_mode == ios->power_mode)
+ return 0;
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ break;
+ case MMC_POWER_ON:
+		/*
+		 * The level shifter voltage should be selected based on vdd
+		 * when deciding between 1.8V and 2.9V. Once the decision has
+		 * been made, the level shifter must be disabled and re-enabled
+		 * with a changed select signal in order to switch the voltage.
+		 * Since there is no framework support yet for indicating 1.8V
+		 * in vdd, use the default 2.9V.
+		 */
+ gpio_set_value_cansleep(GPIO_MMC_CARD_CTRL, 1);
+ udelay(100);
+ break;
+ case MMC_POWER_OFF:
+ gpio_set_value_cansleep(GPIO_MMC_CARD_CTRL, 0);
+ break;
+ }
+
+ power_mode = ios->power_mode;
+ return 0;
+}
+
+#ifdef CONFIG_STE_DMA40
+static struct stedma40_chan_cfg sdi1_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB5500_DMA_DEV34_SDMMC1_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg sdi1_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV34_SDMMC1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+#endif
+
+static struct mmci_platform_data u5500_sdi1_data = {
+ .ios_handler = u5500_sdi1_ios_handler,
+ .ocr_mask = MMC_VDD_29_30,
+ .f_max = 50000000,
+ .capabilities = MMC_CAP_4_BIT_DATA |
+ MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_MMC_HIGHSPEED,
+ .gpio_cd = GPIO_SDMMC_CD,
+ .gpio_wp = -1,
+ .cd_invert = true,
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &sdi1_dma_cfg_rx,
+ .dma_tx_param = &sdi1_dma_cfg_tx,
#endif
};
+/*
+ * SDI2 (EMMC2)
+ */
+
+static struct stedma40_chan_cfg sdi2_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB5500_DMA_DEV26_SDMMC2_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg sdi2_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV26_SDMMC2_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct mmci_platform_data u5500_sdi2_data = {
+ .ocr_mask = MMC_VDD_165_195,
+ .f_max = 50000000,
+ .capabilities = MMC_CAP_4_BIT_DATA |
+ MMC_CAP_8_BIT_DATA |
+ MMC_CAP_MMC_HIGHSPEED,
+ .capabilities2 = MMC_CAP2_NO_SLEEP_CMD,
+ .gpio_cd = -1,
+ .gpio_wp = -1,
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &sdi2_dma_cfg_rx,
+ .dma_tx_param = &sdi2_dma_cfg_tx,
+#endif
+};
+
+/*
+ * SDI 3 (SDIO WLAN)
+ */
+#ifdef SDIO_DMA_ON
+#ifdef CONFIG_STE_DMA40
+static struct stedma40_chan_cfg sdi3_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB5500_DMA_DEV27_SDMMC3_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg sdi3_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV27_SDMMC3_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+#endif
+#endif
+
+static struct mmci_platform_data u5500_sdi3_data = {
+ .ocr_mask = MMC_VDD_29_30,
+ .f_max = 50000000,
+ .capabilities = MMC_CAP_4_BIT_DATA,
+ .gpio_cd = -1,
+ .gpio_wp = -1,
+#ifdef SDIO_DMA_ON
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &sdi3_dma_cfg_rx,
+ .dma_tx_param = &sdi3_dma_cfg_tx,
+#endif
+#endif
+};
+
+static void sdi1_configure(void)
+{
+ int pin[2];
+ int ret;
+
+ /* Level-shifter GPIOs */
+ pin[0] = GPIO_MMC_CARD_CTRL;
+ pin[1] = GPIO_MMC_CARD_VSEL;
+
+ ret = gpio_request(pin[0], "MMC_CARD_CTRL");
+ if (!ret)
+ ret = gpio_request(pin[1], "MMC_CARD_VSEL");
+
+ if (ret) {
+		pr_warning("unable to config sdi1 gpios for level shifter.\n");
+ return;
+ }
+	/* Select the default 2.9V and enable the level shifter */
+ gpio_direction_output(pin[0], 1);
+ gpio_direction_output(pin[1], 0);
+}
+
void __init u5500_sdi_init(void)
{
- nmk_config_pins(u5500_sdi_pins, ARRAY_SIZE(u5500_sdi_pins));
+ u32 periphid = 0x10480180;
+
+	/*
+	 * FIXME for u5500 v2.1: the boot device is detected dynamically by
+	 * reading the ROM debug register from backup RAM, and the
+	 * corresponding eMMC is registered. This is done because the ROM
+	 * code for u5500 v2 configures the MMC0 clock incorrectly.
+	 */
+ if (u5500_get_boot_mmc() == 2)
+ db5500_add_sdi2(&u5500_sdi2_data, periphid);
+ else
+ db5500_add_sdi0(&u5500_sdi0_data, periphid);
- db5500_add_sdi0(&u5500_sdi0_data);
+ sdi1_configure();
+ db5500_add_sdi1(&u5500_sdi1_data, periphid);
+ db5500_add_sdi3(&u5500_sdi3_data, periphid);
}
diff --git a/arch/arm/mach-ux500/board-u5500-wlan.c b/arch/arm/mach-ux500/board-u5500-wlan.c
new file mode 100644
index 00000000000..fd64089108a
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-wlan.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ * Author: Bartosz Markowski <bartosz.markowski@tieto.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include "pins.h"
+#include <mach/cw1200_plat.h>
+
+
+static void cw1200_release(struct device *dev);
+static int cw1200_prcmu_ctrl(const struct cw1200_platform_data *pdata,
+ bool enable);
+
+static struct resource cw1200_u5500_resources[] = {
+ {
+ .start = NOMADIK_GPIO_TO_IRQ(129),
+ .end = NOMADIK_GPIO_TO_IRQ(129),
+ .flags = IORESOURCE_IRQ,
+ .name = "cw1200_irq",
+ },
+};
+
+static struct cw1200_platform_data cw1200_u5500_platform_data = {
+ .prcmu_ctrl = cw1200_prcmu_ctrl,
+};
+
+static struct platform_device cw1200_device = {
+ .name = "cw1200_wlan",
+ .dev = {
+ .platform_data = &cw1200_u5500_platform_data,
+ .release = cw1200_release,
+ .init_name = "cw1200_wlan",
+ },
+};
+
+const struct cw1200_platform_data *cw1200_u5500_get_platform_data(void)
+{
+ return &cw1200_u5500_platform_data;
+}
+EXPORT_SYMBOL_GPL(cw1200_u5500_get_platform_data);
+
+static int cw1200_prcmu_ctrl(const struct cw1200_platform_data *pdata,
+ bool enable)
+{
+ int ret;
+
+ if (enable)
+ ret = prcmu_resetout(2, 1);
+ else
+ ret = prcmu_resetout(2, 0);
+
+ return ret;
+}
+
+int __init u5500_wlan_init(void)
+{
+ if (machine_is_u5500()) {
+ cw1200_device.num_resources = ARRAY_SIZE(cw1200_u5500_resources);
+ cw1200_device.resource = cw1200_u5500_resources;
+ } else {
+ dev_err(&cw1200_device.dev,
+ "Unsupported mach type %d "
+ "(check mach-types.h)\n",
+ __machine_arch_type);
+ return -ENOTSUPP;
+ }
+
+ cw1200_u5500_platform_data.mmc_id = "mmc2";
+ cw1200_u5500_platform_data.irq = &cw1200_device.resource[0];
+
+ cw1200_device.dev.release = cw1200_release;
+
+ return platform_device_register(&cw1200_device);
+}
+
+static void cw1200_release(struct device *dev)
+{
+
+}
diff --git a/arch/arm/mach-ux500/board-u5500-wlan.h b/arch/arm/mach-ux500/board-u5500-wlan.h
new file mode 100644
index 00000000000..89fd41166fd
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500-wlan.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * U5500 board specific cw1200 (WLAN device) initialization.
+ *
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ * Author: Bartosz Markowski <bartosz.markowski@tieto.com> for ST-Ericsson
+ *
+ */
+
+#ifndef __BOARD_U5500_WLAN_H
+#define __BOARD_U5500_WLAN_H
+
+int u5500_wlan_init(void);
+
+#endif
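cw1200_u5500_get_platform_data() exports the IRQ resource and the prcmu_ctrl hook wired up in board-u5500-wlan.c. A sketch of how a caller might use that hook to release the WLAN reset line before probing (hypothetical call site, not the actual cw1200 driver code):

	#include <mach/cw1200_plat.h>

	static int example_cw1200_power_up(void)
	{
		const struct cw1200_platform_data *pdata;
		int err = 0;

		pdata = cw1200_u5500_get_platform_data();

		/* prcmu_ctrl(pdata, true) ends up in prcmu_resetout(2, 1) above */
		if (pdata->prcmu_ctrl)
			err = pdata->prcmu_ctrl(pdata, true);

		return err;
	}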
diff --git a/arch/arm/mach-ux500/board-u5500.c b/arch/arm/mach-ux500/board-u5500.c
index 9de9e9c4dbb..fbcdfedff8a 100644
--- a/arch/arm/mach-ux500/board-u5500.c
+++ b/arch/arm/mach-ux500/board-u5500.c
@@ -1,7 +1,6 @@
/*
* Copyright (C) ST-Ericsson SA 2010
*
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
*/
@@ -9,8 +8,25 @@
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/irq.h>
+#include <linux/gpio/nomadik.h>
#include <linux/i2c.h>
#include <linux/mfd/abx500/ab5500.h>
+#include <linux/amba/pl022.h>
+#include <linux/delay.h>
+#include <linux/led-lm3530.h>
+#include <../drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h>
+#include <linux/input/matrix_keypad.h>
+#ifdef CONFIG_SENSORS_LSM303DLH
+#include <linux/lsm303dlh.h>
+#endif
+#include <linux/leds-ab5500.h>
+#ifdef CONFIG_TOUCHSCREEN_CYTTSP_SPI
+#include <linux/cyttsp.h>
+#endif
+
+#ifdef CONFIG_AV8100
+#include <video/av8100.h>
+#endif
#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
@@ -18,36 +34,134 @@
#include <plat/pincfg.h>
#include <plat/i2c.h>
-#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
+#include <mach/ste-dma40-db5500.h>
+#ifdef CONFIG_UX500_SOC_DBX500
+#include <mach/msp.h>
+#endif
#include <mach/devices.h>
#include <mach/setup.h>
+#include <mach/db5500-keypad.h>
+#include <mach/crypto-ux500.h>
+#include <mach/abx500-accdet.h>
+#include <mach/usb.h>
#include "pins-db5500.h"
+#include "pins.h"
#include "devices-db5500.h"
-#include <linux/led-lm3530.h>
+#include "board-u5500.h"
+#include "board-u5500-bm.h"
+#include "board-u5500-wlan.h"
+#include "board-ux500-usb.h"
+
+#ifdef CONFIG_SENSORS_LSM303DLH
+/*
+ * LSM303DLH
+ */
+
+static struct lsm303dlh_platform_data __initdata lsm303dlh_pdata = {
+ .name_a = "lsm303dlh.0",
+ .name_m = "lsm303dlh.1",
+ .axis_map_x = 1,
+ .axis_map_y = 0,
+ .axis_map_z = 2,
+	/* the display is mounted in reverse in the hardware */
+ .negative_x = 1,
+ .negative_y = 1,
+ .negative_z = 1,
+};
+#endif
+
+/*
+ * Touchscreen
+ */
+static struct synaptics_rmi4_platform_data rmi4_i2c_platformdata = {
+ .irq_number = NOMADIK_GPIO_TO_IRQ(179),
+ .irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
+#if defined(CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE) && \
+ CONFIG_DISPLAY_GENERIC_DSI_PRIMARY_ROTATION_ANGLE == 270
+ .x_flip = true,
+ .y_flip = false,
+#else
+ .x_flip = false,
+ .y_flip = true,
+#endif
+ .regulator_en = true,
+};
+
+static struct av8100_platform_data av8100_plat_data = {
+ .irq = NOMADIK_GPIO_TO_IRQ(223),
+ .reset = 225,
+ .alt_powerupseq = true,
+ .mclk_freq = 1, /* MCLK_RNG_22_27 */
+};
/*
- * GPIO
+ * leds LM3530
*/
+static struct lm3530_platform_data u5500_als_platform_data = {
+ .mode = LM3530_BL_MODE_MANUAL,
+ .als_input_mode = LM3530_INPUT_ALS1,
+ .max_current = LM3530_FS_CURR_26mA,
+ .pwm_pol_hi = true,
+ .als_avrg_time = LM3530_ALS_AVRG_TIME_4096ms,
+ .brt_ramp_law = 1, /* Linear */
+ .brt_ramp_fall = LM3530_RAMP_TIME_260ms,
+ .brt_ramp_rise = LM3530_RAMP_TIME_260ms,
+ .als1_resistor_sel = LM3530_ALS_IMPD_13_53kOhm,
+ .als2_resistor_sel = LM3530_ALS_IMPD_Z,
+ .als_vmin = 730, /* mV */
+ .als_vmax = 1020, /* mV */
+ .brt_val = 0x7F, /* Max brightness */
+ .hw_en_gpio = LM3530_BL_ENABLE_GPIO,
+};
-static pin_cfg_t u5500_pins[] = {
- /* I2C */
- GPIO218_I2C2_SCL | PIN_INPUT_PULLUP,
- GPIO219_I2C2_SDA | PIN_INPUT_PULLUP,
- /* DISPLAY_ENABLE */
- GPIO226_GPIO | PIN_OUTPUT_LOW,
+/* leds-ab5500 */
+static struct ab5500_hvleds_platform_data ab5500_hvleds_data = {
+ .hw_fade = false,
+ .leds = {
+ [0] = {
+ .name = "red",
+ .led_on = true,
+ .led_id = 0,
+ .fade_hi = 255,
+ .fade_lo = 0,
+ .max_current = 10, /* wrong value may damage h/w */
+ },
+ [1] = {
+ .name = "green",
+ .led_on = true,
+ .led_id = 1,
+ .fade_hi = 255,
+ .fade_lo = 0,
+ .max_current = 10, /* wrong value may damage h/w */
+ },
+		[2] = {
+ .name = "blue",
+ .led_on = true,
+ .led_id = 2,
+ .fade_hi = 255,
+ .fade_lo = 0,
+ .max_current = 10, /* wrong value may damage h/w */
+ },
+ },
+};
- /* Backlight Enbale */
- GPIO224_GPIO | PIN_OUTPUT_HIGH,
+static struct ab5500_ponkey_platform_data ab5500_ponkey_data = {
+	/*
+	 * Shutdown time in seconds. Can be set to
+	 * 10 s, 5 s or 0 s (disabled).
+	 */
+ .shutdown_secs = 10,
};
+
/*
* I2C
*/
-#define U5500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, _sm) \
+#define U5500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, t_out, _sm) \
static struct nmk_i2c_controller u5500_i2c##id##_data = { \
/* \
* slave data setup time, which is \
@@ -62,31 +176,35 @@ static struct nmk_i2c_controller u5500_i2c##id##_data = { \
.rft = _rft, \
/* std. mode operation */ \
.clk_freq = clk, \
+ /* Slave response timeout(ms) */\
+ .timeout = t_out, \
.sm = _sm, \
}
+
/*
- * The board uses TODO <3> i2c controllers, initialize all of
+ * The board uses 3 i2c controllers, initialize all of
* them with slave data setup time of 250 ns,
* Tx & Rx FIFO threshold values as 1 and standard
* mode of operation
*/
-U5500_I2C_CONTROLLER(2, 0xe, 1, 1, 400000, I2C_FREQ_MODE_FAST);
+U5500_I2C_CONTROLLER(1, 0xe, 1, 10, 400000, 200, I2C_FREQ_MODE_FAST);
+U5500_I2C_CONTROLLER(2, 0xe, 1, 10, 400000, 200, I2C_FREQ_MODE_FAST);
+U5500_I2C_CONTROLLER(3, 0xe, 1, 10, 400000, 200, I2C_FREQ_MODE_FAST);
-static struct lm3530_platform_data u5500_als_platform_data = {
- .mode = LM3530_BL_MODE_MANUAL,
- .als_input_mode = LM3530_INPUT_ALS1,
- .max_current = LM3530_FS_CURR_26mA,
- .pwm_pol_hi = true,
- .als_avrg_time = LM3530_ALS_AVRG_TIME_512ms,
- .brt_ramp_law = 1, /* Linear */
- .brt_ramp_fall = LM3530_RAMP_TIME_8s,
- .brt_ramp_rise = LM3530_RAMP_TIME_8s,
- .als1_resistor_sel = LM3530_ALS_IMPD_13_53kOhm,
- .als2_resistor_sel = LM3530_ALS_IMPD_Z,
- .als_vmin = 730, /* mV */
- .als_vmax = 1020, /* mV */
- .brt_val = 0x7F, /* Max brightness */
+static struct i2c_board_info __initdata u5500_i2c2_sensor_devices[] = {
+#ifdef CONFIG_SENSORS_LSM303DLH
+ {
+ /* LSM303DLHC Accelerometer */
+ I2C_BOARD_INFO("lsm303dlhc_a", 0x19),
+ .platform_data = &lsm303dlh_pdata,
+ },
+ {
+ /* LSM303DLH Magnetometer */
+ I2C_BOARD_INFO("lsm303dlh_m", 0x1E),
+ .platform_data = &lsm303dlh_pdata,
+ },
+#endif
};
static struct i2c_board_info __initdata u5500_i2c2_devices[] = {
@@ -95,54 +213,532 @@ static struct i2c_board_info __initdata u5500_i2c2_devices[] = {
I2C_BOARD_INFO("lm3530-led", 0x36),
.platform_data = &u5500_als_platform_data,
},
+ {
+ I2C_BOARD_INFO("av8100", 0x70),
+ .platform_data = &av8100_plat_data,
+ },
};
-static void __init u5500_i2c_init(void)
+/*
+ * Keypad
+ */
+
+#define ROW_PIN_I0 128
+#define ROW_PIN_I1 130
+#define ROW_PIN_I2 132
+#define ROW_PIN_I3 134
+#define COL_PIN_O4 137
+#define COL_PIN_O5 139
+
+static int db5500_kp_rows[] = {
+ ROW_PIN_I0, ROW_PIN_I1, ROW_PIN_I2, ROW_PIN_I3,
+};
+
+static int db5500_kp_cols[] = {
+ COL_PIN_O4, COL_PIN_O5,
+};
+
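+/* Set once the keypad row GPIOs have been requested. */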
+static bool db5500_config;
+static int db5500_set_gpio_row(int gpio)
+{
+ int ret = -1;
+
+ if (!db5500_config) {
+ ret = gpio_request(gpio, "db5500_kpd");
+ if (ret < 0) {
+ pr_err("db5500_set_gpio_row: gpio request failed\n");
+ return ret;
+ }
+ }
+
+ ret = gpio_direction_output(gpio, 1);
+ if (ret < 0) {
+ pr_err("db5500_set_gpio_row: gpio direction failed\n");
+ gpio_free(gpio);
+ }
+
+ return ret;
+}
+
+static int db5500_kp_init(void)
+{
+ struct ux500_pins *pins;
+ int ret, i;
+
+ pins = ux500_pins_get("db5500_kp");
+ if (pins)
+ ux500_pins_enable(pins);
+
+ for (i = 0; i < ARRAY_SIZE(db5500_kp_rows); i++) {
+ ret = db5500_set_gpio_row(db5500_kp_rows[i]);
+ if (ret < 0) {
+ pr_err("db5500_kp_init: failed init\n");
+ return ret;
+ }
+ }
+
+ if (!db5500_config)
+ db5500_config = true;
+
+ return 0;
+}
+
+static int db5500_kp_exit(void)
+{
+ struct ux500_pins *pins;
+
+ pins = ux500_pins_get("db5500_kp");
+ if (pins)
+ ux500_pins_disable(pins);
+
+ return 0;
+}
+
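+/* Keymap entries are KEY(row, column, keycode). */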
+static const unsigned int u5500_keymap[] = {
+ KEY(4, 0, KEY_CAMERA), /* Camera2 */
+ KEY(4, 1, KEY_CAMERA_FOCUS), /* Camera1 */
+ KEY(4, 2, KEY_MENU),
+ KEY(4, 3, KEY_BACK),
+ KEY(5, 2, KEY_SEND),
+ KEY(5, 3, KEY_HOME),
+#ifndef CONFIG_INPUT_AB8500_PONKEY
+ /* AB5500 ONSWa is also hooked up to this key */
+ KEY(8, 0, KEY_END),
+#endif
+ KEY(8, 1, KEY_VOLUMEUP),
+ KEY(8, 2, KEY_VOLUMEDOWN),
+};
+
+static struct matrix_keymap_data u5500_keymap_data = {
+ .keymap = u5500_keymap,
+ .keymap_size = ARRAY_SIZE(u5500_keymap),
+};
+
+static struct db5500_keypad_platform_data u5500_keypad_board = {
+ .init = db5500_kp_init,
+ .exit = db5500_kp_exit,
+ .gpio_input_pins = db5500_kp_rows,
+ .gpio_output_pins = db5500_kp_cols,
+ .keymap_data = &u5500_keymap_data,
+ .no_autorepeat = true,
+ .krow = ARRAY_SIZE(db5500_kp_rows),
+ .kcol = ARRAY_SIZE(db5500_kp_cols),
+ .debounce_ms = 40, /* milliseconds */
+ .switch_delay = 200, /* in jiffies */
+};
+
+#ifdef CONFIG_UX500_SOC_DBX500
+/*
+ * MSP
+ */
+
+#define MSP_DMA(num, eventline) \
+static struct stedma40_chan_cfg msp##num##_dma_rx = { \
+ .high_priority = true, \
+ .dir = STEDMA40_PERIPH_TO_MEM, \
+ .src_dev_type = eventline##_RX, \
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY, \
+ .src_info.psize = STEDMA40_PSIZE_LOG_4, \
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4, \
+}; \
+ \
+static struct stedma40_chan_cfg msp##num##_dma_tx = { \
+ .high_priority = true, \
+ .dir = STEDMA40_MEM_TO_PERIPH, \
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY, \
+ .dst_dev_type = eventline##_TX, \
+ .src_info.psize = STEDMA40_PSIZE_LOG_4, \
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4, \
+}
+
+MSP_DMA(0, DB5500_DMA_DEV9_MSP0);
+MSP_DMA(1, DB5500_DMA_DEV10_MSP1);
+MSP_DMA(2, DB5500_DMA_DEV11_MSP2);
+
+static struct msp_i2s_platform_data u5500_msp0_data = {
+ .id = MSP_0_I2S_CONTROLLER,
+ .msp_i2s_dma_rx = &msp0_dma_rx,
+ .msp_i2s_dma_tx = &msp0_dma_tx,
+};
+
+static struct msp_i2s_platform_data u5500_msp1_data = {
+ .id = MSP_1_I2S_CONTROLLER,
+ .msp_i2s_dma_rx = &msp1_dma_rx,
+ .msp_i2s_dma_tx = &msp1_dma_tx,
+};
+
+static struct msp_i2s_platform_data u5500_msp2_data = {
+ .id = MSP_2_I2S_CONTROLLER,
+ .msp_i2s_dma_rx = &msp2_dma_rx,
+ .msp_i2s_dma_tx = &msp2_dma_tx,
+};
+
+static void __init u5500_msp_init(void)
+{
+ db5500_add_msp0_i2s(&u5500_msp0_data);
+ db5500_add_msp1_i2s(&u5500_msp1_data);
+ db5500_add_msp2_i2s(&u5500_msp2_data);
+}
+#else
+static void __init u5500_msp_init(void)
{
- db5500_add_i2c2(&u5500_i2c2_data);
- i2c_register_board_info(2, ARRAY_AND_SIZE(u5500_i2c2_devices));
}
+#endif
+
+/*
+ * SPI
+ */
+
+static struct pl022_ssp_controller u5500_spi3_data = {
+ .bus_id = 1,
+ .num_chipselect = 4, /* 3 possible CS lines + 1 for tests */
+};
+
+static void __init u5500_spi_init(void)
+{
+ db5500_add_spi3(&u5500_spi3_data);
+}
+
+static struct resource ab5500_resources[] = {
+ [0] = {
+ .start = IRQ_DB5500_PRCMU_ABB,
+ .end = IRQ_DB5500_PRCMU_ABB,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+#ifdef CONFIG_INPUT_AB5500_ACCDET
+static struct abx500_accdet_platform_data ab5500_accdet_pdata = {
+ .btn_keycode = KEY_MEDIA,
+ .accdet1_dbth = ACCDET1_TH_300mV | ACCDET1_DB_10ms,
+ .accdet2122_th = ACCDET21_TH_300mV | ACCDET22_TH_300mV,
+ .is_detection_inverted = false,
+};
+#endif
static struct ab5500_platform_data ab5500_plf_data = {
.irq = {
- .base = 0,
- .count = 0,
+ .base = IRQ_AB5500_BASE,
+ .count = AB5500_NR_IRQS,
},
- .init_settings = NULL,
- .init_settings_sz = 0,
- .pm_power_off = false,
+ .pm_power_off = true,
+ .regulator = &u5500_ab5500_regulator_data,
+#ifdef CONFIG_INPUT_AB5500_ACCDET
+ .dev_data[AB5500_DEVID_ACCDET] = &ab5500_accdet_pdata,
+ .dev_data_sz[AB5500_DEVID_ACCDET] = sizeof(ab5500_accdet_pdata),
+#endif
+ .dev_data[AB5500_DEVID_LEDS] = &ab5500_hvleds_data,
+ .dev_data_sz[AB5500_DEVID_LEDS] = sizeof(ab5500_hvleds_data),
+ .init_settings = (struct abx500_init_settings[]){
+ {
+ .bank = 0x3,
+ .reg = 0x17,
+ .setting = 0x0F,
+ },
+ {
+ .bank = 0x3,
+ .reg = 0x18,
+ .setting = 0x10,
+ },
+ },
+ .init_settings_sz = 2,
+#if defined(CONFIG_AB5500_BM)
+ .dev_data[AB5500_DEVID_CHARGALG] = &abx500_bm_pt_data,
+ .dev_data_sz[AB5500_DEVID_CHARGALG] = sizeof(abx500_bm_pt_data),
+ .dev_data[AB5500_DEVID_CHARGER] = &abx500_bm_pt_data,
+ .dev_data_sz[AB5500_DEVID_CHARGER] = sizeof(abx500_bm_pt_data),
+ .dev_data[AB5500_DEVID_FG] = &abx500_bm_pt_data,
+ .dev_data_sz[AB5500_DEVID_FG] = sizeof(abx500_bm_pt_data),
+ .dev_data[AB5500_DEVID_BTEMP] = &abx500_bm_pt_data,
+ .dev_data_sz[AB5500_DEVID_BTEMP] = sizeof(abx500_bm_pt_data),
+#endif
+ .dev_data[AB5500_DEVID_ONSWA] = &ab5500_ponkey_data,
+ .dev_data_sz[AB5500_DEVID_ONSWA] = sizeof(ab5500_ponkey_data),
+ .dev_data[AB5500_DEVID_USB] = &abx500_usbgpio_plat_data,
+ .dev_data_sz[AB5500_DEVID_USB] = sizeof(abx500_usbgpio_plat_data),
};
-static struct platform_device ab5500_device = {
+static struct platform_device u5500_ab5500_device = {
.name = "ab5500-core",
.id = 0,
.dev = {
.platform_data = &ab5500_plf_data,
},
+ .num_resources = 1,
+ .resource = ab5500_resources,
+};
+
+static struct platform_device u5500_mloader_device = {
+ .name = "db5500_mloader",
+ .id = -1,
.num_resources = 0,
};
+static struct cryp_platform_data u5500_cryp1_platform_data = {
+ .mem_to_engine = {
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV48_CRYPTO1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+ },
+ .engine_to_mem = {
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB5500_DMA_DEV48_CRYPTO1_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_4,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_4,
+ }
+};
+
+static struct stedma40_chan_cfg u5500_hash_dma_cfg_tx = {
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB5500_DMA_DEV50_HASH1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+ .mode = STEDMA40_MODE_LOGICAL,
+ .src_info.psize = STEDMA40_PSIZE_LOG_16,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_16,
+};
+
+static struct hash_platform_data u5500_hash1_platform_data = {
+ .mem_to_engine = &u5500_hash_dma_cfg_tx,
+ .dma_filter = stedma40_filter,
+};
+
+/* modem crash dump detection driver data */
+static struct resource mcdd_resources[] = {
+ {
+ .name = "mcdd_intreset_addr",
+ .start = U5500_INTCON_MBOX1_INT_RESET_ADDR,
+ .end = U5500_INTCON_MBOX1_INT_RESET_ADDR,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "mcdd_mbox_irq",
+ .start = MBOX_PAIR1_VIRT_IRQ,
+ .end = MBOX_PAIR1_VIRT_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+struct platform_device u5500_mcdd_device = {
+ .name = "u5500-mcdd-modem",
+ .id = 0,
+ .resource = mcdd_resources,
+ .num_resources = ARRAY_SIZE(mcdd_resources),
+};
+
static struct platform_device *u5500_platform_devices[] __initdata = {
- &ab5500_device,
+ &u5500_ab5500_device,
+#ifdef CONFIG_FB_MCDE
+ &u5500_mcde_device,
+#endif
+ &ux500_hwmem_device,
+ &u5500_b2r2_device,
+ &u5500_mloader_device,
+#ifdef CONFIG_U5500_MMIO
+ &u5500_mmio_device,
+#endif
+ &u5500_thsens_device,
+ &u5500_mcdd_device,
};
+#define BACKUPRAM_ROM_DEBUG_ADDR 0xFFC
+#define MMC_BLOCK_ID 0x20
+
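+/* Determine the boot MMC device: returns 2 for MMC2, 0 for MMC0. */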
+int u5500_get_boot_mmc(void)
+{
+ unsigned int mmcblk;
+
+ mmcblk = readl(__io_address(U5500_BACKUPRAM1_BASE) +
+ BACKUPRAM_ROM_DEBUG_ADDR);
+
+ if (mmcblk & MMC_BLOCK_ID)
+ return 2;
+
+ return 0;
+}
+
+/*
+ * R3A (and presumably, future) S5500 boards have different regulator
+ * assignments from the earlier boards. Since there's no clean way to identify
+ * the board revision from hardware, we use the fact that R2A boots from MMC0
+ * (via peripheral boot) and R3A boots from MMC2 to distinguish them.
+ */
+bool u5500_board_is_pre_r3a(void)
+{
+ if (!cpu_is_u5500v20())
+ return false;
+
+ if (!u5500_board_is_s5500())
+ return true;
+
+ if (u5500_get_boot_mmc() == 2)
+ return false;
+
+ return true;
+}
+
+/*
+ * Check whether this is the small S5500 board.
+ * GPIO0 is high on the S5500.
+ */
+bool u5500_board_is_s5500(void)
+{
+ static bool s5500;
+ static bool once;
+ int err, val;
+
+ if (once)
+ return s5500;
+
+ err = gpio_request(GPIO_BOARD_VERSION, "Board Version");
+ if (err) {
+ pr_err("Error %d while requesting GPIO for Board Version\n",
+ err);
+		return false;
+ }
+
+ err = gpio_direction_input(GPIO_BOARD_VERSION);
+ if (err) {
+ pr_err("Error %d while setting GPIO for Board Version"
+ "output mode\n", err);
+ return err;
+ }
+
+ val = gpio_get_value(GPIO_BOARD_VERSION);
+
+ gpio_free(GPIO_BOARD_VERSION);
+
+ s5500 = val;
+ once = true;
+
+ return val;
+}
+
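+/* Toggle the crash indicator GPIO from the panic_blink hook. */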
+static long u5500_panic_blink(int state)
+{
+ gpio_direction_output(GPIO_SW_CRASH_INDICATOR, state);
+ return 0;
+}
+
+#define PRCC_K_SOFTRST_SET 0x18
+#define PRCC_K_SOFTRST_CLEAR 0x1C
+/* pl011 reset */
+static void ux500_uart3_reset(void)
+{
+ void __iomem *prcc_rst_set, *prcc_rst_clr;
+
+ prcc_rst_set = __io_address(U5500_CLKRST5_BASE +
+ PRCC_K_SOFTRST_SET);
+ prcc_rst_clr = __io_address(U5500_CLKRST5_BASE +
+ PRCC_K_SOFTRST_CLEAR);
+
+ /*
+	 * Activate the soft reset via PRCC_K_SOFTRST_CLEAR.
+	 *
+	 * Since we are dealing with an IP register lockup, a delay
+	 * is added to make sure the IP really gets reset and that
+	 * the reset pulse is held for more than one clock cycle.
+ */
+ writel((readl(prcc_rst_clr) | 0x08), prcc_rst_clr);
+ udelay(1);
+
+ /* Release soft reset PRCC_K_SOFTRST_SET */
+ writel((readl(prcc_rst_set) | 0x08), prcc_rst_set);
+ udelay(1);
+}
+
+static struct amba_pl011_data uart3_plat = {
+ .reset = ux500_uart3_reset,
+};
+
+static void __init u5500_i2c_init(void)
+{
+ db5500_add_i2c1(&u5500_i2c1_data);
+ db5500_add_i2c2(&u5500_i2c2_data);
+ db5500_add_i2c3(&u5500_i2c3_data);
+
+ i2c_register_board_info(2, ARRAY_AND_SIZE(u5500_i2c2_devices));
+ i2c_register_board_info(2, ARRAY_AND_SIZE(u5500_i2c2_sensor_devices));
+}
+
static void __init u5500_uart_init(void)
{
db5500_add_uart0(NULL);
db5500_add_uart1(NULL);
db5500_add_uart2(NULL);
+ db5500_add_uart3(&uart3_plat);
+}
+
+static void __init u5500_cryp1_hash1_init(void)
+{
+ db5500_add_cryp1(&u5500_cryp1_platform_data);
+ db5500_add_hash1(&u5500_hash1_platform_data);
+}
+
+static int __init u5500_accel_sensor_init(void)
+{
+ int status;
+ union i2c_smbus_data data;
+ struct i2c_adapter *i2c2;
+
+ i2c2 = i2c_get_adapter(2);
+ if (!i2c2) {
+ pr_err("failed to get i2c adapter\n");
+		return -ENODEV;
+ }
+	status = i2c_smbus_xfer(i2c2, 0x19, 0,
+			I2C_SMBUS_READ, 0x0F,
+ I2C_SMBUS_BYTE_DATA, &data);
+ if (status < 0)
+ lsm303dlh_pdata.chip_id = 0;
+ else
+ lsm303dlh_pdata.chip_id = data.byte;
+	i2c_put_adapter(i2c2);
+
+	return 0;
}
+module_init(u5500_accel_sensor_init);
static void __init u5500_init_machine(void)
{
u5500_init_devices();
- nmk_config_pins(u5500_pins, ARRAY_SIZE(u5500_pins));
+ u5500_regulators_init();
+ u5500_pins_init();
+
u5500_i2c_init();
+ u5500_msp_init();
+ u5500_spi_init();
+
u5500_sdi_init();
u5500_uart_init();
+ u5500_wlan_init();
+
+ db5500_add_keypad(&u5500_keypad_board);
+ u5500_cryp1_hash1_init();
+
+#ifdef CONFIG_TOUCHSCREEN_CYTTSP_SPI
+ u5500_cyttsp_init();
+#endif
+
platform_add_devices(u5500_platform_devices,
ARRAY_SIZE(u5500_platform_devices));
+
+ if (!gpio_request_one(GPIO_SW_CRASH_INDICATOR, GPIOF_OUT_INIT_LOW,
+ "SW_CRASH_INDICATOR"))
+ panic_blink = u5500_panic_blink;
}
MACHINE_START(U5500, "ST-Ericsson U5500 Platform")
@@ -152,4 +748,14 @@ MACHINE_START(U5500, "ST-Ericsson U5500 Platform")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = u5500_init_machine,
+ .restart = ux500_restart,
+MACHINE_END
+
+MACHINE_START(B5500, "ST-Ericsson U5500 Big Board")
+ .atag_offset = 0x00000100,
+ .map_io = u5500_map_io,
+ .init_irq = ux500_init_irq,
+ .timer = &ux500_timer,
+ .init_machine = u5500_init_machine,
+ .restart = ux500_restart,
MACHINE_END
diff --git a/arch/arm/mach-ux500/board-u5500.h b/arch/arm/mach-ux500/board-u5500.h
new file mode 100644
index 00000000000..a7e4bbbc714
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __BOARD_U5500_H
+#define __BOARD_U5500_H
+
+#define GPIO_SDMMC_CD 180
+#define GPIO_MMC_CARD_CTRL 227
+#define GPIO_MMC_CARD_VSEL 185
+#define GPIO_BOARD_VERSION 0
+#define GPIO_PRIMARY_CAM_XSHUTDOWN 1
+#define GPIO_SECONDARY_CAM_XSHUTDOWN 2
+#define GPIO_CAMERA_PMIC_EN 212
+#define GPIO_SW_CRASH_INDICATOR 214
+
+#define CYPRESS_TOUCH_INT_PIN 179
+#define CYPRESS_TOUCH_RST_GPIO 135
+#define CYPRESS_SLAVE_SELECT_GPIO 186
+
+#define LM3530_BL_ENABLE_GPIO 224
+
+struct ab5500_regulator_platform_data;
+extern struct ab5500_regulator_platform_data u5500_ab5500_regulator_data;
+
+extern void u5500_pins_init(void);
+extern void __init u5500_regulators_init(void);
+void u5500_cyttsp_init(void);
+bool u5500_board_is_s5500(void);
+int u5500_get_boot_mmc(void);
+bool u5500_board_is_pre_r3a(void);
+
+#endif
diff --git a/arch/arm/mach-ux500/board-ux500-usb.h b/arch/arm/mach-ux500/board-ux500-usb.h
new file mode 100644
index 00000000000..6b35a181c0a
--- /dev/null
+++ b/arch/arm/mach-ux500/board-ux500-usb.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Saketh Ram Bommisetti <sakethram.bommisetti@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __BOARD_UX500_USB_H
+#define __BOARD_UX500_USB_H
+
+extern struct abx500_usbgpio_platform_data abx500_usbgpio_plat_data;
+
+#endif
diff --git a/arch/arm/mach-ux500/clock-db5500.c b/arch/arm/mach-ux500/clock-db5500.c
new file mode 100644
index 00000000000..7e8805f595c
--- /dev/null
+++ b/arch/arm/mach-ux500/clock-db5500.c
@@ -0,0 +1,743 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson SA
+ * Copyright (C) 2009 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <plat/pincfg.h>
+
+#include <mach/hardware.h>
+
+#include "clock.h"
+#include "pins-db5500.h"
+
+static DEFINE_MUTEX(sysclk_mutex);
+static DEFINE_MUTEX(pll_mutex);
+
+/* SysClk operations. */
+static int sysclk_enable(struct clk *clk)
+{
+ return prcmu_request_clock(PRCMU_SYSCLK, true);
+}
+
+static void sysclk_disable(struct clk *clk)
+{
+	prcmu_request_clock(PRCMU_SYSCLK, false);
+}
+
+static struct clkops sysclk_ops = {
+ .enable = sysclk_enable,
+ .disable = sysclk_disable,
+};
+
+static int rtc_clk_enable(struct clk *clk)
+{
+ return ab5500_clock_rtc_enable(clk->cg_sel, true);
+}
+
+static void rtc_clk_disable(struct clk *clk)
+{
+ int ret = ab5500_clock_rtc_enable(clk->cg_sel, false);
+
+ if (ret)
+ pr_err("clock: %s failed to disable: %d\n", clk->name, ret);
+}
+
+static struct clkops rtc_clk_ops = {
+ .enable = rtc_clk_enable,
+ .disable = rtc_clk_disable,
+};
+
+static pin_cfg_t clkout0_pins[] = {
+ GPIO161_CLKOUT_0 | PIN_OUTPUT_LOW,
+};
+
+static pin_cfg_t clkout1_pins[] = {
+ GPIO162_CLKOUT_1 | PIN_OUTPUT_LOW,
+};
+
+static int clkout0_enable(struct clk *clk)
+{
+ return nmk_config_pins(clkout0_pins, ARRAY_SIZE(clkout0_pins));
+}
+
+static void clkout0_disable(struct clk *clk)
+{
+ int r;
+
+ r = nmk_config_pins_sleep(clkout0_pins, ARRAY_SIZE(clkout0_pins));
+ if (!r)
+ return;
+
+ pr_err("clock: failed to disable %s.\n", clk->name);
+}
+
+static int clkout1_enable(struct clk *clk)
+{
+	return nmk_config_pins(clkout1_pins, ARRAY_SIZE(clkout1_pins));
+}
+
+static void clkout1_disable(struct clk *clk)
+{
+ int r;
+
+ r = nmk_config_pins_sleep(clkout1_pins, ARRAY_SIZE(clkout1_pins));
+ if (!r)
+ return;
+
+ pr_err("clock: failed to disable %s.\n", clk->name);
+}
+
+static struct clkops clkout0_ops = {
+ .enable = clkout0_enable,
+ .disable = clkout0_disable,
+};
+
+static struct clkops clkout1_ops = {
+ .enable = clkout1_enable,
+ .disable = clkout1_disable,
+};
+
+#define PRCM_CLKOCR2 0x58C
+#define PRCM_CLKOCR2_REFCLK (1 << 0)
+#define PRCM_CLKOCR2_STATIC0 (1 << 2)
+
+static int clkout2_enable(struct clk *clk)
+{
+ prcmu_write(PRCM_CLKOCR2, PRCM_CLKOCR2_REFCLK);
+ return 0;
+}
+
+static void clkout2_disable(struct clk *clk)
+{
+ prcmu_write(PRCM_CLKOCR2, PRCM_CLKOCR2_STATIC0);
+}
+
+static struct clkops clkout2_ops = {
+ .enable = clkout2_enable,
+ .disable = clkout2_disable,
+};
+
+#define DEF_PER1_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U5500_CLKRST1_BASE, _cg_bit, &per1clk)
+#define DEF_PER2_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U5500_CLKRST2_BASE, _cg_bit, &per2clk)
+#define DEF_PER3_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U5500_CLKRST3_BASE, _cg_bit, &per3clk)
+#define DEF_PER5_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U5500_CLKRST5_BASE, _cg_bit, &per5clk)
+#define DEF_PER6_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U5500_CLKRST6_BASE, _cg_bit, &per6clk)
+
+#define DEF_PER1_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U5500_CLKRST1_BASE, _cg_bit, _parent, &per1clk)
+#define DEF_PER2_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U5500_CLKRST2_BASE, _cg_bit, _parent, &per2clk)
+#define DEF_PER3_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U5500_CLKRST3_BASE, _cg_bit, _parent, &per3clk)
+#define DEF_PER5_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U5500_CLKRST5_BASE, _cg_bit, _parent, &per5clk)
+#define DEF_PER6_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U5500_CLKRST6_BASE, _cg_bit, _parent, &per6clk)
+
+/* Clock sources. */
+
+static struct clk soc0_pll = {
+ .name = "soc0_pll",
+ .ops = &prcmu_clk_ops,
+ .mutex = &pll_mutex,
+ .cg_sel = PRCMU_PLLSOC0,
+};
+
+static struct clk soc1_pll = {
+ .name = "soc1_pll",
+ .ops = &prcmu_clk_ops,
+ .mutex = &pll_mutex,
+ .cg_sel = PRCMU_PLLSOC1,
+};
+
+static struct clk ddr_pll = {
+ .name = "ddr_pll",
+ .ops = &prcmu_clk_ops,
+ .mutex = &pll_mutex,
+ .cg_sel = PRCMU_PLLDDR,
+};
+
+static struct clk sysclk = {
+ .name = "sysclk",
+ .ops = &sysclk_ops,
+ .rate = 26000000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk rtc32k = {
+ .name = "rtc32k",
+ .rate = 32768,
+};
+
+static struct clk kbd32k = {
+ .name = "kbd32k",
+ .rate = 32768,
+};
+
+static struct clk clk_dummy = {
+ .name = "dummy",
+};
+
+static struct clk rtc_clk1 = {
+ .name = "rtc_clk1",
+ .ops = &rtc_clk_ops,
+ .cg_sel = 1,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk clkout0 = {
+ .name = "clkout0",
+ .ops = &clkout0_ops,
+ .parent = &sysclk,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk clkout1 = {
+ .name = "clkout1",
+ .ops = &clkout1_ops,
+ .parent = &sysclk,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk clkout2 = {
+ .name = "clkout2",
+ .ops = &clkout2_ops,
+ .parent = &sysclk,
+ .mutex = &sysclk_mutex,
+};
+
+static DEFINE_MUTEX(parented_prcmu_mutex);
+
+#define DEF_PRCMU_CLK_PARENT(_name, _cg_sel, _rate, _parent) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcmu_clk_ops, \
+ .cg_sel = _cg_sel, \
+ .rate = _rate, \
+ .parent = _parent, \
+ .mutex = &parented_prcmu_mutex, \
+ }
+
+static DEFINE_MUTEX(prcmu_client_mutex);
+
+#define DEF_PRCMU_CLIENT_CLK(_name, _cg_sel, _rate) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcmu_clk_ops, \
+ .cg_sel = _cg_sel, \
+ .rate = _rate, \
+ .mutex = &prcmu_client_mutex, \
+ }
+
+static DEF_PRCMU_CLK(dmaclk, PRCMU_DMACLK, 200000000);
+static DEF_PRCMU_CLK(b2r2clk, PRCMU_B2R2CLK, 200000000);
+static DEF_PRCMU_CLK(sgaclk, PRCMU_SGACLK, 199900000);
+static DEF_PRCMU_CLK(uartclk, PRCMU_UARTCLK, 36360000);
+static DEF_PRCMU_CLK(msp02clk, PRCMU_MSP02CLK, 13000000);
+static DEF_PRCMU_CLIENT_CLK(msp1clk, PRCMU_MSP1CLK, 26000000);
+static DEF_PRCMU_CLIENT_CLK(cdclk, PRCMU_CDCLK, 26000000);
+static DEF_PRCMU_CLK(i2cclk, PRCMU_I2CCLK, 24000000);
+static DEF_PRCMU_CLK_PARENT(irdaclk, PRCMU_IRDACLK, 48000000, &soc1_pll);
+static DEF_PRCMU_CLK_PARENT(irrcclk, PRCMU_IRRCCLK, 48000000, &soc1_pll);
+static DEF_PRCMU_CLK(rngclk, PRCMU_RNGCLK, 26000000);
+static DEF_PRCMU_CLK(pwmclk, PRCMU_PWMCLK, 26000000);
+static DEF_PRCMU_CLK(sdmmcclk, PRCMU_SDMMCCLK, 50000000);
+static DEF_PRCMU_CLK(spare1clk, PRCMU_SPARE1CLK, 50000000);
+static DEF_PRCMU_CLK(per1clk, PRCMU_PER1CLK, 133330000);
+static DEF_PRCMU_CLK(per2clk, PRCMU_PER2CLK, 133330000);
+static DEF_PRCMU_CLK(per3clk, PRCMU_PER3CLK, 133330000);
+static DEF_PRCMU_CLK(per5clk, PRCMU_PER5CLK, 133330000);
+static DEF_PRCMU_CLK(per6clk, PRCMU_PER6CLK, 133330000);
+static DEF_PRCMU_CLK(hdmiclk, PRCMU_HDMICLK, 26000000);
+static DEF_PRCMU_CLK(apeatclk, PRCMU_APEATCLK, 200000000);
+static DEF_PRCMU_CLK(apetraceclk, PRCMU_APETRACECLK, 266000000);
+static DEF_PRCMU_CLK(mcdeclk, PRCMU_MCDECLK, 160000000);
+static DEF_PRCMU_CLK(tvclk, PRCMU_TVCLK, 40000000);
+static DEF_PRCMU_CLK(dsialtclk, PRCMU_DSIALTCLK, 400000000);
+static DEF_PRCMU_CLK(timclk, PRCMU_TIMCLK, 3250000);
+static DEF_PRCMU_CLK_PARENT(svaclk, PRCMU_SVACLK, 156000000, &soc1_pll);
+static DEF_PRCMU_CLK(siaclk, PRCMU_SIACLK, 133330000);
+
+/* PRCC PClocks */
+
+static DEF_PER1_PCLK(0, p1_pclk0);
+static DEF_PER1_PCLK(1, p1_pclk1);
+static DEF_PER1_PCLK(2, p1_pclk2);
+static DEF_PER1_PCLK(3, p1_pclk3);
+static DEF_PER1_PCLK(4, p1_pclk4);
+static DEF_PER1_PCLK(5, p1_pclk5);
+static DEF_PER1_PCLK(6, p1_pclk6);
+
+static DEF_PER2_PCLK(0, p2_pclk0);
+static DEF_PER2_PCLK(1, p2_pclk1);
+
+static DEF_PER3_PCLK(0, p3_pclk0);
+static DEF_PER3_PCLK(1, p3_pclk1);
+static DEF_PER3_PCLK(2, p3_pclk2);
+
+static DEF_PER5_PCLK(0, p5_pclk0);
+static DEF_PER5_PCLK(1, p5_pclk1);
+static DEF_PER5_PCLK(2, p5_pclk2);
+static DEF_PER5_PCLK(3, p5_pclk3);
+static DEF_PER5_PCLK(4, p5_pclk4);
+static DEF_PER5_PCLK(5, p5_pclk5);
+static DEF_PER5_PCLK(6, p5_pclk6);
+static DEF_PER5_PCLK(7, p5_pclk7);
+static DEF_PER5_PCLK(8, p5_pclk8);
+static DEF_PER5_PCLK(9, p5_pclk9);
+static DEF_PER5_PCLK(10, p5_pclk10);
+static DEF_PER5_PCLK(11, p5_pclk11);
+static DEF_PER5_PCLK(12, p5_pclk12);
+static DEF_PER5_PCLK(13, p5_pclk13);
+static DEF_PER5_PCLK(14, p5_pclk14);
+static DEF_PER5_PCLK(15, p5_pclk15);
+
+static DEF_PER6_PCLK(0, p6_pclk0);
+static DEF_PER6_PCLK(1, p6_pclk1);
+static DEF_PER6_PCLK(2, p6_pclk2);
+static DEF_PER6_PCLK(3, p6_pclk3);
+static DEF_PER6_PCLK(4, p6_pclk4);
+static DEF_PER6_PCLK(5, p6_pclk5);
+static DEF_PER6_PCLK(6, p6_pclk6);
+static DEF_PER6_PCLK(7, p6_pclk7);
+
+/* MSP0 */
+static DEF_PER1_KCLK(0, p1_msp0_kclk, &msp02clk);
+static DEF_PER_CLK(p1_msp0_clk, &p1_pclk0, &p1_msp0_kclk);
+
+/* SDI0 */
+static DEF_PER1_KCLK(1, p1_sdi0_kclk, &spare1clk); /* &sdmmcclk on v1 */
+static DEF_PER_CLK(p1_sdi0_clk, &p1_pclk1, &p1_sdi0_kclk);
+
+/* SDI2 */
+static DEF_PER1_KCLK(2, p1_sdi2_kclk, &sdmmcclk);
+static DEF_PER_CLK(p1_sdi2_clk, &p1_pclk2, &p1_sdi2_kclk);
+
+/* UART0 */
+static DEF_PER1_KCLK(3, p1_uart0_kclk, &uartclk);
+static DEF_PER_CLK(p1_uart0_clk, &p1_pclk3, &p1_uart0_kclk);
+
+/* I2C1 */
+static DEF_PER1_KCLK(4, p1_i2c1_kclk, &i2cclk);
+static DEF_PER_CLK(p1_i2c1_clk, &p1_pclk4, &p1_i2c1_kclk);
+
+/* PWM */
+static DEF_PER3_KCLK(0, p3_pwm_kclk, &pwmclk);
+static DEF_PER_CLK(p3_pwm_clk, &p3_pclk1, &p3_pwm_kclk);
+
+/* KEYPAD */
+static DEF_PER3_KCLK(0, p3_keypad_kclk, &kbd32k);
+static DEF_PER_CLK(p3_keypad_clk, &p3_pclk0, &p3_keypad_kclk);
+
+/* MSP2 */
+static DEF_PER5_KCLK(0, p5_msp2_kclk, &msp02clk);
+static DEF_PER_CLK(p5_msp2_clk, &p5_pclk0, &p5_msp2_kclk);
+
+/* UART1 */
+static DEF_PER5_KCLK(1, p5_uart1_kclk, &uartclk);
+static DEF_PER_CLK(p5_uart1_clk, &p5_pclk1, &p5_uart1_kclk);
+
+/* UART2 */
+static DEF_PER5_KCLK(2, p5_uart2_kclk, &uartclk);
+static DEF_PER_CLK(p5_uart2_clk, &p5_pclk2, &p5_uart2_kclk);
+
+/* UART3 */
+static DEF_PER5_KCLK(3, p5_uart3_kclk, &uartclk);
+static DEF_PER_CLK(p5_uart3_clk, &p5_pclk3, &p5_uart3_kclk);
+
+/* SDI1 */
+static DEF_PER5_KCLK(4, p5_sdi1_kclk, &sdmmcclk);
+static DEF_PER_CLK(p5_sdi1_clk, &p5_pclk4, &p5_sdi1_kclk);
+
+/* SDI3 */
+static DEF_PER5_KCLK(5, p5_sdi3_kclk, &sdmmcclk);
+static DEF_PER_CLK(p5_sdi3_clk, &p5_pclk5, &p5_sdi3_kclk);
+
+/* SDI4 */
+static DEF_PER5_KCLK(6, p5_sdi4_kclk, &sdmmcclk);
+static DEF_PER_CLK(p5_sdi4_clk, &p5_pclk6, &p5_sdi4_kclk);
+
+/* I2C2 */
+static DEF_PER5_KCLK(7, p5_i2c2_kclk, &i2cclk);
+static DEF_PER_CLK(p5_i2c2_clk, &p5_pclk7, &p5_i2c2_kclk);
+
+/* I2C3 */
+static DEF_PER5_KCLK(8, p5_i2c3_kclk, &i2cclk);
+static DEF_PER_CLK(p5_i2c3_clk, &p5_pclk8, &p5_i2c3_kclk);
+
+/* IRRC */
+static DEF_PER5_KCLK(9, p5_irrc_kclk, &irrcclk);
+static DEF_PER_CLK(p5_irrc_clk, &p5_pclk9, &p5_irrc_kclk);
+
+/* IRDA */
+static DEF_PER5_KCLK(10, p5_irda_kclk, &irdaclk);
+static DEF_PER_CLK(p5_irda_clk, &p5_pclk10, &p5_irda_kclk);
+
+/* RNG */
+static DEF_PER6_KCLK(0, p6_rng_kclk, &rngclk);
+static DEF_PER_CLK(p6_rng_clk, &p6_pclk0, &p6_rng_kclk);
+
+/* MTU:S */
+
+/* MTU0 */
+static DEF_PER_CLK(p6_mtu0_clk, &p6_pclk6, &timclk);
+
+/* MTU1 */
+static DEF_PER_CLK(p6_mtu1_clk, &p6_pclk7, &timclk);
+
+static struct clk *db5500_dbg_clks[] __initdata = {
+ /* Clock sources */
+ &soc0_pll,
+ &soc1_pll,
+ &ddr_pll,
+ &sysclk,
+ &rtc32k,
+
+ /* PRCMU clocks */
+ &sgaclk,
+ &siaclk,
+ &svaclk,
+ &uartclk,
+ &msp02clk,
+ &msp1clk,
+ &cdclk,
+ &i2cclk,
+ &irdaclk,
+ &irrcclk,
+ &sdmmcclk,
+ &spare1clk,
+ &per1clk,
+ &per2clk,
+ &per3clk,
+ &per5clk,
+ &per6clk,
+ &hdmiclk,
+ &apeatclk,
+ &apetraceclk,
+ &mcdeclk,
+ &dsialtclk,
+ &dmaclk,
+ &b2r2clk,
+ &tvclk,
+ &rngclk,
+ &pwmclk,
+
+ /* PRCC clocks */
+ &p1_pclk0,
+ &p1_pclk1,
+ &p1_pclk2,
+ &p1_pclk3,
+ &p1_pclk4,
+ &p1_pclk5,
+ &p1_pclk6,
+
+ &p2_pclk0,
+ &p2_pclk1,
+
+ &p3_pclk0,
+ &p3_pclk1,
+ &p3_pclk2,
+
+ &p5_pclk0,
+ &p5_pclk1,
+ &p5_pclk2,
+ &p5_pclk3,
+ &p5_pclk4,
+ &p5_pclk5,
+ &p5_pclk6,
+ &p5_pclk7,
+ &p5_pclk8,
+ &p5_pclk9,
+ &p5_pclk10,
+ &p5_pclk11,
+ &p5_pclk12,
+ &p5_pclk13,
+ &p5_pclk14,
+ &p5_pclk15,
+
+ &p6_pclk0,
+ &p6_pclk1,
+ &p6_pclk2,
+ &p6_pclk3,
+ &p6_pclk4,
+ &p6_pclk5,
+ &p6_pclk6,
+ &p6_pclk7,
+
+ /* Clock sources */
+ &clkout0,
+ &clkout1,
+ &clkout2,
+ &rtc_clk1,
+};
+
+static struct clk_lookup u8500_common_clock_sources[] = {
+ CLK_LOOKUP(soc0_pll, NULL, "soc0_pll"),
+ CLK_LOOKUP(soc1_pll, NULL, "soc1_pll"),
+ CLK_LOOKUP(ddr_pll, NULL, "ddr_pll"),
+ CLK_LOOKUP(sysclk, NULL, "sysclk"),
+ CLK_LOOKUP(rtc32k, NULL, "clk32k"),
+};
+
+static struct clk_lookup db5500_prcmu_clocks[] = {
+ CLK_LOOKUP(sgaclk, "mali", NULL),
+ CLK_LOOKUP(siaclk, "mmio_camera", "sia"),
+ CLK_LOOKUP(svaclk, "hva", NULL),
+ CLK_LOOKUP(uartclk, "UART", NULL),
+ CLK_LOOKUP(msp02clk, "MSP02", NULL),
+ CLK_LOOKUP(msp1clk, "ux500-msp-i2s.1", NULL),
+ CLK_LOOKUP(cdclk, "cable_detect.0", NULL),
+ CLK_LOOKUP(i2cclk, "I2C", NULL),
+ CLK_LOOKUP(sdmmcclk, "sdmmc", NULL),
+ CLK_LOOKUP(per1clk, "PERIPH1", NULL),
+ CLK_LOOKUP(per2clk, "PERIPH2", NULL),
+ CLK_LOOKUP(per3clk, "PERIPH3", NULL),
+ CLK_LOOKUP(per5clk, "PERIPH5", NULL),
+ CLK_LOOKUP(per6clk, "PERIPH6", NULL),
+ CLK_LOOKUP(hdmiclk, "mcde", "hdmi"),
+ CLK_LOOKUP(apeatclk, "apeat", NULL),
+ CLK_LOOKUP(apetraceclk, "apetrace", NULL),
+ CLK_LOOKUP(mcdeclk, "mcde", NULL),
+ CLK_LOOKUP(mcdeclk, "mcde", "mcde"),
+ CLK_LOOKUP(dmaclk, "dma40.0", NULL),
+ CLK_LOOKUP(b2r2clk, "b2r2", NULL),
+ CLK_LOOKUP(b2r2clk, "b2r2_bus", NULL),
+ CLK_LOOKUP(b2r2clk, "U8500-B2R2.0", NULL),
+ CLK_LOOKUP(tvclk, "tv", NULL),
+ CLK_LOOKUP(tvclk, "mcde", "tv"),
+};
+
+static struct clk_lookup db5500_prcc_clocks[] = {
+ CLK_LOOKUP(p1_msp0_clk, "ux500-msp-i2s.0", NULL),
+ CLK_LOOKUP(p1_sdi0_clk, "sdi0", NULL),
+ CLK_LOOKUP(p1_sdi2_clk, "sdi2", NULL),
+ CLK_LOOKUP(p1_uart0_clk, "uart0", NULL),
+ CLK_LOOKUP(p1_i2c1_clk, "nmk-i2c.1", NULL),
+ CLK_LOOKUP(p1_pclk5, "gpio.0", NULL),
+ CLK_LOOKUP(p1_pclk5, "gpio.1", NULL),
+ CLK_LOOKUP(p1_pclk6, "fsmc", NULL),
+
+ CLK_LOOKUP(p2_pclk0, "musb-ux500.0", "usb"),
+ CLK_LOOKUP(p2_pclk1, "gpio.2", NULL),
+
+ CLK_LOOKUP(p3_keypad_clk, "db5500-keypad", NULL),
+ CLK_LOOKUP(p3_pwm_clk, "pwm", NULL),
+ CLK_LOOKUP(p3_pclk2, "gpio.4", NULL),
+
+ CLK_LOOKUP(p5_msp2_clk, "ux500-msp-i2s.2", NULL),
+ CLK_LOOKUP(p5_uart1_clk, "uart1", NULL),
+ CLK_LOOKUP(p5_uart2_clk, "uart2", NULL),
+ CLK_LOOKUP(p5_uart3_clk, "uart3", NULL),
+ CLK_LOOKUP(p5_sdi1_clk, "sdi1", NULL),
+ CLK_LOOKUP(p5_sdi3_clk, "sdi3", NULL),
+ CLK_LOOKUP(p5_sdi4_clk, "sdi4", NULL),
+ CLK_LOOKUP(p5_i2c2_clk, "nmk-i2c.2", NULL),
+ CLK_LOOKUP(p5_i2c3_clk, "nmk-i2c.3", NULL),
+ CLK_LOOKUP(p5_irrc_clk, "irrc", NULL),
+ CLK_LOOKUP(p5_irda_clk, "irda", NULL),
+ CLK_LOOKUP(p5_pclk11, "spi0", NULL),
+ CLK_LOOKUP(p5_pclk12, "spi1", NULL),
+ CLK_LOOKUP(p5_pclk13, "spi2", NULL),
+ CLK_LOOKUP(p5_pclk14, "spi3", NULL),
+ CLK_LOOKUP(p5_pclk15, "gpio.5", NULL),
+ CLK_LOOKUP(p5_pclk15, "gpio.6", NULL),
+ CLK_LOOKUP(p5_pclk15, "gpio.7", NULL),
+
+ CLK_LOOKUP(p6_rng_clk, "rng", NULL),
+ CLK_LOOKUP(p6_pclk1, "cryp0", NULL),
+ CLK_LOOKUP(p6_pclk2, "hash0", NULL),
+ CLK_LOOKUP(p6_pclk3, "pka", NULL),
+ CLK_LOOKUP(p6_pclk4, "hash1", NULL),
+ CLK_LOOKUP(p6_pclk1, "cryp1", NULL),
+ CLK_LOOKUP(p6_pclk5, "cfgreg", NULL),
+ CLK_LOOKUP(p6_mtu0_clk, "mtu0", NULL),
+ CLK_LOOKUP(p6_mtu1_clk, "mtu1", NULL),
+
+ /*
+ * Dummy clock sets up the GPIOs.
+ */
+ CLK_LOOKUP(clk_dummy, "gpio.3", NULL),
+};
+
+static struct clk_lookup db5500_clkouts[] = {
+ CLK_LOOKUP(clkout1, "mmio_camera", "primary-cam"),
+ CLK_LOOKUP(clkout1, "mmio_camera", "secondary-cam"),
+ CLK_LOOKUP(clkout2, "ab5500-usb.0", "sysclk"),
+ CLK_LOOKUP(clkout2, "ab5500-codec.0", "sysclk"),
+};
+
+static struct clk_lookup u5500_clocks[] = {
+ CLK_LOOKUP(rtc_clk1, "cg2900-uart.0", "lpoclk"),
+};
+
+static const char *db5500_boot_clk[] __initdata = {
+ "spi0",
+ "spi1",
+ "spi2",
+ "spi3",
+ "uart0",
+ "uart1",
+ "uart2",
+ "uart3",
+ "sdi0",
+ "sdi1",
+ "sdi2",
+ "sdi3",
+ "sdi4",
+};
+
+static struct clk *boot_clks[ARRAY_SIZE(db5500_boot_clk)] __initdata;
+
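+/* Release the boot clocks again once boot has completed. */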
+static int __init db5500_boot_clk_disable(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(db5500_boot_clk); i++) {
+ clk_disable(boot_clks[i]);
+ clk_put(boot_clks[i]);
+ }
+
+ return 0;
+}
+late_initcall_sync(db5500_boot_clk_disable);
+
+static void __init db5500_boot_clk_enable(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(db5500_boot_clk); i++) {
+ boot_clks[i] = clk_get_sys(db5500_boot_clk[i], NULL);
+ BUG_ON(IS_ERR(boot_clks[i]));
+ clk_enable(boot_clks[i]);
+ }
+}
+
+static void configure_clkouts(void)
+{
+ /* div parameter does not matter for sel0 REF_CLK */
+ WARN_ON(prcmu_config_clkout(DB5500_CLKOUT0,
+ DB5500_CLKOUT_REF_CLK_SEL0, 0));
+ WARN_ON(prcmu_config_clkout(DB5500_CLKOUT1,
+ DB5500_CLKOUT_REF_CLK_SEL0, 0));
+}
+
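+/* Clocks that are on at boot but should be off. */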
+static struct clk *db5500_clks_tobe_disabled[] __initdata = {
+ &siaclk,
+ &sgaclk,
+ &sdmmcclk,
+ &p1_pclk0,
+ &p1_pclk6,
+ &p3_keypad_clk,
+ &p3_pclk1,
+ &p5_pclk0,
+ &p5_pclk11,
+ &p5_pclk12,
+ &p5_pclk13,
+ &p5_pclk14,
+ &p6_pclk4,
+ &p6_pclk5,
+ &p6_pclk7,
+ &p5_uart1_clk,
+ &p5_uart2_clk,
+ &p5_uart3_clk,
+ &p5_sdi1_clk,
+ &p5_sdi3_clk,
+ &p5_sdi4_clk,
+ &p5_i2c3_clk,
+ &pwmclk,
+ &svaclk,
+ &clkout2,
+};
+
+static int __init init_clock_states(void)
+{
+ int i = 0;
+ /*
+ * The following clks are shared with secure world.
+ * Currently this leads to a limitation where we need to
+ * enable them at all times.
+ */
+ clk_enable(&p6_pclk1);
+ clk_enable(&p6_pclk2);
+ clk_enable(&p6_pclk3);
+ clk_enable(&p6_rng_clk);
+
+ /*
+ * Disable clocks that are on at boot, but should be off.
+ */
+ for (i = 0; i < ARRAY_SIZE(db5500_clks_tobe_disabled); i++) {
+ if (!clk_enable(db5500_clks_tobe_disabled[i]))
+ clk_disable(db5500_clks_tobe_disabled[i]);
+ }
+ return 0;
+}
+late_initcall(init_clock_states);
+
+int __init db5500_clk_init(void)
+{
+ if (ux500_is_svp()) {
+ prcmu_clk_ops.enable = NULL;
+ prcmu_clk_ops.disable = NULL;
+ prcc_pclk_ops.enable = NULL;
+ prcc_pclk_ops.disable = NULL;
+ prcc_kclk_ops.enable = NULL;
+ prcc_kclk_ops.disable = NULL;
+ }
+ prcmu_clk_ops.get_rate = NULL;
+
+ clkdev_add_table(u8500_common_clock_sources,
+ ARRAY_SIZE(u8500_common_clock_sources));
+
+ clkdev_add_table(db5500_prcmu_clocks, ARRAY_SIZE(db5500_prcmu_clocks));
+ clkdev_add_table(db5500_prcc_clocks, ARRAY_SIZE(db5500_prcc_clocks));
+ clkdev_add_table(db5500_clkouts, ARRAY_SIZE(db5500_clkouts));
+ clkdev_add_table(u5500_clocks, ARRAY_SIZE(u5500_clocks));
+
+ db5500_boot_clk_enable();
+
+ /*
+ * The following clks are shared with secure world.
+ * Currently this leads to a limitation where we need to
+ * enable them at all times.
+ */
+ clk_enable(&p6_pclk1);
+ clk_enable(&p6_pclk2);
+ clk_enable(&p6_pclk3);
+ clk_enable(&p6_rng_clk);
+
+ configure_clkouts();
+
+ return 0;
+}
+
+int __init db5500_clk_debug_init(void)
+{
+ return dbx500_clk_debug_init(db5500_dbg_clks,
+ ARRAY_SIZE(db5500_dbg_clks));
+}
diff --git a/arch/arm/mach-ux500/clock-db8500.c b/arch/arm/mach-ux500/clock-db8500.c
new file mode 100644
index 00000000000..d157478cb1b
--- /dev/null
+++ b/arch/arm/mach-ux500/clock-db8500.c
@@ -0,0 +1,1162 @@
+/*
+ * Copyright (C) 2009-2011 ST-Ericsson SA
+ * Copyright (C) 2009 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/mfd/abx500/ab8500-sysctrl.h>
+#include <linux/workqueue.h>
+#include <linux/regulator/consumer.h>
+
+#include <plat/pincfg.h>
+
+#include <mach/hardware.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include "clock.h"
+#include "pins-db8500.h"
+#include "product.h"
+#include "prcc.h"
+
+static DEFINE_MUTEX(soc0_pll_mutex);
+static DEFINE_MUTEX(soc1_pll_mutex);
+static DEFINE_MUTEX(sysclk_mutex);
+static DEFINE_MUTEX(ab_ulpclk_mutex);
+static DEFINE_MUTEX(ab_intclk_mutex);
+static DEFINE_MUTEX(clkout0_mutex);
+static DEFINE_MUTEX(dsi_pll_mutex);
+
+static struct delayed_work sysclk_disable_work;
+
+/* PLL operations. */
+
+static unsigned long pll_get_rate(struct clk *clk)
+{
+ return prcmu_clock_rate(clk->cg_sel);
+}
+
+static struct clkops pll_ops = {
+ .get_rate = pll_get_rate,
+};
+
+/* SysClk operations. */
+
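+/*
+ * Reference-count sysclk users so the PRCMU request is only made on
+ * the first enable and the last disable.
+ */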
+static int request_sysclk(bool enable)
+{
+ static int requests;
+
+ if ((enable && (requests++ == 0)) || (!enable && (--requests == 0)))
+ return prcmu_request_clock(PRCMU_SYSCLK, enable);
+ return 0;
+}
+
+static int sysclk_enable(struct clk *clk)
+{
+ static bool swat_enable;
+ int r;
+
+ if (!swat_enable) {
+ r = ab8500_sysctrl_set(AB8500_SWATCTRL,
+ AB8500_SWATCTRL_SWATENABLE);
+ if (r)
+ return r;
+
+ swat_enable = true;
+ }
+
+ r = request_sysclk(true);
+ if (r)
+ return r;
+
+ if (clk->cg_sel) {
+ r = ab8500_sysctrl_set(AB8500_SYSULPCLKCTRL1, (u8)clk->cg_sel);
+ if (r)
+ (void)request_sysclk(false);
+ }
+ return r;
+}
+
+static void sysclk_disable(struct clk *clk)
+{
+ int r;
+
+ if (clk->cg_sel) {
+ r = ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ (u8)clk->cg_sel);
+ if (r)
+ goto disable_failed;
+ }
+ r = request_sysclk(false);
+ if (r)
+ goto disable_failed;
+ return;
+
+disable_failed:
+ pr_err("clock: failed to disable %s.\n", clk->name);
+}
+
+static struct clkops sysclk_ops = {
+ .enable = sysclk_enable,
+ .disable = sysclk_disable,
+};
+
+/* AB8500 UlpClk operations */
+
+static int ab_ulpclk_enable(struct clk *clk)
+{
+ int err;
+
+ if (clk->regulator == NULL) {
+ struct regulator *reg;
+
+ reg = regulator_get(NULL, "v-intcore");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+ clk->regulator = reg;
+ }
+ err = regulator_set_optimum_mode(clk->regulator, 1500);
+ if (unlikely(err < 0))
+ goto regulator_enable_error;
+ err = regulator_enable(clk->regulator);
+ if (unlikely(err))
+ goto regulator_enable_error;
+ err = ab8500_sysctrl_clear(AB8500_SYSULPCLKCONF,
+ AB8500_SYSULPCLKCONF_ULPCLKCONF_MASK);
+ if (unlikely(err))
+ goto enable_error;
+ err = ab8500_sysctrl_set(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_ULPCLKREQ);
+ if (unlikely(err))
+ goto enable_error;
+ /* Unknown/undocumented PLL locking time => wait 1 ms. */
+ mdelay(1);
+ return 0;
+
+enable_error:
+ (void)regulator_disable(clk->regulator);
+regulator_enable_error:
+ return err;
+}
+
+static void ab_ulpclk_disable(struct clk *clk)
+{
+ int err;
+
+ err = ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_ULPCLKREQ);
+ if (unlikely(regulator_disable(clk->regulator) || err))
+ goto out_err;
+
+ regulator_set_optimum_mode(clk->regulator, 0);
+
+ return;
+
+out_err:
+ pr_err("clock: %s failed to disable %s.\n", __func__, clk->name);
+}
+
+static struct clkops ab_ulpclk_ops = {
+ .enable = ab_ulpclk_enable,
+ .disable = ab_ulpclk_disable,
+};
+
+/* AB8500 intclk operations */
+
+enum ab_intclk_parent {
+ AB_INTCLK_PARENT_SYSCLK,
+ AB_INTCLK_PARENT_ULPCLK,
+ AB_INTCLK_PARENTS_END,
+ NUM_AB_INTCLK_PARENTS
+};
+
+static int ab_intclk_enable(struct clk *clk)
+{
+ if (clk->parent == clk->parents[AB_INTCLK_PARENT_ULPCLK]) {
+ return ab8500_sysctrl_write(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK,
+ (1 << AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT));
+ }
+ return 0;
+}
+
+static void ab_intclk_disable(struct clk *clk)
+{
+ if (clk->parent == clk->parents[AB_INTCLK_PARENT_SYSCLK])
+ return;
+
+ if (ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK)) {
+ pr_err("clock: %s failed to disable %s.\n", __func__,
+ clk->name);
+ }
+}
+
+static int ab_intclk_set_parent(struct clk *clk, struct clk *parent)
+{
+ int err;
+
+ if (!clk->enabled)
+ return 0;
+
+ err = __clk_enable(parent, clk->mutex);
+
+ if (unlikely(err))
+ goto parent_enable_error;
+
+ if (parent == clk->parents[AB_INTCLK_PARENT_ULPCLK]) {
+ err = ab8500_sysctrl_write(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK,
+ (1 << AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT));
+ } else {
+ err = ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK);
+ }
+ if (unlikely(err))
+ goto config_error;
+
+ __clk_disable(clk->parent, clk->mutex);
+
+ return 0;
+
+config_error:
+ __clk_disable(parent, clk->mutex);
+parent_enable_error:
+ return err;
+}
+
+static struct clkops ab_intclk_ops = {
+ .enable = ab_intclk_enable,
+ .disable = ab_intclk_disable,
+ .set_parent = ab_intclk_set_parent,
+};
+
+/* AB8500 audio clock operations */
+
+static int audioclk_enable(struct clk *clk)
+{
+ return ab8500_sysctrl_set(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_AUDIOCLKENA);
+}
+
+static void audioclk_disable(struct clk *clk)
+{
+ if (ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ AB8500_SYSULPCLKCTRL1_AUDIOCLKENA)) {
+ pr_err("clock: %s failed to disable %s.\n", __func__,
+ clk->name);
+ }
+}
+
+static struct clkops audioclk_ops = {
+ .enable = audioclk_enable,
+ .disable = audioclk_disable,
+};
+
+/* Primary camera clock operations */
+static int clkout0_enable(struct clk *clk)
+{
+ int r;
+
+ if (clk->regulator == NULL) {
+ struct regulator *reg;
+
+ reg = regulator_get(NULL, "v-ape");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+ clk->regulator = reg;
+ }
+ r = regulator_enable(clk->regulator);
+ if (r)
+ goto regulator_failed;
+ r = prcmu_config_clkout(0, PRCMU_CLKSRC_CLK38M, 4);
+ if (r)
+ goto config_failed;
+ r = nmk_config_pin(GPIO227_CLKOUT1, false);
+ if (r)
+ goto gpio_failed;
+ return r;
+
+gpio_failed:
+ (void)prcmu_config_clkout(0, PRCMU_CLKSRC_CLK38M, 0);
+config_failed:
+ (void)regulator_disable(clk->regulator);
+regulator_failed:
+ return r;
+}
+
+static void clkout0_disable(struct clk *clk)
+{
+ int r;
+
+ r = nmk_config_pin((GPIO227_GPIO | PIN_OUTPUT_LOW), false);
+ if (r)
+ goto disable_failed;
+ (void)prcmu_config_clkout(0, PRCMU_CLKSRC_CLK38M, 0);
+ (void)regulator_disable(clk->regulator);
+ return;
+
+disable_failed:
+ pr_err("clock: failed to disable %s.\n", clk->name);
+}
+
+/* Touch screen/secondary camera clock operations. */
+static int clkout1_enable(struct clk *clk)
+{
+ int r;
+
+ if (clk->regulator == NULL) {
+ struct regulator *reg;
+
+ reg = regulator_get(NULL, "v-ape");
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+ clk->regulator = reg;
+ }
+ r = regulator_enable(clk->regulator);
+ if (r)
+ goto regulator_failed;
+ r = prcmu_config_clkout(1, PRCMU_CLKSRC_SYSCLK, 4);
+ if (r)
+ goto config_failed;
+ r = nmk_config_pin(GPIO228_CLKOUT2, false);
+ if (r)
+ goto gpio_failed;
+ return r;
+
+gpio_failed:
+ (void)prcmu_config_clkout(1, PRCMU_CLKSRC_SYSCLK, 0);
+config_failed:
+ (void)regulator_disable(clk->regulator);
+regulator_failed:
+ return r;
+}
+
+static void clkout1_disable(struct clk *clk)
+{
+ int r;
+
+ r = nmk_config_pin((GPIO228_GPIO | PIN_OUTPUT_LOW), false);
+ if (r)
+ goto disable_failed;
+ (void)prcmu_config_clkout(1, PRCMU_CLKSRC_SYSCLK, 0);
+ (void)regulator_disable(clk->regulator);
+ return;
+
+disable_failed:
+ pr_err("clock: failed to disable %s.\n", clk->name);
+}
+
+static struct clkops clkout0_ops = {
+ .enable = clkout0_enable,
+ .disable = clkout0_disable,
+};
+
+static struct clkops clkout1_ops = {
+ .enable = clkout1_enable,
+ .disable = clkout1_disable,
+};
+
+#define DEF_PER1_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U8500_CLKRST1_BASE, _cg_bit, &per1clk)
+#define DEF_PER2_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U8500_CLKRST2_BASE, _cg_bit, &per2clk)
+#define DEF_PER3_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U8500_CLKRST3_BASE, _cg_bit, &per3clk)
+#define DEF_PER5_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U8500_CLKRST5_BASE, _cg_bit, &per5clk)
+#define DEF_PER6_PCLK(_cg_bit, _name) \
+ DEF_PRCC_PCLK(_name, U8500_CLKRST6_BASE, _cg_bit, &per6clk)
+
+#define DEF_PER1_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U8500_CLKRST1_BASE, _cg_bit, _parent, &per1clk)
+#define DEF_PER2_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U8500_CLKRST2_BASE, _cg_bit, _parent, &per2clk)
+#define DEF_PER3_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U8500_CLKRST3_BASE, _cg_bit, _parent, &per3clk)
+#define DEF_PER5_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U8500_CLKRST5_BASE, _cg_bit, _parent, &per5clk)
+#define DEF_PER6_KCLK(_cg_bit, _name, _parent) \
+ DEF_PRCC_KCLK(_name, U8500_CLKRST6_BASE, _cg_bit, _parent, &per6clk)
+
+/* Clock sources. */
+
+static struct clk soc0_pll = {
+ .name = "soc0_pll",
+ .ops = &prcmu_clk_ops,
+ .cg_sel = PRCMU_PLLSOC0,
+ .mutex = &soc0_pll_mutex,
+};
+
+static struct clk soc1_pll = {
+ .name = "soc1_pll",
+ .ops = &prcmu_clk_ops,
+ .cg_sel = PRCMU_PLLSOC1,
+ .mutex = &soc1_pll_mutex,
+};
+
+static struct clk ddr_pll = {
+ .name = "ddr_pll",
+ .ops = &pll_ops,
+ .cg_sel = PRCMU_PLLDDR,
+};
+
+static struct clk ulp38m4 = {
+ .name = "ulp38m4",
+ .rate = 38400000,
+};
+
+static struct clk sysclk = {
+ .name = "sysclk",
+ .ops = &sysclk_ops,
+ .rate = 38400000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk sysclk2 = {
+ .name = "sysclk2",
+ .ops = &sysclk_ops,
+ .cg_sel = AB8500_SYSULPCLKCTRL1_SYSCLKBUF2REQ,
+ .rate = 38400000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk sysclk3 = {
+ .name = "sysclk3",
+ .ops = &sysclk_ops,
+ .cg_sel = AB8500_SYSULPCLKCTRL1_SYSCLKBUF3REQ,
+ .rate = 38400000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk sysclk4 = {
+ .name = "sysclk4",
+ .ops = &sysclk_ops,
+ .cg_sel = AB8500_SYSULPCLKCTRL1_SYSCLKBUF4REQ,
+ .rate = 38400000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk rtc32k = {
+ .name = "rtc32k",
+ .rate = 32768,
+};
+
+static struct clk clkout0 = {
+ .name = "clkout0",
+ .ops = &clkout0_ops,
+ .parent = &ulp38m4,
+ .rate = 9600000,
+ .mutex = &clkout0_mutex,
+};
+
+static struct clk clkout1 = {
+ .name = "clkout1",
+ .ops = &clkout1_ops,
+ .parent = &sysclk,
+ .rate = 9600000,
+ .mutex = &sysclk_mutex,
+};
+
+static struct clk ab_ulpclk = {
+ .name = "ab_ulpclk",
+ .ops = &ab_ulpclk_ops,
+ .rate = 38400000,
+ .mutex = &ab_ulpclk_mutex,
+};
+
+static struct clk *ab_intclk_parents[NUM_AB_INTCLK_PARENTS] = {
+ [AB_INTCLK_PARENT_SYSCLK] = &sysclk,
+ [AB_INTCLK_PARENT_ULPCLK] = &ab_ulpclk,
+ [AB_INTCLK_PARENTS_END] = NULL,
+};
+
+static struct clk ab_intclk = {
+ .name = "ab_intclk",
+ .ops = &ab_intclk_ops,
+ .mutex = &ab_intclk_mutex,
+ .parent = &sysclk,
+ .parents = ab_intclk_parents,
+};
+
+static struct clk audioclk = {
+ .name = "audioclk",
+ .ops = &audioclk_ops,
+ .mutex = &ab_intclk_mutex,
+ .parent = &ab_intclk,
+};
+
+static DEF_PRCMU_CLK(sgaclk, PRCMU_SGACLK, 320000000);
+static DEF_PRCMU_CLK(uartclk, PRCMU_UARTCLK, 38400000);
+static DEF_PRCMU_CLK(msp02clk, PRCMU_MSP02CLK, 19200000);
+static DEF_PRCMU_CLK(msp1clk, PRCMU_MSP1CLK, 19200000);
+static DEF_PRCMU_CLK(i2cclk, PRCMU_I2CCLK, 24000000);
+static DEF_PRCMU_CLK(slimclk, PRCMU_SLIMCLK, 19200000);
+static DEF_PRCMU_CLK(per1clk, PRCMU_PER1CLK, 133330000);
+static DEF_PRCMU_CLK(per2clk, PRCMU_PER2CLK, 133330000);
+static DEF_PRCMU_CLK(per3clk, PRCMU_PER3CLK, 133330000);
+static DEF_PRCMU_CLK(per5clk, PRCMU_PER5CLK, 133330000);
+static DEF_PRCMU_CLK(per6clk, PRCMU_PER6CLK, 133330000);
+static DEF_PRCMU_CLK(per7clk, PRCMU_PER7CLK, 100000000);
+static DEF_PRCMU_SCALABLE_CLK(lcdclk, PRCMU_LCDCLK);
+static DEF_PRCMU_OPP100_CLK(bmlclk, PRCMU_BMLCLK, 200000000);
+static DEF_PRCMU_SCALABLE_CLK(hsitxclk, PRCMU_HSITXCLK);
+static DEF_PRCMU_SCALABLE_CLK(hsirxclk, PRCMU_HSIRXCLK);
+static DEF_PRCMU_SCALABLE_CLK(hdmiclk, PRCMU_HDMICLK);
+static DEF_PRCMU_CLK(apeatclk, PRCMU_APEATCLK, 160000000);
+static DEF_PRCMU_CLK(apetraceclk, PRCMU_APETRACECLK, 160000000);
+static DEF_PRCMU_CLK(mcdeclk, PRCMU_MCDECLK, 160000000);
+static DEF_PRCMU_OPP100_CLK(ipi2cclk, PRCMU_IPI2CCLK, 24000000);
+static DEF_PRCMU_CLK(dsialtclk, PRCMU_DSIALTCLK, 384000000);
+static DEF_PRCMU_CLK(dmaclk, PRCMU_DMACLK, 200000000);
+static DEF_PRCMU_CLK(b2r2clk, PRCMU_B2R2CLK, 200000000);
+static DEF_PRCMU_SCALABLE_CLK(tvclk, PRCMU_TVCLK);
+/* TODO: For SSPCLK, the spec says 24MHz, while the old driver says 48MHz. */
+static DEF_PRCMU_CLK(sspclk, PRCMU_SSPCLK, 24000000);
+static DEF_PRCMU_CLK(rngclk, PRCMU_RNGCLK, 19200000);
+static DEF_PRCMU_CLK(uiccclk, PRCMU_UICCCLK, 48000000);
+static DEF_PRCMU_CLK(timclk, PRCMU_TIMCLK, 2400000);
+static DEF_PRCMU_CLK(sdmmcclk, PRCMU_SDMMCCLK, 50000000);
+
+static struct clk dsi_pll = {
+ .name = "dsi_pll",
+ .ops = &prcmu_scalable_clk_ops,
+ .cg_sel = PRCMU_PLLDSI,
+ .parent = &hdmiclk,
+ .mutex = &dsi_pll_mutex,
+};
+
+static struct clk dsi0clk = {
+ .name = "dsi0clk",
+ .ops = &prcmu_scalable_clk_ops,
+ .cg_sel = PRCMU_DSI0CLK,
+ .parent = &dsi_pll,
+ .mutex = &dsi_pll_mutex,
+};
+
+static struct clk dsi1clk = {
+ .name = "dsi1clk",
+ .ops = &prcmu_scalable_clk_ops,
+ .cg_sel = PRCMU_DSI1CLK,
+ .parent = &dsi_pll,
+ .mutex = &dsi_pll_mutex,
+};
+
+static struct clk dsi0escclk = {
+ .name = "dsi0escclk",
+ .ops = &prcmu_scalable_clk_ops,
+ .cg_sel = PRCMU_DSI0ESCCLK,
+ .parent = &tvclk,
+};
+
+static struct clk dsi1escclk = {
+ .name = "dsi1escclk",
+ .ops = &prcmu_scalable_clk_ops,
+ .cg_sel = PRCMU_DSI1ESCCLK,
+ .parent = &tvclk,
+};
+
+static struct clk dsi2escclk = {
+ .name = "dsi2escclk",
+ .ops = &prcmu_scalable_clk_ops,
+ .cg_sel = PRCMU_DSI2ESCCLK,
+ .parent = &tvclk,
+};
+
+/* PRCC PClocks */
+
+static DEF_PER1_PCLK(0, p1_pclk0);
+static DEF_PER1_PCLK(1, p1_pclk1);
+static DEF_PER1_PCLK(2, p1_pclk2);
+static DEF_PER1_PCLK(3, p1_pclk3);
+static DEF_PER1_PCLK(4, p1_pclk4);
+static DEF_PER1_PCLK(5, p1_pclk5);
+static DEF_PER1_PCLK(6, p1_pclk6);
+static DEF_PER1_PCLK(7, p1_pclk7);
+static DEF_PER1_PCLK(8, p1_pclk8);
+static DEF_PER1_PCLK(9, p1_pclk9);
+static DEF_PER1_PCLK(10, p1_pclk10);
+static DEF_PER1_PCLK(11, p1_pclk11);
+
+static DEF_PER2_PCLK(0, p2_pclk0);
+static DEF_PER2_PCLK(1, p2_pclk1);
+static DEF_PER2_PCLK(2, p2_pclk2);
+static DEF_PER2_PCLK(3, p2_pclk3);
+static DEF_PER2_PCLK(4, p2_pclk4);
+static DEF_PER2_PCLK(5, p2_pclk5);
+static DEF_PER2_PCLK(6, p2_pclk6);
+static DEF_PER2_PCLK(7, p2_pclk7);
+static DEF_PER2_PCLK(8, p2_pclk8);
+static DEF_PER2_PCLK(9, p2_pclk9);
+static DEF_PER2_PCLK(10, p2_pclk10);
+static DEF_PER2_PCLK(11, p2_pclk11);
+
+static DEF_PER3_PCLK(0, p3_pclk0);
+static DEF_PER3_PCLK(1, p3_pclk1);
+static DEF_PER3_PCLK(2, p3_pclk2);
+static DEF_PER3_PCLK(3, p3_pclk3);
+static DEF_PER3_PCLK(4, p3_pclk4);
+static DEF_PER3_PCLK(5, p3_pclk5);
+static DEF_PER3_PCLK(6, p3_pclk6);
+static DEF_PER3_PCLK(7, p3_pclk7);
+static DEF_PER3_PCLK(8, p3_pclk8);
+
+static DEF_PER5_PCLK(0, p5_pclk0);
+static DEF_PER5_PCLK(1, p5_pclk1);
+
+static DEF_PER6_PCLK(0, p6_pclk0);
+static DEF_PER6_PCLK(1, p6_pclk1);
+static DEF_PER6_PCLK(2, p6_pclk2);
+static DEF_PER6_PCLK(3, p6_pclk3);
+static DEF_PER6_PCLK(4, p6_pclk4);
+static DEF_PER6_PCLK(5, p6_pclk5);
+static DEF_PER6_PCLK(6, p6_pclk6);
+static DEF_PER6_PCLK(7, p6_pclk7);
+
+/* UART0 */
+static DEF_PER1_KCLK(0, p1_uart0_kclk, &uartclk);
+static DEF_PER_CLK(p1_uart0_clk, &p1_pclk0, &p1_uart0_kclk);
+
+/* UART1 */
+static DEF_PER1_KCLK(1, p1_uart1_kclk, &uartclk);
+static DEF_PER_CLK(p1_uart1_clk, &p1_pclk1, &p1_uart1_kclk);
+
+/* I2C1 */
+static DEF_PER1_KCLK(2, p1_i2c1_kclk, &i2cclk);
+static DEF_PER_CLK(p1_i2c1_clk, &p1_pclk2, &p1_i2c1_kclk);
+
+/* MSP0 */
+static DEF_PER1_KCLK(3, p1_msp0_kclk, &msp02clk);
+static DEF_PER_CLK(p1_msp0_clk, &p1_pclk3, &p1_msp0_kclk);
+
+/* MSP1 */
+static DEF_PER1_KCLK(4, p1_msp1_kclk, &msp1clk);
+static DEF_PER_CLK(p1_msp1_clk, &p1_pclk4, &p1_msp1_kclk);
+
+/* SDI0 */
+static DEF_PER1_KCLK(5, p1_sdi0_kclk, &sdmmcclk);
+static DEF_PER_CLK(p1_sdi0_clk, &p1_pclk5, &p1_sdi0_kclk);
+
+/* I2C2 */
+static DEF_PER1_KCLK(6, p1_i2c2_kclk, &i2cclk);
+static DEF_PER_CLK(p1_i2c2_clk, &p1_pclk6, &p1_i2c2_kclk);
+
+/* SLIMBUS0 */
+static DEF_PER1_KCLK(3, p1_slimbus0_kclk, &slimclk);
+static DEF_PER_CLK(p1_slimbus0_clk, &p1_pclk8, &p1_slimbus0_kclk);
+
+/* I2C4 */
+static DEF_PER1_KCLK(9, p1_i2c4_kclk, &i2cclk);
+static DEF_PER_CLK(p1_i2c4_clk, &p1_pclk10, &p1_i2c4_kclk);
+
+/* MSP3 */
+static DEF_PER1_KCLK(10, p1_msp3_kclk, &msp1clk);
+static DEF_PER_CLK(p1_msp3_clk, &p1_pclk11, &p1_msp3_kclk);
+
+/* I2C3 */
+static DEF_PER2_KCLK(0, p2_i2c3_kclk, &i2cclk);
+static DEF_PER_CLK(p2_i2c3_clk, &p2_pclk0, &p2_i2c3_kclk);
+
+/* SDI4 */
+static DEF_PER2_KCLK(2, p2_sdi4_kclk, &sdmmcclk);
+static DEF_PER_CLK(p2_sdi4_clk, &p2_pclk4, &p2_sdi4_kclk);
+
+/* MSP2 */
+static DEF_PER2_KCLK(3, p2_msp2_kclk, &msp02clk);
+static DEF_PER_CLK(p2_msp2_clk, &p2_pclk5, &p2_msp2_kclk);
+
+/* SDI1 */
+static DEF_PER2_KCLK(4, p2_sdi1_kclk, &sdmmcclk);
+static DEF_PER_CLK(p2_sdi1_clk, &p2_pclk6, &p2_sdi1_kclk);
+
+/* SDI3 */
+static DEF_PER2_KCLK(5, p2_sdi3_kclk, &sdmmcclk);
+static DEF_PER_CLK(p2_sdi3_clk, &p2_pclk7, &p2_sdi3_kclk);
+
+/* HSIR */
+static struct clk p2_ssirx_kclk = {
+ .name = "p2_ssirx_kclk",
+ .ops = &prcc_kclk_rec_ops,
+ .io_base = U8500_CLKRST2_BASE,
+ .cg_sel = BIT(6),
+ .parent = &hsirxclk,
+ .clock = &per2clk,
+};
+
+/* HSIT */
+static struct clk p2_ssitx_kclk = {
+ .name = "p2_ssitx_kclk",
+ .ops = &prcc_kclk_rec_ops,
+ .io_base = U8500_CLKRST2_BASE,
+ .cg_sel = BIT(7),
+ .parent = &hsitxclk,
+ .clock = &per2clk,
+};
+
+/* SSP0 */
+static DEF_PER3_KCLK(1, p3_ssp0_kclk, &sspclk);
+static DEF_PER_CLK(p3_ssp0_clk, &p3_pclk1, &p3_ssp0_kclk);
+
+/* SSP1 */
+static DEF_PER3_KCLK(2, p3_ssp1_kclk, &sspclk);
+static DEF_PER_CLK(p3_ssp1_clk, &p3_pclk2, &p3_ssp1_kclk);
+
+/* I2C0 */
+static DEF_PER3_KCLK(3, p3_i2c0_kclk, &i2cclk);
+static DEF_PER_CLK(p3_i2c0_clk, &p3_pclk3, &p3_i2c0_kclk);
+
+/* SDI2 */
+static DEF_PER3_KCLK(4, p3_sdi2_kclk, &sdmmcclk);
+static DEF_PER_CLK(p3_sdi2_clk, &p3_pclk4, &p3_sdi2_kclk);
+
+/* SKE */
+static DEF_PER3_KCLK(5, p3_ske_kclk, &rtc32k);
+static DEF_PER_CLK(p3_ske_clk, &p3_pclk5, &p3_ske_kclk);
+
+/* UART2 */
+static DEF_PER3_KCLK(6, p3_uart2_kclk, &uartclk);
+static DEF_PER_CLK(p3_uart2_clk, &p3_pclk6, &p3_uart2_kclk);
+
+/* SDI5 */
+static DEF_PER3_KCLK(7, p3_sdi5_kclk, &sdmmcclk);
+static DEF_PER_CLK(p3_sdi5_clk, &p3_pclk7, &p3_sdi5_kclk);
+
+/* RNG */
+static DEF_PER6_KCLK(0, p6_rng_kclk, &rngclk);
+static DEF_PER_CLK(p6_rng_clk, &p6_pclk0, &p6_rng_kclk);
+
+/* MTU:S */
+
+/* MTU0 */
+static DEF_PER_CLK(p6_mtu0_clk, &p6_pclk6, &timclk);
+
+/* MTU1 */
+static DEF_PER_CLK(p6_mtu1_clk, &p6_pclk7, &timclk);
+
+/*
+ * TODO: Ensure names match with devices and then remove unnecessary entries
+ * when all drivers use the clk API.
+ */
+
+static struct clk_lookup u8500_clocks[] = {
+ CLK_LOOKUP(soc0_pll, NULL, "soc0_pll"),
+ CLK_LOOKUP(soc1_pll, NULL, "soc1_pll"),
+ CLK_LOOKUP(ddr_pll, NULL, "ddr_pll"),
+ CLK_LOOKUP(ulp38m4, NULL, "ulp38m4"),
+ CLK_LOOKUP(sysclk, NULL, "sysclk"),
+ CLK_LOOKUP(rtc32k, NULL, "clk32k"),
+ CLK_LOOKUP(sysclk, "ab8500-usb.0", "sysclk"),
+ CLK_LOOKUP(sysclk, "ab8500-codec.0", "sysclk"),
+ CLK_LOOKUP(ab_ulpclk, "ab8500-codec.0", "ulpclk"),
+ CLK_LOOKUP(ab_intclk, "ab8500-codec.0", "intclk"),
+ CLK_LOOKUP(audioclk, "ab8500-codec.0", "audioclk"),
+ CLK_LOOKUP(ab_intclk, "ab8500-pwm.1", NULL),
+ CLK_LOOKUP(ab_intclk, "ab8500-pwm.2", NULL),
+ CLK_LOOKUP(ab_intclk, "ab8500-pwm.3", NULL),
+
+ CLK_LOOKUP(clkout0, "pri-cam", NULL),
+ CLK_LOOKUP(clkout1, "3-005c", NULL),
+ CLK_LOOKUP(clkout1, "3-005d", NULL),
+ CLK_LOOKUP(clkout1, "sec-cam", NULL),
+
+ /* prcmu */
+ CLK_LOOKUP(sgaclk, "mali", NULL),
+ CLK_LOOKUP(uartclk, "UART", NULL),
+ CLK_LOOKUP(msp02clk, "MSP02", NULL),
+ CLK_LOOKUP(i2cclk, "I2C", NULL),
+ CLK_LOOKUP(sdmmcclk, "sdmmc", NULL),
+ CLK_LOOKUP(slimclk, "slim", NULL),
+ CLK_LOOKUP(per1clk, "PERIPH1", NULL),
+ CLK_LOOKUP(per2clk, "PERIPH2", NULL),
+ CLK_LOOKUP(per3clk, "PERIPH3", NULL),
+ CLK_LOOKUP(per5clk, "PERIPH5", NULL),
+ CLK_LOOKUP(per6clk, "PERIPH6", NULL),
+ CLK_LOOKUP(per7clk, "PERIPH7", NULL),
+ CLK_LOOKUP(lcdclk, "lcd", NULL),
+ CLK_LOOKUP(bmlclk, "bml", NULL),
+ CLK_LOOKUP(p2_ssitx_kclk, "ste_hsi.0", "hsit_hsitxclk"),
+ CLK_LOOKUP(p2_ssirx_kclk, "ste_hsi.0", "hsir_hsirxclk"),
+ CLK_LOOKUP(lcdclk, "mcde", "lcd"),
+ CLK_LOOKUP(hdmiclk, "hdmi", NULL),
+ CLK_LOOKUP(hdmiclk, "mcde", "hdmi"),
+ CLK_LOOKUP(apeatclk, "apeat", NULL),
+ CLK_LOOKUP(apetraceclk, "apetrace", NULL),
+ CLK_LOOKUP(mcdeclk, "mcde", NULL),
+ CLK_LOOKUP(mcdeclk, "mcde", "mcde"),
+ CLK_LOOKUP(ipi2cclk, "ipi2", NULL),
+ CLK_LOOKUP(dmaclk, "dma40.0", NULL),
+ CLK_LOOKUP(b2r2clk, "b2r2", NULL),
+ CLK_LOOKUP(b2r2clk, "b2r2_bus", NULL),
+ CLK_LOOKUP(b2r2clk, "U8500-B2R2.0", NULL),
+ CLK_LOOKUP(tvclk, "tv", NULL),
+ CLK_LOOKUP(tvclk, "mcde", "tv"),
+ CLK_LOOKUP(msp1clk, "MSP1", NULL),
+ CLK_LOOKUP(dsialtclk, "dsialt", NULL),
+ CLK_LOOKUP(sspclk, "SSP", NULL),
+ CLK_LOOKUP(rngclk, "rngclk", NULL),
+ CLK_LOOKUP(uiccclk, "uicc", NULL),
+ CLK_LOOKUP(dsi0clk, "mcde", "dsihs0"),
+ CLK_LOOKUP(dsi1clk, "mcde", "dsihs1"),
+ CLK_LOOKUP(dsi_pll, "mcde", "dsihs2"),
+ CLK_LOOKUP(dsi0escclk, "mcde", "dsilp0"),
+ CLK_LOOKUP(dsi1escclk, "mcde", "dsilp1"),
+ CLK_LOOKUP(dsi2escclk, "mcde", "dsilp2"),
+
+ /* PERIPH 1 */
+ CLK_LOOKUP(p1_msp3_clk, "msp3", NULL),
+ CLK_LOOKUP(p1_msp3_clk, "ux500-msp-i2s.3", NULL),
+ CLK_LOOKUP(p1_msp3_kclk, "ab8500-codec.0", "msp3-kernel"),
+ CLK_LOOKUP(p1_pclk11, "ab8500-codec.0", "msp3-bus"),
+ CLK_LOOKUP(p1_uart0_clk, "uart0", NULL),
+ CLK_LOOKUP(p1_uart1_clk, "uart1", NULL),
+ CLK_LOOKUP(p1_i2c1_clk, "nmk-i2c.1", NULL),
+ CLK_LOOKUP(p1_msp0_clk, "msp0", NULL),
+ CLK_LOOKUP(p1_msp0_clk, "ux500-msp-i2s.0", NULL),
+ CLK_LOOKUP(p1_sdi0_clk, "sdi0", NULL),
+ CLK_LOOKUP(p1_i2c2_clk, "nmk-i2c.2", NULL),
+ CLK_LOOKUP(p1_slimbus0_clk, "slimbus0", NULL),
+ CLK_LOOKUP(p1_pclk9, "gpio.0", NULL),
+ CLK_LOOKUP(p1_pclk9, "gpio.1", NULL),
+ CLK_LOOKUP(p1_pclk9, "gpioblock0", NULL),
+ CLK_LOOKUP(p1_msp1_clk, "msp1", NULL),
+ CLK_LOOKUP(p1_msp1_clk, "ux500-msp-i2s.1", NULL),
+ CLK_LOOKUP(p1_msp1_kclk, "ab8500-codec.0", "msp1-kernel"),
+ CLK_LOOKUP(p1_pclk4, "ab8500-codec.0", "msp1-bus"),
+ CLK_LOOKUP(p1_pclk7, "spi3", NULL),
+ CLK_LOOKUP(p1_i2c4_clk, "nmk-i2c.4", NULL),
+
+ /* PERIPH 2 */
+ CLK_LOOKUP(p2_i2c3_clk, "nmk-i2c.3", NULL),
+ CLK_LOOKUP(p2_pclk1, "spi2", NULL),
+ CLK_LOOKUP(p2_pclk2, "spi1", NULL),
+ CLK_LOOKUP(p2_pclk3, "pwl", NULL),
+ CLK_LOOKUP(p2_sdi4_clk, "sdi4", NULL),
+ CLK_LOOKUP(p2_msp2_clk, "msp2", NULL),
+ CLK_LOOKUP(p2_msp2_clk, "ux500-msp-i2s.2", NULL),
+ CLK_LOOKUP(p2_sdi1_clk, "sdi1", NULL),
+ CLK_LOOKUP(p2_sdi3_clk, "sdi3", NULL),
+ CLK_LOOKUP(p2_pclk8, "spi0", NULL),
+ CLK_LOOKUP(p2_pclk9, "ste_hsi.0", "hsir_hclk"),
+ CLK_LOOKUP(p2_pclk10, "ste_hsi.0", "hsit_hclk"),
+ CLK_LOOKUP(p2_pclk11, "gpio.6", NULL),
+ CLK_LOOKUP(p2_pclk11, "gpio.7", NULL),
+ CLK_LOOKUP(p2_pclk11, "gpioblock1", NULL),
+
+ /* PERIPH 3 */
+ CLK_LOOKUP(p3_pclk0, NULL, "fsmc"),
+ CLK_LOOKUP(p3_i2c0_clk, "nmk-i2c.0", NULL),
+ CLK_LOOKUP(p3_sdi2_clk, "sdi2", NULL),
+ CLK_LOOKUP(p3_ske_clk, "ske", NULL),
+ CLK_LOOKUP(p3_ske_clk, "nmk-ske-keypad", NULL),
+ CLK_LOOKUP(p3_uart2_clk, "uart2", NULL),
+ CLK_LOOKUP(p3_sdi5_clk, "sdi5", NULL),
+ CLK_LOOKUP(p3_pclk8, "gpio.2", NULL),
+ CLK_LOOKUP(p3_pclk8, "gpio.3", NULL),
+ CLK_LOOKUP(p3_pclk8, "gpio.4", NULL),
+ CLK_LOOKUP(p3_pclk8, "gpio.5", NULL),
+ CLK_LOOKUP(p3_pclk8, "gpioblock2", NULL),
+ CLK_LOOKUP(p3_ssp0_clk, "ssp0", NULL),
+ CLK_LOOKUP(p3_ssp1_clk, "ssp1", NULL),
+
+ /* PERIPH 5 */
+ CLK_LOOKUP(p5_pclk1, "gpio.8", NULL),
+ CLK_LOOKUP(p5_pclk1, "gpioblock3", NULL),
+ CLK_LOOKUP(p5_pclk0, "musb-ux500.0", "usb"),
+
+ /* PERIPH 6 */
+ CLK_LOOKUP(p6_pclk1, "cryp0", NULL),
+ CLK_LOOKUP(p6_pclk2, "hash0", NULL),
+ CLK_LOOKUP(p6_pclk3, "pka", NULL),
+ CLK_LOOKUP(p6_pclk5, "cfgreg", NULL),
+ CLK_LOOKUP(p6_mtu0_clk, "mtu0", NULL),
+ CLK_LOOKUP(p6_mtu1_clk, "mtu1", NULL),
+ CLK_LOOKUP(p6_pclk4, "hash1", NULL),
+ CLK_LOOKUP(p6_pclk1, "cryp1", NULL),
+ CLK_LOOKUP(p6_rng_clk, "rng", NULL),
+
+};
+
+static struct clk_lookup u8500_v2_sysclks[] = {
+ CLK_LOOKUP(sysclk2, NULL, "sysclk2"),
+ CLK_LOOKUP(sysclk3, NULL, "sysclk3"),
+ CLK_LOOKUP(sysclk4, NULL, "sysclk4"),
+};
+
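+/*
+ * Deferred work: gate off any AB8500 system clocks that no driver has
+ * requested by the time this runs. Scheduled from init_clock_states()
+ * below.
+ */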
+static void sysclk_init_disable(struct work_struct *not_used)
+{
+ int i;
+
+ mutex_lock(&sysclk_mutex);
+
+ /* Enable SWAT */
+ if (ab8500_sysctrl_set(AB8500_SWATCTRL, AB8500_SWATCTRL_SWATENABLE))
+ goto err_swat;
+
+ for (i = 0; i < ARRAY_SIZE(u8500_v2_sysclks); i++) {
+ struct clk *clk = u8500_v2_sysclks[i].clk;
+
+ /* Disable sysclks */
+ if (!clk->enabled && clk->cg_sel) {
+ if (ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
+ (u8)clk->cg_sel))
+ goto err_sysclk;
+ }
+ }
+ goto unlock_and_exit;
+
+err_sysclk:
+	pr_err("clock: Disable %s failed\n", u8500_v2_sysclks[i].clk->name);
+ ab8500_sysctrl_clear(AB8500_SWATCTRL, AB8500_SWATCTRL_SWATENABLE);
+ goto unlock_and_exit;
+
+err_swat:
+	pr_err("clock: Enable SWAT failed\n");
+
+unlock_and_exit:
+ mutex_unlock(&sysclk_mutex);
+}
+
+static struct clk *db8500_dbg_clks[] __initdata = {
+ /* Clock sources */
+ &soc0_pll,
+ &soc1_pll,
+ &ddr_pll,
+ &ulp38m4,
+ &sysclk,
+ &rtc32k,
+ /* PRCMU clocks */
+ &sgaclk,
+ &uartclk,
+ &msp02clk,
+ &msp1clk,
+ &i2cclk,
+ &sdmmcclk,
+ &slimclk,
+ &per1clk,
+ &per2clk,
+ &per3clk,
+ &per5clk,
+ &per6clk,
+ &per7clk,
+ &lcdclk,
+ &bmlclk,
+ &hsitxclk,
+ &hsirxclk,
+ &hdmiclk,
+ &apeatclk,
+ &apetraceclk,
+ &mcdeclk,
+ &ipi2cclk,
+ &dsialtclk,
+ &dmaclk,
+ &b2r2clk,
+ &tvclk,
+ &sspclk,
+ &rngclk,
+ &uiccclk,
+ &sysclk2,
+ &clkout0,
+ &clkout1,
+ &p1_pclk0,
+ &p1_pclk1,
+ &p1_pclk2,
+ &p1_pclk3,
+ &p1_pclk4,
+ &p1_pclk5,
+ &p1_pclk6,
+ &p1_pclk7,
+ &p1_pclk8,
+ &p1_pclk9,
+ &p1_pclk10,
+ &p1_pclk11,
+ &p2_pclk0,
+ &p2_pclk1,
+ &p2_pclk2,
+ &p2_pclk3,
+ &p2_pclk4,
+ &p2_pclk5,
+ &p2_pclk6,
+ &p2_pclk7,
+ &p2_pclk8,
+ &p2_pclk9,
+ &p2_pclk10,
+ &p2_pclk11,
+ &p3_pclk0,
+ &p3_pclk1,
+ &p3_pclk2,
+ &p3_pclk3,
+ &p3_pclk4,
+ &p3_pclk5,
+ &p3_pclk6,
+ &p3_pclk7,
+ &p3_pclk8,
+ &p5_pclk0,
+ &p5_pclk1,
+ &p6_pclk0,
+ &p6_pclk1,
+ &p6_pclk2,
+ &p6_pclk3,
+ &p6_pclk4,
+ &p6_pclk5,
+ &p6_pclk6,
+ &p6_pclk7,
+};
+
+/* List of clocks that might be left enabled by the bootloader */
+
+/*
+ * The SoC settings enable the bus and kernel clocks of all peripherals
+ * without properly configuring the kernel clock parents for all units.
+ * Enable and disable them all to get them into a known, working state.
+ */
+static struct clk *loader_enabled_clk[] __initdata = {
+ /* periph 1 */
+ &p1_uart0_clk,
+ &p1_uart1_clk,
+ &p1_i2c1_clk,
+ &p1_msp0_clk,
+ &p1_msp1_clk,
+ &p1_sdi0_clk,
+ &p1_i2c2_clk,
+ &p1_pclk7, /* spi3 */
+ &p1_pclk9, /* gpioctrl */
+ &p1_i2c4_clk,
+
+ /* periph 2 */
+ &p2_i2c3_clk,
+ &p2_pclk1, /* spi2 */
+ &p2_pclk2, /* spi1 */
+ /* pwl has an unknown kclk parent, ignore it */
+ &p2_sdi4_clk,
+ &p2_msp2_clk,
+ &p2_sdi1_clk,
+ &p2_sdi3_clk,
+ &p2_pclk8, /* spi0 */
+ &p2_ssirx_kclk, /* hsir kernel */
+ &p2_ssitx_kclk, /* hsit kernel */
+ &p2_pclk9, /* hsir bus */
+ &p2_pclk10, /* hsit bus */
+ &p2_pclk11, /* gpioctrl */
+ /* periph 3 */
+ &p3_pclk0, /* fsmc */
+ &p3_ssp0_clk,
+ &p3_ssp1_clk,
+ &p3_i2c0_clk,
+ &p3_sdi2_clk,
+ &p3_ske_clk,
+ &p3_uart2_clk,
+ &p3_sdi5_clk,
+ &p3_pclk8, /* gpio */
+ /* periph 5 */
+ &p5_pclk0, /* usb */
+ &p5_pclk1, /* gpio */
+ /* periph 6 */
+ /* Leave out rng, cryp0, hash0 and pka */
+ &p6_pclk4, /* hash1 */
+ &p6_pclk5, /* cr */
+ &p6_mtu0_clk,
+ &p6_mtu1_clk,
+ /* periph 7 */
+ &per7clk, /* PERIPH7 */
+
+ &bmlclk, /* BML */
+ &dsialtclk, /* dsialt */
+ &hsirxclk, /* hsirx */
+ &hsitxclk, /* hsitx */
+ &ipi2cclk, /* ipi2 */
+ &lcdclk, /* mcde */
+ &b2r2clk, /* b2r2_bus */
+};
+
+static int __init init_clock_states(void)
+{
+ unsigned int i;
+
+	/*
+	 * Disable peripheral clocks that are enabled by the bootloader or
+	 * by default but have no driver using them.
+	 */
+ for (i = 0; i < ARRAY_SIZE(loader_enabled_clk); i++)
+ if (!clk_enable(loader_enabled_clk[i]))
+ clk_disable(loader_enabled_clk[i]);
+
+	/*
+	 * APEATCLK and APETRACECLK are enabled at boot and are needed
+	 * in order to debug with a Lauterbach debugger.
+	 */
+ if (!clk_enable(&apeatclk)) {
+ if (!ux500_jtag_enabled())
+ clk_disable(&apeatclk);
+ }
+ if (!clk_enable(&apetraceclk)) {
+ if (!ux500_jtag_enabled())
+ clk_disable(&apetraceclk);
+ }
+
+ INIT_DELAYED_WORK(&sysclk_disable_work, sysclk_init_disable);
+ schedule_delayed_work(&sysclk_disable_work, 10 * HZ);
+
+ return 0;
+}
+late_initcall(init_clock_states);
+
+static void __init configure_c2_clocks(void)
+{
+ sgaclk.parent = &soc0_pll;
+ sgaclk.mutex = &soc0_pll_mutex;
+}
+
+int __init db8500_clk_init(void)
+{
+ struct prcmu_fw_version *fw_version;
+
+	/*
+	 * Disable the bus and kernel clocks of pwl and slimbus without
+	 * touching any parents: for slimbus, the PRCMU firmware has not
+	 * configured the clocks correctly at boot, and for pwl the kclk
+	 * parent is unknown.
+	 */
+
+ /* slimbus' bus and kernel clocks */
+ writel(1 << 8, __io_address(U8500_CLKRST1_BASE) + PRCC_PCKDIS);
+ writel(1 << 8, __io_address(U8500_CLKRST1_BASE) + PRCC_KCKDIS);
+ /* pwl's bus and kernel clocks */
+ writel(1 << 3, __io_address(U8500_CLKRST2_BASE) + PRCC_PCKDIS);
+ writel(1 << 1, __io_address(U8500_CLKRST2_BASE) + PRCC_KCKDIS);
+
+ fw_version = prcmu_get_fw_version();
+ if (fw_version != NULL)
+ switch (fw_version->project) {
+ case PRCMU_FW_PROJECT_U8500_C2:
+ case PRCMU_FW_PROJECT_U9500_C2:
+ configure_c2_clocks();
+ break;
+ default:
+ break;
+ }
+ clkdev_add_table(u8500_v2_sysclks,
+ ARRAY_SIZE(u8500_v2_sysclks));
+ clkdev_add_table(u8500_clocks,
+ ARRAY_SIZE(u8500_clocks));
+ return 0;
+}
+
+int __init db8500_clk_debug_init(void)
+{
+ return dbx500_clk_debug_init(db8500_dbg_clks,
+ ARRAY_SIZE(db8500_dbg_clks));
+}
diff --git a/arch/arm/mach-ux500/clock-debug.c b/arch/arm/mach-ux500/clock-debug.c
new file mode 100644
index 00000000000..1ebc69fe061
--- /dev/null
+++ b/arch/arm/mach-ux500/clock-debug.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <mach/hardware.h>
+
+#include "clock.h"
+
+struct clk_debug_info {
+ struct clk *clk;
+ struct dentry *dir;
+ struct dentry *enable;
+ struct dentry *requests;
+ int enabled;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *clk_dir;
+static struct dentry *clk_show;
+static struct dentry *clk_show_enabled_only;
+
+static struct clk_debug_info *cdi;
+static int num_clks;
+
+static int clk_show_print(struct seq_file *s, void *p)
+{
+ int i;
+ int enabled_only = (int)s->private;
+
+ seq_printf(s, "\n%-20s %10s %s\n", "name", "rate",
+ "enabled (kernel + debug)");
+ for (i = 0; i < num_clks; i++) {
+ if (enabled_only && !cdi[i].clk->enabled)
+ continue;
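+		/*
+		 * cdi[i].enabled counts enable requests made through this
+		 * debugfs interface; the remainder are kernel requests.
+		 */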
+ seq_printf(s,
+ "%-20s %10lu %5d + %d\n",
+ cdi[i].clk->name,
+ clk_get_rate(cdi[i].clk),
+ cdi[i].clk->enabled - cdi[i].enabled,
+ cdi[i].enabled);
+ }
+
+ return 0;
+}
+
+static int clk_show_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_show_print, inode->i_private);
+}
+
+static int clk_enable_print(struct seq_file *s, void *p)
+{
+ struct clk_debug_info *cdi = s->private;
+
+ return seq_printf(s, "%d\n", cdi->enabled);
+}
+
+static int clk_enable_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_enable_print, inode->i_private);
+}
+
+static ssize_t clk_enable_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct clk_debug_info *cdi;
+ long user_val;
+ int err;
+
+ cdi = ((struct seq_file *)(file->private_data))->private;
+
+ err = kstrtol_from_user(user_buf, count, 0, &user_val);
+
+ if (err)
+ return err;
+
+ if ((user_val > 0) && (!cdi->enabled)) {
+ err = clk_enable(cdi->clk);
+ if (err) {
+ pr_err("clock: clk_enable(%s) failed.\n",
+ cdi->clk->name);
+ return -EFAULT;
+ }
+ cdi->enabled = 1;
+ } else if ((user_val <= 0) && (cdi->enabled)) {
+ clk_disable(cdi->clk);
+ cdi->enabled = 0;
+ }
+ return count;
+}
+
+static int clk_requests_print(struct seq_file *s, void *p)
+{
+ struct clk_debug_info *cdi = s->private;
+
+ return seq_printf(s, "%d\n", cdi->clk->enabled);
+}
+
+static int clk_requests_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_requests_print, inode->i_private);
+}
+
+static const struct file_operations clk_enable_fops = {
+ .open = clk_enable_open,
+ .write = clk_enable_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations clk_requests_fops = {
+ .open = clk_requests_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations clk_show_fops = {
+ .open = clk_show_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int create_clk_dirs(struct clk_debug_info *cdi, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ cdi[i].dir = debugfs_create_dir(cdi[i].clk->name, clk_dir);
+ if (!cdi[i].dir)
+ goto no_dir;
+ }
+
+ for (i = 0; i < size; i++) {
+ cdi[i].enable = debugfs_create_file("enable",
+ (S_IRUGO | S_IWUGO),
+ cdi[i].dir, &cdi[i],
+ &clk_enable_fops);
+ if (!cdi[i].enable)
+ goto no_enable;
+ }
+ for (i = 0; i < size; i++) {
+ cdi[i].requests = debugfs_create_file("requests", S_IRUGO,
+ cdi[i].dir, &cdi[i],
+ &clk_requests_fops);
+ if (!cdi[i].requests)
+ goto no_requests;
+ }
+ return 0;
+
+no_requests:
+ while (i--)
+ debugfs_remove(cdi[i].requests);
+ i = size;
+no_enable:
+ while (i--)
+ debugfs_remove(cdi[i].enable);
+ i = size;
+no_dir:
+ while (i--)
+ debugfs_remove(cdi[i].dir);
+
+ return -ENOMEM;
+}
+
+int __init dbx500_clk_debug_init(struct clk **clks, int num)
+{
+ int i;
+
+	cdi = kcalloc(num, sizeof(struct clk_debug_info), GFP_KERNEL);
+ if (!cdi)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ cdi[i].clk = clks[i];
+
+ num_clks = num;
+
+ clk_dir = debugfs_create_dir("clk", NULL);
+ if (!clk_dir)
+ goto no_dir;
+
+ clk_show = debugfs_create_file("show", S_IRUGO, clk_dir, (void *)0,
+ &clk_show_fops);
+ if (!clk_show)
+ goto no_show;
+
+ clk_show_enabled_only = debugfs_create_file("show-enabled-only",
+ S_IRUGO, clk_dir, (void *)1,
+ &clk_show_fops);
+ if (!clk_show_enabled_only)
+ goto no_enabled_only;
+
+ if (create_clk_dirs(cdi, num))
+ goto no_clks;
+
+ return 0;
+
+no_clks:
+ debugfs_remove(clk_show_enabled_only);
+no_enabled_only:
+ debugfs_remove(clk_show);
+no_show:
+ debugfs_remove(clk_dir);
+no_dir:
+ kfree(cdi);
+ return -ENOMEM;
+}
+
+static int __init clk_debug_init(void)
+{
+ if (cpu_is_u8500())
+ db8500_clk_debug_init();
+ else if (cpu_is_u5500())
+ db5500_clk_debug_init();
+
+ return 0;
+}
+module_init(clk_debug_init);
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 73790753700..da17bcfdc45 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -7,710 +7,521 @@
* published by the Free Software Foundation.
*/
#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/clkdev.h>
-#include <linux/cpufreq.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/abx500/ab8500-sysctrl.h>
+#include <linux/mfd/dbx500-prcmu.h>
-#include <plat/mtu.h>
-#include <mach/hardware.h>
#include "clock.h"
+#include "prcc.h"
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-#include <linux/uaccess.h> /* for copy_from_user */
-static LIST_HEAD(clk_list);
-#endif
+DEFINE_MUTEX(clk_opp100_mutex);
+static DEFINE_SPINLOCK(clk_spin_lock);
+#define NO_LOCK &clk_spin_lock
-#define PRCC_PCKEN 0x00
-#define PRCC_PCKDIS 0x04
-#define PRCC_KCKEN 0x08
-#define PRCC_KCKDIS 0x0C
-
-#define PRCM_YYCLKEN0_MGT_SET 0x510
-#define PRCM_YYCLKEN1_MGT_SET 0x514
-#define PRCM_YYCLKEN0_MGT_CLR 0x518
-#define PRCM_YYCLKEN1_MGT_CLR 0x51C
-#define PRCM_YYCLKEN0_MGT_VAL 0x520
-#define PRCM_YYCLKEN1_MGT_VAL 0x524
-
-#define PRCM_SVAMMDSPCLK_MGT 0x008
-#define PRCM_SIAMMDSPCLK_MGT 0x00C
-#define PRCM_SGACLK_MGT 0x014
-#define PRCM_UARTCLK_MGT 0x018
-#define PRCM_MSP02CLK_MGT 0x01C
-#define PRCM_MSP1CLK_MGT 0x288
-#define PRCM_I2CCLK_MGT 0x020
-#define PRCM_SDMMCCLK_MGT 0x024
-#define PRCM_SLIMCLK_MGT 0x028
-#define PRCM_PER1CLK_MGT 0x02C
-#define PRCM_PER2CLK_MGT 0x030
-#define PRCM_PER3CLK_MGT 0x034
-#define PRCM_PER5CLK_MGT 0x038
-#define PRCM_PER6CLK_MGT 0x03C
-#define PRCM_PER7CLK_MGT 0x040
-#define PRCM_LCDCLK_MGT 0x044
-#define PRCM_BMLCLK_MGT 0x04C
-#define PRCM_HSITXCLK_MGT 0x050
-#define PRCM_HSIRXCLK_MGT 0x054
-#define PRCM_HDMICLK_MGT 0x058
-#define PRCM_APEATCLK_MGT 0x05C
-#define PRCM_APETRACECLK_MGT 0x060
-#define PRCM_MCDECLK_MGT 0x064
-#define PRCM_IPI2CCLK_MGT 0x068
-#define PRCM_DSIALTCLK_MGT 0x06C
-#define PRCM_DMACLK_MGT 0x074
-#define PRCM_B2R2CLK_MGT 0x078
-#define PRCM_TVCLK_MGT 0x07C
-#define PRCM_TCR 0x1C8
-#define PRCM_TCR_STOPPED (1 << 16)
-#define PRCM_TCR_DOZE_MODE (1 << 17)
-#define PRCM_UNIPROCLK_MGT 0x278
-#define PRCM_SSPCLK_MGT 0x280
-#define PRCM_RNGCLK_MGT 0x284
-#define PRCM_UICCCLK_MGT 0x27C
-
-#define PRCM_MGT_ENABLE (1 << 8)
-
-static DEFINE_SPINLOCK(clocks_lock);
-
-static void __clk_enable(struct clk *clk)
-{
- if (clk->enabled++ == 0) {
- if (clk->parent_cluster)
- __clk_enable(clk->parent_cluster);
+static void __iomem *prcmu_base;
- if (clk->parent_periph)
- __clk_enable(clk->parent_periph);
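+/*
+ * Locking helpers: a clock is protected either by its own mutex or, when
+ * clk->mutex is NULL, by the common spinlock. last_lock is the lock the
+ * caller already holds, so it is not taken again when walking parents.
+ */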
+static void __clk_lock(struct clk *clk, void *last_lock, unsigned long *flags)
+{
+ if (clk->mutex != last_lock) {
+ if (clk->mutex == NULL)
+ spin_lock_irqsave(&clk_spin_lock, *flags);
+ else
+ mutex_lock(clk->mutex);
+ }
+}
- if (clk->ops && clk->ops->enable)
- clk->ops->enable(clk);
+static void __clk_unlock(struct clk *clk, void *last_lock, unsigned long flags)
+{
+ if (clk->mutex != last_lock) {
+ if (clk->mutex == NULL)
+ spin_unlock_irqrestore(&clk_spin_lock, flags);
+ else
+ mutex_unlock(clk->mutex);
}
}
-int clk_enable(struct clk *clk)
+void __clk_disable(struct clk *clk, void *current_lock)
{
unsigned long flags;
- spin_lock_irqsave(&clocks_lock, flags);
- __clk_enable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
+ if (clk == NULL)
+ return;
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
+ __clk_lock(clk, current_lock, &flags);
-static void __clk_disable(struct clk *clk)
-{
- if (--clk->enabled == 0) {
- if (clk->ops && clk->ops->disable)
+ if (clk->enabled && (--clk->enabled == 0)) {
+ if ((clk->ops != NULL) && (clk->ops->disable != NULL))
clk->ops->disable(clk);
+ __clk_disable(clk->parent, clk->mutex);
+ __clk_disable(clk->bus_parent, clk->mutex);
+ }
- if (clk->parent_periph)
- __clk_disable(clk->parent_periph);
+ __clk_unlock(clk, current_lock, flags);
- if (clk->parent_cluster)
- __clk_disable(clk->parent_cluster);
- }
+ return;
}
-void clk_disable(struct clk *clk)
+int __clk_enable(struct clk *clk, void *current_lock)
{
+ int err;
unsigned long flags;
- WARN_ON(!clk->enabled);
+ if (clk == NULL)
+ return 0;
- spin_lock_irqsave(&clocks_lock, flags);
- __clk_disable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
+ __clk_lock(clk, current_lock, &flags);
-/*
- * The MTU has a separate, rather complex muxing setup
- * with alternative parents (peripheral cluster or
- * ULP or fixed 32768 Hz) depending on settings
- */
-static unsigned long clk_mtu_get_rate(struct clk *clk)
-{
- void __iomem *addr;
- u32 tcr;
- int mtu = (int) clk->data;
- /*
- * One of these is selected eventually
- * TODO: Replace the constant with a reference
- * to the ULP source once this is modeled.
- */
- unsigned long clk32k = 32768;
- unsigned long mturate;
- unsigned long retclk;
-
- if (cpu_is_u5500())
- addr = __io_address(U5500_PRCMU_BASE);
- else if (cpu_is_u8500())
- addr = __io_address(U8500_PRCMU_BASE);
- else
- ux500_unknown_soc();
+ if (!clk->enabled) {
+ err = __clk_enable(clk->bus_parent, clk->mutex);
+ if (unlikely(err))
+ goto bus_parent_error;
+
+ err = __clk_enable(clk->parent, clk->mutex);
+ if (unlikely(err))
+ goto parent_error;
- /*
- * On a startup, always conifgure the TCR to the doze mode;
- * bootloaders do it for us. Do this in the kernel too.
- */
- writel(PRCM_TCR_DOZE_MODE, addr + PRCM_TCR);
+ if ((clk->ops != NULL) && (clk->ops->enable != NULL)) {
+ err = clk->ops->enable(clk);
+ if (unlikely(err))
+ goto enable_error;
+ }
+ }
+ clk->enabled++;
- tcr = readl(addr + PRCM_TCR);
+ __clk_unlock(clk, current_lock, flags);
- /* Get the rate from the parent as a default */
- if (clk->parent_periph)
- mturate = clk_get_rate(clk->parent_periph);
- else if (clk->parent_cluster)
- mturate = clk_get_rate(clk->parent_cluster);
- else
- /* We need to be connected SOMEWHERE */
- BUG();
+ return 0;
- /* Return the clock selected for this MTU */
- if (tcr & (1 << mtu))
- retclk = clk32k;
- else
- retclk = mturate;
+enable_error:
+ __clk_disable(clk->parent, clk->mutex);
+parent_error:
+ __clk_disable(clk->bus_parent, clk->mutex);
+bus_parent_error:
+
+ __clk_unlock(clk, current_lock, flags);
- pr_info("MTU%d clock rate: %lu Hz\n", mtu, retclk);
- return retclk;
+ return err;
}
-unsigned long clk_get_rate(struct clk *clk)
+unsigned long __clk_get_rate(struct clk *clk, void *current_lock)
{
unsigned long rate;
+ unsigned long flags;
- /*
- * If there is a custom getrate callback for this clock,
- * it will take precedence.
- */
- if (clk->get_rate)
- return clk->get_rate(clk);
-
- if (clk->ops && clk->ops->get_rate)
- return clk->ops->get_rate(clk);
-
- rate = clk->rate;
- if (!rate) {
- if (clk->parent_periph)
- rate = clk_get_rate(clk->parent_periph);
- else if (clk->parent_cluster)
- rate = clk_get_rate(clk->parent_cluster);
- }
+ if (clk == NULL)
+ return 0;
+
+ __clk_lock(clk, current_lock, &flags);
+
+ if ((clk->ops != NULL) && (clk->ops->get_rate != NULL))
+ rate = clk->ops->get_rate(clk);
+ else if (clk->rate)
+ rate = clk->rate;
+ else
+ rate = __clk_get_rate(clk->parent, clk->mutex);
+
+ __clk_unlock(clk, current_lock, flags);
return rate;
}
-EXPORT_SYMBOL(clk_get_rate);
-long clk_round_rate(struct clk *clk, unsigned long rate)
+static long __clk_round_rate(struct clk *clk, unsigned long rate)
{
- /*TODO*/
- return rate;
+ if ((clk->ops != NULL) && (clk->ops->round_rate != NULL))
+ return clk->ops->round_rate(clk, rate);
+
+ return -ENOSYS;
}
-EXPORT_SYMBOL(clk_round_rate);
-int clk_set_rate(struct clk *clk, unsigned long rate)
+static int __clk_set_rate(struct clk *clk, unsigned long rate)
{
- clk->rate = rate;
- return 0;
+ if ((clk->ops != NULL) && (clk->ops->set_rate != NULL))
+ return clk->ops->set_rate(clk, rate);
+
+ return -ENOSYS;
}
-EXPORT_SYMBOL(clk_set_rate);
-static void clk_prcmu_enable(struct clk *clk)
+int clk_enable(struct clk *clk)
{
- void __iomem *cg_set_reg = __io_address(U8500_PRCMU_BASE)
- + PRCM_YYCLKEN0_MGT_SET + clk->prcmu_cg_off;
+ if (clk == NULL)
+ return -EINVAL;
- writel(1 << clk->prcmu_cg_bit, cg_set_reg);
+ return __clk_enable(clk, NO_LOCK);
}
+EXPORT_SYMBOL(clk_enable);
-static void clk_prcmu_disable(struct clk *clk)
+void clk_disable(struct clk *clk)
{
- void __iomem *cg_clr_reg = __io_address(U8500_PRCMU_BASE)
- + PRCM_YYCLKEN0_MGT_CLR + clk->prcmu_cg_off;
- writel(1 << clk->prcmu_cg_bit, cg_clr_reg);
+ if (clk == NULL)
+ return;
+
+ WARN_ON(!clk->enabled);
+ __clk_disable(clk, NO_LOCK);
}
+EXPORT_SYMBOL(clk_disable);
-static struct clkops clk_prcmu_ops = {
- .enable = clk_prcmu_enable,
- .disable = clk_prcmu_disable,
-};
+unsigned long clk_get_rate(struct clk *clk)
+{
+ if (clk == NULL)
+ return 0;
-static unsigned int clkrst_base[] = {
- [1] = U8500_CLKRST1_BASE,
- [2] = U8500_CLKRST2_BASE,
- [3] = U8500_CLKRST3_BASE,
- [5] = U8500_CLKRST5_BASE,
- [6] = U8500_CLKRST6_BASE,
-};
+ return __clk_get_rate(clk, NO_LOCK);
+}
+EXPORT_SYMBOL(clk_get_rate);
-static void clk_prcc_enable(struct clk *clk)
+long clk_round_rate(struct clk *clk, unsigned long rate)
{
- void __iomem *addr = __io_address(clkrst_base[clk->cluster]);
+ long rounded_rate;
+ unsigned long flags;
+
+ if (clk == NULL)
+ return -EINVAL;
- if (clk->prcc_kernel != -1)
- writel(1 << clk->prcc_kernel, addr + PRCC_KCKEN);
+ __clk_lock(clk, NO_LOCK, &flags);
- if (clk->prcc_bus != -1)
- writel(1 << clk->prcc_bus, addr + PRCC_PCKEN);
+ rounded_rate = __clk_round_rate(clk, rate);
+
+ __clk_unlock(clk, NO_LOCK, flags);
+
+ return rounded_rate;
}
+EXPORT_SYMBOL(clk_round_rate);
-static void clk_prcc_disable(struct clk *clk)
+long clk_round_rate_rec(struct clk *clk, unsigned long rate)
{
- void __iomem *addr = __io_address(clkrst_base[clk->cluster]);
+ long rounded_rate;
+ unsigned long flags;
+
+ if ((clk == NULL) || (clk->parent == NULL))
+ return -EINVAL;
+
+ __clk_lock(clk->parent, clk->mutex, &flags);
- if (clk->prcc_bus != -1)
- writel(1 << clk->prcc_bus, addr + PRCC_PCKDIS);
+ rounded_rate = __clk_round_rate(clk->parent, rate);
- if (clk->prcc_kernel != -1)
- writel(1 << clk->prcc_kernel, addr + PRCC_KCKDIS);
+ __clk_unlock(clk->parent, clk->mutex, flags);
+
+ return rounded_rate;
}
-static struct clkops clk_prcc_ops = {
- .enable = clk_prcc_enable,
- .disable = clk_prcc_disable,
-};
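+/*
+ * Walk up the parent chain and bump rate_locked on every ancestor, so that
+ * a concurrent clk_set_rate() on a parent fails with -EAGAIN while a child
+ * rate change is in progress.
+ */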
+static void lock_parent_rate(struct clk *clk)
+{
+ unsigned long flags;
-static struct clk clk_32khz = {
- .name = "clk_32khz",
- .rate = 32000,
-};
+ if (clk->parent == NULL)
+ return;
-/*
- * PRCMU level clock gating
- */
+ __clk_lock(clk->parent, clk->mutex, &flags);
-/* Bank 0 */
-static DEFINE_PRCMU_CLK(svaclk, 0x0, 2, SVAMMDSPCLK);
-static DEFINE_PRCMU_CLK(siaclk, 0x0, 3, SIAMMDSPCLK);
-static DEFINE_PRCMU_CLK(sgaclk, 0x0, 4, SGACLK);
-static DEFINE_PRCMU_CLK_RATE(uartclk, 0x0, 5, UARTCLK, 38400000);
-static DEFINE_PRCMU_CLK(msp02clk, 0x0, 6, MSP02CLK);
-static DEFINE_PRCMU_CLK(msp1clk, 0x0, 7, MSP1CLK); /* v1 */
-static DEFINE_PRCMU_CLK_RATE(i2cclk, 0x0, 8, I2CCLK, 48000000);
-static DEFINE_PRCMU_CLK_RATE(sdmmcclk, 0x0, 9, SDMMCCLK, 100000000);
-static DEFINE_PRCMU_CLK(slimclk, 0x0, 10, SLIMCLK);
-static DEFINE_PRCMU_CLK(per1clk, 0x0, 11, PER1CLK);
-static DEFINE_PRCMU_CLK(per2clk, 0x0, 12, PER2CLK);
-static DEFINE_PRCMU_CLK(per3clk, 0x0, 13, PER3CLK);
-static DEFINE_PRCMU_CLK(per5clk, 0x0, 14, PER5CLK);
-static DEFINE_PRCMU_CLK_RATE(per6clk, 0x0, 15, PER6CLK, 133330000);
-static DEFINE_PRCMU_CLK(lcdclk, 0x0, 17, LCDCLK);
-static DEFINE_PRCMU_CLK(bmlclk, 0x0, 18, BMLCLK);
-static DEFINE_PRCMU_CLK(hsitxclk, 0x0, 19, HSITXCLK);
-static DEFINE_PRCMU_CLK(hsirxclk, 0x0, 20, HSIRXCLK);
-static DEFINE_PRCMU_CLK(hdmiclk, 0x0, 21, HDMICLK);
-static DEFINE_PRCMU_CLK(apeatclk, 0x0, 22, APEATCLK);
-static DEFINE_PRCMU_CLK(apetraceclk, 0x0, 23, APETRACECLK);
-static DEFINE_PRCMU_CLK(mcdeclk, 0x0, 24, MCDECLK);
-static DEFINE_PRCMU_CLK(ipi2clk, 0x0, 25, IPI2CCLK);
-static DEFINE_PRCMU_CLK(dsialtclk, 0x0, 26, DSIALTCLK); /* v1 */
-static DEFINE_PRCMU_CLK(dmaclk, 0x0, 27, DMACLK);
-static DEFINE_PRCMU_CLK(b2r2clk, 0x0, 28, B2R2CLK);
-static DEFINE_PRCMU_CLK(tvclk, 0x0, 29, TVCLK);
-static DEFINE_PRCMU_CLK(uniproclk, 0x0, 30, UNIPROCLK); /* v1 */
-static DEFINE_PRCMU_CLK_RATE(sspclk, 0x0, 31, SSPCLK, 48000000); /* v1 */
-
-/* Bank 1 */
-static DEFINE_PRCMU_CLK(rngclk, 0x4, 0, RNGCLK); /* v1 */
-static DEFINE_PRCMU_CLK(uiccclk, 0x4, 1, UICCCLK); /* v1 */
+ lock_parent_rate(clk->parent);
+ clk->parent->rate_locked++;
-/*
- * PRCC level clock gating
- * Format: per#, clk, PCKEN bit, KCKEN bit, parent
- */
+ __clk_unlock(clk->parent, clk->mutex, flags);
+}
-/* Peripheral Cluster #1 */
-static DEFINE_PRCC_CLK(1, i2c4, 10, 9, &clk_i2cclk);
-static DEFINE_PRCC_CLK(1, gpio0, 9, -1, NULL);
-static DEFINE_PRCC_CLK(1, slimbus0, 8, 8, &clk_slimclk);
-static DEFINE_PRCC_CLK(1, spi3, 7, -1, NULL);
-static DEFINE_PRCC_CLK(1, i2c2, 6, 6, &clk_i2cclk);
-static DEFINE_PRCC_CLK(1, sdi0, 5, 5, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(1, msp1, 4, 4, &clk_msp1clk);
-static DEFINE_PRCC_CLK(1, msp0, 3, 3, &clk_msp02clk);
-static DEFINE_PRCC_CLK(1, i2c1, 2, 2, &clk_i2cclk);
-static DEFINE_PRCC_CLK(1, uart1, 1, 1, &clk_uartclk);
-static DEFINE_PRCC_CLK(1, uart0, 0, 0, &clk_uartclk);
-
-/* Peripheral Cluster #2 */
-static DEFINE_PRCC_CLK(2, gpio1, 11, -1, NULL);
-static DEFINE_PRCC_CLK(2, ssitx, 10, 7, NULL);
-static DEFINE_PRCC_CLK(2, ssirx, 9, 6, NULL);
-static DEFINE_PRCC_CLK(2, spi0, 8, -1, NULL);
-static DEFINE_PRCC_CLK(2, sdi3, 7, 5, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, sdi1, 6, 4, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, msp2, 5, 3, &clk_msp02clk);
-static DEFINE_PRCC_CLK(2, sdi4, 4, 2, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, pwl, 3, 1, NULL);
-static DEFINE_PRCC_CLK(2, spi1, 2, -1, NULL);
-static DEFINE_PRCC_CLK(2, spi2, 1, -1, NULL);
-static DEFINE_PRCC_CLK(2, i2c3, 0, 0, &clk_i2cclk);
-
-/* Peripheral Cluster #3 */
-static DEFINE_PRCC_CLK(3, gpio2, 8, -1, NULL);
-static DEFINE_PRCC_CLK(3, sdi5, 7, 7, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(3, uart2, 6, 6, &clk_uartclk);
-static DEFINE_PRCC_CLK(3, ske, 5, 5, &clk_32khz);
-static DEFINE_PRCC_CLK(3, sdi2, 4, 4, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(3, i2c0, 3, 3, &clk_i2cclk);
-static DEFINE_PRCC_CLK(3, ssp1, 2, 2, &clk_sspclk);
-static DEFINE_PRCC_CLK(3, ssp0, 1, 1, &clk_sspclk);
-static DEFINE_PRCC_CLK(3, fsmc, 0, -1, NULL);
-
-/* Peripheral Cluster #4 is in the always on domain */
-
-/* Peripheral Cluster #5 */
-static DEFINE_PRCC_CLK(5, gpio3, 1, -1, NULL);
-static DEFINE_PRCC_CLK(5, usb, 0, 0, NULL);
-
-/* Peripheral Cluster #6 */
-
-/* MTU ID in data */
-static DEFINE_PRCC_CLK_CUSTOM(6, mtu1, 8, -1, NULL, clk_mtu_get_rate, 1);
-static DEFINE_PRCC_CLK_CUSTOM(6, mtu0, 7, -1, NULL, clk_mtu_get_rate, 0);
-static DEFINE_PRCC_CLK(6, cfgreg, 6, 6, NULL);
-static DEFINE_PRCC_CLK(6, hash1, 5, -1, NULL);
-static DEFINE_PRCC_CLK(6, unipro, 4, 1, &clk_uniproclk);
-static DEFINE_PRCC_CLK(6, pka, 3, -1, NULL);
-static DEFINE_PRCC_CLK(6, hash0, 2, -1, NULL);
-static DEFINE_PRCC_CLK(6, cryp0, 1, -1, NULL);
-static DEFINE_PRCC_CLK(6, rng, 0, 0, &clk_rngclk);
-
-static struct clk clk_dummy_apb_pclk = {
- .name = "apb_pclk",
-};
+static void unlock_parent_rate(struct clk *clk)
+{
+ unsigned long flags;
-static struct clk_lookup u8500_clks[] = {
- CLK(dummy_apb_pclk, NULL, "apb_pclk"),
-
- /* Peripheral Cluster #1 */
- CLK(gpio0, "gpio.0", NULL),
- CLK(gpio0, "gpio.1", NULL),
- CLK(slimbus0, "slimbus0", NULL),
- CLK(i2c2, "nmk-i2c.2", NULL),
- CLK(sdi0, "sdi0", NULL),
- CLK(msp0, "msp0", NULL),
- CLK(i2c1, "nmk-i2c.1", NULL),
- CLK(uart1, "uart1", NULL),
- CLK(uart0, "uart0", NULL),
-
- /* Peripheral Cluster #3 */
- CLK(gpio2, "gpio.2", NULL),
- CLK(gpio2, "gpio.3", NULL),
- CLK(gpio2, "gpio.4", NULL),
- CLK(gpio2, "gpio.5", NULL),
- CLK(sdi5, "sdi5", NULL),
- CLK(uart2, "uart2", NULL),
- CLK(ske, "ske", NULL),
- CLK(ske, "nmk-ske-keypad", NULL),
- CLK(sdi2, "sdi2", NULL),
- CLK(i2c0, "nmk-i2c.0", NULL),
- CLK(fsmc, "fsmc", NULL),
-
- /* Peripheral Cluster #5 */
- CLK(gpio3, "gpio.8", NULL),
-
- /* Peripheral Cluster #6 */
- CLK(hash1, "hash1", NULL),
- CLK(pka, "pka", NULL),
- CLK(hash0, "hash0", NULL),
- CLK(cryp0, "cryp0", NULL),
-
- /* PRCMU level clock gating */
-
- /* Bank 0 */
- CLK(svaclk, "sva", NULL),
- CLK(siaclk, "sia", NULL),
- CLK(sgaclk, "sga", NULL),
- CLK(slimclk, "slim", NULL),
- CLK(lcdclk, "lcd", NULL),
- CLK(bmlclk, "bml", NULL),
- CLK(hsitxclk, "stm-hsi.0", NULL),
- CLK(hsirxclk, "stm-hsi.1", NULL),
- CLK(hdmiclk, "hdmi", NULL),
- CLK(apeatclk, "apeat", NULL),
- CLK(apetraceclk, "apetrace", NULL),
- CLK(mcdeclk, "mcde", NULL),
- CLK(ipi2clk, "ipi2", NULL),
- CLK(dmaclk, "dma40.0", NULL),
- CLK(b2r2clk, "b2r2", NULL),
- CLK(tvclk, "tv", NULL),
-
- /* Peripheral Cluster #1 */
- CLK(i2c4, "nmk-i2c.4", NULL),
- CLK(spi3, "spi3", NULL),
- CLK(msp1, "msp1", NULL),
-
- /* Peripheral Cluster #2 */
- CLK(gpio1, "gpio.6", NULL),
- CLK(gpio1, "gpio.7", NULL),
- CLK(ssitx, "ssitx", NULL),
- CLK(ssirx, "ssirx", NULL),
- CLK(spi0, "spi0", NULL),
- CLK(sdi3, "sdi3", NULL),
- CLK(sdi1, "sdi1", NULL),
- CLK(msp2, "msp2", NULL),
- CLK(sdi4, "sdi4", NULL),
- CLK(pwl, "pwl", NULL),
- CLK(spi1, "spi1", NULL),
- CLK(spi2, "spi2", NULL),
- CLK(i2c3, "nmk-i2c.3", NULL),
-
- /* Peripheral Cluster #3 */
- CLK(ssp1, "ssp1", NULL),
- CLK(ssp0, "ssp0", NULL),
-
- /* Peripheral Cluster #5 */
- CLK(usb, "musb-ux500.0", "usb"),
-
- /* Peripheral Cluster #6 */
- CLK(mtu1, "mtu1", NULL),
- CLK(mtu0, "mtu0", NULL),
- CLK(cfgreg, "cfgreg", NULL),
- CLK(hash1, "hash1", NULL),
- CLK(unipro, "unipro", NULL),
- CLK(rng, "rng", NULL),
-
- /* PRCMU level clock gating */
-
- /* Bank 0 */
- CLK(uniproclk, "uniproclk", NULL),
- CLK(dsialtclk, "dsialt", NULL),
-
- /* Bank 1 */
- CLK(rngclk, "rng", NULL),
- CLK(uiccclk, "uicc", NULL),
-};
+ if (clk->parent == NULL)
+ return;
-#ifdef CONFIG_DEBUG_FS
-/*
- * debugfs support to trace clock tree hierarchy and attributes with
- * powerdebug
- */
-static struct dentry *clk_debugfs_root;
+ __clk_lock(clk->parent, clk->mutex, &flags);
+
+ unlock_parent_rate(clk->parent);
+ clk->parent->rate_locked--;
-void __init clk_debugfs_add_table(struct clk_lookup *cl, size_t num)
+ __clk_unlock(clk->parent, clk->mutex, flags);
+}
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
{
- while (num--) {
- /* Check that the clock has not been already registered */
- if (!(cl->clk->list.prev != cl->clk->list.next))
- list_add_tail(&cl->clk->list, &clk_list);
+ int err;
+ unsigned long flags;
+
+ if (clk == NULL)
+ return -EINVAL;
- cl++;
+ __clk_lock(clk, NO_LOCK, &flags);
+
+ if (clk->enabled) {
+ err = -EBUSY;
+ goto unlock_and_return;
+ }
+ if (clk->rate_locked) {
+ err = -EAGAIN;
+ goto unlock_and_return;
}
+
+ lock_parent_rate(clk);
+ err = __clk_set_rate(clk, rate);
+ unlock_parent_rate(clk);
+
+unlock_and_return:
+ __clk_unlock(clk, NO_LOCK, flags);
+
+ return err;
}
+EXPORT_SYMBOL(clk_set_rate);
-static ssize_t usecount_dbg_read(struct file *file, char __user *buf,
- size_t size, loff_t *off)
+int clk_set_rate_rec(struct clk *clk, unsigned long rate)
{
- struct clk *clk = file->f_dentry->d_inode->i_private;
- char cusecount[128];
- unsigned int len;
+ int err;
+ unsigned long flags;
+
+ if ((clk == NULL) || (clk->parent == NULL))
+ return -EINVAL;
+
+ __clk_lock(clk->parent, clk->mutex, &flags);
+
+ if (clk->parent->enabled) {
+ err = -EBUSY;
+ goto unlock_and_return;
+ }
+ if (clk->parent->rate_locked != 1) {
+ err = -EAGAIN;
+ goto unlock_and_return;
+ }
+ err = __clk_set_rate(clk->parent, rate);
- len = sprintf(cusecount, "%u\n", clk->enabled);
- return simple_read_from_buffer(buf, size, off, cusecount, len);
+unlock_and_return:
+ __clk_unlock(clk->parent, clk->mutex, flags);
+
+ return err;
}
-static ssize_t rate_dbg_read(struct file *file, char __user *buf,
- size_t size, loff_t *off)
+int clk_set_parent(struct clk *clk, struct clk *parent)
{
- struct clk *clk = file->f_dentry->d_inode->i_private;
- char crate[128];
- unsigned int rate;
- unsigned int len;
-
- rate = clk_get_rate(clk);
- len = sprintf(crate, "%u\n", rate);
- return simple_read_from_buffer(buf, size, off, crate, len);
-}
+ int err = 0;
+ unsigned long flags;
+ struct clk **p;
-static const struct file_operations usecount_fops = {
- .read = usecount_dbg_read,
-};
+ if ((clk == NULL) || (clk->parents == NULL))
+ return -EINVAL;
+ for (p = clk->parents; *p != parent; p++) {
+ if (*p == NULL) /* invalid parent */
+ return -EINVAL;
+ }
-static const struct file_operations set_rate_fops = {
- .read = rate_dbg_read,
-};
+ __clk_lock(clk, NO_LOCK, &flags);
+
+ if ((clk->ops != NULL) && (clk->ops->set_parent != NULL)) {
+ err = clk->ops->set_parent(clk, parent);
+ if (err)
+ goto unlock_and_return;
+ } else if (clk->enabled) {
+ err = __clk_enable(parent, clk->mutex);
+ if (err)
+ goto unlock_and_return;
+ __clk_disable(clk->parent, clk->mutex);
+ }
-static struct dentry *clk_debugfs_register_dir(struct clk *c,
- struct dentry *p_dentry)
+ clk->parent = parent;
+
+unlock_and_return:
+ __clk_unlock(clk, NO_LOCK, flags);
+
+ return err;
+}
+
+/* PRCMU clock operations. */
+
+static int prcmu_clk_enable(struct clk *clk)
{
- struct dentry *d, *clk_d;
- const char *p = c->name;
-
- if (!p)
- p = "BUG";
-
- clk_d = debugfs_create_dir(p, p_dentry);
- if (!clk_d)
- return NULL;
-
- d = debugfs_create_file("usecount", S_IRUGO,
- clk_d, c, &usecount_fops);
- if (!d)
- goto err_out;
- d = debugfs_create_file("rate", S_IRUGO,
- clk_d, c, &set_rate_fops);
- if (!d)
- goto err_out;
- /*
- * TODO : not currently available in ux500
- * d = debugfs_create_x32("flags", S_IRUGO, clk_d, (u32 *)&c->flags);
- * if (!d)
- * goto err_out;
- */
-
- return clk_d;
-
-err_out:
- debugfs_remove_recursive(clk_d);
- return NULL;
+ return prcmu_request_clock(clk->cg_sel, true);
}
-static int clk_debugfs_register_one(struct clk *c)
+static void prcmu_clk_disable(struct clk *clk)
{
- struct clk *pa = c->parent_periph;
- struct clk *bpa = c->parent_cluster;
-
- if (!(bpa && !pa)) {
- c->dent = clk_debugfs_register_dir(c,
- pa ? pa->dent : clk_debugfs_root);
- if (!c->dent)
- return -ENOMEM;
+ if (prcmu_request_clock(clk->cg_sel, false)) {
+ pr_err("clock: %s failed to disable %s.\n", __func__,
+ clk->name);
}
+}
+
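+/*
+ * Reference-counted APE OPP 100% request: the first enable adds a PRCMU QoS
+ * requirement and the last disable removes it again.
+ */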
+static int request_ape_opp100(bool enable)
+{
+ static unsigned int requests;
- if (bpa) {
- c->dent_bus = clk_debugfs_register_dir(c,
- bpa->dent_bus ? bpa->dent_bus : bpa->dent);
- if ((!c->dent_bus) && (c->dent)) {
- debugfs_remove_recursive(c->dent);
- c->dent = NULL;
- return -ENOMEM;
+ if (enable) {
+ if (0 == requests++) {
+ return prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
+ "clock", 100);
}
+ } else if (1 == requests--) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "clock");
}
return 0;
}
-static int clk_debugfs_register(struct clk *c)
+static int prcmu_opp100_clk_enable(struct clk *clk)
{
- int err;
- struct clk *pa = c->parent_periph;
- struct clk *bpa = c->parent_cluster;
+ int r;
- if (pa && (!pa->dent && !pa->dent_bus)) {
- err = clk_debugfs_register(pa);
- if (err)
- return err;
+ r = request_ape_opp100(true);
+ if (r) {
+ pr_err("clock: %s failed to request APE OPP 100%% for %s.\n",
+ __func__, clk->name);
+ return r;
}
+ return prcmu_request_clock(clk->cg_sel, true);
+}
- if (bpa && (!bpa->dent && !bpa->dent_bus)) {
- err = clk_debugfs_register(bpa);
- if (err)
- return err;
- }
+static void prcmu_opp100_clk_disable(struct clk *clk)
+{
+ if (prcmu_request_clock(clk->cg_sel, false))
+ goto out_error;
+ if (request_ape_opp100(false))
+ goto out_error;
+ return;
+
+out_error:
+ pr_err("clock: %s failed to disable %s.\n", __func__, clk->name);
+}
- if ((!c->dent) && (!c->dent_bus)) {
- err = clk_debugfs_register_one(c);
- if (err)
- return err;
- }
- return 0;
+static unsigned long prcmu_clk_get_rate(struct clk *clk)
+{
+ return prcmu_clock_rate(clk->cg_sel);
}
-static int __init clk_debugfs_init(void)
+static long prcmu_clk_round_rate(struct clk *clk, unsigned long rate)
{
- struct clk *c;
- struct dentry *d;
- int err;
+ return prcmu_round_clock_rate(clk->cg_sel, rate);
+}
+
+static int prcmu_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return prcmu_set_clock_rate(clk->cg_sel, rate);
+}
- d = debugfs_create_dir("clock", NULL);
- if (!d)
- return -ENOMEM;
- clk_debugfs_root = d;
+struct clkops prcmu_clk_ops = {
+ .enable = prcmu_clk_enable,
+ .disable = prcmu_clk_disable,
+ .get_rate = prcmu_clk_get_rate,
+};
- list_for_each_entry(c, &clk_list, list) {
- err = clk_debugfs_register(c);
- if (err)
- goto err_out;
- }
+struct clkops prcmu_scalable_clk_ops = {
+ .enable = prcmu_clk_enable,
+ .disable = prcmu_clk_disable,
+ .get_rate = prcmu_clk_get_rate,
+ .round_rate = prcmu_clk_round_rate,
+ .set_rate = prcmu_clk_set_rate,
+};
+
+struct clkops prcmu_opp100_clk_ops = {
+ .enable = prcmu_opp100_clk_enable,
+ .disable = prcmu_opp100_clk_disable,
+ .get_rate = prcmu_clk_get_rate,
+};
+
+/* PRCC clock operations. */
+
+static int prcc_pclk_enable(struct clk *clk)
+{
+ void __iomem *io_base = __io_address(clk->io_base);
+
+ writel(clk->cg_sel, (io_base + PRCC_PCKEN));
+ while (!(readl(io_base + PRCC_PCKSR) & clk->cg_sel))
+ cpu_relax();
return 0;
-err_out:
- debugfs_remove_recursive(clk_debugfs_root);
- return err;
}
-late_initcall(clk_debugfs_init);
-#endif /* defined(CONFIG_DEBUG_FS) */
+static void prcc_pclk_disable(struct clk *clk)
+{
+ void __iomem *io_base = __io_address(clk->io_base);
+
+ writel(clk->cg_sel, (io_base + PRCC_PCKDIS));
+}
-unsigned long clk_smp_twd_rate = 500000000;
+struct clkops prcc_pclk_ops = {
+ .enable = prcc_pclk_enable,
+ .disable = prcc_pclk_disable,
+};
-unsigned long clk_smp_twd_get_rate(struct clk *clk)
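+/*
+ * Kernel clock gating: clk->clock (the clock needed to control this clock)
+ * is enabled around each PRCC kernel-clock register access and released
+ * again afterwards.
+ */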
+static int prcc_kclk_enable(struct clk *clk)
+{
+ int err;
+ void __iomem *io_base = __io_address(clk->io_base);
+
+ err = __clk_enable(clk->clock, clk->mutex);
+ if (err)
+ return err;
+
+ writel(clk->cg_sel, (io_base + PRCC_KCKEN));
+ while (!(readl(io_base + PRCC_KCKSR) & clk->cg_sel))
+ cpu_relax();
+
+ __clk_disable(clk->clock, clk->mutex);
+
+ return 0;
+}
+
+static void prcc_kclk_disable(struct clk *clk)
{
- return clk_smp_twd_rate;
+ void __iomem *io_base = __io_address(clk->io_base);
+
+ (void)__clk_enable(clk->clock, clk->mutex);
+ writel(clk->cg_sel, (io_base + PRCC_KCKDIS));
+ __clk_disable(clk->clock, clk->mutex);
}
-static struct clk clk_smp_twd = {
- .get_rate = clk_smp_twd_get_rate,
- .name = "smp_twd",
+struct clkops prcc_kclk_ops = {
+ .enable = prcc_kclk_enable,
+ .disable = prcc_kclk_disable,
};
-static struct clk_lookup clk_smp_twd_lookup = {
- .dev_id = "smp_twd",
- .clk = &clk_smp_twd,
+struct clkops prcc_kclk_rec_ops = {
+ .enable = prcc_kclk_enable,
+ .disable = prcc_kclk_disable,
+ .round_rate = clk_round_rate_rec,
+ .set_rate = clk_set_rate_rec,
};
#ifdef CONFIG_CPU_FREQ
+extern unsigned long dbx500_cpufreq_getfreq(void);
-static int clk_twd_cpufreq_transition(struct notifier_block *nb,
- unsigned long state, void *data)
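+/* The TWD local timer runs at half the current CPU frequency. */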
+unsigned long clk_smp_twd_get_rate(struct clk *clk)
{
- struct cpufreq_freqs *f = data;
-
- if (state == CPUFREQ_PRECHANGE) {
- /* Save frequency in simple Hz */
- clk_smp_twd_rate = (f->new * 1000) / 2;
- }
-
- return NOTIFY_OK;
+ return dbx500_cpufreq_getfreq() / 2;
}
-static struct notifier_block clk_twd_cpufreq_nb = {
- .notifier_call = clk_twd_cpufreq_transition,
+static struct clkops clk_smp_twd_ops = {
+ .get_rate = clk_smp_twd_get_rate,
};
-static int clk_init_smp_twd_cpufreq(void)
-{
- return cpufreq_register_notifier(&clk_twd_cpufreq_nb,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-late_initcall(clk_init_smp_twd_cpufreq);
+static struct clk clk_smp_twd = {
+ .name = "smp_twd",
+ .ops = &clk_smp_twd_ops,
+};
+static struct clk_lookup clk_smp_twd_lookup = {
+ .clk = &clk_smp_twd,
+ .dev_id = "smp_twd",
+};
#endif
int __init clk_init(void)
{
- if (cpu_is_u5500()) {
- /* Clock tree for U5500 not implemented yet */
- clk_prcc_ops.enable = clk_prcc_ops.disable = NULL;
- clk_prcmu_ops.enable = clk_prcmu_ops.disable = NULL;
- clk_uartclk.rate = 36360000;
- clk_sdmmcclk.rate = 99900000;
+ if (cpu_is_u8500()) {
+ prcmu_base = __io_address(U8500_PRCMU_BASE);
+ } else if (cpu_is_u5500()) {
+ prcmu_base = __io_address(U5500_PRCMU_BASE);
+ } else {
+		pr_err("clock: Unknown DB ASIC.\n");
+ return -EIO;
}
- clkdev_add_table(u8500_clks, ARRAY_SIZE(u8500_clks));
- clkdev_add(&clk_smp_twd_lookup);
+ if (cpu_is_u8500())
+ db8500_clk_init();
+ else if (cpu_is_u5500())
+ db5500_clk_init();
-#ifdef CONFIG_DEBUG_FS
- clk_debugfs_add_table(u8500_clks, ARRAY_SIZE(u8500_clks));
+#ifdef CONFIG_CPU_FREQ
+ clkdev_add(&clk_smp_twd_lookup);
#endif
+
return 0;
}
diff --git a/arch/arm/mach-ux500/clock.h b/arch/arm/mach-ux500/clock.h
index 07449070522..f96f9986e4d 100644
--- a/arch/arm/mach-ux500/clock.h
+++ b/arch/arm/mach-ux500/clock.h
@@ -1,11 +1,57 @@
/*
- * Copyright (C) 2010 ST-Ericsson
+ * Copyright (C) 2010 ST-Ericsson SA
* Copyright (C) 2009 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#ifndef UX500_CLOCK_H
+#define UX500_CLOCK_H
+
+#include <linux/clkdev.h>
+
+/**
+ * struct clk - ux500 clock structure
+ * @ops: The hardware specific operations defined for the clock.
+ * @name: The name of the clock.
+ * @mutex: The mutex to lock when operating on the clock. %NULL means that
+ * the common clock spinlock will be used.
+ * @enabled: A reference counter of the enable requests for the clock.
+ * @rate_locked: A rate lock counter used by clk_set_rate().
+ * @opp100: A flag saying whether the clock is requested to run at the
+ * OPP 100%% frequency.
+ * @rate: The frequency of the clock. For scalable and scaling clocks,
+ * this is the OPP 100%% frequency.
+ * @io_base: An IO memory base address, meaningful only when considered
+ * together with the defined @ops.
+ * @cg_sel: Clock gate selector, meaningful only when considered together
+ * with the specified @ops.
+ * @parent: The current (or only) parent clock of the clock.
+ * @bus_parent: The (optional) auxiliary bus clock "parent" of the clock.
+ * @parents: A list of the possible parents the clock can have. This should
+ *		be a %NULL-terminated &struct clk array. Present if and only
+ * if clk_set_parent() is implemented for the clock.
+ * @regulator: The regulator needed to have the clock functional, if any.
+ * @clock: The clock needed to control the clock, if any.
+ */
+struct clk {
+ const struct clkops *ops;
+ const char *name;
+ struct mutex *mutex;
+ unsigned int enabled;
+ unsigned int rate_locked;
+ bool opp100;
+ unsigned long rate;
+ unsigned int io_base;
+ u32 cg_sel;
+ struct clk *parent;
+ struct clk *bus_parent;
+ struct clk **parents;
+ struct regulator *regulator;
+ struct clk *clock;
+ struct list_head list;
+};
/**
* struct clkops - ux500 clock operations
@@ -18,134 +64,119 @@
* NULL, the rate in the struct clk will be used.
*/
struct clkops {
- void (*enable) (struct clk *);
- void (*disable) (struct clk *);
- unsigned long (*get_rate) (struct clk *);
+ int (*enable)(struct clk *);
+ void (*disable)(struct clk *);
+ unsigned long (*get_rate)(struct clk *);
+ int (*set_rate)(struct clk *, unsigned long);
+ long (*round_rate)(struct clk *, unsigned long);
+ int (*set_parent)(struct clk *, struct clk *);
};
-/**
- * struct clk - ux500 clock structure
- * @ops: pointer to clkops struct used to control this clock
- * @name: name, for debugging
- * @enabled: refcount. positive if enabled, zero if disabled
- * @get_rate: custom callback for getting the clock rate
- * @data: custom per-clock data for example for the get_rate
- * callback
- * @rate: fixed rate for clocks which don't implement
- * ops->getrate
- * @prcmu_cg_off: address offset of the combined enable/disable register
- * (used on u8500v1)
- * @prcmu_cg_bit: bit in the combined enable/disable register (used on
- * u8500v1)
- * @prcmu_cg_mgt: address of the enable/disable register (used on
- * u8500ed)
- * @cluster: peripheral cluster number
- * @prcc_bus: bit for the bus clock in the peripheral's CLKRST
- * @prcc_kernel: bit for the kernel clock in the peripheral's CLKRST.
- * -1 if no kernel clock exists.
- * @parent_cluster: pointer to parent's cluster clk struct
- * @parent_periph: pointer to parent's peripheral clk struct
- *
- * Peripherals are organised into clusters, and each cluster has an associated
- * bus clock. Some peripherals also have a parent peripheral clock.
- *
- * In order to enable a clock for a peripheral, we need to enable:
- * (1) the parent cluster (bus) clock at the PRCMU level
- * (2) the parent peripheral clock (if any) at the PRCMU level
- * (3) the peripheral's bus & kernel clock at the PRCC level
- *
- * (1) and (2) are handled by defining clk structs (DEFINE_PRCMU_CLK) for each
- * of the cluster and peripheral clocks, and hooking these as the parents of
- * the individual peripheral clocks.
- *
- * (3) is handled by specifying the bits in the PRCC control registers required
- * to enable these clocks and modifying them in the ->enable and
- * ->disable callbacks of the peripheral clocks (DEFINE_PRCC_CLK).
- *
- * This structure describes both the PRCMU-level clocks and PRCC-level clocks.
- * The prcmu_* fields are only used for the PRCMU clocks, and the cluster,
- * prcc, and parent pointers are only used for the PRCC-level clocks.
- */
-struct clk {
- const struct clkops *ops;
- const char *name;
- unsigned int enabled;
- unsigned long (*get_rate)(struct clk *);
- void *data;
-
- unsigned long rate;
- struct list_head list;
+extern struct clkops prcmu_clk_ops;
+extern struct clkops prcmu_scalable_clk_ops;
+extern struct clkops prcmu_opp100_clk_ops;
+extern struct mutex clk_opp100_mutex;
+extern struct clkops prcc_pclk_ops;
+extern struct clkops prcc_kclk_ops;
+extern struct clkops prcc_kclk_rec_ops;
+extern struct clkops sga_clk_ops;
- /* These three are only for PRCMU clks */
+#define CLK_LOOKUP(_clk, _dev_id, _con_id) \
+ { .dev_id = _dev_id, .con_id = _con_id, .clk = &_clk }
- unsigned int prcmu_cg_off;
- unsigned int prcmu_cg_bit;
- unsigned int prcmu_cg_mgt;
-
- /* The rest are only for PRCC clks */
-
- int cluster;
- unsigned int prcc_bus;
- unsigned int prcc_kernel;
-
- struct clk *parent_cluster;
- struct clk *parent_periph;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *dent; /* For visible tree hierarchy */
- struct dentry *dent_bus; /* For visible tree hierarchy */
-#endif
-};
+/* Define PRCMU Clock */
+#define DEF_PRCMU_CLK(_name, _cg_sel, _rate) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcmu_clk_ops, \
+ .cg_sel = _cg_sel, \
+ .rate = _rate, \
+ }
-#define DEFINE_PRCMU_CLK(_name, _cg_off, _cg_bit, _reg) \
-struct clk clk_##_name = { \
- .name = #_name, \
- .ops = &clk_prcmu_ops, \
- .prcmu_cg_off = _cg_off, \
- .prcmu_cg_bit = _cg_bit, \
- .prcmu_cg_mgt = PRCM_##_reg##_MGT \
+#define DEF_PRCMU_SCALABLE_CLK(_name, _cg_sel) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcmu_scalable_clk_ops, \
+ .cg_sel = _cg_sel, \
}
-#define DEFINE_PRCMU_CLK_RATE(_name, _cg_off, _cg_bit, _reg, _rate) \
-struct clk clk_##_name = { \
- .name = #_name, \
- .ops = &clk_prcmu_ops, \
- .prcmu_cg_off = _cg_off, \
- .prcmu_cg_bit = _cg_bit, \
- .rate = _rate, \
- .prcmu_cg_mgt = PRCM_##_reg##_MGT \
+/* Use this for clocks that are only defined at OPP 100%. */
+#define DEF_PRCMU_OPP100_CLK(_name, _cg_sel, _rate) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcmu_opp100_clk_ops, \
+ .cg_sel = _cg_sel, \
+ .rate = _rate, \
+ .mutex = &clk_opp100_mutex, \
}
-#define DEFINE_PRCC_CLK(_pclust, _name, _bus_en, _kernel_en, _kernclk) \
-struct clk clk_##_name = { \
- .name = #_name, \
- .ops = &clk_prcc_ops, \
- .cluster = _pclust, \
- .prcc_bus = _bus_en, \
- .prcc_kernel = _kernel_en, \
- .parent_cluster = &clk_per##_pclust##clk, \
- .parent_periph = _kernclk \
+/* Define PRCC clock */
+#define DEF_PRCC_PCLK(_name, _io_base, _cg_bit, _parent) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcc_pclk_ops, \
+ .io_base = _io_base, \
+ .cg_sel = BIT(_cg_bit), \
+ .parent = _parent, \
}
-#define DEFINE_PRCC_CLK_CUSTOM(_pclust, _name, _bus_en, _kernel_en, _kernclk, _callback, _data) \
-struct clk clk_##_name = { \
- .name = #_name, \
- .ops = &clk_prcc_ops, \
- .cluster = _pclust, \
- .prcc_bus = _bus_en, \
- .prcc_kernel = _kernel_en, \
- .parent_cluster = &clk_per##_pclust##clk, \
- .parent_periph = _kernclk, \
- .get_rate = _callback, \
- .data = (void *) _data \
+#define DEF_PRCC_KCLK(_name, _io_base, _cg_bit, _parent, _clock) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &prcc_kclk_ops, \
+ .io_base = _io_base, \
+ .cg_sel = BIT(_cg_bit), \
+ .parent = _parent, \
+ .clock = _clock, \
}
+#define DEF_PER_CLK(_name, _bus_parent, _parent) \
+ struct clk _name = { \
+ .name = #_name, \
+ .parent = _parent, \
+ .bus_parent = _bus_parent, \
+ }
-#define CLK(_clk, _devname, _conname) \
- { \
- .clk = &clk_##_clk, \
- .dev_id = _devname, \
- .con_id = _conname, \
+#define DEF_MTU_CLK(_cg_sel, _name, _bus_parent) \
+ struct clk _name = { \
+ .name = #_name, \
+ .ops = &mtu_clk_ops, \
+ .cg_sel = _cg_sel, \
+ .bus_parent = _bus_parent, \
}
-int __init clk_db8500_ed_fixup(void);
+/* Functions defined in clock.c */
int __init clk_init(void);
+void clks_register(struct clk_lookup *clks, size_t num);
+int __clk_enable(struct clk *clk, void *current_lock);
+void __clk_disable(struct clk *clk, void *current_lock);
+unsigned long __clk_get_rate(struct clk *clk, void *current_lock);
+long clk_round_rate_rec(struct clk *clk, unsigned long rate);
+int clk_set_rate_rec(struct clk *clk, unsigned long rate);
+
+#ifdef CONFIG_DEBUG_FS
+int dbx500_clk_debug_init(struct clk **clks, int num);
+#else
+static inline int dbx500_clk_debug_init(struct clk **clks, int num)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_UX500_SOC_DB8500
+int __init db8500_clk_init(void);
+int __init db8500_clk_debug_init(void);
+#else
+static inline int db8500_clk_init(void) { return 0; }
+static inline int db8500_clk_debug_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_UX500_SOC_DB5500
+int __init db5500_clk_init(void);
+int __init db5500_clk_debug_init(void);
+#else
+static inline int db5500_clk_init(void) { return 0; }
+static inline int db5500_clk_debug_init(void) { return 0; }
+#endif
+
+#endif
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index 18aa5c05c69..e21a77475a3 100644
--- a/arch/arm/mach-ux500/cpu-db5500.c
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -9,20 +9,23 @@
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/gpio/nomadik.h>
-#include <asm/mach/map.h>
#include <asm/pmu.h>
+#include <asm/mach/map.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
-#include <plat/gpio-nomadik.h>
+#include <linux/gpio.h>
#include <mach/hardware.h>
#include <mach/devices.h>
#include <mach/setup.h>
#include <mach/irqs.h>
#include <mach/usb.h>
+#include <mach/ste-dma40-db5500.h>
#include "devices-db5500.h"
-#include "ste-dma40-db5500.h"
static struct map_desc u5500_uart_io_desc[] __initdata = {
__IO_DEV_DESC(U5500_UART0_BASE, SZ_4K),
@@ -35,8 +38,16 @@ static struct map_desc u5500_io_desc[] __initdata = {
__IO_DEV_DESC(U5500_GIC_DIST_BASE, SZ_4K),
__IO_DEV_DESC(U5500_L2CC_BASE, SZ_4K),
__IO_DEV_DESC(U5500_MTU0_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_MTU1_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_RTC_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_MTIMER_BASE, SZ_4K),
__IO_DEV_DESC(U5500_BACKUPRAM0_BASE, SZ_8K),
+	/*
+	 * Map U5500_PUBLIC_BOOT_ROM_BASE (base+18000) only for the TEE
+	 * security driver, and avoid overlap with the ASIC ID at base+1D000.
+	 */
+ __MEM_DEV_DESC(U5500_BOOT_ROM_BASE+0x18000, 6*SZ_4K),
+
__IO_DEV_DESC(U5500_GPIO0_BASE, SZ_4K),
__IO_DEV_DESC(U5500_GPIO1_BASE, SZ_4K),
__IO_DEV_DESC(U5500_GPIO2_BASE, SZ_4K),
@@ -44,6 +55,11 @@ static struct map_desc u5500_io_desc[] __initdata = {
__IO_DEV_DESC(U5500_GPIO4_BASE, SZ_4K),
__IO_DEV_DESC(U5500_PRCMU_BASE, SZ_4K),
__IO_DEV_DESC(U5500_PRCMU_TCDM_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_CLKRST1_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_CLKRST2_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_CLKRST3_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_CLKRST5_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_CLKRST6_BASE, SZ_4K),
};
static struct resource mbox0_resources[] = {
@@ -130,31 +146,58 @@ static struct platform_device mbox2_device = {
.num_resources = ARRAY_SIZE(mbox2_resources),
};
+static struct platform_device db5500_prcmu_device = {
+ .name = "db5500-prcmu",
+};
+
static struct platform_device *db5500_platform_devs[] __initdata = {
+ &u5500_gpio_devs[0],
+ &u5500_gpio_devs[1],
+ &u5500_gpio_devs[2],
+ &u5500_gpio_devs[3],
+ &u5500_gpio_devs[4],
+ &u5500_gpio_devs[5],
+ &u5500_gpio_devs[6],
+ &u5500_gpio_devs[7],
&mbox0_device,
&mbox1_device,
&mbox2_device,
+ &db5500_prcmu_device,
+ &u5500_wdt_device,
};
-static resource_size_t __initdata db5500_gpio_base[] = {
- U5500_GPIOBANK0_BASE,
- U5500_GPIOBANK1_BASE,
- U5500_GPIOBANK2_BASE,
- U5500_GPIOBANK3_BASE,
- U5500_GPIOBANK4_BASE,
- U5500_GPIOBANK5_BASE,
- U5500_GPIOBANK6_BASE,
- U5500_GPIOBANK7_BASE,
-};
+static u8 db5500_revision;
-static void __init db5500_add_gpios(void)
+bool cpu_is_u5500v1(void)
{
- struct nmk_gpio_platform_data pdata = {
- /* No custom data yet */
- };
+ return db5500_revision == 0xA0;
+}
- dbx500_add_gpios(ARRAY_AND_SIZE(db5500_gpio_base),
- IRQ_DB5500_GPIO0, &pdata);
+bool cpu_is_u5500v2(void)
+{
+ return (db5500_revision & 0xf0) == 0xB0;
+}
+
+bool cpu_is_u5500v20(void)
+{
+ return db5500_revision == 0xB0;
+}
+
+bool cpu_is_u5500v21(void)
+{
+ return db5500_revision == 0xB1;
+}
+
+static void db5500_rev_init(void)
+{
+ unsigned int asicid;
+
+ /* As in devicemaps_init() */
+ local_flush_tlb_all();
+ flush_cache_all();
+
+ asicid = readl_relaxed(__io_address(U5500_ASIC_ID_ADDRESS));
+ db5500_revision = asicid & 0xff;
}
void __init u5500_map_io(void)
@@ -169,6 +212,8 @@ void __init u5500_map_io(void)
iotable_init(u5500_io_desc, ARRAY_SIZE(u5500_io_desc));
_PRCMU_BASE = __io_address(U5500_PRCMU_BASE);
+
+ db5500_rev_init();
}
static void __init db5500_pmu_init(void)
@@ -214,7 +259,10 @@ static int usb_db5500_tx_dma_cfg[] = {
void __init u5500_init_devices(void)
{
- db5500_add_gpios();
+#ifdef CONFIG_STM_TRACE
+ /* Early init for STM tracing */
+ /* platform_device_register(&u5500_stm_device); */
+#endif
db5500_pmu_init();
db5500_dma_init();
db5500_add_rtc();
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 7176ee7491a..a163e1fbf37 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -14,19 +14,23 @@
#include <linux/amba/bus.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/gpio/nomadik.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/sys_soc.h>
-#include <asm/mach/map.h>
#include <asm/pmu.h>
-#include <plat/gpio-nomadik.h>
+#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/reboot_reasons.h>
#include <mach/usb.h>
+#include <mach/ste-dma40-db8500.h>
#include "devices-db8500.h"
-#include "ste-dma40-db8500.h"
/* minimum static i/o mapping required to boot U8500 platforms */
static struct map_desc u8500_uart_io_desc[] __initdata = {
@@ -40,8 +44,15 @@ static struct map_desc u8500_io_desc[] __initdata = {
__IO_DEV_DESC(U8500_GIC_DIST_BASE, SZ_4K),
__IO_DEV_DESC(U8500_L2CC_BASE, SZ_4K),
__IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_MTU1_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_RTC_BASE, SZ_4K),
__IO_DEV_DESC(U8500_BACKUPRAM0_BASE, SZ_8K),
+ /* Map U8500_PUBLIC_BOOT_ROM_BASE (base+17000) only
+ * for the TEE security driver,
+ * and avoid overlap with the ASIC ID at base+1D000 */
+ __MEM_DEV_DESC(U8500_BOOT_ROM_BASE+0x17000, 6*SZ_4K),
+
__IO_DEV_DESC(U8500_CLKRST1_BASE, SZ_4K),
__IO_DEV_DESC(U8500_CLKRST2_BASE, SZ_4K),
__IO_DEV_DESC(U8500_CLKRST3_BASE, SZ_4K),
@@ -63,6 +74,14 @@ void __init u8500_map_io(void)
*/
iotable_init(u8500_uart_io_desc, ARRAY_SIZE(u8500_uart_io_desc));
+ /*
+ * The STE NMF CM driver, used only on the U8500, allocates via
+ * dma_alloc_coherent:
+ * 8M for SIA and SVA data + 2M for SIA code + 2M for SVA code.
+ * This can't be higher than 14M with VMALLOC_END at 0xFF000000.
+ */
+ init_consistent_dma_size(14*SZ_1M);
+
ux500_map_io();
iotable_init(u8500_io_desc, ARRAY_SIZE(u8500_io_desc));
@@ -115,33 +134,20 @@ static struct platform_device db8500_prcmu_device = {
};
static struct platform_device *platform_devs[] __initdata = {
- &u8500_dma40_device,
+ &u8500_gpio_devs[0],
+ &u8500_gpio_devs[1],
+ &u8500_gpio_devs[2],
+ &u8500_gpio_devs[3],
+ &u8500_gpio_devs[4],
+ &u8500_gpio_devs[5],
+ &u8500_gpio_devs[6],
+ &u8500_gpio_devs[7],
+ &u8500_gpio_devs[8],
&db8500_pmu_device,
&db8500_prcmu_device,
+ &u8500_wdt_device,
};
-static resource_size_t __initdata db8500_gpio_base[] = {
- U8500_GPIOBANK0_BASE,
- U8500_GPIOBANK1_BASE,
- U8500_GPIOBANK2_BASE,
- U8500_GPIOBANK3_BASE,
- U8500_GPIOBANK4_BASE,
- U8500_GPIOBANK5_BASE,
- U8500_GPIOBANK6_BASE,
- U8500_GPIOBANK7_BASE,
- U8500_GPIOBANK8_BASE,
-};
-
-static void __init db8500_add_gpios(void)
-{
- struct nmk_gpio_platform_data pdata = {
- .supports_sleepmode = true,
- };
-
- dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base),
- IRQ_DB8500_GPIO0, &pdata);
-}
-
static int usb_db8500_rx_dma_cfg[] = {
DB8500_DMA_DEV38_USB_OTG_IEP_1_9,
DB8500_DMA_DEV37_USB_OTG_IEP_2_10,
@@ -169,8 +175,13 @@ static int usb_db8500_tx_dma_cfg[] = {
*/
void __init u8500_init_devices(void)
{
+#ifdef CONFIG_STM_TRACE
+ /* Early init for STM tracing */
+ platform_device_register(&u8500_stm_device);
+#endif
+
+ db8500_dma_init();
db8500_add_rtc();
- db8500_add_gpios();
db8500_add_usb(usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
platform_device_register_simple("cpufreq-u8500", -1, NULL, 0);
diff --git a/arch/arm/mach-ux500/cpu-db9500.c b/arch/arm/mach-ux500/cpu-db9500.c
new file mode 100644
index 00000000000..4d900cbe537
--- /dev/null
+++ b/arch/arm/mach-ux500/cpu-db9500.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ *
+ * Author: Pawel SZYSZUK <pawel.szyszuk@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <mach/id.h>
+
+#include "pins.h"
+
+/*
+ * U9500 is currently using U8500v2 HW. Therefore, the platform detection
+ * is based on the kernel cmd line setting (early_param "pinsfor").
+ */
+bool cpu_is_u9500(void)
+{
+ return pins_for_u9500();
+}
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index f4185749437..8d7f1bc7936 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -8,9 +8,13 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/mfd/dbx500-prcmu.h>
#include <linux/mfd/db8500-prcmu.h>
#include <linux/mfd/db5500-prcmu.h>
#include <linux/clksrc-dbx500-prcmu.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/sys_soc.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
@@ -19,11 +23,34 @@
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
+#include <mach/reboot_reasons.h>
+#include <mach/pm.h>
#include "clock.h"
void __iomem *_PRCMU_BASE;
+void ux500_restart(char mode, const char *cmd)
+{
+ unsigned short reset_code;
+
+ reset_code = reboot_reason_code(cmd);
+ prcmu_system_reset(reset_code);
+
+ mdelay(1000);
+
+ /*
+ * On 5500, the PRCMU firmware waits for up to 2 seconds for the modem
+ * to respond.
+ */
+ if (cpu_is_u5500())
+ mdelay(2000);
+
+ printk(KERN_ERR "Reboot via PRCMU failed -- System halted\n");
+ while (1)
+ ;
+}
+
void __init ux500_init_irq(void)
{
void __iomem *dist_base;
@@ -41,6 +68,12 @@ void __init ux500_init_irq(void)
gic_init(0, 29, dist_base, cpu_base);
/*
+ * On a watchdog reboot the GIC is in some cases decoupled.
+ * This makes sure that the GIC is correctly recoupled and configured.
+ */
+ ux500_pm_gic_recouple();
+
+ /*
* Init clocks here so that they are available for system timer
* initialization.
*/
@@ -48,5 +81,86 @@ void __init ux500_init_irq(void)
db5500_prcmu_early_init();
if (cpu_is_u8500())
db8500_prcmu_early_init();
+
+ arm_pm_restart = ux500_restart;
clk_init();
}
+
+#ifdef CONFIG_SYS_SOC
+#define U8500_BB_UID_BASE (U8500_BACKUPRAM1_BASE + 0xFC0)
+#define U8500_BB_UID_LENGTH 5
+
+static ssize_t ux500_get_machine(char *buf, struct sysfs_soc_info *si)
+{
+ return sprintf(buf, "DB%2x00\n", dbx500_id.partnumber);
+}
+
+static ssize_t ux500_get_soc_id(char *buf, struct sysfs_soc_info *si)
+{
+ void __iomem *uid_base;
+ int i;
+ ssize_t sz = 0;
+
+ if (dbx500_id.partnumber == 0x85) {
+ uid_base = __io_address(U8500_BB_UID_BASE);
+ for (i = 0; i < U8500_BB_UID_LENGTH; i++)
+ sz += sprintf(buf + sz, "%08x",
+ readl(uid_base + i * sizeof(u32)));
+ sz += sprintf(buf + sz, "\n");
+ } else {
+ /* Don't know where it is located for U5500 */
+ sz = sprintf(buf, "N/A\n");
+ }
+
+ return sz;
+}
+
+static ssize_t ux500_get_revision(char *buf, struct sysfs_soc_info *si)
+{
+ unsigned int rev = dbx500_id.revision;
+
+ if (rev == 0x01)
+ return sprintf(buf, "%s\n", "ED");
+ else if (rev >= 0xA0)
+ return sprintf(buf, "%d.%d\n",
+ (rev >> 4) - 0xA + 1, rev & 0xf);
+
+ return sprintf(buf, "%s", "Unknown\n");
+}
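
For reference, a minimal standalone sketch (not part of the patch) that reproduces the revision-string mapping used by ux500_get_revision() above on two sample values:

/* Illustrative only: mirrors the (rev >> 4) - 0xA + 1 / rev & 0xf mapping. */
#include <stdio.h>

static void print_revision(unsigned int rev)
{
	if (rev == 0x01)
		printf("ED\n");
	else if (rev >= 0xA0)
		printf("%d.%d\n", (rev >> 4) - 0xA + 1, rev & 0xf);
	else
		printf("Unknown\n");
}

int main(void)
{
	print_revision(0xA0);	/* prints "1.0" */
	print_revision(0xB1);	/* prints "2.1" */
	return 0;
}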
+
+static ssize_t ux500_get_process(char *buf, struct sysfs_soc_info *si)
+{
+ if (dbx500_id.process == 0x00)
+ return sprintf(buf, "Standard\n");
+
+ return sprintf(buf, "%02xnm\n", dbx500_id.process);
+}
+
+static ssize_t ux500_get_reset_code(char *buf, struct sysfs_soc_info *si)
+{
+ return sprintf(buf, "0x%04x\n", prcmu_get_reset_code());
+}
+
+static ssize_t ux500_get_reset_reason(char *buf, struct sysfs_soc_info *si)
+{
+ return sprintf(buf, "%s\n",
+ reboot_reason_string(prcmu_get_reset_code()));
+}
+
+static struct sysfs_soc_info soc_info[] = {
+ SYSFS_SOC_ATTR_CALLBACK("machine", ux500_get_machine),
+ SYSFS_SOC_ATTR_VALUE("family", "Ux500"),
+ SYSFS_SOC_ATTR_CALLBACK("soc_id", ux500_get_soc_id),
+ SYSFS_SOC_ATTR_CALLBACK("revision", ux500_get_revision),
+ SYSFS_SOC_ATTR_CALLBACK("process", ux500_get_process),
+ SYSFS_SOC_ATTR_CALLBACK("reset_code", ux500_get_reset_code),
+ SYSFS_SOC_ATTR_CALLBACK("reset_reason", ux500_get_reset_reason),
+};
+
+static int __init ux500_sys_soc_init(void)
+{
+ return register_sysfs_soc(soc_info, ARRAY_SIZE(soc_info));
+}
+
+module_init(ux500_sys_soc_init);
+#endif
diff --git a/arch/arm/mach-ux500/dbx500_dump.c b/arch/arm/mach-ux500/dbx500_dump.c
new file mode 100644
index 00000000000..49d1711f6ff
--- /dev/null
+++ b/arch/arm/mach-ux500/dbx500_dump.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Johan Bjornstedt <johan.bjornstedt@stericsson.com>
+ *
+ * Save DBx500 registers in case of kernel crash
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kdebug.h>
+
+#include <mach/hardware.h>
+#include <mach/db8500-regs.h>
+#include <mach/db5500-regs.h>
+
+struct dbx500_dump_info {
+ char *name;
+ int *data;
+ int *io_addr;
+ int phy_addr;
+ int size;
+};
+
+static struct dbx500_dump_info db8500_dump[] = {
+ {
+ .name = "prcmu_tcdm",
+ .phy_addr = U8500_PRCMU_TCDM_BASE,
+ .size = 0x1000,
+ },
+ {
+ .name = "prcmu_non_sec_1",
+ .phy_addr = U8500_PRCMU_BASE,
+ .size = 0x340,
+ },
+ {
+ .name = "prcmu_pmb",
+ .phy_addr = (U8500_PRCMU_BASE + 0x344),
+ .size = 0xC,
+ },
+ {
+ .name = "prcmu_thermal",
+ .phy_addr = (U8500_PRCMU_BASE + 0x3C0),
+ .size = 0x40,
+ },
+ {
+ .name = "prcmu_non_sec_2",
+ .phy_addr = (U8500_PRCMU_BASE + 0x404),
+ .size = 0x1FC,
+ },
+ {
+ .name = "prcmu_icn_pmu",
+ .phy_addr = (U8500_PRCMU_BASE + 0xE00),
+ .size = 0x118,
+ },
+};
+
+static struct dbx500_dump_info db5500_dump[] = {
+ {
+ .name = "prcmu_tcdm",
+ .phy_addr = U5500_PRCMU_TCDM_BASE,
+ .size = 0x5000,
+ },
+ {
+ .name = "prcmu_gpio",
+ .phy_addr = U5500_GPIO2_BASE,
+ .size = 0x1000,
+ },
+ {
+ .name = "prcmu_msp1",
+ .phy_addr = U5500_MSP1_BASE,
+ .size = 0x1000,
+ },
+ {
+ .name = "prcmu_sec",
+ .phy_addr = (U5500_PRCMU_BASE + 0x1000),
+ .size = 0x1000,
+ },
+ {
+ .name = "prcmu_unsec",
+ .phy_addr = U5500_PRCMU_BASE,
+ .size = 0x1000,
+ },
+};
+
+static struct dbx500_dump_info *dbx500_dump;
+static int dbx500_dump_size;
+
+static int crash_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ int i;
+
+ pr_info("dbx500_dump notified of crash\n");
+
+ for (i = 0; i < dbx500_dump_size; i++) {
+ memcpy_fromio(dbx500_dump[i].data, dbx500_dump[i].io_addr,
+ dbx500_dump[i].size);
+ }
+
+ return 0;
+}
+
+static void __init init_io_addresses(void)
+{
+ int i;
+
+ for (i = 0; i < dbx500_dump_size; i++)
+ dbx500_dump[i].io_addr = ioremap(dbx500_dump[i].phy_addr,
+ dbx500_dump[i].size);
+}
+
+static struct notifier_block die_notifier = {
+ .notifier_call = crash_notifier,
+ .priority = 0,
+};
+
+int __init dbx500_dump_init(void)
+{
+ int err, i;
+
+ if (cpu_is_u5500()) {
+ dbx500_dump = db5500_dump;
+ dbx500_dump_size = ARRAY_SIZE(db5500_dump);
+ } else if (cpu_is_u8500()) {
+ dbx500_dump = db8500_dump;
+ dbx500_dump_size = ARRAY_SIZE(db8500_dump);
+ } else {
+ ux500_unknown_soc();
+ }
+
+ for (i = 0; i < dbx500_dump_size; i++) {
+ dbx500_dump[i].data = kmalloc(dbx500_dump[i].size, GFP_KERNEL);
+ if (!dbx500_dump[i].data) {
+ pr_err("dbx500_dump: Could not allocate memory for "
+ "%s\n", dbx500_dump[i].name);
+ err = -ENOMEM;
+ goto free_mem;
+ }
+ }
+
+ init_io_addresses();
+
+ err = register_die_notifier(&die_notifier);
+ if (err != 0) {
+ pr_err("dbx500_dump: Unable to register a die notifier %d\n",
+ err);
+ goto free_mem;
+ }
+ pr_info("dbx500_dump: driver initialized\n");
+ return err;
+
+free_mem:
+ for (i = i - 1; i >= 0; i--)
+ kfree(dbx500_dump[i].data);
+
+ return err;
+}
+arch_initcall(dbx500_dump_init);
diff --git a/arch/arm/mach-ux500/dcache.c b/arch/arm/mach-ux500/dcache.c
new file mode 100644
index 00000000000..b117d4e8283
--- /dev/null
+++ b/arch/arm/mach-ux500/dcache.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Cache handler integration and data cache helpers.
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/dma-mapping.h>
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/system.h>
+
+/*
+ * Values are derived from measurements on HREFP_1.1_V32_OM_S10 running
+ * u8500-android-2.2_r1.1_v0.21.
+ *
+ * A lot of time can be spent trying to figure out the perfect breakpoints but
+ * for now I've chosen the following simple way.
+ *
+ * breakpoint = best_case + (worst_case - best_case) * 0.666
+ * The breakpoint is moved slightly towards the worst case because a full
+ * clean/flush affects the entire system so we should be a bit careful.
+ *
+ * BEST CASE:
+ * Best case is that the cache is empty and the system is idling. The case
+ * where the cache contains only targeted data could be better in some cases
+ * but it's hard to do measurements and calculate on that case so I choose the
+ * easier alternative.
+ *
+ * inner_clean_breakpoint = time_2_range_clean_on_empty_cache(
+ * complete_clean_on_empty_cache_time)
+ * inner_flush_breakpoint = time_2_range_flush_on_empty_cache(
+ * complete_flush_on_empty_cache_time)
+ *
+ * outer_clean_breakpoint = time_2_range_clean_on_empty_cache(
+ * complete_clean_on_empty_cache_time)
+ * outer_flush_breakpoint = time_2_range_flush_on_empty_cache(
+ * complete_flush_on_empty_cache_time)
+ *
+ * WORST CASE:
+ * Worst case is that the cache is filled with dirty non targeted data that
+ * will be used after the synchronization and the system is under heavy load.
+ *
+ * inner_clean_breakpoint = time_2_range_clean_on_empty_cache(
+ * complete_clean_on_full_cache_time * 1.5)
+ * Times 1.5 because it runs on both cores half the time.
+ * inner_flush_breakpoint = time_2_range_flush_on_empty_cache(
+ * complete_flush_on_full_cache_time * 1.5 +
+ * complete_flush_on_full_cache_time / 2)
+ * Plus "complete_flush_on_full_cache_time / 2" because all data has to be read
+ * back; here we assume that both cores can fill their caches simultaneously
+ * (this seems to be the case, as operations on full and empty inner caches
+ * take roughly the same amount of time, i.e. the bus to outer is not the
+ * bottleneck).
+ *
+ * outer_clean_breakpoint = time_2_range_clean_on_empty_cache(
+ * complete_clean_on_full_cache_time +
+ * (complete_clean_on_full_cache_time -
+ * complete_clean_on_empty_cache_time))
+ * Plus "(complete_flush_on_full_cache_time -
+ * complete_flush_on_empty_cache_time)" because no one else can work when we
+ * hog the bus with our unnecessary transfer.
+ * outer_flush_breakpoint = time_2_range_flush_on_empty_cache(
+ * complete_flush_on_full_cache_time * 2 +
+ * (complete_flush_on_full_cache_time -
+ * complete_flush_on_empty_cache_time) * 2)
+ *
+ * These values might have to be updated if changes are made to the CPU, L2$,
+ * memory bus or memory.
+ */
+/* 28930 */
+static const u32 inner_clean_breakpoint = 21324 + (32744 - 21324) * 0.666;
+/* 36224 */
+static const u32 inner_flush_breakpoint = 21324 + (43697 - 21324) * 0.666;
+/* 254069 */
+static const u32 outer_clean_breakpoint = 68041 + (347363 - 68041) * 0.666;
+/* 485414 */
+static const u32 outer_flush_breakpoint = 68041 + (694727 - 68041) * 0.666;
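
A small standalone sketch (not part of the patch) that re-derives the four constants above from the measured best/worst-case figures, using the breakpoint formula from the comment; up to rounding, the results match the figures noted above:

/* Illustrative only: breakpoint = best + (worst - best) * 0.666 */
#include <stdio.h>

static unsigned int breakpoint(unsigned int best, unsigned int worst)
{
	return (unsigned int)(best + (worst - best) * 0.666);
}

int main(void)
{
	printf("inner clean: %u\n", breakpoint(21324, 32744));	/* ~28930 */
	printf("inner flush: %u\n", breakpoint(21324, 43697));	/* ~36224 */
	printf("outer clean: %u\n", breakpoint(68041, 347363));	/* ~254069 */
	printf("outer flush: %u\n", breakpoint(68041, 694727));	/* ~485414 */
	return 0;
}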
+
+static void __clean_inner_dcache_all(void *param);
+static void clean_inner_dcache_all(void);
+
+static void __flush_inner_dcache_all(void *param);
+static void flush_inner_dcache_all(void);
+
+static bool is_cache_exclusive(void);
+
+void drain_cpu_write_buf(void)
+{
+ dsb();
+ outer_cache.sync();
+}
+
+void clean_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
+ bool *cleaned_everything)
+{
+ /*
+ * There is no problem with exclusive caches here as the Cortex-A9
+ * documentation (8.1.4. Exclusive L2 cache) says that when a dirty
+ * line is moved from L2 to L1 it is first written to mem. Because
+ * of this there is no way a line can avoid the clean by jumping
+ * between the cache levels.
+ */
+ *cleaned_everything = true;
+
+ if (length < inner_clean_breakpoint) {
+ /* Inner clean range */
+ dmac_map_area(vaddr, length, DMA_TO_DEVICE);
+ *cleaned_everything = false;
+ } else {
+ clean_inner_dcache_all();
+ }
+
+ if (!inner_only) {
+ /*
+ * There is currently no outer_cache.clean_all() so we use
+ * flush instead, which is ok as clean is a subset of flush.
+ * Clean range and flush range take the same amount of time
+ * so we can use outer_flush_breakpoint here.
+ */
+ if (length < outer_flush_breakpoint) {
+ outer_cache.clean_range(paddr, paddr + length);
+ *cleaned_everything = false;
+ } else {
+ outer_cache.flush_all();
+ }
+ }
+}
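
A hedged usage sketch (hypothetical caller, not part of the patch) showing the intended contract of clean_cpu_dcache(): short ranges get a range clean, long ones fall back to a full clean/flush, and the out-parameter tells the caller whether everything was cleaned:

/* Hypothetical caller; the function name and buffer are illustrative only. */
static void example_clean_buffer_for_dma(void *vaddr, u32 paddr, u32 len)
{
	bool cleaned_everything;

	clean_cpu_dcache(vaddr, paddr, len, false, &cleaned_everything);
	if (cleaned_everything)
		pr_debug("full clean/flush was used instead of range ops\n");

	/* Make sure the CPU write buffers are drained before starting DMA. */
	drain_cpu_write_buf();
}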
+
+void flush_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
+ bool *flushed_everything)
+{
+ /*
+ * There might still be stale data in the caches after this call if the
+ * cache levels are exclusive. The following can happen:
+ * 1. Clean L1 moves the data to L2.
+ * 2. Speculative prefetch, preemption or loads on the other core moves
+ * all the data back to L1, any dirty data will be written to mem as a
+ * result of this.
+ * 3. Flush L2 does nothing as there is no targeted data in L2.
+ * 4. Flush L1 moves the data to L2. Notice that this does not happen
+ * when the cache levels are non-exclusive as clean pages are not
+ * written to L2 in that case.
+ * 5. Stale data is still present in L2!
+ * I see two possible solutions: don't use exclusive caches, or
+ * (temporarily) disable prefetching to L1, preemption and the other
+ * core.
+ *
+ * A situation can occur where the operation does not seem atomic from
+ * the other core's point of view, even on a non-exclusive cache setup.
+ * Replace step 2 in the previous scenario with a write from the other
+ * core. The other core will write on top of the old data but the
+ * result will not be written to memory. One would expect either that
+ * the write was performed on top of the old data and was written to
+ * memory (the write occurred before the flush) or that the write was
+ * performed on top of the new data and was not written to memory (the
+ * write occurred after the flush). The same problem can occur with one
+ * core if kernel preemption is enabled. The solution is to
+ * (temporarily) disable the other core and preemption. I can't think
+ * of any situation where this would be a problem, and disabling the
+ * other core for the duration of this call is mighty expensive, so for
+ * now I just ignore the problem.
+ */
+
+ *flushed_everything = true;
+
+ if (!inner_only) {
+ /*
+ * Exclusive caches are not handled here (see the note above), so bail out.
+ */
+ if (is_cache_exclusive())
+ panic("%s can't handle exclusive CPU caches\n",
+ __func__);
+
+ if (length < inner_clean_breakpoint) {
+ /* Inner clean range */
+ dmac_map_area(vaddr, length, DMA_TO_DEVICE);
+ *flushed_everything = false;
+ } else {
+ clean_inner_dcache_all();
+ }
+
+ if (length < outer_flush_breakpoint) {
+ outer_cache.flush_range(paddr, paddr + length);
+ *flushed_everything = false;
+ } else {
+ outer_cache.flush_all();
+ }
+ }
+
+ if (length < inner_flush_breakpoint) {
+ /* Inner flush range */
+ dmac_flush_range(vaddr, (void *)((u32)vaddr + length));
+ *flushed_everything = false;
+ } else {
+ flush_inner_dcache_all();
+ }
+}
+
+bool speculative_data_prefetch(void)
+{
+ return true;
+}
+
+u32 get_dcache_granularity(void)
+{
+ return 32;
+}
+
+/*
+ * Local functions
+ */
+
+static void __clean_inner_dcache_all(void *param)
+{
+ __cpuc_clean_dcache_all();
+}
+
+static void clean_inner_dcache_all(void)
+{
+ on_each_cpu(__clean_inner_dcache_all, NULL, 1);
+}
+
+static void __flush_inner_dcache_all(void *param)
+{
+ __cpuc_flush_dcache_all();
+}
+
+static void flush_inner_dcache_all(void)
+{
+ on_each_cpu(__flush_inner_dcache_all, NULL, 1);
+}
+
+static bool is_cache_exclusive(void)
+{
+ static const u32 CA9_ACTLR_EXCL = 0x80;
+
+ u32 armv7_actlr;
+
+ asm (
+ "mrc p15, 0, %0, c1, c0, 1"
+ : "=r" (armv7_actlr)
+ );
+
+ if (armv7_actlr & CA9_ACTLR_EXCL)
+ return true;
+ else
+ return false;
+}
diff --git a/arch/arm/mach-ux500/devices-common.c b/arch/arm/mach-ux500/devices-common.c
index c563e5418d8..435b2523664 100644
--- a/arch/arm/mach-ux500/devices-common.c
+++ b/arch/arm/mach-ux500/devices-common.c
@@ -6,16 +6,19 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
-
-#include <plat/gpio-nomadik.h>
+#include <linux/pm.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
#include <mach/hardware.h>
+#include <mach/pm.h>
#include "devices-common.h"
@@ -38,6 +41,7 @@ dbx500_add_amba_device(const char *name, resource_size_t base,
dev->dma_mask = DMA_BIT_MASK(32);
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ dev->dev.pm_domain = &ux500_amba_dev_power_domain;
dev->irq[0] = irq;
dev->irq[1] = NO_IRQ;
@@ -68,6 +72,7 @@ dbx500_add_platform_device(const char *name, int id, void *pdata,
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+ dev->dev.pm_domain = &ux500_dev_power_domain;
ret = platform_device_add_resources(dev, res, resnum);
if (ret)
@@ -108,6 +113,22 @@ dbx500_add_platform_device_4k1irq(const char *name, int id,
ARRAY_SIZE(resources));
}
+struct platform_device *
+dbx500_add_platform_device_noirq(const char *name, int id,
+ resource_size_t base, void *pdata)
+{
+ struct resource resources[] = {
+ [0] = {
+ .start = base,
+ .end = base + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ }
+ };
+
+ return dbx500_add_platform_device(name, id, pdata, resources,
+ ARRAY_SIZE(resources));
+}
+
static struct platform_device *
dbx500_add_gpio(int id, resource_size_t addr, int irq,
struct nmk_gpio_platform_data *pdata)
@@ -140,7 +161,6 @@ void dbx500_add_gpios(resource_size_t *base, int num, int irq,
pdata->first_gpio = first;
pdata->first_irq = NOMADIK_GPIO_TO_IRQ(first);
pdata->num_gpio = 32;
-
dbx500_add_gpio(i, base[i], irq, pdata);
}
}
diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h
index 7825705033b..89b7a562f6c 100644
--- a/arch/arm/mach-ux500/devices-common.h
+++ b/arch/arm/mach-ux500/devices-common.h
@@ -8,6 +8,8 @@
#ifndef __DEVICES_COMMON_H
#define __DEVICES_COMMON_H
+#include <linux/amba/serial.h>
+
extern struct amba_device *
dbx500_add_amba_device(const char *name, resource_size_t base,
int irq, void *pdata, unsigned int periphid);
@@ -17,18 +19,24 @@ dbx500_add_platform_device_4k1irq(const char *name, int id,
resource_size_t base,
int irq, void *pdata);
-struct spi_master_cntlr;
+extern struct platform_device *
+dbx500_add_platform_device_noirq(const char *name, int id,
+ resource_size_t base, void *pdata);
+
+struct stm_msp_controller;
static inline struct amba_device *
dbx500_add_msp_spi(const char *name, resource_size_t base, int irq,
- struct spi_master_cntlr *pdata)
+ struct stm_msp_controller *pdata)
{
return dbx500_add_amba_device(name, base, irq, pdata, 0);
}
+struct pl022_ssp_controller;
+
static inline struct amba_device *
dbx500_add_spi(const char *name, resource_size_t base, int irq,
- struct spi_master_cntlr *pdata,
+ struct pl022_ssp_controller *pdata,
u32 periphid)
{
return dbx500_add_amba_device(name, base, irq, pdata, periphid);
@@ -69,7 +77,7 @@ static inline struct platform_device *
dbx500_add_msp_i2s(int id, resource_size_t base, int irq,
struct msp_i2s_platform_data *pdata)
{
- return dbx500_add_platform_device_4k1irq("MSP_I2S", id, base, irq,
+ return dbx500_add_platform_device_4k1irq("ux500-msp-i2s", id, base, irq,
pdata);
}
@@ -79,6 +87,25 @@ dbx500_add_rtc(resource_size_t base, int irq)
return dbx500_add_amba_device("rtc-pl031", base, irq, NULL, 0);
}
+struct cryp_platform_data;
+
+static inline struct platform_device *
+dbx500_add_cryp1(int id, resource_size_t base, int irq,
+ struct cryp_platform_data *pdata)
+{
+ return dbx500_add_platform_device_4k1irq("cryp1", id, base, irq,
+ pdata);
+}
+
+struct hash_platform_data;
+
+static inline struct platform_device *
+dbx500_add_hash1(int id, resource_size_t base,
+ struct hash_platform_data *pdata)
+{
+ return dbx500_add_platform_device_noirq("hash1", id, base, pdata);
+}
+
struct nmk_gpio_platform_data;
void dbx500_add_gpios(resource_size_t *base, int num, int irq,
diff --git a/arch/arm/mach-ux500/devices-db5500.c b/arch/arm/mach-ux500/devices-db5500.c
new file mode 100644
index 00000000000..6d7764f3d9e
--- /dev/null
+++ b/arch/arm/mach-ux500/devices-db5500.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ *
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson,
+ * for the System Trace Module part.
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/nomadik.h>
+
+#include <mach/hardware.h>
+#include <mach/devices.h>
+
+#ifdef CONFIG_FB_MCDE
+#include <video/mcde.h>
+#endif
+#include <mach/db5500-regs.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/pm.h>
+
+#define GPIO_DATA(_name, first, num) \
+ { \
+ .name = _name, \
+ .first_gpio = first, \
+ .first_irq = NOMADIK_GPIO_TO_IRQ(first), \
+ .num_gpio = num, \
+ .get_secondary_status = ux500_pm_gpio_read_wake_up_status, \
+ .set_ioforce = ux500_pm_prcmu_set_ioforce, \
+ }
+
+#define GPIO_RESOURCE(block) \
+ { \
+ .start = U5500_GPIOBANK##block##_BASE, \
+ .end = U5500_GPIOBANK##block##_BASE + 127, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = IRQ_DB5500_GPIO##block, \
+ .end = IRQ_DB5500_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ { \
+ .start = IRQ_DB5500_PRCMU_GPIO##block, \
+ .end = IRQ_DB5500_PRCMU_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }
+
+#define GPIO_DEVICE(block) \
+ { \
+ .name = "gpio", \
+ .id = block, \
+ .num_resources = 3, \
+ .resource = &u5500_gpio_resources[block * 3], \
+ .dev = { \
+ .platform_data = &u5500_gpio_data[block], \
+ }, \
+ }
+
+static struct nmk_gpio_platform_data u5500_gpio_data[] = {
+ GPIO_DATA("GPIO-0-31", 0, 32),
+ GPIO_DATA("GPIO-32-63", 32, 4), /* 36..63 not routed to pin */
+ GPIO_DATA("GPIO-64-95", 64, 19), /* 83..95 not routed to pin */
+ GPIO_DATA("GPIO-96-127", 96, 6), /* 102..127 not routed to pin */
+ GPIO_DATA("GPIO-128-159", 128, 21), /* 149..159 not routed to pin */
+ GPIO_DATA("GPIO-160-191", 160, 32),
+ GPIO_DATA("GPIO-192-223", 192, 32),
+ GPIO_DATA("GPIO-224-255", 224, 4), /* 228..255 not routed to pin */
+};
+
+static struct resource u5500_gpio_resources[] = {
+ GPIO_RESOURCE(0),
+ GPIO_RESOURCE(1),
+ GPIO_RESOURCE(2),
+ GPIO_RESOURCE(3),
+ GPIO_RESOURCE(4),
+ GPIO_RESOURCE(5),
+ GPIO_RESOURCE(6),
+ GPIO_RESOURCE(7),
+};
+
+struct platform_device u5500_gpio_devs[] = {
+ GPIO_DEVICE(0),
+ GPIO_DEVICE(1),
+ GPIO_DEVICE(2),
+ GPIO_DEVICE(3),
+ GPIO_DEVICE(4),
+ GPIO_DEVICE(5),
+ GPIO_DEVICE(6),
+ GPIO_DEVICE(7),
+};
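
For readability, each array entry generated by the GPIO_DEVICE() macro above expands to the following (shown for block 0; not part of the patch):

/* GPIO_DEVICE(0), expanded: */
{
	.name		= "gpio",
	.id		= 0,
	.num_resources	= 3,
	.resource	= &u5500_gpio_resources[0 * 3],
	.dev		= {
		.platform_data	= &u5500_gpio_data[0],
	},
}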
+
+#define U5500_PWM_SIZE 0x20
+static struct resource u5500_pwm0_resource[] = {
+ {
+ .name = "PWM_BASE",
+ .start = U5500_PWM_BASE,
+ .end = U5500_PWM_BASE + U5500_PWM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource u5500_pwm1_resource[] = {
+ {
+ .name = "PWM_BASE",
+ .start = U5500_PWM_BASE + U5500_PWM_SIZE,
+ .end = U5500_PWM_BASE + U5500_PWM_SIZE * 2 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource u5500_pwm2_resource[] = {
+ {
+ .name = "PWM_BASE",
+ .start = U5500_PWM_BASE + U5500_PWM_SIZE * 2,
+ .end = U5500_PWM_BASE + U5500_PWM_SIZE * 3 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource u5500_pwm3_resource[] = {
+ {
+ .name = "PWM_BASE",
+ .start = U5500_PWM_BASE + U5500_PWM_SIZE * 3,
+ .end = U5500_PWM_BASE + U5500_PWM_SIZE * 4 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device u5500_pwm0_device = {
+ .id = 0,
+ .name = "pwm",
+ .resource = u5500_pwm0_resource,
+ .num_resources = ARRAY_SIZE(u5500_pwm0_resource),
+};
+
+struct platform_device u5500_pwm1_device = {
+ .id = 1,
+ .name = "pwm",
+ .resource = u5500_pwm1_resource,
+ .num_resources = ARRAY_SIZE(u5500_pwm1_resource),
+};
+
+struct platform_device u5500_pwm2_device = {
+ .id = 2,
+ .name = "pwm",
+ .resource = u5500_pwm2_resource,
+ .num_resources = ARRAY_SIZE(u5500_pwm2_resource),
+};
+
+struct platform_device u5500_pwm3_device = {
+ .id = 3,
+ .name = "pwm",
+ .resource = u5500_pwm3_resource,
+ .num_resources = ARRAY_SIZE(u5500_pwm3_resource),
+};
+
+#ifdef CONFIG_FB_MCDE
+static struct resource mcde_resources[] = {
+ [0] = {
+ .name = MCDE_IO_AREA,
+ .start = U5500_MCDE_BASE,
+ .end = U5500_MCDE_BASE + U5500_MCDE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = MCDE_IO_AREA,
+ .start = U5500_DSI_LINK1_BASE,
+ .end = U5500_DSI_LINK1_BASE + U5500_DSI_LINK_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .name = MCDE_IO_AREA,
+ .start = U5500_DSI_LINK2_BASE,
+ .end = U5500_DSI_LINK2_BASE + U5500_DSI_LINK_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [3] = {
+ .name = MCDE_IRQ,
+ .start = IRQ_DB5500_DISP,
+ .end = IRQ_DB5500_DISP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int mcde_platform_enable_dsipll(void)
+{
+ return prcmu_enable_dsipll();
+}
+
+static int mcde_platform_disable_dsipll(void)
+{
+ return prcmu_disable_dsipll();
+}
+
+static int mcde_platform_set_display_clocks(void)
+{
+ return prcmu_set_display_clocks();
+}
+
+static struct mcde_platform_data mcde_pdata = {
+ .syncmux = 0x01,
+ .regulator_mcde_epod_id = "vsupply",
+ .regulator_esram_epod_id = "v-esram12",
+#ifdef CONFIG_MCDE_DISPLAY_DSI
+ .clock_dsi_id = "hdmi",
+ .clock_dsi_lp_id = "tv",
+#endif
+ .clock_mcde_id = "mcde",
+ .platform_set_clocks = mcde_platform_set_display_clocks,
+ .platform_enable_dsipll = mcde_platform_enable_dsipll,
+ .platform_disable_dsipll = mcde_platform_disable_dsipll,
+};
+
+struct platform_device u5500_mcde_device = {
+ .name = "mcde",
+ .id = -1,
+ .dev = {
+ .platform_data = &mcde_pdata,
+ },
+ .num_resources = ARRAY_SIZE(mcde_resources),
+ .resource = mcde_resources,
+};
+#endif
+
+static struct resource b2r2_resources[] = {
+ [0] = {
+ .start = U5500_B2R2_BASE,
+ .end = U5500_B2R2_BASE + ((4*1024)-1),
+ .name = "b2r2_base",
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "B2R2_IRQ",
+ .start = IRQ_DB5500_B2R2,
+ .end = IRQ_DB5500_B2R2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device u5500_b2r2_device = {
+ .name = "b2r2",
+ .id = 0,
+ .dev = {
+ .init_name = "b2r2_bus",
+ .coherent_dma_mask = ~0,
+ },
+ .num_resources = ARRAY_SIZE(b2r2_resources),
+ .resource = b2r2_resources,
+};
+
+static struct resource u5500_thsens_resources[] = {
+ [0] = {
+ .name = "IRQ_HOTMON_LOW",
+ .start = IRQ_DB5500_PRCMU_TEMP_SENSOR_LOW,
+ .end = IRQ_DB5500_PRCMU_TEMP_SENSOR_LOW,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .name = "IRQ_HOTMON_HIGH",
+ .start = IRQ_DB5500_PRCMU_TEMP_SENSOR_HIGH,
+ .end = IRQ_DB5500_PRCMU_TEMP_SENSOR_HIGH,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device u5500_thsens_device = {
+ .name = "db5500_temp",
+ .resource = u5500_thsens_resources,
+ .num_resources = ARRAY_SIZE(u5500_thsens_resources),
+};
+
+struct platform_device u5500_wdt_device = {
+ .name = "ux500_wdt",
+ .id = -1,
+};
diff --git a/arch/arm/mach-ux500/devices-db5500.h b/arch/arm/mach-ux500/devices-db5500.h
index 0c4bccd02b9..a6536edd2d1 100644
--- a/arch/arm/mach-ux500/devices-db5500.h
+++ b/arch/arm/mach-ux500/devices-db5500.h
@@ -17,6 +17,16 @@
#define db5500_add_i2c3(pdata) \
dbx500_add_i2c(3, U5500_I2C3_BASE, IRQ_DB5500_I2C3, pdata)
+struct db5500_keypad_platform_data;
+
+static inline struct platform_device *
+db5500_add_keypad(struct db5500_keypad_platform_data *pdata)
+{
+ return dbx500_add_platform_device_4k1irq("db5500-keypad", -1,
+ U5500_KEYPAD_BASE,
+ IRQ_DB5500_KBD, pdata);
+}
+
#define db5500_add_msp0_i2s(pdata) \
dbx500_add_msp_i2s(0, U5500_MSP0_BASE, IRQ_DB5500_MSP0, pdata)
#define db5500_add_msp1_i2s(pdata) \
@@ -37,21 +47,21 @@
#define db5500_add_usb(rx_cfg, tx_cfg) \
ux500_add_usb(U5500_USBOTG_BASE, IRQ_DB5500_USBOTG, rx_cfg, tx_cfg)
-#define db5500_add_sdi0(pdata) \
+#define db5500_add_sdi0(pdata, pid) \
dbx500_add_sdi("sdi0", U5500_SDI0_BASE, IRQ_DB5500_SDMMC0, pdata, \
- 0x10480180)
-#define db5500_add_sdi1(pdata) \
+ pid)
+#define db5500_add_sdi1(pdata, pid) \
dbx500_add_sdi("sdi1", U5500_SDI1_BASE, IRQ_DB5500_SDMMC1, pdata, \
- 0x10480180)
-#define db5500_add_sdi2(pdata) \
- dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata \
- 0x10480180)
-#define db5500_add_sdi3(pdata) \
- dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata \
- 0x10480180)
-#define db5500_add_sdi4(pdata) \
- dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata \
- 0x10480180)
+ pid)
+#define db5500_add_sdi2(pdata, pid) \
+ dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata, \
+ pid)
+#define db5500_add_sdi3(pdata, pid) \
+ dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata, \
+ pid)
+#define db5500_add_sdi4(pdata, pid) \
+ dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata, \
+ pid)
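
With the extra pid parameter, board code now passes the peripheral ID explicitly; a hedged example (mop500_sdi0_data is a hypothetical name) that keeps the previously hardcoded value would look like:

/* Hypothetical board code; preserves the old hardcoded peripheral ID. */
db5500_add_sdi0(&mop500_sdi0_data, 0x10480180);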
/* This one has a bad peripheral ID in the U5500 silicon */
#define db5500_add_spi0(pdata) \
@@ -61,10 +71,10 @@
dbx500_add_spi("spi1", U5500_SPI1_BASE, IRQ_DB5500_SPI1, pdata, \
0x10080023)
#define db5500_add_spi2(pdata) \
- dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata \
+ dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata, \
0x10080023)
#define db5500_add_spi3(pdata) \
- dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata \
+ dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata, \
0x10080023)
#define db5500_add_uart0(plat) \
@@ -76,4 +86,9 @@
#define db5500_add_uart3(plat) \
dbx500_add_uart("uart3", U5500_UART3_BASE, IRQ_DB5500_UART3, plat)
+#define db5500_add_cryp1(pdata) \
+ dbx500_add_cryp1(-1, U5500_CRYP1_BASE, IRQ_DB5500_CRYP1, pdata)
+#define db5500_add_hash1(pdata) \
+ dbx500_add_hash1(-1, U5500_HASH1_BASE, pdata)
+
#endif
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index a7c6cdc9b11..b4da60f2f76 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -10,160 +10,435 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
+#include <plat/pincfg.h>
#include <plat/ste_dma40.h>
+#include <mach/devices.h>
#include <mach/hardware.h>
#include <mach/setup.h>
+#include <mach/pm.h>
+#ifdef CONFIG_FB_MCDE
+#include <video/mcde.h>
+#endif
+#include <linux/mfd/dbx500-prcmu.h>
+#ifdef CONFIG_HSI
+#include <mach/hsi.h>
+#endif
+#include <mach/ste-dma40-db8500.h>
-#include "ste-dma40-db8500.h"
+#include "pins-db8500.h"
-static struct resource dma40_resources[] = {
+#define GPIO_DATA(_name, first, num) \
+ { \
+ .name = _name, \
+ .first_gpio = first, \
+ .first_irq = NOMADIK_GPIO_TO_IRQ(first), \
+ .num_gpio = num, \
+ .get_secondary_status = ux500_pm_gpio_read_wake_up_status, \
+ .set_ioforce = ux500_pm_prcmu_set_ioforce, \
+ .supports_sleepmode = true, \
+ }
+
+#define GPIO_RESOURCE(block) \
+ { \
+ .start = U8500_GPIOBANK##block##_BASE, \
+ .end = U8500_GPIOBANK##block##_BASE + 127, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = IRQ_DB8500_GPIO##block, \
+ .end = IRQ_DB8500_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ { \
+ .start = IRQ_PRCMU_GPIO##block, \
+ .end = IRQ_PRCMU_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }
+
+#define GPIO_DEVICE(block) \
+ { \
+ .name = "gpio", \
+ .id = block, \
+ .num_resources = 3, \
+ .resource = &u8500_gpio_resources[block * 3], \
+ .dev = { \
+ .platform_data = &u8500_gpio_data[block], \
+ }, \
+ }
+
+static struct nmk_gpio_platform_data u8500_gpio_data[] = {
+ GPIO_DATA("GPIO-0-31", 0, 32),
+ GPIO_DATA("GPIO-32-63", 32, 5), /* 37..63 not routed to pin */
+ GPIO_DATA("GPIO-64-95", 64, 32),
+ GPIO_DATA("GPIO-96-127", 96, 2), /* 98..127 not routed to pin */
+ GPIO_DATA("GPIO-128-159", 128, 32),
+ GPIO_DATA("GPIO-160-191", 160, 12), /* 172..191 not routed to pin */
+ GPIO_DATA("GPIO-192-223", 192, 32),
+ GPIO_DATA("GPIO-224-255", 224, 7), /* 231..255 not routed to pin */
+ GPIO_DATA("GPIO-256-288", 256, 12), /* 268..288 not routed to pin */
+};
+
+static struct resource u8500_gpio_resources[] = {
+ GPIO_RESOURCE(0),
+ GPIO_RESOURCE(1),
+ GPIO_RESOURCE(2),
+ GPIO_RESOURCE(3),
+ GPIO_RESOURCE(4),
+ GPIO_RESOURCE(5),
+ GPIO_RESOURCE(6),
+ GPIO_RESOURCE(7),
+ GPIO_RESOURCE(8),
+};
+
+struct platform_device u8500_gpio_devs[] = {
+ GPIO_DEVICE(0),
+ GPIO_DEVICE(1),
+ GPIO_DEVICE(2),
+ GPIO_DEVICE(3),
+ GPIO_DEVICE(4),
+ GPIO_DEVICE(5),
+ GPIO_DEVICE(6),
+ GPIO_DEVICE(7),
+ GPIO_DEVICE(8),
+};
+
+static struct resource u8500_shrm_resources[] = {
[0] = {
- .start = U8500_DMA_BASE,
- .end = U8500_DMA_BASE + SZ_4K - 1,
+ .start = U8500_SHRM_GOP_INTERRUPT_BASE,
+ .end = U8500_SHRM_GOP_INTERRUPT_BASE + ((4*4)-1),
+ .name = "shrm_gop_register_base",
.flags = IORESOURCE_MEM,
- .name = "base",
},
[1] = {
- .start = U8500_DMA_LCPA_BASE,
- .end = U8500_DMA_LCPA_BASE + 2 * SZ_1K - 1,
- .flags = IORESOURCE_MEM,
- .name = "lcpa",
+ .start = IRQ_CA_WAKE_REQ_V1,
+ .end = IRQ_CA_WAKE_REQ_V1,
+ .name = "ca_irq_wake_req",
+ .flags = IORESOURCE_IRQ,
},
[2] = {
- .start = IRQ_DB8500_DMA,
- .end = IRQ_DB8500_DMA,
+ .start = IRQ_AC_READ_NOTIFICATION_0_V1,
+ .end = IRQ_AC_READ_NOTIFICATION_0_V1,
+ .name = "ac_read_notification_0_irq",
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = IRQ_AC_READ_NOTIFICATION_1_V1,
+ .end = IRQ_AC_READ_NOTIFICATION_1_V1,
+ .name = "ac_read_notification_1_irq",
+ .flags = IORESOURCE_IRQ,
+ },
+ [4] = {
+ .start = IRQ_CA_MSG_PEND_NOTIFICATION_0_V1,
+ .end = IRQ_CA_MSG_PEND_NOTIFICATION_0_V1,
+ .name = "ca_msg_pending_notification_0_irq",
+ .flags = IORESOURCE_IRQ,
+ },
+ [5] = {
+ .start = IRQ_CA_MSG_PEND_NOTIFICATION_1_V1,
+ .end = IRQ_CA_MSG_PEND_NOTIFICATION_1_V1,
+ .name = "ca_msg_pending_notification_1_irq",
.flags = IORESOURCE_IRQ,
}
};
-/* Default configuration for physcial memcpy */
-struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
- .mode = STEDMA40_MODE_PHYSICAL,
- .dir = STEDMA40_MEM_TO_MEM,
+struct platform_device u8500_shrm_device = {
+ .name = "u8500_shrm",
+ .id = 0,
+ .dev = {
+ .init_name = "shrm_bus",
+ .coherent_dma_mask = ~0,
+ },
- .src_info.data_width = STEDMA40_BYTE_WIDTH,
- .src_info.psize = STEDMA40_PSIZE_PHY_1,
- .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+ .num_resources = ARRAY_SIZE(u8500_shrm_resources),
+ .resource = u8500_shrm_resources
+};
- .dst_info.data_width = STEDMA40_BYTE_WIDTH,
- .dst_info.psize = STEDMA40_PSIZE_PHY_1,
- .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+#ifdef CONFIG_FB_MCDE
+static struct resource mcde_resources[] = {
+ [0] = {
+ .name = MCDE_IO_AREA,
+ .start = U8500_MCDE_BASE,
+ .end = U8500_MCDE_BASE + U8500_MCDE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = MCDE_IO_AREA,
+ .start = U8500_DSI_LINK1_BASE,
+ .end = U8500_DSI_LINK1_BASE + U8500_DSI_LINK_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .name = MCDE_IO_AREA,
+ .start = U8500_DSI_LINK2_BASE,
+ .end = U8500_DSI_LINK2_BASE + U8500_DSI_LINK_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [3] = {
+ .name = MCDE_IO_AREA,
+ .start = U8500_DSI_LINK3_BASE,
+ .end = U8500_DSI_LINK3_BASE + U8500_DSI_LINK_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [4] = {
+ .name = MCDE_IRQ,
+ .start = IRQ_DB8500_DISP,
+ .end = IRQ_DB8500_DISP,
+ .flags = IORESOURCE_IRQ,
+ },
};
-/* Default configuration for logical memcpy */
-struct stedma40_chan_cfg dma40_memcpy_conf_log = {
- .dir = STEDMA40_MEM_TO_MEM,
- .src_info.data_width = STEDMA40_BYTE_WIDTH,
- .src_info.psize = STEDMA40_PSIZE_LOG_1,
- .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+static int mcde_platform_enable_dsipll(void)
+{
+ return prcmu_enable_dsipll();
+}
- .dst_info.data_width = STEDMA40_BYTE_WIDTH,
- .dst_info.psize = STEDMA40_PSIZE_LOG_1,
- .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+static int mcde_platform_disable_dsipll(void)
+{
+ return prcmu_disable_dsipll();
+}
+
+static int mcde_platform_set_display_clocks(void)
+{
+ return prcmu_set_display_clocks();
+}
+
+static struct mcde_platform_data mcde_u8500_pdata = {
+ /*
+ * [0] = 3: 24 bits DPI: connect LSB Ch B to D[0:7]
+ * [3] = 4: 24 bits DPI: connect MID Ch B to D[24:31]
+ * [4] = 5: 24 bits DPI: connect MSB Ch B to D[32:39]
+ *
+ * [1] = 3: TV out : connect LSB Ch B to D[8:15]
+ */
+#define DONT_CARE 0
+ .outmux = { 3, 3, DONT_CARE, 4, 5 },
+#undef DONT_CARE
+ .syncmux = 0x00, /* DPI channel A and B on output pins A and B resp */
+#ifdef CONFIG_MCDE_DISPLAY_DSI
+ .regulator_vana_id = "vdddsi1v2",
+#endif
+ .regulator_mcde_epod_id = "vsupply",
+ .regulator_esram_epod_id = "v-esram34",
+#ifdef CONFIG_MCDE_DISPLAY_DSI
+ .clock_dsi_id = "hdmi",
+ .clock_dsi_lp_id = "tv",
+#endif
+ .clock_dpi_id = "lcd",
+ .clock_mcde_id = "mcde",
+ .platform_set_clocks = mcde_platform_set_display_clocks,
+ .platform_enable_dsipll = mcde_platform_enable_dsipll,
+ .platform_disable_dsipll = mcde_platform_disable_dsipll,
};
+struct platform_device u8500_mcde_device = {
+ .name = "mcde",
+ .id = -1,
+ .dev = {
+ .platform_data = &mcde_u8500_pdata,
+ },
+ .num_resources = ARRAY_SIZE(mcde_resources),
+ .resource = mcde_resources,
+};
+#endif /* CONFIG_FB_MCDE */
+
+static struct resource b2r2_resources[] = {
+ [0] = {
+ .start = U8500_B2R2_BASE,
+ .end = U8500_B2R2_BASE + ((4*1024)-1),
+ .name = "b2r2_base",
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "B2R2_IRQ",
+ .start = IRQ_DB8500_B2R2,
+ .end = IRQ_DB8500_B2R2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device u8500_b2r2_device = {
+ .name = "b2r2",
+ .id = 0,
+ .dev = {
+ .init_name = "b2r2_bus",
+ .coherent_dma_mask = ~0,
+ },
+ .num_resources = ARRAY_SIZE(b2r2_resources),
+ .resource = b2r2_resources,
+};
+
+/*
+ * WATCHDOG
+ */
+
+struct platform_device u8500_wdt_device = {
+ .name = "ux500_wdt",
+ .id = -1,
+};
+
+#ifdef CONFIG_HSI
/*
- * Mapping between destination event lines and physical device address.
- * The event line is tied to a device and therefore the address is constant.
- * When the address comes from a primecell it will be configured in runtime
- * and we set the address to -1 as a placeholder.
+ * HSI
*/
-static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV] = {
- /* MUSB - these will be runtime-reconfigured */
- [DB8500_DMA_DEV39_USB_OTG_OEP_8] = -1,
- [DB8500_DMA_DEV16_USB_OTG_OEP_7_15] = -1,
- [DB8500_DMA_DEV17_USB_OTG_OEP_6_14] = -1,
- [DB8500_DMA_DEV18_USB_OTG_OEP_5_13] = -1,
- [DB8500_DMA_DEV19_USB_OTG_OEP_4_12] = -1,
- [DB8500_DMA_DEV36_USB_OTG_OEP_3_11] = -1,
- [DB8500_DMA_DEV37_USB_OTG_OEP_2_10] = -1,
- [DB8500_DMA_DEV38_USB_OTG_OEP_1_9] = -1,
- /* PrimeCells - run-time configured */
- [DB8500_DMA_DEV0_SPI0_TX] = -1,
- [DB8500_DMA_DEV1_SD_MMC0_TX] = -1,
- [DB8500_DMA_DEV2_SD_MMC1_TX] = -1,
- [DB8500_DMA_DEV3_SD_MMC2_TX] = -1,
- [DB8500_DMA_DEV8_SSP0_TX] = -1,
- [DB8500_DMA_DEV9_SSP1_TX] = -1,
- [DB8500_DMA_DEV11_UART2_TX] = -1,
- [DB8500_DMA_DEV12_UART1_TX] = -1,
- [DB8500_DMA_DEV13_UART0_TX] = -1,
- [DB8500_DMA_DEV28_SD_MM2_TX] = -1,
- [DB8500_DMA_DEV29_SD_MM0_TX] = -1,
- [DB8500_DMA_DEV32_SD_MM1_TX] = -1,
- [DB8500_DMA_DEV33_SPI2_TX] = -1,
- [DB8500_DMA_DEV35_SPI1_TX] = -1,
- [DB8500_DMA_DEV40_SPI3_TX] = -1,
- [DB8500_DMA_DEV41_SD_MM3_TX] = -1,
- [DB8500_DMA_DEV42_SD_MM4_TX] = -1,
- [DB8500_DMA_DEV43_SD_MM5_TX] = -1,
-};
-
-/* Mapping between source event lines and physical device address */
-static const dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV] = {
- /* MUSB - these will be runtime-reconfigured */
- [DB8500_DMA_DEV39_USB_OTG_IEP_8] = -1,
- [DB8500_DMA_DEV16_USB_OTG_IEP_7_15] = -1,
- [DB8500_DMA_DEV17_USB_OTG_IEP_6_14] = -1,
- [DB8500_DMA_DEV18_USB_OTG_IEP_5_13] = -1,
- [DB8500_DMA_DEV19_USB_OTG_IEP_4_12] = -1,
- [DB8500_DMA_DEV36_USB_OTG_IEP_3_11] = -1,
- [DB8500_DMA_DEV37_USB_OTG_IEP_2_10] = -1,
- [DB8500_DMA_DEV38_USB_OTG_IEP_1_9] = -1,
- /* PrimeCells */
- [DB8500_DMA_DEV0_SPI0_RX] = -1,
- [DB8500_DMA_DEV1_SD_MMC0_RX] = -1,
- [DB8500_DMA_DEV2_SD_MMC1_RX] = -1,
- [DB8500_DMA_DEV3_SD_MMC2_RX] = -1,
- [DB8500_DMA_DEV8_SSP0_RX] = -1,
- [DB8500_DMA_DEV9_SSP1_RX] = -1,
- [DB8500_DMA_DEV11_UART2_RX] = -1,
- [DB8500_DMA_DEV12_UART1_RX] = -1,
- [DB8500_DMA_DEV13_UART0_RX] = -1,
- [DB8500_DMA_DEV28_SD_MM2_RX] = -1,
- [DB8500_DMA_DEV29_SD_MM0_RX] = -1,
- [DB8500_DMA_DEV32_SD_MM1_RX] = -1,
- [DB8500_DMA_DEV33_SPI2_RX] = -1,
- [DB8500_DMA_DEV35_SPI1_RX] = -1,
- [DB8500_DMA_DEV40_SPI3_RX] = -1,
- [DB8500_DMA_DEV41_SD_MM3_RX] = -1,
- [DB8500_DMA_DEV42_SD_MM4_RX] = -1,
- [DB8500_DMA_DEV43_SD_MM5_RX] = -1,
-};
-
-/* Reserved event lines for memcpy only */
-static int dma40_memcpy_event[] = {
- DB8500_DMA_MEMCPY_TX_0,
- DB8500_DMA_MEMCPY_TX_1,
- DB8500_DMA_MEMCPY_TX_2,
- DB8500_DMA_MEMCPY_TX_3,
- DB8500_DMA_MEMCPY_TX_4,
- DB8500_DMA_MEMCPY_TX_5,
-};
-
-static struct stedma40_platform_data dma40_plat_data = {
- .dev_len = DB8500_DMA_NR_DEV,
- .dev_rx = dma40_rx_map,
- .dev_tx = dma40_tx_map,
- .memcpy = dma40_memcpy_event,
- .memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
- .memcpy_conf_phy = &dma40_memcpy_conf_phy,
- .memcpy_conf_log = &dma40_memcpy_conf_log,
- .disabled_channels = {-1},
-};
-
-struct platform_device u8500_dma40_device = {
+#define HSI0_CAWAKE { \
+ .start = IRQ_PRCMU_HSI0, \
+ .end = IRQ_PRCMU_HSI0, \
+ .flags = IORESOURCE_IRQ, \
+ .name = "hsi0_cawake" \
+}
+
+#define HSI0_ACWAKE { \
+ .start = 226, \
+ .end = 226, \
+ .flags = IORESOURCE_IO, \
+ .name = "hsi0_acwake" \
+}
+
+#define HSIR_OVERRUN(num) { \
+ .start = IRQ_DB8500_HSIR_CH##num##_OVRRUN, \
+ .end = IRQ_DB8500_HSIR_CH##num##_OVRRUN, \
+ .flags = IORESOURCE_IRQ, \
+ .name = "hsi_rx_overrun_ch"#num \
+}
+
+#define STE_HSI_PORT0_TX_CHANNEL_CFG(n) { \
+ .dir = STEDMA40_MEM_TO_PERIPH, \
+ .high_priority = true, \
+ .mode = STEDMA40_MODE_LOGICAL, \
+ .mode_opt = STEDMA40_LCHAN_SRC_LOG_DST_LOG, \
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY, \
+ .dst_dev_type = n,\
+ .src_info.big_endian = false,\
+ .src_info.data_width = STEDMA40_WORD_WIDTH,\
+ .dst_info.big_endian = false,\
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,\
+},
+
+#define STE_HSI_PORT0_RX_CHANNEL_CFG(n) { \
+ .dir = STEDMA40_PERIPH_TO_MEM, \
+ .high_priority = true, \
+ .mode = STEDMA40_MODE_LOGICAL, \
+ .mode_opt = STEDMA40_LCHAN_SRC_LOG_DST_LOG, \
+ .src_dev_type = n,\
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY, \
+ .src_info.big_endian = false,\
+ .src_info.data_width = STEDMA40_WORD_WIDTH,\
+ .dst_info.big_endian = false,\
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,\
+},
+
+static struct resource u8500_hsi_resources[] = {
+ {
+ .start = U8500_HSIR_BASE,
+ .end = U8500_HSIR_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "hsi_rx_base"
+ },
+ {
+ .start = U8500_HSIT_BASE,
+ .end = U8500_HSIT_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "hsi_tx_base"
+ },
+ {
+ .start = IRQ_DB8500_HSIRD0,
+ .end = IRQ_DB8500_HSIRD0,
+ .flags = IORESOURCE_IRQ,
+ .name = "hsi_rx_irq0"
+ },
+ {
+ .start = IRQ_DB8500_HSITD0,
+ .end = IRQ_DB8500_HSITD0,
+ .flags = IORESOURCE_IRQ,
+ .name = "hsi_tx_irq0"
+ },
+ {
+ .start = IRQ_DB8500_HSIR_EXCEP,
+ .end = IRQ_DB8500_HSIR_EXCEP,
+ .flags = IORESOURCE_IRQ,
+ .name = "hsi_rx_excep0"
+ },
+ HSIR_OVERRUN(0),
+ HSIR_OVERRUN(1),
+ HSIR_OVERRUN(2),
+ HSIR_OVERRUN(3),
+ HSIR_OVERRUN(4),
+ HSIR_OVERRUN(5),
+ HSIR_OVERRUN(6),
+ HSIR_OVERRUN(7),
+ HSI0_CAWAKE,
+ HSI0_ACWAKE,
+};
+
+#ifdef CONFIG_STE_DMA40
+static struct stedma40_chan_cfg ste_hsi_port0_dma_tx_cfg[] = {
+ STE_HSI_PORT0_TX_CHANNEL_CFG(DB8500_DMA_DEV20_SLIM0_CH0_TX_HSI_TX_CH0)
+ STE_HSI_PORT0_TX_CHANNEL_CFG(DB8500_DMA_DEV21_SLIM0_CH1_TX_HSI_TX_CH1)
+ STE_HSI_PORT0_TX_CHANNEL_CFG(DB8500_DMA_DEV22_SLIM0_CH2_TX_HSI_TX_CH2)
+ STE_HSI_PORT0_TX_CHANNEL_CFG(DB8500_DMA_DEV23_SLIM0_CH3_TX_HSI_TX_CH3)
+};
+
+static struct stedma40_chan_cfg ste_hsi_port0_dma_rx_cfg[] = {
+ STE_HSI_PORT0_RX_CHANNEL_CFG(DB8500_DMA_DEV20_SLIM0_CH0_RX_HSI_RX_CH0)
+ STE_HSI_PORT0_RX_CHANNEL_CFG(DB8500_DMA_DEV21_SLIM0_CH1_RX_HSI_RX_CH1)
+ STE_HSI_PORT0_RX_CHANNEL_CFG(DB8500_DMA_DEV22_SLIM0_CH2_RX_HSI_RX_CH2)
+ STE_HSI_PORT0_RX_CHANNEL_CFG(DB8500_DMA_DEV23_SLIM0_CH3_RX_HSI_RX_CH3)
+};
+#endif
+
+static struct ste_hsi_port_cfg ste_hsi_port0_cfg = {
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_tx_cfg = ste_hsi_port0_dma_tx_cfg,
+ .dma_rx_cfg = ste_hsi_port0_dma_rx_cfg
+#endif
+};
+
+struct ste_hsi_platform_data u8500_hsi_platform_data = {
+ .num_ports = 1,
+ .use_dma = 1,
+ .port_cfg = &ste_hsi_port0_cfg,
+};
+
+struct platform_device u8500_hsi_device = {
.dev = {
- .platform_data = &dma40_plat_data,
+ .platform_data = &u8500_hsi_platform_data,
},
- .name = "dma40",
+ .name = "ste_hsi",
.id = 0,
- .num_resources = ARRAY_SIZE(dma40_resources),
- .resource = dma40_resources
+ .resource = u8500_hsi_resources,
+ .num_resources = ARRAY_SIZE(u8500_hsi_resources)
+};
+#endif /* CONFIG_HSI */
+
+/*
+ * Thermal Sensor
+ */
+
+static struct resource u8500_thsens_resources[] = {
+ {
+ .name = "IRQ_HOTMON_LOW",
+ .start = IRQ_PRCMU_HOTMON_LOW,
+ .end = IRQ_PRCMU_HOTMON_LOW,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "IRQ_HOTMON_HIGH",
+ .start = IRQ_PRCMU_HOTMON_HIGH,
+ .end = IRQ_PRCMU_HOTMON_HIGH,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device u8500_thsens_device = {
+ .name = "dbx500_temp",
+ .resource = u8500_thsens_resources,
+ .num_resources = ARRAY_SIZE(u8500_thsens_resources),
};
struct resource keypad_resources[] = {
diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h
index cbd4a9ae810..4a54a6f5aa6 100644
--- a/arch/arm/mach-ux500/devices-db8500.h
+++ b/arch/arm/mach-ux500/devices-db8500.h
@@ -98,4 +98,9 @@ db8500_add_ssp(const char *name, resource_size_t base, int irq,
#define db8500_add_uart2(pdata) \
dbx500_add_uart("uart2", U8500_UART2_BASE, IRQ_DB8500_UART2, pdata)
+#define db8500_add_cryp1(pdata) \
+ dbx500_add_cryp1(-1, U8500_CRYP1_BASE, IRQ_DB8500_CRYP1, pdata)
+#define db8500_add_hash1(pdata) \
+ dbx500_add_hash1(-1, U8500_HASH1_BASE, pdata)
+
#endif
diff --git a/arch/arm/mach-ux500/devices.c b/arch/arm/mach-ux500/devices.c
index ea0a2f92ca7..77d8d088460 100644
--- a/arch/arm/mach-ux500/devices.c
+++ b/arch/arm/mach-ux500/devices.c
@@ -14,6 +14,51 @@
#include <mach/hardware.h>
#include <mach/setup.h>
+#ifdef CONFIG_STE_TRACE_MODEM
+#include <linux/db8500-modem-trace.h>
+#endif
+
+#ifdef CONFIG_STE_TRACE_MODEM
+static struct resource trace_resource = {
+ .start = 0,
+ .end = 0,
+ .name = "db8500-trace-area",
+ .flags = IORESOURCE_MEM
+};
+
+static struct db8500_trace_platform_data trace_pdata = {
+ .ape_base = U8500_APE_BASE,
+ .modem_base = U8500_MODEM_BASE,
+};
+
+struct platform_device u8500_trace_modem = {
+ .name = "db8500-modem-trace",
+ .id = 0,
+ .dev = {
+ .init_name = "db8500-modem-trace",
+ .platform_data = &trace_pdata,
+ },
+ .num_resources = 1,
+ .resource = &trace_resource,
+};
+
+static int __init early_trace_modem(char *p)
+{
+ struct resource *data = &trace_resource;
+ u32 size = memparse(p, &p);
+ if (*p == '@')
+ data->start = memparse(p + 1, &p);
+ data->end = data->start + size - 1;
+ return 0;
+}
+
+early_param("mem_mtrace", early_trace_modem);
+#endif
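
A hedged example of the boot argument parsed above (the size and address are illustrative only); memparse() accepts the usual K/M/G suffixes:

/*
 * Example only:
 *   mem_mtrace=1M@0x06000000
 * yields size = 0x100000 and start = 0x06000000, so trace_resource
 * covers [0x06000000 .. 0x060FFFFF].
 */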
+
+struct platform_device ux500_hwmem_device = {
+ .name = "hwmem",
+};
+
void __init amba_add_devices(struct amba_device *devs[], int num)
{
int i;
diff --git a/arch/arm/mach-ux500/dma-db5500.c b/arch/arm/mach-ux500/dma-db5500.c
index 1cfab68ae41..8f31f2f06a5 100644
--- a/arch/arm/mach-ux500/dma-db5500.c
+++ b/arch/arm/mach-ux500/dma-db5500.c
@@ -14,8 +14,8 @@
#include <plat/ste_dma40.h>
#include <mach/setup.h>
#include <mach/hardware.h>
-
-#include "ste-dma40-db5500.h"
+#include <mach/pm.h>
+#include <mach/ste-dma40-db5500.h>
static struct resource dma40_resources[] = {
[0] = {
@@ -72,28 +72,128 @@ static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
* now.
*/
static const dma_addr_t dma40_rx_map[DB5500_DMA_NR_DEV] = {
- [DB5500_DMA_DEV24_SDMMC0_RX] = -1,
- [DB5500_DMA_DEV38_USB_OTG_IEP_8] = -1,
- [DB5500_DMA_DEV23_USB_OTG_IEP_7_15] = -1,
- [DB5500_DMA_DEV22_USB_OTG_IEP_6_14] = -1,
- [DB5500_DMA_DEV21_USB_OTG_IEP_5_13] = -1,
- [DB5500_DMA_DEV20_USB_OTG_IEP_4_12] = -1,
- [DB5500_DMA_DEV6_USB_OTG_IEP_3_11] = -1,
- [DB5500_DMA_DEV5_USB_OTG_IEP_2_10] = -1,
- [DB5500_DMA_DEV4_USB_OTG_IEP_1_9] = -1,
+ [DB5500_DMA_DEV0_SPI0_RX] = 0,
+ [DB5500_DMA_DEV1_SPI1_RX] = 0,
+ [DB5500_DMA_DEV2_SPI2_RX] = 0,
+ [DB5500_DMA_DEV3_SPI3_RX] = 0,
+ [DB5500_DMA_DEV4_USB_OTG_IEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV5_USB_OTG_IEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV6_USB_OTG_IEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV7_IRDA_RFS] = 0,
+ [DB5500_DMA_DEV8_IRDA_FIFO_RX] = 0,
+ [DB5500_DMA_DEV9_MSP0_RX] = U5500_MSP0_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV10_MSP1_RX] = U5500_MSP1_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV11_MSP2_RX] = U5500_MSP2_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV12_UART0_RX] = 0,
+ [DB5500_DMA_DEV13_UART1_RX] = 0,
+ [DB5500_DMA_DEV14_UART2_RX] = 0,
+ [DB5500_DMA_DEV15_UART3_RX] = 0,
+ [DB5500_DMA_DEV16_USB_OTG_IEP_8] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV17_USB_OTG_IEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV18_USB_OTG_IEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV19_USB_OTG_IEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV20_USB_OTG_IEP_4_12] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV21_USB_OTG_IEP_5_13] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV22_USB_OTG_IEP_6_14] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV23_USB_OTG_IEP_7_15] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV24_SDMMC0_RX] = U5500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV25_SDMMC1_RX] = U5500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV26_SDMMC2_RX] = U5500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV27_SDMMC3_RX] = U5500_SDI3_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV28_SDMMC4_RX] = U5500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ /* 29, 30 not used */
+ [DB5500_DMA_DEV31_CRYPTO1_RX] = 0, /* v2 */
+ /* 32 not used */
+ [DB5500_DMA_DEV33_SDMMC0_RX] = U5500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV34_SDMMC1_RX] = U5500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV35_SDMMC2_RX] = U5500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV36_SDMMC3_RX] = U5500_SDI3_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV37_SDMMC4_RX] = U5500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV38_USB_OTG_IEP_8] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV39_USB_OTG_IEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV40_USB_OTG_IEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV41_USB_OTG_IEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV42_USB_OTG_IEP_4_12] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV43_USB_OTG_IEP_5_13] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV44_USB_OTG_IEP_6_14] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV45_USB_OTG_IEP_7_15] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV46_CRYPTO1_RX] = 0, /* v2 */
+ [DB5500_DMA_DEV47_MCDE_RX] = 0,
+ [DB5500_DMA_DEV48_CRYPTO1_RX] = U5500_CRYP1_BASE + CRYP1_RX_REG_OFFSET,
+ /* 49, 50 not used */
+ [DB5500_DMA_DEV49_I2C1_RX] = 0,
+ [DB5500_DMA_DEV50_I2C3_RX] = 0,
+ [DB5500_DMA_DEV51_I2C2_RX] = 0,
+ /* 54 - 60 not used */
+ [DB5500_DMA_DEV61_CRYPTO0_RX] = 0,
+ /* 62, 63 not used */
};
/* Mapping between destination event lines and physical device address */
static const dma_addr_t dma40_tx_map[DB5500_DMA_NR_DEV] = {
- [DB5500_DMA_DEV24_SDMMC0_TX] = -1,
- [DB5500_DMA_DEV38_USB_OTG_OEP_8] = -1,
- [DB5500_DMA_DEV23_USB_OTG_OEP_7_15] = -1,
- [DB5500_DMA_DEV22_USB_OTG_OEP_6_14] = -1,
- [DB5500_DMA_DEV21_USB_OTG_OEP_5_13] = -1,
- [DB5500_DMA_DEV20_USB_OTG_OEP_4_12] = -1,
- [DB5500_DMA_DEV6_USB_OTG_OEP_3_11] = -1,
- [DB5500_DMA_DEV5_USB_OTG_OEP_2_10] = -1,
- [DB5500_DMA_DEV4_USB_OTG_OEP_1_9] = -1,
+ [DB5500_DMA_DEV0_SPI0_TX] = 0,
+ [DB5500_DMA_DEV1_SPI1_TX] = 0,
+ [DB5500_DMA_DEV2_SPI2_TX] = 0,
+ [DB5500_DMA_DEV3_SPI3_TX] = 0,
+ [DB5500_DMA_DEV4_USB_OTG_OEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV5_USB_OTG_OEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV6_USB_OTG_OEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV7_IRRC_TX] = 0,
+ [DB5500_DMA_DEV8_IRDA_FIFO_TX] = 0,
+ [DB5500_DMA_DEV9_MSP0_TX] = U5500_MSP0_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV10_MSP1_TX] = U5500_MSP1_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV11_MSP2_TX] = U5500_MSP2_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV12_UART0_TX] = 0,
+ [DB5500_DMA_DEV13_UART1_TX] = 0,
+ [DB5500_DMA_DEV14_UART2_TX] = 0,
+ [DB5500_DMA_DEV15_UART3_TX] = 0,
+ [DB5500_DMA_DEV16_USB_OTG_OEP_8] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV17_USB_OTG_OEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV18_USB_OTG_OEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV19_USB_OTG_OEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV20_USB_OTG_OEP_4_12] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV21_USB_OTG_OEP_5_13] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV22_USB_OTG_OEP_6_14] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV23_USB_OTG_OEP_7_15] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV24_SDMMC0_TX] = U5500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV25_SDMMC1_TX] = U5500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV26_SDMMC2_TX] = U5500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV27_SDMMC3_TX] = U5500_SDI3_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV28_SDMMC4_TX] = U5500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ /* 29 not used */
+ [DB5500_DMA_DEV30_HASH1_TX] = 0, /* v2 */
+ [DB5500_DMA_DEV31_CRYPTO1_TX] = 0, /* v2 */
+ [DB5500_DMA_DEV32_FSMC_TX] = 0,
+ [DB5500_DMA_DEV33_SDMMC0_TX] = U5500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV34_SDMMC1_TX] = U5500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV35_SDMMC2_TX] = U5500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV36_SDMMC3_TX] = U5500_SDI3_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV37_SDMMC4_TX] = U5500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB5500_DMA_DEV38_USB_OTG_OEP_8] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV39_USB_OTG_OEP_1_9] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV40_USB_OTG_OEP_2_10] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV41_USB_OTG_OEP_3_11] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV42_USB_OTG_OEP_4_12] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV43_USB_OTG_OEP_5_13] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV44_USB_OTG_OEP_6_14] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV45_USB_OTG_OEP_7_15] = U5500_USBOTG_BASE,
+ [DB5500_DMA_DEV46_CRYPTO1_TX] = 0, /* v2 */
+ [DB5500_DMA_DEV47_STM_TX] = 0,
+ [DB5500_DMA_DEV48_CRYPTO1_TX] = U5500_CRYP1_BASE + CRYP1_TX_REG_OFFSET,
+ [DB5500_DMA_DEV49_CRYPTO1_TX_HASH1_TX] = 0,
+ [DB5500_DMA_DEV50_HASH1_TX] = U5500_HASH1_BASE + HASH1_TX_REG_OFFSET,
+ [DB5500_DMA_DEV51_I2C1_TX] = 0,
+ [DB5500_DMA_DEV52_I2C3_TX] = 0,
+ [DB5500_DMA_DEV53_I2C2_TX] = 0,
+ /* 54, 55 not used */
+ [DB5500_DMA_MEMCPY_TX_1] = 0,
+ [DB5500_DMA_MEMCPY_TX_2] = 0,
+ [DB5500_DMA_MEMCPY_TX_3] = 0,
+ [DB5500_DMA_MEMCPY_TX_4] = 0,
+ [DB5500_DMA_MEMCPY_TX_5] = 0,
+ [DB5500_DMA_DEV61_CRYPTO0_TX] = 0,
+ [DB5500_DMA_DEV62_CRYPTO0_TX_HASH0_TX] = 0,
+ [DB5500_DMA_DEV63_HASH0_TX] = 0,
};
static int dma40_memcpy_event[] = {
@@ -118,6 +218,9 @@ static struct stedma40_platform_data dma40_plat_data = {
static struct platform_device dma40_device = {
.dev = {
.platform_data = &dma40_plat_data,
+#ifdef CONFIG_PM
+ .pm_domain = &ux500_dev_power_domain,
+#endif
},
.name = "dma40",
.id = 0,
@@ -131,6 +234,6 @@ void __init db5500_dma_init(void)
ret = platform_device_register(&dma40_device);
if (ret)
- dev_err(&dma40_device.dev, "unable to register device: %d\n", ret);
-
+ dev_err(&dma40_device.dev, "unable to register device: %d\n",
+ ret);
}
diff --git a/arch/arm/mach-ux500/dma-db8500.c b/arch/arm/mach-ux500/dma-db8500.c
new file mode 100644
index 00000000000..17c4c80de33
--- /dev/null
+++ b/arch/arm/mach-ux500/dma-db8500.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ *
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <plat/ste_dma40.h>
+
+#ifdef CONFIG_HSI
+#include <mach/hsi.h>
+#endif
+#include <mach/setup.h>
+#include <mach/ste-dma40-db8500.h>
+#include <mach/pm.h>
+#include <mach/context.h>
+
+
+
+static struct resource dma40_resources[] = {
+ [0] = {
+ .start = U8500_DMA_BASE,
+ .end = U8500_DMA_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "base",
+ },
+ [1] = {
+ .start = U8500_DMA_LCPA_BASE,
+ .end = U8500_DMA_LCPA_BASE + 2 * SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "lcpa",
+ },
+ [2] = {
+ .start = IRQ_DB8500_DMA,
+ .end = IRQ_DB8500_DMA,
+ .flags = IORESOURCE_IRQ
+ },
+ [3] = {
+ .start = U8500_DMA_LCLA_BASE,
+ .end = U8500_DMA_LCLA_BASE + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "lcla_esram",
+ }
+};
+
+/* Default configuration for physical memcpy */
+static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
+ .mode = STEDMA40_MODE_PHYSICAL,
+ .dir = STEDMA40_MEM_TO_MEM,
+
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .src_info.psize = STEDMA40_PSIZE_PHY_1,
+ .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.psize = STEDMA40_PSIZE_PHY_1,
+ .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+};
+
+/* Default configuration for logical memcpy */
+static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
+ .dir = STEDMA40_MEM_TO_MEM,
+
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .src_info.psize = STEDMA40_PSIZE_LOG_1,
+ .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_1,
+ .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+};
+
+/*
+ * Mapping between source event lines and physical device addresses.
+ * The table was created assuming that each event line is tied to one device
+ * and therefore has a constant address. This does not hold for at least USB,
+ * where the values are only placeholders, but the table is kept and used for
+ * now.
+ */
+static dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV] = {
+ [DB8500_DMA_DEV0_SPI0_RX] = 0,
+ [DB8500_DMA_DEV1_SD_MMC0_RX] = U8500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV2_SD_MMC1_RX] = 0,
+ [DB8500_DMA_DEV3_SD_MMC2_RX] = 0,
+ [DB8500_DMA_DEV4_I2C1_RX] = 0,
+ [DB8500_DMA_DEV5_I2C3_RX] = 0,
+ [DB8500_DMA_DEV6_I2C2_RX] = 0,
+ [DB8500_DMA_DEV7_I2C4_RX] = 0,
+ [DB8500_DMA_DEV8_SSP0_RX] = U8500_SSP0_BASE + SSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV9_SSP1_RX] = 0,
+ [DB8500_DMA_DEV10_MCDE_RX] = 0,
+ [DB8500_DMA_DEV11_UART2_RX] = 0,
+ [DB8500_DMA_DEV12_UART1_RX] = 0,
+ [DB8500_DMA_DEV13_UART0_RX] = 0,
+ [DB8500_DMA_DEV14_MSP2_RX] = U8500_MSP2_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV15_I2C0_RX] = 0,
+ [DB8500_DMA_DEV16_USB_OTG_IEP_7_15] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV17_USB_OTG_IEP_6_14] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV18_USB_OTG_IEP_5_13] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV19_USB_OTG_IEP_4_12] = U8500_USBOTG_BASE,
+#ifdef CONFIG_HSI
+ [DB8500_DMA_DEV20_SLIM0_CH0_RX_HSI_RX_CH0] = U8500_HSIR_BASE + 0x0 + STE_HSI_RX_BUFFERX,
+ [DB8500_DMA_DEV21_SLIM0_CH1_RX_HSI_RX_CH1] = U8500_HSIR_BASE + 0x4 + STE_HSI_RX_BUFFERX,
+ [DB8500_DMA_DEV22_SLIM0_CH2_RX_HSI_RX_CH2] = U8500_HSIR_BASE + 0x8 + STE_HSI_RX_BUFFERX,
+ [DB8500_DMA_DEV23_SLIM0_CH3_RX_HSI_RX_CH3] = U8500_HSIR_BASE + 0xC + STE_HSI_RX_BUFFERX,
+#endif
+ [DB8500_DMA_DEV24_SRC_SXA0_RX_TX] = 0,
+ [DB8500_DMA_DEV25_SRC_SXA1_RX_TX] = 0,
+ [DB8500_DMA_DEV26_SRC_SXA2_RX_TX] = 0,
+ [DB8500_DMA_DEV27_SRC_SXA3_RX_TX] = 0,
+ [DB8500_DMA_DEV28_SD_MM2_RX] = U8500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV29_SD_MM0_RX] = U8500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV30_MSP3_RX] = U8500_MSP3_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV31_MSP0_RX_SLIM0_CH0_RX] = U8500_MSP0_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV32_SD_MM1_RX] = U8500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV33_SPI2_RX] = 0,
+ [DB8500_DMA_DEV34_I2C3_RX2] = 0,
+ [DB8500_DMA_DEV35_SPI1_RX] = 0,
+ [DB8500_DMA_DEV36_USB_OTG_IEP_3_11] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV37_USB_OTG_IEP_2_10] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV38_USB_OTG_IEP_1_9] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV39_USB_OTG_IEP_8] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV40_SPI3_RX] = 0,
+ [DB8500_DMA_DEV41_SD_MM3_RX] = 0,
+ [DB8500_DMA_DEV42_SD_MM4_RX] = U8500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV43_SD_MM5_RX] = 0,
+ [DB8500_DMA_DEV44_SRC_SXA4_RX_TX] = 0,
+ [DB8500_DMA_DEV45_SRC_SXA5_RX_TX] = 0,
+ [DB8500_DMA_DEV46_SLIM0_CH8_RX_SRC_SXA6_RX_TX] = 0,
+ [DB8500_DMA_DEV47_SLIM0_CH9_RX_SRC_SXA7_RX_TX] = 0,
+ [DB8500_DMA_DEV48_CAC1_RX] = U8500_CRYP1_BASE + CRYP1_RX_REG_OFFSET,
+ /* 49, 50 and 51 are not used */
+ [DB8500_DMA_DEV52_SLIM0_CH4_RX_HSI_RX_CH4] = 0,
+ [DB8500_DMA_DEV53_SLIM0_CH5_RX_HSI_RX_CH5] = 0,
+ [DB8500_DMA_DEV54_SLIM0_CH6_RX_HSI_RX_CH6] = 0,
+ [DB8500_DMA_DEV55_SLIM0_CH7_RX_HSI_RX_CH7] = 0,
+ /* 56, 57, 58, 59 and 60 are not used */
+ [DB8500_DMA_DEV61_CAC0_RX] = 0,
+ /* 62 and 63 are not used */
+};
+
+/* Mapping between destination event lines and physical device address */
+static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV] = {
+ [DB8500_DMA_DEV0_SPI0_TX] = 0,
+ [DB8500_DMA_DEV1_SD_MMC0_TX] = U8500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV2_SD_MMC1_TX] = 0,
+ [DB8500_DMA_DEV3_SD_MMC2_TX] = 0,
+ [DB8500_DMA_DEV4_I2C1_TX] = 0,
+ [DB8500_DMA_DEV5_I2C3_TX] = 0,
+ [DB8500_DMA_DEV6_I2C2_TX] = 0,
+ [DB8500_DMA_DEV7_I2C4_TX] = 0,
+ [DB8500_DMA_DEV8_SSP0_TX] = U8500_SSP0_BASE + SSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV9_SSP1_TX] = 0,
+ /* 10 is not used*/
+ [DB8500_DMA_DEV11_UART2_TX] = 0,
+ [DB8500_DMA_DEV12_UART1_TX] = 0,
+ [DB8500_DMA_DEV13_UART0_TX] = 0,
+ [DB8500_DMA_DEV14_MSP2_TX] = U8500_MSP2_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV15_I2C0_TX] = 0,
+ [DB8500_DMA_DEV16_USB_OTG_OEP_7_15] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV17_USB_OTG_OEP_6_14] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV18_USB_OTG_OEP_5_13] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV19_USB_OTG_OEP_4_12] = U8500_USBOTG_BASE,
+#ifdef CONFIG_HSI
+ [DB8500_DMA_DEV20_SLIM0_CH0_TX_HSI_TX_CH0] = U8500_HSIT_BASE + 0x0 + STE_HSI_TX_BUFFERX,
+ [DB8500_DMA_DEV21_SLIM0_CH1_TX_HSI_TX_CH1] = U8500_HSIT_BASE + 0x4 + STE_HSI_TX_BUFFERX,
+ [DB8500_DMA_DEV22_SLIM0_CH2_TX_HSI_TX_CH2] = U8500_HSIT_BASE + 0x8 + STE_HSI_TX_BUFFERX,
+ [DB8500_DMA_DEV23_SLIM0_CH3_TX_HSI_TX_CH3] = U8500_HSIT_BASE + 0xC + STE_HSI_TX_BUFFERX,
+#endif
+ [DB8500_DMA_DEV24_DST_SXA0_RX_TX] = 0,
+ [DB8500_DMA_DEV25_DST_SXA1_RX_TX] = 0,
+ [DB8500_DMA_DEV26_DST_SXA2_RX_TX] = 0,
+ [DB8500_DMA_DEV27_DST_SXA3_RX_TX] = 0,
+ [DB8500_DMA_DEV28_SD_MM2_TX] = U8500_SDI2_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV29_SD_MM0_TX] = U8500_SDI0_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV30_MSP1_TX] = U8500_MSP1_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV31_MSP0_TX_SLIM0_CH0_TX] = U8500_MSP0_BASE + MSP_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV32_SD_MM1_TX] = U8500_SDI1_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV33_SPI2_TX] = 0,
+ [DB8500_DMA_DEV34_I2C3_TX2] = 0,
+ [DB8500_DMA_DEV35_SPI1_TX] = 0,
+ [DB8500_DMA_DEV36_USB_OTG_OEP_3_11] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV37_USB_OTG_OEP_2_10] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV38_USB_OTG_OEP_1_9] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV39_USB_OTG_OEP_8] = U8500_USBOTG_BASE,
+ [DB8500_DMA_DEV40_SPI3_TX] = 0,
+ [DB8500_DMA_DEV41_SD_MM3_TX] = 0,
+ [DB8500_DMA_DEV42_SD_MM4_TX] = U8500_SDI4_BASE + SD_MMC_TX_RX_REG_OFFSET,
+ [DB8500_DMA_DEV43_SD_MM5_TX] = 0,
+ [DB8500_DMA_DEV44_DST_SXA4_RX_TX] = 0,
+ [DB8500_DMA_DEV45_DST_SXA5_RX_TX] = 0,
+ [DB8500_DMA_DEV46_SLIM0_CH8_TX_DST_SXA6_RX_TX] = 0,
+ [DB8500_DMA_DEV47_SLIM0_CH9_TX_DST_SXA7_RX_TX] = 0,
+ [DB8500_DMA_DEV48_CAC1_TX] = U8500_CRYP1_BASE + CRYP1_TX_REG_OFFSET,
+ [DB8500_DMA_DEV49_CAC1_TX_HAC1_TX] = 0,
+ [DB8500_DMA_DEV50_HAC1_TX] = U8500_HASH1_BASE + HASH1_TX_REG_OFFSET,
+ [DB8500_DMA_MEMCPY_TX_0] = 0,
+ [DB8500_DMA_DEV52_SLIM1_CH4_TX_HSI_TX_CH4] = 0,
+ [DB8500_DMA_DEV53_SLIM1_CH5_TX_HSI_TX_CH5] = 0,
+ [DB8500_DMA_DEV54_SLIM1_CH6_TX_HSI_TX_CH6] = 0,
+ [DB8500_DMA_DEV55_SLIM1_CH7_TX_HSI_TX_CH7] = 0,
+ [DB8500_DMA_MEMCPY_TX_1] = 0,
+ [DB8500_DMA_MEMCPY_TX_2] = 0,
+ [DB8500_DMA_MEMCPY_TX_3] = 0,
+ [DB8500_DMA_MEMCPY_TX_4] = 0,
+ [DB8500_DMA_MEMCPY_TX_5] = 0,
+ [DB8500_DMA_DEV61_CAC0_TX] = 0,
+ [DB8500_DMA_DEV62_CAC0_TX_HAC0_TX] = 0,
+ [DB8500_DMA_DEV63_HAC0_TX] = 0,
+};
+
+/* Reserved event lines for memcpy only */
+static int dma40_memcpy_event[] = {
+ DB8500_DMA_MEMCPY_TX_0,
+ DB8500_DMA_MEMCPY_TX_1,
+ DB8500_DMA_MEMCPY_TX_2,
+ DB8500_DMA_MEMCPY_TX_3,
+ DB8500_DMA_MEMCPY_TX_4,
+ DB8500_DMA_MEMCPY_TX_5,
+};
+
+static struct stedma40_platform_data dma40_plat_data = {
+ .dev_len = ARRAY_SIZE(dma40_rx_map),
+ .dev_rx = dma40_rx_map,
+ .dev_tx = dma40_tx_map,
+ .memcpy = dma40_memcpy_event,
+ .memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
+ .memcpy_conf_phy = &dma40_memcpy_conf_phy,
+ .memcpy_conf_log = &dma40_memcpy_conf_log,
+ /* Audio is using physical channel 2 from MMDSP */
+ .disabled_channels = {2, -1},
+ .use_esram_lcla = true,
+};
+
+#ifdef CONFIG_UX500_CONTEXT
+#define D40_DREG_GCC 0x000
+#define D40_DREG_LCPA 0x020
+#define D40_DREG_LCLA 0x024
+
+static void __iomem *base;
+
+static int dma_context_notifier_call(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ static unsigned long lcpa;
+ static unsigned long lcla;
+ static unsigned long gcc;
+
+ switch (event) {
+ case CONTEXT_APE_SAVE:
+ lcla = readl(base + D40_DREG_LCLA);
+ lcpa = readl(base + D40_DREG_LCPA);
+ gcc = readl(base + D40_DREG_GCC);
+ break;
+
+ case CONTEXT_APE_RESTORE:
+ writel(gcc, base + D40_DREG_GCC);
+ writel(lcpa, base + D40_DREG_LCPA);
+ writel(lcla, base + D40_DREG_LCLA);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block dma_context_notifier = {
+ .notifier_call = dma_context_notifier_call,
+};
+
+static void dma_context_notifier_init(void)
+{
+ base = ioremap(dma40_resources[0].start,
+ resource_size(&dma40_resources[0]));
+ if (WARN_ON(!base))
+ return;
+
+ WARN_ON(context_ape_notifier_register(&dma_context_notifier));
+}
+#else
+static void dma_context_notifier_init(void)
+{
+}
+#endif
+
+static struct platform_device dma40_device = {
+ .dev = {
+ .platform_data = &dma40_plat_data,
+#ifdef CONFIG_PM
+ .pm_domain = &ux500_dev_power_domain,
+#endif
+ },
+ .name = "dma40",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dma40_resources),
+ .resource = dma40_resources
+};
+
+void __init db8500_dma_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&dma40_device);
+ if (ret)
+ dev_err(&dma40_device.dev, "unable to register device: %d\n",
+ ret);
+
+ dma_context_notifier_init();
+}
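
For context (not part of this patch): the event-line tables above are only consumed when a client driver requests a channel from the stedma40 dmaengine driver. Below is a minimal sketch of such a client, assuming the stedma40 client API in <plat/ste_dma40.h> of this tree; the config field names, the STEDMA40_* constants and stedma40_filter() are assumptions, not shown in this diff.

/*
 * Sketch only: request a logical DMA channel for SD/MMC0 receive, using an
 * event line from the dma40_rx_map table above.
 */
static struct stedma40_chan_cfg sdmmc0_dma_cfg_rx = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = STEDMA40_PERIPH_TO_MEM,
	.src_dev_type = DB8500_DMA_DEV1_SD_MMC0_RX,	/* event line above */
	.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
	.src_info.data_width = STEDMA40_WORD_WIDTH,
	.dst_info.data_width = STEDMA40_WORD_WIDTH,
};

static struct dma_chan *sdmmc0_request_rx_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* stedma40_filter() matches channels against the cfg above */
	return dma_request_channel(mask, stedma40_filter, &sdmmc0_dma_cfg_rx);
}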
diff --git a/arch/arm/mach-ux500/hotplug.c b/arch/arm/mach-ux500/hotplug.c
index c76f0f456f0..bf7e81705d2 100644
--- a/arch/arm/mach-ux500/hotplug.c
+++ b/arch/arm/mach-ux500/hotplug.c
@@ -11,20 +11,31 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
+#include <linux/completion.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
+#include <mach/context.h>
+
extern volatile int pen_release;
+static DECLARE_COMPLETION(cpu_killed);
+
static inline void platform_do_lowpower(unsigned int cpu)
{
flush_cache_all();
- /* we put the platform to just WFI */
for (;;) {
- __asm__ __volatile__("dsb\n\t" "wfi\n\t"
- : : : "memory");
+
+ context_varm_save_core();
+ context_save_cpu_registers();
+
+ context_save_to_sram_and_wfi(false);
+
+ context_restore_cpu_registers();
+ context_varm_restore_core();
+
if (pen_release == cpu_logical_map(cpu)) {
/*
* OK, proper wakeup, we're done
@@ -36,7 +47,7 @@ static inline void platform_do_lowpower(unsigned int cpu)
int platform_cpu_kill(unsigned int cpu)
{
- return 1;
+ return wait_for_completion_timeout(&cpu_killed, 5000);
}
/*
@@ -46,6 +57,19 @@ int platform_cpu_kill(unsigned int cpu)
*/
void platform_cpu_die(unsigned int cpu)
{
+#ifdef DEBUG
+ unsigned int this_cpu = hard_smp_processor_id();
+
+ if (cpu != this_cpu) {
+ printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
+ this_cpu, cpu);
+ BUG();
+ }
+#endif
+
+ printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+ complete(&cpu_killed);
+
/* directly enter low power state, skipping secure registers */
platform_do_lowpower(cpu);
}
diff --git a/arch/arm/mach-ux500/hwmem-int.c b/arch/arm/mach-ux500/hwmem-int.c
new file mode 100644
index 00000000000..e3fecb8c354
--- /dev/null
+++ b/arch/arm/mach-ux500/hwmem-int.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Hardware memory driver integration
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/hwmem.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+/* CONA API */
+void *cona_create(const char *name, phys_addr_t region_paddr,
+ size_t region_size);
+void *cona_alloc(void *instance, size_t size);
+void cona_free(void *instance, void *alloc);
+phys_addr_t cona_get_alloc_paddr(void *alloc);
+void *cona_get_alloc_kaddr(void *instance, void *alloc);
+size_t cona_get_alloc_size(void *alloc);
+
+struct hwmem_mem_type_struct *hwmem_mem_types;
+unsigned int hwmem_num_mem_types;
+
+static phys_addr_t hwmem_paddr;
+static size_t hwmem_size;
+
+static phys_addr_t hwmem_prot_paddr;
+static size_t hwmem_prot_size;
+
+static int __init parse_hwmem_prot_param(char *p)
+{
+
+ hwmem_prot_size = memparse(p, &p);
+
+ if (*p != '@')
+ goto no_at;
+
+ hwmem_prot_paddr = memparse(p + 1, &p);
+
+ return 0;
+
+no_at:
+ hwmem_prot_size = 0;
+
+ return -EINVAL;
+}
+early_param("hwmem_prot", parse_hwmem_prot_param);
+
+static int __init parse_hwmem_param(char *p)
+{
+ hwmem_size = memparse(p, &p);
+
+ if (*p != '@')
+ goto no_at;
+
+ hwmem_paddr = memparse(p + 1, &p);
+
+ return 0;
+
+no_at:
+ hwmem_size = 0;
+
+ return -EINVAL;
+}
+early_param("hwmem", parse_hwmem_param);
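
Both regions are passed on the kernel command line in <size>@<physaddr> form, which is what the memparse() calls above expect. A purely illustrative example (addresses and sizes are placeholders, not taken from this patch):

    hwmem=32M@0x1F000000 hwmem_prot=8M@0x22000000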
+
+static int __init setup_hwmem(void)
+{
+ static const unsigned int NUM_MEM_TYPES = 3;
+
+ int ret;
+
+ if (hwmem_paddr != PAGE_ALIGN(hwmem_paddr) ||
+ hwmem_size != PAGE_ALIGN(hwmem_size) || hwmem_size == 0) {
+ printk(KERN_WARNING "HWMEM: hwmem_paddr !="
+ " PAGE_ALIGN(hwmem_paddr) || hwmem_size !="
+ " PAGE_ALIGN(hwmem_size) || hwmem_size == 0\n");
+ return -ENOMSG;
+ }
+
+ hwmem_mem_types = kzalloc(sizeof(struct hwmem_mem_type_struct) *
+ NUM_MEM_TYPES, GFP_KERNEL);
+ if (hwmem_mem_types == NULL)
+ return -ENOMEM;
+
+ hwmem_mem_types[0].id = HWMEM_MEM_SCATTERED_SYS;
+ hwmem_mem_types[0].allocator_api.alloc = cona_alloc;
+ hwmem_mem_types[0].allocator_api.free = cona_free;
+ hwmem_mem_types[0].allocator_api.get_alloc_paddr =
+ cona_get_alloc_paddr;
+ hwmem_mem_types[0].allocator_api.get_alloc_kaddr =
+ cona_get_alloc_kaddr;
+ hwmem_mem_types[0].allocator_api.get_alloc_size = cona_get_alloc_size;
+ hwmem_mem_types[0].allocator_instance = cona_create("hwmem",
+ hwmem_paddr, hwmem_size);
+ if (IS_ERR(hwmem_mem_types[0].allocator_instance)) {
+ ret = PTR_ERR(hwmem_mem_types[0].allocator_instance);
+ goto hwmem_ima_init_failed;
+ }
+
+ hwmem_mem_types[1] = hwmem_mem_types[0];
+ hwmem_mem_types[1].id = HWMEM_MEM_CONTIGUOUS_SYS;
+
+ hwmem_mem_types[2] = hwmem_mem_types[1];
+ hwmem_mem_types[2].id = HWMEM_MEM_PROTECTED_SYS;
+
+ if (hwmem_prot_size > 0) {
+ hwmem_mem_types[2].allocator_instance = cona_create("hwmem_prot",
+ hwmem_prot_paddr, hwmem_prot_size);
+ if (IS_ERR(hwmem_mem_types[2].allocator_instance)) {
+ ret = PTR_ERR(hwmem_mem_types[2].allocator_instance);
+ goto hwmem_ima_init_failed;
+ }
+ }
+
+ hwmem_num_mem_types = NUM_MEM_TYPES;
+
+ return 0;
+
+hwmem_ima_init_failed:
+ kfree(hwmem_mem_types);
+
+ return ret;
+}
+arch_initcall_sync(setup_hwmem);
+
+enum hwmem_alloc_flags cachi_get_cache_settings(
+ enum hwmem_alloc_flags requested_cache_settings)
+{
+ static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT |
+ HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY;
+
+ enum hwmem_alloc_flags cache_settings;
+
+ if (!(requested_cache_settings & CACHE_ON_FLAGS_MASK) &&
+ requested_cache_settings & (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED | HWMEM_ALLOC_HINT_WRITE_COMBINE))
+ /*
+ * We never use uncached as it's extremely slow and there is
+ * no scenario where it would be better than buffered memory.
+ */
+ return HWMEM_ALLOC_HINT_WRITE_COMBINE;
+
+ /*
+ * The user has specified cached or nothing at all, both are treated as
+ * cached.
+ */
+ cache_settings = (requested_cache_settings &
+ ~(HWMEM_ALLOC_HINT_UNCACHED |
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY |
+ HWMEM_ALLOC_HINT_CACHE_NAOW)) |
+ HWMEM_ALLOC_HINT_WRITE_COMBINE | HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
+ if (!(cache_settings & (HWMEM_ALLOC_HINT_CACHE_WB |
+ HWMEM_ALLOC_HINT_CACHE_WT)))
+ cache_settings |= HWMEM_ALLOC_HINT_CACHE_WB;
+ /*
+	 * On ARMv7 "alloc on write" is just a hint, so we need to assume the
+	 * worst case, i.e. "alloc on write". We would however like to remember
+	 * the requested "alloc on write" setting so that we can pass it on to
+	 * the hardware; we use the reserved bit in the alloc flags to do that.
+ */
+ if (requested_cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
+ cache_settings |= HWMEM_ALLOC_RESERVED_CHI;
+ else
+ cache_settings &= ~HWMEM_ALLOC_RESERVED_CHI;
+
+ return cache_settings;
+}
+
+void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings,
+ pgprot_t *pgprot)
+{
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHE_WT)
+ *pgprot = __pgprot_modify(*pgprot, L_PTE_MT_MASK,
+ L_PTE_MT_WRITETHROUGH);
+ else {
+ if (cache_settings & HWMEM_ALLOC_RESERVED_CHI)
+ *pgprot = __pgprot_modify(*pgprot,
+ L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC);
+ else
+ *pgprot = __pgprot_modify(*pgprot,
+ L_PTE_MT_MASK, L_PTE_MT_WRITEBACK);
+ }
+ } else {
+ *pgprot = pgprot_writecombine(*pgprot);
+ }
+}
diff --git a/arch/arm/mach-ux500/hwreg.c b/arch/arm/mach-ux500/hwreg.c
new file mode 100644
index 00000000000..f35472e3ebf
--- /dev/null
+++ b/arch/arm/mach-ux500/hwreg.c
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ *
+ * Author: Etienne CARRIERE <etienne.carriere@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * HWREG: debug-purpose module that maps the declared I/O ranges and provides
+ * read/write access to them through debugfs entries.
+ *
+ * HWREG 32bit DB8500 v2.0 register access
+ * =======================================
+ *
+ * 32bit read:
+ * # echo <addr> > <debugfs>/mem/reg-addr
+ * # cat <debugfs>/mem/reg-val
+ *
+ * 32bit write:
+ * # echo <addr> > <debugfs>/mem/reg-addr
+ * # echo <value> > <debugfs>/mem/reg-val
+ *
+ * <addr> 0x-prefixed hexadecimal
+ * <value> decimal or 0x-prefixed hexadecimal
+ *
+ * HWREG DB8500 formatted read/write access
+ * =======================================
+ *
+ * Read: read data, data>>SHIFT, data&=MASK, output data
+ * [0xABCDEF98] shift=12 mask=0xFFF => 0x00000CDE
+ * Write: read data, data &= ~(MASK<<SHIFT), data |= (VALUE<<SHIFT), write data
+ * [0xABCDEF98] shift=12 mask=0xFFF value=0x123 => [0xAB123F98]
+ *
+ * Usage:
+ * # echo "CMD [OPTIONS] ADDRESS [VALUE]" > $debugfs/mem/hwreg
+ *
+ * CMD read read access
+ * write write access
+ *
+ * ADDRESS target reg physical addr (0x-hexa)
+ *
+ * VALUE (write) value to be updated
+ *
+ * OPTIONS
+ * -d|-dec (read) output in decimal
+ * -h|-hex (read) output in 0x-hexa (default)
+ * -l|-w|-b 32bit (default), 16bit or 8bit reg access
+ * -m|-mask MASK 0x-hexa mask (default 0xFFFFFFFF)
+ * -s|-shift SHIFT bit shift value (read: right, write: left)
+ * -o|-offset OFFSET address offset to add to ADDRESS value
+ *
+ * Warning: bit shift operation is applied to bit-mask.
+ * Warning: bit shift direction depends on read or write command.
+ *
+ * Examples:
+ *
+ * before: [*ADDRESS = 0xABCDEF98]
+ * # echo read -h -mask 0xFFF -shift 12 ADDRESS > hwreg
+ * # cat hwreg
+ * 0x00000CDE
+ * # echo write -h -mask 0xFFF -shift 12 ADDRESS 0x123 > hwreg
+ * # cat hwreg
+ * 0x00000123
+ * after [*ADDRESS = 0xAB123F98]
+ *
+ * before: [*ADDRESS = 0xABCDEF98]
+ * # echo read -h -mask 0x00F0F000 ADDRESS > hwreg
+ * # cat hwreg
+ * 0x00C0E000
+ * # echo write -h -mask 0x00F0F000 ADDRESS 0x12345678 > hwreg
+ * # cat hwreg
+ * 0x00305000
+ * after [*ADDRESS = 0xAB3D5F98]
+ *
+ * Read DB8500 version (full ID, chip ID, chip version ID):
+ *
+ * echo read 0x9001DBF4 > hwreg
+ * cat hwreg
+ * echo read -m 0xFFFF -s 8 0x9001DBF4 > hwreg
+ * cat hwreg
+ * echo read -m 0xFF -s 0 0x9001DBF4 > hwreg
+ * cat hwreg
+ *
+ * Read and Enable/Disable I2C PRCMU clock:
+ *
+ * printf "I2CCLK = " && echo read -m 1 -s 8 0x80157520 > hwreg
+ * cat /sys/kernel/debug/mem/hwreg
+ * printf "I2CCLK off" && echo write -m 1 -s 8 0x80157518 1 > hwreg
+ * printf "I2CCLK on" && echo write -m 1 -s 8 0x80157510 1 > hwreg
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+
+/*
+ * temporary definitions
+ * The following declarations are to be removed
+ * when kernel/arch/arm/mach-ux500/include/mach/db8500-regs.h is up-to-date
+ */
+
+/* DDR-SDRAM chip-select 0 (0x0000 0000 : 0x1FFF FFFF) */
+#ifndef U8500_SCU_CD_R4_BASE
+#define U8500_SCU_CD_R4_BASE 0x17c40000
+#endif
+
+#ifndef U8500_SCU_AD_R4_BASE
+#define U8500_SCU_AD_R4_BASE 0x17d40000
+#endif
+
+#ifndef U8500_HSI2CMODEMR4_BASE
+#define U8500_HSI2CMODEMR4_BASE 0x17e02000
+#endif
+/* End of temporary definitions */
+
+static struct dentry *hwreg_debugfs_dir;
+
+/* 32bit read/write resources */
+static u32 debug_address; /* shared: single read/write access */
+
+/* hwreg entry resources */
+struct hwreg_cfg {
+ uint addr; /* target physical addr to access */
+ uint fmt; /* format */
+ uint mask; /* read/write mask, applied before any bit shift */
+	int shift;	/* bit shift (read: right shift, write: left shift) */
+};
+#define REG_FMT_DEC(c) ((c)->fmt & 0x1) /* bit 0: 0=hexa, 1=dec */
+#define REG_FMT_HEX(c) (!REG_FMT_DEC(c)) /* bit 0: 0=hexa, 1=dec */
+#define REG_FMT_32B(c) (((c)->fmt & 0x6) == 0x0) /* bit[2:1]=0 => 32b access */
+#define REG_FMT_16B(c) (((c)->fmt & 0x6) == 0x2) /* bit[2:1]=1 => 16b access */
+#define REG_FMT_8B(c) (((c)->fmt & 0x6) == 0x4) /* bit[2:1]=2 => 8b access */
+
+static struct hwreg_cfg hwreg_cfg = {
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: 32bit access, hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+};
+
+/* HWREG guts: mapping table */
+
+struct hwreg_io_range {
+ u32 base;
+ u32 size;
+ u8 *addr;
+};
+
+/*
+ * HWREG guts: mapping table
+ */
+static struct hwreg_io_range hwreg_io_map[] = {
+ /* Periph1 Peripherals */
+ {.base = U8500_PER1_BASE, .size = 0x10000},
+ /* Periph2 Peripherals */
+ {.base = U8500_PER2_BASE, .size = 0x10000},
+ /* Periph3 Peripherals */
+ {.base = U8500_PER3_BASE, .size = 0x10000},
+ /* Periph4 Peripherals */
+ {.base = U8500_PER4_BASE, .size = 0x70000},
+	/* Periph5 Peripherals */
+ {.base = U8500_PER5_BASE, .size = 0x20000},
+ /* Periph6 Peripherals */
+ {.base = U8500_PER6_BASE, .size = 0x10000},
+ /*
+ * Snoop Control Unit, A9 Private interrupt IF,
+ * A9 private peripherals, Level-2 Cache Configuration registers,
+ * and some reserved area
+ */
+ {.base = U8500_SCU_BASE, .size = 0x4000},
+
+ /* DISPLAY Ctrl. configuration registers */
+ {.base = U8500_MCDE_BASE, .size = SZ_4K},
+
+ /* DSI1 link registers */
+ {.base = U8500_DSI_LINK1_BASE, .size = SZ_4K},
+
+ /* DSI2 link registers */
+ {.base = U8500_DSI_LINK2_BASE, .size = SZ_4K},
+
+ /* DSI3 link registers */
+ {.base = U8500_DSI_LINK3_BASE, .size = SZ_4K},
+
+ /* DMA Ctrl. configuration registers (base address changed in V1) */
+ {.base = U8500_DMA_BASE, .size = SZ_4K},
+
+ /* 0xB7A00000 -> 0xB7E04000: Modem I2C */
+ {.base = U8500_MODEM_I2C, .size = 0x404000},
+
+ /* 0xA0390000 -> 0xA039FFFF: SBAG configuration registers */
+ {.base = U8500_SBAG_BASE, .size = SZ_4K},
+
+ /* 0xA0300000 -> 0xA031FFFF: SGA configuration registers */
+ {.base = U8500_SGA_BASE, .size = 0x10000},
+
+ /* 0xA0200000 -> 0xA02FFFFF: Smart Imaging Acc. Data Memory space (SIA) */
+ {.base = U8500_SIA_BASE, .size = 0x60000},
+
+ /* 0xA0100000 -> 0xA01FFFFF: Smart Video Acc. Data Memory space (SVA) */
+ {.base = U8500_SVA_BASE, .size = 0x60000},
+
+ /* 0x81000000 -> 0x8103FFFF: Main ICN Crossbar configuration registers */
+ {.base = U8500_ICN_BASE, .size = 0x2000},
+
+ /* 0x80140000 -> 0x8014FFFF: HSEM (Semaphores) configuration */
+ {.base = U8500_HSEM_BASE, .size = SZ_4K},
+
+ /* 0x80130000 -> 0x8013FFFF: B2R2 configuration registers */
+ {.base = U8500_B2R2_BASE, .size = SZ_4K},
+
+ /* 0x80100000 -> 0x8010FFFF: STM */
+ {.base = U8500_STM_BASE, .size = 0x10000},
+
+ /* High part of embedded boot ROM */
+ {.base = U8500_ASIC_ID_BASE, .size = SZ_4K},
+
+ /* 0x17C4 0000 : 0x17C4 007C */
+ {.base = U8500_SCU_CD_R4_BASE, .size = SZ_4K},
+
+ /* 0x17D4 0000 : 0x17D4 041C */
+ {.base = U8500_SCU_AD_R4_BASE, .size = SZ_4K},
+
+ /* 0x17E0 2000 : 0x17E0 2FFC */
+ {.base = U8500_HSI2CMODEMR4_BASE, .size = SZ_4K},
+
+ {.base = 0, .size = 0, },
+
+};
+
+static void hwreg_io_init(void)
+{
+ int i;
+
+ for (i = 0; hwreg_io_map[i].base; ++i) {
+ hwreg_io_map[i].addr = ioremap(hwreg_io_map[i].base,
+ hwreg_io_map[i].size);
+ if (!hwreg_io_map[i].addr)
+ printk(KERN_WARNING
+ "%s: ioremap for %d (%08x) failed\n",
+ __func__, i, hwreg_io_map[i].base);
+ }
+}
+
+static void hwreg_io_exit(void)
+{
+ int i;
+
+ for (i = 0; hwreg_io_map[i].base; ++i)
+ if (hwreg_io_map[i].addr)
+ iounmap(hwreg_io_map[i].addr);
+}
+
+static void *hwreg_io_ptov(u32 phys)
+{
+ int i;
+
+ for (i = 0; hwreg_io_map[i].base; ++i) {
+ u32 base = hwreg_io_map[i].base;
+ u32 size = hwreg_io_map[i].size;
+ u8 *addr = hwreg_io_map[i].addr;
+
+ if (phys < base || phys >= base + size)
+ continue;
+
+ if (addr)
+ return addr + phys - base;
+
+ break;
+ }
+
+ return NULL;
+}
+
+
+/*
+ * HWREG 32bit DB8500 register read/write access debugfs part
+ */
+
+static int hwreg_address_print(struct seq_file *s, void *p)
+{
+ return seq_printf(s, "0x%08X\n", debug_address);
+}
+
+static int hwreg_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hwreg_address_print, inode->i_private);
+}
+
+static ssize_t hwreg_address_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ int err;
+ unsigned long user_address;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &user_address);
+
+ if (err)
+ return err;
+
+ if (hwreg_io_ptov(user_address) == NULL)
+ return -EADDRNOTAVAIL;
+
+ debug_address = user_address;
+ return count;
+}
+
+static int hwreg_value_print(struct seq_file *s, void *p)
+{
+ void *ptr;
+
+ ptr = hwreg_io_ptov(debug_address);
+ if (ptr == NULL)
+ return -EADDRNOTAVAIL;
+ seq_printf(s, "0x%X\n", readl(ptr));
+ return 0;
+}
+
+static int hwreg_value_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hwreg_value_print, inode->i_private);
+}
+
+static ssize_t hwreg_value_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ int err;
+ unsigned long user_val;
+ void *ptr;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &user_val);
+
+ if (err)
+ return err;
+
+ if ((ptr = hwreg_io_ptov(debug_address)) == NULL)
+ return -EFAULT;
+ writel(user_val, ptr);
+ return count;
+}
+
+static const struct file_operations hwreg_address_fops = {
+ .open = hwreg_address_open,
+ .write = hwreg_address_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+static const struct file_operations hwreg_value_fops = {
+ .open = hwreg_value_open,
+ .write = hwreg_value_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/* 'map' read entry: display current HWREG IO mapping table */
+static int hwreg_map_print(struct seq_file *s, void *p)
+{
+ int err, i;
+ for (i = 0; hwreg_io_map[i].base; ++i) {
+ err = seq_printf(s, "%d: 0x%08X => 0x%08X\n",
+ i, hwreg_io_map[i].base,
+ hwreg_io_map[i].base+hwreg_io_map[i].size);
+ if (err < 0)
+ return -ENOMEM;
+ }
+ return 0;
+}
+static int hwreg_map_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hwreg_map_print, inode->i_private);
+}
+
+static const struct file_operations hwreg_map_fops = {
+ .open = hwreg_map_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * HWREG DB8500 formatted routines
+ */
+
+static int hwreg_print(struct seq_file *s, void *d)
+{
+ struct hwreg_cfg *c = (struct hwreg_cfg *) s->private;
+ void *p;
+ uint v;
+
+ if ((c == NULL) || ((p = hwreg_io_ptov(c->addr)) == NULL))
+ return -EADDRNOTAVAIL;
+
+ v = (uint) (REG_FMT_32B(c) ? readl(p) : REG_FMT_16B(c) ? readw(p) : readb(p));
+ v = (c->shift >= 0) ? v >> c->shift : v << (-c->shift);
+ v = v & c->mask;
+
+ if (REG_FMT_DEC(c))
+ seq_printf(s, "%d\n", v);
+ else if (REG_FMT_32B(c))
+ seq_printf(s, "0x%08X\n", v);
+	else if (REG_FMT_16B(c))
+ seq_printf(s, "0x%04X\n", v);
+ else
+ seq_printf(s, "0x%02X\n", v);
+ return 0;
+}
+
+static int hwreg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hwreg_print, inode->i_private);
+}
+
+/*
+ * Return the length of an ASCII numerical value, or 0 if the string is not a
+ * numerical value. The string shall start at the value's first char and may
+ * only be terminated by '\0', space or newline chars.
+ * The value can be decimal or hexadecimal (prefixed 0x or 0X).
+ */
+static int strval_len(char *b)
+{
+ char *s = b;
+ if ((*s == '0') && ((*(s+1) == 'x') || (*(s+1) == 'X'))) {
+ s += 2;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isxdigit(*s))
+ return 0;
+ }
+ } else {
+ if (*s == '-')
+ s++;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isdigit(*s))
+ return 0;
+ }
+ }
+ return (int) (s-b);
+}
+
+/*
+ * parse hwreg input data.
+ * update global hwreg_cfg only if input data syntax is ok.
+ */
+static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg)
+{
+ uint write, val = 0, offset = 0;
+ struct hwreg_cfg loc = {
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: 32bit access, hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+ };
+
+ /* read or write ? */
+ if (!strncmp(b, "read ", 5)) {
+ write = 0;
+ b += 5;
+ } else if (!strncmp(b, "write ", 6)) {
+ write = 1;
+ b += 6;
+ } else {
+ return -EINVAL;
+ }
+
+ /* OPTIONS -l|-w|-b -s -m -o */
+ while ((*b == ' ') || (*b == '-')) {
+ if (*(b-1) != ' ') {
+ b++;
+ continue;
+ }
+ if ((!strncmp(b, "-d ", 3)) || (!strncmp(b, "-dec ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt |= (1<<0);
+ } else if ((!strncmp(b, "-h ", 3)) || (!strncmp(b, "-hex ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt &= ~(1<<0);
+ } else if ((!strncmp(b, "-m ", 3)) || (!strncmp(b, "-mask ", 6))) {
+ b += (*(b+2) == ' ') ? 3 : 6;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.mask = simple_strtoul(b, &b, 0);
+ } else if ((!strncmp(b, "-s ", 3)) || (!strncmp(b, "-shift ", 7))) {
+ b += (*(b+2) == ' ') ? 3 : 7;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.shift = simple_strtol(b, &b, 0);
+
+ } else if ((!strncmp(b, "-o ", 3)) || (!strncmp(b, "-offset ", 8))) {
+ b += (*(b+2) == ' ') ? 3 : 8;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ offset = simple_strtol(b, &b, 0);
+ } else if (!strncmp(b, "-l ", 3)) {
+ b += 3;
+ loc.fmt = (loc.fmt & ~(3<<1)) | (0<<1);
+ } else if (!strncmp(b, "-w ", 3)) {
+ b += 3;
+ loc.fmt = (loc.fmt & ~(3<<1)) | (1<<1);
+ } else if (!strncmp(b, "-b ", 3)) {
+ b += 3;
+ loc.fmt = (loc.fmt & ~(3<<1)) | (2<<1);
+ } else {
+ return -EINVAL;
+ }
+ }
+ /* get arg ADDRESS */
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.addr = simple_strtoul(b, &b, 0);
+ loc.addr += offset;
+ if (hwreg_io_ptov(loc.addr) == NULL)
+ return -EINVAL;
+
+ if (write) {
+ while (*b == ' ')
+ b++; /* skip spaces up to arg VALUE */
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ val = simple_strtoul(b, &b, 0);
+ }
+
+ /* args are ok, update target cfg (mainly for read) */
+ *cfg = loc;
+
+#ifdef DEBUG
+ printk(KERN_INFO "HWREG request: %s %d-bit reg, %s, addr=0x%08X, "
+ "mask=0x%X, shift=%d value=0x%X\n",
+ (write) ? "write" : "read",
+ REG_FMT_32B(cfg) ? 32 : REG_FMT_16B(cfg) ? 16 : 8,
+ REG_FMT_DEC(cfg) ? "decimal" : "hexa",
+ cfg->addr, cfg->mask, cfg->shift, val);
+#endif
+
+ if (write) {
+ void *p = hwreg_io_ptov(cfg->addr);
+ uint d = (uint) (REG_FMT_32B(cfg)) ? readl(p) :
+ (REG_FMT_16B(cfg)) ? readw(p) : readb(p);
+
+ if (cfg->shift>=0) {
+ d &= ~(cfg->mask << (cfg->shift));
+ val = (val & cfg->mask) << (cfg->shift);
+ } else {
+ d &= ~(cfg->mask >> (-cfg->shift));
+ val = (val & cfg->mask) >> (-cfg->shift);
+ }
+ val = val | d;
+
+ /* read reg, reset mask field and update value bit-field */
+ if (REG_FMT_32B(cfg))
+ writel(val, p);
+ else if (REG_FMT_16B(cfg))
+ writew(val, p);
+ else
+ writeb(val, p);
+ }
+ return 0;
+}
+
+static ssize_t hwreg_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[128];
+ int buf_size, ret;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* get args and process */
+ ret = hwreg_common_write(buf, &hwreg_cfg);
+ return (ret) ? ret : buf_size;
+}
+
+static const struct file_operations hwreg_fops = {
+ .open = hwreg_open,
+ .write = hwreg_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * hwreg module init/cleanup
+ */
+static int __init hwreg_initialize(void)
+{
+ static struct dentry *file;
+ hwreg_io_init();
+
+ hwreg_debugfs_dir = debugfs_create_dir("mem", NULL);
+ if (!hwreg_debugfs_dir)
+ goto debugfs_err;
+
+ file = debugfs_create_file("reg-addr",
+ (S_IRUGO | S_IWUGO), hwreg_debugfs_dir,
+ NULL, &hwreg_address_fops);
+ if (!file)
+ goto debugfs_err;
+ file = debugfs_create_file("reg-val",
+ (S_IRUGO | S_IWUGO), hwreg_debugfs_dir,
+ NULL, &hwreg_value_fops);
+ if (!file)
+ goto debugfs_err;
+ file = debugfs_create_file("reg-map",
+ (S_IRUGO),
+ hwreg_debugfs_dir, NULL, &hwreg_map_fops);
+ if (!file)
+ goto debugfs_err;
+ file = debugfs_create_file("hwreg",
+ (S_IRUGO),
+ hwreg_debugfs_dir, &hwreg_cfg, &hwreg_fops);
+ if (!file)
+ goto debugfs_err;
+ return 0;
+
+debugfs_err:
+ if (hwreg_debugfs_dir)
+ debugfs_remove_recursive(hwreg_debugfs_dir);
+ printk(KERN_ERR "hwreg: failed to register debugfs entries.\n");
+ return -1;
+}
+
+static void __exit hwreg_finalize(void)
+{
+ debugfs_remove_recursive(hwreg_debugfs_dir);
+ hwreg_io_exit();
+}
+
+module_init(hwreg_initialize);
+module_exit(hwreg_finalize);
+
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_DESCRIPTION("DB8500 HW registers access through debugfs");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-ux500/include/mach/ab8500_gpadc.h b/arch/arm/mach-ux500/include/mach/ab8500_gpadc.h
new file mode 100644
index 00000000000..4289dcfc0aa
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/ab8500_gpadc.h
@@ -0,0 +1,36 @@
+/*
+ * ab8500_gpadc.h - AB8500 GPADC driver interface
+ *
+ * Copyright (C) 2010 ST-Ericsson SA
+ * Licensed under GPLv2.
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#ifndef _AB8500_GPADC_H
+#define _AB8500_GPADC_H
+
+/* GPADC source: from datasheet (ADCSwSel[4:0] in GPADCCtrl2) */
+#define BAT_CTRL 0x01
+#define ACC_DETECT1 0x04
+#define ACC_DETECT2 0x05
+#define MAIN_BAT_V 0x08
+#define BK_BAT_V 0x0C
+#define VBUS_V 0x09
+#define MAIN_CHARGER_V 0x03
+#define MAIN_CHARGER_C 0x0A
+#define USB_CHARGER_C 0x0B
+#define DIE_TEMP 0x0D
+#define BTEMP_BALL 0x02
+
+struct ab8500_gpadc_device_info {
+ struct completion ab8500_gpadc_complete;
+ struct mutex ab8500_gpadc_lock;
+#if defined(CONFIG_REGULATOR)
+ struct regulator *regu;
+#endif
+};
+
+int ab8500_gpadc_conversion(int input);
+
+#endif /* _AB8500_GPADC_H */
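
A caller simply passes one of the channel defines above to the conversion routine; a minimal, hypothetical sketch (the error handling convention of the return value is an assumption, not stated in this header):

#include <mach/ab8500_gpadc.h>

static int read_main_battery_voltage(void)
{
	/* raw GPADC conversion of the main battery voltage channel */
	return ab8500_gpadc_conversion(MAIN_BAT_V);
}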
diff --git a/arch/arm/mach-ux500/include/mach/abx500-accdet.h b/arch/arm/mach-ux500/include/mach/abx500-accdet.h
new file mode 100644
index 00000000000..cdd78cd7d0c
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/abx500-accdet.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright ST-Ericsson 2011.
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com> for ST Ericsson.
+ * Licensed under GPLv2.
+ */
+
+#ifndef _ABx500_ACCDET_H
+#define _ABx500_ACCDET_H
+
+/*
+* Debounce times for AccDet1 input
+* @0x880 [2:0]
+*/
+#define ACCDET1_DB_0ms 0x00
+#define ACCDET1_DB_10ms 0x01
+#define ACCDET1_DB_20ms 0x02
+#define ACCDET1_DB_30ms 0x03
+#define ACCDET1_DB_40ms 0x04
+#define ACCDET1_DB_50ms 0x05
+#define ACCDET1_DB_60ms 0x06
+#define ACCDET1_DB_70ms 0x07
+
+/*
+* Voltage threshold for AccDet1 input
+* @0x880 [6:3]
+*/
+#define ACCDET1_TH_1100mV 0x40
+#define ACCDET1_TH_1200mV 0x48
+#define ACCDET1_TH_1300mV 0x50
+#define ACCDET1_TH_1400mV 0x58
+#define ACCDET1_TH_1500mV 0x60
+#define ACCDET1_TH_1600mV 0x68
+#define ACCDET1_TH_1700mV 0x70
+#define ACCDET1_TH_1800mV 0x78
+
+/*
+* Voltage threshold for AccDet21 input
+* @0x881 [3:0]
+*/
+#define ACCDET21_TH_300mV 0x00
+#define ACCDET21_TH_400mV 0x01
+#define ACCDET21_TH_500mV 0x02
+#define ACCDET21_TH_600mV 0x03
+#define ACCDET21_TH_700mV 0x04
+#define ACCDET21_TH_800mV 0x05
+#define ACCDET21_TH_900mV 0x06
+#define ACCDET21_TH_1000mV 0x07
+#define ACCDET21_TH_1100mV 0x08
+#define ACCDET21_TH_1200mV 0x09
+#define ACCDET21_TH_1300mV 0x0a
+#define ACCDET21_TH_1400mV 0x0b
+#define ACCDET21_TH_1500mV 0x0c
+#define ACCDET21_TH_1600mV 0x0d
+#define ACCDET21_TH_1700mV 0x0e
+#define ACCDET21_TH_1800mV 0x0f
+
+/*
+* Voltage threshold for AccDet22 input
+* @0x881 [7:4]
+*/
+#define ACCDET22_TH_300mV 0x00
+#define ACCDET22_TH_400mV 0x10
+#define ACCDET22_TH_500mV 0x20
+#define ACCDET22_TH_600mV 0x30
+#define ACCDET22_TH_700mV 0x40
+#define ACCDET22_TH_800mV 0x50
+#define ACCDET22_TH_900mV 0x60
+#define ACCDET22_TH_1000mV 0x70
+#define ACCDET22_TH_1100mV 0x80
+#define ACCDET22_TH_1200mV 0x90
+#define ACCDET22_TH_1300mV 0xa0
+#define ACCDET22_TH_1400mV 0xb0
+#define ACCDET22_TH_1500mV 0xc0
+#define ACCDET22_TH_1600mV 0xd0
+#define ACCDET22_TH_1700mV 0xe0
+#define ACCDET22_TH_1800mV 0xf0
+
+/*
+* Voltage threshold for AccDet1 input
+* @0x880 [6:3]
+*/
+#define ACCDET1_TH_300mV 0x00
+#define ACCDET1_TH_400mV 0x01
+#define ACCDET1_TH_500mV 0x02
+#define ACCDET1_TH_600mV 0x03
+#define ACCDET1_TH_700mV 0x04
+#define ACCDET1_TH_800mV 0x05
+#define ACCDET1_TH_900mV 0x06
+#define ACCDET1_TH_1000mV 0x07
+
+#define MAX_DET_COUNT 10
+#define MAX_VOLT_DIFF 30
+#define MIN_MIC_POWER -100
+
+/**
+ * struct abx500_accdet_platform_data - AV Accessory detection specific
+ * platform data
+ * @btn_keycode Keycode to be sent when accessory button is pressed.
+ * @accdet1_dbth Debounce time + voltage threshold for accdet 1 input.
+ * @accdet2122_th Voltage thresholds for accdet21 and accdet22 inputs.
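+ * @video_ctrl_gpio GPIO used to control the accessory audio/video switch.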
+ * @is_detection_inverted Whether the accessory insert/removal, button
+ * press/release irq's are inverted.
+ */
+struct abx500_accdet_platform_data {
+ int btn_keycode;
+ u8 accdet1_dbth;
+ u8 accdet2122_th;
+ unsigned int video_ctrl_gpio;
+ bool is_detection_inverted;
+};
+
+/* Enumerations */
+
+/**
+ * @JACK_TYPE_UNSPECIFIED Not known whether any accessories are connected.
+ * @JACK_TYPE_DISCONNECTED No accessories connected.
+ * @JACK_TYPE_CONNECTED Accessory is connected but functionality was unable to
+ * detect the actual type. In this mode, possible button events are reported.
+ * @JACK_TYPE_HEADPHONE Headphone type of accessory (spkrs only) connected
+ * @JACK_TYPE_HEADSET Headset type of accessory (mic+spkrs) connected
+ * @JACK_TYPE_UNSUPPORTED_HEADSET Unsupported headset type of accessory connected
+ * @JACK_TYPE_CARKIT Carkit type of accessory connected
+ * @JACK_TYPE_OPENCABLE Open cable connected
+ * @JACK_TYPE_CVIDEO CVideo type of accessory connected.
+ */
+enum accessory_jack_type {
+ JACK_TYPE_UNSPECIFIED,
+ JACK_TYPE_DISCONNECTED,
+ JACK_TYPE_CONNECTED,
+ JACK_TYPE_HEADPHONE,
+ JACK_TYPE_HEADSET,
+ JACK_TYPE_UNSUPPORTED_HEADSET,
+ JACK_TYPE_CARKIT,
+ JACK_TYPE_OPENCABLE,
+ JACK_TYPE_CVIDEO
+};
+
+/**
+ * @BUTTON_UNK Button state not known
+ * @BUTTON_PRESSED Button "down"
+ * @BUTTON_RELEASED Button "up"
+ */
+enum accessory_button_state {
+ BUTTON_UNK,
+ BUTTON_PRESSED,
+ BUTTON_RELEASED
+};
+
+/**
+ * @PLUG_IRQ Interrupt gen. when accessory plugged in
+ * @UNPLUG_IRQ Interrupt gen. when accessory plugged out
+ * @BUTTON_PRESS_IRQ Interrupt gen. when accessory button pressed.
+ * @BUTTON_RELEASE_IRQ Interrupt gen. when accessory button released.
+ */
+enum accessory_irq {
+ PLUG_IRQ,
+ UNPLUG_IRQ,
+ BUTTON_PRESS_IRQ,
+ BUTTON_RELEASE_IRQ,
+};
+
+/**
+ * Enumerates the op. modes of the avcontrol switch
+ * @AUDIO_IN Audio input is selected
+ * @VIDEO_OUT Video output is selected
+ * @NOT_SET The av-switch control signal is disconnected.
+ */
+enum accessory_avcontrol_dir {
+ AUDIO_IN,
+ VIDEO_OUT,
+ NOT_SET,
+};
+
+/**
+ * @REGULATOR_VAUDIO v-audio regulator
+ * @REGULATOR_VAMIC1 v-amic1 regulator
+ * @REGULATOR_AVSWITCH Audio/Video select switch regulator
+ * @REGULATOR_ALL All regulators combined
+ */
+enum accessory_regulator {
+ REGULATOR_NONE = 0x0,
+ REGULATOR_VAUDIO = 0x1,
+ REGULATOR_VAMIC1 = 0x2,
+ REGULATOR_AVSWITCH = 0x4,
+ REGULATOR_ALL = 0xFF
+};
+
+/* Structures */
+
+/**
+ * Describes an interrupt
+ * @irq interrupt identifier
+ * @name name of the irq in platform data
+ * @isr interrupt service routine
+ * @registered whether we are currently registered to receive interrupts from this source.
+ */
+struct accessory_irq_descriptor {
+ enum accessory_irq irq;
+ const char *name;
+ irq_handler_t isr;
+ int registered;
+};
+
+/**
+ * Encapsulates info of single regulator.
+ * @id regulator identifier
+ * @name name of the regulator
+ * @enabled flag indicating whether regu is currently enabled.
+ * @handle regulator handle
+ */
+struct accessory_regu_descriptor {
+ enum accessory_regulator id;
+ const char *name;
+ int enabled;
+ struct regulator *handle;
+};
+
+/**
+ * Defines attributes for accessory detection operation.
+ * @typename type as string
+ * @type Type of accessory this task tests
+ * @req_det_count How many times this particular type of accessory
+ * needs to be detected in sequence in order to accept. Multidetection
+ * implemented to avoid false detections during plug-in.
+ * @meas_mv Should ACCDETECT2 input voltage be measured just before
+ * making the decision or can cached voltage be used instead.
+ * @minvol minimum voltage (mV) for decision
+ * @maxvol maximum voltage (mV) for decision
+ * @alt_minvol minimum alternative voltage (mV) for decision
+ * @alt_maxvol maximum alternative voltage (mV) for decision
+ */
+struct accessory_detect_task {
+ const char *typename;
+ enum accessory_jack_type type;
+ int req_det_count;
+ int meas_mv;
+ int minvol;
+ int maxvol;
+ int alt_minvol;
+ int alt_maxvol;
+};
+
+/**
+ * Device data, encapsulates all relevant device data structures.
+ *
+ * @pdev: pointer to platform device
+ * @pdata: Platform data
+ * @gpadc: interface for ADC data
+ * @irq_work_queue: Work queue for deferred interrupt processing
+ * @detect_work: work item to perform detection work
+ * @unplug_irq_work: work item to process unplug event
+ * @init_work: work item to process initialization work.
+ * @btn_input_dev: button input device used to report btn presses
+ * @btn_state: Current state of accessory button
+ * @jack_type: type of currently connected accessory
+ * @reported_jack_type: previously reported jack type.
+ * @jack_type_temp: temporary storage for currently connected accessory
+ * @jack_det_count: counter how many times in sequence the accessory
+ * type detection has produced same result.
+ * @total_jack_det_count: after plug-in irq, how many times detection
+ * has totally been made in order to detect the accessory type
+ * @detect_jiffies: Used to save timestamp when detection was made. Timestamp
+ * used to filter out spurious button presses that might occur during the
+ * plug-in procedure.
+ * @accdet1_th_set: flag to indicate whether accdet1 threshold and debounce
+ * times are configured
+ * @accdet2_th_set: flag to indicate whether accdet2 thresholds are configured
+ * @gpio35_dir_set: flag to indicate whether GPIO35 (VIDEOCTRL) direction
+ * has been configured.
+ * @irq_desc_norm: irq's as specified in the initial versions of ab
+ * @irq_desc_inverted: irq's inverted as seen in the latest versions of ab
+ * @no_irqs: Total number of irq's
+ * @regu_desc: Pointer to the regulator descriptors.
+ * @no_of_regu_desc: Total number of descriptors.
+ * @config_accdetect2_hw: Callback for configuring accdet2 comparator.
+ * @config_accdetect1_hw: Callback for configuring accdet1 comparator.
+ * @detect_plugged_in: Callback to detect type of accessory connected.
+ * @meas_voltage_stable: Callback to read present accdet voltage.
+ * @meas_alt_voltage_stable: Callback to read present alt accdet voltage.
+ * @config_hw_test_basic_carkit: Callback to configure hw for carkit
+ * detect.
+ * @turn_off_accdet_comparator: Callback to turn off the comparators.
+ * @turn_on_accdet_comparator: Callback to turn on the comparators.
+ * @accdet_abx500_gpadc_get: Callback to get an instance of the
+ * GPADC converter.
+ * @config_hw_test_plug_connected: Callback to configure the hw for
+ * accessory detection.
+ * @set_av_switch: Callback to configure the switch for tvout or audioout.
+ * @get_platform_data: Callback to get platform specific data.
+ */
+struct abx500_ad {
+ struct platform_device *pdev;
+ struct abx500_accdet_platform_data *pdata;
+ void *gpadc;
+ struct workqueue_struct *irq_work_queue;
+
+ struct delayed_work detect_work;
+ struct delayed_work unplug_irq_work;
+ struct delayed_work init_work;
+
+ struct input_dev *btn_input_dev;
+ enum accessory_button_state btn_state;
+
+ enum accessory_jack_type jack_type;
+ enum accessory_jack_type reported_jack_type;
+ enum accessory_jack_type jack_type_temp;
+
+ int jack_det_count;
+ int total_jack_det_count;
+
+ unsigned long detect_jiffies;
+
+ int accdet1_th_set;
+ int accdet2_th_set;
+ int gpio35_dir_set;
+
+ struct accessory_irq_descriptor *irq_desc_norm;
+ struct accessory_irq_descriptor *irq_desc_inverted;
+ int no_irqs;
+
+ struct accessory_regu_descriptor *regu_desc;
+ int no_of_regu_desc;
+
+ void (*config_accdetect2_hw)(struct abx500_ad *, int);
+ void (*config_accdetect1_hw)(struct abx500_ad *, int);
+ int (*detect_plugged_in)(struct abx500_ad *);
+ int (*meas_voltage_stable)(struct abx500_ad *);
+ int (*meas_alt_voltage_stable)(struct abx500_ad *);
+ void (*config_hw_test_basic_carkit)(struct abx500_ad *, int);
+ void (*turn_off_accdet_comparator)(struct platform_device *pdev);
+ void (*turn_on_accdet_comparator)(struct platform_device *pdev);
+ void* (*accdet_abx500_gpadc_get)(void);
+ void (*config_hw_test_plug_connected)(struct abx500_ad *dd, int enable);
+ void (*set_av_switch)(struct abx500_ad *dd,
+ enum accessory_avcontrol_dir dir);
+ struct abx500_accdet_platform_data *
+ (*get_platform_data)(struct platform_device *pdev);
+};
+
+/* Forward declarations */
+extern irqreturn_t unplug_irq_handler(int irq, void *_userdata);
+extern irqreturn_t plug_irq_handler(int irq, void *_userdata);
+extern irqreturn_t button_press_irq_handler(int irq, void *_userdata);
+extern irqreturn_t button_release_irq_handler(int irq, void *_userdata);
+extern void accessory_regulator_enable(struct abx500_ad *dd,
+ enum accessory_regulator reg);
+extern void accessory_regulator_disable(struct abx500_ad *dd,
+ enum accessory_regulator reg);
+extern void report_jack_status(struct abx500_ad *dd);
+
+#ifdef CONFIG_INPUT_AB5500_ACCDET
+extern struct abx500_ad ab5500_accessory_det_callbacks;
+#endif
+
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+extern struct abx500_ad ab8500_accessory_det_callbacks;
+#endif
+
+#endif /* _ABx500_ACCDET_H */
diff --git a/arch/arm/mach-ux500/include/mach/context.h b/arch/arm/mach-ux500/include/mach/context.h
new file mode 100644
index 00000000000..22b56351284
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/context.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+#include <linux/notifier.h>
+
+#ifdef CONFIG_UX500_CONTEXT
+
+/* Defines to be used with
+ * context_ape_notifier_register
+ */
+#define CONTEXT_APE_SAVE 0 /* APE save */
+#define CONTEXT_APE_RESTORE 1 /* APE restore */
+
+/* Defines to be used with
+ * context_arm_notifier_register
+ */
+#define CONTEXT_ARM_CORE_SAVE 0 /* Called for each ARM core */
+#define CONTEXT_ARM_CORE_RESTORE 1 /* Called for each ARM core */
+#define CONTEXT_ARM_COMMON_SAVE 2 /* Called when ARM common is saved */
+#define CONTEXT_ARM_COMMON_RESTORE 3 /* Called when ARM common is restored */
+
+int context_ape_notifier_register(struct notifier_block *nb);
+int context_ape_notifier_unregister(struct notifier_block *nb);
+
+int context_arm_notifier_register(struct notifier_block *nb);
+int context_arm_notifier_unregister(struct notifier_block *nb);
+
+void context_vape_save(void);
+void context_vape_restore(void);
+
+void context_gpio_save(void);
+void context_gpio_restore(void);
+void context_gpio_restore_mux(void);
+void context_gpio_mux_safe_switch(bool begin);
+
+void context_gic_dist_disable_unneeded_irqs(void);
+
+void context_varm_save_common(void);
+void context_varm_restore_common(void);
+
+void context_varm_save_core(void);
+void context_varm_restore_core(void);
+
+void context_save_cpu_registers(void);
+void context_restore_cpu_registers(void);
+
+void context_save_to_sram_and_wfi(bool cleanL2cache);
+
+void context_clean_l1_cache_all(void);
+void context_save_arm_registers(u32 **backup_stack);
+void context_restore_arm_registers(u32 **backup_stack);
+
+void context_save_cp15_registers(u32 **backup_stack);
+void context_restore_cp15_registers(u32 **backup_stack);
+
+void context_save_to_sram_and_wfi_internal(u32 backup_sram_storage,
+ bool cleanL2cache);
+
+/* DB specific functions in either context-db8500 or context-db5500 */
+void u8500_context_save_icn(void);
+void u8500_context_restore_icn(void);
+void u8500_context_init(void);
+
+void u5500_context_save_icn(void);
+void u5500_context_restore_icn(void);
+void u5500_context_init(void);
+
+#else
+
+static inline void context_varm_save_core(void) {}
+static inline void context_save_cpu_registers(void) {}
+static inline void context_save_to_sram_and_wfi(bool cleanL2cache) {}
+static inline void context_restore_cpu_registers(void) {}
+static inline void context_varm_restore_core(void) {}
+
+#endif
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/crypto-ux500.h b/arch/arm/mach-ux500/include/mach/crypto-ux500.h
new file mode 100644
index 00000000000..80c4620d633
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/crypto-ux500.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _CRYPTO_UX500_H
+#include <linux/dmaengine.h>
+#include <plat/ste_dma40.h>
+
+struct cryp_platform_data {
+ struct stedma40_chan_cfg mem_to_engine;
+ struct stedma40_chan_cfg engine_to_mem;
+};
+
+struct hash_platform_data {
+ void *mem_to_engine;
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+};
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/cw1200_plat.h b/arch/arm/mach-ux500/include/mach/cw1200_plat.h
new file mode 100644
index 00000000000..3a73183c9f8
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/cw1200_plat.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CW1200_PLAT_H_INCLUDED
+#define CW1200_PLAT_H_INCLUDED
+
+#include <linux/ioport.h>
+
+struct cw1200_platform_data {
+ const char *mmc_id;
+ const struct resource *irq;
+ const struct resource *reset;
+ int (*power_ctrl)(const struct cw1200_platform_data *pdata,
+ bool enable);
+ int (*clk_ctrl)(const struct cw1200_platform_data *pdata,
+ bool enable);
+ int (*prcmu_ctrl)(const struct cw1200_platform_data *pdata,
+ bool enable);
+};
+
+/* Declaration only. Should be implemented in arch/xxx/mach-yyy */
+const struct cw1200_platform_data *cw1200_get_platform_data(void);
+
+#endif /* CW1200_PLAT_H_INCLUDED */
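
The comment above leaves cw1200_get_platform_data() to board code; a hedged sketch of what a mach-ux500 board file might provide (the IRQ number and MMC id below are placeholders, not taken from this patch):

static struct resource cw1200_wlan_irq = {
	.start	= 215,			/* placeholder host-wake IRQ */
	.end	= 215,
	.flags	= IORESOURCE_IRQ,
};

static struct cw1200_platform_data cw1200_pdata = {
	.mmc_id	= "mmc3",		/* SDIO host the chip sits on */
	.irq	= &cw1200_wlan_irq,
};

const struct cw1200_platform_data *cw1200_get_platform_data(void)
{
	return &cw1200_pdata;
}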
diff --git a/arch/arm/mach-ux500/include/mach/db5500-keypad.h b/arch/arm/mach-ux500/include/mach/db5500-keypad.h
new file mode 100644
index 00000000000..d9d23419ab3
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/db5500-keypad.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License, version 2
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ */
+
+#ifndef __DB5500_KEYPAD_H
+#define __DB5500_KEYPAD_H
+
+#include <linux/input/matrix_keypad.h>
+
+#define KEYPAD_MAX_ROWS 9
+#define KEYPAD_MAX_COLS 8
+
+/**
+ * struct db5500_keypad_platform_data - structure for platform specific data
+ * @keymap_data: matrix scan code table for keycodes
+ * @debounce_ms: platform specific debounce time
+ * @no_autorepeat: flag for auto repetition
+ * @init : pointer to keypad init function
+ * @exit : pointer to keypad exit function
+ * @krow : maximum number of rows
+ * @kcol : maximum number of cols
+ * @gpio_input_pins: pointer to gpio input pins
+ * @gpio_output_pins: pointer to gpio output pins
+ * @switch_delay : gpio switch_delay
+ */
+struct db5500_keypad_platform_data {
+ const struct matrix_keymap_data *keymap_data;
+ u8 debounce_ms;
+ bool no_autorepeat;
+ int (*init)(void);
+ int (*exit)(void);
+ u8 krow;
+ u8 kcol;
+ int *gpio_input_pins;
+ int *gpio_output_pins;
+ int switch_delay;
+};
+
+#endif
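
For illustration, a board might fill in this platform data as follows (keymap contents, debounce time and matrix size are placeholders, not taken from this patch):

static const unsigned int board_keymap[] = {
	KEY(0, 0, KEY_MENU),
	KEY(0, 1, KEY_BACK),
	KEY(1, 0, KEY_VOLUMEUP),
	KEY(1, 1, KEY_VOLUMEDOWN),
};

static const struct matrix_keymap_data board_keymap_data = {
	.keymap		= board_keymap,
	.keymap_size	= ARRAY_SIZE(board_keymap),
};

static struct db5500_keypad_platform_data board_keypad_pdata = {
	.keymap_data	= &board_keymap_data,
	.debounce_ms	= 40,
	.no_autorepeat	= true,
	.krow		= 2,
	.kcol		= 2,
};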
diff --git a/arch/arm/mach-ux500/include/mach/db5500-regs.h b/arch/arm/mach-ux500/include/mach/db5500-regs.h
index 8e714bcb099..187163bd63d 100644
--- a/arch/arm/mach-ux500/include/mach/db5500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db5500-regs.h
@@ -34,6 +34,7 @@
#define U5500_ICN_BASE 0xA0040000
#define U5500_B2R2_BASE 0xa0200000
#define U5500_BOOT_ROM_BASE 0x90000000
+#define U5500_ASIC_ID_ADDRESS (U5500_BOOT_ROM_BASE + 0x1FFF4)
#define U5500_FSMC_BASE (U5500_PER1_BASE + 0x0000)
#define U5500_SDI0_BASE (U5500_PER1_BASE + 0x1000)
diff --git a/arch/arm/mach-ux500/include/mach/dcache.h b/arch/arm/mach-ux500/include/mach/dcache.h
new file mode 100644
index 00000000000..83fe618b04f
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/dcache.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Data cache helpers
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _MACH_UX500_DCACHE_H_
+#define _MACH_UX500_DCACHE_H_
+
+#include <linux/types.h>
+
+void drain_cpu_write_buf(void);
+void clean_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
+ bool *cleaned_everything);
+void flush_cpu_dcache(void *vaddr, u32 paddr, u32 length, bool inner_only,
+ bool *flushed_everything);
+bool speculative_data_prefetch(void);
+/* Returns 1 if no cache is present */
+u32 get_dcache_granularity(void);
+
+#endif /* _MACH_UX500_DCACHE_H_ */
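A short usage sketch, assuming a driver that hands a kernel buffer to a bus master; the helpers are the ones declared above, the surrounding function is hypothetical.

#include <mach/dcache.h>

static void hand_buffer_to_hw(void *vaddr, u32 paddr, u32 size)
{
	bool cleaned_everything;

	/* Write back dirty lines so the device reads up-to-date data. */
	clean_cpu_dcache(vaddr, paddr, size, false, &cleaned_everything);

	/* Make sure the CPU write buffer has drained before kicking the DMA. */
	drain_cpu_write_buf();
}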
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
index 5f6cb71fc62..2392b06f91f 100644
--- a/arch/arm/mach-ux500/include/mach/devices.h
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -13,9 +13,25 @@ struct amba_device;
extern struct platform_device u5500_gpio_devs[];
extern struct platform_device u8500_gpio_devs[];
+extern struct platform_device u8500_mcde_device;
+extern struct platform_device u5500_mcde_device;
+extern struct platform_device u8500_shrm_device;
+extern struct platform_device u8500_b2r2_device;
+extern struct platform_device u5500_b2r2_device;
+extern struct platform_device u8500_trace_modem;
+extern struct platform_device ux500_hwmem_device;
+extern struct platform_device u8500_stm_device;
extern struct amba_device ux500_pl031_device;
-
-extern struct platform_device u8500_dma40_device;
+extern struct platform_device ux500_hash1_device;
+extern struct platform_device ux500_cryp1_device;
+extern struct platform_device mloader_fw_device;
+extern struct platform_device u5500_thsens_device;
+extern struct platform_device u8500_thsens_device;
extern struct platform_device ux500_ske_keypad_device;
+extern struct platform_device u8500_wdt_device;
+extern struct platform_device u5500_wdt_device;
+extern struct platform_device u8500_hsi_device;
+extern struct platform_device ux500_mmio_device;
+extern struct platform_device u5500_mmio_device;
#endif
diff --git a/arch/arm/mach-ux500/include/mach/gpio.h b/arch/arm/mach-ux500/include/mach/gpio.h
index c01ef66537f..2d3bb8f47ce 100644
--- a/arch/arm/mach-ux500/include/mach/gpio.h
+++ b/arch/arm/mach-ux500/include/mach/gpio.h
@@ -1,5 +1,29 @@
#ifndef __ASM_ARCH_GPIO_H
#define __ASM_ARCH_GPIO_H
+/*
+ * 288 (#267 is the highest one actually hooked up) on-chip GPIOs, plus enough
+ * room for a couple of GPIO expanders.
+ */
+
+#if CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
+#else
+#define ARCH_NR_GPIOS 355
+#endif
+
+#define NOMADIK_NR_GPIO 288
+
+#include <asm-generic/gpio.h>
+
+/* Invoke gpiolib's gpio_chip abstraction */
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+#define MOP500_EGPIO(x) (NOMADIK_NR_GPIO + (x))
+#define MOP500_EGPIO_END MOP500_EGPIO(24)
+#define AB8500_GPIO_BASE MOP500_EGPIO_END
#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index b6ba26a1367..552f32c103a 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -24,12 +24,29 @@
/* typesafe io address */
#define __io_address(n) __io(IO_ADDRESS(n))
-/* Used by some plat-nomadik code */
-#define io_p2v(n) __io_address(n)
#include <mach/db8500-regs.h>
#include <mach/db5500-regs.h>
+/*
+ * FIFO offsets for IPs
+ */
+#define MSP_TX_RX_REG_OFFSET (0)
+#define HASH1_TX_REG_OFFSET (0x4)
+#define SSP_TX_RX_REG_OFFSET (0x8)
+#define SPI_TX_RX_REG_OFFSET (0x8)
+#define SD_MMC_TX_RX_REG_OFFSET (0x80)
+#define CRYP1_RX_REG_OFFSET (0x10)
+#define CRYP1_TX_REG_OFFSET (0x8)
+
+#define SSP_0_CONTROLLER 4
+#define SSP_1_CONTROLLER 5
+
+#define SPI023_0_CONTROLLER 6
+#define SPI023_1_CONTROLLER 7
+#define SPI023_2_CONTROLLER 8
+#define SPI023_3_CONTROLLER 9
+
#ifndef __ASSEMBLY__
#include <mach/id.h>
diff --git a/arch/arm/mach-ux500/include/mach/hsi.h b/arch/arm/mach-ux500/include/mach/hsi.h
new file mode 100644
index 00000000000..58d33249cae
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/hsi.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef __MACH_HSI_H
+#define __MACH_HSI_H
+
+#include <plat/ste_dma40.h>
+
+/* HSIT register offsets */
+#define STE_HSI_TX_ID 0x000
+#define STE_HSI_TX_MODE 0x004
+#define STE_HSI_TX_STATE 0x008
+#define STE_HSI_TX_IOSTATE 0x00C
+#define STE_HSI_TX_BUFSTATE 0x010
+#define STE_HSI_TX_DIVISOR 0x014
+#define STE_HSI_TX_BREAK 0x01C
+#define STE_HSI_TX_CHANNELS 0x020
+#define STE_HSI_TX_FLUSHBITS 0x024
+#define STE_HSI_TX_PRIORITY 0x028
+#define STE_HSI_TX_STATICCONFID 0x02C
+#define STE_HSI_TX_DATASWAP 0x034
+#define STE_HSI_TX_FRAMELENX 0x080
+#define STE_HSI_TX_BUFFERX 0x0C0
+#define STE_HSI_TX_BASEX 0x100
+#define STE_HSI_TX_SPANX 0x140
+#define STE_HSI_TX_GAUGEX 0x180
+#define STE_HSI_TX_WATERMARKX 0x1C0
+#define STE_HSI_TX_DMAEN 0x200
+#define STE_HSI_TX_WATERMARKMIS 0x204
+#define STE_HSI_TX_WATERMARKIM 0x208
+#define STE_HSI_TX_WATERMARKIC 0x20C
+#define STE_HSI_TX_WATERMARKID 0x210
+#define STE_HSI_TX_WATERMARKIS 0x214
+#define STE_HSI_TX_PERIPHID0 0xFE0
+#define STE_HSI_TX_PERIPHID1 0xFE4
+#define STE_HSI_TX_PERIPHID2 0xFE8
+#define STE_HSI_TX_PERIPHID3 0xFEC
+
+/* HSIR register offsets */
+#define STE_HSI_RX_ID 0x000
+#define STE_HSI_RX_MODE 0x004
+#define STE_HSI_RX_STATE 0x008
+#define STE_HSI_RX_BUFSTATE 0x00C
+#define STE_HSI_RX_THRESHOLD 0x010
+#define STE_HSI_RX_DETECTOR 0x018
+#define STE_HSI_RX_EXCEP 0x01C
+#define STE_HSI_RX_ACK 0x020
+#define STE_HSI_RX_CHANNELS 0x024
+#define STE_HSI_RX_REALTIME 0x028
+#define STE_HSI_RX_OVERRUN 0x02C
+#define STE_HSI_RX_OVERRUNACK 0x030
+#define STE_HSI_RX_PREAMBLE 0x034
+#define STE_HSI_RX_PIPEGAUGE 0x038
+#define STE_HSI_RX_STATICCONFID 0x03C
+#define STE_HSI_RX_BUFFERX 0x080
+#define STE_HSI_RX_FRAMELENX 0x0C0
+#define STE_HSI_RX_BASEX 0x100
+#define STE_HSI_RX_SPANX 0x140
+#define STE_HSI_RX_GAUGEX 0x180
+#define STE_HSI_RX_WATERMARKX 0x1C0
+#define STE_HSI_RX_FRAMEBURSTCNT 0x1E0
+#define STE_HSI_RX_DMAEN 0x200
+#define STE_HSI_RX_WATERMARKMIS 0x204
+#define STE_HSI_RX_WATERMARKIM 0x208
+#define STE_HSI_RX_WATERMARKIC 0x20C
+#define STE_HSI_RX_WATERMARKID 0x210
+#define STE_HSI_RX_OVERRUNMIS 0x214
+#define STE_HSI_RX_OVERRUNIM 0x218
+#define STE_HSI_RX_EXCEPMIS 0x21C
+#define STE_HSI_RX_EXCEPIM 0x220
+#define STE_HSI_RX_WATERMARKIS 0x224
+#define STE_HSI_RX_PERIPHID0 0xFE0
+#define STE_HSI_RX_PERIPHID1 0xFE4
+#define STE_HSI_RX_PERIPHID2 0xFE8
+#define STE_HSI_RX_PERIPHID3 0xFEC
+
+/* HSI states */
+#define STE_HSI_STATE_IDLE 0x00
+#define STE_HSI_STATE_START 0x01
+#define STE_HSI_STATE_TRANSMIT 0x02
+#define STE_HSI_STATE_BREAK 0x03
+#define STE_HSI_STATE_FLUSH 0x04
+#define STE_HSI_STATE_HALT 0x05
+
+/* HSI exceptions */
+#define STE_HSI_EXCEP_TIMEOUT 0x01
+#define STE_HSI_EXCEP_OVERRUN 0x02
+#define STE_HSI_EXCEP_BREAK 0x04
+#define STE_HSI_EXCEP_PARITY 0x08
+
+/* HSI modes */
+#define STE_HSI_MODE_SLEEP 0x00
+#define STE_HSI_MODE_STREAM 0x01
+#define STE_HSI_MODE_FRAME 0x02
+#define STE_HSI_MODE_PIPELINED 0x03
+#define STE_HSI_MODE_FAILSAFE 0x04
+
+#define STE_HSI_MAX_BUFFERS 32
+
+/* Max channels of STE HSI controller */
+#define STE_HSI_MAX_CHANNELS 2
+
+#define STE_HSI_DMA_MAX_BURST 1
+
+struct stedma40_chan_cfg;
+
+struct ste_hsi_port_cfg {
+#ifdef CONFIG_STE_DMA40
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ struct stedma40_chan_cfg *dma_tx_cfg;
+ struct stedma40_chan_cfg *dma_rx_cfg;
+#endif
+};
+
+struct ste_hsi_platform_data {
+ int num_ports;
+ int use_dma;
+ struct ste_hsi_port_cfg *port_cfg;
+};
+
+#endif
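A hypothetical platform-data sketch for one HSI port; the DB40 channel configs are placeholders and stedma40_filter is assumed to be the channel filter exported by plat/ste_dma40.h.

#include <linux/kernel.h>
#include <mach/hsi.h>

static struct stedma40_chan_cfg hsi_dma_tx_cfg;	/* placeholder */
static struct stedma40_chan_cfg hsi_dma_rx_cfg;	/* placeholder */

static struct ste_hsi_port_cfg hsi_port_cfg[] = {
	{
#ifdef CONFIG_STE_DMA40
		.dma_filter	= stedma40_filter,
		.dma_tx_cfg	= &hsi_dma_tx_cfg,
		.dma_rx_cfg	= &hsi_dma_rx_cfg,
#endif
	},
};

static struct ste_hsi_platform_data hsi_platform_data = {
	.num_ports	= ARRAY_SIZE(hsi_port_cfg),
	.use_dma	= 1,
	.port_cfg	= hsi_port_cfg,
};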
diff --git a/arch/arm/mach-ux500/include/mach/id.h b/arch/arm/mach-ux500/include/mach/id.h
index 833d6a6edc9..b27902a5f2b 100644
--- a/arch/arm/mach-ux500/include/mach/id.h
+++ b/arch/arm/mach-ux500/include/mach/id.h
@@ -38,14 +38,41 @@ static inline unsigned int __attribute_const__ dbx500_revision(void)
static inline bool __attribute_const__ cpu_is_u8500(void)
{
- return dbx500_partnumber() == 0x8500;
+#ifdef CONFIG_UX500_SOC_DB8500
+ /* partnumber 8520 also comes under 8500 */
+ return ((dbx500_partnumber() >> 8) & 0xff) == 0x85;
+#else
+ return false;
+#endif
+}
+
+static inline bool __attribute_const__ cpu_is_u8520(void)
+{
+#ifdef CONFIG_UX500_SOC_DB8500
+ return dbx500_partnumber() == 0x8520;
+#else
+ return false;
+#endif
}
static inline bool __attribute_const__ cpu_is_u5500(void)
{
+#ifdef CONFIG_UX500_SOC_DB5500
return dbx500_partnumber() == 0x5500;
+#else
+ return false;
+#endif
}
+#ifdef CONFIG_UX500_SOC_DB8500
+bool cpu_is_u9500(void);
+#else
+static inline bool cpu_is_u9500(void)
+{
+ return false;
+}
+#endif
+
/*
* 5500 revisions
*/
@@ -74,26 +101,6 @@ static inline bool __attribute_const__ cpu_is_u5500v21(void)
* 8500 revisions
*/
-static inline bool __attribute_const__ cpu_is_u8500ed(void)
-{
- return cpu_is_u8500() && dbx500_revision() == 0x00;
-}
-
-static inline bool __attribute_const__ cpu_is_u8500v1(void)
-{
- return cpu_is_u8500() && (dbx500_revision() & 0xf0) == 0xA0;
-}
-
-static inline bool __attribute_const__ cpu_is_u8500v10(void)
-{
- return cpu_is_u8500() && dbx500_revision() == 0xA0;
-}
-
-static inline bool __attribute_const__ cpu_is_u8500v11(void)
-{
- return cpu_is_u8500() && dbx500_revision() == 0xA1;
-}
-
static inline bool __attribute_const__ cpu_is_u8500v2(void)
{
return cpu_is_u8500() && ((dbx500_revision() & 0xf0) == 0xB0);
@@ -109,9 +116,14 @@ static inline bool cpu_is_u8500v21(void)
return cpu_is_u8500() && (dbx500_revision() == 0xB1);
}
+static inline bool cpu_is_u8500v22(void)
+{
+ return cpu_is_u8500() && (dbx500_revision() == 0xB2);
+}
+
static inline bool cpu_is_u8500v20_or_later(void)
{
- return cpu_is_u8500() && !cpu_is_u8500v10() && !cpu_is_u8500v11();
+ return cpu_is_u8500() && ((dbx500_revision() & 0xf0) >= 0xB0);
}
static inline bool ux500_is_svp(void)
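A hypothetical sketch of how init code might branch on these helpers; when the matching SoC support is not built in, the checks compile to a constant false and the dead branch is dropped.

#include <linux/init.h>
#include <mach/id.h>
#include <mach/setup.h>

static void __init board_dma_init(void)
{
	if (cpu_is_u5500())
		db5500_dma_init();
	else if (cpu_is_u8500v20_or_later())
		db8500_dma_init();
}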
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
index d2d4131435a..7c96540878b 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
@@ -43,6 +43,8 @@
#define MOP500_AB8500_VIR_GPIO_IRQ_BASE \
MOP500_STMPE1601_IRQ_END
+#define MOP500_AB8500_VIR_GPIO_IRQ(x) \
+ (MOP500_AB8500_VIR_GPIO_IRQ_BASE + (x))
#define MOP500_AB8500_VIR_GPIO_IRQ_END \
(MOP500_AB8500_VIR_GPIO_IRQ_BASE + AB8500_VIR_GPIO_NR_IRQS)
@@ -57,7 +59,7 @@
*/
#if MOP500_IRQ_END > IRQ_BOARD_END
#undef IRQ_BOARD_END
-#define IRQ_BOARD_END MOP500_IRQ_END
+#define IRQ_BOARD_END MOP500_IRQ_END
#endif
#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h b/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h
index 29d972c7717..2294a47b3a2 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h
@@ -7,13 +7,20 @@
#ifndef __MACH_IRQS_BOARD_U5500_H
#define __MACH_IRQS_BOARD_U5500_H
-#define AB5500_NR_IRQS 5
+#include <linux/mfd/abx500/ab5500.h>
+
+#define AB5500_NR_IRQS (AB5500_NUM_IRQ_REGS * 8)
#define IRQ_AB5500_BASE IRQ_BOARD_START
#define IRQ_AB5500_END (IRQ_AB5500_BASE + AB5500_NR_IRQS)
#define U5500_IRQ_END IRQ_AB5500_END
-#if IRQ_BOARD_END < U5500_IRQ_END
+/*
+ * We may have several boards, but only one will run at a
+ * time, so the one with most IRQs will bump this ahead,
+ * but the IRQ_BOARD_START remains the same for either board.
+ */
+#if U5500_IRQ_END > IRQ_BOARD_END
#undef IRQ_BOARD_END
#define IRQ_BOARD_END U5500_IRQ_END
#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs-db5500.h b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
index 77239776a6f..234cf4ac4e1 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-db5500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
@@ -85,6 +85,37 @@
#ifdef CONFIG_UX500_SOC_DB5500
+/* Virtual interrupts corresponding to the PRCMU wakeups. */
+#define IRQ_DB5500_PRCMU_BASE IRQ_SOC_START
+
+#define IRQ_DB5500_PRCMU_RTC (IRQ_DB5500_PRCMU_BASE)
+#define IRQ_DB5500_PRCMU_RTT0 (IRQ_DB5500_PRCMU_BASE + 1)
+#define IRQ_DB5500_PRCMU_RTT1 (IRQ_DB5500_PRCMU_BASE + 2)
+#define IRQ_DB5500_PRCMU_CD_IRQ (IRQ_DB5500_PRCMU_BASE + 3)
+#define IRQ_DB5500_PRCMU_SRP_TIM (IRQ_DB5500_PRCMU_BASE + 4)
+#define IRQ_DB5500_PRCMU_APE_REQ (IRQ_DB5500_PRCMU_BASE + 5)
+#define IRQ_DB5500_PRCMU_USB (IRQ_DB5500_PRCMU_BASE + 6)
+#define IRQ_DB5500_PRCMU_ABB (IRQ_DB5500_PRCMU_BASE + 7)
+#define IRQ_DB5500_PRCMU_ARM (IRQ_DB5500_PRCMU_BASE + 8)
+#define IRQ_DB5500_PRCMU_MODEM_SW_RESET_REQ (IRQ_DB5500_PRCMU_BASE + 9)
+#define IRQ_DB5500_PRCMU_AC_WAKE_ACK (IRQ_DB5500_PRCMU_BASE + 10)
+#define IRQ_DB5500_PRCMU_GPIO0 (IRQ_DB5500_PRCMU_BASE + 11)
+#define IRQ_DB5500_PRCMU_GPIO1 (IRQ_DB5500_PRCMU_BASE + 12)
+#define IRQ_DB5500_PRCMU_GPIO2 (IRQ_DB5500_PRCMU_BASE + 13)
+#define IRQ_DB5500_PRCMU_GPIO3 (IRQ_DB5500_PRCMU_BASE + 14)
+#define IRQ_DB5500_PRCMU_GPIO4 (IRQ_DB5500_PRCMU_BASE + 15)
+#define IRQ_DB5500_PRCMU_GPIO5 (IRQ_DB5500_PRCMU_BASE + 16)
+#define IRQ_DB5500_PRCMU_GPIO6 (IRQ_DB5500_PRCMU_BASE + 17)
+#define IRQ_DB5500_PRCMU_GPIO7 (IRQ_DB5500_PRCMU_BASE + 18)
+#define IRQ_DB5500_PRCMU_AC_REL_ACK (IRQ_DB5500_PRCMU_BASE + 19)
+#define IRQ_DB5500_PRCMU_LOW_POWER_AUDIO (IRQ_DB5500_PRCMU_BASE + 20)
+#define IRQ_DB5500_PRCMU_TEMP_SENSOR_LOW (IRQ_DB5500_PRCMU_BASE + 21)
+#define IRQ_DB5500_PRCMU_TEMP_SENSOR_HIGH (IRQ_DB5500_PRCMU_BASE + 22)
+#define IRQ_DB5500_PRCMU_END (IRQ_DB5500_PRCMU_BASE + 23)
+#define IRQ_DB5500_PRCMU_APE_REL 0x200
+
+#define NUM_DB5500_PRCMU_WAKEUPS (IRQ_DB5500_PRCMU_END - IRQ_DB5500_PRCMU_BASE)
+
/*
* After the GPIO ones we reserve a range of IRQ:s in which virtual
* IRQ:s representing modem IRQ:s can be allocated
diff --git a/arch/arm/mach-ux500/include/mach/irqs.h b/arch/arm/mach-ux500/include/mach/irqs.h
index 9db68d264c5..a2876464d43 100644
--- a/arch/arm/mach-ux500/include/mach/irqs.h
+++ b/arch/arm/mach-ux500/include/mach/irqs.h
@@ -11,9 +11,7 @@
#define ASM_ARCH_IRQS_H
#include <mach/hardware.h>
-
-#define IRQ_LOCALTIMER 29
-#define IRQ_LOCALWDOG 30
+#include <linux/gpio.h>
/* Shared Peripheral Interrupt (SHPI) */
#define IRQ_SHPI_START 32
@@ -22,27 +20,34 @@
* MTU0 preserved for now until plat-nomadik is taught not to use it. Don't
* add any other IRQs here, use the irqs-dbx500.h files.
*/
-#define IRQ_MTU0 (IRQ_SHPI_START + 4)
+#define IRQ_MTU0 (IRQ_SHPI_START + 4)
+
+#define IRQ_LOCALTIMER 29
+#define IRQ_LOCALWDOG 30
+
+/*********************************************************************/
#define DBX500_NR_INTERNAL_IRQS 160
/* After chip-specific IRQ numbers we have the GPIO ones */
-#define NOMADIK_NR_GPIO 288
#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + DBX500_NR_INTERNAL_IRQS)
#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - DBX500_NR_INTERNAL_IRQS)
+
+#define GPIO_TO_IRQ NOMADIK_GPIO_TO_IRQ
+#define IRQ_TO_GPIO NOMADIK_IRQ_TO_GPIO
#define IRQ_GPIO_END NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
-#define IRQ_SOC_START IRQ_GPIO_END
+#define IRQ_SOC_START IRQ_GPIO_END
/* This will be overridden by SoC-specific irq headers */
-#define IRQ_SOC_END IRQ_SOC_START
+#define IRQ_SOC_END IRQ_SOC_START
+
+#define IRQ_BOARD_START IRQ_SOC_END
+/* This will be overridden by board-specific irq headers */
+#define IRQ_BOARD_END IRQ_BOARD_START
#include <mach/irqs-db5500.h>
#include <mach/irqs-db8500.h>
-#define IRQ_BOARD_START IRQ_SOC_END
-/* This will be overridden by board-specific irq headers */
-#define IRQ_BOARD_END IRQ_BOARD_START
-
#ifdef CONFIG_MACH_U8500
#include <mach/irqs-board-mop500.h>
#endif
@@ -51,6 +56,8 @@
#include <mach/irqs-board-u5500.h>
#endif
+#ifndef NR_IRQS
#define NR_IRQS IRQ_BOARD_END
+#endif
#endif /* ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-ux500/include/mach/isa_ioctl.h b/arch/arm/mach-ux500/include/mach/isa_ioctl.h
new file mode 100644
index 00000000000..b1f3ba159da
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/isa_ioctl.h
@@ -0,0 +1,50 @@
+/*---------------------------------------------------------------------------*/
+/* Copyright ST Ericsson, 2009. */
+/* This program is free software; you can redistribute it and/or modify it */
+/* under the terms of the GNU General Public License as published by the */
+/* Free Software Foundation; either version 2.1 of the License, or */
+/* (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, but */
+/* WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. */
+/* See the GNU General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU General Public License */
+/* along with this program. If not, see <http://www.gnu.org/licenses/>. */
+/*---------------------------------------------------------------------------*/
+#ifndef __MODEM_IPC_INCLUDED
+#define __MODEM_IPC_INCLUDED
+
+#define DLP_IOCTL_MAGIC_NUMBER 'M'
+#define COMMON_BUFFER_SIZE (1024*1024)
+
+/**
+DLP Message Structure for Userland
+*/
+struct t_dlp_message{
+ unsigned int offset;
+ unsigned int size;
+};
+
+/**
+mmap constants.
+*/
+enum t_dlp_mmap_params {
+ MMAP_DLQUEUE,
+ MMAP_ULQUEUE
+};
+
+/**
+DLP IOCTLs for Userland
+*/
+#define DLP_IOC_ALLOCATE_BUFFER \
+ _IOWR(DLP_IOCTL_MAGIC_NUMBER, 0, struct t_dlp_message *)
+#define DLP_IOC_DEALLOCATE_BUFFER \
+ _IOWR(DLP_IOCTL_MAGIC_NUMBER, 1, struct t_dlp_message *)
+#define DLP_IOC_GET_MESSAGE \
+ _IOWR(DLP_IOCTL_MAGIC_NUMBER, 2, struct t_dlp_message *)
+#define DLP_IOC_PUT_MESSAGE \
+ _IOWR(DLP_IOCTL_MAGIC_NUMBER, 3, struct t_dlp_message *)
+
+#endif /*__MODEM_IPC_INCLUDED*/
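A hypothetical userspace sketch of the allocate ioctl; the device node path is an assumption, and the exact meaning of the returned offset is up to the modem IPC driver.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "isa_ioctl.h"

int dlp_allocate_example(void)
{
	struct t_dlp_message msg = { .offset = 0, .size = 4096 };
	int fd = open("/dev/dlp", O_RDWR);	/* placeholder node name */

	if (fd < 0)
		return -1;

	if (ioctl(fd, DLP_IOC_ALLOCATE_BUFFER, &msg) < 0) {
		close(fd);
		return -1;
	}

	/* msg now describes the allocated region inside the shared buffer. */
	close(fd);
	return 0;
}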
diff --git a/arch/arm/mach-ux500/include/mach/mbox-db5500.h b/arch/arm/mach-ux500/include/mach/mbox-db5500.h
index 7f9da4d2fbd..2da180b8df6 100644
--- a/arch/arm/mach-ux500/include/mach/mbox-db5500.h
+++ b/arch/arm/mach-ux500/include/mach/mbox-db5500.h
@@ -40,6 +40,7 @@ typedef void mbox_recv_cb_t (u32 mbox_msg, void *priv);
* @lock: Spinlock to protect this mailbox instance.
* @write_index: Index in internal buffer to write to.
* @read_index: Index in internal buffer to read from.
+ * @irq: mailbox interrupt.
* @allocated: Indicates whether this particular mailbox
* id has been allocated by someone.
*/
@@ -57,7 +58,11 @@ struct mbox {
spinlock_t lock;
u8 write_index;
u8 read_index;
+ int irq;
bool allocated;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *dentry;
+#endif
};
/**
@@ -84,5 +89,5 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv);
* specify "block" in order to block until send is possible).
*/
int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block);
-
+void mbox_state_reset(void);
#endif /*INC_STE_MBOX_H*/
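A hypothetical client sketch of the mailbox API above; mailbox id 0 and the error handling convention are assumptions.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <mach/mbox-db5500.h>

static void example_mbox_cb(u32 mbox_msg, void *priv)
{
	pr_info("mailbox message: 0x%08x\n", mbox_msg);
}

static int example_mbox_send(void)
{
	struct mbox *mbox;

	mbox = mbox_setup(0, example_mbox_cb, NULL);
	if (IS_ERR_OR_NULL(mbox))
		return -ENODEV;

	/* Block until the message can be queued. */
	return mbox_send(mbox, 0x12345678, true);
}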
diff --git a/arch/arm/mach-ux500/include/mach/mbox_channels-db5500.h b/arch/arm/mach-ux500/include/mach/mbox_channels-db5500.h
new file mode 100644
index 00000000000..69616c4cdec
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/mbox_channels-db5500.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com> for ST-Ericsson.
+ * Bibek Basu <bibek.basu@stericsson.com>
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __INC_MBOX_CHANNELS_H
+#define __INC_MBOX_CHANNELS_H
+
+/* Maximum number of datawords which can be send in one PDU */
+#define MAILBOX_NR_OF_DATAWORDS 3
+
+/* Number of buffers */
+#define NUM_DSP_BUFFER 32
+
+/**
+ * mbox_channel_cb_t - Definition of the mailbox channel callback.
+ * @data: Pointer to the data.
+ * @length: Length of the data.
+ * @priv: The client's private data.
+ *
+ * This function will be called upon reception of complete mbox channel PDU
+ * or after completion of send operation.
+ */
+typedef void mbox_channel_cb_t (u32 *data, u32 length, void *priv);
+
+/**
+ * struct mbox_channel_msg - Definition of mbox channel message
+ * @channel: Channel number.
+ * @data: Pointer to data to be sent.
+ * @length: Length of data to be sent.
+ * @cb: Pointer to the callback function to be called when the send
+ * operation has finished.
+ * @priv: The client's private data.
+ *
+ * This structure describes mailbox channel message.
+ */
+struct mbox_channel_msg {
+ u16 channel;
+ u32 *data;
+ u8 length;
+ mbox_channel_cb_t *cb;
+ void *priv;
+};
+
+/**
+ * mbox_channel_register - Set up a given mailbox channel.
+ * @channel: Mailbox channel number.
+ * @cb: Pointer to the callback function to be called when a new message
+ * is received.
+ * @priv: Client user data which will be returned in the callback.
+ *
+ * Returns 0 on success or a negative error code on error.
+ */
+int mbox_channel_register(u16 channel, mbox_channel_cb_t *cb, void *priv);
+
+/**
+ * mbox_channel_send - Send data on given mailbox channel.
+ * @msg: Mailbox channel message to be sent.
+ *
+ * Returns 0 on success or a negative error code on error.
+ */
+int mbox_channel_send(struct mbox_channel_msg *msg);
+
+/**
+ * mbox_channel_revoke_messages - Revoke messages on given mailbox channel.
+ * @channel: Mailbox channel number.
+ *
+ * Returns 0 on success or a negative error code on error.
+ */
+int mbox_channel_revoke_messages(u16 channel);
+
+/**
+ * mbox_channel_deregister - de-register given mailbox channel.
+ * @channel: Mailbox channel number.
+ *
+ * Returns 0 on success or a negative error code on error.
+ */
+int mbox_channel_deregister(u16 channel);
+
+#endif /* __INC_MBOX_CHANNELS_H */
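A hypothetical client sketch of the channel API above; the channel number is a placeholder and the callback does nothing.

#include <linux/types.h>
#include <mach/mbox_channels-db5500.h>

static void example_channel_cb(u32 *data, u32 length, void *priv)
{
	/* Called for received PDUs and on completion of our own sends. */
}

static int example_channel_send(void)
{
	static u32 pdu[MAILBOX_NR_OF_DATAWORDS] = { 0x1, 0x2, 0x3 };
	struct mbox_channel_msg msg = {
		.channel	= 1,	/* placeholder channel number */
		.data		= pdu,
		.length		= MAILBOX_NR_OF_DATAWORDS,
		.cb		= example_channel_cb,
		.priv		= NULL,
	};
	int err;

	err = mbox_channel_register(msg.channel, example_channel_cb, NULL);
	if (err)
		return err;

	return mbox_channel_send(&msg);
}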
diff --git a/arch/arm/mach-ux500/include/mach/mloader-dbx500.h b/arch/arm/mach-ux500/include/mach/mloader-dbx500.h
new file mode 100644
index 00000000000..68fa55a3f53
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/mloader-dbx500.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Ludovic Barre <ludovic.barre@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _MLOADER_UX500_H_
+#define _MLOADER_UX500_H_
+
+/**
+ * struct dbx500_ml_area - data structure for modem memory areas description
+ * @name: name of the area
+ * @start: start address of the area
+ * @size: size of the area
+ */
+struct dbx500_ml_area {
+ const char *name;
+ u32 start;
+ u32 size;
+};
+
+/**
+ * struct dbx500_ml_fw - data structure for modem firmware description
+ * @name: firmware name
+ * @area: area where firmware is uploaded
+ * @offset: offset in the area where firmware is uploaded
+ */
+struct dbx500_ml_fw {
+ const char *name;
+ struct dbx500_ml_area *area;
+ u32 offset;
+};
+
+/**
+ * struct dbx500_mloader_pdata - data structure for platform specific data
+ * @fws: pointer to the firmware table
+ * @nr_fws: number of firmwares
+ * @areas: pointer to the areas table
+ * @nr_areas: number of areas
+ */
+struct dbx500_mloader_pdata {
+ struct dbx500_ml_fw *fws;
+ int nr_fws;
+ struct dbx500_ml_area *areas;
+ int nr_areas;
+};
+
+#endif /* _MLOADER_UX500_H_ */
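A hypothetical platform-data sketch; the area name, address, size and firmware name are invented placeholders.

#include <linux/kernel.h>
#include <mach/mloader-dbx500.h>

static struct dbx500_ml_area modem_areas[] = {
	{ .name = "modem_sdram", .start = 0x38000000, .size = 0x01000000 },
};

static struct dbx500_ml_fw modem_fws[] = {
	{ .name = "MODEM_CODE", .area = &modem_areas[0], .offset = 0x0 },
};

static struct dbx500_mloader_pdata mloader_platform_data = {
	.fws		= modem_fws,
	.nr_fws		= ARRAY_SIZE(modem_fws),
	.areas		= modem_areas,
	.nr_areas	= ARRAY_SIZE(modem_areas),
};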
diff --git a/arch/arm/mach-ux500/include/mach/msp.h b/arch/arm/mach-ux500/include/mach/msp.h
new file mode 100644
index 00000000000..6f42cca48c6
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/msp.h
@@ -0,0 +1,1023 @@
+/*
+ * Copyright (c) 2009 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _STM_MSP_HEADER
+#define _STM_MSP_HEADER
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/dmaengine.h>
+#include <linux/irqreturn.h>
+#include <linux/bitops.h>
+#include <plat/ste_dma40.h>
+#include <linux/gpio.h>
+#include <linux/spi/stm_msp.h>
+
+/* Generic config struct. Use the actual values defined below for global
+ * control register
+ */
+
+enum msp_state {
+ MSP_STATE_IDLE = 0,
+ MSP_STATE_CONFIGURED = 1,
+ MSP_STATE_RUN = 2,
+};
+
+enum msp_rx_comparison_enable_mode {
+ MSP_COMPARISON_DISABLED = 0,
+ MSP_COMPARISON_NONEQUAL_ENABLED = 2,
+ MSP_COMPARISON_EQUAL_ENABLED = 3
+};
+
+#define RMCEN_BIT 0
+#define RMCSF_BIT 1
+#define RCMPM_BIT 3
+#define TMCEN_BIT 5
+#define TNCSF_BIT 6
+
+struct msp_multichannel_config {
+ bool rx_multichannel_enable;
+ bool tx_multichannel_enable;
+ enum msp_rx_comparison_enable_mode rx_comparison_enable_mode;
+ u8 padding;
+ u32 comparison_value;
+ u32 comparison_mask;
+ u32 rx_channel_0_enable;
+ u32 rx_channel_1_enable;
+ u32 rx_channel_2_enable;
+ u32 rx_channel_3_enable;
+ u32 tx_channel_0_enable;
+ u32 tx_channel_1_enable;
+ u32 tx_channel_2_enable;
+ u32 tx_channel_3_enable;
+};
+
+/**
+ * struct msp_protocol_desc - MSP protocol descriptor structure per MSP.
+ * @rx_phase_mode: rx_phase_mode, whether single or dual.
+ * @tx_phase_mode: tx_phase_mode, whether single or dual.
+ * @rx_phase2_start_mode: rx_phase2_start_mode, whether immediate or after
+ * some delay.
+ * @tx_phase2_start_mode: tx_phase2_start_mode, whether immediate or after
+ * some delay.
+ * @rx_bit_transfer_format: MSB or LSB first.
+ * @tx_bit_transfer_format: MSB or LSB first.
+ * @rx_frame_length_1: Frame1 length 1,2,3..
+ * @rx_frame_length_2: Frame2 length 1,2,3..
+ * @tx_frame_length_1: Frame1 length 1,2,3..
+ * @tx_frame_length_2: Frame2 length 1,2,3..
+ * @rx_element_length_1: Element1 length 1,2,...
+ * @rx_element_length_2: Element2 length 1,2,...
+ * @tx_element_length_1: Element1 length 1,2,...
+ * @tx_element_length_2: Element2 length 1,2,...
+ * @rx_data_delay: Delay in clk cycle after frame sync
+ * @tx_data_delay: Delay in clk cycle after frame sync
+ * @rx_clock_pol: Rx polarity, rising or falling. It is the bit clock polarity.
+ * @tx_clock_pol: Tx polarity, rising or falling. It is the bit clock polarity.
+ * @rx_frame_sync_pol: Frame sync polarity, rising or falling.
+ * @tx_frame_sync_pol: Frame sync polarity, rising or falling.
+ * @rx_half_word_swap: Word swap half word, full word.
+ * @tx_half_word_swap: Word swap half word, full word.
+ * @compression_mode: Compression mode whether Alaw or Ulaw or disabled.
+ * @expansion_mode: Compression mode whether Alaw or Ulaw or disabled.
+ * @spi_clk_mode: Spi clock mode to be enabled or not.
+ * @spi_burst_mode: Spi burst mode to be enabled or not.
+ * @frame_sync_ignore: Frame sync to be ignored or not. Ignore in case of Audio
+ * codec acting as Master.
+ * @frame_period: Frame period (clk cycles) after which new frame sync occurs.
+ * @frame_width: Frame width (clk cycles) after which frame sync changes state.
+ * @total_clocks_for_one_frame: No. of clk cycles per frame.
+ *
+ * Main MSP protocol descriptor data structure used to store various info
+ * for the transmit or receive configuration registers of an MSP.
+ */
+
+struct msp_protocol_desc {
+ u32 rx_phase_mode;
+ u32 tx_phase_mode;
+ u32 rx_phase2_start_mode;
+ u32 tx_phase2_start_mode;
+ u32 rx_bit_transfer_format;
+ u32 tx_bit_transfer_format;
+ u32 rx_frame_length_1;
+ u32 rx_frame_length_2;
+ u32 tx_frame_length_1;
+ u32 tx_frame_length_2;
+ u32 rx_element_length_1;
+ u32 rx_element_length_2;
+ u32 tx_element_length_1;
+ u32 tx_element_length_2;
+ u32 rx_data_delay;
+ u32 tx_data_delay;
+ u32 rx_clock_pol;
+ u32 tx_clock_pol;
+ u32 rx_frame_sync_pol;
+ u32 tx_frame_sync_pol;
+ u32 rx_half_word_swap;
+ u32 tx_half_word_swap;
+ u32 compression_mode;
+ u32 expansion_mode;
+ u32 spi_clk_mode;
+ u32 spi_burst_mode;
+ u32 frame_sync_ignore;
+ u32 frame_period;
+ u32 frame_width;
+ u32 total_clocks_for_one_frame;
+};
+
+enum i2s_direction_t {
+ I2S_DIRECTION_TX = 0,
+ I2S_DIRECTION_RX = 1,
+ I2S_DIRECTION_BOTH = 2
+};
+
+enum i2s_transfer_mode_t {
+ I2S_TRANSFER_MODE_SINGLE_DMA = 0,
+ I2S_TRANSFER_MODE_CYCLIC_DMA = 1,
+ I2S_TRANSFER_MODE_INF_LOOPBACK = 2,
+ I2S_TRANSFER_MODE_NON_DMA = 4,
+};
+
+struct i2s_message {
+ enum i2s_direction_t i2s_direction;
+ void *txdata;
+ void *rxdata;
+ size_t txbytes;
+ size_t rxbytes;
+ int dma_flag;
+ int tx_offset;
+ int rx_offset;
+ /* cyclic dma */
+ bool cyclic_dma;
+ dma_addr_t buf_addr;
+ size_t buf_len;
+ size_t period_len;
+};
+
+enum i2s_flag {
+ DISABLE_ALL = 0,
+ DISABLE_TRANSMIT = 1,
+ DISABLE_RECEIVE = 2,
+};
+
+struct i2s_controller {
+ struct module *owner;
+ unsigned int id;
+ unsigned int class;
+ const struct i2s_algorithm *algo; /* the algorithm to access the bus */
+ void *data;
+ struct mutex bus_lock;
+ struct device dev; /* the controller device */
+ char name[48];
+};
+#define to_i2s_controller(d) container_of(d, struct i2s_controller, dev)
+
+/**
+ * struct trans_data - MSP transfer data structure used during xfer.
+ * @message: i2s message.
+ * @msp: msp structure.
+ * @tx_handler: callback handler for transmit path.
+ * @rx_handler: callback handler for receive path.
+ * @tx_callback_data: callback data for transmit.
+ * @rx_callback_data: callback data for receive.
+ *
+ */
+struct trans_data {
+ struct i2s_message message;
+ struct msp *msp;
+ void (*tx_handler) (void *data);
+ void (*rx_handler) (void *data);
+ void *tx_callback_data;
+ void *rx_callback_data;
+};
+
+/**
+ * struct msp_config - MSP configuration structure used by the i2s client.
+ * @input_clock_freq: Input clock frequency, default is 48 MHz.
+ * @rx_clock_sel: Receive clock selection (provided by Sample Gen or external
+ * source).
+ * @tx_clock_sel: Transmit clock selection (provided by Sample Gen or external
+ * source).
+ * @srg_clock_sel: APB clock or clock derived from Slave (Audio codec).
+ * @rx_frame_sync_pol: Receive frame sync polarity.
+ * @tx_frame_sync_pol: Transmit frame sync polarity.
+ * @rx_frame_sync_sel: Rx frame sync signal is provided by which source.
+ * External source or by frame generator logic.
+ * @tx_frame_sync_sel: Tx frame sync signal is provided by which source.
+ * External source or by frame generator logic.
+ * @rx_fifo_config: Receive fifo enable or not.
+ * @tx_fifo_config: Transmit fifo enable or not.
+ * @spi_clk_mode: In case of SPI protocol spi modes: Normal, Zero delay or
+ * half cycle delay.
+ * @spi_burst_mode: Spi burst mode is enabled or not.
+ * @loopback_enable: Loopback mode.
+ * @tx_data_enable: Transmit extra delay enable.
+ * @default_protocol_desc: Flag to indicate client defined protocol desc or
+ * statically defined in msp.h.
+ * @protocol_desc: Protocol desc structure filled by the i2s client driver
+ * in case the client defined default_protocol_desc as 0.
+ * @multichannel_configured: whether multichannel is configured or not.
+ * @multichannel_config: multichannel configuration structure.
+ * @direction: Transmit, Receive or Both.
+ * @work_mode: Dma, Polling or Interrupt.
+ * @protocol: I2S, PCM, etc.
+ * @frame_freq: Sampling freq at which data is sampled.
+ * @frame_size: size of element.
+ * @data_size: data size which defines the format in which data is written on
+ * transmit or receive fifo. Only three modes 8,16,32 are supported.
+ * @def_elem_len: Flag to indicate whether default element length is to be used
+ * or should be changed acc to data size defined by user at run time.
+ * @iodelay: value for the MSP_IODLY register
+ * @handler: callback handler in case of interrupt or dma.
+ * @tx_callback_data: Callback data for transmit.
+ * @rx_callback_data: Callback data for receive.
+ *
+ * Main Msp configuration data structure used by i2s client driver to fill
+ * various info like data size, frequency etc.
+ */
+struct msp_config {
+ unsigned int input_clock_freq;
+ unsigned int rx_clock_sel;
+ unsigned int tx_clock_sel;
+ unsigned int srg_clock_sel;
+ unsigned int rx_frame_sync_pol;
+ unsigned int tx_frame_sync_pol;
+ unsigned int rx_frame_sync_sel;
+ unsigned int tx_frame_sync_sel;
+ unsigned int rx_fifo_config;
+ unsigned int tx_fifo_config;
+ unsigned int spi_clk_mode;
+ unsigned int spi_burst_mode;
+ unsigned int loopback_enable;
+ unsigned int tx_data_enable;
+ unsigned int default_protocol_desc;
+ struct msp_protocol_desc protocol_desc;
+ int multichannel_configured;
+ struct msp_multichannel_config multichannel_config;
+ unsigned int direction;
+ unsigned int work_mode;
+ unsigned int protocol;
+ unsigned int frame_freq;
+ unsigned int frame_size;
+ enum msp_data_size data_size;
+ unsigned int def_elem_len;
+ unsigned int iodelay;
+ void (*handler) (void *data);
+ void *tx_callback_data;
+ void *rx_callback_data;
+
+};
+
+/*** Protocols ***/
+enum msp_protocol {
+ MSP_I2S_PROTOCOL,
+ MSP_PCM_PROTOCOL,
+ MSP_PCM_COMPAND_PROTOCOL,
+ MSP_AC97_PROTOCOL,
+ MSP_MASTER_SPI_PROTOCOL,
+ MSP_SLAVE_SPI_PROTOCOL,
+ MSP_INVALID_PROTOCOL
+};
+
+/*** Sample Frequencies ***/
+/* These are no longer required, frequencies in Hz can be used directly */
+enum msp_sample_freq {
+ MSP_SAMPLE_FREQ_NOT_SUPPORTED = -1,
+ MSP_SAMPLE_FREQ_8KHZ = 8000,
+ MSP_SAMPLE_FREQ_12KHZ = 12000,
+ MSP_SAMPLE_FREQ_16KHZ = 16000,
+ MSP_SAMPLE_FREQ_24KHZ = 24000,
+ MSP_SAMPLE_FREQ_32KHZ = 32000,
+ MSP_SAMPLE_FREQ_44KHZ = 44000,
+ MSP_SAMPLE_FREQ_48KHZ = 48000,
+ MSP_SAMPLE_FREQ_64KHZ = 64000,
+ MSP_SAMPLE_FREQ_88KHZ = 88000,
+ MSP_SAMPLE_FREQ_96KHZ = 96000,
+ MSP_SAMPLE_FREQ_22KHZ = 22000,
+ MSP_SAMPLE_FREQ_11KHZ = 11000
+};
+
+/*** Input Frequencies ***/
+/* These are no longer required, frequencies in Hz can be used directly */
+enum msp_in_clock_freq {
+ MSP_INPUT_FREQ_1MHZ = 1000,
+ MSP_INPUT_FREQ_2MHZ = 2000,
+ MSP_INPUT_FREQ_3MHZ = 3000,
+ MSP_INPUT_FREQ_4MHZ = 4000,
+ MSP_INPUT_FREQ_5MHZ = 5000,
+ MSP_INPUT_FREQ_6MHZ = 6000,
+ MSP_INPUT_FREQ_8MHZ = 8000,
+ MSP_INPUT_FREQ_11MHZ = 11000,
+ MSP_INPUT_FREQ_12MHZ = 12000,
+ MSP_INPUT_FREQ_16MHZ = 16000,
+ MSP_INPUT_FREQ_22MHZ = 22000,
+ MSP_INPUT_FREQ_24MHZ = 24000,
+ MSP_INPUT_FREQ_48MHZ = 48000
+};
+
+#define MSP_INPUT_FREQ_APB 48000000
+
+/*** Stereo mode. Used for APB data accesses as 16 bits accesses (mono),
+ * 32 bits accesses (stereo).
+ ***/
+enum msp_stereo_mode {
+ MSP_MONO,
+ MSP_STEREO
+};
+
+/* Direction (Transmit/Receive mode) */
+enum msp_direction {
+ MSP_TRANSMIT_MODE,
+ MSP_RECEIVE_MODE,
+ MSP_BOTH_T_R_MODE
+};
+
+/* Dma mode should be used for large transfers,
+ * polling mode should be used for transfers of a few bytes
+ */
+enum msp_xfer_mode {
+ MSP_DMA_MODE,
+ MSP_POLLING_MODE,
+ MSP_INTERRUPT_MODE
+};
+
+/* User client for the MSP */
+enum msp_user {
+ MSP_NO_USER = 0,
+ MSP_USER_SPI,
+ MSP_USER_ALSA,
+ MSP_USER_SAA,
+};
+
+/*Flag structure for MSPx*/
+struct msp_flag {
+ struct semaphore lock;
+ enum msp_user user;
+};
+
+/* User client for the MSP */
+enum msp_mode {
+ MSP_NO_MODE = 0,
+ MSP_MODE_SPI,
+ MSP_MODE_NON_SPI,
+};
+
+/* Transmit and receive configuration register */
+#define MSP_BIG_ENDIAN 0x00000000
+#define MSP_LITTLE_ENDIAN 0x00001000
+#define MSP_UNEXPECTED_FS_ABORT 0x00000000
+#define MSP_UNEXPECTED_FS_IGNORE 0x00008000
+#define MSP_NON_MODE_BIT_MASK 0x00009000
+
+/* Global configuration register */
+#define RX_ENABLE 0x00000001
+#define RX_FIFO_ENABLE 0x00000002
+#define RX_SYNC_SRG 0x00000010
+#define RX_CLK_POL_RISING 0x00000020
+#define RX_CLK_SEL_SRG 0x00000040
+#define TX_ENABLE 0x00000100
+#define TX_FIFO_ENABLE 0x00000200
+#define TX_SYNC_SRG_PROG 0x00001800
+#define TX_SYNC_SRG_AUTO 0x00001000
+#define TX_CLK_POL_RISING 0x00002000
+#define TX_CLK_SEL_SRG 0x00004000
+#define TX_EXTRA_DELAY_ENABLE 0x00008000
+#define SRG_ENABLE 0x00010000
+#define FRAME_GEN_ENABLE 0x00100000
+#define SRG_CLK_SEL_APB 0x00000000
+#define RX_FIFO_SYNC_HI 0x00000000
+#define TX_FIFO_SYNC_HI 0x00000000
+#define SPI_CLK_MODE_NORMAL 0x00000000
+
+/* SPI clock modes enumeration
+ * The SPI clock modes of the MSP provide compatibility with
+ * the SPI protocol. The MSP supports 2 SPI transfer formats.
+ * MSP_SPI_CLOCK_MODE_ZERO_DELAY: MSP transmits data over the Tx/Rx
+ * lines immediately after the MSPTCK/MSPRCK rising/falling edge.
+ * MSP_SPI_CLOCK_MODE_HALF_CYCLE_DELAY: MSP transmits data one-half cycle
+ * ahead of the rising/falling edge of MSPTCK.
+ */
+
+#define MSP_FRAME_SIZE_AUTO -1
+
+
+#define MSP_DR 0x00
+#define MSP_GCR 0x04
+#define MSP_TCF 0x08
+#define MSP_RCF 0x0c
+#define MSP_SRG 0x10
+#define MSP_FLR 0x14
+#define MSP_DMACR 0x18
+
+#define MSP_IMSC 0x20
+#define MSP_RIS 0x24
+#define MSP_MIS 0x28
+#define MSP_ICR 0x2c
+#define MSP_MCR 0x30
+#define MSP_RCV 0x34
+#define MSP_RCM 0x38
+
+#define MSP_TCE0 0x40
+#define MSP_TCE1 0x44
+#define MSP_TCE2 0x48
+#define MSP_TCE3 0x4c
+
+#define MSP_RCE0 0x60
+#define MSP_RCE1 0x64
+#define MSP_RCE2 0x68
+#define MSP_RCE3 0x6c
+#define MSP_IODLY 0x70
+
+#define MSP_ITCR 0x80
+#define MSP_ITIP 0x84
+#define MSP_ITOP 0x88
+#define MSP_TSTDR 0x8c
+
+#define MSP_PID0 0xfe0
+#define MSP_PID1 0xfe4
+#define MSP_PID2 0xfe8
+#define MSP_PID3 0xfec
+
+#define MSP_CID0 0xff0
+#define MSP_CID1 0xff4
+#define MSP_CID2 0xff8
+#define MSP_CID3 0xffc
+
+/* Single or dual phase mode */
+enum msp_phase_mode {
+ MSP_SINGLE_PHASE,
+ MSP_DUAL_PHASE
+};
+
+/* Frame length */
+enum msp_frame_length {
+ MSP_FRAME_LENGTH_1 = 0,
+ MSP_FRAME_LENGTH_2 = 1,
+ MSP_FRAME_LENGTH_4 = 3,
+ MSP_FRAME_LENGTH_8 = 7,
+ MSP_FRAME_LENGTH_12 = 11,
+ MSP_FRAME_LENGTH_16 = 15,
+ MSP_FRAME_LENGTH_20 = 19,
+ MSP_FRAME_LENGTH_32 = 31,
+ MSP_FRAME_LENGTH_48 = 47,
+ MSP_FRAME_LENGTH_64 = 63
+};
+
+/* Element length */
+enum msp_elem_length {
+ MSP_ELEM_LENGTH_8 = 0,
+ MSP_ELEM_LENGTH_10 = 1,
+ MSP_ELEM_LENGTH_12 = 2,
+ MSP_ELEM_LENGTH_14 = 3,
+ MSP_ELEM_LENGTH_16 = 4,
+ MSP_ELEM_LENGTH_20 = 5,
+ MSP_ELEM_LENGTH_24 = 6,
+ MSP_ELEM_LENGTH_32 = 7
+};
+
+enum msp_data_xfer_width {
+ MSP_DATA_TRANSFER_WIDTH_BYTE,
+ MSP_DATA_TRANSFER_WIDTH_HALFWORD,
+ MSP_DATA_TRANSFER_WIDTH_WORD
+};
+
+enum msp_frame_sync {
+ MSP_FRAME_SYNC_UNIGNORE = 0,
+ MSP_FRAME_SYNC_IGNORE = 1,
+
+};
+
+enum msp_phase2_start_mode {
+ MSP_PHASE2_START_MODE_IMEDIATE,
+ MSP_PHASE2_START_MODE_FRAME_SYNC
+};
+
+enum msp_btf {
+ MSP_BTF_MS_BIT_FIRST = 0,
+ MSP_BTF_LS_BIT_FIRST = 1
+};
+
+enum msp_frame_sync_pol {
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH = 0,
+ MSP_FRAME_SYNC_POL_ACTIVE_LOW = 1
+};
+
+/* Data delay (in bit clock cycles) */
+enum msp_delay {
+ MSP_DELAY_0 = 0,
+ MSP_DELAY_1 = 1,
+ MSP_DELAY_2 = 2,
+ MSP_DELAY_3 = 3
+};
+
+/* Configurations of clocks (transmit, receive or sample rate generator) */
+enum msp_edge {
+ MSP_FALLING_EDGE = 0,
+ MSP_RISING_EDGE = 1,
+};
+
+enum msp_hws {
+ MSP_HWS_NO_SWAP = 0,
+ MSP_HWS_BYTE_SWAP_IN_WORD = 1,
+ MSP_HWS_BYTE_SWAP_IN_EACH_HALF_WORD = 2,
+ MSP_HWS_HALF_WORD_SWAP_IN_WORD = 3
+};
+
+enum msp_compress_mode {
+ MSP_COMPRESS_MODE_LINEAR = 0,
+ MSP_COMPRESS_MODE_MU_LAW = 2,
+ MSP_COMPRESS_MODE_A_LAW = 3
+};
+
+enum msp_spi_clock_mode {
+ MSP_SPI_CLOCK_MODE_NON_SPI = 0,
+ MSP_SPI_CLOCK_MODE_ZERO_DELAY = 2,
+ MSP_SPI_CLOCK_MODE_HALF_CYCLE_DELAY = 3
+};
+
+enum msp_spi_burst_mode {
+ MSP_SPI_BURST_MODE_DISABLE = 0,
+ MSP_SPI_BURST_MODE_ENABLE = 1
+};
+
+enum msp_expand_mode {
+ MSP_EXPAND_MODE_LINEAR = 0,
+ MSP_EXPAND_MODE_LINEAR_SIGNED = 1,
+ MSP_EXPAND_MODE_MU_LAW = 2,
+ MSP_EXPAND_MODE_A_LAW = 3
+};
+
+/* Protocol dependent parameters list */
+#define RX_ENABLE_MASK BIT(0)
+#define RX_FIFO_ENABLE_MASK BIT(1)
+#define RX_FRAME_SYNC_MASK BIT(2)
+#define DIRECT_COMPANDING_MASK BIT(3)
+#define RX_SYNC_SEL_MASK BIT(4)
+#define RX_CLK_POL_MASK BIT(5)
+#define RX_CLK_SEL_MASK BIT(6)
+#define LOOPBACK_MASK BIT(7)
+#define TX_ENABLE_MASK BIT(8)
+#define TX_FIFO_ENABLE_MASK BIT(9)
+#define TX_FRAME_SYNC_MASK BIT(10)
+#define TX_MSP_TDR_TSR BIT(11)
+#define TX_SYNC_SEL_MASK (BIT(12) | BIT(11))
+#define TX_CLK_POL_MASK BIT(13)
+#define TX_CLK_SEL_MASK BIT(14)
+#define TX_EXTRA_DELAY_MASK BIT(15)
+#define SRG_ENABLE_MASK BIT(16)
+#define SRG_CLK_POL_MASK BIT(17)
+#define SRG_CLK_SEL_MASK (BIT(19) | BIT(18))
+#define FRAME_GEN_EN_MASK BIT(20)
+#define SPI_CLK_MODE_MASK (BIT(22) | BIT(21))
+#define SPI_BURST_MODE_MASK BIT(23)
+
+#define RXEN_SHIFT 0
+#define RFFEN_SHIFT 1
+#define RFSPOL_SHIFT 2
+#define DCM_SHIFT 3
+#define RFSSEL_SHIFT 4
+#define RCKPOL_SHIFT 5
+#define RCKSEL_SHIFT 6
+#define LBM_SHIFT 7
+#define TXEN_SHIFT 8
+#define TFFEN_SHIFT 9
+#define TFSPOL_SHIFT 10
+#define TFSSEL_SHIFT 11
+#define TCKPOL_SHIFT 13
+#define TCKSEL_SHIFT 14
+#define TXDDL_SHIFT 15
+#define SGEN_SHIFT 16
+#define SCKPOL_SHIFT 17
+#define SCKSEL_SHIFT 18
+#define FGEN_SHIFT 20
+#define SPICKM_SHIFT 21
+#define TBSWAP_SHIFT 28
+
+#define RCKPOL_MASK BIT(0)
+#define TCKPOL_MASK BIT(0)
+#define SPICKM_MASK (BIT(1) | BIT(0))
+#define MSP_RX_CLKPOL_BIT(n) ((n & RCKPOL_MASK) << RCKPOL_SHIFT)
+#define MSP_TX_CLKPOL_BIT(n) ((n & TCKPOL_MASK) << TCKPOL_SHIFT)
+#define MSP_SPI_CLK_MODE_BITS(n) ((n & SPICKM_MASK) << SPICKM_SHIFT)
+
+
+
+/* Use this to clear the clock mode bits to non-spi */
+#define MSP_NON_SPI_CLK_MASK (BIT(22) | BIT(21))
+
+#define P1ELEN_SHIFT 0
+#define P1FLEN_SHIFT 3
+#define DTYP_SHIFT 10
+#define ENDN_SHIFT 12
+#define DDLY_SHIFT 13
+#define FSIG_SHIFT 15
+#define P2ELEN_SHIFT 16
+#define P2FLEN_SHIFT 19
+#define P2SM_SHIFT 26
+#define P2EN_SHIFT 27
+#define FRAME_SYNC_SHIFT 15
+
+
+#define P1ELEN_MASK 0x00000007
+#define P2ELEN_MASK 0x00070000
+#define P1FLEN_MASK 0x00000378
+#define P2FLEN_MASK 0x03780000
+#define DDLY_MASK 0x00003000
+#define DTYP_MASK 0x00000600
+#define P2SM_MASK 0x04000000
+#define P2EN_MASK 0x08000000
+#define ENDN_MASK 0x00001000
+#define TFSPOL_MASK 0x00000400
+#define TBSWAP_MASK 0x30000000
+#define COMPANDING_MODE_MASK 0x00000c00
+#define FRAME_SYNC_MASK 0x00008000
+
+#define MSP_P1_ELEM_LEN_BITS(n) (n & P1ELEN_MASK)
+#define MSP_P2_ELEM_LEN_BITS(n) (((n) << P2ELEN_SHIFT) & P2ELEN_MASK)
+#define MSP_P1_FRAME_LEN_BITS(n) (((n) << P1FLEN_SHIFT) & P1FLEN_MASK)
+#define MSP_P2_FRAME_LEN_BITS(n) (((n) << P2FLEN_SHIFT) & P2FLEN_MASK)
+#define MSP_DATA_DELAY_BITS(n) (((n) << DDLY_SHIFT) & DDLY_MASK)
+#define MSP_DATA_TYPE_BITS(n) (((n) << DTYP_SHIFT) & DTYP_MASK)
+#define MSP_P2_START_MODE_BIT(n) ((n << P2SM_SHIFT) & P2SM_MASK)
+#define MSP_P2_ENABLE_BIT(n) ((n << P2EN_SHIFT) & P2EN_MASK)
+#define MSP_SET_ENDIANNES_BIT(n) ((n << ENDN_SHIFT) & ENDN_MASK)
+#define MSP_FRAME_SYNC_POL(n) ((n << TFSPOL_SHIFT) & TFSPOL_MASK)
+#define MSP_DATA_WORD_SWAP(n) ((n << TBSWAP_SHIFT) & TBSWAP_MASK)
+#define MSP_SET_COMPANDING_MODE(n) ((n << DTYP_SHIFT) & COMPANDING_MODE_MASK)
+#define MSP_SET_FRAME_SYNC_IGNORE(n) ((n << FRAME_SYNC_SHIFT) & \
+ FRAME_SYNC_MASK)
+
+/* Flag register */
+#define RX_BUSY BIT(0)
+#define RX_FIFO_EMPTY BIT(1)
+#define RX_FIFO_FULL BIT(2)
+#define TX_BUSY BIT(3)
+#define TX_FIFO_EMPTY BIT(4)
+#define TX_FIFO_FULL BIT(5)
+
+#define RBUSY_SHIFT 0
+#define RFE_SHIFT 1
+#define RFU_SHIFT 2
+#define TBUSY_SHIFT 3
+#define TFE_SHIFT 4
+#define TFU_SHIFT 5
+
+/* Multichannel control register */
+#define RMCEN_SHIFT 0
+#define RMCSF_SHIFT 1
+#define RCMPM_SHIFT 3
+#define TMCEN_SHIFT 5
+#define TNCSF_SHIFT 6
+
+/* Sample rate generator register */
+#define SCKDIV_SHIFT 0
+#define FRWID_SHIFT 10
+#define FRPER_SHIFT 16
+
+#define SCK_DIV_MASK 0x0000003FF
+#define FRAME_WIDTH_BITS(n) (((n) << FRWID_SHIFT) & 0x0000FC00)
+#define FRAME_PERIOD_BITS(n) (((n) << FRPER_SHIFT) & 0x1FFF0000)
+
+/* DMA controller register */
+#define RX_DMA_ENABLE BIT(0)
+#define TX_DMA_ENABLE BIT(1)
+
+#define RDMAE_SHIFT 0
+#define TDMAE_SHIFT 1
+
+/* Interrupt Register */
+#define RECEIVE_SERVICE_INT BIT(0)
+#define RECEIVE_OVERRUN_ERROR_INT BIT(1)
+#define RECEIVE_FRAME_SYNC_ERR_INT BIT(2)
+#define RECEIVE_FRAME_SYNC_INT BIT(3)
+#define TRANSMIT_SERVICE_INT BIT(4)
+#define TRANSMIT_UNDERRUN_ERR_INT BIT(5)
+#define TRANSMIT_FRAME_SYNC_ERR_INT BIT(6)
+#define TRANSMIT_FRAME_SYNC_INT BIT(7)
+#define ALL_INT 0x000000ff
+
+/* MSP test control register */
+#define MSP_ITCR_ITEN BIT(0)
+#define MSP_ITCR_TESTFIFO BIT(1)
+
+/*
+ * Protocol configuration values I2S:
+ * Single phase, 16 bits, 2 words per frame
+ */
+#define I2S_PROTOCOL_DESC \
+{ \
+ MSP_SINGLE_PHASE, \
+ MSP_SINGLE_PHASE, \
+ MSP_PHASE2_START_MODE_IMEDIATE, \
+ MSP_PHASE2_START_MODE_IMEDIATE, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_ELEM_LENGTH_32, \
+ MSP_ELEM_LENGTH_32, \
+ MSP_ELEM_LENGTH_32, \
+ MSP_ELEM_LENGTH_32, \
+ MSP_DELAY_1, \
+ MSP_DELAY_1, \
+ MSP_RISING_EDGE, \
+ MSP_FALLING_EDGE, \
+ MSP_FRAME_SYNC_POL_ACTIVE_LOW, \
+ MSP_FRAME_SYNC_POL_ACTIVE_LOW, \
+ MSP_HWS_NO_SWAP, \
+ MSP_HWS_NO_SWAP, \
+ MSP_COMPRESS_MODE_LINEAR, \
+ MSP_EXPAND_MODE_LINEAR, \
+ MSP_SPI_CLOCK_MODE_NON_SPI, \
+ MSP_SPI_BURST_MODE_DISABLE, \
+ MSP_FRAME_SYNC_IGNORE, \
+ 31, \
+ 15, \
+ 32, \
+}
+
+#define PCM_PROTOCOL_DESC \
+{ \
+ MSP_DUAL_PHASE, \
+ MSP_DUAL_PHASE, \
+ MSP_PHASE2_START_MODE_FRAME_SYNC, \
+ MSP_PHASE2_START_MODE_FRAME_SYNC, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_ELEM_LENGTH_16, \
+ MSP_ELEM_LENGTH_16, \
+ MSP_ELEM_LENGTH_16, \
+ MSP_ELEM_LENGTH_16, \
+ MSP_DELAY_0, \
+ MSP_DELAY_0, \
+ MSP_RISING_EDGE, \
+ MSP_FALLING_EDGE, \
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH, \
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH, \
+ MSP_HWS_NO_SWAP, \
+ MSP_HWS_NO_SWAP, \
+ MSP_COMPRESS_MODE_LINEAR, \
+ MSP_EXPAND_MODE_LINEAR, \
+ MSP_SPI_CLOCK_MODE_NON_SPI, \
+ MSP_SPI_BURST_MODE_DISABLE, \
+ MSP_FRAME_SYNC_IGNORE, \
+ 255, \
+ 0, \
+ 256, \
+}
+
+/* Companded PCM: Single phase, 8 bits, 1 word per frame */
+#define PCM_COMPAND_PROTOCOL_DESC \
+{ \
+ MSP_SINGLE_PHASE, \
+ MSP_SINGLE_PHASE, \
+ MSP_PHASE2_START_MODE_FRAME_SYNC, \
+ MSP_PHASE2_START_MODE_FRAME_SYNC, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_DELAY_0, \
+ MSP_DELAY_0, \
+ MSP_RISING_EDGE, \
+ MSP_RISING_EDGE, \
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH, \
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH, \
+ MSP_HWS_NO_SWAP, \
+ MSP_HWS_NO_SWAP, \
+ MSP_COMPRESS_MODE_LINEAR, \
+ MSP_EXPAND_MODE_LINEAR, \
+ MSP_SPI_CLOCK_MODE_NON_SPI, \
+ MSP_SPI_BURST_MODE_DISABLE, \
+ MSP_FRAME_SYNC_IGNORE, \
+ 255, \
+ 0, \
+ 256, \
+}
+
+/*
+ * AC97: Double phase, 1 element of 16 bits during first phase,
+ * 12 elements of 20 bits in second phase.
+ */
+#define AC97_PROTOCOL_DESC \
+{ \
+ MSP_DUAL_PHASE, \
+ MSP_DUAL_PHASE, \
+ MSP_PHASE2_START_MODE_FRAME_SYNC, \
+ MSP_PHASE2_START_MODE_FRAME_SYNC, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_12, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_12, \
+ MSP_ELEM_LENGTH_16, \
+ MSP_ELEM_LENGTH_20, \
+ MSP_ELEM_LENGTH_16, \
+ MSP_ELEM_LENGTH_20, \
+ MSP_DELAY_1, \
+ MSP_DELAY_1, \
+ MSP_RISING_EDGE, \
+ MSP_RISING_EDGE, \
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH, \
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH, \
+ MSP_HWS_NO_SWAP, \
+ MSP_HWS_NO_SWAP, \
+ MSP_COMPRESS_MODE_LINEAR, \
+ MSP_EXPAND_MODE_LINEAR, \
+ MSP_SPI_CLOCK_MODE_NON_SPI, \
+ MSP_SPI_BURST_MODE_DISABLE, \
+ MSP_FRAME_SYNC_IGNORE, \
+ 255, \
+ 0, \
+ 256, \
+}
+
+#define SPI_MASTER_PROTOCOL_DESC \
+{ \
+ MSP_SINGLE_PHASE, \
+ MSP_SINGLE_PHASE, \
+ MSP_PHASE2_START_MODE_FRAME_SYNC, \
+ MSP_PHASE2_START_MODE_FRAME_SYNC, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_DELAY_1, \
+ MSP_DELAY_1, \
+ MSP_FALLING_EDGE, \
+ MSP_FALLING_EDGE, \
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH, \
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH, \
+ MSP_HWS_NO_SWAP, \
+ MSP_HWS_NO_SWAP, \
+ MSP_COMPRESS_MODE_LINEAR, \
+ MSP_EXPAND_MODE_LINEAR, \
+ MSP_SPI_CLOCK_MODE_NON_SPI, \
+ MSP_SPI_BURST_MODE_DISABLE, \
+ MSP_FRAME_SYNC_IGNORE, \
+ 255, \
+ 0, \
+ 256, \
+}
+
+#define SPI_SLAVE_PROTOCOL_DESC \
+{ \
+ MSP_SINGLE_PHASE, \
+ MSP_SINGLE_PHASE, \
+ MSP_PHASE2_START_MODE_FRAME_SYNC, \
+ MSP_PHASE2_START_MODE_FRAME_SYNC, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_BTF_MS_BIT_FIRST, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_FRAME_LENGTH_1, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_ELEM_LENGTH_8, \
+ MSP_DELAY_1, \
+ MSP_DELAY_1, \
+ MSP_FALLING_EDGE, \
+ MSP_FALLING_EDGE, \
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH, \
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH, \
+ MSP_HWS_NO_SWAP, \
+ MSP_HWS_NO_SWAP, \
+ MSP_COMPRESS_MODE_LINEAR, \
+ MSP_EXPAND_MODE_LINEAR, \
+ MSP_SPI_CLOCK_MODE_NON_SPI, \
+ MSP_SPI_BURST_MODE_DISABLE, \
+ MSP_FRAME_SYNC_IGNORE, \
+ 255, \
+ 0, \
+ 256, \
+}
+
+#define MSP_FRAME_PERIOD_IN_MONO_MODE 256
+#define MSP_FRAME_PERIOD_IN_STEREO_MODE 32
+#define MSP_FRAME_WIDTH_IN_STEREO_MODE 16
+
+/*
+ * No of registers to backup during
+ * suspend resume
+ */
+#define MAX_MSP_BACKUP_REGS 36
+
+enum enum_i2s_controller {
+ MSP_0_I2S_CONTROLLER = 0,
+ MSP_1_I2S_CONTROLLER,
+ MSP_2_I2S_CONTROLLER,
+ MSP_3_I2S_CONTROLLER,
+};
+
+/**
+ * struct msp - Main msp controller data structure per MSP.
+ * @work_mode: Mode, i.e. dma, polling or interrupt.
+ * @id: Controller id like MSP1 or MSP2 etc.
+ * @msp_io_error: To indicate error while transferring.
+ * @registers: MSP's register base address.
+ * @actual_data_size: Data size in which data needs to send or receive.
+ * @irq: MSP's irq number.
+ * @i2s_cont: MSP's Controller's structure pointer created per MSP.
+ * @lock: semaphore lock acquired while configuring msp.
+ * @dma_cfg_tx: TX DMA configuration
+ * @dma_cfg_rx: RX DMA configuration
+ * @tx_pipeid: TX DMA channel
+ * @rx_pipeid: RX DMA channel
+ * @msp_state: Current state of msp.
+ * @read: Function pointer for read, u8_msp_read,u16_msp_read,u32_msp_read.
+ * @write: Function pointer for write, u8_msp_write,u16_msp_write,u32_msp_write.
+ * @transfer: Function pointer for type of transfer, i.e. dma, polling or interrupt.
+ * @xfer_data: MSP's transfer data structure. Contains info about current xfer.
+ * @plat_init: MSP's initialization function.
+ * @plat_exit: MSP's Exit function.
+ * @notify_timer: Timer used in Polling mode to prevent hang.
+ * @polling_flag: Flag used in error handling while polling.
+ * @def_elem_len: Flag indicates whether default elem len to be used in
+ * protocol_desc or not.
+ * @reg_enabled: Flag indicates whether regulator has been enabled or not.
+ * @vape_opp_constraint: 1 if constraint is applied to have vape at 100OPP; 0 otherwise
+ * @infinite: true if an infinite transfer has been configured
+ *
+ * Main MSP private data structure used to store various info of a
+ * particular MSP.
+ */
+struct msp {
+ int work_mode;
+ enum enum_i2s_controller id;
+ int msp_io_error;
+ void __iomem *registers;
+ enum msp_data_size actual_data_size;
+ struct device *dev;
+ int irq;
+ struct i2s_controller *i2s_cont;
+ struct semaphore lock;
+ struct stedma40_chan_cfg *dma_cfg_rx;
+ struct stedma40_chan_cfg *dma_cfg_tx;
+ struct dma_chan *tx_pipeid;
+ struct dma_chan *rx_pipeid;
+ enum msp_state msp_state;
+ void (*read) (struct trans_data *xfer_data);
+ void (*write) (struct trans_data *xfer_data);
+ int (*transfer) (struct msp *msp, struct i2s_message *message);
+ struct trans_data xfer_data;
+ int (*plat_init) (void);
+ int (*plat_exit) (void);
+ struct timer_list notify_timer;
+ int polling_flag;
+ int def_elem_len;
+ struct clk *clk;
+ unsigned int direction;
+ int users;
+ int reg_enabled;
+ int loopback_enable;
+ u32 backup_regs[MAX_MSP_BACKUP_REGS];
+ int vape_opp_constraint;
+ bool infinite;
+};
+
+/**
+ * struct msp_i2s_platform_data - Main msp controller platform data structure.
+ * @id: Controller id like MSP1 or MSP2 etc.
+ * @msp_i2s_dma_rx: RX DMA channel config
+ * @msp_i2s_dma_tx: TX DMA channel config
+ * @msp_i2s_init: MSP's initialization function.
+ * @msp_i2s_exit: MSP's Exit function.
+ * @backup_regs: used for backup registers during suspend resume.
+ *
+ * Platform data structure passed by devices.c file.
+ */
+struct msp_i2s_platform_data {
+ enum enum_i2s_controller id;
+ struct stedma40_chan_cfg *msp_i2s_dma_rx;
+ struct stedma40_chan_cfg *msp_i2s_dma_tx;
+ int (*msp_i2s_init) (void);
+ int (*msp_i2s_exit) (void);
+};
+
+#endif
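A hedged configuration sketch for an I2S transmit client, leaning on the built-in I2S protocol descriptor; the sample rate is a placeholder and data_size is left unset since its enumerators live in linux/spi/stm_msp.h.

static struct msp_config i2s_tx_config = {
	.input_clock_freq	= MSP_INPUT_FREQ_APB,
	.tx_clock_sel		= TX_CLK_SEL_SRG,
	.tx_frame_sync_sel	= TX_SYNC_SRG_AUTO,
	.tx_fifo_config		= TX_FIFO_ENABLE,
	.default_protocol_desc	= 1,	/* use I2S_PROTOCOL_DESC */
	.direction		= MSP_TRANSMIT_MODE,
	.work_mode		= MSP_DMA_MODE,
	.protocol		= MSP_I2S_PROTOCOL,
	.frame_freq		= 48000,	/* placeholder sample rate */
	.def_elem_len		= 1,
	/* .data_size is set from the stream format (see enum msp_data_size) */
};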
diff --git a/arch/arm/mach-ux500/include/mach/pm-timer.h b/arch/arm/mach-ux500/include/mach/pm-timer.h
new file mode 100644
index 00000000000..f5fafbbaa77
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/pm-timer.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ * License Terms: GNU General Public License v2
+ *
+ */
+
+#ifndef PM_TIMER_H
+#define PM_TIMER_H
+
+#include <linux/ktime.h>
+
+#ifdef CONFIG_UX500_CPUIDLE_DEBUG
+ktime_t u8500_rtc_exit_latency_get(void);
+void ux500_rtcrtt_measure_latency(bool enable);
+#else
+static inline ktime_t u8500_rtc_exit_latency_get(void)
+{
+ return ktime_set(0, 0);
+}
+static inline void ux500_rtcrtt_measure_latency(bool enable) { }
+
+#endif
+
+void ux500_rtcrtt_off(void);
+void ux500_rtcrtt_next(u32 time_us);
+
+#endif
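A small usage sketch of the RTC-RTT helpers around a low-power period; the surrounding function is hypothetical.

#include <linux/types.h>
#include <mach/pm-timer.h>

static void sleep_with_rtcrtt_wakeup(u32 sleep_time_us)
{
	/* Arm the RTC-RTT so we are woken when the next event is due. */
	ux500_rtcrtt_next(sleep_time_us);

	/* ... enter the low-power state here ... */

	/* Disarm the timer once we are running again. */
	ux500_rtcrtt_off();
}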
diff --git a/arch/arm/mach-ux500/include/mach/pm.h b/arch/arm/mach-ux500/include/mach/pm.h
new file mode 100644
index 00000000000..c6f1b0adca5
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/pm.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#ifndef PM_COMMON_H
+#define PM_COMMON_H
+
+#ifdef CONFIG_PM
+#include <linux/mfd/dbx500-prcmu.h>
+
+/**
+ * ux500_pm_gic_decouple()
+ *
+ * Decouple GIC from the interrupt bus.
+ */
+void ux500_pm_gic_decouple(void);
+
+/**
+ * ux500_pm_gic_recouple()
+ *
+ * Recouple GIC with the interrupt bus.
+ */
+void ux500_pm_gic_recouple(void);
+
+/**
+ * ux500_pm_gic_pending_interrupt()
+ *
+ * returns true, if there are pending interrupts.
+ */
+bool ux500_pm_gic_pending_interrupt(void);
+
+/**
+ * ux500_pm_prcmu_pending_interrupt()
+ *
+ * Returns true if there are pending interrupts.
+ */
+bool ux500_pm_prcmu_pending_interrupt(void);
+
+/**
+ * ux500_pm_prcmu_set_ioforce()
+ *
+ * @enable: Enable/disable
+ *
+ * Enable/disable the gpio-ring
+ */
+void ux500_pm_prcmu_set_ioforce(bool enable);
+
+/**
+ * ux500_pm_prcmu_copy_gic_settings()
+ *
+ * This function copies all the gic interrupt settings to the prcmu.
+ * This is needed for the system to catch interrupts in ApIdle.
+ */
+void ux500_pm_prcmu_copy_gic_settings(void);
+
+/**
+ * ux500_pm_gpio_save_wake_up_status()
+ *
+ * This function is called when the prcmu has woken the ARM
+ * but before ioforce is disabled.
+ */
+void ux500_pm_gpio_save_wake_up_status(void);
+
+/**
+ * ux500_pm_gpio_read_wake_up_status()
+ *
+ * @bank_number: The gpio bank.
+ *
+ * Returns the WKS register settings for given bank number.
+ * The WKS register is cleared when ioforce is released therefore
+ * this function is needed.
+ */
+u32 ux500_pm_gpio_read_wake_up_status(unsigned int bank_number);
+
+/**
+ * ux500_pm_other_cpu_wfi()
+ *
+ * Returns true if the other CPU is in WFI.
+ */
+bool ux500_pm_other_cpu_wfi(void);
+
+struct dev_pm_domain;
+extern struct dev_pm_domain ux500_dev_power_domain;
+extern struct dev_pm_domain ux500_amba_dev_power_domain;
+
+#else
+static inline u32 ux500_pm_gpio_read_wake_up_status(unsigned int bank_number)
+{
+ return 0;
+}
+
+/**
+ * ux500_pm_prcmu_set_ioforce()
+ *
+ * @enable: Enable/disable
+ *
+ * Enable/disable the gpio-ring
+ */
+static inline void ux500_pm_prcmu_set_ioforce(bool enable) { }
+
+#endif
+
+extern int ux500_console_uart_gpio_pin;
+
+#endif
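A hypothetical idle-path sketch of the decouple/check/recouple sequence; the actual PRCMU programming and WFI are elided.

#include <linux/errno.h>
#include <mach/pm.h>

static int try_enter_deep_idle(void)
{
	/* Stop the GIC from forwarding interrupts to the CPUs. */
	ux500_pm_gic_decouple();

	/* If something is already pending, back out instead of sleeping. */
	if (ux500_pm_gic_pending_interrupt() ||
	    ux500_pm_prcmu_pending_interrupt()) {
		ux500_pm_gic_recouple();
		return -EAGAIN;
	}

	/* Let the PRCMU mirror the GIC settings so it can wake us up. */
	ux500_pm_prcmu_copy_gic_settings();

	/* ... program the PRCMU and execute WFI here ... */

	ux500_pm_gic_recouple();
	return 0;
}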
diff --git a/arch/arm/mach-ux500/include/mach/prcmu-debug.h b/arch/arm/mach-ux500/include/mach/prcmu-debug.h
new file mode 100644
index 00000000000..e468543fdef
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/prcmu-debug.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Martin Persson for ST-Ericsson
+ * Etienne Carriere <etienne.carriere@stericsson.com> for ST-Ericsson
+ *
+ */
+
+#ifndef PRCMU_DEBUG_H
+#define PRCMU_DEBUG_H
+
+#ifdef CONFIG_DBX500_PRCMU_DEBUG
+void prcmu_debug_ape_opp_log(u8 opp);
+void prcmu_debug_ddr_opp_log(u8 opp);
+void prcmu_debug_arm_opp_log(u8 opp);
+#else
+static inline void prcmu_debug_ape_opp_log(u8 opp) {}
+static inline void prcmu_debug_ddr_opp_log(u8 opp) {}
+static inline void prcmu_debug_arm_opp_log(u8 opp) {}
+#endif
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/reboot_reasons.h b/arch/arm/mach-ux500/include/mach/reboot_reasons.h
new file mode 100644
index 00000000000..4471c3c1f33
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/reboot_reasons.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Rickard Evertsson <rickard.evertsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Use this file to customize your reboot / sw reset reasons. Add, remove or
+ * modify reasons in reboot_reasons[].
+ * The reboot reasons will be saved to a secure location in TCDM memory and
+ * can be read at bootup by e.g. the bootloader, or at a later stage by
+ * userspace, since the code is exposed through sysfs.
+ */
+
+#ifndef _REBOOT_REASONS_H
+#define _REBOOT_REASONS_H
+
+/*
+ * These defines contains the codes that will be written down to a secure
+ * location before resetting. These values are exposed through a sysfs
+ * entry under /sys/socinfo, see mach-ux500/cpu-db8500.c
+ */
+#define SW_RESET_NO_ARGUMENT 0xBEEF
+#define SW_RESET_FACTORY_RESET 0x4242
+#define SW_RESET_CRASH 0xDEAD
+#define SW_RESET_NORMAL 0xc001
+#define SW_RESET_CHARGING 0xCAFE
+#define SW_RESET_COLDSTART 0x0
+#define SW_RESET_RECOVERY 0x5502
+#define SW_RESET_CHGONLY_EXIT 0xCAFF
+
+/*
+ * The array reboot_reasons[] is used when you want to map a string to a reboot
+ * reason code
+ */
+struct reboot_reason {
+ const char *reason;
+ u16 code;
+};
+
+extern struct reboot_reason reboot_reasons[];
+
+extern unsigned int reboot_reasons_size;
+
+u16 reboot_reason_code(const char *cmd);
+const char *reboot_reason_string(u16 code);
+
+#endif
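As the header comment explains, reboot_reasons[] maps a reboot command string to one of the 16-bit codes above. A hypothetical board-level table and its size, shown only to illustrate the intended use (the strings and entries are invented; the real table is provided elsewhere in mach-ux500):

/* Hypothetical example table; entries and strings are illustrative. */
struct reboot_reason reboot_reasons[] = {
	{"crash",	  SW_RESET_CRASH},
	{"factory-reset", SW_RESET_FACTORY_RESET},
	{"recovery",	  SW_RESET_RECOVERY},
	{"charging",	  SW_RESET_CHARGING},
	{"none",	  SW_RESET_NO_ARGUMENT},
};

unsigned int reboot_reasons_size = ARRAY_SIZE(reboot_reasons);

With such a table, reboot_reason_code("recovery") would return 0x5502 and reboot_reason_string(0x4242) would return "factory-reset".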
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index a7d363fdb4c..51a7287072f 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -14,6 +14,8 @@
#include <asm/mach/time.h>
#include <linux/init.h>
+extern void ux500_restart(char, const char *);
+
void __init ux500_map_io(void);
extern void __init u5500_map_io(void);
extern void __init u8500_map_io(void);
@@ -26,6 +28,7 @@ extern void __init ux500_init_irq(void);
extern void __init u5500_sdi_init(void);
extern void __init db5500_dma_init(void);
+extern void __init db8500_dma_init(void);
/* We re-use nomadik_timer for this platform */
extern void nmdk_timer_init(void);
diff --git a/arch/arm/mach-ux500/include/mach/sim_detect.h b/arch/arm/mach-ux500/include/mach/sim_detect.h
new file mode 100644
index 00000000000..4dae656b7e0
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/sim_detect.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright ST-Ericsson 2010 SA.
+ *
+ * Author: Bibek Basu <bibek.basu@stericsson.com>
+ * Licensed under GPLv2.
+ */
+
+#ifndef _AB8500_SIM_DETECT_H
+#define _AB8500_SIM_DETECT_H
+
+struct sim_detect_platform_data {
+ int irq_num;
+};
+
+#endif
diff --git a/arch/arm/mach-ux500/ste-dma40-db5500.h b/arch/arm/mach-ux500/include/mach/ste-dma40-db5500.h
index cb2110c3285..0ddd4ab9020 100644
--- a/arch/arm/mach-ux500/ste-dma40-db5500.h
+++ b/arch/arm/mach-ux500/include/mach/ste-dma40-db5500.h
@@ -42,7 +42,9 @@ enum dma_src_dev_type {
DB5500_DMA_DEV26_SDMMC2_RX = 26,
DB5500_DMA_DEV27_SDMMC3_RX = 27,
DB5500_DMA_DEV28_SDMMC4_RX = 28,
- /* 29 - 32 not used */
+ /* 29, 30 not used */
+ DB5500_DMA_DEV31_CRYPTO1_RX = 31, /* v2 */
+ /* 32 not used */
DB5500_DMA_DEV33_SDMMC0_RX = 33,
DB5500_DMA_DEV34_SDMMC1_RX = 34,
DB5500_DMA_DEV35_SDMMC2_RX = 35,
@@ -56,7 +58,7 @@ enum dma_src_dev_type {
DB5500_DMA_DEV43_USB_OTG_IEP_5_13 = 43,
DB5500_DMA_DEV44_USB_OTG_IEP_6_14 = 44,
DB5500_DMA_DEV45_USB_OTG_IEP_7_15 = 45,
- /* 46 not used */
+ DB5500_DMA_DEV46_CRYPTO1_RX = 46, /* v2 */
DB5500_DMA_DEV47_MCDE_RX = 47,
DB5500_DMA_DEV48_CRYPTO1_RX = 48,
/* 49, 50 not used */
@@ -98,7 +100,9 @@ enum dma_dest_dev_type {
DB5500_DMA_DEV26_SDMMC2_TX = 26,
DB5500_DMA_DEV27_SDMMC3_TX = 27,
DB5500_DMA_DEV28_SDMMC4_TX = 28,
- /* 29 - 31 not used */
+ /* 29 not used */
+ DB5500_DMA_DEV30_HASH1_TX = 30, /* v2 */
+ DB5500_DMA_DEV31_CRYPTO1_TX = 31, /* v2 */
DB5500_DMA_DEV32_FSMC_TX = 32,
DB5500_DMA_DEV33_SDMMC0_TX = 33,
DB5500_DMA_DEV34_SDMMC1_TX = 34,
@@ -113,7 +117,7 @@ enum dma_dest_dev_type {
DB5500_DMA_DEV43_USB_OTG_OEP_5_13 = 43,
DB5500_DMA_DEV44_USB_OTG_OEP_6_14 = 44,
DB5500_DMA_DEV45_USB_OTG_OEP_7_15 = 45,
- /* 46 not used */
+ DB5500_DMA_DEV46_CRYPTO1_TX = 46, /* v2 */
DB5500_DMA_DEV47_STM_TX = 47,
DB5500_DMA_DEV48_CRYPTO1_TX = 48,
DB5500_DMA_DEV49_CRYPTO1_TX_HASH1_TX = 49,
diff --git a/arch/arm/mach-ux500/ste-dma40-db8500.h b/arch/arm/mach-ux500/include/mach/ste-dma40-db8500.h
index a616419bea7..65799a75199 100644
--- a/arch/arm/mach-ux500/ste-dma40-db8500.h
+++ b/arch/arm/mach-ux500/include/mach/ste-dma40-db8500.h
@@ -1,16 +1,19 @@
/*
- * arch/arm/mach-ux500/ste_dma40_db8500.h
- * DB8500-SoC-specific configuration for DMA40
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ * DB8500-SoC-specific configuration for DMA40
*/
#ifndef STE_DMA40_DB8500_H
#define STE_DMA40_DB8500_H
#define DB8500_DMA_NR_DEV 64
+/*
+ * All entries with double names are multiplexed
+ * and can never be used at the same time.
+ */
enum dma_src_dev_type {
DB8500_DMA_DEV0_SPI0_RX = 0,
@@ -20,7 +23,7 @@ enum dma_src_dev_type {
DB8500_DMA_DEV4_I2C1_RX = 4,
DB8500_DMA_DEV5_I2C3_RX = 5,
DB8500_DMA_DEV6_I2C2_RX = 6,
- DB8500_DMA_DEV7_I2C4_RX = 7, /* Only on V1 and later */
+ DB8500_DMA_DEV7_I2C4_RX = 7,
DB8500_DMA_DEV8_SSP0_RX = 8,
DB8500_DMA_DEV9_SSP1_RX = 9,
DB8500_DMA_DEV10_MCDE_RX = 10,
@@ -43,8 +46,6 @@ enum dma_src_dev_type {
DB8500_DMA_DEV27_SRC_SXA3_RX_TX = 27,
DB8500_DMA_DEV28_SD_MM2_RX = 28,
DB8500_DMA_DEV29_SD_MM0_RX = 29,
- DB8500_DMA_DEV30_MSP1_RX = 30,
- /* On DB8500v2, MSP3 RX replaces MSP1 RX */
DB8500_DMA_DEV30_MSP3_RX = 30,
DB8500_DMA_DEV31_MSP0_RX_SLIM0_CH0_RX = 31,
DB8500_DMA_DEV32_SD_MM1_RX = 32,
@@ -82,7 +83,7 @@ enum dma_dest_dev_type {
DB8500_DMA_DEV4_I2C1_TX = 4,
DB8500_DMA_DEV5_I2C3_TX = 5,
DB8500_DMA_DEV6_I2C2_TX = 6,
- DB8500_DMA_DEV7_I2C4_TX = 7, /* Only on V1 and later */
+ DB8500_DMA_DEV7_I2C4_TX = 7,
DB8500_DMA_DEV8_SSP0_TX = 8,
DB8500_DMA_DEV9_SSP1_TX = 9,
/* 10 is not used*/
diff --git a/arch/arm/mach-ux500/include/mach/suspend.h b/arch/arm/mach-ux500/include/mach/suspend.h
new file mode 100644
index 00000000000..5a8df72be2e
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/suspend.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __MACH_SUSPEND_H
+#define __MACH_SUSPEND_H
+
+#ifdef CONFIG_UX500_SUSPEND
+void suspend_block_sleep(void);
+void suspend_unblock_sleep(void);
+void suspend_set_pins_force_fn(void (*force)(void), void (*force_mux)(void));
+#else
+static inline void suspend_block_sleep(void) { }
+static inline void suspend_unblock_sleep(void) { }
+static inline void suspend_set_pins_force_fn(void (*force)(void),
+ void (*force_mux)(void)) { }
+#endif
+
+#endif /* __MACH_SUSPEND_H */
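suspend_block_sleep() and suspend_unblock_sleep() let platform code hold the system out of ApSleep/ApDeepSleep across a critical region, and the stubs above make the calls safe when CONFIG_UX500_SUSPEND is disabled. A hedged usage sketch, with an invented caller:

#include <mach/suspend.h>

/* Illustrative only: keep the platform awake while time-critical work runs. */
static void example_do_critical_work(void)
{
	suspend_block_sleep();	/* compiles to a no-op without CONFIG_UX500_SUSPEND */

	/* ... work that must not race with suspend entry ... */

	suspend_unblock_sleep();
}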
diff --git a/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h
new file mode 100644
index 00000000000..2ac88edfe71
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/tee_ta_start_modem.h
@@ -0,0 +1,47 @@
+/*
+ * Data types and interface for TEE application for starting the modem.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef TEE_TA_START_MODEM_H
+#define TEE_TA_START_MODEM_H
+
+#define COMMAND_ID_START_MODEM 0x00000001
+
+#define UUID_TEE_TA_START_MODEM_LOW 0x8AD94107
+#define UUID_TEE_TA_START_MODEM_MID 0x6E50
+#define UUID_TEE_TA_START_MODEM_HIGH 0x418E
+#define UUID_TEE_TA_START_MODEM_CLOCKSEQ \
+ {0xB1, 0x14, 0x75, 0x7D, 0x60, 0x21, 0xBD, 0x36}
+
+struct mcore_segment_descr {
+ void *segment;
+ void *hash;
+ u32 size;
+};
+
+struct access_image_descr {
+ void *elf_hdr;
+ void *pgm_hdr_tbl;
+ void *signature;
+ unsigned long nbr_segment;
+ struct mcore_segment_descr *descr;
+};
+
+/* TODO: To be redefined with only info needed by Secure world. */
+struct tee_ta_start_modem {
+ void *access_mem_start;
+ u32 shared_mem_size;
+ u32 access_private_mem_size;
+ struct access_image_descr access_image_descr;
+};
+
+/**
+ * This is the function to handle the modem release.
+ */
+int tee_ta_start_modem(struct tee_ta_start_modem *data);
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/timex.h b/arch/arm/mach-ux500/include/mach/timex.h
index d0942c17401..0ba497bd9d7 100644
--- a/arch/arm/mach-ux500/include/mach/timex.h
+++ b/arch/arm/mach-ux500/include/mach/timex.h
@@ -2,5 +2,6 @@
#define __ASM_ARCH_TIMEX_H
#define CLOCK_TICK_RATE 110000000
+#define ARCH_HAS_READ_CURRENT_TIMER
#endif
diff --git a/arch/arm/mach-ux500/include/mach/usb.h b/arch/arm/mach-ux500/include/mach/usb.h
index d3739d41881..67fbd00e690 100644
--- a/arch/arm/mach-ux500/include/mach/usb.h
+++ b/arch/arm/mach-ux500/include/mach/usb.h
@@ -22,4 +22,14 @@ struct ux500_musb_board_data {
void ux500_add_usb(resource_size_t base, int irq, int *dma_rx_cfg,
int *dma_tx_cfg);
+
+struct abx500_usbgpio_platform_data {
+ int (*get)(struct device *device);
+ void (*enable)(void);
+ void (*disable)(void);
+ void (*put)(void);
+ int usb_cs;
+};
+
+void ux500_restore_context(void);
#endif
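The new abx500_usbgpio_platform_data structure lets board code hand the USB driver a set of callbacks for claiming, enabling and releasing the USB GPIOs, plus the chip-select GPIO number. A hypothetical board-side instantiation (all names and the GPIO number are invented for illustration):

/* Hypothetical board code filling in abx500_usbgpio_platform_data. */
static int example_usb_gpio_get(struct device *device)
{
	/* request and mux the USB GPIOs here */
	return 0;
}

static void example_usb_gpio_enable(void)  { /* drive the enable pin */ }
static void example_usb_gpio_disable(void) { /* release the enable pin */ }
static void example_usb_gpio_put(void)     { /* free the GPIOs */ }

static struct abx500_usbgpio_platform_data example_usb_gpio_pdata = {
	.get		= example_usb_gpio_get,
	.enable		= example_usb_gpio_enable,
	.disable	= example_usb_gpio_disable,
	.put		= example_usb_gpio_put,
	.usb_cs		= 76,	/* invented chip-select GPIO */
};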
diff --git a/arch/arm/mach-ux500/l2x0-prefetch.c b/arch/arm/mach-ux500/l2x0-prefetch.c
new file mode 100644
index 00000000000..48a4495533f
--- /dev/null
+++ b/arch/arm/mach-ux500/l2x0-prefetch.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/tee.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+#include <asm/hardware/cache-l2x0.h>
+
+static struct tee_session session;
+static struct tee_context context;
+static void __iomem *l2x0_base;
+
+#define L2X0_PREFETCH_CTRL_REG (0x00000F60)
+#define L2X0_PREFETCH_CTRL_BIT_DATA_EN (1 << 28)
+#define L2X0_PREFETCH_CTRL_BIT_INST_EN (1 << 29)
+
+#define L2X0_UUID_TEE_TA_START_LOW 0xBC765EDE
+#define L2X0_UUID_TEE_TA_START_MID 0x6724
+#define L2X0_UUID_TEE_TA_START_HIGH 0x11DF
+#define L2X0_UUID_TEE_TA_START_CLOCKSEQ \
+ {0x8E, 0x12, 0xEC, 0xDB, 0xDF, 0xD7, 0x20, 0x85}
+
+static void prefetch_enable(void)
+{
+ struct tee_operation operation;
+ u32 data;
+ int err;
+ int origin_err;
+
+ data = readl(l2x0_base + L2X0_PREFETCH_CTRL_REG);
+
+ pr_debug("l2x0-prefetch: %s start, preftect_ctrl=0x%08x\n", __func__,
+ data);
+ if (!(data & L2X0_PREFETCH_CTRL_BIT_INST_EN) ||
+ !(data & L2X0_PREFETCH_CTRL_BIT_DATA_EN)) {
+
+ data |= (L2X0_PREFETCH_CTRL_BIT_INST_EN |
+ L2X0_PREFETCH_CTRL_BIT_DATA_EN);
+
+ operation.shm[0].buffer = &data;
+ operation.shm[0].size = sizeof(data);
+ operation.shm[0].flags = TEEC_MEM_INPUT;
+ operation.flags = TEEC_MEMREF_0_USED;
+
+ err = teec_invoke_command(&session,
+ TEE_STA_SET_L2CC_PREFETCH_CTRL_REGISTER,
+ &operation, &origin_err);
+ if (err)
+ pr_err("l2x0-prefetch: prefetch enable failed, err=%d",
+ err);
+ }
+ pr_debug("l2x0-prefetch: %s end, prefetch_ctrl=0x%08x\n", __func__,
+ readl(l2x0_base + L2X0_PREFETCH_CTRL_REG));
+}
+
+static void prefetch_disable(void)
+{
+ struct tee_operation operation;
+ u32 data;
+ int err;
+ int origin_err;
+
+ data = readl(l2x0_base + L2X0_PREFETCH_CTRL_REG);
+
+ pr_debug("l2x0-prefetch: %s start, preftect_ctrl=0x%08x\n", __func__,
+ data);
+ if (data & (L2X0_PREFETCH_CTRL_BIT_INST_EN |
+ L2X0_PREFETCH_CTRL_BIT_DATA_EN)) {
+
+ data &= ~(L2X0_PREFETCH_CTRL_BIT_INST_EN |
+ L2X0_PREFETCH_CTRL_BIT_DATA_EN);
+
+ operation.shm[0].buffer = &data;
+ operation.shm[0].size = sizeof(data);
+ operation.shm[0].flags = TEEC_MEM_INPUT;
+ operation.flags = TEEC_MEMREF_0_USED;
+
+ err = teec_invoke_command(&session,
+ TEE_STA_SET_L2CC_PREFETCH_CTRL_REGISTER,
+ &operation, &origin_err);
+ if (err)
+ pr_err("l2x0-prefetch: prefetch disable failed, err=%d",
+ err);
+ }
+ pr_debug("l2x0-prefetch: %s end, prefetch_ctrl=0x%08x\n", __func__,
+ readl(l2x0_base + L2X0_PREFETCH_CTRL_REG));
+}
+
+static int __init prefetch_ctrl_init(void)
+{
+ int err;
+ int origin_err;
+ /* Selects trustzone application needed for the job. */
+ struct tee_uuid static_uuid = {
+ L2X0_UUID_TEE_TA_START_LOW,
+ L2X0_UUID_TEE_TA_START_MID,
+ L2X0_UUID_TEE_TA_START_HIGH,
+ L2X0_UUID_TEE_TA_START_CLOCKSEQ,
+ };
+
+ /* Get PL310 base address. It will be used as read-only. */
+ if (cpu_is_u5500())
+ l2x0_base = __io_address(U5500_L2CC_BASE);
+ else if (cpu_is_u8500())
+ l2x0_base = __io_address(U8500_L2CC_BASE);
+ else
+ ux500_unknown_soc();
+
+ err = teec_initialize_context(NULL, &context);
+ if (err) {
+ pr_err("l2x0-prefetch: unable to initialize tee context,"
+ " err = %d\n", err);
+ err = -EINVAL;
+ goto error0;
+ }
+
+ err = teec_open_session(&context, &session, &static_uuid,
+ TEEC_LOGIN_PUBLIC, NULL, NULL, &origin_err);
+ if (err) {
+ pr_err("l2x0-prefetch: unable to open tee session,"
+ " tee error = %d, origin error = %d\n",
+ err, origin_err);
+ err = -EINVAL;
+ goto error1;
+ }
+
+ outer_cache.prefetch_enable = prefetch_enable;
+ outer_cache.prefetch_disable = prefetch_disable;
+
+ pr_info("l2x0-prefetch: initialized.\n");
+
+ return 0;
+
+error1:
+ (void)teec_finalize_context(&context);
+error0:
+ return err;
+}
+
+static void __exit prefetch_ctrl_exit(void)
+{
+ outer_cache.prefetch_enable = NULL;
+ outer_cache.prefetch_disable = NULL;
+
+ (void)teec_close_session(&session);
+ (void)teec_finalize_context(&context);
+}
+
+/* Wait for TEE driver to be initialized. */
+late_initcall(prefetch_ctrl_init);
+module_exit(prefetch_ctrl_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PL310 prefetch control");
diff --git a/arch/arm/mach-ux500/mloader-db5500.c b/arch/arm/mach-ux500/mloader-db5500.c
new file mode 100644
index 00000000000..bc3a57af28b
--- /dev/null
+++ b/arch/arm/mach-ux500/mloader-db5500.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors: Jonas Aaberg <jonas.aberg@stericsson.com>
+ * Paer-Olof Haakansson <par-olof.hakansson@stericsson.com>
+ * for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+
+static ssize_t db5500_mloader_sysfs_addr(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static ssize_t db5500_mloader_sysfs_finalize(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+
+static ssize_t db5500_mloader_sysfs_itpmode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static DEVICE_ATTR(addr, S_IRUSR|S_IRGRP, db5500_mloader_sysfs_addr, NULL);
+static DEVICE_ATTR(finalize, S_IWUSR, NULL, db5500_mloader_sysfs_finalize);
+static DEVICE_ATTR(is_itpmode, S_IRUSR|S_IRGRP, db5500_mloader_sysfs_itpmode, NULL);
+
+static unsigned int db5500_bootargs_memmap_modem_start;
+static unsigned int db5500_bootargs_memmap_modem_total_size;
+static unsigned int db5500_mloader_itpmode;
+static unsigned int db5500_mloader_shm_total_size;
+module_param_named(shm_total_size, db5500_mloader_shm_total_size, uint, 0600);
+MODULE_PARM_DESC(shm_total_size, "Total Size of SHM shared memory");
+
+static int __init db5500_bootargs_modem_memmap(char *p)
+{
+ db5500_bootargs_memmap_modem_total_size = memparse(p, &p);
+ if (*p == '@')
+ db5500_bootargs_memmap_modem_start = memparse(p + 1, &p);
+
+ return 0;
+}
+early_param("mem_modem", db5500_bootargs_modem_memmap);
+
+static int __init db5500_bootargs_shm_total_size(char *str)
+{
+ unsigned long val;
+ if (strict_strtoul(str, 0, &val) < 0)
+ return -EINVAL;
+ db5500_mloader_shm_total_size = val;
+ return 1;
+}
+early_param("mloader.shm_total_size", db5500_bootargs_shm_total_size);
+
+static int __init db5500_bootargs_itpmode(char *p)
+{
+ const int count = 3;
+
+ if (!memcmp(p, "itp", count))
+ db5500_mloader_itpmode = true;
+ else
+ db5500_mloader_itpmode = false;
+ return 1;
+}
+early_param("modem_boot_type", db5500_bootargs_itpmode);
+
+static int __exit db5500_mloader_remove(struct platform_device *pdev)
+{
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_addr.attr);
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_finalize.attr);
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_is_itpmode.attr);
+
+ return 0;
+}
+
+
+static struct platform_driver db5500_mloader_driver = {
+ .driver = {
+ .name = "db5500_mloader",
+ },
+ .remove = __exit_p(db5500_mloader_remove),
+};
+
+struct db5500_mloader {
+ struct work_struct work;
+ struct platform_device *pdev;
+};
+
+static void db5500_mloader_clean_up(struct work_struct *work)
+{
+ struct db5500_mloader *m = container_of(work,
+ struct db5500_mloader,
+ work);
+
+ /* Remove this module */
+ platform_device_unregister(m->pdev);
+
+ platform_driver_unregister(&db5500_mloader_driver);
+ kfree(m);
+
+}
+
+static ssize_t db5500_mloader_sysfs_addr(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "0x%x 0x%x 0x%x\n",
+ db5500_bootargs_memmap_modem_start,
+ db5500_bootargs_memmap_modem_total_size,
+ db5500_mloader_shm_total_size);
+}
+
+static ssize_t db5500_mloader_sysfs_itpmode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "0x%x\n",
+ db5500_mloader_itpmode);
+}
+
+static ssize_t db5500_mloader_sysfs_finalize(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct db5500_mloader *m;
+
+ m = kmalloc(sizeof(struct db5500_mloader), GFP_KERNEL);
+ if (!m)
+ return -ENOMEM;
+
+ m->pdev = container_of(dev,
+ struct platform_device,
+ dev);
+
+ INIT_WORK(&m->work, db5500_mloader_clean_up);
+
+ /* The module cannot remove itself while it is executing a sysfs
+ * function, so the cleanup has to be deferred to a workqueue.
+ */
+ schedule_work(&m->work);
+
+ return count;
+}
+
+static void db5500_mloader_release(struct device *dev)
+{
+ /* Nothing to release */
+}
+
+static int __init db5500_mloader_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ pdev->dev.release = db5500_mloader_release;
+
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_addr.attr);
+ if (ret)
+ return ret;
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_finalize.attr);
+
+ if (ret) {
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_addr.attr);
+ return ret;
+ }
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_is_itpmode.attr);
+ if (ret) {
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_finalize.attr);
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_addr.attr);
+ return ret;
+ }
+ return 0;
+
+}
+
+static int __init db5500_mloader_init(void)
+{
+/*
+ * mloader for Fairbanks. It exports, in a sysfs file, the physical
+ * address where the modem-side ELF should be located, to make it
+ * available to a user space utility.
+ * When the mloader utility has picked up these settings, this module is no
+ * longer needed and can be removed by writing to the sysfs finalize file.
+ *
+ * The modem side should be loaded via mmap'ed /dev/mem.
+ */
+
+ return platform_driver_probe(&db5500_mloader_driver,
+ db5500_mloader_probe);
+}
+module_init(db5500_mloader_init);
+
+
+static void __exit mloader_exit(void)
+{
+ platform_driver_unregister(&db5500_mloader_driver);
+}
+module_exit(mloader_exit);
+
+MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-ux500/mloader-db8500.c b/arch/arm/mach-ux500/mloader-db8500.c
new file mode 100644
index 00000000000..b13652f55cb
--- /dev/null
+++ b/arch/arm/mach-ux500/mloader-db8500.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson
+ *
+ * Author: Maxime Coquelin <maxime.coquelin-nonst@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+#include <mach/mloader-dbx500.h>
+
+static struct dbx500_ml_area modem_areas[] = {
+ { .name = "modem_trace", .start = 0x6000000, .size = 0xf00000 },
+ { .name = "modem_shared", .start = 0x6f00000, .size = 0x100000 },
+ { .name = "modem_priv", .start = 0x7000000, .size = 0x1000000 },
+};
+
+static struct dbx500_ml_fw modem_fws[] = {
+ { .name = "MODEM", .area = &modem_areas[0], .offset = 0x0 },
+ { .name = "IPL", .area = &modem_areas[1], .offset = 0x00 },
+};
+
+static struct dbx500_mloader_pdata mloader_fw_data = {
+ .fws = modem_fws,
+ .nr_fws = ARRAY_SIZE(modem_fws),
+ .areas = modem_areas,
+ .nr_areas = ARRAY_SIZE(modem_areas),
+};
+
+struct platform_device mloader_fw_device = {
+ .name = "dbx500_mloader_fw",
+ .id = -1,
+ .dev = {
+ .platform_data = &mloader_fw_data,
+ },
+ .num_resources = 0,
+};
+
+/* Default areas can be overloaded in cmdline */
+static int __init early_modem_priv(char *p)
+{
+ struct dbx500_ml_area *area = &modem_areas[2];
+
+ area->size = memparse(p, &p);
+
+ if (*p == '@')
+ area->start = memparse(p + 1, &p);
+
+ return 0;
+}
+early_param("mem_modem", early_modem_priv);
+
+static int __init early_modem_shared(char *p)
+{
+ struct dbx500_ml_area *area = &modem_areas[1];
+
+ area->size = memparse(p, &p);
+
+ if (*p == '@')
+ area->start = memparse(p + 1, &p);
+
+ return 0;
+}
+early_param("mem_mshared", early_modem_shared);
+
+static int __init early_modem_trace(char *p)
+{
+ struct dbx500_ml_area *area = &modem_areas[0];
+
+ area->size = memparse(p, &p);
+
+ if (*p == '@')
+ area->start = memparse(p + 1, &p);
+
+ return 0;
+}
+early_param("mem_mtrace", early_modem_trace);
diff --git a/arch/arm/mach-ux500/modem-irq-db5500.c b/arch/arm/mach-ux500/modem-irq-db5500.c
index 6b86416c94c..7c2947af984 100644
--- a/arch/arm/mach-ux500/modem-irq-db5500.c
+++ b/arch/arm/mach-ux500/modem-irq-db5500.c
@@ -81,7 +81,7 @@ static irqreturn_t modem_cpu_irq_handler(int irq, void *data)
virt_irq);
if (virt_irq != 0)
- generic_handle_irq(virt_irq);
+ handle_nested_irq(virt_irq);
pr_debug("modem_irq: Done handling virtual IRQ %d!\n", virt_irq);
@@ -91,6 +91,7 @@ static irqreturn_t modem_cpu_irq_handler(int irq, void *data)
static void create_virtual_irq(int irq, struct irq_chip *modem_irq_chip)
{
irq_set_chip_and_handler(irq, modem_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
set_irq_flags(irq, IRQF_VALID);
pr_debug("modem_irq: Created virtual IRQ %d\n", irq);
@@ -131,7 +132,8 @@ static int modem_irq_init(void)
create_virtual_irq(MBOX_PAIR2_VIRT_IRQ, &modem_irq_chip);
err = request_threaded_irq(IRQ_DB5500_MODEM, NULL,
- modem_cpu_irq_handler, IRQF_ONESHOT,
+ modem_cpu_irq_handler,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
"modem_irq", mi);
if (err)
pr_err("modem_irq: Could not register IRQ %d\n",
diff --git a/arch/arm/mach-ux500/pins-db8500.h b/arch/arm/mach-ux500/pins-db8500.h
index 8b1d1a7a679..062c7acf457 100644
--- a/arch/arm/mach-ux500/pins-db8500.h
+++ b/arch/arm/mach-ux500/pins-db8500.h
@@ -35,40 +35,40 @@
#define GPIO4_GPIO PIN_CFG(4, GPIO)
#define GPIO4_U1_RXD PIN_CFG(4, ALT_A)
-#define GPIO4_I2C4_SCL PIN_CFG_INPUT(4, ALT_B, PULLUP)
+#define GPIO4_I2C4_SCL PIN_CFG(4, ALT_B)
#define GPIO4_IP_TRSTn PIN_CFG(4, ALT_C)
#define GPIO5_GPIO PIN_CFG(5, GPIO)
#define GPIO5_U1_TXD PIN_CFG(5, ALT_A)
-#define GPIO5_I2C4_SDA PIN_CFG_INPUT(5, ALT_B, PULLUP)
+#define GPIO5_I2C4_SDA PIN_CFG(5, ALT_B)
#define GPIO5_IP_GPIO6 PIN_CFG(5, ALT_C)
#define GPIO6_GPIO PIN_CFG(6, GPIO)
#define GPIO6_U1_CTSn PIN_CFG(6, ALT_A)
-#define GPIO6_I2C1_SCL PIN_CFG_INPUT(6, ALT_B, PULLUP)
+#define GPIO6_I2C1_SCL PIN_CFG(6, ALT_B)
#define GPIO6_IP_GPIO0 PIN_CFG(6, ALT_C)
#define GPIO7_GPIO PIN_CFG(7, GPIO)
#define GPIO7_U1_RTSn PIN_CFG(7, ALT_A)
-#define GPIO7_I2C1_SDA PIN_CFG_INPUT(7, ALT_B, PULLUP)
+#define GPIO7_I2C1_SDA PIN_CFG(7, ALT_B)
#define GPIO7_IP_GPIO1 PIN_CFG(7, ALT_C)
#define GPIO8_GPIO PIN_CFG(8, GPIO)
-#define GPIO8_IPI2C_SDA PIN_CFG_INPUT(8, ALT_A, PULLUP)
-#define GPIO8_I2C2_SDA PIN_CFG_INPUT(8, ALT_B, PULLUP)
+#define GPIO8_IPI2C_SDA PIN_CFG(8, ALT_A)
+#define GPIO8_I2C2_SDA PIN_CFG(8, ALT_B)
#define GPIO9_GPIO PIN_CFG(9, GPIO)
-#define GPIO9_IPI2C_SCL PIN_CFG_INPUT(9, ALT_A, PULLUP)
-#define GPIO9_I2C2_SCL PIN_CFG_INPUT(9, ALT_B, PULLUP)
+#define GPIO9_IPI2C_SCL PIN_CFG(9, ALT_A)
+#define GPIO9_I2C2_SCL PIN_CFG(9, ALT_B)
#define GPIO10_GPIO PIN_CFG(10, GPIO)
-#define GPIO10_IPI2C_SDA PIN_CFG_INPUT(10, ALT_A, PULLUP)
-#define GPIO10_I2C2_SDA PIN_CFG_INPUT(10, ALT_B, PULLUP)
+#define GPIO10_IPI2C_SDA PIN_CFG(10, ALT_A)
+#define GPIO10_I2C2_SDA PIN_CFG(10, ALT_B)
#define GPIO10_IP_GPIO3 PIN_CFG(10, ALT_C)
#define GPIO11_GPIO PIN_CFG(11, GPIO)
-#define GPIO11_IPI2C_SCL PIN_CFG_INPUT(11, ALT_A, PULLUP)
-#define GPIO11_I2C2_SCL PIN_CFG_INPUT(11, ALT_B, PULLUP)
+#define GPIO11_IPI2C_SCL PIN_CFG(11, ALT_A)
+#define GPIO11_I2C2_SCL PIN_CFG(11, ALT_B)
#define GPIO11_IP_GPIO2 PIN_CFG(11, ALT_C)
#define GPIO12_GPIO PIN_CFG(12, GPIO)
@@ -87,12 +87,12 @@
#define GPIO16_GPIO PIN_CFG(16, GPIO)
#define GPIO16_MSP0_RFS PIN_CFG(16, ALT_A)
-#define GPIO16_I2C1_SCL PIN_CFG_INPUT(16, ALT_B, PULLUP)
+#define GPIO16_I2C1_SCL PIN_CFG(16, ALT_B)
#define GPIO16_SLIM0_DAT PIN_CFG(16, ALT_C)
#define GPIO17_GPIO PIN_CFG(17, GPIO)
#define GPIO17_MSP0_RCK PIN_CFG(17, ALT_A)
-#define GPIO17_I2C1_SDA PIN_CFG_INPUT(17, ALT_B, PULLUP)
+#define GPIO17_I2C1_SDA PIN_CFG(17, ALT_B)
#define GPIO17_SLIM0_CLK PIN_CFG(17, ALT_C)
#define GPIO18_GPIO PIN_CFG(18, GPIO)
@@ -434,10 +434,10 @@
#define GPIO146_SSP0_TXD PIN_CFG(146, ALT_A)
#define GPIO147_GPIO PIN_CFG(147, GPIO)
-#define GPIO147_I2C0_SCL PIN_CFG_INPUT(147, ALT_A, PULLUP)
+#define GPIO147_I2C0_SCL PIN_CFG(147, ALT_A)
#define GPIO148_GPIO PIN_CFG(148, GPIO)
-#define GPIO148_I2C0_SDA PIN_CFG_INPUT(148, ALT_A, PULLUP)
+#define GPIO148_I2C0_SDA PIN_CFG(148, ALT_A)
#define GPIO149_GPIO PIN_CFG(149, GPIO)
#define GPIO149_IP_GPIO0 PIN_CFG(149, ALT_A)
@@ -459,82 +459,82 @@
#define GPIO152_KP_O9 PIN_CFG(152, ALT_C)
#define GPIO153_GPIO PIN_CFG(153, GPIO)
-#define GPIO153_KP_I7 PIN_CFG_INPUT(153, ALT_A, PULLDOWN)
+#define GPIO153_KP_I7 PIN_CFG(153, ALT_A)
#define GPIO153_LCD_D24 PIN_CFG(153, ALT_B)
#define GPIO153_U2_RXD PIN_CFG(153, ALT_C)
#define GPIO154_GPIO PIN_CFG(154, GPIO)
-#define GPIO154_KP_I6 PIN_CFG_INPUT(154, ALT_A, PULLDOWN)
+#define GPIO154_KP_I6 PIN_CFG(154, ALT_A)
#define GPIO154_LCD_D25 PIN_CFG(154, ALT_B)
#define GPIO154_U2_TXD PIN_CFG(154, ALT_C)
#define GPIO155_GPIO PIN_CFG(155, GPIO)
-#define GPIO155_KP_I5 PIN_CFG_INPUT(155, ALT_A, PULLDOWN)
+#define GPIO155_KP_I5 PIN_CFG(155, ALT_A)
#define GPIO155_LCD_D26 PIN_CFG(155, ALT_B)
#define GPIO155_STMAPE_CLK PIN_CFG(155, ALT_C)
#define GPIO156_GPIO PIN_CFG(156, GPIO)
-#define GPIO156_KP_I4 PIN_CFG_INPUT(156, ALT_A, PULLDOWN)
+#define GPIO156_KP_I4 PIN_CFG(156, ALT_A)
#define GPIO156_LCD_D27 PIN_CFG(156, ALT_B)
#define GPIO156_STMAPE_DAT3 PIN_CFG(156, ALT_C)
#define GPIO157_GPIO PIN_CFG(157, GPIO)
-#define GPIO157_KP_O7 PIN_CFG_INPUT(157, ALT_A, PULLUP)
+#define GPIO157_KP_O7 PIN_CFG(157, ALT_A)
#define GPIO157_LCD_D28 PIN_CFG(157, ALT_B)
#define GPIO157_STMAPE_DAT2 PIN_CFG(157, ALT_C)
#define GPIO158_GPIO PIN_CFG(158, GPIO)
-#define GPIO158_KP_O6 PIN_CFG_INPUT(158, ALT_A, PULLUP)
+#define GPIO158_KP_O6 PIN_CFG(158, ALT_A)
#define GPIO158_LCD_D29 PIN_CFG(158, ALT_B)
#define GPIO158_STMAPE_DAT1 PIN_CFG(158, ALT_C)
#define GPIO159_GPIO PIN_CFG(159, GPIO)
-#define GPIO159_KP_O5 PIN_CFG_INPUT(159, ALT_A, PULLUP)
+#define GPIO159_KP_O5 PIN_CFG(159, ALT_A)
#define GPIO159_LCD_D30 PIN_CFG(159, ALT_B)
#define GPIO159_STMAPE_DAT0 PIN_CFG(159, ALT_C)
#define GPIO160_GPIO PIN_CFG(160, GPIO)
-#define GPIO160_KP_O4 PIN_CFG_INPUT(160, ALT_A, PULLUP)
+#define GPIO160_KP_O4 PIN_CFG(160, ALT_A)
#define GPIO160_LCD_D31 PIN_CFG(160, ALT_B)
#define GPIO160_NONE PIN_CFG(160, ALT_C)
#define GPIO161_GPIO PIN_CFG(161, GPIO)
-#define GPIO161_KP_I3 PIN_CFG_INPUT(161, ALT_A, PULLDOWN)
+#define GPIO161_KP_I3 PIN_CFG(161, ALT_A)
#define GPIO161_LCD_D32 PIN_CFG(161, ALT_B)
#define GPIO161_UARTMOD_RXD PIN_CFG(161, ALT_C)
#define GPIO162_GPIO PIN_CFG(162, GPIO)
-#define GPIO162_KP_I2 PIN_CFG_INPUT(162, ALT_A, PULLDOWN)
+#define GPIO162_KP_I2 PIN_CFG(162, ALT_A)
#define GPIO162_LCD_D33 PIN_CFG(162, ALT_B)
#define GPIO162_UARTMOD_TXD PIN_CFG(162, ALT_C)
#define GPIO163_GPIO PIN_CFG(163, GPIO)
-#define GPIO163_KP_I1 PIN_CFG_INPUT(163, ALT_A, PULLDOWN)
+#define GPIO163_KP_I1 PIN_CFG(163, ALT_A)
#define GPIO163_LCD_D34 PIN_CFG(163, ALT_B)
#define GPIO163_STMMOD_CLK PIN_CFG(163, ALT_C)
#define GPIO164_GPIO PIN_CFG(164, GPIO)
-#define GPIO164_KP_I0 PIN_CFG_INPUT(164, ALT_A, PULLUP)
+#define GPIO164_KP_I0 PIN_CFG(164, ALT_A)
#define GPIO164_LCD_D35 PIN_CFG(164, ALT_B)
#define GPIO164_STMMOD_DAT3 PIN_CFG(164, ALT_C)
#define GPIO165_GPIO PIN_CFG(165, GPIO)
-#define GPIO165_KP_O3 PIN_CFG_INPUT(165, ALT_A, PULLUP)
+#define GPIO165_KP_O3 PIN_CFG(165, ALT_A)
#define GPIO165_LCD_D36 PIN_CFG(165, ALT_B)
#define GPIO165_STMMOD_DAT2 PIN_CFG(165, ALT_C)
#define GPIO166_GPIO PIN_CFG(166, GPIO)
-#define GPIO166_KP_O2 PIN_CFG_INPUT(166, ALT_A, PULLUP)
+#define GPIO166_KP_O2 PIN_CFG(166, ALT_A)
#define GPIO166_LCD_D37 PIN_CFG(166, ALT_B)
#define GPIO166_STMMOD_DAT1 PIN_CFG(166, ALT_C)
#define GPIO167_GPIO PIN_CFG(167, GPIO)
-#define GPIO167_KP_O1 PIN_CFG_INPUT(167, ALT_A, PULLUP)
+#define GPIO167_KP_O1 PIN_CFG(167, ALT_A)
#define GPIO167_LCD_D38 PIN_CFG(167, ALT_B)
#define GPIO167_STMMOD_DAT0 PIN_CFG(167, ALT_C)
#define GPIO168_GPIO PIN_CFG(168, GPIO)
-#define GPIO168_KP_O0 PIN_CFG_INPUT(168, ALT_A, PULLUP)
+#define GPIO168_KP_O0 PIN_CFG(168, ALT_A)
#define GPIO168_LCD_D39 PIN_CFG(168, ALT_B)
#define GPIO168_NONE PIN_CFG(168, ALT_C)
@@ -637,7 +637,7 @@
#define GPIO216_GPIO PIN_CFG(216, GPIO)
#define GPIO216_MC1_DAT2DIR PIN_CFG(216, ALT_A)
#define GPIO216_MC3_CMDDIR PIN_CFG(216, ALT_B)
-#define GPIO216_I2C3_SDA PIN_CFG_INPUT(216, ALT_C, PULLUP)
+#define GPIO216_I2C3_SDA PIN_CFG(216, ALT_C)
#define GPIO216_SPI2_FRM PIN_CFG(216, ALT_C)
#define GPIO217_GPIO PIN_CFG(217, GPIO)
@@ -649,7 +649,7 @@
#define GPIO218_GPIO PIN_CFG(218, GPIO)
#define GPIO218_MC1_DAT31DIR PIN_CFG(218, ALT_A)
#define GPIO218_MC3_DAT0DIR PIN_CFG(218, ALT_B)
-#define GPIO218_I2C3_SCL PIN_CFG_INPUT(218, ALT_C, PULLUP)
+#define GPIO218_I2C3_SCL PIN_CFG(218, ALT_C)
#define GPIO218_SPI2_RXD PIN_CFG(218, ALT_C)
#define GPIO219_GPIO PIN_CFG(219, GPIO)
@@ -698,12 +698,12 @@
#define GPIO229_GPIO PIN_CFG(229, GPIO)
#define GPIO229_CLKOUT1 PIN_CFG(229, ALT_A)
#define GPIO229_PWL PIN_CFG(229, ALT_B)
-#define GPIO229_I2C3_SDA PIN_CFG_INPUT(229, ALT_C, PULLUP)
+#define GPIO229_I2C3_SDA PIN_CFG(229, ALT_C)
#define GPIO230_GPIO PIN_CFG(230, GPIO)
#define GPIO230_CLKOUT2 PIN_CFG(230, ALT_A)
#define GPIO230_PWL PIN_CFG(230, ALT_B)
-#define GPIO230_I2C3_SCL PIN_CFG_INPUT(230, ALT_C, PULLUP)
+#define GPIO230_I2C3_SCL PIN_CFG(230, ALT_C)
#define GPIO256_GPIO PIN_CFG(256, GPIO)
#define GPIO256_USB_NXT PIN_CFG(256, ALT_A)
diff --git a/arch/arm/mach-ux500/pins.c b/arch/arm/mach-ux500/pins.c
new file mode 100644
index 00000000000..ed2ed7333ff
--- /dev/null
+++ b/arch/arm/mach-ux500/pins.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <plat/pincfg.h>
+#include <linux/gpio.h>
+
+#include "pins.h"
+
+static LIST_HEAD(pin_lookups);
+static DEFINE_MUTEX(pin_lookups_mutex);
+static DEFINE_SPINLOCK(pins_lock);
+
+void __init ux500_pins_add(struct ux500_pin_lookup *pl, size_t num)
+{
+ mutex_lock(&pin_lookups_mutex);
+
+ while (num--) {
+ list_add_tail(&pl->node, &pin_lookups);
+ pl++;
+ }
+
+ mutex_unlock(&pin_lookups_mutex);
+}
+
+struct ux500_pins *ux500_pins_get(const char *name)
+{
+ struct ux500_pins *pins = NULL;
+ struct ux500_pin_lookup *pl;
+
+ mutex_lock(&pin_lookups_mutex);
+
+ list_for_each_entry(pl, &pin_lookups, node) {
+ if (!strcmp(pl->name, name)) {
+ pins = pl->pins;
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&pin_lookups_mutex);
+ return pins;
+}
+
+int ux500_pins_enable(struct ux500_pins *pins)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&pins_lock, flags);
+
+ if (pins->usage++ == 0)
+ ret = nmk_config_pins(pins->cfg, pins->num);
+
+ spin_unlock_irqrestore(&pins_lock, flags);
+ return ret;
+}
+
+int ux500_pins_disable(struct ux500_pins *pins)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&pins_lock, flags);
+
+ if (WARN_ON(pins->usage == 0))
+ goto out;
+
+ if (--pins->usage == 0)
+ ret = nmk_config_pins_sleep(pins->cfg, pins->num);
+
+out:
+ spin_unlock_irqrestore(&pins_lock, flags);
+ return ret;
+}
+
+void ux500_pins_put(struct ux500_pins *pins)
+{
+ WARN_ON(!pins);
+}
+
+void __init ux500_offchip_gpio_init(struct ux500_pins *pins)
+{
+ int err;
+ int i;
+ int gpio;
+ int output;
+ int value;
+ pin_cfg_t cfg;
+
+ for (i = 0; i < pins->num; i++) {
+ cfg = pins->cfg[i];
+ gpio = PIN_NUM(cfg);
+ output = PIN_DIR(cfg);
+ value = PIN_VAL(cfg);
+
+ err = gpio_request(gpio, "offchip_gpio_init");
+ if (err < 0) {
+ pr_err("pins: gpio_request for gpio=%d failed with"
+ "err: %d\n", gpio, err);
+ /* Pin already requested. Try to configure rest. */
+ continue;
+ }
+
+ if (!output) {
+ err = gpio_direction_input(gpio);
+ if (err < 0)
+ pr_err("pins: gpio_direction_input for gpio=%d"
+ "failed with err: %d\n", gpio, err);
+ } else {
+ err = gpio_direction_output(gpio, value);
+ if (err < 0)
+ pr_err("pins: gpio_direction_output for gpio="
+ "%d failed with err: %d\n", gpio, err);
+ }
+ gpio_free(gpio);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/gpio/nomadik.h>
+
+#include <mach/gpio.h>
+
+static void show_pin(struct seq_file *s, pin_cfg_t pin)
+{
+ static char *afnames[] = {
+ [NMK_GPIO_ALT_GPIO] = "GPIO",
+ [NMK_GPIO_ALT_A] = "A",
+ [NMK_GPIO_ALT_B] = "B",
+ [NMK_GPIO_ALT_C] = "C"
+ };
+ static char *pullnames[] = {
+ [NMK_GPIO_PULL_NONE] = "none",
+ [NMK_GPIO_PULL_UP] = "up",
+ [NMK_GPIO_PULL_DOWN] = "down",
+ [3] /* illegal */ = "??"
+ };
+
+ int pin_num = PIN_NUM(pin);
+ int pull = PIN_PULL(pin);
+ int af = PIN_ALT(pin);
+ int slpm = PIN_SLPM(pin);
+ int output = PIN_DIR(pin);
+ int val = PIN_VAL(pin);
+ int slpm_pull = PIN_SLPM_PULL(pin);
+ int slpm_dir = PIN_SLPM_DIR(pin);
+ int slpm_val = PIN_SLPM_VAL(pin);
+
+ seq_printf(s,
+ " pin %d [%#lx]: af %s, pull %s (%s%s) - slpm: %s%s%s%s\n",
+ pin_num, pin, afnames[af],
+ pullnames[pull],
+ output ? "output " : "input",
+ output ? (val ? "high" : "low") : "",
+ slpm ? "no-change/no-wakeup " : "input/wakeup ",
+ slpm_dir ? (slpm_dir == 1 ? "input " : "output ") : "",
+ slpm_dir == 1 ? (slpm_pull == 0 ? "pull: none " :
+ (slpm_pull == NMK_GPIO_PULL_UP ?
+ "pull: up " : "pull: down ") ): "",
+ slpm_dir == 2 ? (slpm_val == 1 ? "low " : "high ") : "");
+}
+
+static int pins_dbg_show(struct seq_file *s, void *iter)
+{
+ struct ux500_pin_lookup *pl;
+ int i;
+ bool *pins;
+ int prev = -2;
+ int first = 0;
+
+ pins = kzalloc(sizeof(bool) * NOMADIK_NR_GPIO, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ mutex_lock(&pin_lookups_mutex);
+
+ list_for_each_entry(pl, &pin_lookups, node) {
+ seq_printf(s, "\n%s (%d) usage: %d\n",
+ pl->name, pl->pins->num, pl->pins->usage);
+ for (i = 0; i < pl->pins->num; i++) {
+ show_pin(s, pl->pins->cfg[i]);
+ pins[PIN_NUM(pl->pins->cfg[i])] = true;
+ }
+ }
+ mutex_unlock(&pin_lookups_mutex);
+
+ seq_printf(s, "\nSummary allocated pins:\n");
+ for (i = 0; i < NOMADIK_NR_GPIO; i++) {
+ if (prev == i - 1) {
+ if (pins[i])
+ prev = i;
+ else
+ if (prev > 0) {
+ if (first != prev)
+ seq_printf(s, "-%d, ", prev);
+ else
+ seq_printf(s, ", ");
+ }
+ continue;
+ }
+ if (pins[i]) {
+ seq_printf(s, "%d", i);
+ prev = i;
+ first = i;
+ }
+ }
+ if (prev == i - 1 && first != prev)
+ seq_printf(s, "-%d", prev);
+
+ seq_printf(s, "\n");
+
+ kfree(pins);
+
+ return 0;
+}
+
+static int pins_dbg_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, pins_dbg_show, inode->i_private);
+}
+
+static const struct file_operations pins_fops = {
+ .open = pins_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int __init pins_dbg_init(void)
+{
+ (void) debugfs_create_file("pins", S_IRUGO,
+ NULL,
+ NULL,
+ &pins_fops);
+ return 0;
+}
+late_initcall(pins_dbg_init);
+#endif
diff --git a/arch/arm/mach-ux500/pins.h b/arch/arm/mach-ux500/pins.h
new file mode 100644
index 00000000000..0d36af2e7d9
--- /dev/null
+++ b/arch/arm/mach-ux500/pins.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef __MACH_UX500_PINS_H
+#define __MACH_UX500_PINS_H
+
+#include <linux/list.h>
+#include <plat/pincfg.h>
+
+#define PIN_LOOKUP(_name, _pins) \
+{ \
+ .name = _name, \
+ .pins = _pins, \
+}
+
+#define UX500_PINS(name, pins...) \
+struct ux500_pins name = { \
+ .cfg = (pin_cfg_t[]) {pins}, \
+ .num = ARRAY_SIZE(((pin_cfg_t[]) {pins})), \
+}
+
+struct ux500_pins {
+ int usage;
+ int num;
+ pin_cfg_t *cfg;
+};
+
+struct ux500_pin_lookup {
+ struct list_head node;
+ const char *name;
+ struct ux500_pins *pins;
+};
+
+void __init ux500_pins_add(struct ux500_pin_lookup *pl, size_t num);
+void __init ux500_offchip_gpio_init(struct ux500_pins *pins);
+struct ux500_pins *ux500_pins_get(const char *name);
+int ux500_pins_enable(struct ux500_pins *pins);
+int ux500_pins_disable(struct ux500_pins *pins);
+void ux500_pins_put(struct ux500_pins *pins);
+int pins_for_u9500(void);
+
+#endif
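The UX500_PINS() and PIN_LOOKUP() macros above are meant to be used from board files: the board registers named pin sets at init time, and a driver looks its set up by name and reference-counts it with ux500_pins_enable()/ux500_pins_disable(). A hedged sketch of that flow (the pin set, the "example-dev" key and the chosen pin macros are illustrative; the real tables live in board-mop500-pins.c):

/* Hypothetical board-side pin table built with the macros from pins.h. */
static UX500_PINS(example_i2c2_pins,
	GPIO8_I2C2_SDA,		/* pin macros come from pins-db8500.h */
	GPIO9_I2C2_SCL,
);

static struct ux500_pin_lookup example_pin_lookup[] = {
	PIN_LOOKUP("example-dev", &example_i2c2_pins),
};

static void __init example_board_pins_init(void)
{
	ux500_pins_add(example_pin_lookup, ARRAY_SIZE(example_pin_lookup));
}

/* Driver side: fetch the set by name and enable it while in use. */
static int example_driver_request_pins(void)
{
	struct ux500_pins *pins = ux500_pins_get("example-dev");

	if (!pins)
		return -ENODEV;

	return ux500_pins_enable(pins);	/* balanced by ux500_pins_disable() */
}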
diff --git a/arch/arm/mach-ux500/pm/Kconfig b/arch/arm/mach-ux500/pm/Kconfig
new file mode 100644
index 00000000000..12004ba9858
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/Kconfig
@@ -0,0 +1,70 @@
+config DBX500_PRCMU_QOS_POWER
+ bool "DBX500 PRCMU power QoS support"
+ depends on (MFD_DB5500_PRCMU || MFD_DB8500_PRCMU)
+ default y
+ help
+ Add support for PRCMU power Quality of Service
+
+config UX500_CONTEXT
+ bool "Context save/restore support for UX500"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && PM
+ help
+ This is needed for ApSleep and deeper sleep states.
+
+config UX500_PM_PERFORMANCE
+ bool "Performance supervision"
+ depends on DBX500_PRCMU_QOS_POWER
+ default y
+ help
+ Enable supervision of events which may require a boost
+ of platform performance.
+
+config UX500_CONSOLE_UART_GPIO_PIN
+ int "The pin number of the console UART GPIO pin"
+ default 29
+ depends on UX500_SUSPEND_DBG_WAKE_ON_UART || UX500_CPUIDLE_DEBUG
+ help
+ Number of the GPIO pin connected to the console UART RX line.
+
+ Board-specific code can change this.
+
+config UX500_SUSPEND
+ bool "Suspend to mem and standby support"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && PM && SUSPEND
+ select UX500_CONTEXT
+ help
+ Add support for suspend.
+
+config UX500_SUSPEND_STANDBY
+ bool "Suspend Standby goes to ApSleep"
+ depends on UX500_SUSPEND
+ help
+ If yes, echo standby > /sys/power/state puts the system into ApSleep.
+
+config UX500_SUSPEND_MEM
+ bool "Suspend Mem goes to ApDeepSleep"
+ depends on UX500_SUSPEND
+ help
+ If yes, echo mem > /sys/power/state puts the system into ApDeepSleep;
+ otherwise it behaves the same as echo standby > /sys/power/state.
+
+config UX500_SUSPEND_DBG
+ bool "Suspend debug"
+ depends on UX500_SUSPEND && DEBUG_FS
+ help
+ Add debug support for suspend.
+
+config UX500_SUSPEND_DBG_WAKE_ON_UART
+ bool "Suspend wakes on console UART"
+ depends on UX500_SUSPEND_DBG
+ help
+ Wake up on UART interrupts. Makes it possible for the console to wake up the system.
+
+config UX500_USECASE_GOVERNOR
+ bool "UX500 use-case governor"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && \
+ (CPU_FREQ && CPU_IDLE && HOTPLUG_CPU && \
+ EARLYSUSPEND && UX500_L2X0_PREFETCH_CTRL && PM)
+ default y
+ help
+ Adjusts CPU_IDLE, CPU_FREQ, HOTPLUG_CPU and L2 cache parameters
diff --git a/arch/arm/mach-ux500/pm/Makefile b/arch/arm/mach-ux500/pm/Makefile
new file mode 100644
index 00000000000..c0af28e5d3e
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/Makefile
@@ -0,0 +1,12 @@
+#
+# Power save related files
+#
+obj-y := pm.o runtime.o
+
+obj-$(CONFIG_DBX500_PRCMU_QOS_POWER) += prcmu-qos-power.o
+obj-$(CONFIG_UX500_CONTEXT) += context.o context_arm.o context-db8500.o context-db5500.o
+obj-$(CONFIG_UX500_CPUIDLE) += timer.o
+obj-$(CONFIG_UX500_SUSPEND) += suspend.o
+obj-$(CONFIG_UX500_SUSPEND_DBG) += suspend_dbg.o
+obj-$(CONFIG_UX500_PM_PERFORMANCE) += performance.o
+obj-$(CONFIG_UX500_USECASE_GOVERNOR) += usecase_gov.o
diff --git a/arch/arm/mach-ux500/pm/context-db5500.c b/arch/arm/mach-ux500/pm/context-db5500.c
new file mode 100644
index 00000000000..9842785c05a
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context-db5500.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>,
+ * Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Sundar Iyer <sundar.iyer@stericsson.com>,
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/context.h>
+
+/* These registers are DB5500 specific */
+#define NODE_HIBW1_ESRAM_IN_0_PRIORITY 0x0
+#define NODE_HIBW1_ESRAM_IN_1_PRIORITY 0x4
+
+#define NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT 0x18
+#define NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT 0x1C
+#define NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT 0x20
+
+#define NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT 0x24
+#define NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT 0x28
+#define NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT 0x2C
+
+#define NODE_HIBW1_DDR_IN_0_PRIORITY 0x400
+#define NODE_HIBW1_DDR_IN_1_PRIORITY 0x404
+#define NODE_HIBW1_DDR_IN_2_PRIORITY 0x408
+
+#define NODE_HIBW1_DDR_IN_0_LIMIT 0x424
+#define NODE_HIBW1_DDR_IN_1_LIMIT 0x428
+#define NODE_HIBW1_DDR_IN_2_LIMIT 0x42C
+
+#define NODE_HIBW1_DDR_OUT_0_PRIORITY 0x430
+
+#define NODE_HIBW2_ESRAM_IN_0_PRIORITY 0x800
+#define NODE_HIBW2_ESRAM_IN_1_PRIORITY 0x804
+
+#define NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT 0x818
+#define NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT 0x81C
+#define NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT 0x820
+
+#define NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT 0x824
+#define NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT 0x828
+#define NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT 0x82C
+
+#define NODE_HIBW2_DDR_IN_0_PRIORITY 0xC00
+#define NODE_HIBW2_DDR_IN_1_PRIORITY 0xC04
+#define NODE_HIBW2_DDR_IN_2_PRIORITY 0xC08
+#define NODE_HIBW2_DDR_IN_3_PRIORITY 0xC0C
+
+#define NODE_HIBW2_DDR_IN_0_LIMIT 0xC30
+#define NODE_HIBW2_DDR_IN_1_LIMIT 0xC34
+#define NODE_HIBW2_DDR_IN_2_LIMIT 0xC38
+#define NODE_HIBW2_DDR_IN_3_LIMIT 0xC3C
+
+#define NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC40
+
+#define NODE_ESRAM0_IN_0_PRIORITY 0x1000
+#define NODE_ESRAM0_IN_1_PRIORITY 0x1004
+#define NODE_ESRAM0_IN_2_PRIORITY 0x1008
+
+#define NODE_ESRAM0_IN_0_LIMIT 0x1024
+#define NODE_ESRAM0_IN_1_LIMIT 0x1028
+#define NODE_ESRAM0_IN_2_LIMIT 0x102C
+#define NODE_ESRAM0_OUT_0_PRIORITY 0x1030
+
+#define NODE_ESRAM1_2_IN_0_PRIORITY 0x1400
+#define NODE_ESRAM1_2_IN_1_PRIORITY 0x1404
+#define NODE_ESRAM1_2_IN_2_PRIORITY 0x1408
+
+#define NODE_ESRAM1_2_IN_0_ARB_1_LIMIT 0x1424
+#define NODE_ESRAM1_2_IN_1_ARB_1_LIMIT 0x1428
+#define NODE_ESRAM1_2_IN_2_ARB_1_LIMIT 0x142C
+#define NODE_ESRAM1_2_OUT_0_PRIORITY 0x1430
+
+#define NODE_ESRAM3_4_IN_0_PRIORITY 0x1800
+#define NODE_ESRAM3_4_IN_1_PRIORITY 0x1804
+#define NODE_ESRAM3_4_IN_2_PRIORITY 0x1808
+
+#define NODE_ESRAM3_4_IN_0_ARB_1_LIMIT 0x1824
+#define NODE_ESRAM3_4_IN_1_ARB_1_LIMIT 0x1828
+#define NODE_ESRAM3_4_IN_2_ARB_1_LIMIT 0x182C
+#define NODE_ESRAM3_4_OUT_0_PRIORITY 0x1830
+
+/*
+ * Save ICN (Interconnect or Interconnect nodes) configuration registers
+ * TODO: This can be optimized, for example if we have
+ * a static ICN configuration.
+ */
+
+static struct {
+ void __iomem *base;
+ u32 hibw1_esram_in_pri[2];
+ u32 hibw1_esram_in0_arb[3];
+ u32 hibw1_esram_in1_arb[3];
+ u32 hibw1_ddr_in_prio[3];
+ u32 hibw1_ddr_in_limit[3];
+ u32 hibw1_ddr_out_prio_reg;
+
+ /* HiBw2 node registers */
+ u32 hibw2_esram_in_pri[2];
+ u32 hibw2_esram_in0_arblimit[3];
+ u32 hibw2_esram_in1_arblimit[3];
+ u32 hibw2_ddr_in_prio[4];
+ u32 hibw2_ddr_in_limit[4];
+ u32 hibw2_ddr_out_prio_reg;
+
+ /* ESRAM node registers */
+ u32 esram_in_prio[3];
+ u32 esram_in_lim[3];
+ u32 esram_out_prio_reg;
+
+ u32 esram12_in_prio[3];
+ u32 esram12_in_arb_lim[3];
+ u32 esram12_out_prio_reg;
+
+ u32 esram34_in_prio[3];
+ u32 esram34_in_arb_lim[3];
+ u32 esram34_out_prio;
+} context_icn;
+
+
+void u5500_context_save_icn(void)
+{
+ void __iomem *base = context_icn.base;
+
+ /* hibw1 */
+ context_icn.hibw1_esram_in_pri[0] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw1_esram_in_pri[1] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw1_esram_in0_arb[0] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in0_arb[1] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in0_arb[2] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw1_esram_in1_arb[0] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in1_arb[1] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in1_arb[2] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw1_ddr_in_prio[0] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[1] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[2] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw1_ddr_in_limit[0] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_0_LIMIT);
+ context_icn.hibw1_ddr_in_limit[1] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_1_LIMIT);
+ context_icn.hibw1_ddr_in_limit[2] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ context_icn.hibw1_ddr_out_prio_reg =
+ readl_relaxed(base + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ /* hibw2 */
+ context_icn.hibw2_esram_in_pri[0] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw2_esram_in_pri[1] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw2_esram_in0_arblimit[0] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[1] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[2] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw2_esram_in1_arblimit[0] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[1] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[2] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw2_ddr_in_prio[0] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[1] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[2] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_2_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[3] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_3_PRIORITY);
+
+ context_icn.hibw2_ddr_in_limit[0] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_0_LIMIT);
+ context_icn.hibw2_ddr_in_limit[1] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_1_LIMIT);
+ context_icn.hibw2_ddr_in_limit[2] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_2_LIMIT);
+ context_icn.hibw2_ddr_in_limit[3] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_3_LIMIT);
+
+ context_icn.hibw2_ddr_out_prio_reg =
+ readl_relaxed(base + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ /* ESRAM0 */
+ context_icn.esram_in_prio[0] =
+ readl_relaxed(base + NODE_ESRAM0_IN_0_PRIORITY);
+ context_icn.esram_in_prio[1] =
+ readl_relaxed(base + NODE_ESRAM0_IN_1_PRIORITY);
+ context_icn.esram_in_prio[2] =
+ readl_relaxed(base + NODE_ESRAM0_IN_2_PRIORITY);
+
+ context_icn.esram_in_lim[0] =
+ readl_relaxed(base + NODE_ESRAM0_IN_0_LIMIT);
+ context_icn.esram_in_lim[1] =
+ readl_relaxed(base + NODE_ESRAM0_IN_1_LIMIT);
+ context_icn.esram_in_lim[2] =
+ readl_relaxed(base + NODE_ESRAM0_IN_2_LIMIT);
+
+ context_icn.esram_out_prio_reg =
+ readl_relaxed(base + NODE_ESRAM0_OUT_0_PRIORITY);
+
+ /* ESRAM1-2 */
+ context_icn.esram12_in_prio[0] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_0_PRIORITY);
+ context_icn.esram12_in_prio[1] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_1_PRIORITY);
+ context_icn.esram12_in_prio[2] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_2_PRIORITY);
+
+ context_icn.esram12_in_arb_lim[0] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[1] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[2] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+
+ context_icn.esram12_out_prio_reg =
+ readl_relaxed(base + NODE_ESRAM1_2_OUT_0_PRIORITY);
+
+ /* ESRAM3-4 */
+ context_icn.esram34_in_prio[0] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_0_PRIORITY);
+ context_icn.esram34_in_prio[1] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_1_PRIORITY);
+ context_icn.esram34_in_prio[2] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_2_PRIORITY);
+
+ context_icn.esram34_in_arb_lim[0] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[1] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[2] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+
+ context_icn.esram34_out_prio =
+ readl_relaxed(base + NODE_ESRAM3_4_OUT_0_PRIORITY);
+}
+
+/*
+ * Restore ICN configuration registers
+ */
+void u5500_context_restore_icn(void)
+{
+ void __iomem *base = context_icn.base;
+
+ /* hibw1 */
+ writel_relaxed(context_icn.hibw1_esram_in_pri[0],
+ base + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw1_esram_in_pri[1],
+ base + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[0],
+ base + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[1],
+ base + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[2],
+ base + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[0],
+ base + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[1],
+ base + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[2],
+ base + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[0],
+ base + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[1],
+ base + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[2],
+ base + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[0],
+ base + NODE_HIBW1_DDR_IN_0_LIMIT);
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[1],
+ base + NODE_HIBW1_DDR_IN_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[2],
+ base + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_ddr_out_prio_reg,
+ base + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ /* hibw2 */
+ writel_relaxed(context_icn.hibw2_esram_in_pri[0],
+ base + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw2_esram_in_pri[1],
+ base + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[0],
+ base + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[1],
+ base + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[2],
+ base + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[0],
+ base + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[1],
+ base + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[2],
+ base + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[0],
+ base + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[1],
+ base + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[2],
+ base + NODE_HIBW2_DDR_IN_2_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[3],
+ base + NODE_HIBW2_DDR_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[0],
+ base + NODE_HIBW2_DDR_IN_0_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[1],
+ base + NODE_HIBW2_DDR_IN_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[2],
+ base + NODE_HIBW2_DDR_IN_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[3],
+ base + NODE_HIBW2_DDR_IN_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_ddr_out_prio_reg,
+ base + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ /* ESRAM0 */
+ writel_relaxed(context_icn.esram_in_prio[0],
+ base + NODE_ESRAM0_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram_in_prio[1],
+ base + NODE_ESRAM0_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram_in_prio[2],
+ base + NODE_ESRAM0_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.esram_in_lim[0],
+ base + NODE_ESRAM0_IN_0_LIMIT);
+ writel_relaxed(context_icn.esram_in_lim[1],
+ base + NODE_ESRAM0_IN_1_LIMIT);
+ writel_relaxed(context_icn.esram_in_lim[2],
+ base + NODE_ESRAM0_IN_2_LIMIT);
+
+ writel_relaxed(context_icn.esram_out_prio_reg,
+ base + NODE_ESRAM0_OUT_0_PRIORITY);
+
+ /* ESRAM1-2 */
+ writel_relaxed(context_icn.esram12_in_prio[0],
+ base + NODE_ESRAM1_2_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[1],
+ base + NODE_ESRAM1_2_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[2],
+ base + NODE_ESRAM1_2_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.esram12_in_arb_lim[0],
+ base + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[1],
+ base + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[2],
+ base + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+
+ writel_relaxed(context_icn.esram12_out_prio_reg,
+ base + NODE_ESRAM1_2_OUT_0_PRIORITY);
+
+ /* ESRAM3-4 */
+ writel_relaxed(context_icn.esram34_in_prio[0],
+ base + NODE_ESRAM3_4_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[1],
+ base + NODE_ESRAM3_4_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[2],
+ base + NODE_ESRAM3_4_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.esram34_in_arb_lim[0],
+ base + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[1],
+ base + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[2],
+ base + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+
+ writel_relaxed(context_icn.esram34_out_prio,
+ base + NODE_ESRAM3_4_OUT_0_PRIORITY);
+
+}
+
+void u5500_context_init(void)
+{
+ context_icn.base = ioremap(U5500_ICN_BASE, SZ_8K);
+}
diff --git a/arch/arm/mach-ux500/pm/context-db8500.c b/arch/arm/mach-ux500/pm/context-db8500.c
new file mode 100644
index 00000000000..3ba73e51a6d
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context-db8500.c
@@ -0,0 +1,456 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Sundar Iyer for ST-Ericsson
+ *
+ */
+
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/context.h>
+
+/*
+ * ST-Interconnect context
+ */
+
+/* priority, bw limiter register offsets */
+#define NODE_HIBW1_ESRAM_IN_0_PRIORITY 0x00
+#define NODE_HIBW1_ESRAM_IN_1_PRIORITY 0x04
+#define NODE_HIBW1_ESRAM_IN_2_PRIORITY 0x08
+#define NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT 0x24
+#define NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT 0x28
+#define NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT 0x2C
+#define NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT 0x30
+#define NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT 0x34
+#define NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT 0x38
+#define NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT 0x3C
+#define NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT 0x40
+#define NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT 0x44
+#define NODE_HIBW1_DDR_IN_0_PRIORITY 0x400
+#define NODE_HIBW1_DDR_IN_1_PRIORITY 0x404
+#define NODE_HIBW1_DDR_IN_2_PRIORITY 0x408
+#define NODE_HIBW1_DDR_IN_0_LIMIT 0x424
+#define NODE_HIBW1_DDR_IN_1_LIMIT 0x428
+#define NODE_HIBW1_DDR_IN_2_LIMIT 0x42C
+#define NODE_HIBW1_DDR_OUT_0_PRIORITY 0x430
+#define NODE_HIBW2_ESRAM_IN_0_PRIORITY 0x800
+#define NODE_HIBW2_ESRAM_IN_1_PRIORITY 0x804
+#define NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT 0x818
+#define NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT 0x81C
+#define NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT 0x820
+#define NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT 0x824
+#define NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT 0x828
+#define NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT 0x82C
+#define NODE_HIBW2_DDR_IN_0_PRIORITY 0xC00
+#define NODE_HIBW2_DDR_IN_1_PRIORITY 0xC04
+#define NODE_HIBW2_DDR_IN_2_PRIORITY 0xC08
+
+#define NODE_HIBW2_DDR_IN_0_LIMIT 0xC24
+#define NODE_HIBW2_DDR_IN_1_LIMIT 0xC28
+#define NODE_HIBW2_DDR_IN_2_LIMIT 0xC2C
+#define NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC30
+
+/*
+ * Note: the following registers are listed in table 10 of the
+ * db8500 design spec v3.1 and v3.3, but the addresses given
+ * there do not match the per-register descriptions. The
+ * addresses in the individual register descriptions are the
+ * correct ones.
+ * NODE_HIBW2_DDR_IN_3_LIMIT is only present in v1.
+ *
+ * Faulty register addresses in table 10:
+ * NODE_HIBW2_DDR_IN_2_LIMIT 0xC38
+ * NODE_HIBW2_DDR_IN_3_LIMIT 0xC3C
+ * NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC40
+ */
+
+#define NODE_ESRAM0_IN_0_PRIORITY 0x1000
+#define NODE_ESRAM0_IN_1_PRIORITY 0x1004
+#define NODE_ESRAM0_IN_2_PRIORITY 0x1008
+#define NODE_ESRAM0_IN_3_PRIORITY 0x100C
+#define NODE_ESRAM0_IN_0_LIMIT 0x1030
+#define NODE_ESRAM0_IN_1_LIMIT 0x1034
+#define NODE_ESRAM0_IN_2_LIMIT 0x1038
+#define NODE_ESRAM0_IN_3_LIMIT 0x103C
+/* common */
+#define NODE_ESRAM1_2_IN_0_PRIORITY 0x1400
+#define NODE_ESRAM1_2_IN_1_PRIORITY 0x1404
+#define NODE_ESRAM1_2_IN_2_PRIORITY 0x1408
+#define NODE_ESRAM1_2_IN_3_PRIORITY 0x140C
+#define NODE_ESRAM1_2_IN_0_ARB_1_LIMIT 0x1430
+#define NODE_ESRAM1_2_IN_0_ARB_2_LIMIT 0x1434
+#define NODE_ESRAM1_2_IN_1_ARB_1_LIMIT 0x1438
+#define NODE_ESRAM1_2_IN_1_ARB_2_LIMIT 0x143C
+#define NODE_ESRAM1_2_IN_2_ARB_1_LIMIT 0x1440
+#define NODE_ESRAM1_2_IN_2_ARB_2_LIMIT 0x1444
+#define NODE_ESRAM1_2_IN_3_ARB_1_LIMIT 0x1448
+#define NODE_ESRAM1_2_IN_3_ARB_2_LIMIT 0x144C
+
+#define NODE_ESRAM3_4_IN_0_PRIORITY 0x1800
+#define NODE_ESRAM3_4_IN_1_PRIORITY 0x1804
+#define NODE_ESRAM3_4_IN_2_PRIORITY 0x1808
+#define NODE_ESRAM3_4_IN_3_PRIORITY 0x180C
+#define NODE_ESRAM3_4_IN_0_ARB_1_LIMIT 0x1830
+#define NODE_ESRAM3_4_IN_0_ARB_2_LIMIT 0x1834
+#define NODE_ESRAM3_4_IN_1_ARB_1_LIMIT 0x1838
+#define NODE_ESRAM3_4_IN_1_ARB_2_LIMIT 0x183C
+#define NODE_ESRAM3_4_IN_2_ARB_1_LIMIT 0x1840
+#define NODE_ESRAM3_4_IN_2_ARB_2_LIMIT 0x1844
+#define NODE_ESRAM3_4_IN_3_ARB_1_LIMIT 0x1848
+#define NODE_ESRAM3_4_IN_3_ARB_2_LIMIT 0x184C
+
+static struct {
+ void __iomem *base;
+ u32 hibw1_esram_in_pri[3];
+ u32 hibw1_esram_in0_arb[3];
+ u32 hibw1_esram_in1_arb[3];
+ u32 hibw1_esram_in2_arb[3];
+ u32 hibw1_ddr_in_prio[3];
+ u32 hibw1_ddr_in_limit[3];
+ u32 hibw1_ddr_out_prio;
+
+ /* HiBw2 node registers */
+ u32 hibw2_esram_in_pri[2];
+ u32 hibw2_esram_in0_arblimit[3];
+ u32 hibw2_esram_in1_arblimit[3];
+ u32 hibw2_ddr_in_prio[4];
+ u32 hibw2_ddr_in_limit[4];
+ u32 hibw2_ddr_out_prio;
+
+ /* ESRAM node registers */
+ u32 esram_in_prio[4];
+ u32 esram_in_lim[4];
+ u32 esram0_in_prio[4];
+ u32 esram0_in_lim[4];
+ u32 esram12_in_prio[4];
+ u32 esram12_in_arb_lim[8];
+ u32 esram34_in_prio[4];
+ u32 esram34_in_arb_lim[8];
+} context_icn;
+
+/**
+ * u8500_context_save_icn() - save ICN context
+ *
+ */
+void u8500_context_save_icn(void)
+{
+ void __iomem *b = context_icn.base;
+
+ context_icn.hibw1_esram_in_pri[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw1_esram_in_pri[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+ context_icn.hibw1_esram_in_pri[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_PRIORITY);
+
+ context_icn.hibw1_esram_in0_arb[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in0_arb[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in0_arb[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw1_esram_in1_arb[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in1_arb[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in1_arb[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw1_esram_in2_arb[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in2_arb[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in2_arb[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT);
+
+ context_icn.hibw1_ddr_in_prio[0] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[1] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[2] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw1_ddr_in_limit[0] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_0_LIMIT);
+ context_icn.hibw1_ddr_in_limit[1] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_1_LIMIT);
+ context_icn.hibw1_ddr_in_limit[2] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ context_icn.hibw1_ddr_out_prio =
+ readl_relaxed(b + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ context_icn.hibw2_esram_in_pri[0] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw2_esram_in_pri[1] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw2_esram_in0_arblimit[0] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[1] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[2] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw2_esram_in1_arblimit[0] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[1] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[2] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw2_ddr_in_prio[0] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[1] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[2] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw2_ddr_in_limit[0] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_0_LIMIT);
+ context_icn.hibw2_ddr_in_limit[1] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_1_LIMIT);
+
+ context_icn.hibw2_ddr_in_limit[2] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_2_LIMIT);
+
+ context_icn.hibw2_ddr_out_prio =
+ readl_relaxed(b + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ context_icn.esram0_in_prio[0] =
+ readl_relaxed(b + NODE_ESRAM0_IN_0_PRIORITY);
+ context_icn.esram0_in_prio[1] =
+ readl_relaxed(b + NODE_ESRAM0_IN_1_PRIORITY);
+ context_icn.esram0_in_prio[2] =
+ readl_relaxed(b + NODE_ESRAM0_IN_2_PRIORITY);
+ context_icn.esram0_in_prio[3] =
+ readl_relaxed(b + NODE_ESRAM0_IN_3_PRIORITY);
+
+ context_icn.esram0_in_lim[0] =
+ readl_relaxed(b + NODE_ESRAM0_IN_0_LIMIT);
+ context_icn.esram0_in_lim[1] =
+ readl_relaxed(b + NODE_ESRAM0_IN_1_LIMIT);
+ context_icn.esram0_in_lim[2] =
+ readl_relaxed(b + NODE_ESRAM0_IN_2_LIMIT);
+ context_icn.esram0_in_lim[3] =
+ readl_relaxed(b + NODE_ESRAM0_IN_3_LIMIT);
+
+ context_icn.esram12_in_prio[0] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_0_PRIORITY);
+ context_icn.esram12_in_prio[1] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_1_PRIORITY);
+ context_icn.esram12_in_prio[2] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_2_PRIORITY);
+ context_icn.esram12_in_prio[3] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_3_PRIORITY);
+
+ context_icn.esram12_in_arb_lim[0] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[1] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_0_ARB_2_LIMIT);
+ context_icn.esram12_in_arb_lim[2] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[3] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_1_ARB_2_LIMIT);
+ context_icn.esram12_in_arb_lim[4] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[5] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_2_ARB_2_LIMIT);
+ context_icn.esram12_in_arb_lim[6] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_3_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[7] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_3_ARB_2_LIMIT);
+
+ context_icn.esram34_in_prio[0] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_0_PRIORITY);
+ context_icn.esram34_in_prio[1] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_1_PRIORITY);
+ context_icn.esram34_in_prio[2] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_2_PRIORITY);
+ context_icn.esram34_in_prio[3] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_3_PRIORITY);
+
+ context_icn.esram34_in_arb_lim[0] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[1] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_0_ARB_2_LIMIT);
+ context_icn.esram34_in_arb_lim[2] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[3] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_1_ARB_2_LIMIT);
+ context_icn.esram34_in_arb_lim[4] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[5] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_2_ARB_2_LIMIT);
+ context_icn.esram34_in_arb_lim[6] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_3_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[7] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_3_ARB_2_LIMIT);
+}
+
+/**
+ * u8500_context_restore_icn() - restore ICN context
+ *
+ */
+void u8500_context_restore_icn(void)
+{
+ void __iomem *b = context_icn.base;
+
+ writel_relaxed(context_icn.hibw1_esram_in_pri[0],
+ b + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw1_esram_in_pri[1],
+ b + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw1_esram_in_pri[2],
+ b + NODE_HIBW1_ESRAM_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[0],
+ b + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[1],
+ b + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[2],
+ b + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[0],
+ b + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[1],
+ b + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[2],
+ b + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_esram_in2_arb[0],
+ b + NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in2_arb[1],
+ b + NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in2_arb[2],
+ b + NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[0],
+ b + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[1],
+ b + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[2],
+ b + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[0],
+ b + NODE_HIBW1_DDR_IN_0_LIMIT);
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[1],
+ b + NODE_HIBW1_DDR_IN_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[2],
+ b + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_ddr_out_prio,
+ b + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ writel_relaxed(context_icn.hibw2_esram_in_pri[0],
+ b + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw2_esram_in_pri[1],
+ b + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[0],
+ b + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[1],
+ b + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[2],
+ b + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[0],
+ b + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[1],
+ b + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[2],
+ b + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[0],
+ b + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[1],
+ b + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[2],
+ b + NODE_HIBW2_DDR_IN_2_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[0],
+ b + NODE_HIBW2_DDR_IN_0_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[1],
+ b + NODE_HIBW2_DDR_IN_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[2],
+ b + NODE_HIBW2_DDR_IN_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_out_prio,
+ b + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ writel_relaxed(context_icn.esram0_in_prio[0],
+ b + NODE_ESRAM0_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram0_in_prio[1],
+ b + NODE_ESRAM0_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram0_in_prio[2],
+ b + NODE_ESRAM0_IN_2_PRIORITY);
+ writel_relaxed(context_icn.esram0_in_prio[3],
+ b + NODE_ESRAM0_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.esram0_in_lim[0],
+ b + NODE_ESRAM0_IN_0_LIMIT);
+ writel_relaxed(context_icn.esram0_in_lim[1],
+ b + NODE_ESRAM0_IN_1_LIMIT);
+ writel_relaxed(context_icn.esram0_in_lim[2],
+ b + NODE_ESRAM0_IN_2_LIMIT);
+ writel_relaxed(context_icn.esram0_in_lim[3],
+ b + NODE_ESRAM0_IN_3_LIMIT);
+
+ writel_relaxed(context_icn.esram12_in_prio[0],
+ b + NODE_ESRAM1_2_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[1],
+ b + NODE_ESRAM1_2_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[2],
+ b + NODE_ESRAM1_2_IN_2_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[3],
+ b + NODE_ESRAM1_2_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.esram12_in_arb_lim[0],
+ b + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[1],
+ b + NODE_ESRAM1_2_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[2],
+ b + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[3],
+ b + NODE_ESRAM1_2_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[4],
+ b + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[5],
+ b + NODE_ESRAM1_2_IN_2_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[6],
+ b + NODE_ESRAM1_2_IN_3_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[7],
+ b + NODE_ESRAM1_2_IN_3_ARB_2_LIMIT);
+
+ writel_relaxed(context_icn.esram34_in_prio[0],
+ b + NODE_ESRAM3_4_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[1],
+ b + NODE_ESRAM3_4_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[2],
+ b + NODE_ESRAM3_4_IN_2_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[3],
+ b + NODE_ESRAM3_4_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.esram34_in_arb_lim[0],
+ b + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[1],
+ b + NODE_ESRAM3_4_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[2],
+ b + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[3],
+ b + NODE_ESRAM3_4_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[4],
+ b + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[5],
+ b + NODE_ESRAM3_4_IN_2_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[6],
+ b + NODE_ESRAM3_4_IN_3_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[7],
+ b + NODE_ESRAM3_4_IN_3_ARB_2_LIMIT);
+}
+
+void u8500_context_init(void)
+{
+ context_icn.base = ioremap(U8500_ICN_BASE, SZ_8K);
+}
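
As on db5500, the db8500 ICN registers lose their settings when the interconnect is powered down, so the save routine runs before deep sleep is entered and the restore routine runs on wakeup. A minimal sketch of the expected ordering follows; the wrapper name is hypothetical, the real callers being context_vape_save()/context_vape_restore() in the PM code added below:

        /* Hypothetical illustration of the save/restore ordering. */
        static void example_icn_power_cycle(void)
        {
                u8500_context_save_icn();
                /* ... power down the interconnect, e.g. ApDeepSleep ... */
                u8500_context_restore_icn();
        }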
diff --git a/arch/arm/mach-ux500/pm/context.c b/arch/arm/mach-ux500/pm/context.c
new file mode 100644
index 00000000000..ffd73f3ed52
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context.c
@@ -0,0 +1,962 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>,
+ * Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com>,
+ * Sundar Iyer for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/nomadik.h>
+
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/pm.h>
+#include <mach/context.h>
+
+#include <asm/hardware/gic.h>
+#include <asm/smp_twd.h>
+
+#include "scu.h"
+#include "../product.h"
+#include "../prcc.h"
+
+#define GPIO_NUM_BANKS 9
+#define GPIO_NUM_SAVE_REGISTERS 7
+
+/*
+ * TODO:
+ * - Use the "UX500*"-macros instead where possible
+ */
+
+#define U8500_BACKUPRAM_SIZE SZ_64K
+
+#define U8500_PUBLIC_BOOT_ROM_BASE (U8500_BOOT_ROM_BASE + 0x17000)
+#define U5500_PUBLIC_BOOT_ROM_BASE (U5500_BOOT_ROM_BASE + 0x18000)
+
+/*
+ * Special dedicated addresses in backup RAM. The 5500 addresses are identical
+ * to the 8500 ones.
+ */
+#define U8500_EXT_RAM_LOC_BACKUPRAM_ADDR 0x80151FDC
+#define U8500_CPU0_CP15_CR_BACKUPRAM_ADDR 0x80151F80
+#define U8500_CPU1_CP15_CR_BACKUPRAM_ADDR 0x80151FA0
+
+#define U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR 0x80151FD8
+#define U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR 0x80151FE0
+
+#define GIC_DIST_ENABLE_NS 0x0
+
+/* 32 interrupts fit in 4 bytes */
+#define GIC_DIST_ENABLE_SET_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - \
+ IRQ_SHPI_START) / 32)
+#define GIC_DIST_ENABLE_SET_CPU_NUM (IRQ_SHPI_START / 32)
+#define GIC_DIST_ENABLE_SET_SPI0 GIC_DIST_ENABLE_SET
+#define GIC_DIST_ENABLE_SET_SPI32 (GIC_DIST_ENABLE_SET + IRQ_SHPI_START / 8)
+
+#define GIC_DIST_ENABLE_CLEAR_0 GIC_DIST_ENABLE_CLEAR
+#define GIC_DIST_ENABLE_CLEAR_32 (GIC_DIST_ENABLE_CLEAR + 4)
+#define GIC_DIST_ENABLE_CLEAR_64 (GIC_DIST_ENABLE_CLEAR + 8)
+#define GIC_DIST_ENABLE_CLEAR_96 (GIC_DIST_ENABLE_CLEAR + 12)
+#define GIC_DIST_ENABLE_CLEAR_128 (GIC_DIST_ENABLE_CLEAR + 16)
+
+#define GIC_DIST_PRI_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - IRQ_SHPI_START) / 4)
+#define GIC_DIST_PRI_CPU_NUM (IRQ_SHPI_START / 4)
+#define GIC_DIST_PRI_SPI0 GIC_DIST_PRI
+#define GIC_DIST_PRI_SPI32 (GIC_DIST_PRI + IRQ_SHPI_START)
+
+#define GIC_DIST_SPI_TARGET_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - \
+ IRQ_SHPI_START) / 4)
+#define GIC_DIST_SPI_TARGET_CPU_NUM (IRQ_SHPI_START / 4)
+#define GIC_DIST_SPI_TARGET_SPI0 GIC_DIST_TARGET
+#define GIC_DIST_SPI_TARGET_SPI32 (GIC_DIST_TARGET + IRQ_SHPI_START)
+
+/* 16 interrupts per 4 bytes */
+#define GIC_DIST_CONFIG_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - IRQ_SHPI_START) \
+ / 16)
+#define GIC_DIST_CONFIG_CPU_NUM (IRQ_SHPI_START / 16)
+#define GIC_DIST_CONFIG_SPI0 GIC_DIST_CONFIG
+#define GIC_DIST_CONFIG_SPI32 (GIC_DIST_CONFIG + IRQ_SHPI_START / 4)
+
+/* TODO! Move STM reg offsets to suitable place */
+#define STM_CR_OFFSET 0x00
+#define STM_MMC_OFFSET 0x08
+#define STM_TER_OFFSET 0x10
+
+#define TPIU_PORT_SIZE 0x4
+#define TPIU_TRIGGER_COUNTER 0x104
+#define TPIU_TRIGGER_MULTIPLIER 0x108
+#define TPIU_CURRENT_TEST_PATTERN 0x204
+#define TPIU_TEST_PATTERN_REPEAT 0x208
+#define TPIU_FORMATTER 0x304
+#define TPIU_FORMATTER_SYNC 0x308
+#define TPIU_LOCK_ACCESS_REGISTER 0xFB0
+
+#define TPIU_UNLOCK_CODE 0xc5acce55
+
+#define SCU_FILTER_STARTADDR 0x40
+#define SCU_FILTER_ENDADDR 0x44
+#define SCU_ACCESS_CTRL_SAC 0x50
+
+/* The context of the Trace Port Interface Unit (TPIU) */
+static struct {
+ void __iomem *base;
+ u32 port_size;
+ u32 trigger_counter;
+ u32 trigger_multiplier;
+ u32 current_test_pattern;
+ u32 test_pattern_repeat;
+ u32 formatter;
+ u32 formatter_sync;
+} context_tpiu;
+
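+/* The context of the APE System Trace Module (STM) */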
+static struct {
+ void __iomem *base;
+ u32 cr;
+ u32 mmc;
+ u32 ter;
+} context_stm_ape;
+
+struct context_gic_cpu {
+ void __iomem *base;
+ u32 ctrl;
+ u32 primask;
+ u32 binpoint;
+};
+static DEFINE_PER_CPU(struct context_gic_cpu, context_gic_cpu);
+
+static struct {
+ void __iomem *base;
+ u32 ns;
+ u32 enable_set[GIC_DIST_ENABLE_SET_COMMON_NUM]; /* IRQ 32 to 159 */
+ u32 priority_level[GIC_DIST_PRI_COMMON_NUM];
+ u32 spi_target[GIC_DIST_SPI_TARGET_COMMON_NUM];
+ u32 config[GIC_DIST_CONFIG_COMMON_NUM];
+} context_gic_dist_common;
+
+struct context_gic_dist_cpu {
+ void __iomem *base;
+ u32 enable_set[GIC_DIST_ENABLE_SET_CPU_NUM]; /* IRQ 0 to 31 */
+ u32 priority_level[GIC_DIST_PRI_CPU_NUM];
+ u32 spi_target[GIC_DIST_SPI_TARGET_CPU_NUM];
+ u32 config[GIC_DIST_CONFIG_CPU_NUM];
+};
+static DEFINE_PER_CPU(struct context_gic_dist_cpu, context_gic_dist_cpu);
+
+static struct {
+ void __iomem *base;
+ u32 ctrl;
+ u32 cpu_pwrstatus;
+ u32 inv_all_nonsecure;
+ u32 filter_start_addr;
+ u32 filter_end_addr;
+ u32 access_ctrl_sac;
+} context_scu;
+
+#define UX500_NR_PRCC_BANKS 5
+static struct {
+ void __iomem *base;
+ struct clk *clk;
+ u32 bus_clk;
+ u32 kern_clk;
+} context_prcc[UX500_NR_PRCC_BANKS];
+
+static u32 backup_sram_storage[NR_CPUS] = {
+ IO_ADDRESS(U8500_CPU0_CP15_CR_BACKUPRAM_ADDR),
+ IO_ADDRESS(U8500_CPU1_CP15_CR_BACKUPRAM_ADDR),
+};
+
+static u32 gpio_bankaddr[GPIO_NUM_BANKS] = {IO_ADDRESS(U8500_GPIOBANK0_BASE),
+ IO_ADDRESS(U8500_GPIOBANK1_BASE),
+ IO_ADDRESS(U8500_GPIOBANK2_BASE),
+ IO_ADDRESS(U8500_GPIOBANK3_BASE),
+ IO_ADDRESS(U8500_GPIOBANK4_BASE),
+ IO_ADDRESS(U8500_GPIOBANK5_BASE),
+ IO_ADDRESS(U8500_GPIOBANK6_BASE),
+ IO_ADDRESS(U8500_GPIOBANK7_BASE),
+ IO_ADDRESS(U8500_GPIOBANK8_BASE)
+};
+
+static u32 gpio_save[GPIO_NUM_BANKS][GPIO_NUM_SAVE_REGISTERS];
+
+/*
+ * Stacks and stack pointers
+ */
+static DEFINE_PER_CPU(u32[128], varm_registers_backup_stack);
+static DEFINE_PER_CPU(u32 *, varm_registers_pointer);
+
+static DEFINE_PER_CPU(u32[128], varm_cp15_backup_stack);
+static DEFINE_PER_CPU(u32 *, varm_cp15_pointer);
+
+static ATOMIC_NOTIFIER_HEAD(context_ape_notifier_list);
+static ATOMIC_NOTIFIER_HEAD(context_arm_notifier_list);
+
+/*
+ * Register a simple callback for handling vape context save/restore
+ */
+int context_ape_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&context_ape_notifier_list, nb);
+}
+EXPORT_SYMBOL(context_ape_notifier_register);
+
+/*
+ * Remove a previously registered callback
+ */
+int context_ape_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&context_ape_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL(context_ape_notifier_unregister);
+
+/*
+ * Register a simple callback for handling varm context save/restore
+ */
+int context_arm_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&context_arm_notifier_list, nb);
+}
+EXPORT_SYMBOL(context_arm_notifier_register);
+
+/*
+ * Remove a previously registered callback
+ */
+int context_arm_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&context_arm_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL(context_arm_notifier_unregister);
+
+static void save_prcc(void)
+{
+ int i;
+
+ for (i = 0; i < UX500_NR_PRCC_BANKS; i++) {
+ clk_enable(context_prcc[i].clk);
+
+ context_prcc[i].bus_clk =
+ readl(context_prcc[i].base + PRCC_PCKSR);
+ context_prcc[i].kern_clk =
+ readl(context_prcc[i].base + PRCC_KCKSR);
+
+ clk_disable(context_prcc[i].clk);
+ }
+}
+
+static void restore_prcc(void)
+{
+ int i;
+
+ for (i = 0; i < UX500_NR_PRCC_BANKS; i++) {
+ clk_enable(context_prcc[i].clk);
+
+ writel(~context_prcc[i].bus_clk,
+ context_prcc[i].base + PRCC_PCKDIS);
+ writel(~context_prcc[i].kern_clk,
+ context_prcc[i].base + PRCC_KCKDIS);
+
+ writel(context_prcc[i].bus_clk,
+ context_prcc[i].base + PRCC_PCKEN);
+ writel(context_prcc[i].kern_clk,
+ context_prcc[i].base + PRCC_KCKEN);
+ /*
+ * Consider polling KCK/BCK_STATUS to check that all
+ * clocks actually get disabled/enabled
+ */
+
+ clk_disable(context_prcc[i].clk);
+ }
+}
+
+static void save_stm_ape(void)
+{
+ /*
+ * TODO: Check with the PRCMU developers how STM is handled by the
+ * PRCMU firmware. According to the DB5500 design spec there is a
+ * "flush" mechanism that the PRCMU is supposed to use before power
+ * down; the PRCMU firmware might save/restore the following three
+ * registers at the same time.
+ */
+ context_stm_ape.cr = readl(context_stm_ape.base +
+ STM_CR_OFFSET);
+ context_stm_ape.mmc = readl(context_stm_ape.base +
+ STM_MMC_OFFSET);
+ context_stm_ape.ter = readl(context_stm_ape.base +
+ STM_TER_OFFSET);
+}
+
+static void restore_stm_ape(void)
+{
+ writel(context_stm_ape.ter,
+ context_stm_ape.base + STM_TER_OFFSET);
+ writel(context_stm_ape.mmc,
+ context_stm_ape.base + STM_MMC_OFFSET);
+ writel(context_stm_ape.cr,
+ context_stm_ape.base + STM_CR_OFFSET);
+}
+
+static inline bool tpiu_clocked(void)
+{
+ return ux500_jtag_enabled();
+}
+
+/*
+ * Save the context of the Trace Port Interface Unit (TPIU).
+ * Saving/restoring is needed for the PTM tracing to work together
+ * with the sleep states ApSleep and ApDeepSleep.
+ */
+static void save_tpiu(void)
+{
+ if (!tpiu_clocked())
+ return;
+
+ context_tpiu.port_size = readl(context_tpiu.base +
+ TPIU_PORT_SIZE);
+ context_tpiu.trigger_counter = readl(context_tpiu.base +
+ TPIU_TRIGGER_COUNTER);
+ context_tpiu.trigger_multiplier = readl(context_tpiu.base +
+ TPIU_TRIGGER_MULTIPLIER);
+ context_tpiu.current_test_pattern = readl(context_tpiu.base +
+ TPIU_CURRENT_TEST_PATTERN);
+ context_tpiu.test_pattern_repeat = readl(context_tpiu.base +
+ TPIU_TEST_PATTERN_REPEAT);
+ context_tpiu.formatter = readl(context_tpiu.base +
+ TPIU_FORMATTER);
+ context_tpiu.formatter_sync = readl(context_tpiu.base +
+ TPIU_FORMATTER_SYNC);
+}
+
+/*
+ * Restore the context of the Trace Port Interface Unit (TPIU).
+ * Saving/restoring is needed for the PTM tracing to work together
+ * with the sleep states ApSleep and ApDeepSleep.
+ */
+static void restore_tpiu(void)
+{
+ if (!tpiu_clocked())
+ return;
+
+ writel(TPIU_UNLOCK_CODE,
+ context_tpiu.base + TPIU_LOCK_ACCESS_REGISTER);
+
+ writel(context_tpiu.port_size,
+ context_tpiu.base + TPIU_PORT_SIZE);
+ writel(context_tpiu.trigger_counter,
+ context_tpiu.base + TPIU_TRIGGER_COUNTER);
+ writel(context_tpiu.trigger_multiplier,
+ context_tpiu.base + TPIU_TRIGGER_MULTIPLIER);
+ writel(context_tpiu.current_test_pattern,
+ context_tpiu.base + TPIU_CURRENT_TEST_PATTERN);
+ writel(context_tpiu.test_pattern_repeat,
+ context_tpiu.base + TPIU_TEST_PATTERN_REPEAT);
+ writel(context_tpiu.formatter,
+ context_tpiu.base + TPIU_FORMATTER);
+ writel(context_tpiu.formatter_sync,
+ context_tpiu.base + TPIU_FORMATTER_SYNC);
+}
+
+/*
+ * Save GIC CPU IF registers
+ *
+ * This is per cpu so it needs to be called for each one.
+ */
+static void save_gic_if_cpu(struct context_gic_cpu *c_gic_cpu)
+{
+ c_gic_cpu->ctrl = readl_relaxed(c_gic_cpu->base + GIC_CPU_CTRL);
+ c_gic_cpu->primask = readl_relaxed(c_gic_cpu->base + GIC_CPU_PRIMASK);
+ c_gic_cpu->binpoint = readl_relaxed(c_gic_cpu->base + GIC_CPU_BINPOINT);
+}
+
+/*
+ * Restore GIC CPU IF registers
+ *
+ * This is per cpu so it needs to be called for each one.
+ */
+static void restore_gic_if_cpu(struct context_gic_cpu *c_gic_cpu)
+{
+ writel_relaxed(c_gic_cpu->ctrl, c_gic_cpu->base + GIC_CPU_CTRL);
+ writel_relaxed(c_gic_cpu->primask, c_gic_cpu->base + GIC_CPU_PRIMASK);
+ writel_relaxed(c_gic_cpu->binpoint, c_gic_cpu->base + GIC_CPU_BINPOINT);
+}
+
+/*
+ * Save GIC Distributor Common registers
+ *
+ * This context is common. Only one CPU needs to call this.
+ *
+ * Save SPI (Shared Peripheral Interrupt) settings, IRQ 32-159.
+ */
+static void save_gic_dist_common(void)
+{
+ int i;
+
+ context_gic_dist_common.ns = readl_relaxed(context_gic_dist_common.base
+ + GIC_DIST_ENABLE_NS);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_COMMON_NUM; i++)
+ context_gic_dist_common.enable_set[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_ENABLE_SET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_COMMON_NUM; i++)
+ context_gic_dist_common.priority_level[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_PRI_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_COMMON_NUM; i++)
+ context_gic_dist_common.spi_target[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_SPI_TARGET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_CONFIG_COMMON_NUM; i++)
+ context_gic_dist_common.config[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_CONFIG_SPI32 + i * 4);
+}
+
+/*
+ * Restore GIC Distributor Common registers
+ *
+ * This context is common. Only one CPU needs to call this.
+ *
+ * Restore SPI (Shared Peripheral Interrupt) settings, IRQ 32-159.
+ */
+static void restore_gic_dist_common(void)
+{
+ int i;
+
+ for (i = 0; i < GIC_DIST_CONFIG_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.config[i],
+ context_gic_dist_common.base +
+ GIC_DIST_CONFIG_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.spi_target[i],
+ context_gic_dist_common.base +
+ GIC_DIST_SPI_TARGET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.priority_level[i],
+ context_gic_dist_common.base +
+ GIC_DIST_PRI_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.enable_set[i],
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_SET_SPI32 + i * 4);
+
+ writel_relaxed(context_gic_dist_common.ns,
+ context_gic_dist_common.base + GIC_DIST_ENABLE_NS);
+}
+
+/*
+ * Save GIC Dist CPU registers
+ *
+ * This needs to be called by all CPUs which will not call
+ * save_gic_dist_common(). Only the banked GIC registers are
+ * saved here.
+ */
+static void save_gic_dist_cpu(struct context_gic_dist_cpu *c_gic)
+{
+ int i;
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_CPU_NUM; i++)
+ c_gic->enable_set[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_ENABLE_SET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_CPU_NUM; i++)
+ c_gic->priority_level[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_PRI_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_CPU_NUM; i++)
+ c_gic->spi_target[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_SPI_TARGET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_CONFIG_CPU_NUM; i++)
+ c_gic->config[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_CONFIG_SPI0 + i * 4);
+}
+
+/*
+ * Restore GIC Dist CPU registers
+ *
+ * This needs to be called by all CPUs which will not call
+ * restore_gic_dist_common(). Only the banked GIC registers are
+ * restored here.
+ */
+static void restore_gic_dist_cpu(struct context_gic_dist_cpu *c_gic)
+{
+ int i;
+
+ for (i = 0; i < GIC_DIST_CONFIG_CPU_NUM; i++)
+ writel_relaxed(c_gic->config[i],
+ c_gic->base +
+ GIC_DIST_CONFIG_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_CPU_NUM; i++)
+ writel_relaxed(c_gic->spi_target[i],
+ c_gic->base +
+ GIC_DIST_SPI_TARGET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_CPU_NUM; i++)
+ writel_relaxed(c_gic->priority_level[i],
+ c_gic->base +
+ GIC_DIST_PRI_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_CPU_NUM; i++)
+ writel_relaxed(c_gic->enable_set[i],
+ c_gic->base +
+ GIC_DIST_ENABLE_SET_SPI0 + i * 4);
+}
+
+/*
+ * Disable interrupts that do not need to be
+ * enabled during ApDeepSleep.
+ */
+void context_gic_dist_disable_unneeded_irqs(void)
+{
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_0);
+
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_32);
+
+ /* Leave PRCMU IRQ 0 and 1 enabled */
+ writel(0xffff3fff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_64);
+
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_96);
+
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_128);
+}
+
+static void save_scu(void)
+{
+ context_scu.ctrl =
+ readl_relaxed(context_scu.base + SCU_CTRL);
+ context_scu.cpu_pwrstatus =
+ readl_relaxed(context_scu.base + SCU_CPU_STATUS);
+ context_scu.inv_all_nonsecure =
+ readl_relaxed(context_scu.base + SCU_INVALIDATE);
+ context_scu.filter_start_addr =
+ readl_relaxed(context_scu.base + SCU_FILTER_STARTADDR);
+ context_scu.filter_end_addr =
+ readl_relaxed(context_scu.base + SCU_FILTER_ENDADDR);
+ context_scu.access_ctrl_sac =
+ readl_relaxed(context_scu.base + SCU_ACCESS_CTRL_SAC);
+}
+
+static void restore_scu(void)
+{
+ writel_relaxed(context_scu.ctrl,
+ context_scu.base + SCU_CTRL);
+ writel_relaxed(context_scu.cpu_pwrstatus,
+ context_scu.base + SCU_CPU_STATUS);
+ writel_relaxed(context_scu.inv_all_nonsecure,
+ context_scu.base + SCU_INVALIDATE);
+ writel_relaxed(context_scu.filter_start_addr,
+ context_scu.base + SCU_FILTER_STARTADDR);
+ writel_relaxed(context_scu.filter_end_addr,
+ context_scu.base + SCU_FILTER_ENDADDR);
+ writel_relaxed(context_scu.access_ctrl_sac,
+ context_scu.base + SCU_ACCESS_CTRL_SAC);
+}
+
+/*
+ * Save VAPE context
+ */
+void context_vape_save(void)
+{
+ atomic_notifier_call_chain(&context_ape_notifier_list,
+ CONTEXT_APE_SAVE, NULL);
+
+ if (cpu_is_u5500())
+ u5500_context_save_icn();
+ if (cpu_is_u8500())
+ u8500_context_save_icn();
+
+ save_stm_ape();
+
+ save_tpiu();
+
+ save_prcc();
+}
+
+/*
+ * Restore VAPE context
+ */
+void context_vape_restore(void)
+{
+ restore_prcc();
+
+ restore_tpiu();
+
+ restore_stm_ape();
+
+ if (cpu_is_u5500())
+ u5500_context_restore_icn();
+ if (cpu_is_u8500())
+ u8500_context_restore_icn();
+
+ atomic_notifier_call_chain(&context_ape_notifier_list,
+ CONTEXT_APE_RESTORE, NULL);
+}
+
+/*
+ * Save GPIO registers that might be modified
+ * for power save reasons.
+ */
+void context_gpio_save(void)
+{
+ int i;
+
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ gpio_save[i][0] = readl(gpio_bankaddr[i] + NMK_GPIO_AFSLA);
+ gpio_save[i][1] = readl(gpio_bankaddr[i] + NMK_GPIO_AFSLB);
+ gpio_save[i][2] = readl(gpio_bankaddr[i] + NMK_GPIO_PDIS);
+ gpio_save[i][3] = readl(gpio_bankaddr[i] + NMK_GPIO_DIR);
+ gpio_save[i][4] = readl(gpio_bankaddr[i] + NMK_GPIO_DAT);
+ gpio_save[i][6] = readl(gpio_bankaddr[i] + NMK_GPIO_SLPC);
+ }
+}
+
+/*
+ * Restore GPIO registers that might be modified
+ * for power save reasons.
+ */
+void context_gpio_restore(void)
+{
+ int i;
+ u32 output_state;
+ u32 pull_up;
+ u32 pull_down;
+ u32 pull;
+
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ writel(gpio_save[i][2], gpio_bankaddr[i] + NMK_GPIO_PDIS);
+
+ writel(gpio_save[i][3], gpio_bankaddr[i] + NMK_GPIO_DIR);
+
+ /* Set the high outputs. output_state = GPIO_DIR & GPIO_DAT */
+ output_state = gpio_save[i][3] & gpio_save[i][4];
+ writel(output_state, gpio_bankaddr[i] + NMK_GPIO_DATS);
+
+ /*
+ * Set the low outputs.
+ * output_state = ~(GPIO_DIR & GPIO_DAT) & GPIO_DIR
+ */
+ output_state = ~(gpio_save[i][3] & gpio_save[i][4]) &
+ gpio_save[i][3];
+ writel(output_state, gpio_bankaddr[i] + NMK_GPIO_DATC);
+
+ /*
+ * Restore pull up/down.
+ * Only write pull up/down settings on inputs where
+ * PDIS is not set.
+ * pull = (~GPIO_DIR & ~GPIO_PDIS)
+ */
+ pull = (~gpio_save[i][3] & ~gpio_save[i][2]);
+ nmk_gpio_read_pull(i, &pull_up);
+
+ pull_down = pull & ~pull_up;
+ pull_up = pull & pull_up;
+ /* Set pull ups */
+ writel(pull_up, gpio_bankaddr[i] + NMK_GPIO_DATS);
+ /* Set pull downs */
+ writel(pull_down, gpio_bankaddr[i] + NMK_GPIO_DATC);
+
+ writel(gpio_save[i][6], gpio_bankaddr[i] + NMK_GPIO_SLPC);
+
+ }
+}
+
+/*
+ * Restore GPIO mux registers that might be modified
+ * for power save reasons.
+ */
+void context_gpio_restore_mux(void)
+{
+ int i;
+
+ /* Change mux settings */
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ writel(gpio_save[i][0], gpio_bankaddr[i] + NMK_GPIO_AFSLA);
+ writel(gpio_save[i][1], gpio_bankaddr[i] + NMK_GPIO_AFSLB);
+ }
+}
+
+/*
+ * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+ * - Save SLPM registers (Not done.)
+ * - Set SLPM=0 for the IOs you want to switch. (We assume that all
+ * SLPM registers already are 0 except for the ones that want to
+ * have the mux connected in sleep (e.g. modem STM)).
+ * - Configure the GPIO registers for the IOs that are being switched
+ * - Set IOFORCE=1
+ * - Modify the AFLSA/B registers for the IOs that are being switched
+ * - Set IOFORCE=0
+ * - Restore SLPM registers (Not done.)
+ * - Any spurious wakeup events during the switch sequence are to be
+ * ignored and cleared
+ */
+void context_gpio_mux_safe_switch(bool begin)
+{
+ int i;
+
+ static u32 rwimsc[GPIO_NUM_BANKS];
+ static u32 fwimsc[GPIO_NUM_BANKS];
+
+ if (begin) {
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ /* Save registers */
+ rwimsc[i] = readl(gpio_bankaddr[i] + NMK_GPIO_RWIMSC);
+ fwimsc[i] = readl(gpio_bankaddr[i] + NMK_GPIO_FWIMSC);
+
+ /* Prevent spurious wakeups */
+ writel(0, gpio_bankaddr[i] + NMK_GPIO_RWIMSC);
+ writel(0, gpio_bankaddr[i] + NMK_GPIO_FWIMSC);
+ }
+
+ ux500_pm_prcmu_set_ioforce(true);
+ } else {
+ ux500_pm_prcmu_set_ioforce(false);
+
+ /* Restore wake up settings */
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ writel(rwimsc[i], gpio_bankaddr[i] + NMK_GPIO_RWIMSC);
+ writel(fwimsc[i], gpio_bankaddr[i] + NMK_GPIO_FWIMSC);
+ }
+ }
+}
+
+/*
+ * Save common
+ *
+ * This function must be called once for all cores before going to deep sleep.
+ */
+void context_varm_save_common(void)
+{
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_COMMON_SAVE, NULL);
+
+ /* Save common parts */
+ save_gic_dist_common();
+ save_scu();
+}
+
+/*
+ * Restore common
+ *
+ * This function must be called once for all cores when waking up from deep
+ * sleep.
+ */
+void context_varm_restore_common(void)
+{
+ /* Restore common parts */
+ restore_scu();
+ restore_gic_dist_common();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_COMMON_RESTORE, NULL);
+}
+
+/*
+ * Save core
+ *
+ * This function must be called once for each cpu core before going to deep
+ * sleep.
+ */
+void context_varm_save_core(void)
+{
+ int cpu = smp_processor_id();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_CORE_SAVE, NULL);
+
+ per_cpu(varm_cp15_pointer, cpu) = per_cpu(varm_cp15_backup_stack, cpu);
+
+ /* Save core */
+ twd_save();
+ save_gic_if_cpu(&per_cpu(context_gic_cpu, cpu));
+ save_gic_dist_cpu(&per_cpu(context_gic_dist_cpu, cpu));
+ context_save_cp15_registers(&per_cpu(varm_cp15_pointer, cpu));
+}
+
+/*
+ * Restore core
+ *
+ * This function must be called once for each cpu core when waking up from
+ * deep sleep.
+ */
+void context_varm_restore_core(void)
+{
+ int cpu = smp_processor_id();
+
+ /* Restore core */
+ context_restore_cp15_registers(&per_cpu(varm_cp15_pointer, cpu));
+ restore_gic_dist_cpu(&per_cpu(context_gic_dist_cpu, cpu));
+ restore_gic_if_cpu(&per_cpu(context_gic_cpu, cpu));
+ twd_restore();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_CORE_RESTORE, NULL);
+}
+
+/*
+ * Save CPU registers
+ *
+ * This function saves ARM registers.
+ */
+void context_save_cpu_registers(void)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(varm_registers_pointer, cpu) =
+ per_cpu(varm_registers_backup_stack, cpu);
+ context_save_arm_registers(&per_cpu(varm_registers_pointer, cpu));
+}
+
+/*
+ * Restore CPU registers
+ *
+ * This function restores ARM registers.
+ */
+void context_restore_cpu_registers(void)
+{
+ int cpu = smp_processor_id();
+
+ context_restore_arm_registers(&per_cpu(varm_registers_pointer, cpu));
+}
+
+/*
+ * This function stores CP15 registers related to cache and mmu
+ * in backup SRAM. It also stores the stack pointer, CPSR and
+ * the return address for the PC in backup SRAM, and then
+ * executes a wait for interrupt (WFI).
+ */
+void context_save_to_sram_and_wfi(bool cleanL2cache)
+{
+ int cpu = smp_processor_id();
+
+ context_save_to_sram_and_wfi_internal(backup_sram_storage[cpu],
+ cleanL2cache);
+}
+
+static int __init context_init(void)
+{
+ int i;
+ void __iomem *ux500_backup_ptr;
+
+ /* allocate backup pointer for RAM data */
+ ux500_backup_ptr = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(U8500_BACKUPRAM_SIZE));
+
+ if (!ux500_backup_ptr) {
+ pr_warning("context: could not allocate backup memory\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Tell the ROM code where to store the backup contents:
+ * pass the physical address of the backup area to the ROM code
+ */
+ writel(virt_to_phys(ux500_backup_ptr),
+ IO_ADDRESS(U8500_EXT_RAM_LOC_BACKUPRAM_ADDR));
+
+ if (cpu_is_u5500()) {
+ writel(IO_ADDRESS(U5500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ writel(IO_ADDRESS(U5500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ context_tpiu.base = ioremap(U5500_TPIU_BASE, SZ_4K);
+ context_stm_ape.base = ioremap(U5500_STM_REG_BASE, SZ_4K);
+ context_scu.base = ioremap(U5500_SCU_BASE, SZ_4K);
+
+ context_prcc[0].base = ioremap(U5500_CLKRST1_BASE, SZ_4K);
+ context_prcc[1].base = ioremap(U5500_CLKRST2_BASE, SZ_4K);
+ context_prcc[2].base = ioremap(U5500_CLKRST3_BASE, SZ_4K);
+ context_prcc[3].base = ioremap(U5500_CLKRST5_BASE, SZ_4K);
+ context_prcc[4].base = ioremap(U5500_CLKRST6_BASE, SZ_4K);
+
+ context_gic_dist_common.base = ioremap(U5500_GIC_DIST_BASE, SZ_4K);
+ per_cpu(context_gic_cpu, 0).base = ioremap(U5500_GIC_CPU_BASE, SZ_4K);
+ } else if (cpu_is_u8500()) {
+ /* Store the public boot ROM logical address in backup RAM, for both CPUs */
+ writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ context_tpiu.base = ioremap(U8500_TPIU_BASE, SZ_4K);
+ context_stm_ape.base = ioremap(U8500_STM_REG_BASE, SZ_4K);
+ context_scu.base = ioremap(U8500_SCU_BASE, SZ_4K);
+
+ /* PERIPH4 is always on, so there is no need to save its PRCC registers */
+ context_prcc[0].base = ioremap(U8500_CLKRST1_BASE, SZ_4K);
+ context_prcc[1].base = ioremap(U8500_CLKRST2_BASE, SZ_4K);
+ context_prcc[2].base = ioremap(U8500_CLKRST3_BASE, SZ_4K);
+ context_prcc[3].base = ioremap(U8500_CLKRST5_BASE, SZ_4K);
+ context_prcc[4].base = ioremap(U8500_CLKRST6_BASE, SZ_4K);
+
+ context_gic_dist_common.base = ioremap(U8500_GIC_DIST_BASE, SZ_4K);
+ per_cpu(context_gic_cpu, 0).base = ioremap(U8500_GIC_CPU_BASE, SZ_4K);
+ }
+
+ per_cpu(context_gic_dist_cpu, 0).base = context_gic_dist_common.base;
+
+ for (i = 1; i < num_possible_cpus(); i++) {
+ per_cpu(context_gic_cpu, i).base
+ = per_cpu(context_gic_cpu, 0).base;
+ per_cpu(context_gic_dist_cpu, i).base
+ = per_cpu(context_gic_dist_cpu, 0).base;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(context_prcc); i++) {
+ const int clusters[] = {1, 2, 3, 5, 6};
+ char clkname[10];
+
+ snprintf(clkname, sizeof(clkname), "PERIPH%d", clusters[i]);
+
+ context_prcc[i].clk = clk_get_sys(clkname, NULL);
+ BUG_ON(IS_ERR(context_prcc[i].clk));
+ }
+
+ if (cpu_is_u8500()) {
+ u8500_context_init();
+ } else if (cpu_is_u5500()) {
+ u5500_context_init();
+ } else {
+ printk(KERN_ERR "context: unknown hardware!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+subsys_initcall(context_init);
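
Drivers that keep state in the VAPE power domain can hook into context_vape_save()/context_vape_restore() through the notifier chains exported above. A minimal sketch of such a client follows; the register offset, base pointer and names are invented for the example, and the mapping of example_base at probe time is left out:

        /* Hypothetical client of the APE context notifier chain. */
        #include <linux/io.h>
        #include <linux/notifier.h>
        #include <mach/context.h>

        #define EXAMPLE_REG 0x0                 /* hypothetical register offset */

        static void __iomem *example_base;      /* mapped at probe time */
        static u32 example_saved_reg;

        static int example_context_notify(struct notifier_block *nb,
                                          unsigned long event, void *data)
        {
                switch (event) {
                case CONTEXT_APE_SAVE:
                        example_saved_reg = readl(example_base + EXAMPLE_REG);
                        break;
                case CONTEXT_APE_RESTORE:
                        writel(example_saved_reg, example_base + EXAMPLE_REG);
                        break;
                }
                return NOTIFY_OK;
        }

        static struct notifier_block example_context_nb = {
                .notifier_call = example_context_notify,
        };

        /* At probe: context_ape_notifier_register(&example_context_nb); */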
diff --git a/arch/arm/mach-ux500/pm/context_arm.S b/arch/arm/mach-ux500/pm/context_arm.S
new file mode 100644
index 00000000000..edb894d6a35
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context_arm.S
@@ -0,0 +1,409 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/linkage.h>
+#include <mach/hardware.h>
+#include <asm/hardware/cache-l2x0.h>
+
+/*
+ * Save and increment macro
+ */
+.macro SAVE_AND_INCREMENT FROM_REG TO_REG
+ str \FROM_REG, [\TO_REG], #+4
+.endm
+
+/*
+ * Decrement and restore macro
+ */
+.macro DECREMENT_AND_RESTORE FROM_REG TO_REG
+ ldr \TO_REG, [\FROM_REG, #-4]!
+.endm
+
+/*
+ * Save ARM registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * Backup stack operations:
+ * + {sp, lr}^
+ * + cpsr
+ * + {r3, r8-r14} (FIQ mode: r3=spsr)
+ * + {r3, r13, r14} (IRQ mode: r3=spsr)
+ * + {r3, r13, r14} (abort mode: r3=spsr)
+ * + {r3, r13, r14} (undef mode: r3=spsr)
+ */
+ .align
+ .section ".text", "ax"
+ENTRY(context_save_arm_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack
+ ldr r1, [r0] @ Read backup stack pointer
+
+ARM( stmia r1, {sp, lr}^ ) @ Store user mode sp and lr
+ @ registers
+ARM( add r1, r1, #8 ) @ Update backup pointer (not
+ @ done in previous instruction)
+THUMB( str sp, [r1], #+4 )
+THUMB( str lr, [r1], #+4 )
+
+ mrs r2, cpsr @ Get CPSR
+ SAVE_AND_INCREMENT r2 r1 @ Save CPSR register
+ orr r2, r2, #0xc0 @ Disable FIQ and IRQ
+ bic r2, r2, #0x1f @ Setup r2 to change mode
+
+ @ The suffix to CPSR refers to which field(s) of the CPSR is
+ @ referenced (you can specify one or more). Defined fields are:
+ @
+ @ c - control
+ @ x - extension
+ @ s - status
+ @ f - flags
+
+ orr r3, r2, #0x11 @ Save FIQ mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ARM( stmia r1!, {r3, r8-r14} )
+THUMB( stmia r1!, {r3, r8-r12, r14} )
+THUMB( str r13, [r1], #+4 )
+
+
+ orr r3, r2, #0x12 @ Save IRQ mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ARM( stmia r1!, {r3, r13, r14} )
+THUMB( stmia r1!, {r3, r14} )
+THUMB( str r13, [r1], #+4 )
+
+ orr r3, r2, #0x17 @ Save abort mode registers +
+ @ common mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ARM( stmia r1!, {r3, r13, r14} )
+THUMB( stmia r1!, {r3, r14} )
+THUMB( str r13, [r1], #+4 )
+
+ orr r3, r2, #0x1B @ Save undef mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ARM( stmia r1!, {r3, r13, r14} )
+THUMB( stmia r1!, {r3, r14} )
+THUMB( str r13, [r1], #+4 )
+
+ orr r3, r2, #0x13 @ Return to supervisor mode
+ msr cpsr_cxsf, r3
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Restore ARM registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * Backup stack operations:
+ * - {r3, r13, r14} (undef mode: spsr=r3)
+ * - {r3, r13, r14} (abort mode: spsr=r3)
+ * - {r3, r13, r14} (IRQ mode: spsr=r3)
+ * - {r3, r8-r14} (FIQ mode: spsr=r3)
+ * - cpsr
+ * - {sp, lr}^
+ */
+ .align
+ .section ".text", "ax"
+ENTRY(context_restore_arm_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack
+ ldr r1, [r0] @ Read backup stack pointer
+
+ mrs r2, cpsr @ Get CPSR
+ orr r2, r2, #0xc0 @ Disable FIQ and IRQ
+ bic r2, r2, #0x1f @ Setup r2 to change mode
+
+ orr r3, r2, #0x1b @ Restore undef mode registers
+ msr cpsr_cxsf, r3
+ARM( ldmdb r1!, {r3, r13, r14} )
+THUMB( ldr r13, [r1], #-4 )
+THUMB( ldmdb r1!, {r3, r14} )
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x17 @ Restore abort mode registers
+ msr cpsr_cxsf, r3
+ARM( ldmdb r1!, {r3, r13, r14} )
+THUMB( ldr r13, [r1], #-4 )
+THUMB( ldmdb r1!, {r3, r14} )
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x12 @ Restore IRQ mode registers
+ msr cpsr_cxsf, r3
+ARM( ldmdb r1!, {r3, r13, r14} )
+THUMB( ldr r13, [r1], #-4 )
+THUMB( ldmdb r1!, {r3, r14} )
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x11 @ Restore FIQ mode registers
+ msr cpsr_cxsf, r3
+ARM( ldmdb r1!, {r3, r8-r14} )
+THUMB( ldr r13, [r1], #-4 )
+THUMB( ldmdb r1!, {r3, r8-r12, r14} )
+
+ msr spsr_cxsf, r3
+
+ DECREMENT_AND_RESTORE r1 r3 @ Restore cpsr register
+ msr cpsr_cxsf, r3
+
+ARM( ldmdb r1, {sp, lr}^ ) @ Restore sp and lr registers
+ARM( sub r1, r1, #8 ) @ Update backup pointer (not
+ @ done in previous instruction)
+THUMB( ldr lr, [r1], #-4 )
+THUMB( ldr sp, [r1], #-4 )
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Save CP15 registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * TTBR0, TTBR1, TTBCR, DACR CP15 registers are restored by boot ROM from SRAM.
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_save_cp15_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack (r3 is saved due
+ @ to 8 byte aligned stack)
+ ldr r1, [r0] @ Read backup stack pointer
+
+ mrc p15, 0, r2, c12, c0, 0 @ Read Non-secure Vector Base
+ @ Address Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c10, c2, 0 @ Access primary memory region
+ @ remap register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c10, c2, 1 @ Access normal memory region
+ @ remap register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c13, c0, 1 @ Read Context ID Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 2 @ Read Thread ID registers,
+ @ this register is both user
+ @ and privileged R/W accessible
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 3 @ Read Thread ID registers,
+ @ this register is user
+ @ read-only and privileged R/W
+ @ accessible.
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 4 @ Read Thread ID registers,
+ @ this register is privileged
+ @ R/W accessible only.
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 2, r2, c0, c0, 0 @ Cache Size Selection Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c9, c12, 0 @ Read PMNC Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c12, 1 @ Read PMCNTENSET Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c12, 5 @ Read PMSELR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c13, 0 @ Read PMCCNTR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c13, 1 @ Read PMXEVTYPER Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 0 @ Read PMUSERENR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 1 @ Read PMINTENSET Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 2 @ Read PMINTENCLR Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c1, c0, 2 @ Read CPACR Register
+ SAVE_AND_INCREMENT r2 r1
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Restore CP15 registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_restore_cp15_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack (r3 is saved due
+ @ to 8 byte aligned stack)
+ ldr r1, [r0] @ Read backup stack pointer
+
+ DECREMENT_AND_RESTORE r1 r2 @ Write CPACR register
+ mcr p15, 0, r2, c1, c0, 2
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 2 @ Write PMINTENCLR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 1 @ Write PMINTENSET Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 0 @ Write PMUSERENR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c13, 1 @ Write PMXEVTYPER Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c13, 0 @ Write PMCCNTR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 5 @ Write PMSELR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 1 @ Write PMCNTENSET Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 0 @ Write PMNC Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 2, r2, c0, c0, 0 @ Cache Size Selection Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 4 @ Write Thread ID registers,
+ @ this register is privileged
+ @ R/W accessible only
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 3 @ Write Thread ID registers,
+ @ this register is user
+ @ read-only and privileged R/W
+ @ accessible
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 2 @ Write Thread ID registers,
+ @ this register is both user
+ @ and privileged R/W accessible
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 1 @ Write Context ID Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c10, c2, 1 @ Access normal memory region
+ @ remap register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c10, c2, 0 @ Access primary memory region
+ @ remap register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c12, c0, 0 @ Write Non-secure Vector Base
+ @ Address Register
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+/*
+ * L1 cache clean function. Commit 'dirty' data from L1
+ * to L2 cache.
+ *
+ * r0, r1, r2, used locally
+ *
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_clean_l1_cache_all)
+
+ mov r0, #0 @ switch to cache level 0
+ @ (L1 cache)
+ mcr p15, 2, r0, c0, c0, 0 @ select current cache level
+ @ in cssr
+
+ dmb
+ mov r1, #0 @ r1 = way index
+wayLoopL1clean:
+ mov r0, #0 @ r0 = line index
+lineLoopL1clean:
+ mov r2, r1, lsl #30 @ TODO: OK to hard-code
+ @ SoC-specific L1 cache details?
+ mov r3, r0, lsl #5
+ add r2, r3
+@ add r2, r0, lsl #5
+ mcr p15, 0, r2, c7, c10, 2 @ Clean cache by set/way
+ add r0, r0, #1
+ cmp r0, #256 @ TODO: Ok with hard-coded
+ @ set/way sizes or do we have to
+ @ read them from ARM regs? Is it
+ @ set correctly in silicon?
+ bne lineLoopL1clean
+ add r1, r1, #1
+ cmp r1, #4 @ TODO: Ditto, sizes...
+ bne wayLoopL1clean
+
+ dsb
+ isb
+ mov pc, lr
+
+ENDPROC(context_clean_l1_cache_all)
+
+/*
+ * Last saves to backup RAM, cache clean and WFI
+ *
+ * r0 = backup_sram_storage base address
+ * r1 = indicate whether also L2 cache should be cleaned
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_save_to_sram_and_wfi_internal)
+
+ stmfd sp!, {r2-r12, lr} @ save on stack.
+
+ mrc p15, 0, r2, c1, c0, 0 @ read cp15 system control
+ @ register
+ str r2, [r0, #0x00]
+ mrc p15, 0, r2, c2, c0, 0 @ read cp15 ttb0 register
+ str r2, [r0, #0x04]
+ mrc p15, 0, r2, c2, c0, 1 @ read cp15 ttb1 register
+ str r2, [r0, #0x08]
+ mrc p15, 0, r2, c2, c0, 2 @ read cp15 ttb control register
+ str r2, [r0, #0x0C]
+ mrc p15, 0, r2, c3, c0, 0 @ read domain access control
+ @ register
+ str r2, [r0, #0x10]
+
+ ldr r2, =return_here
+ str r2, [r0, #0x14] @ save program counter restore
+ @ value to backup_sram_storage
+ mrs r2, cpsr
+ str r2, [r0, #0x18] @ save cpsr to
+ @ backup_sram_storage
+ str sp, [r0, #0x1c] @ save sp to backup_sram_storage
+
+ mov r4, r1 @ Set r4 = cleanL2cache, r1
+ @ will be destroyed by
+ @ context_clean_l1_cache_all
+
+ bl context_clean_l1_cache_all @ Commit all dirty data in L1
+ @ cache to L2 without
+ @ invalidating
+
+ dsb @ data synchronization barrier
+ isb @ instruction synchronization
+ @ barrier
+ wfi @ wait for interrupt
+
+return_here: @ both cores return here
+ @ now we are out of deep sleep
+ @ with all the context lost
+ @ except pc, sp and cpsr
+
+ ldmfd sp!, {r2-r12, pc} @ restore from stack
+
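
All of the assembly routines above take r0 as the address of a per-cpu backup stack pointer and move data through it with the SAVE_AND_INCREMENT and DECREMENT_AND_RESTORE macros (post-increment store, pre-decrement load). In C terms the convention is roughly the following; this is purely illustrative, since the real code has to run in assembly with interrupts masked:

        /* Rough C equivalent of the backup-stack convention (illustration only). */
        #include <linux/types.h>

        static void push_word(u32 **stack, u32 value)
        {
                *(*stack)++ = value;    /* SAVE_AND_INCREMENT */
        }

        static u32 pop_word(u32 **stack)
        {
                return *--(*stack);     /* DECREMENT_AND_RESTORE */
        }

context_save_cpu_registers() and context_varm_save_core() in context.c reset the per-cpu pointers to the start of their backup stacks before each save, so the save and restore walks stay balanced.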
diff --git a/arch/arm/mach-ux500/pm/performance.c b/arch/arm/mach-ux500/pm/performance.c
new file mode 100644
index 00000000000..04aca3cb5bd
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/performance.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Johan Rudholm <johan.rudholm@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/genhd.h>
+#include <linux/major.h>
+#include <linux/cdev.h>
+#include <linux/kernel_stat.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/cpu.h>
+#include <linux/pm_qos.h>
+
+#include <mach/irqs.h>
+
+#define WLAN_PROBE_DELAY 3000 /* 3 seconds */
+#define WLAN_LIMIT (3000/3) /* Boost if more than this many irqs arrive in one probe period */
+
+/*
+ * MMC TODO:
+ * o Develop a more power-aware algorithm
+ * o Make the parameters visible through debugfs
+ * o Get the value of CONFIG_MMC_BLOCK_MINORS at runtime instead, since
+ * it may be altered by drivers/mmc/card/block.c
+ */
+
+/* Sample reads and writes every n ms */
+#define PERF_MMC_PROBE_DELAY 1000
+/* Read threshold, sectors/second */
+#define PERF_MMC_LIMIT_READ 10240
+/* Write threshold, sectors/second */
+#define PERF_MMC_LIMIT_WRITE 8192
+/* Nr of MMC devices */
+#define PERF_MMC_HOSTS 8
+
+/*
+ * Rescan for new MMC devices every
+ * PERF_MMC_PROBE_DELAY * PERF_MMC_RESCAN_CYCLES ms
+ */
+#define PERF_MMC_RESCAN_CYCLES 10
+
+#ifdef CONFIG_MMC_BLOCK
+static struct delayed_work work_mmc;
+#endif
+
+static struct delayed_work work_wlan_workaround;
+static struct pm_qos_request wlan_pm_qos_latency;
+static bool wlan_pm_qos_is_latency_0;
+
+static void wlan_load(struct work_struct *work)
+{
+ int cpu;
+ unsigned int num_irqs = 0;
+ static unsigned int old_num_irqs = UINT_MAX;
+
+ for_each_online_cpu(cpu)
+ num_irqs += kstat_irqs_cpu(IRQ_DB8500_SDMMC1, cpu);
+
+ if ((num_irqs > old_num_irqs) &&
+ (num_irqs - old_num_irqs) > WLAN_LIMIT) {
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "wlan", 125);
+ if (!wlan_pm_qos_is_latency_0) {
+ /*
+ * The wake up latency is set to 0 to prevent
+ * the system from going to sleep. This improves
+ * the wlan throughput in DMA mode.
+ * The wake up latency from sleep adds ~5% overhead
+ * for TX in some cases.
+ * This change doesn't increase performance for wlan
+ * PIO since the CPU usage prevents sleep in this mode.
+ */
+ pm_qos_add_request(&wlan_pm_qos_latency,
+ PM_QOS_CPU_DMA_LATENCY, 0);
+ wlan_pm_qos_is_latency_0 = true;
+ }
+ } else {
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "wlan", 25);
+ if (wlan_pm_qos_is_latency_0) {
+ pm_qos_remove_request(&wlan_pm_qos_latency);
+ wlan_pm_qos_is_latency_0 = false;
+ }
+ }
+
+ old_num_irqs = num_irqs;
+
+ schedule_delayed_work_on(0,
+ &work_wlan_workaround,
+ msecs_to_jiffies(WLAN_PROBE_DELAY));
+}
+
+#ifdef CONFIG_MMC_BLOCK
+/*
+ * Loop over every CONFIG_MMC_BLOCK_MINORS'th minor device of
+ * MMC_BLOCK_MAJOR and get the struct gendisk for each device.
+ * Populates mmc_disks and returns the number of disks found.
+ */
+static int scan_mmc_devices(struct gendisk *mmc_disks[])
+{
+ dev_t devnr;
+ int i, j = 0, part;
+ struct gendisk *mmc_devices[256 / CONFIG_MMC_BLOCK_MINORS];
+
+ memset(&mmc_devices, 0, sizeof(mmc_devices));
+
+ for (i = 0; i * CONFIG_MMC_BLOCK_MINORS < 256; i++) {
+ devnr = MKDEV(MMC_BLOCK_MAJOR, i * CONFIG_MMC_BLOCK_MINORS);
+ mmc_devices[i] = get_gendisk(devnr, &part);
+
+		/* Skip missing devices and devices with no capacity */
+ if (!mmc_devices[i] || !get_capacity(mmc_devices[i]))
+ continue;
+
+ mmc_disks[j] = mmc_devices[i];
+ j++;
+
+ if (j == PERF_MMC_HOSTS)
+ break;
+ }
+
+ return j;
+}
+
+/*
+ * Sample sectors read and written to any MMC devices, update PRCMU
+ * qos requirement
+ */
+static void mmc_load(struct work_struct *work)
+{
+ static unsigned long long old_sectors_read[PERF_MMC_HOSTS];
+ static unsigned long long old_sectors_written[PERF_MMC_HOSTS];
+ static struct gendisk *mmc_disks[PERF_MMC_HOSTS];
+ static int cycle, nrdisk;
+ static bool old_mode;
+ unsigned long long sectors;
+ bool new_mode = false;
+ int i;
+
+ if (!cycle) {
+ memset(&mmc_disks, 0, sizeof(mmc_disks));
+ nrdisk = scan_mmc_devices(mmc_disks);
+ cycle = PERF_MMC_RESCAN_CYCLES;
+ }
+ cycle--;
+
+ for (i = 0; i < nrdisk; i++) {
+ sectors = part_stat_read(&(mmc_disks[i]->part0),
+ sectors[READ]);
+
+ if (old_sectors_read[i] &&
+ sectors > old_sectors_read[i] &&
+ (sectors - old_sectors_read[i]) >
+ PERF_MMC_LIMIT_READ)
+ new_mode = true;
+
+ old_sectors_read[i] = sectors;
+ sectors = part_stat_read(&(mmc_disks[i]->part0),
+ sectors[WRITE]);
+
+ if (old_sectors_written[i] &&
+ sectors > old_sectors_written[i] &&
+ (sectors - old_sectors_written[i]) >
+ PERF_MMC_LIMIT_WRITE)
+ new_mode = true;
+
+ old_sectors_written[i] = sectors;
+ }
+
+ if (!old_mode && new_mode)
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "mmc", 125);
+
+ if (old_mode && !new_mode)
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "mmc", 25);
+
+ old_mode = new_mode;
+
+ schedule_delayed_work(&work_mmc,
+ msecs_to_jiffies(PERF_MMC_PROBE_DELAY));
+
+}
+#endif /* CONFIG_MMC_BLOCK */
+
+static int __init performance_register(void)
+{
+ int ret;
+
+#ifdef CONFIG_MMC_BLOCK
+ ret = prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "mmc", 25);
+ if (ret) {
+ pr_err("%s: Failed to add PRCMU req for mmc\n", __func__);
+ goto out;
+ }
+
+ INIT_DELAYED_WORK_DEFERRABLE(&work_mmc, mmc_load);
+
+ schedule_delayed_work(&work_mmc,
+ msecs_to_jiffies(PERF_MMC_PROBE_DELAY));
+#endif
+
+ ret = prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "wlan", 25);
+ if (ret) {
+ pr_err("%s: Failed to add PRCMU req for wlan\n", __func__);
+ goto out;
+ }
+
+ INIT_DELAYED_WORK_DEFERRABLE(&work_wlan_workaround,
+ wlan_load);
+
+ schedule_delayed_work_on(0, &work_wlan_workaround,
+ msecs_to_jiffies(WLAN_PROBE_DELAY));
+out:
+ return ret;
+}
+late_initcall(performance_register);
diff --git a/arch/arm/mach-ux500/pm/pm.c b/arch/arm/mach-ux500/pm/pm.c
new file mode 100644
index 00000000000..691642e4200
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/pm.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <asm/hardware/gic.h>
+#include <asm/processor.h>
+
+#include <mach/hardware.h>
+#include <mach/pm.h>
+
+#define STABILIZATION_TIME 30 /* us */
+#define GIC_FREEZE_DELAY 1 /* us */
+
+#define PRCM_ARM_WFI_STANDBY_CPU0_WFI 0x8
+#define PRCM_ARM_WFI_STANDBY_CPU1_WFI 0x10
+
+/* Dual A9 core interrupt management unit registers */
+#define PRCM_A9_MASK_REQ 0x328
+#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ 0x1
+#define PRCM_A9_MASK_ACK 0x32c
+
+#define PRCM_ARMITMSK31TO0 0x11c
+#define PRCM_ARMITMSK63TO32 0x120
+#define PRCM_ARMITMSK95TO64 0x124
+#define PRCM_ARMITMSK127TO96 0x128
+#define PRCM_POWER_STATE_VAL 0x25C
+#define PRCM_ARMITVAL31TO0 0x260
+#define PRCM_ARMITVAL63TO32 0x264
+#define PRCM_ARMITVAL95TO64 0x268
+#define PRCM_ARMITVAL127TO96 0x26C
+
+/* ARM WFI Standby signal register */
+#define PRCM_ARM_WFI_STANDBY 0x130
+
+/* IO force */
+#define PRCM_IOCR 0x310
+#define PRCM_IOCR_IOFORCE 0x1
+
+#ifdef CONFIG_UX500_SUSPEND
+int ux500_console_uart_gpio_pin = CONFIG_UX500_CONSOLE_UART_GPIO_PIN;
+#endif
+
+static u32 u8500_gpio_banks[] = {U8500_GPIOBANK0_BASE,
+ U8500_GPIOBANK1_BASE,
+ U8500_GPIOBANK2_BASE,
+ U8500_GPIOBANK3_BASE,
+ U8500_GPIOBANK4_BASE,
+ U8500_GPIOBANK5_BASE,
+ U8500_GPIOBANK6_BASE,
+ U8500_GPIOBANK7_BASE,
+ U8500_GPIOBANK8_BASE};
+
+static u32 u5500_gpio_banks[] = {U5500_GPIOBANK0_BASE,
+ U5500_GPIOBANK1_BASE,
+ U5500_GPIOBANK2_BASE,
+ U5500_GPIOBANK3_BASE,
+ U5500_GPIOBANK4_BASE,
+ U5500_GPIOBANK5_BASE,
+ U5500_GPIOBANK6_BASE,
+ U5500_GPIOBANK7_BASE};
+
+static u32 ux500_gpio_wks[ARRAY_SIZE(u8500_gpio_banks)];
+
+inline int ux500_pm_arm_on_ext_clk(bool leave_arm_pll_on)
+{
+ return 0;
+}
+
+/* Decouple GIC from the interrupt bus */
+void ux500_pm_gic_decouple(void)
+{
+ prcmu_write_masked(PRCM_A9_MASK_REQ,
+ PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ,
+ PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ);
+
+ while (!prcmu_read(PRCM_A9_MASK_REQ))
+ cpu_relax();
+
+ /* TODO: Use the ack bit when possible */
+ udelay(GIC_FREEZE_DELAY); /* Wait for the GIC to freeze */
+}
+
+/* Recouple GIC with the interrupt bus */
+void ux500_pm_gic_recouple(void)
+{
+ prcmu_write_masked(PRCM_A9_MASK_REQ,
+ PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ,
+ 0);
+
+ /* TODO: Use the ack bit when possible */
+}
+
+#define GIC_NUMBER_REGS 5
+bool ux500_pm_gic_pending_interrupt(void)
+{
+ u32 pr; /* Pending register */
+ u32 er; /* Enable register */
+ int i;
+
+ /* 5 registers. STI & PPI not skipped */
+ for (i = 0; i < GIC_NUMBER_REGS; i++) {
+
+ pr = readl_relaxed(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_PENDING_SET + i * 4);
+ er = readl_relaxed(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_ENABLE_SET + i * 4);
+
+ if (pr & er)
+ return true; /* There is a pending interrupt */
+ }
+ return false;
+}
+
+#define GIC_NUMBER_SPI_REGS 4
+bool ux500_pm_prcmu_pending_interrupt(void)
+{
+ u32 it;
+ u32 im;
+ int i;
+
+ for (i = 0; i < GIC_NUMBER_SPI_REGS; i++) { /* There are 4 registers */
+
+ it = prcmu_read(PRCM_ARMITVAL31TO0 + i * 4);
+ im = prcmu_read(PRCM_ARMITMSK31TO0 + i * 4);
+
+ if (it & im)
+ return true; /* There is a pending interrupt */
+ }
+
+ return false;
+}
+
+void ux500_pm_prcmu_set_ioforce(bool enable)
+{
+ if (enable)
+ prcmu_write_masked(PRCM_IOCR,
+ PRCM_IOCR_IOFORCE,
+ PRCM_IOCR_IOFORCE);
+ else
+ prcmu_write_masked(PRCM_IOCR,
+ PRCM_IOCR_IOFORCE,
+ 0);
+}
+
+void ux500_pm_prcmu_copy_gic_settings(void)
+{
+ u32 er; /* Enable register */
+ int i;
+
+ for (i = 0; i < GIC_NUMBER_SPI_REGS; i++) { /* 4*32 SPI interrupts */
+		/* +1 to skip STI and PPI */
+ er = readl_relaxed(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_ENABLE_SET + (i + 1) * 4);
+ prcmu_write(PRCM_ARMITMSK31TO0 + i * 4, er);
+ }
+}
+
+void ux500_pm_gpio_save_wake_up_status(void)
+{
+ int num_banks;
+ u32 *banks;
+ int i;
+
+ if (cpu_is_u5500()) {
+ num_banks = ARRAY_SIZE(u5500_gpio_banks);
+ banks = u5500_gpio_banks;
+ } else {
+ num_banks = ARRAY_SIZE(u8500_gpio_banks);
+ banks = u8500_gpio_banks;
+ }
+
+ nmk_gpio_clocks_enable();
+
+ for (i = 0; i < num_banks; i++)
+ ux500_gpio_wks[i] = readl(__io_address(banks[i]) + NMK_GPIO_WKS);
+
+ nmk_gpio_clocks_disable();
+}
+
+u32 ux500_pm_gpio_read_wake_up_status(unsigned int bank_num)
+{
+ if (WARN_ON(cpu_is_u5500() && bank_num >=
+ ARRAY_SIZE(u5500_gpio_banks)))
+ return 0;
+
+ if (WARN_ON(cpu_is_u8500() && bank_num >=
+ ARRAY_SIZE(u8500_gpio_banks)))
+ return 0;
+
+ return ux500_gpio_wks[bank_num];
+}
+
+/* Check if the other CPU is in WFI */
+bool ux500_pm_other_cpu_wfi(void)
+{
+ if (smp_processor_id()) {
+ /* We are CPU 1 => check if CPU0 is in WFI */
+ if (prcmu_read(PRCM_ARM_WFI_STANDBY) &
+ PRCM_ARM_WFI_STANDBY_CPU0_WFI)
+ return true;
+ } else {
+ /* We are CPU 0 => check if CPU1 is in WFI */
+ if (prcmu_read(PRCM_ARM_WFI_STANDBY) &
+ PRCM_ARM_WFI_STANDBY_CPU1_WFI)
+ return true;
+ }
+
+ return false;
+}
diff --git a/arch/arm/mach-ux500/pm/prcmu-qos-power.c b/arch/arm/mach-ux500/pm/prcmu-qos-power.c
new file mode 100644
index 00000000000..a600a57dc13
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/prcmu-qos-power.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Martin Persson
+ * Per Fransson <per.xx.fransson@stericsson.com>
+ *
+ * Quality of Service for the U8500 PRCM Unit interface driver
+ *
+ * Strongly influenced by kernel/pm_qos_params.c.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/cpufreq.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/cpufreq-dbx500.h>
+
+#include <mach/prcmu-debug.h>
+
+#define ARM_THRESHOLD_FREQ (400000)
+
+static int qos_delayed_cpufreq_notifier(struct notifier_block *,
+ unsigned long, void *);
+
+static s32 cpufreq_requirement_queued;
+static s32 cpufreq_requirement_set;
+
+/*
+ * locking rule: all changes to requirements or prcmu_qos_object list
+ * and prcmu_qos_objects need to happen with prcmu_qos_lock
+ * held, taken with _irqsave. One lock to rule them all
+ */
+struct requirement_list {
+ struct list_head list;
+ union {
+ s32 value;
+ s32 usec;
+ s32 kbps;
+ };
+ char *name;
+};
+
+static s32 max_compare(s32 v1, s32 v2);
+
+struct prcmu_qos_object {
+ struct requirement_list requirements;
+ struct blocking_notifier_head *notifiers;
+ struct miscdevice prcmu_qos_power_miscdev;
+ char *name;
+ s32 default_value;
+ s32 force_value;
+ atomic_t target_value;
+ s32 (*comparitor)(s32, s32);
+};
+
+static struct prcmu_qos_object null_qos;
+static BLOCKING_NOTIFIER_HEAD(prcmu_ape_opp_notifier);
+static BLOCKING_NOTIFIER_HEAD(prcmu_ddr_opp_notifier);
+
+static struct prcmu_qos_object ape_opp_qos = {
+ .requirements = {
+ LIST_HEAD_INIT(ape_opp_qos.requirements.list)
+ },
+ .notifiers = &prcmu_ape_opp_notifier,
+ .name = "ape_opp",
+ /* Target value in % APE OPP */
+ .default_value = 50,
+ .force_value = 0,
+ .target_value = ATOMIC_INIT(50),
+ .comparitor = max_compare
+};
+
+static struct prcmu_qos_object ddr_opp_qos = {
+ .requirements = {
+ LIST_HEAD_INIT(ddr_opp_qos.requirements.list)
+ },
+ .notifiers = &prcmu_ddr_opp_notifier,
+ .name = "ddr_opp",
+ /* Target value in % DDR OPP */
+ .default_value = 25,
+ .force_value = 0,
+ .target_value = ATOMIC_INIT(25),
+ .comparitor = max_compare
+};
+
+static struct prcmu_qos_object arm_opp_qos = {
+ .requirements = {
+ LIST_HEAD_INIT(arm_opp_qos.requirements.list)
+ },
+ /*
+	 * No notifier on ARM OPP qos requests, since a notification would not
+	 * actually do anything except change the limits for cpufreq.
+ */
+ .name = "arm_opp",
+ /* Target value in % ARM OPP, note can be 125% */
+ .default_value = 25,
+ .force_value = 0,
+ .target_value = ATOMIC_INIT(25),
+ .comparitor = max_compare
+};
+
+static struct prcmu_qos_object *prcmu_qos_array[] = {
+ &null_qos,
+ &ape_opp_qos,
+ &ddr_opp_qos,
+ &arm_opp_qos,
+};
+
+static DEFINE_MUTEX(prcmu_qos_mutex);
+static DEFINE_SPINLOCK(prcmu_qos_lock);
+
+static bool ape_opp_forced_to_50_partly_25;
+
+static unsigned long cpufreq_opp_delay = HZ / 5;
+
+unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
+{
+ return cpufreq_opp_delay;
+}
+
+static struct notifier_block qos_delayed_cpufreq_notifier_block = {
+ .notifier_call = qos_delayed_cpufreq_notifier,
+};
+
+void prcmu_qos_set_cpufreq_opp_delay(unsigned long n)
+{
+ if (n == 0) {
+ cpufreq_unregister_notifier(&qos_delayed_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ cpufreq_requirement_set = PRCMU_QOS_DEFAULT_VALUE;
+ cpufreq_requirement_queued = PRCMU_QOS_DEFAULT_VALUE;
+ } else if (cpufreq_opp_delay != 0) {
+ cpufreq_register_notifier(&qos_delayed_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+ cpufreq_opp_delay = n;
+}
+#ifdef CONFIG_CPU_FREQ
+static void update_cpu_limits(s32 extreme_value)
+{
+ int cpu;
+ struct cpufreq_policy policy;
+ int ret;
+ int min_freq, max_freq;
+
+ for_each_online_cpu(cpu) {
+ ret = cpufreq_get_policy(&policy, cpu);
+ if (ret) {
+ pr_err("prcmu qos: get cpufreq policy failed (cpu%d)\n",
+ cpu);
+ continue;
+ }
+
+ ret = dbx500_cpufreq_get_limits(cpu, extreme_value,
+ &min_freq, &max_freq);
+ if (ret)
+ continue;
+ /*
+		 * The cpufreq framework does not allow a frequency change if
+		 * "current min freq" > "new max freq" or
+		 * "current max freq" < "new min freq".
+		 * Hence the intermediate steps below.
+ */
+ if (policy.min > max_freq) {
+ ret = cpufreq_update_freq(cpu, min_freq, policy.max);
+ if (ret)
+ pr_err("prcmu qos: update min cpufreq failed (1)\n");
+ }
+ if (policy.max < min_freq) {
+ ret = cpufreq_update_freq(cpu, policy.min, max_freq);
+ if (ret)
+ pr_err("prcmu qos: update max cpufreq failed (2)\n");
+ }
+
+ ret = cpufreq_update_freq(cpu, min_freq, max_freq);
+ if (ret)
+ pr_err("prcmu qos: update max cpufreq failed (3)\n");
+ }
+
+}
+#else
+static inline void update_cpu_limits(s32 extreme_value) { }
+#endif
+/* static helper function */
+static s32 max_compare(s32 v1, s32 v2)
+{
+ return max(v1, v2);
+}
+
+static void update_target(int target)
+{
+ s32 extreme_value;
+ struct requirement_list *node;
+ unsigned long flags;
+ bool update = false;
+ u8 op;
+
+ mutex_lock(&prcmu_qos_mutex);
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ extreme_value = prcmu_qos_array[target]->default_value;
+
+ if (prcmu_qos_array[target]->force_value != 0) {
+ extreme_value = prcmu_qos_array[target]->force_value;
+ update = true;
+ } else {
+ list_for_each_entry(node,
+ &prcmu_qos_array[target]->requirements.list,
+ list) {
+ extreme_value = prcmu_qos_array[target]->comparitor(
+ extreme_value, node->value);
+ }
+ if (atomic_read(&prcmu_qos_array[target]->target_value)
+ != extreme_value) {
+ update = true;
+ atomic_set(&prcmu_qos_array[target]->target_value,
+ extreme_value);
+ pr_debug("prcmu qos: new target for qos %d is %d\n",
+ target, atomic_read(
+ &prcmu_qos_array[target]->target_value
+ ));
+ }
+ }
+
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+
+ if (!update)
+ goto unlock_and_return;
+
+ if (prcmu_qos_array[target]->notifiers)
+ blocking_notifier_call_chain(prcmu_qos_array[target]->notifiers,
+ (unsigned long)extreme_value,
+ NULL);
+ switch (target) {
+ case PRCMU_QOS_DDR_OPP:
+ switch (extreme_value) {
+ case 50:
+ op = DDR_50_OPP;
+ pr_debug("prcmu qos: set ddr opp to 50%%\n");
+ break;
+ case 100:
+ op = DDR_100_OPP;
+ pr_debug("prcmu qos: set ddr opp to 100%%\n");
+ break;
+ case 25:
+ /* 25% DDR OPP is not supported on 5500 */
+ if (!cpu_is_u5500()) {
+ op = DDR_25_OPP;
+ pr_debug("prcmu qos: set ddr opp to 25%%\n");
+ break;
+ }
+ default:
+ pr_err("prcmu qos: Incorrect ddr target value (%d)",
+ extreme_value);
+ goto unlock_and_return;
+ }
+ prcmu_set_ddr_opp(op);
+ prcmu_debug_ddr_opp_log(op);
+ break;
+ case PRCMU_QOS_APE_OPP:
+ switch (extreme_value) {
+ case 50:
+ op = APE_50_OPP;
+ pr_debug("prcmu qos: set ape opp to 50%%\n");
+ break;
+ case 100:
+ op = APE_100_OPP;
+ pr_debug("prcmu qos: set ape opp to 100%%\n");
+ break;
+ default:
+ pr_err("prcmu qos: Incorrect ape target value (%d)",
+ extreme_value);
+ goto unlock_and_return;
+ }
+
+ if (!ape_opp_forced_to_50_partly_25)
+ (void)prcmu_set_ape_opp(op);
+ prcmu_debug_ape_opp_log(op);
+ break;
+ case PRCMU_QOS_ARM_OPP:
+ {
+ mutex_unlock(&prcmu_qos_mutex);
+ /*
+ * We can't hold the mutex since changing cpufreq
+		 * will trigger a prcmu fw callback.
+ */
+ update_cpu_limits(extreme_value);
+ /* Return since the lock is unlocked */
+ return;
+
+ break;
+ }
+ default:
+ pr_err("prcmu qos: Incorrect target\n");
+ break;
+ }
+
+unlock_and_return:
+ mutex_unlock(&prcmu_qos_mutex);
+}
+
+void prcmu_qos_force_opp(int prcmu_qos_class, s32 i)
+{
+ prcmu_qos_array[prcmu_qos_class]->force_value = i;
+ update_target(prcmu_qos_class);
+}
+
+void prcmu_qos_voice_call_override(bool enable)
+{
+ u8 op;
+
+ mutex_lock(&prcmu_qos_mutex);
+
+ ape_opp_forced_to_50_partly_25 = enable;
+
+ if (enable) {
+ (void)prcmu_set_ape_opp(APE_50_PARTLY_25_OPP);
+ goto unlock_and_return;
+ }
+
+ /* Disable: set the OPP according to the current target value. */
+ switch (atomic_read(
+ &prcmu_qos_array[PRCMU_QOS_APE_OPP]->target_value)) {
+ case 50:
+ op = APE_50_OPP;
+ break;
+ case 100:
+ op = APE_100_OPP;
+ break;
+ default:
+ goto unlock_and_return;
+ }
+
+ (void)prcmu_set_ape_opp(op);
+
+unlock_and_return:
+ mutex_unlock(&prcmu_qos_mutex);
+}
+
+/**
+ * prcmu_qos_requirement - returns current prcmu qos expectation
+ * @prcmu_qos_class: identification of which qos value is requested
+ *
+ * This function returns the current target value in an atomic manner.
+ */
+int prcmu_qos_requirement(int prcmu_qos_class)
+{
+ return atomic_read(&prcmu_qos_array[prcmu_qos_class]->target_value);
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_requirement);
+
+/**
+ * prcmu_qos_add_requirement - inserts new qos request into the list
+ * @prcmu_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the prcmu_qos_class list of requested
+ * qos performance characteristics. It recomputes the aggregate QoS
+ * expectations for the prcmu_qos_class of parameters.
+ */
+int prcmu_qos_add_requirement(int prcmu_qos_class, char *name, s32 value)
+{
+ struct requirement_list *dep;
+ unsigned long flags;
+
+ dep = kzalloc(sizeof(struct requirement_list), GFP_KERNEL);
+ if (dep == NULL)
+ return -ENOMEM;
+
+ if (value == PRCMU_QOS_DEFAULT_VALUE)
+ dep->value = prcmu_qos_array[prcmu_qos_class]->default_value;
+ else
+ dep->value = value;
+ dep->name = kstrdup(name, GFP_KERNEL);
+ if (!dep->name)
+ goto cleanup;
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ list_add(&dep->list,
+ &prcmu_qos_array[prcmu_qos_class]->requirements.list);
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+ update_target(prcmu_qos_class);
+
+ return 0;
+
+cleanup:
+ kfree(dep);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_add_requirement);
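As a usage sketch (not part of this patch), a driver would register a named requirement once, raise it around a hot path and drop it on teardown. The prototypes are the ones exported above; the header providing them is assumed to be the <linux/mfd/dbx500-prcmu.h> already included by this file.

static int example_init(void)
{
	int ret;

	/* Register a named requirement at the default (lowest) OPP. */
	ret = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "example",
					PRCMU_QOS_DEFAULT_VALUE);
	if (ret)
		return ret;

	/* Raise to 100% APE OPP while the hot path is active... */
	prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "example", 100);

	/* ...and fall back to the default when done. */
	prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "example",
				     PRCMU_QOS_DEFAULT_VALUE);
	return 0;
}

static void example_exit(void)
{
	prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "example");
}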
+
+/**
+ * prcmu_qos_update_requirement - modifies an existing qos request
+ * @prcmu_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * Updates an existing qos requirement for the prcmu_qos_class of parameters
+ * along with updating the target prcmu_qos_class value.
+ *
+ * If the named request isn't in the list then no change is made.
+ */
+int prcmu_qos_update_requirement(int prcmu_qos_class, char *name, s32 new_value)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ list_for_each_entry(node,
+ &prcmu_qos_array[prcmu_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ if (new_value == PRCMU_QOS_DEFAULT_VALUE)
+ node->value =
+ prcmu_qos_array[prcmu_qos_class]->default_value;
+ else
+ node->value = new_value;
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+ if (pending_update)
+ update_target(prcmu_qos_class);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_update_requirement);
+
+/**
+ * prcmu_qos_remove_requirement - removes an existing qos request
+ * @prcmu_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ *
+ * Will remove named qos request from prcmu_qos_class list of parameters and
+ * recompute the current target value for the prcmu_qos_class.
+ */
+void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ list_for_each_entry(node,
+ &prcmu_qos_array[prcmu_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ kfree(node->name);
+ list_del(&node->list);
+ kfree(node);
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+ if (pending_update)
+ update_target(prcmu_qos_class);
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_remove_requirement);
+
+/**
+ * prcmu_qos_add_notifier - sets notification entry for changes to target value
+ * @prcmu_qos_class: identifies which qos target changes should be notified.
+ * @notifier: notifier block managed by caller.
+ *
+ * will register the notifier into a notification chain that gets called
+ * upon changes to the prcmu_qos_class target value.
+ */
+int prcmu_qos_add_notifier(int prcmu_qos_class, struct notifier_block *notifier)
+{
+ int retval = -EINVAL;
+
+ if (prcmu_qos_array[prcmu_qos_class]->notifiers)
+ retval = blocking_notifier_chain_register(
+ prcmu_qos_array[prcmu_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_add_notifier);
+
+/**
+ * prcmu_qos_remove_notifier - deletes notification entry from chain.
+ * @prcmu_qos_class: identifies which qos target changes are notified.
+ * @notifier: notifier block to be removed.
+ *
+ * will remove the notifier from the notification chain that gets called
+ * upon changes to the prcmu_qos_class target value.
+ */
+int prcmu_qos_remove_notifier(int prcmu_qos_class,
+ struct notifier_block *notifier)
+{
+ int retval = -EINVAL;
+ if (prcmu_qos_array[prcmu_qos_class]->notifiers)
+ retval = blocking_notifier_chain_unregister(
+ prcmu_qos_array[prcmu_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_remove_notifier);
+
+#define USER_QOS_NAME_LEN 32
+
+static int prcmu_qos_power_open(struct inode *inode, struct file *filp,
+ long prcmu_qos_class)
+{
+ int ret;
+ char name[USER_QOS_NAME_LEN];
+
+ filp->private_data = (void *)prcmu_qos_class;
+ snprintf(name, USER_QOS_NAME_LEN, "file_%08x", (unsigned int)filp);
+ ret = prcmu_qos_add_requirement(prcmu_qos_class, name,
+ PRCMU_QOS_DEFAULT_VALUE);
+ if (ret >= 0)
+ return 0;
+
+ return -EPERM;
+}
+
+
+static int prcmu_qos_ape_power_open(struct inode *inode, struct file *filp)
+{
+ return prcmu_qos_power_open(inode, filp, PRCMU_QOS_APE_OPP);
+}
+
+static int prcmu_qos_ddr_power_open(struct inode *inode, struct file *filp)
+{
+ return prcmu_qos_power_open(inode, filp, PRCMU_QOS_DDR_OPP);
+}
+
+static int prcmu_qos_arm_power_open(struct inode *inode, struct file *filp)
+{
+ return prcmu_qos_power_open(inode, filp, PRCMU_QOS_ARM_OPP);
+}
+
+static int prcmu_qos_power_release(struct inode *inode, struct file *filp)
+{
+ int prcmu_qos_class;
+ char name[USER_QOS_NAME_LEN];
+
+ prcmu_qos_class = (long)filp->private_data;
+ snprintf(name, USER_QOS_NAME_LEN, "file_%08x", (unsigned int)filp);
+ prcmu_qos_remove_requirement(prcmu_qos_class, name);
+
+ return 0;
+}
+
+static ssize_t prcmu_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ s32 value;
+ int prcmu_qos_class;
+ char name[USER_QOS_NAME_LEN];
+
+ prcmu_qos_class = (long)filp->private_data;
+ if (count != sizeof(s32))
+ return -EINVAL;
+ if (copy_from_user(&value, buf, sizeof(s32)))
+ return -EFAULT;
+ snprintf(name, USER_QOS_NAME_LEN, "file_%08x", (unsigned int)filp);
+ prcmu_qos_update_requirement(prcmu_qos_class, name, value);
+
+ return sizeof(s32);
+}
+
+/* Functions to provide QoS to user space */
+static const struct file_operations prcmu_qos_ape_power_fops = {
+ .write = prcmu_qos_power_write,
+ .open = prcmu_qos_ape_power_open,
+ .release = prcmu_qos_power_release,
+};
+
+/* Functions to provide QoS to user space */
+static const struct file_operations prcmu_qos_ddr_power_fops = {
+ .write = prcmu_qos_power_write,
+ .open = prcmu_qos_ddr_power_open,
+ .release = prcmu_qos_power_release,
+};
+
+static const struct file_operations prcmu_qos_arm_power_fops = {
+ .write = prcmu_qos_power_write,
+ .open = prcmu_qos_arm_power_open,
+ .release = prcmu_qos_power_release,
+};
+
+static int register_prcmu_qos_misc(struct prcmu_qos_object *qos,
+ const struct file_operations *fops)
+{
+ qos->prcmu_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
+ qos->prcmu_qos_power_miscdev.name = qos->name;
+ qos->prcmu_qos_power_miscdev.fops = fops;
+
+ return misc_register(&qos->prcmu_qos_power_miscdev);
+}
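Userspace can do the same through the misc devices registered here. A minimal sketch, assuming the nodes show up as /dev/<name> (e.g. /dev/ape_opp) under a standard udev setup; the write format follows prcmu_qos_power_write() above: exactly sizeof(s32) bytes, and the requirement is removed again when the file descriptor is closed.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Request 100% APE OPP; keep the returned fd open for as long as needed. */
static int request_ape_opp_100(void)
{
	int32_t value = 100;
	int fd = open("/dev/ape_opp", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, &value, sizeof(value)) != sizeof(value)) {
		close(fd);
		return -1;
	}
	return fd;	/* close(fd) drops the requirement */
}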
+
+static void qos_delayed_work_up_fn(struct work_struct *work)
+{
+ prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "cpufreq", 100);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "cpufreq", 100);
+ cpufreq_requirement_set = 100;
+}
+
+static void qos_delayed_work_down_fn(struct work_struct *work)
+{
+ prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ cpufreq_requirement_set = PRCMU_QOS_DEFAULT_VALUE;
+}
+
+static DECLARE_DELAYED_WORK(qos_delayed_work_up, qos_delayed_work_up_fn);
+static DECLARE_DELAYED_WORK(qos_delayed_work_down, qos_delayed_work_down_fn);
+
+static int qos_delayed_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ s32 new_ddr_target;
+
+ /* Only react once per transition and only for one core, e.g. core 0 */
+ if (event != CPUFREQ_POSTCHANGE || freq->cpu != 0)
+ return 0;
+
+ /*
+ * APE and DDR OPP are always handled together in this solution.
+ * Hence no need to check both DDR and APE opp in the code below.
+ */
+
+ /* Which DDR OPP are we aiming for? */
+ if (freq->new > ARM_THRESHOLD_FREQ)
+ new_ddr_target = 100;
+ else
+ new_ddr_target = PRCMU_QOS_DEFAULT_VALUE;
+
+ if (new_ddr_target == cpufreq_requirement_queued) {
+ /*
+ * We're already at, or going to, the target requirement.
+ * This is only a fluctuation within the interval
+ * corresponding to the same DDR requirement.
+ */
+ return 0;
+ }
+ cpufreq_requirement_queued = new_ddr_target;
+
+ if (freq->new > ARM_THRESHOLD_FREQ) {
+ cancel_delayed_work_sync(&qos_delayed_work_down);
+ /*
+ * Only schedule this requirement if it is not the current
+ * one.
+ */
+ if (new_ddr_target != cpufreq_requirement_set)
+ schedule_delayed_work(&qos_delayed_work_up,
+ cpufreq_opp_delay);
+ } else {
+ cancel_delayed_work_sync(&qos_delayed_work_up);
+ /*
+ * Only schedule this requirement if it is not the current
+ * one.
+ */
+ if (new_ddr_target != cpufreq_requirement_set)
+ schedule_delayed_work(&qos_delayed_work_down,
+ cpufreq_opp_delay);
+ }
+
+ return 0;
+}
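For example, with ARM_THRESHOLD_FREQ at 400000 kHz and cpufreq_opp_delay at HZ/5 (200 ms), a switch to 800 MHz only raises the DDR/APE requirements to 100% if the frequency stays above 400 MHz for the whole delay: a transition back below the threshold within that window cancels the queued work, so short fluctuations never change the OPP.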
+
+static int __init prcmu_qos_power_init(void)
+{
+ int ret;
+
+ /* 25% DDR OPP is not supported on u5500 */
+ if (cpu_is_u5500()) {
+ ddr_opp_qos.default_value = 50;
+ atomic_set(&ddr_opp_qos.target_value, 50);
+ }
+
+ ret = register_prcmu_qos_misc(&ape_opp_qos, &prcmu_qos_ape_power_fops);
+ if (ret < 0) {
+ pr_err("prcmu ape qos: setup failed\n");
+ return ret;
+ }
+
+ ret = register_prcmu_qos_misc(&ddr_opp_qos, &prcmu_qos_ddr_power_fops);
+ if (ret < 0) {
+ pr_err("prcmu ddr qos: setup failed\n");
+ return ret;
+ }
+
+ ret = register_prcmu_qos_misc(&arm_opp_qos, &prcmu_qos_arm_power_fops);
+ if (ret < 0) {
+ pr_err("prcmu arm qos: setup failed\n");
+ return ret;
+ }
+
+ prcmu_qos_add_requirement(PRCMU_QOS_DDR_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ cpufreq_requirement_set = PRCMU_QOS_DEFAULT_VALUE;
+ cpufreq_requirement_queued = PRCMU_QOS_DEFAULT_VALUE;
+
+ cpufreq_register_notifier(&qos_delayed_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return ret;
+}
+
+late_initcall(prcmu_qos_power_init);
diff --git a/arch/arm/mach-ux500/pm/runtime.c b/arch/arm/mach-ux500/pm/runtime.c
new file mode 100644
index 00000000000..710bd8aae3d
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/runtime.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson> for ST-Ericsson
+ *
+ * Based on:
+ * Runtime PM support code for SuperH Mobile ARM
+ * Copyright (C) 2009-2010 Magnus Damm
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <plat/pincfg.h>
+
+#include "../pins.h"
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_ENABLED 2
+
+struct pm_runtime_data {
+ unsigned long flags;
+ struct ux500_regulator *regulator;
+ struct ux500_pins *pins;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+ struct pm_runtime_data *prd = res;
+
+ dev_dbg(dev, "__devres_release()\n");
+
+ if (test_bit(BIT_ENABLED, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_disable(prd->pins);
+ if (prd->regulator)
+ ux500_regulator_atomic_disable(prd->regulator);
+ }
+
+ if (test_bit(BIT_ACTIVE, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_put(prd->pins);
+ if (prd->regulator)
+ ux500_regulator_put(prd->regulator);
+ }
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+ return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ prd->pins = ux500_pins_get(dev_name(dev));
+
+ prd->regulator = ux500_regulator_get(dev);
+ if (IS_ERR(prd->regulator))
+ prd->regulator = NULL;
+
+ if (prd->pins || prd->regulator) {
+ dev_info(dev, "managed by runtime pm: %s%s\n",
+ prd->pins ? "pins " : "",
+ prd->regulator ? "regulator " : "");
+
+ set_bit(BIT_ACTIVE, &prd->flags);
+ }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+ dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+static void platform_pm_runtime_used(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd)
+ set_bit(BIT_ONCE, &prd->flags);
+}
+
+static int ux500_pd_runtime_idle(struct device *dev)
+{
+ return pm_runtime_suspend(dev);
+}
+
+static void ux500_pd_disable(struct pm_runtime_data *prd)
+{
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+
+ if (prd->pins)
+ ux500_pins_disable(prd->pins);
+
+ if (prd->regulator)
+ ux500_regulator_atomic_disable(prd->regulator);
+
+ clear_bit(BIT_ENABLED, &prd->flags);
+ }
+}
+
+static int ux500_pd_runtime_suspend(struct device *dev)
+{
+ int ret;
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ platform_pm_runtime_bug(dev, prd);
+
+ ret = pm_generic_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ux500_pd_disable(prd);
+
+ return 0;
+}
+
+static void ux500_pd_enable(struct pm_runtime_data *prd)
+{
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_enable(prd->pins);
+
+ if (prd->regulator)
+ ux500_regulator_atomic_enable(prd->regulator);
+
+ set_bit(BIT_ENABLED, &prd->flags);
+ }
+}
+
+static int ux500_pd_runtime_resume(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ platform_pm_runtime_used(dev, prd);
+ ux500_pd_enable(prd);
+
+ return pm_generic_runtime_resume(dev);
+}
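A sketch (not from this patch) of how these callbacks are normally reached: a driver for a platform device in this power domain just uses the standard runtime PM calls, and the domain enables pins/regulators before the driver's own resume handler and disables them after its suspend handler.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);

	/* Ends up in ux500_pd_runtime_resume(): pins/regulator on, then driver. */
	pm_runtime_get_sync(&pdev->dev);

	/* ... use the hardware ... */

	/* Drops the reference; idle leads to ux500_pd_runtime_suspend(). */
	pm_runtime_put_sync(&pdev->dev);

	return 0;
}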
+
+static int ux500_pd_suspend_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /* Only handle devices that use runtime pm */
+ if (!prd || !test_bit(BIT_ONCE, &prd->flags))
+ return 0;
+
+	/* Already runtime suspended? Nothing to do. */
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ /*
+ * We get here only if the device was not runtime suspended for some
+ * reason. We still need to do the power save stuff when going into
+ * suspend, so force it here.
+ */
+ return ux500_pd_runtime_suspend(dev);
+}
+
+static int ux500_pd_resume_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /* Only handle devices that use runtime pm */
+ if (!prd || !test_bit(BIT_ONCE, &prd->flags))
+ return 0;
+
+ /*
+ * Already was runtime suspended? No need to resume here, runtime
+ * resume will take care of it.
+ */
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ /*
+ * We get here only if the device was not runtime suspended,
+ * but we forced it down in suspend_noirq above. Bring it
+ * up since pm-runtime thinks it is not suspended.
+ */
+ return ux500_pd_runtime_resume(dev);
+}
+#ifdef CONFIG_UX500_SUSPEND
+static int ux500_pd_amba_suspend_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+ int (*callback)(struct device *) = NULL;
+ int ret = 0;
+ bool is_suspended = pm_runtime_status_suspended(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /*
+ * Do not bypass AMBA bus pm functions by calling generic
+ * pm directly. A future fix could be to implement a
+ * "pm_bus_generic_*" API which we can use instead.
+ */
+ if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->suspend_noirq;
+
+ if (callback)
+ ret = callback(dev);
+ else
+ ret = pm_generic_suspend_noirq(dev);
+
+ if (!ret && !is_suspended)
+ ux500_pd_disable(prd);
+
+ return ret;
+}
+
+static int ux500_pd_amba_resume_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+ int (*callback)(struct device *) = NULL;
+ int ret = 0;
+ bool is_suspended = pm_runtime_status_suspended(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /*
+ * Do not bypass AMBA bus pm functions by calling generic
+ * pm directly. A future fix could be to implement a
+ * "pm_bus_generic_*" API which we can use instead.
+ */
+ if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->resume_noirq;
+
+ if (callback)
+ ret = callback(dev);
+ else
+ ret = pm_generic_resume_noirq(dev);
+
+ if (!ret && !is_suspended)
+ ux500_pd_enable(prd);
+
+ return ret;
+}
+#else
+static int ux500_pd_amba_suspend_noirq(struct device *dev)
+{
+ return 0;
+}
+static int ux500_pd_amba_resume_noirq(struct device *dev)
+{
+ return 0;
+}
+#endif
+static int ux500_pd_amba_runtime_suspend(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+ int (*callback)(struct device *) = NULL;
+ int ret;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /*
+	 * Do this first, to make sure pins are not left in an undefined state
+	 * after the drivers have run their runtime suspend. This also means
+	 * that drivers cannot use their pins/regulators during runtime suspend.
+ */
+ ux500_pd_disable(prd);
+
+ /*
+ * Do not bypass AMBA bus pm functions by calling generic
+ * pm directly. A future fix could be to implement a
+ * "pm_bus_generic_*" API which we can use instead.
+ */
+ if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_suspend;
+
+ if (callback)
+ ret = callback(dev);
+ else
+ ret = pm_generic_runtime_suspend(dev);
+
+ if (ret)
+ ux500_pd_enable(prd);
+
+ return ret;
+}
+
+static int ux500_pd_amba_runtime_resume(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+ int (*callback)(struct device *) = NULL;
+ int ret;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /*
+ * Do not bypass AMBA bus pm functions by calling generic
+ * pm directly. A future fix could be to implement a
+ * "pm_bus_generic_*" API which we can use instead.
+ */
+ if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_resume;
+
+ if (callback)
+ ret = callback(dev);
+ else
+ ret = pm_generic_runtime_resume(dev);
+
+ /*
+	 * Restore pins/regulator after the drivers have runtime resumed,
+	 * since we must not leave the pins in an undefined state. This also
+	 * means that drivers cannot use their pins/regulators during
+	 * runtime resume.
+ */
+ if (!ret)
+ ux500_pd_enable(prd);
+
+ return ret;
+}
+
+static int ux500_pd_amba_runtime_idle(struct device *dev)
+{
+ int (*callback)(struct device *) = NULL;
+ int ret;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /*
+ * Do not bypass AMBA bus runtime functions by calling generic runtime
+ * directly. A future fix could be to implement a
+ * "pm_bus_generic_runtime_*" API which we can use instead.
+ */
+ if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_idle;
+
+ if (callback)
+ ret = callback(dev);
+ else
+ ret = pm_generic_runtime_idle(dev);
+
+ return ret;
+}
+
+static int ux500_pd_bus_notify(struct notifier_block *nb,
+ unsigned long action,
+ void *data,
+ bool enable)
+{
+ struct device *dev = data;
+ struct pm_runtime_data *prd;
+
+ dev_dbg(dev, "%s() %ld !\n", __func__, action);
+
+ if (action == BUS_NOTIFY_BIND_DRIVER) {
+ prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+ if (prd) {
+ devres_add(dev, prd);
+ platform_pm_runtime_init(dev, prd);
+ if (enable)
+ ux500_pd_enable(prd);
+ } else
+ dev_err(dev, "unable to alloc memory for runtime pm\n");
+ }
+
+ return 0;
+}
+
+static int ux500_pd_plat_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ return ux500_pd_bus_notify(nb, action, data, false);
+}
+
+static int ux500_pd_amba_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ return ux500_pd_bus_notify(nb, action, data, true);
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+#define ux500_pd_suspend_noirq NULL
+#define ux500_pd_resume_noirq NULL
+#define ux500_pd_runtime_idle NULL
+#define ux500_pd_runtime_suspend NULL
+#define ux500_pd_runtime_resume NULL
+
+static int ux500_pd_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ux500_regulator *regulator = NULL;
+ struct ux500_pins *pins = NULL;
+ struct device *dev = data;
+ const char *onoff = NULL;
+
+ dev_dbg(dev, "%s() %ld !\n", __func__, action);
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ pins = ux500_pins_get(dev_name(dev));
+ if (pins) {
+ ux500_pins_enable(pins);
+ ux500_pins_put(pins);
+ }
+
+ regulator = ux500_regulator_get(dev);
+ if (IS_ERR(regulator))
+ regulator = NULL;
+ else {
+ ux500_regulator_atomic_enable(regulator);
+ ux500_regulator_put(regulator);
+ }
+
+ onoff = "on";
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ pins = ux500_pins_get(dev_name(dev));
+ if (pins) {
+ ux500_pins_disable(pins);
+ ux500_pins_put(pins);
+ }
+
+ regulator = ux500_regulator_get(dev);
+ if (IS_ERR(regulator))
+ regulator = NULL;
+ else {
+ ux500_regulator_atomic_disable(regulator);
+ ux500_regulator_put(regulator);
+ }
+
+ onoff = "off";
+ break;
+ }
+
+ if (pins || regulator) {
+ dev_info(dev, "runtime pm disabled, forced %s: %s%s\n",
+ onoff,
+ pins ? "pins " : "",
+ regulator ? "regulator " : "");
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+struct dev_pm_domain ux500_amba_dev_power_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(ux500_pd_amba_runtime_suspend,
+ ux500_pd_amba_runtime_resume,
+ ux500_pd_amba_runtime_idle)
+ USE_PLATFORM_PM_SLEEP_OPS
+ .suspend_noirq = ux500_pd_amba_suspend_noirq,
+ .resume_noirq = ux500_pd_amba_resume_noirq,
+ },
+};
+
+struct dev_pm_domain ux500_dev_power_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(ux500_pd_runtime_suspend,
+ ux500_pd_runtime_resume,
+ ux500_pd_runtime_idle)
+ USE_PLATFORM_PM_SLEEP_OPS
+ .suspend_noirq = ux500_pd_suspend_noirq,
+ .resume_noirq = ux500_pd_resume_noirq,
+ },
+};
+
+static struct notifier_block ux500_pd_platform_notifier = {
+ .notifier_call = ux500_pd_plat_bus_notify,
+};
+
+static struct notifier_block ux500_pd_amba_notifier = {
+ .notifier_call = ux500_pd_amba_bus_notify,
+};
+
+static int __init ux500_pm_runtime_platform_init(void)
+{
+ bus_register_notifier(&platform_bus_type, &ux500_pd_platform_notifier);
+ return 0;
+}
+core_initcall(ux500_pm_runtime_platform_init);
+
+/*
+ * The amba bus itself gets registered in a core_initcall, so we can't use
+ * that.
+ */
+static int __init ux500_pm_runtime_amba_init(void)
+{
+ bus_register_notifier(&amba_bustype, &ux500_pd_amba_notifier);
+ return 0;
+}
+arch_initcall(ux500_pm_runtime_amba_init);
diff --git a/arch/arm/mach-ux500/pm/scu.h b/arch/arm/mach-ux500/pm/scu.h
new file mode 100644
index 00000000000..a09e86a9d3c
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/scu.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASMARM_ARCH_SCU_H
+#define __ASMARM_ARCH_SCU_H
+
+#include <mach/hardware.h>
+
+#define SCU_BASE U8500_SCU_BASE
+/*
+ * SCU registers
+ */
+#define SCU_CTRL 0x00
+#define SCU_CONFIG 0x04
+#define SCU_CPU_STATUS 0x08
+#define SCU_INVALIDATE 0x0c
+#define SCU_FPGA_REVISION 0x10
+
+#endif
diff --git a/arch/arm/mach-ux500/pm/suspend.c b/arch/arm/mach-ux500/pm/suspend.c
new file mode 100644
index 00000000000..c5cf6260fbd
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com>,
+ * Sundar Iyer for ST-Ericsson.
+ */
+
+#include <linux/suspend.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/ab8500-debug.h>
+#include <linux/regulator/dbx500-prcmu.h>
+
+#include <mach/context.h>
+#include <mach/pm.h>
+#include <mach/id.h>
+
+#include "suspend_dbg.h"
+
+static void (*pins_suspend_force)(void);
+static void (*pins_suspend_force_mux)(void);
+
+static suspend_state_t suspend_state = PM_SUSPEND_ON;
+
+void suspend_set_pins_force_fn(void (*force)(void), void (*force_mux)(void))
+{
+ pins_suspend_force = force;
+ pins_suspend_force_mux = force_mux;
+}
+
+static atomic_t block_sleep = ATOMIC_INIT(0);
+
+void suspend_block_sleep(void)
+{
+ atomic_inc(&block_sleep);
+}
+
+void suspend_unblock_sleep(void)
+{
+ atomic_dec(&block_sleep);
+}
+
+static bool sleep_is_blocked(void)
+{
+ return (atomic_read(&block_sleep) != 0);
+}
+
+static int suspend(bool do_deepsleep)
+{
+ bool pins_force = pins_suspend_force_mux && pins_suspend_force;
+ int ret = 0;
+
+ if (sleep_is_blocked()) {
+ pr_info("suspend/resume: interrupted by modem.\n");
+ return -EBUSY;
+ }
+
+ nmk_gpio_clocks_enable();
+
+ ux500_suspend_dbg_add_wake_on_uart();
+
+ nmk_gpio_wakeups_suspend();
+
+	/* Configure the PRCMU wakeup sources for sleep */
+ if (cpu_is_u9500())
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ABB) | PRCMU_WAKEUP(HSI0));
+ else
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ABB));
+
+ context_vape_save();
+
+ if (pins_force) {
+ /*
+ * Save GPIO settings before applying power save
+ * settings
+ */
+ context_gpio_save();
+
+ /* Apply GPIO power save mux settings */
+ context_gpio_mux_safe_switch(true);
+ pins_suspend_force_mux();
+ context_gpio_mux_safe_switch(false);
+
+ /* Apply GPIO power save settings */
+ pins_suspend_force();
+ }
+
+ ux500_pm_gic_decouple();
+
+ if (ux500_pm_gic_pending_interrupt()) {
+ pr_info("suspend/resume: pending interrupt\n");
+
+ /* Recouple GIC with the interrupt bus */
+ ux500_pm_gic_recouple();
+ ret = -EBUSY;
+
+ goto exit;
+ }
+ ux500_pm_prcmu_set_ioforce(true);
+
+ if (do_deepsleep) {
+ context_varm_save_common();
+ context_varm_save_core();
+ context_gic_dist_disable_unneeded_irqs();
+ context_save_cpu_registers();
+
+ /*
+		 * Since we only have 100us between requesting a power state
+		 * and wfi, we also clean the cache here so that the final
+		 * cache clean before wfi has as little as possible left to
+		 * do.
+ */
+ context_clean_l1_cache_all();
+
+ (void) prcmu_set_power_state(PRCMU_AP_DEEP_SLEEP,
+ false, false);
+ context_save_to_sram_and_wfi(true);
+
+ context_restore_cpu_registers();
+ context_varm_restore_core();
+ context_varm_restore_common();
+
+ } else {
+
+ context_clean_l1_cache_all();
+ (void) prcmu_set_power_state(APEXECUTE_TO_APSLEEP,
+ false, false);
+ dsb();
+ __asm__ __volatile__("wfi\n\t" : : : "memory");
+ }
+
+ context_vape_restore();
+
+ /* If GPIO woke us up then save the pins that caused the wake up */
+ ux500_pm_gpio_save_wake_up_status();
+
+ ux500_suspend_dbg_sleep_status(do_deepsleep);
+
+ /* APE was turned off, restore IO ring */
+ ux500_pm_prcmu_set_ioforce(false);
+
+exit:
+ if (pins_force) {
+ /* Restore gpio settings */
+ context_gpio_mux_safe_switch(true);
+ context_gpio_restore_mux();
+ context_gpio_mux_safe_switch(false);
+ context_gpio_restore();
+ }
+
+ /* This is what cpuidle wants */
+ if (cpu_is_u9500())
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
+ PRCMU_WAKEUP(ABB) | PRCMU_WAKEUP(HSI0));
+ else
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
+ PRCMU_WAKEUP(ABB));
+
+ nmk_gpio_wakeups_resume();
+
+ ux500_suspend_dbg_remove_wake_on_uart();
+
+ nmk_gpio_clocks_disable();
+
+ return ret;
+}
+
+static int ux500_suspend_enter(suspend_state_t state)
+{
+ if (ux500_suspend_enabled()) {
+ if (ux500_suspend_deepsleep_enabled() &&
+ state == PM_SUSPEND_MEM)
+ return suspend(true);
+ if (ux500_suspend_sleep_enabled())
+ return suspend(false);
+ }
+
+ ux500_suspend_dbg_add_wake_on_uart();
+ /*
+ * Set IOFORCE in order to wake on GPIO the same way
+ * as in deeper sleep.
+ * (U5500 is not ready for IOFORCE)
+ */
+ if (!cpu_is_u5500())
+ ux500_pm_prcmu_set_ioforce(true);
+
+ dsb();
+ __asm__ __volatile__("wfi\n\t" : : : "memory");
+
+ if (!cpu_is_u5500())
+ ux500_pm_prcmu_set_ioforce(false);
+ ux500_suspend_dbg_remove_wake_on_uart();
+
+ return 0;
+}
+
+static int ux500_suspend_valid(suspend_state_t state)
+{
+ return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
+}
+
+static int ux500_suspend_prepare(void)
+{
+ int ret;
+
+ ret = regulator_suspend_prepare(suspend_state);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ux500_suspend_prepare_late(void)
+{
+ /* ESRAM to retention instead of OFF until ROM is fixed */
+ (void) prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+
+ ab8500_regulator_debug_force();
+ ux500_regulator_suspend_debug();
+ return 0;
+}
+
+static void ux500_suspend_wake(void)
+{
+ ux500_regulator_resume_debug();
+ ab8500_regulator_debug_restore();
+ (void) prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+}
+
+static void ux500_suspend_finish(void)
+{
+ (void)regulator_suspend_finish();
+}
+
+static int ux500_suspend_begin(suspend_state_t state)
+{
+ (void) prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "suspend", 125);
+ suspend_state = state;
+ return ux500_suspend_dbg_begin(state);
+}
+
+static void ux500_suspend_end(void)
+{
+ (void) prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "suspend", 25);
+ suspend_state = PM_SUSPEND_ON;
+}
+
+static struct platform_suspend_ops ux500_suspend_ops = {
+ .enter = ux500_suspend_enter,
+ .valid = ux500_suspend_valid,
+ .prepare = ux500_suspend_prepare,
+ .prepare_late = ux500_suspend_prepare_late,
+ .wake = ux500_suspend_wake,
+ .finish = ux500_suspend_finish,
+ .begin = ux500_suspend_begin,
+ .end = ux500_suspend_end,
+};
+
+static __init int ux500_suspend_init(void)
+{
+ ux500_suspend_dbg_init();
+ prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "suspend", 25);
+ suspend_set_ops(&ux500_suspend_ops);
+ return 0;
+}
+device_initcall(ux500_suspend_init);
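A minimal userspace sketch of how these ops get exercised: writing "mem" or "standby" to /sys/power/state enters ux500_suspend_enter() with the matching state, as accepted by ux500_suspend_valid().

#include <stdio.h>

/* state is "mem" (deep sleep) or "standby" (sleep). */
static int enter_suspend(const char *state)
{
	FILE *f = fopen("/sys/power/state", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", state);
	return fclose(f);
}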
diff --git a/arch/arm/mach-ux500/pm/suspend_dbg.c b/arch/arm/mach-ux500/pm/suspend_dbg.c
new file mode 100644
index 00000000000..1b7d871ba52
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend_dbg.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/suspend.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <mach/pm.h>
+
+#ifdef CONFIG_UX500_SUSPEND_STANDBY
+static u32 sleep_enabled = 1;
+#else
+static u32 sleep_enabled;
+#endif
+
+#ifdef CONFIG_UX500_SUSPEND_MEM
+static u32 deepsleep_enabled = 1;
+#else
+static u32 deepsleep_enabled;
+#endif
+
+static u32 suspend_enabled = 1;
+
+static u32 deepsleeps_done;
+static u32 deepsleeps_failed;
+static u32 sleeps_done;
+static u32 sleeps_failed;
+static u32 suspend_count;
+
+#ifdef CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART
+void ux500_suspend_dbg_add_wake_on_uart(void)
+{
+ irq_set_irq_wake(GPIO_TO_IRQ(ux500_console_uart_gpio_pin), 1);
+ irq_set_irq_type(GPIO_TO_IRQ(ux500_console_uart_gpio_pin),
+ IRQ_TYPE_EDGE_BOTH);
+}
+
+void ux500_suspend_dbg_remove_wake_on_uart(void)
+{
+ irq_set_irq_wake(GPIO_TO_IRQ(ux500_console_uart_gpio_pin), 0);
+}
+#endif
+
+bool ux500_suspend_enabled(void)
+{
+ return suspend_enabled != 0;
+}
+
+bool ux500_suspend_sleep_enabled(void)
+{
+ return sleep_enabled != 0;
+}
+
+bool ux500_suspend_deepsleep_enabled(void)
+{
+ return deepsleep_enabled != 0;
+}
+
+void ux500_suspend_dbg_sleep_status(bool is_deepsleep)
+{
+ enum prcmu_power_status prcmu_status;
+
+ prcmu_status = prcmu_get_power_state_result();
+
+ if (is_deepsleep) {
+ pr_info("Returning from ApDeepSleep. PRCMU ret: 0x%x - %s\n",
+ prcmu_status,
+ prcmu_status == PRCMU_DEEP_SLEEP_OK ?
+ "Success" : "Fail!");
+ if (prcmu_status == PRCMU_DEEP_SLEEP_OK)
+ deepsleeps_done++;
+ else
+ deepsleeps_failed++;
+ } else {
+ pr_info("Returning from ApSleep. PRCMU ret: 0x%x - %s\n",
+ prcmu_status,
+ prcmu_status == PRCMU_SLEEP_OK ? "Success" : "Fail!");
+ if (prcmu_status == PRCMU_SLEEP_OK)
+ sleeps_done++;
+ else
+ sleeps_failed++;
+ }
+}
+
+int ux500_suspend_dbg_begin(suspend_state_t state)
+{
+ suspend_count++;
+ return 0;
+}
+
+void ux500_suspend_dbg_init(void)
+{
+ struct dentry *suspend_dir;
+ struct dentry *file;
+
+ suspend_dir = debugfs_create_dir("suspend", NULL);
+ if (IS_ERR_OR_NULL(suspend_dir))
+ return;
+
+ file = debugfs_create_bool("sleep", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &sleep_enabled);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_bool("deepsleep", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &deepsleep_enabled);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_bool("enable", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &suspend_enabled);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("count", S_IRUGO,
+ suspend_dir,
+ &suspend_count);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("sleep_count", S_IRUGO,
+ suspend_dir,
+ &sleeps_done);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("deepsleep_count", S_IRUGO,
+ suspend_dir,
+ &deepsleeps_done);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+
+ file = debugfs_create_u32("sleep_failed", S_IRUGO,
+ suspend_dir,
+ &sleeps_failed);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("deepsleep_failed", S_IRUGO,
+ suspend_dir,
+ &deepsleeps_failed);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ return;
+error:
+ if (!IS_ERR_OR_NULL(suspend_dir))
+ debugfs_remove_recursive(suspend_dir);
+}
diff --git a/arch/arm/mach-ux500/pm/suspend_dbg.h b/arch/arm/mach-ux500/pm/suspend_dbg.h
new file mode 100644
index 00000000000..29bfec7e269
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend_dbg.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ */
+
+#ifndef UX500_SUSPEND_DBG_H
+#define UX500_SUSPEND_DBG_H
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+
+#ifdef CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART
+void ux500_suspend_dbg_add_wake_on_uart(void);
+void ux500_suspend_dbg_remove_wake_on_uart(void);
+#else
+static inline void ux500_suspend_dbg_add_wake_on_uart(void) { }
+static inline void ux500_suspend_dbg_remove_wake_on_uart(void) { }
+#endif
+
+#ifdef CONFIG_UX500_SUSPEND_DBG
+bool ux500_suspend_enabled(void);
+bool ux500_suspend_sleep_enabled(void);
+bool ux500_suspend_deepsleep_enabled(void);
+void ux500_suspend_dbg_sleep_status(bool is_deepsleep);
+void ux500_suspend_dbg_init(void);
+int ux500_suspend_dbg_begin(suspend_state_t state);
+
+#else
+static inline bool ux500_suspend_enabled(void)
+{
+ return true;
+}
+static inline bool ux500_suspend_sleep_enabled(void)
+{
+#ifdef CONFIG_UX500_SUSPEND_STANDBY
+ return true;
+#else
+ return false;
+#endif
+}
+static inline bool ux500_suspend_deepsleep_enabled(void)
+{
+#ifdef CONFIG_UX500_SUSPEND_MEM
+ return true;
+#else
+ return false;
+#endif
+}
+static inline void ux500_suspend_dbg_sleep_status(bool is_deepsleep) { }
+static inline void ux500_suspend_dbg_init(void) { }
+
+static inline int ux500_suspend_dbg_begin(suspend_state_t state)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/arch/arm/mach-ux500/pm/timer.c b/arch/arm/mach-ux500/pm/timer.c
new file mode 100644
index 00000000000..61f92bf73da
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/timer.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * The RTC timer block is an ST Microelectronics variant of the ARM PL031.
+ * The clockwatch part is the same as in the PL031, while the timer part is
+ * only present on the ST Microelectronics variant.
+ * Here only the timer part is used.
+ *
+ * The timer part is quite troublesome to program correctly. Lots of long
+ * delays are needed in order to ensure that you actually get what you
+ * wrote.
+ *
+ * In other words, this timer is and should only be used by cpuidle, under
+ * special conditions where the surroundings are known, so that the number
+ * of delays can be reduced.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+
+#include <asm/errno.h>
+
+#include <mach/hardware.h>
+
+#define RTC_IMSC 0x10
+#define RTC_MIS 0x18
+#define RTC_ICR 0x1C
+#define RTC_TDR 0x20
+#define RTC_TLR1 0x24
+#define RTC_TCR 0x28
+
+#define RTC_TLR2 0x2C
+#define RTC_TPR1 0x3C
+
+#define RTC_TCR_RTTOS (1 << 0)
+#define RTC_TCR_RTTEN (1 << 1)
+#define RTC_TCR_RTTSS (1 << 2)
+
+#define RTC_IMSC_TIMSC (1 << 1)
+#define RTC_ICR_TIC (1 << 1)
+#define RTC_MIS_RTCTMIS (1 << 1)
+
+#define RTC_TCR_RTTPS_2 (1 << 4)
+#define RTC_TCR_RTTPS_3 (2 << 4)
+#define RTC_TCR_RTTPS_4 (3 << 4)
+#define RTC_TCR_RTTPS_5 (4 << 4)
+#define RTC_TCR_RTTPS_6 (5 << 4)
+#define RTC_TCR_RTTPS_7 (6 << 4)
+#define RTC_TCR_RTTPS_8 (7 << 4)
+
+#define WRITE_DELAY 130 /* 4 cycles plus margin */
+
+/*
+ * Count-down measure point. It just has to be high enough to differ
+ * from scheduled values.
+ */
+#define MEASURE_VAL 0xffffffff
+
+/* Just a value bigger than any reasonable scheduled timeout. */
+#define MEASURE_VAL_LIMIT 0xf0000000
+
+#define TICKS_TO_NS(x) ((s64)x * 30518)
+#define US_TO_TICKS(x) ((u32)((1000 * x) / 30518))
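The conversion factors assume the usual 32.768 kHz RTC clock: one tick is 1 s / 32768 ≈ 30518 ns, so US_TO_TICKS(1000) is about 32 ticks and a full 32-bit count spans roughly 2^32 * 30.5 us ≈ 36 hours.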
+
+static void __iomem *rtc_base;
+static bool measure_latency;
+
+#ifdef CONFIG_UX500_CPUIDLE_DEBUG
+
+/*
+ * The plan here is to measure the ApSleep/ApDeepSleep exit latency
+ * by using a known timer pattern.
+ * The first entry in the pattern, LR1, is the value that the scheduler
+ * wants us to sleep. The second entry is a high value, too large to be
+ * scheduled, so that we can tell a running scheduled value apart from a
+ * time measurement value.
+ * When an RTT interrupt has occurred, the block automatically starts
+ * counting down the measurement value in LR2. When the ARM is awake, it
+ * reads how far the RTT has decreased the value loaded from LR2 and from
+ * that calculates how long the wake-up took.
+ */
+ktime_t u8500_rtc_exit_latency_get(void)
+{
+ u32 ticks;
+
+ if (measure_latency) {
+ ticks = MEASURE_VAL - readl(rtc_base + RTC_TDR);
+
+ /*
+ * Check if we are actually counting on a LR2 value.
+ * If not we have woken on another interrupt.
+ */
+ if (ticks < MEASURE_VAL_LIMIT) {
+ /* convert 32 kHz ticks to ns */
+ return ktime_set(0, TICKS_TO_NS(ticks));
+ }
+ }
+ return ktime_set(0, 0);
+}
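+
+/*
+ * Hypothetical example of the calculation above: if RTC_TDR reads
+ * 0xffffff00 after wakeup, then ticks = MEASURE_VAL - 0xffffff00 = 256,
+ * which is well below MEASURE_VAL_LIMIT, so the reported exit latency
+ * is 256 * 30518 ns, roughly 7.8 ms.
+ */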
+
+static void measure_latency_start(void)
+{
+ udelay(WRITE_DELAY);
+ /*
+ * Disable RTT and clear self-start, because we want to restart
+ * rather than continue from the current pattern. (See below.)
+ */
+ writel(0, rtc_base + RTC_TCR);
+ udelay(WRITE_DELAY);
+
+ /*
+ * Program LR2 (load register two) to maximum value to ease
+ * identification of timer interrupt vs other.
+ */
+ writel(MEASURE_VAL, rtc_base + RTC_TLR2);
+ /*
+ * Set the Load Register execution pattern: a cleared bit
+ * means pick LR1, a set bit means LR2. 0xfe (binary 11111110)
+ * means first do LR1, then do LR2 seven times.
+ */
+ writel(0xfe, rtc_base + RTC_TPR1);
+
+ udelay(WRITE_DELAY);
+
+ /*
+ * Enable self-start, plus a pattern of eight.
+ */
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTPS_8,
+ rtc_base + RTC_TCR);
+ udelay(WRITE_DELAY);
+}
+
+void ux500_rtcrtt_measure_latency(bool enable)
+{
+ if (enable) {
+ measure_latency_start();
+ } else {
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTOS, rtc_base + RTC_TCR);
+ writel(RTC_ICR_TIC, rtc_base + RTC_ICR);
+ writel(RTC_IMSC_TIMSC, rtc_base + RTC_IMSC);
+ }
+ measure_latency = enable;
+}
+#else
+static inline void measure_latency_start(void) { }
+static inline void ux500_rtcrtt_measure_latency(bool enable)
+{
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTOS, rtc_base + RTC_TCR);
+ writel(RTC_ICR_TIC, rtc_base + RTC_ICR);
+ writel(RTC_IMSC_TIMSC, rtc_base + RTC_IMSC);
+}
+#endif
+
+void ux500_rtcrtt_off(void)
+{
+ if (measure_latency) {
+ measure_latency_start();
+ } else {
+ /* Clear any pending interrupts */
+ if (readl(rtc_base + RTC_MIS) & RTC_MIS_RTCTMIS)
+ writel(RTC_ICR_TIC, rtc_base + RTC_ICR);
+
+ /* Disable, keeping self-start and one-shot mode */
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTOS, rtc_base + RTC_TCR);
+ }
+}
+
+void ux500_rtcrtt_next(u32 time_us)
+{
+ writel(US_TO_TICKS(time_us), rtc_base + RTC_TLR1);
+}
+
+static int __init ux500_rtcrtt_init(void)
+{
+ if (cpu_is_u8500()) {
+ rtc_base = __io_address(U8500_RTC_BASE);
+ } else if (cpu_is_u5500()) {
+ rtc_base = __io_address(U5500_RTC_BASE);
+ } else {
+ pr_err("timer-rtt: Unknown DB Asic!\n");
+ return -EINVAL;
+ }
+ ux500_rtcrtt_measure_latency(false);
+ return 0;
+}
+subsys_initcall(ux500_rtcrtt_init);
diff --git a/arch/arm/mach-ux500/pm/usecase_gov.c b/arch/arm/mach-ux500/pm/usecase_gov.c
new file mode 100644
index 00000000000..5c7fe403c2f
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/usecase_gov.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@stericsson.com> for ST-Ericsson
+ * Author: Vincent Guittot <vincent.guittot@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/io.h>
+#include <linux/earlysuspend.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/kernel_stat.h>
+#include <linux/ktime.h>
+#include <linux/cpufreq.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/cpufreq-dbx500.h>
+
+#include "../../../../drivers/cpuidle/cpuidle-dbx500.h"
+
+
+#define CPULOAD_MEAS_DELAY 3000 /* 3 seconds of delta */
+
+/* debug */
+static unsigned long debug;
+
+#define hp_printk \
+ if (debug) \
+ printk \
+
+enum ux500_uc {
+ UX500_UC_NORMAL = 0,
+ UX500_UC_AUTO, /* Add use case below this. */
+ UX500_UC_VC,
+ UX500_UC_LPA,
+ UX500_UC_USER, /* Add use case above this. */
+ UX500_UC_MAX,
+};
+
+/* cpu load monitor struct */
+#define LOAD_MONITOR 4
+struct hotplug_cpu_info {
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_io;
+ unsigned int load[LOAD_MONITOR];
+ unsigned int io[LOAD_MONITOR];
+ unsigned int idx;
+};
+
+static DEFINE_PER_CPU(struct hotplug_cpu_info, hotplug_info);
+
+/* Auto trigger criteria */
+/* loadavg threshold */
+static unsigned long lower_threshold = 175;
+static unsigned long upper_threshold = 450;
+/* load balancing */
+static unsigned long max_unbalance = 210;
+/* trend load */
+static unsigned long trend_unbalance = 40;
+static unsigned long min_trend = 5;
+/* instant load */
+static unsigned long max_instant = 85;
+
+/* Number of interrupts per second before exiting auto mode */
+static u32 exit_irq_per_s = 1000;
+static u64 old_num_irqs;
+
+static DEFINE_MUTEX(usecase_mutex);
+static bool user_config_updated;
+static enum ux500_uc current_uc = UX500_UC_MAX;
+static bool is_work_scheduled;
+static bool is_early_suspend;
+static bool uc_master_enable = true;
+
+static unsigned int cpuidle_deepest_state;
+
+struct usecase_config {
+ char *name;
+ /* Minimum required ARM OPP. If there is no requirement, set 25. */
+ unsigned int min_arm_opp;
+ /* Only use max_arm_opp if you know what you're doing */
+ unsigned int max_arm_opp;
+ unsigned long cpuidle_multiplier;
+ bool second_cpu_online;
+ bool l2_prefetch_en;
+ bool enable;
+ unsigned int forced_state; /* Forced cpu idle state. */
+ bool vc_override; /* QOS override for voice-call. */
+};
+
+static struct usecase_config usecase_conf[UX500_UC_MAX] = {
+ [UX500_UC_NORMAL] = {
+ .name = "normal",
+ .min_arm_opp = 25,
+ .cpuidle_multiplier = 1024,
+ .second_cpu_online = true,
+ .l2_prefetch_en = true,
+ .enable = true,
+ .forced_state = 0,
+ .vc_override = false,
+ },
+ [UX500_UC_AUTO] = {
+ .name = "auto",
+ .min_arm_opp = 25,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = false,
+ .l2_prefetch_en = true,
+ .enable = false,
+ .forced_state = 0,
+ .vc_override = false,
+ },
+ [UX500_UC_VC] = {
+ .name = "voice-call",
+ .min_arm_opp = 50,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = true,
+ .l2_prefetch_en = false,
+ .enable = false,
+ .forced_state = 0,
+ .vc_override = true,
+ },
+ [UX500_UC_LPA] = {
+ .name = "low-power-audio",
+ .min_arm_opp = 50,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = false,
+ .l2_prefetch_en = false,
+ .enable = false,
+ .forced_state = 0, /* Updated dynamically */
+ .vc_override = false,
+ },
+};
+
+/* daemon */
+static struct delayed_work work_usecase;
+static struct early_suspend usecase_early_suspend;
+
+/* calculate loadavg */
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
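+
+/*
+ * Example, for illustration only: a load average of 1.75 gives
+ * LOAD_INT() = 1 and LOAD_FRAC() = 75, so determine_loadavg() below
+ * returns 175 -- the same scale as lower_threshold/upper_threshold.
+ */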
+
+extern int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max);
+extern int cpuidle_set_multiplier(unsigned int value);
+extern int cpuidle_force_state(unsigned int state);
+
+static unsigned long determine_loadavg(void)
+{
+ unsigned long avg = 0;
+ unsigned long avnrun[3];
+
+ get_avenrun(avnrun, FIXED_1 / 200, 0);
+ avg += (LOAD_INT(avnrun[0]) * 100) + (LOAD_FRAC(avnrun[0]) % 100);
+
+ return avg;
+}
+
+static unsigned long determine_cpu_load(void)
+{
+ int i;
+ unsigned long total_load = 0;
+
+ /* get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load, iowait;
+ unsigned int idle_time, iowait_time, wall_time;
+ cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ /* update both cur_idle_time and cur_wall_time */
+ cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);
+ cur_iowait_time = get_cpu_iowait_time_us(i, &cur_wall_time);
+
+ /* how much wall time has passed since last iteration? */
+ wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+ info->prev_cpu_wall);
+ info->prev_cpu_wall = cur_wall_time;
+
+ /* how much idle time has passed since last iteration? */
+ idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+ info->prev_cpu_idle);
+ info->prev_cpu_idle = cur_idle_time;
+
+ /* how much io wait time has passed since last iteration? */
+ iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
+ info->prev_cpu_io);
+ info->prev_cpu_io = cur_iowait_time;
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ /* load is the percentage of time not spent in idle */
+ load = 100 * (wall_time - idle_time) / wall_time;
+ info->load[info->idx] = load;
+ hp_printk("cpu %d load %u ", i, load);
+
+ /* iowait is the percentage of time spent waiting on io */
+ iowait = 100 * (iowait_time) / wall_time;
+ info->io[info->idx++] = iowait;
+ hp_printk("iowait %u\n", iowait);
+
+ if (info->idx >= LOAD_MONITOR)
+ info->idx = 0;
+
+ total_load += load;
+ }
+
+ return total_load;
+}
+
+static unsigned long determine_cpu_load_trend(void)
+{
+ int i, j, k;
+ unsigned long total_load = 0;
+
+ /* Get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load = 0;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ for (k = 0, j = info->idx; k < LOAD_MONITOR; k++, j++)
+ load += info->load[j % LOAD_MONITOR];
+
+ load /= LOAD_MONITOR;
+
+ hp_printk("cpu %d load trend %u\n", i, load);
+
+ total_load += load;
+ }
+
+ return total_load;
+}
+
+static unsigned long determine_cpu_balance_trend(void)
+{
+ int i, j, k;
+ unsigned long total_load = 0;
+ unsigned long min_load = (unsigned long) (-1);
+
+ /* Get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load = 0;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ for (k = 0, j = info->idx; k < LOAD_MONITOR; k++, j++)
+ load += info->load[j % LOAD_MONITOR];
+
+ load /= LOAD_MONITOR;
+
+ if (min_load > load)
+ min_load = load;
+ total_load += load;
+ }
+
+ if (min_load > min_trend)
+ total_load = (100 * total_load) / min_load;
+ else
+ total_load = 50 << num_online_cpus();
+
+ return total_load;
+}
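+
+/*
+ * Illustration of the balance metric above: with two online cpus whose
+ * load trends are 60 and 20, min_load = 20 > min_trend, so the result
+ * is 100 * (60 + 20) / 20 = 400, which exceeds max_unbalance (210) and
+ * is therefore treated as an unbalanced load.
+ */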
+
+static void init_cpu_load_trend(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct hotplug_cpu_info *info;
+ int j;
+
+ info = &per_cpu(hotplug_info, i);
+
+ info->prev_cpu_idle = get_cpu_idle_time_us(i,
+ &(info->prev_cpu_wall));
+ info->prev_cpu_io = get_cpu_iowait_time_us(i,
+ &(info->prev_cpu_wall));
+
+ for (j = 0; j < LOAD_MONITOR; j++) {
+ info->load[j] = 100;
+ info->io[j] = 100;
+ }
+ info->idx = 0;
+ }
+}
+
+static u32 get_num_interrupts_per_s(void)
+{
+ int cpu;
+ int i;
+ u64 num_irqs = 0;
+ ktime_t now;
+ static ktime_t last;
+ unsigned int delta;
+ u32 irqs = 0;
+
+ now = ktime_get();
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < NR_IRQS; i++)
+ num_irqs += kstat_irqs_cpu(i, cpu);
+ }
+ pr_debug("%s: total num irqs: %lld, previous %lld\n",
+ __func__, num_irqs, old_num_irqs);
+
+ if (old_num_irqs > 0) {
+ delta = (u32)ktime_to_ms(ktime_sub(now, last)) / 1000;
+ irqs = ((u32)(num_irqs - old_num_irqs)) / delta;
+ }
+
+ old_num_irqs = num_irqs;
+ last = now;
+
+ pr_debug("delta irqs per sec:%d\n", irqs);
+
+ return irqs;
+}
+
+static int set_cpufreq(int cpu, int min_freq, int max_freq)
+{
+ int ret;
+ struct cpufreq_policy policy;
+
+ ret = cpufreq_get_policy(&policy, cpu);
+ if (ret < 0) {
+ pr_err("usecase-gov: failed to read policy\n");
+ return ret;
+ }
+
+ if (policy.min > max_freq) {
+ ret = cpufreq_update_freq(cpu, min_freq, policy.max);
+ if (ret)
+ pr_err("usecase-gov: update min cpufreq failed (1)\n");
+ }
+ if (policy.max < min_freq) {
+ ret = cpufreq_update_freq(cpu, policy.min, max_freq);
+ if (ret)
+ pr_err("usecase-gov: update max cpufreq failed (2)\n");
+ }
+
+ ret = cpufreq_update_freq(cpu, min_freq, max_freq);
+ if (ret)
+ pr_err("usecase-gov: update min-max cpufreq failed\n");
+
+ return ret;
+}
+
+static void set_cpu_config(enum ux500_uc new_uc)
+{
+ bool update = false;
+ int ret;
+ int cpu;
+ static struct cpufreq_policy original_cpufreq_policy;
+
+ if (new_uc != current_uc)
+ update = true;
+ else if ((user_config_updated) && (new_uc == UX500_UC_USER))
+ update = true;
+
+ pr_debug("%s: new_usecase=%d, current_usecase=%d, update=%d\n",
+ __func__, new_uc, current_uc, update);
+
+ if (!update)
+ goto exit;
+
+ /* Cpu hotplug */
+ if (!(usecase_conf[new_uc].second_cpu_online) &&
+ (num_online_cpus() > 1))
+ cpu_down(1);
+ else if ((usecase_conf[new_uc].second_cpu_online) &&
+ (num_online_cpus() < 2))
+ cpu_up(1);
+
+ if (usecase_conf[new_uc].max_arm_opp) {
+ int max_freq;
+
+ max_freq = dbx500_cpufreq_percent2freq(usecase_conf[new_uc].max_arm_opp);
+
+ ret = cpufreq_get_policy(&original_cpufreq_policy, 0);
+ if (ret < 0)
+ pr_err("usecase-gov: fail to get cpufreq policy\n");
+
+ for_each_online_cpu(cpu) {
+ set_cpufreq(cpu,
+ original_cpufreq_policy.min,
+ max_freq);
+ }
+ }
+
+ if (new_uc == UX500_UC_NORMAL &&
+ usecase_conf[current_uc].max_arm_opp) {
+ /*
+ * Reset cpufreq limits to what they were before. Yes, overwrite
+ * any changes done outside the usecase governor's control.
+ */
+ for_each_online_cpu(cpu) {
+ set_cpufreq(cpu,
+ original_cpufreq_policy.min,
+ original_cpufreq_policy.max);
+ }
+ }
+
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usecase", usecase_conf[new_uc].min_arm_opp);
+
+ /* Cpu idle */
+ cpuidle_set_multiplier(usecase_conf[new_uc].cpuidle_multiplier);
+
+ /* L2 prefetch */
+ if (usecase_conf[new_uc].l2_prefetch_en)
+ outer_prefetch_enable();
+ else
+ outer_prefetch_disable();
+
+ /* Force cpuidle state */
+ cpuidle_force_state(usecase_conf[new_uc].forced_state);
+
+ /* QOS override */
+ prcmu_qos_voice_call_override(usecase_conf[new_uc].vc_override);
+
+ current_uc = new_uc;
+
+exit:
+ /* It's OK to clear this even if new_uc != UX500_UC_USER */
+ user_config_updated = false;
+}
+
+void usecase_update_governor_state(void)
+{
+ bool cancel_work = false;
+
+ mutex_lock(&usecase_mutex);
+
+ if (uc_master_enable && (usecase_conf[UX500_UC_AUTO].enable ||
+ usecase_conf[UX500_UC_USER].enable)) {
+ /*
+ * Usecases are enabled. If we are in early suspend put
+ * governor to work.
+ */
+ if (is_early_suspend && !is_work_scheduled) {
+ schedule_delayed_work_on(0, &work_usecase,
+ msecs_to_jiffies(CPULOAD_MEAS_DELAY));
+ is_work_scheduled = true;
+ } else if (!is_early_suspend && is_work_scheduled) {
+ /* Exiting from early suspend. */
+ cancel_work = true;
+ }
+
+ } else if (is_work_scheduled) {
+ /* No usecase enabled or governor is not enabled. */
+ cancel_work = true;
+ }
+
+ if (cancel_work) {
+ cancel_delayed_work_sync(&work_usecase);
+ is_work_scheduled = false;
+
+ /* Set the default settings before exiting. */
+ set_cpu_config(UX500_UC_NORMAL);
+ }
+
+ mutex_unlock(&usecase_mutex);
+
+}
+
+/*
+ * Start the load measurement every 6 s in order to determine whether one
+ * CPU can be unplugged. In order not to corrupt the measurement, the
+ * first load average is not computed here in the early suspend callback;
+ * the counters are only initialized.
+ */
+static void usecase_earlysuspend_callback(struct early_suspend *h)
+{
+ init_cpu_load_trend();
+
+ is_early_suspend = true;
+
+ usecase_update_governor_state();
+}
+
+/* Stop the measurement; called at late resume (LCD on) */
+static void usecase_lateresume_callback(struct early_suspend *h)
+{
+ is_early_suspend = false;
+
+ usecase_update_governor_state();
+}
+
+static void delayed_usecase_work(struct work_struct *work)
+{
+ unsigned long avg, load, trend, balance;
+ bool inc_perf = false;
+ bool dec_perf = false;
+ u32 irqs_per_s;
+
+ /* determine loadavg */
+ avg = determine_loadavg();
+ hp_printk("loadavg = %lu lower th %lu upper th %lu\n",
+ avg, lower_threshold, upper_threshold);
+
+ /* determine instant load */
+ load = determine_cpu_load();
+ hp_printk("cpu instant load = %lu max %lu\n", load, max_instant);
+
+ /* determine load trend */
+ trend = determine_cpu_load_trend();
+ hp_printk("cpu load trend = %lu min %lu unbal %lu\n",
+ trend, min_trend, trend_unbalance);
+
+ /* determine load balancing */
+ balance = determine_cpu_balance_trend();
+ hp_printk("load balancing trend = %lu min %lu\n",
+ balance, max_unbalance);
+
+ irqs_per_s = get_num_interrupts_per_s();
+
+ /* Don't let the configuration change in the middle of our calculations. */
+ mutex_lock(&usecase_mutex);
+
+ /* detect "instant" load increase */
+ if (load > max_instant || irqs_per_s > exit_irq_per_s) {
+ inc_perf = true;
+ } else if (!usecase_conf[UX500_UC_USER].enable &&
+ usecase_conf[UX500_UC_AUTO].enable) {
+ /* detect high loadavg use case */
+ if (avg > upper_threshold)
+ inc_perf = true;
+ /* detect idle use case */
+ else if (trend < min_trend)
+ dec_perf = true;
+ /* detect unbalanced low cpu load use case */
+ else if ((balance > max_unbalance) && (trend < trend_unbalance))
+ dec_perf = true;
+ /* detect low loadavg use case */
+ else if (avg < lower_threshold)
+ dec_perf = true;
+ /* All user use cases disabled, current load not triggering
+ * any change.
+ */
+ else if (user_config_updated)
+ dec_perf = true;
+ } else {
+ dec_perf = true;
+ }
+
+ /*
+ * set_cpu_config() will not update the config unless it has been
+ * changed.
+ */
+ if (dec_perf) {
+ if (usecase_conf[UX500_UC_USER].enable)
+ set_cpu_config(UX500_UC_USER);
+ else if (usecase_conf[UX500_UC_AUTO].enable)
+ set_cpu_config(UX500_UC_AUTO);
+ } else if (inc_perf) {
+ set_cpu_config(UX500_UC_NORMAL);
+ }
+
+ mutex_unlock(&usecase_mutex);
+
+ /* reprogram the scheduled work */
+ schedule_delayed_work_on(0, &work_usecase,
+ msecs_to_jiffies(CPULOAD_MEAS_DELAY));
+
+}
+
+static struct dentry *usecase_dir;
+
+#ifdef CONFIG_DEBUG_FS
+#define define_set(_name) \
+static ssize_t set_##_name(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ int err; \
+ long unsigned i; \
+ \
+ err = kstrtoul_from_user(user_buf, count, 0, &i); \
+ \
+ if (err) \
+ return err; \
+ \
+ _name = i; \
+ hp_printk("New value : %lu\n", _name); \
+ \
+ return count; \
+}
+
+define_set(upper_threshold);
+define_set(lower_threshold);
+define_set(max_unbalance);
+define_set(trend_unbalance);
+define_set(min_trend);
+define_set(max_instant);
+define_set(debug);
+
+#define define_print(_name) \
+static ssize_t print_##_name(struct seq_file *s, void *p) \
+{ \
+ return seq_printf(s, "%lu\n", _name); \
+}
+
+define_print(upper_threshold);
+define_print(lower_threshold);
+define_print(max_unbalance);
+define_print(trend_unbalance);
+define_print(min_trend);
+define_print(max_instant);
+define_print(debug);
+
+#define define_open(_name) \
+static ssize_t open_##_name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, print_##_name, inode->i_private); \
+}
+
+define_open(upper_threshold);
+define_open(lower_threshold);
+define_open(max_unbalance);
+define_open(trend_unbalance);
+define_open(min_trend);
+define_open(max_instant);
+define_open(debug);
+
+#define define_dbg_file(_name) \
+static const struct file_operations fops_##_name = { \
+ .open = open_##_name, \
+ .write = set_##_name, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .owner = THIS_MODULE, \
+}; \
+static struct dentry *file_##_name;
+
+define_dbg_file(upper_threshold);
+define_dbg_file(lower_threshold);
+define_dbg_file(max_unbalance);
+define_dbg_file(trend_unbalance);
+define_dbg_file(min_trend);
+define_dbg_file(max_instant);
+define_dbg_file(debug);
+
+struct dbg_file {
+ struct dentry **file;
+ const struct file_operations *fops;
+ const char *name;
+};
+
+#define define_dbg_entry(_name) \
+{ \
+ .file = &file_##_name, \
+ .fops = &fops_##_name, \
+ .name = #_name \
+}
+
+static struct dbg_file debug_entry[] = {
+ define_dbg_entry(upper_threshold),
+ define_dbg_entry(lower_threshold),
+ define_dbg_entry(max_unbalance),
+ define_dbg_entry(trend_unbalance),
+ define_dbg_entry(min_trend),
+ define_dbg_entry(max_instant),
+ define_dbg_entry(debug),
+};
+
+static int setup_debugfs(void)
+{
+ int i;
+ usecase_dir = debugfs_create_dir("usecase", NULL);
+
+ if (IS_ERR_OR_NULL(usecase_dir))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(debug_entry); i++) {
+ if (IS_ERR_OR_NULL(debugfs_create_file(debug_entry[i].name,
+ S_IWUGO | S_IRUGO,
+ usecase_dir,
+ NULL,
+ debug_entry[i].fops)))
+ goto fail;
+ }
+
+ if (IS_ERR_OR_NULL(debugfs_create_u32("exit_irq_per_s",
+ S_IWUGO | S_IRUGO, usecase_dir,
+ &exit_irq_per_s)))
+ goto fail;
+ return 0;
+fail:
+ debugfs_remove_recursive(usecase_dir);
+ return -EINVAL;
+}
+#else
+static int setup_debugfs(void)
+{
+ return 0;
+}
+#endif
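+
+/*
+ * Usage sketch for the debugfs entries above, assuming debugfs is
+ * mounted at /sys/kernel/debug:
+ *   cat /sys/kernel/debug/usecase/upper_threshold
+ *   echo 500 > /sys/kernel/debug/usecase/upper_threshold
+ * The same pattern applies to the other tunables and to exit_irq_per_s.
+ */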
+
+static void usecase_update_user_config(void)
+{
+ int i;
+ bool config_enable = false;
+ struct usecase_config *user_conf = &usecase_conf[UX500_UC_USER];
+
+ mutex_lock(&usecase_mutex);
+
+ user_conf->min_arm_opp = 25;
+ user_conf->max_arm_opp = 0;
+ user_conf->cpuidle_multiplier = 0;
+ user_conf->second_cpu_online = false;
+ user_conf->l2_prefetch_en = false;
+ user_conf->forced_state = cpuidle_deepest_state;
+ user_conf->vc_override = true; /* A single false will clear it. */
+
+ /* Don't include the Auto and Normal modes in this */
+ for (i = (UX500_UC_AUTO + 1); i < UX500_UC_USER; i++) {
+ if (!usecase_conf[i].enable)
+ continue;
+
+ config_enable = true;
+
+ /* It's the highest arm opp requirement that should be used */
+ if (usecase_conf[i].min_arm_opp > user_conf->min_arm_opp)
+ user_conf->min_arm_opp = usecase_conf[i].min_arm_opp;
+
+ if (usecase_conf[i].max_arm_opp > user_conf->max_arm_opp)
+ user_conf->max_arm_opp = usecase_conf[i].max_arm_opp;
+
+ if (usecase_conf[i].cpuidle_multiplier >
+ user_conf->cpuidle_multiplier)
+ user_conf->cpuidle_multiplier =
+ usecase_conf[i].cpuidle_multiplier;
+
+ user_conf->second_cpu_online |=
+ usecase_conf[i].second_cpu_online;
+
+ user_conf->l2_prefetch_en |=
+ usecase_conf[i].l2_prefetch_en;
+
+ /* Take the shallowest state. */
+ if (usecase_conf[i].forced_state < user_conf->forced_state)
+ user_conf->forced_state = usecase_conf[i].forced_state;
+
+ /* Only override QOS if all enabled configurations are
+ * requesting it.
+ */
+ if (!usecase_conf[i].vc_override)
+ user_conf->vc_override = false;
+ }
+
+ user_conf->enable = config_enable;
+ user_config_updated = true;
+
+ mutex_unlock(&usecase_mutex);
+}
+
+struct usecase_devclass_attr {
+ struct sysdev_class_attribute class_attr;
+ u32 index;
+};
+
+/* One node per usecase except "user", plus "current" and "enable" */
+#define UX500_NUM_SYSFS_NODES (UX500_UC_USER + 2)
+#define UX500_CURRENT_NODE_INDEX (UX500_NUM_SYSFS_NODES - 1)
+#define UX500_ENABLE_NODE_INDEX (UX500_NUM_SYSFS_NODES - 2)
+
+static struct usecase_devclass_attr usecase_dc_attr[UX500_NUM_SYSFS_NODES];
+
+static struct attribute *dbs_attributes[UX500_NUM_SYSFS_NODES + 1] = {NULL};
+
+static struct attribute_group dbs_attr_group = {
+ .attrs = dbs_attributes,
+ .name = "usecase",
+};
+
+static ssize_t show_current(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ enum ux500_uc display_uc = (current_uc == UX500_UC_MAX) ?
+ UX500_UC_NORMAL : current_uc;
+
+ return sprintf(buf, "min_arm_opp: %d\n"
+ "max_arm_opp: %d\n"
+ "cpuidle_multiplier: %ld\n"
+ "second_cpu_online: %s\n"
+ "l2_prefetch_en: %s\n"
+ "forced_state: %d\n"
+ "vc_override: %s\n",
+ usecase_conf[display_uc].min_arm_opp,
+ usecase_conf[display_uc].max_arm_opp,
+ usecase_conf[display_uc].cpuidle_multiplier,
+ usecase_conf[display_uc].second_cpu_online ? "true" : "false",
+ usecase_conf[display_uc].l2_prefetch_en ? "true" : "false",
+ usecase_conf[display_uc].forced_state,
+ usecase_conf[display_uc].vc_override ? "true" : "false");
+}
+
+static ssize_t show_enable(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", uc_master_enable);
+}
+
+static ssize_t store_enable(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ uc_master_enable = (bool) input;
+
+ usecase_update_governor_state();
+
+ return count;
+}
+
+static ssize_t show_dc_attr(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ struct usecase_devclass_attr *uattr =
+ container_of(attr, struct usecase_devclass_attr, class_attr);
+
+ return sprintf(buf, "%u\n",
+ usecase_conf[uattr->index].enable);
+}
+
+static ssize_t store_dc_attr(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ struct usecase_devclass_attr *uattr =
+ container_of(attr, struct usecase_devclass_attr, class_attr);
+
+ ret = sscanf(buf, "%u", &input);
+
+ /* Normal mode can't be changed. */
+ if ((ret != 1) || (uattr->index == 0))
+ return -EINVAL;
+
+ usecase_conf[uattr->index].enable = (bool)input;
+
+ usecase_update_user_config();
+
+ usecase_update_governor_state();
+
+ return count;
+}
+
+static int usecase_sysfs_init(void)
+{
+ int err;
+ int i;
+
+ /* Last two nodes are not based on usecase configurations */
+ for (i = 0; i < (UX500_NUM_SYSFS_NODES - 2); i++) {
+ usecase_dc_attr[i].class_attr.attr.name = usecase_conf[i].name;
+ usecase_dc_attr[i].class_attr.attr.mode = 0644;
+ usecase_dc_attr[i].class_attr.show = show_dc_attr;
+ usecase_dc_attr[i].class_attr.store = store_dc_attr;
+ usecase_dc_attr[i].index = i;
+
+ dbs_attributes[i] = &(usecase_dc_attr[i].class_attr.attr);
+ }
+
+ /* sysfs current */
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.attr.name =
+ "current";
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.attr.mode =
+ 0644;
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.show =
+ show_current;
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.store =
+ NULL;
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].index =
+ 0;
+ dbs_attributes[UX500_CURRENT_NODE_INDEX] =
+ &(usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.attr);
+
+ /* sysfs enable */
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.attr.name =
+ "enable";
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.attr.mode =
+ 0644;
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.show =
+ show_enable;
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.store =
+ store_enable;
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].index =
+ 0;
+ dbs_attributes[UX500_ENABLE_NODE_INDEX] =
+ &(usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.attr);
+
+ err = sysfs_create_group(&(cpu_sysdev_class.kset.kobj),
+ &dbs_attr_group);
+ if (err)
+ pr_err("usecase-gov: sysfs_create_group"
+ " failed with error = %d\n", err);
+
+ return err;
+}
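+
+/*
+ * The attribute group is attached to the cpu sysdev class, so the nodes
+ * are expected to show up under /sys/devices/system/cpu/usecase/ (the
+ * exact path is an assumption based on the standard sysdev layout):
+ *   echo 1 > /sys/devices/system/cpu/usecase/voice-call
+ *   cat /sys/devices/system/cpu/usecase/current
+ */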
+
+static void usecase_cpuidle_init(void)
+{
+ int max_states;
+ int i;
+ struct cstate *state = ux500_ci_get_cstates(&max_states);
+
+ for (i = 0; i < max_states; i++)
+ if ((state[i].APE == APE_OFF) && (state[i].ARM == ARM_RET))
+ break;
+
+ usecase_conf[UX500_UC_LPA].forced_state = i;
+
+ cpuidle_deepest_state = max_states - 1;
+}
+
+/* initialize devices */
+static int __init init_usecase_devices(void)
+{
+ int err;
+
+ pr_info("Use-case governor initialized\n");
+
+ /* add early_suspend callback */
+ usecase_early_suspend.level = 200;
+ usecase_early_suspend.suspend = usecase_earlysuspend_callback;
+ usecase_early_suspend.resume = usecase_lateresume_callback;
+ register_early_suspend(&usecase_early_suspend);
+
+ /* register delayed queuework */
+ INIT_DELAYED_WORK_DEFERRABLE(&work_usecase,
+ delayed_usecase_work);
+
+ init_cpu_load_trend();
+
+ err = setup_debugfs();
+ if (err)
+ goto error;
+ err = usecase_sysfs_init();
+ if (err)
+ goto error2;
+
+ usecase_cpuidle_init();
+
+ prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "usecase", 25);
+
+ return 0;
+error2:
+ debugfs_remove_recursive(usecase_dir);
+error:
+ unregister_early_suspend(&usecase_early_suspend);
+ return err;
+}
+
+device_initcall(init_usecase_devices);
diff --git a/arch/arm/mach-ux500/prcc.h b/arch/arm/mach-ux500/prcc.h
new file mode 100644
index 00000000000..05220f8eaa1
--- /dev/null
+++ b/arch/arm/mach-ux500/prcc.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2009-2011 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_UX500_PRCC_H__
+
+#define PRCC_PCKEN 0x000
+#define PRCC_PCKDIS 0x004
+#define PRCC_KCKEN 0x008
+#define PRCC_KCKDIS 0x00C
+#define PRCC_PCKSR 0x010
+#define PRCC_KCKSR 0x014
+#define PRCC_K_SOFTRST_CLR 0x018
+#define PRCC_K_SOFTRST_SET 0x01C
+#define PRCC_K_RST_STATUS 0x020
+
+#endif
diff --git a/arch/arm/mach-ux500/prcmu-debug.c b/arch/arm/mach-ux500/prcmu-debug.c
new file mode 100644
index 00000000000..4b26d911adc
--- /dev/null
+++ b/arch/arm/mach-ux500/prcmu-debug.c
@@ -0,0 +1,563 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Martin Persson for ST-Ericsson
+ * Etienne Carriere <etienne.carriere@stericsson.com> for ST-Ericsson
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <mach/hardware.h>
+
+#define MAX_STATES 5
+#define MAX_NAMELEN 16
+
+struct state_history {
+ ktime_t start;
+ u32 state;
+ u32 counter[MAX_STATES];
+ u8 opps[MAX_STATES];
+ int max_states;
+ int req;
+ bool reqs[MAX_STATES];
+ ktime_t time[MAX_STATES];
+ int state_names[MAX_STATES];
+ char prefix[MAX_NAMELEN];
+ spinlock_t lock;
+};
+
+static struct state_history ape_sh = {
+ .prefix = "APE",
+ .req = PRCMU_QOS_APE_OPP,
+ .opps = {APE_50_OPP, APE_100_OPP},
+ .state_names = {50, 100},
+ .max_states = 2,
+};
+
+static struct state_history ddr_sh = {
+ .prefix = "DDR",
+ .req = PRCMU_QOS_DDR_OPP,
+ .opps = {DDR_25_OPP, DDR_50_OPP, DDR_100_OPP},
+ .state_names = {25, 50, 100},
+ .max_states = 3,
+};
+
+static struct state_history arm_sh = {
+ .prefix = "ARM",
+ .req = PRCMU_QOS_ARM_OPP,
+ .opps = {ARM_EXTCLK, ARM_50_OPP, ARM_100_OPP, ARM_MAX_OPP},
+ .state_names = {25, 50, 100, 125},
+ .max_states = 4,
+};
+
+static int ape_voltage_count;
+
+static void log_set(struct state_history *sh, u8 opp)
+{
+ ktime_t now;
+ ktime_t dtime;
+ unsigned long flags;
+ int state;
+
+ now = ktime_get();
+ spin_lock_irqsave(&sh->lock, flags);
+
+ for (state = 0; state < sh->max_states; state++)
+ if (sh->opps[state] == opp)
+ break;
+ BUG_ON(state >= sh->max_states);
+
+ dtime = ktime_sub(now, sh->start);
+ sh->time[sh->state] = ktime_add(sh->time[sh->state], dtime);
+ sh->start = now;
+ sh->counter[sh->state]++;
+ sh->state = state;
+
+ spin_unlock_irqrestore(&sh->lock, flags);
+}
+
+void prcmu_debug_ape_opp_log(u8 opp)
+{
+ log_set(&ape_sh, opp);
+}
+
+void prcmu_debug_ddr_opp_log(u8 opp)
+{
+ log_set(&ddr_sh, opp);
+}
+
+void prcmu_debug_arm_opp_log(u8 opp)
+{
+ log_set(&arm_sh, opp);
+}
+
+static void log_reset(struct state_history *sh)
+{
+ unsigned long flags;
+ int i;
+
+ pr_info("reset\n");
+
+ spin_lock_irqsave(&sh->lock, flags);
+ for (i = 0; i < sh->max_states; i++) {
+ sh->counter[i] = 0;
+ sh->time[i] = ktime_set(0, 0);
+ }
+
+ sh->start = ktime_get();
+ spin_unlock_irqrestore(&sh->lock, flags);
+
+}
+
+static ssize_t ape_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ log_reset(&ape_sh);
+ return count;
+}
+
+static ssize_t ddr_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ log_reset(&ddr_sh);
+ return count;
+}
+
+static ssize_t arm_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ log_reset(&arm_sh);
+ return count;
+}
+
+static int log_print(struct seq_file *s, struct state_history *sh)
+{
+ int i;
+ unsigned long flags;
+ ktime_t total;
+ ktime_t dtime;
+ s64 t_ms;
+ s64 perc;
+ s64 total_ms;
+
+ spin_lock_irqsave(&sh->lock, flags);
+
+ dtime = ktime_sub(ktime_get(), sh->start);
+
+ total = dtime;
+
+ for (i = 0; i < sh->max_states; i++)
+ total = ktime_add(total, sh->time[i]);
+ total_ms = ktime_to_ms(total);
+
+ for (i = 0; i < sh->max_states; i++) {
+ ktime_t t = sh->time[i];
+ if (sh->state == i)
+ t = ktime_add(t, dtime);
+
+ t_ms = ktime_to_ms(t);
+ perc = 100 * t_ms;
+ do_div(perc, total_ms);
+
+ seq_printf(s, "%s OPP %d: # %u in %lld ms %d%%\n",
+ sh->prefix, sh->state_names[i],
+ sh->counter[i] + (int)(sh->state == i),
+ t_ms, (u32)perc);
+
+ }
+ spin_unlock_irqrestore(&sh->lock, flags);
+ return 0;
+}
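+
+/*
+ * For illustration: if the ARM has spent 750 ms of a 1000 ms total at
+ * 50% OPP, the corresponding line reads something like
+ * "ARM OPP 50: # <n> in 750 ms 75%".
+ */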
+
+static int ape_stats_print(struct seq_file *s, void *p)
+{
+ log_print(s, &ape_sh);
+ return 0;
+}
+
+static int ddr_stats_print(struct seq_file *s, void *p)
+{
+ log_print(s, &ddr_sh);
+ return 0;
+}
+
+static int arm_stats_print(struct seq_file *s, void *p)
+{
+ log_print(s, &arm_sh);
+ return 0;
+}
+
+static int opp_read(struct seq_file *s, void *p)
+{
+ int opp;
+
+ struct state_history *sh = (struct state_history *)s->private;
+
+ switch (sh->req) {
+ case PRCMU_QOS_DDR_OPP:
+ opp = prcmu_get_ddr_opp();
+ seq_printf(s, "%s (%d)\n",
+ (opp == DDR_100_OPP) ? "100%" :
+ (opp == DDR_50_OPP) ? "50%" :
+ (opp == DDR_25_OPP) ? "25%" :
+ "unknown", opp);
+ break;
+ case PRCMU_QOS_APE_OPP:
+ opp = prcmu_get_ape_opp();
+ seq_printf(s, "%s (%d)\n",
+ (opp == APE_100_OPP) ? "100%" :
+ (opp == APE_50_OPP) ? "50%" :
+ "unknown", opp);
+ break;
+ case PRCMU_QOS_ARM_OPP:
+ opp = prcmu_get_arm_opp();
+ seq_printf(s, "%s (%d)\n",
+ (opp == ARM_MAX_OPP) ? "max" :
+ (opp == ARM_MAX_FREQ100OPP) ? "max-freq100" :
+ (opp == ARM_100_OPP) ? "100%" :
+ (opp == ARM_50_OPP) ? "50%" :
+ (opp == ARM_EXTCLK) ? "25% (extclk)" :
+ "unknown", opp);
+ break;
+ default:
+ break;
+ }
+ return 0;
+
+}
+
+static ssize_t opp_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ long unsigned i;
+ int err;
+ struct state_history *sh = (struct state_history *)
+ ((struct seq_file *)file->private_data)->private;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &i);
+
+ if (err)
+ return err;
+
+ prcmu_qos_force_opp(sh->req, i);
+
+ pr_info("prcmu debug: forced OPP for %s to %d\n", sh->prefix, (int)i);
+
+ return count;
+}
+
+static int cpufreq_delay_read(struct seq_file *s, void *p)
+{
+ return seq_printf(s, "%lu\n", prcmu_qos_get_cpufreq_opp_delay());
+}
+
+static int ape_voltage_read(struct seq_file *s, void *p)
+{
+ return seq_printf(s, "This reference count only includes "
+ "requests via debugfs.\nCount: %d\n",
+ ape_voltage_count);
+}
+
+static ssize_t ape_voltage_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ long unsigned i;
+ int err;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &i);
+
+ if (err)
+ return err;
+
+ switch (i) {
+ case 0:
+ if (ape_voltage_count == 0)
+ pr_info("prcmu debug: reference count is already 0\n");
+ else {
+ err = prcmu_request_ape_opp_100_voltage(false);
+ if (err)
+ pr_err("prcmu debug: drop request failed\n");
+ else
+ ape_voltage_count--;
+ }
+ break;
+ case 1:
+ err = prcmu_request_ape_opp_100_voltage(true);
+ if (err)
+ pr_err("prcmu debug: request failed\n");
+ else
+ ape_voltage_count++;
+ break;
+ default:
+ pr_info("prcmu debug: value not equal to 0 or 1\n");
+ }
+ return count;
+}
+
+static ssize_t cpufreq_delay_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int err;
+ long unsigned i;
+
+ err = kstrtoul_from_user(user_buf, count, 0, &i);
+
+ if (err)
+ return err;
+
+ prcmu_qos_set_cpufreq_opp_delay(i);
+
+ pr_info("prcmu debug: changed delay between cpufreq change and QoS "
+ "requirement to %lu.\n", i);
+
+ return count;
+}
+
+/* These are only for u8500 */
+#define PRCM_AVS_BASE 0x2FC
+#define AVS_VBB_RET 0x0
+#define AVS_VBB_MAX_OPP 0x1
+#define AVS_VBB_100_OPP 0x2
+#define AVS_VBB_50_OPP 0x3
+#define AVS_VARM_MAX_OPP 0x4
+#define AVS_VARM_100_OPP 0x5
+#define AVS_VARM_50_OPP 0x6
+#define AVS_VARM_RET 0x7
+#define AVS_VAPE_100_OPP 0x8
+#define AVS_VAPE_50_OPP 0x9
+#define AVS_VMOD_100_OPP 0xA
+#define AVS_VMOD_50_OPP 0xB
+#define AVS_VSAFE 0xC
+#define AVS_VSAFE_RET 0xD
+#define AVS_SIZE 14
+
+static int avs_read(struct seq_file *s, void *p)
+{
+
+ u8 avs[AVS_SIZE];
+ void __iomem *tcdm_base;
+
+ if (cpu_is_u8500()) {
+ tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
+
+ memcpy_fromio(avs, tcdm_base + PRCM_AVS_BASE, AVS_SIZE);
+
+ seq_printf(s, "VBB_RET : 0x%02x\n", avs[AVS_VBB_RET]);
+ seq_printf(s, "VBB_MAX_OPP : 0x%02x\n", avs[AVS_VBB_MAX_OPP]);
+ seq_printf(s, "VBB_100_OPP : 0x%02x\n", avs[AVS_VBB_100_OPP]);
+ seq_printf(s, "VBB_50_OPP : 0x%02x\n", avs[AVS_VBB_50_OPP]);
+ seq_printf(s, "VARM_MAX_OPP : 0x%02x\n", avs[AVS_VARM_MAX_OPP]);
+ seq_printf(s, "VARM_100_OPP : 0x%02x\n", avs[AVS_VARM_100_OPP]);
+ seq_printf(s, "VARM_50_OPP : 0x%02x\n", avs[AVS_VARM_50_OPP]);
+ seq_printf(s, "VARM_RET : 0x%02x\n", avs[AVS_VARM_RET]);
+ seq_printf(s, "VAPE_100_OPP : 0x%02x\n", avs[AVS_VAPE_100_OPP]);
+ seq_printf(s, "VAPE_50_OPP : 0x%02x\n", avs[AVS_VAPE_50_OPP]);
+ seq_printf(s, "VMOD_100_OPP : 0x%02x\n", avs[AVS_VMOD_100_OPP]);
+ seq_printf(s, "VMOD_50_OPP : 0x%02x\n", avs[AVS_VMOD_50_OPP]);
+ seq_printf(s, "VSAFE : 0x%02x\n", avs[AVS_VSAFE]);
+ seq_printf(s, "VSAFE_RET : 0x%02x\n", avs[AVS_VSAFE_RET]);
+ } else {
+ seq_printf(s, "Only u8500 supported.\n");
+ }
+
+ return 0;
+}
+
+static int opp_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, opp_read, inode->i_private);
+}
+
+static int ape_stats_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, ape_stats_print, inode->i_private);
+}
+
+static int ddr_stats_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, ddr_stats_print, inode->i_private);
+}
+
+static int arm_stats_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, arm_stats_print, inode->i_private);
+}
+
+static int cpufreq_delay_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, cpufreq_delay_read, inode->i_private);
+}
+
+static int ape_voltage_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, ape_voltage_read, inode->i_private);
+}
+
+static int avs_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, avs_read, inode->i_private);
+}
+
+static const struct file_operations opp_fops = {
+ .open = opp_open_file,
+ .write = opp_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ape_stats_fops = {
+ .open = ape_stats_open_file,
+ .write = ape_stats_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ddr_stats_fops = {
+ .open = ddr_stats_open_file,
+ .write = ddr_stats_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations arm_stats_fops = {
+ .open = arm_stats_open_file,
+ .write = arm_stats_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations cpufreq_delay_fops = {
+ .open = cpufreq_delay_open_file,
+ .write = cpufreq_delay_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ape_voltage_fops = {
+ .open = ape_voltage_open_file,
+ .write = ape_voltage_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations avs_fops = {
+ .open = avs_open_file,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int setup_debugfs(void)
+{
+ struct dentry *dir;
+ struct dentry *file;
+
+ dir = debugfs_create_dir("prcmu", NULL);
+ if (IS_ERR_OR_NULL(dir))
+ goto fail;
+
+ file = debugfs_create_file("ape_stats", (S_IRUGO | S_IWUGO),
+ dir, NULL, &ape_stats_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("ddr_stats", (S_IRUGO | S_IWUGO),
+ dir, NULL, &ddr_stats_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("arm_stats", (S_IRUGO | S_IWUGO),
+ dir, NULL, &arm_stats_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("ape_opp", (S_IRUGO),
+ dir, (void *)&ape_sh,
+ &opp_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("ddr_opp", (S_IRUGO),
+ dir, (void *)&ddr_sh,
+ &opp_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("arm_opp", (S_IRUGO),
+ dir, (void *)&arm_sh,
+ &opp_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("opp_cpufreq_delay", (S_IRUGO),
+ dir, NULL, &cpufreq_delay_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("ape_voltage", (S_IRUGO),
+ dir, NULL, &ape_voltage_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ file = debugfs_create_file("avs",
+ (S_IRUGO),
+ dir, NULL, &avs_fops);
+ if (IS_ERR_OR_NULL(file))
+ goto fail;
+
+ return 0;
+fail:
+ if (!IS_ERR_OR_NULL(dir))
+ debugfs_remove_recursive(dir);
+
+ pr_err("prcmu debug: debugfs entry failed\n");
+ return -ENOMEM;
+}
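+
+/*
+ * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug:
+ *   cat /sys/kernel/debug/prcmu/arm_opp       - show the current ARM OPP
+ *   echo <value> > /sys/kernel/debug/prcmu/arm_opp
+ *                                             - force an OPP via
+ *                                               prcmu_qos_force_opp()
+ *   cat /sys/kernel/debug/prcmu/arm_stats     - OPP residency statistics
+ * Writing anything to the *_stats files resets the statistics.
+ */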
+
+static __init int prcmu_debug_init(void)
+{
+ spin_lock_init(&ape_sh.lock);
+ spin_lock_init(&ddr_sh.lock);
+ spin_lock_init(&arm_sh.lock);
+ ape_sh.start = ktime_get();
+ ddr_sh.start = ktime_get();
+ arm_sh.start = ktime_get();
+ return 0;
+}
+arch_initcall(prcmu_debug_init);
+
+static __init int prcmu_debug_debugfs_init(void)
+{
+ setup_debugfs();
+ return 0;
+}
+late_initcall(prcmu_debug_debugfs_init);
diff --git a/arch/arm/mach-ux500/product.c b/arch/arm/mach-ux500/product.c
new file mode 100644
index 00000000000..014fe4b145d
--- /dev/null
+++ b/arch/arm/mach-ux500/product.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Jens Wiklander <jens.wiklander@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/tee.h>
+#include <linux/module.h>
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+
+#define STATIC_TEE_TA_START_LOW 0xBC765EDE
+#define STATIC_TEE_TA_START_MID 0x6724
+#define STATIC_TEE_TA_START_HIGH 0x11DF
+#define STATIC_TEE_TA_START_CLOCKSEQ \
+ {0x8E, 0x12, 0xEC, 0xDB, 0xDF, 0xD7, 0x20, 0x85}
+
+#define U5500_PRCMU_DBG_PWRCTRL (U5500_PRCMU_BASE + 0x4AC)
+#define PRCMU_DBG_PWRCTRL_A9DBGCLKEN (1 << 4)
+
+static struct tee_product_config product_config;
+
+bool ux500_jtag_enabled(void)
+{
+#ifdef CONFIG_UX500_DEBUG_NO_LAUTERBACH
+ return false;
+#else
+ if (machine_is_snowball())
+ return true;
+ if (cpu_is_u5500())
+ return readl_relaxed(__io_address(U5500_PRCMU_DBG_PWRCTRL))
+ & PRCMU_DBG_PWRCTRL_A9DBGCLKEN;
+ if (cpu_is_u8500())
+ return (product_config.rt_flags & TEE_RT_FLAGS_JTAG_ENABLED) ==
+ TEE_RT_FLAGS_JTAG_ENABLED;
+
+ return true;
+#endif
+}
+
+static int __init product_detect(void)
+{
+ int err;
+ int origin_err;
+ struct tee_operation operation = { { { 0 } } };
+ struct tee_context context;
+ struct tee_session session;
+
+ /* Selects trustzone application needed for the job. */
+ struct tee_uuid static_uuid = {
+ STATIC_TEE_TA_START_LOW,
+ STATIC_TEE_TA_START_MID,
+ STATIC_TEE_TA_START_HIGH,
+ STATIC_TEE_TA_START_CLOCKSEQ,
+ };
+
+ if (cpu_is_u5500())
+ return -ENODEV;
+
+ err = teec_initialize_context(NULL, &context);
+ if (err) {
+ pr_err("ux500-product: unable to initialize tee context,"
+ " err = %d\n", err);
+ err = -EINVAL;
+ goto error0;
+ }
+
+ err = teec_open_session(&context, &session, &static_uuid,
+ TEEC_LOGIN_PUBLIC, NULL, NULL, &origin_err);
+ if (err) {
+ pr_err("ux500-product: unable to open tee session,"
+ " tee error = %d, origin error = %d\n",
+ err, origin_err);
+ err = -EINVAL;
+ goto error1;
+ }
+
+ operation.shm[0].buffer = &product_config;
+ operation.shm[0].size = sizeof(product_config);
+ operation.shm[0].flags = TEEC_MEM_OUTPUT;
+ operation.flags = TEEC_MEMREF_0_USED;
+
+ err = teec_invoke_command(&session,
+ TEE_STA_GET_PRODUCT_CONFIG,
+ &operation, &origin_err);
+ if (err) {
+ pr_err("ux500-product: fetching product settings failed, err=%d",
+ err);
+ err = -EINVAL;
+ goto error1;
+ }
+
+ switch (product_config.product_id) {
+ case TEE_PRODUCT_ID_8400:
+ pr_info("ux500-product: u8400 detected\n");
+ break;
+ case TEE_PRODUCT_ID_8500:
+ pr_info("ux500-product: u8500 detected\n");
+ break;
+ case TEE_PRODUCT_ID_9500:
+ pr_info("ux500-product: u9500 detected\n");
+ break;
+ case TEE_PRODUCT_ID_5500:
+ pr_info("ux500-product: u5500 detected\n");
+ break;
+ case TEE_PRODUCT_ID_7400:
+ pr_info("ux500-product: u7400 detected\n");
+ break;
+ case TEE_PRODUCT_ID_8500C:
+ pr_info("ux500-product: u8500C detected\n");
+ break;
+ case TEE_PRODUCT_ID_UNKNOWN:
+ default:
+ pr_info("ux500-product: UNKNOWN! (0x%x) detected\n",
+ product_config.product_id);
+ break;
+ }
+ pr_info("ux500-product: JTAG is %s\n",
+ ux500_jtag_enabled() ? "enabled" : "disabled");
+error1:
+ (void) teec_finalize_context(&context);
+error0:
+ return err;
+}
+device_initcall(product_detect);
diff --git a/arch/arm/mach-ux500/product.h b/arch/arm/mach-ux500/product.h
new file mode 100644
index 00000000000..502eff4df14
--- /dev/null
+++ b/arch/arm/mach-ux500/product.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Jens Wiklander <jens.wiklander@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#ifndef UX500_PRODUCT_H
+#define UX500_PRODUCT_H
+
+#ifdef CONFIG_TEE_UX500
+
+bool ux500_jtag_enabled(void);
+
+#else
+
+static inline bool ux500_jtag_enabled(void)
+{
+ return true;
+}
+
+#endif
+#endif
diff --git a/arch/arm/mach-ux500/reboot_reasons.c b/arch/arm/mach-ux500/reboot_reasons.c
new file mode 100644
index 00000000000..167a527e33e
--- /dev/null
+++ b/arch/arm/mach-ux500/reboot_reasons.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Rickard Evertsson <rickard.evertsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Use this file to customize your reboot / sw reset reasons. Add, remove or
+ * modify reasons in reboot_reasons[].
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <mach/reboot_reasons.h>
+
+struct reboot_reason reboot_reasons[] = {
+ {"crash", SW_RESET_CRASH},
+ {"factory-reset", SW_RESET_FACTORY_RESET},
+ {"recovery", SW_RESET_RECOVERY},
+ {"charging", SW_RESET_CHARGING},
+ {"coldstart", SW_RESET_COLDSTART},
+ {"none", SW_RESET_NO_ARGUMENT}, /* Normal Boot */
+ {"chgonly-exit", SW_RESET_CHGONLY_EXIT}, /* Exit Charge Only Mode */
+};
+
+unsigned int reboot_reasons_size = ARRAY_SIZE(reboot_reasons);
+
+/*
+ * The reboot reason string can be 255 characters long and the memory
+ * in which we save the sw reset reason is 2 bytes. Therefore we need to
+ * convert the string into a 16 bit pattern.
+ *
+ * See file reboot_reasons.h for conversion.
+ */
+u16 reboot_reason_code(const char *cmd)
+{
+ int i;
+
+ if (cmd == NULL) {
+ if (oops_in_progress) {
+ /* if we're in an oops assume it's a crash */
+ return SW_RESET_CRASH;
+ } else {
+ /* normal reboot w/o argument */
+ return SW_RESET_NO_ARGUMENT;
+ }
+ }
+
+ /* Search through reboot reason list */
+ for (i = 0; i < reboot_reasons_size; i++) {
+ if (!strcmp(reboot_reasons[i].reason, cmd))
+ return reboot_reasons[i].code;
+ }
+
+ /* No valid reboot reason found */
+ return SW_RESET_NO_ARGUMENT;
+}
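+
+/*
+ * Example: a user space "reboot recovery" typically reaches this
+ * function with cmd = "recovery", which is mapped to SW_RESET_RECOVERY
+ * before being stored in the two-byte reset-reason area.
+ */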
+
+/*
+ * The saved sw reset reason is a 2 byte code that is translated into
+ * a reboot reason string which is up to 255 characters long by this
+ * function.
+ *
+ * See file reboot_reasons.h for conversion.
+ */
+const char *reboot_reason_string(u16 code)
+{
+ int i;
+
+ /* Search through reboot reason list */
+ for (i = 0; i < reboot_reasons_size; i++) {
+ if (reboot_reasons[i].code == code)
+ return reboot_reasons[i].reason;
+ }
+
+ /* No valid reboot reason code found */
+ return "unknown";
+}
diff --git a/arch/arm/mach-ux500/regulator-u5500.h b/arch/arm/mach-ux500/regulator-u5500.h
new file mode 100644
index 00000000000..cf3eeed9366
--- /dev/null
+++ b/arch/arm/mach-ux500/regulator-u5500.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef __REGULATOR_U5500_H
+#define __REGULATOR_U5500_H
+
+enum u5500_regulator_id {
+ U5500_REGULATOR_VAPE,
+ U5500_REGULATOR_SWITCH_SGA,
+ U5500_REGULATOR_SWITCH_HVA,
+ U5500_REGULATOR_SWITCH_SIA,
+ U5500_REGULATOR_SWITCH_DISP,
+ U5500_REGULATOR_SWITCH_ESRAM12,
+ U5500_NUM_REGULATORS
+};
+
+#endif
diff --git a/arch/arm/mach-ux500/tee_service_svp.c b/arch/arm/mach-ux500/tee_service_svp.c
new file mode 100644
index 00000000000..aa65dd961a0
--- /dev/null
+++ b/arch/arm/mach-ux500/tee_service_svp.c
@@ -0,0 +1,66 @@
+/*
+ * TEE service to handle the calls to trusted applications in SVP.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/tee.h>
+#include <linux/err.h>
+#include "mach/tee_ta_start_modem.h"
+
+static int cmp_uuid_start_modem(struct tee_uuid *uuid)
+{
+ int ret = -EINVAL;
+
+ if (uuid == NULL)
+ return -EINVAL;
+
+ /* This handles the calls to the TA for starting the modem */
+ if ((uuid->timeLow == UUID_TEE_TA_START_MODEM_LOW) &&
+ (uuid->timeMid == UUID_TEE_TA_START_MODEM_MID) &&
+ (uuid->timeHiAndVersion == UUID_TEE_TA_START_MODEM_HIGH)) {
+
+ u8 clockSeqAndNode[TEE_UUID_CLOCK_SIZE] =
+ UUID_TEE_TA_START_MODEM_CLOCKSEQ;
+
+ ret = memcmp(uuid->clockSeqAndNode, clockSeqAndNode,
+ TEE_UUID_CLOCK_SIZE);
+ }
+
+ return ret;
+}
+
+int call_sec_world(struct tee_session *ts, int sec_cmd)
+{
+ int ret = 0;
+
+ if (ts == NULL)
+ return -EINVAL;
+
+ if (cmp_uuid_start_modem(ts->uuid))
+ return -EINVAL;
+
+ switch (ts->cmd) {
+ case COMMAND_ID_START_MODEM:
+ ret = tee_ta_start_modem((struct tee_ta_start_modem *)
+ ts->op);
+ if (ret) {
+ ts->err = TEED_ERROR_GENERIC;
+ ts->origin = TEED_ORIGIN_TEE_APPLICATION;
+ pr_err("tee_ta_start_modem() failed!\n");
+ return ret;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* TODO: handle more trusted applications. */
+
+ return ret;
+}
diff --git a/arch/arm/mach-ux500/tee_ta_start_modem_svp.c b/arch/arm/mach-ux500/tee_ta_start_modem_svp.c
new file mode 100644
index 00000000000..12337b93154
--- /dev/null
+++ b/arch/arm/mach-ux500/tee_ta_start_modem_svp.c
@@ -0,0 +1,56 @@
+/*
+ * Trusted application for starting the modem.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <mach/hardware.h>
+
+#include "mach/tee_ta_start_modem.h"
+
+static int reset_modem(unsigned long modem_start_addr)
+{
+ void __iomem *base = ioremap(U5500_ACCCON_BASE_SEC, 0x2FF);
+ if (!base)
+ return -ENOMEM;
+
+ pr_info("[%s] Setting modem start address!\n", __func__);
+ writel(base + (U5500_ACCCON_CPUVEC_RESET_ADDR_OFFSET/sizeof(uint32_t)),
+ modem_start_addr);
+
+ pr_info("[%s] resetting the modem!\n", __func__);
+ writel(base + (U5500_ACCCON_ACC_CPU_CTRL_OFFSET/sizeof(uint32_t)), 1);
+
+ iounmap(base);
+
+ return 0;
+}
+
+int tee_ta_start_modem(struct tee_ta_start_modem *data)
+{
+ int ret = 0;
+ struct elfhdr *elfhdr;
+ void __iomem *vaddr;
+
+ vaddr = ioremap((unsigned long)data->access_image_descr.elf_hdr,
+ sizeof(struct elfhdr));
+ if (!vaddr)
+ return -ENOMEM;
+
+ elfhdr = (struct elfhdr *)readl(vaddr);
+ pr_info("Reading in kernel:elfhdr 0x%x:elfhdr->entry=0x%x\n",
+ (uint32_t)elfhdr, (uint32_t)elfhdr->e_entry);
+
+ pr_info("[%s] reset modem()...\n", __func__);
+ ret = reset_modem(elfhdr->e_entry);
+
+ iounmap(vaddr);
+
+ return ret;
+}
diff --git a/arch/arm/mach-ux500/tee_ux500.c b/arch/arm/mach-ux500/tee_ux500.c
new file mode 100644
index 00000000000..160ca529261
--- /dev/null
+++ b/arch/arm/mach-ux500/tee_ux500.c
@@ -0,0 +1,95 @@
+/*
+ * TEE service to handle the calls to trusted applications.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/kernel.h>
+#include <linux/tee.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+
+#include <mach/hardware.h>
+
+#define ISSWAPI_EXECUTE_TA 0x11000001
+#define ISSWAPI_CLOSE_TA 0x11000002
+
+#define SEC_ROM_NO_FLAG_MASK 0x0000
+
+static u32 call_sec_rom_bridge(u32 service_id, u32 cfg, ...)
+{
+ typedef u32 (*bridge_func)(u32, u32, va_list);
+ bridge_func hw_sec_rom_pub_bridge;
+ va_list ap;
+ u32 ret;
+
+ if (cpu_is_u8500v20_or_later())
+ hw_sec_rom_pub_bridge = (bridge_func)
+ ((u32)IO_ADDRESS(U8500_BOOT_ROM_BASE + 0x17300));
+ else if (cpu_is_u5500())
+ hw_sec_rom_pub_bridge = (bridge_func)
+ ((u32)IO_ADDRESS(U5500_BOOT_ROM_BASE + 0x18300));
+ else
+ ux500_unknown_soc();
+
+ va_start(ap, cfg);
+ ret = hw_sec_rom_pub_bridge(service_id, cfg, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+int call_sec_world(struct tee_session *ts, int sec_cmd)
+{
+ /*
+ * ts->ta and ts->uuid are set to NULL when opening the device,
+ * hence it should be safe to just do the call here.
+ */
+
+ switch (sec_cmd) {
+ case TEED_INVOKE:
+ if (!ts->uuid) {
+ call_sec_rom_bridge(ISSWAPI_EXECUTE_TA,
+ SEC_ROM_NO_FLAG_MASK,
+ virt_to_phys(&ts->id),
+ NULL,
+ virt_to_phys(ts->ta),
+ ts->cmd,
+ virt_to_phys((void *)(ts->op)),
+ virt_to_phys((void *)(&ts->err)),
+ virt_to_phys((void *)(&ts->origin)));
+ } else {
+ call_sec_rom_bridge(ISSWAPI_EXECUTE_TA,
+ SEC_ROM_NO_FLAG_MASK,
+ virt_to_phys(&ts->id),
+ virt_to_phys(ts->uuid),
+ virt_to_phys(ts->ta),
+ ts->cmd,
+ virt_to_phys((void *)(ts->op)),
+ virt_to_phys((void *)(&ts->err)),
+ virt_to_phys((void *)(&ts->origin)));
+ }
+ break;
+
+ case TEED_CLOSE_SESSION:
+ call_sec_rom_bridge(ISSWAPI_CLOSE_TA,
+ SEC_ROM_NO_FLAG_MASK,
+ ts->id,
+ NULL,
+ virt_to_phys(ts->ta),
+ virt_to_phys((void *)(&ts->err)));
+
+ /* Since the TEE Client API does NOT take care of
+ * the return value, we print a warning here if
+ * something went wrong in secure world.
+ */
+ if (ts->err != TEED_SUCCESS)
+ pr_warning("[%s] failed in secure world\n",
+ __func__);
+
+ break;
+ }
+
+ return 0;
+}
diff --git a/arch/arm/mach-ux500/timer.c b/arch/arm/mach-ux500/timer.c
index aea467d04ff..a287dbca892 100644
--- a/arch/arm/mach-ux500/timer.c
+++ b/arch/arm/mach-ux500/timer.c
@@ -7,6 +7,7 @@
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/clksrc-dbx500-prcmu.h>
+#include <linux/clksrc-db5500-mtimer.h>
#include <asm/localtimer.h>
@@ -14,6 +15,21 @@
#include <mach/setup.h>
#include <mach/hardware.h>
+#include <mach/context.h>
+
+#ifdef CONFIG_UX500_CONTEXT
+static int mtu_context_notifier_call(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ if (event == CONTEXT_APE_RESTORE)
+ nmdk_clksrc_reset();
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mtu_context_notifier = {
+ .notifier_call = mtu_context_notifier_call,
+};
+#endif
static void __init ux500_timer_init(void)
{
@@ -47,13 +63,23 @@ static void __init ux500_timer_init(void)
* depending on delay which is not yet calibrated. RTC-RTT is in the
* always-on powerdomain and is used as clockevent instead of twd when
* sleeping.
- * The PRCMU timer 4(3 for DB5500) register a clocksource and
- * sched_clock with higher rating then MTU since is always-on.
*
+ * The PRCMU timer 4 (3 for DB5500) registers a clocksource and
+ * sched_clock with higher rating than the MTU since it is
+ * always-on.
+ *
+ * On DB5500, the MTIMER is the best clocksource since, unlike the
+ * PRCMU timer, it doesn't occasionally go backwards.
*/
nmdk_timer_init();
+ if (cpu_is_u5500())
+ db5500_mtimer_init(__io_address(U5500_MTIMER_BASE));
clksrc_dbx500_prcmu_init(prcmu_timer_base);
+
+#ifdef CONFIG_UX500_CONTEXT
+ WARN_ON(context_ape_notifier_register(&mtu_context_notifier));
+#endif
}
static void ux500_timer_reset(void)
diff --git a/arch/arm/mach-ux500/uart-db8500.c b/arch/arm/mach-ux500/uart-db8500.c
new file mode 100644
index 00000000000..fad9b9a13df
--- /dev/null
+++ b/arch/arm/mach-ux500/uart-db8500.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/amba/serial.h>
+#include <mach/setup.h>
+#include <mach/hardware.h>
+#include <mach/context.h>
+
+#ifdef CONFIG_UX500_CONTEXT
+
+static struct {
+ struct clk *uart_clk;
+ void __iomem *base;
+ /* dr */
+ /* rsr_err */
+ u32 dma_wm;
+ u32 timeout;
+ /* fr */
+ u32 lcrh_rx;
+ u32 ilpr;
+ u32 ibrd;
+ u32 fbrd;
+ u32 lcrh_tx;
+ u32 cr;
+ u32 ifls;
+ u32 imsc;
+ /* ris */
+ /* mis */
+ /* icr */
+ u32 dmacr;
+ u32 xfcr;
+ u32 xon1;
+ u32 xon2;
+ u32 xoff1;
+ u32 xoff2;
+ /* itcr */
+ /* itip */
+ /* itop */
+ /* tdr */
+ u32 abcr;
+ /* absr */
+ /* abfmt */
+ /* abdr */
+ /* abdfr */
+ /* abmr */
+ u32 abimsc;
+ /* abris */
+ /* abmis */
+ /* abicr */
+ /* id_product_h_xy */
+ /* id_provider */
+ /* periphid0 */
+ /* periphid1 */
+ /* periphid2 */
+ /* periphid3 */
+ /* pcellid0 */
+ /* pcellid1 */
+ /* pcellid2 */
+ /* pcellid3 */
+} context_uart;
+
+static void save_uart(void)
+{
+ void __iomem *membase;
+
+ membase = context_uart.base;
+
+ clk_enable(context_uart.uart_clk);
+
+ context_uart.dma_wm = readl_relaxed(membase + ST_UART011_DMAWM);
+ context_uart.timeout = readl_relaxed(membase + ST_UART011_TIMEOUT);
+ context_uart.lcrh_rx = readl_relaxed(membase + ST_UART011_LCRH_RX);
+ context_uart.ilpr = readl_relaxed(membase + UART01x_ILPR);
+ context_uart.ibrd = readl_relaxed(membase + UART011_IBRD);
+ context_uart.fbrd = readl_relaxed(membase + UART011_FBRD);
+ context_uart.lcrh_tx = readl_relaxed(membase + ST_UART011_LCRH_TX);
+ context_uart.cr = readl_relaxed(membase + UART011_CR);
+ context_uart.ifls = readl_relaxed(membase + UART011_IFLS);
+ context_uart.imsc = readl_relaxed(membase + UART011_IMSC);
+ context_uart.dmacr = readl_relaxed(membase + UART011_DMACR);
+ context_uart.xfcr = readl_relaxed(membase + ST_UART011_XFCR);
+ context_uart.xon1 = readl_relaxed(membase + ST_UART011_XON1);
+ context_uart.xon2 = readl_relaxed(membase + ST_UART011_XON2);
+ context_uart.xoff1 = readl_relaxed(membase + ST_UART011_XOFF1);
+ context_uart.xoff2 = readl_relaxed(membase + ST_UART011_XOFF2);
+ context_uart.abcr = readl_relaxed(membase + ST_UART011_ABCR);
+ context_uart.abimsc = readl_relaxed(membase + ST_UART011_ABIMSC);
+
+ clk_disable(context_uart.uart_clk);
+}
+
+static void restore_uart(void)
+{
+ int cnt;
+ int retries = 100;
+ unsigned int cr;
+ void __iomem *membase;
+ u16 dummy;
+ bool show_warn = false;
+
+ membase = context_uart.base;
+ clk_enable(context_uart.uart_clk);
+
+ writew_relaxed(context_uart.ifls, membase + UART011_IFLS);
+ cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
+
+ writew_relaxed(cr, membase + UART011_CR);
+ writew_relaxed(0, membase + UART011_FBRD);
+ writew_relaxed(1, membase + UART011_IBRD);
+ writew_relaxed(0, membase + ST_UART011_LCRH_RX);
+ if (context_uart.lcrh_tx != ST_UART011_LCRH_RX) {
+ int i;
+		/*
+		 * Wait 10 PCLKs before writing the LCRH_TX register;
+		 * to get this delay, read a read-only register 10 times.
+		 */
+ for (i = 0; i < 10; ++i)
+ dummy = readw(membase + ST_UART011_LCRH_RX);
+ writew_relaxed(0, membase + ST_UART011_LCRH_TX);
+ }
+ writew(0, membase + UART01x_DR);
+ do {
+ if (!(readw(membase + UART01x_FR) & UART01x_FR_BUSY))
+ break;
+ cpu_relax();
+ } while (retries-- > 0);
+ if (retries < 0)
+ /*
+ * We can't print out a warning here since the uart is
+ * not fully restored. Do it later.
+ */
+ show_warn = true;
+
+ writel_relaxed(context_uart.dma_wm, membase + ST_UART011_DMAWM);
+ writel_relaxed(context_uart.timeout, membase + ST_UART011_TIMEOUT);
+ writel_relaxed(context_uart.lcrh_rx, membase + ST_UART011_LCRH_RX);
+ writel_relaxed(context_uart.ilpr, membase + UART01x_ILPR);
+ writel_relaxed(context_uart.ibrd, membase + UART011_IBRD);
+ writel_relaxed(context_uart.fbrd, membase + UART011_FBRD);
+	/*
+	 * Wait 10 PCLKs before writing the LCRH_TX register;
+	 * to get this delay, read a read-only register 10 - 3 = 7
+	 * times, since three writes have already been issued after
+	 * ST_UART011_LCRH_RX.
+	 */
+ for (cnt = 0; cnt < 7; cnt++)
+ dummy = readw(membase + ST_UART011_LCRH_RX);
+
+ writel_relaxed(context_uart.lcrh_tx, membase + ST_UART011_LCRH_TX);
+ writel_relaxed(context_uart.ifls, membase + UART011_IFLS);
+ writel_relaxed(context_uart.dmacr, membase + UART011_DMACR);
+ writel_relaxed(context_uart.xfcr, membase + ST_UART011_XFCR);
+ writel_relaxed(context_uart.xon1, membase + ST_UART011_XON1);
+ writel_relaxed(context_uart.xon2, membase + ST_UART011_XON2);
+ writel_relaxed(context_uart.xoff1, membase + ST_UART011_XOFF1);
+ writel_relaxed(context_uart.xoff2, membase + ST_UART011_XOFF2);
+ writel_relaxed(context_uart.abcr, membase + ST_UART011_ABCR);
+ writel_relaxed(context_uart.abimsc, membase + ST_UART011_ABIMSC);
+ writel_relaxed(context_uart.cr, membase + UART011_CR);
+ writel(context_uart.imsc, membase + UART011_IMSC);
+
+ clk_disable(context_uart.uart_clk);
+
+ if (show_warn)
+ pr_warning("%s:uart tx busy\n", __func__);
+}
+
+static int uart_context_notifier_call(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ switch (event) {
+ case CONTEXT_APE_SAVE:
+ save_uart();
+ break;
+
+ case CONTEXT_APE_RESTORE:
+ restore_uart();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block uart_context_notifier = {
+ .notifier_call = uart_context_notifier_call,
+};
+
+#define __UART_BASE(soc, x) soc##_UART##x##_BASE
+#define UART_BASE(soc, x) __UART_BASE(soc, x)
+
+static int __init uart_context_notifier_init(void)
+{
+ unsigned long base;
+ static const char clkname[] __initconst
+ = "uart" __stringify(CONFIG_UX500_DEBUG_UART);
+
+ if (cpu_is_u8500())
+ base = UART_BASE(U8500, CONFIG_UX500_DEBUG_UART);
+ else if (cpu_is_u5500())
+ base = UART_BASE(U5500, CONFIG_UX500_DEBUG_UART);
+ else
+ ux500_unknown_soc();
+
+ context_uart.base = ioremap(base, SZ_4K);
+ context_uart.uart_clk = clk_get_sys(clkname, NULL);
+
+ if (IS_ERR(context_uart.uart_clk)) {
+ pr_err("%s:unable to get clk-uart%d\n", __func__,
+ CONFIG_UX500_DEBUG_UART);
+ return -EINVAL;
+ }
+
+ return WARN_ON(context_ape_notifier_register(&uart_context_notifier));
+}
+arch_initcall(uart_context_notifier_init);
+#endif
diff --git a/arch/arm/mach-ux500/usb.c b/arch/arm/mach-ux500/usb.c
index 9f9e1c20306..bf9ccfc83be 100644
--- a/arch/arm/mach-ux500/usb.c
+++ b/arch/arm/mach-ux500/usb.c
@@ -10,6 +10,9 @@
#include <plat/ste_dma40.h>
#include <mach/hardware.h>
#include <mach/usb.h>
+#include <plat/pincfg.h>
+#include "pins.h"
+#include "board-ux500-usb.h"
#define MUSB_DMA40_RX_CH { \
.mode = STEDMA40_MODE_LOGICAL, \
@@ -31,6 +34,8 @@
.dst_info.psize = STEDMA40_PSIZE_LOG_16, \
}
+#define USB_OTG_GPIO_CS 76
+
static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_CHANNELS]
= {
MUSB_DMA40_RX_CH,
@@ -85,9 +90,54 @@ static struct ux500_musb_board_data musb_board_data = {
.dma_filter = stedma40_filter,
};
+#ifdef CONFIG_USB_UX500_DMA
static u64 ux500_musb_dmamask = DMA_BIT_MASK(32);
+#else
+static u64 ux500_musb_dmamask = DMA_BIT_MASK(0);
+#endif
+static struct ux500_pins *usb_gpio_pins;
+
+/*
+ * FIFO mode: the sum of all endpoint maxpacket allocations must not
+ * exceed 12 KB, since the ux500 MUSB core provides only 12 KB of
+ * FIFO RAM.
+ *
+ * Double buffering is enabled on the endpoint used by the Mass
+ * Storage Class gadget.
+ */
+static struct musb_fifo_cfg ux500_mode_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
static struct musb_hdrc_config musb_hdrc_config = {
+ .fifo_cfg = ux500_mode_cfg, /* Fifo configuration */
+ .fifo_cfg_size = ARRAY_SIZE(ux500_mode_cfg),
.multipoint = true,
.dyn_fifo = true,
.num_eps = 16,
@@ -124,6 +174,38 @@ struct platform_device ux500_musb_device = {
.resource = usb_resources,
};
+static void enable_gpio(void)
+{
+ ux500_pins_enable(usb_gpio_pins);
+}
+static void disable_gpio(void)
+{
+ ux500_pins_disable(usb_gpio_pins);
+}
+static int get_gpio(struct device *device)
+{
+ usb_gpio_pins = ux500_pins_get(dev_name(device));
+
+	if (usb_gpio_pins == NULL) {
+		dev_err(device, "Could not get %s:usb_gpio_pins structure\n",
+			dev_name(device));
+
+		/* ux500_pins_get() returned NULL, so PTR_ERR() would be 0 */
+		return -ENODEV;
+	}
+ return 0;
+}
+static void put_gpio(void)
+{
+ ux500_pins_put(usb_gpio_pins);
+}
+struct abx500_usbgpio_platform_data abx500_usbgpio_plat_data = {
+ .get = &get_gpio,
+ .enable = &enable_gpio,
+ .disable = &disable_gpio,
+ .put = &put_gpio,
+ .usb_cs = USB_OTG_GPIO_CS,
+};
+
static inline void ux500_usb_dma_update_rx_ch_config(int *src_dev_type)
{
u32 idx;
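As a quick check of the 12 KB budget mentioned in the FIFO comment above (a sketch only, assuming the MUSB core charges double-buffered endpoints twice their maxpacket, which is what BUF_DOUBLE implies):

/* Total FIFO RAM consumed by ux500_mode_cfg[] above. */
static unsigned int ux500_fifo_bytes(void)
{
	unsigned int total = 0;

	total += 10 * 512;		/* EP1-EP5, TX and RX, 512 bytes each */
	total += 2 * 512;		/* EP3 TX/RX double buffered: 512 bytes extra each */
	total += 14 * 32;		/* EP6-EP12, TX and RX, 32 bytes each */
	total += 512 + 1024 + 1024;	/* EP13-EP15, shared RX/TX FIFOs */

	return total;			/* 9152 bytes, within the 12288-byte limit */
}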
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 07201637109..226f8736152 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -240,6 +240,24 @@ ENTRY(fa_dma_unmap_area)
mov pc, lr
ENDPROC(fa_dma_unmap_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(fa_clean_dcache_all)
+ mov pc, lr
+ENDPROC(fa_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(fa_flush_dcache_all)
+ mov pc, lr
+ENDPROC(fa_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b1e192ba8c2..2412797a446 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -320,9 +320,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
- aux &= aux_mask;
- aux |= aux_val;
-
/* Determine the number of ways */
switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
@@ -331,6 +328,13 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
else
ways = 8;
type = "L310";
+
+ /*
+ * Set bit 22 in the auxiliary control register. If this bit
+ * is cleared, PL310 treats Normal Shared Non-cacheable
+ * accesses as Cacheable no-allocate.
+ */
+ aux_val |= 1 << 22;
break;
case L2X0_CACHE_ID_PART_L210:
ways = (aux >> 13) & 0xf;
@@ -358,6 +362,9 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
* accessing the below registers will fault.
*/
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ aux &= aux_mask;
+ aux |= aux_val;
+
/* Make sure that I&D is not locked down when starting */
l2x0_unlock(cache_id);
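The net effect of the two hunks above can be summarised as follows (a sketch only; the helper and its arguments stand in for values handled inside l2x0_init() and are not part of the patch):

#include <linux/types.h>

static u32 l2x0_compute_aux(u32 aux, u32 aux_val, u32 aux_mask, bool is_l310)
{
	if (is_l310)
		aux_val |= 1 << 22;	/* shared non-cacheable fix for PL310 */

	/* the combined value is now only applied while the L2 is still off */
	aux &= aux_mask;
	aux |= aux_val;
	return aux;
}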
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2301f22610..ab5bf508a2a 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -127,6 +127,24 @@ ENTRY(v3_dma_map_area)
ENDPROC(v3_dma_unmap_area)
ENDPROC(v3_dma_map_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v3_clean_dcache_all)
+ mov pc, lr
+ENDPROC(v3_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v3_flush_dcache_all)
+ mov pc, lr
+ENDPROC(v3_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index fd9bb7addc8..9d3a055127e 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -139,6 +139,24 @@ ENTRY(v4_dma_map_area)
ENDPROC(v4_dma_unmap_area)
ENDPROC(v4_dma_map_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v4_clean_dcache_all)
+ mov pc, lr
+ENDPROC(v4_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v4_flush_dcache_all)
+ mov pc, lr
+ENDPROC(v4_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 4f2c14151cc..54d3cda4a89 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -251,6 +251,24 @@ ENTRY(v4wb_dma_unmap_area)
mov pc, lr
ENDPROC(v4wb_dma_unmap_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v4wb_clean_dcache_all)
+ mov pc, lr
+ENDPROC(v4wb_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v4wb_flush_dcache_all)
+ mov pc, lr
+ENDPROC(v4wb_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 4d7b467631c..40f7dba11f5 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -195,6 +195,24 @@ ENTRY(v4wt_dma_map_area)
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v4wt_clean_dcache_all)
+ mov pc, lr
+ENDPROC(v4wt_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v4wt_flush_dcache_all)
+ mov pc, lr
+ENDPROC(v4wt_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 74c2e5a33a4..b88dd4ab038 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -328,6 +328,24 @@ ENTRY(v6_dma_unmap_area)
mov pc, lr
ENDPROC(v6_dma_unmap_area)
+/*
+ * clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v6_clean_dcache_all)
+ mov pc, lr
+ENDPROC(v6_clean_dcache_all)
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v6_flush_dcache_all)
+ mov pc, lr
+ENDPROC(v6_flush_dcache_all)
+
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a655d3da386..70744d6a066 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -33,7 +33,7 @@ ENTRY(v7_flush_icache_all)
ENDPROC(v7_flush_icache_all)
/*
- * v7_flush_dcache_all()
+ * __v7_flush_dcache_all()
*
* Flush the whole D-cache.
*
@@ -41,7 +41,7 @@ ENDPROC(v7_flush_icache_all)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v7_flush_dcache_all)
+ENTRY(__v7_flush_dcache_all)
dmb @ ensure ordering with previous memory accesses
mrc p15, 1, r0, c0, c0, 1 @ read clidr
ands r3, r0, #0x7000000 @ extract loc from clidr
@@ -94,9 +94,93 @@ finished:
dsb
isb
mov pc, lr
+ENDPROC(__v7_flush_dcache_all)
+
+/*
+ * __v7_clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ *
+ * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ */
+ENTRY(__v7_clean_dcache_all)
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq finished1 @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 0
+loop21:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt skip1 @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+	isb				@ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+loop22:
+ mov r9, r4 @ create working copy of max way size
+loop23:
+ ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
+ THUMB( lsl r6, r9, r5 )
+ THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
+ ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
+ THUMB( lsl r6, r7, r2 )
+ THUMB( orr r11, r11, r6 ) @ factor index number into r11
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+ subs r9, r9, #1 @ decrement the way
+ bge loop23
+ subs r7, r7, #1 @ decrement the index
+ bge loop22
+skip1:
+ add r10, r10, #2 @ increment cache number
+ cmp r3, r10
+ bgt loop21
+finished1:
+	mov	r10, #0				@ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ mov pc, lr
+ENDPROC(__v7_clean_dcache_all)
+
+/*
+ * v7_flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ */
+ENTRY(v7_flush_dcache_all)
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ bl __v7_flush_dcache_all
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
ENDPROC(v7_flush_dcache_all)
/*
+ * v7_clean_dcache_all()
+ *
+ * Clean the whole D-cache.
+ */
+ENTRY(v7_clean_dcache_all)
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ bl __v7_clean_dcache_all
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
+ENDPROC(v7_clean_dcache_all)
+
+/*
* v7_flush_cache_all()
*
* Flush the entire cache system.
@@ -108,14 +192,12 @@ ENDPROC(v7_flush_dcache_all)
*
*/
ENTRY(v7_flush_kern_cache_all)
- ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
- THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ stmfd sp!, {lr}
bl v7_flush_dcache_all
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
- ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
- THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ ldmfd sp!, {lr}
mov pc, lr
ENDPROC(v7_flush_kern_cache_all)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 94c5a0c94f5..50166e8bdad 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -286,6 +286,20 @@ static struct mem_type mem_types[] = {
PMD_SECT_UNCACHED | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
+	/*
+	 * NOTE: this is only a temporary hack. The U8500 ED/V1.0 cuts
+	 * require such a memory type for deep sleep resume. This is
+	 * expected to be solved in cut v2.0, at which point this entry
+	 * can be removed. See the commit message for more details.
+	 */
+ [MT_BACKUP_RAM] = {
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+ L_PTE_SHARED,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
+ .domain = DOMAIN_IO,
+ },
};
const struct mem_type *get_mem_type(unsigned int type)
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 2d8ff3ad86d..518ab10fea5 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -304,6 +304,8 @@ ENTRY(\name\()_cache_fns)
.long \name\()_coherent_kern_range
.long \name\()_coherent_user_range
.long \name\()_flush_kern_dcache_area
+ .long \name\()_clean_dcache_all
+ .long \name\()_flush_dcache_all
.long \name\()_dma_map_area
.long \name\()_dma_unmap_area
.long \name\()_dma_flush_range
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index f1c8486f750..a6ba8ccbf52 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -172,7 +172,7 @@ __v7_ca15mp_setup:
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
- bl v7_flush_dcache_all
+ bl __v7_flush_dcache_all
ldmia r12, {r0-r5, r7, r9, r11, lr}
mrc p15, 0, r0, c0, c0, 0 @ read main ID register
diff --git a/arch/arm/plat-nomadik/include/plat/mtu.h b/arch/arm/plat-nomadik/include/plat/mtu.h
index 6508e7694a4..5c97b3ccf76 100644
--- a/arch/arm/plat-nomadik/include/plat/mtu.h
+++ b/arch/arm/plat-nomadik/include/plat/mtu.h
@@ -7,5 +7,7 @@ extern void __iomem *mtu_base;
void nmdk_clkevt_reset(void);
void nmdk_clksrc_reset(void);
+struct clock_event_device *nmdk_clkevt_get(void);
+
#endif /* __PLAT_MTU_H */
diff --git a/arch/arm/plat-nomadik/include/plat/pincfg.h b/arch/arm/plat-nomadik/include/plat/pincfg.h
index 22cb97d2d8a..c015133a7ad 100644
--- a/arch/arm/plat-nomadik/include/plat/pincfg.h
+++ b/arch/arm/plat-nomadik/include/plat/pincfg.h
@@ -24,6 +24,7 @@
* bit 16..18 - SLPM pull up/down state
* bit 19..20 - SLPM direction
* bit 21..22 - SLPM Value (if output)
+ * bit 23..24 - SLPM pull disable (PDIS) value (if input), bit 25 - low EMI mode
*
* to facilitate the definition, the following macros are provided
*
@@ -67,6 +68,10 @@ typedef unsigned long pin_cfg_t;
/* These two replace the above in DB8500v2+ */
#define PIN_SLPM_WAKEUP_ENABLE (NMK_GPIO_SLPM_WAKEUP_ENABLE << PIN_SLPM_SHIFT)
#define PIN_SLPM_WAKEUP_DISABLE (NMK_GPIO_SLPM_WAKEUP_DISABLE << PIN_SLPM_SHIFT)
+#define PIN_SLPM_USE_MUX_SETTINGS_IN_SLEEP PIN_SLPM_WAKEUP_DISABLE
+
+#define PIN_SLPM_GPIO PIN_SLPM_WAKEUP_ENABLE /* In SLPM, pin is a gpio */
+#define PIN_SLPM_ALTFUNC PIN_SLPM_WAKEUP_DISABLE /* In SLPM, pin is altfunc */
#define PIN_DIR_SHIFT 14
#define PIN_DIR_MASK (0x1 << PIN_DIR_SHIFT)
@@ -105,6 +110,20 @@ typedef unsigned long pin_cfg_t;
#define PIN_SLPM_VAL_LOW ((1 + 0) << PIN_SLPM_VAL_SHIFT)
#define PIN_SLPM_VAL_HIGH ((1 + 1) << PIN_SLPM_VAL_SHIFT)
+#define PIN_SLPM_PDIS_SHIFT 23
+#define PIN_SLPM_PDIS_MASK (0x3 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS(x) \
+ (((x) & PIN_SLPM_PDIS_MASK) >> PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_NO_CHANGE (0 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_DISABLED (1 << PIN_SLPM_PDIS_SHIFT)
+#define PIN_SLPM_PDIS_ENABLED (2 << PIN_SLPM_PDIS_SHIFT)
+
+#define PIN_LOWEMI_SHIFT 25
+#define PIN_LOWEMI_MASK (0x1 << PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI(x) (((x) & PIN_LOWEMI_MASK) >> PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI_DISABLED (0 << PIN_LOWEMI_SHIFT)
+#define PIN_LOWEMI_ENABLED (1 << PIN_LOWEMI_SHIFT)
+
/* Shortcuts. Use these instead of separate DIR, PULL, and VAL. */
#define PIN_INPUT_PULLDOWN (PIN_DIR_INPUT | PIN_PULL_DOWN)
#define PIN_INPUT_PULLUP (PIN_DIR_INPUT | PIN_PULL_UP)
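A board pin table could combine the new sleep-mode flags roughly as follows (a sketch only; the GPIO number is made up and PIN_CFG()/PIN_INPUT_PULLUP are assumed to be the helpers already provided by this header):

#include <plat/pincfg.h>

static pin_cfg_t my_board_sleep_pins[] = {
	/* input with pull-up; in sleep, disable the pull and use low-EMI mode */
	PIN_CFG(75, GPIO) | PIN_INPUT_PULLUP |
		PIN_SLPM_PDIS_DISABLED | PIN_LOWEMI_ENABLED,
};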
diff --git a/arch/arm/plat-nomadik/include/plat/ske.h b/arch/arm/plat-nomadik/include/plat/ske.h
index 31382fbc07d..7a4fbdf3c13 100644
--- a/arch/arm/plat-nomadik/include/plat/ske.h
+++ b/arch/arm/plat-nomadik/include/plat/ske.h
@@ -22,6 +22,9 @@
#define SKE_MIS 0x18
#define SKE_ICR 0x1C
+#define SKE_KPD_MAX_ROWS 8
+#define SKE_KPD_MAX_COLS 8
+
/*
* Keypad module
*/
@@ -30,21 +33,27 @@
* struct keypad_platform_data - structure for platform specific data
* @init: pointer to keypad init function
* @exit: pointer to keypad deinitialisation function
+ * @gpio_input_pins: pointer to gpio input pins
+ * @gpio_output_pins: pointer to gpio output pins
* @keymap_data: matrix scan code table for keycodes
* @krow: maximum number of rows
* @kcol: maximum number of columns
* @debounce_ms: platform specific debounce time
* @no_autorepeat: flag for auto repetition
* @wakeup_enable: allow waking up the system
+ * @switch_delay: gpio switch_delay
*/
struct ske_keypad_platform_data {
int (*init)(void);
int (*exit)(void);
+ int *gpio_input_pins;
+ int *gpio_output_pins;
const struct matrix_keymap_data *keymap_data;
u8 krow;
u8 kcol;
u8 debounce_ms;
bool no_autorepeat;
bool wakeup_enable;
+ int switch_delay;
};
#endif /*__SKE_KPD_H*/
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index fd0ee84c45d..43f4d3a1126 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -146,6 +146,7 @@ struct stedma40_chan_cfg {
* @memcpy_conf_phy: default configuration of physical channel memcpy
* @memcpy_conf_log: default configuration of logical channel memcpy
* @disabled_channels: A vector, ending with -1, that marks physical channels
+ * @use_esram_lcla: flag for mapping the lcla into esram region
* that are for different reasons not available for the driver.
*/
struct stedma40_platform_data {
@@ -162,6 +163,24 @@ struct stedma40_platform_data {
#ifdef CONFIG_STE_DMA40
+/*
+ * stedma40_get_src_addr - get current source address
+ * @chan: the DMA channel
+ *
+ * Returns the physical address of the current source element to be read by the
+ * DMA.
+ */
+dma_addr_t stedma40_get_src_addr(struct dma_chan *chan);
+
+/*
+ * stedma40_get_dst_addr - get current destination address
+ * @chan: the DMA channel
+ *
+ * Returns the physical address of the current destination element to be
+ * written by the DMA.
+ */
+dma_addr_t stedma40_get_dst_addr(struct dma_chan *chan);
+
/**
* stedma40_filter() - Provides stedma40_chan_cfg to the
* ste_dma40 dma driver via the dmaengine framework.
@@ -205,6 +224,16 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
}
#else
+static inline dma_addr_t stedma40_get_src_addr(struct dma_chan *chan)
+{
+	return 0;
+}
+
+static inline dma_addr_t stedma40_get_dst_addr(struct dma_chan *chan)
+{
+	return 0;
+}
+
static inline bool stedma40_filter(struct dma_chan *chan, void *data)
{
return false;
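A client driver could use the two new getters, for instance, to estimate how far a running transfer has progressed (a sketch only; the buffer-tracking arguments are made up):

#include <linux/dmaengine.h>
#include <plat/ste_dma40.h>

/*
 * Bytes the DMA has written into a contiguous RX buffer starting at
 * buf_phys; only meaningful while the transfer is still active.
 */
static size_t my_rx_bytes_done(struct dma_chan *chan, dma_addr_t buf_phys)
{
	dma_addr_t cur = stedma40_get_dst_addr(chan);

	return (size_t)(cur - buf_phys);
}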
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index ad1b45b605a..f9c714a2c6f 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <asm/mach/time.h>
#include <asm/sched_clock.h>
@@ -151,6 +152,28 @@ static struct clock_event_device nmdk_clkevt = {
.set_next_event = nmdk_clkevt_next,
};
+#ifdef ARCH_HAS_READ_CURRENT_TIMER
+static void nmdk_timer_delay_loop(unsigned long loops)
+{
+ unsigned long bclock, now;
+
+ bclock = ~readl(mtu_base + MTU_VAL(0));
+ do {
+ now = ~readl(mtu_base + MTU_VAL(0));
+		/* If the timer has been cleared (suspend) or has wrapped, we exit */
+ if (unlikely(now < bclock))
+ return;
+ } while ((now - bclock) < loops);
+}
+
+/* Used to calibrate the delay */
+int read_current_timer(unsigned long *timer_val)
+{
+ *timer_val = ~readl(mtu_base + MTU_VAL(0));
+ return 0;
+}
+#endif
+
/*
* IRQ Handler for timer 1 of the MTU block.
*/
@@ -183,6 +206,11 @@ void nmdk_clksrc_reset(void)
mtu_base + MTU_CR(0));
}
+struct clock_event_device *nmdk_clkevt_get(void)
+{
+ return &nmdk_clkevt;
+}
+
void __init nmdk_timer_init(void)
{
unsigned long rate;
@@ -237,4 +265,8 @@ void __init nmdk_timer_init(void)
/* Register irq and clockevents */
setup_irq(IRQ_MTU0, &nmdk_timer_irq);
clockevents_register_device(&nmdk_clkevt);
+#ifdef ARCH_HAS_READ_CURRENT_TIMER
+ set_delay_fn(nmdk_timer_delay_loop);
+#endif
+
}
diff --git a/arch/arm/plat-omap/include/plat/ssi.h b/arch/arm/plat-omap/include/plat/ssi.h
new file mode 100644
index 00000000000..eb84c3a69f7
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/ssi.h
@@ -0,0 +1,204 @@
+/*
+ * plat/ssi.h
+ *
+ * Hardware definitions for SSI.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __OMAP_SSI_REGS_H__
+#define __OMAP_SSI_REGS_H__
+
+#define SSI_NUM_PORTS 1
+/*
+ * SSI SYS registers
+ */
+#define SSI_REVISION_REG 0
+# define SSI_REV_MAJOR 0xf0
+# define SSI_REV_MINOR 0xf
+#define SSI_SYSCONFIG_REG 0x10
+# define SSI_AUTOIDLE (1 << 0)
+# define SSI_SOFTRESET (1 << 1)
+# define SSI_SIDLEMODE_FORCE 0
+# define SSI_SIDLEMODE_NO (1 << 3)
+# define SSI_SIDLEMODE_SMART (1 << 4)
+# define SSI_SIDLEMODE_MASK 0x18
+# define SSI_MIDLEMODE_FORCE 0
+# define SSI_MIDLEMODE_NO (1 << 12)
+# define SSI_MIDLEMODE_SMART (1 << 13)
+# define SSI_MIDLEMODE_MASK 0x3000
+#define SSI_SYSSTATUS_REG 0x14
+# define SSI_RESETDONE 1
+#define SSI_MPU_STATUS_REG(port, irq) (0x808 + ((port) * 0x10) + ((irq) * 2))
+#define SSI_MPU_ENABLE_REG(port, irq) (0x80c + ((port) * 0x10) + ((irq) * 8))
+# define SSI_DATAACCEPT(channel) (1 << (channel))
+# define SSI_DATAAVAILABLE(channel) (1 << ((channel) + 8))
+# define SSI_DATAOVERRUN(channel) (1 << ((channel) + 16))
+# define SSI_ERROROCCURED (1 << 24)
+# define SSI_BREAKDETECTED (1 << 25)
+#define SSI_GDD_MPU_IRQ_STATUS_REG 0x0800
+#define SSI_GDD_MPU_IRQ_ENABLE_REG 0x0804
+# define SSI_GDD_LCH(channel) (1 << (channel))
+#define SSI_WAKE_REG(port) (0xc00 + ((port) * 0x10))
+#define SSI_CLEAR_WAKE_REG(port) (0xc04 + ((port) * 0x10))
+#define SSI_SET_WAKE_REG(port) (0xc08 + ((port) * 0x10))
+# define SSI_WAKE(channel) (1 << (channel))
+# define SSI_WAKE_MASK 0xff
+
+/*
+ * SSI SST registers
+ */
+#define SSI_SST_ID_REG 0
+#define SSI_SST_MODE_REG 4
+# define SSI_MODE_VAL_MASK 3
+# define SSI_MODE_SLEEP 0
+# define SSI_MODE_STREAM 1
+# define SSI_MODE_FRAME 2
+# define SSI_MODE_MULTIPOINTS 3
+#define SSI_SST_FRAMESIZE_REG 8
+# define SSI_FRAMESIZE_DEFAULT 31
+#define SSI_SST_TXSTATE_REG 0xc
+# define SSI_TXSTATE_IDLE 0
+#define SSI_SST_BUFSTATE_REG 0x10
+# define SSI_FULL(channel) (1 << (channel))
+#define SSI_SST_DIVISOR_REG 0x18
+# define SSI_MAX_DIVISOR 127
+#define SSI_SST_BREAK_REG 0x20
+#define SSI_SST_CHANNELS_REG 0x24
+# define SSI_CHANNELS_DEFAULT 4
+#define SSI_SST_ARBMODE_REG 0x28
+# define SSI_ARBMODE_ROUNDROBIN 0
+# define SSI_ARBMODE_PRIORITY 1
+#define SSI_SST_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4))
+#define SSI_SST_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4))
+
+/*
+ * SSI SSR registers
+ */
+#define SSI_SSR_ID_REG 0
+#define SSI_SSR_MODE_REG 4
+#define SSI_SSR_FRAMESIZE_REG 8
+#define SSI_SSR_RXSTATE_REG 0xc
+#define SSI_SSR_BUFSTATE_REG 0x10
+# define SSI_NOTEMPTY(channel) (1 << (channel))
+#define SSI_SSR_BREAK_REG 0x1c
+#define SSI_SSR_ERROR_REG 0x20
+#define SSI_SSR_ERRORACK_REG 0x24
+#define SSI_SSR_OVERRUN_REG 0x2c
+#define SSI_SSR_OVERRUNACK_REG 0x30
+#define SSI_SSR_TIMEOUT_REG 0x34
+# define SSI_TIMEOUT_DEFAULT 0
+#define SSI_SSR_CHANNELS_REG 0x28
+#define SSI_SSR_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4))
+#define SSI_SSR_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4))
+
+/*
+ * SSI GDD registers
+ */
+#define SSI_GDD_HW_ID_REG 0
+#define SSI_GDD_PPORT_ID_REG 0x10
+#define SSI_GDD_MPORT_ID_REG 0x14
+#define SSI_GDD_PPORT_SR_REG 0x20
+#define SSI_GDD_MPORT_SR_REG 0x24
+# define SSI_ACTIVE_LCH_NUM_MASK 0xff
+#define SSI_GDD_TEST_REG 0x40
+# define SSI_TEST 1
+#define SSI_GDD_GCR_REG 0x100
+# define SSI_CLK_AUTOGATING_ON (1 << 3)
+# define SSI_FREE (1 << 2)
+# define SSI_SWITCH_OFF (1 << 0)
+#define SSI_GDD_GRST_REG 0x200
+# define SSI_SWRESET 1
+#define SSI_GDD_CSDP_REG(channel) (0x800 + ((channel) * 0x40))
+# define SSI_DST_BURST_EN_MASK 0xc000
+# define SSI_DST_SINGLE_ACCESS0 0
+# define SSI_DST_SINGLE_ACCESS (1 << 14)
+# define SSI_DST_BURST_4x32_BIT (2 << 14)
+# define SSI_DST_BURST_8x32_BIT (3 << 14)
+# define SSI_DST_MASK 0x1e00
+# define SSI_DST_MEMORY_PORT (8 << 9)
+# define SSI_DST_PERIPHERAL_PORT (9 << 9)
+# define SSI_SRC_BURST_EN_MASK 0x180
+# define SSI_SRC_SINGLE_ACCESS0 0
+# define SSI_SRC_SINGLE_ACCESS (1 << 7)
+# define SSI_SRC_BURST_4x32_BIT (2 << 7)
+# define SSI_SRC_BURST_8x32_BIT (3 << 7)
+# define SSI_SRC_MASK 0x3c
+# define SSI_SRC_MEMORY_PORT (8 << 2)
+# define SSI_SRC_PERIPHERAL_PORT (9 << 2)
+# define SSI_DATA_TYPE_MASK 3
+# define SSI_DATA_TYPE_S32 2
+#define SSI_GDD_CCR_REG(channel) (0x802 + ((channel) * 0x40))
+# define SSI_DST_AMODE_MASK (3 << 14)
+# define SSI_DST_AMODE_CONST 0
+# define SSI_DST_AMODE_POSTINC (1 << 12)
+# define SSI_SRC_AMODE_MASK (3 << 12)
+# define SSI_SRC_AMODE_CONST 0
+# define SSI_SRC_AMODE_POSTINC (1 << 12)
+# define SSI_CCR_ENABLE (1 << 7)
+# define SSI_CCR_SYNC_MASK 0x1f
+#define SSI_GDD_CICR_REG(channel) (0x804 + ((channel) * 0x40))
+# define SSI_BLOCK_IE (1 << 5)
+# define SSI_HALF_IE (1 << 2)
+# define SSI_TOUT_IE (1 << 0)
+#define SSI_GDD_CSR_REG(channel) (0x806 + ((channel) * 0x40))
+# define SSI_CSR_SYNC (1 << 6)
+# define SSI_CSR_BLOCK (1 << 5)
+# define SSI_CSR_HALF (1 << 2)
+# define SSI_CSR_TOUR (1 << 0)
+#define SSI_GDD_CSSA_REG(channel) (0x808 + ((channel) * 0x40))
+#define SSI_GDD_CDSA_REG(channel) (0x80c + ((channel) * 0x40))
+#define SSI_GDD_CEN_REG(channel) (0x810 + ((channel) * 0x40))
+#define SSI_GDD_CSAC_REG(channel) (0x818 + ((channel) * 0x40))
+#define SSI_GDD_CDAC_REG(channel) (0x81a + ((channel) * 0x40))
+#define SSI_GDD_CLNK_CTRL_REG(channel) (0x828 + ((channel) * 0x40))
+# define SSI_ENABLE_LNK (1 << 15)
+# define SSI_STOP_LNK (1 << 14)
+# define SSI_NEXT_CH_ID_MASK 0xf
+
+/**
+ * struct omap_ssi_platform_data - OMAP SSI platform data
+ * @num_ports: Number of ports on the controller
+ * @get_dev_context_loss_count: Pointer to omap_pm_get_dev_context_loss_count
+ */
+struct omap_ssi_platform_data {
+ unsigned int num_ports;
+ int (*get_dev_context_loss_count)(struct device *dev);
+};
+
+/**
+ * struct omap_ssi_config - SSI board configuration
+ * @num_ports: Number of ports in use
+ * @cawake_gpio: Array of cawake gpio lines
+ */
+struct omap_ssi_board_config {
+ unsigned int num_ports;
+ int cawake_gpio[SSI_NUM_PORTS];
+};
+
+#ifdef CONFIG_OMAP_SSI_CONFIG
+extern int omap_ssi_config(struct omap_ssi_board_config *ssi_config);
+#else
+static inline int omap_ssi_config(struct omap_ssi_board_config *ssi_config)
+{
+ return 0;
+}
+#endif /* CONFIG_OMAP_SSI_CONFIG */
+
+#endif /* __OMAP_SSI_REGS_H__ */
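Board code would typically wire this header up roughly as follows (a sketch only; the CAWAKE GPIO number and the init function name are invented):

#include <linux/init.h>
#include <linux/kernel.h>
#include <plat/ssi.h>

static struct omap_ssi_board_config my_ssi_board_config = {
	.num_ports	= SSI_NUM_PORTS,
	.cawake_gpio	= { 151 },	/* hypothetical CAWAKE line */
};

static void __init my_board_ssi_init(void)
{
	if (omap_ssi_config(&my_ssi_board_config))
		pr_warning("SSI board configuration failed\n");
}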
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index cb5f0a3f1b0..097be1934ee 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -68,6 +68,25 @@ config ACORN_PARTITION_RISCIX
of machines called RISCiX. If you say 'Y' here, Linux will be able
to read disks partitioned under RISCiX.
+config BLKDEV_PARTITION
+ bool "Blockdev commandline partition support" if PARTITION_ADVANCED
+ default n
+ help
+	  Say Y if you would like to set up partitions for block devices by
+	  reading them from the kernel command line (kernel boot arguments).
+
+ The format of the partitions on the command line:
+ blkdevparts=<blkdev-def>[;<blkdev-def>]
+ <blkdev-def> := <blkdev-id>:<partdef>[,<partdef>]
+ <partdef> := <size>[@<offset>]
+
+ <blkdev-id> := unique id used to map driver to blockdev name
+ <size> := size in numbers of sectors
+ <offset> := offset in sectors for partition to start at
+
+ Example:
+ blkdevparts=mmc0:1024@0,524288@1024;mmc1:8192@0,8192@8192
+
config OSF_PARTITION
bool "Alpha OSF partition support" if PARTITION_ADVANCED
default y if ALPHA
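To make the syntax above concrete (a worked example assuming 512-byte sectors): blkdevparts=mmc0:1024@0,524288@1024 creates two partitions on the device registered as "mmc0": the first spanning sectors 0-1023 (512 KiB), the second spanning sectors 1024-525311 (256 MiB).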
diff --git a/block/partitions/Makefile b/block/partitions/Makefile
index 03af8eac51d..48b216c53db 100644
--- a/block/partitions/Makefile
+++ b/block/partitions/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_BLOCK) := check.o
obj-$(CONFIG_ACORN_PARTITION) += acorn.o
obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
obj-$(CONFIG_ATARI_PARTITION) += atari.o
+obj-$(CONFIG_BLKDEV_PARTITION) += blkdev_parts.o
obj-$(CONFIG_MAC_PARTITION) += mac.o
obj-$(CONFIG_LDM_PARTITION) += ldm.o
obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
diff --git a/block/partitions/blkdev_parts.c b/block/partitions/blkdev_parts.c
new file mode 100755
index 00000000000..030565b7ce7
--- /dev/null
+++ b/block/partitions/blkdev_parts.c
@@ -0,0 +1,127 @@
+/*
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ulf Hansson <ulf.hansson@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Create partitions for block devices by reading from the kernel
+ * command line (kernel boot arguments).
+ *
+ */
+
+#include "check.h"
+#include "blkdev_parts.h"
+
+static char *cmdline;
+
+/*
+ * This is the handler for our kernel commandline parameter,
+ * called from main.c::checksetup().
+ * Note that we can not yet kmalloc() anything, so we only save
+ * the commandline for later processing.
+ */
+static int cmdline_setup(char *s)
+{
+ cmdline = s;
+ return 1;
+}
+__setup("blkdevparts=", cmdline_setup);
+
+/* Parse for a matching blkdev-id and return pointer to partdef */
+static char *parse_blkdev_id(char *blkdev_name)
+{
+ int blkdev_id_len;
+ char *p, *blkdev_id;
+
+ /* Start parsing for a matching blkdev-id */
+ p = blkdev_id = cmdline;
+ while (blkdev_id != NULL) {
+
+ /* Find the end of the blkdev-id string */
+ p = strchr(blkdev_id, ':');
+ if (p == NULL)
+ return NULL;
+
+ /* Check if we found a matching blkdev-id */
+ blkdev_id_len = p - blkdev_id;
+ if (strlen(blkdev_name) == blkdev_id_len) {
+ if (strncmp(blkdev_name, blkdev_id, blkdev_id_len) == 0)
+ return p;
+ }
+
+ /* Move to next blkdev-id string if there is one */
+ blkdev_id = strchr(p, ';');
+ if (blkdev_id != NULL)
+ blkdev_id++;
+ }
+ return NULL;
+}
+
+static int parse_partdef(char **part, struct parsed_partitions *state, int part_nbr)
+{
+ sector_t size, offset;
+ char *p = *part;
+
+ /* Skip the beginning "," or ":" */
+ p++;
+
+ /* Fetch and verify size from partdef */
+ size = simple_strtoull(p, &p, 10);
+ if ((size == 0) || (*p != '@'))
+ return 0;
+
+ /* Skip the "@" */
+ p++;
+
+ /* Fetch offset from partdef and check if there are more parts */
+ offset = simple_strtoull(p, &p, 10);
+ if (*p == ',')
+ *part = p;
+ else
+ *part = NULL;
+
+ /* Add partition to state */
+ put_partition(state, part_nbr, offset, size);
+ printk(KERN_INFO "\nPartition: size=%llu, offset=%llu\n",
+ (unsigned long long) size,
+ (unsigned long long) offset);
+ return 1;
+}
+
+static int parse_blkdev_parts(char *blkdev_name, struct parsed_partitions *state)
+{
+ char *partdef;
+ int part_nbr = 0;
+
+ /* Find partdef */
+ partdef = parse_blkdev_id(blkdev_name);
+
+ /* Add parts */
+ while (partdef != NULL) {
+ /* Find next part and add it to state */
+ part_nbr++;
+ if (!parse_partdef(&partdef, state, part_nbr))
+ return 0;
+ }
+ return part_nbr;
+}
+
+int blkdev_partition(struct parsed_partitions *state)
+{
+ char blkdev_name[BDEVNAME_SIZE];
+
+ /* Check if there are any partitions to handle */
+ if (cmdline == NULL)
+ return 0;
+
+ /* Get the name of the blockdevice we are operating upon */
+ if (bdevname(state->bdev, blkdev_name) == NULL) {
+ printk(KERN_WARNING "Could not get a blkdev name\n");
+ return 0;
+ }
+
+ /* Parse for partitions and add them to the state */
+ return parse_blkdev_parts(blkdev_name, state);
+}
+
diff --git a/block/partitions/blkdev_parts.h b/block/partitions/blkdev_parts.h
new file mode 100755
index 00000000000..16d2b571625
--- /dev/null
+++ b/block/partitions/blkdev_parts.h
@@ -0,0 +1,14 @@
+/*
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ulf Hansson <ulf.hansson@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Create partitions for block devices by reading from the kernel
+ * command line (kernel boot arguments).
+ *
+ */
+
+int blkdev_partition(struct parsed_partitions *state);
+
diff --git a/block/partitions/check.c b/block/partitions/check.c
index bc908672c97..3020c577c3b 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -22,6 +22,7 @@
#include "acorn.h"
#include "amiga.h"
#include "atari.h"
+#include "blkdev_parts.h"
#include "ldm.h"
#include "mac.h"
#include "msdos.h"
@@ -41,6 +42,9 @@ static int (*check_part[])(struct parsed_partitions *) = {
* Probe partition formats with tables at disk address 0
* that also have an ADFS boot block at 0xdc0.
*/
+#ifdef CONFIG_BLKDEV_PARTITION
+ blkdev_partition,
+#endif
#ifdef CONFIG_ACORN_PARTITION_ICS
adfspart_check_ICS,
#endif
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 6b7e6f6c102..032188148ad 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -52,6 +52,8 @@ source "drivers/i2c/Kconfig"
source "drivers/spi/Kconfig"
+source "drivers/hsi/Kconfig"
+
source "drivers/pps/Kconfig"
source "drivers/ptp/Kconfig"
@@ -126,6 +128,8 @@ source "drivers/platform/Kconfig"
source "drivers/clk/Kconfig"
+source "drivers/tee/Kconfig"
+
source "drivers/hwspinlock/Kconfig"
source "drivers/clocksource/Kconfig"
@@ -136,6 +140,9 @@ source "drivers/virt/Kconfig"
source "drivers/devfreq/Kconfig"
+source "drivers/modem/Kconfig"
+
source "drivers/gator/Kconfig"
endmenu
+
diff --git a/drivers/Makefile b/drivers/Makefile
index 5c20031df74..52c60243ac8 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
obj-$(CONFIG_MTD) += mtd/
obj-$(CONFIG_SPI) += spi/
+obj-y += hsi/
obj-y += net/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_FUSION) += message/
@@ -99,6 +100,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
+obj-$(CONFIG_MODEM) += modem/
obj-y += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
@@ -122,6 +124,7 @@ obj-y += platform/
obj-y += ieee802154/
#common clk code
obj-y += clk/
+obj-$(CONFIG_TEE_SUPPORT) += tee/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_NFC) += nfc/
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 7be9f79018e..bc92803f679 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -176,6 +176,9 @@ config GENERIC_CPU_DEVICES
bool
default n
+config SYS_SOC
+ bool
+
source "drivers/base/regmap/Kconfig"
config DMA_SHARED_BUFFER
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 610f9997a40..6a054324dd7 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -18,6 +18,7 @@ ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_MODULES) += module.o
endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
+obj-$(CONFIG_SYS_SOC) += soc.o
obj-$(CONFIG_REGMAP) += regmap/
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
new file mode 100644
index 00000000000..046b43bfcdb
--- /dev/null
+++ b/drivers/base/soc.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Maxime Coquelin <maxime.coquelin-nonst@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+struct kobject *soc_object;
+
+ssize_t show_soc_info(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct sysfs_soc_info *si = container_of(attr,
+ struct sysfs_soc_info, attr);
+
+ if (si->info)
+ return sprintf(buf, "%s\n", si->info);
+
+ return si->get_info(buf, si);
+}
+
+int __init register_sysfs_soc_info(struct sysfs_soc_info *info, int nb_info)
+{
+	int i, ret = 0;
+
+ for (i = 0; i < nb_info; i++) {
+ ret = sysfs_create_file(soc_object, &info[i].attr.attr);
+ if (ret) {
+ for (i -= 1; i >= 0; i--)
+ sysfs_remove_file(soc_object, &info[i].attr.attr);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static struct attribute *soc_attrs[] = {
+ NULL,
+};
+
+static struct attribute_group soc_attr_group = {
+ .attrs = soc_attrs,
+};
+
+int __init register_sysfs_soc(struct sysfs_soc_info *info, size_t num)
+{
+ int ret;
+
+ soc_object = kobject_create_and_add("socinfo", NULL);
+ if (!soc_object) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = sysfs_create_group(soc_object, &soc_attr_group);
+ if (ret)
+ goto kset_exit;
+
+ ret = register_sysfs_soc_info(info, num);
+ if (ret)
+ goto group_exit;
+
+ return 0;
+
+group_exit:
+ sysfs_remove_group(soc_object, &soc_attr_group);
+kset_exit:
+ kobject_put(soc_object);
+exit:
+ return ret;
+}
+
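A platform would then describe its SoC attributes and register them at boot, along the lines of this sketch (assuming struct sysfs_soc_info exposes the .attr, .info and .get_info members used above and that show_soc_info()/register_sysfs_soc() are declared in <linux/sys_soc.h>; the attribute names and values are made up):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include <linux/sys_soc.h>

static ssize_t my_soc_show_revision(char *buf, struct sysfs_soc_info *si)
{
	/* hypothetical: read the revision from hardware instead */
	return sprintf(buf, "%#x\n", 0x20);
}

static struct sysfs_soc_info my_soc_info[] = {
	{
		.attr = __ATTR(machine, S_IRUGO, show_soc_info, NULL),
		.info = "U8500",
	},
	{
		.attr = __ATTR(revision, S_IRUGO, show_soc_info, NULL),
		.get_info = my_soc_show_revision,
	},
};

static int __init my_soc_sysfs_init(void)
{
	return register_sysfs_soc(my_soc_info, ARRAY_SIZE(my_soc_info));
}
arch_initcall(my_soc_sysfs_init);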
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 32762ba769c..3b82024f70e 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -50,6 +50,11 @@ obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
obj-$(CONFIG_GPIO_TB0219) += tb0219.o
obj-$(CONFIG_TELCLOCK) += tlclk.o
+ifdef CONFIG_PHONET
+obj-$(CONFIG_U8500_SHRM) += shrm_char.o
+obj-$(CONFIG_MODEM_M6718_SPI) += m6718_modem_char.o
+endif
+
obj-$(CONFIG_MWAVE) += mwave/
obj-$(CONFIG_AGP) += agp/
obj-$(CONFIG_PCMCIA) += pcmcia/
diff --git a/drivers/char/m6718_modem_char.c b/drivers/char/m6718_modem_char.c
new file mode 100644
index 00000000000..34bfe98aa57
--- /dev/null
+++ b/drivers/char/m6718_modem_char.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ * based on shrm_char.c
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * M6718 modem char device interface.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/modem/m6718_spi/modem_char.h>
+#include <linux/modem/m6718_spi/modem_driver.h>
+
+#define NAME "IPC_ISA"
+
+#define MAX_PDU_SIZE (2000) /* largest frame we need to send */
+#define MAX_RX_FIFO_ENTRIES (10)
+#define SIZE_OF_RX_FIFO (MAX_PDU_SIZE * MAX_RX_FIFO_ENTRIES)
+#define SIZE_OF_TX_COPY_BUFFER (MAX_PDU_SIZE) /* only need 1 at a time */
+
+static u8 message_fifo[MODEM_M6718_SPI_MAX_CHANNELS][SIZE_OF_RX_FIFO];
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_LOOPBACK
+static u8 wr_mlb_msg[SIZE_OF_TX_COPY_BUFFER];
+#endif
+static u8 wr_audio_msg[SIZE_OF_TX_COPY_BUFFER];
+
+struct map_device {
+ u8 l2_header;
+ u8 idx;
+ char *name;
+};
+
+static struct map_device map_dev[] = {
+ {MODEM_M6718_SPI_CHN_ISI, 0, "isi"},
+ {MODEM_M6718_SPI_CHN_AUDIO, 1, "modemaudio"},
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_LOOPBACK
+ {MODEM_M6718_SPI_CHN_MASTER_LOOPBACK0, 2, "master_loopback0"},
+ {MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK0, 3, "slave_loopback0"},
+ {MODEM_M6718_SPI_CHN_MASTER_LOOPBACK1, 4, "master_loopback1"},
+ {MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK1, 5, "slave_loopback1"},
+#endif
+};
+
+/*
+ * major - variable exported as module_param to specify major node number
+ */
+static int major;
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+/* global fops mutex */
+static DEFINE_MUTEX(isa_lock);
+
+/**
+ * modem_get_cdev_index() - return the index mapped to l2 header
+ * @l2_header: L2 header
+ *
+ * struct map_device maps each index (count) to a device L2 header.
+ * This function returns the index for the provided L2 header on
+ * success, otherwise a negative value.
+ */
+int modem_get_cdev_index(u8 l2_header)
+{
+ u8 cnt;
+ for (cnt = 0; cnt < ARRAY_SIZE(map_dev); cnt++) {
+ if (map_dev[cnt].l2_header == l2_header)
+ return map_dev[cnt].idx;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(modem_get_cdev_index);
+
+/**
+ * modem_get_cdev_l2header() - return l2_header mapped to the index
+ * @idx: index
+ *
+ * struct map_device maps each index (count) to a device L2 header.
+ * This function returns the L2 header for the given index on
+ * success, otherwise a negative value.
+ */
+int modem_get_cdev_l2header(u8 idx)
+{
+ u8 cnt;
+ for (cnt = 0; cnt < ARRAY_SIZE(map_dev); cnt++) {
+ if (map_dev[cnt].idx == idx)
+ return map_dev[cnt].l2_header;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(modem_get_cdev_l2header);
+
+/**
+ * modem_isa_reset() - reset device interfaces
+ * @modem_spi_dev: pointer to modem driver information structure
+ *
+ * Empties the queue for each L2 mux channel.
+ */
+void modem_isa_reset(struct modem_spi_dev *modem_spi_dev)
+{
+ struct isa_device_context *isadev;
+ struct isa_driver_context *isa_context;
+ struct queue_element *cur_msg = NULL;
+ struct list_head *cur_msg_ptr = NULL;
+ struct list_head *msg_ptr;
+ struct message_queue *q;
+ int devidx;
+
+ dev_info(modem_spi_dev->dev, "resetting char device queues\n");
+
+ isa_context = modem_spi_dev->isa_context;
+ for (devidx = 0; devidx < ARRAY_SIZE(map_dev); devidx++) {
+ isadev = &isa_context->isadev[devidx];
+ q = &isadev->dl_queue;
+
+ spin_lock_bh(&q->update_lock);
+ /* empty out the msg queue */
+ list_for_each_safe(cur_msg_ptr, msg_ptr, &q->msg_list) {
+ cur_msg = list_entry(cur_msg_ptr,
+ struct queue_element, entry);
+ list_del(cur_msg_ptr);
+ kfree(cur_msg);
+ }
+
+ /* reset the msg queue pointers */
+ q->size = SIZE_OF_RX_FIFO;
+ q->readptr = 0;
+ q->writeptr = 0;
+ q->no = 0;
+
+ /* wake up the blocking read/select */
+ atomic_set(&q->q_rp, 1);
+ wake_up_interruptible(&q->wq_readable);
+ spin_unlock_bh(&q->update_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(modem_isa_reset);
+
+static void create_queue(struct message_queue *q, u8 channel,
+ struct modem_spi_dev *modem_spi_dev)
+{
+ q->channel = channel;
+ q->fifo_base = (u8 *)&message_fifo[channel];
+ q->size = SIZE_OF_RX_FIFO;
+ q->free = q->size;
+ q->readptr = 0;
+ q->writeptr = 0;
+ q->no = 0;
+ spin_lock_init(&q->update_lock);
+ atomic_set(&q->q_rp, 0);
+ init_waitqueue_head(&q->wq_readable);
+ INIT_LIST_HEAD(&q->msg_list);
+ q->modem_spi_dev = modem_spi_dev;
+}
+
+static void delete_queue(struct message_queue *q)
+{
+ q->size = 0;
+ q->readptr = 0;
+ q->writeptr = 0;
+}
+
+/**
+ * modem_isa_queue_msg() - add a message to a message queue
+ * @q: message queue
+ * @size: size in bytes
+ *
+ * This function tries to allocate size bytes in the FIFO q.
+ * It returns a negative number when no memory is currently
+ * available.
+ */
+int modem_isa_queue_msg(struct message_queue *q, u32 size)
+{
+ struct queue_element *new_msg = NULL;
+ struct modem_spi_dev *modem_spi_dev = q->modem_spi_dev;
+
+ new_msg = kmalloc(sizeof(struct queue_element), GFP_ATOMIC);
+ if (new_msg == NULL) {
+ dev_err(modem_spi_dev->dev,
+ "failed to allocate memory for queue item\n");
+ return -ENOMEM;
+ }
+ new_msg->offset = q->writeptr;
+ new_msg->size = size;
+ new_msg->no = q->no++;
+
+ /* check for overflow condition */
+ if (q->readptr <= q->writeptr) {
+ if (((q->writeptr - q->readptr) + size) >= q->size) {
+ dev_err(modem_spi_dev->dev, "rx q++ ch %d %d (%d)\n",
+ q->channel, size, q->free);
+ dev_err(modem_spi_dev->dev,
+ "ch%d buffer overflow, frame discarded\n",
+ q->channel);
+ return -ENOMEM;
+ }
+ } else {
+ if ((q->writeptr + size) >= q->readptr) {
+ dev_err(modem_spi_dev->dev, "rx q++ ch %d %d (%d)\n",
+ q->channel, size, q->free);
+ dev_err(modem_spi_dev->dev,
+ "ch%d buffer overflow, frame discarded\n",
+ q->channel);
+ return -ENOMEM;
+ }
+ }
+ q->free -= size;
+ q->writeptr = (q->writeptr + size) % q->size;
+ if (list_empty(&q->msg_list)) {
+ list_add_tail(&new_msg->entry, &q->msg_list);
+ /* There can be 2 blocking calls: read and another select */
+ atomic_set(&q->q_rp, 1);
+ wake_up_interruptible(&q->wq_readable);
+ } else {
+ list_add_tail(&new_msg->entry, &q->msg_list);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(modem_isa_queue_msg);
+
+/**
+ * modem_isa_unqueue_msg() - remove a message from the msg queue
+ * @q: message queue
+ *
+ * Deletes a message from the message list associated with message
+ * queue q and updates the read pointer. If the message list becomes
+ * empty, a flag is set to block the select and read calls on that
+ * particular queue.
+ *
+ * The message list is FIFO style: messages are always added to the
+ * tail and removed from the head.
+ */
+int modem_isa_unqueue_msg(struct message_queue *q)
+{
+ struct queue_element *old_msg = NULL;
+ struct list_head *msg_ptr = NULL;
+ struct list_head *old_msg_ptr = NULL;
+
+ list_for_each_safe(old_msg_ptr, msg_ptr, &q->msg_list) {
+ old_msg = list_entry(old_msg_ptr, struct queue_element, entry);
+ if (old_msg == NULL)
+ return -EFAULT;
+ list_del(old_msg_ptr);
+ q->readptr = (q->readptr + old_msg->size) % q->size;
+ q->free += old_msg->size;
+ kfree(old_msg);
+ break;
+ }
+ if (list_empty(&q->msg_list))
+ atomic_set(&q->q_rp, 0);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(modem_isa_unqueue_msg);
+
+/**
+ * modem_isa_msg_size() - retrieve the size of the most recent message
+ * @q: message queue
+ */
+int modem_isa_msg_size(struct message_queue *q)
+{
+ struct queue_element *new_msg = NULL;
+ struct list_head *msg_list;
+ unsigned long flags;
+ int size = 0;
+
+ spin_lock_irqsave(&q->update_lock, flags);
+ list_for_each(msg_list, &q->msg_list) {
+ new_msg = list_entry(msg_list, struct queue_element, entry);
+ if (new_msg == NULL) {
+ spin_unlock_irqrestore(&q->update_lock, flags);
+ return -EFAULT;
+ }
+ size = new_msg->size;
+ break;
+ }
+ spin_unlock_irqrestore(&q->update_lock, flags);
+ return size;
+}
+EXPORT_SYMBOL_GPL(modem_isa_msg_size);
+
+static u32 isa_select(struct file *filp, struct poll_table_struct *wait)
+{
+ struct isa_device_context *isadev = filp->private_data;
+ struct modem_spi_dev *modem_spi_dev = isadev->dl_queue.modem_spi_dev;
+ struct message_queue *q;
+ u32 m = iminor(filp->f_path.dentry->d_inode);
+ u8 idx = modem_get_cdev_index(m);
+
+ if (modem_spi_dev->msr_flag)
+ return -ENODEV;
+ if (isadev->device_id != idx)
+ return -1;
+
+ q = &isadev->dl_queue;
+ poll_wait(filp, &q->wq_readable, wait);
+ if (atomic_read(&q->q_rp) == 1)
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
+static ssize_t isa_read(struct file *filp, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ u32 size = 0;
+ int ret;
+ char *psrc;
+ struct isa_device_context *isadev =
+ (struct isa_device_context *)filp->private_data;
+ struct message_queue *q = &isadev->dl_queue;
+ struct modem_spi_dev *modem_spi_dev = q->modem_spi_dev;
+ u32 msgsize;
+ unsigned long flags;
+
+ if (len <= 0)
+ return -EFAULT;
+
+ if (modem_spi_dev->msr_flag) {
+ atomic_set(&q->q_rp, 0);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&q->update_lock, flags);
+ if (list_empty(&q->msg_list)) {
+ spin_unlock_irqrestore(&q->update_lock, flags);
+ dev_dbg(modem_spi_dev->dev, "waiting for data on device %d\n",
+ isadev->device_id);
+ if (wait_event_interruptible(q->wq_readable,
+ atomic_read(&q->q_rp) == 1))
+ return -ERESTARTSYS;
+ } else {
+ spin_unlock_irqrestore(&q->update_lock, flags);
+ }
+
+ if (modem_spi_dev->msr_flag) {
+ atomic_set(&q->q_rp, 0);
+ return -ENODEV;
+ }
+
+ msgsize = modem_isa_msg_size(q);
+ if (len < msgsize)
+ return -EINVAL;
+
+ if ((q->readptr + msgsize) >= q->size) {
+ psrc = (char *)buf;
+ size = (q->size - q->readptr);
+ /* copy first part of msg */
+ if (copy_to_user(psrc, (u8 *)(q->fifo_base + q->readptr),
+ size))
+ return -EFAULT;
+
+ psrc += size;
+ /* copy second part of msg at the top of fifo */
+ if (copy_to_user(psrc, (u8 *)(q->fifo_base),
+ (msgsize - size)))
+ return -EFAULT;
+ } else {
+ if (copy_to_user(buf, (u8 *)(q->fifo_base + q->readptr),
+ msgsize))
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&q->update_lock, flags);
+ ret = modem_isa_unqueue_msg(q);
+ if (ret < 0)
+ msgsize = ret;
+ spin_unlock_irqrestore(&q->update_lock, flags);
+ return msgsize;
+}
+
+static ssize_t isa_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct isa_device_context *isadev = filp->private_data;
+ struct message_queue *q = &isadev->dl_queue;
+ struct modem_spi_dev *modem_spi_dev = q->modem_spi_dev;
+ struct isa_driver_context *isa_context = modem_spi_dev->isa_context;
+ void *addr = 0;
+ int err;
+ int l2_header;
+ int ret = 0;
+ unsigned long flags;
+
+ if (len <= 0 || buf == NULL)
+ return -EFAULT;
+
+ if (len > SIZE_OF_TX_COPY_BUFFER) {
+ dev_err(modem_spi_dev->dev,
+			"invalid message size %zu! max is %d bytes\n",
+ len, SIZE_OF_TX_COPY_BUFFER);
+ return -EFAULT;
+ }
+
+ l2_header = modem_get_cdev_l2header(isadev->device_id);
+ if (l2_header < 0) {
+ dev_err(modem_spi_dev->dev, "invalid L2 channel!\n");
+ return l2_header;
+ }
+
+ switch (l2_header) {
+ case MODEM_M6718_SPI_CHN_AUDIO:
+ addr = (void *)wr_audio_msg;
+ break;
+ case MODEM_M6718_SPI_CHN_MASTER_LOOPBACK0:
+ case MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK0:
+ case MODEM_M6718_SPI_CHN_MASTER_LOOPBACK1:
+ case MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK1:
+ addr = (void *)wr_mlb_msg;
+ break;
+ default:
+ dev_dbg(modem_spi_dev->dev, "invalid device!\n");
+ return -EFAULT;
+ }
+
+ if (copy_from_user(addr, buf, len))
+ return -EFAULT;
+
+ /*
+ * Special handling for audio channel:
+	 * uses a mutex instead of a spinlock
+ */
+ if (l2_header == MODEM_M6718_SPI_CHN_AUDIO ||
+ l2_header == MODEM_M6718_SPI_CHN_MASTER_LOOPBACK1) {
+ mutex_lock(&isa_context->audio_tx_mutex);
+ err = modem_m6718_spi_send(modem_spi_dev, l2_header, len, addr);
+ if (!err)
+ ret = len;
+ else
+ ret = err;
+ mutex_unlock(&modem_spi_dev->isa_context->audio_tx_mutex);
+ } else {
+ spin_lock_irqsave(&isa_context->common_tx_lock, flags);
+ err = modem_m6718_spi_send(modem_spi_dev, l2_header, len, addr);
+ if (!err)
+ ret = len;
+ else
+ ret = err;
+ spin_unlock_irqrestore(&isa_context->common_tx_lock, flags);
+ }
+ return ret;
+}
+
+static long isa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+static int isa_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static int isa_close(struct inode *inode, struct file *filp)
+{
+ struct isa_device_context *isadev = filp->private_data;
+ struct modem_spi_dev *modem_spi_dev = isadev->dl_queue.modem_spi_dev;
+ struct isa_driver_context *isa_context = modem_spi_dev->isa_context;
+ u8 m;
+ int idx;
+
+ mutex_lock(&isa_lock);
+ m = iminor(filp->f_path.dentry->d_inode);
+ idx = modem_get_cdev_index(m);
+	if (idx < 0) {
+		dev_err(modem_spi_dev->dev, "invalid L2 channel!\n");
+		mutex_unlock(&isa_lock);
+		return idx;
+	}
+
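+	/* is_open is 1 while the device is free and 0 while it is held open */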
+ if (atomic_dec_and_test(&isa_context->is_open[idx])) {
+ atomic_inc(&isa_context->is_open[idx]);
+ dev_err(modem_spi_dev->dev, "device is not open yet!\n");
+ mutex_unlock(&isa_lock);
+ return -ENODEV;
+ }
+ atomic_set(&isa_context->is_open[idx], 1);
+
+ switch (m) {
+ case MODEM_M6718_SPI_CHN_AUDIO:
+ dev_dbg(modem_spi_dev->dev, "close channel AUDIO\n");
+ break;
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_LOOPBACK
+ case MODEM_M6718_SPI_CHN_MASTER_LOOPBACK0:
+ dev_dbg(modem_spi_dev->dev, "close channel MASTER_LOOPBACK0\n");
+ break;
+ case MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK0:
+ dev_dbg(modem_spi_dev->dev, "close channel SLAVE_LOOPBACK0\n");
+ break;
+ case MODEM_M6718_SPI_CHN_MASTER_LOOPBACK1:
+ dev_dbg(modem_spi_dev->dev, "close channel MASTER_LOOPBACK1\n");
+ break;
+ case MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK1:
+ dev_dbg(modem_spi_dev->dev, "close channel SLAVE_LOOPBACK1\n");
+ break;
+#endif
+ default:
+ dev_dbg(modem_spi_dev->dev, "invalid device\n");
+ mutex_unlock(&isa_lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&isa_lock);
+ return 0;
+}
+
+static int isa_open(struct inode *inode, struct file *filp)
+{
+ int err = 0;
+ u8 m;
+ int idx;
+ struct isa_device_context *isadev;
+ struct isa_driver_context *isa_context =
+ container_of(inode->i_cdev, struct isa_driver_context, cdev);
+ struct modem_spi_dev *modem_spi_dev =
+ isa_context->isadev->dl_queue.modem_spi_dev;
+
+ if (!modem_m6718_spi_is_boot_done()) {
+ dev_dbg(modem_spi_dev->dev,
+ "failed to open device, boot is not complete\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ mutex_lock(&isa_lock);
+ m = iminor(inode);
+ idx = modem_get_cdev_index(m);
+ if (idx < 0) {
+ dev_err(modem_spi_dev->dev, "invalid device\n");
+ err = -ENODEV;
+ goto cleanup;
+ }
+
+ if (!atomic_dec_and_test(&isa_context->is_open[idx])) {
+ atomic_inc(&isa_context->is_open[idx]);
+ dev_err(modem_spi_dev->dev, "device is already open\n");
+ err = -EBUSY;
+ goto cleanup;
+ }
+
+ isadev = &isa_context->isadev[idx];
+ if (filp != NULL)
+ filp->private_data = isadev;
+
+ switch (m) {
+ case MODEM_M6718_SPI_CHN_AUDIO:
+ dev_dbg(modem_spi_dev->dev, "open channel AUDIO\n");
+ break;
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_LOOPBACK
+ case MODEM_M6718_SPI_CHN_MASTER_LOOPBACK0:
+ dev_dbg(modem_spi_dev->dev, "open channel MASTER_LOOPBACK0\n");
+ break;
+ case MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK0:
+ dev_dbg(modem_spi_dev->dev, "open channel SLAVE_LOOPBACK0\n");
+ break;
+ case MODEM_M6718_SPI_CHN_MASTER_LOOPBACK1:
+ dev_dbg(modem_spi_dev->dev, "open channel MASTER_LOOPBACK1\n");
+ break;
+ case MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK1:
+ dev_dbg(modem_spi_dev->dev, "open channel SLAVE_LOOPBACK1\n");
+ break;
+#endif
+ }
+
+cleanup:
+ mutex_unlock(&isa_lock);
+out:
+ return err;
+}
+
+const struct file_operations isa_fops = {
+ .owner = THIS_MODULE,
+ .open = isa_open,
+ .release = isa_close,
+ .unlocked_ioctl = isa_ioctl,
+ .mmap = isa_mmap,
+ .read = isa_read,
+ .write = isa_write,
+ .poll = isa_select,
+};
+
+/**
+ * modem_isa_init() - initialise the modem char device interfaces
+ * @modem_spi_dev: pointer to the modem driver information structure
+ *
+ * This function registers the modem as a char device driver and creates the
+ * char device nodes supported by the modem.
+ */
+int modem_isa_init(struct modem_spi_dev *modem_spi_dev)
+{
+ dev_t dev_id;
+ int retval;
+ int devidx;
+ struct isa_device_context *isadev;
+ struct isa_driver_context *isa_context;
+
+ dev_dbg(modem_spi_dev->dev, "registering char device interfaces\n");
+
+ isa_context = kzalloc(sizeof(struct isa_driver_context), GFP_KERNEL);
+ if (isa_context == NULL) {
+ dev_err(modem_spi_dev->dev, "failed to allocate context\n");
+ retval = -ENOMEM;
+ goto rollback;
+ }
+
+ modem_spi_dev->isa_context = isa_context;
+ if (major) {
+		/* major number specified at module load */
+ dev_id = MKDEV(major, 0);
+ retval = register_chrdev_region(dev_id,
+ MODEM_M6718_SPI_MAX_CHANNELS, NAME);
+ } else {
+ retval = alloc_chrdev_region(&dev_id, 0,
+ MODEM_M6718_SPI_MAX_CHANNELS, NAME);
+ major = MAJOR(dev_id);
+ }
+
+ dev_dbg(modem_spi_dev->dev, "device major is %d\n", major);
+
+ cdev_init(&isa_context->cdev, &isa_fops);
+ isa_context->cdev.owner = THIS_MODULE;
+ retval = cdev_add(&isa_context->cdev, dev_id,
+ MODEM_M6718_SPI_MAX_CHANNELS);
+ if (retval) {
+ dev_err(modem_spi_dev->dev, "failed to add char device\n");
+ goto rollback_register;
+ }
+
+ isa_context->modem_class = class_create(THIS_MODULE, NAME);
+ if (IS_ERR(isa_context->modem_class)) {
+ dev_err(modem_spi_dev->dev, "failed to create modem class\n");
+ retval = PTR_ERR(isa_context->modem_class);
+ goto rollback_add_dev;
+ }
+ isa_context->isadev = kzalloc(sizeof(struct isa_device_context) *
+ MODEM_M6718_SPI_MAX_CHANNELS, GFP_KERNEL);
+	if (isa_context->isadev == NULL) {
+		dev_err(modem_spi_dev->dev,
+			"failed to allocate device context\n");
+		retval = -ENOMEM;
+		goto rollback_create_dev;
+	}
+
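+	/* one device node per channel; the L2 channel header is the minor number */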
+ for (devidx = 0; devidx < ARRAY_SIZE(map_dev); devidx++) {
+ atomic_set(&isa_context->is_open[devidx], 1);
+ device_create(isa_context->modem_class,
+ NULL,
+ MKDEV(MAJOR(dev_id), map_dev[devidx].l2_header),
+ NULL,
+ map_dev[devidx].name);
+
+ isadev = &isa_context->isadev[devidx];
+ isadev->device_id = devidx;
+ create_queue(&isadev->dl_queue,
+ isadev->device_id, modem_spi_dev);
+
+ dev_dbg(modem_spi_dev->dev, "created device %d (%s) (%d.%d)\n",
+ devidx, map_dev[devidx].name, major,
+ map_dev[devidx].l2_header);
+ }
+
+ mutex_init(&isa_context->audio_tx_mutex);
+ spin_lock_init(&isa_context->common_tx_lock);
+
+ dev_dbg(modem_spi_dev->dev, "registered modem char devices\n");
+ return 0;
+
+rollback_create_dev:
+ for (devidx = 0; devidx < ARRAY_SIZE(map_dev); devidx++) {
+ device_destroy(isa_context->modem_class,
+ MKDEV(MAJOR(dev_id), map_dev[devidx].l2_header));
+ }
+ class_destroy(isa_context->modem_class);
+rollback_add_dev:
+ cdev_del(&isa_context->cdev);
+rollback_register:
+ unregister_chrdev_region(dev_id, MODEM_M6718_SPI_MAX_CHANNELS);
+ kfree(isa_context);
+ modem_spi_dev->isa_context = NULL;
+rollback:
+ return retval;
+}
+EXPORT_SYMBOL_GPL(modem_isa_init);
+
+/**
+ * modem_isa_exit() - remove the char device interfaces and clean up
+ * @modem_spi_dev: pointer to the modem driver information structure
+ */
+void modem_isa_exit(struct modem_spi_dev *modem_spi_dev)
+{
+ int devidx;
+ struct isa_device_context *isadev;
+	struct isa_driver_context *isa_context;
+	dev_t dev_id = MKDEV(major, 0);
+
+	if (!modem_spi_dev || !modem_spi_dev->isa_context)
+		return;
+	isa_context = modem_spi_dev->isa_context;
+
+ for (devidx = 0; devidx < ARRAY_SIZE(map_dev); devidx++)
+ device_destroy(isa_context->modem_class,
+ MKDEV(MAJOR(dev_id),
+ map_dev[devidx].l2_header));
+ for (devidx = 0; devidx < MODEM_M6718_SPI_MAX_CHANNELS; devidx++) {
+ isadev = &isa_context->isadev[devidx];
+ delete_queue(&isadev->dl_queue);
+ kfree(isadev);
+ }
+ class_destroy(isa_context->modem_class);
+ cdev_del(&isa_context->cdev);
+ unregister_chrdev_region(dev_id, MODEM_M6718_SPI_MAX_CHANNELS);
+ kfree(isa_context);
+ modem_spi_dev->isa_context = NULL;
+ dev_dbg(modem_spi_dev->dev, "removed modem char devices\n");
+}
+EXPORT_SYMBOL_GPL(modem_isa_exit);
+
+MODULE_AUTHOR("Chris Blair <chris.blair@stericsson.com>");
+MODULE_DESCRIPTION("M6718 modem IPC char device interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/shrm_char.c b/drivers/char/shrm_char.c
new file mode 100644
index 00000000000..e8f350e5da8
--- /dev/null
+++ b/drivers/char/shrm_char.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson
+ * Author: Kumar Sanghavi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/modem/shrm/shrm_driver.h>
+#include <linux/modem/shrm/shrm_private.h>
+#include <linux/modem/shrm/shrm_config.h>
+#include <linux/modem/shrm/shrm.h>
+#include <asm/atomic.h>
+
+#include <mach/isa_ioctl.h>
+
+
+#define NAME "IPC_ISA"
+/* L2 header for rtc_calibration device is 0xC8 and hence 0xC8 + 1 = 201 */
+#define MAX_L2_HEADERS 201
+
+#define SIZE_OF_FIFO (512*1024)
+
+static u8 message_fifo[ISA_DEVICES][SIZE_OF_FIFO];
+
+static u8 wr_rpc_msg[10*1024];
+static u8 wr_sec_msg[10*1024];
+static u8 wr_audio_msg[10*1024];
+static u8 wr_rtc_cal_msg[100];
+
+struct map_device {
+ u8 l2_header;
+ u8 idx;
+ char *name;
+};
+
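+/* maps each char device index to its SHRM L2 header and device node name */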
+static struct map_device map_dev[] = {
+ {ISI_MESSAGING, 0, "isi"},
+ {RPC_MESSAGING, 1, "rpc"},
+ {AUDIO_MESSAGING, 2, "modemaudio"},
+ {SECURITY_MESSAGING, 3, "sec"},
+ {COMMON_LOOPBACK_MESSAGING, 4, "common_loopback"},
+ {AUDIO_LOOPBACK_MESSAGING, 5, "audio_loopback"},
+ {CIQ_MESSAGING, 6, "ciq"},
+ {RTC_CAL_MESSAGING, 7, "rtc_calibration"},
+};
+
+/*
+ * major: this variable is exposed as a module_param so that the major
+ * number can be specified at load time
+ */
+static int major;
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+/* global fops mutex */
+static DEFINE_MUTEX(isa_lock);
+
+/**
+ * shrm_get_cdev_index() - return the index mapped to l2 header
+ * @l2_header: L2 header
+ *
+ * struct map_device maps the index(count) with the device L2 header.
+ * This function returns the index for the provided L2 header in case
+ * of success else -ve value.
+ */
+int shrm_get_cdev_index(u8 l2_header)
+{
+ u8 cnt;
+ for (cnt = 0; cnt < ISA_DEVICES; cnt++) {
+ if (map_dev[cnt].l2_header == l2_header)
+ return map_dev[cnt].idx;
+ }
+ return -EINVAL;
+}
+
+/**
+ * shrm_get_cdev_l2header() - return l2_header mapped to the index
+ * @idx: index
+ *
+ * struct map_device maps the index(count) with the device L2 header.
+ * This function returns the L2 header for the given index in case
+ * of success else -ve value.
+ */
+int shrm_get_cdev_l2header(u8 idx)
+{
+ u8 cnt;
+ for (cnt = 0; cnt < ISA_DEVICES; cnt++) {
+ if (map_dev[cnt].idx == idx)
+ return map_dev[cnt].l2_header;
+ }
+ return -EINVAL;
+}
+
+void shrm_char_reset_queues(struct shrm_dev *shrm)
+{
+ struct isadev_context *isadev;
+ struct isa_driver_context *isa_context;
+ struct queue_element *cur_msg = NULL;
+ struct list_head *cur_msg_ptr = NULL;
+ struct list_head *msg_ptr;
+ struct message_queue *q;
+ int no_dev;
+
+ dev_info(shrm->dev, "%s: Resetting char device queues\n", __func__);
+ isa_context = shrm->isa_context;
+ for (no_dev = 0 ; no_dev < ISA_DEVICES ; no_dev++) {
+ isadev = &isa_context->isadev[no_dev];
+ q = &isadev->dl_queue;
+
+ spin_lock_bh(&q->update_lock);
+ /* empty out the msg queue */
+ list_for_each_safe(cur_msg_ptr, msg_ptr, &q->msg_list) {
+ cur_msg = list_entry(cur_msg_ptr,
+ struct queue_element, entry);
+ list_del(cur_msg_ptr);
+ kfree(cur_msg);
+ }
+
+ /* reset the msg queue pointers */
+ q->size = SIZE_OF_FIFO;
+ q->readptr = 0;
+ q->writeptr = 0;
+ q->no = 0;
+
+ /* wake up the blocking read/select */
+ atomic_set(&q->q_rp, 1);
+ wake_up_interruptible(&q->wq_readable);
+
+ spin_unlock_bh(&q->update_lock);
+ }
+}
+
+/**
+ * create_queue() - To create FIFO for Tx and Rx message buffering.
+ * @q: message queue.
+ * @devicetype: device type 0-isi,1-rpc,2-audio,3-security,
+ * 4-common_loopback, 5-audio_loopback.
+ * @shrm: pointer to the shrm device information structure
+ *
+ * This function sets up a FIFO buffer of SIZE_OF_FIFO bytes backed by the
+ * statically allocated message_fifo area. It also initializes the queue
+ * handling lock, the queue management pointers and the message list
+ * associated with this queue.
+ */
+static int create_queue(struct message_queue *q, u32 devicetype,
+ struct shrm_dev *shrm)
+{
+ q->fifo_base = (u8 *)&message_fifo[devicetype];
+ q->size = SIZE_OF_FIFO;
+ q->readptr = 0;
+ q->writeptr = 0;
+ q->no = 0;
+ q->shrm = shrm;
+ spin_lock_init(&q->update_lock);
+ INIT_LIST_HEAD(&q->msg_list);
+ init_waitqueue_head(&q->wq_readable);
+ atomic_set(&q->q_rp, 0);
+
+ return 0;
+}
+
+static void delete_queue(struct message_queue *q)
+{
+ q->size = 0;
+ q->readptr = 0;
+ q->writeptr = 0;
+}
+
+/**
+ * add_msg_to_queue() - Add a message inside queue
+ * @q: message queue
+ * @size: size in bytes
+ *
+ * This function reserves size bytes in FIFO q and adds a descriptor for the
+ * new message to the queue's message list. It returns a negative number when
+ * no memory can currently be allocated.
+ */
+int add_msg_to_queue(struct message_queue *q, u32 size)
+{
+ struct queue_element *new_msg = NULL;
+ struct shrm_dev *shrm = q->shrm;
+
+ dev_dbg(shrm->dev, "%s IN q->writeptr=%d\n", __func__, q->writeptr);
+ new_msg = kmalloc(sizeof(struct queue_element), GFP_ATOMIC);
+ if (new_msg == NULL) {
+ dev_err(shrm->dev, "unable to allocate memory\n");
+ return -ENOMEM;
+ }
+ new_msg->offset = q->writeptr;
+ new_msg->size = size;
+ new_msg->no = q->no++;
+
+ /* check for overflow condition */
+ if (q->readptr <= q->writeptr) {
+ if (((q->writeptr-q->readptr) + size) >= q->size) {
+ dev_err(shrm->dev, "Buffer overflow !!\n");
+ BUG_ON(((q->writeptr-q->readptr) + size) >= q->size);
+ }
+ } else {
+ if ((q->writeptr + size) >= q->readptr) {
+ dev_err(shrm->dev, "Buffer overflow !!\n");
+ BUG_ON((q->writeptr + size) >= q->readptr);
+ }
+ }
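+	/* advance the write pointer, wrapping around the circular FIFO */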
+ q->writeptr = (q->writeptr + size) % q->size;
+ if (list_empty(&q->msg_list)) {
+ list_add_tail(&new_msg->entry, &q->msg_list);
+		/* there can be two blocked callers: one in read and one in select */
+ atomic_set(&q->q_rp, 1);
+ wake_up_interruptible(&q->wq_readable);
+ } else
+ list_add_tail(&new_msg->entry, &q->msg_list);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return 0;
+}
+
+/**
+ * remove_msg_from_queue() - To remove a message from the msg queue.
+ * @q: message queue
+ *
+ * This function deletes a message from the message list associated with
+ * message queue q and updates the read pointer accordingly.
+ * If the message list becomes empty, the event flag is cleared so that the
+ * select and read calls on this particular queue block again.
+ *
+ * The message list is FIFO style: messages are always added to the tail and
+ * removed from the head.
+ */
+int remove_msg_from_queue(struct message_queue *q)
+{
+ struct queue_element *old_msg = NULL;
+ struct shrm_dev *shrm = q->shrm;
+ struct list_head *msg_ptr = NULL;
+ struct list_head *old_msg_ptr = NULL;
+
+ dev_dbg(shrm->dev, "%s IN q->readptr %d\n", __func__, q->readptr);
+
+ list_for_each_safe(old_msg_ptr, msg_ptr, &q->msg_list) {
+ old_msg = list_entry(old_msg_ptr, struct queue_element, entry);
+ if (old_msg == NULL) {
+ dev_err(shrm->dev, "no message found\n");
+ return -EFAULT;
+ }
+ list_del(old_msg_ptr);
+ q->readptr = (q->readptr + old_msg->size)%q->size;
+ kfree(old_msg);
+ break;
+ }
+ if (list_empty(&q->msg_list)) {
+ dev_dbg(shrm->dev, "List is empty setting RP= 0\n");
+ atomic_set(&q->q_rp, 0);
+ }
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return 0;
+}
+
+/**
+ * get_size_of_new_msg() - get the size of the next message in the queue
+ * @q: message queue
+ *
+ * This function looks at the message at the head of the queue's message list
+ * (messages are always retrieved from the head) and returns its size.
+ */
+int get_size_of_new_msg(struct message_queue *q)
+{
+ struct queue_element *new_msg = NULL;
+ struct list_head *msg_list;
+ struct shrm_dev *shrm = q->shrm;
+ int size = 0;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ spin_lock_bh(&q->update_lock);
+ list_for_each(msg_list, &q->msg_list) {
+ new_msg = list_entry(msg_list, struct queue_element, entry);
+ if (new_msg == NULL) {
+ spin_unlock_bh(&q->update_lock);
+ dev_err(shrm->dev, "no message found\n");
+ return -EFAULT;
+ }
+ size = new_msg->size;
+ break;
+ }
+ spin_unlock_bh(&q->update_lock);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return size;
+}
+
+/**
+ * isa_select() - shrm char interface driver select interface
+ * @filp: file descriptor pointer
+ * @wait: poll_table_struct pointer
+ *
+ * This function is used to perform non-blocking read operations. It allows
+ * a process to determine whether it can read from one or more open files
+ * without blocking. These calls can also block a process until any of a
+ * given set of file descriptors becomes available for reading.
+ * If a file is ready to read, POLLIN | POLLRDNORM bitmask is returned.
+ * The driver method is called whenever the user-space program performs a select
+ * system call involving a file descriptor associated with the driver.
+ */
+static u32 isa_select(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct isadev_context *isadev = filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+ struct message_queue *q;
+ u32 mask = 0;
+ u32 m = iminor(filp->f_path.dentry->d_inode);
+ u8 idx = shrm_get_cdev_index(m);
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (shrm->msr_flag)
+ return -ENODEV;
+
+ if (isadev->device_id != idx)
+ return -1;
+
+ q = &isadev->dl_queue;
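+	/* register on the read waitqueue; q_rp is set when a message is queued */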
+ poll_wait(filp, &q->wq_readable, wait);
+ if (atomic_read(&q->q_rp) == 1)
+ mask = POLLIN | POLLRDNORM;
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return mask;
+}
+
+/**
+ * isa_read() - Read from device
+ * @filp: file descriptor
+ * @buf: user buffer pointer
+ * @len: size of requested data transfer
+ * @ppos: not used
+ *
+ * It reads the oldest message from the queue, copies it into the user buffer
+ * and returns its size.
+ * If there is no message present in the queue, it blocks until new data
+ * becomes available.
+ */
+ssize_t isa_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
+{
+ u32 size = 0;
+ int ret;
+ char *psrc;
+ struct isadev_context *isadev = (struct isadev_context *)
+ filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+ struct message_queue *q;
+ u32 msgsize;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (len <= 0)
+ return -EFAULT;
+
+ q = &isadev->dl_queue;
+
+ if (shrm->msr_flag) {
+ atomic_set(&q->q_rp, 0);
+ return -ENODEV;
+ }
+
+ spin_lock_bh(&q->update_lock);
+ if (list_empty(&q->msg_list)) {
+ spin_unlock_bh(&q->update_lock);
+ dev_dbg(shrm->dev, "Waiting for Data\n");
+ if (wait_event_interruptible(q->wq_readable,
+ atomic_read(&q->q_rp) == 1))
+ return -ERESTARTSYS;
+ } else
+ spin_unlock_bh(&q->update_lock);
+
+ if (shrm->msr_flag) {
+ atomic_set(&q->q_rp, 0);
+ return -ENODEV;
+ }
+
+ msgsize = get_size_of_new_msg(q);
+
+ if (len < msgsize)
+ return -EINVAL;
+
+ if ((q->readptr+msgsize) >= q->size) {
+ dev_dbg(shrm->dev, "Inside Loop Back\n");
+ psrc = (char *)buf;
+ size = (q->size-q->readptr);
+ /* Copy First Part of msg */
+ if (copy_to_user(psrc,
+ (u8 *)(q->fifo_base+q->readptr),
+ size)) {
+ dev_err(shrm->dev, "copy_to_user failed\n");
+ return -EFAULT;
+ }
+ psrc += size;
+ /* Copy Second Part of msg at the top of fifo */
+ if (copy_to_user(psrc,
+ (u8 *)(q->fifo_base),
+ (msgsize-size))) {
+ dev_err(shrm->dev, "copy_to_user failed\n");
+ return -EFAULT;
+ }
+ } else {
+ if (copy_to_user(buf,
+ (u8 *)(q->fifo_base + q->readptr),
+ msgsize)) {
+ dev_err(shrm->dev, "copy_to_user failed\n");
+ return -EFAULT;
+ }
+ }
+ spin_lock_bh(&q->update_lock);
+ ret = remove_msg_from_queue(q);
+ if (ret < 0) {
+ dev_err(shrm->dev,
+ "Remove msg from message queue failed\n");
+ msgsize = ret;
+ }
+ spin_unlock_bh(&q->update_lock);
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return msgsize;
+}
+
+/**
+ * isa_write() - Write to shrm char device
+ * @filp: file descriptor
+ * @buf: user buffer pointer
+ * @len: size of requested data transfer
+ * @ppos: not used
+ *
+ * It copies the message from user space into the staging buffer for the
+ * channel and passes it to shm_write_msg() for transmission on the
+ * corresponding L2 channel.
+ */
+ssize_t isa_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct isadev_context *isadev = filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+ struct message_queue *q;
+ void *addr = 0;
+ int err, l2_header;
+ int ret = 0;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (len <= 0 || buf == NULL)
+ return -EFAULT;
+ q = &isadev->dl_queue;
+ l2_header = shrm_get_cdev_l2header(isadev->device_id);
+ if (l2_header < 0) {
+ dev_err(shrm->dev, "failed to get L2 header\n");
+ return l2_header;
+ }
+
+ switch (l2_header) {
+ case RPC_MESSAGING:
+ dev_dbg(shrm->dev, "RPC\n");
+ addr = (void *)wr_rpc_msg;
+ break;
+ case AUDIO_MESSAGING:
+ dev_dbg(shrm->dev, "Audio\n");
+ addr = (void *)wr_audio_msg;
+ break;
+ case SECURITY_MESSAGING:
+ dev_dbg(shrm->dev, "Security\n");
+ addr = (void *)wr_sec_msg;
+ break;
+ case COMMON_LOOPBACK_MESSAGING:
+ dev_dbg(shrm->dev, "Common loopback\n");
+ addr = isadev->addr;
+ break;
+ case AUDIO_LOOPBACK_MESSAGING:
+ dev_dbg(shrm->dev, "Audio loopback\n");
+ addr = isadev->addr;
+ break;
+ case CIQ_MESSAGING:
+ dev_dbg(shrm->dev, "CIQ\n");
+ addr = isadev->addr;
+ break;
+ case RTC_CAL_MESSAGING:
+ dev_dbg(shrm->dev, "isa_write(): RTC Calibration\n");
+ addr = (void *)wr_rtc_cal_msg;
+ break;
+ default:
+ dev_dbg(shrm->dev, "Wrong device\n");
+ return -EFAULT;
+ }
+
+ if (copy_from_user(addr, buf, len)) {
+ dev_err(shrm->dev, "copy_from_user failed\n");
+ return -EFAULT;
+ }
+ /* Write msg to Fifo */
+ if ((l2_header == AUDIO_MESSAGING) ||
+ (l2_header == AUDIO_LOOPBACK_MESSAGING)) {
+ mutex_lock(&shrm->isa_context->tx_audio_mutex);
+ err = shm_write_msg(shrm, l2_header, addr, len);
+ if (!err)
+ ret = len;
+ else
+ ret = err;
+ mutex_unlock(&shrm->isa_context->tx_audio_mutex);
+ } else {
+ spin_lock_bh(&shrm->isa_context->common_tx);
+ err = shm_write_msg(shrm, l2_header, addr, len);
+ if (!err)
+ ret = len;
+ else
+ ret = err;
+ spin_unlock_bh(&shrm->isa_context->common_tx);
+ }
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return ret;
+}
+
+/**
+ * isa_ioctl() - To handle different ioctl commands supported by driver.
+ * @filp: file descriptor pointer
+ * @cmd: ioctl command
+ * @arg: input param
+ *
+ * The following ioctls are supported by this driver:
+ * DLP_IOC_ALLOCATE_BUFFER - allocate a buffer for a new uplink message.
+ * Called with the required message size; it returns the offset of the
+ * allocated space in the queue.
+ * DLP_IOC_PUT_MESSAGE - indicate that a new uplink message is available in
+ * the queue for transmission. The message is copied to the offset returned
+ * by the previous ioctl before this ioctl is called.
+ * DLP_IOC_GET_MESSAGE - check whether any downlink message is available in
+ * the queue; it returns the offset of the new message inside the queue.
+ * DLP_IOC_DEALLOCATE_BUFFER - deallocate the buffer allocated for a downlink
+ * message once the message has been copied out.
+ */
+static long isa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ long err = 0;
+ struct isadev_context *isadev = filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+ u32 m = iminor(filp->f_path.dentry->d_inode);
+
+ isadev = (struct isadev_context *)filp->private_data;
+
+ if (isadev->device_id != m)
+ return -EINVAL;
+
+ switch (cmd) {
+ case DLP_IOC_ALLOCATE_BUFFER:
+ dev_dbg(shrm->dev, "DLP_IOC_ALLOCATE_BUFFER\n");
+ break;
+ case DLP_IOC_PUT_MESSAGE:
+ dev_dbg(shrm->dev, "DLP_IOC_PUT_MESSAGE\n");
+ break;
+ case DLP_IOC_GET_MESSAGE:
+ dev_dbg(shrm->dev, "DLP_IOC_GET_MESSAGE\n");
+ break;
+ case DLP_IOC_DEALLOCATE_BUFFER:
+ dev_dbg(shrm->dev, "DLP_IOC_DEALLOCATE_BUFFER\n");
+ break;
+ default:
+ dev_dbg(shrm->dev, "Unknown IOCTL\n");
+ err = -EFAULT;
+ break;
+ }
+ return err;
+}
+/**
+ * isa_mmap() - Maps kernel queue memory to user space.
+ * @filp: file descriptor pointer
+ * @vma: virtual area memory structure.
+ *
+ * This function maps kernel FIFO into user space. This function
+ * shall be called twice to map both uplink and downlink buffers.
+ */
+static int isa_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct isadev_context *isadev = filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+
+ u32 m = iminor(filp->f_path.dentry->d_inode);
+ dev_dbg(shrm->dev, "%s %d\n", __func__, m);
+
+ return 0;
+}
+
+/**
+ * isa_close() - Close device file
+ * @inode: structure is used by the kernel internally to represent files
+ * @filp: device file descriptor
+ *
+ * This function deletes the structures associated with this file, deletes
+ * queues, flushes and destroys workqueues and closes this file.
+ * It also unregisters itself from the l2mux driver.
+ */
+static int isa_close(struct inode *inode, struct file *filp)
+{
+ struct isadev_context *isadev = filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+ struct isa_driver_context *isa_context = shrm->isa_context;
+ u8 m;
+ int idx;
+
+ mutex_lock(&isa_lock);
+ m = iminor(filp->f_path.dentry->d_inode);
+ idx = shrm_get_cdev_index(m);
+ if (idx < 0) {
+ dev_err(shrm->dev, "failed to get index\n");
+ mutex_unlock(&isa_lock);
+ return idx;
+ }
+	dev_dbg(shrm->dev, "isa_close %d\n", m);
+
+ if (atomic_dec_and_test(&isa_context->is_open[idx])) {
+ atomic_inc(&isa_context->is_open[idx]);
+ dev_err(shrm->dev, "Device not opened yet\n");
+ mutex_unlock(&isa_lock);
+ return -ENODEV;
+ }
+ atomic_set(&isa_context->is_open[idx], 1);
+
+ switch (m) {
+ case RPC_MESSAGING:
+ dev_info(shrm->dev, "Close RPC_MESSAGING Device\n");
+ break;
+ case AUDIO_MESSAGING:
+ dev_info(shrm->dev, "Close AUDIO_MESSAGING Device\n");
+ break;
+ case SECURITY_MESSAGING:
+		dev_info(shrm->dev, "Close SECURITY_MESSAGING Device\n");
+ break;
+ case COMMON_LOOPBACK_MESSAGING:
+ kfree(isadev->addr);
+ dev_info(shrm->dev, "Close COMMON_LOOPBACK_MESSAGING Device\n");
+ break;
+ case AUDIO_LOOPBACK_MESSAGING:
+ kfree(isadev->addr);
+ dev_info(shrm->dev, "Close AUDIO_LOOPBACK_MESSAGING Device\n");
+ break;
+ case CIQ_MESSAGING:
+ kfree(isadev->addr);
+ dev_info(shrm->dev, "Close CIQ_MESSAGING Device\n");
+ break;
+ case RTC_CAL_MESSAGING:
+ dev_info(shrm->dev, "Close RTC_CAL_MESSAGING Device\n");
+ break;
+ default:
+ dev_info(shrm->dev, "No such device present\n");
+ mutex_unlock(&isa_lock);
+ return -ENODEV;
+ };
+ mutex_unlock(&isa_lock);
+ return 0;
+}
+/**
+ * isa_open() - Open device file
+ * @inode: structure is used by the kernel internally to represent files
+ * @filp: device file descriptor
+ *
+ * This function performs the initialization needed to open an SHM channel.
+ * The following tasks are performed:
+ * -check that modem boot is complete
+ * -return busy if the device is already open
+ * -allocate the message buffer for the loopback and CIQ channels
+ */
+static int isa_open(struct inode *inode, struct file *filp)
+{
+ int err = 0;
+ u8 m;
+ int idx;
+ struct isadev_context *isadev;
+ struct isa_driver_context *isa_context = container_of(
+ inode->i_cdev,
+ struct isa_driver_context,
+ cdev);
+ struct shrm_dev *shrm = isa_context->isadev->dl_queue.shrm;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (get_boot_state() != BOOT_DONE) {
+ dev_err(shrm->dev, "Boot is not done\n");
+ return -EBUSY;
+ }
+ mutex_lock(&isa_lock);
+ m = iminor(inode);
+
+ if ((m != RPC_MESSAGING) &&
+ (m != AUDIO_LOOPBACK_MESSAGING) &&
+ (m != COMMON_LOOPBACK_MESSAGING) &&
+ (m != AUDIO_MESSAGING) &&
+ (m != SECURITY_MESSAGING) &&
+ (m != CIQ_MESSAGING) &&
+ (m != RTC_CAL_MESSAGING)) {
+ dev_err(shrm->dev, "No such device present\n");
+ mutex_unlock(&isa_lock);
+ return -ENODEV;
+ }
+ idx = shrm_get_cdev_index(m);
+ if (idx < 0) {
+ dev_err(shrm->dev, "failed to get index\n");
+ mutex_unlock(&isa_lock);
+ return idx;
+ }
+ if (!atomic_dec_and_test(&isa_context->is_open[idx])) {
+ atomic_inc(&isa_context->is_open[idx]);
+ dev_err(shrm->dev, "Device already opened\n");
+ mutex_unlock(&isa_lock);
+ return -EBUSY;
+ }
+ isadev = &isa_context->isadev[idx];
+ if (filp != NULL)
+ filp->private_data = isadev;
+
+ switch (m) {
+ case RPC_MESSAGING:
+ dev_info(shrm->dev, "Open RPC_MESSAGING Device\n");
+ break;
+ case AUDIO_MESSAGING:
+ dev_info(shrm->dev, "Open AUDIO_MESSAGING Device\n");
+ break;
+ case SECURITY_MESSAGING:
+ dev_info(shrm->dev, "Open SECURITY_MESSAGING Device\n");
+ break;
+ case COMMON_LOOPBACK_MESSAGING:
+ isadev->addr = kzalloc(10 * 1024, GFP_KERNEL);
+ if (!isadev->addr) {
+ mutex_unlock(&isa_lock);
+ return -ENOMEM;
+ }
+ dev_info(shrm->dev, "Open COMMON_LOOPBACK_MESSAGING Device\n");
+ break;
+ case AUDIO_LOOPBACK_MESSAGING:
+ isadev->addr = kzalloc(10 * 1024, GFP_KERNEL);
+ if (!isadev->addr) {
+ mutex_unlock(&isa_lock);
+ return -ENOMEM;
+ }
+ dev_info(shrm->dev, "Open AUDIO_LOOPBACK_MESSAGING Device\n");
+ break;
+ case CIQ_MESSAGING:
+ isadev->addr = kzalloc(10 * 1024, GFP_KERNEL);
+ if (!isadev->addr) {
+ mutex_unlock(&isa_lock);
+ return -ENOMEM;
+ }
+ dev_info(shrm->dev, "Open CIQ_MESSAGING Device\n");
+ break;
+ case RTC_CAL_MESSAGING:
+ dev_info(shrm->dev, "Open RTC_CAL_MESSAGING Device\n");
+ break;
+ };
+
+ mutex_unlock(&isa_lock);
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return err;
+}
+
+const struct file_operations isa_fops = {
+ .owner = THIS_MODULE,
+ .open = isa_open,
+ .release = isa_close,
+ .unlocked_ioctl = isa_ioctl,
+ .mmap = isa_mmap,
+ .read = isa_read,
+ .write = isa_write,
+ .poll = isa_select,
+};
+
+/**
+ * isa_init() - module insertion function
+ * @shrm: pointer to the shrm device information structure
+ *
+ * This function registers the module as a character driver using
+ * register_chrdev_region() or alloc_chrdev_region(). It adds the driver to
+ * the system using cdev_add(). The major number is dynamically allocated
+ * with alloc_chrdev_region() by default, or can be specified by the user at
+ * load time via the major module_param. Device nodes are created through
+ * device_create(), one per entry in map_dev[], using the channel's L2 header
+ * as the minor number.
+ */
+int isa_init(struct shrm_dev *shrm)
+{
+ dev_t dev_id;
+ int retval, no_dev;
+ struct isadev_context *isadev;
+ struct isa_driver_context *isa_context;
+
+ isa_context = kzalloc(sizeof(struct isa_driver_context),
+ GFP_KERNEL);
+ if (isa_context == NULL) {
+ dev_err(shrm->dev, "Failed to alloc memory\n");
+ return -ENOMEM;
+ }
+ shrm->isa_context = isa_context;
+ if (major) {
+ dev_id = MKDEV(major, MAX_L2_HEADERS);
+ retval = register_chrdev_region(dev_id, ISA_DEVICES, NAME);
+ } else {
+ /*
+		 * L2 header of the loopback device is 192 (0xc0). As per the
+		 * shrm protocol the minor id of the device is mapped to the
+		 * L2 header.
+ */
+ retval = alloc_chrdev_region(&dev_id, 0, MAX_L2_HEADERS, NAME);
+ major = MAJOR(dev_id);
+ }
+ dev_dbg(shrm->dev, " major %d\n", major);
+
+ cdev_init(&isa_context->cdev, &isa_fops);
+ isa_context->cdev.owner = THIS_MODULE;
+ retval = cdev_add(&isa_context->cdev, dev_id, MAX_L2_HEADERS);
+ if (retval) {
+ dev_err(shrm->dev, "Failed to add char device\n");
+ return retval;
+ }
+ /* create class and device */
+ isa_context->shm_class = class_create(THIS_MODULE, NAME);
+ if (IS_ERR(isa_context->shm_class)) {
+ dev_err(shrm->dev, "Error creating shrm class\n");
+ cdev_del(&isa_context->cdev);
+ retval = PTR_ERR(isa_context->shm_class);
+ kfree(isa_context);
+ return retval;
+ }
+
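+	/* create one node per channel; the L2 header doubles as the minor number */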
+ for (no_dev = 0; no_dev < ISA_DEVICES; no_dev++) {
+ atomic_set(&isa_context->is_open[no_dev], 1);
+ device_create(isa_context->shm_class, NULL,
+ MKDEV(MAJOR(dev_id),
+ map_dev[no_dev].l2_header), NULL,
+ map_dev[no_dev].name);
+ }
+
+ isa_context->isadev = kzalloc(sizeof
+ (struct isadev_context)*ISA_DEVICES,
+ GFP_KERNEL);
+ if (isa_context->isadev == NULL) {
+ dev_err(shrm->dev, "Failed to alloc memory\n");
+ return -ENOMEM;
+ }
+ for (no_dev = 0 ; no_dev < ISA_DEVICES ; no_dev++) {
+ isadev = &isa_context->isadev[no_dev];
+ isadev->device_id = no_dev;
+ retval = create_queue(&isadev->dl_queue,
+ isadev->device_id, shrm);
+
+ if (retval < 0) {
+ dev_err(shrm->dev, "create dl_queue failed\n");
+ delete_queue(&isadev->dl_queue);
+ kfree(isadev);
+ return retval;
+ }
+ }
+ mutex_init(&isa_context->tx_audio_mutex);
+ spin_lock_init(&isa_context->common_tx);
+ dev_dbg(shrm->dev, " SHM Char Driver added\n");
+ return retval;
+}
+
+void isa_exit(struct shrm_dev *shrm)
+{
+ int no_dev;
+ struct isadev_context *isadev;
+ struct isa_driver_context *isa_context = shrm->isa_context;
+ dev_t dev_id = MKDEV(major, 0);
+
+ for (no_dev = 0 ; no_dev < ISA_DEVICES ; no_dev++) {
+ device_destroy(isa_context->shm_class,
+ MKDEV(MAJOR(dev_id),
+ map_dev[no_dev].l2_header));
+ isadev = &isa_context->isadev[no_dev];
+ delete_queue(&isadev->dl_queue);
+ kfree(isadev);
+ }
+ class_destroy(isa_context->shm_class);
+ cdev_del(&isa_context->cdev);
+ unregister_chrdev_region(dev_id, ISA_DEVICES);
+ kfree(isa_context);
+ dev_dbg(shrm->dev, " SHM Char Driver removed\n");
+}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 999d6a03e43..6f86f8ca9b0 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -19,14 +19,27 @@ config DW_APB_TIMER
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer"
depends on UX500_SOC_DB5500 || UX500_SOC_DB8500
- default y
+ default y if UX500_SOC_DB8500
help
Use the always on PRCMU Timer as clocksource
config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
- bool "Clocksource PRCMU Timer sched_clock"
- depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK)
+ bool
+ depends on CLKSRC_DBX500_PRCMU
select HAVE_SCHED_CLOCK
+ help
+ Use the always on PRCMU Timer as sched_clock
+
+config CLKSRC_DB5500_MTIMER
+ bool "Clocksource MTIMER"
+ depends on UX500_SOC_DB5500
default y
help
+ Use the always on MTIMER as clocksource
+
+config CLKSRC_DB5500_MTIMER_SCHED_CLOCK
+ bool
+ depends on CLKSRC_DB5500_MTIMER
+ select HAVE_SCHED_CLOCK
+ help
Use the always on PRCMU Timer as sched_clock
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 8d81a1d3265..9b10f6b7536 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
-obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
\ No newline at end of file
+obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
+obj-$(CONFIG_CLKSRC_DB5500_MTIMER) += db5500-mtimer.o
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index fb6b6d28b60..24e8e22f236 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -14,6 +14,9 @@
*/
#include <linux/clockchips.h>
#include <linux/clksrc-dbx500-prcmu.h>
+#ifdef CONFIG_BOOTTIME
+#include <linux/boottime.h>
+#endif
#include <asm/sched_clock.h>
@@ -69,6 +72,23 @@ static u32 notrace dbx500_prcmu_sched_clock_read(void)
#endif
+#ifdef CONFIG_BOOTTIME
+static unsigned long __init boottime_get_time(void)
+{
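+	/* read the clocksource and convert the cycle count to microseconds */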
+ return div_s64(clocksource_cyc2ns(clocksource_dbx500_prcmu.read(
+ &clocksource_dbx500_prcmu),
+ clocksource_dbx500_prcmu.mult,
+ clocksource_dbx500_prcmu.shift),
+ 1000);
+}
+
+static struct boottime_timer __initdata boottime_timer = {
+ .init = NULL,
+ .get_time = boottime_get_time,
+ .finalize = NULL,
+};
+#endif
+
void __init clksrc_dbx500_prcmu_init(void __iomem *base)
{
clksrc_dbx500_timer_base = base;
@@ -93,4 +113,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)
clocksource_calc_mult_shift(&clocksource_dbx500_prcmu,
RATE_32K, SCHED_CLOCK_MIN_WRAP);
clocksource_register(&clocksource_dbx500_prcmu);
+#ifdef CONFIG_BOOTTIME
+ boottime_activate(&boottime_timer);
+#endif
}
diff --git a/drivers/clocksource/db5500-mtimer.c b/drivers/clocksource/db5500-mtimer.c
new file mode 100644
index 00000000000..5e64da19e66
--- /dev/null
+++ b/drivers/clocksource/db5500-mtimer.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/clockchips.h>
+#include <linux/clksrc-db5500-mtimer.h>
+#include <linux/boottime.h>
+
+#include <asm/sched_clock.h>
+
+#define MTIMER_PRIMARY_COUNTER 0x18
+
+static void __iomem *db5500_mtimer_base;
+
+#ifdef CONFIG_CLKSRC_DB5500_MTIMER_SCHED_CLOCK
+static DEFINE_CLOCK_DATA(cd);
+
+unsigned long long notrace sched_clock(void)
+{
+ u32 cyc;
+
+ if (unlikely(!db5500_mtimer_base))
+ return 0;
+
+ cyc = readl_relaxed(db5500_mtimer_base + MTIMER_PRIMARY_COUNTER);
+
+ return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+static void notrace db5500_mtimer_update_sched_clock(void)
+{
+ u32 cyc = readl_relaxed(db5500_mtimer_base + MTIMER_PRIMARY_COUNTER);
+ update_sched_clock(&cd, cyc, (u32)~0);
+}
+#endif
+
+#ifdef CONFIG_BOOTTIME
+static unsigned long __init boottime_get_time(void)
+{
+ return sched_clock();
+}
+
+static struct boottime_timer __initdata boottime_timer = {
+ .init = NULL,
+ .get_time = boottime_get_time,
+ .finalize = NULL,
+};
+#endif
+
+void __init db5500_mtimer_init(void __iomem *base)
+{
+ db5500_mtimer_base = base;
+
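+	/* 32 kHz, 32-bit free-running counter registered as a mmio clocksource */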
+ clocksource_mmio_init(base + MTIMER_PRIMARY_COUNTER, "mtimer", 32768,
+ 400, 32, clocksource_mmio_readl_up);
+
+#ifdef CONFIG_CLKSRC_DB5500_MTIMER_SCHED_CLOCK
+ init_sched_clock(&cd, db5500_mtimer_update_sched_clock,
+ 32, 32768);
+#endif
+ boottime_activate(&boottime_timer);
+}
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ac000fa76bb..7c271f042c7 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -39,7 +39,8 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB5500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 622013fb789..1005e0ddcc1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -367,6 +367,27 @@ show_one(scaling_cur_freq, cur);
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy);
+int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max)
+{
+ int ret;
+ struct cpufreq_policy new_policy;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ ret = cpufreq_get_policy(&new_policy, cpu);
+ if (ret)
+ return -EINVAL;
+
+ new_policy.min = min;
+ new_policy.max = max;
+
+ ret = __cpufreq_set_policy(policy, &new_policy);
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_update_freq);
+
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
*/
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
deleted file mode 100644
index f5002015d82..00000000000
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics 2009
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License v2
- * Author: Sundar Iyer <sundar.iyer@stericsson.com>
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
- */
-#include <linux/kernel.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/mfd/dbx500-prcmu.h>
-#include <mach/id.h>
-
-static struct cpufreq_frequency_table freq_table[] = {
- [0] = {
- .index = 0,
- .frequency = 200000,
- },
- [1] = {
- .index = 1,
- .frequency = 300000,
- },
- [2] = {
- .index = 2,
- .frequency = 600000,
- },
- [3] = {
- /* Used for MAX_OPP, if available */
- .index = 3,
- .frequency = CPUFREQ_TABLE_END,
- },
- [4] = {
- .index = 4,
- .frequency = CPUFREQ_TABLE_END,
- },
-};
-
-static enum arm_opp idx2opp[] = {
- ARM_EXTCLK,
- ARM_50_OPP,
- ARM_100_OPP,
- ARM_MAX_OPP
-};
-
-static struct freq_attr *db8500_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
-static int db8500_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_freqs freqs;
- unsigned int idx;
-
- /* scale the target frequency to one of the extremes supported */
- if (target_freq < policy->cpuinfo.min_freq)
- target_freq = policy->cpuinfo.min_freq;
- if (target_freq > policy->cpuinfo.max_freq)
- target_freq = policy->cpuinfo.max_freq;
-
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target
- (policy, freq_table, target_freq, relation, &idx)) {
- return -EINVAL;
- }
-
- freqs.old = policy->cur;
- freqs.new = freq_table[idx].frequency;
-
- if (freqs.old == freqs.new)
- return 0;
-
- /* pre-change notification */
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-
- /* request the PRCM unit for opp change */
- if (prcmu_set_arm_opp(idx2opp[idx])) {
- pr_err("db8500-cpufreq: Failed to set OPP level\n");
- return -EINVAL;
- }
-
- /* post change notification */
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- return 0;
-}
-
-static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
-{
- int i;
- /* request the prcm to get the current ARM opp */
- for (i = 0; prcmu_get_arm_opp() != idx2opp[i]; i++)
- ;
- return freq_table[i].frequency;
-}
-
-static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
-{
- int i, res;
-
- BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table));
-
- if (!prcmu_is_u8400()) {
- freq_table[1].frequency = 400000;
- freq_table[2].frequency = 800000;
- if (prcmu_has_arm_maxopp())
- freq_table[3].frequency = 1000000;
- }
- pr_info("db8500-cpufreq : Available frequencies:\n");
- for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
- pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
-
- /* get policy fields based on the table */
- res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!res)
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
- else {
- pr_err("db8500-cpufreq : Failed to read policy table\n");
- return res;
- }
-
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- policy->cur = db8500_cpufreq_getspeed(policy->cpu);
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
- /*
- * FIXME : Need to take time measurement across the target()
- * function with no/some/all drivers in the notification
- * list.
- */
- policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
-
- /* policy sharing between dual CPUs */
- cpumask_copy(policy->cpus, &cpu_present_map);
-
- policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-
- return 0;
-}
-
-static struct cpufreq_driver db8500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = db8500_cpufreq_verify_speed,
- .target = db8500_cpufreq_target,
- .get = db8500_cpufreq_getspeed,
- .init = db8500_cpufreq_init,
- .name = "DB8500",
- .attr = db8500_cpufreq_attr,
-};
-
-static int __init db8500_cpufreq_register(void)
-{
- if (!cpu_is_u8500v20_or_later())
- return -ENODEV;
-
- pr_info("cpufreq for DB8500 started\n");
- return cpufreq_register_driver(&db8500_cpufreq_driver);
-}
-device_initcall(db8500_cpufreq_register);
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
new file mode 100644
index 00000000000..38edd733b4c
--- /dev/null
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Sundar Iyer
+ * Author: Martin Persson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/id.h>
+
+static struct cpufreq_frequency_table db8500_freq_table[] = {
+ [0] = {
+ .index = 0,
+ .frequency = 200000,
+ },
+ [1] = {
+ .index = 1,
+ .frequency = 400000,
+ },
+ [2] = {
+ .index = 2,
+ .frequency = 800000,
+ },
+ [3] = {
+ /* Used for MAX_OPP, if available */
+ .index = 3,
+ .frequency = CPUFREQ_TABLE_END,
+ },
+ [4] = {
+ .index = 4,
+ .frequency = CPUFREQ_TABLE_END,
+ },
+};
+
+static struct cpufreq_frequency_table db5500_freq_table[] = {
+ [0] = {
+ .index = 0,
+ .frequency = 200000,
+ },
+ [1] = {
+ .index = 1,
+ .frequency = 396500,
+ },
+ [2] = {
+ .index = 2,
+ .frequency = 793000,
+ },
+ [3] = {
+ .index = 3,
+ .frequency = CPUFREQ_TABLE_END,
+ },
+};
+
+static struct cpufreq_frequency_table *freq_table;
+static int freq_table_len;
+
+static enum arm_opp db8500_idx2opp[] = {
+ ARM_EXTCLK,
+ ARM_50_OPP,
+ ARM_100_OPP,
+ ARM_MAX_OPP
+};
+
+static enum arm_opp db5500_idx2opp[] = {
+ ARM_EXTCLK,
+ ARM_50_OPP,
+ ARM_100_OPP,
+};
+
+static enum arm_opp *idx2opp;
+
+static struct freq_attr *dbx500_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ unsigned int idx;
+
+ /* scale the target frequency to one of the extremes supported */
+ if (target_freq < policy->cpuinfo.min_freq)
+ target_freq = policy->cpuinfo.min_freq;
+ if (target_freq > policy->cpuinfo.max_freq)
+ target_freq = policy->cpuinfo.max_freq;
+
+ /* Lookup the next frequency */
+ if (cpufreq_frequency_table_target
+ (policy, freq_table, target_freq, relation, &idx)) {
+ return -EINVAL;
+ }
+
+ freqs.old = policy->cur;
+ freqs.new = freq_table[idx].frequency;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ /* pre-change notification */
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ /* request the PRCM unit for opp change */
+ if (prcmu_set_arm_opp(idx2opp[idx])) {
+ pr_err("ux500-cpufreq: Failed to set OPP level\n");
+ return -EINVAL;
+ }
+
+ /* post change notification */
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
+{
+ int i;
+ /* request the prcm to get the current ARM opp */
+ for (i = 0; prcmu_get_arm_opp() != idx2opp[i]; i++)
+ ;
+ return freq_table[i].frequency;
+}
+
+static void __init dbx500_cpufreq_init_maxopp_freq(void)
+{
+ struct prcmu_fw_version *fw_version = prcmu_get_fw_version();
+
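+	/* the available MAX_OPP frequency depends on the PRCMU firmware project */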
+ if ((fw_version == NULL) || !prcmu_has_arm_maxopp())
+ return;
+
+ switch (fw_version->project) {
+ case PRCMU_FW_PROJECT_U8500:
+ case PRCMU_FW_PROJECT_U9500:
+ freq_table[3].frequency = 1000000;
+ break;
+ case PRCMU_FW_PROJECT_U8500_C2:
+ case PRCMU_FW_PROJECT_U9500_C2:
+ freq_table[3].frequency = 1150000;
+ break;
+ default:
+ break;
+ }
+}
+
+static bool initialized;
+
+static void __init dbx500_cpufreq_early_init(void)
+{
+ if (cpu_is_u5500()) {
+ freq_table = db5500_freq_table;
+ idx2opp = db5500_idx2opp;
+ freq_table_len = ARRAY_SIZE(db5500_freq_table);
+ } else if (cpu_is_u8500()) {
+ freq_table = db8500_freq_table;
+ idx2opp = db8500_idx2opp;
+ dbx500_cpufreq_init_maxopp_freq();
+ freq_table_len = ARRAY_SIZE(db8500_freq_table);
+ } else {
+ ux500_unknown_soc();
+ }
+ initialized = true;
+}
+
+/*
+ * This is called from localtimer initialization, via the clk_get_rate() for
+ * the smp_twd clock. This is way before cpufreq is initialized.
+ */
+unsigned long dbx500_cpufreq_getfreq(void)
+{
+ if (!initialized)
+ dbx500_cpufreq_early_init();
+
+ return dbx500_cpufreq_getspeed(0) * 1000;
+}
+
+int dbx500_cpufreq_percent2freq(int percent)
+{
+ int op;
+ int i;
+
+ switch (percent) {
+ case 0:
+ /* Fall through */
+ case 25:
+ op = ARM_EXTCLK;
+ break;
+ case 50:
+ op = ARM_50_OPP;
+ break;
+ case 100:
+ op = ARM_100_OPP;
+ break;
+ case 125:
+ if (cpu_is_u8500() && prcmu_has_arm_maxopp())
+ op = ARM_MAX_OPP;
+ else
+ op = ARM_100_OPP;
+ break;
+ default:
+ pr_err("cpufreq-dbx500: Incorrect arm target value (%d).\n",
+		       percent);
+ return -EINVAL;
+ break;
+ }
+
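+	/* map the selected OPP back to its entry in the frequency table */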
+	for (i = 0; i < freq_table_len && idx2opp[i] != op; i++)
+ ;
+
+ if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
+ pr_err("cpufreq-dbx500: Matching frequency does not exist!\n");
+ return -EINVAL;
+ }
+
+ return freq_table[i].frequency;
+}
+
+int dbx500_cpufreq_get_limits(int cpu, int r,
+ unsigned int *min, unsigned int *max)
+{
+ int freq;
+ int ret;
+ static int old_freq;
+ struct cpufreq_policy p;
+
+ freq = dbx500_cpufreq_percent2freq(r);
+
+ if (freq < 0)
+ return -EINVAL;
+
+ if (freq != old_freq)
+ pr_debug("cpufreq-dbx500: set min arm freq to %d\n",
+ freq);
+
+ (*min) = freq;
+
+ ret = cpufreq_get_policy(&p, cpu);
+ if (ret) {
+ pr_err("cpufreq-dbx500: Failed to get policy.\n");
+ return -EINVAL;
+ }
+
+ (*max) = p.max;
+ return 0;
+}
+
+static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int res;
+ int i;
+
+ /* get policy fields based on the table */
+ res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (!res)
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+ else {
+ pr_err("dbx500-cpufreq : Failed to read policy table\n");
+ return res;
+ }
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+ policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
+
+ for (i = 0; freq_table[i].frequency != policy->cur; i++)
+ ;
+
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+
+ /*
+ * FIXME : Need to take time measurement across the target()
+ * function with no/some/all drivers in the notification
+ * list.
+ */
+ policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
+
+ /* policy sharing between dual CPUs */
+ cpumask_copy(policy->cpus, &cpu_present_map);
+
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+
+ return 0;
+}
+
+static struct cpufreq_driver dbx500_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = dbx500_cpufreq_verify_speed,
+ .target = dbx500_cpufreq_target,
+ .get = dbx500_cpufreq_getspeed,
+ .init = dbx500_cpufreq_init,
+ .name = "DBX500",
+ .attr = dbx500_cpufreq_attr,
+};
+
+static int __init dbx500_cpufreq_register(void)
+{
+ int i;
+
+ if (!initialized)
+ dbx500_cpufreq_early_init();
+
+ pr_info("dbx500-cpufreq : Available frequencies:\n");
+
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+ pr_info(" %d Mhz\n", freq_table[i].frequency / 1000);
+
+ return cpufreq_register_driver(&dbx500_cpufreq_driver);
+}
+device_initcall(dbx500_cpufreq_register);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 6d16b4b0d7a..12edcef976a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -293,4 +293,15 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110 from AES
algorithms execution.
+config CRYPTO_DEV_UX500
+ tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration"
+ depends on ARCH_U8500
+ select CRYPTO_ALGAPI
+ help
+ Driver for ST-Ericsson UX500 crypto engine.
+
+if CRYPTO_DEV_UX500
+ source "drivers/crypto/ux500/Kconfig"
+endif # if CRYPTO_DEV_UX500
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 53ea5015531..dbcc3113205 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
new file mode 100644
index 00000000000..165a03d46c0
--- /dev/null
+++ b/drivers/crypto/ux500/Kconfig
@@ -0,0 +1,29 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+config CRYPTO_DEV_UX500_CRYP
+ tristate "UX500 crypto driver for CRYP block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_DES
+ help
+ This is the driver for the crypto block CRYP.
+
+config CRYPTO_DEV_UX500_HASH
+ tristate "UX500 crypto driver for HASH block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_HASH
+ select CRYPTO_HMAC
+ help
+ This selects the UX500 hash driver for the HASH hardware.
+ Depends on U8500/STM DMA if running in DMA mode.
+
+config CRYPTO_DEV_UX500_DEBUG
+ bool "Activate ux500 platform debug-mode for crypto and hash block"
+ depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH
+ default n
+ help
+ Say Y if you want to add debug prints to ux500_hash and
+ ux500_cryp devices.
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile
new file mode 100644
index 00000000000..b9a365bade8
--- /dev/null
+++ b/drivers/crypto/ux500/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
new file mode 100644
index 00000000000..e5d362a6f68
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -0,0 +1,13 @@
+#/*
+# * Copyright (C) ST-Ericsson SA 2010
+# * Author: shujuan.chen@stericsson.com for ST-Ericsson.
+# * License terms: GNU General Public License (GPL) version 2 */
+
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_cryp_core.o := -DDEBUG -O0
+CFLAGS_cryp.o := -DDEBUG -O0
+CFLAGS_cryp_irq.o := -DDEBUG -O0
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
+ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
new file mode 100644
index 00000000000..211200fed34
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -0,0 +1,418 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <mach/hardware.h>
+
+#include "cryp_p.h"
+#include "cryp.h"
+
+/**
+ * cryp_wait_until_done - wait until the device logic is not busy
+ */
+void cryp_wait_until_done(struct cryp_device_data *device_data)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+}
+
+/**
+ * cryp_check - This routine checks Peripheral and PCell Id
+ * @device_data: Pointer to the device data struct for base address.
+ */
+int cryp_check(struct cryp_device_data *device_data)
+{
+ int peripheralID2 = 0;
+
+ if (NULL == device_data)
+ return -EINVAL;
+
+ if (cpu_is_u8500())
+ peripheralID2 = CRYP_PERIPHERAL_ID2_DB8500;
+ else if (cpu_is_u5500())
+ peripheralID2 = CRYP_PERIPHERAL_ID2_DB5500;
+
+ /* Check Peripheral and Pcell Id Register for CRYP */
+ if ((CRYP_PERIPHERAL_ID0 ==
+ readl_relaxed(&device_data->base->periphId0))
+ && (CRYP_PERIPHERAL_ID1 ==
+ readl_relaxed(&device_data->base->periphId1))
+ && (peripheralID2 ==
+ readl_relaxed(&device_data->base->periphId2))
+ && (CRYP_PERIPHERAL_ID3 ==
+ readl_relaxed(&device_data->base->periphId3))
+ && (CRYP_PCELL_ID0 ==
+ readl_relaxed(&device_data->base->pcellId0))
+ && (CRYP_PCELL_ID1 ==
+ readl_relaxed(&device_data->base->pcellId1))
+ && (CRYP_PCELL_ID2 ==
+ readl_relaxed(&device_data->base->pcellId2))
+ && (CRYP_PCELL_ID3 ==
+ readl_relaxed(&device_data->base->pcellId3))) {
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+/**
+ * cryp_activity - This routine enables/disables the cryptography function.
+ * @device_data: Pointer to the device data struct for base address.
+ * @cryp_crypen: Enable/Disable functionality
+ */
+void cryp_activity(struct cryp_device_data *device_data,
+ enum cryp_crypen cryp_crypen)
+{
+ CRYP_PUT_BITS(&device_data->base->cr,
+ cryp_crypen,
+ CRYP_CR_CRYPEN_POS,
+ CRYP_CR_CRYPEN_MASK);
+}
+
+/**
+ * cryp_flush_inoutfifo - Resets both the input and the output FIFOs
+ * @device_data: Pointer to the device data struct for base address.
+ */
+void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
+{
+ /*
+ * We always need to disable the hardware before trying to flush the
+ * FIFO. This is something that isn't written in the design
+ * specification, but we have been informed by the hardware designers
+ * that this must be done.
+ */
+ cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
+ cryp_wait_until_done(device_data);
+
+ CRYP_SET_BITS(&device_data->base->cr, CRYP_CR_FFLUSH_MASK);
+ /*
+ * CRYP_SR_INFIFO_READY_MASK is the expected value on the status
+ * register when starting a new calculation, which means Input FIFO is
+ * not full and input FIFO is empty.
+ */
+ while (readl_relaxed(&device_data->base->sr) !=
+ CRYP_SR_INFIFO_READY_MASK)
+ cpu_relax();
+}
+
+/**
+ * cryp_set_configuration - This routine sets the control register of the CRYP IP
+ * @device_data: Pointer to the device data struct for base address.
+ * @cryp_config: Pointer to the configuration parameter
+ * @control_register: The control register to be written later on.
+ */
+int cryp_set_configuration(struct cryp_device_data *device_data,
+ struct cryp_config *cryp_config,
+ u32 *control_register)
+{
+ u32 cr_for_kse;
+
+ if (NULL == device_data || NULL == cryp_config)
+ return -EINVAL;
+
+ *control_register |= (cryp_config->keysize << CRYP_CR_KEYSIZE_POS);
+
+ /* Prepare key for decryption in AES_ECB and AES_CBC mode. */
+ if ((CRYP_ALGORITHM_DECRYPT == cryp_config->algodir) &&
+ ((CRYP_ALGO_AES_ECB == cryp_config->algomode) ||
+ (CRYP_ALGO_AES_CBC == cryp_config->algomode))) {
+ cr_for_kse = *control_register;
+ /*
+ * This seems a bit odd, but it is indeed needed to set this to
+ * encrypt even though it is a decryption that we are doing. It
+ * is also mentioned in the design spec that you need to do
+ * this. After the key preparation for decryption is done you
+ * should set algodir back to decryption, which is done outside
+ * this if statement.
+ *
+ * According to the design specification we should set mode ECB
+ * during key preparation even though we might be running CBC
+ * when entering this function.
+ *
+ * Writing to KSE_ENABLED will drop CRYPEN when key preparation
+ * is done. Therefore we need to set CRYPEN again outside this
+ * if statement when running decryption.
+ */
+ cr_for_kse |= ((CRYP_ALGORITHM_ENCRYPT << CRYP_CR_ALGODIR_POS) |
+ (CRYP_ALGO_AES_ECB << CRYP_CR_ALGOMODE_POS) |
+ (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) |
+ (KSE_ENABLED << CRYP_CR_KSE_POS));
+
+ writel_relaxed(cr_for_kse, &device_data->base->cr);
+ cryp_wait_until_done(device_data);
+ }
+
+ *control_register |=
+ ((cryp_config->algomode << CRYP_CR_ALGOMODE_POS) |
+ (cryp_config->algodir << CRYP_CR_ALGODIR_POS));
+
+ return 0;
+}
+
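For reference, the caller side of this routine (cryp_setup_context() in cryp_core.c, added later in this patch) follows roughly the pattern below; this is an illustrative condensation with example variable names, not additional patch content:

	u32 cr = CRYP_CR_DEFAULT;
	int ret;

	/* Fills in keysize, algomode and algodir in cr and, for AES ECB/CBC
	 * decryption, runs the key-preparation sequence described above. */
	ret = cryp_set_configuration(device_data, &ctx->config, &cr);
	if (ret)
		return ret;

	/* Enable the block with the assembled control word. */
	writel(cr | (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
	       &device_data->base->cr);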
+/**
+ * cryp_configure_protection - set the protection bits in the CRYP logic.
+ * @device_data: Pointer to the device data struct for base address.
+ * @p_protect_config: Pointer to the protection mode and
+ * secure mode configuration
+ */
+int cryp_configure_protection(struct cryp_device_data *device_data,
+ struct cryp_protection_config *p_protect_config)
+{
+ if (NULL == p_protect_config)
+ return -EINVAL;
+
+ CRYP_WRITE_BIT(&device_data->base->cr,
+ (u32) p_protect_config->secure_access,
+ CRYP_CR_SECURE_MASK);
+ CRYP_PUT_BITS(&device_data->base->cr,
+ p_protect_config->privilege_access,
+ CRYP_CR_PRLG_POS,
+ CRYP_CR_PRLG_MASK);
+
+ return 0;
+}
+
+/**
+ * cryp_is_logic_busy - returns the busy status of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ */
+int cryp_is_logic_busy(struct cryp_device_data *device_data)
+{
+ return CRYP_TEST_BITS(&device_data->base->sr,
+ CRYP_SR_BUSY_MASK);
+}
+
+/**
+ * cryp_configure_for_dma - configures the CRYP IP for DMA operation
+ * @device_data: Pointer to the device data struct for base address.
+ * @dma_req: Specifies the DMA request type value.
+ */
+void cryp_configure_for_dma(struct cryp_device_data *device_data,
+ enum cryp_dma_req_type dma_req)
+{
+ CRYP_SET_BITS(&device_data->base->dmacr,
+ (u32) dma_req);
+}
+
+/**
+ * cryp_configure_key_values - configures the key values for CRYP operations
+ * @device_data: Pointer to the device data struct for base address.
+ * @key_reg_index: Key value index register
+ * @key_value: The key value struct
+ */
+int cryp_configure_key_values(struct cryp_device_data *device_data,
+ enum cryp_key_reg_index key_reg_index,
+ struct cryp_key_value key_value)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+
+ switch (key_reg_index) {
+ case CRYP_KEY_REG_1:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_1_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_1_r);
+ break;
+ case CRYP_KEY_REG_2:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_2_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_2_r);
+ break;
+ case CRYP_KEY_REG_3:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_3_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_3_r);
+ break;
+ case CRYP_KEY_REG_4:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_4_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_4_r);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+
+}
+
+/**
+ * cryp_configure_init_vector - configures the initialization vector register
+ * @device_data: Pointer to the device data struct for base address.
+ * @init_vector_index: Specifies the index of the init vector.
+ * @init_vector_value: Specifies the value for the init vector.
+ */
+int cryp_configure_init_vector(struct cryp_device_data *device_data,
+ enum cryp_init_vector_index
+ init_vector_index,
+ struct cryp_init_vector_value
+ init_vector_value)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+
+ switch (init_vector_index) {
+ case CRYP_INIT_VECTOR_INDEX_0:
+ writel_relaxed(init_vector_value.init_value_left,
+ &device_data->base->init_vect_0_l);
+ writel_relaxed(init_vector_value.init_value_right,
+ &device_data->base->init_vect_0_r);
+ break;
+ case CRYP_INIT_VECTOR_INDEX_1:
+ writel_relaxed(init_vector_value.init_value_left,
+ &device_data->base->init_vect_1_l);
+ writel_relaxed(init_vector_value.init_value_right,
+ &device_data->base->init_vect_1_r);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cryp_save_device_context - Store hardware registers and
+ * other device context parameters
+ * @device_data: Pointer to the device data struct for base address.
+ * @ctx: Crypto device context
+ */
+void cryp_save_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx,
+ int cryp_mode)
+{
+ enum cryp_algo_mode algomode;
+ struct cryp_register *src_reg = device_data->base;
+ struct cryp_config *config =
+ (struct cryp_config *)device_data->current_ctx;
+
+ /*
+ * Always start by disable the hardware and wait for it to finish the
+ * ongoing calculations before trying to reprogram it.
+ */
+ cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
+ cryp_wait_until_done(device_data);
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH);
+
+ if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0)
+ ctx->din = readl_relaxed(&src_reg->din);
+
+ ctx->cr = readl_relaxed(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK;
+
+ switch (config->keysize) {
+ case CRYP_KEY_SIZE_256:
+ ctx->key_4_l = readl_relaxed(&src_reg->key_4_l);
+ ctx->key_4_r = readl_relaxed(&src_reg->key_4_r);
+
+ case CRYP_KEY_SIZE_192:
+ ctx->key_3_l = readl_relaxed(&src_reg->key_3_l);
+ ctx->key_3_r = readl_relaxed(&src_reg->key_3_r);
+
+ case CRYP_KEY_SIZE_128:
+ ctx->key_2_l = readl_relaxed(&src_reg->key_2_l);
+ ctx->key_2_r = readl_relaxed(&src_reg->key_2_r);
+
+ default:
+ ctx->key_1_l = readl_relaxed(&src_reg->key_1_l);
+ ctx->key_1_r = readl_relaxed(&src_reg->key_1_r);
+ }
+
+ /* Save IV for CBC mode for both AES and DES. */
+ algomode = ((ctx->cr & CRYP_CR_ALGOMODE_MASK) >> CRYP_CR_ALGOMODE_POS);
+ if (algomode == CRYP_ALGO_TDES_CBC ||
+ algomode == CRYP_ALGO_DES_CBC ||
+ algomode == CRYP_ALGO_AES_CBC) {
+ ctx->init_vect_0_l = readl_relaxed(&src_reg->init_vect_0_l);
+ ctx->init_vect_0_r = readl_relaxed(&src_reg->init_vect_0_r);
+ ctx->init_vect_1_l = readl_relaxed(&src_reg->init_vect_1_l);
+ ctx->init_vect_1_r = readl_relaxed(&src_reg->init_vect_1_r);
+ }
+}
+
+/**
+ * cryp_restore_device_context - Restore hardware registers and
+ * other device context parameters
+ * @device_data: Pointer to the device data struct for base address.
+ * @ctx: Crypto device context
+ */
+void cryp_restore_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx)
+{
+ struct cryp_register *reg = device_data->base;
+ struct cryp_config *config =
+ (struct cryp_config *)device_data->current_ctx;
+
+ /*
+ * Fall through for all cases in the switch statement. DES is handled
+ * by the default case.
+ */
+ switch (config->keysize) {
+ case CRYP_KEY_SIZE_256:
+ writel_relaxed(ctx->key_4_l, &reg->key_4_l);
+ writel_relaxed(ctx->key_4_r, &reg->key_4_r);
+
+ case CRYP_KEY_SIZE_192:
+ writel_relaxed(ctx->key_3_l, &reg->key_3_l);
+ writel_relaxed(ctx->key_3_r, &reg->key_3_r);
+
+ case CRYP_KEY_SIZE_128:
+ writel_relaxed(ctx->key_2_l, &reg->key_2_l);
+ writel_relaxed(ctx->key_2_r, &reg->key_2_r);
+
+ default:
+ writel_relaxed(ctx->key_1_l, &reg->key_1_l);
+ writel_relaxed(ctx->key_1_r, &reg->key_1_r);
+ }
+
+ /* Restore IV for CBC mode for AES and DES. */
+ if (config->algomode == CRYP_ALGO_TDES_CBC ||
+ config->algomode == CRYP_ALGO_DES_CBC ||
+ config->algomode == CRYP_ALGO_AES_CBC) {
+ writel_relaxed(ctx->init_vect_0_l, &reg->init_vect_0_l);
+ writel_relaxed(ctx->init_vect_0_r, &reg->init_vect_0_r);
+ writel_relaxed(ctx->init_vect_1_l, &reg->init_vect_1_l);
+ writel_relaxed(ctx->init_vect_1_r, &reg->init_vect_1_r);
+ }
+}
+
+/**
+ * cryp_write_indata - This routine writes 32 bit data into the data input
+ * register of the cryptography IP.
+ * @device_data: Pointer to the device data struct for base address.
+ * @write_data: Data word to write
+ */
+int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data)
+{
+ writel_relaxed(write_data, &device_data->base->din);
+
+ return 0;
+}
+
+/**
+ * cryp_read_outdata - This routine reads the data from the data output
+ * register of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ * @read_data: Read the data from the output FIFO.
+ */
+int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data)
+{
+ *read_data = readl_relaxed(&device_data->base->dout);
+
+ return 0;
+}
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
new file mode 100644
index 00000000000..df2e25d4671
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -0,0 +1,308 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_H_
+#define _CRYP_H_
+
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/klist.h>
+#include <linux/mutex.h>
+
+#define DEV_DBG_NAME "crypX crypX:"
+
+/* CRYP enable/disable */
+enum cryp_crypen {
+ CRYP_CRYPEN_DISABLE = 0,
+ CRYP_CRYPEN_ENABLE = 1
+};
+
+/* CRYP Start Computation enable/disable */
+enum cryp_start {
+ CRYP_START_DISABLE = 0,
+ CRYP_START_ENABLE = 1
+};
+
+/* CRYP Init Signal enable/disable */
+enum cryp_init {
+ CRYP_INIT_DISABLE = 0,
+ CRYP_INIT_ENABLE = 1
+};
+
+/* Cryp State enable/disable */
+enum cryp_state {
+ CRYP_STATE_DISABLE = 0,
+ CRYP_STATE_ENABLE = 1
+};
+
+/* Key preparation bit enable */
+enum cryp_key_prep {
+ KSE_DISABLED = 0,
+ KSE_ENABLED = 1
+};
+
+/* Key size for AES */
+#define CRYP_KEY_SIZE_128 (0)
+#define CRYP_KEY_SIZE_192 (1)
+#define CRYP_KEY_SIZE_256 (2)
+
+/* AES modes */
+enum cryp_algo_mode {
+ CRYP_ALGO_TDES_ECB,
+ CRYP_ALGO_TDES_CBC,
+ CRYP_ALGO_DES_ECB,
+ CRYP_ALGO_DES_CBC,
+ CRYP_ALGO_AES_ECB,
+ CRYP_ALGO_AES_CBC,
+ CRYP_ALGO_AES_CTR,
+ CRYP_ALGO_AES_XTS
+};
+
+/* Cryp Encryption or Decryption */
+enum cryp_algorithm_dir {
+ CRYP_ALGORITHM_ENCRYPT,
+ CRYP_ALGORITHM_DECRYPT
+};
+
+/* Hardware access method */
+enum cryp_mode {
+ CRYP_MODE_POLLING,
+ CRYP_MODE_INTERRUPT,
+ CRYP_MODE_DMA
+};
+
+/**
+ * struct cryp_config -
+ * @keysize: Key size for AES
+ * @algomode: AES modes
+ * @algodir: Cryp Encryption or Decryption
+ *
+ * CRYP configuration structure to be passed to set configuration
+ */
+struct cryp_config {
+ int keysize;
+ enum cryp_algo_mode algomode;
+ enum cryp_algorithm_dir algodir;
+};
+
+/**
+ * struct cryp_protection_config -
+ * @privilege_access: Privileged cryp state enable/disable
+ * @secure_access: Secure cryp state enable/disable
+ *
+ * Protection configuration structure for setting privilege access
+ */
+struct cryp_protection_config {
+ enum cryp_state privilege_access;
+ enum cryp_state secure_access;
+};
+
+/* Cryp status */
+enum cryp_status_id {
+ CRYP_STATUS_BUSY = 0x10,
+ CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08,
+ CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04,
+ CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02,
+ CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01
+};
+
+/* Cryp DMA interface */
+enum cryp_dma_req_type {
+ CRYP_DMA_DISABLE_BOTH,
+ CRYP_DMA_ENABLE_IN_DATA,
+ CRYP_DMA_ENABLE_OUT_DATA,
+ CRYP_DMA_ENABLE_BOTH_DIRECTIONS
+};
+
+enum cryp_dma_channel {
+ CRYP_DMA_RX = 0,
+ CRYP_DMA_TX
+};
+
+/* Key registers */
+enum cryp_key_reg_index {
+ CRYP_KEY_REG_1,
+ CRYP_KEY_REG_2,
+ CRYP_KEY_REG_3,
+ CRYP_KEY_REG_4
+};
+
+/* Key register left and right */
+struct cryp_key_value {
+ u32 key_value_left;
+ u32 key_value_right;
+};
+
+/* Cryp Initialization structure */
+enum cryp_init_vector_index {
+ CRYP_INIT_VECTOR_INDEX_0,
+ CRYP_INIT_VECTOR_INDEX_1
+};
+
+/* struct cryp_init_vector_value -
+ * @init_value_left: Left 32 bits of the init vector value.
+ * @init_value_right: Right 32 bits of the init vector value.
+ */
+struct cryp_init_vector_value {
+ u32 init_value_left;
+ u32 init_value_right;
+};
+
+/**
+ * struct cryp_device_context - structure for a cryp context.
+ * @cr: control register
+ * @dmacr: DMA control register
+ * @imsc: Interrupt mask set/clear register
+ * @key_1_l: Key 1l register
+ * @key_1_r: Key 1r register
+ * @key_2_l: Key 2l register
+ * @key_2_r: Key 2r register
+ * @key_3_l: Key 3l register
+ * @key_3_r: Key 3r register
+ * @key_4_l: Key 4l register
+ * @key_4_r: Key 4r register
+ * @init_vect_0_l: Initialization vector 0l register
+ * @init_vect_0_r: Initialization vector 0r register
+ * @init_vect_1_l: Initialization vector 1l register
+ * @init_vect_1_r: Initialization vector 1r register
+ * @din: Data in register
+ * @dout: Data out register
+ *
+ * CRYP power management specific structure.
+ */
+struct cryp_device_context {
+ u32 cr;
+ u32 dmacr;
+ u32 imsc;
+
+ u32 key_1_l;
+ u32 key_1_r;
+ u32 key_2_l;
+ u32 key_2_r;
+ u32 key_3_l;
+ u32 key_3_r;
+ u32 key_4_l;
+ u32 key_4_r;
+
+ u32 init_vect_0_l;
+ u32 init_vect_0_r;
+ u32 init_vect_1_l;
+ u32 init_vect_1_r;
+
+ u32 din;
+ u32 dout;
+};
+
+struct cryp_dma {
+ dma_cap_mask_t mask;
+ struct completion cryp_dma_complete;
+ struct dma_chan *chan_cryp2mem;
+ struct dma_chan *chan_mem2cryp;
+ struct stedma40_chan_cfg *cfg_cryp2mem;
+ struct stedma40_chan_cfg *cfg_mem2cryp;
+ int sg_src_len;
+ int sg_dst_len;
+ struct scatterlist *sg_src;
+ struct scatterlist *sg_dst;
+ int nents_src;
+ int nents_dst;
+};
+
+/**
+ * struct cryp_device_data - structure for a cryp device.
+ * @base: Pointer to the hardware base address.
+ * @dev: Pointer to the device's dev structure.
+ * @clk: Pointer to the device's clock control.
+ * @pwr_regulator: Pointer to the device's power control.
+ * @power_status: Current status of the power.
+ * @ctx_lock: Lock for current_ctx.
+ * @current_ctx: Pointer to the currently allocated context.
+ * @list_node: For inclusion into a klist.
+ * @dma: The dma structure holding channel configuration.
+ * @power_state: TRUE = power state on, FALSE = power state off.
+ * @power_state_spinlock: Spinlock for power_state.
+ * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx.
+ */
+struct cryp_device_data {
+ struct cryp_register __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ struct ux500_regulator *pwr_regulator;
+ int power_status;
+ struct spinlock ctx_lock;
+ struct cryp_ctx *current_ctx;
+ struct klist_node list_node;
+ struct cryp_dma dma;
+ bool power_state;
+ struct spinlock power_state_spinlock;
+ bool restore_dev_ctx;
+};
+
+void cryp_wait_until_done(struct cryp_device_data *device_data);
+
+/* Initialization functions */
+
+int cryp_check(struct cryp_device_data *device_data);
+
+void cryp_activity(struct cryp_device_data *device_data,
+ enum cryp_crypen cryp_crypen);
+
+void cryp_flush_inoutfifo(struct cryp_device_data *device_data);
+
+int cryp_set_configuration(struct cryp_device_data *device_data,
+ struct cryp_config *cryp_config,
+ u32 *control_register);
+
+void cryp_configure_for_dma(struct cryp_device_data *device_data,
+ enum cryp_dma_req_type dma_req);
+
+int cryp_configure_key_values(struct cryp_device_data *device_data,
+ enum cryp_key_reg_index key_reg_index,
+ struct cryp_key_value key_value);
+
+int cryp_configure_init_vector(struct cryp_device_data *device_data,
+ enum cryp_init_vector_index
+ init_vector_index,
+ struct cryp_init_vector_value
+ init_vector_value);
+
+int cryp_configure_protection(struct cryp_device_data *device_data,
+ struct cryp_protection_config *p_protect_config);
+
+/* Power management functions */
+void cryp_save_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx,
+ int cryp_mode);
+
+void cryp_restore_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx);
+
+/* Data transfer and status bits. */
+int cryp_is_logic_busy(struct cryp_device_data *device_data);
+
+int cryp_get_status(struct cryp_device_data *device_data);
+
+/**
+ * cryp_write_indata - This routine writes 32 bit data into the data input
+ * register of the cryptography IP.
+ * @device_data: Pointer to the device data struct for base address.
+ * @write_data: Data to write.
+ */
+int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data);
+
+/**
+ * cryp_read_outdata - This routine reads the data from the data output
+ * register of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ * @read_data: Read the data from the output FIFO.
+ */
+int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data);
+
+#endif /* _CRYP_H_ */
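Taken together, the declarations above form the register-level API used by cryp_core.c below. A condensed, illustrative sequence for processing one block in polling mode (names such as key_value, config, cr, in_words, out_words and blocksize are placeholders here, and error handling is omitted):

	cryp_flush_inoutfifo(device_data);
	cryp_configure_key_values(device_data, CRYP_KEY_REG_1, key_value);
	cryp_set_configuration(device_data, &config, &cr);
	writel(cr | (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
	       &device_data->base->cr);

	/* Write one block (blocksize/4 words), wait, then read it back. */
	writesl(&device_data->base->din, in_words, blocksize / 4);
	cryp_wait_until_done(device_data);
	readsl(&device_data->base->dout, out_words, blocksize / 4);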
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
new file mode 100644
index 00000000000..5893abb57dc
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -0,0 +1,2313 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/crypto.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/klist.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/semaphore.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+
+#include <plat/ste_dma40.h>
+
+#include <mach/crypto-ux500.h>
+#include <mach/hardware.h>
+
+#include "cryp_p.h"
+#include "cryp.h"
+
+#define CRYP_MAX_KEY_SIZE 32
+#define BYTES_PER_WORD 4
+
+static int cryp_mode;
+static atomic_t session_id;
+
+static struct stedma40_chan_cfg *mem_to_engine;
+static struct stedma40_chan_cfg *engine_to_mem;
+
+/**
+ * struct cryp_driver_data - data specific to the driver.
+ *
+ * @device_list: A list of registered devices to choose from.
+ * @device_allocation: A semaphore initialized with the number of devices.
+ */
+struct cryp_driver_data {
+ struct klist device_list;
+ struct semaphore device_allocation;
+};
+
+/**
+ * struct cryp_ctx - Crypto context
+ * @config: Crypto mode.
+ * @key[CRYP_MAX_KEY_SIZE]: Key.
+ * @keylen: Length of key.
+ * @iv: Pointer to initialization vector.
+ * @indata: Pointer to indata.
+ * @outdata: Pointer to outdata.
+ * @datalen: Length of indata.
+ * @outlen: Length of outdata.
+ * @blocksize: Size of blocks.
+ * @updated: Updated flag.
+ * @dev_ctx: Device dependent context.
+ * @device: Pointer to the device.
+ */
+struct cryp_ctx {
+ struct cryp_config config;
+ u8 key[CRYP_MAX_KEY_SIZE];
+ u32 keylen;
+ u8 *iv;
+ const u8 *indata;
+ u8 *outdata;
+ u32 datalen;
+ u32 outlen;
+ u32 blocksize;
+ u8 updated;
+ struct cryp_device_context dev_ctx;
+ struct cryp_device_data *device;
+ u32 session_id;
+};
+
+static struct cryp_driver_data driver_data;
+
+/**
+ * uint8p_to_uint32_be - 4*uint8 to uint32 big endian
+ * @in: Data to convert.
+ */
+static inline u32 uint8p_to_uint32_be(u8 *in)
+{
+ return (u32)in[0]<<24 |
+ ((u32)in[1]<<16) |
+ ((u32)in[2]<<8) |
+ ((u32)in[3]);
+}
+
+/**
+ * swap_bits_in_byte - mirror the bits in a byte
+ * @b: the byte to be mirrored
+ *
+ * The bits are swapped the following way:
+ * Byte b includes bits 0-7; in the result, nibble 1 (n1) holds bits
+ * 0-3 and nibble 2 (n2) holds bits 4-7.
+ *
+ * Nibble 1 (n1):
+ * (The "old" (moved) bit is replaced with a zero)
+ * 1. Move bits 6 and 7, 4 positions to the right.
+ * 2. Move bits 3 and 5, 2 positions to the right.
+ * 3. Move bits 1-4, 1 position to the right.
+ *
+ * Nibble 2 (n2):
+ * 1. Move bits 0 and 1, 4 positions to the left.
+ * 2. Move bits 2 and 4, 2 positions to the left.
+ * 3. Move bits 3-6, 1 position to the left.
+ *
+ * Combine the two nibbles to a complete and swapped byte.
+ */
+
+static inline u8 swap_bits_in_byte(u8 b)
+{
+#define R_SHIFT_4_MASK (0xc0) /* Bits 6 and 7, right shift 4 */
+#define R_SHIFT_2_MASK (0x28) /* (After right shift 4) Bits 3 and 5,
+ right shift 2 */
+#define R_SHIFT_1_MASK (0x1e) /* (After right shift 2) Bits 1-4,
+ right shift 1 */
+#define L_SHIFT_4_MASK (0x03) /* Bits 0 and 1, left shift 4 */
+#define L_SHIFT_2_MASK (0x14) /* (After left shift 4) Bits 2 and 4,
+ left shift 2 */
+#define L_SHIFT_1_MASK (0x78) /* (After left shift 2) Bits 3-6,
+ left shift 1 */
+
+ u8 n1;
+ u8 n2;
+
+ /* Swap most significant nibble */
+ /* Right shift 4, bits 6 and 7 */
+ n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4));
+ /* Right shift 2, bits 3 and 5 */
+ n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
+ /* Right shift 1, bits 1-4 */
+ n1 = (n1 & R_SHIFT_1_MASK) >> 1;
+
+ /* Swap least significant nibble */
+ /* Left shift 4, bits 0 and 1 */
+ n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4));
+ /* Left shift 2, bits 2 and 4 */
+ n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
+ /* Left shift 1, bits 3-6 */
+ n2 = (n2 & L_SHIFT_1_MASK) << 1;
+
+ return n1 | n2;
+}
+
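Since the transformation above is a plain 8-bit reversal, it can be sanity checked outside the kernel. The following standalone user-space program (illustrative only, not part of the driver) reproduces the same result for one example byte:

	#include <stdio.h>
	#include <stdint.h>

	/* Generic 8-bit reversal; equivalent to swap_bits_in_byte() above. */
	static uint8_t reverse8(uint8_t b)
	{
		uint8_t r = 0;
		int i;

		for (i = 0; i < 8; i++)
			if (b & (1u << i))
				r |= 1u << (7 - i);
		return r;
	}

	int main(void)
	{
		/* 0xa2 = 1010 0010b mirrors to 0100 0101b = 0x45. */
		printf("0x%02x -> 0x%02x\n", 0xa2u, (unsigned)reverse8(0xa2));
		return 0;
	}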
+static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
+ u8 *out, u32 len)
+{
+ unsigned int i = 0;
+ int j;
+ int index = 0;
+
+ j = len - BYTES_PER_WORD;
+ while (j >= 0) {
+ for (i = 0; i < BYTES_PER_WORD; i++) {
+ index = len - j - BYTES_PER_WORD + i;
+ out[j + i] =
+ swap_bits_in_byte(in[index]);
+ }
+ j -= BYTES_PER_WORD;
+ }
+}
+
+static void add_session_id(struct cryp_ctx *ctx)
+{
+ /*
+ * We never want 0 to be a valid value, since this is the default value
+ * for the software context.
+ */
+ if (unlikely(atomic_inc_and_test(&session_id)))
+ atomic_inc(&session_id);
+
+ ctx->session_id = atomic_read(&session_id);
+}
+
+static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+{
+ struct cryp_ctx *ctx;
+ int i;
+ struct cryp_device_data *device_data;
+
+ if (param == NULL) {
+ BUG_ON(!param);
+ return IRQ_HANDLED;
+ }
+
+ /* The device is coming from the one found in hw_crypt_noxts. */
+ device_data = (struct cryp_device_data *)param;
+
+ ctx = device_data->current_ctx;
+
+ if (ctx == NULL) {
+ BUG_ON(!ctx);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
+ cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
+ "out" : "in");
+
+ if (cryp_pending_irq_src(device_data,
+ CRYP_IRQ_SRC_OUTPUT_FIFO)) {
+ if (ctx->outlen / ctx->blocksize > 0) {
+ for (i = 0; i < ctx->blocksize / 4; i++) {
+ cryp_read_outdata(device_data,
+ (u32 *)ctx->outdata);
+ ctx->outdata += 4;
+ ctx->outlen -= 4;
+ }
+
+ if (ctx->outlen == 0) {
+ cryp_disable_irq_src(device_data,
+ CRYP_IRQ_SRC_OUTPUT_FIFO);
+ }
+ }
+ } else if (cryp_pending_irq_src(device_data,
+ CRYP_IRQ_SRC_INPUT_FIFO)) {
+ if (ctx->datalen / ctx->blocksize > 0) {
+ for (i = 0 ; i < ctx->blocksize / 4; i++) {
+ cryp_write_indata(device_data,
+ *((u32 *)ctx->indata));
+ ctx->indata += 4;
+ ctx->datalen -= 4;
+ }
+
+ if (ctx->datalen == 0)
+ cryp_disable_irq_src(device_data,
+ CRYP_IRQ_SRC_INPUT_FIFO);
+
+ if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
+ CRYP_PUT_BITS(&device_data->base->cr,
+ CRYP_START_ENABLE,
+ CRYP_CR_START_POS,
+ CRYP_CR_START_MASK);
+
+ cryp_wait_until_done(device_data);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mode_is_aes(enum cryp_algo_mode mode)
+{
+ return (CRYP_ALGO_AES_ECB == mode) ||
+ (CRYP_ALGO_AES_CBC == mode) ||
+ (CRYP_ALGO_AES_CTR == mode) ||
+ (CRYP_ALGO_AES_XTS == mode);
+}
+
+static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
+ enum cryp_init_vector_index index)
+{
+ struct cryp_init_vector_value vector_value;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ vector_value.init_value_left = left;
+ vector_value.init_value_right = right;
+
+ return cryp_configure_init_vector(device_data,
+ index,
+ vector_value);
+}
+
+static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
+{
+ int i;
+ int status = 0;
+ int num_of_regs = ctx->blocksize / 8;
+ u32 iv[AES_BLOCK_SIZE / 4];
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ /*
+ * Since we loop on num_of_regs we need a check in case someone
+ * provides an incorrect blocksize, which would force calling
+ * cfg_iv with an out-of-range vector index, which is an error.
+ */
+ if (num_of_regs > 2) {
+ dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
+ __func__, ctx->blocksize);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ctx->blocksize / 4; i++)
+ iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);
+
+ for (i = 0; i < num_of_regs; i++) {
+ status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
+ (enum cryp_init_vector_index) i);
+ if (status != 0)
+ return status;
+ }
+ return status;
+}
+
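The loop above packs the caller's byte-oriented IV into the two 64-bit IV registers, four bytes per 32-bit half, big-endian within each word. A standalone illustration of that packing for a 16-byte AES IV (example data only, not part of the driver):

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	/* Same big-endian packing as uint8p_to_uint32_be() above. */
	static uint32_t be32(const uint8_t *p)
	{
		return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
		       (uint32_t)p[2] << 8 | (uint32_t)p[3];
	}

	int main(void)
	{
		uint8_t iv[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
				   0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
		int reg;

		/* IV register 0 takes bytes 0-7, register 1 takes bytes 8-15. */
		for (reg = 0; reg < 2; reg++)
			printf("reg %d: left=0x%08" PRIx32 " right=0x%08" PRIx32 "\n",
			       reg, be32(iv + reg * 8), be32(iv + reg * 8 + 4));
		return 0;
	}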
+static int set_key(struct cryp_device_data *device_data,
+ u32 left_key,
+ u32 right_key,
+ enum cryp_key_reg_index index)
+{
+ struct cryp_key_value key_value;
+ int cryp_error;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ key_value.key_value_left = left_key;
+ key_value.key_value_right = right_key;
+
+ cryp_error = cryp_configure_key_values(device_data,
+ index,
+ key_value);
+ if (cryp_error != 0)
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_configure_key_values() failed!", __func__);
+
+ return cryp_error;
+}
+
+static int cfg_keys(struct cryp_ctx *ctx)
+{
+ int i;
+ int num_of_regs = ctx->keylen / 8;
+ u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
+ int cryp_error = 0;
+
+ dev_dbg(ctx->device->dev, "[%s]", __func__);
+
+ if (mode_is_aes(ctx->config.algomode)) {
+ swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
+ (u8 *)swapped_key,
+ ctx->keylen);
+ } else {
+ for (i = 0; i < ctx->keylen / 4; i++)
+ swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
+ }
+
+ for (i = 0; i < num_of_regs; i++) {
+ cryp_error = set_key(ctx->device,
+ *(((u32 *)swapped_key)+i*2),
+ *(((u32 *)swapped_key)+i*2+1),
+ (enum cryp_key_reg_index) i);
+
+ if (cryp_error != 0) {
+ dev_err(ctx->device->dev, "[%s]: set_key() failed!",
+ __func__);
+ return cryp_error;
+ }
+ }
+ return cryp_error;
+}
+
+static int cryp_setup_context(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ u32 control_register = CRYP_CR_DEFAULT;
+
+ switch (cryp_mode) {
+ case CRYP_MODE_INTERRUPT:
+ writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
+ break;
+
+ case CRYP_MODE_DMA:
+ writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
+ break;
+
+ default:
+ break;
+ }
+
+ if (ctx->updated == 0) {
+ cryp_flush_inoutfifo(device_data);
+ if (cfg_keys(ctx) != 0) {
+ dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
+ __func__);
+ return -EPERM;
+ }
+
+ if ((ctx->iv) &&
+ (CRYP_ALGO_AES_ECB != ctx->config.algomode) &&
+ (CRYP_ALGO_DES_ECB != ctx->config.algomode) &&
+ (CRYP_ALGO_TDES_ECB != ctx->config.algomode)) {
+ if (cfg_ivs(device_data, ctx) != 0)
+ return -EPERM;
+ }
+
+ cryp_set_configuration(device_data, &ctx->config,
+ &control_register);
+ add_session_id(ctx);
+ } else if (ctx->updated == 1 &&
+ ctx->session_id != atomic_read(&session_id)) {
+ cryp_flush_inoutfifo(device_data);
+ cryp_restore_device_context(device_data, &ctx->dev_ctx);
+
+ add_session_id(ctx);
+ control_register = ctx->dev_ctx.cr;
+ } else
+ control_register = ctx->dev_ctx.cr;
+
+ writel(control_register |
+ (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
+ &device_data->base->cr);
+
+ return 0;
+}
+
+static int cryp_get_device_data(struct cryp_ctx *ctx,
+ struct cryp_device_data **device_data)
+{
+ int ret;
+ struct klist_iter device_iterator;
+ struct klist_node *device_node;
+ struct cryp_device_data *local_device_data = NULL;
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ /* Wait until a device is available */
+ ret = down_interruptible(&driver_data.device_allocation);
+ if (ret)
+ return ret; /* Interrupted */
+
+ /* Select a device */
+ klist_iter_init(&driver_data.device_list, &device_iterator);
+
+ device_node = klist_next(&device_iterator);
+ while (device_node) {
+ local_device_data = container_of(device_node,
+ struct cryp_device_data, list_node);
+ spin_lock(&local_device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (local_device_data->current_ctx) {
+ device_node = klist_next(&device_iterator);
+ } else {
+ local_device_data->current_ctx = ctx;
+ ctx->device = local_device_data;
+ spin_unlock(&local_device_data->ctx_lock);
+ break;
+ }
+ spin_unlock(&local_device_data->ctx_lock);
+ }
+ klist_iter_exit(&device_iterator);
+
+ if (!device_node) {
+ /*
+ * No free device found.
+ * Since we allocated a device with down_interruptible, this
+ * should not be able to happen.
+ * The number of available devices, which is tracked by
+ * device_allocation, is therefore decremented by not doing
+ * an up(device_allocation).
+ */
+ return -EBUSY;
+ }
+
+ *device_data = local_device_data;
+
+ return 0;
+}
+
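Every acquisition through cryp_get_device_data() must be paired with a release; the release is open coded at each call site further down (ablk_crypt(), ablk_dma_crypt(), cryp_hw_calculate()). The pattern, condensed here purely for illustration:

	struct cryp_device_data *device_data;
	int ret;

	ret = cryp_get_device_data(ctx, &device_data);	/* takes the semaphore */
	if (ret)
		return ret;

	/* ... use the device ... */

	/* Release: drop the ownership and give the semaphore back. */
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx = NULL;
	ctx->device = NULL;
	spin_unlock(&device_data->ctx_lock);
	up(&driver_data.device_allocation);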
+static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
+ struct device *dev)
+{
+ dma_cap_zero(device_data->dma.mask);
+ dma_cap_set(DMA_SLAVE, device_data->dma.mask);
+
+ device_data->dma.cfg_mem2cryp = mem_to_engine;
+ device_data->dma.chan_mem2cryp =
+ dma_request_channel(device_data->dma.mask,
+ stedma40_filter,
+ device_data->dma.cfg_mem2cryp);
+
+ device_data->dma.cfg_cryp2mem = engine_to_mem;
+ device_data->dma.chan_cryp2mem =
+ dma_request_channel(device_data->dma.mask,
+ stedma40_filter,
+ device_data->dma.cfg_cryp2mem);
+
+ init_completion(&device_data->dma.cryp_dma_complete);
+}
+
+static void cryp_dma_out_callback(void *data)
+{
+ struct cryp_ctx *ctx = (struct cryp_ctx *) data;
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ complete(&ctx->device->dma.cryp_dma_complete);
+}
+
+static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
+ struct scatterlist *sg,
+ int len,
+ enum dma_data_direction direction)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = NULL;
+ dma_cookie_t cookie;
+
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ if (unlikely(!IS_ALIGNED((u32)sg, 4))) {
+ dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
+ "aligned! Addr: 0x%08x", __func__, (u32)sg);
+ return -EFAULT;
+ }
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ channel = ctx->device->dma.chan_mem2cryp;
+ ctx->device->dma.sg_src = sg;
+ ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg_src,
+ ctx->device->dma.nents_src,
+ direction);
+
+ if (!ctx->device->dma.sg_src_len) {
+ dev_dbg(ctx->device->dev,
+ "[%s]: Could not map the sg list (TO_DEVICE)",
+ __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(TO_DEVICE)", __func__);
+
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg_src,
+ ctx->device->dma.sg_src_len,
+ direction,
+ DMA_CTRL_ACK);
+ break;
+
+ case DMA_FROM_DEVICE:
+ channel = ctx->device->dma.chan_cryp2mem;
+ ctx->device->dma.sg_dst = sg;
+ ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg_dst,
+ ctx->device->dma.nents_dst,
+ direction);
+
+ if (!ctx->device->dma.sg_dst_len) {
+ dev_dbg(ctx->device->dev,
+ "[%s]: Could not map the sg list "
+ "(FROM_DEVICE)", __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(FROM_DEVICE)", __func__);
+
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg_dst,
+ ctx->device->dma.sg_dst_len,
+ direction,
+ DMA_CTRL_ACK |
+ DMA_PREP_INTERRUPT);
+
+ desc->callback = cryp_dma_out_callback;
+ desc->callback_param = ctx;
+ break;
+
+ default:
+ dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
+ __func__);
+ return -EFAULT;
+ }
+
+ cookie = desc->tx_submit(desc);
+ dma_async_issue_pending(channel);
+
+ return 0;
+}
+
+static void cryp_dma_done(struct cryp_ctx *ctx)
+{
+ struct dma_chan *chan;
+
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ chan = ctx->device->dma.chan_mem2cryp;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
+ ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
+
+ chan = ctx->device->dma.chan_cryp2mem;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
+ ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
+}
+
+static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
+ int len)
+{
+ int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
+static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
+{
+ int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
+static void cryp_polling_mode(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ int len = ctx->blocksize / BYTES_PER_WORD;
+ int remaining_length = ctx->datalen;
+ u32 *indata = (u32 *)ctx->indata;
+ u32 *outdata = (u32 *)ctx->outdata;
+
+ while (remaining_length > 0) {
+ writesl(&device_data->base->din, indata, len);
+ indata += len;
+ remaining_length -= (len * BYTES_PER_WORD);
+ cryp_wait_until_done(device_data);
+
+ readsl(&device_data->base->dout, outdata, len);
+ outdata += len;
+ cryp_wait_until_done(device_data);
+ }
+}
+
+static int cryp_disable_power(struct device *dev,
+ struct cryp_device_data *device_data,
+ bool save_device_context)
+{
+ int ret = 0;
+
+ dev_dbg(dev, "[%s]", __func__);
+
+ spin_lock(&device_data->power_state_spinlock);
+ if (!device_data->power_state)
+ goto out;
+
+ spin_lock(&device_data->ctx_lock);
+ if (save_device_context && device_data->current_ctx) {
+ cryp_save_device_context(device_data,
+ &device_data->current_ctx->dev_ctx,
+ cryp_mode);
+ device_data->restore_dev_ctx = true;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ clk_disable(device_data->clk);
+ ret = ux500_regulator_atomic_disable(device_data->pwr_regulator);
+ if (ret)
+ dev_err(dev, "[%s]: "
+ "regulator_disable() failed!",
+ __func__);
+
+ device_data->power_state = false;
+
+out:
+ spin_unlock(&device_data->power_state_spinlock);
+
+ return ret;
+}
+
+static int cryp_enable_power(
+ struct device *dev,
+ struct cryp_device_data *device_data,
+ bool restore_device_context)
+{
+ int ret = 0;
+
+ dev_dbg(dev, "[%s]", __func__);
+
+ spin_lock(&device_data->power_state_spinlock);
+ if (!device_data->power_state) {
+ ret = ux500_regulator_atomic_enable(device_data->pwr_regulator);
+ if (ret) {
+ dev_err(dev, "[%s]: regulator_enable() failed!",
+ __func__);
+ goto out;
+ }
+
+ ret = clk_enable(device_data->clk);
+ if (ret) {
+ dev_err(dev, "[%s]: clk_enable() failed!",
+ __func__);
+ ux500_regulator_atomic_disable(
+ device_data->pwr_regulator);
+ goto out;
+ }
+ device_data->power_state = true;
+ }
+
+ if (device_data->restore_dev_ctx) {
+ spin_lock(&device_data->ctx_lock);
+ if (restore_device_context && device_data->current_ctx) {
+ device_data->restore_dev_ctx = false;
+ cryp_restore_device_context(device_data,
+ &device_data->current_ctx->dev_ctx);
+ }
+ spin_unlock(&device_data->ctx_lock);
+ }
+out:
+ spin_unlock(&device_data->power_state_spinlock);
+
+ return ret;
+}
+
+static int hw_crypt_noxts(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ int ret = 0;
+
+ const u8 *indata = ctx->indata;
+ u8 *outdata = ctx->outdata;
+ u32 datalen = ctx->datalen;
+ u32 outlen = datalen;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->outlen = ctx->datalen;
+
+ if (unlikely(!IS_ALIGNED((u32)indata, 4))) {
+ pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
+ "0x%08x", __func__, (u32)indata);
+ return -EINVAL;
+ }
+
+ ret = cryp_setup_context(ctx, device_data);
+
+ if (ret)
+ goto out;
+
+ if (cryp_mode == CRYP_MODE_INTERRUPT) {
+ cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
+ CRYP_IRQ_SRC_OUTPUT_FIFO);
+
+ /*
+ * ctx->outlen is decremented in the cryp_interrupt_handler
+ * function. We had to add cpu_relax() (barrier) to make sure
+ * that gcc didn't optimize away this variable.
+ */
+ while (ctx->outlen > 0)
+ cpu_relax();
+ } else if (cryp_mode == CRYP_MODE_POLLING ||
+ cryp_mode == CRYP_MODE_DMA) {
+ /*
+ * The reason DMA ends up in this branch is that when running
+ * cryp_mode = CRYP_MODE_DMA we only use separate DMA routines
+ * for cipher/plaintext larger than the blocksize; for the
+ * normal CRYPTO_ALG_TYPE_CIPHER path we still use polling
+ * mode, since the overhead of the DMA setup would eat up the
+ * benefit of using it.
+ */
+ cryp_polling_mode(ctx, device_data);
+ } else {
+ dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
+ __func__);
+ ret = -EPERM;
+ goto out;
+ }
+
+ cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
+ ctx->updated = 1;
+
+out:
+ ctx->indata = indata;
+ ctx->outdata = outdata;
+ ctx->datalen = datalen;
+ ctx->outlen = outlen;
+
+ return ret;
+}
+
+static int get_nents(struct scatterlist *sg, int nbytes)
+{
+ int nents = 0;
+
+ while (nbytes > 0) {
+ nbytes -= sg->length;
+ sg = scatterwalk_sg_next(sg);
+ nents++;
+ }
+
+ return nents;
+}
+
+static int ablk_dma_crypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_device_data *device_data;
+
+ int bytes_written = 0;
+ int bytes_read = 0;
+ int ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->datalen = areq->nbytes;
+ ctx->outlen = areq->nbytes;
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ ret = cryp_setup_context(ctx, device_data);
+ if (ret)
+ goto out_power;
+
+ /* We have the device now, so store the nents in the dma struct. */
+ ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
+ ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);
+
+ /* Enable DMA in- and output. */
+ cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS);
+
+ bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
+ bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);
+
+ wait_for_completion(&ctx->device->dma.cryp_dma_complete);
+ cryp_dma_done(ctx);
+
+ cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
+ ctx->updated = 1;
+
+out_power:
+ if (cryp_disable_power(device_data->dev, device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_disable_power() failed!", __func__);
+
+out:
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * cryp_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+
+ if (unlikely(bytes_written != bytes_read))
+ return -EPERM;
+
+ return 0;
+}
+
+static int ablk_crypt(struct ablkcipher_request *areq)
+{
+ struct ablkcipher_walk walk;
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_device_data *device_data;
+ unsigned long src_paddr;
+ unsigned long dst_paddr;
+ int ret;
+ int nbytes;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ if (ret)
+ goto out;
+
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_enable_power() failed!", __func__);
+ goto out_power;
+ }
+
+ ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
+ ret = ablkcipher_walk_phys(areq, &walk);
+
+ if (ret) {
+ pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
+ __func__);
+ goto out_power;
+ }
+
+ while ((nbytes = walk.nbytes) > 0) {
+ ctx->iv = walk.iv;
+ src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
+ ctx->indata = phys_to_virt(src_paddr);
+
+ dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
+ ctx->outdata = phys_to_virt(dst_paddr);
+
+ ctx->datalen = nbytes - (nbytes % ctx->blocksize);
+
+ ret = hw_crypt_noxts(ctx, device_data);
+ if (ret)
+ goto out_power;
+
+ nbytes -= ctx->datalen;
+ ret = ablkcipher_walk_done(areq, &walk, nbytes);
+ if (ret)
+ goto out_power;
+ }
+ ablkcipher_walk_complete(&walk);
+
+out_power:
+ if (cryp_disable_power(device_data->dev, device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_disable_power() failed!", __func__);
+out:
+ /* Release the device */
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * cryp_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+
+ return ret;
+}
+
+static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->config.keysize = CRYP_KEY_SIZE_128;
+ break;
+
+ case AES_KEYSIZE_192:
+ ctx->config.keysize = CRYP_KEY_SIZE_192;
+ break;
+
+ case AES_KEYSIZE_256:
+ ctx->config.keysize = CRYP_KEY_SIZE_256;
+ break;
+
+ default:
+ pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+
+ return 0;
+}
+
+static int aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ if (unlikely(!IS_ALIGNED((u32)key, 4))) {
+ dev_err(ctx->device->dev, "[%s]: key isn't aligned! Addr: "
+ "0x%08x", __func__, (u32)key);
+ return -EFAULT;
+ }
+
+ /* For CTR mode */
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256) {
+
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s] invalid keylen", __func__);
+ return -EINVAL;
+ }
+
+ if (keylen == AES_KEYSIZE_128)
+ ctx->config.keysize = CRYP_KEY_SIZE_128;
+ else if (keylen == AES_KEYSIZE_192)
+ ctx->config.keysize = CRYP_KEY_SIZE_192;
+ else if (keylen == AES_KEYSIZE_256)
+ ctx->config.keysize = CRYP_KEY_SIZE_256;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+ if (keylen != DES_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ int ret;
+ u32 tmp[DES_EXPKEY_WORDS];
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ if (keylen != DES_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+ const u32 *K = (const u32 *)key;
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ int i, ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+ if (keylen != DES3_EDE_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Checking key interdependency for weak key detection. */
+ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: "
+ "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+ return -EINVAL;
+ }
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ const u32 *K = (const u32 *)key;
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ int i, ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ if (keylen != DES3_EDE_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 3; i++) {
+ ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: "
+ "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+ return -EINVAL;
+ }
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int cryp_hw_calculate(struct cryp_ctx *ctx)
+{
+ struct cryp_device_data *device_data;
+ int ret;
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ if (ret)
+ goto out;
+
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ if (hw_crypt_noxts(ctx, device_data))
+ dev_err(device_data->dev, "[%s]: hw_crypt_noxts() failed!",
+ __func__);
+
+out:
+ if (cryp_disable_power(device_data->dev, device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_disable_power() failed!", __func__);
+
+ /* Release the device */
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * cryp_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+
+ return ret;
+}
+
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des3_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des3_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static int aes_ecb_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_ecb_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_cbc_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CBC;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ /* Only DMA for ablkcipher, since givcipher not yet supported */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_cbc_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CBC;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ /* Only DMA for ablkcipher, since givcipher not yet supported */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_ctr_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CTR;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ /* Only DMA for ablkcipher, since givcipher not yet supported */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_ctr_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CTR;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ /* Only DMA for ablkcipher, since givcipher not yet supported */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int des_ecb_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des_ecb_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des_cbc_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_CBC;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des_cbc_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_CBC;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_ecb_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_ecb_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_cbc_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_CBC;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_cbc_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_CBC;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+/**
+ * struct crypto_alg aes_alg
+ */
+static struct crypto_alg aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = aes_setkey,
+ .cia_encrypt = aes_encrypt,
+ .cia_decrypt = aes_decrypt
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des_alg
+ */
+static struct crypto_alg des_alg = {
+ .cra_name = "des",
+ .cra_driver_name = "des-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = DES_KEY_SIZE,
+ .cia_max_keysize = DES_KEY_SIZE,
+ .cia_setkey = des_setkey,
+ .cia_encrypt = des_encrypt,
+ .cia_decrypt = des_decrypt
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des3_alg
+ */
+static struct crypto_alg des3_alg = {
+ .cra_name = "des3_ede",
+ .cra_driver_name = "des3_ede-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des3_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = DES3_EDE_KEY_SIZE,
+ .cia_max_keysize = DES3_EDE_KEY_SIZE,
+ .cia_setkey = des3_setkey,
+ .cia_encrypt = des3_encrypt,
+ .cia_decrypt = des3_decrypt
+ }
+ }
+};
+
+/**
+ * struct crypto_alg aes_ecb_alg
+ */
+static struct crypto_alg aes_ecb_alg = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = aes_ecb_encrypt,
+ .decrypt = aes_ecb_decrypt,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg aes_cbc_alg
+ */
+static struct crypto_alg aes_cbc_alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = aes_cbc_encrypt,
+ .decrypt = aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg aes_ctr_alg
+ */
+static struct crypto_alg aes_ctr_alg = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_ctr_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = aes_ctr_encrypt,
+ .decrypt = aes_ctr_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des_ecb_alg
+ */
+static struct crypto_alg des_ecb_alg = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_ablkcipher_setkey,
+ .encrypt = des_ecb_encrypt,
+ .decrypt = des_ecb_decrypt,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des_cbc_alg
+ */
+static struct crypto_alg des_cbc_alg = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_ablkcipher_setkey,
+ .encrypt = des_cbc_encrypt,
+ .decrypt = des_cbc_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des3_ecb_alg
+ */
+static struct crypto_alg des3_ecb_alg = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3_ede-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des3_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ablkcipher_setkey,
+ .encrypt = des3_ecb_encrypt,
+ .decrypt = des3_ecb_decrypt,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des3_cbc_alg
+ */
+static struct crypto_alg des3_cbc_alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3_ede-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des3_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ablkcipher_setkey,
+ .encrypt = des3_cbc_encrypt,
+ .decrypt = des3_cbc_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg *ux500_cryp_algs[] - All algorithms provided by this driver.
+ */
+static struct crypto_alg *ux500_cryp_algs[] = {
+ &aes_alg,
+ &des_alg,
+ &des3_alg,
+ &aes_ecb_alg,
+ &aes_cbc_alg,
+ &aes_ctr_alg,
+ &des_ecb_alg,
+ &des_cbc_alg,
+ &des3_ecb_alg,
+ &des3_cbc_alg,
+};
+
+/**
+ * cryp_algs_register_all - Register all supported cipher algorithms.
+ */
+static int cryp_algs_register_all(void)
+{
+ int ret;
+ int i;
+ int count;
+
+ pr_debug("[%s]", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(ux500_cryp_algs); i++) {
+ ret = crypto_register_alg(ux500_cryp_algs[i]);
+ if (ret) {
+ count = i;
+ pr_err("[%s] alg registration failed",
+ ux500_cryp_algs[i]->cra_driver_name);
+ goto unreg;
+ }
+ }
+ return 0;
+unreg:
+ for (i = 0; i < count; i++)
+ crypto_unregister_alg(ux500_cryp_algs[i]);
+ return ret;
+}
+
+/**
+ * cryp_algs_unregister_all - Unregister all previously registered algorithms.
+ */
+static void cryp_algs_unregister_all(void)
+{
+ int i;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(ux500_cryp_algs); i++)
+ crypto_unregister_alg(ux500_cryp_algs[i]);
+}
+
+static int ux500_cryp_probe(struct platform_device *pdev)
+{
+ int ret;
+ int cryp_error = 0;
+ struct resource *res = NULL;
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+ struct cryp_protection_config prot = {
+ .privilege_access = CRYP_STATE_ENABLE
+ };
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "[%s]", __func__);
+ device_data = kzalloc(sizeof(struct cryp_device_data), GFP_ATOMIC);
+ if (!device_data) {
+ dev_err(dev, "[%s]: kzalloc() failed!", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ device_data->dev = dev;
+ device_data->current_ctx = NULL;
+
+ /* Grab the DMA configuration from platform data. */
+ mem_to_engine = &((struct cryp_platform_data *)
+ dev->platform_data)->mem_to_engine;
+ engine_to_mem = &((struct cryp_platform_data *)
+ dev->platform_data)->engine_to_mem;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "[%s]: platform_get_resource() failed",
+ __func__);
+ ret = -ENODEV;
+ goto out_kfree;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL) {
+ dev_err(dev, "[%s]: request_mem_region() failed",
+ __func__);
+ ret = -EBUSY;
+ goto out_kfree;
+ }
+
+ device_data->base = ioremap(res->start, resource_size(res));
+ if (!device_data->base) {
+ dev_err(dev, "[%s]: ioremap failed!", __func__);
+ ret = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ spin_lock_init(&device_data->ctx_lock);
+ spin_lock_init(&device_data->power_state_spinlock);
+
+ /* Enable power for CRYP hardware block */
+ device_data->pwr_regulator = ux500_regulator_get(&pdev->dev);
+ if (IS_ERR(device_data->pwr_regulator)) {
+ dev_err(dev, "[%s]: could not get cryp regulator", __func__);
+ ret = PTR_ERR(device_data->pwr_regulator);
+ device_data->pwr_regulator = NULL;
+ goto out_unmap;
+ }
+
+ /* Enable the clk for CRYP hardware block */
+ device_data->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(device_data->clk)) {
+ dev_err(dev, "[%s]: clk_get() failed!", __func__);
+ ret = PTR_ERR(device_data->clk);
+ goto out_regulator;
+ }
+
+ /* Enable device power (and clock) */
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
+ goto out_clk;
+ }
+
+ cryp_error = cryp_check(device_data);
+ if (cryp_error != 0) {
+ dev_err(dev, "[%s]: cryp_init() failed!", __func__);
+ ret = -EINVAL;
+ goto out_power;
+ }
+
+ cryp_error = cryp_configure_protection(device_data, &prot);
+ if (cryp_error != 0) {
+ dev_err(dev, "[%s]: cryp_configure_protection() failed!",
+ __func__);
+ ret = -EINVAL;
+ goto out_power;
+ }
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq) {
+ dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
+ __func__);
+ ret = -ENODEV;
+ goto out_power;
+ }
+
+ ret = request_irq(res_irq->start,
+ cryp_interrupt_handler,
+ 0,
+ "cryp1",
+ device_data);
+ if (ret) {
+ dev_err(dev, "[%s]: Unable to request IRQ", __func__);
+ goto out_power;
+ }
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ cryp_dma_setup_channel(device_data, dev);
+
+ platform_set_drvdata(pdev, device_data);
+
+ /* Put the new device into the device list... */
+ klist_add_tail(&device_data->list_node, &driver_data.device_list);
+
+ /* ... and signal that a new device is available. */
+ up(&driver_data.device_allocation);
+
+ atomic_set(&session_id, 1);
+
+ ret = cryp_algs_register_all();
+ if (ret) {
+ dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
+ __func__);
+ goto out_power;
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(dev, "[%s]: cryp_disable_power() failed!", __func__);
+
+ return 0;
+
+out_power:
+ cryp_disable_power(&pdev->dev, device_data, false);
+
+out_clk:
+ clk_put(device_data->clk);
+
+out_regulator:
+ ux500_regulator_put(device_data->pwr_regulator);
+
+out_unmap:
+ iounmap(device_data->base);
+
+out_free_mem:
+ release_mem_region(res->start, resource_size(res));
+
+out_kfree:
+ kfree(device_data);
+out:
+ return ret;
+}
+
+static int ux500_cryp_remove(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Try to decrease the number of available devices. */
+ if (down_trylock(&driver_data.device_allocation))
+ return -EBUSY;
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (device_data->current_ctx) {
+ /* The device is busy */
+ spin_unlock(&device_data->ctx_lock);
+ /* Return the device to the pool. */
+ up(&driver_data.device_allocation);
+ return -EBUSY;
+ }
+
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ cryp_algs_unregister_all();
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ unavailable",
+ __func__);
+ else {
+ disable_irq(res_irq->start);
+ free_irq(res_irq->start, device_data);
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
+ __func__);
+
+ clk_put(device_data->clk);
+ ux500_regulator_put(device_data->pwr_regulator);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(device_data);
+
+ return 0;
+}
+
+static void ux500_cryp_shutdown(struct platform_device *pdev)
+{
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return;
+ }
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (!device_data->current_ctx) {
+ if (down_trylock(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: Cryp still in use! "
+ "Shutting down anyway...", __func__);
+ /*
+ * Allocate the device by setting current_ctx to a
+ * non-NULL (dummy) value, to avoid usage during
+ * context switching.
+ */
+ device_data->current_ctx++;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ cryp_algs_unregister_all();
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ unavailable",
+ __func__);
+ else {
+ disable_irq(res_irq->start);
+ free_irq(res_irq->start, device_data);
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
+ __func__);
+
+}
+
+static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret;
+ struct cryp_device_data *device_data;
+ struct resource *res_irq;
+ struct cryp_ctx *temp_ctx = NULL;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ /* Handle state? */
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ unavailable",
+ __func__);
+ else
+ disable_irq(res_irq->start);
+
+ spin_lock(&device_data->ctx_lock);
+ if (!device_data->current_ctx)
+ device_data->current_ctx++;
+ spin_unlock(&device_data->ctx_lock);
+
+ if (device_data->current_ctx == ++temp_ctx) {
+ if (down_interruptible(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
+ "failed", __func__);
+ ret = cryp_disable_power(&pdev->dev, device_data, false);
+
+ } else
+ ret = cryp_disable_power(&pdev->dev, device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__);
+
+ return ret;
+}
+
+static int ux500_cryp_resume(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct cryp_device_data *device_data;
+ struct resource *res_irq;
+ struct cryp_ctx *temp_ctx = NULL;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
+ if (device_data->current_ctx == ++temp_ctx)
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+
+ if (!device_data->current_ctx)
+ up(&driver_data.device_allocation);
+ else
+ ret = cryp_enable_power(&pdev->dev, device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!",
+ __func__);
+ else {
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res_irq)
+ enable_irq(res_irq->start);
+ }
+
+ return ret;
+}
+
+static struct platform_driver cryp_driver = {
+ .probe = ux500_cryp_probe,
+ .remove = ux500_cryp_remove,
+ .shutdown = ux500_cryp_shutdown,
+ .suspend = ux500_cryp_suspend,
+ .resume = ux500_cryp_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cryp1"
+ }
+};
+
+static int __init ux500_cryp_mod_init(void)
+{
+ pr_debug("[%s] is called!", __func__);
+ klist_init(&driver_data.device_list, NULL, NULL);
+ /* Initialize the semaphore to 0 devices (locked state) */
+ sema_init(&driver_data.device_allocation, 0);
+ return platform_driver_register(&cryp_driver);
+}
+
+static void __exit ux500_cryp_mod_fini(void)
+{
+ pr_debug("[%s] is called!", __func__);
+ platform_driver_unregister(&cryp_driver);
+ return;
+}
+
+module_init(ux500_cryp_mod_init);
+module_exit(ux500_cryp_mod_fini);
+
+module_param(cryp_mode, int, 0);
+
+MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
+MODULE_ALIAS("aes-all");
+MODULE_ALIAS("des-all");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c
new file mode 100644
index 00000000000..08d291cdbe6
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.c
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitmap.h>
+#include <linux/device.h>
+
+#include "cryp.h"
+#include "cryp_p.h"
+#include "cryp_irq.h"
+#include "cryp_irqp.h"
+
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ u32 i;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ i = readl_relaxed(&device_data->base->imsc);
+ i = i | irq_src;
+ writel_relaxed(i, &device_data->base->imsc);
+}
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ u32 i;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ i = readl_relaxed(&device_data->base->imsc);
+ i = i & ~irq_src;
+ writel_relaxed(i, &device_data->base->imsc);
+}
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ return (readl_relaxed(&device_data->base->mis) & irq_src) > 0;
+}
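+
+/*
+ * Illustrative usage sketch (not part of this file): the helpers above
+ * simply set and clear bits in the IMSC mask register, so a caller that
+ * wants both FIFO interrupt sources unmasked before a transfer could do
+ * (device_data is assumed to be a valid, probed device):
+ *
+ *	cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
+ *					 CRYP_IRQ_SRC_OUTPUT_FIFO);
+ */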
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h
new file mode 100644
index 00000000000..5a7837f1b8f
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_IRQ_H_
+#define _CRYP_IRQ_H_
+
+#include "cryp.h"
+
+enum cryp_irq_src_id {
+ CRYP_IRQ_SRC_INPUT_FIFO = 0x1,
+ CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2,
+ CRYP_IRQ_SRC_ALL = 0x3
+};
+
+/**
+ * M0 Functions
+ */
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+#endif /* _CRYP_IRQ_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
new file mode 100644
index 00000000000..8b339cc34bf
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irqp.h
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __CRYP_IRQP_H_
+#define __CRYP_IRQP_H_
+
+#include "cryp_irq.h"
+
+/**
+ *
+ * CRYP Registers - Offset mapping
+ * +-----------------+
+ * 00h | CRYP_CR | Configuration register
+ * +-----------------+
+ * 04h | CRYP_SR | Status register
+ * +-----------------+
+ * 08h | CRYP_DIN | Data In register
+ * +-----------------+
+ * 0ch | CRYP_DOUT | Data out register
+ * +-----------------+
+ * 10h | CRYP_DMACR | DMA control register
+ * +-----------------+
+ * 14h | CRYP_IMSC | IMSC
+ * +-----------------+
+ * 18h | CRYP_RIS | Raw interrupt status
+ * +-----------------+
+ * 1ch | CRYP_MIS | Masked interrupt status.
+ * +-----------------+
+ * Key registers
+ * IVR registers
+ * Peripheral
+ * Cell IDs
+ *
+ * Refer data structure for other register map
+ */
+
+/**
+ * struct cryp_register
+ * @cr - Configuration register
+ * @status - Status register
+ * @din - Data input register
+ * @din_size - Data input size register
+ * @dout - Data output register
+ * @dout_size - Data output size register
+ * @dmacr - Dma control register
+ * @imsc - Interrupt mask set/clear register
+ * @ris - Raw interrupt status
+ * @mis - Masked interrupt status register
+ * @key_1_l - Key register 1 L
+ * @key_1_r - Key register 1 R
+ * @key_2_l - Key register 2 L
+ * @key_2_r - Key register 2 R
+ * @key_3_l - Key register 3 L
+ * @key_3_r - Key register 3 R
+ * @key_4_l - Key register 4 L
+ * @key_4_r - Key register 4 R
+ * @init_vect_0_l - init vector 0 L
+ * @init_vect_0_r - init vector 0 R
+ * @init_vect_1_l - init vector 1 L
+ * @init_vect_1_r - init vector 1 R
+ * @cryp_unused1 - unused registers
+ * @itcr - Integration test control register
+ * @itip - Integration test input register
+ * @itop - Integration test output register
+ * @cryp_unused2 - unused registers
+ * @periphId0 - FE0 CRYP Peripheral Identification Register
+ * @periphId1 - FE4
+ * @periphId2 - FE8
+ * @periphId3 - FEC
+ * @pcellId0 - FF0 CRYP PCell Identification Register
+ * @pcellId1 - FF4
+ * @pcellId2 - FF8
+ * @pcellId3 - FFC
+ */
+struct cryp_register {
+ u32 cr; /* Configuration register */
+ u32 sr; /* Status register */
+ u32 din; /* Data input register */
+ u32 din_size; /* Data input size register */
+ u32 dout; /* Data output register */
+ u32 dout_size; /* Data output size register */
+ u32 dmacr; /* Dma control register */
+ u32 imsc; /* Interrupt mask set/clear register */
+ u32 ris; /* Raw interrupt status */
+ u32 mis; /* Masked interrupt status register */
+
+ u32 key_1_l; /*Key register 1 L */
+ u32 key_1_r; /*Key register 1 R */
+ u32 key_2_l; /*Key register 2 L */
+ u32 key_2_r; /*Key register 2 R */
+ u32 key_3_l; /*Key register 3 L */
+ u32 key_3_r; /*Key register 3 R */
+ u32 key_4_l; /*Key register 4 L */
+ u32 key_4_r; /*Key register 4 R */
+
+ u32 init_vect_0_l; /*init vector 0 L */
+ u32 init_vect_0_r; /*init vector 0 R */
+ u32 init_vect_1_l; /*init vector 1 L */
+ u32 init_vect_1_r; /*init vector 1 R */
+
+ u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)]; /* unused registers */
+ u32 itcr; /*Integration test control register */
+ u32 itip; /*Integration test input register */
+ u32 itop; /*Integration test output register */
+ u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)]; /* unused registers */
+
+ u32 periphId0; /* FE0 CRYP Peripheral Identification Register */
+ u32 periphId1; /* FE4 */
+ u32 periphId2; /* FE8 */
+ u32 periphId3; /* FEC */
+
+ u32 pcellId0; /* FF0 CRYP PCell Identification Register */
+ u32 pcellId1; /* FF4 */
+ u32 pcellId2; /* FF8 */
+ u32 pcellId3; /* FFC */
+};
+
+#endif
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
new file mode 100644
index 00000000000..0e070829edc
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_p.h
@@ -0,0 +1,124 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_P_H_
+#define _CRYP_P_H_
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#include "cryp.h"
+#include "cryp_irqp.h"
+
+/**
+ * Generic Macros
+ */
+#define CRYP_SET_BITS(reg_name, mask) \
+ writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
+
+#define CRYP_WRITE_BIT(reg_name, val, mask) \
+ writel_relaxed(((readl_relaxed(reg_name) & ~(mask)) |\
+ ((val) & (mask))), reg_name)
+
+#define CRYP_TEST_BITS(reg_name, val) \
+ (readl_relaxed(reg_name) & (val))
+
+#define CRYP_PUT_BITS(reg, val, shift, mask) \
+ writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
+ (((u32)val << shift) & (mask))), reg)
+
+/**
+ * CRYP specific Macros
+ */
+#define CRYP_PERIPHERAL_ID0 0xE3
+#define CRYP_PERIPHERAL_ID1 0x05
+
+#define CRYP_PERIPHERAL_ID2_DB8500 0x28
+#define CRYP_PERIPHERAL_ID2_DB5500 0x29
+#define CRYP_PERIPHERAL_ID3 0x00
+
+#define CRYP_PCELL_ID0 0x0D
+#define CRYP_PCELL_ID1 0xF0
+#define CRYP_PCELL_ID2 0x05
+#define CRYP_PCELL_ID3 0xB1
+
+/**
+ * CRYP register default values
+ */
+#define MAX_DEVICE_SUPPORT 2
+
+/* Priv set, keyrden set and datatype 8bits swapped set as default. */
+#define CRYP_CR_DEFAULT 0x0482
+#define CRYP_DMACR_DEFAULT 0x0
+#define CRYP_IMSC_DEFAULT 0x0
+#define CRYP_DIN_DEFAULT 0x0
+#define CRYP_DOUT_DEFAULT 0x0
+#define CRYP_KEY_DEFAULT 0x0
+#define CRYP_INIT_VECT_DEFAULT 0x0
+
+/**
+ * CRYP Control register specific mask
+ */
+#define CRYP_CR_SECURE_MASK BIT(0)
+#define CRYP_CR_PRLG_MASK BIT(1)
+#define CRYP_CR_ALGODIR_MASK BIT(2)
+#define CRYP_CR_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3))
+#define CRYP_CR_DATATYPE_MASK (BIT(7) | BIT(6))
+#define CRYP_CR_KEYSIZE_MASK (BIT(9) | BIT(8))
+#define CRYP_CR_KEYRDEN_MASK BIT(10)
+#define CRYP_CR_KSE_MASK BIT(11)
+#define CRYP_CR_START_MASK BIT(12)
+#define CRYP_CR_INIT_MASK BIT(13)
+#define CRYP_CR_FFLUSH_MASK BIT(14)
+#define CRYP_CR_CRYPEN_MASK BIT(15)
+#define CRYP_CR_CONTEXT_SAVE_MASK (CRYP_CR_SECURE_MASK |\
+ CRYP_CR_PRLG_MASK |\
+ CRYP_CR_ALGODIR_MASK |\
+ CRYP_CR_ALGOMODE_MASK |\
+ CRYP_CR_DATATYPE_MASK |\
+ CRYP_CR_KEYSIZE_MASK |\
+ CRYP_CR_KEYRDEN_MASK)
+
+
+#define CRYP_SR_INFIFO_READY_MASK (BIT(0) | BIT(1))
+#define CRYP_SR_IFEM_MASK BIT(0)
+#define CRYP_SR_BUSY_MASK BIT(4)
+
+/**
+ * Bit position used while setting bits in register
+ */
+#define CRYP_CR_PRLG_POS 1
+#define CRYP_CR_ALGODIR_POS 2
+#define CRYP_CR_ALGOMODE_POS 3
+#define CRYP_CR_DATATYPE_POS 6
+#define CRYP_CR_KEYSIZE_POS 8
+#define CRYP_CR_KEYRDEN_POS 10
+#define CRYP_CR_KSE_POS 11
+#define CRYP_CR_START_POS 12
+#define CRYP_CR_INIT_POS 13
+#define CRYP_CR_CRYPEN_POS 15
+
+#define CRYP_SR_BUSY_POS 4
+
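+/*
+ * Illustrative sketch (not part of this driver): the generic macros at the
+ * top of this file combine with the _POS/_MASK constants above to program a
+ * single control-register field, e.g. selecting the AES CBC algomode
+ * (device_data->base is assumed to point at the mapped CRYP registers):
+ *
+ *	CRYP_PUT_BITS(&device_data->base->cr, CRYP_ALGO_AES_CBC,
+ *		      CRYP_CR_ALGOMODE_POS, CRYP_CR_ALGOMODE_MASK);
+ */
+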
+/**
+ * CRYP PCRs------PC_NAND control register
+ * BIT_MASK
+ */
+#define CRYP_DMA_REQ_MASK (BIT(1) | BIT(0))
+#define CRYP_DMA_REQ_MASK_POS 0
+
+
+struct cryp_system_context {
+ /* CRYP Register structure */
+ struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT];
+};
+
+#endif
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
new file mode 100644
index 00000000000..b2f90d9bac7
--- /dev/null
+++ b/drivers/crypto/ux500/hash/Makefile
@@ -0,0 +1,11 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_hash_core.o := -DDEBUG -O0
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
+ux500_hash-objs := hash_core.o
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
new file mode 100644
index 00000000000..b8619ea4a27
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen (shujuan.chen@stericsson.com)
+ * Author: Joakim Bech (joakim.xx.bech@stericsson.com)
+ * Author: Berne Hebark (berne.hebark@stericsson.com))
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _HASH_ALG_H
+#define _HASH_ALG_H
+
+#include <linux/bitops.h>
+
+#define HASH_BLOCK_SIZE 64
+#define HASH_DMA_ALIGN_SIZE 4
+#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024
+#define HASH_BYTES_PER_WORD 4
+
+/* Maximum value of the length's high word */
+#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL
+
+/* Power on Reset values HASH registers */
+#define HASH_RESET_CR_VALUE 0x0
+#define HASH_RESET_STR_VALUE 0x0
+
+/* Number of context swap registers */
+#define HASH_CSR_COUNT 52
+
+#define HASH_RESET_CSRX_REG_VALUE 0x0
+#define HASH_RESET_CSFULL_REG_VALUE 0x0
+#define HASH_RESET_CSDATAIN_REG_VALUE 0x0
+
+#define HASH_RESET_INDEX_VAL 0x0
+#define HASH_RESET_BIT_INDEX_VAL 0x0
+#define HASH_RESET_BUFFER_VAL 0x0
+#define HASH_RESET_LEN_HIGH_VAL 0x0
+#define HASH_RESET_LEN_LOW_VAL 0x0
+
+/* Control register bitfields */
+#define HASH_CR_RESUME_MASK 0x11FCF
+
+#define HASH_CR_SWITCHON_POS 31
+#define HASH_CR_SWITCHON_MASK BIT(31)
+
+#define HASH_CR_EMPTYMSG_POS 20
+#define HASH_CR_EMPTYMSG_MASK BIT(20)
+
+#define HASH_CR_DINF_POS 12
+#define HASH_CR_DINF_MASK BIT(12)
+
+#define HASH_CR_NBW_POS 8
+#define HASH_CR_NBW_MASK 0x00000F00UL
+
+#define HASH_CR_LKEY_POS 16
+#define HASH_CR_LKEY_MASK BIT(16)
+
+#define HASH_CR_ALGO_POS 7
+#define HASH_CR_ALGO_MASK BIT(7)
+
+#define HASH_CR_MODE_POS 6
+#define HASH_CR_MODE_MASK BIT(6)
+
+#define HASH_CR_DATAFORM_POS 4
+#define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5))
+
+#define HASH_CR_DMAE_POS 3
+#define HASH_CR_DMAE_MASK BIT(3)
+
+#define HASH_CR_INIT_POS 2
+#define HASH_CR_INIT_MASK BIT(2)
+
+#define HASH_CR_PRIVN_POS 1
+#define HASH_CR_PRIVN_MASK BIT(1)
+
+#define HASH_CR_SECN_POS 0
+#define HASH_CR_SECN_MASK BIT(0)
+
+/* Start register bitfields */
+#define HASH_STR_DCAL_POS 8
+#define HASH_STR_DCAL_MASK BIT(8)
+#define HASH_STR_DEFAULT 0x0
+
+#define HASH_STR_NBLW_POS 0
+#define HASH_STR_NBLW_MASK 0x0000001FUL
+
+#define HASH_NBLW_MAX_VAL 0x1F
+
+/* PrimeCell IDs */
+#define HASH_P_ID0 0xE0
+#define HASH_P_ID1 0x05
+#define HASH_P_ID2 0x38
+#define HASH_P_ID3 0x00
+#define HASH_CELL_ID0 0x0D
+#define HASH_CELL_ID1 0xF0
+#define HASH_CELL_ID2 0x05
+#define HASH_CELL_ID3 0xB1
+
+#define HASH_SET_BITS(reg_name, mask) \
+ writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
+
+#define HASH_CLEAR_BITS(reg_name, mask) \
+ writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name)
+
+#define HASH_PUT_BITS(reg, val, shift, mask) \
+ writel_relaxed(((readl(reg) & ~(mask)) | \
+ (((u32)val << shift) & (mask))), reg)
+
+#define HASH_SET_DIN(val, len) writesl(&device_data->base->din, (val), (len))
+
+#define HASH_INITIALIZE \
+ HASH_PUT_BITS( \
+ &device_data->base->cr, \
+ 0x01, HASH_CR_INIT_POS, \
+ HASH_CR_INIT_MASK)
+
+#define HASH_SET_DATA_FORMAT(data_format) \
+ HASH_PUT_BITS( \
+ &device_data->base->cr, \
+ (u32) (data_format), HASH_CR_DATAFORM_POS, \
+ HASH_CR_DATAFORM_MASK)
+#define HASH_SET_NBLW(val) \
+ HASH_PUT_BITS( \
+ &device_data->base->str, \
+ (u32) (val), HASH_STR_NBLW_POS, \
+ HASH_STR_NBLW_MASK)
+#define HASH_SET_DCAL \
+ HASH_PUT_BITS( \
+ &device_data->base->str, \
+ 0x01, HASH_STR_DCAL_POS, \
+ HASH_STR_DCAL_MASK)
+
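+/*
+ * Illustrative sketch (not part of this driver): the macros above expand
+ * against a local "device_data" pointer, so code finalizing a digest would
+ * typically program the number of valid bits in the last word and then kick
+ * off the final calculation ("index_bytes" is a hypothetical local here):
+ *
+ *	HASH_SET_NBLW(index_bytes * 8);
+ *	HASH_SET_DCAL;
+ */
+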
+/* Hardware access method */
+enum hash_mode {
+ HASH_MODE_CPU,
+ HASH_MODE_DMA
+};
+
+/**
+ * struct uint64 - Structure to handle 64 bits integers.
+ * @high_word: Most significant bits.
+ * @low_word: Least significant bits.
+ *
+ * Used to handle 64 bits integers.
+ */
+struct uint64 {
+ u32 high_word;
+ u32 low_word;
+};
+
+/**
+ * struct hash_register - Contains all registers in u8500 hash hardware.
+ * @cr: HASH control register (0x000).
+ * @din: HASH data input register (0x004).
+ * @str: HASH start register (0x008).
+ * @hx: HASH digest register 0..7 (0x00c-0x01C).
+ * @padding0: Reserved (0x02C).
+ * @itcr: Integration test control register (0x080).
+ * @itip: Integration test input register (0x084).
+ * @itop: Integration test output register (0x088).
+ * @padding1: Reserved (0x08C).
+ * @csfull: HASH context full register (0x0F8).
+ * @csdatain: HASH context swap data input register (0x0FC).
+ * @csrx: HASH context swap register 0..51 (0x100-0x1CC).
+ * @padding2: Reserved (0x1D0).
+ * @periphid0: HASH peripheral identification register 0 (0xFE0).
+ * @periphid1: HASH peripheral identification register 1 (0xFE4).
+ * @periphid2: HASH peripheral identification register 2 (0xFE8).
+ * @periphid3: HASH peripheral identification register 3 (0xFEC).
+ * @cellid0: HASH PCell identification register 0 (0xFF0).
+ * @cellid1: HASH PCell identification register 1 (0xFF4).
+ * @cellid2: HASH PCell identification register 2 (0xFF8).
+ * @cellid3: HASH PCell identification register 3 (0xFFC).
+ *
+ * The device communicates to the HASH via 32-bit-wide control registers
+ * accessible via the 32-bit width AMBA rev. 2.0 AHB Bus. Below is a structure
+ * with the registers used.
+ */
+struct hash_register {
+ u32 cr;
+ u32 din;
+ u32 str;
+ u32 hx[8];
+
+ u32 padding0[(0x080 - 0x02C) / sizeof(u32)];
+
+ u32 itcr;
+ u32 itip;
+ u32 itop;
+
+ u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)];
+
+ u32 csfull;
+ u32 csdatain;
+ u32 csrx[HASH_CSR_COUNT];
+
+ u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)];
+
+ u32 periphid0;
+ u32 periphid1;
+ u32 periphid2;
+ u32 periphid3;
+
+ u32 cellid0;
+ u32 cellid1;
+ u32 cellid2;
+ u32 cellid3;
+};
+
+/**
+ * struct hash_state - Hash context state.
+ * @temp_cr: Temporary HASH Control Register.
+ * @str_reg: HASH Start Register.
+ * @din_reg: HASH Data Input Register.
+ * @csr[52]: HASH Context Swap Registers 0-39.
+ * @csfull: HASH Context Swap Registers 40 ie Status flags.
+ * @csdatain: HASH Context Swap Registers 41 ie Input data.
+ * @buffer: Working buffer for messages going to the hardware.
+ * @length: Length of the part of message hashed so far (floor(N/64) * 64).
+ * @index: Valid number of bytes in buffer (N % 64).
+ * @bit_index: Valid number of bits in buffer (N % 8).
+ *
+ * This structure is used between context switches, i.e. when ongoing jobs are
+ * interrupted with new jobs. When this happens we need to store intermediate
+ * results in software.
+ *
+ * WARNING: "index" is the member of the structure, to be sure that "buffer"
+ * is aligned on a 4-bytes boundary. This is highly implementation dependent
+ * and MUST be checked whenever this code is ported on new platforms.
+ */
+struct hash_state {
+ u32 temp_cr;
+ u32 str_reg;
+ u32 din_reg;
+ u32 csr[52];
+ u32 csfull;
+ u32 csdatain;
+ u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)];
+ struct uint64 length;
+ u8 index;
+ u8 bit_index;
+};
+
+/**
+ * enum hash_device_id - HASH device ID.
+ * @HASH_DEVICE_ID_0: Hash hardware with ID 0
+ * @HASH_DEVICE_ID_1: Hash hardware with ID 1
+ */
+enum hash_device_id {
+ HASH_DEVICE_ID_0 = 0,
+ HASH_DEVICE_ID_1 = 1
+};
+
+/**
+ * enum hash_data_format - HASH data format.
+ * @HASH_DATA_32_BITS: 32-bit data format.
+ * @HASH_DATA_16_BITS: 16-bit data format.
+ * @HASH_DATA_8_BITS: 8-bit data format.
+ * @HASH_DATA_1_BIT: 1-bit data format.
+ */
+enum hash_data_format {
+ HASH_DATA_32_BITS = 0x0,
+ HASH_DATA_16_BITS = 0x1,
+ HASH_DATA_8_BITS = 0x2,
+ HASH_DATA_1_BIT = 0x3
+};
+
+/**
+ * enum hash_algo - Enumeration for selecting between SHA1 and SHA256.
+ * @HASH_ALGO_SHA1: Indicates that SHA1 is used.
+ * @HASH_ALGO_SHA256: Indicates that SHA256 (SHA2) is used.
+ */
+enum hash_algo {
+ HASH_ALGO_SHA1 = 0x0,
+ HASH_ALGO_SHA256 = 0x1
+};
+
+/**
+ * enum hash_op - Enumeration for selecting between HASH or HMAC mode.
+ * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode.
+ * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC.
+ */
+enum hash_op {
+ HASH_OPER_MODE_HASH = 0x0,
+ HASH_OPER_MODE_HMAC = 0x1
+};
+
+/**
+ * struct hash_config - Configuration data for the hardware.
+ * @data_format: Format of data entered into the hash data in register.
+ * @algorithm: Algorithm selection bit.
+ * @oper_mode: Operating mode selection bit.
+ */
+struct hash_config {
+ int data_format;
+ int algorithm;
+ int oper_mode;
+};
+
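+/*
+ * Illustrative example (not part of this driver): a context set up for
+ * HMAC-SHA256 over byte-oriented data would carry a configuration such as
+ *
+ *	struct hash_config cfg = {
+ *		.data_format = HASH_DATA_8_BITS,
+ *		.algorithm   = HASH_ALGO_SHA256,
+ *		.oper_mode   = HASH_OPER_MODE_HMAC,
+ *	};
+ */
+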
+/**
+ * struct hash_dma - Structure used for dma.
+ * @mask: DMA capabilities bitmap mask.
+ * @complete: Used to maintain state for a "completion".
+ * @chan_mem2hash: DMA channel.
+ * @cfg_mem2hash: DMA channel configuration.
+ * @sg_len: Scatterlist length.
+ * @sg: Scatterlist.
+ * @nents: Number of sg entries.
+ */
+struct hash_dma {
+ dma_cap_mask_t mask;
+ struct completion complete;
+ struct dma_chan *chan_mem2hash;
+ void *cfg_mem2hash;
+ int sg_len;
+ struct scatterlist *sg;
+ int nents;
+};
+
+/**
+ * struct hash_ctx - The context used for hash calculations.
+ * @key: The key used in the operation.
+ * @keylen: The length of the key.
+ * @updated: Indicates if hardware is initialized for new operations.
+ * @state: The state of the current calculations.
+ * @config: The current configuration.
+ * @digestsize: The size of current digest.
+ * @device: Pointer to the device structure.
+ * @dma_mode: Used in special cases (workaround), e.g. when a fallback to
+ * cpu mode is needed because dma mode is not supported or not working.
+ */
+struct hash_ctx {
+ u8 *key;
+ u32 keylen;
+ u8 updated;
+ struct hash_state state;
+ struct hash_config config;
+ int digestsize;
+ struct hash_device_data *device;
+ bool dma_mode;
+};
+
+/**
+ * struct hash_device_data - structure for a hash device.
+ * @base: Pointer to the hardware base address.
+ * @list_node: For inclusion in klist.
+ * @dev: Pointer to the device dev structure.
+ * @ctx_lock: Spinlock for current_ctx.
+ * @current_ctx: Pointer to the currently allocated context.
+ * @power_state: TRUE = power state on, FALSE = power state off.
+ * @power_state_lock: Spinlock for power_state.
+ * @regulator: Pointer to the device's power control.
+ * @clk: Pointer to the device's clock control.
+ * @restore_dev_state: TRUE = saved state, FALSE = no saved state.
+ * @dma: Structure used for dma.
+ */
+struct hash_device_data {
+ struct hash_register __iomem *base;
+ struct klist_node list_node;
+ struct device *dev;
+ struct spinlock ctx_lock;
+ struct hash_ctx *current_ctx;
+ bool power_state;
+ struct spinlock power_state_lock;
+ struct ux500_regulator *regulator;
+ struct clk *clk;
+ bool restore_dev_state;
+ struct hash_dma dma;
+};
+
+int hash_check_hw(struct hash_device_data *device_data);
+
+int hash_setconfiguration(struct hash_device_data *device_data,
+ struct hash_config *config);
+
+void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx);
+
+void hash_get_digest(struct hash_device_data *device_data,
+ u8 *digest, int algorithm);
+
+int hash_hw_update(struct ahash_request *req);
+
+int hash_save_state(struct hash_device_data *device_data,
+ struct hash_state *state);
+
+int hash_resume_state(struct hash_device_data *device_data,
+ const struct hash_state *state);
+
+#endif
diff --git a/drivers/crypto/ux500/hash/hash_alg_p.h b/drivers/crypto/ux500/hash/hash_alg_p.h
new file mode 100755
index 00000000000..c85faaeba6f
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_alg_p.h
@@ -0,0 +1,26 @@
+/*****************************************************************************/
+/**
+* (C) ST-Ericsson, 2009 - All rights reserved
+* Reproduction and Communication of this document is strictly prohibited
+* unless specifically authorized in writing by ST-Ericsson
+*
+* Static header file of the HASH processor
+* Specification release related to this implementation: A_V2.2
+* AUTHOR : ST-Ericsson
+*/
+/*****************************************************************************/
+
+#ifndef _HASH_P_H_
+#define _HASH_P_H_
+
+/*--------------------------------------------------------------------------*
+ * Includes *
+ *--------------------------------------------------------------------------*/
+#include "hash_alg.h"
+
+/*--------------------------------------------------------------------------*
+ * Defines *
+ *--------------------------------------------------------------------------*/
+
+#endif /* End _HASH_P_H_ */
+
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
new file mode 100644
index 00000000000..9cc50e91c9e
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -0,0 +1,2080 @@
+/*
+ * Cryptographic API.
+ * Support for Nomadik hardware crypto engine.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/klist.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/dmaengine.h>
+#include <linux/bitops.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <mach/crypto-ux500.h>
+#include <mach/hardware.h>
+
+#include "hash_alg.h"
+
+#define DEV_DBG_NAME "hashX hashX:"
+
+static int hash_mode;
+module_param(hash_mode, int, 0);
+MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
+
+/**
+ * Pre-calculated empty message digests.
+ */
+static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09
+};
+
+static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+
+/* HMAC-SHA1, no key */
+static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
+ 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
+ 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
+ 0x70, 0x69, 0x0e, 0x1d
+};
+
+/* HMAC-SHA256, no key */
+static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
+ 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
+ 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
+ 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
+ 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
+};
+
+/**
+ * struct hash_driver_data - data specific to the driver.
+ *
+ * @device_list: A list of registered devices to choose from.
+ * @device_allocation: A semaphore initialized with number of devices.
+ */
+struct hash_driver_data {
+ struct klist device_list;
+ struct semaphore device_allocation;
+};
+
+static struct hash_driver_data driver_data;
+
+/* Declaration of functions */
+/**
+ * hash_messagepad - Pads a message and writes the nblw bits.
+ * @device_data: Structure for the hash device.
+ * @message: Last word of a message.
+ * @index_bytes: The number of bytes in the last part of the message.
+ *
+ * This function manages the final part of the digest calculation, when less
+ * than 512 bits (64 bytes) remain in the message. This means index_bytes < 64.
+ *
+ * Reentrancy: Non Re-entrant.
+ */
+static void hash_messagepad(struct hash_device_data *device_data,
+ const u32 *message, u8 index_bytes);
+
+/**
+ * release_hash_device - Releases a previously allocated hash device.
+ * @device_data: Structure for the hash device.
+ *
+ */
+static void release_hash_device(struct hash_device_data *device_data)
+{
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx->device = NULL;
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * cryp_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+}
+
+static void hash_dma_setup_channel(struct hash_device_data *device_data,
+ struct device *dev)
+{
+ struct hash_platform_data *platform_data = dev->platform_data;
+ dma_cap_zero(device_data->dma.mask);
+ dma_cap_set(DMA_SLAVE, device_data->dma.mask);
+
+ device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
+ device_data->dma.chan_mem2hash =
+ dma_request_channel(device_data->dma.mask,
+ platform_data->dma_filter,
+ device_data->dma.cfg_mem2hash);
+
+ init_completion(&device_data->dma.complete);
+}
+
+static void hash_dma_callback(void *data)
+{
+ struct hash_ctx *ctx = (struct hash_ctx *) data;
+
+ complete(&ctx->device->dma.complete);
+}
+
+static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
+ int len, enum dma_data_direction direction)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *channel = NULL;
+ dma_cookie_t cookie;
+
+ if (direction != DMA_TO_DEVICE) {
+ dev_err(ctx->device->dev, "[%s] Invalid DMA direction",
+ __func__);
+ return -EFAULT;
+ }
+
+ sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
+
+ channel = ctx->device->dma.chan_mem2hash;
+ ctx->device->dma.sg = sg;
+ ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg, ctx->device->dma.nents,
+ direction);
+
+ if (!ctx->device->dma.sg_len) {
+ dev_err(ctx->device->dev,
+ "[%s]: Could not map the sg list (TO_DEVICE)",
+ __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(TO_DEVICE)", __func__);
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg, ctx->device->dma.sg_len,
+ direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(ctx->device->dev,
+ "[%s]: device_prep_slave_sg() failed!", __func__);
+ return -EFAULT;
+ }
+
+ desc->callback = hash_dma_callback;
+ desc->callback_param = ctx;
+
+ cookie = desc->tx_submit(desc);
+ dma_async_issue_pending(channel);
+
+ return 0;
+}
+
+static void hash_dma_done(struct hash_ctx *ctx)
+{
+ struct dma_chan *chan;
+
+ chan = ctx->device->dma.chan_mem2hash;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
+ ctx->device->dma.sg_len, DMA_TO_DEVICE);
+
+}
+
+static int hash_dma_write(struct hash_ctx *ctx,
+ struct scatterlist *sg, int len)
+{
+ int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
+/**
+ * get_empty_message_digest - Returns a pre-calculated digest for
+ * the empty message.
+ * @device_data: Structure for the hash device.
+ * @zero_hash: Buffer to return the empty message digest.
+ * @zero_hash_size: Hash size of the empty message digest.
+ * @zero_digest: True if a zero-message digest was returned.
+ */
+static int get_empty_message_digest(
+ struct hash_device_data *device_data,
+ u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
+{
+ int ret = 0;
+ struct hash_ctx *ctx = device_data->current_ctx;
+ *zero_digest = false;
+
+ /**
+ * Caller responsible for ctx != NULL.
+ */
+
+ if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
+ if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hash_sha1[0],
+ SHA1_DIGEST_SIZE);
+ *zero_hash_size = SHA1_DIGEST_SIZE;
+ *zero_digest = true;
+ } else if (HASH_ALGO_SHA256 ==
+ ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hash_sha256[0],
+ SHA256_DIGEST_SIZE);
+ *zero_hash_size = SHA256_DIGEST_SIZE;
+ *zero_digest = true;
+ } else {
+ dev_err(device_data->dev, "[%s] "
+ "Incorrect algorithm!"
+ , __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
+ if (!ctx->keylen) {
+ if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hmac_sha1[0],
+ SHA1_DIGEST_SIZE);
+ *zero_hash_size = SHA1_DIGEST_SIZE;
+ *zero_digest = true;
+ } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hmac_sha256[0],
+ SHA256_DIGEST_SIZE);
+ *zero_hash_size = SHA256_DIGEST_SIZE;
+ *zero_digest = true;
+ } else {
+ dev_err(device_data->dev, "[%s] "
+ "Incorrect algorithm!"
+ , __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ dev_dbg(device_data->dev, "[%s] Continue hash "
+ "calculation, since hmac key is available",
+ __func__);
+ }
+ }
+out:
+
+ return ret;
+}
+
+/**
+ * hash_disable_power - Request to disable power and clock.
+ * @device_data: Structure for the hash device.
+ * @save_device_state: If true, saves the current hw state.
+ *
+ * This function requests disabling of power (regulator) and clock, and
+ * can also save the current hw state.
+ */
+static int hash_disable_power(
+ struct hash_device_data *device_data,
+ bool save_device_state)
+{
+ int ret = 0;
+ struct device *dev = device_data->dev;
+
+ spin_lock(&device_data->power_state_lock);
+ if (!device_data->power_state)
+ goto out;
+
+ if (save_device_state && device_data->current_ctx) {
+ hash_save_state(device_data,
+ &device_data->current_ctx->state);
+ device_data->restore_dev_state = true;
+ }
+
+ clk_disable(device_data->clk);
+ ret = ux500_regulator_atomic_disable(device_data->regulator);
+ if (ret)
+ dev_err(dev, "[%s] regulator_disable() failed!", __func__);
+
+ device_data->power_state = false;
+
+out:
+ spin_unlock(&device_data->power_state_lock);
+
+ return ret;
+}
+
+/**
+ * hash_enable_power - Request to enable power and clock.
+ * @device_data: Structure for the hash device.
+ * @restore_device_state: If true, restores a previous saved hw state.
+ *
+ * This function requests enabling of power (regulator) and clock, and
+ * can also restore a previously saved hw state.
+ */
+static int hash_enable_power(
+ struct hash_device_data *device_data,
+ bool restore_device_state)
+{
+ int ret = 0;
+ struct device *dev = device_data->dev;
+
+ spin_lock(&device_data->power_state_lock);
+ if (!device_data->power_state) {
+ ret = ux500_regulator_atomic_enable(device_data->regulator);
+ if (ret) {
+ dev_err(dev, "[%s]: regulator_enable() failed!",
+ __func__);
+ goto out;
+ }
+ ret = clk_enable(device_data->clk);
+ if (ret) {
+ dev_err(dev, "[%s]: clk_enable() failed!",
+ __func__);
+ ret = ux500_regulator_atomic_disable(
+ device_data->regulator);
+ goto out;
+ }
+ device_data->power_state = true;
+ }
+
+ if (device_data->restore_dev_state) {
+ if (restore_device_state) {
+ device_data->restore_dev_state = false;
+ hash_resume_state(device_data,
+ &device_data->current_ctx->state);
+ }
+ }
+out:
+ spin_unlock(&device_data->power_state_lock);
+
+ return ret;
+}
+
+/**
+ * hash_get_device_data - Checks for an available hash device and returns it.
+ * @ctx: Structure for the hash context.
+ * @device_data: Structure for the hash device.
+ *
+ * This function checks for an available hash device and returns it to
+ * the caller.
+ * Note! The caller needs to release the device by calling up().
+ */
+static int hash_get_device_data(struct hash_ctx *ctx,
+ struct hash_device_data **device_data)
+{
+ int ret;
+ struct klist_iter device_iterator;
+ struct klist_node *device_node;
+ struct hash_device_data *local_device_data = NULL;
+
+ /* Wait until a device is available */
+ ret = down_interruptible(&driver_data.device_allocation);
+ if (ret)
+ return ret; /* Interrupted */
+
+ /* Select a device */
+ klist_iter_init(&driver_data.device_list, &device_iterator);
+ device_node = klist_next(&device_iterator);
+ while (device_node) {
+ local_device_data = container_of(device_node,
+ struct hash_device_data, list_node);
+ spin_lock(&local_device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (local_device_data->current_ctx) {
+ device_node = klist_next(&device_iterator);
+ } else {
+ local_device_data->current_ctx = ctx;
+ ctx->device = local_device_data;
+ spin_unlock(&local_device_data->ctx_lock);
+ break;
+ }
+ spin_unlock(&local_device_data->ctx_lock);
+ }
+ klist_iter_exit(&device_iterator);
+
+ if (!device_node) {
+ /*
+ * No free device found.
+ * Since we allocated a device with down_interruptible, this
+ * should not be able to happen.
+ * The number of available devices, held in device_allocation,
+ * is therefore decremented by not doing an
+ * up(device_allocation).
+ */
+ return -EBUSY;
+ }
+
+ *device_data = local_device_data;
+
+ return 0;
+}
+
+/**
+ * hash_hw_write_key - Writes the key to the hardware registers.
+ *
+ * @device_data: Structure for the hash device.
+ * @key: Key to be written.
+ * @keylen: The length of the key.
+ *
+ * Note! This function DOES NOT write to the NBLW register, even though
+ * specified in the hw design spec. Either due to incorrect info in the
+ * spec or due to a bug in the hw.
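+ *
+ * Byte-order example (illustrative): the key bytes 0x01 0x02 0x03 0x04 are
+ * packed into the 32-bit word 0x04030201 before being written to HASH_DIN.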
+ */
+static void hash_hw_write_key(struct hash_device_data *device_data,
+ const u8 *key, unsigned int keylen)
+{
+ u32 word = 0;
+ int nwords = 1;
+
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+ while (keylen >= 4) {
+ word = ((u32) (key[3] & 0xff) << 24) |
+ ((u32) (key[2] & 0xff) << 16) |
+ ((u32) (key[1] & 0xff) << 8) |
+ ((u32) (key[0] & 0xff));
+
+ HASH_SET_DIN(&word, nwords);
+ keylen -= 4;
+ key += 4;
+ }
+
+ /* Take care of the remaining bytes in the last word */
+ if (keylen) {
+ word = 0;
+ while (keylen) {
+ word |= (key[keylen - 1] << (8 * (keylen - 1)));
+ keylen--;
+ }
+
+ HASH_SET_DIN(&word, nwords);
+ }
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ HASH_SET_DCAL;
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+}
+
+/**
+ * init_hash_hw - Initialise the hash hardware for a new calculation.
+ * @device_data: Structure for the hash device.
+ * @ctx: The hash context.
+ *
+ * This function will enable the bits needed to clear and start a new
+ * calculation.
+ */
+static int init_hash_hw(struct hash_device_data *device_data,
+ struct hash_ctx *ctx)
+{
+ int ret = 0;
+
+ ret = hash_setconfiguration(device_data, &ctx->config);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_setconfiguration() "
+ "failed!", __func__);
+ return ret;
+ }
+
+ hash_begin(device_data, ctx);
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+ hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+
+ return ret;
+}
+
+/**
+ * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
+ *
+ * @sg: Scatterlist.
+ * @size: Size in bytes.
+ * @aligned: True if the sg data is aligned for DMA mode.
+ *
+ * Reentrancy: Non Re-entrant
+ */
+static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
+{
+ int nents = 0;
+ bool aligned_data = true;
+
+ while (size > 0 && sg) {
+ nents++;
+ size -= sg->length;
+
+ /* hash_set_dma_transfer will align last nent */
+ if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE))
+ || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) &&
+ size > 0))
+ aligned_data = false;
+
+ sg = sg_next(sg);
+ }
+
+ if (aligned)
+ *aligned = aligned_data;
+
+ if (size != 0)
+ return -EFAULT;
+
+ return nents;
+}
+
+/**
+ * hash_dma_valid_data - checks for dma valid sg data.
+ * @sg: Scatterlist.
+ * @datasize: Datasize in bytes.
+ *
+ * NOTE! This function checks that the sg data is valid for DMA, since DMA
+ * only accepts data sizes that are a whole number of words.
+ */
+static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
+{
+ bool aligned;
+
+ /* Need to include at least one nent, else error */
+ if (hash_get_nents(sg, datasize, &aligned) < 1)
+ return false;
+
+ return aligned;
+}
+
+/**
+ * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ *
+ * Initialize structures.
+ */
+static int hash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (!ctx->key)
+ ctx->keylen = 0;
+
+ memset(&ctx->state, 0, sizeof(struct hash_state));
+ ctx->updated = 0;
+ if (hash_mode == HASH_MODE_DMA) {
+ if ((ctx->config.oper_mode == HASH_OPER_MODE_HMAC) &&
+ cpu_is_u5500()) {
+ pr_debug(DEV_DBG_NAME " [%s] HMAC and DMA not working "
+ "on u5500, directing to CPU mode.",
+ __func__);
+ ctx->dma_mode = false; /* Don't use DMA in this case */
+ goto out;
+ }
+
+ if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
+ ctx->dma_mode = false; /* Don't use DMA in this case */
+
+ pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct "
+ "to CPU mode for data size < %d",
+ __func__, HASH_DMA_ALIGN_SIZE);
+ } else {
+ if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
+ hash_dma_valid_data(req->src,
+ req->nbytes)) {
+ ctx->dma_mode = true;
+ } else {
+ ctx->dma_mode = false;
+ pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use"
+ " CPU mode for datalength < %d"
+ " or non-aligned data, except "
+ "in last nent", __func__,
+ HASH_DMA_PERFORMANCE_MIN_SIZE);
+ }
+ }
+ }
+out:
+ return 0;
+}
+
+/**
+ * hash_processblock - This function processes a single block of 512 bits (64
+ * bytes), word aligned, starting at message.
+ * @device_data: Structure for the hash device.
+ * @message: Block (512 bits) of message to be written to
+ * the HASH hardware.
+ * @length: Message length in bytes.
+ *
+ * Reentrancy: Non Re-entrant.
+ */
+static void hash_processblock(
+ struct hash_device_data *device_data,
+ const u32 *message, int length)
+{
+ int len = length / HASH_BYTES_PER_WORD;
+ /*
+ * NBLW bits. Reset the number of bits in last word (NBLW).
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+ /*
+ * Write message data to the HASH_DIN register.
+ */
+ HASH_SET_DIN(message, len);
+}
+
+/**
+ * hash_messagepad - Pads a message and write the nblw bits.
+ * @device_data: Structure for the hash device.
+ * @message: Last word of a message.
+ * @index_bytes: The number of bytes in the last message.
+ *
+ * This function manages the final part of the digest calculation, when less
+ * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
+ *
+ * Reentrancy: Non Re-entrant.
+ */
+static void hash_messagepad(struct hash_device_data *device_data,
+ const u32 *message, u8 index_bytes)
+{
+ int nwords = 1;
+
+ /*
+ * Clear hash str register, only clear NBLW
+ * since DCAL will be reset by hardware.
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+ /* Main loop */
+ while (index_bytes >= 4) {
+ HASH_SET_DIN(message, nwords);
+ index_bytes -= 4;
+ message++;
+ }
+
+ if (index_bytes)
+ HASH_SET_DIN(message, nwords);
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
+ HASH_SET_NBLW(index_bytes * 8);
+ dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__,
+ readl_relaxed(&device_data->base->din),
+ (int)(readl_relaxed(&device_data->base->str) &
+ HASH_STR_NBLW_MASK));
+ HASH_SET_DCAL;
+ dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d",
+ __func__, readl_relaxed(&device_data->base->din),
+ (int)(readl_relaxed(&device_data->base->str) &
+ HASH_STR_NBLW_MASK));
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+}
+
+/**
+ * hash_incrementlength - Increments the length of the current message.
+ * @ctx: Hash context
+ * @incr: Length of the message chunk just processed
+ *
+ * Overflow cannot occur, because conditions for overflow are checked in
+ * hash_hw_update.
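+ *
+ * Worked example (illustrative): with low_word == 0xfffffff0 and
+ * incr == 0x40, low_word wraps around to 0x30, which is smaller than incr,
+ * so the carry is detected and high_word is incremented.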
+ */
+static void hash_incrementlength(struct hash_ctx *ctx, u32 incr)
+{
+ ctx->state.length.low_word += incr;
+
+ /* Check for wrap-around */
+ if (ctx->state.length.low_word < incr)
+ ctx->state.length.high_word++;
+}
+
+/**
+ * hash_setconfiguration - Sets the required configuration for the hash
+ * hardware.
+ * @device_data: Structure for the hash device.
+ * @config: Pointer to a configuration structure.
+ *
+ * Reentrancy: Non Re-entrant
+ * Reentrancy issues:
+ * 1. Global variable registers (configuration register,
+ * parameter register, divider register) are being modified.
+ *
+ * Comments 1. : The user needs to call the hash_begin API after calling
+ * this API, i.e. the current configuration is set only when bit INIT
+ * is set, and we set the INIT bit in hash_begin. Changing the
+ * configuration during a computation has no effect, so we first set
+ * the configuration by calling this API and then set the INIT bit for
+ * the HASH processor; the current configuration is then taken into
+ * account. Since reading the INIT bit (with correct protection rights)
+ * will always return 0b, we can't make a check at software level. So
+ * the user has to initialize the device for a new configuration to
+ * take effect.
+ * 2. The default value of the data format is 00b, i.e. the format
+ * of the data entered in the HASH_DIN register is 32-bit data.
+ * The data written in HASH_DIN is used directly by the HASH
+ * processing, without reordering.
+ */
+int hash_setconfiguration(struct hash_device_data *device_data,
+ struct hash_config *config)
+{
+ int ret = 0;
+
+ if (config->algorithm != HASH_ALGO_SHA1 &&
+ config->algorithm != HASH_ALGO_SHA256)
+ return -EPERM;
+
+ /*
+ * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
+ * to be written to HASH_DIN is considered as 32 bits.
+ */
+ HASH_SET_DATA_FORMAT(config->data_format);
+
+ /*
+ * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
+ */
+ switch (config->algorithm) {
+ case HASH_ALGO_SHA1:
+ HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
+ break;
+
+ case HASH_ALGO_SHA256:
+ HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
+ break;
+
+ default:
+ dev_err(device_data->dev, "[%s] Incorrect algorithm.",
+ __func__);
+ return -EPERM;
+ }
+
+ /*
+ * MODE bit. This bit selects between HASH or HMAC mode for the
+ * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
+ */
+ if (HASH_OPER_MODE_HASH == config->oper_mode)
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_MODE_MASK);
+ else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_MODE_MASK);
+ if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
+ /* Truncate key to blocksize */
+ dev_dbg(device_data->dev, "[%s] LKEY set", __func__);
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_LKEY_MASK);
+ } else {
+ dev_dbg(device_data->dev, "[%s] LKEY cleared",
+ __func__);
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_LKEY_MASK);
+ }
+ } else { /* Wrong hash mode */
+ ret = -EPERM;
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ }
+ return ret;
+}
+
+/**
+ * hash_begin - This routine resets some globals and initializes the hash
+ * hardware.
+ * @device_data: Structure for the hash device.
+ * @ctx: Hash context.
+ *
+ * Reentrancy: Non Re-entrant
+ *
+ * Comments 1. : The user needs to call the hash_setconfiguration API
+ * before calling this API, i.e. the current configuration is set only
+ * when bit INIT is set, and we set the INIT bit in hash_begin.
+ * Changing the configuration during a computation has no effect, so
+ * we first set the configuration by calling that API and then set the
+ * INIT bit for the HASH processor; the current configuration is then
+ * taken into account. Since reading the INIT bit (with correct
+ * protection rights) will always return 0b, we can't make a check at
+ * software level. So the user has to initialize the device for a new
+ * configuration to take effect.
+ */
+void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
+{
+ /* HW and SW initializations */
+ /* Note: there is no need to initialize buffer and digest members */
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ /*
+ * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
+ * prepare and initialize the HASH accelerator to compute the message
+ * digest of a new message.
+ */
+ HASH_INITIALIZE;
+
+ /*
+ * NBLW bits. Reset the number of bits in last word (NBLW).
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+}
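+
+/*
+ * Illustrative call order, mirroring what init_hash_hw() above does:
+ * hash_setconfiguration(device_data, &ctx->config) first, then
+ * hash_begin(device_data, ctx); setting the INIT bit in hash_begin() is
+ * what makes the new configuration take effect.
+ */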
+
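+/**
+ * hash_process_data - Bytewise hashing of the message given by the caller.
+ * @device_data: Structure for the hash device.
+ * @ctx: Hash context.
+ * @msg_length: Length of the current chunk of message data.
+ * @data_buffer: Pointer to the current chunk of message data.
+ * @buffer: Block buffer held in the context state.
+ * @index: Current fill level of @buffer, updated on return.
+ *
+ * Data smaller than a full block is accumulated in @buffer; complete
+ * 512-bit blocks are written to the hardware.
+ */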
+int hash_process_data(
+ struct hash_device_data *device_data,
+ struct hash_ctx *ctx, int msg_length, u8 *data_buffer,
+ u8 *buffer, u8 *index)
+{
+ int ret = 0;
+ u32 count;
+
+ do {
+ if ((*index + msg_length) < HASH_BLOCK_SIZE) {
+ for (count = 0; count < msg_length; count++) {
+ buffer[*index + count] =
+ *(data_buffer + count);
+ }
+ *index += msg_length;
+ msg_length = 0;
+ } else {
+ if (ctx->updated) {
+
+ ret = hash_resume_state(device_data,
+ &ctx->state);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_resume_state()"
+ " failed!", __func__);
+ goto out;
+ }
+ } else {
+ ret = init_hash_hw(device_data, ctx);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "init_hash_hw()"
+ " failed!", __func__);
+ goto out;
+ }
+ ctx->updated = 1;
+ }
+ /*
+ * If 'data_buffer' is four byte aligned and
+ * local buffer does not have any data, we can
+ * write data directly from 'data_buffer' to
+ * HW peripheral, otherwise we first copy data
+ * to a local buffer
+ */
+ if ((0 == (((u32)data_buffer) % 4))
+ && (0 == *index))
+ hash_processblock(device_data,
+ (const u32 *)
+ data_buffer, HASH_BLOCK_SIZE);
+ else {
+ for (count = 0; count <
+ (u32)(HASH_BLOCK_SIZE -
+ *index);
+ count++) {
+ buffer[*index + count] =
+ *(data_buffer + count);
+ }
+ hash_processblock(device_data,
+ (const u32 *)buffer,
+ HASH_BLOCK_SIZE);
+ }
+ hash_incrementlength(ctx, HASH_BLOCK_SIZE);
+ data_buffer += (HASH_BLOCK_SIZE - *index);
+
+ msg_length -= (HASH_BLOCK_SIZE - *index);
+ *index = 0;
+
+ ret = hash_save_state(device_data,
+ &ctx->state);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_save_state()"
+ " failed!", __func__);
+ goto out;
+ }
+ }
+ } while (msg_length != 0);
+out:
+
+ return ret;
+}
+
+/**
+ * hash_dma_final - The hash dma final function for SHA1/SHA256.
+ * @req: The hash request for the job.
+ */
+static int hash_dma_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_device_data *device_data;
+ u8 digest[SHA256_DIGEST_SIZE];
+ int bytes_written = 0;
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "hash_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ if (ctx->updated) {
+ ret = hash_resume_state(device_data, &ctx->state);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_resume_state() "
+ "failed!", __func__);
+ goto out_power;
+ }
+
+ }
+
+ if (!ctx->updated) {
+ ret = hash_setconfiguration(device_data, &ctx->config);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_setconfiguration() failed!",
+ __func__);
+ goto out_power;
+ }
+
+ /* Enable DMA input */
+ if (hash_mode != HASH_MODE_DMA || !ctx->dma_mode) {
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_DMAE_MASK);
+ } else {
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_DMAE_MASK);
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_PRIVN_MASK);
+ }
+
+ HASH_INITIALIZE;
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+ hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+
+ /* Number of bits in last word = (nbytes * 8) % 32 */
+ HASH_SET_NBLW((req->nbytes * 8) % 32);
+ ctx->updated = 1;
+ }
+
+ /* Store the nents in the dma struct. */
+ ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
+ if (!ctx->device->dma.nents) {
+ dev_err(device_data->dev, "[%s] "
+ "ctx->device->dma.nents = 0", __func__);
+ goto out_power;
+ }
+
+ bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
+ if (bytes_written != req->nbytes) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_dma_write() failed!", __func__);
+ goto out_power;
+ }
+
+ wait_for_completion(&ctx->device->dma.complete);
+ hash_dma_done(ctx);
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+ unsigned int keylen = ctx->keylen;
+ u8 *key = ctx->key;
+
+ dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
+ ctx->keylen);
+ hash_hw_write_key(device_data, key, keylen);
+ }
+
+ hash_get_digest(device_data, digest, ctx->config.algorithm);
+ memcpy(req->result, digest, ctx->digestsize);
+
+out_power:
+ /* Disable power (and clock) */
+ if (hash_disable_power(device_data, false))
+ dev_err(device_data->dev, "[%s] hash_disable_power() failed!",
+ __func__);
+
+out:
+ release_hash_device(device_data);
+
+ /* Allocated in setkey, and only used in HMAC. */
+ kfree(ctx->key);
+
+ return ret;
+}
+
+/**
+ * hash_hw_final - The final hash calculation function
+ * @req: The hash request for the job.
+ */
+int hash_hw_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_device_data *device_data;
+ u8 digest[SHA256_DIGEST_SIZE];
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "hash_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ if (ctx->updated) {
+ ret = hash_resume_state(device_data, &ctx->state);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_resume_state() "
+ "failed!", __func__);
+ goto out_power;
+ }
+ } else if (req->nbytes == 0 && ctx->keylen == 0) {
+ u8 zero_hash[SHA256_DIGEST_SIZE];
+ u32 zero_hash_size = 0;
+ bool zero_digest = false;
+ /*
+ * Use a pre-calculated empty message digest
+ * (workaround since the hw returns zeroes, hw bug!?)
+ */
+ ret = get_empty_message_digest(device_data, &zero_hash[0],
+ &zero_hash_size, &zero_digest);
+ if (!ret && likely(zero_hash_size == ctx->digestsize) &&
+ zero_digest) {
+ memcpy(req->result, &zero_hash[0], ctx->digestsize);
+ goto out_power;
+ } else if (!ret && !zero_digest) {
+ dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
+ "key, continue...", __func__);
+ } else {
+ dev_err(device_data->dev, "[%s] ret=%d, or wrong "
+ "digest size? %s", __func__, ret,
+ (zero_hash_size == ctx->digestsize) ?
+ "true" : "false");
+ /* Return error */
+ goto out_power;
+ }
+ } else if (req->nbytes == 0 && ctx->keylen > 0) {
+ ret = -EPERM;
+ dev_err(device_data->dev, "[%s] Empty message with "
+ "keylength > 0, NOT supported.", __func__);
+ goto out_power;
+ }
+
+ if (!ctx->updated) {
+ ret = init_hash_hw(device_data, ctx);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] init_hash_hw() "
+ "failed!", __func__);
+ goto out_power;
+ }
+ }
+
+ if (ctx->state.index) {
+ hash_messagepad(device_data, ctx->state.buffer,
+ ctx->state.index);
+ } else {
+ HASH_SET_DCAL;
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+ }
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+ unsigned int keylen = ctx->keylen;
+ u8 *key = ctx->key;
+
+ dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
+ ctx->keylen);
+ hash_hw_write_key(device_data, key, keylen);
+ }
+
+ hash_get_digest(device_data, digest, ctx->config.algorithm);
+ memcpy(req->result, digest, ctx->digestsize);
+
+out_power:
+ /* Disable power (and clock) */
+ if (hash_disable_power(device_data, false))
+ dev_err(device_data->dev, "[%s] hash_disable_power() failed!",
+ __func__);
+
+out:
+ release_hash_device(device_data);
+
+ /* Allocated in setkey, and only used in HMAC. */
+ kfree(ctx->key);
+
+ return ret;
+}
+
+/**
+ * hash_hw_update - Updates current HASH computation hashing another part of
+ * the message.
+ * @req: The hash request for the job, containing the message to be
+ * hashed (caller allocated).
+ *
+ * Reentrancy: Non Re-entrant
+ */
+int hash_hw_update(struct ahash_request *req)
+{
+ int ret = 0;
+ u8 index = 0;
+ u8 *buffer;
+ struct hash_device_data *device_data;
+ u8 *data_buffer;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct crypto_hash_walk walk;
+ int msg_length = crypto_hash_walk_first(req, &walk);
+
+ /* Empty message ("") is correct indata */
+ if (msg_length == 0)
+ return ret;
+
+ index = ctx->state.index;
+ buffer = (u8 *)ctx->state.buffer;
+
+ /* Check if ctx->state.length + msg_length overflows */
+ if (msg_length > (ctx->state.length.low_word + msg_length) &&
+ HASH_HIGH_WORD_MAX_VAL ==
+ ctx->state.length.high_word) {
+ pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!",
+ __func__);
+ return -EPERM;
+ }
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "hash_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ /* Main loop */
+ while (0 != msg_length) {
+ data_buffer = walk.data;
+ ret = hash_process_data(device_data, ctx,
+ msg_length, data_buffer, buffer, &index);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_process_data() failed!", __func__);
+ goto out_power;
+ }
+
+ msg_length = crypto_hash_walk_done(&walk, 0);
+ }
+
+ ctx->state.index = index;
+ dev_dbg(device_data->dev, "[%s] indata length=%d, "
+ "bin=%d", __func__, ctx->state.index, ctx->state.bit_index);
+
+out_power:
+ /* Disable power (and clock) */
+ if (hash_disable_power(device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "hash_disable_power() failed!", __func__);
+out:
+ release_hash_device(device_data);
+
+ return ret;
+}
+
+/**
+ * hash_resume_state - Function that resumes the state of a calculation.
+ * @device_data: Pointer to the device structure.
+ * @device_state: The state to be restored in the hash hardware
+ *
+ * Reentrancy: Non Re-entrant
+ */
+int hash_resume_state(struct hash_device_data *device_data,
+ const struct hash_state *device_state)
+{
+ u32 temp_cr;
+ s32 count;
+ int hash_mode = HASH_OPER_MODE_HASH;
+
+ if (NULL == device_state) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /* Check correctness of index and length members */
+ if (device_state->index > HASH_BLOCK_SIZE
+ || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /*
+ * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
+ * prepare and initialize the HASH accelerator to compute the message
+ * digest of a new message.
+ */
+ HASH_INITIALIZE;
+
+ temp_cr = device_state->temp_cr;
+ writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
+
+ if (device_data->base->cr & HASH_CR_MODE_MASK)
+ hash_mode = HASH_OPER_MODE_HMAC;
+ else
+ hash_mode = HASH_OPER_MODE_HASH;
+
+ for (count = 0; count < HASH_CSR_COUNT; count++) {
+ if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
+ break;
+
+ writel_relaxed(device_state->csr[count],
+ &device_data->base->csrx[count]);
+ }
+
+ writel_relaxed(device_state->csfull, &device_data->base->csfull);
+ writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
+
+ writel_relaxed(device_state->str_reg, &device_data->base->str);
+ writel_relaxed(temp_cr, &device_data->base->cr);
+
+ return 0;
+}
+
+/**
+ * hash_save_state - Function that saves the state of hardware.
+ * @device_data: Pointer to the device structure.
+ * @device_state: The structure where the hardware state should be saved.
+ *
+ * Reentrancy: Non Re-entrant
+ */
+int hash_save_state(struct hash_device_data *device_data,
+ struct hash_state *device_state)
+{
+ u32 temp_cr;
+ u32 count;
+ int hash_mode = HASH_OPER_MODE_HASH;
+
+ if (NULL == device_state) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /* Write dummy value to force digest intermediate calculation. This
+ * actually makes sure that there isn't any ongoing calculation in the
+ * hardware.
+ */
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ temp_cr = readl_relaxed(&device_data->base->cr);
+
+ device_state->str_reg = readl_relaxed(&device_data->base->str);
+
+ device_state->din_reg = readl_relaxed(&device_data->base->din);
+
+ if (device_data->base->cr & HASH_CR_MODE_MASK)
+ hash_mode = HASH_OPER_MODE_HMAC;
+ else
+ hash_mode = HASH_OPER_MODE_HASH;
+
+ for (count = 0; count < HASH_CSR_COUNT; count++) {
+ if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
+ break;
+
+ device_state->csr[count] =
+ readl_relaxed(&device_data->base->csrx[count]);
+ }
+
+ device_state->csfull = readl_relaxed(&device_data->base->csfull);
+ device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
+
+ device_state->temp_cr = temp_cr;
+
+ return 0;
+}
+
+/**
+ * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
+ * @device_data: Structure for the hash device.
+ *
+ */
+int hash_check_hw(struct hash_device_data *device_data)
+{
+ int ret = 0;
+
+ if (NULL == device_data) {
+ ret = -EPERM;
+ pr_err(DEV_DBG_NAME " [%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ goto out;
+ }
+
+ /* Checking Peripheral Ids */
+ if ((HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0))
+ && (HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1))
+ && (HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2))
+ && (HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3))
+ && (HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0))
+ && (HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1))
+ && (HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2))
+ && (HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3))
+ ) {
+ ret = 0;
+ goto out;
+ } else {
+ ret = -EPERM;
+ dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!",
+ __func__);
+ goto out;
+ }
+out:
+ return ret;
+}
+
+/**
+ * hash_get_digest - Gets the digest.
+ * @device_data: Pointer to the device structure.
+ * @digest: User allocated byte array for the calculated digest.
+ * @algorithm: The algorithm in use.
+ *
+ * Reentrancy: Non Re-entrant, global variable registry (hash control register)
+ * is being modified.
+ *
+ * Note that, if this is called before the final message has been handled, it
+ * will return the intermediate message digest.
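+ *
+ * Byte-order example (illustrative): a HX register value of 0xaabbccdd is
+ * stored into the digest as the byte sequence 0xaa, 0xbb, 0xcc, 0xdd, i.e.
+ * most significant byte first.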
+ */
+void hash_get_digest(struct hash_device_data *device_data,
+ u8 *digest, int algorithm)
+{
+ u32 temp_hx_val, count;
+ int loop_ctr;
+
+ if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
+ dev_err(device_data->dev, "[%s] Incorrect algorithm %d",
+ __func__, algorithm);
+ return;
+ }
+
+ if (algorithm == HASH_ALGO_SHA1)
+ loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
+ else
+ loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
+
+ dev_dbg(device_data->dev, "[%s] digest array:(0x%x)",
+ __func__, (u32) digest);
+
+ /* Copy result into digest array */
+ for (count = 0; count < loop_ctr; count++) {
+ temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
+ digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
+ digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
+ digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
+ digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
+ }
+}
+
+/**
+ * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ */
+static int ahash_update(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (hash_mode != HASH_MODE_DMA || !ctx->dma_mode)
+ ret = hash_hw_update(req);
+ /* Skip update for DMA, all data will be passed to DMA in final */
+
+ if (ret) {
+ pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
+ __func__);
+ }
+
+ return ret;
+}
+
+/**
+ * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ */
+static int ahash_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);
+
+ if ((hash_mode == HASH_MODE_DMA) && ctx->dma_mode)
+ ret = hash_dma_final(req);
+ else
+ ret = hash_hw_final(req);
+
+ if (ret) {
+ pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed",
+ __func__);
+ }
+
+ return ret;
+}
+
+static int hash_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen, int alg)
+{
+ int ret = 0;
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Freed in final. */
+ ctx->key = kmalloc(keylen, GFP_KERNEL);
+ if (!ctx->key) {
+ pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
+ "for %d\n", __func__, alg);
+ return -ENOMEM;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return ret;
+}
+
+static int ahash_sha1_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA1;
+ ctx->config.oper_mode = HASH_OPER_MODE_HASH;
+ ctx->digestsize = SHA1_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int ahash_sha256_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA256;
+ ctx->config.oper_mode = HASH_OPER_MODE_HASH;
+ ctx->digestsize = SHA256_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int ahash_sha1_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = ahash_sha1_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int ahash_sha256_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = ahash_sha256_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha1_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA1;
+ ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
+ ctx->digestsize = SHA1_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int hmac_sha256_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA256;
+ ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
+ ctx->digestsize = SHA256_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int hmac_sha1_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = hmac_sha1_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha256_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = hmac_sha256_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha1_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
+}
+
+static int hmac_sha256_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
+}
+
+static struct ahash_alg ahash_sha1_alg = {
+ .init = ahash_sha1_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = ahash_sha1_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct ahash_alg ahash_sha256_alg = {
+ .init = ahash_sha256_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = ahash_sha256_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct ahash_alg hmac_sha1_alg = {
+ .init = hmac_sha1_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = hmac_sha1_digest,
+ .setkey = hmac_sha1_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac-sha1-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct ahash_alg hmac_sha256_alg = {
+ .init = hmac_sha256_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = hmac_sha256_digest,
+ .setkey = hmac_sha256_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac-sha256-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/**
+ * struct ahash_alg *ux500_ahash_algs[] - The algorithms registered by this driver.
+ */
+static struct ahash_alg *ux500_ahash_algs[] = {
+ &ahash_sha1_alg,
+ &ahash_sha256_alg,
+ &hmac_sha1_alg,
+ &hmac_sha256_alg
+};
+
+/**
+ * ahash_algs_register_all - Registers all supported ahash algorithms.
+ */
+static int ahash_algs_register_all(struct hash_device_data *device_data)
+{
+ int ret;
+ int i;
+ int count;
+
+ for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) {
+ ret = crypto_register_ahash(ux500_ahash_algs[i]);
+ if (ret) {
+ count = i;
+ dev_err(device_data->dev, "[%s] alg registration"
+ " failed",
+ ux500_ahash_algs[i]->halg.base.cra_driver_name);
+ goto unreg;
+ }
+ }
+ return 0;
+unreg:
+ for (i = 0; i < count; i++)
+ crypto_unregister_ahash(ux500_ahash_algs[i]);
+ return ret;
+}
+
+/**
+ * ahash_algs_unregister_all - Unregisters all registered ahash algorithms.
+ */
+static void ahash_algs_unregister_all(struct hash_device_data *device_data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++)
+ crypto_unregister_ahash(ux500_ahash_algs[i]);
+}
+
+/**
+ * ux500_hash_probe - Function that probes the hash hardware.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res = NULL;
+ struct hash_device_data *device_data;
+ struct device *dev = &pdev->dev;
+
+ device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC);
+ if (!device_data) {
+ dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ device_data->dev = dev;
+ device_data->current_ctx = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__);
+ ret = -ENODEV;
+ goto out_kfree;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL) {
+ dev_dbg(dev, "[%s] request_mem_region() failed!", __func__);
+ ret = -EBUSY;
+ goto out_kfree;
+ }
+
+ device_data->base = ioremap(res->start, resource_size(res));
+ if (!device_data->base) {
+ dev_err(dev, "[%s] ioremap() failed!",
+ __func__);
+ ret = -ENOMEM;
+ goto out_free_mem;
+ }
+ spin_lock_init(&device_data->ctx_lock);
+ spin_lock_init(&device_data->power_state_lock);
+
+ /* Enable power for HASH1 hardware block */
+ device_data->regulator = ux500_regulator_get(dev);
+ if (IS_ERR(device_data->regulator)) {
+ dev_err(dev, "[%s] regulator_get() failed!", __func__);
+ ret = PTR_ERR(device_data->regulator);
+ device_data->regulator = NULL;
+ goto out_unmap;
+ }
+
+ /* Enable the clock for HASH1 hardware block */
+ device_data->clk = clk_get(dev, NULL);
+ if (IS_ERR(device_data->clk)) {
+ dev_err(dev, "[%s] clk_get() failed!", __func__);
+ ret = PTR_ERR(device_data->clk);
+ goto out_regulator;
+ }
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
+ goto out_clk;
+ }
+
+ ret = hash_check_hw(device_data);
+ if (ret) {
+ dev_err(dev, "[%s] hash_check_hw() failed!", __func__);
+ goto out_power;
+ }
+
+ if (hash_mode == HASH_MODE_DMA)
+ hash_dma_setup_channel(device_data, dev);
+
+ platform_set_drvdata(pdev, device_data);
+
+ /* Put the new device into the device list... */
+ klist_add_tail(&device_data->list_node, &driver_data.device_list);
+ /* ... and signal that a new device is available. */
+ up(&driver_data.device_allocation);
+
+ ret = ahash_algs_register_all(device_data);
+ if (ret) {
+ dev_err(dev, "[%s] ahash_algs_register_all() "
+ "failed!", __func__);
+ goto out_power;
+ }
+
+ if (hash_disable_power(device_data, false))
+ dev_err(dev, "[%s]: hash_disable_power() failed!", __func__);
+
+ dev_info(dev, "[%s] successfully probed\n", __func__);
+ return 0;
+
+out_power:
+ hash_disable_power(device_data, false);
+
+out_clk:
+ clk_put(device_data->clk);
+
+out_regulator:
+ ux500_regulator_put(device_data->regulator);
+
+out_unmap:
+ iounmap(device_data->base);
+
+out_free_mem:
+ release_mem_region(res->start, resource_size(res));
+
+out_kfree:
+ kfree(device_data);
+out:
+ return ret;
+}
+
+/**
+ * ux500_hash_remove - Function that removes the hash device from the platform.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct hash_device_data *device_data;
+ struct device *dev = &pdev->dev;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Try to decrease the number of available devices. */
+ if (down_trylock(&driver_data.device_allocation))
+ return -EBUSY;
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (device_data->current_ctx) {
+ /* The device is busy */
+ spin_unlock(&device_data->ctx_lock);
+ /* Return the device to the pool. */
+ up(&driver_data.device_allocation);
+ return -EBUSY;
+ }
+
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ ahash_algs_unregister_all(device_data);
+
+ if (hash_disable_power(device_data, false))
+ dev_err(dev, "[%s]: hash_disable_power() failed",
+ __func__);
+
+ clk_put(device_data->clk);
+ ux500_regulator_put(device_data->regulator);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(device_data);
+
+ return 0;
+}
+
+/**
+ * ux500_hash_shutdown - Function that shuts down the hash device.
+ * @pdev: The platform device
+ */
+static void ux500_hash_shutdown(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ struct hash_device_data *device_data;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return;
+ }
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (!device_data->current_ctx) {
+ if (down_trylock(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: Hash still in use! "
+ "Shutting down anyway...", __func__);
+ /*
+ * (Allocate the device)
+ * Need to set this to a non-null (dummy) value,
+ * to prevent the device from being used by another context.
+ */
+ device_data->current_ctx++;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ ahash_algs_unregister_all(device_data);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ if (hash_disable_power(device_data, false))
+ dev_err(&pdev->dev, "[%s] hash_disable_power() failed",
+ __func__);
+}
+
+/**
+ * ux500_hash_suspend - Function that suspends the hash device.
+ * @pdev: The platform device.
+ * @state: The power management state (not used).
+ */
+static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret;
+ struct hash_device_data *device_data;
+ struct hash_ctx *temp_ctx = NULL;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
+ if (!device_data->current_ctx)
+ device_data->current_ctx++;
+ spin_unlock(&device_data->ctx_lock);
+
+ if (device_data->current_ctx == ++temp_ctx) {
+ if (down_interruptible(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
+ "failed", __func__);
+ ret = hash_disable_power(device_data, false);
+
+ } else
+ ret = hash_disable_power(device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__);
+
+ return ret;
+}
+
+/**
+ * ux500_hash_resume - Function that resumes the hash device.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_resume(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct hash_device_data *device_data;
+ struct hash_ctx *temp_ctx = NULL;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
+ if (device_data->current_ctx == ++temp_ctx)
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ if (!device_data->current_ctx)
+ up(&driver_data.device_allocation);
+ else
+ ret = hash_enable_power(device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!",
+ __func__);
+
+ return ret;
+}
+
+static struct platform_driver hash_driver = {
+ .probe = ux500_hash_probe,
+ .remove = ux500_hash_remove,
+ .shutdown = ux500_hash_shutdown,
+ .suspend = ux500_hash_suspend,
+ .resume = ux500_hash_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hash1",
+ }
+};
+
+/**
+ * ux500_hash_mod_init - The kernel module init function.
+ */
+static int __init ux500_hash_mod_init(void)
+{
+ klist_init(&driver_data.device_list, NULL, NULL);
+ /* Initialize the semaphore to 0 devices (locked state) */
+ sema_init(&driver_data.device_allocation, 0);
+
+ return platform_driver_register(&hash_driver);
+}
+
+/**
+ * ux500_hash_mod_fini - The kernel module exit function.
+ */
+static void __exit ux500_hash_mod_fini(void)
+{
+ platform_driver_unregister(&hash_driver);
+}
+
+module_init(ux500_hash_mod_init);
+module_exit(ux500_hash_mod_fini);
+
+MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("sha1-all");
+MODULE_ALIAS("sha256-all");
+MODULE_ALIAS("hmac-sha1-all");
+MODULE_ALIAS("hmac-sha256-all");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cc5ecbc067a..6f618d45b2b 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -331,6 +331,7 @@ struct d40_base {
int irq;
int num_phy_chans;
int num_log_chans;
+ struct device_dma_parameters dma_parms;
struct dma_device dma_both;
struct dma_device dma_slave;
struct dma_device dma_memcpy;
@@ -2080,7 +2081,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
if (sg_next(&sg_src[sg_len - 1]) == sg_src)
desc->cyclic = true;
- if (direction != DMA_NONE) {
+ if (direction != DMA_TRANS_NONE) {
dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
if (direction == DMA_DEV_TO_MEM)
@@ -2521,6 +2522,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
return -EINVAL;
}
+ if (src_maxburst > 16) {
+ src_maxburst = 16;
+ dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
+ } else if (dst_maxburst > 16) {
+ dst_maxburst = 16;
+ src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
+ }
+
ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
src_addr_width,
src_maxburst);
@@ -2582,6 +2591,56 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return -ENXIO;
}
+dma_addr_t stedma40_get_src_addr(struct dma_chan *chan)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ dma_addr_t addr;
+
+ if (chan_is_physical(d40c))
+ addr = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSPTR);
+ else {
+ unsigned long lower;
+ unsigned long upper;
+
+ /*
+ * There is a potential for overflow between the time the two
+ * halves of the pointer are read.
+ */
+ lower = d40c->lcpa->lcsp0 & D40_MEM_LCSP0_SPTR_MASK;
+ upper = d40c->lcpa->lcsp1 & D40_MEM_LCSP1_SPTR_MASK;
+
+ addr = upper | lower;
+ }
+
+ return addr;
+}
+EXPORT_SYMBOL(stedma40_get_src_addr);
+
+dma_addr_t stedma40_get_dst_addr(struct dma_chan *chan)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ dma_addr_t addr;
+
+ if (chan_is_physical(d40c))
+ addr = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDPTR);
+ else {
+ unsigned long lower;
+ unsigned long upper;
+
+ lower = d40c->lcpa->lcsp2 & D40_MEM_LCSP2_DPTR_MASK;
+ upper = d40c->lcpa->lcsp3 & D40_MEM_LCSP3_DPTR_MASK;
+
+ addr = upper | lower;
+ }
+
+ return addr;
+}
+EXPORT_SYMBOL(stedma40_get_dst_addr);
+
/* Initialization functions */
static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
@@ -3294,6 +3353,13 @@ static int __init d40_probe(struct platform_device *pdev)
if (err)
goto failure;
+ base->dev->dma_parms = &base->dma_parms;
+ err = dma_set_max_seg_size(base->dev, 0xffff);
+ if (err) {
+ d40_err(&pdev->dev, "Failed to set dma max seg size\n");
+ goto failure;
+ }
+
d40_hw_init(base);
dev_info(base->dev, "initialized\n");
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index cad9e1daedf..d47d1fa36b9 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -102,16 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+ /* Set the priority bit to high for the physical channel */
+ if (cfg->high_priority) {
+ src |= 1 << D40_SREG_CFG_PRI_POS;
+ dst |= 1 << D40_SREG_CFG_PRI_POS;
+ }
+
} else {
/* Logical channel */
dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
}
- if (cfg->high_priority) {
- src |= 1 << D40_SREG_CFG_PRI_POS;
- dst |= 1 << D40_SREG_CFG_PRI_POS;
- }
if (cfg->src_info.big_endian)
src |= 1 << D40_SREG_CFG_LBE_POS;
@@ -331,10 +333,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcpa[0].lcsp0);
- writel(lli_src->lcsp13, &lcpa[0].lcsp1);
- writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
- writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
+ writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
+ writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
+ writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
+ writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
}
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
@@ -344,10 +346,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcla[0].lcsp02);
- writel(lli_src->lcsp13, &lcla[0].lcsp13);
- writel(lli_dst->lcsp02, &lcla[1].lcsp02);
- writel(lli_dst->lcsp13, &lcla[1].lcsp13);
+ writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
+ writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
+ writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
+ writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
}
static void d40_log_fill_lli(struct d40_log_lli *lli,
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 8d3d490968a..9bf7b72316f 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -96,10 +96,13 @@
/* LCSP2 */
#define D40_MEM_LCSP2_ECNT_POS 16
+#define D40_MEM_LCSP2_DPTR_POS 0
#define D40_MEM_LCSP2_ECNT_MASK (0xFFFF << D40_MEM_LCSP2_ECNT_POS)
+#define D40_MEM_LCSP2_DPTR_MASK (0xFFFF << D40_MEM_LCSP2_DPTR_POS)
/* LCSP3 */
+#define D40_MEM_LCSP3_DPTR_POS 16
#define D40_MEM_LCSP3_DCFG_MST_POS 15
#define D40_MEM_LCSP3_DCFG_TIM_POS 14
#define D40_MEM_LCSP3_DCFG_EIM_POS 13
@@ -109,6 +112,7 @@
#define D40_MEM_LCSP3_DLOS_POS 1
#define D40_MEM_LCSP3_DTCP_POS 0
+#define D40_MEM_LCSP3_DPTR_MASK (0xFFFF << D40_MEM_LCSP3_DPTR_POS)
#define D40_MEM_LCSP3_DLOS_MASK (0x7F << D40_MEM_LCSP3_DLOS_POS)
#define D40_MEM_LCSP3_DTCP_MASK (0x1 << D40_MEM_LCSP3_DTCP_POS)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d0c41188d4e..5e073f74baa 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -479,7 +479,7 @@ config GPIO_JANZ_TTL
config GPIO_AB8500
bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
- depends on AB8500_CORE && BROKEN
+ depends on AB8500_CORE
help
Select this to enable the AB8500 IC GPIO driver
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
index 050c05d9189..273f7caba22 100644
--- a/drivers/gpio/gpio-ab8500.c
+++ b/drivers/gpio/gpio-ab8500.c
@@ -18,9 +18,9 @@
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500/gpio.h>
+#include <linux/mfd/abx500/ab8500-gpio.h>
+
/*
* GPIO registers offset
@@ -60,7 +60,7 @@
#define AB8500_GPIO_IN4_REG 0x43
#define AB8500_GPIO_IN5_REG 0x44
#define AB8500_GPIO_IN6_REG 0x45
-#define AB8500_GPIO_ALTFUN_REG 0x45
+#define AB8500_GPIO_ALTFUN_REG 0x50
#define ALTFUN_REG_INDEX 6
#define AB8500_NUM_GPIO 42
#define AB8500_NUM_VIR_GPIO_IRQ 16
@@ -115,7 +115,7 @@ static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
u8 mask = 1 << (offset % 8);
- u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8);
+ u8 reg = AB8500_GPIO_IN1_REG + (offset / 8);
int ret;
u8 data;
ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
@@ -132,7 +132,7 @@ static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
int ret;
/* Write the data */
- ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, 1);
+ ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
if (ret < 0)
dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
}
@@ -174,9 +174,9 @@ static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
int start;
int end;
} clusters[] = {
- {.start = 6, .end = 13},
- {.start = 24, .end = 25},
- {.start = 36, .end = 41},
+ {.start = 5, .end = 12}, /* GPIO numbers start from 1 */
+ {.start = 23, .end = 24},
+ {.start = 35, .end = 40},
};
struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
int base = ab8500_gpio->irq_base;
@@ -207,7 +207,7 @@ static struct gpio_chip ab8500gpio_chip = {
static unsigned int irq_to_rising(unsigned int irq)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_get_chip_data(irq);
int offset = irq - ab8500_gpio->irq_base;
int new_irq = offset + AB8500_INT_GPIO6R
+ ab8500_gpio->parent->irq_base;
@@ -216,7 +216,7 @@ static unsigned int irq_to_rising(unsigned int irq)
static unsigned int irq_to_falling(unsigned int irq)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_get_chip_data(irq);
int offset = irq - ab8500_gpio->irq_base;
int new_irq = offset + AB8500_INT_GPIO6F
+ ab8500_gpio->parent->irq_base;
@@ -261,15 +261,16 @@ static irqreturn_t handle_falling(int irq, void *dev)
return IRQ_HANDLED;
}
-static void ab8500_gpio_irq_lock(unsigned int irq)
+static void ab8500_gpio_irq_lock(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
mutex_lock(&ab8500_gpio->lock);
}
-static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
+static void ab8500_gpio_irq_sync_unlock(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
+ unsigned int irq = data->irq;
int offset = irq - ab8500_gpio->irq_base;
bool rising = ab8500_gpio->rising & BIT(offset);
bool falling = ab8500_gpio->falling & BIT(offset);
@@ -316,21 +317,22 @@ static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
}
-static void ab8500_gpio_irq_mask(unsigned int irq)
+static void ab8500_gpio_irq_mask(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = MASK;
}
-static void ab8500_gpio_irq_unmask(unsigned int irq)
+static void ab8500_gpio_irq_unmask(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = UNMASK;
}
-static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int ab8500_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
+ unsigned int irq = data->irq;
int offset = irq - ab8500_gpio->irq_base;
if (type == IRQ_TYPE_EDGE_BOTH) {
@@ -344,28 +346,28 @@ static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
return 0;
}
-unsigned int ab8500_gpio_irq_startup(unsigned int irq)
+unsigned int ab8500_gpio_irq_startup(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = STARTUP;
return 0;
}
-void ab8500_gpio_irq_shutdown(unsigned int irq)
+void ab8500_gpio_irq_shutdown(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = SHUTDOWN;
}
static struct irq_chip ab8500_gpio_irq_chip = {
.name = "ab8500-gpio",
- .startup = ab8500_gpio_irq_startup,
- .shutdown = ab8500_gpio_irq_shutdown,
- .bus_lock = ab8500_gpio_irq_lock,
- .bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
- .mask = ab8500_gpio_irq_mask,
- .unmask = ab8500_gpio_irq_unmask,
- .set_type = ab8500_gpio_irq_set_type,
+ .irq_startup = ab8500_gpio_irq_startup,
+ .irq_shutdown = ab8500_gpio_irq_shutdown,
+ .irq_bus_lock = ab8500_gpio_irq_lock,
+ .irq_bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
+ .irq_mask = ab8500_gpio_irq_mask,
+ .irq_unmask = ab8500_gpio_irq_unmask,
+ .irq_set_type = ab8500_gpio_irq_set_type,
};
static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
@@ -374,14 +376,14 @@ static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
int irq;
for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) {
- set_irq_chip_data(irq, ab8500_gpio);
- set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip,
+ irq_set_chip_data(irq, ab8500_gpio);
+ irq_set_chip_and_handler(irq, &ab8500_gpio_irq_chip,
handle_simple_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -397,8 +399,8 @@ static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio)
#ifdef CONFIG_ARM
set_irq_flags(irq, 0);
#endif
- set_irq_chip_and_handler(irq, NULL, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
}
@@ -443,6 +445,18 @@ static int __devinit ab8500_gpio_probe(struct platform_device *pdev)
pdata->config_reg[i]);
if (ret < 0)
goto out_free;
+
+ ret = abx500_set_register_interruptible(ab8500_gpio->dev,
+ AB8500_MISC, i + AB8500_GPIO_DIR1_REG,
+ pdata->config_direction[i]);
+ if (ret < 0)
+ goto out_free;
+
+ ret = abx500_set_register_interruptible(ab8500_gpio->dev,
+ AB8500_MISC, i + AB8500_GPIO_PUD1_REG,
+ pdata->config_pullups[i]);
+ if (ret < 0)
+ goto out_free;
}
ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
AB8500_GPIO_ALTFUN_REG,
@@ -493,6 +507,86 @@ static int __devexit ab8500_gpio_remove(struct platform_device *pdev)
return 0;
}
+int ab8500_config_pulldown(struct device *dev,
+ enum ab8500_pin gpio, bool enable)
+{
+ u8 offset = gpio - AB8500_PIN_GPIO1;
+ u8 pos = offset % 8;
+ u8 val = enable ? 0 : 1;
+ u8 reg = AB8500_GPIO_PUD1_REG + (offset / 8);
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(dev,
+ AB8500_MISC, reg, 1 << pos, val << pos);
+ if (ret < 0)
+ dev_err(dev, "%s write failed\n", __func__);
+ return ret;
+}
+EXPORT_SYMBOL(ab8500_config_pulldown);
+
+/*
+ * ab8500_gpio_config_select()
+ *
+ * Configure the functionality of a pin, either a specific use or GPIO.
+ * @dev: device pointer
+ * @gpio: gpio number
+ * @gpio_select: true if the pin should be used as GPIO
+ */
+int ab8500_gpio_config_select(struct device *dev,
+ enum ab8500_pin gpio, bool gpio_select)
+{
+ u8 offset = gpio - AB8500_PIN_GPIO1;
+ u8 reg = AB8500_GPIO_SEL1_REG + (offset / 8);
+ u8 pos = offset % 8;
+ u8 val = gpio_select ? 1 : 0;
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(dev,
+ AB8500_MISC, reg, 1 << pos, val << pos);
+ if (ret < 0)
+ dev_err(dev, "%s write failed\n", __func__);
+
+ dev_vdbg(dev, "%s (bank, addr, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ __func__, AB8500_MISC, reg, 1 << pos, val << pos);
+
+ return ret;
+}
+
+/*
+ * ab8500_gpio_config_get_select()
+ *
+ * Read the currently configured functionality, either a specific use or GPIO.
+ * @dev: device pointer
+ * @gpio: gpio number
+ * @gpio_select: pointer to pin selection status
+ */
+int ab8500_gpio_config_get_select(struct device *dev,
+ enum ab8500_pin gpio, bool *gpio_select)
+{
+ u8 offset = gpio - AB8500_PIN_GPIO1;
+ u8 reg = AB8500_GPIO_SEL1_REG + (offset / 8);
+ u8 pos = offset % 8;
+ u8 val;
+ int ret;
+
+ ret = abx500_get_register_interruptible(dev,
+ AB8500_MISC, reg, &val);
+ if (ret < 0) {
+ dev_err(dev, "%s read failed\n", __func__);
+ return ret;
+ }
+
+ if (val & (1 << pos))
+ *gpio_select = true;
+ else
+ *gpio_select = false;
+
+ dev_vdbg(dev, "%s (bank, addr, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ __func__, AB8500_MISC, reg, 1 << pos, val);
+
+ return 0;
+}
+
static struct platform_driver ab8500_gpio_driver = {
.driver = {
.name = "ab8500-gpio",
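For reference, a minimal sketch of how board or platform code might call the helpers added above. The pin value AB8500_PIN_GPIO16, the calling context, and the header providing enum ab8500_pin are assumptions for illustration only, not part of this patch.

#include <linux/device.h>
#include <linux/errno.h>
/* Header path assumed: include whatever declares enum ab8500_pin and the
 * ab8500 GPIO helpers in your tree. */

static int example_setup_ab8500_pin(struct device *ab8500_dev)
{
	bool is_gpio;
	int ret;

	/* Route the (illustrative) pin to the GPIO block instead of its
	 * alternate function */
	ret = ab8500_gpio_config_select(ab8500_dev, AB8500_PIN_GPIO16, true);
	if (ret < 0)
		return ret;

	/* Disable the internal pulldown on the same pin */
	ret = ab8500_config_pulldown(ab8500_dev, AB8500_PIN_GPIO16, false);
	if (ret < 0)
		return ret;

	/* Read back the selection to confirm the register write took effect */
	ret = ab8500_gpio_config_get_select(ab8500_dev, AB8500_PIN_GPIO16,
					    &is_gpio);
	if (ret < 0)
		return ret;

	return is_gpio ? 0 : -EIO;
}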
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 839624f9fe6..c101825235a 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -23,11 +23,11 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/gpio/nomadik.h>
#include <asm/mach/irq.h>
#include <plat/pincfg.h>
-#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
#include <asm/gpio.h>
@@ -58,8 +58,11 @@ struct nmk_gpio_chip {
u32 real_wake;
u32 rwimsc;
u32 fwimsc;
+ u32 rimsc;
+ u32 fimsc;
u32 slpm;
u32 pull_up;
+ u32 lowemi;
};
static struct nmk_gpio_chip *
@@ -124,6 +127,24 @@ static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
}
}
+static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, bool lowemi)
+{
+ u32 bit = BIT(offset);
+ bool enabled = nmk_chip->lowemi & bit;
+
+ if (lowemi == enabled)
+ return;
+
+ if (lowemi)
+ nmk_chip->lowemi |= bit;
+ else
+ nmk_chip->lowemi &= ~bit;
+
+ writel_relaxed(nmk_chip->lowemi,
+ nmk_chip->addr + NMK_GPIO_LOWEMI);
+}
+
static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
unsigned offset)
{
@@ -150,8 +171,8 @@ static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
unsigned offset, int gpio_mode,
bool glitch)
{
- u32 rwimsc = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
- u32 fwimsc = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
+ u32 rwimsc = nmk_chip->rwimsc;
+ u32 fwimsc = nmk_chip->fwimsc;
if (glitch && nmk_chip->set_ioforce) {
u32 bit = BIT(offset);
@@ -173,6 +194,36 @@ static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
}
}
+static void
+nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
+{
+ u32 falling = nmk_chip->fimsc & BIT(offset);
+ u32 rising = nmk_chip->rimsc & BIT(offset);
+ int gpio = nmk_chip->chip.base + offset;
+ int irq = NOMADIK_GPIO_TO_IRQ(gpio);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ if (!rising && !falling)
+ return;
+
+ if (!d || !irqd_irq_disabled(d))
+ return;
+
+ if (rising) {
+ nmk_chip->rimsc &= ~BIT(offset);
+ writel_relaxed(nmk_chip->rimsc,
+ nmk_chip->addr + NMK_GPIO_RIMSC);
+ }
+
+ if (falling) {
+ nmk_chip->fimsc &= ~BIT(offset);
+ writel_relaxed(nmk_chip->fimsc,
+ nmk_chip->addr + NMK_GPIO_FIMSC);
+ }
+
+ dev_dbg(nmk_chip->chip.dev, "%d: clearing interrupt mask\n", gpio);
+}
+
static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
pin_cfg_t cfg, bool sleep, unsigned int *slpmregs)
{
@@ -238,6 +289,17 @@ static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
__nmk_gpio_set_pull(nmk_chip, offset, pull);
}
+ __nmk_gpio_set_lowemi(nmk_chip, offset, PIN_LOWEMI(cfg));
+
+ /*
+ * If the pin is switching to altfunc, and there was an interrupt
+ * installed on it which has been lazily disabled, actually mask the
+ * interrupt to prevent spurious interrupts that would occur while the
+ * pin is under control of the peripheral. Only SKE does this.
+ */
+ if (af != NMK_GPIO_ALT_GPIO)
+ nmk_gpio_disable_lazy_irq(nmk_chip, offset);
+
/*
* If we've backed up the SLPM registers (glitch workaround), modify
* the backups since they will be restored.
@@ -359,7 +421,7 @@ static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
/**
* nmk_config_pin - configure a pin's mux attributes
* @cfg: pin configuration
- *
+ * @sleep: true to apply the sleep mode configuration
* Configures a pin's mode (alternate function or GPIO), its pull up status,
* and its sleep mode based on the specified configuration. The @cfg is
* usually one of the SoC specific macros defined in mach/<soc>-pins.h. These
@@ -556,27 +618,38 @@ static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
int gpio, enum nmk_gpio_irq_type which,
bool enable)
{
- u32 rimsc = which == WAKE ? NMK_GPIO_RWIMSC : NMK_GPIO_RIMSC;
- u32 fimsc = which == WAKE ? NMK_GPIO_FWIMSC : NMK_GPIO_FIMSC;
u32 bitmask = nmk_gpio_get_bitmask(gpio);
- u32 reg;
+ u32 *rimscval;
+ u32 *fimscval;
+ u32 rimscreg;
+ u32 fimscreg;
+
+ if (which == NORMAL) {
+ rimscreg = NMK_GPIO_RIMSC;
+ fimscreg = NMK_GPIO_FIMSC;
+ rimscval = &nmk_chip->rimsc;
+ fimscval = &nmk_chip->fimsc;
+ } else {
+ rimscreg = NMK_GPIO_RWIMSC;
+ fimscreg = NMK_GPIO_FWIMSC;
+ rimscval = &nmk_chip->rwimsc;
+ fimscval = &nmk_chip->fwimsc;
+ }
/* we must individually set/clear the two edges */
if (nmk_chip->edge_rising & bitmask) {
- reg = readl(nmk_chip->addr + rimsc);
if (enable)
- reg |= bitmask;
+ *rimscval |= bitmask;
else
- reg &= ~bitmask;
- writel(reg, nmk_chip->addr + rimsc);
+ *rimscval &= ~bitmask;
+ writel(*rimscval, nmk_chip->addr + rimscreg);
}
if (nmk_chip->edge_falling & bitmask) {
- reg = readl(nmk_chip->addr + fimsc);
if (enable)
- reg |= bitmask;
+ *fimscval |= bitmask;
else
- reg &= ~bitmask;
- writel(reg, nmk_chip->addr + fimsc);
+ *fimscval &= ~bitmask;
+ writel(*fimscval, nmk_chip->addr + fimscreg);
}
}
@@ -1008,9 +1081,6 @@ void nmk_gpio_wakeups_suspend(void)
clk_enable(chip->clk);
- chip->rwimsc = readl(chip->addr + NMK_GPIO_RWIMSC);
- chip->fwimsc = readl(chip->addr + NMK_GPIO_FWIMSC);
-
writel(chip->rwimsc & chip->real_wake,
chip->addr + NMK_GPIO_RWIMSC);
writel(chip->fwimsc & chip->real_wake,
@@ -1123,7 +1193,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
*/
nmk_chip->bank = dev->id;
nmk_chip->clk = clk;
- nmk_chip->addr = io_p2v(res->start);
+ nmk_chip->addr = __io(IO_ADDRESS(res->start));
nmk_chip->chip = nmk_gpio_template;
nmk_chip->parent_irq = irq;
nmk_chip->secondary_parent_irq = secondary_irq;
@@ -1139,6 +1209,10 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
chip->dev = &dev->dev;
chip->owner = THIS_MODULE;
+ clk_enable(nmk_chip->clk);
+ nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
+ clk_disable(nmk_chip->clk);
+
ret = gpiochip_add(&nmk_chip->chip);
if (ret)
goto out_free;
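The lazy-disable logic added above only comes into play once a client driver has mapped a Nomadik GPIO to its interrupt and enabled edge triggering. A rough sketch of that client side follows, assuming pin 42, a trivial handler, and the platform header that provides NOMADIK_GPIO_TO_IRQ; all of these are placeholders for illustration.

#include <linux/interrupt.h>
#include <linux/irq.h>
/* Assumed: a platform header providing NOMADIK_GPIO_TO_IRQ() */

static irqreturn_t example_nmk_gpio_handler(int irq, void *dev_id)
{
	/* acknowledge/handle the event here */
	return IRQ_HANDLED;
}

static int example_request_nmk_gpio_irq(void)
{
	int gpio = 42;				/* illustrative pin number */
	int irq = NOMADIK_GPIO_TO_IRQ(gpio);	/* same mapping the driver uses above */

	return request_irq(irq, example_nmk_gpio_handler,
			   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			   "example-nmk-gpio", NULL);
}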
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd..b2ae09b4cc7 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1,2 @@
obj-y += drm/ vga/ stub/
+obj-y += mali/
diff --git a/drivers/gpu/mali/Kconfig b/drivers/gpu/mali/Kconfig
new file mode 100644
index 00000000000..0781bcbd854
--- /dev/null
+++ b/drivers/gpu/mali/Kconfig
@@ -0,0 +1,15 @@
+config GPU_MALI
+ tristate "ARM Mali 200/300/400 support"
+ depends on ARM
+ ---help---
+ This enables support for the ARM Mali 200/300/400 family of GPUs.
+ Platform-specific configuration is made in configuration files in the
+ drivers/gpu/mali/src folder.
+
+config GPU_MALI_DEBUG
+ boolean "Debug mode (required for instrumentation)"
+ default y
+ depends on GPU_MALI
+ ---help---
+ This enables debug prints in the mali device driver. Debug mode must be
+ enabled in order to use the instrumentation feature of the mali libraries.
diff --git a/drivers/gpu/mali/Makefile b/drivers/gpu/mali/Makefile
new file mode 100644
index 00000000000..93c95aca19a
--- /dev/null
+++ b/drivers/gpu/mali/Makefile
@@ -0,0 +1,10 @@
+MALI_SUBFOLDER := mali400ko/driver/src/devicedrv/mali
+MALIDRM_SUBFOLDER := mali400ko/x11/mali_drm/mali
+MALI_FOLDER := $(srctree)/$(src)/$(MALI_SUBFOLDER)
+ifeq ($(shell [ -d $(MALI_FOLDER) ] && echo "OK"), OK)
+obj-$(CONFIG_GPU_MALI) += $(MALI_SUBFOLDER)/
+obj-y += $(MALIDRM_SUBFOLDER)/
+else
+$(warning WARNING: mali: Could not find $(MALI_FOLDER) - mali device driver will not be built)
+obj-n += ./
+endif
diff --git a/drivers/gpu/mali/mali400ko/.gitignore b/drivers/gpu/mali/mali400ko/.gitignore
new file mode 100644
index 00000000000..e2d66d55882
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/.gitignore
@@ -0,0 +1,32 @@
+#symlinks created during build
+driver/src/devicedrv/mali/arch
+driver/bin
+driver/devdrv
+driver/lib
+
+#build output
+driver/build
+.tmp_versions
+*.cmd
+*.o
+*.d
+driver/src/devicedrv/linux/__mali_build_info.*
+driver/src/devicedrv/linux/modules.order
+driver/src/devicedrv/linux/Module.symvers
+driver/src/devicedrv/mali/Module.symvers
+driver/src/devicedrv/mali/__malidrv_build_info.c
+driver/src/devicedrv/mali/mali.ko
+driver/src/devicedrv/mali/mali.mod.c
+driver/src/devicedrv/mali/modules.order
+driver/out*
+driver/src/shared/essl_compiler/src/shadergen_maligp2/shader_pieces.*
+
+out
+kernel
+
+#temp files from editors and Eclipse project files
+*~
+.cproject
+.project
+.settings
+
diff --git a/drivers/gpu/mali/mali400ko/Makefile b/drivers/gpu/mali/mali400ko/Makefile
new file mode 100644
index 00000000000..9176f9f3795
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/Makefile
@@ -0,0 +1,102 @@
+#This Makefile can be called to do an out-of-kernel build of the mali kernel module.
+#It is not used when mali is built from the kernel build system.
+
+CROSS_COMPILE ?= arm-eabi-
+
+ifeq ($(TARGET_PRODUCT),ste_u5500)
+CONFIG_UX500_SOC_DB5500=y
+export CONFIG_UX500_SOC_DB5500
+endif
+
+PWD:=$(shell pwd)
+
+#if KERNEL_BUILD_DIR parameter is not set, assume there is a symlink to the kernel
+KERNEL_BUILD_DIR?=$(PWD)/kernel
+
+# android
+ifneq (,$(findstring -android,$(PLATFORM)))
+INSTALL_MOD_PATH?=$(ANDROID_OUT_TARGET_PRODUCT_DIRECTORY)/system
+endif
+
+# lbp
+ifneq (,$(findstring -linux,$(PLATFORM)))
+PREFIX?=$(MMROOT)/linux/install/cortexA9-linux-gnu-href_v1
+INSTALL_MOD_PATH?=$(PREFIX)
+endif
+
+#if still no kernel module path is set, install the modules in a local folder
+ifeq (,$(INSTALL_MOD_PATH))
+INSTALL_MOD_PATH?=$(PWD)/out
+endif
+
+KO_FLAGS=KDIR=$(KERNEL_BUILD_DIR) CROSS_COMPILE=$(CROSS_COMPILE)
+
+.PHONY: all entry check
+
+all: mali-devicedrv install-mali
+
+check:
+ @if [ ! -d $(KERNEL_BUILD_DIR) ] ; then echo "Error: provide a path to your linux kernel through KERNEL_BUILD_DIR"; exit 1 ; fi
+
+## Build ###############
+
+mali-devicedrv: check
+ $(MAKE) -C driver/src/devicedrv/mali $(KO_FLAGS) all
+
+ump-devicedrv: check
+ $(MAKE) -C driver/src/devicedrv/ump $(KO_FLAGS) all
+
+mali_drm-devicedrv: check
+ $(MAKE) -C x11/mali_drm/mali $(KO_FLAGS) all
+
+## Clean ###############
+
+clean-mali:
+ $(MAKE) clean -C driver/src/devicedrv/mali $(KO_FLAGS)
+
+clean-ump:
+ $(MAKE) clean -C driver/src/devicedrv/ump $(KO_FLAGS)
+
+clean-mali_drm:
+ $(MAKE) clean -C x11/mali_drm/mali $(KO_FLAGS)
+
+clean: clean-mali
+
+realclean:
+ rm -f driver/src/devicedrv/mali/common/*.o
+ rm -f driver/src/devicedrv/mali/linux/*.o
+ rm -f driver/src/devicedrv/mali/common/.*.cmd
+ rm -f driver/src/devicedrv/mali/linux/.*.cmd
+ rm -f driver/src/devicedrv/mali/.*.cmd
+ rm -f driver/src/devicedrv/mali/*.c
+
+distclean: realclean
+
+## Install #############
+
+install: install-mali
+
+install-mali: mali-devicedrv
+ $(MAKE) -C $(KERNEL_BUILD_DIR) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) KERNELDIR=$(KERNEL_BUILD_DIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) M=$(PWD)/driver/src/devicedrv/mali modules_install
+
+install-ump: ump-devicedrv
+ $(MAKE) -C $(KERNEL_BUILD_DIR) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) KERNELDIR=$(KERNEL_BUILD_DIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) M=$(PWD)/driver/src/devicedrv/ump modules_install
+
+install-mali_drm: mali_drm-devicedrv
+ $(MAKE) -C $(KERNEL_BUILD_DIR) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) KERNELDIR=$(KERNEL_BUILD_DIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) M=$(PWD)/x11/mali_drm/mali modules_install
+
+
+printenv:
+ @echo "\033[0;44m"
+ @echo "ANDROID_OUT_TARGET_PRODUCT_DIRECTORY: " $(ANDROID_OUT_TARGET_PRODUCT_DIRECTORY)
+ @echo "ANDROID_BSP_ROOT: " $(ANDROID_BSP_ROOT)
+ @echo "KERNEL_BUILD_DIR: " $(KERNEL_BUILD_DIR)
+ @echo "MM_INSTALL_DIR: " $(MM_INSTALL_DIR)
+ @echo "PLATFORM: " $(PLATFORM)
+ @echo "PREFIX: " $(PREFIX)
+ @echo "MMROOT: " $(MMROOT)
+ @echo "INSTALL_MOD_PATH: " $(INSTALL_MOD_PATH)
+ @echo "FOO: " $(FOO)
+ @echo "VARIANT: " $(VARIANT)
+ @echo "\033[0m"
+
diff --git a/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h
new file mode 100644
index 00000000000..3bd928aefdb
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface.h
+ *
+ * This file contains the kernel space part of the UMP API.
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_H__
+#define __UMP_KERNEL_INTERFACE_H__
+
+
+/** @defgroup ump_kernel_space_api UMP Kernel Space API
+ * @{ */
+
+
+#include "ump_kernel_platform.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/**
+ * External representation of a UMP handle in kernel space.
+ */
+typedef void * ump_dd_handle;
+
+/**
+ * Typedef for a secure ID, a system-wide identifier for UMP memory buffers.
+ */
+typedef unsigned int ump_secure_id;
+
+
+/**
+ * Value to indicate an invalid UMP memory handle.
+ */
+#define UMP_DD_HANDLE_INVALID ((ump_dd_handle)0)
+
+
+/**
+ * Value to indicate an invalid secure Id.
+ */
+#define UMP_INVALID_SECURE_ID ((ump_secure_id)-1)
+
+
+/**
+ * UMP error codes for kernel space.
+ */
+typedef enum
+{
+ UMP_DD_SUCCESS, /**< indicates success */
+ UMP_DD_INVALID, /**< indicates failure */
+} ump_dd_status_code;
+
+
+/**
+ * Struct used to describe a physical block used by UMP memory
+ */
+typedef struct ump_dd_physical_block
+{
+ unsigned long addr; /**< The physical address of the block */
+ unsigned long size; /**< The length of the block, typically page aligned */
+} ump_dd_physical_block;
+
+
+/**
+ * Retrieves the secure ID for the specified UMP memory.
+ *
+ * This identifier is unique across the entire system, and uniquely identifies
+ * the specified UMP memory. This identifier can later be used through the
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id" or
+ * @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ * functions in order to access this UMP memory, for instance from another process.
+ *
+ * @note There is a user space equivalent function called @ref ump_secure_id_get "ump_secure_id_get"
+ *
+ * @see ump_dd_handle_create_from_secure_id
+ * @see ump_handle_create_from_secure_id
+ * @see ump_secure_id_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the secure ID for the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves a handle to allocated UMP memory.
+ *
+ * The usage of UMP memory is reference counted, so this will increment the reference
+ * count by one for the specified UMP memory.
+ * Use @ref ump_dd_reference_release "ump_dd_reference_release" when there is no longer any
+ * use for the retrieved handle.
+ *
+ * @note There is a user space equivalent function called @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ *
+ * @see ump_dd_reference_release
+ * @see ump_handle_create_from_secure_id
+ *
+ * @param secure_id The secure ID of the UMP memory to open, which can be retrieved using the @ref ump_secure_id_get "ump_secure_id_get" function.
+ *
+ * @return UMP_DD_HANDLE_INVALID indicates failure, otherwise a valid handle is returned.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id);
+
+
+/**
+ * Retrieves the number of physical blocks used by the specified UMP memory.
+ *
+ * This function retrieves the number of @ref ump_dd_physical_block "ump_dd_physical_block" structs needed
+ * to describe the physical memory layout of the given UMP memory. This can later be used when calling
+ * the functions @ref ump_dd_phys_blocks_get "ump_dd_phys_blocks_get" and
+ * @ref ump_dd_phys_block_get "ump_dd_phys_block_get".
+ *
+ * @see ump_dd_phys_blocks_get
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return The number of ump_dd_physical_block structs required to describe the physical memory layout of the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves all physical memory block information for specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will fail if the num_blocks parameter is either too large or too small.
+ *
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param blocks An array of @ref ump_dd_physical_block "ump_dd_physical_block" structs that will receive the physical description.
+ * @param num_blocks The number of blocks to return in the blocks array. Use the function
+ * @ref ump_dd_phys_block_count_get "ump_dd_phys_block_count_get" first to determine the number of blocks required.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle mem, ump_dd_physical_block * blocks, unsigned long num_blocks);
+
+
+/**
+ * Retrieves the physical memory block information for specified block for the specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will return UMP_DD_INVALID if the specified index is out of range.
+ *
+ * @see ump_dd_phys_blocks_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param index Which physical info block to retrieve.
+ * @param block Pointer to a @ref ump_dd_physical_block "ump_dd_physical_block" struct which will receive the requested information.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem, unsigned long index, ump_dd_physical_block * block);
+
+
+/**
+ * Retrieves the actual size of the specified UMP memory.
+ *
+ * The size is reported in bytes, and is typically page aligned.
+ *
+ * @note There is a user space equivalent function called @ref ump_size_get "ump_size_get"
+ *
+ * @see ump_size_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the allocated size of the specified UMP memory, in bytes.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle mem);
+
+
+/**
+ * Adds an extra reference to the specified UMP memory.
+ *
+ * This function adds an extra reference to the specified UMP memory. This function should
+ * be used every time a UMP memory handle is duplicated, that is, assigned to another ump_dd_handle
+ * variable. The function @ref ump_dd_reference_release "ump_dd_reference_release" must then be used
+ * to release each copy of the UMP memory handle.
+ *
+ * @note You are not required to call @ref ump_dd_reference_add "ump_dd_reference_add"
+ * for UMP handles returned from
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id",
+ * because these handles are already reference counted by this function.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_add "ump_reference_add"
+ *
+ * @see ump_reference_add
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle mem);
+
+
+/**
+ * Releases a reference from the specified UMP memory.
+ *
+ * This function should be called once for every reference to the UMP memory handle.
+ * When the last reference is released, all resources associated with this UMP memory
+ * handle are freed.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_release "ump_reference_release"
+ *
+ * @see ump_reference_release
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle mem);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_INTERFACE_H__ */
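As a rough illustration of the API documented in this header, here is a minimal sketch of a kernel driver that imports a UMP buffer by secure ID and walks its physical blocks, for instance to program an MMU. The assumption that the secure ID arrives from user space, the function name, and the log message are placeholders only.

#include <linux/errno.h>
#include <linux/kernel.h>
#include "ump_kernel_interface.h"

static int example_import_ump_buffer(ump_secure_id id)
{
	ump_dd_handle handle;
	unsigned long i, nblocks;

	/* Takes a reference on success; balanced by the release below */
	handle = ump_dd_handle_create_from_secure_id(id);
	if (handle == UMP_DD_HANDLE_INVALID)
		return -EINVAL;

	nblocks = ump_dd_phys_block_count_get(handle);
	for (i = 0; i < nblocks; i++) {
		ump_dd_physical_block block;

		if (ump_dd_phys_block_get(handle, i, &block) != UMP_DD_SUCCESS)
			break;
		/* block.addr and block.size describe one contiguous chunk */
	}

	pr_info("ump example: %lu bytes in %lu block(s)\n",
		ump_dd_size_get(handle), nblocks);

	ump_dd_reference_release(handle);
	return 0;
}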
diff --git a/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h
new file mode 100644
index 00000000000..70eea22d3c3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface_ref_drv.h
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_REF_DRV_H__
+#define __UMP_KERNEL_INTERFACE_REF_DRV_H__
+
+#include "ump_kernel_interface.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Turn specified physical memory into UMP memory. */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_INTERFACE_REF_DRV_H__ */
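For illustration only, a sketch of how a driver that already owns a physically contiguous buffer could wrap it with this call and publish its secure ID; the function name, address, and size are placeholders and not part of this patch.

#include <linux/kernel.h>
#include "ump_kernel_interface_ref_drv.h"

static ump_dd_handle example_wrap_carveout(unsigned long phys, unsigned long size)
{
	ump_dd_physical_block block = {
		.addr = phys,	/* placeholder physical base */
		.size = size,	/* placeholder length, ideally page aligned */
	};
	ump_dd_handle handle;

	handle = ump_dd_handle_create_from_phys_blocks(&block, 1);
	if (handle != UMP_DD_HANDLE_INVALID)
		pr_info("ump example: wrapped buffer, secure id %u\n",
			ump_dd_secure_id_get(handle));

	return handle;
}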
diff --git a/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h
new file mode 100644
index 00000000000..55c49885f06
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_platform.h
+ *
+ * This file should define UMP_KERNEL_API_EXPORT,
+ * which dictates how the UMP kernel API should be exported/imported.
+ * Modify this file, if needed, to match your platform setup.
+ */
+
+#ifndef __UMP_KERNEL_PLATFORM_H__
+#define __UMP_KERNEL_PLATFORM_H__
+
+/** @addtogroup ump_kernel_space_api
+ * @{ */
+
+/**
+ * A define which controls how UMP kernel space API functions are imported and exported.
+ * This define should be set by the implementor of the UMP API.
+ */
+
+#if defined(_WIN32)
+
+#if defined(UMP_BUILDING_UMP_LIBRARY)
+#define UMP_KERNEL_API_EXPORT __declspec(dllexport)
+#else
+#define UMP_KERNEL_API_EXPORT __declspec(dllimport)
+#endif
+
+#else
+
+#define UMP_KERNEL_API_EXPORT
+
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_PLATFORM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile
new file mode 100644
index 00000000000..20b57359167
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile
@@ -0,0 +1,346 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+OSKOS=linux
+FILES_PREFIX=
+
+ifneq ($(KBUILD_EXTMOD),)
+DRIVER_DIR=$(KBUILD_EXTMOD)
+else
+src?=.
+srctree?=.
+DRIVER_DIR?=$(srctree)/$(src)
+M?=$(DRIVER_DIR)
+endif
+MALI_RELEASE_NAME=$(shell cat $(DRIVER_DIR)/.version 2> /dev/null)
+include $(DRIVER_DIR)/Makefile.platform
+include $(DRIVER_DIR)/Makefile.common
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+USING_MMU ?= 1
+USING_UMP ?= 0
+USING_HWMEM ?= 0
+USING_OS_MEMORY ?= 0
+USING_PMM ?= 0
+USING_GPU_UTILIZATION ?= 0
+USING_MALI_RUN_TIME_PM ?= 0
+USING_MALI_PMM_TESTSUITE ?= 0
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 6
+USING_PROFILING ?= 0
+TIMESTAMP ?= default
+BUILD ?= debug
+TARGET_PLATFORM ?= default
+
+ifeq ($(USING_UMP),1)
+ifneq ($(USING_HWMEM),1)
+ UMP_SYMVERS_FILE = ../ump/Module.symvers
+ KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTMOD)/$(UMP_SYMVERS_FILE)
+endif
+endif
+
+# Check whether a Mali core submodule is enabled; returns a non-zero count if it is, 0 otherwise
+submodule_enabled = $(shell gcc $(DEFINES) -E $1/arch/config.h | grep type | grep -c $(2))
+
+# linux build system integration
+ifneq ($(KERNELRELEASE),)
+# Inside the kernel build system
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Set up our defines, which will be passed to gcc
+DEFINES += -DUSING_OS_MEMORY=$(USING_OS_MEMORY)
+DEFINES += -DUSING_MMU=$(USING_MMU)
+DEFINES += -DUSING_UMP=$(USING_UMP)
+DEFINES += -DUSING_HWMEM=$(USING_HWMEM)
+DEFINES += -D_MALI_OSK_SPECIFIC_INDIRECT_MMAP
+DEFINES += -DMALI_TIMELINE_PROFILING_ENABLED=$(USING_PROFILING)
+DEFINES += -DMALI_POWER_MGMT_TEST_SUITE=$(USING_MALI_PMM_TESTSUITE)
+DEFINES += -DMALI_PMM_RUNTIME_JOB_CONTROL_ON=$(USING_MALI_RUN_TIME_PM)
+DEFINES += -DMALI_STATE_TRACKING=1
+DEFINES += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+
+ifneq ($(call submodule_enabled, $M, PMU),0)
+ MALI_PLATFORM_FILE = platform/mali400-pmu/mali_platform.c
+else
+ MALI_PLATFORM_FILE = platform/$(TARGET_PLATFORM)/mali_platform.c
+endif
+
+DEFINES += -DUSING_MALI_PMM=$(USING_PMM)
+DEFINES += -DMALI_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+
+ifeq ($(BUILD), debug)
+DEFINES += -DDEBUG
+endif
+DEFINES += -DSVN_REV=$(SVN_REV)
+DEFINES += -DSVN_REV_STRING=\"$(SVN_REV)\"
+
+# Linux has its own mmap cleanup handlers (see mali_kernel_mem_mmu.c)
+DEFINES += -DMALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+
+ifeq ($(USING_UMP),1)
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=1
+ EXTRA_CFLAGS += -I$(DRIVER_DIR)/../../../include/ump
+else
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=0
+endif
+
+# Use our defines when compiling
+EXTRA_CFLAGS += $(DEFINES) -I$(DRIVER_DIR) -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/platform
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/linux/license/proprietary
+else
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/linux/license/gpl
+endif
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/common/pmm
+
+# Source files which always are included in a build
+SRC = \
+ common/mali_kernel_core.c \
+ linux/mali_kernel_linux.c \
+ $(OSKOS)/mali_osk_indir_mmap.c \
+ common/mali_kernel_rendercore.c \
+ common/mali_kernel_descriptor_mapping.c \
+ common/mali_kernel_vsync.c \
+ linux/mali_ukk_vsync.c \
+ $(MALI_PLATFORM_FILE) \
+ $(OSKFILES) \
+ $(UKKFILES)
+ #__malidrv_build_info.c
+
+ifeq ($(USING_PROFILING),1)
+SRC += \
+ common/mali_kernel_profiling.c \
+ timestamp-$(TIMESTAMP)/mali_timestamp.c
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/timestamp-$(TIMESTAMP)
+endif
+
+# Selecting files to compile by parsing the config file
+
+ifeq ($(USING_PMM),1)
+SRC += \
+ common/pmm/mali_pmm.c \
+ common/pmm/mali_pmm_policy.c \
+ common/pmm/mali_pmm_policy_alwayson.c \
+ common/pmm/mali_pmm_policy_jobcontrol.c \
+ common/pmm/mali_pmm_state.c \
+ linux/mali_kernel_pm.c \
+ linux/mali_osk_pm.c \
+ linux/mali_device_pause_resume.c
+endif
+
+ifeq ($(USING_GPU_UTILIZATION),1)
+SRC += \
+ common/mali_kernel_utilization.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI400PP),0)
+ # Mali-400 PP in use
+ EXTRA_CFLAGS += -DUSING_MALI400
+ SRC += common/mali_kernel_MALI200.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI400GP),0)
+ # Mali-400 GP in use
+ SRC += common/mali_kernel_GP2.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI300PP),0)
+ # Mali-300 PP in use (shares the Mali-400 code path)
+ EXTRA_CFLAGS += -DUSING_MALI400
+ SRC += common/mali_kernel_MALI200.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI300GP),0)
+ # Mali-300 GP in use (shares the Mali-400 code path)
+ SRC += common/mali_kernel_GP2.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI200),0)
+ # Mali200 in use
+ EXTRA_CFLAGS += -DUSING_MALI200
+ SRC += common/mali_kernel_MALI200.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALIGP2),0)
+ # MaliGP2 in use
+ SRC += common/mali_kernel_GP2.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MMU),0)
+ # Mali MMU in use
+ SRC += common/mali_kernel_mem_mmu.c common/mali_kernel_memory_engine.c common/mali_block_allocator.c common/mali_kernel_mem_os.c
+else
+ # No Mali MMU in use
+ SRC += common/mali_kernel_mem_buddy.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI400L2)$(),0)
+ # Mali Level2 cache in use
+ EXTRA_CFLAGS += -DUSING_MALI400_L2_CACHE
+ SRC += common/mali_kernel_l2_cache.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI300L2)$(),0)
+ # Mali Level2 cache in use
+ EXTRA_CFLAGS += -DUSING_MALI400_L2_CACHE
+ SRC += common/mali_kernel_l2_cache.c
+endif
+
+ifeq ($(USING_HWMEM),1)
+ # HWMEM used as backend for UMP api
+ EXTRA_CFLAGS += -I$(DRIVER_DIR)/../ump/common
+ SRC += platform/ux500/ump_kernel_api_hwmem.c
+endif
+
+# Target build file
+MODULE:=mali.ko
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_GPU_MALI) := $(MODULE:.ko=.o)
+# Tell the Linux build system to enable building of our .c files
+$(MODULE:.ko=-y) := $(SRC:.c=.o)
+
+else
+# Outside the kernel build system
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+ # when compiling for ARM we're cross compiling
+ export CROSS_COMPILE ?= arm-none-linux-gnueabi-
+ # default to Virtex5
+ CONFIG ?= pb-virtex5-m200
+else
+ # Compiling for the host
+ CONFIG ?= $(shell uname -m)
+endif
+
+# default cpu to select
+CPU ?= pb11mp
+
+# look up KDIR based om CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning Config $(CONFIG))
+$(warning Host CPU $(CPU))
+$(warning MMU $(USING_MMU))
+$(warning OS_MEMORY $(USING_OS_MEMORY))
+endif
+
+# Validate selected config
+ifneq ($(shell [ -d $(DRIVER_DIR)/arch-$(CONFIG) ] && [ -f $(DRIVER_DIR)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that $(DRIVER_DIR)/arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L $(DRIVER_DIR)/arch ] && rm $(DRIVER_DIR)/arch)
+$(shell ln -sf $(DRIVER_DIR)/arch-$(CONFIG) $(DRIVER_DIR)/arch)
+$(shell touch $(DRIVER_DIR)/arch/config.h)
+
+# Register number of Mali400 cores for routing test jobs to the right Mali-400
+ifeq ($(CONFIG), pb-virtex5-m400-1)
+VERSION_STRINGS += USING_MALI400_PP_CORES=1
+endif
+ifeq ($(CONFIG), pb-virtex5-m400-2)
+VERSION_STRINGS += USING_MALI400_PP_CORES=2
+endif
+ifeq ($(CONFIG), pb-virtex5-m400-3)
+VERSION_STRINGS += USING_MALI400_PP_CORES=3
+endif
+ifeq ($(CONFIG), pb-virtex5-m400-4)
+VERSION_STRINGS += USING_MALI400_PP_CORES=4
+endif
+
+endif
+
+# Filename to use when patching. Original will be saved as this
+PATCH_BACKUP_FILE:=config_makefile_patch_backup.h
+
+ifdef ARM_INTERNAL_BUILD
+$(warning Looking for previous backup)
+endif
+# Look for previous backup
+shell_output:=$(shell [ -f arch/$(PATCH_BACKUP_FILE) ] && mv -vf arch/$(PATCH_BACKUP_FILE) arch/config.h && echo "Patch backup restored")
+ifneq ($(shell_output),)
+ifdef ARM_INTERNAL_BUILD
+$(warning $(shell_output))
+endif
+endif
+
+ifdef PATCH
+ifdef ARM_INTERNAL_BUILD
+$(warning Patching using: $(PATCH) )
+endif
+ifneq ($(shell [ -f arch/$(PATCH).diff ] && echo "OK"), OK)
+# The patch file does not exist
+shell_output:=$(shell echo "Possible PATCH arguments are:" ; find arch/ -name "*.diff" |sed -r 's/.*\/(.*)\.diff/\1/')
+ifdef ARM_INTERNAL_BUILD
+$(warning $(shell_output))
+endif
+$(error Could not find file arch-$(CONFIG)/$(PATCH).diff )
+else
+# Patch file found, do patching
+shell_output:=$(shell cp -f arch/config.h arch/$(PATCH_BACKUP_FILE) && patch --no-backup-if-mismatch -st -p0 arch/config.h -i arch/$(PATCH).diff && echo "OK")
+ifneq ($(shell_output), OK)
+$(warning Output from failed patch: $(shell_output))
+$(shell mv -f arch/$(PATCH_BACKUP_FILE) arch/config.h)
+$(error Could not patch file arch-$(CONFIG)/config.h with arch-$(CONFIG)/$(PATCH).diff.)
+endif
+endif
+endif
+
+# Extend common version-string
+VERSION_STRINGS += BUILD=$(shell echo $(BUILD) | tr a-z A-Z)
+VERSION_STRINGS += CPU=$(CPU)
+VERSION_STRINGS += USING_MMU=$(USING_MMU)
+VERSION_STRINGS += USING_UMP=$(USING_UMP)
+VERSION_STRINGS += USING_HWMEM=$(USING_HWMEM)
+VERSION_STRINGS += USING_PMM=$(USING_PMM)
+VERSION_STRINGS += USING_MALI200=$(call submodule_enabled, ., MALI200)
+VERSION_STRINGS += USING_MALI400=$(call submodule_enabled, ., MALI400)
+VERSION_STRINGS += USING_MALI400_L2_CACHE=$(call submodule_enabled, ., MALI400L2)
+VERSION_STRINGS += USING_GP2=$(call submodule_enabled, ., MALIGP2)
+VERSION_STRINGS += KDIR=$(KDIR)
+
+all: make-build-info-file $(UMP_SYMVERS_FILE)
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) CONFIG_GPU_MALI=m modules
+ @([ "$(PATCH)" != "" ] && [ -f arch/$(PATCH_BACKUP_FILE) ] && mv -f arch/$(PATCH_BACKUP_FILE) arch/config.h && echo "Patch backup restored") || echo No Unpatching needed
+ @rm -f $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+ @([ "$(PATCH)" != "" ] && [ -f arch/$(PATCH_BACKUP_FILE) ] && mv -f arch/$(PATCH_BACKUP_FILE) arch/config.h && echo "Patch backup restored") || echo No Unpatching needed
+
+make-build-info-file:
+ @echo 'char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(FILES_PREFIX)__malidrv_build_info.c
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) kernelrelease
+
+# end of outside kernel build system block
+endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common
new file mode 100644
index 00000000000..a8f4189d0cf
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+OSKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_atomics.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_irq.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_locks.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_low_level_mem.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_math.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_memory.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_misc.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_mali.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_notification.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_time.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_timers.c
+
+UKKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_mem.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_gp.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_pp.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_core.c
+
+ifeq ($(USING_PROFILING),1)
+UKKFILES+=\
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_profiling.c
+endif
+
+ifeq ($(MALI_PLATFORM_FILE),)
+MALI_PLATFORM_FILE=platform/default/mali_platform.c
+endif
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+SVN_REV := $(shell (cd $(DRIVER_DIR); (svnversion | grep -qvE '(exported|Unversioned)' && svnversion) || git svn info | grep '^Revision: '| sed -e 's/^Revision: //' ) 2>/dev/null )
+ifeq ($(SVN_REV),)
+SVN_REV := $(MALI_RELEASE_NAME)
+else
+SVN_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+endif
+
+# Common version-string, will be extended by OS-specific sections
+VERSION_STRINGS =
+VERSION_STRINGS += CONFIG=$(CONFIG)
+VERSION_STRINGS += USING_OS_MEMORY=$(USING_OS_MEMORY)
+VERSION_STRINGS += API_VERSION=$(shell cd $(DRIVER_DIR); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)common\/mali_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'URL: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += REVISION=$(SVN_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Rev: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += CHANGE_DATE=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Date: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform
new file mode 100644
index 00000000000..1f8068e25de
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform
@@ -0,0 +1,45 @@
+#these are the build options for the ST-Ericsson Ux500 platforms.
+
+ifeq ($(CONFIG_UX500_SOC_DB5500),y)
+TARGET_PLATFORM = default
+USING_GPU_UTILIZATION = 0
+DEFINES += -DSOC_DB5500=1
+endif
+
+CONFIG = ux500
+CPU = $(CONFIG)
+TARGET_PLATFORM ?= $(CONFIG)
+ARCH ?= arm
+USING_MMU ?= 1
+USING_PMM ?= 1
+USING_UMP ?= 1
+USING_HWMEM ?= 1
+USING_OS_MEMORY ?= 1
+USING_GPU_UTILIZATION ?= 1
+
+ifeq ($(CONFIG_GPU_MALI_DEBUG),y)
+BUILD ?= debug
+else
+BUILD ?= release
+endif
+
+KDIR-$(CPU)=$(srctree)
+
+#these are paths relative to the mali400ko/driver/src/devicedrv/mali folder
+#not to be confused with the drivers/gpu/mali symlink in the kernel tree
+EXTRA_CFLAGS += -I$(realpath $(DRIVER_DIR)/../../../include/ump)
+EXTRA_CFLAGS += -I$(realpath $(DRIVER_DIR)/../ump/common)
+
+#The following is duplicated from the main Makefile to ensure that the 'arch'
+#link is created even during an in-kernel build.
+
+# Validate selected config
+ifneq ($(shell [ -d $(DRIVER_DIR)/arch-$(CONFIG) ] && [ -f $(DRIVER_DIR)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that $(DRIVER_DIR)/arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L $(DRIVER_DIR)/arch ] && rm $(DRIVER_DIR)/arch)
+$(shell ln -sf $(DRIVER_DIR)/arch-$(CONFIG) $(DRIVER_DIR)/arch)
+$(shell touch $(DRIVER_DIR)/arch/config.h)
+endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h
new file mode 100644
index 00000000000..5011c97a28c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = PMU,
+ .description = "Mali-300 PMU",
+ .base = 0xC0002000,
+ .irq = -1,
+ .mmu_id = 0
+
+ },
+ {
+ .type = MALI300GP,
+ .description = "Mali-300 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI300PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-300 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-300 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-300 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI300L2,
+ .base = 0xC0001000,
+ .description = "Mali-300 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
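Purely as an illustration of how such a resource table is typically consumed, here is a sketch that scans arch_configuration for the PMU entry; the helper function is hypothetical and nothing like it is added by this patch.

/* Hypothetical helper: return the register base of the PMU resource, or 0. */
static unsigned long example_find_pmu_base(void)
{
	unsigned long i;
	unsigned long n = sizeof(arch_configuration) / sizeof(arch_configuration[0]);

	for (i = 0; i < n; i++) {
		if (arch_configuration[i].type == PMU)
			return arch_configuration[i].base;
	}

	return 0;
}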
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h
new file mode 100644
index 00000000000..ee53f49bfe0
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+
+ {
+ .type = PMU,
+ .description = "Mali-400 PMU",
+ .base = 0xC0002000,
+ .irq = -1,
+ .mmu_id = 0
+ },
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = OS_MEMORY,
+ .description = "OS Memory",
+ .alloc_order = 10, /* Lowest preference for this memory */
+ .size = 96 * 1024 * 1024, /* 96 MB */
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = 0,
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0x80000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h
new file mode 100644
index 00000000000..5c7c7f3ef13
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = PMU,
+ .description = "Mali-400 PMU",
+ .base = 0xC0002000,
+ .irq = -1,
+ .mmu_id = 0
+
+ },
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h
new file mode 100644
index 00000000000..9f675fd1d23
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h
new file mode 100644
index 00000000000..cc1f05cb5f4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000A000,
+ .irq = -1,
+ .description = "Mali-400 PP 1",
+ .mmu_id = 3
+},
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = 0xC0005000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP 1",
+ .mmu_id = 3
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h
new file mode 100644
index 00000000000..a672e7d22ef
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000A000,
+ .irq = -1,
+ .description = "Mali-400 PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000C000,
+ .irq = -1,
+ .description = "Mali-400 PP 2",
+ .mmu_id = 4
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = 102,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = 0xC0005000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MMU,
+ .base = 0xC0006000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 2",
+ .mmu_id = 4
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h
new file mode 100644
index 00000000000..ae9100c1ae4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the EB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000A000,
+ .irq = -1,
+ .description = "Mali-400 PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000C000,
+ .irq = -1,
+ .description = "Mali-400 PP 2",
+ .mmu_id = 4
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000E000,
+ .irq = -1,
+ .description = "Mali-400 PP 3",
+ .mmu_id = 5
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = 102,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = 0xC0005000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MMU,
+ .base = 0xC0006000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 2",
+ .mmu_id = 4
+ },
+ {
+ .type = MMU,
+ .base = 0xC0007000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 3",
+ .mmu_id = 5
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h
new file mode 100644
index 00000000000..dc324cf0d39
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Architecture configuration for ST-Ericsson Ux500 platforms
+ *
+ * Author: Magnus Wendt <magnus.wendt@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#ifndef __ARCH_UX500_CONFIG_H__
+#define __ARCH_UX500_CONFIG_H__
+
+/* #include <mach/hardware.h> */
+
+#define U5500_SGA_BASE 0x801D0000
+#define U8500_SGA_BASE 0xA0300000
+
+#if defined(SOC_DB5500) && (1 == SOC_DB5500)
+#define UX500_SGA_BASE U5500_SGA_BASE
+#else
+#define UX500_SGA_BASE U8500_SGA_BASE
+#endif
+
+#define MEGABYTE (1024*1024)
+#define MALI_MEM_BASE (128 * MEGABYTE)
+#define MALI_MEM_SIZE ( 32 * MEGABYTE)
+#define OS_MEM_SIZE (128 * MEGABYTE)
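+/* The sizes above are used by the resource table below: a dedicated 32 MB
+ * Mali region starting at physical address 0x08000000 (128 MB into SDRAM),
+ * plus up to 128 MB of normal kernel memory when USING_OS_MEMORY is set. */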
+
+/* Hardware revision u8500 v1: GX570-BU-00000-r0p1
+ * Hardware revision u8500 v2: GX570-BU-00000-r1p0
+ * Hardware revision u5500: GX570-BU-00000-r1p0
+ * configuration registers: 0xA0300000-0xA031FFFFF (stw8500v1_usermanual p269)
+ *
+ * Shared Peripheral Interrupt assignments: (stw8500v1_usermanual p265-266)
+ * Nb | Interrupt Source
+ * 116 | Mali400 combined
+ * 115 | Mali400 geometry processor
+ * 114 | Mali400 geometry processor MMU
+ * 113 | Mali400 pixel processor
+ * 112 | Mali400 pixel processor MMU
+ *
+ * irq offset: 32
+ */
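+/* The .irq values below are the SPI numbers listed above plus the interrupt
+ * controller offset of 32, e.g. 115 + 32 = 147 for the geometry processor. */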
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = UX500_SGA_BASE + 0x0000,
+ .irq = 115+32,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = UX500_SGA_BASE + 0x8000,
+ .irq = 113+32,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = UX500_SGA_BASE + 0x3000,
+ .irq = 114+32,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = UX500_SGA_BASE + 0x4000,
+ .irq = 112+32,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM",
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = MALI_MEM_BASE,
+ .size = MALI_MEM_SIZE,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+#if USING_OS_MEMORY
+ {
+ .type = OS_MEMORY,
+ .description = "Linux kernel memory",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .size = OS_MEM_SIZE,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_MMU_READABLE | _MALI_MMU_WRITEABLE
+ },
+#endif
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0x00000000, /* Validate all memory for now */
+ .size = 2047 * MEGABYTE, /* "2GB ought to be enough for anyone" */
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = UX500_SGA_BASE + 0x1000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_UX500_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c
new file mode 100644
index 00000000000..fc80a20c813
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_osk.h"
+
+#define MALI_BLOCK_SIZE (256UL * 1024UL) /* 256 kB, remember to keep the ()s */
+
+typedef struct block_info
+{
+ struct block_info * next;
+} block_info;
+
+/* The structure used as the handle produced by block_allocator_allocate,
+ * and removed by block_allocator_release */
+typedef struct block_allocator_allocation
+{
+ /* The list will be released in reverse order */
+ block_info *last_allocated;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 start_offset;
+ u32 mapping_length;
+} block_allocator_allocation;
+
+
+typedef struct block_allocator
+{
+ _mali_osk_lock_t *mutex;
+ block_info * all_blocks;
+ block_info * first_free;
+ u32 base;
+ u32 cpu_usage_adjust;
+ u32 num_blocks;
+} block_allocator;
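+
+/* Allocator layout, as implemented below: all_blocks is a flat array with one
+ * block_info per MALI_BLOCK_SIZE block of the managed region, and first_free
+ * threads a singly linked free list through that array. A block's physical
+ * address is recovered from its array index (see get_phys()), so no per-block
+ * address needs to be stored. The mutex serialises all list manipulation. */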
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block);
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static void block_allocator_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block );
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ block_allocator * info;
+ u32 usable_size;
+ u32 num_blocks;
+
+ usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+ MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+ MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+ num_blocks = usable_size / MALI_BLOCK_SIZE;
+ MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+ if (usable_size == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+ return NULL;
+ }
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(block_allocator));
+ if (NULL != info)
+ {
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED, 0, 105);
+ if (NULL != info->mutex)
+ {
+ info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
+ if (NULL != info->all_blocks)
+ {
+ u32 i;
+ info->first_free = NULL;
+ info->num_blocks = num_blocks;
+
+ info->base = base_address;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ for ( i = 0; i < num_blocks; i++)
+ {
+ info->all_blocks[i].next = info->first_free;
+ info->first_free = &info->all_blocks[i];
+ }
+
+ allocator->allocate = block_allocator_allocate;
+ allocator->allocate_page_table_block = block_allocator_allocate_page_table_block;
+ allocator->destroy = block_allocator_destroy;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_lock_term(info->mutex);
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ block_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (block_allocator*)allocator->ctx;
+
+ _mali_osk_free(info->all_blocks);
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
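+/* Translates a block_info pointer back to the block's physical address from
+ * its index in all_blocks. Worked example (illustrative values only): with
+ * info->base == 0xD0000000, &info->all_blocks[3] maps to
+ * 0xD0000000 + 3 * 256 kB = 0xD00C0000. */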
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block)
+{
+ return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE);
+}
+
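+/* Allocation strategy, summarising the code below: blocks are popped off the
+ * free list one at a time and each is mapped into the descriptor at *offset
+ * via mali_allocation_engine_map_physical() until the request is satisfied or
+ * the free list runs dry. On a mapping failure everything mapped so far is
+ * unmapped, the blocks are relinked onto the free list and
+ * MALI_MEM_ALLOC_INTERNAL_FAILURE is returned. Otherwise the result is
+ * MALI_MEM_ALLOC_FINISHED, MALI_MEM_ALLOC_PARTIAL (only part of the request
+ * could be served), or MALI_MEM_ALLOC_NONE when no blocks were available so
+ * the engine can fall back to another allocator. */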
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ block_allocator * info;
+ u32 left;
+ block_info * last_allocated = NULL;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ block_allocator_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (block_allocator*)ctx;
+ left = descriptor->size - *offset;
+ MALI_DEBUG_ASSERT(0 != left);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ /* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return result;
+ }
+
+ ret_allocation->start_offset = *offset;
+ ret_allocation->mapping_length = 0;
+
+ while ((left > 0) && (info->first_free))
+ {
+ block_info * block;
+ u32 phys_addr;
+ u32 padding;
+ u32 current_mapping_size;
+
+ block = info->first_free;
+ info->first_free = info->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+
+ phys_addr = get_phys(info, block);
+
+ padding = *offset & (MALI_BLOCK_SIZE-1);
+
+ if (MALI_BLOCK_SIZE - padding < left)
+ {
+ current_mapping_size = MALI_BLOCK_SIZE - padding;
+ }
+ else
+ {
+ current_mapping_size = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ /* release all memory back to the pool */
+ while (last_allocated)
+ {
+ /* This relinks every block we've just allocated back into the free-list */
+ block = last_allocated->next;
+ last_allocated->next = info->first_free;
+ info->first_free = last_allocated;
+ last_allocated = block;
+ }
+
+ break;
+ }
+
+ *offset += current_mapping_size;
+ left -= current_mapping_size;
+ ret_allocation->mapping_length += current_mapping_size;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ if (last_allocated)
+ {
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Record all the information about this allocation */
+ ret_allocation->last_allocated = last_allocated;
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+
+ alloc_info->ctx = info;
+ alloc_info->handle = ret_allocation;
+ alloc_info->release = block_allocator_release;
+ }
+ else
+ {
+ /* Free the allocation information - nothing to be passed back */
+ _mali_osk_free( ret_allocation );
+ }
+
+ return result;
+}
+
+static void block_allocator_release(void * ctx, void * handle)
+{
+ block_allocator * info;
+ block_info * block, * next;
+ block_allocator_allocation *allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (block_allocator*)ctx;
+ allocation = (block_allocator_allocation*)handle;
+ block = allocation->last_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ /* unmap */
+ mali_allocation_engine_unmap_physical(allocation->engine, allocation->descriptor, allocation->start_offset, allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ while (block)
+ {
+ MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+ next = block->next;
+
+ /* relink into free-list */
+ block->next = info->first_free;
+ info->first_free = block;
+
+ /* advance the loop */
+ block = next;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free( allocation );
+}
+
+
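+/* Page table blocks are handed out one whole MALI_BLOCK_SIZE block at a time:
+ * the head of the free list is claimed and its physical address is mapped
+ * into the kernel with _mali_osk_mem_mapioregion(); the mapping is torn down
+ * again in block_allocator_release_page_table_block(). */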
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+ block_allocator * info;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(block);
+ info = (block_allocator*)ctx;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ if (NULL != info->first_free)
+ {
+ void * virt;
+ u32 phys;
+ u32 size;
+ block_info * alloc;
+ alloc = info->first_free;
+
+ phys = get_phys(info, alloc); /* Does not modify info or alloc */
+ size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */
+ virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" );
+
+ /* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE,
+ * because it's unlikely another allocator will be able to map it in. */
+
+ if ( NULL != virt )
+ {
+ block->ctx = info; /* same as incoming ctx */
+ block->handle = alloc;
+ block->phys_base = phys;
+ block->size = size;
+ block->release = block_allocator_release_page_table_block;
+ block->mapping = virt;
+
+ info->first_free = alloc->next;
+
+ alloc->next = NULL; /* Could potentially link many blocks together instead */
+
+ result = MALI_MEM_ALLOC_FINISHED;
+ }
+ }
+ else result = MALI_MEM_ALLOC_NONE;
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block )
+{
+ block_allocator * info;
+ block_info * block, * next;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (block_allocator*)page_table_block->ctx;
+ block = (block_info*)page_table_block->handle;
+
+ MALI_DEBUG_ASSERT_POINTER(info);
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ /* Unmap all the physical memory at once */
+ _mali_osk_mem_unmapioregion( page_table_block->phys_base, page_table_block->size, page_table_block->mapping );
+
+ /** @note This loop handles the case where more than one block_info was linked.
+ * Probably unnecessary for page table block releasing. */
+ while (block)
+ {
+ next = block->next;
+
+ MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+ block->next = info->first_free;
+ info->first_free = block;
+
+ block = next;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h
new file mode 100644
index 00000000000..c3b5761e23a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_kernel_memory_engine.h"
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c
new file mode 100644
index 00000000000..e91abe120cb
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c
@@ -0,0 +1,1418 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_subsystem.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#if defined(USING_MALI400_L2_CACHE)
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h" /* Needed for mali_kernel_mmu_force_bus_reset() */
+#endif
+
+#if defined(USING_MALI200)
+#define MALI_GP_SUBSYSTEM_NAME "MaliGP2"
+#define MALI_GP_CORE_TYPE _MALI_GP2
+#elif defined(USING_MALI400)
+#define MALI_GP_SUBSYSTEM_NAME "Mali-400 GP"
+#define MALI_GP_CORE_TYPE _MALI_400_GP
+#else
+#error "No supported mali core defined"
+#endif
+
+#define GET_JOB_EMBEDDED_PTR(job) (&((job)->embedded_core_job))
+#define GET_JOBGP2_PTR(job_extern) _MALI_OSK_CONTAINER_OF(job_extern, maligp_job, embedded_core_job)
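+/* A maligp_job embeds the generic mali_core_job (embedded_core_job); the two
+ * macros above convert between the embedded member and its enclosing
+ * maligp_job using the container-of pattern. */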
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_gp_id = -1;
+
+static mali_core_renderunit * last_gp_core_cookie = NULL;
+
+/* Describes the settings of a maligp job */
+typedef struct maligp_job
+{
+ /* The general job struct common for all mali cores */
+ mali_core_job embedded_core_job;
+ _mali_uk_gp_start_job_s user_input;
+
+ u32 irq_status;
+ u32 status_reg_on_stop;
+ u32 perf_counter0;
+ u32 perf_counter1;
+ u32 vscl_stop_addr;
+ u32 plbcl_stop_addr;
+ u32 heap_current_addr;
+
+ /* The data we will return back to the user */
+ _mali_osk_notification_t *notification_obj;
+
+ int is_stalled_waiting_for_more_memory;
+
+ u32 active_mask;
+ /* progress checking */
+ u32 last_vscl;
+ u32 last_plbcl;
+ /* extended progress checking, only enabled when we can use one of the performance counters */
+ u32 have_extended_progress_checking;
+ u32 vertices;
+
+#if defined(USING_MALI400_L2_CACHE)
+ u32 perf_counter_l2_src0;
+ u32 perf_counter_l2_src1;
+ u32 perf_counter_l2_val0;
+ u32 perf_counter_l2_val1;
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ u32 pid;
+ u32 tid;
+#endif
+} maligp_job;
+
+/* Functions exposed to the general external system through
+ function pointers. */
+
+static _mali_osk_errcode_t maligp_subsystem_startup(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static _mali_osk_errcode_t maligp_subsystem_mmu_connect(mali_kernel_subsystem_identifier id);
+#endif
+static void maligp_subsystem_terminate(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t maligp_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+static void maligp_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+static _mali_osk_errcode_t maligp_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t maligp_renderunit_create(_mali_osk_resource_t * resource);
+#if USING_MMU
+static void maligp_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+#if MALI_STATE_TRACKING
+void maligp_subsystem_dump_state(void);
+#endif
+
+/* Internal support functions */
+static _mali_osk_errcode_t maligp_core_version_legal( mali_core_renderunit *core );
+static void maligp_raw_reset( mali_core_renderunit *core);
+static void maligp_reset_hard(struct mali_core_renderunit * core);
+static void maligp_reset(mali_core_renderunit *core);
+static void maligp_initialize_registers_mgmt(mali_core_renderunit *core );
+
+#ifdef DEBUG
+static void maligp_print_regs(int debug_level, mali_core_renderunit *core);
+#endif
+
+/* Functions exposed to mali_core system through function pointers
+ in the subsystem struct. */
+static _mali_osk_errcode_t subsystem_maligp_start_job(mali_core_job * job, mali_core_renderunit * core);
+static u32 subsystem_maligp_irq_handler_upper_half(mali_core_renderunit * core);
+static int subsystem_maligp_irq_handler_bottom_half(mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_maligp_get_new_job_from_user(struct mali_core_session * session, void * argument);
+static _mali_osk_errcode_t subsystem_maligp_suspend_response(struct mali_core_session * session, void * argument);
+static void subsystem_maligp_return_job_to_user(mali_core_job * job, mali_subsystem_job_end_code end_status);
+static void subsystem_maligp_renderunit_delete(mali_core_renderunit * core);
+static void subsystem_maligp_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style );
+static void subsystem_maligp_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_maligp_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core);
+static void subsystem_maligp_renderunit_stop_bus(struct mali_core_renderunit* core);
+
+/* Variables */
+static register_address_and_value default_mgmt_regs[] =
+{
+ { MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED }
+};
+
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+
+struct mali_kernel_subsystem mali_subsystem_gp2=
+{
+ maligp_subsystem_startup, /* startup */
+ maligp_subsystem_terminate, /* shutdown */
+#if USING_MMU
+ maligp_subsystem_mmu_connect, /* load_complete */
+#else
+ NULL,
+#endif
+ maligp_subsystem_core_system_info_fill, /* system_info_fill */
+ maligp_subsystem_session_begin, /* session_begin */
+ maligp_subsystem_session_end, /* session_end */
+#if USING_MMU
+ maligp_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ maligp_subsystem_dump_state, /* dump_state */
+#endif
+} ;
+
+static mali_core_subsystem subsystem_maligp ;
+
+static _mali_osk_errcode_t maligp_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem * subsystem;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_startup\n") ) ;
+
+ mali_subsystem_gp_id = id;
+
+ /* All values get 0 as default */
+ _mali_osk_memset(&subsystem_maligp, 0, sizeof(*subsystem));
+
+ subsystem = &subsystem_maligp;
+ subsystem->start_job = &subsystem_maligp_start_job;
+ subsystem->irq_handler_upper_half = &subsystem_maligp_irq_handler_upper_half;
+ subsystem->irq_handler_bottom_half = &subsystem_maligp_irq_handler_bottom_half;
+ subsystem->get_new_job_from_user = &subsystem_maligp_get_new_job_from_user;
+ subsystem->suspend_response = &subsystem_maligp_suspend_response;
+ subsystem->return_job_to_user = &subsystem_maligp_return_job_to_user;
+ subsystem->renderunit_delete = &subsystem_maligp_renderunit_delete;
+ subsystem->reset_core = &subsystem_maligp_renderunit_reset_core;
+ subsystem->stop_bus = &subsystem_maligp_renderunit_stop_bus;
+ subsystem->probe_core_irq_trigger = &subsystem_maligp_renderunit_probe_core_irq_trigger;
+ subsystem->probe_core_irq_acknowledge = &subsystem_maligp_renderunit_probe_core_irq_finished;
+
+ /* Setting variables in the general core part of the subsystem.*/
+ subsystem->name = MALI_GP_SUBSYSTEM_NAME;
+ subsystem->core_type = MALI_GP_CORE_TYPE;
+ subsystem->id = id;
+
+ /* Initiates the rest of the general core part of the subsystem */
+ MALI_CHECK_NO_ERROR(mali_core_subsystem_init( subsystem ));
+
+ /* This will register the function for adding MALIGP2 cores to the subsystem */
+#if defined(USING_MALI200)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALIGP2, maligp_renderunit_create));
+#endif
+#if defined(USING_MALI400)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI400GP, maligp_renderunit_create));
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_startup\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+#if USING_MMU
+static _mali_osk_errcode_t maligp_subsystem_mmu_connect(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem_attach_mmu(&subsystem_maligp);
+ MALI_SUCCESS; /* OK */
+}
+#endif
+
+static void maligp_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_terminate\n") ) ;
+ mali_core_subsystem_cleanup(&subsystem_maligp);
+}
+
+static _mali_osk_errcode_t maligp_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ mali_core_session * session;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_session_begin\n") ) ;
+ MALI_CHECK_NON_NULL(session = _mali_osk_malloc( sizeof(*session) ), _MALI_OSK_ERR_FAULT);
+
+ _mali_osk_memset(session, 0, sizeof(*session) );
+ *slot = (mali_kernel_subsystem_session_slot)session;
+
+ session->subsystem = &subsystem_maligp;
+
+ session->notification_queue = queue;
+
+#if USING_MMU
+ session->mmu_session = mali_session_data;
+#endif
+
+ mali_core_session_begin(session);
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_session_begin\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+static void maligp_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ mali_core_session * session;
+ /** @note mali_session_data not needed here */
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_session_end\n") ) ;
+ if ( NULL==slot || NULL==*slot)
+ {
+ MALI_PRINT_ERROR(("Input slot==NULL"));
+ return;
+ }
+ session = (mali_core_session *)*slot;
+ mali_core_session_close(session);
+
+ _mali_osk_free(session);
+ *slot = NULL;
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_session_end\n") ) ;
+}
+
+/**
+ * We fill in info about all the cores we have
+ * @param info Pointer to system info struct to update
+ * @return _MALI_OSK_ERR_OK on success, or another _mali_osk_errcode_t for errors.
+ */
+static _mali_osk_errcode_t maligp_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ return mali_core_subsystem_system_info_fill(&subsystem_maligp, info);
+}
+
+static _mali_osk_errcode_t maligp_renderunit_create(_mali_osk_resource_t * resource)
+{
+ mali_core_renderunit *core;
+ int err;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_renderunit_create\n") ) ;
+ /* Checking that the resource settings are correct */
+#if defined(USING_MALI200)
+ if(MALIGP2 != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_GP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#elif defined(USING_MALI400)
+ if(MALI400GP != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_GP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+ if ( 0 != resource->size )
+ {
+ MALI_PRINT_ERROR(("Memory size set to " MALI_GP_SUBSYSTEM_NAME " core should be zero."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if ( NULL == resource->description )
+ {
+ MALI_PRINT_ERROR(("A " MALI_GP_SUBSYSTEM_NAME " core needs a unique description field"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Create a new core object */
+ core = (mali_core_renderunit*) _mali_osk_malloc(sizeof(*core));
+ if ( NULL == core )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Variables set to be able to open and register the core */
+ core->subsystem = &subsystem_maligp ;
+ core->registers_base_addr = resource->base ;
+ core->size = MALIGP2_REGISTER_ADDRESS_SPACE_SIZE ;
+ core->description = resource->description;
+ core->irq_nr = resource->irq ;
+#if USING_MMU
+ core->mmu_id = resource->mmu_id;
+ core->mmu = NULL;
+#endif
+#if USING_MALI_PMM
+ /* Set up core's PMM id */
+ core->pmm_id = MALI_PMM_CORE_GP;
+#endif
+
+ err = mali_core_renderunit_init( core );
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to initialize renderunit\n"));
+ goto exit_on_error0;
+ }
+
+ /* Map the new core object, setting: core->registers_mapped */
+ err = mali_core_renderunit_map_registers(core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error1;
+
+ /* Check that the register mapping of the core works.
+ Return 0 if maligp core is present and accessible. */
+ if (mali_benchmark) {
+ core->core_version = MALI_GP_PRODUCT_ID << 16;
+ } else {
+ core->core_version = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VERSION);
+ }
+
+ err = maligp_core_version_legal(core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error2;
+
+ /* Reset the core. Put the core into a state where it can start to render. */
+ maligp_reset(core);
+
+ /* Registering IRQ, init the work_queue_irq_handle */
+ /* Adding this core as an available renderunit in the subsystem. */
+ err = mali_core_subsystem_register_renderunit(&subsystem_maligp, core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error2;
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Initial Register settings:\n"));
+ maligp_print_regs(4, core);
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_renderunit_create\n") ) ;
+
+ MALI_SUCCESS;
+
+exit_on_error2:
+ mali_core_renderunit_unmap_registers(core);
+exit_on_error1:
+ mali_core_renderunit_term(core);
+exit_on_error0:
+ _mali_osk_free( core ) ;
+ MALI_PRINT_ERROR(("Renderunit NOT created."));
+ MALI_ERROR((_mali_osk_errcode_t)err);
+}
+
+#if USING_MMU
+/* Used currently only for signalling when MMU has a pagefault */
+static void maligp_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ mali_core_subsystem_broadcast_notification(&subsystem_maligp, message, data);
+}
+#endif
+
+#ifdef DEBUG
+static void maligp_print_regs(int debug_level, mali_core_renderunit *core)
+{
+ if (debug_level <= mali_debug_level)
+ {
+ MALI_DEBUG_PRINT(1, (" VS 0x%08X 0x%08X, PLBU 0x%08X 0x%08X ALLOC 0x%08X 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+ MALI_DEBUG_PRINT(1, (" IntRaw 0x%08X IntMask 0x%08X, Status 0x%02X Ver: 0x%08X \n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_MASK),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VERSION)));
+
+ MALI_DEBUG_PRINT(1, (" PERF_CNT Enbl:%d %d Src: %02d %02d VAL: 0x%08X 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+
+ MALI_DEBUG_PRINT(1, (" VS_START 0x%08X PLBU_START 0x%08X AXI_ERR 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ),
+ mali_core_renderunit_register_read(core, MALIGP2_CONTR_AXI_BUS_ERROR_STAT)));
+ }
+}
+#endif
+
+static _mali_osk_errcode_t maligp_core_version_legal( mali_core_renderunit *core )
+{
+ u32 mali_type;
+
+ mali_type = core->core_version >> 16;
+
+#if defined(USING_MALI400)
+ if ( MALI400_GP_PRODUCT_ID != mali_type && MALI300_GP_PRODUCT_ID != mali_type )
+#else
+ if ( MALI_GP_PRODUCT_ID != mali_type )
+#endif
+ {
+ MALI_PRINT_ERROR(("Error: reading this from maligp version register: 0x%x\n", core->core_version));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali GP: core_version_legal: Reads correct mali version: %d\n", core->core_version )) ;
+ MALI_SUCCESS;
+}
+
+static void subsystem_maligp_renderunit_stop_bus(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+static void maligp_reset( mali_core_renderunit *core )
+{
+ if (!mali_benchmark) {
+ maligp_raw_reset(core);
+ maligp_initialize_registers_mgmt(core);
+ }
+}
+
+
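+/* Hard reset sequence, as read from the code below: one of the core registers
+ * (WRITE_BOUND_LOW) is first written with a value the core should never hold,
+ * the reset command is issued, and the register is then repeatedly written
+ * with a check value and read back - once the written value sticks, the reset
+ * has completed and the register is restored to its default. */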
+static void maligp_reset_hard( mali_core_renderunit *core )
+{
+ const int reset_finished_loop_count = 15;
+ const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_invalid_value);
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_core_renderunit_register_read(core, reset_wait_target_register))
+ {
+ MALI_DEBUG_PRINT(5, ("Reset loop exiting after %d iterations\n", i));
+ break;
+ }
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_DEBUG_PRINT(1, ("The reset loop didn't work\n"));
+ }
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+
+
+}
+
+static void maligp_raw_reset( mali_core_renderunit *core )
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: maligp_raw_reset: %s\n", core->description)) ;
+ if (mali_benchmark) return;
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+
+#if defined(USING_MALI200)
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, request_loop_count == i, ("Mali GP: Bus was never stopped during core reset\n"));
+
+ if (request_loop_count==i)
+ {
+ /* Could not stop bus connections from core, probably because some of the already pending
+ bus requests have had a page fault, and therefore cannot complete before the MMU does page fault
+ handling. This is handled with a heavier reset - which unfortunately resets all
+ the cores on this MMU in addition to the MMU itself */
+#if USING_MMU
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ MALI_DEBUG_PRINT(1, ("Mali GP: Forcing MMU bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow GP to stop its bus, system failure, unable to recover\n"));
+ return;
+ }
+
+ /* the bus was stopped OK, complete the reset */
+ /* use the hard reset routine to do the actual reset */
+ maligp_reset_hard(core);
+
+#elif defined(USING_MALI400)
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & /*Bitwise OR*/
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if ( request_loop_count==i )
+ {
+#if USING_MMU
+ /* Could not stop bus connections from core, probably because some of the already pending
+ bus requests have had a page fault, and therefore cannot complete before the MMU does page fault
+ handling. This is handled with a heavier reset - which unfortunately resets all
+ the cores on this MMU in addition to the MMU itself */
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ MALI_DEBUG_PRINT(1, ("Mali GP: Forcing Bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow GP to stop its bus, system failure, unable to recover\n"));
+ }
+ else
+ {
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ }
+
+#else
+#error "no supported mali core defined"
+#endif
+}
+
+/* Sets the registers on maligp according to the const default_mgmt_regs array. */
+static void maligp_initialize_registers_mgmt(mali_core_renderunit *core )
+{
+ int i;
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_initialize_registers_mgmt: %s\n", core->description)) ;
+ for(i=0 ; i< (sizeof(default_mgmt_regs)/sizeof(*default_mgmt_regs)) ; ++i)
+ {
+ mali_core_renderunit_register_write(core, default_mgmt_regs[i].address, default_mgmt_regs[i].value);
+ }
+}
+
+
+/* Start this job on this core. Returns _MALI_OSK_ERR_OK if the job was started. */
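+/* Judging from the register writes below, frame_registers[0]/[1] hold the VS
+ * command list start/end and frame_registers[2]/[3] the PLBU command list
+ * start/end; a unit is only started when its start and end addresses differ,
+ * so a job with two empty command lists is rejected. */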
+static _mali_osk_errcode_t subsystem_maligp_start_job(mali_core_job * job, mali_core_renderunit * core)
+{
+ maligp_job *jobgp;
+ u32 startcmd;
+ /* The local extended version of the general structs */
+ jobgp = _MALI_OSK_CONTAINER_OF(job, maligp_job, embedded_core_job);
+
+ startcmd = 0;
+ if ( jobgp->user_input.frame_registers[0] != jobgp->user_input.frame_registers[1] )
+ {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+ }
+
+ if ( jobgp->user_input.frame_registers[2] != jobgp->user_input.frame_registers[3] )
+ {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+ }
+
+ if(0 == startcmd)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali GP: Job: 0x%08x WILL NOT START SINCE JOB HAS ILLEGAL ADDRESSES\n",
+ (u32)jobgp->user_input.user_job_ptr));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Registers Start\n"));
+ maligp_print_regs(4, core);
+#endif
+
+
+ mali_core_renderunit_register_write_array(
+ core,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR,
+ &(jobgp->user_input.frame_registers[0]),
+ sizeof(jobgp->user_input.frame_registers)/sizeof(jobgp->user_input.frame_registers[0]));
+
+ /* This selects which performance counters we are reading */
+ if ( 0 != jobgp->user_input.perf_counter_flag )
+ {
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC,
+ jobgp->user_input.perf_counter_src0);
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ jobgp->user_input.perf_counter_src1);
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if ( jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ int force_reset = ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET ) ? 1 : 0;
+ u32 src0 = 0;
+ u32 src1 = 0;
+
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE )
+ {
+ src0 = jobgp->user_input.perf_counter_l2_src0;
+ }
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE )
+ {
+ src1 = jobgp->user_input.perf_counter_l2_src1;
+ }
+
+ mali_kernel_l2_cache_set_perf_counters(src0, src1, force_reset); /* will activate and possibly reset counters */
+
+ /* Now, retrieve the current values, so we can subtract them when the job has completed */
+ mali_kernel_l2_cache_get_perf_counters(&jobgp->perf_counter_l2_src0,
+ &jobgp->perf_counter_l2_val0,
+ &jobgp->perf_counter_l2_src1,
+ &jobgp->perf_counter_l2_val1);
+ }
+#endif
+ }
+
+ if ( 0 == (jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))
+ {
+ /* extended progress checking can be enabled */
+
+ jobgp->have_extended_progress_checking = 1;
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED
+ );
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ subsystem_flush_mapped_mem_cache();
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: STARTING GP WITH CMD: 0x%x\n", startcmd));
+
+ /* This is the command that starts the Core */
+ mali_core_renderunit_register_write(core,
+ MALIGP2_REG_ADDR_MGMT_CMD,
+ startcmd);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), jobgp->pid, jobgp->tid, 0, 0, 0);
+#endif
+
+ MALI_SUCCESS;
+}
+
+/* Check if the given core has an interrupt pending. Returns 1 and sets the IRQ mask to 0 if pending */
+
+static u32 subsystem_maligp_irq_handler_upper_half(mali_core_renderunit * core)
+{
+ u32 irq_readout;
+
+ if (mali_benchmark) {
+ return (core->current_job ? 1 : 0); /* simulate irq is pending when a job is pending */
+ }
+
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+
+ MALI_DEBUG_PRINT(5, ("Mali GP: IRQ: %04x\n", irq_readout)) ;
+
+ if ( MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout )
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK , MALIGP2_REG_VAL_IRQ_MASK_NONE);
+ /* We do need to handle this in a bottom half, return 1 */
+ return 1;
+ }
+ return 0;
+}
+
+/* This function should check if the interrupt indicates that the job was finished.
+If so it should update the job struct, reset the core registers, and return a JOB_STATUS_END_* code.
+If the job is still running after this function it should return JOB_STATUS_CONTINUE_RUN.
+The function must also enable the bits in the interrupt mask for the core.
+Called by the bottom half interrupt function. */
+static int subsystem_maligp_irq_handler_bottom_half(mali_core_renderunit* core)
+{
+ mali_core_job * job;
+ maligp_job * jobgp;
+ u32 irq_readout;
+ u32 core_status;
+ u32 vscl;
+ u32 plbcl;
+
+ job = core->current_job;
+
+ if (mali_benchmark) {
+ MALI_DEBUG_PRINT(3, ("MaliGP: Job: Benchmark\n") );
+ irq_readout = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+ core_status = 0;
+ } else {
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+ core_status = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ }
+
+ if (NULL == job)
+ {
+ MALI_DEBUG_ASSERT(CORE_IDLE==core->state);
+ if ( 0 != irq_readout )
+ {
+ MALI_PRINT_ERROR(("Interrupt from a core not running a job. IRQ: 0x%04x Status: 0x%04x", irq_readout, core_status));
+ }
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ MALI_DEBUG_ASSERT(CORE_IDLE!=core->state);
+
+ jobgp = GET_JOBGP2_PTR(job);
+
+ jobgp->heap_current_addr = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+
+ vscl = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR);
+ plbcl = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Job: 0x%08x IRQ RECEIVED Rawstat: 0x%x Status: 0x%x\n",
+ (u32)jobgp->user_input.user_job_ptr, irq_readout , core_status )) ;
+
+ jobgp->irq_status |= irq_readout;
+ jobgp->status_reg_on_stop = core_status;
+
+ if ( 0 != jobgp->is_stalled_waiting_for_more_memory )
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ /* Read back the performance counters */
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ jobgp->perf_counter0 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ jobgp->perf_counter1 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+
+ if (jobgp->perf_counter_l2_src0 == src0)
+ {
+ jobgp->perf_counter_l2_val0 = val0 - jobgp->perf_counter_l2_val0;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val0 = 0;
+ }
+
+ if (jobgp->perf_counter_l2_src1 == src1)
+ {
+ jobgp->perf_counter_l2_val1 = val1 - jobgp->perf_counter_l2_val1;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+
+ MALI_DEBUG_PRINT(2, ("Mali GP: Job aborted - userspace would not provide more heap memory.\n"));
+ return JOB_STATUS_END_OOM; /* Core is ready for more jobs.*/
+ }
+ /* finished ? */
+ else if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Registers On job end:\n"));
+ maligp_print_regs(4, core);
+#endif
+ MALI_DEBUG_PRINT_IF(5, irq_readout & 0x04, ("OOM when done, ignoring (reg.current = 0x%x, reg.end = 0x%x)\n",
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+
+
+ if (0 != jobgp->user_input.perf_counter_flag )
+ {
+ /* Read back the performance counters */
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ jobgp->perf_counter0 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ jobgp->perf_counter1 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+
+ if (jobgp->perf_counter_l2_src0 == src0)
+ {
+ jobgp->perf_counter_l2_val0 = val0 - jobgp->perf_counter_l2_val0;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val0 = 0;
+ }
+
+ if (jobgp->perf_counter_l2_src1 == src1)
+ {
+ jobgp->perf_counter_l2_val1 = val1 - jobgp->perf_counter_l2_val1;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+ }
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+
+ return JOB_STATUS_END_SUCCESS; /* core idle */
+ }
+ /* sw watchdog timeout handling or time to do hang checking ? */
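+ /* The job is treated as hung when the sw watchdog has expired outright, or
+ when the hang-check timer fired and no progress is visible: the vertex
+ counter (when extended checking is enabled) and the active VS/PLBU
+ command list pointers are all unchanged since the previous check. */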
+ else if (
+ (CORE_WATCHDOG_TIMEOUT == core->state) ||
+ (
+ (CORE_HANG_CHECK_TIMEOUT == core->state) &&
+ (
+ (jobgp->have_extended_progress_checking ? (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE) == jobgp->vertices) : 1/*TRUE*/) &&
+ ((core_status & MALIGP2_REG_VAL_STATUS_VS_ACTIVE) ? (vscl == jobgp->last_vscl) : 1/*TRUE*/) &&
+ ((core_status & MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE) ? (plbcl == jobgp->last_plbcl) : 1/*TRUE*/)
+ )
+ )
+ )
+ {
+ /* no progress detected, killed by the watchdog */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ MALI_DEBUG_PRINT(1, ("Mali GP: SW-Timeout. Regs:\n"));
+ if (core_status & MALIGP2_REG_VAL_STATUS_VS_ACTIVE) MALI_DEBUG_PRINT(1, ("vscl current = 0x%x last = 0x%x\n", (void*)vscl, (void*)jobgp->last_vscl));
+ if (core_status & MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE) MALI_DEBUG_PRINT(1, ("plbcl current = 0x%x last = 0x%x\n", (void*)plbcl, (void*)jobgp->last_plbcl));
+ if (jobgp->have_extended_progress_checking) MALI_DEBUG_PRINT(1, ("vertices processed = %d, last = %d\n", mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE),
+ jobgp->vertices));
+#ifdef DEBUG
+ maligp_print_regs(2, core);
+#endif
+ return JOB_STATUS_END_HANG;
+ }
+ /* if hang timeout checking was enabled and we detected progress, we will fall through to this check */
+ /* check for PLBU OOM before the hang check to avoid the race condition of the hw watchdog triggering while waiting for us to handle the OOM interrupt */
+ else if ( 0 != (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM))
+ {
+ mali_core_session *session;
+ _mali_osk_notification_t *notific;
+ _mali_uk_gp_job_suspended_s * suspended_job;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ session = job->session;
+
+ MALI_DEBUG_PRINT(4, ("OOM, new heap requested by GP\n"));
+ MALI_DEBUG_PRINT(4, ("Status when OOM: current = 0x%x, end = 0x%x\n",
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+
+ notific = _mali_osk_notification_create(
+
+ _MALI_NOTIFICATION_GP_STALLED,
+ sizeof( _mali_uk_gp_job_suspended_s )
+ );
+ if ( NULL == notific)
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not get notification object\n")) ;
+ return JOB_STATUS_END_OOM; /* Core is ready for more jobs.*/
+ }
+
+ core->state = CORE_WORKING;
+ jobgp->is_stalled_waiting_for_more_memory = 1;
+ suspended_job = (_mali_uk_gp_job_suspended_s *)notific->result_buffer; /* this is ok - result_buffer was malloc'd */
+
+ suspended_job->user_job_ptr = jobgp->user_input.user_job_ptr;
+ suspended_job->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY ;
+ suspended_job->cookie = (u32) core;
+ last_gp_core_cookie = core;
+
+ _mali_osk_notification_queue_send( session->notification_queue, notific);
+
+#ifdef DEBUG
+ maligp_print_regs(4, core);
+#endif
+
+ /* stop all active timers */
+ _mali_osk_timer_del( core->timer);
+ _mali_osk_timer_del( core->timer_hang_detection);
+ MALI_DEBUG_PRINT(4, ("Mali GP: PLBU heap empty, sending memory request to userspace\n"));
+ /* save to watchdog_jiffies what was remaining WD timeout value when OOM was triggered */
+ job->watchdog_jiffies = (long)job->watchdog_jiffies - (long)_mali_osk_time_tickcount();
+ /* reuse core->timer as the userspace response timeout handler */
+ _mali_osk_timer_add( core->timer, _mali_osk_time_mstoticks(1000) ); /* wait max 1 sec for userspace to respond */
+ return JOB_STATUS_CONTINUE_RUN; /* The core is NOT available for new jobs. */
+ }
+	/* hw watchdog reporting a new hang, or an existing hang check fired but progress was detected above? */
+ else if ((CORE_HANG_CHECK_TIMEOUT == core->state) || (irq_readout & jobgp->active_mask & MALIGP2_REG_VAL_IRQ_HANG))
+ {
+ /* check interval in ms */
+ u32 timeout = mali_core_hang_check_timeout_get();
+ MALI_DEBUG_PRINT(3, ("Mali GP: HW/SW Watchdog triggered, checking for progress in %d ms\n", timeout));
+ core->state = CORE_WORKING;
+
+ /* save state for the progress checking */
+ jobgp->last_vscl = vscl;
+ jobgp->last_plbcl = plbcl;
+ if (jobgp->have_extended_progress_checking)
+ {
+ jobgp->vertices = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+ /* hw watchdog triggered, set up a progress checker every HANGCHECK ms */
+ _mali_osk_timer_add( core->timer_hang_detection, _mali_osk_time_mstoticks(timeout));
+ jobgp->active_mask &= ~MALIGP2_REG_VAL_IRQ_HANG; /* ignore the hw watchdog from now on */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+		return JOB_STATUS_CONTINUE_RUN; /* not finished */
+	}
+ /* no errors, but still working */
+ else if ( ( 0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ERROR)) &&
+ ( 0 != (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE ))
+ )
+ {
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+ return JOB_STATUS_CONTINUE_RUN;
+ }
+ /* Else there must be some error */
+ else
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+		MALI_DEBUG_PRINT(1, ("Mali GP: Core crashed? IRQ: 0x%x Status: 0x%x\n", irq_readout, core_status ));
+ #ifdef DEBUG
+ MALI_DEBUG_PRINT(1, ("Mali GP: Registers Before reset:\n"));
+ maligp_print_regs(1, core);
+ #endif
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+}
+
+
+/* This function is called from the ioctl function and should return a mali_core_job pointer
+to a created mali_core_job object with the data given from userspace */
+static _mali_osk_errcode_t subsystem_maligp_get_new_job_from_user(struct mali_core_session * session, void * argument)
+{
+ maligp_job *jobgp;
+ mali_core_job *job;
+ mali_core_job *previous_replaced_job;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_uk_gp_start_job_s * user_ptr_job_input;
+
+ user_ptr_job_input = (_mali_uk_gp_start_job_s *)argument;
+
+ MALI_CHECK_NON_NULL(jobgp = (maligp_job *) _mali_osk_calloc(1, sizeof(maligp_job)), _MALI_OSK_ERR_FAULT);
+
+ /* Copy the job data from the U/K interface */
+ if ( NULL == _mali_osk_memcpy(&jobgp->user_input, user_ptr_job_input, sizeof(_mali_uk_gp_start_job_s) ) )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not copy data from U/K interface.\n")) ;
+ err = _MALI_OSK_ERR_FAULT;
+ goto function_exit;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: subsystem_maligp_get_new_job_from_user 0x%x\n", (void*)jobgp->user_input.user_job_ptr));
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Job Regs: 0x%08X 0x%08X, 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ jobgp->user_input.frame_registers[0],
+ jobgp->user_input.frame_registers[1],
+ jobgp->user_input.frame_registers[2],
+ jobgp->user_input.frame_registers[3],
+ jobgp->user_input.frame_registers[4],
+ jobgp->user_input.frame_registers[5]) );
+
+
+ job = GET_JOB_EMBEDDED_PTR(jobgp);
+
+ job->session = session;
+ job_priority_set(job, jobgp->user_input.priority);
+ job_watchdog_set(job, jobgp->user_input.watchdog_msecs );
+ jobgp->heap_current_addr = jobgp->user_input.frame_registers[4];
+
+ job->abort_id = jobgp->user_input.abort_id;
+
+ jobgp->is_stalled_waiting_for_more_memory = 0;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ jobgp->pid = _mali_osk_get_pid();
+ jobgp->tid = _mali_osk_get_tid();
+#endif
+
+ if (NULL != session->job_waiting_to_run)
+ {
+		/* IF NOT( newjob HAS HIGHER PRIORITY THAN waitingjob) EXIT_NOT_START new job */
+ if(!job_has_higher_priority(job, session->job_waiting_to_run))
+ {
+ /* The job we try to add does NOT have higher pri than current */
+ /* Cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ goto function_exit;
+ }
+ }
+
+ /* We now know that we have a job, and a slot to put it in */
+
+ jobgp->active_mask = MALIGP2_REG_VAL_IRQ_MASK_USED;
+
+ /* Allocating User Return Data */
+ jobgp->notification_obj = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_GP_FINISHED,
+ sizeof(_mali_uk_gp_job_finished_s) );
+
+ if ( NULL == jobgp->notification_obj )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not get notification_obj.\n")) ;
+ err = _MALI_OSK_ERR_NOMEM;
+ goto function_exit;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD( &(job->list) ) ;
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Job: 0x%08x INPUT from user.\n", (u32)jobgp->user_input.user_job_ptr)) ;
+
+ /* This should not happen since we have the checking of priority above */
+ err = mali_core_session_add_job(session, job, &previous_replaced_job);
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Internal error\n")) ;
+ /* Cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ _mali_osk_notification_delete( jobgp->notification_obj );
+ goto function_exit;
+ }
+
+	/* If non-NULL: this session had a job with lower priority which was removed.
+	This replaced job is given back to userspace. */
+ if ( NULL != previous_replaced_job )
+ {
+ maligp_job *previous_replaced_jobgp;
+
+ previous_replaced_jobgp = GET_JOBGP2_PTR(previous_replaced_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Replacing job: 0x%08x\n", (u32)previous_replaced_jobgp->user_input.user_job_ptr)) ;
+
+		/* Copy into the input data (which is also output data) the
+		pointer to the job that was replaced, so that the userspace
+		driver can put this job at the front of its job queue */
+ user_ptr_job_input->returned_user_job_ptr = previous_replaced_jobgp->user_input.user_job_ptr;
+
+ /** @note failure to 'copy to user' at this point must not free jobgp,
+ * and so no transaction rollback required in the U/K interface */
+
+ /* This does not cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED;
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_get_new_job_from_user: Job added, prev returned\n")) ;
+ }
+ else
+ {
+ /* This does not cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED;
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_get_new_job_from_user: Job added\n")) ;
+ }
+
+function_exit:
+ if ( _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE == user_ptr_job_input->status
+ || _MALI_OSK_ERR_OK != err )
+ {
+ _mali_osk_free(jobgp);
+ }
+ MALI_ERROR(err);
+}
+
+
+static _mali_osk_errcode_t subsystem_maligp_suspend_response(struct mali_core_session * session, void * argument)
+{
+ mali_core_renderunit *core;
+ maligp_job *jobgp;
+ mali_core_job *job;
+
+ _mali_uk_gp_suspend_response_s * suspend_response;
+
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_suspend_response\n"));
+
+ suspend_response = (_mali_uk_gp_suspend_response_s *)argument;
+
+ /* We read job data from User */
+ /* On a single mali_gp system we can only have one Stalled GP,
+ and therefore one stalled request with a cookie. This checks
+ that we get the correct cookie */
+ if ( last_gp_core_cookie != (mali_core_renderunit *)suspend_response->cookie )
+ {
+ MALI_DEBUG_PRINT(2, ("Mali GP: Got an illegal cookie from Userspace.\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ core = (mali_core_renderunit *)suspend_response->cookie;
+ last_gp_core_cookie = NULL;
+ job = core->current_job;
+ jobgp = GET_JOBGP2_PTR(job);
+
+ switch( suspend_response->code )
+ {
+ case _MALIGP_JOB_RESUME_WITH_NEW_HEAP :
+ MALI_DEBUG_PRINT(5, ("MALIGP_JOB_RESUME_WITH_NEW_HEAP jiffies: %li\n", _mali_osk_time_tickcount()));
+ MALI_DEBUG_PRINT(4, ("New Heap addr 0x%08x - 0x%08x\n", suspend_response->arguments[0], suspend_response->arguments[1]));
+
+ jobgp->is_stalled_waiting_for_more_memory = 0;
+ job->watchdog_jiffies += _mali_osk_time_tickcount(); /* convert to absolute time again */
+ _mali_osk_timer_mod( core->timer, job->watchdog_jiffies); /* update the timer */
+
+
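+			/* Hand the new heap range to the PLBU: clear the OOM/hang interrupts, restore the IRQ mask,
+			program the new allocation window and issue the update command. */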
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, suspend_response->arguments[0]);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, suspend_response->arguments[1]);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0);
+#endif
+
+ MALI_DEBUG_PRINT(4, ("GP resumed with new heap\n"));
+
+ break;
+
+ case _MALIGP_JOB_ABORT:
+ MALI_DEBUG_PRINT(3, ("MALIGP_JOB_ABORT on heap extend request\n"));
+ _mali_osk_irq_schedulework( core->irq );
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Wrong Suspend response from userspace\n"));
+ }
+ MALI_SUCCESS;
+}
+
+/* This function is called from the ioctl function and should write the necessary data
+to userspace telling which job was finished and the status and debuginfo for this job.
+The function must also free and cleanup the input job object. */
+static void subsystem_maligp_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status )
+{
+ maligp_job *jobgp;
+ _mali_uk_gp_job_finished_s * job_out;
+ _mali_uk_gp_start_job_s* job_input;
+ mali_core_session *session;
+
+
+ jobgp = _MALI_OSK_CONTAINER_OF(job, maligp_job, embedded_core_job);
+ job_out = (_mali_uk_gp_job_finished_s *)jobgp->notification_obj->result_buffer; /* OK - this should've been malloc'd */
+ job_input= &(jobgp->user_input);
+ session = job->session;
+
+ MALI_DEBUG_PRINT(5, ("Mali GP: Job: 0x%08x OUTPUT to user. Runtime: %d ms, irq readout %x\n",
+ (u32)jobgp->user_input.user_job_ptr,
+ job->render_time_msecs,
+ jobgp->irq_status)) ;
+
+ _mali_osk_memset(job_out, 0 , sizeof(_mali_uk_gp_job_finished_s));
+
+ job_out->user_job_ptr = job_input->user_job_ptr;
+
+ switch( end_status )
+ {
+ case JOB_STATUS_CONTINUE_RUN:
+ case JOB_STATUS_END_SUCCESS:
+ case JOB_STATUS_END_OOM:
+ case JOB_STATUS_END_ABORT:
+ case JOB_STATUS_END_TIMEOUT_SW:
+ case JOB_STATUS_END_HANG:
+ case JOB_STATUS_END_SEG_FAULT:
+ case JOB_STATUS_END_ILLEGAL_JOB:
+ case JOB_STATUS_END_UNKNOWN_ERR:
+ case JOB_STATUS_END_SHUTDOWN:
+ case JOB_STATUS_END_SYSTEM_UNUSABLE:
+ job_out->status = (mali_subsystem_job_end_code) end_status;
+ break;
+ default:
+ job_out->status = JOB_STATUS_END_UNKNOWN_ERR ;
+ }
+
+ job_out->irq_status = jobgp->irq_status;
+ job_out->status_reg_on_stop = jobgp->status_reg_on_stop;
+ job_out->vscl_stop_addr = 0;
+ job_out->plbcl_stop_addr = 0;
+ job_out->heap_current_addr = jobgp->heap_current_addr;
+ job_out->perf_counter0 = jobgp->perf_counter0;
+ job_out->perf_counter1 = jobgp->perf_counter1;
+ job_out->perf_counter_src0 = jobgp->user_input.perf_counter_src0 ;
+ job_out->perf_counter_src1 = jobgp->user_input.perf_counter_src1 ;
+ job_out->render_time = job->render_time_msecs;
+#if defined(USING_MALI400_L2_CACHE)
+ job_out->perf_counter_l2_src0 = jobgp->perf_counter_l2_src0;
+ job_out->perf_counter_l2_src1 = jobgp->perf_counter_l2_src1;
+ job_out->perf_counter_l2_val0 = jobgp->perf_counter_l2_val0;
+ job_out->perf_counter_l2_val1 = jobgp->perf_counter_l2_val1;
+#endif
+
+ _mali_osk_notification_queue_send( session->notification_queue, jobgp->notification_obj);
+ jobgp->notification_obj = NULL;
+
+ _mali_osk_free(jobgp);
+
+ last_gp_core_cookie = NULL;
+}
+
+static void subsystem_maligp_renderunit_delete(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Mali GP: maligp_renderunit_delete\n"));
+ _mali_osk_free(core);
+}
+
+static void subsystem_maligp_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style)
+{
+ MALI_DEBUG_PRINT(5, ("Mali GP: renderunit_reset_core\n"));
+
+ switch (style)
+ {
+ case MALI_CORE_RESET_STYLE_RUNABLE:
+ maligp_reset(core);
+ break;
+ case MALI_CORE_RESET_STYLE_DISABLE:
+ maligp_raw_reset(core); /* do the raw reset */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* then disable the IRQs */
+ break;
+ case MALI_CORE_RESET_STYLE_HARD:
+ maligp_reset_hard(core);
+ maligp_initialize_registers_mgmt(core);
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("Unknown reset type %d\n", style));
+ break;
+ }
+}
+
+static void subsystem_maligp_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core)
+{
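+	/* Probe helper: unmask the GP IRQs and force the hang interrupt through the raw status register so that the probe can verify the interrupt line is wired up. */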
+ mali_core_renderunit_register_write(core , MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+ mali_core_renderunit_register_write(core , MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG );
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t subsystem_maligp_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+
+ if ( MALIGP2_REG_VAL_IRQ_FORCE_HANG & irq_readout )
+ {
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
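+/* The functions below are the GP U/K (user/kernel) entry points: each one resolves the caller's GP core session from the supplied context and delegates to the common core subsystem ioctl helpers. */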
+_mali_osk_errcode_t _mali_ukk_gp_start_job( _mali_uk_gp_start_job_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_start_job(session, args);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_number_of_cores_get(session, &args->number_of_cores);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_core_version_get(session, &args->version);
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_suspend_response(session, args);
+}
+
+void _mali_ukk_gp_abort_job( _mali_uk_gp_abort_job_s * args)
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ if (NULL == args->ctx) return;
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ if (NULL == session) return;
+ mali_core_subsystem_ioctl_abort_job(session, args->abort_id);
+
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t maligp_signal_power_up( mali_bool queue_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali GP: signal power up core - queue_only: %d\n", queue_only ));
+
+ return( mali_core_subsystem_signal_power_up( &subsystem_maligp, 0, queue_only ) );
+}
+
+_mali_osk_errcode_t maligp_signal_power_down( mali_bool immediate_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali GP: signal power down core - immediate_only: %d\n", immediate_only ));
+
+ return( mali_core_subsystem_signal_power_down( &subsystem_maligp, 0, immediate_only ) );
+}
+
+#endif
+
+#if MALI_STATE_TRACKING
+void maligp_subsystem_dump_state(void)
+{
+ mali_core_renderunit_dump_state(&subsystem_maligp);
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c
new file mode 100644
index 00000000000..0ac49379bea
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_core.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_rendercore.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#ifdef USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h" /* Needed for mali_kernel_mmu_force_bus_reset() */
+#endif
+
+#include "mali_osk_list.h"
+
+#if defined(USING_MALI200)
+#define MALI_PP_SUBSYSTEM_NAME "Mali200"
+#define MALI_PP_CORE_TYPE _MALI_200
+#elif defined(USING_MALI400)
+#define MALI_PP_SUBSYSTEM_NAME "Mali-400 PP"
+#define MALI_PP_CORE_TYPE _MALI_400_PP
+#else
+#error "No supported mali core defined"
+#endif
+
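+/* Convert between the embedded generic mali_core_job and the enclosing mali200_job (container-of pattern). */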
+#define GET_JOB_EMBEDDED_PTR(job) (&((job)->embedded_core_job))
+#define GET_JOB200_PTR(job_extern) _MALI_OSK_CONTAINER_OF(job_extern, mali200_job, embedded_core_job)
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_mali200_id = -1;
+
+/* Describes the settings of a mali200 job */
+typedef struct mali200_job
+{
+ /* The general job struct common for all mali cores */
+ mali_core_job embedded_core_job;
+ _mali_uk_pp_start_job_s user_input;
+
+ u32 irq_status;
+ u32 perf_counter0;
+ u32 perf_counter1;
+	u32 last_tile_list_addr; /* Necessary to continue a stopped job */
+
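+	/* IRQ sources currently enabled for this job; the HANG bit is cleared once the SW hang checker takes over */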
+ u32 active_mask;
+
+ /* The data we will return back to the user */
+ _mali_osk_notification_t *notification_obj;
+
+#if defined(USING_MALI400_L2_CACHE)
+ u32 perf_counter_l2_src0;
+ u32 perf_counter_l2_src1;
+ u32 perf_counter_l2_val0;
+ u32 perf_counter_l2_val1;
+ u32 perf_counter_l2_val0_raw;
+ u32 perf_counter_l2_val1_raw;
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ u32 pid;
+ u32 tid;
+#endif
+} mali200_job;
+
+
+/* Functions exposed to the general external system through
+   function pointers. */
+
+static _mali_osk_errcode_t mali200_subsystem_startup(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static _mali_osk_errcode_t mali200_subsystem_mmu_connect(mali_kernel_subsystem_identifier id);
+#endif
+static void mali200_subsystem_terminate(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t mali200_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+static void mali200_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+static _mali_osk_errcode_t mali200_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t mali200_renderunit_create(_mali_osk_resource_t * resource);
+#if USING_MMU
+static void mali200_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+#if MALI_STATE_TRACKING
+void mali200_subsystem_dump_state(void);
+#endif
+
+/* Internal support functions */
+static _mali_osk_errcode_t mali200_core_version_legal( mali_core_renderunit *core );
+static void mali200_reset(mali_core_renderunit *core);
+static void mali200_reset_hard(struct mali_core_renderunit * core);
+static void mali200_raw_reset(mali_core_renderunit * core);
+static void mali200_initialize_registers_mgmt(mali_core_renderunit *core );
+
+/* Functions exposed to the mali_core system through function pointers
+   in the subsystem struct. */
+static _mali_osk_errcode_t subsystem_mali200_start_job(mali_core_job * job, mali_core_renderunit * core);
+static _mali_osk_errcode_t subsystem_mali200_get_new_job_from_user(struct mali_core_session * session, void * argument);
+static void subsystem_mali200_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status);
+static void subsystem_mali200_renderunit_delete(mali_core_renderunit * core);
+static void subsystem_mali200_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style);
+static void subsystem_mali200_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_mali200_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core);
+
+static void subsystem_mali200_renderunit_stop_bus(struct mali_core_renderunit* core);
+static u32 subsystem_mali200_irq_handler_upper_half(struct mali_core_renderunit * core);
+static int subsystem_mali200_irq_handler_bottom_half(struct mali_core_renderunit* core);
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+
+struct mali_kernel_subsystem mali_subsystem_mali200=
+{
+ mali200_subsystem_startup, /* startup */
+ mali200_subsystem_terminate, /* shutdown */
+#if USING_MMU
+ mali200_subsystem_mmu_connect, /* load_complete */
+#else
+ NULL,
+#endif
+ mali200_subsystem_core_system_info_fill, /* system_info_fill */
+ mali200_subsystem_session_begin, /* session_begin */
+ mali200_subsystem_session_end, /* session_end */
+#if USING_MMU
+ mali200_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ mali200_subsystem_dump_state, /* dump_state */
+#endif
+} ;
+
+static mali_core_subsystem subsystem_mali200 ;
+
+static _mali_osk_errcode_t mali200_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem * subsystem;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_startup\n") ) ;
+
+ mali_subsystem_mali200_id = id;
+
+ /* All values get 0 as default */
+ _mali_osk_memset(&subsystem_mali200, 0, sizeof(subsystem_mali200));
+
+ subsystem = &subsystem_mali200;
+ subsystem->start_job = &subsystem_mali200_start_job;
+ subsystem->irq_handler_upper_half = &subsystem_mali200_irq_handler_upper_half;
+ subsystem->irq_handler_bottom_half = &subsystem_mali200_irq_handler_bottom_half;
+ subsystem->get_new_job_from_user = &subsystem_mali200_get_new_job_from_user;
+ subsystem->return_job_to_user = &subsystem_mali200_return_job_to_user;
+ subsystem->renderunit_delete = &subsystem_mali200_renderunit_delete;
+ subsystem->reset_core = &subsystem_mali200_renderunit_reset_core;
+ subsystem->stop_bus = &subsystem_mali200_renderunit_stop_bus;
+ subsystem->probe_core_irq_trigger = &subsystem_mali200_renderunit_probe_core_irq_trigger;
+ subsystem->probe_core_irq_acknowledge = &subsystem_mali200_renderunit_probe_core_irq_finished;
+
+ /* Setting variables in the general core part of the subsystem.*/
+ subsystem->name = MALI_PP_SUBSYSTEM_NAME;
+ subsystem->core_type = MALI_PP_CORE_TYPE;
+ subsystem->id = id;
+
+ /* Initiates the rest of the general core part of the subsystem */
+ MALI_CHECK_NO_ERROR(mali_core_subsystem_init( subsystem ));
+
+ /* This will register the function for adding MALI200 cores to the subsystem */
+#if defined(USING_MALI200)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI200, mali200_renderunit_create));
+#endif
+#if defined(USING_MALI400)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI400PP, mali200_renderunit_create));
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_startup\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+#if USING_MMU
+static _mali_osk_errcode_t mali200_subsystem_mmu_connect(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem_attach_mmu(&subsystem_mali200);
+ MALI_SUCCESS; /* OK */
+}
+#endif
+
+static void mali200_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_terminate\n") ) ;
+ mali_core_subsystem_cleanup(&subsystem_mali200);
+}
+
+static _mali_osk_errcode_t mali200_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ mali_core_session * session;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_session_begin\n") ) ;
+ MALI_CHECK_NON_NULL(session = _mali_osk_malloc( sizeof(mali_core_session) ), _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(session, 0, sizeof(*session) );
+ *slot = (mali_kernel_subsystem_session_slot)session;
+
+ session->subsystem = &subsystem_mali200;
+
+ session->notification_queue = queue;
+
+#if USING_MMU
+ session->mmu_session = mali_session_data;
+#endif
+
+ mali_core_session_begin(session);
+
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_session_begin\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+static void mali200_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ mali_core_session * session;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_session_end\n") ) ;
+ if ( NULL==slot || NULL==*slot)
+ {
+ MALI_PRINT_ERROR(("Input slot==NULL"));
+ return;
+ }
+ session = (mali_core_session*) *slot;
+ mali_core_session_close(session);
+
+ _mali_osk_free(session);
+ *slot = NULL;
+
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_session_end\n") ) ;
+}
+
+/**
+ * We fill in info about all the cores we have
+ * @param info Pointer to system info struct to update
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali200_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ return mali_core_subsystem_system_info_fill(&subsystem_mali200, info);
+}
+
+
+static _mali_osk_errcode_t mali200_renderunit_create(_mali_osk_resource_t * resource)
+{
+ mali_core_renderunit *core;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_renderunit_create\n") ) ;
+ /* Checking that the resource settings are correct */
+#if defined(USING_MALI200)
+ if(MALI200 != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_PP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#elif defined(USING_MALI400)
+ if(MALI400PP != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_PP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+ if ( 0 != resource->size )
+ {
+ MALI_PRINT_ERROR(("Memory size set to " MALI_PP_SUBSYSTEM_NAME " core should be zero."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if ( NULL == resource->description )
+ {
+ MALI_PRINT_ERROR(("A " MALI_PP_SUBSYSTEM_NAME " core needs a unique description field"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Create a new core object */
+ core = (mali_core_renderunit*) _mali_osk_malloc(sizeof(*core));
+ if ( NULL == core )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* Variables set to be able to open and register the core */
+ core->subsystem = &subsystem_mali200 ;
+ core->registers_base_addr = resource->base ;
+ core->size = MALI200_REG_SIZEOF_REGISTER_BANK ;
+ core->irq_nr = resource->irq ;
+ core->description = resource->description;
+#if USING_MMU
+ core->mmu_id = resource->mmu_id;
+ core->mmu = NULL;
+#endif
+#if USING_MALI_PMM
+ /* Set up core's PMM id */
+ switch( subsystem_mali200.number_of_cores )
+ {
+ case 0:
+ core->pmm_id = MALI_PMM_CORE_PP0;
+ break;
+ case 1:
+ core->pmm_id = MALI_PMM_CORE_PP1;
+ break;
+ case 2:
+ core->pmm_id = MALI_PMM_CORE_PP2;
+ break;
+ case 3:
+ core->pmm_id = MALI_PMM_CORE_PP3;
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("Unknown supported core for PMM\n"));
+ err = _MALI_OSK_ERR_FAULT;
+ goto exit_on_error0;
+ }
+#endif
+
+ err = mali_core_renderunit_init( core );
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to initialize renderunit\n"));
+ goto exit_on_error0;
+ }
+
+ /* Map the new core object, setting: core->registers_mapped */
+ err = mali_core_renderunit_map_registers(core);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to map register\n"));
+ goto exit_on_error1;
+ }
+
+ /* Check that the register mapping of the core works.
+ Return 0 if Mali PP core is present and accessible. */
+ if (mali_benchmark) {
+#if defined(USING_MALI200)
+ core->core_version = (((u32)MALI_PP_PRODUCT_ID) << 16) | 5 /* Fake Mali200-r0p5 */;
+#elif defined(USING_MALI400)
+ core->core_version = (((u32)MALI_PP_PRODUCT_ID) << 16) | 0x0101 /* Fake Mali400-r1p1 */;
+#else
+#error "No supported mali core defined"
+#endif
+ } else {
+ core->core_version = mali_core_renderunit_register_read(
+ core,
+ MALI200_REG_ADDR_MGMT_VERSION);
+ }
+
+ err = mali200_core_version_legal(core);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid core\n"));
+ goto exit_on_error2;
+ }
+
+ /* Reset the core. Put the core into a state where it can start to render. */
+ mali200_reset(core);
+
+ /* Registering IRQ, init the work_queue_irq_handle */
+ /* Adding this core as an available renderunit in the subsystem. */
+ err = mali_core_subsystem_register_renderunit(&subsystem_mali200, core);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to register with core\n"));
+ goto exit_on_error2;
+ }
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_renderunit_create\n") ) ;
+
+ MALI_SUCCESS;
+
+exit_on_error2:
+ mali_core_renderunit_unmap_registers(core);
+exit_on_error1:
+ mali_core_renderunit_term(core);
+exit_on_error0:
+ _mali_osk_free( core ) ;
+ MALI_PRINT_ERROR(("Renderunit NOT created."));
+ MALI_ERROR(err);
+}
+
+#if USING_MMU
+/* Used currently only for signalling when MMU has a pagefault */
+static void mali200_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ mali_core_subsystem_broadcast_notification(&subsystem_mali200, message, data);
+}
+#endif
+
+static _mali_osk_errcode_t mali200_core_version_legal( mali_core_renderunit *core )
+{
+ u32 mali_type;
+
+ mali_type = core->core_version >> 16;
+#if defined(USING_MALI400)
+	/* Mali300 and Mali400 are compatible, accept either core. */
+ if (MALI400_PP_PRODUCT_ID != mali_type && MALI300_PP_PRODUCT_ID != mali_type)
+#else
+ if (MALI_PP_PRODUCT_ID != mali_type)
+#endif
+ {
+ MALI_PRINT_ERROR(("Error: reading this from " MALI_PP_SUBSYSTEM_NAME " version register: 0x%x\n", core->core_version));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali PP: core_version_legal: Reads correct mali version: %d\n", mali_type) ) ;
+ MALI_SUCCESS;
+}
+
+static void subsystem_mali200_renderunit_stop_bus(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+static void mali200_raw_reset( mali_core_renderunit *core )
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: mali200_raw_reset: %s\n", core->description));
+ if (mali_benchmark) return;
+
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable IRQs */
+
+#if defined(USING_MALI200)
+
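+	/* Ask the core to stop its bus, then poll the status register until the bus-stopped bit is set (or give up after request_loop_count attempts). */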
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, request_loop_count == i, ("Mali PP: Bus was never stopped during core reset\n"));
+
+
+ if (request_loop_count==i)
+ {
+#if USING_MMU
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+			/* Could not stop the bus connections from the core, probably because some of the already
+			pending bus requests have had a page fault and therefore cannot complete before the MMU
+			finishes page fault handling. This is treated as a heavier reset, which unfortunately
+			resets all the cores on this MMU in addition to the MMU itself */
+ MALI_DEBUG_PRINT(1, ("Mali PP: Forcing Bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow PP to stop its bus, system failure, unable to recover\n"));
+ return;
+ }
+
+ /* use the hard reset routine to do the actual reset */
+ mali200_reset_hard(core);
+
+#elif defined(USING_MALI400)
+
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI400PP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count==i)
+ {
+#if USING_MMU
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+			/* Could not stop the bus connections from the core, probably because some of the already
+			pending bus requests have had a page fault and therefore cannot complete before the MMU
+			finishes page fault handling. This is treated as a heavier reset, which unfortunately
+			resets all the cores on this MMU in addition to the MMU itself */
+ MALI_DEBUG_PRINT(1, ("Mali PP: Forcing Bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow PP to stop its bus, system failure, unable to recover\n"));
+ return;
+ }
+ else
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+
+#else
+#error "no supported mali core defined"
+#endif
+}
+
+static void mali200_reset( mali_core_renderunit *core )
+{
+ if (!mali_benchmark) {
+ mali200_raw_reset(core);
+ mali200_initialize_registers_mgmt(core);
+ }
+}
+
+/* Sets the mali200 management registers to their default values. */
+static void mali200_initialize_registers_mgmt(mali_core_renderunit *core )
+{
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_initialize_registers_mgmt: %s\n", core->description)) ;
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+/* Start this job on this core. Returns _MALI_OSK_ERR_OK if the job was started. */
+static _mali_osk_errcode_t subsystem_mali200_start_job(mali_core_job * job, mali_core_renderunit * core)
+{
+ mali200_job *job200;
+
+ /* The local extended version of the general structs */
+ job200 = _MALI_OSK_CONTAINER_OF(job, mali200_job, embedded_core_job);
+
+ if ( (0 == job200->user_input.frame_registers[0]) ||
+ (0 == job200->user_input.frame_registers[1]) )
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x WILL NOT START SINCE JOB HAS ILLEGAL ADDRESSES\n",
+ (u32)job200->user_input.user_job_ptr));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x START_RENDER Tile_list: 0x%08x\n",
+ (u32)job200->user_input.user_job_ptr,
+ job200->user_input.frame_registers[0]));
+ MALI_DEBUG_PRINT(6, ("Mali PP: RSW base addr: 0x%08x Vertex base addr: 0x%08x\n",
+ job200->user_input.frame_registers[1], job200->user_input.frame_registers[2]));
+
+ /* Frame registers. Copy from mem to physical registers */
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_FRAME,
+ &(job200->user_input.frame_registers[0]),
+ MALI200_NUM_REGS_FRAME);
+
+ /* Write Back unit 0. Copy from mem to physical registers*/
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_WB0,
+ &(job200->user_input.wb0_registers[0]),
+ MALI200_NUM_REGS_WBx);
+
+ /* Write Back unit 1. Copy from mem to physical registers */
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_WB1,
+ &(job200->user_input.wb1_registers[0]),
+ MALI200_NUM_REGS_WBx);
+
+ /* Write Back unit 2. Copy from mem to physical registers */
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_WB2,
+ &(job200->user_input.wb2_registers[0]),
+ MALI200_NUM_REGS_WBx);
+
+
+ /* This selects which performance counters we are reading */
+ if ( 0 != job200->user_input.perf_counter_flag )
+ {
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE,
+ MALI200_REG_VAL_PERF_CNT_ENABLE);
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC,
+ job200->user_input.perf_counter_src0);
+
+ }
+
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALI200_REG_VAL_PERF_CNT_ENABLE);
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ job200->user_input.perf_counter_src1);
+
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if ( job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ int force_reset = ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET ) ? 1 : 0;
+ u32 src0 = 0;
+ u32 src1 = 0;
+
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE )
+ {
+ src0 = job200->user_input.perf_counter_l2_src0;
+ }
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE )
+ {
+ src1 = job200->user_input.perf_counter_l2_src1;
+ }
+
+ mali_kernel_l2_cache_set_perf_counters(src0, src1, force_reset); /* will activate and possibly reset counters */
+
+			/* Now, retrieve the current values, so we can subtract them when the job has completed */
+ mali_kernel_l2_cache_get_perf_counters(&job200->perf_counter_l2_src0,
+ &job200->perf_counter_l2_val0,
+ &job200->perf_counter_l2_src1,
+ &job200->perf_counter_l2_val1);
+ }
+#endif
+ }
+
+ subsystem_flush_mapped_mem_cache();
+ _mali_osk_mem_barrier();
+
+ /* This is the command that starts the Core */
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT,
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), job200->pid, job200->tid, 0, 0, 0);
+#endif
+
+ MALI_SUCCESS;
+}
+
+static u32 subsystem_mali200_irq_handler_upper_half(mali_core_renderunit * core)
+{
+ u32 irq_readout;
+
+ if (mali_benchmark) {
+ return (core->current_job ? 1 : 0); /* simulate irq is pending when a job is pending */
+ }
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: subsystem_mali200_irq_handler_upper_half: %s\n", core->description)) ;
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+
+ if ( MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout )
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+ return 1;
+ }
+ return 0;
+}
+
+static int subsystem_mali200_irq_handler_bottom_half(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+ u32 current_tile_addr;
+ u32 core_status;
+ mali_core_job * job;
+ mali200_job * job200;
+
+ job = core->current_job;
+ job200 = GET_JOB200_PTR(job);
+
+
+ if (mali_benchmark) {
+ irq_readout = MALI200_REG_VAL_IRQ_END_OF_FRAME;
+ current_tile_addr = 0;
+ core_status = 0;
+ } else {
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+ current_tile_addr = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR);
+ core_status = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_STATUS);
+ }
+
+ if (NULL == job)
+ {
+ MALI_DEBUG_ASSERT(CORE_IDLE==core->state);
+ if ( 0 != irq_readout )
+ {
+ MALI_PRINT_ERROR(("Interrupt from a core not running a job. IRQ: 0x%04x Status: 0x%04x", irq_readout, core_status));
+ }
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ MALI_DEBUG_ASSERT(CORE_IDLE!=core->state);
+
+ job200->irq_status |= irq_readout;
+
+ MALI_DEBUG_PRINT_IF( 3, ( 0 != irq_readout ),
+ ("Mali PP: Job: 0x%08x IRQ RECEIVED Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x\n",
+ (u32)job200->user_input.user_job_ptr, irq_readout ,current_tile_addr ,core_status));
+
+ if ( MALI200_REG_VAL_IRQ_END_OF_FRAME & irq_readout)
+ {
+#if defined(USING_MALI200)
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES);
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+
+ if (0 != job200->user_input.perf_counter_flag )
+ {
+ if (job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ job200->perf_counter0 = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ job200->perf_counter1 = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+
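+				/* Only compute a per-job delta if the counter source is unchanged since job start; otherwise report zero. */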
+ if (job200->perf_counter_l2_src0 == src0)
+ {
+ job200->perf_counter_l2_val0_raw = val0;
+ job200->perf_counter_l2_val0 = val0 - job200->perf_counter_l2_val0;
+ }
+ else
+ {
+ job200->perf_counter_l2_val0_raw = 0;
+ job200->perf_counter_l2_val0 = 0;
+ }
+
+ if (job200->perf_counter_l2_src1 == src1)
+ {
+ job200->perf_counter_l2_val1_raw = val1;
+ job200->perf_counter_l2_val1 = val1 - job200->perf_counter_l2_val1;
+ }
+ else
+ {
+ job200->perf_counter_l2_val1_raw = 0;
+ job200->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+
+ }
+
+ return JOB_STATUS_END_SUCCESS; /* reschedule */
+ }
+	/* Overall SW watchdog timeout, or a hang check fired with no progress detected? */
+ else if (
+ (CORE_WATCHDOG_TIMEOUT == core->state) ||
+ ((CORE_HANG_CHECK_TIMEOUT == core->state) && (current_tile_addr == job200->last_tile_list_addr))
+ )
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+ /* no progress detected, killed by the watchdog */
+ MALI_DEBUG_PRINT(2, ("M200: SW-Timeout Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x.\n", irq_readout ,current_tile_addr ,core_status) );
+		/* In this case the surrounding system will clean up and reset the core */
+ return JOB_STATUS_END_HANG;
+ }
+ /* HW watchdog triggered or an existing hang check passed? */
+ else if ((CORE_HANG_CHECK_TIMEOUT == core->state) || (irq_readout & job200->active_mask & MALI200_REG_VAL_IRQ_HANG))
+ {
+ /* check interval in ms */
+ u32 timeout = mali_core_hang_check_timeout_get();
+ MALI_DEBUG_PRINT(3, ("M200: HW/SW Watchdog triggered, checking for progress in %d ms\n", timeout));
+ job200->last_tile_list_addr = current_tile_addr;
+ /* hw watchdog triggered, set up a progress checker every HANGCHECK ms */
+ _mali_osk_timer_add(core->timer_hang_detection, _mali_osk_time_mstoticks(timeout));
+		job200->active_mask &= ~MALI200_REG_VAL_IRQ_HANG; /* ignore the hw watchdog from now on */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, irq_readout & ~MALI200_REG_VAL_IRQ_HANG);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, job200->active_mask);
+ return JOB_STATUS_CONTINUE_RUN; /* not finished */
+ }
+ /* No irq pending, core still busy */
+ else if ((0 == (irq_readout & MALI200_REG_VAL_IRQ_MASK_USED)) && ( 0 != (core_status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)))
+ {
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, job200->active_mask);
+ return JOB_STATUS_CONTINUE_RUN; /* Not finished */
+ }
+ else
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+
+ MALI_DEBUG_PRINT(1, ("Mali PP: Job: 0x%08x CRASH? Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x\n",
+ (u32)job200->user_input.user_job_ptr, irq_readout ,current_tile_addr ,core_status) ) ;
+
+ if (irq_readout & MALI200_REG_VAL_IRQ_BUS_ERROR)
+ {
+ u32 bus_error = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS);
+
+ MALI_DEBUG_PRINT(1, ("Bus error status: 0x%08X\n", bus_error));
+ MALI_DEBUG_PRINT_IF(1, (bus_error & 0x01), ("Bus write error from id 0x%02x\n", (bus_error>>2) & 0x0F));
+ MALI_DEBUG_PRINT_IF(1, (bus_error & 0x02), ("Bus read error from id 0x%02x\n", (bus_error>>6) & 0x0F));
+ MALI_DEBUG_PRINT_IF(1, (0 == (bus_error & 0x03)), ("Bus error but neither read or write was set as the error reason\n"));
+ (void)bus_error;
+ }
+
+ return JOB_STATUS_END_UNKNOWN_ERR; /* reschedule */
+ }
+}
+
+
+/* This function is called from the ioctl function and should return a mali_core_job pointer
+to a created mali_core_job object with the data given from userspace */
+static _mali_osk_errcode_t subsystem_mali200_get_new_job_from_user(struct mali_core_session * session, void * argument)
+{
+ mali200_job *job200;
+ mali_core_job *job;
+ mali_core_job *previous_replaced_job;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_uk_pp_start_job_s * user_ptr_job_input;
+
+ user_ptr_job_input = (_mali_uk_pp_start_job_s *)argument;
+
+ MALI_CHECK_NON_NULL(job200 = (mali200_job *) _mali_osk_malloc(sizeof(mali200_job)), _MALI_OSK_ERR_NOMEM);
+ _mali_osk_memset(job200, 0 , sizeof(mali200_job) );
+
+ /* We read job data from Userspace pointer */
+ if ( NULL == _mali_osk_memcpy((void*)&job200->user_input, user_ptr_job_input, sizeof(job200->user_input)) )
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Could not copy data from U/K interface.\n")) ;
+ err = _MALI_OSK_ERR_FAULT;
+ goto function_exit;
+ }
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: subsystem_mali200_get_new_job_from_user 0x%x\n", (void*)job200->user_input.user_job_ptr));
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: Frameregs: 0x%x 0x%x 0x%x Writeback[1] 0x%x, Pri:%d; Watchd:%d\n",
+ job200->user_input.frame_registers[0], job200->user_input.frame_registers[1], job200->user_input.frame_registers[2],
+ job200->user_input.wb0_registers[1], job200->user_input.priority,
+ job200->user_input.watchdog_msecs));
+
+ if ( job200->user_input.perf_counter_flag)
+ {
+#if defined(USING_MALI400_L2_CACHE)
+ MALI_DEBUG_PRINT(5, ("Mali PP: Performance counters: flag:0x%x src0:0x%x src1:0x%x l2_src0:0x%x l2_src1:0x%x\n",
+ job200->user_input.perf_counter_flag,
+ job200->user_input.perf_counter_src0,
+ job200->user_input.perf_counter_src1,
+ job200->user_input.perf_counter_l2_src0,
+ job200->user_input.perf_counter_l2_src1));
+#else
+ MALI_DEBUG_PRINT(5, ("Mali PP: Performance counters: flag:0x%x src0:0x%x src1:0x%x\n",
+ job200->user_input.perf_counter_flag,
+ job200->user_input.perf_counter_src0,
+ job200->user_input.perf_counter_src1));
+#endif
+ }
+
+ job = GET_JOB_EMBEDDED_PTR(job200);
+
+ job->session = session;
+ job_priority_set(job, job200->user_input.priority);
+ job_watchdog_set(job, job200->user_input.watchdog_msecs );
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ job200->pid = _mali_osk_get_pid();
+ job200->tid = _mali_osk_get_tid();
+#endif
+
+ job->abort_id = job200->user_input.abort_id;
+ if (NULL != session->job_waiting_to_run)
+ {
+		/* IF NOT( newjob HAS HIGHER PRIORITY THAN waitingjob) EXIT_NOT_START newjob */
+ if(!job_has_higher_priority(job, session->job_waiting_to_run))
+ {
+ /* The job we try to add does NOT have higher pri than current */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ goto function_exit;
+ }
+ }
+
+	/* We now know that we have a job, and an empty session slot to put it in */
+
+ job200->active_mask = MALI200_REG_VAL_IRQ_MASK_USED;
+
+ /* Allocating User Return Data */
+ job200->notification_obj = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_PP_FINISHED,
+ sizeof(_mali_uk_pp_job_finished_s) );
+
+ if ( NULL == job200->notification_obj )
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Could not get notification_obj.\n")) ;
+ err = _MALI_OSK_ERR_NOMEM;
+ goto function_exit;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD( &(job->list) ) ;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x INPUT from user.\n", (u32)job200->user_input.user_job_ptr)) ;
+
+ /* This should not happen since we have the checking of priority above */
+ if ( _MALI_OSK_ERR_OK != mali_core_session_add_job(session, job, &previous_replaced_job))
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Internal error\n")) ;
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ _mali_osk_notification_delete( job200->notification_obj );
+ goto function_exit;
+ }
+
+	/* If non-NULL: this session had a job with lower priority which was removed.
+	This replaced job is given back to userspace. */
+ if ( NULL != previous_replaced_job )
+ {
+ mali200_job *previous_replaced_job200;
+
+ previous_replaced_job200 = GET_JOB200_PTR(previous_replaced_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Replacing job: 0x%08x\n", (u32)previous_replaced_job200->user_input.user_job_ptr)) ;
+
+		/* Copy into the input data (which is also output data) the
+		pointer to the job that was replaced, so that the userspace
+		driver can put this job at the front of its job queue */
+
+ user_ptr_job_input->returned_user_job_ptr = previous_replaced_job200->user_input.user_job_ptr;
+
+ /** @note failure to 'copy to user' at this point must not free job200,
+ * and so no transaction rollback required in the U/K interface */
+
+ /* This does not cause job200 to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED;
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_get_new_job_from_user: Job added, prev returned\n")) ;
+ }
+ else
+ {
+ /* This does not cause job200 to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED;
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_get_new_job_from_user: Job added\n")) ;
+ }
+
+function_exit:
+ if (_MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE == user_ptr_job_input->status
+ || _MALI_OSK_ERR_OK != err )
+ {
+ _mali_osk_free(job200);
+ }
+ MALI_ERROR(err);
+}
+
+/* This function is called from the ioctl function and should write the necessary data
+to userspace telling which job was finished and the status and debuginfo for this job.
+The function must also free and cleanup the input job object. */
+static void subsystem_mali200_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status)
+{
+ mali200_job *job200;
+ _mali_uk_pp_job_finished_s * job_out;
+ _mali_uk_pp_start_job_s * job_input;
+ mali_core_session *session;
+
+ if (NULL == job)
+ {
+ MALI_DEBUG_PRINT(1, ("subsystem_mali200_return_job_to_user received a NULL ptr\n"));
+ return;
+ }
+
+ job200 = _MALI_OSK_CONTAINER_OF(job, mali200_job, embedded_core_job);
+
+ if (NULL == job200->notification_obj)
+ {
+ MALI_DEBUG_PRINT(1, ("Found job200 with NULL notification object, abandoning userspace sending\n"));
+ return;
+ }
+
+ job_out = job200->notification_obj->result_buffer;
+ job_input= &(job200->user_input);
+ session = job->session;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x OUTPUT to user. Runtime: %dms\n",
+ (u32)job200->user_input.user_job_ptr,
+ job->render_time_msecs)) ;
+
+ _mali_osk_memset(job_out, 0 , sizeof(_mali_uk_pp_job_finished_s));
+
+ job_out->user_job_ptr = job_input->user_job_ptr;
+
+ switch( end_status )
+ {
+ case JOB_STATUS_CONTINUE_RUN:
+ case JOB_STATUS_END_SUCCESS:
+ case JOB_STATUS_END_OOM:
+ case JOB_STATUS_END_ABORT:
+ case JOB_STATUS_END_TIMEOUT_SW:
+ case JOB_STATUS_END_HANG:
+ case JOB_STATUS_END_SEG_FAULT:
+ case JOB_STATUS_END_ILLEGAL_JOB:
+ case JOB_STATUS_END_UNKNOWN_ERR:
+ case JOB_STATUS_END_SHUTDOWN:
+ case JOB_STATUS_END_SYSTEM_UNUSABLE:
+ job_out->status = (mali_subsystem_job_end_code) end_status;
+ break;
+
+ default:
+ job_out->status = JOB_STATUS_END_UNKNOWN_ERR ;
+ }
+ job_out->irq_status = job200->irq_status;
+ job_out->perf_counter0 = job200->perf_counter0;
+ job_out->perf_counter1 = job200->perf_counter1;
+ job_out->render_time = job->render_time_msecs;
+
+#if defined(USING_MALI400_L2_CACHE)
+ job_out->perf_counter_l2_src0 = job200->perf_counter_l2_src0;
+ job_out->perf_counter_l2_src1 = job200->perf_counter_l2_src1;
+ job_out->perf_counter_l2_val0 = job200->perf_counter_l2_val0;
+ job_out->perf_counter_l2_val1 = job200->perf_counter_l2_val1;
+ job_out->perf_counter_l2_val0_raw = job200->perf_counter_l2_val0_raw;
+ job_out->perf_counter_l2_val1_raw = job200->perf_counter_l2_val1_raw;
+#endif
+
+ _mali_osk_notification_queue_send( session->notification_queue, job200->notification_obj);
+ job200->notification_obj = NULL;
+
+ _mali_osk_free(job200);
+}
+
+static void subsystem_mali200_renderunit_delete(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP: mali200_renderunit_delete\n"));
+ _mali_osk_free(core);
+}
+
+static void mali200_reset_hard(struct mali_core_renderunit * core)
+{
+ const int reset_finished_loop_count = 15;
+ const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_renderunit_reset_core_hard called for core %s\n", core->description));
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_invalid_value);
+
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT,
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
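+	/* Poll for reset completion by writing a known check value to a scratch register; once the written value reads back, the core has come out of reset. */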
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_core_renderunit_register_read(core, reset_wait_target_register))
+ {
+ MALI_DEBUG_PRINT(5, ("Reset loop exiting after %d iterations\n", i));
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_DEBUG_PRINT(1, ("The reset loop didn't work\n"));
+ }
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+}
+
+static void subsystem_mali200_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP: renderunit_reset_core\n"));
+
+ switch (style)
+ {
+ case MALI_CORE_RESET_STYLE_RUNABLE:
+ mali200_reset(core);
+ break;
+ case MALI_CORE_RESET_STYLE_DISABLE:
+ mali200_raw_reset(core); /* do the raw reset */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* then disable the IRQs */
+ break;
+ case MALI_CORE_RESET_STYLE_HARD:
+ mali200_reset_hard(core);
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("Unknown reset type %d\n", style));
+ }
+}
+
+static void subsystem_mali200_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t subsystem_mali200_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+
+ if ( MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout )
+ {
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job( _mali_uk_pp_start_job_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_start_job(session, args);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_number_of_cores_get(session, &args->number_of_cores);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_core_version_get(session, &args->version);
+}
+
+void _mali_ukk_pp_abort_job( _mali_uk_pp_abort_job_s * args)
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ if (NULL == args->ctx) return;
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ if (NULL == session) return;
+ mali_core_subsystem_ioctl_abort_job(session, args->abort_id);
+
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t malipp_signal_power_up( u32 core_num, mali_bool queue_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali PP: signal power up core: %d - queue_only: %d\n", core_num, queue_only ));
+
+ return( mali_core_subsystem_signal_power_up( &subsystem_mali200, core_num, queue_only ) );
+}
+
+_mali_osk_errcode_t malipp_signal_power_down( u32 core_num, mali_bool immediate_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali PP: signal power down core: %d - immediate_only: %d\n", core_num, immediate_only ));
+
+ return( mali_core_subsystem_signal_power_down( &subsystem_mali200, core_num, immediate_only ) );
+}
+
+#endif
+
+#if MALI_STATE_TRACKING
+void mali200_subsystem_dump_state(void)
+{
+ mali_core_renderunit_dump_state(&subsystem_mali200);
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h
new file mode 100644
index 00000000000..08516c56a9f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+ #if defined(_DEBUG)
+ #define DEBUG
+ #endif
+#endif
+
+/* This file includes several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...) Do not use this macro directly: it is also included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr <= mali_debug_level.
+ * - MALI_DEBUG_PRINT_ERROR( (X) ) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_PRINT_ASSERT(exp, (X)) If the asserted expression is false, the message is printed and a breakpoint is triggered.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a NULL pointer.
+ * - MALI_DEBUG_CODE( X ) The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra set of parentheses around the argument list.
+ *
+ * The printf function MALI_PRINTF(...) is routed to _mali_osk_dbgmsg.
+ *
+ * Suggested range for the debug level is [1:6], where
+ * [1:2] are messages with the highest priority, indicating possible errors.
+ * [3:4] are messages with medium priority, outputting important variables.
+ * [5:6] are messages with low priority, used during extensive debugging.
+ */
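+
+/* A short illustrative example of the extra-parenthesis convention (the
+ * variable names below are hypothetical):
+ *
+ *   MALI_DEBUG_PRINT(3, ("core %s: irq_readout=0x%08X\n", core_name, irq_readout));
+ *   MALI_DEBUG_ASSERT_POINTER(core_name);
+ *
+ * The inner parentheses turn the format string and its arguments into a single
+ * macro argument, which MALI_PRINTF then passes on to _mali_osk_dbgmsg.
+ */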
+
+ /**
+ * Fundamental error macro. Reports an error code. This is abstracted to allow us to
+ * easily switch to a different error reporting method if we want, and also to allow
+ * us to search for error returns easily.
+ *
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+ */
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given is anything other than _MALI_OSK_NO_ERROR,
+ * then the value is returned from the enclosing function as an error code. This effectively
+ * acts as a guard clause, and propagates error values up the call stack. This uses a
+ * temporary value to ensure that the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error, this error will be
+ * returned without evaluating the expression in MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
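+
+/* Illustrative guard-clause usage (setup_core and configure_core below are
+ * hypothetical):
+ *
+ *   static _mali_osk_errcode_t setup_core(mali_core_renderunit *core)
+ *   {
+ *       MALI_CHECK_NON_NULL(core, _MALI_OSK_ERR_INVALID_ARGS);
+ *       MALI_CHECK_NO_ERROR(configure_core(core));
+ *       MALI_SUCCESS;
+ *   }
+ *
+ * A NULL core returns _MALI_OSK_ERR_INVALID_ARGS immediately; any error code
+ * returned by configure_core() is propagated to the caller unchanged.
+ */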
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most usually used to execute some clean-up code before
+ * exiting with a call to ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+
+#define MALI_PRINT_ERROR(args) do{ \
+ MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+ MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)) ; \
+ MALI_PRINTF(args); \
+ MALI_PRINTF(("\n")); \
+ } while(0)
+
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF(("Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+extern int mali_debug_level;
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+ if((level) <= mali_debug_level)\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+ } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+ if((condition)&&((level) <= mali_debug_level))\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+ else if((level) <= mali_debug_level)\
+ { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(string,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c
new file mode 100644
index 00000000000..a40808bd2d8
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c
@@ -0,0 +1,892 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_gp.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_rendercore.h"
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MALI_PMM
+#include "mali_pmm.h"
+#endif /* USING_MALI_PMM */
+
+/* platform specific set up */
+#include "mali_platform.h"
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_core_id = -1;
+
+/** Pointer to table of resource definitions available to the Mali driver.
+ * _mali_osk_resources_init() sets up the pointer to this table.
+ */
+static _mali_osk_resource_t *arch_configuration = NULL;
+
+/** Number of resources initialized by _mali_osk_resources_init() */
+static u32 num_resources;
+
+static _mali_osk_errcode_t register_resources( _mali_osk_resource_t **arch_configuration, u32 num_resources );
+
+static _mali_osk_errcode_t initialize_subsystems(void);
+static void terminate_subsystems(void);
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_setup(mali_kernel_subsystem_identifier id);
+static void mali_kernel_subsystem_core_cleanup(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t mali_kernel_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t mali_kernel_subsystem_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+static _mali_osk_errcode_t build_system_info(void);
+
+/**
+ * @brief handler for MEM_VALIDATION resources
+ *
+ * This resource handler is common to all memory systems. It provides a default
+ * means for validating requests to map in external memory via
+ * _mali_ukk_map_external_mem. In addition, if _mali_ukk_va_to_pa is
+ * implemented, then _mali_ukk_va_to_pa can make use of this MEM_VALIDATION
+ * resource.
+ *
+ * MEM_VALIDATION also provides a CPU physical to Mali physical address
+ * translation, for use by _mali_ukk_map_external_mem.
+ *
+ * @note MEM_VALIDATION resources are only to handle simple cases where a
+ * certain physical address range is allowed to be mapped in by any process,
+ * e.g. a framebuffer at a fixed location. If the implementor has more complex
+ * mapping requirements, then they must either:
+ * - implement their own memory validation function
+ * - or, integrate with UMP.
+ *
+ * @param resource The resource to handle (type MEM_VALIDATION)
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+static _mali_osk_errcode_t mali_kernel_core_resource_mem_validation(_mali_osk_resource_t * resource);
+
+/* MEM_VALIDATION handler state */
+typedef struct
+{
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+ s32 cpu_usage_adjust; /**< Offset to add to Mali Physical address to obtain CPU physical address */
+} _mali_mem_validation_t;
+
+#define INVALID_MEM 0xffffffff
+
+static _mali_mem_validation_t mem_validator = { INVALID_MEM, INVALID_MEM, -1 };
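+
+/* An illustrative MEM_VALIDATION entry as it could appear in a platform's
+ * arch configuration table (the description, addresses and size below are
+ * hypothetical):
+ *
+ *   {
+ *       .type = MEM_VALIDATION,
+ *       .description = "Framebuffer",
+ *       .base = 0x5F000000,
+ *       .size = 0x01000000,
+ *       .cpu_usage_adjust = 0,
+ *   }
+ *
+ * Such an entry allows any session to map that page-aligned physical range via
+ * _mali_ukk_map_external_mem, with no offset between CPU and Mali addresses.
+ */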
+
+static struct mali_kernel_subsystem mali_subsystem_core =
+{
+ mali_kernel_subsystem_core_setup, /* startup */
+ mali_kernel_subsystem_core_cleanup, /* shutdown */
+ NULL, /* load_complete */
+ mali_kernel_subsystem_core_system_info_fill, /* system_info_fill */
+ mali_kernel_subsystem_core_session_begin, /* session_begin */
+ NULL, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+static struct mali_kernel_subsystem * subsystems[] =
+{
+ /* always initialize the hw subsystems first */
+ /* always included */
+ &mali_subsystem_memory,
+
+#if USING_MALI_PMM
+ /* The PMM must be initialized before any cores - including L2 cache */
+ &mali_subsystem_pmm,
+#endif
+
+ /* The rendercore subsystem must be initialized before any subsystem based on the
+ * rendercores is started e.g. mali_subsystem_mali200 and mali_subsystem_gp2 */
+ &mali_subsystem_rendercore,
+
+ /* add reference to the subsystem */
+ &mali_subsystem_mali200,
+
+ /* add reference to the subsystem */
+ &mali_subsystem_gp2,
+
+#if defined USING_MALI400_L2_CACHE
+ &mali_subsystem_l2_cache,
+#endif
+
+ /* always included */
+ /* NOTE Keep the core entry at the tail of the list */
+ &mali_subsystem_core
+};
+
+#define SUBSYSTEMS_COUNT ( sizeof(subsystems) / sizeof(subsystems[0]) )
+
+/* Pointers to this type available as incomplete struct in mali_kernel_session_manager.h */
+struct mali_session_data
+{
+ void * subsystem_data[SUBSYSTEMS_COUNT];
+ _mali_osk_notification_queue_t * ioctl_queue;
+};
+
+static mali_kernel_resource_registrator resource_handler[RESOURCE_TYPE_COUNT] = { NULL, };
+
+/* system info variables */
+static _mali_osk_lock_t *system_info_lock = NULL;
+static _mali_system_info * system_info = NULL;
+static u32 system_info_size = 0;
+
+/* is called from OS specific driver entry point */
+_mali_osk_errcode_t mali_kernel_constructor( void )
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_platform_init(NULL);
+ if (_MALI_OSK_ERR_OK != err) goto error1;
+
+ err = _mali_osk_init();
+ if (_MALI_OSK_ERR_OK != err) goto error2;
+
+ MALI_DEBUG_PRINT(2, ("\n"));
+ MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n",_MALI_API_VERSION));
+ MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+ MALI_DEBUG_PRINT(2, ("Svn revision: %s\n", SVN_REV_STRING));
+
+ err = initialize_subsystems();
+ if (_MALI_OSK_ERR_OK != err) goto error3;
+
+ MALI_PRINT(("Mali device driver %s loaded\n", SVN_REV_STRING));
+
+ MALI_SUCCESS;
+
+error3:
+ MALI_PRINT(("Mali subsystems failed\n"));
+ _mali_osk_term();
+error2:
+ MALI_PRINT(("Mali device driver init failed\n"));
+ if (_MALI_OSK_ERR_OK != mali_platform_deinit(NULL))
+ {
+ MALI_PRINT(("Failed to deinit platform\n"));
+ }
+error1:
+ MALI_PRINT(("Failed to init platform\n"));
+ MALI_ERROR(err);
+}
+
+/* is called from OS specific driver exit point */
+void mali_kernel_destructor( void )
+{
+ MALI_DEBUG_PRINT(2, ("\n"));
+ MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n",_MALI_API_VERSION));
+ terminate_subsystems(); /* subsystems are responsible for their registered resources */
+ _mali_osk_term();
+
+ if (_MALI_OSK_ERR_OK != mali_platform_deinit(NULL))
+ {
+ MALI_PRINT(("Failed to deinit platform\n"));
+ }
+ MALI_DEBUG_PRINT(2, ("Module unloaded.\n"));
+}
+
+_mali_osk_errcode_t register_resources( _mali_osk_resource_t **arch_configuration, u32 num_resources )
+{
+ _mali_osk_resource_t *arch_resource = *arch_configuration;
+ u32 i;
+#if USING_MALI_PMM
+ u32 is_pmu_first_resource = 1;
+#endif /* USING_MALI_PMM */
+
+ /* loop over arch configuration */
+ for (i = 0; i < num_resources; ++i, arch_resource++)
+ {
+ if ( (arch_resource->type >= RESOURCE_TYPE_FIRST) &&
+ (arch_resource->type < RESOURCE_TYPE_COUNT) &&
+ (NULL != resource_handler[arch_resource->type])
+ )
+ {
+#if USING_MALI_PMM
+ if((arch_resource->type != PMU) && (is_pmu_first_resource == 1))
+ {
+ _mali_osk_resource_t mali_pmu_virtual_resource;
+ mali_pmu_virtual_resource.type = PMU;
+ mali_pmu_virtual_resource.description = "Virtual PMU";
+ mali_pmu_virtual_resource.base = 0x00000000;
+ mali_pmu_virtual_resource.cpu_usage_adjust = 0;
+ mali_pmu_virtual_resource.size = 0;
+ mali_pmu_virtual_resource.irq = 0;
+ mali_pmu_virtual_resource.flags = 0;
+ mali_pmu_virtual_resource.mmu_id = 0;
+ mali_pmu_virtual_resource.alloc_order = 0;
+ MALI_CHECK_NO_ERROR(resource_handler[mali_pmu_virtual_resource.type](&mali_pmu_virtual_resource));
+ }
+ is_pmu_first_resource = 0;
+#endif /* USING_MALI_PMM */
+
+ MALI_CHECK_NO_ERROR(resource_handler[arch_resource->type](arch_resource));
+ /* the subsystem shutdown process will release all the resources already registered */
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("No handler installed for resource %s, type %d\n", arch_resource->description, arch_resource->type));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t initialize_subsystems(void)
+{
+ int i, j;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT; /* default error code */
+
+ MALI_CHECK_NON_NULL(system_info_lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0 ), _MALI_OSK_ERR_FAULT);
+
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->startup)
+ {
+ /* the subsystem has a startup function defined */
+ err = subsystems[i]->startup(i); /* the subsystem identifier is the offset in our subsystems array */
+ if (_MALI_OSK_ERR_OK != err) goto cleanup;
+ }
+ }
+
+ for (j = 0; j < (int)SUBSYSTEMS_COUNT; ++j)
+ {
+ if (NULL != subsystems[j]->load_complete)
+ {
+ /* the subsystem has a load_complete function defined */
+ err = subsystems[j]->load_complete(j);
+ if (_MALI_OSK_ERR_OK != err) goto cleanup;
+ }
+ }
+
+ /* All systems loaded and resources registered */
+ /* Build system info */
+ if (_MALI_OSK_ERR_OK != build_system_info()) goto cleanup;
+
+ MALI_SUCCESS; /* all ok */
+
+cleanup:
+	/* i is the index of the subsystem which failed to start; all indices before that have to be shut down */
+ for (i = i - 1; i >= 0; --i)
+ {
+ /* the subsystem identifier is the offset in our subsystems array */
+		/* Call possible shutdown notification functions */
+ if (NULL != subsystems[i]->shutdown) subsystems[i]->shutdown(i);
+ }
+
+ _mali_osk_lock_term( system_info_lock );
+ MALI_ERROR(err); /* err is what the module which failed its startup returned, or the default */
+}
+
+static void terminate_subsystems(void)
+{
+ int i;
+ /* shut down subsystems in reverse order from startup */
+ for (i = SUBSYSTEMS_COUNT - 1; i >= 0; --i)
+ {
+ /* the subsystem identifier is the offset in our subsystems array */
+ if (NULL != subsystems[i]->shutdown) subsystems[i]->shutdown(i);
+ }
+ if (system_info_lock) _mali_osk_lock_term( system_info_lock );
+}
+
+void _mali_kernel_core_broadcast_subsystem_message(mali_core_notification_message message, u32 data)
+{
+ int i;
+
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->broadcast_notification)
+ {
+ subsystems[i]->broadcast_notification(message, data);
+ }
+ }
+}
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_setup(mali_kernel_subsystem_identifier id)
+{
+ mali_subsystem_core_id = id;
+
+ /* Register our own resources */
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MEM_VALIDATION, mali_kernel_core_resource_mem_validation));
+
+ /* parse the arch resource definition and tell all the subsystems */
+ /* this is why the core subsystem has to be specified last in the subsystem array */
+ MALI_CHECK_NO_ERROR(_mali_osk_resources_init(&arch_configuration, &num_resources));
+
+ MALI_CHECK_NO_ERROR(register_resources(&arch_configuration, num_resources));
+
+	/* resource parsing succeeded and the subsystems have correctly accepted their resources */
+ MALI_SUCCESS;
+}
+
+static void mali_kernel_subsystem_core_cleanup(mali_kernel_subsystem_identifier id)
+{
+ _mali_osk_resources_term(&arch_configuration, num_resources);
+}
+
+
+static _mali_osk_errcode_t build_system_info(void)
+{
+ unsigned int i;
+ int err = _MALI_OSK_ERR_FAULT;
+ _mali_system_info * new_info, * cleanup;
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+ u32 new_size = 0;
+
+ /* create a new system info struct */
+ MALI_CHECK_NON_NULL(new_info = (_mali_system_info *)_mali_osk_malloc(sizeof(_mali_system_info)), _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(new_info, 0, sizeof(_mali_system_info));
+
+ /* if an error happens during any of the system_info_fill calls cleanup the new info structs */
+ cleanup = new_info;
+
+ /* ask each subsystems to fill in their info */
+ for (i = 0; i < SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->system_info_fill)
+ {
+ err = subsystems[i]->system_info_fill(new_info);
+ if (_MALI_OSK_ERR_OK != err) goto error_exit;
+ }
+ }
+
+ /* building succeeded, calculate the size */
+
+ /* size needed of the system info struct itself */
+ new_size = sizeof(_mali_system_info);
+
+ /* size needed for the cores */
+ for (current_core = new_info->core_info; NULL != current_core; current_core = current_core->next)
+ {
+ new_size += sizeof(_mali_core_info);
+ }
+
+ /* size needed for the memory banks */
+ for (current_mem = new_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
+ {
+ new_size += sizeof(_mali_mem_info);
+ }
+
+	/* lock system info access so a user won't get a corrupted version */
+ _mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* cleanup the old one */
+ cleanup = system_info;
+ /* set new info */
+ system_info = new_info;
+ system_info_size = new_size;
+
+ /* we're safe */
+ _mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* ok result */
+ err = _MALI_OSK_ERR_OK;
+
+ /* we share the cleanup routine with the error case */
+error_exit:
+ if (NULL == cleanup) MALI_ERROR((_mali_osk_errcode_t)err); /* no cleanup needed, return what err contains */
+
+ /* cleanup */
+
+ /* delete all the core info structs */
+ while (NULL != cleanup->core_info)
+ {
+ current_core = cleanup->core_info;
+ cleanup->core_info = cleanup->core_info->next;
+ _mali_osk_free(current_core);
+ }
+
+	/* delete all the mem info structs */
+ while (NULL != cleanup->mem_info)
+ {
+ current_mem = cleanup->mem_info;
+ cleanup->mem_info = cleanup->mem_info->next;
+ _mali_osk_free(current_mem);
+ }
+
+ /* delete the system info struct itself */
+ _mali_osk_free(cleanup);
+
+ /* return whatever err is, we could end up here in both the error and success cases */
+ MALI_ERROR((_mali_osk_errcode_t)err);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* check compatibility */
+ if ( args->version == _MALI_UK_API_VERSION )
+ {
+ args->compatible = 1;
+ }
+ else
+ {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_system_info_size(_mali_uk_get_system_info_size_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ args->size = system_info_size;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args )
+{
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ void * current_write_pos, ** current_patch_pos;
+ u32 adjust_ptr_base;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->system_info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* lock the system info */
+ _mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* first check size */
+ if (args->size < system_info_size) goto exit_when_locked;
+
+ /* we build a copy of system_info in the user space buffer specified by the user and
+ * patch up the pointers. The ukk_private members of _mali_uk_get_system_info_s may
+ * indicate a different base address for patching the pointers (normally the
+ * address of the provided system_info buffer would be used). This is helpful when
+ * the system_info buffer needs to get copied to user space and the pointers need
+ * to be in user space.
+ */
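+	/* For example (hypothetical values): if this kernel-side buffer is later
+	 * copied to a user-space buffer at 0x40000000, the caller sets
+	 * ukk_private = 0x40000000 so that every patched pointer is expressed
+	 * relative to the user-space copy rather than to this staging buffer. */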
+ if (0 == args->ukk_private)
+ {
+ adjust_ptr_base = (u32)args->system_info;
+ }
+ else
+ {
+ adjust_ptr_base = args->ukk_private;
+ }
+
+ /* copy each struct into the buffer, and update its pointers */
+ current_write_pos = (void *)args->system_info;
+
+ /* first, the master struct */
+ _mali_osk_memcpy(current_write_pos, system_info, sizeof(_mali_system_info));
+
+ /* advance write pointer */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_system_info));
+
+ /* first we write the core info structs, patch starts at master's core_info pointer */
+ current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, core_info));
+
+ for (current_core = system_info->core_info; NULL != current_core; current_core = current_core->next)
+ {
+
+ /* patch the pointer pointing to this core */
+ *current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));
+
+ /* copy the core info */
+ _mali_osk_memcpy(current_write_pos, current_core, sizeof(_mali_core_info));
+
+ /* update patch pos */
+ current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_core_info, next));
+
+ /* advance write pos in memory */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_core_info));
+ }
+ /* patching of last patch pos is not needed, since we wrote NULL there in the first place */
+
+ /* then we write the mem info structs, patch starts at master's mem_info pointer */
+ current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, mem_info));
+
+ for (current_mem = system_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
+ {
+		/* patch the pointer pointing to this mem info struct */
+ *current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));
+
+		/* copy the mem info */
+ _mali_osk_memcpy(current_write_pos, current_mem, sizeof(_mali_mem_info));
+
+ /* update patch pos */
+ current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_mem_info, next));
+
+ /* advance write pos in memory */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_mem_info));
+ }
+ /* patching of last patch pos is not needed, since we wrote NULL there in the first place */
+
+ err = _MALI_OSK_ERR_OK;
+exit_when_locked:
+ _mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+ MALI_ERROR(err);
+}
+
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_notification_t * notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = (_mali_osk_notification_queue_t *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_core_id);
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+ MALI_SUCCESS;
+ }
+
+ /* receive a notification, might sleep */
+ err = _mali_osk_notification_queue_receive(queue, &notification);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_ERROR(err); /* errcode returned, pass on to caller */
+ }
+
+ /* copy the buffer to the user */
+ args->type = (_mali_uk_notification_type)notification->notification_type;
+ _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+ /* finished with the notification */
+ _mali_osk_notification_delete( notification );
+
+ MALI_SUCCESS; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args )
+{
+ _mali_osk_notification_t * notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = (_mali_osk_notification_queue_t *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_core_id);
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ MALI_SUCCESS;
+ }
+
+ notification = _mali_osk_notification_create(args->type, 0);
+ if ( NULL == notification)
+ {
+ MALI_PRINT_ERROR( ("Failed to create notification object\n")) ;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _mali_osk_notification_queue_send(queue, notification);
+
+ MALI_SUCCESS; /* all ok */
+}
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ info->drivermode = _MALI_DRIVER_MODE_NORMAL;
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ MALI_CHECK_NON_NULL(slot, _MALI_OSK_ERR_INVALID_ARGS);
+ *slot = queue;
+ MALI_SUCCESS;
+}
+
+/* MEM_VALIDATION resource handler */
+static _mali_osk_errcode_t mali_kernel_core_resource_mem_validation(_mali_osk_resource_t * resource)
+{
+ /* Check that no other MEM_VALIDATION resources exist */
+ MALI_CHECK( ((u32)-1) == mem_validator.phys_base, _MALI_OSK_ERR_FAULT );
+
+ /* Check restrictions on page alignment */
+ MALI_CHECK( 0 == (resource->base & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == (resource->size & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == (resource->cpu_usage_adjust & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+
+ mem_validator.phys_base = resource->base;
+ mem_validator.size = resource->size;
+ mem_validator.cpu_usage_adjust = resource->cpu_usage_adjust;
+ MALI_DEBUG_PRINT( 2, ("Memory Validator '%s' installed for Mali physical address base==0x%08X, size==0x%08X, cpu_adjust==0x%08X\n",
+ resource->description, mem_validator.phys_base, mem_validator.size, mem_validator.cpu_usage_adjust ));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_kernel_core_translate_cpu_to_mali_phys_range( u32 *phys_base, u32 size )
+{
+ u32 mali_phys_base;
+
+ mali_phys_base = *phys_base - mem_validator.cpu_usage_adjust;
+
+ MALI_CHECK( 0 == ( mali_phys_base & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+
+ MALI_CHECK_NO_ERROR( mali_kernel_core_validate_mali_phys_range( mali_phys_base, size ) );
+
+ *phys_base = mali_phys_base;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_kernel_core_validate_mali_phys_range( u32 phys_base, u32 size )
+{
+ MALI_CHECK_GOTO( 0 == ( phys_base & (~_MALI_OSK_CPU_PAGE_MASK)), failure );
+ MALI_CHECK_GOTO( 0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)), failure );
+
+ if ( phys_base >= mem_validator.phys_base
+ && (phys_base + size) >= mem_validator.phys_base
+ && phys_base <= (mem_validator.phys_base + mem_validator.size)
+ && (phys_base + size) <= (mem_validator.phys_base + mem_validator.size) )
+ {
+ MALI_SUCCESS;
+ }
+
+ failure:
+ MALI_PRINTF( ("*******************************************************************************\n") );
+ MALI_PRINTF( ("MALI PHYSICAL RANGE VALIDATION ERROR!\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("We failed to validate a Mali-Physical range that the user-side wished to map in\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("It is likely that the user-side wished to do Direct Rendering, but a suitable\n") );
+ MALI_PRINTF( ("address range validation mechanism has not been correctly setup\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_base, size) );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("Please refer to the ARM Mali Software Integration Guide for more information.\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("*******************************************************************************\n") );
+
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+}
+
+
+_mali_osk_errcode_t _mali_kernel_core_register_resource_handler(_mali_osk_resource_type_t type, mali_kernel_resource_registrator handler)
+{
+ MALI_CHECK(type < RESOURCE_TYPE_COUNT, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT(NULL == resource_handler[type]); /* A handler for resource already exists */
+ resource_handler[type] = handler;
+ MALI_SUCCESS;
+}
+
+void * mali_kernel_session_manager_slot_get(struct mali_session_data * session_data, int id)
+{
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ if(id >= SUBSYSTEMS_COUNT) { MALI_DEBUG_PRINT(3, ("mali_kernel_session_manager_slot_get: id %d out of range\n", id)); return NULL; }
+
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3, ("mali_kernel_session_manager_slot_get: got NULL session data\n")); return NULL; }
+ return session_data->subsystem_data[id];
+}
+
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+ int i;
+ _mali_osk_errcode_t err;
+ struct mali_session_data * session_data;
+
+	/* allocate a struct to track this session */
+ session_data = (struct mali_session_data *)_mali_osk_malloc(sizeof(struct mali_session_data));
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(session_data->subsystem_data, 0, sizeof(session_data->subsystem_data));
+
+ /* create a response queue for this session */
+ session_data->ioctl_queue = _mali_osk_notification_queue_init();
+ if (NULL == session_data->ioctl_queue)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+ /* call session_begin on all subsystems */
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->session_begin)
+ {
+ /* subsystem has a session_begin */
+ err = subsystems[i]->session_begin(session_data, &session_data->subsystem_data[i], session_data->ioctl_queue);
+ MALI_CHECK_GOTO(err == _MALI_OSK_ERR_OK, cleanup);
+ }
+ }
+
+ *context = (void*)session_data;
+
+ MALI_DEBUG_PRINT(3, ("Session started\n"));
+ MALI_SUCCESS;
+
+cleanup:
+ MALI_DEBUG_PRINT(2, ("Session startup failed\n"));
+	/* i is the index of the subsystem which failed session begin; all indices before that have to be ended */
+	/* end subsystem sessions in the reverse order they were started in */
+ for (i = i - 1; i >= 0; --i)
+ {
+ if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
+ }
+
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+
+ /* return what the subsystem which failed session start returned */
+ MALI_ERROR(err);
+}
+
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+ int i;
+ struct mali_session_data * session_data;
+
+ MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)*context;
+
+ MALI_DEBUG_PRINT(2, ("Session ending\n"));
+
+	/* end subsystem sessions in the reverse order they were started in */
+ for (i = SUBSYSTEMS_COUNT - 1; i >= 0; --i)
+ {
+ if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
+ }
+
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+
+ *context = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+ MALI_SUCCESS;
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t mali_core_signal_power_up( mali_pmm_core_id core, mali_bool queue_only )
+{
+ switch( core )
+ {
+ case MALI_PMM_CORE_GP:
+ MALI_CHECK_NO_ERROR(maligp_signal_power_up(queue_only));
+ break;
+#if defined USING_MALI400_L2_CACHE
+ case MALI_PMM_CORE_L2:
+ if( !queue_only )
+ {
+ /* Enable L2 cache due to power up */
+ mali_kernel_l2_cache_do_enable();
+
+ /* Invalidate the cache on power up */
+ MALI_DEBUG_PRINT(5, ("L2 Cache: Invalidate all\n"));
+ MALI_CHECK_NO_ERROR(mali_kernel_l2_cache_invalidate_all());
+ }
+ break;
+#endif
+ case MALI_PMM_CORE_PP0:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(0, queue_only));
+ break;
+ case MALI_PMM_CORE_PP1:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(1, queue_only));
+ break;
+ case MALI_PMM_CORE_PP2:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(2, queue_only));
+ break;
+ case MALI_PMM_CORE_PP3:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(3, queue_only));
+ break;
+ default:
+ /* Unknown core */
+ MALI_DEBUG_PRINT_ERROR( ("Unknown core signalled with power up: %d\n", core) );
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+ }
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_core_signal_power_down( mali_pmm_core_id core, mali_bool immediate_only )
+{
+ switch( core )
+ {
+ case MALI_PMM_CORE_GP:
+ MALI_CHECK_NO_ERROR(maligp_signal_power_down(immediate_only));
+ break;
+#if defined USING_MALI400_L2_CACHE
+ case MALI_PMM_CORE_L2:
+ /* Nothing to do */
+ break;
+#endif
+ case MALI_PMM_CORE_PP0:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(0, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP1:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(1, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP2:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(2, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP3:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(3, immediate_only));
+ break;
+ default:
+ /* Unknown core */
+ MALI_DEBUG_PRINT_ERROR( ("Unknown core signalled with power down: %d\n", core) );
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+ }
+
+ MALI_SUCCESS;
+}
+
+#endif
+
+
+
+#if MALI_STATE_TRACKING
+void _mali_kernel_core_dump_state(void)
+{
+ int i;
+ for (i = 0; i < SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->dump_state)
+ {
+ subsystems[i]->dump_state();
+ }
+ }
+#if USING_MALI_PMM
+ mali_pmm_dump_os_thread_state();
+#endif
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h
new file mode 100644
index 00000000000..3819ecc5e1c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+#if USING_MALI_PMM
+#include "mali_ukk.h"
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#endif
+
+_mali_osk_errcode_t mali_kernel_constructor( void );
+void mali_kernel_destructor( void );
+
+/**
+ * @brief Translate CPU physical to Mali physical addresses.
+ *
+ * This function is used to convert CPU physical addresses to Mali Physical
+ * addresses, such that _mali_ukk_map_external_mem may be used to map them
+ * into Mali. This will be used by _mali_ukk_va_to_mali_pa.
+ *
+ * This function only supports physically contiguous regions.
+ *
+ * A default implementation is provided, which uses a registered MEM_VALIDATION
+ * resource to do a static translation. Only an address range which will lie
+ * in the range specified by MEM_VALIDATION will be successfully translated.
+ *
+ * If a more complex, or non-static translation is required, then the
+ * implementor has the following options:
+ * - Rewrite this function to provide such a translation
+ * - Integrate the provider of the memory with UMP.
+ *
+ * @param[in,out] phys_base pointer to the page-aligned base address of the
+ * physical range to be translated
+ *
+ * @param[in] size size of the address range to be translated, which must be a
+ * multiple of the physical page size.
+ *
+ * @return on success, _MALI_OSK_ERR_OK and *phys_base is translated. If the
+ * cpu physical address range is not in the valid range, then a suitable
+ * _mali_osk_errcode_t error.
+ *
+ */
+_mali_osk_errcode_t mali_kernel_core_translate_cpu_to_mali_phys_range( u32 *phys_base, u32 size );
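+
+/* Illustrative call pattern (cpu_phys and buffer_size below are hypothetical).
+ * The translation is done in place, so the caller passes a pointer to the CPU
+ * physical base address:
+ *
+ *   u32 phys = cpu_phys;
+ *   err = mali_kernel_core_translate_cpu_to_mali_phys_range(&phys, buffer_size);
+ *
+ * On success, phys holds the Mali physical address of the same range; on
+ * failure, the range was either not page aligned or outside the registered
+ * MEM_VALIDATION window, and phys is left untouched.
+ */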
+
+
+/**
+ * @brief Validate a Mali physical address range.
+ *
+ * This function is used to ensure that an address range passed to
+ * _mali_ukk_map_external_mem is allowed to be mapped into Mali.
+ *
+ * This function only supports physically contiguous regions.
+ *
+ * A default implementation is provided, which uses a registered MEM_VALIDATION
+ * resource to do a static translation. Only an address range which will lie
+ * in the range specified by MEM_VALIDATION will be successfully validated.
+ *
+ * If a more complex, or non-static validation is required, then the
+ * implementor has the following options:
+ * - Rewrite this function to provide such a validation
+ * - Integrate the provider of the memory with UMP.
+ *
+ * @param phys_base page-aligned base address of the Mali physical range to be
+ * validated.
+ *
+ * @param size size of the address range to be validated, which must be a
+ * multiple of the physical page size.
+ *
+ * @return _MALI_OSK_ERR_OK if the Mali physical range is valid. Otherwise, a
+ * suitable _mali_osk_errcode_t error.
+ *
+ */
+_mali_osk_errcode_t mali_kernel_core_validate_mali_phys_range( u32 phys_base, u32 size );
+
+#if USING_MALI_PMM
+/**
+ * @brief Signal a power up on a Mali core.
+ *
+ * This function flags a core as powered up.
+ * For PP and GP cores it calls functions that move the core from a power off
+ * queue into the idle queue ready to run jobs. It also tries to schedule any
+ * pending jobs to run on it.
+ *
+ * This function will fail if the core is not powered off - either running or
+ * already idle.
+ *
+ * @param core The PMM core id to power up.
+ * @param queue_only When MALI_TRUE only re-queue the core - do not reset.
+ *
+ * @return _MALI_OSK_ERR_OK if the core has been powered up. Otherwise a
+ * suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_core_signal_power_up( mali_pmm_core_id core, mali_bool queue_only );
+
+/**
+ * @brief Signal a power down on a Mali core.
+ *
+ * This function flags a core as powered down.
+ * For PP and GP cores it calls functions that move the core from an idle
+ * queue into the power off queue.
+ *
+ * This function will fail if the core is not idle - either running or
+ * already powered down.
+ *
+ * @param core The PMM core id to power down.
+ * @param immediate_only Do not set the core to pending power down if it can't
+ * power down immediately.
+ *
+ * @return _MALI_OSK_ERR_OK if the core has been powered down. Otherwise a
+ * suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_core_signal_power_down( mali_pmm_core_id core, mali_bool immediate_only );
+
+#endif
+
+/**
+ * Flag to indicate whether or not mali_benchmark is turned on.
+ */
+extern int mali_benchmark;
+
+
+#endif /* __MALI_KERNEL_CORE_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c
new file mode 100644
index 00000000000..45c280edd6a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
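+
+/* MALI_PAD_INT rounds a count up to a whole number of BITS_PER_LONG bits so the
+ * usage bitmap always occupies complete unsigned longs. For example, with
+ * BITS_PER_LONG == 32, MALI_PAD_INT(5) == 32 and MALI_PAD_INT(33) == 64.
+ */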
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table * table);
+
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map)
+ {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table)
+ {
+#if !USING_MMU
+ map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 20);
+#else
+ map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 116);
+#endif
+ if (NULL != map->lock)
+ {
+ _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
+
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_lock_term(map->lock);
+ _mali_osk_free(map);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *odescriptor)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ int new_descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+ if (new_descriptor == map->current_nr_mappings)
+ {
+ /* no free descriptor, try to expand the table */
+ mali_descriptor_table * new_table, * old_table;
+ if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+ map->current_nr_mappings += BITS_PER_LONG;
+ new_table = descriptor_table_alloc(map->current_nr_mappings);
+ if (NULL == new_table) goto unlock_and_exit;
+
+ old_table = map->table;
+ _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+ _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+ map->table = new_table;
+ descriptor_table_free(old_table);
+ }
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+ map->table->mappings[new_descriptor] = target;
+ *odescriptor = new_descriptor;
+ err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(err);
+}
+
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*))
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(callback);
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	/* id 0 is skipped as it is a reserved ID that does not map to anything */
+ for (i = 1; i < map->current_nr_mappings; ++i)
+ {
+ if (_mali_osk_test_bit(i, map->table->usage))
+ {
+ callback(i, map->table->mappings[i]);
+ }
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(map);
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ *target = map->table->mappings[descriptor];
+ result = _MALI_OSK_ERR_OK;
+ }
+ else *target = NULL;
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = target;
+ result = _MALI_OSK_ERR_OK;
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+void mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
+{
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static mali_descriptor_table * descriptor_table_alloc(int count)
+{
+ mali_descriptor_table * table;
+
+ table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count));
+
+ if (NULL != table)
+ {
+ table->usage = (u32*)((u8*)table + sizeof(mali_descriptor_table));
+ table->mappings = (void**)((u8*)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ }
+
+ return table;
+}
+
+static void descriptor_table_free(mali_descriptor_table * table)
+{
+ _mali_osk_free(table);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h
new file mode 100644
index 00000000000..f626aa59455
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct mali_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct mali_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ mali_descriptor_table * table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @param descriptor Pointer which receives the newly allocated descriptor ID
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be called for each entry in the map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor);
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
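
A minimal usage sketch of the descriptor-mapping API declared above. The function name example_descriptor_usage, the entry counts and the mapped object are invented for illustration; only the declarations in this header are assumed.

/* Illustrative sketch only: map an arbitrary pointer to an integer ID and back. */
static _mali_osk_errcode_t example_descriptor_usage(void *my_object)
{
	mali_descriptor_mapping *map;
	void *looked_up = NULL;
	int descriptor;

	map = mali_descriptor_mapping_create(64, 4096);
	if (NULL == map) return _MALI_OSK_ERR_NOMEM;

	/* hand out an integer ID for my_object */
	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(map, my_object, &descriptor))
	{
		mali_descriptor_mapping_destroy(map);
		return _MALI_OSK_ERR_NOMEM;
	}

	/* resolve the ID back to the stored pointer */
	if (_MALI_OSK_ERR_OK == mali_descriptor_mapping_get(map, descriptor, &looked_up))
	{
		/* looked_up now equals my_object */
	}

	mali_descriptor_mapping_free(map, descriptor); /* the ID may now be reused */
	mali_descriptor_mapping_destroy(map);
	return _MALI_OSK_ERR_OK;
}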
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h
new file mode 100644
index 00000000000..c2467edc677
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_GP2_H__
+#define __MALI_KERNEL_GP2_H__
+
+extern struct mali_kernel_subsystem mali_subsystem_gp2;
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t maligp_signal_power_up( mali_bool queue_only );
+_mali_osk_errcode_t maligp_signal_power_down( mali_bool immediate_only );
+#endif
+
+#endif /* __MALI_KERNEL_GP2_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c
new file mode 100644
index 00000000000..fded5c93ab9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+#include "mali_kernel_core.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_kernel_l2_cache.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0002,
+ /*unused = 0x0003 */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0004, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0005,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0006, /**< Limit of outstanding read requests */
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x0007, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0008,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0009,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x000A,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x000B,
+} mali_l2_cache_register;
+
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command
+{
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+ /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache enable flags
+ * These are the values that can be written to the Mali L2 cache enable register
+ */
+typedef enum mali_l2_cache_enable
+{
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status
+{
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+typedef struct mali_kernel_l2_cache_core
+{
+ unsigned long base; /**< Physical address of the registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ u32 mapping_size; /**< Size of registers in bytes */
+ _mali_osk_list_t list; /**< Used to link multiple cache cores into a list */
+ _mali_osk_lock_t *lock; /**< Serialize all L2 cache commands */
+} mali_kernel_l2_cache_core;
+
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+
+/**
+ * Mali L2 cache subsystem startup function
+ * Called by the driver core when the driver is loaded.
+ *
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * Mali L2 cache subsystem shutdown function
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up all L2 cache units registered with the subsystem
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ */
+static void mali_l2_cache_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * L2 cache subsystem complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_load_complete(mali_kernel_subsystem_identifier id);
+
+/**
+ * Mali L2 cache subsystem's notification handler for Mali L2 cache resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each Mali L2 cache described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MALI400L2)
+ * @return 0 if the Mali L2 cache was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_core_create(_mali_osk_resource_t * resource);
+
+/**
+ * Write to a L2 cache register
+ * Writes the given value to the specified register
+ * @param unit The L2 cache to write to
+ * @param reg The register to write to
+ * @param val The value to write to the register
+ */
+static void mali_l2_cache_register_write(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg, u32 val);
+
+
+
+/**
+ * Invalidate specified L2 cache
+ * @param cache The L2 cache to invalidate
+ * @return 0 if Mali L2 cache was successfully invalidated, otherwise error
+ */
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all_cache(mali_kernel_l2_cache_core *cache);
+
+
+/*
+ The Mali L2 cache system's mali subsystem interface implementation.
+ We currently handle module life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_l2_cache =
+{
+ mali_l2_cache_initialize, /**< startup */
+ mali_l2_cache_terminate, /**< shutdown */
+ mali_l2_cache_load_complete, /**< load_complete */
+ NULL, /**< system_info_fill */
+ NULL, /**< session_begin */
+ NULL, /**< session_end */
+ NULL, /**< broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /**< dump_state */
+#endif
+};
+
+
+
+static _MALI_OSK_LIST_HEAD(caches_head);
+
+
+
+
+/* called during module init */
+static _mali_osk_errcode_t mali_l2_cache_initialize(mali_kernel_subsystem_identifier id)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_IGNORE( id );
+
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system initializing\n"));
+
+ _MALI_OSK_INIT_LIST_HEAD(&caches_head);
+
+ /* This will register the function for adding Mali L2 cache cores to the subsystem */
+ err = _mali_kernel_core_register_resource_handler(MALI400L2, mali_l2_cache_core_create);
+
+ MALI_ERROR(err);
+}
+
+
+
+/* called if/when our module is unloaded */
+static void mali_l2_cache_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system terminating\n"));
+
+ /* loop over all L2 cache units and shut them down */
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list )
+ {
+ /* reset to defaults */
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+ /* remove from the list of caches on the system */
+ _mali_osk_list_del( &cache->list );
+
+ /* release resources */
+ _mali_osk_mem_unmapioregion( cache->base, cache->mapping_size, cache->mapped_registers );
+ _mali_osk_mem_unreqregion( cache->base, cache->mapping_size );
+ _mali_osk_lock_term( cache->lock );
+ _mali_osk_free( cache );
+
+ #if USING_MALI_PMM
+ /* Unregister the L2 cache with the PMM */
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+ #endif
+ }
+}
+
+static _mali_osk_errcode_t mali_l2_cache_core_create(_mali_osk_resource_t * resource)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT ;
+ mali_kernel_l2_cache_core * cache = NULL;
+
+ MALI_DEBUG_PRINT(2, ( "Creating Mali L2 cache: %s\n", resource->description));
+
+#if USING_MALI_PMM
+ /* Register the L2 cache with the PMM */
+ err = malipmm_core_register( MALI_PMM_CORE_L2 );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to register L2 cache unit with PMM"));
+ return err;
+ }
+#endif
+
+ err = _mali_osk_mem_reqregion( resource->base, MALI400_L2_CACHE_REGISTERS_SIZE, resource->description);
+
+ MALI_CHECK_GOTO( _MALI_OSK_ERR_OK == err, err_cleanup_requestmem_failed);
+
+ /* Reset error that might be passed out */
+ err = _MALI_OSK_ERR_FAULT;
+
+ cache = _mali_osk_malloc(sizeof(mali_kernel_l2_cache_core));
+
+ MALI_CHECK_GOTO( NULL != cache, err_cleanup);
+
+ cache->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 104 );
+
+ MALI_CHECK_GOTO( NULL != cache->lock, err_cleanup);
+
+ /* basic setup */
+ _MALI_OSK_INIT_LIST_HEAD(&cache->list);
+
+ cache->base = resource->base;
+ cache->mapping_size = MALI400_L2_CACHE_REGISTERS_SIZE;
+
+ /* map the registers */
+ cache->mapped_registers = _mali_osk_mem_mapioregion( cache->base, cache->mapping_size, resource->description );
+
+ MALI_CHECK_GOTO( NULL != cache->mapped_registers, err_cleanup);
+
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ err = mali_kernel_l2_cache_invalidate_all_cache(cache);
+
+ MALI_CHECK_GOTO( _MALI_OSK_ERR_OK == err, err_cleanup);
+
+ /* add to our list of L2 caches */
+ _mali_osk_list_add( &cache->list, &caches_head );
+
+ MALI_SUCCESS;
+
+err_cleanup:
+ /* This cleanup path is used when the register region has been requested successfully */
+
+ if ( NULL != cache )
+ {
+ if (NULL != cache->mapped_registers)
+ {
+ _mali_osk_mem_unmapioregion( cache->base, cache->mapping_size, cache->mapped_registers);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to map Mali L2 cache registers at 0x%08lX\n", cache->base));
+ }
+
+ if( NULL != cache->lock )
+ {
+ _mali_osk_lock_term( cache->lock );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to allocate a lock for handling a L2 cache unit"));
+ }
+
+ _mali_osk_free( cache );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to allocate memory for handling a L2 cache unit"));
+ }
+
+ /* The register region was requested above, so the request must always be reversed here */
+ _mali_osk_mem_unreqregion( resource->base, MALI400_L2_CACHE_REGISTERS_SIZE);
+#if USING_MALI_PMM
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+#endif
+ return err;
+
+err_cleanup_requestmem_failed:
+ MALI_DEBUG_PRINT(1, ("Failed to request Mali L2 cache '%s' register address space at (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + MALI400_L2_CACHE_REGISTERS_SIZE - 1) );
+#if USING_MALI_PMM
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+#endif
+ return err;
+
+}
+
+
+static void mali_l2_cache_register_write(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg, u32 val)
+{
+ _mali_osk_mem_iowrite32(unit->mapped_registers, (u32)reg * sizeof(u32), val);
+}
+
+
+static u32 mali_l2_cache_register_read(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg)
+{
+ return _mali_osk_mem_ioread32(unit->mapped_registers, (u32)reg * sizeof(u32));
+}
+
+void mali_kernel_l2_cache_do_enable(void)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and enable them */
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+ }
+}
+
+
+static _mali_osk_errcode_t mali_l2_cache_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_l2_cache_do_enable();
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system load complete\n"));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_send_command(mali_kernel_l2_cache_core *cache, u32 reg, u32 val)
+{
+ int i = 0;
+ const int loop_count = 100000;
+
+ /*
+ * Grab lock in order to send commands to the L2 cache in a serialized fashion.
+ * The L2 cache will ignore commands if it is busy.
+ */
+ _mali_osk_lock_wait(cache->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* First, wait for L2 cache command handler to go idle */
+
+ for (i = 0; i < loop_count; i++)
+ {
+ if (!(_mali_osk_mem_ioread32(cache->mapped_registers , (u32)MALI400_L2_CACHE_REGISTER_STATUS * sizeof(u32)) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
+ {
+ break;
+ }
+ }
+
+ if (i == loop_count)
+ {
+ _mali_osk_lock_signal(cache->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+ }
+
+ /* then issue the command */
+ mali_l2_cache_register_write(cache, reg, val);
+
+ _mali_osk_lock_signal(cache->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_SUCCESS;
+}
+
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all_cache(mali_kernel_l2_cache_core *cache)
+{
+ return mali_kernel_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+}
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all(void)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and invalidate them */
+
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ MALI_CHECK_NO_ERROR( mali_kernel_l2_cache_invalidate_all_cache(cache) );
+ }
+
+ MALI_SUCCESS;
+}
+
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page_cache(mali_kernel_l2_cache_core *cache, u32 page)
+{
+ return mali_kernel_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, page);
+}
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page(u32 page)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and invalidate them */
+
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ MALI_CHECK_NO_ERROR( mali_kernel_l2_cache_invalidate_page_cache(cache, page) );
+ }
+
+ MALI_SUCCESS;
+}
+
+
+void mali_kernel_l2_cache_set_perf_counters(u32 src0, u32 src1, int force_reset)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+ int reset0 = force_reset;
+ int reset1 = force_reset;
+ MALI_DEBUG_CODE(
+ int changed0 = 0;
+ int changed1 = 0;
+ )
+
+ /* loop over all L2 cache units and activate the counters on them */
+ _MALI_OSK_LIST_FOREACHENTRY(cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ u32 cur_src0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0);
+ u32 cur_src1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1);
+
+ if (src0 != cur_src0)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, src0);
+ MALI_DEBUG_CODE(changed0 = 1;)
+ reset0 = 1;
+ }
+
+ if (src1 != cur_src1)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, src1);
+ MALI_DEBUG_CODE(changed1 = 1;)
+ reset1 = 1;
+ }
+
+ if (reset0)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
+ }
+
+ if (reset1)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
+ }
+
+ MALI_DEBUG_PRINT(5, ("L2 cache counters set: SRC0=%u, CHANGED0=%d, RESET0=%d, SRC1=%u, CHANGED1=%d, RESET1=%d\n",
+ src0, changed0, reset0,
+ src1, changed1, reset1));
+ }
+}
+
+
+void mali_kernel_l2_cache_get_perf_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+ int first_time = 1;
+ *src0 = 0;
+ *src1 = 0;
+ *val0 = 0;
+ *val1 = 0;
+
+ /* loop over all L2 cache units and read the counters */
+ _MALI_OSK_LIST_FOREACHENTRY(cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ u32 cur_src0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0);
+ u32 cur_src1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1);
+ u32 cur_val0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ u32 cur_val1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+
+ MALI_DEBUG_PRINT(5, ("L2 cache counters get: SRC0=%u, VAL0=%u, SRC1=%u, VAL1=%u\n", cur_src0, cur_val0, cur_src1, cur_val1));
+
+ if (first_time)
+ {
+ *src0 = cur_src0;
+ *src1 = cur_src1;
+ first_time = 0;
+ }
+
+ if (*src0 == cur_src0 && *src1 == cur_src1)
+ {
+ *val0 += cur_val0;
+ *val1 += cur_val1;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("Warning: Mali L2 caches has different performance counters set, not retrieving data\n"));
+ }
+ }
+}
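
The two functions above program the performance-counter sources on every L2 unit and sum the counter values across units when the sources agree. Below is a minimal sketch of how a caller might use them; the source IDs 1 and 2 are placeholders, not real hardware event numbers.

/* Illustrative sketch only: the counter source IDs are hardware-specific placeholders. */
static void example_l2_counters(void)
{
	u32 src0, val0, src1, val1;

	/* program both counters and force their values to reset */
	mali_kernel_l2_cache_set_perf_counters(1, 2, 1);

	/* ... run some GPU work here ... */

	/* read back: values are summed over all L2 units when the sources match */
	mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
	MALI_DEBUG_PRINT(3, ("L2 counters: src0=%u val0=%u src1=%u val1=%u\n", src0, val0, src1, val1));
}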
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h
new file mode 100644
index 00000000000..de3229a0688
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_subsystem.h"
+extern struct mali_kernel_subsystem mali_subsystem_l2_cache;
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all(void);
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page(u32 page);
+
+void mali_kernel_l2_cache_do_enable(void);
+void mali_kernel_l2_cache_set_perf_counters(u32 src0, u32 src1, int force_reset);
+void mali_kernel_l2_cache_get_perf_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
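
A brief usage sketch for the maintenance entry points declared above. When invalidation is actually required depends on the integration, and the page argument passed to mali_kernel_l2_cache_invalidate_page() below is an arbitrary placeholder.

/* Illustrative sketch only: the invalidation point and the page value are assumptions. */
static void example_l2_maintenance(void)
{
	/* drop the whole cache, e.g. after rewriting memory the GPU will read */
	if (_MALI_OSK_ERR_OK != mali_kernel_l2_cache_invalidate_all())
	{
		MALI_DEBUG_PRINT(1, ("L2 invalidate-all failed\n"));
	}

	/* or target a single page (hardware-defined page argument) */
	(void)mali_kernel_l2_cache_invalidate_page(0x12345000);
}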
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h
new file mode 100644
index 00000000000..681b6dd47c9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_H__
+#define __MALI_KERNEL_MEM_H__
+
+#include "mali_kernel_subsystem.h"
+extern struct mali_kernel_subsystem mali_subsystem_memory;
+
+#endif /* __MALI_KERNEL_MEM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c
new file mode 100644
index 00000000000..8277fd1e1c6
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c
@@ -0,0 +1,1425 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_core.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_kernel_session_manager.h"
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#ifdef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+#include "mali_osk_indir_mmap.h"
+#endif
+
+/**
+ * Minimum memory allocation size
+ */
+#define MIN_BLOCK_SIZE (1024*1024UL)
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 4096
+
+/**
+ * Enums used to store multiple fields in one u32 to keep the memory block struct small
+ */
+enum MISC_SHIFT { MISC_SHIFT_FREE = 0, MISC_SHIFT_ORDER = 1, MISC_SHIFT_TOPLEVEL = 6 };
+enum MISC_MASK { MISC_MASK_FREE = 0x01, MISC_MASK_ORDER = 0x1F, MISC_MASK_TOPLEVEL = 0x1F };
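
The accessors that consume these shifts and masks (set_block_free, set_block_order, set_block_toplevel and their getters) are only declared further down in this file. The sketch below shows the packing they suggest, assuming the fields are simply shifted and masked into the block's misc word.

/* Assumed bit layout of mali_memory_block::misc (illustrative only):
 *   bit 0       free flag        (MISC_SHIFT_FREE / MISC_MASK_FREE)
 *   bits 1..5   block order      (MISC_SHIFT_ORDER / MISC_MASK_ORDER)
 *   bits 6..10  toplevel order   (MISC_SHIFT_TOPLEVEL / MISC_MASK_TOPLEVEL)
 */
static u32 example_pack_misc(u32 free, u32 order, u32 toplevel)
{
	return ((free & MISC_MASK_FREE) << MISC_SHIFT_FREE) |
	       ((order & MISC_MASK_ORDER) << MISC_SHIFT_ORDER) |
	       ((toplevel & MISC_MASK_TOPLEVEL) << MISC_SHIFT_TOPLEVEL);
}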
+
+/* forward declaration of the block struct */
+struct mali_memory_block;
+
+/**
+ * Definition of memory bank type.
+ * Represents a memory bank (separate address space)
+ * Each bank keeps track of its block usage.
+ * A buddy system is used to track the usage.
+ */
+typedef struct mali_memory_bank
+{
+ _mali_osk_list_t list; /* links multiple banks together */
+ _mali_osk_lock_t *lock;
+ u32 base_addr; /* Mali seen address of bank */
+ u32 cpu_usage_adjust; /* Adjustment factor for what the CPU sees */
+ u32 size; /* the effective size */
+ u32 real_size; /* the real size of the bank, as given to the subsystem */
+ int min_order;
+ int max_order;
+ struct mali_memory_block * blocklist;
+ _mali_osk_list_t *freelist;
+ _mali_osk_atomic_t num_active_allocations;
+ u32 used_for_flags;
+ u32 alloc_order; /**< Order in which the bank will be used for allocations */
+ const char *name; /**< Descriptive name of the bank */
+} mali_memory_bank;
+
+/**
+ * Definition of the memory block type
+ * Represents a memory block, which is the smallest memory unit operated on.
+ * A block keeps info about its mapping, if in use by a user process
+ */
+typedef struct mali_memory_block
+{
+ _mali_osk_list_t link; /* used for freelist and process usage list*/
+ mali_memory_bank * bank; /* the bank it belongs to */
+ void __user * mapping; /* possible user space mapping of this block */
+ u32 misc; /* used while a block is free to track the number of blocks it represents */
+ int descriptor;
+ u32 mmap_cookie; /**< necessary for interaction with _mali_ukk_mem_mmap/munmap */
+} mali_memory_block;
+
+/**
+ * Definition of the type used to represent memory used by a session.
+ * Contains the head of the list of memory currently in use by a session.
+ */
+typedef struct memory_session
+{
+ _mali_osk_lock_t *lock;
+ _mali_osk_list_t memory_head; /* List of the memory blocks used by this session. */
+ mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+} memory_session;
+
+/*
+ Subsystem interface implementation
+*/
+/**
+ * Buddy block memory subsystem startup function
+ * Called by the driver core when the driver is loaded.
+ * Registers the memory system's ioctl handler, resource handlers and memory map function with the core.
+ *
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * Buddy block memory subsystem shutdown function
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up all memory banks registered with the subsystem
+ * @param id Identifier assigned by the core to the memory subsystem
+ */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * Buddy block memory load complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered
+ * Reports on the memory resources registered
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id);
+
+
+/**
+ * Buddy block memory subsystem session begin notification
+ * Called by the core when a new session to the driver is started.
+ * Creates a memory session object and sets it as the subsystem slot data for this session
+ * @param slot Pointer to the slot to use for storing per-session data
+ * @param queue The user space event sink
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+/**
+ * Buddy block memory subsystem session end notification
+ * Called by the core when a session to the driver has ended.
+ * Cleans up per session data, which includes checking and fixing memory leaks
+ *
+ * @param slot Pointer to the slot to use for storing per-session data
+ */
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+/**
+ * Buddy block memory subsystem system info filler
+ * Called by the core when a system info update is needed
+ * We fill in info about all the memory types we have
+ * @param info Pointer to system info struct to update
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info);
+
+/* our registered resource handlers */
+/**
+ * Buddy block memory subsystem's notification handler for MEMORY resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each memory bank described in the active architecture's config.h file.
+ * Requests memory region ownership and calls backend.
+ * @param resource The resource to handle (type MEMORY)
+ * @return 0 if the memory was claimed and accepted, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_memory(_mali_osk_resource_t * resource);
+
+/**
+ * Buddy block memory subsystem's notification handler for MMU resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each mmu described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MMU)
+ * @return 0 if the MMU was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource);
+
+/**
+ * Buddy block memory subsystem's notification handler for FPGA_FRAMEWORK resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each fpga framework described in the active architecture's config.h file.
+ * @param resource The resource to handle (type FPGA_FRAMEWORK)
+ * @return 0 if the FPGA framework was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource);
+
+/* ioctl command implementations */
+/**
+ * Buddy block memory subsystem's handler for MALI_IOC_MEM_GET_BIG_BLOCK ioctl
+ * Called by the generic ioctl handler when the MALI_IOC_MEM_GET_BIG_BLOCK command is received.
+ * Finds an available memory block and maps into the current process' address space.
+ * @param ukk_private private word for use by the User/Kernel interface
+ * @param session_data Pointer to the per-session object which will track the memory usage
+ * @param argument The argument from the user. A pointer to a struct mali_dd_get_big_block in user space
+ * @return Zero if successful, a standard Linux error value on error (a negative value)
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args );
+
+/**
+ * Buddy block memory subsystem's handler for MALI_IOC_MEM_FREE_BIG_BLOCK ioctl
+ * Called by the generic ioctl handler when the MALI_IOC_MEM_FREE_BIG_BLOCK command is received.
+ * Unmaps the memory from the process' address space and marks the block as free.
+ * @param session_data Pointer to the per-session object which tracks the memory usage
+ * @param argument The argument from the user. A pointer to a struct mali_dd_get_big_block in user space
+ * @return Zero if successful, a standard Linux error value on error (a negative value)
+ */
+
+/* this static version allows us to make use of it while holding the memory_session lock.
+ * This is required for the session_end code */
+static _mali_osk_errcode_t _mali_ukk_free_big_block_internal( struct mali_session_data * mali_session_data, memory_session * session_data, _mali_uk_free_big_block_s *args);
+
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args );
+
+/**
+ * Buddy block memory subsystem's memory bank registration routine
+ * Called when a MEMORY resource has been found.
+ * The memory region has already been reserved for use by this driver.
+ * Create a bank object to represent this region and initialize its slots.
+ * @note Can only be called in a module-atomic scope, i.e. during module init, since no locking is performed
+ * @param phys_base Physical base address of this bank
+ * @param cpu_usage_adjust Adjustment factor for CPU seen address
+ * @param size Size of the bank in bytes
+ * @param flags Memory type bits
+ * @param alloc_order Order in which the bank will be used for allocations
+ * @param name descriptive name of the bank
+ * @return Zero on success, negative on error
+ */
+static int mali_memory_bank_register(u32 phys_base, u32 cpu_usage_adjust, u32 size, u32 flags, u32 alloc_order, const char *name);
+
+/**
+ * Get a block of mali memory of at least the given size and of the given type
+ * This is the backend for get_big_block.
+ * @param type_id The type id of memory requested.
+ * @param minimum_size The size requested
+ * @return Pointer to a block on success, NULL on failure
+ */
+static mali_memory_block * mali_memory_block_get(u32 type_id, u32 minimum_size);
+
+/**
+ * Get the mali seen address of the memory described by the block
+ * @param block The memory block to return the address of
+ * @return The mali seen address of the memory block
+ */
+MALI_STATIC_INLINE u32 block_mali_addr_get(mali_memory_block * block);
+
+/**
+ * Get the cpu seen address of the memory described by the block
+ * The cpu_usage_adjust will be used to change the mali seen phys address
+ * @param block The memory block to return the address of
+ * @return The cpu seen address of the memory block
+ */
+MALI_STATIC_INLINE u32 block_cpu_addr_get(mali_memory_block * block);
+
+/**
+ * Get the size of the memory described by the given block
+ * @param block The memory block to return the size of
+ * @return The size of the memory block described by the object
+ */
+MALI_STATIC_INLINE u32 block_size_get(mali_memory_block * block);
+
+/**
+ * Get the user space accessible mapping of the memory described by the given memory block
+ * Returns a pointer in user space to the memory, if one has been created.
+ * @param block The memory block to return the mapping of
+ * @return User space pointer to cpu accessible memory or NULL if not mapped
+ */
+MALI_STATIC_INLINE void __user * block_mapping_get(mali_memory_block * block);
+
+/**
+ * Set the user space accessible mapping of the memory described by the given memory block.
+ * Sets the stored pointer to user space for the memory described by this block.
+ * @param block The memory block to set mapping info for
+ * @param ptr User space pointer to cpu accessible memory or NULL if not mapped
+ */
+MALI_STATIC_INLINE void block_mapping_set(mali_memory_block * block, void __user * ptr);
+
+/**
+ * Get the cookie for use with _mali_ukk_mem_munmap().
+ * @param block The memory block to get the cookie from
+ * @return the cookie. A return of 0 is still a valid cookie.
+ */
+MALI_STATIC_INLINE u32 block_mmap_cookie_get(mali_memory_block * block);
+
+/**
+ * Set the cookie returned via _mali_ukk_mem_mmap().
+ * @param block The memory block to set the cookie for
+ * @param cookie the cookie
+ */
+MALI_STATIC_INLINE void block_mmap_cookie_set(mali_memory_block * block, u32 cookie);
+
+
+/**
+ * Get a memory block's free status
+ * @param block The block to get the state of
+ */
+MALI_STATIC_INLINE u32 get_block_free(mali_memory_block * block);
+
+/**
+ * Set a memory block's free status
+ * @param block The block to set the state for
+ * @param state The state to set
+ */
+MALI_STATIC_INLINE void set_block_free(mali_memory_block * block, int state);
+
+/**
+ * Set a memory block's order
+ * @param block The block to set the order for
+ * @param order The order to set
+ */
+MALI_STATIC_INLINE void set_block_order(mali_memory_block * block, u32 order);
+
+/**
+ * Get a memory block's order
+ * @param block The block to get the order for
+ * @return The order this block exists on
+ */
+MALI_STATIC_INLINE u32 get_block_order(mali_memory_block * block);
+
+/**
+ * Tag a block as being a toplevel block.
+ * A toplevel block has no buddy and no parent
+ * @param block The block to tag as being toplevel
+ */
+MALI_STATIC_INLINE void set_block_toplevel(mali_memory_block * block, u32 level);
+
+/**
+ * Check if a block is a toplevel block
+ * @param block The block to check
+ * @return 1 if toplevel, 0 else
+ */
+MALI_STATIC_INLINE u32 get_block_toplevel(mali_memory_block * block);
+
+/**
+ * Checks if the given block is a buddy at the given order and that it's free
+ * @param block The block to check
+ * @param order The order to check against
+ * @return 0 if not valid, else 1
+ */
+MALI_STATIC_INLINE int block_is_valid_buddy(mali_memory_block * block, int order);
+
+/*
+ The buddy system uses the following rules to quickly find a block's buddy
+ and parent (the block representing this block at a higher order level):
+ - Given a block with index i, the block's buddy is at index i ^ (1 << order)
+ - Given a block with index i, the block's parent is at index i & ~(1 << order)
+*/
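
A tiny worked example of the two index rules above; the block index and order level are arbitrary.

/* Illustrative only: buddy/parent index arithmetic for an arbitrary block. */
static void example_buddy_index_math(void)
{
	u32 i = 5;      /* block index within a bank's blocklist        */
	u32 order = 1;  /* order level relative to the bank's min_order */
	u32 buddy  = i ^ (1u << order);   /* 5 ^ 2  == 7 */
	u32 parent = i & ~(1u << order);  /* 5 & ~2 == 5, the lower-indexed block of the pair */
	(void)buddy;
	(void)parent;
}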
+
+/**
+ * Get a block's buddy
+ * @param block The block to find the buddy for
+ * @param order The order to operate on
+ * @return Pointer to the buddy block
+ */
+MALI_STATIC_INLINE mali_memory_block * block_get_buddy(mali_memory_block * block, u32 order);
+
+/**
+ * Get a block's parent
+ * @param block The block to find the parent for
+ * @param order The order to operate on
+ * @return Pointer to the parent block
+ */
+MALI_STATIC_INLINE mali_memory_block * block_get_parent(mali_memory_block * block, u32 order);
+
+/**
+ * Release mali memory
+ * Backend for free_big_block.
+ * Will release the mali memory described by the given block struct.
+ * @param block Memory block to free
+ */
+static void block_release(mali_memory_block * block);
+
+/* end interface implementation */
+
+/**
+ * List of all the memory banks registered with the subsystem.
+ * Access to this list is NOT synchronized since it's only
+ * written to during module init and termination.
+ */
+static _MALI_OSK_LIST_HEAD(memory_banks_list);
+
+/*
+ The buddy memory system's mali subsystem interface implementation.
+ We currently handle module and session life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_memory =
+{
+ mali_memory_core_initialize, /* startup */
+ mali_memory_core_terminate, /* shutdown */
+ mali_memory_core_load_complete, /* load_complete */
+ mali_memory_core_system_info_fill, /* system_info_fill */
+ mali_memory_core_session_begin, /* session_begin */
+ mali_memory_core_session_end, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_memory_id = -1;
+
+/* called during module init */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&memory_banks_list);
+
+ mali_subsystem_memory_id = id;
+
+ /* register our handlers */
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MEMORY, mali_memory_core_resource_memory));
+
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MMU, mali_memory_core_resource_mmu));
+
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(FPGA_FRAMEWORK, mali_memory_core_resource_fpga));
+
+ MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_memory_bank * bank, *temp;
+
+ /* loop over all memory banks to free them */
+ /* we use the safe version since we delete the current bank in the body */
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ MALI_DEBUG_CODE(int usage_count = _mali_osk_atomic_read(&bank->num_active_allocations));
+ /*
+ Report leaked memory
+ If this happens we have a bug in our session cleanup code.
+ */
+ MALI_DEBUG_PRINT_IF(1, 0 != usage_count, ("%d allocation(s) from memory bank at 0x%X still in use\n", usage_count, bank->base_addr));
+
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+
+ _mali_osk_lock_term(bank->lock);
+
+ /* unlink from bank list */
+ _mali_osk_list_del(&bank->list);
+
+ /* release kernel resources used by the bank */
+ _mali_osk_mem_unreqregion(bank->base_addr, bank->real_size);
+
+ /* remove all resources used to represent this bank*/
+ _mali_osk_free(bank->freelist);
+ _mali_osk_free(bank->blocklist);
+
+ /* destroy the bank object itself */
+ _mali_osk_free(bank);
+ }
+
+ /* No need to de-initialize mali_subsystem_memory_id - it could only be
+ * re-initialized to the same value */
+}
+
+/* load_complete handler */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_memory_bank * bank, *temp;
+
+ MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest number first) :\n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ if ( NULL != bank->name )
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", bank->alloc_order, bank->name) );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", bank->alloc_order ) );
+ }
+ }
+ MALI_SUCCESS;
+}
+
+MALI_STATIC_INLINE u32 order_needed_for_size(u32 size, struct mali_memory_bank * bank)
+{
+ u32 order = 0;
+
+ if (0 < size)
+ {
+ for ( order = sizeof(u32)*8 - 1; ((1UL<<order) & size) == 0; --order)
+ /* nothing */;
+
+ /* check if size is pow2, if not we need increment order by one */
+ if (0 != (size & ((1UL<<order)-1))) ++order;
+ }
+
+ if ((NULL != bank) && (order < bank->min_order)) order = bank->min_order;
+ /* Not capped to max_order; that would not make sense here */
+
+ return order;
+}
+
+MALI_STATIC_INLINE u32 maximum_order_which_fits(u32 size)
+{
+ u32 order = 0;
+ u32 powsize = 1;
+ while (powsize < size)
+ {
+ powsize <<= 1;
+ if (powsize > size) break;
+ order++;
+ }
+
+ return order;
+}
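
A worked example of the two helpers above, with MIN_BLOCK_SIZE equal to 1MB as defined earlier in this file: a 3MB request is not a power of two, so it needs an order-22 (4MB) block, while the largest power-of-two block that fits inside 3MB has order 21 (2MB). The sketch below is illustrative only; passing a real bank would additionally clamp the first result to the bank's min_order.

/* Illustrative only: expected results for a 3MB size with MIN_BLOCK_SIZE == 1MB. */
static void example_order_math(void)
{
	u32 needed = order_needed_for_size(3 * MIN_BLOCK_SIZE, NULL); /* == 22 (4MB block) */
	u32 fits   = maximum_order_which_fits(3 * MIN_BLOCK_SIZE);    /* == 21 (2MB block) */
	(void)needed;
	(void)fits;
}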
+
+/* called for new MEMORY resources */
+static _mali_osk_errcode_t mali_memory_bank_register(u32 phys_base, u32 cpu_usage_adjust, u32 size, u32 flags, u32 alloc_order, const char *name)
+{
+ /* no locking performed due to function contract */
+ int i;
+ u32 left, offset;
+ mali_memory_bank * bank;
+ mali_memory_bank * bank_enum, *temp;
+
+ _mali_osk_errcode_t err;
+
+ /* Only a multiple of MIN_BLOCK_SIZE is usable */
+ u32 usable_size = size & ~(MIN_BLOCK_SIZE - 1);
+
+ /* handle zero-sized banks and banks smaller than the minimum block size */
+ if (0 == usable_size)
+ {
+ MALI_PRINT(("Usable size == 0\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ /* warn for banks that are not a multiple of the block size */
+ MALI_DEBUG_PRINT_IF(1, usable_size != size, ("Memory bank @ 0x%X not a multiple of minimum block size. %d bytes wasted\n", phys_base, size - usable_size));
+
+ /* check against previous registrations */
+ MALI_DEBUG_CODE(
+ {
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ /* duplicate ? */
+ if (bank->base_addr == phys_base)
+ {
+ MALI_PRINT(("Duplicate registration of a memory bank at 0x%X detected\n", phys_base));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ /* overlapping ? */
+ else if (
+ ( (phys_base > bank->base_addr) && (phys_base < (bank->base_addr + bank->real_size)) ) ||
+ ( (phys_base + size) > bank->base_addr && ((phys_base + size) < (bank->base_addr + bank->real_size)) )
+ )
+ {
+ MALI_PRINT(("Overlapping memory blocks found. Memory at 0x%X overlaps with memory at 0x%X size 0x%X\n", bank->base_addr, phys_base, size));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ }
+ }
+ );
+
+ /* create an object to represent this memory bank */
+ MALI_CHECK_NON_NULL(bank = (mali_memory_bank*)_mali_osk_malloc(sizeof(mali_memory_bank)), _MALI_OSK_ERR_NOMEM);
+
+ /* init the fields */
+ _MALI_OSK_INIT_LIST_HEAD(&bank->list);
+ bank->base_addr = phys_base;
+ bank->cpu_usage_adjust = cpu_usage_adjust;
+ bank->size = usable_size;
+ bank->real_size = size;
+ bank->alloc_order = alloc_order;
+ bank->name = name;
+
+ err = _mali_osk_atomic_init(&bank->num_active_allocations, 0);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ _mali_osk_free(bank);
+ MALI_ERROR(err);
+ }
+
+ bank->used_for_flags = flags;
+ bank->min_order = order_needed_for_size(MIN_BLOCK_SIZE, NULL);
+ bank->max_order = maximum_order_which_fits(usable_size);
+ bank->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0);
+ if (NULL == bank->lock)
+ {
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ bank->blocklist = _mali_osk_calloc(1, sizeof(struct mali_memory_block) * (usable_size / MIN_BLOCK_SIZE));
+ if (NULL == bank->blocklist)
+ {
+ _mali_osk_lock_term(bank->lock);
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ for (i = 0; i < (usable_size / MIN_BLOCK_SIZE); i++)
+ {
+ bank->blocklist[i].bank = bank;
+ }
+
+ bank->freelist = _mali_osk_calloc(1, sizeof(_mali_osk_list_t) * (bank->max_order - bank->min_order + 1));
+ if (NULL == bank->freelist)
+ {
+ _mali_osk_lock_term(bank->lock);
+ _mali_osk_free(bank->blocklist);
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ for (i = 0; i < (bank->max_order - bank->min_order + 1); i++) _MALI_OSK_INIT_LIST_HEAD(&bank->freelist[i]);
+
+ /* init slot info */
+ for (offset = 0, left = usable_size; offset < (usable_size / MIN_BLOCK_SIZE); /* updated inside the body */)
+ {
+ u32 block_order;
+ mali_memory_block * block;
+
+ /* the maximum order which fits in the remaining area */
+ block_order = maximum_order_which_fits(left);
+
+ /* find the block pointer */
+ block = &bank->blocklist[offset];
+
+ /* tag the block as being toplevel */
+ set_block_toplevel(block, block_order);
+
+ /* tag it as being free */
+ set_block_free(block, 1);
+
+ /* set the order */
+ set_block_order(block, block_order);
+
+ _mali_osk_list_addtail(&block->link, bank->freelist + (block_order - bank->min_order));
+
+ left -= (1 << block_order);
+ offset += ((1 << block_order) / MIN_BLOCK_SIZE);
+ }
+
+ /* add bank to list of banks on the system */
+ _MALI_OSK_LIST_FOREACHENTRY( bank_enum, temp, &memory_banks_list, mali_memory_bank, list )
+ {
+ if ( bank_enum->alloc_order >= alloc_order )
+ {
+ /* Found insertion point - our item must go before this one */
+ break;
+ }
+ }
+ _mali_osk_list_addtail(&bank->list, &bank_enum->list);
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_memory_mmu_register(u32 type, u32 phys_base)
+{
+ /* not supported */
+ return _MALI_OSK_ERR_INVALID_FUNC;
+}
+
+void mali_memory_mmu_unregister(u32 phys_base)
+{
+ /* not supported */
+ return;
+}
+
+static mali_memory_block * mali_memory_block_get(u32 type_id, u32 minimum_size)
+{
+ mali_memory_bank * bank;
+ mali_memory_block * block = NULL;
+ u32 requested_order, current_order;
+
+ /* input validation */
+ if (0 == minimum_size)
+ {
+ /* bad size */
+ MALI_DEBUG_PRINT(2, ("Zero size block requested by mali_memory_block_get\n"));
+ return NULL;
+ }
+
+ bank = (mali_memory_bank*)type_id;
+
+ requested_order = order_needed_for_size(minimum_size, bank);
+
+ MALI_DEBUG_PRINT(4, ("For size %d we need order %d (%d)\n", minimum_size, requested_order, 1 << requested_order));
+
+ _mali_osk_lock_wait(bank->lock, _MALI_OSK_LOCKMODE_RW);
+ /* ! critical section begin */
+
+ MALI_DEBUG_PRINT(7, ("Bank 0x%x locked\n", bank));
+
+ for (current_order = requested_order; current_order <= bank->max_order; ++current_order)
+ {
+ _mali_osk_list_t * list = bank->freelist + (current_order - bank->min_order);
+ MALI_DEBUG_PRINT(7, ("Checking freelist 0x%x for order %d\n", list, current_order));
+ if (0 != _mali_osk_list_empty(list)) continue; /* empty list */
+
+ MALI_DEBUG_PRINT(7, ("Found an entry on the freelist for order %d\n", current_order));
+
+
+ block = _MALI_OSK_LIST_ENTRY(list->next, mali_memory_block, link);
+ _mali_osk_list_delinit(&block->link);
+
+ while (current_order > requested_order)
+ {
+ mali_memory_block * buddy_block;
+ MALI_DEBUG_PRINT(7, ("Splitting block 0x%x\n", block));
+ current_order--;
+ list--;
+ buddy_block = block_get_buddy(block, current_order - bank->min_order);
+ set_block_order(buddy_block, current_order);
+ set_block_free(buddy_block, 1);
+ _mali_osk_list_add(&buddy_block->link, list);
+ }
+
+ set_block_order(block, current_order);
+ set_block_free(block, 0);
+
+ /* update usage count */
+ _mali_osk_atomic_inc(&bank->num_active_allocations);
+
+ break;
+ }
+
+ /* ! critical section end */
+ _mali_osk_lock_signal(bank->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_DEBUG_PRINT(7, ("Lock released for bank 0x%x\n", bank));
+
+ MALI_DEBUG_PRINT_IF(7, NULL != block, ("Block 0x%x allocated\n", block));
+
+ return block;
+}
+
+
+static void block_release(mali_memory_block * block)
+{
+ mali_memory_bank * bank;
+ u32 current_order;
+
+ if (NULL == block) return;
+
+ bank = block->bank;
+
+ /* we're manipulating the free list, so we need to lock it */
+ _mali_osk_lock_wait(bank->lock, _MALI_OSK_LOCKMODE_RW);
+ /* ! critical section begin */
+
+ set_block_free(block, 1);
+ current_order = get_block_order(block);
+
+ while (current_order <= bank->max_order)
+ {
+ mali_memory_block * buddy_block;
+ buddy_block = block_get_buddy(block, current_order - bank->min_order);
+ if (!block_is_valid_buddy(buddy_block, current_order)) break;
+ _mali_osk_list_delinit(&buddy_block->link); /* remove from free list */
+ /* clear tracked data in both blocks */
+ set_block_order(block, 0);
+ set_block_free(block, 0);
+ set_block_order(buddy_block, 0);
+ set_block_free(buddy_block, 0);
+ /* make the parent control the new state */
+ block = block_get_parent(block, current_order - bank->min_order);
+ set_block_order(block, current_order + 1); /* merged has a higher order */
+ set_block_free(block, 1); /* mark it as free */
+ current_order++;
+ if (get_block_toplevel(block) == current_order) break; /* stop the merge if we've arrived at a toplevel block */
+ }
+
+ _mali_osk_list_add(&block->link, &bank->freelist[current_order - bank->min_order]);
+
+ /* update bank usage statistics */
+ _mali_osk_atomic_dec(&block->bank->num_active_allocations);
+
+ /* !critical section end */
+ _mali_osk_lock_signal(bank->lock, _MALI_OSK_LOCKMODE_RW);
+
+ return;
+}
+
+MALI_STATIC_INLINE u32 block_get_offset(mali_memory_block * block)
+{
+ return block - block->bank->blocklist;
+}
+
+MALI_STATIC_INLINE u32 block_mali_addr_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->bank->base_addr + MIN_BLOCK_SIZE * block_get_offset(block);
+ else return 0;
+}
+
+MALI_STATIC_INLINE u32 block_cpu_addr_get(mali_memory_block * block)
+{
+ if (NULL != block) return (block->bank->base_addr + MIN_BLOCK_SIZE * block_get_offset(block)) + block->bank->cpu_usage_adjust;
+ else return 0;
+}
+
+MALI_STATIC_INLINE u32 block_size_get(mali_memory_block * block)
+{
+ if (NULL != block) return 1 << get_block_order(block);
+ else return 0;
+}
+
+MALI_STATIC_INLINE void __user * block_mapping_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->mapping;
+ else return NULL;
+}
+
+MALI_STATIC_INLINE void block_mapping_set(mali_memory_block * block, void __user * ptr)
+{
+ if (NULL != block) block->mapping = ptr;
+}
+
+MALI_STATIC_INLINE u32 block_mmap_cookie_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->mmap_cookie;
+ else return 0;
+}
+
+/**
+ * Set the cookie returned via _mali_ukk_mem_mmap().
+ * @param block The memory block to set the cookie for
+ * @param cookie the cookie
+ */
+MALI_STATIC_INLINE void block_mmap_cookie_set(mali_memory_block * block, u32 cookie)
+{
+ if (NULL != block) block->mmap_cookie = cookie;
+}
+
+
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ if (NULL != *slot)
+ {
+ MALI_DEBUG_PRINT(1, ("The slot given to memory session begin already contains data"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ /* create the session data object */
+ MALI_CHECK_NON_NULL(session_data = _mali_osk_malloc(sizeof(memory_session)), _MALI_OSK_ERR_NOMEM);
+
+ /* create descriptor mapping table */
+ session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+ if (NULL == session_data->descriptor_mapping)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->memory_head); /* no memory in use */
+ session_data->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0);
+ if (NULL == session_data->lock)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ *slot = session_data; /* slot will point to our data object */
+
+ MALI_SUCCESS;
+}
+
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ return;
+ }
+
+ if (NULL == *slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL memory_session found in current session object"));
+ return;
+ }
+
+ _mali_osk_lock_wait(((memory_session*)*slot)->lock, _MALI_OSK_LOCKMODE_RW);
+ session_data = (memory_session *)*slot;
+ /* clear our slot */
+ *slot = NULL;
+
+ /*
+ First free all memory still being used.
+ This can happen if the caller has leaked memory or
+ the application has crashed forcing an auto-session end.
+ */
+ if (0 == _mali_osk_list_empty(&session_data->memory_head))
+ {
+ mali_memory_block * block, * temp;
+ MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+ /* use the _safe version since free_big_block removes the active block from the list we're iterating over */
+ _MALI_OSK_LIST_FOREACHENTRY(block, temp, &session_data->memory_head, mali_memory_block, link)
+ {
+ _mali_osk_errcode_t err;
+ _mali_uk_free_big_block_s uk_args;
+
+ MALI_DEBUG_PRINT(4, ("Freeing block 0x%x with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ block,
+ (void*)block_mali_addr_get(block),
+ block_size_get(block),
+ block_mapping_get(block))
+ );
+
+ /* free the block */
+ /** @note manual type safety check-point */
+ uk_args.ctx = mali_session_data;
+ uk_args.cookie = (u32)block->descriptor;
+ err = _mali_ukk_free_big_block_internal( mali_session_data, session_data, &uk_args );
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT_ERROR(("_mali_ukk_free_big_block_internal() failed during session termination on block with cookie==0x%X\n",
+ uk_args.cookie)
+ );
+ }
+ }
+ }
+
+ if (NULL != session_data->descriptor_mapping)
+ {
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ session_data->descriptor_mapping = NULL;
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_lock_term(session_data->lock);
+
+ /* free the session data object */
+ _mali_osk_free(session_data);
+
+ return;
+}
+
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info)
+{
+ mali_memory_bank * bank, *temp;
+ _mali_mem_info **mem_info_tail;
+
+ /* check input */
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* make sure we won't leak any memory. It could also be that it's an uninitialized variable, but that would be a bug in the caller */
+ MALI_DEBUG_ASSERT(NULL == info->mem_info);
+
+ mem_info_tail = &info->mem_info;
+
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ _mali_mem_info * mem_info;
+
+ mem_info = (_mali_mem_info *)_mali_osk_calloc(1, sizeof(_mali_mem_info));
+ if (NULL == mem_info) return _MALI_OSK_ERR_NOMEM; /* memory already allocated will be freed by the caller */
+
+ /* set info */
+ mem_info->size = bank->size;
+ mem_info->flags = (_mali_bus_usage)bank->used_for_flags;
+ mem_info->maximum_order_supported = bank->max_order;
+ mem_info->identifier = (u32)bank;
+
+ /* add to system info linked list */
+ (*mem_info_tail) = mem_info;
+ mem_info_tail = &mem_info->next;
+ }
+
+ /* all OK */
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_memory(_mali_osk_resource_t * resource)
+{
+ _mali_osk_errcode_t err;
+
+ /* Request ownership of the memory */
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* call backend */
+ err = mali_memory_bank_register(resource->base, resource->cpu_usage_adjust, resource->size, resource->flags, resource->alloc_order, resource->description);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ /* if backend refused the memory we have to release the region again */
+ MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ MALI_ERROR(err);
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource)
+{
+ /* Not supported by the fixed block memory system */
+ MALI_DEBUG_PRINT(1, ("MMU resource not supported by non-MMU driver!\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_FUNC);
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource)
+{
+ mali_io_address mapping;
+
+ MALI_DEBUG_PRINT(5, ("FPGA framework '%s' @ (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + sizeof(u32) * 2 - 1
+ ));
+
+ mapping = _mali_osk_mem_mapioregion(resource->base + 0x1000, sizeof(u32) * 2, "fpga framework");
+ if (mapping)
+ {
+ u32 data;
+ data = _mali_osk_mem_ioread32(mapping, 0);
+ MALI_DEBUG_PRINT(2, ("FPGA framwork '%s' @ 0x%08X:\n", resource->description, resource->base));
+ MALI_DEBUG_PRINT(2, ("\tBitfile date: %d%02d%02d_%02d%02d\n",
+ (data >> 20),
+ (data >> 16) & 0xF,
+ (data >> 11) & 0x1F,
+ (data >> 6) & 0x1F,
+ (data >> 0) & 0x3F));
+ data = _mali_osk_mem_ioread32(mapping, sizeof(u32));
+ MALI_DEBUG_PRINT(2, ("\tBitfile SCCS rev: %d\n", data));
+
+ _mali_osk_mem_unmapioregion(resource->base + 0x1000, sizeof(u32) * 2, mapping);
+ }
+ else MALI_DEBUG_PRINT(1, ("Failed to access FPGA framework '%s' @ 0x%08X\n", resource->description, resource->base));
+
+ MALI_SUCCESS;
+}
+
+/* static _mali_osk_errcode_t get_big_block(void * ukk_private, struct mali_session_data * mali_session_data, void __user * argument) */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args )
+{
+ _mali_uk_mem_mmap_s args_mmap = {0, };
+ int md;
+ mali_memory_block * block;
+ _mali_osk_errcode_t err;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER( args );
+
+ MALI_DEBUG_ASSERT_POINTER( args->ctx );
+
+ /** @note manual type safety check-point */
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (!args->type_id)
+ {
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* at least min block size */
+ if (MIN_BLOCK_SIZE > args->minimum_size_requested) args->minimum_size_requested = MIN_BLOCK_SIZE;
+
+ /* perform the actual allocation */
+ block = mali_memory_block_get(args->type_id, args->minimum_size_requested);
+ if ( NULL == block )
+ {
+ /* no memory available with requested type_id */
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, block, &md))
+ {
+ block_release(block);
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+ block->descriptor = md;
+
+
+ /* fill in response */
+ args->mali_address = block_mali_addr_get(block);
+ args->block_size = block_size_get(block);
+ args->cookie = (u32)md;
+ args->flags = block->bank->used_for_flags;
+
+ /* map the block into the process' address space */
+
+ /** @note manual type safety check-point */
+ args_mmap.ukk_private = (void *)args->ukk_private;
+ args_mmap.ctx = args->ctx;
+ args_mmap.size = args->block_size;
+ args_mmap.phys_addr = block_cpu_addr_get(block);
+
+#ifndef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+ err = _mali_ukk_mem_mmap( &args_mmap );
+#else
+ err = _mali_osk_specific_indirect_mmap( &args_mmap );
+#endif
+
+ /* check if the mapping failed */
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT(1, ("Memory mapping failed 0x%x\n", args->cpuptr));
+ /* mapping failed */
+
+ /* remove descriptor entry */
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+
+ /* free the mali memory */
+ block_release(block);
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+ }
+
+ args->cpuptr = args_mmap.mapping;
+ block_mmap_cookie_set(block, args_mmap.cookie);
+ block_mapping_set(block, args->cpuptr);
+
+ MALI_DEBUG_PRINT(2, ("Mali memory 0x%x (size %d) mapped in process memory space at 0x%x\n", (void*)args->mali_address, args->block_size, args->cpuptr));
+
+ /* track memory in use for the session */
+ _mali_osk_list_addtail(&block->link, &session_data->memory_head);
+
+ /* memory assigned to the session, memory mapped into the process' view */
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_SUCCESS;
+}
+
+/* Internal code that assumes the memory session lock is held */
+static _mali_osk_errcode_t _mali_ukk_free_big_block_internal( struct mali_session_data * mali_session_data, memory_session * session_data, _mali_uk_free_big_block_s *args)
+{
+ mali_memory_block * block = NULL;
+ _mali_osk_errcode_t err;
+ _mali_uk_mem_munmap_s args_munmap = {0,};
+
+ MALI_DEBUG_ASSERT_POINTER( mali_session_data );
+ MALI_DEBUG_ASSERT_POINTER( session_data );
+ MALI_DEBUG_ASSERT_POINTER( args );
+
+ err = mali_descriptor_mapping_get(session_data->descriptor_mapping, (int)args->cookie, (void**)&block);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release memory pages\n", (int)args->cookie));
+ MALI_ERROR(err);
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+ MALI_DEBUG_PRINT(4, ("Asked to free block 0x%x with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ block,
+ (void*)block_mali_addr_get(block),
+ block_size_get(block),
+ block_mapping_get(block))
+ );
+
+ /** @note manual type safety check-point */
+ args_munmap.ctx = (void*)mali_session_data;
+ args_munmap.mapping = block_mapping_get( block );
+ args_munmap.size = block_size_get( block );
+ args_munmap.cookie = block_mmap_cookie_get( block );
+
+#ifndef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+ _mali_ukk_mem_munmap( &args_munmap );
+#else
+ _mali_osk_specific_indirect_munmap( &args_munmap );
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Session data 0x%x, lock 0x%x\n", session_data, &session_data->lock));
+
+ /* unlink from session usage list */
+ MALI_DEBUG_PRINT(5, ("unlink from session usage list\n"));
+ _mali_osk_list_delinit(&block->link);
+
+ /* remove descriptor entry */
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, (int)args->cookie);
+
+ /* free the mali memory */
+ block_release(block);
+ MALI_DEBUG_PRINT(5, ("Block freed\n"));
+
+ MALI_SUCCESS;
+}
+
+/* static _mali_osk_errcode_t free_big_block( struct mali_session_data * mali_session_data, void __user * argument) */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args )
+{
+ _mali_osk_errcode_t err;
+ struct mali_session_data * mali_session_data;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER( args );
+
+ MALI_DEBUG_ASSERT_POINTER( args->ctx );
+
+ /** @note manual type safety check-point */
+ mali_session_data = (struct mali_session_data *)args->ctx;
+
+ /* Must always verify this, since these are provided by the user */
+ MALI_CHECK_NON_NULL(mali_session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /** @note this has been separated out so that the session_end handler can call this while it has the memory_session lock held */
+ err = _mali_ukk_free_big_block_internal( mali_session_data, session_data, args );
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ return err;
+}
+
+MALI_STATIC_INLINE u32 get_block_free(mali_memory_block * block)
+{
+ return (block->misc >> MISC_SHIFT_FREE) & MISC_MASK_FREE;
+}
+
+MALI_STATIC_INLINE void set_block_free(mali_memory_block * block, int state)
+{
+ if (state) block->misc |= (MISC_MASK_FREE << MISC_SHIFT_FREE);
+ else block->misc &= ~(MISC_MASK_FREE << MISC_SHIFT_FREE);
+}
+
+MALI_STATIC_INLINE void set_block_order(mali_memory_block * block, u32 order)
+{
+ block->misc &= ~(MISC_MASK_ORDER << MISC_SHIFT_ORDER);
+ block->misc |= ((order & MISC_MASK_ORDER) << MISC_SHIFT_ORDER);
+}
+
+MALI_STATIC_INLINE u32 get_block_order(mali_memory_block * block)
+{
+ return (block->misc >> MISC_SHIFT_ORDER) & MISC_MASK_ORDER;
+}
+
+MALI_STATIC_INLINE void set_block_toplevel(mali_memory_block * block, u32 level)
+{
+ block->misc |= ((level & MISC_MASK_TOPLEVEL) << MISC_SHIFT_TOPLEVEL);
+}
+
+MALI_STATIC_INLINE u32 get_block_toplevel(mali_memory_block * block)
+{
+ return (block->misc >> MISC_SHIFT_TOPLEVEL) & MISC_MASK_TOPLEVEL;
+}
+
+MALI_STATIC_INLINE int block_is_valid_buddy(mali_memory_block * block, int order)
+{
+ if (get_block_free(block) && (get_block_order(block) == order)) return 1;
+ else return 0;
+}
+
+MALI_STATIC_INLINE mali_memory_block * block_get_buddy(mali_memory_block * block, u32 order)
+{
+ return block + ( (block_get_offset(block) ^ (1 << order)) - block_get_offset(block));
+}
+
+MALI_STATIC_INLINE mali_memory_block * block_get_parent(mali_memory_block * block, u32 order)
+{
+ return block + ((block_get_offset(block) & ~(1 << order)) - block_get_offset(block));
+}
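+
+/*
+ * Illustrative sketch, not driver code: the buddy-index arithmetic behind
+ * block_get_buddy() and block_get_parent() above, shown on plain block offsets.
+ * XORing the offset with (1 << order) flips the single bit that separates a
+ * block from its buddy at that order; clearing that bit gives the first block
+ * of the pair (the merged parent). The offsets below are hypothetical.
+ *
+ *   #include <stdio.h>
+ *
+ *   static unsigned int buddy_of(unsigned int offset, unsigned int order)
+ *   {
+ *       return offset ^ (1u << order);       // flip the order bit
+ *   }
+ *
+ *   static unsigned int parent_of(unsigned int offset, unsigned int order)
+ *   {
+ *       return offset & ~(1u << order);      // clear the order bit
+ *   }
+ *
+ *   int main(void)
+ *   {
+ *       printf("%u\n", buddy_of(4, 2));      // prints 0
+ *       printf("%u\n", buddy_of(0, 2));      // prints 4
+ *       printf("%u\n", parent_of(4, 2));     // prints 0
+ *       return 0;
+ *   }
+ */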
+
+/* This handler registered to mali_mmap for non-MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+ _mali_osk_errcode_t ret;
+ struct mali_session_data * mali_session_data;
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ /* Unpack arguments */
+ mali_session_data = (struct mali_session_data *)args->ctx;
+
+ if (NULL == mali_session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: mali_session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ MALI_DEBUG_ASSERT( mali_subsystem_memory_id >= 0 );
+
+ session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+ /* validate input */
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+ descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+ if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+ descriptor->size = args->size;
+ descriptor->mali_address = args->phys_addr;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+
+ ret = _mali_osk_mem_mapregion_init( descriptor );
+ if ( _MALI_OSK_ERR_OK != ret )
+ {
+ MALI_DEBUG_PRINT(3, ("_mali_osk_mem_mapregion_init() failed\n"));
+ _mali_osk_free(descriptor);
+ MALI_ERROR(ret);
+ }
+
+ ret = _mali_osk_mem_mapregion_map( descriptor, 0, &descriptor->mali_address, descriptor->size );
+ if ( _MALI_OSK_ERR_OK != ret )
+ {
+ MALI_DEBUG_PRINT(3, ("_mali_osk_mem_mapregion_map() failed\n"));
+ _mali_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+ MALI_ERROR(ret);
+ }
+
+ args->mapping = descriptor->mapping;
+
+ /**
+ * @note we do not require use of mali_descriptor_mapping here:
+ * the cookie gets stored in the mali_memory_block struct, which itself is
+ * protected by mali_descriptor_mapping, and so this cookie never leaves
+ * kernel space (on any OS).
+ *
+ * In the MMU case, we must use a mali_descriptor_mapping, since on _some_
+ * OSs, the cookie leaves kernel space.
+ */
+ args->cookie = (u32)descriptor;
+ MALI_SUCCESS;
+}
+
+/* This handler registered to mali_munmap for non-MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+ mali_memory_allocation * descriptor;
+
+ /** see note in _mali_ukk_mem_mmap() - no need to use descriptor mapping */
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* args->mapping and args->size are also discarded. They are only necessary for certain do_munmap implementations. However, they could be used to check the descriptor at this point. */
+ _mali_osk_mem_mapregion_unmap( descriptor, 0, descriptor->size, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_mem_mapregion_term( descriptor );
+
+ _mali_osk_free(descriptor);
+
+ return _MALI_OSK_ERR_OK;
+}
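+
+/*
+ * Illustrative sketch, not driver code: the cookie round trip used by the
+ * non-MMU _mali_ukk_mem_mmap()/_mali_ukk_mem_munmap() pair above. Because the
+ * cookie never leaves kernel space here, the descriptor pointer itself can be
+ * handed out and cast back on unmap. The struct below is hypothetical, and the
+ * sketch uses uintptr_t for portability, whereas the driver stores the pointer
+ * in a u32 on its 32-bit targets.
+ *
+ *   #include <stdint.h>
+ *   #include <stdio.h>
+ *   #include <stdlib.h>
+ *
+ *   struct allocation { size_t size; };
+ *
+ *   int main(void)
+ *   {
+ *       struct allocation *desc = malloc(sizeof(*desc));
+ *       uintptr_t cookie;
+ *       if (NULL == desc) return 1;
+ *       desc->size = 4096;
+ *
+ *       cookie = (uintptr_t)desc;              // "mmap": hand out an opaque handle
+ *       desc = (struct allocation *)cookie;    // "munmap": recover the descriptor
+ *       printf("size=%zu\n", desc->size);
+ *       free(desc);
+ *       return 0;
+ *   }
+ */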
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed out)
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed out)
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed out)
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed out)
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed out)
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed out)
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c
new file mode 100644
index 00000000000..7a48e27ede7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c
@@ -0,0 +1,2924 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_ioctl.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_kernel_mem_mmu.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_kernel_mem_os.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_core.h"
+
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+#include "ump_kernel_interface.h"
+#endif
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
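+
+/*
+ * Illustrative sketch, not driver code: how a 32-bit Mali virtual address is
+ * split by the macros above into a 10-bit page directory index, a 10-bit page
+ * table index and a 12-bit page offset. The address used is hypothetical.
+ *
+ *   #include <stdio.h>
+ *
+ *   #define PDE_ENTRY(address) (((address) >> 22) & 0x03FF)
+ *   #define PTE_ENTRY(address) (((address) >> 12) & 0x03FF)
+ *   #define PAGE_OFFSET(address) ((address) & 0x0FFF)
+ *
+ *   int main(void)
+ *   {
+ *       unsigned int addr = 0x40001234u;
+ *       printf("pde=%u pte=%u offset=0x%03X\n",
+ *              PDE_ENTRY(addr), PTE_ENTRY(addr), PAGE_OFFSET(addr));
+ *       // prints: pde=256 pte=1 offset=0x234
+ *       return 0;
+ *   }
+ */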
+
+/**
+ * Newer Linux kernels have deprecated SA_SHIRQ in favour of IRQF_SHARED.
+ * This fallback handles older kernels which have not made that switch yet.
+ */
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif /* IRQF_SHARED */
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+/**
+ * Used to disallow more than one core from running an MMU at the same time
+ *
+ * @note This value is hardwired into some systems' configuration files,
+ * which \em might not be a header file (e.g. some external data configuration
+ * file). Therefore, if this value is modified, its occurrence must be
+ * \b manually checked for in the entire driver source tree.
+ */
+#define MALI_MMU_DISALLOW_PARALLELL_WORK_OF_MALI_CORES 1
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * MMU page directory/table entry flags
+ */
+typedef enum mali_mmu_entry_flags
+{
+ MALI_MMU_FLAGS_PRESENT = 0x01,
+ MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+ MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+ MALI_MMU_FLAGS_MASK = 0x07
+} mali_mmu_entry_flags;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+ MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+ MALI_MMU_REGISTER_STATUS = 0x0001, /**< Status of the MMU */
+ MALI_MMU_REGISTER_COMMAND = 0x0002, /**< Command register, used to control the MMU */
+ MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x0003, /**< Logical address of the last page fault */
+ MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x0004, /**< Used to invalidate the mapping of a single page from the MMU */
+ MALI_MMU_REGISTER_INT_RAWSTAT = 0x0005, /**< Raw interrupt status, all interrupts visible */
+ MALI_MMU_REGISTER_INT_CLEAR = 0x0006, /**< Indicate to the MMU that the interrupt has been received */
+ MALI_MMU_REGISTER_INT_MASK = 0x0007, /**< Enable/disable types of interrupts */
+ MALI_MMU_REGISTER_INT_STATUS = 0x0008 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt
+{
+ MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+ MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command
+{
+ MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+ MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+ MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+ MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+ MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+ MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+ MALI_MMU_COMMAND_SOFT_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+typedef enum mali_mmu_status_bits
+{
+ MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+ MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+ MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+ MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the type used to represent memory used by a session.
+ * Contains the pointer to the huge user space virtual memory area
+ * used to access the Mali memory.
+ */
+typedef struct memory_session
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting the vm manipulation */
+
+ u32 mali_base_address; /**< Mali virtual memory area used by this session */
+ mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+
+ u32 page_directory; /**< Physical address of the memory session's page directory */
+
+ mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+ mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exists in the page directory mapped into the kernel's address space */
+ u32 page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be released on the last reference */
+
+ _mali_osk_list_t active_mmus; /**< The MMUs in this session, in increasing order of ID (so we can lock them in the correct order when necessary) */
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+} memory_session;
+
+typedef struct mali_kernel_memory_mmu_idle_callback
+{
+ _mali_osk_list_t link;
+ void (*callback)(void*);
+ void * callback_argument;
+} mali_kernel_memory_mmu_idle_callback;
+
+/**
+ * Definition of the MMU struct
+ * Used to track an MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+typedef struct mali_kernel_memory_mmu
+{
+ int id; /**< ID of the MMU, no duplicate IDs may exist on the system */
+ const char * description; /**< Description text received from the resource manager to help identify the resource for people */
+ int irq_nr; /**< IRQ number */
+ u32 base; /**< Physical address of the registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ u32 mapping_size; /**< Size of registers in bytes */
+ _mali_osk_list_t list; /**< Used to link multiple MMUs into a list */
+ _mali_osk_irq_t *irq;
+ u32 flags; /**< Used to store if there is something special with this mmu. */
+
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the usage fields */
+ /* usage fields */
+ memory_session * active_session; /**< Active session, NULL if no session is active */
+ u32 usage_count; /**< Number of nested activations of the active session */
+ _mali_osk_list_t callbacks; /**< Callback registered for MMU idle notification */
+
+ int in_page_fault_handler;
+
+ _mali_osk_list_t session_link;
+} mali_kernel_memory_mmu;
+
+typedef struct dedicated_memory_info
+{
+ u32 base;
+ u32 size;
+ struct dedicated_memory_info * next;
+} dedicated_memory_info;
+
+/* types used for external_memory and ump_memory physical memory allocators, which are using the mali_allocation_engine */
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+typedef struct ump_mem_allocation
+{
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 initial_offset;
+ u32 size_allocated;
+ ump_dd_handle ump_mem;
+} ump_mem_allocation ;
+#endif
+
+typedef struct external_mem_allocation
+{
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 initial_offset;
+ u32 size;
+} external_mem_allocation;
+
+/*
+ Subsystem interface implementation
+*/
+/**
+ * MMU memory subsystem startup function.
+ * Called by the driver core when the driver is loaded.
+ * Registers the memory system's ioctl handler, resource handlers and memory map function with the core.
+ *
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * MMU memory subsystem shutdown function.
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up all MMU units and releases the subsystem's resources.
+ * @param id Identifier assigned by the core to the memory subsystem
+ */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * MMU Memory load complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered.
+ * Builds the overall memory list.
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id);
+
+/**
+ * MMU memory subsystem session begin notification
+ * Called by the core when a new session to the driver is started.
+ * Creates a memory session object and sets it as the subsystem slot data for this session
+ * @param slot Pointer to the slot to use for storing per-session data
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+/**
+ * MMU memory subsystem session end notification
+ * Called by the core when a session to the driver has ended.
+ * Cleans up per session data, which includes checking and fixing memory leaks
+ *
+ * @param slot Pointer to the slot to use for storing per-session data
+ */
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+/**
+ * MMU memory subsystem system info filler
+ * Called by the core when a system info update is needed
+ * We fill in info about all the memory types we have
+ * @param info Pointer to system info struct to update
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info);
+
+/* our registered resource handlers */
+
+/**
+ * MMU memory subsystem's notification handler for MMU resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each MMU described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MMU)
+ * @return 0 if the MMU was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource);
+
+/**
+ * MMU memory subsystem's notification handler for FPGA_FRAMEWORK resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each fpga framework described in the active architecture's config.h file.
+ * @param resource The resource to handle (type FPGA_FRAMEWORK)
+ * @return 0 if the FPGA framework was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource);
+
+
+static _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource);
+static _mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource);
+
+/**
+ * @brief Internal function for unmapping memory
+ *
+ * Worker function for unmapping memory from a user-process. We assume that the
+ * session/descriptor's lock was obtained before entry. For example, the
+ * wrapper _mali_ukk_mem_munmap() will lock the descriptor, then call this
+ * function to do the actual unmapping. mali_memory_core_session_end() could
+ * also call this directly (depending on compilation options), having locked
+ * the descriptor.
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_uk_types.h"
+ */
+static void _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args );
+
+/**
+ * The MMU interrupt handler
+ * Upper half of the MMU interrupt processing.
+ * Called by the kernel when the MMU has triggered an interrupt.
+ * The interrupt function supports IRQ sharing. So it'll probe the MMU in question
+ * @param data Points to the MMU object being handled
+ * @return _MALI_OSK_ERR_OK if the interrupt came from this MMU and was handled,
+ * otherwise a suitable _mali_osk_errcode_t (e.g. when a shared IRQ line was raised by another device)
+ */
+static _mali_osk_errcode_t mali_kernel_memory_mmu_interrupt_handler_upper_half(void * data);
+
+/**
+ * The MMU reset handler
+ * Bottom half of the MMU interrupt processing for page faults and bus errors
+ * @param data Points to the MMU object being handled
+ */
+static void mali_kernel_memory_mmu_interrupt_handler_bottom_half ( void *data );
+
+/**
+ * Read MMU register value
+ * Reads the contents of the specified register.
+ * @param unit The MMU to read from
+ * @param reg The register to read
+ * @return The contents of the register
+ */
+static u32 mali_mmu_register_read(mali_kernel_memory_mmu * unit, mali_mmu_register reg);
+
+/**
+ * Write to a MMU register
+ * Writes the given value to the specified register
+ * @param unit The MMU to write to
+ * @param reg The register to write to
+ * @param val The value to write to the register
+ */
+static void mali_mmu_register_write(mali_kernel_memory_mmu * unit, mali_mmu_register reg, u32 val);
+
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+static void ump_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0*/
+
+
+static void external_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+
+
+
+
+/* nop functions */
+
+/* mali address manager needs to allocate page tables on allocate, write to page table(s) on map, write to page table(s) and release page tables on release */
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor); /* validates the range, allocates memory for the page tables if needed */
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+static void mali_address_manager_release(mali_memory_allocation * descriptor);
+
+static void mali_mmu_activate_address_space(mali_kernel_memory_mmu * mmu, u32 page_directory);
+
+_mali_osk_errcode_t mali_mmu_page_table_cache_create(void);
+void mali_mmu_page_table_cache_destroy(void);
+
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping);
+void mali_mmu_release_table_page(u32 pa);
+
+static _mali_osk_errcode_t mali_allocate_empty_page_directory(void);
+
+static void mali_free_empty_page_directory(void);
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+static _mali_osk_errcode_t mali_allocate_fault_flush_pages(void);
+
+static void mali_free_fault_flush_pages(void);
+
+static void mali_mmu_probe_irq_trigger(mali_kernel_memory_mmu * mmu);
+static _mali_osk_errcode_t mali_mmu_probe_irq_acknowledge(mali_kernel_memory_mmu * mmu);
+
+/* MMU variables */
+
+typedef struct mali_mmu_page_table_allocation
+{
+ _mali_osk_list_t list;
+ u32 * usage_map;
+ u32 usage_count;
+ u32 num_pages;
+ mali_page_table_block pages;
+} mali_mmu_page_table_allocation;
+
+typedef struct mali_mmu_page_table_allocations
+{
+ _mali_osk_lock_t *lock;
+ _mali_osk_list_t partial;
+ _mali_osk_list_t full;
+ /* we never hold on to an empty allocation */
+} mali_mmu_page_table_allocations;
+
+/* Head of the list of MMUs */
+static _MALI_OSK_LIST_HEAD(mmu_head);
+
+/* the mmu page table cache */
+static struct mali_mmu_page_table_allocations page_table_cache;
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
+static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
+static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static u32 mali_empty_page_directory = MALI_INVALID_PAGE;
+
+/*
+ The MMU memory system's mali subsystem interface implementation.
+ We currently handle module and session life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_memory =
+{
+ mali_memory_core_initialize, /* startup */
+ mali_memory_core_terminate, /* shutdown */
+ mali_memory_core_load_complete, /* load_complete */
+ mali_memory_core_system_info_fill, /* system_info_fill */
+ mali_memory_core_session_begin, /* session_begin */
+ mali_memory_core_session_end, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+static mali_kernel_mem_address_manager mali_address_manager =
+{
+ mali_address_manager_allocate, /* allocate */
+ mali_address_manager_release, /* release */
+ mali_address_manager_map, /* map_physical */
+ NULL /* unmap_physical not present*/
+};
+
+static mali_kernel_mem_address_manager process_address_manager =
+{
+ _mali_osk_mem_mapregion_init, /* allocate */
+ _mali_osk_mem_mapregion_term, /* release */
+ _mali_osk_mem_mapregion_map, /* map_physical */
+ _mali_osk_mem_mapregion_unmap /* unmap_physical */
+};
+
+static mali_allocation_engine memory_engine = NULL;
+static mali_physical_memory_allocator * physical_memory_allocators = NULL;
+
+static dedicated_memory_info * mem_region_registrations = NULL;
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_memory_id = (mali_kernel_subsystem_identifier)-1;
+
+/* called during module init */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id)
+{
+ MALI_DEBUG_PRINT(2, ("MMU memory system initializing\n"));
+
+ /* save our subsystem id for later for use in slot lookup during session activation */
+ mali_subsystem_memory_id = id;
+
+ _MALI_OSK_INIT_LIST_HEAD(&mmu_head);
+
+ MALI_CHECK_NO_ERROR( mali_mmu_page_table_cache_create() );
+
+ /* register our handlers */
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(MMU, mali_memory_core_resource_mmu) );
+
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(FPGA_FRAMEWORK, mali_memory_core_resource_fpga) );
+
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(MEMORY, mali_memory_core_resource_dedicated_memory) );
+
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(OS_MEMORY, mali_memory_core_resource_os_memory) );
+
+ memory_engine = mali_allocation_engine_create(&mali_address_manager, &process_address_manager);
+ MALI_CHECK_NON_NULL( memory_engine, _MALI_OSK_ERR_FAULT);
+
+ MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_memory_mmu * mmu, *temp_mmu;
+
+ MALI_DEBUG_PRINT(2, ("MMU memory system terminating\n"));
+
+ /* loop over all MMU units and shut them down */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
+ {
+ /* reset to defaults */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+
+ /* unregister the irq */
+ _mali_osk_irq_term(mmu->irq);
+
+ /* remove from the list of MMUs on the system */
+ _mali_osk_list_del(&mmu->list);
+
+ /* release resources */
+ _mali_osk_mem_unmapioregion(mmu->base, mmu->mapping_size, mmu->mapped_registers);
+ _mali_osk_mem_unreqregion(mmu->base, mmu->mapping_size);
+ _mali_osk_free(mmu);
+ }
+
+ /* free global helper pages */
+ mali_free_empty_page_directory();
+ mali_free_fault_flush_pages();
+
+ /* destroy the page table cache before shutting down backends in case we have a page table leak to report */
+ mali_mmu_page_table_cache_destroy();
+
+ while ( NULL != mem_region_registrations)
+ {
+ dedicated_memory_info * m;
+ m = mem_region_registrations;
+ mem_region_registrations = m->next;
+ _mali_osk_mem_unreqregion(m->base, m->size);
+ _mali_osk_free(m);
+ }
+
+ while ( NULL != physical_memory_allocators)
+ {
+ mali_physical_memory_allocator * m;
+ m = physical_memory_allocators;
+ physical_memory_allocators = m->next;
+ m->destroy(m);
+ }
+
+ if (NULL != memory_engine)
+ {
+ mali_allocation_engine_destroy(memory_engine);
+ memory_engine = NULL;
+ }
+
+}
+
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ memory_session * session_data;
+ _mali_osk_errcode_t err;
+ int i;
+ mali_io_address pd_mapped;
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ if (NULL != *slot)
+ {
+ MALI_DEBUG_PRINT(1, ("The slot given to memory session begin already contains data"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_DEBUG_PRINT(2, ("MMU session begin\n"));
+
+ /* create the session data object */
+ session_data = _mali_osk_calloc(1, sizeof(memory_session));
+ MALI_CHECK_NON_NULL( session_data, _MALI_OSK_ERR_NOMEM );
+
+ /* create descriptor mapping table */
+ session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+ if (NULL == session_data->descriptor_mapping)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ err = mali_mmu_get_table_page(&session_data->page_directory, &pd_mapped);
+
+ session_data->page_directory_mapped = pd_mapped;
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ _mali_osk_free(session_data);
+ MALI_ERROR(err);
+ }
+ MALI_DEBUG_ASSERT_POINTER( session_data->page_directory_mapped );
+
+ MALI_DEBUG_PRINT(2, ("Page directory for session 0x%x placed at physical address 0x%08X\n", mali_session_data, session_data->page_directory));
+
+ for (i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+ {
+ /* mark each page table as not present */
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, sizeof(u32) * i, 0);
+ }
+
+ /* page_entries_mapped[] is already set to NULL by the _mali_osk_calloc call */
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->active_mmus);
+ session_data->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 128);
+ if (NULL == session_data->lock)
+ {
+ mali_mmu_release_table_page(session_data->page_directory);
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Init the session's memory allocation list */
+ _MALI_OSK_INIT_LIST_HEAD( &session_data->memory_head );
+
+ *slot = session_data; /* slot will point to our data object */
+ MALI_DEBUG_PRINT(2, ("MMU session begin: success\n"));
+ MALI_SUCCESS;
+}
+
+static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
+{
+ mali_memory_allocation * descriptor;
+
+ descriptor = (mali_memory_allocation*)map_target;
+
+ MALI_DEBUG_PRINT(1, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
+ MALI_DEBUG_ASSERT(descriptor);
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+}
+
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ memory_session * session_data;
+ int i;
+ const int num_page_table_entries = sizeof(session_data->page_entries_mapped) / sizeof(session_data->page_entries_mapped[0]);
+
+ MALI_DEBUG_PRINT(2, ("MMU session end\n"));
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ return;
+ }
+
+ session_data = (memory_session *)*slot;
+
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+ return;
+ }
+ /* Lock the session so we can modify the memory list */
+ _mali_osk_lock_wait( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+ /* The lock is non-interruptible, so the wait always succeeds; error checking is done in the OSK function. */
+
+#ifndef MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+#if _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+#error Indirect MMAP specified, but UKK does not have implicit MMAP cleanup. Current implementation does not handle this.
+#else
+
+ /* Free all memory engine allocations */
+ if (0 == _mali_osk_list_empty(&session_data->memory_head))
+ {
+ mali_memory_allocation *descriptor;
+ mali_memory_allocation *temp;
+ _mali_uk_mem_munmap_s unmap_args;
+
+ MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+ unmap_args.ctx = mali_session_data;
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->memory_head, mali_memory_allocation, list)
+ {
+ MALI_DEBUG_PRINT(4, ("Freeing block with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ descriptor->mali_address, descriptor->size, descriptor->mapping)
+ );
+ /* ASSERT that the descriptor's lock references the correct thing */
+ MALI_DEBUG_ASSERT( descriptor->lock == session_data->lock );
+ /* Therefore, we have already locked the descriptor */
+
+ unmap_args.size = descriptor->size;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.cookie = (u32)descriptor;
+
+ /*
+ * This removes the descriptor from the list, and frees the descriptor
+ *
+ * Does not handle the _MALI_OSK_SPECIFIC_INDIRECT_MMAP case, since
+ * the only OS we are aware of that requires indirect MMAP also has
+ * implicit mmap cleanup.
+ */
+ _mali_ukk_mem_munmap_internal( &unmap_args );
+ }
+ }
+
+ /* Assert that we really did free everything */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty(&session_data->memory_head) );
+#endif /* _MALI_OSK_SPECIFIC_INDIRECT_MMAP */
+#endif /* MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP */
+
+ if (NULL != session_data->descriptor_mapping)
+ {
+ mali_descriptor_mapping_call_for_each(session_data->descriptor_mapping, descriptor_table_cleanup_callback);
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ session_data->descriptor_mapping = NULL;
+ }
+
+ for (i = 0; i < num_page_table_entries; i++)
+ {
+ /* free PTE memory */
+ if (session_data->page_directory_mapped && (_mali_osk_mem_ioread32(session_data->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT))
+ {
+ mali_mmu_release_table_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0);
+ }
+ }
+
+ if (MALI_INVALID_PAGE != session_data->page_directory)
+ {
+ mali_mmu_release_table_page(session_data->page_directory);
+ session_data->page_directory = MALI_INVALID_PAGE;
+ }
+
+ _mali_osk_lock_signal( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+
+ /**
+ * @note Could the VMA close handler mean that we use the session data after it was freed?
+ * If so, we would need to refcount the session data and free it on VMA close.
+ */
+
+ /* Free the lock */
+ _mali_osk_lock_term( session_data->lock );
+ /* free the session data object */
+ _mali_osk_free(session_data);
+
+ /* clear our slot */
+ *slot = NULL;
+
+ return;
+}
+
+static _mali_osk_errcode_t mali_allocate_empty_page_directory(void)
+{
+ _mali_osk_errcode_t err;
+ mali_io_address mapping;
+
+ MALI_CHECK_NO_ERROR(mali_mmu_get_table_page(&mali_empty_page_directory, &mapping));
+
+ MALI_DEBUG_ASSERT_POINTER( mapping );
+
+ err = fill_page(mapping, 0);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ mali_mmu_release_table_page(mali_empty_page_directory);
+ mali_empty_page_directory = MALI_INVALID_PAGE;
+ }
+ return err;
+}
+
+static void mali_free_empty_page_directory(void)
+{
+ if (MALI_INVALID_PAGE != mali_empty_page_directory)
+ {
+ mali_mmu_release_table_page(mali_empty_page_directory);
+ mali_empty_page_directory = MALI_INVALID_PAGE;
+ }
+}
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+ int i;
+ MALI_DEBUG_ASSERT_POINTER( mapping );
+
+ for(i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+ {
+ _mali_osk_mem_iowrite32( mapping, i * sizeof(u32), data);
+ }
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_allocate_fault_flush_pages(void)
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_mmu_get_table_page(&mali_page_fault_flush_data_page, &mali_page_fault_flush_data_page_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ err = mali_mmu_get_table_page(&mali_page_fault_flush_page_table, &mali_page_fault_flush_page_table_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ err = mali_mmu_get_table_page(&mali_page_fault_flush_page_directory, &mali_page_fault_flush_page_directory_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ fill_page(mali_page_fault_flush_data_page_mapping, 0);
+ fill_page(mali_page_fault_flush_page_table_mapping, mali_page_fault_flush_data_page | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+ fill_page(mali_page_fault_flush_page_directory_mapping, mali_page_fault_flush_page_table | MALI_MMU_FLAGS_PRESENT);
+ MALI_SUCCESS;
+ }
+ mali_mmu_release_table_page(mali_page_fault_flush_page_table);
+ mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+ mali_page_fault_flush_page_table_mapping = NULL;
+ }
+ mali_mmu_release_table_page(mali_page_fault_flush_data_page);
+ mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+ mali_page_fault_flush_data_page_mapping = NULL;
+ }
+ MALI_ERROR(err);
+}
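+
+/*
+ * Illustrative sketch, not driver code: how the fault-flush setup above
+ * composes its entries, i.e. a page-aligned physical address OR'ed with the
+ * mali_mmu_entry_flags bits, and how the address is recovered again by masking
+ * the flags off. The physical addresses below are hypothetical.
+ *
+ *   #include <stdio.h>
+ *
+ *   enum { FLAG_PRESENT = 0x01, FLAG_READ = 0x02, FLAG_WRITE = 0x04, FLAGS_MASK = 0x07 };
+ *
+ *   int main(void)
+ *   {
+ *       unsigned int data_page  = 0x80010000u;   // 4kB-aligned physical pages
+ *       unsigned int page_table = 0x80011000u;
+ *
+ *       unsigned int pte = data_page  | FLAG_READ | FLAG_WRITE | FLAG_PRESENT;
+ *       unsigned int pde = page_table | FLAG_PRESENT;
+ *
+ *       printf("pte=0x%08X phys=0x%08X\n", pte, pte & ~FLAGS_MASK);
+ *       printf("pde=0x%08X phys=0x%08X\n", pde, pde & ~FLAGS_MASK);
+ *       return 0;
+ *   }
+ */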
+
+static void mali_free_fault_flush_pages(void)
+{
+ if (MALI_INVALID_PAGE != mali_page_fault_flush_page_directory)
+ {
+ mali_mmu_release_table_page(mali_page_fault_flush_page_directory);
+ mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+ }
+
+ if (MALI_INVALID_PAGE != mali_page_fault_flush_page_table)
+ {
+ mali_mmu_release_table_page(mali_page_fault_flush_page_table);
+ mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+ }
+
+ if (MALI_INVALID_PAGE != mali_page_fault_flush_data_page)
+ {
+ mali_mmu_release_table_page(mali_page_fault_flush_data_page);
+ mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+ }
+}
+
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+
+ /* Report the allocators */
+ mali_allocation_engine_report_allocators( physical_memory_allocators );
+
+ /* allocate the helper pages */
+ MALI_CHECK_NO_ERROR( mali_allocate_empty_page_directory() );
+ if (_MALI_OSK_ERR_OK != mali_allocate_fault_flush_pages())
+ {
+ mali_free_empty_page_directory();
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* activate the empty page directory on all MMUs */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
+ {
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+ }
+
+ MALI_DEBUG_PRINT(4, ("MMUs activated\n"));
+ /* the MMU system is now active */
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info)
+{
+ _mali_mem_info * mem_info;
+
+ /* make sure we won't leak any memory. It could also be that it's an uninitialized variable, but that would be a bug in the caller */
+ MALI_DEBUG_ASSERT(NULL == info->mem_info);
+
+ info->has_mmu = 1;
+
+ mem_info = _mali_osk_calloc(1,sizeof(_mali_mem_info));
+ MALI_CHECK_NON_NULL( mem_info, _MALI_OSK_ERR_NOMEM );
+
+ mem_info->size = 2048UL * 1024UL * 1024UL;
+ mem_info->maximum_order_supported = 30;
+ mem_info->flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE;
+ mem_info->identifier = 0;
+
+ info->mem_info = mem_info;
+
+ /* all OK */
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource)
+{
+ mali_kernel_memory_mmu * mmu;
+
+ MALI_DEBUG_PRINT(4, ("MMU '%s' @ (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1
+ ));
+
+ if (NULL != mali_memory_core_mmu_lookup(resource->mmu_id))
+ {
+ MALI_DEBUG_PRINT(1, ("Duplicate MMU ids found. The id %d is already in use\n", resource->mmu_id));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, MALI_MMU_REGISTERS_SIZE, resource->description))
+ {
+ /* specified addresses are already in use by another driver / the kernel */
+ MALI_DEBUG_PRINT(
+ 1, ("Failed to request MMU '%s' register address space at (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1
+ ));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mmu = _mali_osk_calloc(1, sizeof(mali_kernel_memory_mmu));
+
+ if (NULL == mmu)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate memory for handling a MMU unit"));
+ _mali_osk_mem_unreqregion(resource->base, MALI_MMU_REGISTERS_SIZE);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* basic setup */
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->list);
+
+ mmu->id = resource->mmu_id;
+ mmu->irq_nr = resource->irq;
+ mmu->flags = resource->flags;
+ mmu->base = resource->base;
+ mmu->mapping_size = MALI_MMU_REGISTERS_SIZE;
+ mmu->description = resource->description; /* no need to copy */
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->callbacks);
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->session_link);
+ mmu->in_page_fault_handler = 0;
+
+ mmu->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 127-mmu->id);
+ if (NULL == mmu->lock)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create mmu lock\n"));
+ _mali_osk_mem_unreqregion(mmu->base, mmu->mapping_size);
+ _mali_osk_free(mmu);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* map the registers */
+ mmu->mapped_registers = _mali_osk_mem_mapioregion( mmu->base, mmu->mapping_size, mmu->description );
+ if (NULL == mmu->mapped_registers)
+ {
+ /* failed to map the registers */
+ MALI_DEBUG_PRINT(1, ("Failed to map MMU registers at 0x%08X\n", mmu->base));
+ _mali_osk_lock_term(mmu->lock);
+ _mali_osk_mem_unreqregion(mmu->base, MALI_MMU_REGISTERS_SIZE);
+ _mali_osk_free(mmu);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_DEBUG_PRINT(4, ("MMU '%s' @ (0x%08X - 0x%08X) mapped to 0x%08X\n",
+ resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1, mmu->mapped_registers
+ ));
+
+ /* setup MMU interrupt mask */
+ /* set all values to known defaults */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* setup MMU page directory pointer */
+ /* The mali_page_directory pointer is guaranteed to be 4kB aligned because we've used get_zeroed_page to acquire it */
+ /* convert the kernel virtual address into a physical address and set */
+
+ /* add to our list of MMUs */
+ _mali_osk_list_addtail(&mmu->list, &mmu_head);
+
+ mmu->irq = _mali_osk_irq_init(
+ mmu->irq_nr,
+ mali_kernel_memory_mmu_interrupt_handler_upper_half,
+ mali_kernel_memory_mmu_interrupt_handler_bottom_half,
+ (_mali_osk_irq_trigger_t)mali_mmu_probe_irq_trigger,
+ (_mali_osk_irq_ack_t)mali_mmu_probe_irq_acknowledge,
+ mmu,
+ "mali_mmu_irq_handlers"
+ );
+ if (NULL == mmu->irq)
+ {
+ _mali_osk_list_del(&mmu->list);
+ _mali_osk_lock_term(mmu->lock);
+ _mali_osk_mem_unmapioregion( mmu->base, mmu->mapping_size, mmu->mapped_registers );
+ _mali_osk_mem_unreqregion(resource->base, MALI_MMU_REGISTERS_SIZE);
+ _mali_osk_free(mmu);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* set to a known state */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+
+ MALI_DEBUG_PRINT(2, ("MMU registered\n"));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource)
+{
+ mali_io_address mapping;
+
+ MALI_DEBUG_PRINT(5, ("FPGA framework '%s' @ (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + sizeof(u32) * 2 - 1
+ ));
+
+ mapping = _mali_osk_mem_mapioregion(resource->base + 0x1000, sizeof(u32) * 2, "fpga framework");
+ if (mapping)
+ {
+ MALI_DEBUG_CODE(u32 data = )
+ _mali_osk_mem_ioread32(mapping, 0);
+ MALI_DEBUG_PRINT(2, ("FPGA framwork '%s' @ 0x%08X:\n", resource->description, resource->base));
+ MALI_DEBUG_PRINT(2, ("\tBitfile date: %d%02d%02d_%02d%02d\n",
+ (data >> 20),
+ (data >> 16) & 0xF,
+ (data >> 11) & 0x1F,
+ (data >> 6) & 0x1F,
+ (data >> 0) & 0x3F));
+ MALI_DEBUG_CODE(data = )
+ _mali_osk_mem_ioread32(mapping, sizeof(u32));
+ MALI_DEBUG_PRINT(2, ("\tBitfile SCCS rev: %d\n", data));
+
+ _mali_osk_mem_unmapioregion(resource->base + 0x1000, sizeof(u32) * 2, mapping);
+ }
+ else MALI_DEBUG_PRINT(1, ("Failed to access FPGA framework '%s' @ 0x%08X\n", resource->description, resource->base));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+
+ u32 alloc_order = resource->alloc_order;
+
+ allocator = mali_os_allocator_create(resource->size, resource->cpu_usage_adjust, resource->description);
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create OS memory allocator\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ allocator->alloc_order = alloc_order;
+
+ /* link in the allocator: insertion into ordered list
+ * resources of the same alloc_order will be Last-in-first */
+ next_allocator_list = &physical_memory_allocators;
+
+ while ( NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
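+
+/*
+ * Illustrative sketch, not driver code: the pointer-to-pointer ordered insert
+ * used above to keep physical_memory_allocators sorted by alloc_order, with
+ * equal orders ending up last-in-first. The allocator struct is a hypothetical
+ * stand-in for mali_physical_memory_allocator.
+ *
+ *   #include <stdio.h>
+ *   #include <stdlib.h>
+ *
+ *   struct allocator { unsigned int alloc_order; struct allocator *next; };
+ *
+ *   static void insert_ordered(struct allocator **head, struct allocator *a)
+ *   {
+ *       struct allocator **link = head;
+ *       while (NULL != *link && (*link)->alloc_order < a->alloc_order)
+ *           link = &(*link)->next;             // walk the links, not the nodes
+ *       a->next = *link;                       // splice in before the first entry with order >= ours
+ *       *link = a;
+ *   }
+ *
+ *   int main(void)
+ *   {
+ *       unsigned int orders[] = { 2, 0, 1, 1 };
+ *       struct allocator *head = NULL, *p;
+ *       size_t i;
+ *       for (i = 0; i < sizeof(orders) / sizeof(orders[0]); i++)
+ *       {
+ *           struct allocator *a = calloc(1, sizeof(*a));
+ *           if (NULL == a) return 1;
+ *           a->alloc_order = orders[i];
+ *           insert_ordered(&head, a);
+ *       }
+ *       for (p = head; NULL != p; p = p->next) printf("%u ", p->alloc_order);
+ *       printf("\n");                          // prints: 0 1 1 2
+ *       return 0;                              // nodes intentionally leaked for brevity
+ *   }
+ */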
+
+static _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+ dedicated_memory_info * cleanup_data;
+
+ u32 alloc_order = resource->alloc_order;
+
+ /* do the lowlevel linux operation first */
+
+ /* Request ownership of the memory */
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* create generic block allocator object to handle it */
+ allocator = mali_block_allocator_create(resource->base, resource->cpu_usage_adjust, resource->size, resource->description );
+
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* save lowlevel cleanup info */
+ allocator->alloc_order = alloc_order;
+
+ cleanup_data = _mali_osk_malloc(sizeof(dedicated_memory_info));
+
+ if (NULL == cleanup_data)
+ {
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ allocator->destroy(allocator);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ cleanup_data->base = resource->base;
+ cleanup_data->size = resource->size;
+
+ cleanup_data->next = mem_region_registrations;
+ mem_region_registrations = cleanup_data;
+
+ /* link in the allocator: insertion into ordered list
+ * resources of the same alloc_order will be Last-in-first */
+ next_allocator_list = &physical_memory_allocators;
+
+ while ( NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_kernel_memory_mmu_interrupt_handler_upper_half(void * data)
+{
+ mali_kernel_memory_mmu * mmu;
+ u32 int_stat;
+
+ if (mali_benchmark) MALI_SUCCESS;
+
+ mmu = (mali_kernel_memory_mmu *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ /* check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
+ int_stat = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_STATUS);
+ if (0 == int_stat)
+ {
+ MALI_DEBUG_PRINT(5, ("Ignoring shared interrupt\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT); /* no bits set, we are sharing the IRQ line and someone else caused the interrupt */
+ }
+
+ MALI_DEBUG_PRINT(1, ("mali_kernel_memory_mmu_interrupt_handler_upper_half\n"));
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, 0);
+
+ mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS);
+
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+ {
+ MALI_PRINT(("Page fault on %s\n", mmu->description));
+
+ _mali_osk_irq_schedulework(mmu->irq);
+ }
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+ {
+ MALI_PRINT(("Bus read error on %s\n", mmu->description));
+ /* clear interrupt flag */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* reenable it */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_MASK) | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ }
+
+ MALI_SUCCESS;
+}
+
+
+static void mali_kernel_mmu_bus_reset(mali_kernel_memory_mmu * mmu)
+{
+
+#if defined(USING_MALI200)
+ int i;
+ const int replay_buffer_check_interval = 10; /* must be below 1000 */
+ const int replay_buffer_max_number_of_checks = 100;
+#endif
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ /* add an extra reference while handling the page fault */
+ mmu->usage_count++;
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_DEBUG_PRINT(4, ("Sending stop bus request to cores\n"));
+ /* request to stop the bus, but don't wait for it to actually stop */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES, (u32)mmu);
+
+#if defined(USING_MALI200)
+ /* no new requests will come from any of the connected cores from now on,
+ * so we must flush the playback buffer for any requests already queued
+ */
+ MALI_DEBUG_PRINT(4, ("Switching to the special page fault flush page directory\n"));
+ /* don't use the mali_mmu_activate_address_space function here as we can't stall the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_page_fault_flush_page_directory);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ /* resume the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+ /* the MMU will now play back all the requests, all going to our special page fault flush data page */
+
+ /* just to be safe, check that the playback buffer is empty before continuing */
+ if (!mali_benchmark) {
+ for (i = 0; i < replay_buffer_max_number_of_checks; i++)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY) break;
+ _mali_osk_time_ubusydelay(replay_buffer_check_interval);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, i == replay_buffer_max_number_of_checks, ("MMU: %s: Failed to flush replay buffer on page fault\n", mmu->description));
+ MALI_DEBUG_PRINT(1, ("Replay playback took %ld usec\n", i * replay_buffer_check_interval));
+ }
+#endif
+ /* notify all subsystems that the core should be reset once the bus is actually stopped */
+ MALI_DEBUG_PRINT(4,("Sending job abort command to subsystems\n"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS, (u32)mmu);
+
+ /* reprogram the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory); /* no session is active, so just activate the empty page directory */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+ /* release the extra address space reference, will schedule */
+ mali_memory_core_mmu_release_address_space_reference(mmu);
+
+ /* resume normal operation */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP3_CONTINUE_JOB_HANDLING, (u32)mmu);
+ MALI_DEBUG_PRINT(4, ("Page fault handling complete\n"));
+}
+
+void mali_kernel_mmu_reset(void * input_mmu)
+{
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(input_mmu);
+ mmu = (mali_kernel_memory_mmu *)input_mmu;
+
+ MALI_DEBUG_PRINT(4, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->description));
+
+ if ( 0 != mmu->in_page_fault_handler)
+ {
+ /* This is possible if the bus can never be stopped for some reason */
+ MALI_PRINT_ERROR(("Stopping the Memory bus not possible. Mali reset could not be performed."));
+ return;
+ }
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory); /* no session is active, so just activate the empty page directory */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+void mali_kernel_mmu_force_bus_reset(void * input_mmu)
+{
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(input_mmu);
+ mmu = (mali_kernel_memory_mmu *)input_mmu;
+ if ( 0 != mmu->in_page_fault_handler)
+ {
+ /* This is possible if the bus can never be stopped for some reason */
+ MALI_PRINT_ERROR(("Stopping the Memory bus not possible. Mali reset could not be performed."));
+ return;
+ }
+ MALI_DEBUG_PRINT(1, ("Mali MMU: Force_bus_reset.\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, 0);
+ mali_kernel_mmu_bus_reset(mmu);
+}
+
+
+static void mali_kernel_memory_mmu_interrupt_handler_bottom_half(void * data)
+{
+ mali_kernel_memory_mmu * mmu;
+ u32 raw, fault_address, status;
+
+ if (NULL == data)
+ {
+ MALI_PRINT_ERROR(("MMU IRQ work queue: NULL argument"));
+ return; /* Error */
+ }
+ mmu = (mali_kernel_memory_mmu*)data;
+
+
+ MALI_DEBUG_PRINT(4, ("Locking subsystems\n"));
+ /* lock all subsystems */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP0_LOCK_SUBSYSTEM, (u32)mmu);
+
+ raw = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_RAWSTAT);
+ status = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS);
+
+ if ( (0==(raw & MALI_MMU_INTERRUPT_PAGE_FAULT)) && (0==(status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)) )
+ {
+ MALI_DEBUG_PRINT(1, ("MMU: Page fault bottom half: No Irq found.\n"));
+ MALI_DEBUG_PRINT(4, ("Unlocking subsystems"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, (u32)mmu);
+ return;
+ }
+
+ mmu->in_page_fault_handler = 1;
+
+ fault_address = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+ MALI_PRINT(("Page fault detected at 0x%x from bus id %d of type %s on %s\n",
+ (void*)fault_address,
+ (status >> 6) & 0x1F,
+ (status & 32) ? "write" : "read",
+ mmu->description)
+ );
+
+ if (NULL == mmu->active_session)
+ {
+ MALI_PRINT(("Spurious memory access detected from MMU %s\n", mmu->description));
+ }
+ else
+ {
+ MALI_PRINT(("Active page directory at 0x%08X\n", mmu->active_session->page_directory));
+ MALI_PRINT(("Info from page table for VA 0x%x:\n", (void*)fault_address));
+ MALI_PRINT(("DTE entry: PTE at 0x%x marked as %s\n",
+ (void*)(_mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped,
+ MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK),
+ _mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped,
+ MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT ? "present" : "not present"
+ ));
+
+ if (_mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped, MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)
+ {
+ mali_io_address pte;
+ u32 data;
+ pte = mmu->active_session->page_entries_mapped[MALI_MMU_PDE_ENTRY(fault_address)];
+ data = _mali_osk_mem_ioread32(pte, MALI_MMU_PTE_ENTRY(fault_address) * sizeof(u32));
+ MALI_PRINT(("PTE entry: Page at 0x%x, %s %s %s\n",
+ (void*)(data & ~MALI_MMU_FLAGS_MASK),
+ data & MALI_MMU_FLAGS_PRESENT ? "present" : "not present",
+ data & MALI_MMU_FLAGS_READ_PERMISSION ? "readable" : "",
+ data & MALI_MMU_FLAGS_WRITE_PERMISSION ? "writable" : ""
+ ));
+ }
+ else
+ {
+ MALI_PRINT(("PTE entry: Not present\n"));
+ }
+ }
+
+
+ mali_kernel_mmu_bus_reset(mmu);
+
+ mmu->in_page_fault_handler = 0;
+
+ /* unlock all subsystems */
+ MALI_DEBUG_PRINT(4, ("Unlocking subsystems"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, (u32)mmu);
+
+}
+
+
+static u32 mali_mmu_register_read(mali_kernel_memory_mmu * unit, mali_mmu_register reg)
+{
+ u32 val;
+
+ if (mali_benchmark) return 0;
+
+ val = _mali_osk_mem_ioread32(unit->mapped_registers, (u32)reg * sizeof(u32));
+
+ MALI_DEBUG_PRINT(6, ("mali_mmu_register_read addr:0x%04X val:0x%08x\n", (u32)reg * sizeof(u32),val));
+
+ return val;
+}
+
+static void mali_mmu_register_write(mali_kernel_memory_mmu * unit, mali_mmu_register reg, u32 val)
+{
+ if (mali_benchmark) return;
+
+ MALI_DEBUG_PRINT(6, ("mali_mmu_register_write addr:0x%04X val:0x%08x\n", (u32)reg * sizeof(u32), val));
+
+ _mali_osk_mem_iowrite32(unit->mapped_registers, (u32)reg * sizeof(u32), val);
+}
+
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ ump_dd_handle ump_mem;
+ u32 nr_blocks;
+ u32 i;
+ ump_dd_physical_block * ump_blocks;
+ ump_mem_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof( ump_mem_allocation ) );
+ if ( NULL==ret_allocation ) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ump_mem = (ump_dd_handle)ctx;
+
+ MALI_DEBUG_PRINT(4, ("In ump_memory_commit\n"));
+
+ nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+
+ MALI_DEBUG_PRINT(4, ("Have %d blocks\n", nr_blocks));
+
+ if (nr_blocks == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("No block count\n"));
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks );
+ if ( NULL==ump_blocks )
+ {
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks))
+ {
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ /* Store away the initial offset for unmapping purposes */
+ ret_allocation->initial_offset = *offset;
+
+ for(i=0; i<nr_blocks; ++i)
+ {
+ MALI_DEBUG_PRINT(4, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[i].addr , 0, ump_blocks[i].size ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += ump_blocks[i].size;
+ }
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[0].addr , 0, _MALI_OSK_MALI_PAGE_SIZE ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ _mali_osk_free( ump_blocks );
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->ump_mem = ump_mem;
+ ret_allocation->size_allocated = *offset - ret_allocation->initial_offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = ump_memory_release;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+static void ump_memory_release(void * ctx, void * handle)
+{
+ ump_dd_handle ump_mem;
+ ump_mem_allocation *allocation;
+
+ allocation = (ump_mem_allocation *)handle;
+
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ ump_mem = allocation->ump_mem;
+
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID!=ump_mem);
+
+ /* At present, this is a no-op. But it allows the mali_address_manager to
+ * do unmapping of a subrange in the future. */
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size_allocated,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+ _mali_osk_free( allocation );
+
+
+ ump_dd_reference_release(ump_mem) ;
+ return;
+}
+
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args )
+{
+ ump_dd_handle ump_mem;
+ mali_physical_memory_allocator external_memory_allocator;
+ memory_session * session_data;
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+ args->secure_id, args->mali_address, args->size));
+
+ ump_mem = ump_dd_handle_create_from_secure_id( (int)args->secure_id ) ;
+
+ if ( UMP_DD_HANDLE_INVALID==ump_mem ) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor)
+ {
+ ump_dd_reference_release(ump_mem);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ external_memory_allocator.allocate = ump_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = ump_mem;
+ external_memory_allocator.name = "UMP Memory";
+ external_memory_allocator.next = NULL;
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+
+ MALI_SUCCESS;
+
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0 */
+
+
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ u32 * data;
+ external_mem_allocation * ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof(external_mem_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ data = (u32*)ctx;
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->initial_offset = *offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = external_memory_release;
+
+ MALI_DEBUG_PRINT(3, ("External map: mapping phys 0x%08X at mali virtual address 0x%08X staring at offset 0x%08X length 0x%08X\n", data[0], descriptor->mali_address, *offset, data[1]));
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, data[1]))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += data[1];
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, _MALI_OSK_MALI_PAGE_SIZE))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap what we previously mapped */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ ret_allocation->size = *offset - ret_allocation->initial_offset;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+static void external_memory_release(void * ctx, void * handle)
+{
+ external_mem_allocation * allocation;
+
+ allocation = (external_mem_allocation *) handle;
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ /* At present, this is a no-op. But it allows the mali_address_manager to
+ * do unmapping of a subrange in the future. */
+
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+
+ _mali_osk_free( allocation );
+
+ return;
+}
+
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+ mali_physical_memory_allocator external_memory_allocator;
+ memory_session * session_data;
+ u32 info[2];
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ external_memory_allocator.allocate = external_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = &info[0];
+ external_memory_allocator.name = "External Memory";
+ external_memory_allocator.next = NULL;
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+ (void*)args->phys_addr,
+ (void*)(args->phys_addr + args->size -1),
+ (void*)args->mali_address)
+ );
+
+ /* Validate the mali physical range */
+ MALI_CHECK_NO_ERROR( mali_kernel_core_validate_mali_phys_range( args->phys_addr, args->size ) );
+
+ info[0] = args->phys_addr;
+ info[1] = args->size;
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->lock = NULL;
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from range_map_external_memory\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ args->memory_size = 2 * 1024 * 1024 * 1024UL; /* 2GB address space */
+ args->mali_address_base = 1 * 1024 * 1024 * 1024UL; /* starting at 1GB, giving this layout: (0-1GB unused)(1GB-3GB used by Mali)(3GB-4GB unused) */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_mmu_page_table_cache_create(void)
+{
+ page_table_cache.lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 110);
+ MALI_CHECK_NON_NULL( page_table_cache.lock, _MALI_OSK_ERR_FAULT );
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.partial);
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.full);
+ MALI_SUCCESS;
+}
+
+void mali_mmu_page_table_cache_destroy(void)
+{
+ mali_mmu_page_table_allocation * alloc, *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT_IF(1, 0 != alloc->usage_count, ("Destroying page table cache while pages are tagged as in use. %d allocations still marked as in use.\n", alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, 0 == _mali_osk_list_empty(&page_table_cache.full), ("Page table cache full list contains one or more elements \n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT(1, ("Destroy alloc 0x%08X with usage count %d\n", (u32)alloc, alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+}
+
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+{
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == _mali_osk_list_empty(&page_table_cache.partial))
+ {
+ mali_mmu_page_table_allocation * alloc = _MALI_OSK_LIST_ENTRY(page_table_cache.partial.next, mali_mmu_page_table_allocation, list);
+ int page_number = _mali_osk_find_first_zero_bit(alloc->usage_map, alloc->num_pages);
+ MALI_DEBUG_PRINT(6, ("Partial page table allocation found, using page offset %d\n", page_number));
+ _mali_osk_set_nonatomic_bit(page_number, alloc->usage_map);
+ alloc->usage_count++;
+ if (alloc->num_pages == alloc->usage_count)
+ {
+ /* full, move alloc to the full list */
+ _mali_osk_list_move(&alloc->list, &page_table_cache.full);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ *table_page = (MALI_MMU_PAGE_SIZE * page_number) + alloc->pages.phys_base;
+ *mapping = (mali_io_address)((MALI_MMU_PAGE_SIZE * page_number) + (u32)alloc->pages.mapping);
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+ else
+ {
+ mali_mmu_page_table_allocation * alloc;
+ /* no free pages, allocate a new one */
+
+ alloc = (mali_mmu_page_table_allocation *)_mali_osk_calloc(1, sizeof(mali_mmu_page_table_allocation));
+ if (NULL == alloc)
+ {
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&alloc->list);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_page_tables(memory_engine, &alloc->pages, physical_memory_allocators))
+ {
+ MALI_DEBUG_PRINT(1, ("No more memory for page tables\n"));
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* create the usage map */
+ alloc->num_pages = alloc->pages.size / MALI_MMU_PAGE_SIZE;
+ alloc->usage_count = 1;
+ MALI_DEBUG_PRINT(3, ("New page table cache expansion, %d pages in new cache allocation\n", alloc->num_pages));
+ alloc->usage_map = _mali_osk_calloc(1, ((alloc->num_pages + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long)); /* one bit per page, rounded up to whole longs */
+ if (NULL == alloc->usage_map)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate memory to describe MMU page table cache usage\n"));
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* clear memory allocation */
+ fill_page(alloc->pages.mapping, 0);
+
+ _mali_osk_set_nonatomic_bit(0, alloc->usage_map);
+
+ _mali_osk_list_add(&alloc->list, &page_table_cache.partial);
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = alloc->pages.phys_base; /* return the first page */
+ *mapping = alloc->pages.mapping; /* Mapping for first page */
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+}
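+
+/*
+ * mali_mmu_get_table_page() above hands out 4 kB pages from a larger cached
+ * allocation and tracks them with a usage bitmap. A minimal, standalone
+ * sketch of that bitmap bookkeeping follows; the type, the names and the
+ * fixed 4096-byte page size are assumptions of the sketch, not taken from
+ * the driver headers.
+ */
+struct page_pool_sketch
+{
+ unsigned int phys_base; /* physical address of the first page */
+ unsigned int num_pages; /* pages covered by the bitmap below (at most 4 * BITS_PER_LONG) */
+ unsigned int used; /* pages currently handed out */
+ unsigned long bitmap[4]; /* one bit per page */
+};
+
+/* returns the physical address of a free page, or 0 if the pool is full */
+static unsigned int page_pool_get_sketch(struct page_pool_sketch * pool)
+{
+ unsigned int i;
+ const unsigned int bits_per_word = 8 * sizeof(unsigned long);
+
+ for (i = 0; i < pool->num_pages; i++)
+ {
+ unsigned long * word = &pool->bitmap[i / bits_per_word];
+ unsigned long bit = 1UL << (i % bits_per_word);
+
+ if (0 == (*word & bit))
+ {
+ *word |= bit; /* mark the page as in use */
+ pool->used++;
+ return pool->phys_base + i * 4096;
+ }
+ }
+ return 0; /* exhausted; the real code expands the cache with a new allocation */
+}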
+
+void mali_mmu_release_table_page(u32 pa)
+{
+ mali_mmu_page_table_allocation * alloc, * temp_alloc;
+
+ MALI_DEBUG_PRINT_IF(1, pa & 4095, ("Bad page address 0x%x given to mali_mmu_release_table_page\n", (void*)pa));
+
+ MALI_DEBUG_PRINT(4, ("Releasing table page 0x%08X to the cache\n", pa));
+
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* find the entry this address belongs to */
+ /* first check the partial list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ MALI_DEBUG_ASSERT(0 != _mali_osk_test_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map));
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+ if (0 == alloc->usage_count)
+ {
+ /* empty, release whole page alloc */
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(partial list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+ /* then check the full list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+ /* transfer to partial list */
+ _mali_osk_list_move(&alloc->list, &page_table_cache.partial);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(full list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+ MALI_DEBUG_PRINT(1, ("pa 0x%x not found in the page table cache\n", (void*)pa));
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+void* mali_memory_core_mmu_lookup(u32 id)
+{
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+
+ /* find an MMU with a matching id */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
+ {
+ if (id == mmu->id) return mmu;
+ }
+
+ /* not found */
+ return NULL;
+}
+
+void mali_mmu_activate_address_space(mali_kernel_memory_mmu * mmu, u32 page_directory)
+{
+ const int delay_in_usecs = 10;
+ const int max_loop_count = 10;
+ int i;
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ if (!mali_benchmark) {
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE) break;
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ MALI_DEBUG_PRINT_IF(1, (max_loop_count == i), ("Stall request failed, swapping anyway\n"));
+ }
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+}
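+
+/*
+ * The stall wait above is a bounded busy-poll: test a status bit, back off
+ * for a few microseconds and give up after a fixed number of attempts rather
+ * than spinning forever. A generic, standalone sketch of the pattern, taking
+ * the status check and the delay as callbacks; both are hypothetical
+ * stand-ins for mali_mmu_register_read() and _mali_osk_time_ubusydelay().
+ */
+static int bounded_poll_sketch(int (*ready)(void * ctx), void * ctx,
+ void (*delay_us)(unsigned int usecs),
+ unsigned int interval_us, unsigned int max_tries)
+{
+ unsigned int i;
+
+ for (i = 0; i < max_tries; i++)
+ {
+ if (ready(ctx)) return 1; /* condition met */
+ delay_us(interval_us); /* short busy-delay between checks */
+ }
+ return 0; /* timed out; the callers above log this and carry on anyway */
+}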
+
+_mali_osk_errcode_t mali_memory_core_mmu_activate_page_table(void* mmu_ptr, struct mali_session_data * mali_session_data, void(*callback)(void*), void * callback_argument)
+{
+ memory_session * requested_memory_session;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ mali_kernel_memory_mmu * mmu;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+ MALI_DEBUG_ASSERT_POINTER(mali_session_data);
+
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+
+ MALI_DEBUG_PRINT(4, ("Asked to activate page table for session 0x%x on MMU %s\n", mali_session_data, mmu->description));
+ requested_memory_session = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+ MALI_DEBUG_PRINT(5, ("Session 0x%x looked up as using memory session 0x%x\n", mali_session_data, requested_memory_session));
+
+ MALI_DEBUG_ASSERT_POINTER(requested_memory_session);
+
+ MALI_DEBUG_PRINT(7, ("Taking locks\n"));
+
+ _mali_osk_lock_wait(requested_memory_session->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ if (0 == mmu->usage_count)
+ {
+ /* no session currently active, activate the requested session */
+ MALI_DEBUG_ASSERT(NULL == mmu->active_session);
+ mmu->active_session = requested_memory_session;
+ mmu->usage_count = 1;
+ MALI_DEBUG_PRINT(4, ("MMU idle, activating page directory 0x%08X on MMU %s\n", requested_memory_session->page_directory, mmu->description));
+ mali_mmu_activate_address_space(mmu, requested_memory_session->page_directory);
+ {
+ /* Insert mmu into the right place in the active_mmus list so that
+ * it is still sorted. The list must be sorted by ID so we can get
+ * the mutexes in the right order in
+ * _mali_ukk_mem_munmap_internal().
+ */
+ _mali_osk_list_t *entry;
+ for (entry = requested_memory_session->active_mmus.next;
+ entry != &requested_memory_session->active_mmus;
+ entry = entry->next)
+ {
+ mali_kernel_memory_mmu *temp = _MALI_OSK_LIST_ENTRY(entry, mali_kernel_memory_mmu, session_link);
+ if (mmu->id < temp->id)
+ break;
+ }
+ /* If we broke out, then 'entry' points to the list node of the
+ * first mmu with a greater ID; otherwise, it points to
+ * active_mmus. We want to add *before* this node.
+ */
+ _mali_osk_list_addtail(&mmu->session_link, entry);
+ }
+ err = _MALI_OSK_ERR_OK;
+ }
+
+ /* Allow two cores to run in parallel if they come from the same session */
+ else if (
+ (mmu->in_page_fault_handler == 0) &&
+ (requested_memory_session == mmu->active_session ) &&
+ (0==(MALI_MMU_DISALLOW_PARALLELL_WORK_OF_MALI_CORES & mmu->flags))
+ )
+ {
+ /* nested activation detected, just update the reference count */
+ MALI_DEBUG_PRINT(4, ("Nested activation detected, %d previous activations found\n", mmu->usage_count));
+ mmu->usage_count++;
+ err = _MALI_OSK_ERR_OK;
+ }
+
+ else if (NULL != callback)
+ {
+ /* can't activate right now, notify caller on idle via callback */
+ mali_kernel_memory_mmu_idle_callback * callback_object, * temp_callback_object;
+ int found = 0;
+
+ MALI_DEBUG_PRINT(3, ("The MMU is busy and is using a different address space, callback given\n"));
+ /* check for existing registration */
+ _MALI_OSK_LIST_FOREACHENTRY(callback_object, temp_callback_object, &mmu->callbacks, mali_kernel_memory_mmu_idle_callback, link)
+ {
+ if (callback_object->callback == callback)
+ {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ {
+ MALI_DEBUG_PRINT(5, ("Duplicate callback registration found, ignoring\n"));
+ /* callback already registered */
+ err = _MALI_OSK_ERR_BUSY;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(5,("New callback, registering\n"));
+ /* register the new callback */
+ callback_object = _mali_osk_malloc(sizeof(mali_kernel_memory_mmu_idle_callback));
+ if (NULL != callback_object)
+ {
+ MALI_DEBUG_PRINT(7,("Callback struct setup\n"));
+ callback_object->callback = callback;
+ callback_object->callback_argument = callback_argument;
+ _mali_osk_list_addtail(&callback_object->link, &mmu->callbacks);
+ err = _MALI_OSK_ERR_BUSY;
+ }
+ }
+ }
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_lock_signal(requested_memory_session->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_ERROR(err);
+}
+
+void mali_memory_core_mmu_release_address_space_reference(void* mmu_ptr)
+{
+ mali_kernel_memory_mmu_idle_callback * callback_object, * temp;
+ mali_kernel_memory_mmu * mmu;
+ memory_session * session;
+
+ _MALI_OSK_LIST_HEAD(callbacks);
+
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+
+ session = mmu->active_session;
+
+ /* support that we handle spurious page faults */
+ if (NULL != session)
+ {
+ _mali_osk_lock_wait(session->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("Deactivation of address space on MMU %s, %d references exists\n", mmu->description, mmu->usage_count));
+ MALI_DEBUG_ASSERT(0 != mmu->usage_count);
+ mmu->usage_count--;
+ if (0 != mmu->usage_count)
+ {
+ MALI_DEBUG_PRINT(4, ("MMU still in use by this address space, %d references still exists\n", mmu->usage_count));
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ /* support that we handle spurious page faults */
+ if (NULL != session)
+ {
+ _mali_osk_lock_signal(session->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+ return;
+ }
+
+ MALI_DEBUG_PRINT(4, ("Activating the empty page directory on %s\n", mmu->description));
+
+ /* last reference gone, deactivate current address space */
+ mali_mmu_activate_address_space(mmu, mali_empty_page_directory);
+
+ /* unlink from session */
+ _mali_osk_list_delinit(&mmu->session_link);
+ /* remove the active session pointer */
+ mmu->active_session = NULL;
+
+ /* Notify all registered callbacks.
+ * We have to be careful here:
+ * the callbacks must be called with the spinlock unlocked and
+ * the callback list emptied so that they are free to re-register.
+ * So we detach the list onto a local copy, clear the original, and later
+ * call the callbacks from the local copy (a standalone sketch of this
+ * pattern follows this function).
+ */
+ /* copy list */
+ _MALI_OSK_INIT_LIST_HEAD(&callbacks);
+ _mali_osk_list_splice(&mmu->callbacks, &callbacks);
+ /* clear the original, allowing new registrations during the callback */
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->callbacks);
+
+ /* end of mmu manipulation, so safe to unlock */
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* then finally remove the (possible) session lock, supporting that no session was active (spurious page fault handling) */
+ if (NULL != session)
+ {
+ _mali_osk_lock_signal(session->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(callback_object, temp, &callbacks, mali_kernel_memory_mmu_idle_callback, link)
+ {
+ MALI_DEBUG_ASSERT_POINTER(callback_object->callback);
+ (callback_object->callback)(callback_object->callback_argument);
+ _mali_osk_list_del(&callback_object->link);
+ _mali_osk_free(callback_object);
+ }
+}
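+
+/*
+ * The callback handling above detaches the whole pending list while the lock
+ * is held and then invokes the callbacks with the lock dropped, so that they
+ * are free to re-register. A standalone sketch of the same pattern using a
+ * plain singly linked list; the names and the lock/unlock callbacks are
+ * illustrative, and node ownership/freeing is omitted for brevity.
+ */
+struct cb_node_sketch
+{
+ void (*fn)(void * arg);
+ void * arg;
+ struct cb_node_sketch * next;
+};
+
+static void drain_callbacks_sketch(struct cb_node_sketch ** head,
+ void (*lock)(void), void (*unlock)(void))
+{
+ struct cb_node_sketch * pending;
+
+ lock();
+ pending = *head; /* take the whole list ... */
+ *head = NULL; /* ... and leave an empty one behind */
+ unlock();
+
+ while (NULL != pending) /* run the callbacks with the lock dropped */
+ {
+ struct cb_node_sketch * next = pending->next;
+ pending->fn(pending->arg);
+ pending = next;
+ }
+}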
+
+void mali_memory_core_mmu_unregister_callback(void* mmu_ptr, void(*callback)(void*))
+{
+ mali_kernel_memory_mmu_idle_callback * callback_object, * temp_callback_object;
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+ MALI_DEBUG_ASSERT_POINTER(callback);
+
+
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ _MALI_OSK_LIST_FOREACHENTRY(callback_object, temp_callback_object, &mmu->callbacks, mali_kernel_memory_mmu_idle_callback, link)
+ {
+ MALI_DEBUG_ASSERT_POINTER(callback_object->callback);
+ if (callback_object->callback == callback)
+ {
+ _mali_osk_list_del(&callback_object->link);
+ _mali_osk_free(callback_object);
+ break;
+ }
+ }
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor)
+{
+ /* allocate page tables, if needed */
+ int i;
+ const int first_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address);
+ int last_pde_idx;
+ memory_session * session_data;
+#if defined USING_MALI400_L2_CACHE
+ int has_active_mmus = 0;
+ int page_dir_updated = 0;
+#endif
+
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ last_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address + _MALI_OSK_MALI_PAGE_SIZE + descriptor->size - 1);
+ }
+ else
+ {
+ last_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address + descriptor->size - 1);
+ }
+
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ MALI_DEBUG_PRINT(4, ("allocating page tables for Mali virtual address space 0x%08X to 0x%08X\n", descriptor->mali_address, descriptor->mali_address + descriptor->size - 1));
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == _mali_osk_list_empty(&session_data->active_mmus))
+ {
+ /*
+ * We have active MMUs, so we are probably in the process of allocating more memory for a suspended GP job (PLBU heap).
+ * From Mali-400 MP r1p0, MMU page directory/tables are also cached by the Mali L2 cache, thus we need to invalidate the page directory
+ * from the L2 cache if we add new page directory entries (PDEs) to the page directory.
+ * We only need to do this when we have an active MMU, because we otherwise invalidate the entire Mali L2 cache at job start
+ */
+ has_active_mmus = 1;
+ }
+#endif
+
+ for (i = first_pde_idx; i <= last_pde_idx; i++)
+ {
+ if ( 0 == (_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT) )
+ {
+ u32 pte_phys;
+ mali_io_address pte_mapped;
+ _mali_osk_errcode_t err;
+
+ /* allocate a new page table */
+ MALI_DEBUG_ASSERT(0 == session_data->page_entries_usage_count[i]);
+ MALI_DEBUG_ASSERT(NULL == session_data->page_entries_mapped[i]);
+
+ err = mali_mmu_get_table_page(&pte_phys, &pte_mapped);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ session_data->page_entries_mapped[i] = pte_mapped;
+ MALI_DEBUG_ASSERT_POINTER( session_data->page_entries_mapped[i] );
+
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), pte_phys | MALI_MMU_FLAGS_PRESENT); /* mark page table as present */
+
+ /* update usage count */
+ session_data->page_entries_usage_count[i]++;
+#if defined USING_MALI400_L2_CACHE
+ page_dir_updated = 1;
+#endif
+ continue; /* continue loop */
+ }
+
+ MALI_DEBUG_PRINT(1, ("Page table alloc failed\n"));
+ break; /* abort loop, failed to allocate one or more page tables */
+ }
+ else
+ {
+ session_data->page_entries_usage_count[i]++;
+ }
+ }
+
+ if (i <= last_pde_idx)
+ {
+ /* one or more page tables could not be allocated; drop the reference counts we took above */
+ /* step back past the entry that caused the loop to abort */
+ i--;
+
+ while (i >= first_pde_idx)
+ {
+ MALI_DEBUG_ASSERT(0 != session_data->page_entries_usage_count[i]);
+ session_data->page_entries_usage_count[i]--;
+ if (0 == session_data->page_entries_usage_count[i])
+ {
+ /* last reference removed */
+ mali_mmu_release_table_page(MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32))));
+ session_data->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0); /* mark as not present in the page directory */
+ }
+ i--;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+#if defined USING_MALI400_L2_CACHE
+ if (1 == has_active_mmus && 1 == page_dir_updated)
+ {
+ /*
+ * We have updated the page directory and have an active MMU using it, so invalidate it in the Mali L2 cache.
+ */
+ mali_kernel_l2_cache_invalidate_page(session_data->page_directory);
+ }
+#endif
+
+ /* all OK */
+ MALI_SUCCESS;
+}
+
+static void mali_address_manager_release(mali_memory_allocation * descriptor)
+{
+ int first_pde_idx;
+ int last_pde_idx;
+ memory_session * session_data;
+ u32 mali_address;
+ u32 mali_address_end;
+ u32 left;
+ int i;
+#if defined USING_MALI400_L2_CACHE
+ int has_active_mmus = 0;
+ int page_dir_updated = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_directory_mapped);
+
+ mali_address = descriptor->mali_address;
+ mali_address_end = descriptor->mali_address + descriptor->size;
+ left = descriptor->size;
+
+ first_pde_idx = MALI_MMU_PDE_ENTRY(mali_address);
+ last_pde_idx = MALI_MMU_PDE_ENTRY(mali_address_end - 1);
+
+ MALI_DEBUG_PRINT(3, ("Zapping Mali MMU table for address 0x%08X size 0x%08X\n", mali_address, left));
+ MALI_DEBUG_PRINT(4, ("Zapping PDE %d through %d\n", first_pde_idx, last_pde_idx));
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == _mali_osk_list_empty(&session_data->active_mmus))
+ {
+ /*
+ * From Mali-400 MP r1p0, MMU page directory/tables are also cached by the Mali L2 cache, thus we need to invalidate the page tables
+ * from the L2 cache to ensure that the memory is unmapped.
+ * We only need to do this when we have an active MMU, because we otherwise invalidate the entire Mali L2 cache at job start
+ */
+ has_active_mmus = 1;
+ }
+#endif
+
+
+ for (i = first_pde_idx; i <= last_pde_idx; i++)
+ {
+ const int size_inside_pte = left < 0x400000 ? left : 0x400000;
+
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped[i]);
+ MALI_DEBUG_ASSERT(0 != session_data->page_entries_usage_count[i]);
+ MALI_DEBUG_PRINT(4, ("PDE %d\n", i));
+
+ session_data->page_entries_usage_count[i]--;
+
+ if (0 == session_data->page_entries_usage_count[i])
+ {
+ MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+ /* last reference removed, no need to zero out each PTE */
+ mali_mmu_release_table_page(MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32))));
+ session_data->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0); /* mark as not present in the page directory */
+#if defined USING_MALI400_L2_CACHE
+ page_dir_updated = 1;
+#endif
+ }
+ else
+ {
+ int j;
+ const int first_pte_idx = MALI_MMU_PTE_ENTRY(mali_address);
+ const int last_pte_idx = MALI_MMU_PTE_ENTRY(mali_address + size_inside_pte - 1);
+
+ MALI_DEBUG_PRINT(4, ("Partial page table fill detected, zapping entries %d through %d (page table at 0x%08X)\n", first_pte_idx, last_pte_idx, MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)))));
+
+ for (j = first_pte_idx; j <= last_pte_idx; j++)
+ {
+ _mali_osk_mem_iowrite32(session_data->page_entries_mapped[i], j * sizeof(u32), 0);
+ }
+
+ MALI_DEBUG_PRINT(5, ("zap complete\n"));
+
+ mali_address += size_inside_pte;
+
+#if defined USING_MALI400_L2_CACHE
+ if (1 == has_active_mmus)
+ {
+ /* Invalidate the page we've just modified */
+ mali_kernel_l2_cache_invalidate_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ }
+#endif
+ }
+ left -= size_inside_pte;
+ }
+
+#if defined USING_MALI400_L2_CACHE
+ if ((1 == page_dir_updated) && (1== has_active_mmus))
+ {
+ /* The page directory was also updated */
+ mali_kernel_l2_cache_invalidate_page(session_data->page_directory);
+ }
+#endif
+}
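+
+/*
+ * The PDE/PTE walks in these functions assume the classic two-level layout:
+ * 4 kB pages, a 1024-entry page directory and 1024-entry page tables, so
+ * each page directory entry spans 4 MB (the 0x400000 constant used above).
+ * The real MALI_MMU_PDE_ENTRY()/MALI_MMU_PTE_ENTRY() macros are defined in
+ * headers not shown here; under that assumed layout the index arithmetic
+ * works out as in the sketch below.
+ */
+static unsigned int pde_index_sketch(unsigned int mali_addr)
+{
+ return mali_addr >> 22; /* which 4 MB region: 0..1023 */
+}
+
+static unsigned int pte_index_sketch(unsigned int mali_addr)
+{
+ return (mali_addr >> 12) & 0x3FF; /* which 4 kB page within it: 0..1023 */
+}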
+
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size)
+{
+ memory_session * session_data;
+ u32 mali_address;
+ u32 mali_address_end;
+ u32 current_phys_addr;
+#if defined USING_MALI400_L2_CACHE
+ int has_active_mmus = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+ current_phys_addr = *phys_addr;
+
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ mali_address = descriptor->mali_address + offset;
+ mali_address_end = descriptor->mali_address + offset + size;
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == _mali_osk_list_empty(&session_data->active_mmus))
+ {
+ /*
+ * We have active MMUs, so we are probably in the process of allocating more memory for a suspended GP job (PLBU heap).
+ * From Mali-400 MP r1p0, MMU page directory/tables are also cached by the Mali L2 cache, thus we need to invalidate the page tables
+ * from the L2 cache when we have allocated more heap memory.
+ * We only need to do this when we have an active MMU, because we otherwise invalidate the entire Mali L2 cache at job start
+ */
+ has_active_mmus = 1;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali map: mapping 0x%08X to Mali address 0x%08X length 0x%08X\n", current_phys_addr, mali_address, size));
+
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped);
+
+ for ( ; mali_address < mali_address_end; mali_address += MALI_MMU_PAGE_SIZE, current_phys_addr += MALI_MMU_PAGE_SIZE)
+ {
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+ _mali_osk_mem_iowrite32(session_data->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)], MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32), current_phys_addr | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+ }
+
+#if defined USING_MALI400_L2_CACHE
+ if (1 == has_active_mmus)
+ {
+ int i;
+ const int first_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address + offset); /* mali_address was advanced by the mapping loop above */
+ const int last_pde_idx = MALI_MMU_PDE_ENTRY(mali_address_end - 1);
+
+ /*
+ * Invalidate the updated page table(s), in case they have been used for something
+ * else since last job start (invalidation of entire Mali L2 cache)
+ */
+ for (i = first_pde_idx; i <= last_pde_idx; i++)
+ {
+ mali_kernel_l2_cache_invalidate_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ }
+ }
+#endif
+
+ MALI_SUCCESS;
+}
+
+/* This handler registered to mali_mmap for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+ struct mali_session_data * mali_session_data;
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ /* Unpack arguments */
+ mali_session_data = (struct mali_session_data *)args->ctx;
+
+ if (NULL == mali_session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: mali_session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ MALI_DEBUG_ASSERT( mali_subsystem_memory_id >= 0 );
+
+ session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+ /* validate input */
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+ descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+ if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+ descriptor->size = args->size;
+ descriptor->mali_address = args->phys_addr;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+
+ descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+ descriptor->lock = session_data->lock;
+ _mali_osk_list_init( &descriptor->list );
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == mali_allocation_engine_allocate_memory(memory_engine, descriptor, physical_memory_allocators, &session_data->memory_head))
+ {
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
+ {
+ /* no need to lock the MMU as we own it already */
+ MALI_DEBUG_PRINT(5, ("Zapping the cache of mmu %s as it's using the page table we have updated\n", mmu->description));
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+ if (!mali_benchmark) {
+ while ( (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE) == 0) _mali_osk_time_ubusydelay(1);
+ }
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* All ok, write out any information generated from this call */
+ args->mapping = descriptor->mapping;
+ args->cookie = (u32)descriptor;
+
+ MALI_DEBUG_PRINT(7, ("MMAP OK\n"));
+ /* All done */
+ MALI_SUCCESS;
+ }
+ else
+ {
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ /* OOM, but not a fatal error */
+ MALI_DEBUG_PRINT(4, ("Memory allocation failure, OOM\n"));
+ _mali_osk_free(descriptor);
+ /* Linux will free the CPU address allocation; the userspace client frees the Mali address allocation */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+}
+
+static void _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args )
+{
+ memory_session * session_data;
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+ mali_memory_allocation * descriptor;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ /* Stall the MMU(s) which is using the address space we're operating on.
+ * Note that active_mmus must be sorted in order of ID to avoid a mutex
+ * ordering violation.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
+ {
+ const int max_loop_count = 100;
+ const int sleep_duration = 1; /* must be below 1000 */
+ int i;
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ if (!mali_benchmark)
+ {
+ for ( i = 0; i < max_loop_count; i++)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE) break;
+ _mali_osk_time_ubusydelay(sleep_duration);
+ }
+
+ MALI_DEBUG_PRINT_IF(3, max_loop_count == i, ("Stall failed, trying zap anyway\n"));
+ }
+ }
+
+ /* This function also removes the memory from the session's memory list */
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+
+ /* any L2 maintenance was done during mali_allocation_engine_release_memory */
+ /* the session is locked, so the active mmu list should be the same */
+ /* zap the TLB and resume operation */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
+ {
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
+
+/* Handler for unmapping memory for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+ mali_memory_allocation * descriptor;
+ _mali_osk_lock_t *descriptor_lock;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ MALI_DEBUG_ASSERT_POINTER((memory_session*)descriptor->mali_addr_mapping_info);
+
+ descriptor_lock = descriptor->lock; /* should point to the session data lock... */
+
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_wait( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+ /* Non-interruptable spinlock type, so the lock must always have been taken here. Any checking should have been done in the OSK function. */
+
+ _mali_ukk_mem_munmap_internal( args );
+ /* descriptor is no longer valid - it may have been freed */
+
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_signal( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Called when the rendercore wants the MMU to raise an interrupt */
+static void mali_mmu_probe_irq_trigger(mali_kernel_memory_mmu * mmu)
+{
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_trigger\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Called when the IRQ probe wants the MMU to acknowledge an interrupt from the hardware */
+static _mali_osk_errcode_t mali_mmu_probe_irq_acknowledge(mali_kernel_memory_mmu * mmu)
+{
+ u32 int_stat;
+
+ int_stat = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_STATUS);
+
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ }
+ else MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ }
+ else MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+
+ if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+ (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR))
+ {
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+struct dump_info
+{
+ u32 buffer_left;
+ u32 register_writes_size;
+ u32 page_table_dump_size;
+ u32 *buffer;
+};
+
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char * comment, struct dump_info * info, int dump_to_serial)
+{
+ if (dump_to_serial) MALI_DEBUG_PRINT(1, ("writereg %08X %08X # %s\n", where, what, comment));
+
+ if (NULL != info)
+ {
+ info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
+
+ if (NULL != info->buffer)
+ {
+ /* check that we have enough space */
+ if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = where;
+ info->buffer++;
+
+ *info->buffer = what;
+ info->buffer++;
+
+ info->buffer_left -= sizeof(u32)*2;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info, int dump_to_serial)
+{
+ if (dump_to_serial)
+ {
+ int i;
+ for (i = 0; i < 256; i++)
+ {
+ MALI_DEBUG_PRINT(1, ("%08X: %08X %08X %08X %08X\n", phys_addr + 16*i, _mali_osk_mem_ioread32(page, (i*4 + 0) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 1) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 2) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 3) * sizeof(u32))));
+
+ }
+ }
+
+ if (NULL != info)
+ {
+ /* 4096 for the page and 4 bytes for the address */
+ const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+ const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+ const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+ info->page_table_dump_size += dump_size_in_bytes;
+
+ if (NULL != info->buffer)
+ {
+ if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = phys_addr;
+ info->buffer++;
+
+ _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+ info->buffer += page_size_in_elements;
+
+ info->buffer_left -= dump_size_in_bytes;
+ }
+ }
+
+ MALI_SUCCESS;
+}
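+
+/* Dump buffer layout (descriptive note, derived from writereg() and
+ * dump_page() above): the register-write section is a sequence of u32 pairs
+ * <register address, value>, and the page-table section is a sequence of
+ * records made up of a u32 physical address followed by the
+ * MALI_MMU_PAGE_SIZE bytes of the page itself. */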
+
+static _mali_osk_errcode_t dump_mmu_page_table(memory_session * session_data, struct dump_info * info)
+{
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ if (NULL != session_data->page_directory_mapped)
+ {
+ int i;
+
+ MALI_CHECK_NO_ERROR(
+ dump_page(session_data->page_directory_mapped, session_data->page_directory, info, 0)
+ );
+
+ for (i = 0; i < 1024; i++)
+ {
+ if (NULL != session_data->page_entries_mapped[i])
+ {
+ MALI_CHECK_NO_ERROR(
+ dump_page(session_data->page_entries_mapped[i], _mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info, 0)
+ );
+ }
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_registers(memory_session * session_data, struct dump_info * info)
+{
+ MALI_CHECK_NO_ERROR(writereg(0x00000000, session_data->page_directory, "set the page directory address", info, 0));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info, 0));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info, 0));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data, &info));
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data, &info));
+ args->size = info.register_writes_size + info.page_table_dump_size;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ info.buffer_left = args->size;
+ info.buffer = args->buffer;
+
+ args->register_writes = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data, &info));
+
+ args->page_table_dump = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data, &info));
+
+ args->register_writes_size = info.register_writes_size;
+ args->page_table_dump_size = info.page_table_dump_size;
+
+ MALI_SUCCESS;
+}
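+
+/* Usage sketch (illustrative only, not part of the original driver; error
+ * handling is omitted and the argument structs are assumed to be prepared by
+ * the UK layer with a valid ctx):
+ *
+ *   _mali_uk_query_mmu_page_table_dump_size_s query;
+ *   _mali_uk_dump_mmu_page_table_s dump;
+ *
+ *   query.ctx = ctx;
+ *   _mali_ukk_query_mmu_page_table_dump_size(&query);
+ *
+ *   dump.ctx    = ctx;
+ *   dump.size   = query.size;
+ *   dump.buffer = _mali_osk_malloc(query.size);
+ *   _mali_ukk_dump_mmu_page_table(&dump);
+ *
+ * On return, dump.register_writes/dump.register_writes_size and
+ * dump.page_table_dump/dump.page_table_dump_size describe the two sections
+ * written into the buffer. */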
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed out)
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed out)
+ */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h
new file mode 100644
index 00000000000..a199fd66b5a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_MMU_H__
+#define __MALI_KERNEL_MEM_MMU_H__
+
+#include "mali_kernel_session_manager.h"
+
+/**
+ * Look up an MMU core by ID.
+ * @param id ID of the MMU to find
+ * @return NULL if the ID was not found or is invalid, non-NULL if a core was found.
+ */
+void* mali_memory_core_mmu_lookup(u32 id);
+
+/**
+ * Activate a user session with its address space on the given MMU.
+ * If the session can't be activated because the MMU is busy and
+ * a callback pointer is given, the callback will be called once the MMU becomes idle.
+ * If the same callback pointer is registered multiple times it will only be called once.
+ * Nested activations are supported.
+ * Each call must be matched by a call to @see mali_memory_core_mmu_release_address_space_reference
+ *
+ * @param mmu_ptr The MMU to activate the address space on
+ * @param mali_session_data The user session object whose address space to activate
+ * @param callback Pointer to the function to call when the MMU becomes idle
+ * @param callback_argument Argument given to the callback
+ * @return 0 if the address space was activated, -EBUSY if the MMU was busy, -EFAULT in all other cases.
+ */
+int mali_memory_core_mmu_activate_page_table(void* mmu_ptr, struct mali_session_data * mali_session_data, void(*callback)(void*), void * callback_argument);
+
+/**
+ * Release a reference to the current active address space.
+ * Once the last reference is released, any registered callback(s) will be called before the function returns.
+ *
+ * @note Caution must be taken when calling this function with locks held, because the registered callbacks may be invoked.
+ * @param mmu The MMU whose active address space reference should be released
+ */
+void mali_memory_core_mmu_release_address_space_reference(void* mmu);
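+
+/* Usage sketch (illustrative only; my_idle_callback and my_arg are
+ * placeholders, and error handling is omitted):
+ *
+ *   if (0 == mali_memory_core_mmu_activate_page_table(mmu, session, my_idle_callback, my_arg))
+ *   {
+ *       ... issue work that uses the session's address space ...
+ *       mali_memory_core_mmu_release_address_space_reference(mmu);
+ *   }
+ *
+ * If -EBUSY is returned, my_idle_callback(my_arg) will be invoked once the
+ * MMU becomes idle. */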
+
+/**
+ * Soft reset of MMU - needed after power up
+ *
+ * @param mmu_ptr The MMU pointer registered with the relevant core
+ */
+void mali_kernel_mmu_reset(void * mmu_ptr);
+
+void mali_kernel_mmu_force_bus_reset(void * mmu_ptr);
+
+/**
+ * Unregister a previously registered callback.
+ * @param mmu The MMU to unregister the callback on
+ * @param callback The function to unregister
+ */
+void mali_memory_core_mmu_unregister_callback(void* mmu, void(*callback)(void*));
+
+
+
+#endif /* __MALI_KERNEL_MEM_MMU_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c
new file mode 100644
index 00000000000..845de935ec9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+
+typedef struct os_allocation
+{
+ u32 num_pages;
+ u32 offset_start;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+} os_allocation;
+
+typedef struct os_allocator
+{
+ _mali_osk_lock_t *mutex;
+
+ /**
+ * Maximum number of pages to allocate from the OS
+ */
+ u32 num_pages_max;
+
+ /**
+ * Number of pages allocated from the OS
+ */
+ u32 num_pages_allocated;
+
+ /** CPU Usage adjustment (add to mali physical address to get cpu physical address) */
+ u32 cpu_usage_adjust;
+} os_allocator;
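+
+/* Worked example (hypothetical addresses, for illustration only): if a memory
+ * bank is visible to the CPU at physical 0x80000000 but to the Mali at
+ * physical 0x00000000, then cpu_usage_adjust is 0x80000000; adding it to a
+ * Mali physical address yields the CPU physical address, and subtracting it
+ * converts back (as done in os_allocator_allocate_page_table_block and
+ * os_allocator_page_table_block_release below). */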
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void os_allocator_release(void * ctx, void * handle);
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block );
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ os_allocator * info;
+
+ max_allocation = (max_allocation + _MALI_OSK_CPU_PAGE_SIZE-1) & ~(_MALI_OSK_CPU_PAGE_SIZE-1);
+
+ MALI_DEBUG_PRINT(2, ("Mali OS memory allocator created with max allocation size of 0x%X bytes, cpu_usage_adjust 0x%08X\n", max_allocation, cpu_usage_adjust));
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(os_allocator));
+ if (NULL != info)
+ {
+ info->num_pages_max = max_allocation / _MALI_OSK_CPU_PAGE_SIZE;
+ info->num_pages_allocated = 0;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, 106);
+ if (NULL != info->mutex)
+ {
+ allocator->allocate = os_allocator_allocate;
+ allocator->allocate_page_table_block = os_allocator_allocate_page_table_block;
+ allocator->destroy = os_allocator_destroy;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ os_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (os_allocator*)allocator->ctx;
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ u32 left;
+ os_allocator * info;
+ os_allocation * allocation;
+ int pages_allocated = 0;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size - *offset;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ /** @note this code may not work on Linux, or may require a more complex Linux implementation */
+ allocation = _mali_osk_malloc(sizeof(os_allocation));
+ if (NULL != allocation)
+ {
+ u32 os_mem_max_usage = info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE;
+ allocation->offset_start = *offset;
+ allocation->num_pages = ((left + _MALI_OSK_CPU_PAGE_SIZE - 1) & ~(_MALI_OSK_CPU_PAGE_SIZE - 1)) >> _MALI_OSK_CPU_PAGE_ORDER;
+ MALI_DEBUG_PRINT(6, ("Allocating page array of size %d bytes\n", allocation->num_pages * sizeof(struct page*)));
+
+ while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max) && _mali_osk_mem_check_allocated(os_mem_max_usage))
+ {
+ err = mali_allocation_engine_map_physical(engine, descriptor, *offset, MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, info->cpu_usage_adjust, _MALI_OSK_CPU_PAGE_SIZE);
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ if ( _MALI_OSK_ERR_NOMEM == err)
+ {
+ /* 'Partial' allocation (or, out-of-memory on first page) */
+ break;
+ }
+
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+
+ /* Fatal error, cleanup any previous pages allocated. */
+ if ( pages_allocated > 0 )
+ {
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*pages_allocated, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+ /* (*offset) doesn't need to be restored; it will not be used by the caller on failure */
+ }
+
+ pages_allocated = 0;
+
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ break;
+ }
+
+ /* Loop iteration */
+ if (left < _MALI_OSK_CPU_PAGE_SIZE) left = 0;
+ else left -= _MALI_OSK_CPU_PAGE_SIZE;
+
+ pages_allocated++;
+
+ *offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+
+ /* Loop termination; decide on result */
+ if (pages_allocated)
+ {
+ MALI_DEBUG_PRINT(6, ("Allocated %d pages\n", pages_allocated));
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Some OSes do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * The flush below is therefore required for the Mali to have a correct view of the memory.
+ */
+ _mali_osk_cache_ensure_uncached_range_flushed( (void *)descriptor, allocation->offset_start, pages_allocated *_MALI_OSK_CPU_PAGE_SIZE );
+ allocation->num_pages = pages_allocated;
+ allocation->engine = engine; /* Necessary to make the engine's unmap call */
+ allocation->descriptor = descriptor; /* Necessary to make the engine's unmap call */
+ info->num_pages_allocated += pages_allocated;
+
+ MALI_DEBUG_PRINT(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ alloc_info->ctx = info;
+ alloc_info->handle = allocation;
+ alloc_info->release = os_allocator_release;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Releasing pages array due to no pages allocated\n"));
+ _mali_osk_free( allocation );
+ }
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+static void os_allocator_release(void * ctx, void * handle)
+{
+ os_allocator * info;
+ os_allocation * allocation;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (os_allocator*)ctx;
+ allocation = (os_allocation*)handle;
+ engine = allocation->engine;
+ descriptor = allocation->descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER( engine );
+ MALI_DEBUG_ASSERT_POINTER( descriptor );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(6, ("Releasing %d os pages\n", allocation->num_pages));
+
+ MALI_DEBUG_ASSERT( allocation->num_pages <= info->num_pages_allocated);
+ info->num_pages_allocated -= allocation->num_pages;
+
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*allocation->num_pages, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free(allocation);
+}
+
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+ const int allocation_order = 6; /* _MALI_OSK_CPU_PAGE_SIZE << 6 */
+ void *virt;
+ const u32 pages_to_allocate = 1 << allocation_order;
+ const u32 size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+ os_allocator * info;
+
+ u32 cpu_phys_base;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ info = (os_allocator*)ctx;
+
+ /* Ensure we don't allocate more than we're supposed to from the ctx */
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ if ( (info->num_pages_allocated + pages_to_allocate > info->num_pages_max) && _mali_osk_mem_check_allocated(info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE) )
+ {
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+
+ virt = _mali_osk_mem_allocioregion( &cpu_phys_base, size );
+
+ if ( NULL == virt )
+ {
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+
+ block->release = os_allocator_page_table_block_release;
+ block->ctx = ctx;
+ block->handle = (void*)allocation_order;
+ block->size = size;
+ block->phys_base = cpu_phys_base - info->cpu_usage_adjust;
+ block->mapping = virt;
+
+ info->num_pages_allocated += pages_to_allocate;
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block )
+{
+ os_allocator * info;
+ u32 allocation_order;
+ u32 pages_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (os_allocator*)page_table_block->ctx;
+
+ MALI_DEBUG_ASSERT_POINTER( info );
+
+ allocation_order = (u32)page_table_block->handle;
+
+ pages_allocated = 1 << allocation_order;
+
+ MALI_DEBUG_ASSERT( pages_allocated * _MALI_OSK_CPU_PAGE_SIZE == page_table_block->size );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_ASSERT( pages_allocated <= info->num_pages_allocated);
+ info->num_pages_allocated -= pages_allocated;
+
+ /* Adjust phys_base from mali physical address to CPU physical address */
+ _mali_osk_mem_freeioregion( page_table_block->phys_base + info->cpu_usage_adjust, page_table_block->size, page_table_block->mapping );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h
new file mode 100644
index 00000000000..ff49f8a816a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_OS_H__
+#define __MALI_KERNEL_MEM_OS_H__
+
+/**
+ * @brief Creates an object that manages allocating OS memory
+ *
+ * Creates an object that provides an interface to allocate OS memory and
+ * have it mapped into the Mali virtual memory space.
+ *
+ * The object exposes pointers to
+ * - allocate OS memory
+ * - allocate Mali page tables in OS memory
+ * - destroy the object
+ *
+ * Allocations from OS memory are of type mali_physical_memory_allocation
+ * which provides a function to release the allocation.
+ *
+ * @param max_allocation max. number of bytes that can be allocated from OS memory
+ * @param cpu_usage_adjust value to add to mali physical addresses to obtain CPU physical addresses
+ * @param name description of the allocator
+ * @return pointer to mali_physical_memory_allocator object. NULL on failure.
+ **/
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name);
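+
+/* Example (illustrative sketch only; the 16 MB limit, the zero
+ * cpu_usage_adjust and the name are arbitrary values chosen for the example):
+ *
+ *   mali_physical_memory_allocator *os_alloc;
+ *
+ *   os_alloc = mali_os_allocator_create(16 * 1024 * 1024, 0, "OS memory");
+ *   if (NULL != os_alloc)
+ *   {
+ *       ... hand the allocator to the memory engine ...
+ *       os_alloc->destroy(os_alloc);
+ *   }
+ */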
+
+#endif /* __MALI_KERNEL_MEM_OS_H__ */
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c
new file mode 100644
index 00000000000..3b4ace05f01
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+typedef struct memory_engine
+{
+ mali_kernel_mem_address_manager * mali_address;
+ mali_kernel_mem_address_manager * process_address;
+} memory_engine;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager)
+{
+ memory_engine * engine;
+
+ /* Mali Address Manager need not support unmap_physical */
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->map_physical);
+
+ /* Process Address Manager must support unmap_physical for OS allocation
+ * error path handling */
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->map_physical);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->unmap_physical);
+
+
+ engine = (memory_engine*)_mali_osk_malloc(sizeof(memory_engine));
+ if (NULL == engine) return NULL;
+
+ engine->mali_address = mali_address_manager;
+ engine->process_address = process_address_manager;
+
+ return (mali_allocation_engine)engine;
+}
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine)
+{
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ _mali_osk_free(engine);
+}
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_allocators);
+ /* ASSERT that the list member has been initialized, even if it won't be
+ * used for tracking. We need it to be initialized to see if we need to
+ * delete it from a list in the release function. */
+ MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );
+
+ if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
+ {
+ _mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ res = engine->process_address->allocate(descriptor);
+ }
+ if ( _MALI_OSK_ERR_OK == res )
+ {
+ /* address space setup OK, commit physical memory to the allocation */
+ mali_physical_memory_allocator * active_allocator = physical_allocators;
+ struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
+ u32 offset = 0;
+
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ if ( NULL != tracking_list )
+ {
+ /* Insert into the memory session list */
+ /* ASSERT that it is not already part of a list */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
+ _mali_osk_list_add( &descriptor->list, tracking_list );
+ }
+
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* reuse current active_allocation_tracker */
+ MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ if (NULL != active_allocator->next)
+ {
+ /* need a new allocation tracker */
+ active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
+ if (NULL != active_allocation_tracker->next)
+ {
+ active_allocation_tracker = active_allocation_tracker->next;
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ }
+ }
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("Non-fatal OOM, have to cleanup, stopped at offset %d for size %d\n", offset, descriptor->size));
+
+ /* allocation failure, start cleanup */
+ /* loop over any potential partial allocations */
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ /* handle blank trackers which will show up during failure */
+ if (NULL != active_allocation_tracker->release)
+ {
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ }
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ /* release the address spaces */
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+ }
+ engine->mali_address->release(descriptor);
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+void mali_allocation_engine_release_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+ mali_physical_memory_allocation * active_allocation_tracker;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* Determine whether we need to remove this from a tracking list */
+ if ( ! _mali_osk_list_empty( &descriptor->list ) )
+ {
+ _mali_osk_list_del( &descriptor->list );
+ /* Clear the list for debug mode, catch use-after-free */
+ MALI_DEBUG_CODE( descriptor->list.next = descriptor->list.prev = NULL; )
+ }
+
+ engine->mali_address->release(descriptor);
+
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ MALI_DEBUG_ASSERT_POINTER(active_allocation_tracker->release);
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+}
+
+
+_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size)
+{
+ _mali_osk_errcode_t err;
+ memory_engine * engine = (memory_engine*)mem_engine;
+ _mali_osk_mem_mapregion_flags_t unmap_flags = (_mali_osk_mem_mapregion_flags_t)0;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X\n", phys, size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address->map_physical);
+
+ /* Handle the process address manager first, because we may need it to
+ * allocate the physical page */
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Handle OS-allocated specially, since an adjustment may be required */
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == phys )
+ {
+ MALI_DEBUG_ASSERT( _MALI_OSK_CPU_PAGE_SIZE == size );
+
+ /* Set flags to use on error path */
+ unmap_flags |= _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR;
+
+ err = engine->process_address->map_physical(descriptor, offset, &phys, size);
+ /* Adjust from CPU physical address to Mali physical address */
+ phys -= cpu_usage_adjust;
+ }
+ else
+ {
+ u32 cpu_phys;
+ /* Adjust mali physical address to cpu physical address */
+ cpu_phys = phys + cpu_usage_adjust;
+ err = engine->process_address->map_physical(descriptor, offset, &cpu_phys, size);
+ }
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_ERROR( err );
+ }
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X to CPUVA 0x%08X\n", phys, size, offset, (u32)(descriptor->mapping) + offset));
+
+ /* Mali address manager must use the physical address - no point in asking
+ * it to allocate another one for us */
+ MALI_DEBUG_ASSERT( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC != phys );
+
+ err = engine->mali_address->map_physical(descriptor, offset, &phys, size);
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ MALI_DEBUG_PRINT( 2, ("Process address manager succeeded, but Mali Address manager failed for phys=0x%08X size=0x%08X, offset=0x%08X. Will unmap.\n", phys, size, offset));
+ engine->process_address->unmap_physical(descriptor, offset, size, unmap_flags);
+ }
+
+ MALI_ERROR( err );
+ }
+
+ MALI_SUCCESS;
+}
+
+void mali_allocation_engine_unmap_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("UnMapping length 0x%08X at offset 0x%08X\n", size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->process_address);
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Mandatory for the process_address manager to have an unmap function */
+ engine->process_address->unmap_physical( descriptor, offset, size, unmap_flags );
+ }
+
+ /* Optional for mali_address manager to have an unmap function */
+ if ( NULL != engine->mali_address->unmap_physical )
+ {
+ engine->mali_address->unmap_physical( descriptor, offset, size, unmap_flags );
+ }
+}
+
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider)
+{
+ mali_physical_memory_allocator * active_allocator = physical_provider;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate_page_table_block(active_allocator->ctx, descriptor))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* try next */
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate PageTables: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ MALI_DEBUG_PRINT(1, ("Invalid return value from allocate_page_table_block call: MALI_MEM_ALLOC_PARTIAL\n"));
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ MALI_DEBUG_PRINT(1, ("Aborting due to allocation failure\n"));
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+
+void mali_allocation_engine_report_allocators( mali_physical_memory_allocator * physical_provider )
+{
+ mali_physical_memory_allocator * active_allocator = physical_provider;
+ MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+ MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest numbered first) :\n"));
+ while ( NULL != active_allocator )
+ {
+ if ( NULL != active_allocator->name )
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", active_allocator->alloc_order, active_allocator->name) );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", active_allocator->alloc_order) );
+ }
+ active_allocator = active_allocator->next;
+ }
+
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h
new file mode 100644
index 00000000000..80a2b4bb3a7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEMORY_ENGINE_H__
+#define __MALI_KERNEL_MEMORY_ENGINE_H__
+
+typedef void * mali_allocation_engine;
+
+typedef enum { MALI_MEM_ALLOC_FINISHED, MALI_MEM_ALLOC_PARTIAL, MALI_MEM_ALLOC_NONE, MALI_MEM_ALLOC_INTERNAL_FAILURE } mali_physical_memory_allocation_result;
+
+typedef struct mali_physical_memory_allocation
+{
+ void (*release)(void * ctx, void * handle); /**< Function to call to release the physical memory */
+ void * ctx;
+ void * handle;
+ struct mali_physical_memory_allocation * next;
+} mali_physical_memory_allocation;
+
+struct mali_page_table_block;
+
+typedef struct mali_page_table_block
+{
+ void (*release)(struct mali_page_table_block *page_table_block);
+ void * ctx;
+ void * handle;
+ u32 size; /**< In bytes, should be a multiple of MALI_MMU_PAGE_SIZE to avoid internal fragmentation */
+ u32 phys_base; /**< Mali physical address */
+ mali_io_address mapping;
+} mali_page_table_block;
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+typedef enum
+{
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE = 0x1,
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE = 0x2,
+} mali_memory_allocation_flag;
+
+/**
+ * Supplying this 'magic' physical address requests that the OS allocate the
+ * physical address at page commit time, rather than committing a specific page
+ */
+#define MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC ((u32)(-1))
+
+typedef struct mali_memory_allocation
+{
+ /* Information about the allocation */
+ void * mapping; /**< CPU virtual address where the memory is mapped */
+ u32 mali_address; /**< The address of the memory allocation as seen by the Mali */
+ u32 size; /**< Size of the allocation */
+ u32 permission; /**< Permission settings */
+ mali_memory_allocation_flag flags;
+
+ _mali_osk_lock_t * lock;
+
+ /* Manager specific information pointers */
+ void * mali_addr_mapping_info; /**< Mali address allocation specific info */
+ void * process_addr_mapping_info; /**< Mapping manager specific info */
+
+ mali_physical_memory_allocation physical_allocation;
+
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+} mali_memory_allocation;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+typedef struct mali_physical_memory_allocator
+{
+ mali_physical_memory_allocation_result (*allocate)(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+ mali_physical_memory_allocation_result (*allocate_page_table_block)(void * ctx, mali_page_table_block * block); /* MALI_MEM_ALLOC_PARTIAL not allowed */
+ void (*destroy)(struct mali_physical_memory_allocator * allocator);
+ void * ctx;
+ const char * name; /**< Descriptive name for use in mali_allocation_engine_report_allocators, or NULL */
+ u32 alloc_order; /**< Order in which the allocations should happen */
+ struct mali_physical_memory_allocator * next;
+} mali_physical_memory_allocator;
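+
+/* Example (illustrative only; 'dedicated' and 'os_alloc' are hypothetical
+ * allocator instances): physical allocators form a chain that the memory
+ * engine walks via the 'next' links until an allocator reports
+ * MALI_MEM_ALLOC_FINISHED, so a dedicated-memory allocator can be preferred
+ * over the OS allocator by linking it in first:
+ *
+ *   dedicated->alloc_order = 0;
+ *   os_alloc->alloc_order  = 1;
+ *   dedicated->next = os_alloc;
+ *   os_alloc->next  = NULL;
+ *   mali_allocation_engine_report_allocators(dedicated);
+ */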
+
+typedef struct mali_kernel_mem_address_manager
+{
+ _mali_osk_errcode_t (*allocate)(mali_memory_allocation *); /**< Function to call to reserve an address */
+ void (*release)(mali_memory_allocation *); /**< Function to call to free the address allocated */
+
+ /**
+ * Function called for each physical sub allocation.
+ * Called for each physical block allocated by the physical memory manager.
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] offset Offset from the start of the range
+ * @param[in,out] phys_addr A pointer to the physical address of the start of the
+ * physical block. When *phys_addr == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC
+ * is used, this requests that the function allocate the physical page
+ * itself, and return it through the pointer provided.
+ * @param[in] size Length in bytes of the physical block
+ * @return _MALI_OSK_ERR_OK on success.
+ * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+ * Specifically, _MALI_OSK_ERR_UNSUPPORTED indicates that the function
+ * does not support allocating physical pages itself.
+ */
+ _mali_osk_errcode_t (*map_physical)(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+
+ /**
+ * Function called to remove a physical sub allocation.
+ * Called on error paths where one of the address managers fails.
+ *
+ * @note this is optional. For address managers where this is not
+ * implemented, the value of this member is NULL. The memory engine
+ * currently does not require the mali address manager to be able to
+ * unmap individual pages, but the process address manager must have this
+ * capability.
+ *
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] offset Offset from the start of the range
+ * @param[in] size Length in bytes of the physical block
+ * @param[in] flags Flags to use on a per-page basis. For OS-allocated
+ * physical pages, this must include _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR.
+ */
+ void (*unmap_physical)(mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags);
+
+} mali_kernel_mem_address_manager;
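+
+/* Call-pattern sketch (illustrative only; 'manager' stands for a
+ * mali_kernel_mem_address_manager instance): when the physical page should be
+ * allocated by the OS, the caller passes the magic physical address and reads
+ * back the page the manager committed:
+ *
+ *   u32 phys = MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC;
+ *   if (_MALI_OSK_ERR_OK == manager->map_physical(descriptor, offset, &phys, _MALI_OSK_CPU_PAGE_SIZE))
+ *   {
+ *       ... phys now holds the committed physical address ...
+ *   }
+ *
+ * The matching unmap_physical call must then include
+ * _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR in its flags. */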
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager);
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine);
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_provider, _mali_osk_list_t *tracking_list );
+void mali_allocation_engine_release_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size);
+void mali_allocation_engine_unmap_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags);
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider);
+
+void mali_allocation_engine_report_allocators(mali_physical_memory_allocator * physical_provider);
+
+#endif /* __MALI_KERNEL_MEMORY_ENGINE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h
new file mode 100644
index 00000000000..ff1e153678c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PP_H__
+#define __MALI_KERNEL_PP_H__
+
+extern struct mali_kernel_subsystem mali_subsystem_mali200;
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t malipp_signal_power_up( u32 core_num, mali_bool queue_only );
+_mali_osk_errcode_t malipp_signal_power_down( u32 core_num, mali_bool immediate_only );
+#endif
+
+#endif /* __MALI_KERNEL_PP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c
new file mode 100644
index 00000000000..07bb894b1b4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+typedef struct mali_profiling_entry
+{
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+} mali_profiling_entry;
+
+
+typedef enum mali_profiling_state
+{
+ MALI_PROFILING_STATE_UNINITIALIZED,
+ MALI_PROFILING_STATE_IDLE,
+ MALI_PROFILING_STATE_RUNNING,
+ MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+
+static _mali_osk_lock_t *lock = NULL;
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry* profile_entries = NULL;
+static u32 profile_entry_count = 0;
+static _mali_osk_atomic_t profile_insert_index;
+static _mali_osk_atomic_t profile_entries_written;
+
+
+_mali_osk_errcode_t _mali_profiling_init(void)
+{
+ profile_entries = NULL;
+ profile_entry_count = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+ _mali_osk_atomic_init(&profile_entries_written, 0);
+
+ lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0 );
+ if (NULL == lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_profiling_term(void)
+{
+ prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+ /* wait for all elements to be completely inserted into array */
+ while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
+ {
+ /* do nothing */;
+ }
+
+ if (NULL != profile_entries)
+ {
+ _mali_osk_free(profile_entries);
+ profile_entries = NULL;
+ }
+
+ if (NULL != lock)
+ {
+ _mali_osk_lock_term(lock);
+ lock = NULL;
+ }
+}
+
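+/* Note on concurrency (descriptive comment, not in the original source): an
+ * entry is reserved by atomically incrementing profile_insert_index; if
+ * profiling is not running or the buffer is full, the reservation is rolled
+ * back with a decrement. profile_entries_written is only incremented once an
+ * entry is completely filled in, which is what _mali_profiling_term() and
+ * _mali_ukk_profiling_stop() poll for before touching the buffer. */
+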
+inline _mali_osk_errcode_t _mali_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+ u32 cur_index = _mali_osk_atomic_inc_return(&profile_insert_index) - 1;
+
+ if (prof_state != MALI_PROFILING_STATE_RUNNING || cur_index >= profile_entry_count)
+ {
+ /*
+ * Not in recording mode, or buffer is full
+ * Decrement index again, and early out
+ */
+ _mali_osk_atomic_dec(&profile_insert_index);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ profile_entries[cur_index].timestamp = _mali_timestamp_get();
+ profile_entries[cur_index].event_id = event_id;
+ profile_entries[cur_index].data[0] = data0;
+ profile_entries[cur_index].data[1] = data1;
+ profile_entries[cur_index].data[2] = data2;
+ profile_entries[cur_index].data[3] = data3;
+ profile_entries[cur_index].data[4] = data4;
+
+ _mali_osk_atomic_inc(&profile_entries_written);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
+{
+ _mali_osk_errcode_t ret;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_IDLE)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (args->limit > MALI_PROFILING_MAX_BUFFER_ENTRIES)
+ {
+ args->limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ }
+
+ profile_entries = _mali_osk_malloc(args->limit * sizeof(mali_profiling_entry));
+ profile_entry_count = args->limit;
+ if (NULL == profile_entries)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ ret = _mali_timestamp_reset();
+
+ if (ret == _MALI_OSK_ERR_OK)
+ {
+ prof_state = MALI_PROFILING_STATE_RUNNING;
+ }
+ else
+ {
+ _mali_osk_free(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+ /* Always add the process and thread identifiers in the first two data elements for events from user space */
+ return _mali_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RUNNING)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ /* go into return state (user to retrieve events), no more events will be added after this */
+ prof_state = MALI_PROFILING_STATE_RETURN;
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* wait for all elements to be completely inserted into array */
+ while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
+ {
+ /* do nothing */;
+ }
+
+ args->count = _mali_osk_atomic_read(&profile_insert_index);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+{
+ u32 index = args->index;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (index >= _mali_osk_atomic_read(&profile_entries_written))
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ args->timestamp = profile_entries[index].timestamp;
+ args->event_id = profile_entries[index].event_id;
+ args->data[0] = profile_entries[index].data[0];
+ args->data[1] = profile_entries[index].data[1];
+ args->data[2] = profile_entries[index].data[2];
+ args->data[3] = profile_entries[index].data[3];
+ args->data[4] = profile_entries[index].data[4];
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+ profile_entry_count = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+ _mali_osk_atomic_init(&profile_entries_written, 0);
+ if (NULL != profile_entries)
+ {
+ _mali_osk_free(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h
new file mode 100644
index 00000000000..5d3aa6eb841
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PROFILING_H__
+#define __MALI_KERNEL_PROFILING_H__
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+
+#include <../../../include/cinstr/mali_cinstr_profiling_events_m200.h>
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_init(void);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_profiling_term(void);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 - First data parameter, depending on event_id specified.
+ * @param data1 - Second data parameter, depending on event_id specified.
+ * @param data2 - Third data parameter, depending on event_id specified.
+ * @param data3 - Fourth data parameter, depending on event_id specified.
+ * @param data4 - Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
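+
+/* Usage sketch (illustrative only; MY_EVENT_ID stands in for one of the
+ * cinstr event identifiers and the data words are caller-defined). Events are
+ * only recorded while a capture started through _mali_ukk_profiling_start()
+ * is running:
+ *
+ *   _mali_profiling_init();
+ *   ...
+ *   _mali_profiling_add_event(MY_EVENT_ID, data0, data1, data2, data3, data4);
+ *   ...
+ *   _mali_profiling_term();
+ */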
+
+#endif /* MALI_TIMELINE_PROFILING_ENABLED */
+
+#endif /* __MALI_KERNEL_PROFILING_H__ */
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c
new file mode 100644
index 00000000000..1db8f3ad5ab
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c
@@ -0,0 +1,2032 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_osk_list.h"
+#if MALI_GPU_UTILIZATION
+#include "mali_kernel_utilization.h"
+#endif
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h"
+#endif /* USING_MMU */
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif /* USING_MALI400_L2_CACHE */
+
+#define HANG_CHECK_MSECS_MIN 100
+#define HANG_CHECK_MSECS_MAX 2000 /* 2 secs */
+#define HANG_CHECK_MSECS_DEFAULT 500 /* 500 ms */
+
+#define WATCHDOG_MSECS_MIN (2*HANG_CHECK_MSECS_MIN)
+#define WATCHDOG_MSECS_MAX 3600000 /* 1 hour */
+#define WATCHDOG_MSECS_DEFAULT 900000 /* 15 mins */
+
+/* max value that will be converted from jiffies to msecs and written to job->render_time_msecs */
+#define JOB_MAX_JIFFIES 100000
+
+int mali_hang_check_interval = HANG_CHECK_MSECS_DEFAULT;
+int mali_max_job_runtime = WATCHDOG_MSECS_DEFAULT;
+
+/* Subsystem entrypoints: */
+static _mali_osk_errcode_t rendercore_subsystem_startup(mali_kernel_subsystem_identifier id);
+static void rendercore_subsystem_terminate(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static void rendercore_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+
+
+static void mali_core_subsystem_cleanup_all_renderunits(struct mali_core_subsystem* subsys);
+static void mali_core_subsystem_move_core_set_idle(struct mali_core_renderunit *core);
+
+static mali_core_session * mali_core_subsystem_get_waiting_session(mali_core_subsystem *subsystem);
+static mali_core_job * mali_core_subsystem_release_session_get_job(mali_core_subsystem *subsystem, mali_core_session * session);
+
+static void find_and_abort(mali_core_session* session, u32 abort_id);
+
+static void mali_core_job_start_on_core(mali_core_job *job, mali_core_renderunit *core);
+#if USING_MMU
+static void mali_core_subsystem_callback_schedule_wrapper(void* sub);
+#endif
+static void mali_core_subsystem_schedule(mali_core_subsystem*subsystem);
+static void mali_core_renderunit_detach_job_from_core(mali_core_renderunit* core, mali_subsystem_reschedule_option reschedule, mali_subsystem_job_end_code end_status);
+
+static void mali_core_renderunit_irq_handler_remove(struct mali_core_renderunit *core);
+
+static _mali_osk_errcode_t mali_core_irq_handler_upper_half (void * data);
+static void mali_core_irq_handler_bottom_half ( void *data );
+
+#if USING_MMU
+static void lock_subsystem(struct mali_core_subsystem * subsys);
+static void unlock_subsystem(struct mali_core_subsystem * subsys);
+#endif
+
+
+/**
+ * This will be one of the subsystems in the array of subsystems:
+ * static struct mali_kernel_subsystem * subsystems[];
+ * found in file: mali_kernel_core.c
+ *
+ * This subsystem is necessary for operations common to all rendercore
+ * subsystems. For example, mali_subsystem_mali200 and mali_subsystem_gp2 may
+ * share a mutex when RENDERCORES_USE_GLOBAL_MUTEX is non-zero.
+ */
+struct mali_kernel_subsystem mali_subsystem_rendercore=
+{
+ rendercore_subsystem_startup, /* startup */
+ rendercore_subsystem_terminate, /* shutdown */
+ NULL, /* load_complete */
+ NULL, /* system_info_fill */
+ NULL, /* session_begin */
+ NULL, /* session_end */
+#if USING_MMU
+ rendercore_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+} ;
+
+static _mali_osk_lock_t *rendercores_global_mutex = NULL;
+static u32 rendercores_global_mutex_is_held = 0;
+static u32 rendercores_global_mutex_owner = 0;
+
+/** The 'dummy' rendercore subsystem to allow global subsystem mutex to be
+ * locked for all subsystems that extend the ''rendercore'' */
+static mali_core_subsystem rendercore_dummy_subsystem = {0,};
+
+/*
+ * Rendercore Subsystem functions.
+ *
+ * These are exposed by mali_subsystem_rendercore
+ */
+
+/**
+ * @brief Initialize the Rendercore subsystem.
+ *
+ * This must be called before any other subsystem that extends the
+ * ''rendercore'' may be initialized. For example, this must be called before
+ * the following functions:
+ * - mali200_subsystem_startup(), from mali_subsystem_mali200
+ * - maligp_subsystem_startup(), from mali_subsystem_gp2
+ *
+ * @note This function is separate from mali_core_subsystem_init(). They
+ * are related, in that mali_core_subsystem_init() may use the structures
+ * initialized by rendercore_subsystem_startup()
+ */
+static _mali_osk_errcode_t rendercore_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ rendercores_global_mutex_is_held = 0;
+ rendercores_global_mutex = _mali_osk_lock_init(
+ (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED),
+ 0, 129);
+
+ if (NULL == rendercores_global_mutex)
+ {
+ MALI_PRINT_ERROR(("Failed: _mali_osk_lock_init\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ rendercore_dummy_subsystem.name = "Rendercore Global Subsystem"; /* On the constant pool, do not free */
+ rendercore_dummy_subsystem.magic_nr = SUBSYSTEM_MAGIC_NR; /* To please the Subsystem Mutex code */
+
+#if MALI_GPU_UTILIZATION
+ if (mali_utilization_init() != _MALI_OSK_ERR_OK)
+ {
+ _mali_osk_lock_term(rendercores_global_mutex);
+ rendercores_global_mutex = NULL;
+ MALI_PRINT_ERROR(("Failed: mali_utilization_init\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ if (_mali_profiling_init() != _MALI_OSK_ERR_OK)
+ {
+		/* Not a big deal if we weren't able to initialize the profiling */
+ MALI_PRINT_ERROR(("Rendercore: Failed to initialize profiling, feature will be unavailable\n")) ;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(2, ("Rendercore: subsystem global mutex initialized\n")) ;
+ MALI_SUCCESS;
+}
+
+/**
+ * @brief Terminate the Rendercore subsystem.
+ *
+ * This must only be called \b after any other subsystem that extends the
+ * ''rendercore'' has been terminated. For example, this must be called \b after
+ * the following functions:
+ * - mali200_subsystem_terminate(), from mali_subsystem_mali200
+ * - maligp_subsystem_terminate(), from mali_subsystem_gp2
+ *
+ * @note This function is separate from mali_core_subsystem_cleanup(); however,
+ * the subsystems that extend ''rendercore'' must still call
+ * mali_core_subsystem_cleanup() when they terminate.
+ */
+static void rendercore_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ /* Catch double-terminate */
+ MALI_DEBUG_ASSERT_POINTER( rendercores_global_mutex );
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_term();
+#endif
+
+#if MALI_GPU_UTILIZATION
+ mali_utilization_term();
+#endif
+
+ rendercore_dummy_subsystem.name = NULL; /* The original string was on the constant pool, do not free */
+ rendercore_dummy_subsystem.magic_nr = 0;
+
+ /* ASSERT that no-one's holding this */
+ MALI_DEBUG_PRINT_ASSERT( 0 == rendercores_global_mutex_is_held,
+ ("Rendercores' Global Mutex was held at termination time. Have the subsystems that extend ''rendercore'' been terminated?\n") );
+
+ _mali_osk_lock_term( rendercores_global_mutex );
+ rendercores_global_mutex = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Rendercore: subsystem global mutex terminated\n")) ;
+}
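A hedged ordering sketch for the two entry points above. In the real driver this ordering comes from the order of entries in the subsystems[] table in mali_kernel_core.c rather than from direct calls, and the mali200_* functions are assumed to share the startup/terminate signature used above:

	/* Illustration only: bring the rendercore globals up first, tear them down last. */
	static void example_bringup_order(mali_kernel_subsystem_identifier id)
	{
		rendercore_subsystem_startup(id);    /* global mutex and helpers first        */
		mali200_subsystem_startup(id);       /* then subsystems extending rendercore  */

		mali200_subsystem_terminate(id);     /* extenders terminate first             */
		rendercore_subsystem_terminate(id);  /* global rendercore state torn down last */
	}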
+
+
+#if USING_MMU
+/**
+ * @brief Handle certain Rendercore subsystem broadcast notifications
+ *
+ * When RENDERCORES_USE_GLOBAL_MUTEX is non-zero, this handles the following messages:
+ * - MMU_KILL_STEP0_LOCK_SUBSYSTEM
+ * - MMU_KILL_STEP4_UNLOCK_SUBSYSTEM
+ *
+ * The purpose is to manage the Rendercore Global Mutex, which cannot be
+ * managed by any subsystem that extends the ''rendercore''.
+ *
+ * All other messages must be handled by mali_core_subsystem_broadcast_notification()
+ *
+ *
+ * When RENDERCORES_USE_GLOBAL_MUTEX is 0, this function does nothing.
+ * Instead, the subsystem that extends the ''rendercore'' \b must handle its
+ * own mutexes - refer to mali_core_subsystem_broadcast_notification().
+ *
+ * Currently used only for signalling when the MMU has a page fault.
+ */
+static void rendercore_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ switch(message)
+ {
+ case MMU_KILL_STEP0_LOCK_SUBSYSTEM:
+ lock_subsystem( &rendercore_dummy_subsystem );
+ break;
+ case MMU_KILL_STEP4_UNLOCK_SUBSYSTEM:
+ unlock_subsystem( &rendercore_dummy_subsystem );
+ break;
+
+ case MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES:
+ /** FALLTHROUGH */
+ case MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS:
+ /** FALLTHROUGH */
+ case MMU_KILL_STEP3_CONTINUE_JOB_HANDLING:
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Illegal message: 0x%x, data: 0x%x\n", (u32)message, data));
+ break;
+ }
+
+}
+#endif
+
+/*
+ * Functions inherited by the subsystems that extend the ''rendercore''.
+ */
+
+u32 mali_core_renderunit_register_read(mali_core_renderunit *core, u32 relative_address)
+{
+ u32 read_val;
+
+ #if USING_MALI_PMM
+ if( core->state == CORE_OFF )
+ {
+ MALI_PRINT_ERROR(("Core is OFF during read: Core:%s Addr:0x%04X\n",
+ core->description,relative_address));
+ return 0xDEADBEEF;
+ }
+ #endif
+
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+
+ if (mali_benchmark) return 0;
+
+ if (relative_address >= core->size)
+ {
+ MALI_PRINT_ERROR(("Trying to read from illegal register: 0x%04x in core: %s\n",
+ relative_address, core->description));
+ return 0xDEADBEEF;
+ }
+
+ read_val = _mali_osk_mem_ioread32(core->registers_mapped, relative_address);
+
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_read: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, read_val));
+
+ return read_val;
+}
+
+void mali_core_renderunit_register_read_array(mali_core_renderunit *core,
+ u32 relative_address,
+ u32 * result_array,
+ u32 nr_of_regs
+ )
+{
+ /* NOTE Do not use burst reads against the registers */
+
+ u32 i;
+
+ for(i=0; i<nr_of_regs; ++i)
+ {
+ result_array[i] = mali_core_renderunit_register_read(core, relative_address + i*4);
+ }
+
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_read_array: Core:%s Addr:0x%04X Nr_regs: %u\n",
+ core->description,relative_address, nr_of_regs));
+}
+
+void mali_core_renderunit_register_write(mali_core_renderunit *core, u32 relative_address, u32 new_val)
+{
+ #if USING_MALI_PMM
+ if( core->state == CORE_OFF )
+ {
+ MALI_PRINT_ERROR(("Core is OFF during write: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, new_val));
+ return;
+ }
+ #endif
+
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+
+ if (mali_benchmark) return;
+
+ if (relative_address >= core->size)
+ {
+ MALI_PRINT_ERROR(("Trying to write to illegal register: 0x%04x in core: %s",
+ relative_address, core->description));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(6, ("mali_core_renderunit_register_write: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, new_val));
+
+ _mali_osk_mem_iowrite32(core->registers_mapped, relative_address, new_val);
+}
+
+
+void mali_core_renderunit_register_write_array(mali_core_renderunit *core,
+ u32 relative_address,
+ u32 * source_array,
+ u32 nr_of_regs)
+{
+
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_write_array: Core:%s Addr:0x%04X Nr_regs: %u\n",
+ core->description,relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+
+ for( i = 0; i< nr_of_regs; i++)
+ {
+ mali_core_renderunit_register_write(core, relative_address + i*4, source_array[i]);
+ }
+}
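A hedged sketch of the single-word access pattern these helpers expect (word-aligned offsets inside core->size, no burst accesses); the offset 0x0000 is a placeholder, not taken from the real register map:

	/* Hypothetical helper, illustration only. */
	static u32 example_register_access(mali_core_renderunit *core)
	{
		u32 regs[4];
		u32 first;

		/* Single 32-bit read from a word-aligned offset. */
		first = mali_core_renderunit_register_read(core, 0x0000);

		/* Reads four consecutive registers one word at a time (no bursts). */
		mali_core_renderunit_register_read_array(core, 0x0000, regs, 4);

		/* Writes the value back, again as a single 32-bit access. */
		mali_core_renderunit_register_write(core, 0x0000, first);

		return first;
	}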
+
+void mali_core_renderunit_timeout_function_hang_detection(void *arg)
+{
+ mali_bool action = MALI_FALSE;
+ mali_core_renderunit * core;
+
+ core = (mali_core_renderunit *) arg;
+ if( !core ) return;
+
+	/* only act if the core is neither IDLE nor already in WATCHDOG_TIMEOUT */
+ if ( !((CORE_WATCHDOG_TIMEOUT == core->state ) || (CORE_IDLE== core->state)) )
+ {
+ core->state = CORE_HANG_CHECK_TIMEOUT;
+ action = MALI_TRUE;
+ }
+
+ if(action) _mali_osk_irq_schedulework(core->irq);
+}
+
+
+void mali_core_renderunit_timeout_function(void *arg)
+{
+ mali_core_renderunit * core;
+ mali_bool is_watchdog;
+
+ core = (mali_core_renderunit *)arg;
+ if( !core ) return;
+
+ is_watchdog = MALI_TRUE;
+ if (mali_benchmark)
+ {
+ /* poll based core */
+ mali_core_job *job;
+ job = core->current_job;
+ if ( (NULL != job) &&
+ (0 != _mali_osk_time_after(job->watchdog_jiffies,_mali_osk_time_tickcount()))
+ )
+ {
+ core->state = CORE_POLL;
+ is_watchdog = MALI_FALSE;
+ }
+ }
+
+ if (is_watchdog)
+ {
+ MALI_DEBUG_PRINT(3, ("SW-Watchdog timeout: Core:%s\n", core->description));
+ core->state = CORE_WATCHDOG_TIMEOUT;
+ }
+
+ _mali_osk_irq_schedulework(core->irq);
+}
+
+/* Used by external renderunit_create<> function */
+_mali_osk_errcode_t mali_core_renderunit_init(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Core: renderunit_init: Core:%s\n", core->description));
+
+ _MALI_OSK_INIT_LIST_HEAD(&core->list) ;
+ core->timer = _mali_osk_timer_init();
+ if (NULL == core->timer)
+ {
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot init timer\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ _mali_osk_timer_setcallback(core->timer, mali_core_renderunit_timeout_function, (void *)core);
+
+ core->timer_hang_detection = _mali_osk_timer_init();
+ if (NULL == core->timer_hang_detection)
+ {
+ _mali_osk_timer_term(core->timer);
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot init hang detection timer\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ _mali_osk_timer_setcallback(core->timer_hang_detection, mali_core_renderunit_timeout_function_hang_detection, (void *)core);
+
+#if USING_MALI_PMM
+ /* Init no pending power downs */
+ core->pend_power_down = MALI_FALSE;
+
+ /* Register the core with the PMM - which powers it up */
+ if (_MALI_OSK_ERR_OK != malipmm_core_register( core->pmm_id ))
+ {
+ _mali_osk_timer_term(core->timer);
+ _mali_osk_timer_term(core->timer_hang_detection);
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot register with PMM\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif /* USING_MALI_PMM */
+
+ core->error_recovery = MALI_FALSE;
+ core->in_detach_function = MALI_FALSE;
+ core->state = CORE_IDLE;
+ core->current_job = NULL;
+ core->magic_nr = CORE_MAGIC_NR;
+#if USING_MMU
+ core->mmu = NULL;
+#endif /* USING_MMU */
+
+ MALI_SUCCESS;
+}
+
+void mali_core_renderunit_term(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Core: renderunit_term: Core:%s\n", core->description));
+
+ if (NULL != core->timer)
+ {
+ _mali_osk_timer_term(core->timer);
+ core->timer = NULL;
+ }
+ if (NULL != core->timer_hang_detection)
+ {
+ _mali_osk_timer_term(core->timer_hang_detection);
+ core->timer_hang_detection = NULL;
+ }
+
+#if USING_MALI_PMM
+ /* Unregister the core with the PMM */
+ malipmm_core_unregister( core->pmm_id );
+#endif
+}
+
+/* Used by external renderunit_create<> function */
+_mali_osk_errcode_t mali_core_renderunit_map_registers(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_map_registers: Core:%s\n", core->description)) ;
+ if( (0 == core->registers_base_addr) ||
+ (0 == core->size) ||
+ (NULL == core->description)
+ )
+ {
+ MALI_PRINT_ERROR(("Missing fields in the core structure %u %u 0x%x;\n", core->registers_base_addr, core->size, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(core->registers_base_addr, core->size, core->description))
+ {
+ MALI_PRINT_ERROR(("Could not request register region (0x%08X - 0x%08X) to core: %s\n",
+ core->registers_base_addr, core->registers_base_addr + core->size - 1, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Success: request_mem_region: (0x%08X - 0x%08X) Core:%s\n",
+ core->registers_base_addr, core->registers_base_addr + core->size - 1, core->description));
+ }
+
+ core->registers_mapped = _mali_osk_mem_mapioregion( core->registers_base_addr, core->size, core->description );
+
+ if ( 0 == core->registers_mapped )
+ {
+ MALI_PRINT_ERROR(("Could not ioremap registers for %s .\n", core->description));
+ _mali_osk_mem_unreqregion(core->registers_base_addr, core->size);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Success: ioremap_nocache: Internal ptr: (0x%08X - 0x%08X) Core:%s\n",
+ (u32) core->registers_mapped,
+ ((u32)core->registers_mapped)+ core->size - 1,
+ core->description));
+ }
+
+ MALI_DEBUG_PRINT(4, ("Success: Mapping registers to core: %s\n",core->description));
+
+ MALI_SUCCESS;
+}
+
+/* Used by external renderunit_create<> function + other places */
+void mali_core_renderunit_unmap_registers(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_unmap_registers: Core:%s\n", core->description));
+ if (0 == core->registers_mapped)
+ {
+ MALI_PRINT_ERROR(("Trying to unmap register-mapping with NULL from core: %s\n", core->description));
+ return;
+ }
+ _mali_osk_mem_unmapioregion(core->registers_base_addr, core->size, core->registers_mapped);
+ core->registers_mapped = 0;
+ _mali_osk_mem_unreqregion(core->registers_base_addr, core->size);
+}
+
+static void mali_core_renderunit_irq_handler_remove(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_irq_handler_remove: Core:%s\n", core->description));
+ _mali_osk_irq_term(core->irq);
+}
+
+mali_core_renderunit * mali_core_renderunit_get_mali_core_nr(mali_core_subsystem *subsys, u32 mali_core_nr)
+{
+ mali_core_renderunit * core;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ if (subsys->number_of_cores <= mali_core_nr)
+ {
+ MALI_PRINT_ERROR(("Trying to get illegal mali_core_nr: 0x%x for %s", mali_core_nr, subsys->name));
+ return NULL;
+ }
+ core = (subsys->mali_core_array)[mali_core_nr];
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_get_mali_core_nr: Core:%s\n", core->description));
+ MALI_CHECK_CORE(core);
+ return core;
+}
+
+/* Is used by external function:
+ subsystem_startup<> */
+_mali_osk_errcode_t mali_core_subsystem_init(mali_core_subsystem* new_subsys)
+{
+ int i;
+
+	/* These function pointers must have been set before calling this function */
+ if (
+ ( NULL == new_subsys->name ) ||
+ ( NULL == new_subsys->start_job ) ||
+ ( NULL == new_subsys->irq_handler_upper_half ) ||
+ ( NULL == new_subsys->irq_handler_bottom_half ) ||
+ ( NULL == new_subsys->get_new_job_from_user ) ||
+ ( NULL == new_subsys->return_job_to_user )
+ )
+ {
+ MALI_PRINT_ERROR(("Missing functions in subsystem."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_DEBUG_PRINT(2, ("Core: subsystem_init: %s\n", new_subsys->name)) ;
+
+ /* Catch use-before-initialize/use-after-terminate */
+ MALI_DEBUG_ASSERT_POINTER( rendercores_global_mutex );
+
+ new_subsys->magic_nr = SUBSYSTEM_MAGIC_NR;
+
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->renderunit_idle_head); /* Idle cores of this type */
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->renderunit_off_head); /* Powered off cores of this type */
+
+	/* Linked list, per priority level, of sessions with a job ready for scheduling */
+ for(i=0; i<PRIORITY_LEVELS; ++i)
+ {
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->awaiting_sessions_head[i]);
+ }
+
+ /* Linked list of all sessions connected to this coretype */
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->all_sessions_head);
+
+ MALI_SUCCESS;
+}
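A hedged sketch of the minimum a subsystem extending the ''rendercore'' has to fill in before calling mali_core_subsystem_init(); the example_* names are hypothetical placeholders for the callbacks that mali_subsystem_mali200 or mali_subsystem_gp2 really provide:

	/* Illustration only; all example_* callbacks are placeholders. */
	static mali_core_subsystem example_subsystem;

	static _mali_osk_errcode_t example_subsystem_startup(void)
	{
		example_subsystem.name                    = "Example Subsystem";
		example_subsystem.start_job               = example_start_job;
		example_subsystem.irq_handler_upper_half  = example_irq_upper_half;
		example_subsystem.irq_handler_bottom_half = example_irq_bottom_half;
		example_subsystem.get_new_job_from_user   = example_get_new_job_from_user;
		example_subsystem.return_job_to_user      = example_return_job_to_user;

		/* mali_core_subsystem_init() fails with _MALI_OSK_ERR_FAULT if any of
		 * the pointers above are left NULL. */
		return mali_core_subsystem_init(&example_subsystem);
	}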
+
+#if USING_MMU
+void mali_core_subsystem_attach_mmu(mali_core_subsystem* subsys)
+{
+ u32 i;
+ mali_core_renderunit * core;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ if ( NULL==core ) break;
+ core->mmu = mali_memory_core_mmu_lookup(core->mmu_id);
+ MALI_DEBUG_PRINT(2, ("Attach mmu: 0x%x to core: %s in subsystem: %s\n", core->mmu, core->description, subsys->name));
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+}
+#endif
+
+/* This will register an IRQ handler, and add the core to the list of available cores for this subsystem. */
+_mali_osk_errcode_t mali_core_subsystem_register_renderunit(mali_core_subsystem* subsys, mali_core_renderunit * core)
+{
+ mali_core_renderunit ** mali_core_array;
+ u32 previous_nr;
+ u32 previous_size;
+ u32 new_nr;
+ u32 new_size;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+
+ /* If any of these are 0 there is an error */
+ if(0 == core->subsystem ||
+ 0 == core->registers_base_addr ||
+ 0 == core->size ||
+ 0 == core->description)
+ {
+ MALI_PRINT_ERROR(("Missing fields in the core structure 0x%x 0x%x 0x%x;\n",
+ core->registers_base_addr, core->size, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Core: subsystem_register_renderunit: %s\n", core->description));
+
+ MALI_CHECK_NON_NULL(
+ core->irq = _mali_osk_irq_init(
+ core->irq_nr,
+ mali_core_irq_handler_upper_half,
+ mali_core_irq_handler_bottom_half,
+ (_mali_osk_irq_trigger_t)subsys->probe_core_irq_trigger,
+ (_mali_osk_irq_ack_t)subsys->probe_core_irq_acknowledge,
+ core,
+ "mali_core_irq_handlers"
+ ),
+ _MALI_OSK_ERR_FAULT
+ );
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ /* Update which core number this is */
+ core->core_number = subsys->number_of_cores;
+
+ /* Update the array of cores in the subsystem. */
+ previous_nr = subsys->number_of_cores;
+ previous_size = sizeof(mali_core_renderunit*)*previous_nr;
+ new_nr = previous_nr + 1;
+ new_size = sizeof(mali_core_renderunit*)*new_nr;
+
+ if (0 != previous_nr)
+ {
+ if (NULL == subsys->mali_core_array)
+ {
+ MALI_PRINT_ERROR(("Internal error"));
+ goto exit_function;
+ }
+
+ mali_core_array = (mali_core_renderunit **) _mali_osk_malloc( new_size );
+ if (NULL == mali_core_array )
+ {
+ MALI_PRINT_ERROR(("Out of mem"));
+ err = _MALI_OSK_ERR_NOMEM;
+ goto exit_function;
+ }
+ _mali_osk_memcpy(mali_core_array, subsys->mali_core_array, previous_size);
+ _mali_osk_free( subsys->mali_core_array);
+ MALI_DEBUG_PRINT(5, ("Success: adding a new core to subsystem array %s\n", core->description) ) ;
+ }
+ else
+ {
+ mali_core_array = (mali_core_renderunit **) _mali_osk_malloc( new_size );
+ if (NULL == mali_core_array )
+ {
+ MALI_PRINT_ERROR(("Out of mem"));
+ err = _MALI_OSK_ERR_NOMEM;
+ goto exit_function;
+ }
+ MALI_DEBUG_PRINT(6, ("Success: adding first core to subsystem array %s\n", core->description) ) ;
+ }
+ subsys->mali_core_array = mali_core_array;
+ mali_core_array[previous_nr] = core;
+
+ /* Add the core to the list of available cores on the system */
+ _mali_osk_list_add(&(core->list), &(subsys->renderunit_idle_head));
+
+ /* Update total number of cores */
+ subsys->number_of_cores = new_nr;
+ MALI_DEBUG_PRINT(6, ("Success: mali_core_subsystem_register_renderunit %s\n", core->description));
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_SUCCESS;
+
+exit_function:
+ mali_core_renderunit_irq_handler_remove(core);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_ERROR(err);
+}
+
+
+/**
+ * Called by the core when a system info update is needed.
+ * We fill in info about all the available core types.
+ * @param subsys Pointer to the core's @a mali_core_subsystem data structure
+ * @param info Pointer to system info struct to update
+ * @return _MALI_OSK_ERR_OK on success, or another _mali_osk_errcode_t error code on failure
+ */
+_mali_osk_errcode_t mali_core_subsystem_system_info_fill(mali_core_subsystem* subsys, _mali_system_info* info)
+{
+ u32 i;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK; /* OK if no cores to update info for */
+ mali_core_renderunit * core;
+ _mali_core_info **core_info_nextp;
+ _mali_core_info * cinfo;
+
+ MALI_DEBUG_PRINT(4, ("mali_core_subsystem_system_info_fill: %s\n", subsys->name) ) ;
+
+ /* check input */
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ core_info_nextp = &(info->core_info);
+ cinfo = info->core_info;
+
+ while(NULL!=cinfo)
+ {
+ core_info_nextp = &(cinfo->next);
+ cinfo = cinfo->next;
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ if ( NULL==core )
+ {
+ err = _MALI_OSK_ERR_FAULT;
+ goto early_exit;
+ }
+ cinfo = (_mali_core_info *)_mali_osk_calloc(1, sizeof(_mali_core_info));
+ if ( NULL==cinfo )
+ {
+ err = _MALI_OSK_ERR_NOMEM;
+ goto early_exit;
+ }
+ cinfo->version = core->core_version;
+ cinfo->type =subsys->core_type;
+ cinfo->reg_address = core->registers_base_addr;
+ cinfo->core_nr = i;
+ cinfo->next = NULL;
+		/* Link this entry into the previous entry's next pointer */
+		*core_info_nextp = cinfo;
+		/* Advance core_info_nextp to this entry's next pointer for the next iteration */
+		core_info_nextp = &(cinfo->next);
+ }
+early_exit:
+ if ( _MALI_OSK_ERR_OK != err) MALI_PRINT_ERROR(("Error: In mali_core_subsystem_system_info_fill %d\n", err));
+ MALI_DEBUG_CODE(
+ cinfo = info->core_info;
+
+ MALI_DEBUG_PRINT(3, ("Current list of cores\n"));
+ while( NULL != cinfo )
+ {
+ MALI_DEBUG_PRINT(3, ("Type: 0x%x\n", cinfo->type));
+ MALI_DEBUG_PRINT(3, ("Version: 0x%x\n", cinfo->version));
+ MALI_DEBUG_PRINT(3, ("Reg_addr: 0x%x\n", cinfo->reg_address));
+ MALI_DEBUG_PRINT(3, ("Core_nr: 0x%x\n", cinfo->core_nr));
+ MALI_DEBUG_PRINT(3, ("Flags: 0x%x\n", cinfo->flags));
+ MALI_DEBUG_PRINT(3, ("*****\n"));
+ cinfo = cinfo->next;
+ }
+ );
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_ERROR(err);
+}
+
+
+/* Is used by external function:
+ subsystem_terminate<> */
+void mali_core_subsystem_cleanup(mali_core_subsystem* subsys)
+{
+ u32 i;
+ mali_core_renderunit * core;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ MALI_DEBUG_PRINT(2, ("Core: subsystem_cleanup: %s\n", subsys->name )) ;
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+#if USING_MMU
+ if (NULL != core->mmu)
+ {
+ /* the MMU is attached in the load_complete callback, which will never be called if the module fails to load, handle that case */
+ mali_memory_core_mmu_unregister_callback(core->mmu, mali_core_subsystem_callback_schedule_wrapper);
+ }
+#endif
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ mali_core_renderunit_irq_handler_remove(core);
+
+		/* When a process terminates, all cores running jobs from that process are reset and put to idle.
+		   That means that when the module is unloading (this code) we are guaranteed that all cores are idle.
+		   However, if something unforeseen is really wrong, a core may still raise an interrupt during this
+		   unloading, and we may then have bottom-half processing pending from the interrupts
+		   we deregistered above. To be sure that the bottom halves do not access the structures after they
+		   are deallocated, we flush the pending bottom-half work here, before the deallocation. */
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+#if USING_MALI_PMM
+ /* Only reset when we are using PMM and the core is not off */
+#if MALI_PMM_NO_PMU
+	/* We need to reset even when there is no PMU, but the register
+	 * read/write functions would report an error while the core is
+	 * marked CORE_OFF (hence the check below), so we change the state
+	 * here to allow the reset to happen.
+	 */
+ core->state = CORE_IDLE;
+#endif
+ if( core->state != CORE_OFF )
+ {
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_DISABLE );
+ }
+#else
+ /* Always reset the core */
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_DISABLE );
+#endif
+
+ mali_core_renderunit_unmap_registers(core);
+
+ _mali_osk_list_delinit(&core->list);
+
+ mali_core_renderunit_term(core);
+
+ subsys->renderunit_delete(core);
+ }
+
+ mali_core_subsystem_cleanup_all_renderunits(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT(6, ("SUCCESS: mali_core_subsystem_cleanup: %s\n", subsys->name )) ;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_number_of_cores_get(mali_core_session * session, u32 *number_of_cores)
+{
+ mali_core_subsystem * subsystem;
+
+ subsystem = session->subsystem;
+	if ( NULL != number_of_cores )
+	{
+		*number_of_cores = subsystem->number_of_cores;
+
+		MALI_DEBUG_PRINT(4, ("Core: ioctl_number_of_cores_get: %s: %u\n", subsystem->name, *number_of_cores) ) ;
+	}
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_start_job(mali_core_session * session, void *job_data)
+{
+ mali_core_subsystem * subsystem;
+ _mali_osk_errcode_t err;
+
+ /* need the subsystem to run callback function */
+ subsystem = session->subsystem;
+ MALI_CHECK_NON_NULL(subsystem, _MALI_OSK_ERR_FAULT);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ err = subsystem->get_new_job_from_user(session, job_data);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_ERROR(err);
+}
+
+
+/* We return the version number of the first core in this subsystem */
+_mali_osk_errcode_t mali_core_subsystem_ioctl_core_version_get(mali_core_session * session, _mali_core_version *version)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit * core0;
+ u32 nr_return;
+
+ subsystem = session->subsystem;
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+
+ core0 = mali_core_renderunit_get_mali_core_nr(subsystem, 0);
+
+ if( NULL == core0 )
+ {
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ nr_return = core0->core_version;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_DEBUG_PRINT(4, ("Core: ioctl_core_version_get: %s: %u\n", subsystem->name, nr_return )) ;
+
+ *version = nr_return;
+
+ MALI_SUCCESS;
+}
+
+void mali_core_subsystem_ioctl_abort_job(mali_core_session * session, u32 id)
+{
+ find_and_abort(session, id);
+}
+
+static mali_bool job_should_be_aborted(mali_core_job *job, u32 abort_id)
+{
+ if ( job->abort_id == abort_id ) return MALI_TRUE;
+ else return MALI_FALSE;
+}
+
+static void find_and_abort(mali_core_session* session, u32 abort_id)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit *core;
+ mali_core_renderunit *tmp;
+ mali_core_job *job;
+
+ subsystem = session->subsystem;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+ job = session->job_waiting_to_run;
+ if ( (job!=NULL) && job_should_be_aborted (job, abort_id) )
+ {
+ MALI_DEBUG_PRINT(3, ("Core: Aborting %s job, with id nr: %u, from the waiting_to_run slot.\n", subsystem->name, abort_id ));
+ session->job_waiting_to_run = NULL;
+ _mali_osk_list_delinit(&(session->awaiting_sessions_list));
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ subsystem->return_job_to_user( job , JOB_STATUS_END_ABORT);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY( core, tmp, &session->renderunits_working_head, mali_core_renderunit, list )
+ {
+ job = core->current_job;
+ if ( (job!=NULL) && (job_should_be_aborted (job, abort_id) ) )
+ {
+ MALI_DEBUG_PRINT(3, ("Core: Aborting %s job, with id nr: %u, which is currently running on mali.\n", subsystem->name, abort_id ));
+ if ( core->state==CORE_IDLE )
+ {
+				MALI_PRINT_ERROR(("Aborting a core that has a current job but reports state IDLE. Something is very wrong."));
+ goto end_bug;
+ }
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_ABORT);
+ }
+ }
+end_bug:
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_suspend_response(mali_core_session * session, void *argument)
+{
+ mali_core_subsystem * subsystem;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+
+ /* need the subsystem to run callback function */
+ subsystem = session->subsystem;
+ MALI_CHECK_NON_NULL(subsystem, _MALI_OSK_ERR_FAULT);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ if ( NULL != subsystem->suspend_response)
+ {
+ MALI_DEBUG_PRINT(4, ("MALI_IOC_CORE_CMD_SUSPEND_RESPONSE start\n"));
+ err = subsystem->suspend_response(session, argument);
+ MALI_DEBUG_PRINT(4, ("MALI_IOC_CORE_CMD_SUSPEND_RESPONSE end\n"));
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ return err;
+}
+
+
+/* Is used by internal function:
+	mali_core_subsystem_cleanup<> */
+/* All cores should be removed before calling this function.
+   Must hold subsystem_mutex before entering this function. */
+static void mali_core_subsystem_cleanup_all_renderunits(mali_core_subsystem* subsys)
+{
+ int i;
+ _mali_osk_free(subsys->mali_core_array);
+ subsys->number_of_cores = 0;
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_cleanup_all_renderunits: %s\n", subsys->name) ) ;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+
+ if ( ! _mali_osk_list_empty(&(subsys->renderunit_idle_head)))
+ {
+ MALI_PRINT_ERROR(("List renderunit_list_idle should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->renderunit_idle_head)) ;
+ }
+
+ if ( ! _mali_osk_list_empty(&(subsys->renderunit_off_head)))
+ {
+ MALI_PRINT_ERROR(("List renderunit_list_off should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->renderunit_off_head)) ;
+ }
+
+ for(i=0; i<PRIORITY_LEVELS; ++i)
+ {
+ if ( ! _mali_osk_list_empty(&(subsys->awaiting_sessions_head[i])))
+ {
+ MALI_PRINT_ERROR(("List awaiting_sessions_linkedlist should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->awaiting_sessions_head[i])) ;
+ subsys->awaiting_sessions_sum_all_priorities = 0;
+ }
+ }
+
+ if ( ! _mali_osk_list_empty(&(subsys->all_sessions_head)))
+ {
+ MALI_PRINT_ERROR(("List all_sessions_linkedlist should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->all_sessions_head)) ;
+ }
+}
+
+/* Is used by internal functions:
+ mali_core_irq_handler_bottom_half<>;
+ mali_core_subsystem_schedule<>; */
+/* Will release the core.*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_core_set_idle(mali_core_renderunit *core)
+{
+ mali_core_subsystem *subsystem;
+#if USING_MALI_PMM
+ mali_core_status oldstatus;
+#endif
+ subsystem = core->subsystem;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ _mali_osk_timer_del(core->timer);
+ _mali_osk_timer_del(core->timer_hang_detection);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_core_set_idle: %s\n", core->description) ) ;
+
+ core->current_job = NULL ;
+
+#if USING_MALI_PMM
+
+ oldstatus = core->state;
+
+ if( core->pend_power_down )
+ {
+ core->state = CORE_OFF ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_off_head );
+ /* Done the move from the active queues, so the pending power down can be done */
+ core->pend_power_down = MALI_FALSE;
+ malipmm_core_power_down_okay( core->pmm_id );
+ }
+ else
+ {
+ core->state = CORE_IDLE ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_idle_head );
+ }
+
+ if( CORE_OFF != oldstatus )
+ {
+ /* Message that this core is now idle or in fact off */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_FINISHED,
+ 0 };
+ event.data = core->pmm_id;
+ _mali_ukk_pmm_event_message( &event );
+#if USING_MMU
+ /* Only free the reference when entering idle state from
+ * anything other than power off
+ */
+ mali_memory_core_mmu_release_address_space_reference(core->mmu);
+#endif /* USING_MMU */
+ }
+
+
+#else /* !USING_MALI_PMM */
+
+ core->state = CORE_IDLE ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_idle_head );
+
+#if USING_MMU
+ mali_memory_core_mmu_release_address_space_reference(core->mmu);
+#endif
+
+#endif /* USING_MALI_PMM */
+}
+
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_set_working(mali_core_renderunit *core, mali_core_job *job)
+{
+ mali_core_subsystem *subsystem;
+ mali_core_session *session;
+
+ session = job->session;
+ subsystem = core->subsystem;
+
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_JOB(job);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_set_working: %s\n", core->description) ) ;
+
+ core->current_job = job ;
+ core->state = CORE_WORKING ;
+ job->start_time_jiffies = _mali_osk_time_tickcount();
+ _mali_osk_list_move( &core->list, &session->renderunits_working_head );
+
+}
+
+#if USING_MALI_PMM
+
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_core_set_off(mali_core_renderunit *core)
+{
+ mali_core_subsystem *subsystem;
+ subsystem = core->subsystem;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ /* Cores must be idle before powering off */
+ MALI_DEBUG_ASSERT(core->state == CORE_IDLE);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_core_set_off: %s\n", core->description) ) ;
+
+ core->current_job = NULL ;
+ core->state = CORE_OFF ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_off_head );
+}
+
+#endif /* USING_MALI_PMM */
+
+/* Is used by internal function:
+ mali_core_subsystem_schedule<>; */
+/* Returns the session with the highest-priority waiting job for the subsystem. NULL if none. */
+/* Must hold subsystem_mutex before entering this function */
+static mali_core_session * mali_core_subsystem_get_waiting_session(mali_core_subsystem *subsystem)
+{
+ int i;
+
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ if ( 0 == subsystem->awaiting_sessions_sum_all_priorities )
+ {
+		MALI_DEBUG_PRINT(5, ("Core: subsystem_get_waiting_session: No awaiting session found\n"));
+ return NULL;
+ }
+
+ for( i=0; i<PRIORITY_LEVELS ; ++i)
+ {
+ if (!_mali_osk_list_empty(&subsystem->awaiting_sessions_head[i]))
+ {
+ return _MALI_OSK_LIST_ENTRY(subsystem->awaiting_sessions_head[i].next, mali_core_session, awaiting_sessions_list);
+ }
+ }
+
+ return NULL;
+}
+
+static mali_core_job * mali_core_subsystem_release_session_get_job(mali_core_subsystem *subsystem, mali_core_session * session)
+{
+ mali_core_job *job;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ _mali_osk_list_delinit(&session->awaiting_sessions_list);
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ job = session->job_waiting_to_run;
+ session->job_waiting_to_run = NULL;
+ MALI_CHECK_JOB(job);
+ return job;
+}
+
+/* Is used by internal functions:
+ mali_core_subsystem_schedule<> */
+/* This will start the job on the core. It will also release the core if it did not start.*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_job_start_on_core(mali_core_job *job, mali_core_renderunit *core)
+{
+ mali_core_session *session;
+ mali_core_subsystem *subsystem;
+ _mali_osk_errcode_t err;
+ session = job->session;
+ subsystem = core->subsystem;
+
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_JOB(job);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_CHECK_SESSION(session);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(4, ("Core: job_start_on_core: job=0x%x, session=0x%x, core=%s\n", job, session, core->description));
+
+ MALI_DEBUG_ASSERT(NULL == core->current_job) ;
+ MALI_DEBUG_ASSERT(CORE_IDLE == core->state );
+
+ mali_core_subsystem_move_set_working(core, job);
+
+#if defined USING_MALI400_L2_CACHE
+ /* Invalidate the L2 cache */
+ if (_MALI_OSK_ERR_OK != mali_kernel_l2_cache_invalidate_all() )
+ {
+ MALI_DEBUG_PRINT(4, ("Core: Clear of L2 failed, return job. System may not be usable for some reason.\n"));
+ mali_core_subsystem_move_core_set_idle(core);
+ subsystem->return_job_to_user(job,JOB_STATUS_END_SYSTEM_UNUSABLE );
+ return;
+ }
+#endif
+
+	/* Tries to start the job on the core. Returns an error code if the job could not be started. */
+ err = subsystem->start_job(job, core);
+
+#if MALI_GPU_UTILIZATION
+ mali_utilization_core_start();
+#endif
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+		/* This will happen only if there is something in the job object
+		which makes it impossible to start, e.g. if it requires illegal memory. */
+ MALI_DEBUG_PRINT(4, ("Core: start_job failed, return job and putting core back into idle list\n"));
+ mali_core_subsystem_move_core_set_idle(core);
+ subsystem->return_job_to_user(job,JOB_STATUS_END_ILLEGAL_JOB );
+ }
+ else
+ {
+ u32 delay = _mali_osk_time_mstoticks(job->watchdog_msecs)+1;
+ job->watchdog_jiffies = _mali_osk_time_tickcount() + delay;
+ if (mali_benchmark)
+ {
+ _mali_osk_timer_add(core->timer, 1);
+ }
+ else
+ {
+ _mali_osk_timer_add(core->timer, delay);
+ }
+ }
+}
+
+#if USING_MMU
+static void mali_core_subsystem_callback_schedule_wrapper(void* sub)
+{
+ mali_core_subsystem * subsystem;
+ subsystem = (mali_core_subsystem *)sub;
+	MALI_DEBUG_PRINT(3, ("MMU: Is scheduling subsystem: %s\n", subsystem->name));
+ mali_core_subsystem_schedule(subsystem);
+}
+#endif
+
+/* Is used by internal function:
+ mali_core_irq_handler_bottom_half
+ mali_core_session_add_job
+*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_schedule(mali_core_subsystem * subsystem)
+{
+ mali_core_renderunit *core, *tmp;
+ mali_core_session *session;
+ mali_core_job *job;
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_schedule: %s\n", subsystem->name )) ;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ /* First check that there are sessions with jobs waiting to run */
+ if ( 0 == subsystem->awaiting_sessions_sum_all_priorities)
+ {
+ MALI_DEBUG_PRINT(6, ("Core: No jobs available for %s\n", subsystem->name) ) ;
+ return;
+ }
+
+ /* Returns the session with the highest priority job for the subsystem. NULL if none*/
+ session = mali_core_subsystem_get_waiting_session(subsystem);
+
+ if (NULL == session)
+ {
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: No runnable job found\n"));
+ return;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp, &subsystem->renderunit_idle_head, mali_core_renderunit, list)
+ {
+#if USING_MMU
+ int err = mali_memory_core_mmu_activate_page_table(core->mmu, session->mmu_session, mali_core_subsystem_callback_schedule_wrapper, subsystem);
+ if (0 == err)
+ {
+ /* core points to a core where the MMU page table activation succeeded */
+#endif
+ /* This will remove the job from queue system */
+ job = mali_core_subsystem_release_session_get_job(subsystem, session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Got a job 0x%x\n", job));
+
+#if USING_MALI_PMM
+ {
+ /* Message that there is a job scheduled to run
+ * NOTE: mali_core_job_start_on_core() can fail to start
+ * the job for several reasons, but it will move the core
+ * back to idle which will create the FINISHED message
+ * so we can still say that the job is SCHEDULED
+ */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_SCHEDULED,
+ 0 };
+ event.data = core->pmm_id;
+ _mali_ukk_pmm_event_message( &event );
+ }
+#endif
+ /* This will {remove core from freelist AND start the job on the core}*/
+ mali_core_job_start_on_core(job, core);
+
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Job started, done\n"));
+ return;
+#if USING_MMU
+ }
+#endif
+ }
+	MALI_DEBUG_PRINT(6, ("Core: Schedule: Could not activate MMU. Scheduling postponed to MMU, checking next.\n"));
+
+#if USING_MALI_PMM
+ {
+ /* Message that there are jobs to run */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_QUEUED,
+ 0 };
+ if( subsystem->core_type == _MALI_GP2 || subsystem->core_type == _MALI_400_GP )
+ {
+ event.data = MALI_PMM_CORE_GP;
+ }
+ else
+ {
+ /* Check the PP is supported by the PMM */
+ MALI_DEBUG_ASSERT( subsystem->core_type == _MALI_200 || subsystem->core_type == _MALI_400_PP );
+ /* We state that all PP cores are scheduled to inform the PMM
+ * that it may need to power something up!
+ */
+ event.data = MALI_PMM_CORE_PP_ALL;
+ }
+ _mali_ukk_pmm_event_message( &event );
+ }
+#endif /* USING_MALI_PMM */
+
+}
+
+/* Is used by external function:
+ session_begin<> */
+void mali_core_session_begin(mali_core_session * session)
+{
+ mali_core_subsystem * subsystem;
+
+ subsystem = session->subsystem;
+ if ( NULL == subsystem )
+ {
+ MALI_PRINT_ERROR(("Missing data in struct\n"));
+ return;
+ }
+ MALI_DEBUG_PRINT(2, ("Core: session_begin: for %s\n", session->subsystem->name )) ;
+
+ session->magic_nr = SESSION_MAGIC_NR;
+
+ _MALI_OSK_INIT_LIST_HEAD(&session->renderunits_working_head);
+
+ session->job_waiting_to_run = NULL;
+ _MALI_OSK_INIT_LIST_HEAD(&session->awaiting_sessions_list);
+ _MALI_OSK_INIT_LIST_HEAD(&session->all_sessions_list);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ _mali_osk_list_add(&session->all_sessions_list, &session->subsystem->all_sessions_head);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_begin: for %s DONE\n", session->subsystem->name) ) ;
+}
+
+#if USING_MMU
+static void mali_core_renderunit_stop_bus(mali_core_renderunit* core)
+{
+ core->subsystem->stop_bus(core);
+}
+#endif
+
+void mali_core_session_close(mali_core_session * session)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit *core;
+
+ subsystem = session->subsystem;
+ MALI_DEBUG_ASSERT_POINTER(subsystem);
+
+ MALI_DEBUG_PRINT(2, ("Core: session_close: for %s\n", session->subsystem->name) ) ;
+
+ /* We must grab subsystem mutex since the list this session belongs to
+ is owned by the subsystem */
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+	/* Return the potentially waiting job to the user */
+ if ( session->job_waiting_to_run )
+ {
+ subsystem->return_job_to_user( session->job_waiting_to_run, JOB_STATUS_END_SHUTDOWN );
+ session->job_waiting_to_run = NULL;
+ _mali_osk_list_delinit(&(session->awaiting_sessions_list));
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ }
+
+	/* Kill active cores working for this session, freeing their jobs.
+	   Since the handling of one core could also stop jobs on another core, this is done in a while loop. */
+ while ( ! _mali_osk_list_empty(&session->renderunits_working_head) )
+ {
+ core = _MALI_OSK_LIST_ENTRY(session->renderunits_working_head.next, mali_core_renderunit, list);
+ MALI_DEBUG_PRINT(3, ("Core: session_close: Core was working: %s\n", core->description )) ;
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_SHUTDOWN );
+ break;
+ }
+ _MALI_OSK_INIT_LIST_HEAD(&session->renderunits_working_head); /* Not necessary - we will _mali_osk_free session*/
+
+ /* Remove this session from the global sessionlist */
+ _mali_osk_list_delinit(&session->all_sessions_list);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_close: for %s FINISHED\n", session->subsystem->name )) ;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+
+/* Must hold subsystem_mutex before entering this function */
+_mali_osk_errcode_t mali_core_session_add_job(mali_core_session * session, mali_core_job *job, mali_core_job **job_return)
+{
+ mali_core_subsystem * subsystem;
+
+ job->magic_nr = JOB_MAGIC_NR;
+ MALI_CHECK_SESSION(session);
+
+ subsystem = session->subsystem;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_add_job: for %s\n", subsystem->name )) ;
+
+ /* Setting the default value; No job to return */
+ MALI_DEBUG_ASSERT_POINTER(job_return);
+ *job_return = NULL;
+
+ if ( NULL != session->job_waiting_to_run)
+ {
+ MALI_DEBUG_PRINT(5, ("The session already had a job waiting\n")) ;
+		/* Checking if the new job has a higher priority than the one that was pending. */
+ if ( job_has_higher_priority(job,session->job_waiting_to_run))
+ {
+ /* Remove this session from current priority */
+ _mali_osk_list_del( &(session->awaiting_sessions_list));
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ /* Returning the previous waiting job through the input double pointer*/
+ *job_return = session->job_waiting_to_run;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Illegal internal state."));
+			/* There was a job waiting in this session, and the priority of the job
+			we tried to add was NOT higher. Return an error to indicate the new job was NOT enqueued. */
+			/* We check prior to calling this function that we are not in this state. */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ }
+ /* Continue to add the new job as the next job from this session */
+ MALI_DEBUG_PRINT(6, ("Core: session_add_job job=0x%x\n", job));
+
+ /* Adding this session to the subsystem list of sessions with pending job, with priority */
+ session->job_waiting_to_run = job;
+
+ _mali_osk_list_addtail( &(session->awaiting_sessions_list), &(subsystem->awaiting_sessions_head[job->priority]));
+ subsystem->awaiting_sessions_sum_all_priorities++;
+
+ mali_core_subsystem_schedule(subsystem);
+
+ MALI_DEBUG_PRINT(6, ("Core: session_add_job: for %s FINISHED\n", session->subsystem->name )) ;
+
+ MALI_SUCCESS;
+}
+
+static void mali_core_job_set_run_time(mali_core_job * job)
+{
+ u32 jiffies_used;
+ jiffies_used = _mali_osk_time_tickcount() - job->start_time_jiffies;
+ if ( jiffies_used > JOB_MAX_JIFFIES )
+ {
+ MALI_PRINT_ERROR(("Job used too many jiffies: %d\n", jiffies_used ));
+ jiffies_used = 0;
+ }
+ job->render_time_msecs = _mali_osk_time_tickstoms(jiffies_used);
+}
+
+static void mali_core_renderunit_detach_job_from_core(mali_core_renderunit* core, mali_subsystem_reschedule_option reschedule, mali_subsystem_job_end_code end_status)
+{
+ mali_core_job * job;
+ mali_core_subsystem * subsystem;
+ mali_bool already_in_detach_function;
+
+ job = core->current_job;
+ subsystem = core->subsystem;
+ MALI_DEBUG_ASSERT(CORE_IDLE != core->state);
+
+	/* The reset_core() called some lines below might call this detach
+	 * function again. To protect the core object from being modified by
+	 * recursive calls, in_detach_function tracks whether this is a recursive call.
+	 */
+ already_in_detach_function = core->in_detach_function;
+
+ if ( MALI_FALSE == already_in_detach_function )
+ {
+ core->in_detach_function = MALI_TRUE;
+ if ( NULL != job )
+ {
+ mali_core_job_set_run_time(job);
+ core->current_job = NULL;
+ }
+ }
+
+ if (JOB_STATUS_END_SEG_FAULT == end_status)
+ {
+ subsystem->reset_core( core, MALI_CORE_RESET_STYLE_HARD );
+ }
+ else
+ {
+ subsystem->reset_core( core, MALI_CORE_RESET_STYLE_RUNABLE );
+ }
+
+ if ( MALI_FALSE == already_in_detach_function )
+ {
+ if ( CORE_IDLE != core->state )
+ {
+ #if MALI_GPU_UTILIZATION
+ mali_utilization_core_end();
+ #endif
+ mali_core_subsystem_move_core_set_idle(core);
+ }
+
+ core->in_detach_function = MALI_FALSE;
+
+ if ( SUBSYSTEM_RESCHEDULE == reschedule )
+ {
+ mali_core_subsystem_schedule(subsystem);
+ }
+ if ( NULL != job )
+ {
+ core->subsystem->return_job_to_user(job, end_status);
+ }
+ }
+}
+
+#if USING_MMU
+/* This function intentionally does not release the semaphore. You must run
+ stop_bus_for_all_cores(), reset_all_cores_on_mmu() and continue_job_handling()
+ after calling this function, and then call unlock_subsystem() to release the
+ semaphore. */
+
+static void lock_subsystem(struct mali_core_subsystem * subsys)
+{
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex.
+
+ This function only stops cores behind the given MMU, unless "mmu" is NULL, in
+ which case all cores are stopped.
+*/
+static void stop_bus_for_all_cores_on_mmu(struct mali_core_subsystem * subsys, void* mmu)
+{
+ u32 i;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_DEBUG_PRINT(2,("Handling: bus stop %s\n", subsys->name ));
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+ /* We stop only cores behind the given MMU, unless MMU is NULL */
+ if ( (NULL!=mmu) && (core->mmu != mmu) ) continue;
+
+ if ( CORE_IDLE != core->state )
+ {
+ MALI_DEBUG_PRINT(4, ("Stopping bus on core %s\n", core->description));
+ mali_core_renderunit_stop_bus(core);
+ core->error_recovery = MALI_TRUE;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4,("Core: not active %s\n", core->description ));
+ }
+ }
+	/* Mutex is still being held, to prevent things from happening while we do cleanup */
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex.
+
+ This function only resets cores behind the given MMU, unless "mmu" is NULL, in
+ which case all cores are reset.
+*/
+static void reset_all_cores_on_mmu(struct mali_core_subsystem * subsys, void* mmu)
+{
+ u32 i;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_DEBUG_PRINT(3, ("Handling: reset cores from mmu: 0x%x on %s\n", mmu, subsys->name ));
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+ /* We reset only cores behind the given MMU, unless MMU is NULL */
+ if ( (NULL!=mmu) && (core->mmu != mmu) ) continue;
+
+ if ( CORE_IDLE != core->state )
+ {
+ MALI_DEBUG_PRINT(4, ("Abort and reset core: %s\n", core->description ));
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_WAIT, JOB_STATUS_END_SEG_FAULT);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("Core: not active %s\n", core->description ));
+ }
+ }
+ MALI_DEBUG_PRINT(4, ("Handling: done %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex. */
+static void continue_job_handling(struct mali_core_subsystem * subsys)
+{
+ u32 i, j;
+
+ MALI_DEBUG_PRINT(3, ("Handling: Continue: %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ core->error_recovery = MALI_FALSE;
+ }
+
+ i = subsys->number_of_cores;
+ j = subsys->awaiting_sessions_sum_all_priorities;
+
+ /* Schedule MIN(nr_waiting_jobs , number of cores) times */
+ while( i-- && j--)
+ {
+ mali_core_subsystem_schedule(subsys);
+ }
+ MALI_DEBUG_PRINT(4, ("Handling: done %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* Unlock the subsystem. */
+static void unlock_subsystem(struct mali_core_subsystem * subsys)
+{
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+}
+
+void mali_core_subsystem_broadcast_notification(struct mali_core_subsystem * subsys, mali_core_notification_message message, u32 data)
+{
+ void * mmu;
+ mmu = (void*) data;
+
+ switch(message)
+ {
+ case MMU_KILL_STEP0_LOCK_SUBSYSTEM:
+ break;
+ case MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES:
+ stop_bus_for_all_cores_on_mmu(subsys, mmu);
+ break;
+ case MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS:
+ reset_all_cores_on_mmu(subsys, mmu );
+ break;
+ case MMU_KILL_STEP3_CONTINUE_JOB_HANDLING:
+ continue_job_handling(subsys);
+ break;
+ case MMU_KILL_STEP4_UNLOCK_SUBSYSTEM:
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Illegal message: 0x%x, data: 0x%x\n", (u32)message, data));
+ break;
+ }
+}
+#endif /* USING_MMU */
+
+void job_watchdog_set(mali_core_job * job, u32 watchdog_msecs)
+{
+ if (watchdog_msecs == 0) job->watchdog_msecs = mali_max_job_runtime; /* use the default */
+ else if (watchdog_msecs > WATCHDOG_MSECS_MAX) job->watchdog_msecs = WATCHDOG_MSECS_MAX; /* no larger than max */
+ else if (watchdog_msecs < WATCHDOG_MSECS_MIN) job->watchdog_msecs = WATCHDOG_MSECS_MIN; /* not below min */
+ else job->watchdog_msecs = watchdog_msecs;
+}
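As a hedged illustration of the clamping above, using the defaults defined earlier in this file (WATCHDOG_MSECS_MIN = 200, WATCHDOG_MSECS_MAX = 3600000, mali_max_job_runtime = 900000) and assuming a valid job pointer:

	/* Given a valid mali_core_job *job (illustration only): */
	job_watchdog_set(job, 0);           /* -> 900000 ms (mali_max_job_runtime) */
	job_watchdog_set(job, 50);          /* -> 200 ms    (WATCHDOG_MSECS_MIN)   */
	job_watchdog_set(job, 5000000);     /* -> 3600000 ms (WATCHDOG_MSECS_MAX)  */
	job_watchdog_set(job, 30000);       /* -> 30000 ms  (kept as-is, in range) */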
+
+u32 mali_core_hang_check_timeout_get(void)
+{
+ /* check the value. The user might have set the value outside the allowed range */
+ if (mali_hang_check_interval > HANG_CHECK_MSECS_MAX) mali_hang_check_interval = HANG_CHECK_MSECS_MAX; /* cap to max */
+ else if (mali_hang_check_interval < HANG_CHECK_MSECS_MIN) mali_hang_check_interval = HANG_CHECK_MSECS_MIN; /* cap to min */
+
+ /* return the active value */
+ return mali_hang_check_interval;
+}
+
+static _mali_osk_errcode_t mali_core_irq_handler_upper_half (void * data)
+{
+ mali_core_renderunit *core;
+ u32 has_pending_irq;
+
+ core = (mali_core_renderunit * )data;
+
+ if ( (NULL == core) ||
+ (NULL == core->subsystem) ||
+ (NULL == core->subsystem->irq_handler_upper_half) )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(core->subsystem);
+
+ has_pending_irq = core->subsystem->irq_handler_upper_half(core);
+
+ if ( has_pending_irq )
+ {
+ _mali_osk_irq_schedulework( core->irq ) ;
+ MALI_SUCCESS;
+ }
+
+ if (mali_benchmark) MALI_SUCCESS;
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+static void mali_core_irq_handler_bottom_half ( void *data )
+{
+ mali_core_renderunit *core;
+ mali_core_subsystem* subsystem;
+
+ mali_subsystem_job_end_code job_status;
+
+ core = (mali_core_renderunit * )data;
+
+ MALI_CHECK_CORE(core);
+ subsystem = core->subsystem;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+ if ( CORE_IDLE == core->state ) goto end_function;
+
+ MALI_DEBUG_PRINT(5, ("IRQ: handling irq from core %s\n", core->description )) ;
+
+ _mali_osk_cache_flushall();
+
+ /* This function must also update the job status flag */
+ job_status = subsystem->irq_handler_bottom_half( core );
+
+	/* If the returned status is not JOB_STATUS_CONTINUE_RUN, the job is finished. */
+ if ( JOB_STATUS_CONTINUE_RUN != job_status )
+ {
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, job_status);
+ }
+ else
+ {
+ switch ( core->state )
+ {
+ case CORE_WATCHDOG_TIMEOUT:
+ MALI_DEBUG_PRINT(2, ("Watchdog SW Timeout of job from core: %s\n", core->description ));
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_TIMEOUT_SW );
+ break;
+
+ case CORE_POLL:
+ MALI_DEBUG_PRINT(5, ("Poll core: %s\n", core->description )) ;
+ core->state = CORE_WORKING;
+ _mali_osk_timer_add( core->timer, 1);
+ break;
+
+ default:
+				MALI_DEBUG_PRINT(4, ("IRQ: The job on the core continues to run: %s\n", core->description )) ;
+ break;
+ }
+ }
+end_function:
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+}
+
+void subsystem_flush_mapped_mem_cache(void)
+{
+ _mali_osk_cache_flushall();
+ _mali_osk_mem_barrier();
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t mali_core_subsystem_signal_power_down(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool immediate_only)
+{
+ mali_core_renderunit * core = NULL;
+
+ MALI_CHECK_SUBSYSTEM(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+	/* It is possible that this signal function can be called during a driver exit,
+ * and so the requested core may now be destroyed. (This is due to us not having
+ * the subsys lock before signalling power down).
+ * mali_core_renderunit_get_mali_core_nr() will report a Mali ERR because
+ * the core number is out of range (which is a valid error in other cases).
+ * So instead we check here (now that we have the subsys lock) and let the
+ * caller cope with the core get failure and check that the core has
+ * been unregistered in the PMM as part of its destruction.
+ */
+ if ( subsys->number_of_cores > mali_core_nr )
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys, mali_core_nr);
+ }
+
+ if ( NULL == core )
+ {
+ /* Couldn't find the core */
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: Failed to find core to power down\n") );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ else if ( core->state != CORE_IDLE )
+ {
+		/* When powering down we either set a pending power down flag here, so we
+		 * can power down cleanly after the job completes, or we leave the flag
+		 * unset if we have been asked to only do an immediate power down.
+		 * In either case, return that the core is busy.
+		 */
+ if ( !immediate_only ) core->pend_power_down = MALI_TRUE;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: No idle core to power down\n") );
+ MALI_ERROR(_MALI_OSK_ERR_BUSY);
+ }
+
+ /* Shouldn't have a pending power down flag set */
+ MALI_DEBUG_ASSERT( !core->pend_power_down );
+
+ /* Move core to off queue */
+ mali_core_subsystem_move_core_set_off(core);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_signal_power_up(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool queue_only)
+{
+ mali_core_renderunit * core;
+
+ MALI_CHECK_SUBSYSTEM(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ core = mali_core_renderunit_get_mali_core_nr(subsys, mali_core_nr);
+
+ if( core == NULL )
+ {
+ /* Couldn't find the core */
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: Failed to find core to power up\n") );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ else if( core->state != CORE_OFF )
+ {
+ /* This will usually happen because we are trying to cancel a pending power down */
+ core->pend_power_down = MALI_FALSE;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: No powered off core to power up (cancelled power down?)\n") );
+ MALI_ERROR(_MALI_OSK_ERR_BUSY);
+ }
+
+ /* Shouldn't have a pending power down set */
+ MALI_DEBUG_ASSERT( !core->pend_power_down );
+
+ /* Move core to idle queue */
+ mali_core_subsystem_move_core_set_idle(core);
+
+ if( !queue_only )
+ {
+ /* Reset MMU & core - core must be idle to allow this */
+#if USING_MMU
+ if ( NULL!=core->mmu )
+ {
+#if defined(USING_MALI200)
+ if (core->pmm_id != MALI_PMM_CORE_PP0)
+ {
+#endif
+ mali_kernel_mmu_reset(core->mmu);
+#if defined(USING_MALI200)
+ }
+#endif
+
+ }
+#endif /* USING_MMU */
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_RUNABLE );
+ }
+
+ /* Need to schedule work to start on this core */
+ mali_core_subsystem_schedule(subsys);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ MALI_SUCCESS;
+}
+
+#endif /* USING_MALI_PMM */
+
+#if MALI_STATE_TRACKING
+void mali_core_renderunit_dump_state(mali_core_subsystem* subsystem)
+{
+ u32 i;
+ mali_core_renderunit *core;
+ mali_core_renderunit *tmp_core;
+
+ mali_core_session* session;
+ mali_core_session* tmp_session;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+	MALI_PRINT(("Subsystem:\n"));
+ MALI_PRINT((" Name: %s\n", subsystem->name));
+
+ for (i = 0; i < subsystem->number_of_cores; i++)
+ {
+ MALI_PRINT((" Core: #%u\n", subsystem->mali_core_array[i]->core_number));
+ MALI_PRINT((" Description: %s\n", subsystem->mali_core_array[i]->description));
+ switch(subsystem->mali_core_array[i]->state)
+ {
+ case CORE_IDLE:
+ MALI_PRINT((" State: CORE_IDLE\n"));
+ break;
+ case CORE_WORKING:
+ MALI_PRINT((" State: CORE_WORKING\n"));
+ break;
+ case CORE_WATCHDOG_TIMEOUT:
+ MALI_PRINT((" State: CORE_WATCHDOG_TIMEOUT\n"));
+ break;
+ case CORE_POLL:
+ MALI_PRINT((" State: CORE_POLL\n"));
+ break;
+ case CORE_HANG_CHECK_TIMEOUT:
+ MALI_PRINT((" State: CORE_HANG_CHECK_TIMEOUT\n"));
+ break;
+ case CORE_OFF:
+ MALI_PRINT((" State: CORE_OFF\n"));
+ break;
+ default:
+ MALI_PRINT((" State: Unknown (0x%X)\n", subsystem->mali_core_array[i]->state));
+ break;
+ }
+ MALI_PRINT((" Current job: 0x%x\n", (u32)(subsystem->mali_core_array[i]->current_job)));
+ MALI_PRINT((" Core version: 0x%x\n", subsystem->mali_core_array[i]->core_version));
+#if USING_MALI_PMM
+ MALI_PRINT((" PMM id: 0x%x\n", subsystem->mali_core_array[i]->pmm_id));
+ MALI_PRINT((" Power down requested: %s\n", subsystem->mali_core_array[i]->pend_power_down ? "TRUE" : "FALSE"));
+#endif
+ }
+
+ MALI_PRINT((" Cores on idle list:\n"));
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp_core, &subsystem->renderunit_idle_head, mali_core_renderunit, list)
+ {
+ MALI_PRINT((" Core #%u\n", core->core_number));
+ }
+
+ MALI_PRINT((" Cores on off list:\n"));
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp_core, &subsystem->renderunit_off_head, mali_core_renderunit, list)
+ {
+ MALI_PRINT((" Core #%u\n", core->core_number));
+ }
+
+ MALI_PRINT((" Connected sessions:\n"));
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp_session, &subsystem->all_sessions_head, mali_core_session, all_sessions_list)
+ {
+ MALI_PRINT((" Session 0x%X:\n", (u32)session));
+ MALI_PRINT((" Waiting job: 0x%X\n", (u32)session->job_waiting_to_run));
+ MALI_PRINT((" Notification queue: %s\n", _mali_osk_notification_queue_is_empty(session->notification_queue) ? "EMPTY" : "NON-EMPTY"));
+ }
+
+ MALI_PRINT((" Waiting sessions sum all priorities: %u\n", subsystem->awaiting_sessions_sum_all_priorities));
+ for (i = 0; i < PRIORITY_LEVELS; i++)
+ {
+ MALI_PRINT((" Waiting sessions with priority %u:\n", i));
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp_session, &subsystem->awaiting_sessions_head[i], mali_core_session, awaiting_sessions_list)
+ {
+ MALI_PRINT((" Session 0x%X:\n", (u32)session));
+ MALI_PRINT((" Waiting job: 0x%X\n", (u32)session->job_waiting_to_run));
+ MALI_PRINT((" Notification queue: %s\n", _mali_osk_notification_queue_is_empty(session->notification_queue) ? "EMPTY" : "NON-EMPTY"));
+ }
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h
new file mode 100644
index 00000000000..94bce94d4e1
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_RENDERCORE_H__
+#define __MALI_RENDERCORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#define PRIORITY_LEVELS 3
+#define PRIORITY_MAX 0
+#define PRIORITY_MIN (PRIORITY_MAX+PRIORITY_LEVELS-1)
+
+/* This file contains what we need in kernel for all core types. */
+
+typedef enum
+{
+ CORE_IDLE, /**< Core is ready for a new job */
+ CORE_WORKING, /**< Core is working on a job */
+ CORE_WATCHDOG_TIMEOUT, /**< Core is working but it has timed out */
+ CORE_POLL, /**< Poll timer triggered, pending handling */
+ CORE_HANG_CHECK_TIMEOUT,/**< Timeout for hang detection */
+ CORE_OFF /**< Core is powered off */
+} mali_core_status;
+
+typedef enum
+{
+ SUBSYSTEM_RESCHEDULE,
+ SUBSYSTEM_WAIT
+} mali_subsystem_reschedule_option;
+
+typedef enum
+{
+ MALI_CORE_RESET_STYLE_RUNABLE,
+ MALI_CORE_RESET_STYLE_DISABLE,
+ MALI_CORE_RESET_STYLE_HARD
+} mali_core_reset_style;
+
+typedef enum
+{
+ JOB_STATUS_CONTINUE_RUN = 0x01,
+ JOB_STATUS_END_SUCCESS = 1<<(16+0),
+ JOB_STATUS_END_OOM = 1<<(16+1),
+ JOB_STATUS_END_ABORT = 1<<(16+2),
+ JOB_STATUS_END_TIMEOUT_SW = 1<<(16+3),
+ JOB_STATUS_END_HANG = 1<<(16+4),
+ JOB_STATUS_END_SEG_FAULT = 1<<(16+5),
+ JOB_STATUS_END_ILLEGAL_JOB = 1<<(16+6),
+ JOB_STATUS_END_UNKNOWN_ERR = 1<<(16+7),
+ JOB_STATUS_END_SHUTDOWN = 1<<(16+8),
+ JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} mali_subsystem_job_end_code;
+
+
+struct mali_core_job;
+struct mali_core_subsystem;
+struct mali_core_renderunit;
+struct mali_core_session;
+
+/* We have one of these subsystems for each core type */
+typedef struct mali_core_subsystem
+{
+ struct mali_core_renderunit ** mali_core_array; /* An array of all cores of this type */
+ u32 number_of_cores; /* Number of cores in this list */
+
+ _mali_core_type core_type;
+
+ u32 magic_nr;
+
+ _mali_osk_list_t renderunit_idle_head; /* Idle cores of this type */
+ _mali_osk_list_t renderunit_off_head; /* Powered off cores of this type */
+
+	/* Linked list for each priority of sessions with a job ready for scheduling */
+ _mali_osk_list_t awaiting_sessions_head[PRIORITY_LEVELS];
+ u32 awaiting_sessions_sum_all_priorities;
+
+ /* Linked list of all sessions connected to this coretype */
+ _mali_osk_list_t all_sessions_head;
+
+	/* Notification queue for this subsystem */
+ struct _mali_osk_notification_queue_t * notification_queue;
+
+ const char * name;
+ mali_kernel_subsystem_identifier id;
+
+ /**** Functions registered for this core type. Set during mali_core_init ******/
+ /* Start this job on this core. Return MALI_TRUE if the job was started. */
+ _mali_osk_errcode_t (*start_job)(struct mali_core_job * job, struct mali_core_renderunit * core);
+
+ /* Check if given core has an interrupt pending. Return MALI_TRUE and set mask to 0 if pending */
+ u32 (*irq_handler_upper_half)(struct mali_core_renderunit * core);
+
+	/* This function should check if the interrupt indicates that the job was finished.
+	   If so it should update the job-struct, reset the core registers, and return MALI_TRUE.
+ If the job is still working after this function it should return MALI_FALSE.
+ The function must also enable the bits in the interrupt mask for the core.
+ Called by the bottom half interrupt function. */
+ int (*irq_handler_bottom_half)(struct mali_core_renderunit* core);
+
+ /* This function is called from the ioctl function and should return a mali_core_job pointer
+ to a created mali_core_job object with the data given from userspace */
+ _mali_osk_errcode_t (*get_new_job_from_user)(struct mali_core_session * session, void * argument);
+
+ _mali_osk_errcode_t (*suspend_response)(struct mali_core_session * session, void * argument);
+
+ /* This function is called from the ioctl function and should write the necessary data
+ to userspace telling which job was finished and the status and debuginfo for this job.
+ The function must also free and cleanup the input job object. */
+ void (*return_job_to_user)(struct mali_core_job * job, mali_subsystem_job_end_code end_status);
+
+ /* Is called when a subsystem shuts down. This function needs to
+ release internal pointers in the core struct, and free the
+ core struct before returning.
+	   It is not allowed to write to any registers, since the register
+	   unmapping has already been done. */
+ void (*renderunit_delete)(struct mali_core_renderunit * core);
+
+ /* Is called when we want to abort a job that is running on the core.
+	   This is done if the program exits while the core is running */
+ void (*reset_core)(struct mali_core_renderunit * core, mali_core_reset_style style);
+
+	/* Is called when the rendercore wants the core to trigger an interrupt */
+ void (*probe_core_irq_trigger)(struct mali_core_renderunit* core);
+
+ /* Is called when the irq probe wants the core to acknowledge an interrupt from the hw */
+ _mali_osk_errcode_t (*probe_core_irq_acknowledge)(struct mali_core_renderunit* core);
+
+	/* Called when the rendercore wants to issue a bus stop request to a core */
+ void (*stop_bus)(struct mali_core_renderunit* core);
+} mali_core_subsystem;
+
+
+/* Per core data. This must be embedded into each core type internal core info. */
+typedef struct mali_core_renderunit
+{
+ struct mali_core_subsystem * subsystem; /* The core belongs to this subsystem */
+ _mali_osk_list_t list; /* Is always in subsystem->idle_list OR session->renderunits_working */
+ mali_core_status state;
+ mali_bool error_recovery; /* Indicates if the core is waiting for external help to recover (typically the MMU) */
+ mali_bool in_detach_function;
+	struct mali_core_job * current_job;     /* Current job being processed on this core, or NULL */
+ u32 magic_nr;
+ _mali_osk_timer_t * timer;
+ _mali_osk_timer_t * timer_hang_detection;
+
+ mali_io_address registers_mapped; /* IO-mapped pointer to registers */
+	u32 registers_base_addr;             /* Base address of the registers */
+ u32 size; /* The size of registers_mapped */
+ const char * description; /* Description of this core. */
+ u32 irq_nr; /* The IRQ nr for this core */
+ u32 core_version;
+#if USING_MMU
+ u32 mmu_id;
+ void * mmu; /* The MMU this rendercore is behind.*/
+#endif
+#if USING_MALI_PMM
+ mali_pmm_core_id pmm_id; /* The PMM core id */
+ mali_bool pend_power_down; /* Power down is requested */
+#endif
+
+ u32 core_number; /* 0 for first detected core of this type, 1 for second and so on */
+
+ _mali_osk_irq_t *irq;
+} mali_core_renderunit;
+
+
+/* Per open FILE data. */
+/* You must hold subsystem->mutex before any transactions on this datatype. */
+typedef struct mali_core_session
+{
+ struct mali_core_subsystem * subsystem; /* The session belongs to this subsystem */
+ _mali_osk_list_t renderunits_working_head; /* List of renderunits working for this session */
+ struct mali_core_job *job_waiting_to_run; /* The next job from this session to run */
+
+ _mali_osk_list_t awaiting_sessions_list; /* Linked list of sessions with jobs, for each priority */
+ _mali_osk_list_t all_sessions_list; /* Linked list of all sessions on the system. */
+
+ _mali_osk_notification_queue_t * notification_queue; /* Messages back to Base in userspace*/
+#if USING_MMU
+ struct mali_session_data * mmu_session; /* The session associated with the MMU page tables for this core */
+#endif
+ u32 magic_nr;
+} mali_core_session;
+
+
+/* This must be embedded into a specific mali_core_job struct */
+/* use this macro to get a specific mali_core_job: container_of(ptr, type, member) */
+typedef struct mali_core_job
+{
+ _mali_osk_list_t list; /* Linked list of jobs. Used by struct mali_core_session */
+ struct mali_core_session *session;
+ u32 magic_nr;
+ u32 priority;
+ u32 watchdog_msecs;
+ u32 render_time_msecs ;
+ u32 start_time_jiffies;
+ unsigned long watchdog_jiffies;
+ u32 abort_id;
+} mali_core_job;
+
+/*
+ * The rendercore subsystem is included in the subsystems[] array.
+ */
+extern struct mali_kernel_subsystem mali_subsystem_rendercore;
+
+void subsystem_flush_mapped_mem_cache(void);
+
+
+#define SUBSYSTEM_MAGIC_NR 0xdeadbeef
+#define CORE_MAGIC_NR 0xcafebabe
+#define SESSION_MAGIC_NR 0xbabe1234
+#define JOB_MAGIC_NR 0x0123abcd
+
+
+#define MALI_CHECK_SUBSYSTEM(subsystem)\
+ do { \
+ if ( SUBSYSTEM_MAGIC_NR != subsystem->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+ } while (0)
+
+#define MALI_CHECK_CORE(CORE)\
+ do { \
+ if ( CORE_MAGIC_NR != CORE->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+#define MALI_CHECK_SESSION(SESSION)\
+ do { \
+ if ( SESSION_MAGIC_NR != SESSION->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+#define MALI_CHECK_JOB(JOB)\
+ do { \
+ if ( JOB_MAGIC_NR != JOB->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+
+/* Check if job_a has higher priority than job_b */
+MALI_STATIC_INLINE int job_has_higher_priority(mali_core_job * job_a, mali_core_job * job_b)
+{
+ /* The lowest number has the highest priority */
+ return (int) (job_a->priority < job_b->priority);
+}
+
+MALI_STATIC_INLINE void job_priority_set(mali_core_job * job, u32 priority)
+{
+ if (priority > PRIORITY_MIN) job->priority = PRIORITY_MIN;
+ else job->priority = priority;
+}
+
+void job_watchdog_set(mali_core_job * job, u32 watchdog_msecs);
+
+/* For use by const default register settings (e.g. set these after reset) */
+typedef struct register_address_and_value
+{
+ u32 address;
+ u32 value;
+} register_address_and_value ;
+
+
+/* For use by dynamic default register settings (e.g. set these after reset) */
+typedef struct register_address_and_value_list
+{
+ _mali_osk_list_t list;
+ register_address_and_value item;
+} register_address_and_value_list ;
+
+/* Used if the user wants to set a contiguous block of registers */
+typedef struct register_array_user
+{
+ u32 entries_in_array;
+ u32 start_address;
+ void __user * reg_array;
+}register_array_user;
+
+
+#define MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys) \
+ do { \
+ MALI_DEBUG_PRINT(5, ("MUTEX: GRAB %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ _mali_osk_lock_wait( rendercores_global_mutex, _MALI_OSK_LOCKMODE_RW); \
+ MALI_DEBUG_PRINT(5, ("MUTEX: GRABBED %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ rendercores_global_mutex_is_held = 1; \
+ rendercores_global_mutex_owner = _mali_osk_get_tid(); \
+	} while (0)
+
+#define MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys) \
+ do { \
+ MALI_DEBUG_PRINT(5, ("MUTEX: RELEASE %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ rendercores_global_mutex_is_held = 0; \
+ rendercores_global_mutex_owner = 0; \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ _mali_osk_lock_signal( rendercores_global_mutex, _MALI_OSK_LOCKMODE_RW); \
+ MALI_DEBUG_PRINT(5, ("MUTEX: RELEASED %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+	} while (0)
+
+
+#define MALI_ASSERT_MUTEX_IS_GRABBED(input_pointer)\
+ do { \
+ if ( 0 == rendercores_global_mutex_is_held ) MALI_PRINT_ERROR(("ASSERT MUTEX SHOULD BE GRABBED"));\
+ if ( SUBSYSTEM_MAGIC_NR != input_pointer->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ if ( rendercores_global_mutex_owner != _mali_osk_get_tid() ) MALI_PRINT_ERROR(("Owner mismatch"));\
+ } while (0)
+
+
+u32 mali_core_renderunit_register_read(struct mali_core_renderunit *core, u32 relative_address);
+void mali_core_renderunit_register_read_array(struct mali_core_renderunit *core, u32 relative_address, u32 * result_array, u32 nr_of_regs);
+void mali_core_renderunit_register_write(struct mali_core_renderunit *core, u32 relative_address, u32 new_val);
+void mali_core_renderunit_register_write_array(struct mali_core_renderunit *core, u32 relative_address, u32 * write_array, u32 nr_of_regs);
+
+_mali_osk_errcode_t mali_core_renderunit_init(struct mali_core_renderunit * core);
+void mali_core_renderunit_term(struct mali_core_renderunit * core);
+int mali_core_renderunit_map_registers(struct mali_core_renderunit *core);
+void mali_core_renderunit_unmap_registers(struct mali_core_renderunit *core);
+int mali_core_renderunit_irq_handler_add(struct mali_core_renderunit *core);
+mali_core_renderunit * mali_core_renderunit_get_mali_core_nr(mali_core_subsystem *subsys, u32 mali_core_nr);
+
+int mali_core_subsystem_init(struct mali_core_subsystem * new_subsys);
+#if USING_MMU
+void mali_core_subsystem_attach_mmu(mali_core_subsystem* subsys);
+#endif
+int mali_core_subsystem_register_renderunit(struct mali_core_subsystem * subsys, struct mali_core_renderunit * core);
+int mali_core_subsystem_system_info_fill(mali_core_subsystem* subsys, _mali_system_info* info);
+void mali_core_subsystem_cleanup(struct mali_core_subsystem * subsys);
+#if USING_MMU
+void mali_core_subsystem_broadcast_notification(struct mali_core_subsystem * subsys, mali_core_notification_message message, u32 data);
+#endif
+void mali_core_session_begin(mali_core_session *session);
+void mali_core_session_close(mali_core_session * session);
+int mali_core_session_add_job(mali_core_session * session, mali_core_job *job, mali_core_job **job_return);
+u32 mali_core_hang_check_timeout_get(void);
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_start_job(mali_core_session * session, void *job_data);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_number_of_cores_get(mali_core_session * session, u32 *number_of_cores);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_core_version_get(mali_core_session * session, _mali_core_version *version);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_suspend_response(mali_core_session * session, void* argument);
+void mali_core_subsystem_ioctl_abort_job(mali_core_session * session, u32 id);
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t mali_core_subsystem_signal_power_down(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool immediate_only);
+_mali_osk_errcode_t mali_core_subsystem_signal_power_up(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool queue_only);
+#endif
+
+#if MALI_STATE_TRACKING
+void mali_core_renderunit_dump_state(mali_core_subsystem* subsystem);
+#endif
+
+#endif /* __MALI_RENDERCORE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h
new file mode 100644
index 00000000000..ce290d0c1c9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SESSION_MANAGER_H__
+#define __MALI_KERNEL_SESSION_MANAGER_H__
+
+/* Incomplete struct to pass around pointers to it */
+struct mali_session_data;
+
+void * mali_kernel_session_manager_slot_get(struct mali_session_data * session, int id);
+
+#endif /* __MALI_KERNEL_SESSION_MANAGER_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h
new file mode 100644
index 00000000000..e8c6530668c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_subsystem.h
+ */
+
+#ifndef __MALI_KERNEL_SUBSYSTEM_H__
+#define __MALI_KERNEL_SUBSYSTEM_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+
+/* typedefs of the datatypes used in the hook functions */
+typedef void * mali_kernel_subsystem_session_slot;
+typedef int mali_kernel_subsystem_identifier;
+typedef _mali_osk_errcode_t (*mali_kernel_resource_registrator)(_mali_osk_resource_t *);
+
+/**
+ * Broadcast notification messages
+ */
+typedef enum mali_core_notification_message
+{
+ MMU_KILL_STEP0_LOCK_SUBSYSTEM, /**< Request to lock subsystem */
+ MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES, /**< Request to stop all buses */
+ MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS, /**< Request kill all jobs, and not start more jobs */
+ MMU_KILL_STEP3_CONTINUE_JOB_HANDLING, /**< Request to continue with new jobs on all cores */
+ MMU_KILL_STEP4_UNLOCK_SUBSYSTEM /**< Request to unlock subsystem */
+} mali_core_notification_message;
+
+/**
+ * A function pointer can be NULL if the subsystem isn't interested in the event.
+ */
+typedef struct mali_kernel_subsystem
+{
+ /* subsystem control */
+ _mali_osk_errcode_t (*startup)(mali_kernel_subsystem_identifier id); /**< Called during module load or system startup*/
+ void (*shutdown)(mali_kernel_subsystem_identifier id); /**< Called during module unload or system shutdown */
+
+ /**
+ * Called during module load or system startup.
+	 * Called when all subsystems have reported startup OK and all resources were successfully initialized
+ */
+ _mali_osk_errcode_t (*load_complete)(mali_kernel_subsystem_identifier id);
+
+ /* per subsystem handlers */
+ _mali_osk_errcode_t (*system_info_fill)(_mali_system_info* info); /**< Fill info into info struct. MUST allocate memory with kmalloc, since it's kfree'd */
+
+ /* per session handlers */
+ /**
+ * Informs about a new session.
+ * slot can be used to track per-session per-subsystem data.
+ * queue can be used to send events to user space.
+	 * Returns a _mali_osk_errcode_t error code.
+ */
+ _mali_osk_errcode_t (*session_begin)(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+ /**
+ * Informs that a session is ending
+ * slot was the same as given during session_begin
+ */
+ void (*session_end)(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+ /* Used by subsystems to send messages to each other. This is the receiving end */
+ void (*broadcast_notification)(mali_core_notification_message message, u32 data);
+
+#if MALI_STATE_TRACKING
+ /** Dump the current state of the subsystem */
+ void (*dump_state)(void);
+#endif
+} mali_kernel_subsystem;
+
+/* functions used by the subsystems to interact with the core */
+/**
+ * Register a resource handler
+ * @param type The resource type to register a handler for
+ * @param handler Pointer to the function handling this resource
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t _mali_kernel_core_register_resource_handler(_mali_osk_resource_type_t type, mali_kernel_resource_registrator handler);
+
+/* function used to interact with other subsystems */
+/**
+ * Broadcast a message
+ * Sends a message to all subsystems which have registered a broadcast notification handler
+ * @param message The message to send
+ * @param data Message specific extra data
+ */
+void _mali_kernel_core_broadcast_subsystem_message(mali_core_notification_message message, u32 data);
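+
+/* Illustrative sketch (not part of this patch's API contract): a caller that
+ * needs to kill all jobs behind an MMU would typically broadcast the steps
+ * above in order, matching the switch statement handled by the rendercore
+ * subsystem earlier in this patch. The surrounding locking discipline is an
+ * assumption here.
+ *
+ * @code
+ * void example_kill_jobs_behind_mmu(u32 mmu_data)
+ * {
+ *     _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP0_LOCK_SUBSYSTEM, mmu_data);
+ *     _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES, mmu_data);
+ *     _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS, mmu_data);
+ *     _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP3_CONTINUE_JOB_HANDLING, mmu_data);
+ *     _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, mmu_data);
+ * }
+ * @endcode
+ */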
+
+#if MALI_STATE_TRACKING
+/**
+ * Tell all subsystems to dump their current state
+ */
+void _mali_kernel_core_dump_state(void);
+#endif
+
+
+#endif /* __MALI_KERNEL_SUBSYSTEM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c
new file mode 100644
index 00000000000..04653b06ad5
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+/* Define how often to calculate and report GPU utilization, in milliseconds */
+#define MALI_GPU_UTILIZATION_TIMEOUT 500
+
+static _mali_osk_lock_t *time_data_lock;
+
+static _mali_osk_atomic_t num_running_cores;
+
+static u64 period_start_time = 0;
+static u64 work_start_time = 0;
+static u64 accumulated_work_time = 0;
+
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+
+static void calculate_gpu_utilization(void* arg)
+{
+ u64 time_now;
+ u64 time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 work_normalized;
+ u32 period_normalized;
+ u32 utilization;
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (accumulated_work_time == 0 && work_start_time == 0)
+ {
+		/* Don't reschedule the timer; it will be restarted when new work arrives */
+ timer_running = MALI_FALSE;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* No work done for this period, report zero usage */
+ mali_gpu_utilization_handler(0);
+
+ return;
+ }
+
+ time_now = _mali_osk_time_get_ns();
+ time_period = time_now - period_start_time;
+
+ /* If we are currently busy, update working period up to now */
+ if (work_start_time != 0)
+ {
+ accumulated_work_time += (time_now - work_start_time);
+ work_start_time = time_now;
+ }
+
+	/*
+	 * We have two 64-bit values, a dividend and a divisor.
+	 * To avoid depending on a 64-bit divider, we first shift both values
+	 * down equally so they fit in 32 bits.
+	 * We then shift the dividend up, or the divisor down, by 8, so the result is expressed in parts of 256.
+	 */
+
+ /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+ leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+ shift_val = 32 - leading_zeroes;
+ work_normalized = (u32)(accumulated_work_time >> shift_val);
+ period_normalized = (u32)(time_period >> shift_val);
+
+	/*
+	 * Now, we should report the usage in parts of 256.
+	 * This means we must shift the dividend up, or the divisor down, by 8
+	 * (we could do a combination, but we just use one of them for simplicity;
+	 * the end result is good enough anyway).
+	 */
+ if (period_normalized > 0x00FFFFFF)
+ {
+ /* The divisor is so big that it is safe to shift it down */
+ period_normalized >>= 8;
+ }
+ else
+ {
+ /*
+		 * The divisor is so small that we can shift up the dividend, without losing any data.
+ * (dividend is always smaller than the divisor)
+ */
+ work_normalized <<= 8;
+ }
+
+ utilization = work_normalized / period_normalized;
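+	/* Worked example (illustrative only): with the 500 ms period above and,
+	 * say, 250,000,000 ns of accumulated work, the period (500,000,000 ns)
+	 * fits in 32 bits, so (assuming _mali_osk_clz() reports 32 leading zeroes
+	 * for a zero input) shift_val is 0 and nothing is lost in the first
+	 * normalization. period_normalized (500,000,000) is larger than
+	 * 0x00FFFFFF, so it is shifted down by 8 to 1,953,125, and
+	 * 250,000,000 / 1,953,125 = 128, i.e. 128/256 = 50% utilization.
+	 */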
+
+ accumulated_work_time = 0;
+ period_start_time = time_now; /* starting a new period */
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+
+ mali_gpu_utilization_handler(utilization);
+}
+
+
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+ time_data_lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ|_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0 );
+ if (NULL == time_data_lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_osk_atomic_init(&num_running_cores, 0);
+
+ utilization_timer = _mali_osk_timer_init();
+ if (NULL == utilization_timer)
+ {
+ _mali_osk_lock_term(time_data_lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_suspend(void)
+{
+ if (NULL != utilization_timer)
+ {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ }
+}
+
+void mali_utilization_term(void)
+{
+ if (NULL != utilization_timer)
+ {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(utilization_timer);
+ utilization_timer = NULL;
+ }
+
+ _mali_osk_atomic_term(&num_running_cores);
+
+ _mali_osk_lock_term(time_data_lock);
+}
+
+
+
+void mali_utilization_core_start(void)
+{
+ if (_mali_osk_atomic_inc_return(&num_running_cores) == 1)
+ {
+ /*
+		 * We went from zero cores working to one core working,
+		 * so we now consider the entire GPU to be busy.
+ */
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ work_start_time = _mali_osk_time_get_ns();
+
+ if (timer_running != MALI_TRUE)
+ {
+ timer_running = MALI_TRUE;
+ period_start_time = work_start_time; /* starting a new period */
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+ }
+ else
+ {
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+ }
+}
+
+
+
+void mali_utilization_core_end(void)
+{
+ if (_mali_osk_atomic_dec_return(&num_running_cores) == 0)
+ {
+ /*
+		 * No more cores are working, so accumulate the time we were busy.
+ */
+ u64 time_now;
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ time_now = _mali_osk_time_get_ns();
+ accumulated_work_time += (time_now - work_start_time);
+ work_start_time = 0;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h
new file mode 100644
index 00000000000..f6485006729
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include "mali_osk.h"
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Should be called when a core is about to start executing a job
+ */
+void mali_utilization_core_start(void);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
+
+/**
+ * Should be called when a core has completed executing a job
+ */
+void mali_utilization_core_end(void);
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c
new file mode 100644
index 00000000000..8dfa3a393ba
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+/*#include "mali_timestamp.h"
+*/
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+ _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+ MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+/* u64 ts = _mali_timestamp_get();
+ */
+
+ MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h
new file mode 100644
index 00000000000..c35d90577a3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h
@@ -0,0 +1,1620 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+ typedef unsigned char u8;
+ typedef signed char s8;
+ typedef unsigned short u16;
+ typedef signed short s16;
+ typedef unsigned int u32;
+ typedef signed int s32;
+ typedef unsigned long long u64;
+ #define BITS_PER_LONG (sizeof(long)*8)
+#else
+ /* Ensure Linux types u32, etc. are defined */
+ #include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+ typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+ #define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+ #define MALI_FALSE ((mali_bool)0)
+#endif
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface takes certain error codes. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to the U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum
+{
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)( void * arg );
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions on what can be done in this
+ * context.
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_irq_schedulework(). Refer to \ref _mali_osk_irq_schedulework() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)( void * arg );
+
+/** @brief IRQ 'bottom-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the deferred handling
+ * of a resource's IRQ. Usually, this work cannot be carried out in IRQ context
+ * by the IRQ upper-half handler.
+ *
+ * The IRQ bottom-half handler maps on to the concept of an IST that may
+ * execute some time after the actual IRQ has fired.
+ *
+ * All OSK-registered IRQ bottom-half handlers will be serialized, across all
+ * CPU-cores in the system.
+ *
+ * Refer to \ref _mali_osk_irq_schedulework() for more information on the
+ * IRQ work-queue, and the calling of the IRQ bottom-half handler.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_irq_bhandler_t)( void * arg );
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Even though the structure has space for a u32, the counters will only
+ * represent signed 24-bit integers.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct
+{
+ union
+ {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
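+
+/* Illustrative usage sketch: the pattern followed by mali_kernel_utilization.c
+ * in this patch. The _mali_osk_atomic_* function prototypes are assumed to be
+ * declared elsewhere in the OSK API; they are not shown in this excerpt.
+ *
+ * @code
+ * static _mali_osk_atomic_t example_counter;
+ *
+ * void example(void)
+ * {
+ *     _mali_osk_atomic_init(&example_counter, 0);
+ *     if (_mali_osk_atomic_inc_return(&example_counter) == 1)
+ *     {
+ *         // first user arrived
+ *     }
+ *     if (_mali_osk_atomic_dec_return(&example_counter) == 0)
+ *     {
+ *         // last user left
+ *     }
+ *     _mali_osk_atomic_term(&example_counter);
+ * }
+ * @endcode
+ */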
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * Flags are supplied at the point where the Lock is initialized. Each flag can
+ * be combined with others using bitwise OR, '|'.
+ *
+ * The flags must be sufficiently rich to cope with all our OSs. This means
+ * that on some OSs, certain flags can be completely ignored. We define a
+ * number of terms that are significant across all OSs:
+ *
+ * - Sleeping/non-sleeping mutexes. Sleeping mutexes can block on waiting, and so
+ * schedule out the current thread. This is significant on OSs where there are
+ * situations in which the current thread must not be put to sleep. On OSs
+ * without this restriction, sleeping and non-sleeping mutexes can be treated
+ * as the same (if that is required).
+ * - Interruptable/non-interruptable mutexes. For sleeping mutexes, it may be
+ * possible for the sleep to be interrupted for a reason other than the thread
+ * being able to obtain the lock. OSs behaving in this way may provide a
+ * mechanism to control whether sleeping mutexes can be interrupted. On OSs
+ * that do not support the concept of interruption, \b or they do not support
+ * control of mutex interruption, then interruptable mutexes may be treated
+ * as non-interruptable.
+ *
+ * Some constraints apply to the lock type flags:
+ *
+ * - Spinlocks are by nature, non-interruptable. Hence, they must always be
+ * combined with the NONINTERRUPTABLE flag, because it is meaningless to ask
+ * for a spinlock that is interruptable (and this highlights its
+ * non-interruptable-ness). For example, on certain OSs they should be used when
+ * you must not sleep.
+ * - Reader/writer is an optimization hint, and any type of lock can be
+ * reader/writer. Since this is an optimization hint, the implementation need
+ * not respect this for any/all types of lock. For example, on certain OSs,
+ * there's no interruptable reader/writer mutex. If such a thing were requested
+ * on that OS, the fact that interruptable was requested takes priority over the
+ * reader/writer-ness, because reader/writer-ness is not necessary for correct
+ * operation.
+ * - Any lock can use the order parameter.
+ * - A onelock is an optimization hint specific to certain OSs. It can be
+ * specified when it is known that only one lock will be held by the thread,
+ * and so can provide faster mutual exclusion. This can be safely ignored if
+ * such optimization is not required/present.
+ *
+ * The absence of any flags (the value 0) results in a sleeping-mutex, which is interruptable.
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKFLAG_SPINLOCK = 0x1, /**< Specifically, don't sleep on those architectures that require it */
+ _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE = 0x2, /**< The mutex cannot be interrupted, e.g. delivery of signals on those architectures where this is required */
+ _MALI_OSK_LOCKFLAG_READERWRITER = 0x4, /**< Optimise for readers/writers */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x8, /**< Use the order parameter; otherwise use automatic ordering */
+ _MALI_OSK_LOCKFLAG_ONELOCK = 0x10, /**< Each thread can only hold one lock at a time */
+ _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ = 0x20, /**< IRQ version of spinlock */
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks specified
+ * as _MALI_OSK_LOCKFLAG_READERWRITER. In this case, the RO mode can be used
+ * to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * _MALI_OSK_LOCKFLAG_READERWRITER is not supplied.
+ *
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private type for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_t_struct _mali_osk_lock_t;
+/** @} */ /* end group _mali_osk_lock */
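+
+/* Illustrative usage sketch for the lock API above, following the pattern used
+ * by mali_kernel_utilization.c in this patch (the _mali_osk_lock_init()
+ * argument order is taken from that call site and is otherwise an assumption).
+ *
+ * @code
+ * static _mali_osk_lock_t *example_lock;
+ *
+ * _mali_osk_errcode_t example_setup(void)
+ * {
+ *     example_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0);
+ *     if (NULL == example_lock) return _MALI_OSK_ERR_FAULT;
+ *     return _MALI_OSK_ERR_OK;
+ * }
+ *
+ * void example_critical_section(void)
+ * {
+ *     _mali_osk_lock_wait(example_lock, _MALI_OSK_LOCKMODE_RW);
+ *     // ... protected work ...
+ *     _mali_osk_lock_signal(example_lock, _MALI_OSK_LOCKMODE_RW);
+ * }
+ *
+ * void example_teardown(void)
+ * {
+ *     _mali_osk_lock_term(example_lock);
+ * }
+ * @endcode
+ */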
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address * mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
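+
+/* Worked example (illustrative only): with the 4KB page size above, a physical
+ * address such as 0x12345ABC masks to a page start of
+ * (0x12345ABC & _MALI_OSK_CPU_PAGE_MASK) == 0x12345000, while the offset within
+ * the page is (0x12345ABC & ~_MALI_OSK_CPU_PAGE_MASK) == 0xABC.
+ */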
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes. */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum
+{
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct
+{
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ void * result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_irq_schedulework() to make use of the IRQ bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void * arg );
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ *_mali_osk_list_t member
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s
+{
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * When ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)ptr) - offsetof(type,member) ))
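+
+/* Illustrative sketch: recovering a wrapper structure from a pointer to one of
+ * its members. The example struct is hypothetical; mali_core_renderunit in
+ * this patch uses the same pattern via its _mali_osk_list_t member.
+ *
+ * @code
+ * typedef struct example_wrapper
+ * {
+ *     u32 payload;
+ *     _mali_osk_list_t list;
+ * } example_wrapper;
+ *
+ * example_wrapper wrapper;
+ * _mali_osk_list_t *member_ptr = &wrapper.list;
+ * example_wrapper *recovered = _MALI_OSK_CONTAINER_OF(member_ptr, example_wrapper, list);
+ * // recovered == &wrapper
+ * @endcode
+ */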
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
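+
+/* Enumeration sketch (illustrative only; it reuses the hypothetical
+ * "struct my_session" from the sketch above, "session_list" is a hypothetical
+ * _mali_osk_list_t head, and _mali_osk_list_del() is the list helper declared
+ * in mali_osk_list.h):
+ *
+ *   struct my_session *session, *tmp;
+ *   _MALI_OSK_LIST_FOREACHENTRY( session, tmp, &session_list, struct my_session, link )
+ *   {
+ *       if ( session->id == id_to_remove )
+ *       {
+ *           _mali_osk_list_del( &session->link );  // safe: 'tmp' already holds the next entry
+ *       }
+ *   }
+ */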
+/** @} */ /* end group _mali_osk_list */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief The known resource types
+ *
+ * @note \b IMPORTANT: these must remain fixed, and only be extended. This is
+ * because not all systems use a header file for reading in their resources.
+ * The resources may instead come from a data file where these resources are
+ * 'hard-coded' in, because there's no easy way of transferring the enum values
+ * into such data files. E.g. the C pre-processor does \em not process enums.
+ */
+typedef enum _mali_osk_resource_type
+{
+ RESOURCE_TYPE_FIRST =0, /**< Duplicate resource marker for the first resource*/
+ MEMORY =0, /**< Physically contiguous memory block, not managed by the OS */
+ OS_MEMORY =1, /**< Memory managed by and shared with the OS */
+ MALI200 =3, /**< Mali200 Programmable Fragment Shader */
+ MALIGP2 =4, /**< MaliGP2 Programmable Vertex Shader */
+ MMU =5, /**< Mali MMU (Memory Management Unit) */
+ FPGA_FRAMEWORK =6, /**< Mali registers specific to FPGA implementations */
+ MALI400L2 =7, /**< Mali400 L2 Cache */
+ MALI300L2 =7, /**< Mali300 L2 Cache */
+ MALI400GP =8, /**< Mali400 Programmable Vertex Shader Core */
+ MALI300GP =8, /**< Mali300 Programmable Vertex Shader Core */
+ MALI400PP =9, /**< Mali400 Programmable Fragment Shader Core */
+ MALI300PP =9, /**< Mali300 Programmable Fragment Shader Core */
+ MEM_VALIDATION =10, /**< External Memory Validator */
+ PMU =11, /**< Power Management Unit */
+ RESOURCE_TYPE_COUNT /**< The total number of known resources */
+} _mali_osk_resource_type_t;
+
+/** @brief resource description struct
+ *
+ * _mali_osk_resources_init() will enumerate objects of this type. Not all
+ * members have a valid meaning across all types.
+ *
+ * The mmu_id is used to group resources to a certain MMU, since there may be
+ * more than one MMU in the system, and each resource may be using a different
+ * MMU:
+ * - For MMU resources, the setting of mmu_id is a uniquely identifying number.
+ * - For Other resources, the setting of mmu_id determines which MMU the
+ * resource uses.
+ */
+typedef struct _mali_osk_resource
+{
+ _mali_osk_resource_type_t type; /**< type of the resource */
+ const char * description; /**< short description of the resource */
+ u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ s32 cpu_usage_adjust; /**< Offset added to the base address of the resource to arrive at the CPU physical address of the resource (if different from the Mali physical address) */
+ u32 size; /**< Size in bytes of the resource - either the size of its register range, or the size of the memory block. */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+ u32 flags; /**< Resources-specific flags. */
+ u32 mmu_id; /**< Identifier for Mali MMU resources. */
+ u32 alloc_order; /**< Order in which MEMORY/OS_MEMORY resources are used */
+} _mali_osk_resource_t;
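+
+/* Description sketch (illustrative only; the base address, size and IRQ number
+ * below are hypothetical placeholders, not a real platform configuration):
+ *
+ *   static _mali_osk_resource_t example_resources[] =
+ *   {
+ *       { .type = MALI400GP, .description = "Mali-400 GP",
+ *         .base = 0xA0000000, .size = 0x1000, .irq = 44, .mmu_id = 1 },
+ *   };
+ */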
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+#include "mali_kernel_memory_engine.h" /* include for mali_memory_allocation and mali_physical_memory_allocation type */
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Fake IRQ number for testing purposes
+ */
+#define _MALI_OSK_IRQ_NUMBER_FAKE ((u32)0xFFFFFFF1)
+
+/** @brief PMM Virtual IRQ number
+ */
+#define _MALI_OSK_IRQ_NUMBER_PMM ((u32)0xFFFFFFF2)
+
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * The _mali_osk_irq_t returned must be written into the resource-specific data
+ * pointed to by data. This is so that the upper and lower handlers can call
+ * _mali_osk_irq_schedulework().
+ *
+ * @note The caller must ensure that the resource does not generate an
+ * interrupt after _mali_osk_irq_init() finishes, and before the
+ * _mali_osk_irq_t is written into the resource-specific data. Otherwise,
+ * the upper-half handler will fail to call _mali_osk_irq_schedulework().
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and trigger_func and ack_func must be
+ * non-NULL.
+ * @param uhandler The upper-half handler, corresponding to an ISR handler for
+ * the resource
+ * @param bhandler The lower-half handler, corresponding to an IST handler for
+ * the resource
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param data resource-specific data, which will be passed to uhandler,
+ * bhandler and (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description );
+
+/** @brief Cause a queued, deferred call of the IRQ bottom-half.
+ *
+ * _mali_osk_irq_schedulework provides a mechanism for enqueuing deferred calls
+ * to the IRQ bottom-half handler. The queue is known as the IRQ work-queue.
+ * After calling _mali_osk_irq_schedulework(), the IRQ bottom-half handler will
+ * be scheduled to run at some point in the future.
+ *
+ * This is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_irq_schedulework() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_irq_schedulework() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the IRQ bottom-half handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_irq_schedulework()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The IRQ bottom-half callback (of type _mali_osk_irq_bhandler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_irq_schedulework() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_irq_schedulework() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * IRQs will be lost.
+ *
+ * The queue that _mali_osk_irq_schedulework implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_irq_schedulework
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered IRQs.
+ *
+ * The consequence of the queue being a 1-reader type is that calling
+ * _mali_osk_irq_schedulework() on different _mali_osk_irq_t objects causes
+ * their IRQ bottom-halves to be serialized, across all CPU-cores in the
+ * system.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ bottom-half must begin processing.
+ */
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq );
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for the
+ * IRQ work-queue to finish the work that is currently in the queue. That is,
+ * for every deferred call currently in the IRQ work-queue, it waits for each
+ * of those to be processed by their respective IRQ bottom-half handler.
+ *
+ * This function is used to ensure that the bottom-half handler of the supplied
+ * IRQ object will not be running at the completion of this function call.
+ * However, the caller must ensure that no other sources could call the
+ * _mali_osk_irq_schedulework() on the same IRQ object. For example, the
+ * relevant timers must be stopped.
+ *
+ * @note While this function is being called, other OSK-registered IRQs in the
+ * system may enqueue work for their respective bottom-half handlers. This
+ * function will not wait for those entries in the work-queue to be flushed.
+ *
+ * Since this blocks on the completion of work in the IRQ work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any OSK-registered IRQ bottom-half handler. To do so may cause a deadlock.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq );
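+
+/* Lifecycle sketch (illustrative only; "core", "my_upper_half" and
+ * "my_bottom_half" are hypothetical, and the OSK calls are the ones declared
+ * above). The returned _mali_osk_irq_t is stored in the resource-specific
+ * data, as required by the description of _mali_osk_irq_init():
+ *
+ *   core->irq = _mali_osk_irq_init( resource->irq, my_upper_half, my_bottom_half,
+ *                                   NULL, NULL, core, "my_core_irq" );
+ *   if ( NULL == core->irq )
+ *   {
+ *       // handle failure
+ *   }
+ *
+ *   // from the upper-half handler, defer the heavy work:
+ *   //   _mali_osk_irq_schedulework( core->irq );
+ *
+ *   // teardown: first stop all other sources of schedulework (e.g. timers)
+ *   _mali_osk_irq_term( core->irq );
+ */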
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom );
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after increment */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
+
+/** @brief Initialize an atomic counter
+ *
+ * The counters have storage for signed 24-bit integers. Initializing to signed
+ * values requiring more than 24-bits storage will fail.
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
+
+/** @brief Read a value from an atomic counter
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom );
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
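+
+/* Lifecycle sketch (illustrative only; "counter" is a hypothetical
+ * _mali_osk_atomic_t instance):
+ *
+ *   _mali_osk_atomic_t counter;
+ *   if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init( &counter, 0 ) )
+ *   {
+ *       // handle failure
+ *   }
+ *   _mali_osk_atomic_inc( &counter );
+ *   if ( 0 == _mali_osk_atomic_dec_return( &counter ) )
+ *   {
+ *       // counter is back to zero
+ *   }
+ *   _mali_osk_atomic_term( &counter );
+ */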
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc( u32 n, u32 size );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free( void *ptr );
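+
+/* Allocation sketch (illustrative only; "struct my_record" is hypothetical):
+ *
+ *   struct my_record *records = _mali_osk_calloc( 16, sizeof(struct my_record) );
+ *   if ( NULL == records )
+ *   {
+ *       // handle allocation failure
+ *   }
+ *   // ... use the zero-initialized array ...
+ *   _mali_osk_free( records );   // every successful allocation must be freed
+ */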
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value.
+ *
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset( void *s, u32 c, u32 n );
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OSs bring up an interactive out-of-memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * avoid triggering the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
+
+/** @addtogroup _mali_osk_lock
+ * @{ */
+
+/** @brief Initialize a Mutual Exclusion Lock
+ *
+ * Locks are created in the signalled (unlocked) state.
+ *
+ * initial must be zero, since there is currently no means of expressing
+ * whether a reader/writer lock should be initially locked as a reader or
+ * writer. This would require some encoding to be used.
+ *
+ * 'Automatic' ordering means that locks must be obtained in the order that
+ * they were created. All locks that can be held at the same time must either
+ * all provide the order parameter, or all use 'automatic' ordering, because
+ * 'automatic' and 'manual' ordering cannot be mixed.
+ *
+ * @param flags flags combined with bitwise OR ('|'), or zero. There are
+ * restrictions on which flags can be combined, @see _mali_osk_lock_flags_t.
+ * @param initial For future expansion into semaphores. SBZ.
+ * @param order The locking order of the mutex. That is, locks obtained by the
+ * same thread must have been created with an increasing order parameter, for
+ * deadlock prevention. Setting to zero causes 'automatic' ordering to be used.
+ * @return On success, a pointer to a _mali_osk_lock_t object. NULL on failure.
+ */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order );
+
+/** @brief Wait for a lock to be signalled (obtained)
+ *
+ * After a thread has successfully waited on the lock, the lock is obtained by
+ * the thread, and is marked as unsignalled. The thread releases the lock by
+ * signalling it.
+ *
+ * In the case of Reader/Writer locks, multiple readers can obtain a lock in
+ * the absence of writers, which is a performance optimization (providing that
+ * the readers never write to the protected resource).
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * For locks marked as _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, it is a
+ * programming error for the function to exit without obtaining the lock. This
+ * means that the error code must only be checked for interruptible locks.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Unless the lock
+ * was created with _MALI_OSK_LOCKFLAG_READERWRITER, this must be
+ * _MALI_OSK_LOCKMODE_RW.
+ * @return On success, _MALI_OSK_ERR_OK. For interruptible locks, a suitable
+ * _mali_osk_errcode_t will be returned on failure, and the lock will not be
+ * obtained. In this case, the error code must be propagated up to the U/K
+ * interface.
+ */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode);
+
+
+/** @brief Signal (release) a lock
+ *
+ * Locks may only be signalled by the thread that originally waited upon the
+ * lock.
+ *
+ * @note In the OSU, a flag exists to allow any thread to signal a
+ * lock. Such functionality is not present in the OSK.
+ *
+ * @param lock the lock to signal (release).
+ * @param mode the mode in which the lock should be obtained. This must match
+ * the mode in which the lock was waited upon.
+ */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode );
+
+/** @brief Terminate a lock
+ *
+ * This terminates a lock and frees all associated resources.
+ *
+ * It is a programming error to terminate the lock when it is held (unsignalled)
+ * by a thread.
+ *
+ * @param lock the lock to terminate.
+ */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock );
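+
+/* Lifecycle sketch (illustrative only; the flag and mode names are the ones
+ * referenced in the descriptions above):
+ *
+ *   _mali_osk_lock_t *lock;
+ *   lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0 );
+ *   if ( NULL != lock )
+ *   {
+ *       // return value need not be checked for non-interruptible locks
+ *       _mali_osk_lock_wait( lock, _MALI_OSK_LOCKMODE_RW );    // obtain
+ *       // ... access the protected resource ...
+ *       _mali_osk_lock_signal( lock, _MALI_OSK_LOCKMODE_RW );  // release
+ *       _mali_osk_lock_term( lock );
+ *   }
+ */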
+/** @} */ /* end group _mali_osk_lock */
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which affects memory
+ * mapped by _mali_osk_mem_mapregion. It will not be needed for memory
+ * mapped through _mali_osk_mem_mapioregion.
+ */
+void _mali_osk_mem_barrier( void );
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description );
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description );
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion( u32 phys, u32 size );
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32( volatile mali_io_address mapping, u32 offset, u32 val );
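+
+/* Register access sketch (illustrative only; "resource" is assumed to be a
+ * hypothetical _mali_osk_resource_t pointer, and the register offset 0x0 is a
+ * placeholder):
+ *
+ *   mali_io_address regs;
+ *   regs = _mali_osk_mem_mapioregion( resource->base, resource->size, resource->description );
+ *   if ( NULL != regs )
+ *   {
+ *       u32 version = _mali_osk_mem_ioread32( regs, 0x0 );   // offset must be a multiple of 4
+ *       _mali_osk_mem_iowrite32( regs, 0x0, version );
+ *       _mali_osk_mem_unmapioregion( resource->base, resource->size, regs );
+ *   }
+ */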
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall( void );
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OSs do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size );
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore, the notifications that
+ * the different subsystems want to send to user space have to be queued for
+ * later reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware, or
+ * when changes to the system are detected that need to be relayed to user
+ * space.
+ *
+ * After an event has occurred, user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object );
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue which can be used to queue messages for user
+ * delivery and get queued messages from
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object );
+
+#if MALI_STATE_TRACKING
+/** @brief Check if a notification queue is empty
+ *
+ * Returns whether the given notification queue currently contains any entries.
+ *
+ * @param queue The queue to check.
+ * @return MALI_TRUE if queue is empty, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue );
+#endif
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
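+
+/* Queue sketch (illustrative only; "queue" is assumed to have been created
+ * with _mali_osk_notification_queue_init(), and MY_NOTIFICATION_TYPE and
+ * "struct my_payload" are hypothetical):
+ *
+ *   // producer side
+ *   _mali_osk_notification_t *n;
+ *   n = _mali_osk_notification_create( MY_NOTIFICATION_TYPE, sizeof(struct my_payload) );
+ *   if ( NULL != n )
+ *   {
+ *       // fill in n->result_buffer, then hand the object to the queue
+ *       _mali_osk_notification_queue_send( queue, n );
+ *   }
+ *
+ *   // consumer side (kernel-process context, may sleep)
+ *   _mali_osk_notification_t *received;
+ *   if ( _MALI_OSK_ERR_OK == _mali_osk_notification_queue_receive( queue, &received ) )
+ *   {
+ *       // ... consume received->result_buffer ...
+ *       _mali_osk_notification_delete( received );
+ *   }
+ */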
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which are 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
+
+/** @brief Modify a timer
+ *
+ * Set the absolute time at which a timer will expire, and start it if it is
+ * stopped. If \a expiry_tick is in the past (determined by
+ * _mali_osk_time_after() ), the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at absolute time \a expiry_tick, at which point, the
+ * callback function will be invoked with the callback-specific data, as set
+ * by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param expiry_tick the \em absolute time in ticks at which this timer should
+ * trigger.
+ *
+ */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * must not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the IRQ work-queue by the timer (with
+ * \ref _mali_osk_irq_schedulework()) may still run. The timer callback and IRQ
+ * bottom-half handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim );
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data );
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - a timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim );
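+
+/* Lifecycle sketch (illustrative only; "my_timeout_callback", "my_data" and
+ * the 500 ms timeout are hypothetical; _mali_osk_time_mstoticks() is declared
+ * in the time group below):
+ *
+ *   _mali_osk_timer_t *tim = _mali_osk_timer_init();
+ *   if ( NULL != tim )
+ *   {
+ *       _mali_osk_timer_setcallback( tim, my_timeout_callback, my_data );
+ *       _mali_osk_timer_add( tim, _mali_osk_time_mstoticks( 500 ) );
+ *       // ...
+ *       _mali_osk_timer_del( tim );   // blocks until the callback is not running
+ *       _mali_osk_timer_term( tim );
+ *   }
+ */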
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time use the OS's representation of time, which are
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversions between tick time and milliseconds (ms) may not be loss-less,
+ * and are \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int _mali_osk_time_after( u32 ticka, u32 tickb );
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32 _mali_osk_time_mstoticks( u32 ms );
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms( u32 ticks );
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32 _mali_osk_time_tickcount( void );
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, it must be implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay( u32 usecs );
+
+/** @brief Return the time in nanoseconds, since any given reference.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_time_get_ns( void );
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit word to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz( u32 val );
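+
+/* Worked examples (illustrative only); a conforming implementation returns:
+ *
+ *   _mali_osk_clz( 0x80000000 ) == 0    // top bit set, no leading zeros
+ *   _mali_osk_clz( 0x00010000 ) == 15   // bits 31..17 are zero
+ */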
+/** @} */ /* end group _mali_osk_math */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg( const char *fmt, ... );
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the caller-process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "mali_osk_specific.h" /* include any per-os specifics */
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+ #error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+ #error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h
new file mode 100644
index 00000000000..28026ba1e6d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit( u32 bit, u32 *addr )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ MALI_DEBUG_ASSERT( NULL != addr );
+
+ (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit( u32 bit, u32 *addr )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ MALI_DEBUG_ASSERT( NULL != addr );
+
+ (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit( u32 bit, u32 value )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
+{
+ u32 inverted;
+ u32 negated;
+ u32 isolated;
+ u32 leading_zeros;
+
+ /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+ inverted = ~value; /* zzz...z1000...0 */
+ /* Using count_trailing_zeros on inverted value -
+ * See ARM System Developers Guide for details of count_trailing_zeros */
+
+ /* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+ negated = (u32)-inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+ /* negated = xxx...x1000...0 */
+
+ isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+ /* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+ * Note that the output is zero if value was all 1s */
+
+ leading_zeros = _mali_osk_clz( isolated );
+
+ return 31 - leading_zeros;
+}
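+
+/* Worked example (illustrative only): for value = 0x00000007 (binary ...0111):
+ *   inverted = 0xFFFFFFF8, negated = 0x00000008, isolated = 0x00000008,
+ *   _mali_osk_clz( 0x00000008 ) == 28, so the function returns 31 - 28 = 3,
+ *   which is the position of the first zero bit. */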
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ _mali_internal_clear_bit( nr, addr );
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ _mali_internal_set_bit( nr, addr );
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ return _mali_internal_test_bit( nr, *addr );
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit( const u32 *addr, u32 maxbit )
+{
+ u32 total;
+
+ for ( total = 0; total < maxbit; total += 32, ++addr )
+ {
+ int result;
+ result = _mali_internal_find_first_zero_bit( *addr );
+
+ /* non-negative signifies the bit was found */
+ if ( result >= 0 )
+ {
+ total += (u32)result;
+ break;
+ }
+ }
+
+ /* Now check if we reached maxbit or above */
+ if ( total >= maxbit )
+ {
+ total = maxbit;
+ }
+
+ return total; /* either the found bit nr, or maxbit if not found */
+}
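+
+/* Usage sketch (illustrative only; the two-word bitmap is hypothetical):
+ *
+ *   u32 bitmap[2] = { 0, 0 };                                // 64 bits, all clear
+ *   u32 nr = _mali_osk_find_first_zero_bit( bitmap, 64 );    // nr == 0
+ *   _mali_osk_set_nonatomic_bit( nr, bitmap );
+ *   nr = _mali_osk_find_first_zero_bit( bitmap, 64 );        // nr == 1
+ */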
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h
new file mode 100644
index 00000000000..a8d15f2a107
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = new_entry;
+ new_entry->next = next;
+ new_entry->prev = prev;
+ prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_add(new_entry, list->prev, list);
+}
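+
+/* Ordering sketch (illustrative only; "head" is a hypothetical list head
+ * initialized with _mali_osk_list_init(), and "a" and "b" are list members of
+ * hypothetical objects):
+ *
+ *   _mali_osk_list_add( &a, &head );       // head -> a
+ *   _mali_osk_list_addtail( &b, &head );   // head -> a -> b (FIFO enqueue)
+ */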
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be re-initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(list->prev, list->next);
+ _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE int _mali_osk_list_empty( _mali_osk_list_t *list )
+{
+ return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(move_entry->prev, move_entry->next);
+ _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Join two lists
+ *
+ * Both list heads must be initialized.
+ *
+ * Allows you to join one list into another list at a specific location.
+ *
+ * @param list the new list to add
+ * @param at the location in a list to add the new list into
+ */
+MALI_STATIC_INLINE void _mali_osk_list_splice( _mali_osk_list_t *list, _mali_osk_list_t *at )
+{
+ if (!_mali_osk_list_empty(list))
+ {
+ /* insert all items from 'list' after 'at' */
+ _mali_osk_list_t *first = list->next;
+ _mali_osk_list_t *last = list->prev;
+ _mali_osk_list_t *split = at->next;
+
+ first->prev = at;
+ at->next = first;
+
+ last->next = split;
+ split->prev = last;
+ }
+}
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
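For illustration, a minimal usage sketch of the list API above. It assumes _mali_osk_list_t is the doubly-linked list type declared in mali_osk.h with the next/prev members used here; the my_job type and the function name are hypothetical.

/* Hypothetical element type embedding a list link. */
struct my_job
{
    int id;
    _mali_osk_list_t link;
};

static void example_list_usage(void)
{
    _mali_osk_list_t queue;   /* sentinel list head */
    _mali_osk_list_t other;   /* a second list head */
    struct my_job a, b;

    _mali_osk_list_init(&queue);
    _mali_osk_list_init(&other);
    _mali_osk_list_init(&a.link);
    _mali_osk_list_init(&b.link);

    _mali_osk_list_add(&a.link, &queue);      /* a becomes the first element */
    _mali_osk_list_addtail(&b.link, &queue);  /* b becomes the last element */

    _mali_osk_list_move(&a.link, &other);     /* a now heads 'other' */
    _mali_osk_list_delinit(&b.link);          /* b removed and immediately reusable */

    /* Both elements are gone, so 'queue' is empty again. */
    (void)_mali_osk_list_empty(&queue);
}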
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h
new file mode 100644
index 00000000000..9a046fb91eb
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Initialize the OSK layer
+ *
+ * This function is used to setup any initialization of OSK functionality, if
+ * required.
+ *
+ * This must be the first function called from the common code, specifically,
+ * from the common code entry-point, mali_kernel_constructor.
+ *
+ * The OS-integration into the OS's kernel must handle calling of
+ * mali_kernel_constructor when the device driver is loaded.
+ *
+ * @return _MALI_OSK_ERR_OK on success, or a suitable _mali_osk_errcode_t on
+ * failure.
+ */
+_mali_osk_errcode_t _mali_osk_init( void );
+
+/** @brief Terminate the OSK layer
+ *
+ * This function is used to terminate any resources initialized by
+ * _mali_osk_init.
+ *
+ * This must be the last function called from the common code, specifically,
+ * from the common code closedown function, mali_kernel_destructor, and the
+ * error path in mali_kernel_constructor.
+ *
+ * The OS-integration into the OS's kernel must handle calling of
+ * mali_kernel_destructor when the device driver is terminated.
+ */
+void _mali_osk_term( void );
+
+/** @brief Read the Mali Resource configuration
+ *
+ * Populates a _mali_osk_resource_t array from configuration settings, which
+ * are stored in an OS-specific way.
+ *
+ * For example, these may be compiled in to a static structure, or read from
+ * the filesystem at startup.
+ *
+ * On failure, do not call _mali_osk_resources_term.
+ *
+ * @param arch_config a pointer in which to store the pointer to the resources
+ * @param num_resources the number of resources read
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_NOMEM on allocation
+ * error. For other failures, a suitable _mali_osk_errcode_t is returned.
+ */
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources );
+
+/** @brief Free resources allocated by _mali_osk_resources_init.
+ *
+ * Frees the _mali_osk_resource_t array allocated by _mali_osk_resources_init
+ *
+ * @param arch_config a pointer to the stored pointer to the resources
+ * @param num_resources the number of resources in the array
+ */
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources);
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Initialize a user-space accessible memory range
+ *
+ * This initializes a virtual address range such that it is reserved for the
+ * current process, but does not map any physical pages into this range.
+ *
+ * This function may initialize or adjust any members of the
+ * mali_memory_allocation \a descriptor supplied, before the physical pages are
+ * mapped in with _mali_osk_mem_mapregion_map().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The \a descriptor's process_addr_mapping_info member can be modified to
+ * allocate OS-specific information. Note that on input, this will be a
+ * ukk_private word from the U/K interface, as inserted by _mali_ukk_mem_mmap().
+ * This is used to pass information from the U/K interface to the OSK interface,
+ * if necessary. The precise usage of the process_addr_mapping_info member
+ * depends on the U/K implementation of _mali_ukk_mem_mmap().
+ *
+ * Therefore, the U/K implementation of _mali_ukk_mem_mmap() and the OSK
+ * implementation of _mali_osk_mem_mapregion_init() must agree on the meaning and
+ * usage of the ukk_private word and process_addr_mapping_info member.
+ *
+ * Refer to \ref u_k_api for more information on the U/K interface.
+ *
+ * On successful return, \a descriptor's mapping member will be correct for
+ * use with _mali_osk_mem_mapregion_term() and _mali_osk_mem_mapregion_map().
+ *
+ * @param descriptor the mali_memory_allocation to initialize.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor );
+
+/** @brief Terminate a user-space accessible memory range
+ *
+ * This terminates a virtual address range reserved in the current user process,
+ * where none, some or all of the virtual address ranges have mappings to
+ * physical pages.
+ *
+ * It will unmap any physical pages that had been mapped into a reserved
+ * virtual address range for the current process, and then releases the virtual
+ * address range. Any extra book-keeping information or resources allocated
+ * during _mali_osk_mem_mapregion_init() will also be released.
+ *
+ * The \a descriptor itself is not freed - this must be handled by the caller of
+ * _mali_osk_mem_mapregion_term().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param descriptor the mali_memory_allocation to terminate.
+ */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor );
+
+/** @brief Map physical pages into a user process's virtual address range
+ *
+ * This is used to map a number of physically contiguous pages into a
+ * user-process's virtual address range, which was previously reserved by a
+ * call to _mali_osk_mem_mapregion_init().
+ *
+ * This need not provide a mapping for the entire virtual address range
+ * reserved for \a descriptor - it may be used to map single pages per call.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The caller may supply \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC.
+ * In this case, \a size must be set to \ref _MALI_OSK_CPU_PAGE_SIZE, and the function
+ * will allocate the physical page itself. The physical address of the
+ * allocated page will be returned through \a phys_addr.
+ *
+ * It is an error to set \a size != \ref _MALI_OSK_CPU_PAGE_SIZE while
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC,
+ * since it is not always possible for OSs to support such a setting through this
+ * interface.
+ *
+ * @note \b IMPORTANT: This code must validate the input parameters. If the
+ * range defined by \a offset and \a size is outside the range allocated in
+ * \a descriptor, then this function \b MUST not attempt any mapping, and must
+ * instead return a suitable \ref _mali_osk_errcode_t \b failure code.
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor, and not the \a phys_addr parameter.
+ * It must be a multiple of \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in,out] phys_addr a pointer to the physical base address to begin the
+ * mapping from. If \a size == \ref _MALI_OSK_CPU_PAGE_SIZE and
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, then this
+ * function will allocate the physical page itself, and return the
+ * physical address of the page through \a phys_addr, which will be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE. Otherwise, \a *phys_addr must be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE, and is unmodified after the call.
+ * \a phys_addr is unaffected by the \a offset parameter.
+ *
+ * @param[in] size the number of bytes to map in. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise a _mali_osk_errcode_t value
+ * on failure
+ *
+ * @note could expand to use _mali_osk_mem_mapregion_flags_t instead of
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, but note that we must
+ * also modify the mali process address manager in the mmu/memory engine code.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size );
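The range check demanded by the note above can be sketched as follows. This is only an illustration of the validation an OSK implementation must perform before mapping anything; the helper name and the range_size parameter are hypothetical (a real implementation would take the length of the reserved range from the mali_memory_allocation descriptor), and a failing check would be translated into a suitable _mali_osk_errcode_t by the caller.

/* Returns non-zero when offset/size describe a page-aligned sub-range that
 * lies entirely inside a reserved range of range_size bytes. */
static int example_mapregion_range_ok(u32 range_size, u32 offset, u32 size)
{
    if (0 != (offset % _MALI_OSK_CPU_PAGE_SIZE)) return 0; /* offset must be page aligned */
    if (0 != (size % _MALI_OSK_CPU_PAGE_SIZE)) return 0;   /* size must be a whole number of pages */
    if (size > range_size) return 0;
    if (offset > range_size - size) return 0;              /* also guards against u32 overflow of offset + size */
    return 1;
}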
+
+
+/** @brief Unmap physical pages from a user process's virtual address range
+ *
+ * This is used to unmap a number of physically contiguous pages from a
+ * user-process's virtual address range, which were previously mapped by a
+ * call to _mali_osk_mem_mapregion_map(). If the range specified was allocated
+ * from OS memory, then that memory will be returned to the OS. Whilst pages
+ * will be mapped out, the virtual address range remains reserved, and at the
+ * same base address.
+ *
+ * When this function is used to unmap pages from OS memory
+ * (_mali_osk_mem_mapregion_map() was called with *phys_addr ==
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC), then the \a flags must
+ * include \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR. This is because
+ * it is not always easy for an OS implementation to discover whether the
+ * memory was OS allocated or not (and so, how it should release the memory).
+ *
+ * For this reason, only a range of pages of the same allocation type (all OS
+ * allocated, or none OS allocated) may be unmapped in one call. Multiple
+ * calls must be made if allocations of these different types exist across the
+ * entire region described by the \a descriptor.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to unmap from.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor. \a offset must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] size the number of bytes to unmap. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] flags specifies how the memory should be unmapped. For a range
+ * of pages that were originally OS allocated, this must have
+ * \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR set.
+ */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags );
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h
new file mode 100644
index 00000000000..9a1583658c9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h
@@ -0,0 +1,1148 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/*
+ * NOTE: Because this file can be included from user-side and kernel-side,
+ * it is up to the includee to ensure certain typedefs (e.g. u32) are already
+ * defined when #including this.
+ */
+#include "regs/mali_200_regs.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum
+{
+ _MALI_UK_CORE_SUBSYSTEM, /**< Core Group of U/K calls */
+ _MALI_UK_MEMORY_SUBSYSTEM, /**< Memory Group of U/K calls */
+ _MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */
+ _MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */
+ _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+ _MALI_UK_PMM_SUBSYSTEM, /**< Power Management Module Group of U/K calls */
+ _MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum
+{
+ /** Core functions */
+
+ _MALI_UK_OPEN = 0, /**< _mali_ukk_open() */
+ _MALI_UK_CLOSE, /**< _mali_ukk_close() */
+ _MALI_UK_GET_SYSTEM_INFO_SIZE, /**< _mali_ukk_get_system_info_size() */
+ _MALI_UK_GET_SYSTEM_INFO, /**< _mali_ukk_get_system_info() */
+ _MALI_UK_WAIT_FOR_NOTIFICATION, /**< _mali_ukk_wait_for_notification() */
+ _MALI_UK_GET_API_VERSION, /**< _mali_ukk_get_api_version() */
+ _MALI_UK_POST_NOTIFICATION, /**< _mali_ukk_post_notification() */
+
+ /** Memory functions */
+
+ _MALI_UK_INIT_MEM = 0, /**< _mali_ukk_init_mem() */
+ _MALI_UK_TERM_MEM, /**< _mali_ukk_term_mem() */
+ _MALI_UK_GET_BIG_BLOCK, /**< _mali_ukk_get_big_block() */
+ _MALI_UK_FREE_BIG_BLOCK, /**< _mali_ukk_free_big_block() */
+ _MALI_UK_MAP_MEM, /**< _mali_ukk_mem_mmap() */
+ _MALI_UK_UNMAP_MEM, /**< _mali_ukk_mem_munmap() */
+ _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+ _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */
+ _MALI_UK_ATTACH_UMP_MEM, /**< _mali_ukk_attach_ump_mem() */
+ _MALI_UK_RELEASE_UMP_MEM, /**< _mali_ukk_release_ump_mem() */
+ _MALI_UK_MAP_EXT_MEM, /**< _mali_uku_map_external_mem() */
+ _MALI_UK_UNMAP_EXT_MEM, /**< _mali_uku_unmap_external_mem() */
+ _MALI_UK_VA_TO_MALI_PA, /**< _mali_uku_va_to_mali_pa() */
+
+ /** Common functions for each core */
+
+ _MALI_UK_START_JOB = 0, /**< Start a Fragment/Vertex Processor Job on a core */
+ _MALI_UK_ABORT_JOB, /**< Abort a job */
+ _MALI_UK_GET_NUMBER_OF_CORES, /**< Get the number of Fragment/Vertex Processor cores */
+ _MALI_UK_GET_CORE_VERSION, /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+ /** Fragment Processor Functions */
+
+ _MALI_UK_PP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_pp_start_job() */
+ _MALI_UK_PP_ABORT_JOB = _MALI_UK_ABORT_JOB, /**< _mali_ukk_pp_abort_job() */
+ _MALI_UK_GET_PP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_pp_number_of_cores() */
+ _MALI_UK_GET_PP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_pp_core_version() */
+
+ /** Vertex Processor Functions */
+
+ _MALI_UK_GP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_gp_start_job() */
+ _MALI_UK_GP_ABORT_JOB = _MALI_UK_ABORT_JOB, /**< _mali_ukk_gp_abort_job() */
+ _MALI_UK_GET_GP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_gp_number_of_cores() */
+ _MALI_UK_GET_GP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_gp_core_version() */
+ _MALI_UK_GP_SUSPEND_RESPONSE, /**< _mali_ukk_gp_suspend_response() */
+
+ /** Profiling functions */
+
+ _MALI_UK_PROFILING_START = 0, /**< __mali_uku_profiling_start() */
+ _MALI_UK_PROFILING_ADD_EVENT, /**< __mali_uku_profiling_add_event() */
+ _MALI_UK_PROFILING_STOP, /**< __mali_uku_profiling_stop() */
+ _MALI_UK_PROFILING_GET_EVENT, /**< __mali_uku_profiling_get_event() */
+ _MALI_UK_PROFILING_CLEAR, /**< __mali_uku_profiling_clear() */
+
+#if USING_MALI_PMM
+ /** Power Management Module Functions */
+ _MALI_UK_PMM_EVENT_MESSAGE = 0, /**< Raise an event message */
+#endif
+
+ /** VSYNC reporting functions */
+ _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */
+
+} _mali_uk_functions;
+
+/** @brief Get the size necessary for system info
+ *
+ * @see _mali_ukk_get_system_info_size()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of buffer necessary to hold system information data, in bytes */
+} _mali_uk_get_system_info_size_s;
+
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/**
+ * Enum values for the different modes the driver can be put in.
+ * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
+ * Job completion is reported using the _mali_ukk_wait_for_notification call.
+ * The driver blocks this io command until a job has completed or failed or a timeout occurs.
+ *
+ * The 'raw' mode is reserved for future expansion.
+ */
+typedef enum _mali_driver_mode
+{
+ _MALI_DRIVER_MODE_RAW = 1, /**< Reserved for future expansion */
+ _MALI_DRIVER_MODE_NORMAL = 2 /**< Normal mode of operation */
+} _mali_driver_mode;
+
+/** @brief List of possible cores
+ *
+ * add new entries to the end of this enum */
+typedef enum _mali_core_type
+{
+ _MALI_GP2 = 2, /**< MaliGP2 Programmable Vertex Processor */
+ _MALI_200 = 5, /**< Mali200 Programmable Fragment Processor */
+ _MALI_400_GP = 6, /**< Mali400 Programmable Vertex Processor */
+ _MALI_400_PP = 7, /**< Mali400 Programmable Fragment Processor */
+ /* insert new core here, do NOT alter the existing values */
+} _mali_core_type;
+
+/** @brief Information about each Mali Core
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Both Fragment Processor (PP) and Vertex Processor (GP) cores are represented
+ * by this struct.
+ *
+ * The type is reported by the _mali_core_info::type member, which is of type _mali_core_type.
+ *
+ * Each core is given a unique Sequence number identifying it, the core_nr
+ * member.
+ *
+ * Flags are taken directly from the resource's flags, and are currently unused.
+ *
+ * Multiple mali_core_info structs are linked in a single linked list using the next field
+ */
+typedef struct _mali_core_info
+{
+ _mali_core_type type; /**< Type of core */
+ _mali_core_version version; /**< Core Version, as reported by the Core's Version Register */
+ u32 reg_address; /**< Address of Registers */
+ u32 core_nr; /**< Sequence number */
+ u32 flags; /**< Flags. Currently Unused. */
+ struct _mali_core_info * next; /**< Next core in Linked List */
+} _mali_core_info;
+
+/** @brief Capabilities of Memory Banks
+ *
+ * These may be used to restrict memory banks for certain uses. They may be
+ * used when access is not possible (e.g. Bus does not support access to it)
+ * or when access is possible but not desired (e.g. Access is slow).
+ *
+ * In the case of 'possible but not desired', there is currently no way to
+ * express this as an optimization hint, so such memory cannot be marked for
+ * use only as a last resort.
+ *
+ * @see _mali_mem_info
+ */
+typedef enum _mali_bus_usage
+{
+
+ _MALI_PP_READABLE = (1<<0), /**< Readable by the Fragment Processor */
+ _MALI_PP_WRITEABLE = (1<<1), /**< Writeable by the Fragment Processor */
+ _MALI_GP_READABLE = (1<<2), /**< Readable by the Vertex Processor */
+ _MALI_GP_WRITEABLE = (1<<3), /**< Writeable by the Vertex Processor */
+ _MALI_CPU_READABLE = (1<<4), /**< Readable by the CPU */
+ _MALI_CPU_WRITEABLE = (1<<5), /**< Writeable by the CPU */
+ _MALI_MMU_READABLE = _MALI_PP_READABLE | _MALI_GP_READABLE, /**< Readable by the MMU (including all cores behind it) */
+ _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /**< Writeable by the MMU (including all cores behind it) */
+} _mali_bus_usage;
+
+/** @brief Information about the Mali Memory system
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Each element of the linked list describes a single Mali Memory bank.
+ * Each allocation can only come from one bank, and will not cross multiple
+ * banks.
+ *
+ * Each bank is uniquely identified by its identifier member. On Mali-nonMMU
+ * systems, to allocate from this bank, the value of identifier must be passed
+ * as the type_id member of the _mali_uk_get_big_block_s arguments to
+ * _mali_ukk_get_big_block.
+ *
+ * On Mali-MMU systems, there is only one bank, which describes the maximum
+ * possible address range that could be allocated (which may be much less than
+ * the available physical memory)
+ *
+ * The flags member describes the capabilities of the memory. It is an error
+ * to attempt to build a job for a particular core (PP or GP) when the memory
+ * regions used do not have the capabilities for supporting that core. This
+ * would result in a job abort from the Device Driver.
+ *
+ * For example, it is correct to build a PP job where read-only data structures
+ * are taken from a memory with _MALI_PP_READABLE set and
+ * _MALI_PP_WRITEABLE clear, and a framebuffer with _MALI_PP_WRITEABLE set and
+ * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
+ * where _MALI_PP_WRITEABLE is clear.
+ */
+typedef struct _mali_mem_info
+{
+ u32 size; /**< Size of the memory bank in bytes */
+ _mali_bus_usage flags; /**< Capability flags of the memory */
+ u32 maximum_order_supported; /**< log2 supported size */
+ u32 identifier; /**< Unique identifier, to be used in allocate calls */
+ struct _mali_mem_info * next; /**< Next List Link */
+} _mali_mem_info;
+
+/** @brief Info about the whole Mali system.
+ *
+ * This contains a linked list of the cores and memory banks available. Each
+ * list pointer will remain inside the system_info buffer supplied in the
+ * _mali_uk_get_system_info_s arguments to a _mali_ukk_get_system_info call.
+ *
+ * The has_mmu member must be inspected to ensure the correct group of
+ * Memory function calls is obtained - that is, those for either Mali-MMU
+ * or Mali-nonMMU. @see _mali_uk_memory
+ */
+typedef struct _mali_system_info
+{
+ _mali_core_info * core_info; /**< List of _mali_core_info structures */
+ _mali_mem_info * mem_info; /**< List of _mali_mem_info structures */
+ u32 has_mmu; /**< Non-zero if Mali-MMU present. Zero otherwise. */
+ _mali_driver_mode drivermode; /**< Reserved. Must always be _MALI_DRIVER_MODE_NORMAL */
+} _mali_system_info;
+
+/** @brief Arguments to _mali_ukk_get_system_info()
+ *
+ * A buffer of the size returned by _mali_ukk_get_system_info_size() must be
+ * allocated, and the pointer to this buffer must be written into the
+ * system_info member. The buffer must be suitably aligned for storage of
+ * the _mali_system_info structure - for example, one returned by
+ * _mali_osk_malloc(), which will be suitably aligned for any structure.
+ *
+ * The ukk_private member must be set to zero by the user-side. Under an OS
+ * implementation, the U/K interface must write in the user-side base address
+ * into the ukk_private member, so that the common code in
+ * _mali_ukk_get_system_info() can determine how to adjust the pointers such
+ * that they are sensible from user space. Leaving ukk_private as NULL implies
+ * that no pointer adjustment is necessary - which will be the case on a
+ * bare-metal/RTOS system.
+ *
+ * @see _mali_system_info
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer provided to store system information data */
+ _mali_system_info * system_info; /**< [in,out] pointer to buffer to store system information data. No initialisation of buffer required on input. */
+ u32 ukk_private; /**< [in] Kernel-side private word inserted by certain U/K interface implementations. Caller must set to Zero. */
+} _mali_uk_get_system_info_s;
+/** @} */ /* end group _mali_uk_getsysteminfo */
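A minimal sketch of the two-call pattern described above. It assumes the usual convention that each _mali_ukk_*() entry point takes a pointer to its argument struct and returns _mali_osk_errcode_t, and that _mali_osk_malloc()/_mali_osk_free() are the OSK allocation routines; the function name is hypothetical and error handling is reduced to early returns.

static void example_get_system_info(void *ctx)
{
    _mali_uk_get_system_info_size_s size_args = { 0 };
    _mali_uk_get_system_info_s info_args = { 0 };
    _mali_core_info *core;

    size_args.ctx = ctx;
    if (_MALI_OSK_ERR_OK != _mali_ukk_get_system_info_size(&size_args)) return;

    info_args.ctx = ctx;
    info_args.size = size_args.size;
    info_args.system_info = _mali_osk_malloc(size_args.size);
    info_args.ukk_private = 0; /* no pointer adjustment requested */
    if (NULL == info_args.system_info) return;

    if (_MALI_OSK_ERR_OK == _mali_ukk_get_system_info(&info_args))
    {
        for (core = info_args.system_info->core_info; NULL != core; core = core->next)
        {
            /* inspect core->type, core->version, core->core_nr, ... */
        }
    }

    _mali_osk_free(info_args.system_info);
}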
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job that will resolve the out of memory condition for the job.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ * you resume it, @c arguments[0] should specify the Mali start address for the new
+ * heap and @c arguments[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code
+{
+ _MALIGP_JOB_ABORT, /**< Abort the Vertex Processor job */
+ _MALIGP_JOB_RESUME_WITH_NEW_HEAP /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+ _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+ u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
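A minimal sketch of the response flow described above, resuming a suspended job with a new heap. It assumes the usual convention that _mali_ukk_gp_suspend_response() takes a pointer to this struct; the function name and the heap addresses passed in are placeholders.

static void example_resume_suspended_job(void *ctx, u32 cookie, u32 heap_start, u32 heap_end)
{
    _mali_uk_gp_suspend_response_s response = { 0 };

    response.ctx = ctx;                               /* context returned by _mali_ukk_open() */
    response.cookie = cookie;                         /* from the _mali_uk_gp_job_suspended_s notification */
    response.code = _MALIGP_JOB_RESUME_WITH_NEW_HEAP;
    response.arguments[0] = heap_start;               /* Mali start address of the new heap */
    response.arguments[1] = heap_end;                 /* Mali end address of the new heap */

    (void)_mali_ukk_gp_suspend_response(&response);
}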
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
+typedef enum
+{
+ _MALI_UK_START_JOB_STARTED, /**< Job started */
+ _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED, /**< Job started and bumped a lower priority job that was pending execution */
+ _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE /**< Job could not be started at this time. Try starting the job again */
+} _mali_uk_start_job_status;
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */
+
+typedef enum
+{
+ _MALI_UK_JOB_STATUS_END_SUCCESS = 1<<(16+0),
+ _MALI_UK_JOB_STATUS_END_OOM = 1<<(16+1),
+ _MALI_UK_JOB_STATUS_END_ABORT = 1<<(16+2),
+ _MALI_UK_JOB_STATUS_END_TIMEOUT_SW = 1<<(16+3),
+ _MALI_UK_JOB_STATUS_END_HANG = 1<<(16+4),
+ _MALI_UK_JOB_STATUS_END_SEG_FAULT = 1<<(16+5),
+ _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB = 1<<(16+6),
+ _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR = 1<<(16+7),
+ _MALI_UK_JOB_STATUS_END_SHUTDOWN = 1<<(16+8),
+ _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again. If the job had a higher priority than the one currently pending
+ * execution (@c _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED), it will bump
+ * the lower priority job and returns the address of the @c mali_gp_job_info
+ * for that job in @c returned_user_job_ptr. That job should get requeued.
+ *
+ * After the job has started, @c _mali_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled. This @c cookie will be needed
+ * when responding to this notification by means of _mali_ukk_gp_suspend_response().
+ * (see _mali_ukk_gp_suspend_response()). The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 watchdog_msecs; /**< [in] maximum allowed runtime in milliseconds. The job gets killed if it runs longer than this. A value of 0 selects the default used by the device driver. */
+ u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 returned_user_job_ptr; /**< [out] identifier for the returned job in user space, a @c mali_gp_job_info* */
+ _mali_uk_start_job_status status; /**< [out] indicates job start status (success, previous job returned, requeue) */
+ u32 abort_id; /**< [in] abort id of this job, used to identify this job for later abort requests */
+ u32 perf_counter_l2_src0; /**< [in] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [in] source id for Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE (1<<2) /**< Enable performance counter L2_SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE (1<<3) /**< Enable performance counter L2_SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET (1<<4) /**< Enable performance counter L2_RESET for a job */
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
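A minimal sketch of filling in the start-job arguments described above, for an instrumented build with one performance counter enabled. Only fields declared in _mali_uk_gp_start_job_s are touched; the function name, the frame register values and the priority/abort_id choices are placeholders.

static void example_prepare_gp_job(_mali_uk_gp_start_job_s *job, void *ctx,
                                   u32 user_job_ptr, const u32 *frame_regs)
{
    int i;

    job->ctx = ctx;                      /* context returned by _mali_ukk_open() */
    job->user_job_ptr = user_job_ptr;    /* address of the user-space mali_gp_job_info */
    job->priority = 1;                   /* a lower number means higher priority */
    job->watchdog_msecs = 0;             /* 0 selects the device driver's default timeout */

    for (i = 0; i < MALIGP2_NUM_REGS_FRAME; i++)
    {
        job->frame_registers[i] = frame_regs[i];
    }

    job->perf_counter_flag = _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE;
    job->perf_counter_src0 = 0;          /* counter source id, see ARM DDI0415A, Table 3-60 */
    job->perf_counter_src1 = 0;
    job->abort_id = user_job_ptr;        /* any value later abort requests can match on */
}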
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 irq_status; /**< [out] value of the GP interrupt rawstat register (see ARM DDI0415A) */
+ u32 status_reg_on_stop; /**< [out] value of the GP control register */
+ u32 vscl_stop_addr; /**< [out] value of the GP VLSCL start register */
+ u32 plbcl_stop_addr; /**< [out] value of the GP PLBCL start register */
+ u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */
+ u32 perf_counter_src0; /**< [out] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [out] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+ u32 render_time; /**< [out] number of milliseconds it took for the job to render */
+ u32 perf_counter_l2_src0; /**< [out] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [out] source id for Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0; /**< [out] Value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1; /**< [out] Value of the Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_gp_job_finished_s;
+
+typedef enum _maligp_job_suspended_reason
+{
+ _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY /**< Polygon list builder unit (PLBU) has run out of memory */
+} _maligp_job_suspended_reason;
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _maligp_job_suspended_reason reason; /**< [out] reason why the job stalled */
+ u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write back 0,1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again. If the job had a higher priority than the one currently pending
+ * execution (@c _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED), it will bump
+ * the lower priority job and returns the address of the @c mali_pp_job
+ * for that job in @c returned_user_job_ptr. That job should get requeued.
+ *
+ * After the job has started, _mali_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 watchdog_msecs; /**< [in] maximum allowed runtime in milliseconds. The job gets killed if it runs longer than this. A value of 0 selects the default used by the device driver. */
+ u32 frame_registers[MALI200_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job, see ARM DDI0415A */
+ u32 wb0_registers[MALI200_NUM_REGS_WBx];
+ u32 wb1_registers[MALI200_NUM_REGS_WBx];
+ u32 wb2_registers[MALI200_NUM_REGS_WBx];
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 returned_user_job_ptr; /**< [out] identifier for the returned job in user space */
+ _mali_uk_start_job_status status; /**< [out] indicates job start status (success, previous job returned, requeue) */
+ u32 abort_id; /**< [in] abort id of this job, used to identify this job for later abort requests */
+ u32 perf_counter_l2_src0; /**< [in] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [in] source id for Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_pp_start_job_s;
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 irq_status; /**< [out] value of interrupt rawstat register (see ARM DDI0415A) */
+ u32 last_tile_list_addr; /**< [out] value of renderer list register (see ARM DDI0415A); necessary to restart a stopped job */
+ u32 perf_counter_src0; /**< [out] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [out] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+ u32 render_time; /**< [out] number of milliseconds it took for the job to render */
+ u32 perf_counter_l2_src0; /**< [out] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [out] source id for Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0; /**< [out] Value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1; /**< [out] Value of the Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0_raw; /**< [out] Raw value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1_raw; /**< [out] Raw value of the Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_pp_job_finished_s;
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ * | (( id << _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum
+{
+ /** core notifications */
+
+ _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+ _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+
+ /** Fragment Processor notifications */
+
+ _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+
+ /** Vertex Processor notifications */
+
+ _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+} _mali_uk_notification_type;
+
+/** to assist in splitting the 32-bit notification value into its subsystem and id values */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
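A small sketch of applying these masks and shifts to split a notification type back into its (subsystem, id) pair; the function name is hypothetical.

static void example_decode_notification(_mali_uk_notification_type type, u32 *subsystem, u32 *id)
{
    *subsystem = ((u32)type & _MALI_NOTIFICATION_SUBSYSTEM_MASK) >> _MALI_NOTIFICATION_SUBSYSTEM_SHIFT;
    *id = ((u32)type & _MALI_NOTIFICATION_ID_MASK) >> _MALI_NOTIFICATION_ID_SHIFT;
    /* e.g. _MALI_NOTIFICATION_GP_STALLED decodes to (_MALI_UK_GP_SUBSYSTEM, 0x20). */
}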
+
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the _mali_uk_notification_type held in the type
+ * member. The type member is encoded in a way to divide up the types into a
+ * subsystem field, and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ * - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ * - In this case, the value of the data union member is undefined.
+ * - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ * - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ * - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ * - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ * - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * - Status will indicate MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job
+ * didn't start on the hardware yet, e.g. when the driver closes.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ * - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ * - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ * - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [out] Type of notification available */
+ union
+ {
+ _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+ _mali_uk_gp_job_finished_s gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+ _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+ } data;
+} _mali_uk_wait_for_notification_s;
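A minimal sketch of the notification loop this struct is designed for. It assumes the usual convention that _mali_ukk_wait_for_notification() takes a pointer to this struct and returns _mali_osk_errcode_t; the function name is hypothetical.

static void example_notification_loop(void *ctx)
{
    _mali_uk_wait_for_notification_s args = { 0 };

    for (;;)
    {
        args.ctx = ctx;
        if (_MALI_OSK_ERR_OK != _mali_ukk_wait_for_notification(&args)) break;

        switch (args.type)
        {
        case _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS:
            return; /* make no further _mali_ukk_wait_for_notification() calls */
        case _MALI_NOTIFICATION_PP_FINISHED:
            /* consume args.data.pp_job_finished (status, render_time, ...) */
            break;
        case _MALI_NOTIFICATION_GP_FINISHED:
            /* consume args.data.gp_job_finished (status, heap_current_addr, ...) */
            break;
        case _MALI_NOTIFICATION_GP_STALLED:
            /* respond with _mali_ukk_gp_suspend_response(), see the GP group above */
            break;
        default:
            break;
        }
    }
}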
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * For example, for version 1 the value would be 0x00010001
+ */
+#define _MALI_API_VERSION 8
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
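Worked example of the encoding, with _MALI_API_VERSION defined as 8:

/* _MAKE_VERSION_ID(8)        == ((8 << 16UL) | 8) == 0x00080008 */
/* _IS_VERSION_ID(0x00080008) == (0x0008 == 0x0008) -> non-zero  */
/* _GET_VERSION(0x00080008)   == 0x0008 == 8                     */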
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @c version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+/** @} */ /* end group _mali_uk_getapiversion_s */
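A minimal sketch of the handshake described above, from the user-side. It assumes the usual convention that _mali_ukk_get_api_version() takes a pointer to this struct and returns _mali_osk_errcode_t; the function name is hypothetical.

static int example_check_api_version(void *ctx)
{
    _mali_uk_get_api_version_s args = { 0 };

    args.ctx = ctx;
    args.version = _MALI_UK_API_VERSION; /* encoded version of this user-side interface */

    if (_MALI_OSK_ERR_OK != _mali_ukk_get_api_version(&args)) return 0;

    /* args.version now holds the kernel-side API version; rely on the kernel's
     * compatibility decision rather than requiring an exact match. */
    return args.compatible;
}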
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_init_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mali_address_base; /**< [out] start of MALI address space */
+ u32 memory_size; /**< [out] total MALI address space available */
+} _mali_uk_init_mem_s;
+
+/** @brief Arguments for _mali_ukk_term_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_term_mem_s;
+
+/** @brief Arguments for _mali_ukk_get_big_block()
+ *
+ * - type_id should be set to the value of the identifier member of one of the
+ * _mali_mem_info structures returned through _mali_ukk_get_system_info()
+ * - ukk_private must be zero when calling from user-side. On Kernel-side, the
+ * OS implementation of the U/K interface can use it to communicate data to the
+ * OS implementation of the OSK layer. Specifically, ukk_private will be placed
+ * into the ukk_private member of the _mali_uk_mem_mmap_s structure. See
+ * _mali_ukk_mem_mmap() for more details.
+ * - minimum_size_requested will be updated if it is too small
+ * - block_size will always be >= minimum_size_requested, because the underlying
+ * allocation mechanism may only be able to divide up memory regions in certain
+ * ways. To avoid wasting memory, block_size should always be taken into account
+ * rather than assuming minimum_size_requested was really allocated.
+ * - to free the memory, the returned cookie member must be stored, and used to
+ * refer to it.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 type_id; /**< [in] the type id of the memory bank to allocate memory from */
+ u32 minimum_size_requested; /**< [in,out] minimum size of the allocation */
+ u32 ukk_private; /**< [in] Kernel-side private word inserted by certain U/K interface implementations. Caller must set to Zero. */
+ u32 mali_address; /**< [out] address of the allocation in mali address space */
+ void *cpuptr; /**< [out] address of the allocation in the current process address space */
+ u32 block_size; /**< [out] size of the block that got allocated */
+ u32 flags; /**< [out] flags associated with the allocated block, of type _mali_bus_usage */
+ u32 cookie; /**< [out] identifier for the allocated block in kernel space */
+} _mali_uk_get_big_block_s;
+
+/** @brief Arguments for _mali_ukk_free_big_block()
+ *
+ * All that is required is that the cookie member must be set to the value of
+ * the cookie member returned through _mali_ukk_get_big_block()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_free_big_block_s;
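A minimal sketch of the Mali-nonMMU allocate/free pattern described above. It assumes the usual convention that _mali_ukk_get_big_block() and _mali_ukk_free_big_block() take pointers to their argument structs; the function name, bank identifier and requested size are placeholders.

static void example_big_block(void *ctx, u32 bank_identifier)
{
    _mali_uk_get_big_block_s alloc = { 0 };
    _mali_uk_free_big_block_s release = { 0 };

    alloc.ctx = ctx;
    alloc.type_id = bank_identifier;      /* identifier member of a _mali_mem_info entry */
    alloc.minimum_size_requested = 64 * 1024;
    alloc.ukk_private = 0;                /* must be zero when calling from user-side */

    if (_MALI_OSK_ERR_OK != _mali_ukk_get_big_block(&alloc)) return;

    /* Use alloc.block_size (not the requested minimum), alloc.mali_address and
     * alloc.cpuptr here; keep alloc.cookie for the matching free. */

    release.ctx = ctx;
    release.cookie = alloc.cookie;
    (void)_mali_ukk_free_big_block(&release);
}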
+
+/** @note Mali-MMU only */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 phys_addr; /**< [in] physical address */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_map_external_mem_s;
+
+/** Flag for _mali_uk_map_external_mem_s and _mali_uk_attach_ump_mem_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+/** @note Mali-MMU only */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_unmap_external_mem_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure id */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_attach_ump_mem_s;
+
+/** @note Mali-MMU only; will be supported in a future version */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_release_ump_mem_s;
+
+/** @brief Arguments for _mali_ukk_va_to_mali_pa()
+ *
+ * if size is zero or not a multiple of the system's page size, it will be
+ * rounded up to the next multiple of the page size. This will occur before
+ * any other use of the size parameter.
+ *
+ * if va is not PAGE_SIZE aligned, it will be rounded down to the next page
+ * boundary.
+ *
+ * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
+ * contiguity.
+ *
+ * The implementor will check that the entire physical range is allowed to be mapped
+ * into user-space.
+ *
+ * Failure will occur if either of the above are not satisfied.
+ *
+ * Otherwise, the physical base address of the range is returned through pa,
+ * va is updated to be page aligned, and size is updated to be a non-zero
+ * multiple of the system's pagesize.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *va; /**< [in,out] Virtual address of the start of the range */
+ u32 pa; /**< [out] Physical base address of the range */
+ u32 size; /**< [in,out] Size of the range, in bytes. */
+} _mali_uk_va_to_mali_pa_s;
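+
+/* Worked example of the rounding rules above (illustrative, assuming a 4096
+ * byte page size and following the more detailed description of
+ * _mali_ukk_va_to_mali_pa() in mali_ukk.h): a call with va = 0x10020 and
+ * size = 0x2000 is adjusted to va = 0x10000 (rounded down to a page
+ * boundary), size = 0x2020 (the 0x20 remainder is added so that va + size is
+ * unchanged) and finally size = 0x3000 (rounded up to a whole number of
+ * pages), before the range 0x10000..0x12FFF is checked for physical
+ * contiguity.
+ */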
+
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer to receive mmu page table information */
+ void *buffer; /**< [in,out] buffer to receive mmu page table information */
+ u32 register_writes_size; /**< [out] size of MMU register dump */
+ u32 *register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
+ u32 page_table_dump_size; /**< [out] size of MMU page table dump */
+ u32 *page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Fragment Processor cores in the system */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_pp_core_version_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 abort_id; /**< [in] ID of job(s) to abort */
+} _mali_uk_pp_abort_job_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 abort_id; /**< [in] ID of job(s) to abort */
+} _mali_uk_gp_abort_job_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 limit; /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
+} _mali_uk_profiling_start_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 count; /**< [out] The number of events sampled */
+} _mali_uk_profiling_stop_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 index; /**< [in] which index to get (starting at zero) */
+ u64 timestamp; /**< [out] timestamp of event */
+ u32 event_id; /**< [out] event id of event (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [out] event specific data */
+} _mali_uk_profiling_get_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_profiling_clear_s;
+
+
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or nonMMU:
+ * - in the nonMMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() calls _mali_ukk_mem_mmap() directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ u32 size; /**< [in] Size of the requested mapping */
+ u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */
+ u32 cookie; /**< [out] Returns a cookie for use in munmap calls */
+ void *uku_private; /**< [in] User-side Private word used by U/K interface */
+ void *ukk_private; /**< [in] Kernel-side Private word used by U/K interface */
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be those returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] The mapping returned from mmap call */
+ u32 size; /**< [in] The size passed to mmap call */
+ u32 cookie; /**< [in] Cookie from mmap call */
+} _mali_uk_mem_munmap_s;
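+
+/* Illustrative usage sketch (not part of the original header): a Mali-MMU
+ * mode mapping and the matching unmap, through the kernel-side entry points
+ * declared in mali_ukk.h. It assumes 'session' is a context from
+ * _mali_ukk_open() and 'mali_vaddr' is a u32 holding the desired Mali-virtual
+ * base address for the mapping.
+ *
+ * @code
+ * _mali_uk_mem_mmap_s map = { NULL, };
+ * _mali_uk_mem_munmap_s unmap = { NULL, };
+ *
+ * map.ctx = session;
+ * map.size = 16 * 4096;
+ * map.phys_addr = mali_vaddr;      // MMU builds: a Mali-virtual address, not a CPU-physical one
+ * map.uku_private = NULL;          // reserved, must be zero
+ * map.ukk_private = NULL;          // zero from user-space; OS glue may fill it in on the kernel side
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_mem_mmap(&map))
+ * {
+ *     // ... use map.mapping ...
+ *
+ *     unmap.ctx = session;         // ctx is trashed on output, copy the stored handle again
+ *     unmap.mapping = map.mapping; // must match the values returned by the mmap call
+ *     unmap.cookie = map.cookie;
+ *     unmap.size = map.size;       // must be the size originally supplied
+ *     _mali_ukk_mem_munmap(&unmap);
+ * }
+ * @endcode
+ */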
+/** @} */ /* end group _mali_uk_memory */
+
+#if USING_MALI_PMM
+
+/** @defgroup _mali_uk_pmm U/K Power Management Module
+ * @{ */
+
+/** @brief Power management event message identifiers.
+ *
+ * U/K events start after id 200, and can range up to 999
+ * Adding new events will require updates to the PMM mali_pmm_event_id type
+ */
+#define _MALI_PMM_EVENT_UK_EXAMPLE 201
+
+/** @brief Generic PMM message data type, whose interpretation depends on the event message
+ */
+typedef u32 mali_pmm_message_data;
+
+
+/** @brief Arguments to _mali_ukk_pmm_event_message()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 id; /**< [in] event id */
+ mali_pmm_message_data data; /**< [in] specific data associated with the event */
+} _mali_uk_pmm_message_s;
+
+/** @} */ /* end group _mali_uk_pmm */
+#endif /* USING_MALI_PMM */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when the DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue with the next frame.
+ */
+typedef enum _mali_uk_vsync_event
+{
+ _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+ _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_vsync_event event; /**< [in] VSYNC event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h
new file mode 100644
index 00000000000..d066046901d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function call)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * The first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure, named after the
+ * function and distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementations that take NULL members in
+ * their argument structures from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of the ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - On OS systems (not including direct function call U/K interface
+ * implementations), _mali_ukk_get_big_block() may succeed, but the subsequent
+ * copying to user space may fail.
+ * - A problem scenario exists: some memory has been reserved by
+ * _mali_ukk_get_big_block(), but the user-mode will be unaware of it (it will
+ * never receive any information about this memory). In this case, the U/K
+ * implementation must do everything necessary to 'rollback' the \em atomic
+ * _mali_ukk_get_big_block() transaction.
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side, in which case the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because the user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open( void **context );
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close( void **context );
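+
+/* Illustrative usage sketch (not part of the original header): session
+ * lifetime in a direct-function-call U/K configuration. The handle returned
+ * by _mali_ukk_open() is kept in a local variable and copied into the ctx
+ * member of every argument structure, because ctx is trashed by each call.
+ *
+ * @code
+ * void *session = NULL;
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_open(&session))
+ * {
+ *     _mali_uk_get_api_version_s version_args = { NULL, };
+ *     version_args.ctx = session;      // copy the stored handle; never reuse a ctx left in an args struct
+ *     (void)_mali_ukk_get_api_version(&version_args);
+ *
+ *     _mali_ukk_close(&session);
+ * }
+ * @endcode
+ */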
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Returns the size of the buffer needed for a _mali_ukk_get_system_info call
+ *
+ * This function must be called before a call is made to
+ * _mali_ukk_get_system_info, so that memory of the correct size can be
+ * allocated, and a pointer to this memory written into the system_info member
+ * of _mali_uk_get_system_info_s.
+ *
+ * @param args see _mali_uk_get_system_info_size_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_system_info_size( _mali_uk_get_system_info_size_s *args );
+
+/** @brief Returns information about the system (cores and memory banks)
+ *
+ * A buffer for this needs to be allocated by the caller. The size of the buffer required is returned by
+ * _mali_ukk_get_system_info_size(). The user is responsible for freeing the buffer.
+ *
+ * The _mali_system_info structure will be written to the start of this buffer,
+ * and the core_info and mem_info lists will be written to locations inside
+ * the buffer, and will be suitably aligned.
+ *
+ * Under OS implementations of the U/K interface we need to pack/unpack
+ * pointers across the user/kernel boundary. This has required that we malloc()
+ * an intermediate buffer inside the kernel-side U/K interface, and free it
+ * before returning to user-side. To avoid modifying common code, we use the
+ * following approach (shown as pseudo-code), which we shall call 'pointer switching':
+ *
+ * @code
+ * {
+ * Copy_From_User(kargs, args, ... );
+ * void __user * local_ptr = kargs->system_info;
+ * kargs->system_info = _mali_osk_malloc( ... );
+ * _mali_ukk_get_system_info( kargs );
+ * Copy_To_User( local_ptr, kargs->system_info, ... );
+ * _mali_osk_free( kargs->system_info );
+ * }
+ * @endcode
+ * @note The user-side's args->system_info member is not modified here.
+ *
+ * However, the current implementation requires an extra ukk_private word so that the common code can work out
+ * how to patch pointers to user-mode for an OS's U/K implementation. This word should be set to the user-space
+ * destination address for pointer-patching to occur; when it is NULL, it is unused, and no pointer-patching occurs
+ * in the common code.
+ *
+ * @param args see _mali_uk_get_system_info_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args );
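+
+/* Illustrative usage sketch (not part of the original header): the two-step
+ * size-query/info-retrieval pattern described above, in a direct-call
+ * configuration. 'session' is a context from _mali_ukk_open(); the output
+ * member of _mali_uk_get_system_info_size_s is assumed to be named 'size'
+ * here (see its definition in mali_uk_types.h), and only the members of
+ * _mali_uk_get_system_info_s documented above are shown.
+ *
+ * @code
+ * _mali_uk_get_system_info_size_s size_args = { NULL, };
+ * size_args.ctx = session;
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_get_system_info_size(&size_args))
+ * {
+ *     _mali_uk_get_system_info_s info_args = { NULL, };
+ *     info_args.ctx = session;
+ *     info_args.system_info = _mali_osk_malloc(size_args.size);
+ *     if (NULL != info_args.system_info)
+ *     {
+ *         info_args.ukk_private = 0;   // no pointer-patching is needed for a direct call
+ *         if (_MALI_OSK_ERR_OK == _mali_ukk_get_system_info(&info_args))
+ *         {
+ *             // walk the _mali_system_info written at the start of the buffer
+ *         }
+ *         _mali_osk_free(info_args.system_info);
+ *     }
+ * }
+ * @endcode
+ */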
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args );
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args );
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args );
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/**
+ * @brief Initialize the Mali-MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called before any
+ * other functions in the \ref _mali_uk_memory group are called.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_init_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args );
+
+/**
+ * @brief Terminate the MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called when
+ * functions in the \ref _mali_uk_memory group will no longer be called. This
+ * function must be called before the application terminates.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_term_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args );
+
+/** @brief Map a block of memory into the current user process
+ *
+ * Allocates a minimum of minimum_size_requested bytes of MALI memory and maps it into the current
+ * process space. The number of bytes allocated is returned in args->block_size.
+ *
+ * This is only used for Mali-nonMMU mode.
+ *
+ * @param args see _mali_uk_get_big_block_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args );
+
+/** @brief Unmap a block of memory from the current user process
+ *
+ * Frees allocated MALI memory and unmaps it from the current process space. The previously allocated memory
+ * is indicated by the cookie as returned by _mali_ukk_get_big_block().
+ *
+ * This is only used for Mali-nonMMU mode.
+ *
+ * @param args see _mali_uk_free_big_block_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args );
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode. In Mali-nonMMU mode, the function is callable
+ * from the kernel side, and is used to implement _mali_ukk_get_big_block() in this case.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, the _mali_uk_mem_mmap_s::phys_addr member of the arguments to
+ * _mali_ukk_mem_mmap() is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * This means that in the first (nonMMU) case, the caller must manage the physical address allocations. The caller
+ * in this case is _mali_ukk_get_big_block(), which does indeed manage the Mali physical address ranges.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args );
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args );
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args );
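+
+/* Illustrative usage sketch (not part of the original header): querying the
+ * dump size and then dumping the MMU page tables, in a direct-call
+ * configuration. 'session' is a context from _mali_ukk_open().
+ *
+ * @code
+ * _mali_uk_query_mmu_page_table_dump_size_s query = { NULL, };
+ * query.ctx = session;
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_query_mmu_page_table_dump_size(&query))
+ * {
+ *     _mali_uk_dump_mmu_page_table_s dump = { NULL, };
+ *     dump.ctx = session;
+ *     dump.size = query.size;
+ *     dump.buffer = _mali_osk_malloc(query.size);
+ *     if (NULL != dump.buffer)
+ *     {
+ *         if (_MALI_OSK_ERR_OK == _mali_ukk_dump_mmu_page_table(&dump))
+ *         {
+ *             // dump.register_writes and dump.page_table_dump point inside dump.buffer
+ *         }
+ *         _mali_osk_free(dump.buffer);
+ *     }
+ * }
+ * @endcode
+ */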
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args );
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args );
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args );
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args );
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+/** @brief Determine virtual-to-physical mapping of a contiguous memory range
+ * (optional)
+ *
+ * This allows the user-side to do a virtual-to-physical address translation.
+ * In conjunction with _mali_uku_map_external_mem, this can be used to do
+ * direct rendering.
+ *
+ * This function will only succeed on a virtual range that is mapped into the
+ * current process, and that is contiguous.
+ *
+ * If va is not page-aligned, then it is rounded down to the next page
+ * boundary. The remainder is added to size, such that ((u32)va)+size before
+ * rounding is equal to ((u32)va)+size after rounding. The rounded modified
+ * va and size will be written out into args on success.
+ *
+ * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
+ * then size will be rounded up to the next multiple of PAGE_SIZE before
+ * translation occurs. The rounded up size will be written out into args on
+ * success.
+ *
+ * On most OSs, virtual-to-physical address translation is a privileged
+ * function. Therefore, the implementer must validate the range supplied, to
+ * ensure they are not providing arbitrary virtual-to-physical address
+ * translations. While it is unlikely such a mechanism could be used to
+ * compromise the security of a system on its own, it is possible it could be
+ * combined with another small security risk to cause a much larger security
+ * risk.
+ *
+ * @note This is an optional part of the interface, and is only used by certain
+ * implementations of libEGL. If the platform layer in your libEGL
+ * implementation does not require Virtual-to-Physical address translation,
+ * then this function need not be implemented. A stub implementation should not
+ * be required either, as it would only be removed by the compiler's dead code
+ * elimination.
+ *
+ * @note If implemented, this function is entirely platform-dependent, and does
+ * not exist in common code.
+ *
+ * @param args see _mali_uk_va_to_mali_pa_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * If an existing lower priority job is returned, args->returned_user_job_ptr contains a
+ * pointer to the returned job and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_pp_start_job_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job( _mali_uk_pp_start_job_s *args );
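+
+/* Illustrative sketch (not part of the original header): handling the status
+ * codes described above. Only the members referenced in the comment above
+ * (status, returned_user_job_ptr) are shown; the job description members and
+ * the 'requeue' helpers are hypothetical and stand in for caller-side logic.
+ *
+ * @code
+ * _mali_uk_pp_start_job_s args;            // ctx and the job description filled in beforehand
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_pp_start_job(&args))
+ * {
+ *     switch (args.status)
+ *     {
+ *     case _MALI_UK_START_JOB_STARTED:
+ *         break;                                   // job accepted
+ *     case _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED:
+ *         requeue(args.returned_user_job_ptr);     // hypothetical helper: resubmit the bumped job
+ *         break;
+ *     case _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE:
+ *         requeue_current_job();                   // hypothetical helper: try this job again later
+ *         break;
+ *     }
+ * }
+ * @endcode
+ */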
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args );
+
+/** @brief Abort any PP jobs with the given ID.
+ *
+ * Any jobs with the given ID, whether internally queued or currently running on the
+ * hardware, are stopped/aborted immediately. Aborted jobs are reported through the
+ * normal job completion and notification mechanism.
+ *
+ * @param args see _mali_uk_pp_abort_job_s in "mali_uk_types.h"
+ */
+void _mali_ukk_pp_abort_job( _mali_uk_pp_abort_job_s *args );
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * If an existing lower priority job is returned, args->returned_user_job_ptr contains a pointer to
+ * the returned job and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_gp_start_job_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job( _mali_uk_gp_start_job_s *args );
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args );
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args );
+
+/** @brief Abort any GP jobs with the given ID.
+ *
+ * Any jobs with the given ID, whether internally queued or currently running on the
+ * hardware, are stopped/aborted immediately. Aborted jobs are reported through the
+ * normal job completion and notification mechanism.
+ *
+ * @param args see _mali_uk_gp_abort_job_s in "mali_uk_types.h"
+ */
+void _mali_ukk_gp_abort_job( _mali_uk_gp_abort_job_s *args );
+/** @} */ /* end group _mali_uk_gp */
+
+#if USING_MALI_PMM
+/** @addtogroup _mali_uk_pmm U/K Power Management Module
+ * @{ */
+
+/** @brief Power Management Module event message
+ *
+ * @note The event message can fail to be sent due to OOM but this is
+ * stored in the PMM state machine to be handled later
+ *
+ * @param args see _mali_uk_pmm_event_message_s in "mali_uk_types.h"
+ */
+void _mali_ukk_pmm_event_message( _mali_uk_pmm_message_s *args );
+/** @} */ /* end group _mali_uk_pmm */
+#endif /* USING_MALI_PMM */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Start recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_start_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Stop recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_stop_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+
+/** @brief Retrieve a recorded profiling event.
+ *
+ * @param args see _mali_uk_profiling_get_event_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
+
+/** @brief Clear recorded profiling events.
+ *
+ * @param args see _mali_uk_profiling_clear_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
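+
+/* Illustrative usage sketch (not part of the original header): a complete
+ * profiling sequence in a direct-call configuration. 'session' is a context
+ * from _mali_ukk_open() and the requested limit of 1024 events is an
+ * arbitrary example value.
+ *
+ * @code
+ * u32 i;
+ * _mali_uk_profiling_start_s start = { NULL, };
+ * _mali_uk_profiling_stop_s stop = { NULL, };
+ * _mali_uk_profiling_clear_s clear = { NULL, };
+ *
+ * start.ctx = session;
+ * start.limit = 1024;                      // desired number of events; actual limit is returned here
+ * (void)_mali_ukk_profiling_start(&start);
+ *
+ * // ... run the workload; events can also be added with _mali_ukk_profiling_add_event() ...
+ *
+ * stop.ctx = session;
+ * (void)_mali_ukk_profiling_stop(&stop);   // stop.count = number of events recorded
+ *
+ * for (i = 0; i < stop.count; i++)
+ * {
+ *     _mali_uk_profiling_get_event_s get = { NULL, };
+ *     get.ctx = session;                   // ctx must be re-copied for every call
+ *     get.index = i;
+ *     if (_MALI_OSK_ERR_OK == _mali_ukk_profiling_get_event(&get))
+ *     {
+ *         // get.timestamp, get.event_id and get.data[] describe event i
+ *     }
+ * }
+ *
+ * clear.ctx = session;
+ * (void)_mali_ukk_profiling_clear(&clear);
+ * @endcode
+ */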
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
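+
+/* Illustrative usage sketch (not part of the original header): bracketing a
+ * wait for vsync with the two event types from _mali_uk_vsync_event.
+ * 'session' is a context from _mali_ukk_open().
+ *
+ * @code
+ * _mali_uk_vsync_event_report_s report = { NULL, };
+ *
+ * report.ctx = session;
+ * report.event = _MALI_UK_VSYNC_EVENT_BEGIN_WAIT;
+ * (void)_mali_ukk_vsync_event_report(&report);
+ *
+ * // ... block until the display controller signals vsync ...
+ *
+ * report.ctx = session;                    // ctx is trashed on output, copy the stored handle again
+ * report.event = _MALI_UK_VSYNC_EVENT_END_WAIT;
+ * (void)_mali_ukk_vsync_event_report(&report);
+ * @endcode
+ */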
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c
new file mode 100644
index 00000000000..eeb29589ddc
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c
@@ -0,0 +1,921 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm.c
+ * Implementation of the power management module for the kernel device driver
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_platform.h"
+
+/* Internal PMM subsystem state */
+static _mali_pmm_internal_state_t *pmm_state = NULL;
+/* Mali kernel subsystem id */
+static mali_kernel_subsystem_identifier mali_subsystem_pmm_id = -1;
+
+#define GET_PMM_STATE_PTR (pmm_state)
+
+/* Internal functions */
+static _mali_osk_errcode_t malipmm_create(_mali_osk_resource_t *resource);
+static void pmm_event_process( void );
+_mali_osk_errcode_t malipmm_irq_uhandler(void *data);
+void malipmm_irq_bhandler(void *data);
+
+/** @brief Start the PMM subsystem
+ *
+ * @param id Subsystem id to uniquely identify this subsystem
+ * @return _MALI_OSK_ERR_OK if the system started successfully, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t malipmm_kernel_subsystem_start( mali_kernel_subsystem_identifier id );
+
+/** @brief Perform post start up of the PMM subsystem
+ *
+ * Post start up includes initializing the current policy now that the system is
+ * completely started, to stop policies from turning off hardware during start up
+ *
+ * @param id the unique subsystem id
+ * @return _MALI_OSK_ERR_OK if the post startup was successful, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t malipmm_kernel_load_complete( mali_kernel_subsystem_identifier id );
+
+/** @brief Terminate the PMM subsystem
+ *
+ * @param id the unique subsystem id
+ */
+void malipmm_kernel_subsystem_terminate( mali_kernel_subsystem_identifier id );
+
+#if MALI_STATE_TRACKING
+ void malipmm_subsystem_dump_state( void );
+#endif
+
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+struct mali_kernel_subsystem mali_subsystem_pmm=
+{
+ malipmm_kernel_subsystem_start, /* startup */
+ malipmm_kernel_subsystem_terminate, /* shutdown */
+ malipmm_kernel_load_complete, /* loaded all subsystems */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+#if MALI_STATE_TRACKING
+ malipmm_subsystem_dump_state, /* dump_state */
+#endif
+};
+
+#if PMM_OS_TEST
+
+u32 power_test_event = 0;
+mali_bool power_test_flag = MALI_FALSE;
+_mali_osk_timer_t *power_test_timer = NULL;
+
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data)
+{
+ MALI_PRINT(("POWER TEST OS UP DONE\n"));
+}
+
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data)
+{
+ MALI_PRINT(("POWER TEST OS DOWN DONE\n"));
+}
+
+/**
+ * Symbian OS Power Up call to the driver
+ */
+void power_test_callback( void *arg )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ power_test_flag = MALI_TRUE;
+ _mali_osk_irq_schedulework( pmm->irq );
+}
+
+void power_test_start()
+{
+ power_test_timer = _mali_osk_timer_init();
+ _mali_osk_timer_setcallback( power_test_timer, power_test_callback, NULL );
+
+ /* First event is power down */
+ power_test_event = MALI_PMM_EVENT_OS_POWER_DOWN;
+ _mali_osk_timer_add( power_test_timer, 10000 );
+}
+
+mali_bool power_test_check()
+{
+ if( power_test_flag )
+ {
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ 0,
+ 1 };
+ event.id = power_test_event;
+
+ power_test_flag = MALI_FALSE;
+
+ /* Send event */
+ _mali_ukk_pmm_event_message( &event );
+
+ /* Switch to next event to test */
+ if( power_test_event == MALI_PMM_EVENT_OS_POWER_DOWN )
+ {
+ power_test_event = MALI_PMM_EVENT_OS_POWER_UP;
+ }
+ else
+ {
+ power_test_event = MALI_PMM_EVENT_OS_POWER_DOWN;
+ }
+ _mali_osk_timer_add( power_test_timer, 5000 );
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+void power_test_end()
+{
+ _mali_osk_timer_del( power_test_timer );
+ _mali_osk_timer_term( power_test_timer );
+ power_test_timer = NULL;
+}
+
+#endif
+
+void _mali_ukk_pmm_event_message( _mali_uk_pmm_message_s *args )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ _mali_osk_notification_t *msg;
+ mali_pmm_message_t *event;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: sending message\n") );
+
+#if MALI_PMM_TRACE && MALI_PMM_TRACE_SENT_EVENTS
+ _mali_pmm_trace_event_message( args, MALI_FALSE );
+#endif
+
+ msg = _mali_osk_notification_create( MALI_PMM_NOTIFICATION_TYPE, sizeof( mali_pmm_message_t ) );
+
+ if( msg )
+ {
+ event = (mali_pmm_message_t *)msg->result_buffer;
+ event->id = args->id;
+ event->ts = _mali_osk_time_tickcount();
+ event->data = args->data;
+
+ _mali_osk_atomic_inc( &(pmm->messages_queued) );
+
+ if( args->id > MALI_PMM_EVENT_INTERNALS )
+ {
+ /* Internal PMM message */
+ _mali_osk_notification_queue_send( pmm->iqueue, msg );
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->imessages_sent++;
+ #endif
+ }
+ else
+ {
+ /* Real event */
+ _mali_osk_notification_queue_send( pmm->queue, msg );
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->messages_sent++;
+ #endif
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR( ("PMM: Could not send message %d", args->id) );
+ /* Make note of this OOM - which has caused a missed event */
+ pmm->missed++;
+ }
+
+ /* Schedule time to look at the event or the fact we couldn't create an event */
+ _mali_osk_irq_schedulework( pmm->irq );
+}
+
+mali_pmm_state _mali_pmm_state( void )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ if( pmm && (mali_subsystem_pmm_id != -1) )
+ {
+ return pmm->state;
+ }
+
+ /* No working subsystem yet */
+ return MALI_PMM_STATE_UNAVAILABLE;
+}
+
+
+mali_pmm_core_mask _mali_pmm_cores_list( void )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return pmm->cores_registered;
+}
+
+mali_pmm_core_mask _mali_pmm_cores_powered( void )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return pmm->cores_powered;
+}
+
+
+_mali_osk_errcode_t _mali_pmm_list_policies(
+ u32 policy_list_size,
+ mali_pmm_policy *policy_list,
+ u32 *policies_available )
+{
+ /* TBD - This is currently a stub function for basic power management */
+
+ MALI_ERROR( _MALI_OSK_ERR_UNSUPPORTED );
+}
+
+_mali_osk_errcode_t _mali_pmm_set_policy( mali_pmm_policy policy )
+{
+ /* TBD - This is currently a stub function for basic power management */
+
+/* TBD - When this is not a stub... include tracing...
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_policy_change( old, newpolicy );
+#endif
+*/
+ MALI_ERROR( _MALI_OSK_ERR_UNSUPPORTED );
+}
+
+_mali_osk_errcode_t _mali_pmm_get_policy( mali_pmm_policy *policy )
+{
+ if( policy )
+ {
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ if( pmm )
+ {
+ *policy = pmm->policy;
+ MALI_SUCCESS;
+ }
+ else
+ {
+ *policy = MALI_PMM_POLICY_NONE;
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+ }
+ }
+
+ /* No return argument */
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+}
+
+#if MALI_PMM_TRACE
+
+/* Event names - order must match mali_pmm_event_id enum */
+static char *pmm_trace_events[] = {
+ "OS_POWER_UP",
+ "OS_POWER_DOWN",
+ "JOB_SCHEDULED",
+ "JOB_QUEUED",
+ "JOB_FINISHED",
+ "TIMEOUT",
+};
+
+/* UK event names - order must match mali_pmm_event_id enum */
+static char *pmm_trace_events_uk[] = {
+ "UKS",
+ "UK_EXAMPLE",
+};
+
+/* Internal event names - order must match mali_pmm_event_id enum */
+static char *pmm_trace_events_internal[] = {
+ "INTERNALS",
+ "INTERNAL_POWER_UP_ACK",
+ "INTERNAL_POWER_DOWN_ACK",
+};
+
+/* State names - order must match mali_pmm_state enum */
+static char *pmm_trace_state[] = {
+ "UNAVAILABLE",
+ "SYSTEM ON",
+ "SYSTEM OFF",
+ "SYSTEM TRANSITION",
+};
+
+/* Policy names - order must match mali_pmm_policy enum */
+static char *pmm_trace_policy[] = {
+ "NONE",
+ "ALWAYS ON",
+ "JOB CONTROL",
+};
+
+void _mali_pmm_trace_hardware_change( mali_pmm_core_mask old, mali_pmm_core_mask newstate )
+{
+ const char *dname;
+ const char *cname;
+ const char *ename;
+
+ if( old != newstate )
+ {
+ if( newstate == 0 )
+ {
+ dname = "NO cores";
+ }
+ else
+ {
+ dname = pmm_trace_get_core_name( newstate );
+ }
+
+ /* These state checks only work if the assumption that cores can only be
+ * turned on or turned off in separate actions is true. If core power states can
+ * be toggled (some on, some off) at the same time, this check does not work
+ */
+ if( old > newstate )
+ {
+ /* Cores have turned off */
+ cname = pmm_trace_get_core_name( old - newstate );
+ ename = "OFF";
+ }
+ else
+ {
+ /* Cores have turned on */
+ cname = pmm_trace_get_core_name( newstate - old );
+ ename = "ON";
+ }
+ MALI_PRINT( ("PMM Trace: Hardware %s ON, %s just turned %s. { 0x%08x -> 0x%08x }", dname, cname, ename, old, newstate) );
+ }
+}
+
+void _mali_pmm_trace_state_change( mali_pmm_state old, mali_pmm_state newstate )
+{
+ if( old != newstate )
+ {
+ MALI_PRINT( ("PMM Trace: State changed from %s to %s", pmm_trace_state[old], pmm_trace_state[newstate]) );
+ }
+}
+
+void _mali_pmm_trace_policy_change( mali_pmm_policy old, mali_pmm_policy newpolicy )
+{
+ if( old != newpolicy )
+ {
+ MALI_PRINT( ("PMM Trace: Policy changed from %s to %s", pmm_trace_policy[old], pmm_trace_policy[newpolicy]) );
+ }
+}
+
+void _mali_pmm_trace_event_message( mali_pmm_message_t *event, mali_bool received )
+{
+ const char *ename;
+ const char *dname;
+ const char *tname;
+ const char *format = "PMM Trace: Event %s { (%d) %s, %d ticks, (0x%x) %s }";
+
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ tname = (received) ? "received" : "sent";
+
+ if( event->id >= MALI_PMM_EVENT_INTERNALS )
+ {
+ ename = pmm_trace_events_internal[((int)event->id) - MALI_PMM_EVENT_INTERNALS];
+ }
+ else if( event->id >= MALI_PMM_EVENT_UKS )
+ {
+ ename = pmm_trace_events_uk[((int)event->id) - MALI_PMM_EVENT_UKS];
+ }
+ else
+ {
+ ename = pmm_trace_events[event->id];
+ }
+
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ dname = "os event";
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+ dname = pmm_trace_get_core_name( (mali_pmm_core_mask)event->data );
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ dname = "timeout start";
+ /* Print data with a different format */
+ format = "PMM Trace: Event %s { (%d) %s, %d ticks, %d ticks %s }";
+ break;
+ default:
+ dname = "unknown data";
+ }
+
+ MALI_PRINT( (format, tname, (u32)event->id, ename, event->ts, (u32)event->data, dname) );
+}
+
+#endif /* MALI_PMM_TRACE */
+
+
+/****************** Mali Kernel API *****************/
+
+_mali_osk_errcode_t malipmm_kernel_subsystem_start( mali_kernel_subsystem_identifier id )
+{
+ mali_subsystem_pmm_id = id;
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(PMU, malipmm_create));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t malipmm_create(_mali_osk_resource_t *resource)
+{
+ /* Create PMM state memory */
+ MALI_DEBUG_ASSERT( pmm_state == NULL );
+ pmm_state = (_mali_pmm_internal_state_t *) _mali_osk_malloc(sizeof(*pmm_state));
+ MALI_CHECK_NON_NULL( pmm_state, _MALI_OSK_ERR_NOMEM );
+
+ /* All values get 0 as default */
+ _mali_osk_memset(pmm_state, 0, sizeof(*pmm_state));
+
+ /* Set up the initial PMM state */
+ pmm_state->waiting = 0;
+ pmm_state->status = MALI_PMM_STATUS_IDLE;
+ pmm_state->state = MALI_PMM_STATE_UNAVAILABLE; /* Until a core registers */
+
+ /* Set up policy via compile time option for the moment */
+#if MALI_PMM_ALWAYS_ON
+ pmm_state->policy = MALI_PMM_POLICY_ALWAYS_ON;
+#else
+ pmm_state->policy = MALI_PMM_POLICY_JOB_CONTROL;
+#endif
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_policy_change( MALI_PMM_POLICY_NONE, pmm_state->policy );
+#endif
+
+ /* Set up assumes all values are initialized to NULL or MALI_FALSE, so
+ * we can exit halfway through set up and perform clean up
+ */
+#if !MALI_PMM_NO_PMU
+ if( mali_platform_init(resource) != _MALI_OSK_ERR_OK ) goto pmm_fail_cleanup;
+ pmm_state->pmu_initialized = MALI_TRUE;
+#endif
+
+ pmm_state->queue = _mali_osk_notification_queue_init();
+ if( !pmm_state->queue ) goto pmm_fail_cleanup;
+
+ pmm_state->iqueue = _mali_osk_notification_queue_init();
+ if( !pmm_state->iqueue ) goto pmm_fail_cleanup;
+
+ /* We are creating an IRQ handler just for the worker thread it gives us */
+ pmm_state->irq = _mali_osk_irq_init( _MALI_OSK_IRQ_NUMBER_PMM,
+ malipmm_irq_uhandler,
+ malipmm_irq_bhandler,
+ NULL,
+ NULL,
+ (void *)pmm_state, /* PMM state is passed to IRQ */
+ "PMM handler" );
+
+ if( !pmm_state->irq ) goto pmm_fail_cleanup;
+
+ pmm_state->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 75);
+ if( !pmm_state->lock ) goto pmm_fail_cleanup;
+
+ if( _mali_osk_atomic_init( &(pmm_state->messages_queued), 0 ) != _MALI_OSK_ERR_OK )
+ {
+ goto pmm_fail_cleanup;
+ }
+
+ MALIPMM_DEBUG_PRINT( ("PMM: subsystem created, policy=%d\n", pmm_state->policy) );
+
+ MALI_SUCCESS;
+
+pmm_fail_cleanup:
+ MALI_PRINT_ERROR( ("PMM: subsystem failed to be created\n") );
+ if( pmm_state )
+ {
+ _mali_osk_resource_type_t t = PMU;
+ if( pmm_state->lock ) _mali_osk_lock_term( pmm_state->lock );
+ if( pmm_state->irq ) _mali_osk_irq_term( pmm_state->irq );
+ if( pmm_state->queue ) _mali_osk_notification_queue_term( pmm_state->queue );
+ if( pmm_state->iqueue ) _mali_osk_notification_queue_term( pmm_state->iqueue );
+ if( pmm_state->pmu_initialized ) ( mali_platform_deinit(&t) );
+ _mali_osk_free(pmm_state);
+ pmm_state = NULL;
+ }
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+}
+
+_mali_osk_errcode_t malipmm_kernel_load_complete( mali_kernel_subsystem_identifier id )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: subsystem loaded, policy initializing\n") );
+
+#if PMM_OS_TEST
+ power_test_start();
+#endif
+
+ /* Initialize the policy now that the system has loaded - so that cores are
+ * not turned off during start up
+ */
+ return pmm_policy_init( pmm );
+}
+
+void malipmm_kernel_subsystem_terminate( mali_kernel_subsystem_identifier id )
+{
+ /* Check this is the right system */
+ MALI_DEBUG_ASSERT( id == mali_subsystem_pmm_id );
+ MALI_DEBUG_ASSERT_POINTER(pmm_state);
+
+ if( pmm_state )
+ {
+ _mali_osk_resource_type_t t = PMU;
+#if PMM_OS_TEST
+ power_test_end();
+#endif
+ /* Get the lock so we can shutdown */
+ MALI_PMM_LOCK(pmm_state);
+#if MALI_STATE_TRACKING
+ pmm_state->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+ pmm_state->status = MALI_PMM_STATUS_OFF;
+#if MALI_STATE_TRACKING
+ pmm_state->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+ MALI_PMM_UNLOCK(pmm_state);
+ pmm_policy_term(pmm_state);
+ _mali_osk_irq_term( pmm_state->irq );
+ _mali_osk_notification_queue_term( pmm_state->queue );
+ _mali_osk_notification_queue_term( pmm_state->iqueue );
+ if( pmm_state->pmu_initialized ) mali_platform_deinit(&t);
+ _mali_osk_atomic_term( &(pmm_state->messages_queued) );
+ MALI_PMM_LOCK_TERM(pmm_state);
+ _mali_osk_free(pmm_state);
+ pmm_state = NULL;
+ }
+
+ MALIPMM_DEBUG_PRINT( ("PMM: subsystem terminated\n") );
+}
+
+_mali_osk_errcode_t malipmm_core_register( mali_pmm_core_id core )
+{
+ _mali_osk_errcode_t err;
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+
+ if( pmm == NULL )
+ {
+ /* PMM state has not been created because the PMU resource has not been
+ * created yet.
+ * This probably means that the PMU resource has not been specified as the first
+ * resource in the config file
+ */
+ MALI_PRINT_ERROR( ("PMM: Cannot register core %s because the PMU resource has not been\n initialized. Please make sure the PMU resource is the first resource in the\n resource configuration.\n",
+ pmm_trace_get_core_name(core)) );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ /* Check if the core is registered more than once in PMM */
+ MALI_DEBUG_ASSERT( (pmm->cores_registered & core) == 0 );
+
+ MALIPMM_DEBUG_PRINT( ("PMM: core registered: (0x%x) %s\n", core, pmm_trace_get_core_name(core)) );
+
+#if !MALI_PMM_NO_PMU
+ /* Make sure the core is powered up */
+ err = mali_platform_powerup( core );
+#else
+ err = _MALI_OSK_ERR_OK;
+#endif
+ if( _MALI_OSK_ERR_OK == err )
+ {
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+ /* Assume a registered core is now powered up and idle */
+ pmm->cores_registered |= core;
+ pmm->cores_idle |= core;
+ pmm->cores_powered |= core;
+ pmm_update_system_state( pmm );
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ }
+ else
+ {
+ MALI_PRINT_ERROR( ("PMM: Error(%d) powering up registered core: (0x%x) %s\n",
+ err, core, pmm_trace_get_core_name(core)) );
+ }
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+
+ return err;
+}
+
+void malipmm_core_unregister( mali_pmm_core_id core )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ /* Check if the core is registered in PMM */
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, core );
+
+ MALIPMM_DEBUG_PRINT( ("PMM: core unregistered: (0x%x) %s\n", core, pmm_trace_get_core_name(core)) );
+
+ {
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+
+#if !MALI_PMM_NO_PMU
+ /* Turn off the core */
+ if( mali_platform_powerdown( core ) != _MALI_OSK_ERR_OK )
+ {
+ MALI_PRINT_ERROR( ("PMM: Error powering down unregistered core: (0x%x) %s\n",
+ core, pmm_trace_get_core_name(core)) );
+ }
+#endif
+
+ /* Remove the core from the system */
+ pmm->cores_registered &= (~core);
+ pmm->cores_idle &= (~core);
+ pmm->cores_powered &= (~core);
+ pmm->cores_pend_down &= (~core);
+ pmm->cores_pend_up &= (~core);
+ pmm->cores_ack_down &= (~core);
+ pmm->cores_ack_up &= (~core);
+
+ pmm_update_system_state( pmm );
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ }
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+}
+void malipmm_core_power_down_okay( mali_pmm_core_id core )
+{
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK,
+ 0 };
+
+ event.data = core;
+
+ _mali_ukk_pmm_event_message( &event );
+}
+
+void malipmm_set_policy_check()
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ pmm->check_policy = MALI_TRUE;
+
+ /* To check the policy we need to schedule some work */
+ _mali_osk_irq_schedulework( pmm->irq );
+}
+
+_mali_osk_errcode_t malipmm_irq_uhandler(void *data)
+{
+ MALIPMM_DEBUG_PRINT( ("PMM: uhandler - not expected to be used\n") );
+
+ MALI_SUCCESS;
+}
+
+void malipmm_irq_bhandler(void *data)
+{
+ _mali_pmm_internal_state_t *pmm;
+ pmm = (_mali_pmm_internal_state_t *)data;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+#if PMM_OS_TEST
+ if( power_test_check() ) return;
+#endif
+
+ MALI_PMM_LOCK(pmm);
+#ifdef MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ /* Quick out when we are shutting down */
+ if( pmm->status == MALI_PMM_STATUS_OFF )
+ {
+
+ #if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+ #endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ return;
+ }
+
+ MALIPMM_DEBUG_PRINT( ("PMM: bhandler - Processing event\n") );
+
+ if( pmm->missed > 0 )
+ {
+ MALI_PRINT_ERROR( ("PMM: Failed to send %d events", pmm->missed) );
+ pmm_fatal_reset( pmm );
+ }
+
+ if( pmm->check_policy )
+ {
+ pmm->check_policy = MALI_FALSE;
+ pmm_policy_check_policy(pmm);
+ }
+ else
+ {
+ /* Perform event processing */
+ pmm_event_process();
+ if( pmm->fatal_power_err )
+ {
+ /* Try a reset */
+ pmm_fatal_reset( pmm );
+ }
+ }
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+}
+
+static void pmm_event_process( void )
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_osk_notification_t *msg = NULL;
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ mali_pmm_message_t *event;
+ u32 process_messages;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+
+ /* Max number of messages to process before exiting - as we shouldn't stay
+ * processing the messages for a long time
+ */
+ process_messages = _mali_osk_atomic_read( &(pmm->messages_queued) );
+
+ while( process_messages > 0 )
+ {
+ /* Check internal message queue first */
+ err = _mali_osk_notification_queue_dequeue( pmm->iqueue, &msg );
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ if( pmm->status == MALI_PMM_STATUS_IDLE || pmm->status == MALI_PMM_STATUS_OS_WAITING || pmm->status == MALI_PMM_STATUS_DVFS_PAUSE)
+ {
+ if( pmm->waiting > 0 ) pmm->waiting--;
+
+ /* We aren't busy changing state, so look at real events */
+ err = _mali_osk_notification_queue_dequeue( pmm->queue, &msg );
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ pmm->no_events++;
+ MALIPMM_DEBUG_PRINT( ("PMM: event_process - No message to process\n") );
+ /* Nothing to do - so return */
+ return;
+ }
+ else
+ {
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->messages_received++;
+ #endif
+ }
+ }
+ else
+ {
+ /* Waiting for an internal message */
+ pmm->waiting++;
+ MALIPMM_DEBUG_PRINT( ("PMM: event_process - Waiting for internal message, messages queued=%d\n", pmm->waiting) );
+ return;
+ }
+ }
+ else
+ {
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->imessages_received++;
+ #endif
+ }
+
+ MALI_DEBUG_ASSERT_POINTER( msg );
+ /* Check the message type matches */
+ MALI_DEBUG_ASSERT( msg->notification_type == MALI_PMM_NOTIFICATION_TYPE );
+
+ event = msg->result_buffer;
+
+ _mali_osk_atomic_dec( &(pmm->messages_queued) );
+ process_messages--;
+
+ #if MALI_PMM_TRACE
+ /* Trace before we process the event in case we have an error */
+ _mali_pmm_trace_event_message( event, MALI_TRUE );
+ #endif
+ err = pmm_policy_process( pmm, event );
+
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_PRINT_ERROR( ("PMM: Error(%d) in policy %d when processing event message with id: %d",
+ err, pmm->policy, event->id) );
+ }
+
+ /* Delete notification */
+ _mali_osk_notification_delete ( msg );
+
+ if( pmm->fatal_power_err )
+ {
+ /* Nothing good has happened - exit */
+ return;
+ }
+
+
+ #if MALI_PMM_TRACE
+ MALI_PRINT( ("PMM Trace: Event processed, msgs (sent/read) = %d/%d, int msgs (sent/read) = %d/%d, no events = %d, waiting = %d\n",
+ pmm->messages_sent, pmm->messages_received, pmm->imessages_sent, pmm->imessages_received, pmm->no_events, pmm->waiting) );
+ #endif
+ }
+
+ if( pmm->status == MALI_PMM_STATUS_IDLE && pmm->waiting > 0 )
+ {
+ /* For events we ignored whilst we were busy, add a new
+ * scheduled time to look at them */
+ _mali_osk_irq_schedulework( pmm->irq );
+ }
+}
+
+#if MALI_STATE_TRACKING
+void malipmm_subsystem_dump_state(void)
+{
+ malipmm_state_dump();
+}
+#endif
+
+#if (defined(DEBUG) || MALI_STATE_TRACKING)
+void malipmm_state_dump()
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+
+ if( !pmm )
+ {
+ MALI_PRINT(("PMM: Null state\n"));
+ }
+ else
+ {
+ MALI_PRINT(("Locks::\nPMM_LOCK_STATUS=%ld",pmm->mali_pmm_lock_acquired));
+ MALI_PRINT(("PMM state:\nPrevious_status=%d\nstatus=%d\nCurrent_event=%d\npolicy=%d\ncheck_policy=%d\nstate=%d\n", pmm->mali_last_pmm_status,pmm->status, pmm->mali_new_event_status, pmm->policy, pmm->check_policy, pmm->state));
+ MALI_PRINT(("PMM cores:\ncores_registered=%d\ncores_powered=%d\ncores_idle=%d\ncores_pend_down=%d\ncores_pend_up=%d\ncores_ack_down=%d\ncores_ack_up=%d\n", pmm->cores_registered, pmm->cores_powered, pmm->cores_idle, pmm->cores_pend_down, pmm->cores_pend_up, pmm->cores_ack_down, pmm->cores_ack_up));
+ MALI_PRINT(("PMM misc:\npmu_init=%d\nmessages_queued=%d\nwaiting=%d\nno_events=%d\nmissed=%d\nfatal_power_err=%d\n", pmm->pmu_initialized, _mali_osk_atomic_read( &(pmm->messages_queued) ), pmm->waiting, pmm->no_events, pmm->missed, pmm->fatal_power_err));
+ }
+}
+#endif
+
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h
new file mode 100644
index 00000000000..fe7a046cb47
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm.h
+ * Defines the power management module for the kernel device driver
+ */
+
+#ifndef __MALI_PMM_H__
+#define __MALI_PMM_H__
+
+/* For mali_pmm_message_data and MALI_PMM_EVENT_UK_* defines */
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @defgroup pmmapi Power Management Module APIs
+ *
+ * @{
+ */
+
+/** OS event tester */
+#define PMM_OS_TEST 0
+
+/** @brief Compile option to turn on/off tracing */
+#define MALI_PMM_TRACE 0
+#define MALI_PMM_TRACE_SENT_EVENTS 0
+
+/** @brief Compile option to switch between always on or job control PMM policy */
+#define MALI_PMM_ALWAYS_ON 0
+
+/** @brief Overrides hardware PMU and uses software simulation instead
+ * @note This even stops initialization of the PMU and cores being powered on at start up
+ */
+#define MALI_PMM_NO_PMU 0
+
+/** @brief PMM debug print to control debug message level */
+#define MALIPMM_DEBUG_PRINT(args) \
+ MALI_DEBUG_PRINT(3, args)
+
+
+/** @brief power management event message identifiers.
+ */
+/* These must match up with the pmm_trace_events & pmm_trace_events_internal
+ * arrays
+ */
+typedef enum mali_pmm_event_id
+{
+ MALI_PMM_EVENT_OS_POWER_UP = 0, /**< OS power up event */
+ MALI_PMM_EVENT_OS_POWER_DOWN = 1, /**< OS power down event */
+ MALI_PMM_EVENT_JOB_SCHEDULED = 2, /**< Job scheduled to run event */
+ MALI_PMM_EVENT_JOB_QUEUED = 3, /**< Job queued (but not run) event */
+ MALI_PMM_EVENT_JOB_FINISHED = 4, /**< Job finished event */
+ MALI_PMM_EVENT_TIMEOUT = 5, /**< Time out timer has expired */
+ MALI_PMM_EVENT_DVFS_PAUSE = 6, /**< Mali device pause event */
+ MALI_PMM_EVENT_DVFS_RESUME = 7, /**< Mali device resume event */
+
+ MALI_PMM_EVENT_UKS = 200, /**< Events from the user-side start here */
+ MALI_PMM_EVENT_UK_EXAMPLE = _MALI_PMM_EVENT_UK_EXAMPLE,
+
+ MALI_PMM_EVENT_INTERNALS = 1000,
+ MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK = 1001, /**< Internal power up acknowledgement */
+ MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK = 1002, /**< Internal power down acknowledgment */
+} mali_pmm_event_id;
+
+
+/** @brief Use this when the power up/down callbacks do not need any OS data. */
+#define MALI_PMM_NO_OS_DATA 1
+
+
+/* @brief Geometry and pixel processor identifiers for the PMM
+ *
+ * @note these match the ARM Mali 400 PMU hardware definitions, apart from the "SYSTEM"
+ */
+typedef enum mali_pmm_core_id_tag
+{
+ MALI_PMM_CORE_SYSTEM = 0x00000000, /**< All of the Mali hardware */
+ MALI_PMM_CORE_GP = 0x00000001, /**< Mali GP2 */
+ MALI_PMM_CORE_L2 = 0x00000002, /**< Level 2 cache */
+ MALI_PMM_CORE_PP0 = 0x00000004, /**< Mali 200 pixel processor 0 */
+ MALI_PMM_CORE_PP1 = 0x00000008, /**< Mali 200 pixel processor 1 */
+ MALI_PMM_CORE_PP2 = 0x00000010, /**< Mali 200 pixel processor 2 */
+ MALI_PMM_CORE_PP3 = 0x00000020, /**< Mali 200 pixel processor 3 */
+ MALI_PMM_CORE_PP_ALL = 0x0000003C /**< Mali 200 pixel processors 0-3 */
+} mali_pmm_core_id;
+
+/* @brief PMM bitmask of mali_pmm_core_ids
+ */
+typedef u32 mali_pmm_core_mask;
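+/* Usage sketch (illustrative only): a core mask is built by OR-ing
+ * mali_pmm_core_id values together, e.g.
+ *
+ *   mali_pmm_core_mask mask = MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0;
+ *
+ * MALI_PMM_CORE_PP_ALL is a convenience value that covers pixel processors 0-3.
+ */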
+
+/* @brief PMM event timestamp type
+ */
+typedef u32 mali_pmm_timestamp;
+
+/** @brief power management event message struct
+ */
+typedef struct _mali_pmm_message
+{
+ mali_pmm_event_id id; /**< event id */
+ mali_pmm_message_data data; /**< specific data associated with the event */
+ mali_pmm_timestamp ts; /**< timestamp the event was placed in the event queue */
+} mali_pmm_message_t;
+
+
+
+/** @brief the state of the power management module.
+ */
+/* These must match up with the pmm_trace_state array */
+typedef enum mali_pmm_state_tag
+{
+ MALI_PMM_STATE_UNAVAILABLE = 0, /**< PMM is not available */
+ MALI_PMM_STATE_SYSTEM_ON = 1, /**< All of the Mali hardware is on */
+ MALI_PMM_STATE_SYSTEM_OFF = 2, /**< All of the Mali hardware is off */
+ MALI_PMM_STATE_SYSTEM_TRANSITION = 3 /**< System is changing state */
+} mali_pmm_state;
+
+
+/** @brief a power management policy.
+ */
+/* These must match up with the pmm_trace_policy array */
+typedef enum mali_pmm_policy_tag
+{
+ MALI_PMM_POLICY_NONE = 0, /**< No policy */
+ MALI_PMM_POLICY_ALWAYS_ON = 1, /**< Always on policy */
+ MALI_PMM_POLICY_JOB_CONTROL = 2, /**< Job control policy */
+ MALI_PMM_POLICY_RUNTIME_JOB_CONTROL = 3 /**< Run time power management control policy */
+} mali_pmm_policy;
+
+/** @brief Function to report to the OS when the power down has finished
+ *
+ * @param data The event message data that initiated the power down
+ */
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data);
+
+/** @brief Function to report to the OS when the power up has finished
+ *
+ * @param data The event message data that initiated the power up
+ */
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data);
+
+/** @brief Function to report that the DVFS operation is done
+ *
+ * @param data The event message data
+ */
+void _mali_osk_pmm_dvfs_operation_done(mali_pmm_message_data data);
+
+#if MALI_POWER_MGMT_TEST_SUITE
+/** @brief Function to notify power management events
+ *
+ * @param data The event message data
+ */
+void _mali_osk_pmm_policy_events_notifications(mali_pmm_event_id event_id);
+
+#endif
+
+/** @brief Function to report to the OS that the device is idle
+ *
+ * @note informs the OS that the device is idle
+ */
+_mali_osk_errcode_t _mali_osk_pmm_dev_idle( void );
+
+/** @brief Function to request the OS to activate the device
+ *
+ * @note informs the OS that the device needs to be activated
+ */
+void _mali_osk_pmm_dev_activate( void );
+
+/** @brief Queries the current state of the PMM software
+ *
+ * @note the state of the PMM can change after this call has returned
+ *
+ * @return the current PMM state value
+ */
+mali_pmm_state _mali_pmm_state( void );
+
+/** @brief List of cores that are registered with the PMM
+ *
+ * This will return the cores that have been currently registered with the PMM,
+ * which is a bitwise OR of the mali_pmm_core_id_tags. A value of 0x0 means that
+ * there are no cores registered.
+ *
+ * @note the list of cores can change after this call has returned
+ *
+ * @return a bit mask representing all the cores that have been registered with the PMM
+ */
+mali_pmm_core_mask _mali_pmm_cores_list( void );
+
+/** @brief List of cores that are powered up in the PMM
+ *
+ * This will return the subset of the cores listed by _mali_pmm_cores_list that
+ * have power. It is a bitwise OR of the mali_pmm_core_id values. A value of
+ * 0x0 means that none of the registered cores are powered.
+ *
+ * @note the list of cores can change after this call has returned
+ *
+ * @return a bit mask representing all the cores that are powered up
+ */
+mali_pmm_core_mask _mali_pmm_cores_powered( void );
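+/* Usage sketch (illustrative only) for the two queries above: check whether
+ * every registered core currently has power. Note that both values can change
+ * again as soon as the calls return.
+ *
+ *   mali_pmm_core_mask registered = _mali_pmm_cores_list();
+ *   mali_pmm_core_mask powered = _mali_pmm_cores_powered();
+ *
+ *   if( registered != 0 && powered == registered )
+ *   {
+ *       // all registered cores are powered up
+ *   }
+ */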
+
+
+/** @brief List of power management policies that are supported by the PMM
+ *
+ * Given an empty array of policies - policy_list - with room for the number
+ * of entries specified by policy_list_size, this function will populate
+ * the list with the available policies. If the policy_list is too small for
+ * all the policies then only policy_list_size entries will be returned. If the
+ * policy_list is bigger than the number of available policies then the extra
+ * entries will be set to MALI_PMM_POLICY_NONE.
+ * The function will also update policies_available with the number of policies
+ * that are available, even if it exceeds policy_list_size.
+ * The function will succeed if all policies could be returned, and fail if
+ * none or only a subset of the policies could be returned.
+ * The function will also fail if no policy_list is supplied, though
+ * policies_available is optional.
+ *
+ * @note this is a STUB function and is not yet implemented
+ *
+ * @param policy_list_size is the number of policies that can be returned in
+ * the policy_list argument
+ * @param policy_list is an array of policies that should be populated with
+ * the list of policies that are supported by the PMM
+ * @param policies_available optional argument, if non-NULL will be set to the
+ * number of policies available
+ * @return _MALI_OSK_ERR_OK if the policies could be listed, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_list_policies(
+ u32 policy_list_size,
+ mali_pmm_policy *policy_list,
+ u32 *policies_available );
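+/* Usage sketch (illustrative only; the function above is documented as a stub):
+ * query the supported policies into a fixed-size local array. The array size of
+ * 4 is an arbitrary choice for the example.
+ *
+ *   mali_pmm_policy list[4];
+ *   u32 available = 0;
+ *
+ *   if( _mali_pmm_list_policies( 4, list, &available ) == _MALI_OSK_ERR_OK )
+ *   {
+ *       // all "available" policies fitted into "list"
+ *   }
+ */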
+
+/** @brief Set the power management policy in the PMM
+ *
+ * Given a valid supported policy, this function will change the PMM to use
+ * this new policy.
+ * The function will fail if the policy given is invalid or unsupported.
+ *
+ * @note this is a STUB function and is not yet implemented
+ *
+ * @param policy the new policy to be set
+ * @return _MALI_OSK_ERR_OK if the policy could be set, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_set_policy( mali_pmm_policy policy );
+
+/** @brief Get the current power management policy in the PMM
+ *
+ * Given a pointer to a policy data type, this function will return the current
+ * policy that is in effect for the PMM. This may be out of date if there is a
+ * pending set policy call that has not been serviced.
+ * The function will fail if the policy pointer given is NULL.
+ *
+ * @note the policy of the PMM can change after this call has returned
+ *
+ * @param policy a pointer to a policy that can be updated to the current
+ * policy
+ * @return _MALI_OSK_ERR_OK if the policy could be returned, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_get_policy( mali_pmm_policy *policy );
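+/* Usage sketch (illustrative only; _mali_pmm_set_policy is documented above as
+ * a stub): read the current policy and switch to job control if it is not
+ * already in use.
+ *
+ *   mali_pmm_policy current;
+ *
+ *   if( _mali_pmm_get_policy( &current ) == _MALI_OSK_ERR_OK &&
+ *       current != MALI_PMM_POLICY_JOB_CONTROL )
+ *   {
+ *       (void)_mali_pmm_set_policy( MALI_PMM_POLICY_JOB_CONTROL );
+ *   }
+ */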
+
+#if MALI_PMM_TRACE
+
+/** @brief Indicates when a hardware state change occurs in the PMM
+ *
+ * @param old a mask of the cores indicating the previous state of the cores
+ * @param newstate a mask of the cores indicating the new current state of the cores
+ */
+void _mali_pmm_trace_hardware_change( mali_pmm_core_mask old, mali_pmm_core_mask newstate );
+
+/** @brief Indicates when a state change occurs in the PMM
+ *
+ * @param old the previous state for the PMM
+ * @param newstate the new current state of the PMM
+ */
+void _mali_pmm_trace_state_change( mali_pmm_state old, mali_pmm_state newstate );
+
+/** @brief Indicates when a policy change occurs in the PMM
+ *
+ * @param old the previous policy for the PMM
+ * @param newpolicy the new current policy of the PMM
+ */
+void _mali_pmm_trace_policy_change( mali_pmm_policy old, mali_pmm_policy newpolicy );
+
+/** @brief Records when an event message is read by the event system
+ *
+ * @param event the message details
+ * @param received MALI_TRUE when the message is received by the PMM, else it is being sent
+ */
+void _mali_pmm_trace_event_message( mali_pmm_message_t *event, mali_bool received );
+
+#endif /* MALI_PMM_TRACE */
+
+/** @brief Dumps the current state of OS PMM thread
+ */
+#if MALI_STATE_TRACKING
+void mali_pmm_dump_os_thread_state( void );
+#endif /* MALI_STATE_TRACKING */
+
+#if (defined(DEBUG) || MALI_STATE_TRACKING)
+/** @brief Dumps the current state of the PMM
+ */
+void malipmm_state_dump( void );
+#endif
+
+/** @} */ /* end group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c
new file mode 100644
index 00000000000..327e8b4c853
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy.c
+ * Implementation of the common routines for power management module
+ * policies
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+
+#include "mali_pmm_policy_alwayson.h"
+#include "mali_pmm_policy_jobcontrol.h"
+
+/* Call back function for timer expiration */
+static void pmm_policy_timer_callback( void *arg );
+
+_mali_osk_errcode_t pmm_policy_timer_init( _pmm_policy_timer_t *pptimer, u32 timeout, mali_pmm_event_id id )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* All values get 0 as default */
+ _mali_osk_memset(pptimer, 0, sizeof(*pptimer));
+
+ pptimer->timer = _mali_osk_timer_init();
+ if( pptimer->timer )
+ {
+ _mali_osk_timer_setcallback( pptimer->timer, pmm_policy_timer_callback, (void *)pptimer );
+ pptimer->timeout = timeout;
+ pptimer->event_id = id;
+ MALI_SUCCESS;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+static void pmm_policy_timer_callback( void *arg )
+{
+ _pmm_policy_timer_t *pptimer = (_pmm_policy_timer_t *)arg;
+
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+ MALI_DEBUG_ASSERT( pptimer->set );
+
+ /* Set timer expired and flag there is a policy to check */
+ pptimer->expired = MALI_TRUE;
+ malipmm_set_policy_check();
+}
+
+
+void pmm_policy_timer_term( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ _mali_osk_timer_del( pptimer->timer );
+ _mali_osk_timer_term( pptimer->timer );
+ pptimer->timer = NULL;
+}
+
+mali_bool pmm_policy_timer_start( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+ MALI_DEBUG_ASSERT_POINTER(pptimer->timer);
+
+ if( !(pptimer->set) )
+ {
+ pptimer->set = MALI_TRUE;
+ pptimer->expired = MALI_FALSE;
+ pptimer->start = _mali_osk_time_tickcount();
+ _mali_osk_timer_add( pptimer->timer, pptimer->timeout );
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+mali_bool pmm_policy_timer_stop( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+ MALI_DEBUG_ASSERT_POINTER(pptimer->timer);
+
+ if( pptimer->set )
+ {
+ _mali_osk_timer_del( pptimer->timer );
+ pptimer->set = MALI_FALSE;
+ pptimer->expired = MALI_FALSE;
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+mali_bool pmm_policy_timer_raise_event( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ if( pptimer->expired )
+ {
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_TIMEOUT, /* Assume timeout id, but set it below */
+ 0 };
+
+ event.id = pptimer->event_id;
+ event.data = (mali_pmm_message_data)pptimer->start;
+
+ /* Don't need to do any other notification with this timer */
+ pptimer->expired = MALI_FALSE;
+ /* Unset timer so it is free to be set again */
+ pptimer->set = MALI_FALSE;
+
+ _mali_ukk_pmm_event_message( &event );
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+mali_bool pmm_policy_timer_valid( u32 timer_start, u32 other_start )
+{
+ return (_mali_osk_time_after( other_start, timer_start ) == 0);
+}
+
+
+_mali_osk_errcode_t pmm_policy_init(_mali_pmm_internal_state_t *pmm)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_ALWAYS_ON:
+ {
+ err = pmm_policy_init_always_on();
+ }
+ break;
+
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ err = pmm_policy_init_job_control(pmm);
+ }
+ break;
+
+ case MALI_PMM_POLICY_NONE:
+ default:
+ err = _MALI_OSK_ERR_FAULT;
+ }
+
+ return err;
+}
+
+void pmm_policy_term(_mali_pmm_internal_state_t *pmm)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_ALWAYS_ON:
+ {
+ pmm_policy_term_always_on();
+ }
+ break;
+
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ pmm_policy_term_job_control();
+ }
+ break;
+
+ case MALI_PMM_POLICY_NONE:
+ default:
+ MALI_PRINT_ERROR( ("PMM: Invalid policy terminated %d\n", pmm->policy) );
+ }
+}
+
+
+_mali_osk_errcode_t pmm_policy_process(_mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_ALWAYS_ON:
+ {
+ err = pmm_policy_process_always_on( pmm, event );
+ }
+ break;
+
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ err = pmm_policy_process_job_control( pmm, event );
+ }
+ break;
+
+ case MALI_PMM_POLICY_NONE:
+ default:
+ err = _MALI_OSK_ERR_FAULT;
+ }
+
+ return err;
+}
+
+
+void pmm_policy_check_policy( _mali_pmm_internal_state_t *pmm )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ pmm_policy_check_job_control();
+ }
+ break;
+
+ default:
+ /* Nothing needs to be done */
+ break;
+ }
+}
+
+
+#endif /* USING_MALI_PMM */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h
new file mode 100644
index 00000000000..83cb7f29a92
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy.h
+ * Defines the power management module policies
+ */
+
+#ifndef __MALI_PMM_POLICY_H__
+#define __MALI_PMM_POLICY_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief Generic timer for use with policies
+ */
+typedef struct _pmm_policy_timer
+{
+ u32 timeout; /**< Timeout for this timer in ticks */
+ mali_pmm_event_id event_id; /**< Event id that will be raised when timer expires */
+ _mali_osk_timer_t *timer; /**< Timer */
+ mali_bool set; /**< Timer set */
+ mali_bool expired; /**< Timer expired - event needs to be raised */
+ u32 start; /**< Timer start ticks */
+} _pmm_policy_timer_t;
+
+/** @brief Policy timer initialization
+ *
+ * This will create a timer for use in policies, but won't start it
+ *
+ * @param pptimer An empty timer structure to be initialized
+ * @param timeout Timeout in ticks for the timer
+ * @param id Event id that will be raised on timeout
+ * @return _MALI_OSK_ERR_OK if the timer could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_timer_init( _pmm_policy_timer_t *pptimer, u32 timeout, mali_pmm_event_id id );
+
+/** @brief Policy timer termination
+ *
+ * This will clean up a timer that was previously used in policies; it
+ * will also stop the timer if it has been started
+ *
+ * @param pptimer An initialized timer structure to be terminated
+ */
+void pmm_policy_timer_term( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer start
+ *
+ * This will start a previously created timer for use in policies.
+ * When the timer expires after the initialized timeout it will raise
+ * a PMM event with the event id given at initialization.
+ * As data for the event it will pass the start time of the timer.
+ *
+ * @param pptimer A previously initialized policy timer
+ * @return MALI_TRUE if the timer was started, MALI_FALSE if it is already started
+ */
+mali_bool pmm_policy_timer_start( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer stop
+ *
+ * This will stop a previously created timer for use in policies
+ *
+ * @param pptimer A previously started policy timer
+ * @return MALI_TRUE if the timer was stopped, MALI_FALSE if it is already stopped
+ */
+mali_bool pmm_policy_timer_stop( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer event raise
+ *
+ * This raises an event for an expired timer
+ *
+ * @param pptimer An expired policy timer
+ * @return MALI_TRUE if an event was raised, else MALI_FALSE
+ */
+mali_bool pmm_policy_timer_raise_event( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer valid checker
+ *
+ * This will check that a timer was started after a given time
+ *
+ * @param timer_start Time the timer was started
+ * @param other_start Time when another event or action occurred
+ * @return MALI_TRUE if the timer was started after the other time, else MALI_FALSE
+ */
+mali_bool pmm_policy_timer_valid( u32 timer_start, u32 other_start );
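+/* Usage sketch (illustrative only) of the timer API above, as a policy might
+ * drive it; the timeout of 50 ticks is an arbitrary value for the example.
+ *
+ *   _pmm_policy_timer_t timer;
+ *
+ *   if( pmm_policy_timer_init( &timer, 50, MALI_PMM_EVENT_TIMEOUT ) == _MALI_OSK_ERR_OK )
+ *   {
+ *       pmm_policy_timer_start( &timer );
+ *       // ...on expiry the callback marks the timer expired and schedules a
+ *       // policy check, which then calls:
+ *       pmm_policy_timer_raise_event( &timer );
+ *       // finally, clean up:
+ *       pmm_policy_timer_term( &timer );
+ *   }
+ */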
+
+
+/** @brief Common policy initialization
+ *
+ * This will initialize the current policy
+ *
+ * @note Any previously initialized policy should be terminated first
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Common policy termination
+ *
+ * This will terminate the current policy.
+ * @note This can be called when a policy has not been initialized
+ */
+void pmm_policy_term( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Common policy state changer
+ *
+ * Given the next available event message, this routine passes it to
+ * the current policy for processing
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+
+/** @brief Common policy checker
+ *
+ * If a policy timer fires then this function will be called to
+ * allow the policy to take the correct action
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_policy_check_policy( _mali_pmm_internal_state_t *pmm );
+
+/** @} */ /* End group pmmapi_policy */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c
new file mode 100644
index 00000000000..643bb04553b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_alwayson.c
+ * Implementation of the power management module policy - always on
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_pmm_policy_alwayson.h"
+
+_mali_osk_errcode_t pmm_policy_init_always_on(void)
+{
+ /* Nothing to set up */
+ MALI_SUCCESS;
+}
+
+void pmm_policy_term_always_on(void)
+{
+ /* Nothing to tear down */
+}
+
+_mali_osk_errcode_t pmm_policy_process_always_on( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* We aren't going to do anything, but signal so we don't block the OS
+ * NOTE: This may adversely affect any jobs Mali is currently running
+ */
+ _mali_osk_pmm_power_down_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+ /* Not expected in this policy */
+ MALI_DEBUG_ASSERT( MALI_FALSE );
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ /* Nothing to do */
+ _mali_osk_pmm_power_up_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ /* Nothing to do - we are always on */
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ /* Not expected in this policy */
+ MALI_DEBUG_ASSERT( MALI_FALSE );
+ break;
+
+ default:
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+
+ MALI_SUCCESS;
+}
+
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h
new file mode 100644
index 00000000000..a158b09f610
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_alwayson.h
+ * Defines the power management module policy for always on
+ */
+
+#ifndef __MALI_PMM_POLICY_ALWAYSON_H__
+#define __MALI_PMM_POLICY_ALWAYSON_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief Always on policy initialization
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init_always_on(void);
+
+/** @brief Always on policy termination
+ */
+void pmm_policy_term_always_on(void);
+
+/** @brief Always on policy state changer
+ *
+ * Given the next available event message, this routine processes it
+ * for the policy and changes state as needed.
+ *
+ * Always on policy will ignore all events and keep the Mali cores on
+ * all the time
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process_always_on( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @} */ /* End group pmmapi_policy */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_ALWAYSON_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c
new file mode 100644
index 00000000000..8450bd722f4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_jobcontrol.c
+ * Implementation of the power management module policy - job control
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_pmm_policy_jobcontrol.h"
+
+typedef struct _pmm_policy_data_job_control
+{
+ _pmm_policy_timer_t latency; /**< Latency timeout timer for all cores */
+ u32 core_active_start; /**< Last time a core was set to active */
+ u32 timeout; /**< Timeout in ticks for latency timer */
+} _pmm_policy_data_job_control_t;
+
+
+/* @brief Local data for this policy
+ */
+static _pmm_policy_data_job_control_t *data_job_control = NULL;
+
+/* @brief Set up the timeout if it hasn't already been set and if there are powered, idle cores */
+static void job_control_timeout_setup( _mali_pmm_internal_state_t *pmm, _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* Do we have an inactivity time out and some powered cores? */
+ if( pptimer->timeout > 0 && pmm->cores_powered != 0 )
+ {
+ /* Is the system idle and all the powered cores are idle? */
+ if( pmm->status == MALI_PMM_STATUS_IDLE && pmm->cores_idle == pmm->cores_powered )
+ {
+ if( pmm_policy_timer_start(pptimer) )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM policy - Job control: Setting in-activity latency timer\n") );
+ }
+ }
+ else
+ {
+ /* We are not idle so there is no need for an inactivity timer
+ */
+ if( pmm_policy_timer_stop(pptimer) )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM policy - Job control: Removing in-activity latency timer\n") );
+ }
+ }
+ }
+}
+
+/* @brief Check the validity of the timeout - and whether one is set at all */
+static mali_bool job_control_timeout_valid( _mali_pmm_internal_state_t *pmm, _pmm_policy_timer_t *pptimer, u32 timer_start )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* Not a valid timer! */
+ if( pptimer->timeout == 0 ) return MALI_FALSE;
+
+ /* Are some cores powered and are they all idle? */
+ if( (pmm->cores_powered != 0) && (pmm->cores_idle == pmm->cores_powered) )
+ {
+ /* Has latency timeout started after the last core was active? */
+ if( pmm_policy_timer_valid( timer_start, data_job_control->core_active_start ) )
+ {
+ return MALI_TRUE;
+ }
+ else
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: In-activity latency time out ignored - out of date\n") );
+ }
+ }
+ else
+ {
+ if( pmm->cores_powered == 0 )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: In-activity latency time out ignored - cores already off\n") );
+ }
+ else
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: In-activity latency time out ignored - cores active\n") );
+ }
+ }
+
+ return MALI_FALSE;
+}
+
+_mali_osk_errcode_t pmm_policy_init_job_control( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_POINTER( pmm );
+ MALI_DEBUG_ASSERT( data_job_control == NULL );
+
+ data_job_control = (_pmm_policy_data_job_control_t *) _mali_osk_malloc(sizeof(*data_job_control));
+ MALI_CHECK_NON_NULL( data_job_control, _MALI_OSK_ERR_NOMEM );
+
+ data_job_control->core_active_start = _mali_osk_time_tickcount();
+ data_job_control->timeout = MALI_PMM_POLICY_JOBCONTROL_INACTIVITY_TIMEOUT;
+
+ err = pmm_policy_timer_init( &data_job_control->latency, data_job_control->timeout, MALI_PMM_EVENT_TIMEOUT );
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ _mali_osk_free( data_job_control );
+ data_job_control = NULL;
+ return err;
+ }
+
+ /* Start the latency timeout */
+ job_control_timeout_setup( pmm, &data_job_control->latency );
+
+ MALI_SUCCESS;
+}
+
+void pmm_policy_term_job_control(void)
+{
+ if( data_job_control != NULL )
+ {
+ pmm_policy_timer_term( &data_job_control->latency );
+ _mali_osk_free( data_job_control );
+ data_job_control = NULL;
+ }
+}
+
+static void pmm_policy_job_control_job_queued( _mali_pmm_internal_state_t *pmm )
+{
+ mali_pmm_core_mask cores;
+ mali_pmm_core_mask cores_subset;
+
+ /* Make sure that all cores are powered in this
+ * simple policy
+ */
+ cores = pmm->cores_registered;
+ cores_subset = pmm_cores_to_power_up( pmm, cores );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering up */
+ if( !pmm_invoke_power_up( pmm ) )
+ {
+ /* Need to wait until finished */
+ pmm->status = MALI_PMM_STATUS_POLICY_POWER_UP;
+ }
+ }
+}
+
+_mali_osk_errcode_t pmm_policy_process_job_control( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ mali_pmm_core_mask cores;
+ mali_pmm_core_mask cores_subset;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+ MALI_DEBUG_ASSERT_POINTER(data_job_control);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Job control policy process start - status=%d\n", pmm->status) );
+
+ /* Mainly the data is the cores */
+ cores = pmm_cores_from_event_data( pmm, event );
+
+#if MALI_STATE_TRACKING
+ pmm->mali_last_pmm_status = pmm->status;
+#endif /* MALI_STATE_TRACKING */
+
+ switch( pmm->status )
+ {
+ /**************** IDLE ****************/
+ case MALI_PMM_STATUS_IDLE:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ /* Not expected in this state */
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+
+ /* Update idle cores to indicate they are active - remove these cores from the idle set */
+ pmm_cores_set_active( pmm, cores );
+ /* Remember when this happened */
+ data_job_control->core_active_start = event->ts;
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_SCHEDULED);
+#endif
+
+ /*** FALL THROUGH to QUEUED to check POWER UP ***/
+
+ case MALI_PMM_EVENT_JOB_QUEUED:
+
+ pmm_policy_job_control_job_queued( pmm );
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_QUEUED);
+#endif
+ break;
+
+ case MALI_PMM_EVENT_DVFS_PAUSE:
+
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_FALSE );
+ if ( cores_subset != 0 )
+ {
+ if ( !pmm_power_down_okay( pmm ) )
+ {
+ pmm->is_dvfs_active = 1;
+ pmm->status = MALI_PMM_STATUS_OS_POWER_DOWN;
+ pmm_save_os_event_data( pmm, event->data );
+ break;
+ }
+ }
+ pmm->status = MALI_PMM_STATUS_DVFS_PAUSE;
+ _mali_osk_pmm_dvfs_operation_done(0);
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+
+ /* Need to power down all cores even if we need to wait for them */
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_FALSE );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering down */
+ if( !pmm_invoke_power_down( pmm ) )
+ {
+ /* We need to wait until they are idle */
+
+ pmm->status = MALI_PMM_STATUS_OS_POWER_DOWN;
+ /* Save the OS data to respond later */
+ pmm_save_os_event_data( pmm, event->data );
+ /* Exit this case - as we have to wait */
+ break;
+ }
+ }
+ /* Set waiting status */
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ /* All cores now down - respond to OS power event */
+ _mali_osk_pmm_power_down_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_JOB_FINISHED:
+
+ /* Update idle cores - add these cores to the idle set */
+ pmm_cores_set_idle( pmm, cores );
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_FINISHED);
+#endif
+ if( data_job_control->timeout > 0 )
+ {
+ /* Wait for time out to fire */
+ break;
+ }
+ /* For job control policy - turn off all cores */
+ cores = pmm->cores_powered;
+
+ /*** FALL THROUGH to TIMEOUT TEST as NO TIMEOUT ***/
+
+ case MALI_PMM_EVENT_TIMEOUT:
+
+ /* Main job control policy - turn off cores after inactivity */
+ if( job_control_timeout_valid( pmm, &data_job_control->latency, (u32)event->data ) )
+ {
+ /* Valid inactivity timeout - so find out if we can power down
+ * immediately - if we can't then this means the cores are still in fact
+ * active
+ */
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_TRUE );
+ if( cores_subset != 0 )
+ {
+ /* Check if we can really power down, if not then we are not
+ * really inactive
+ */
+ if( !pmm_invoke_power_down( pmm ) )
+ {
+ pmm_power_down_cancel( pmm );
+ }
+ }
+ /* else there are no cores powered up! */
+ }
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_TIMEOUT);
+#endif
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ /******************DVFS PAUSE**************/
+ case MALI_PMM_STATUS_DVFS_PAUSE:
+ switch ( event->id )
+ {
+ case MALI_PMM_EVENT_DVFS_RESUME:
+
+ if ( pmm->cores_powered != 0 )
+ {
+ pmm->cores_ack_down =0;
+ pmm_power_down_cancel( pmm );
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ }
+ else
+ {
+ pmm_policy_job_control_job_queued( pmm );
+ }
+ _mali_osk_pmm_dvfs_operation_done( 0 );
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* Set waiting status */
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ if ( pmm->cores_powered != 0 )
+ {
+ if ( pmm_invoke_power_down( pmm ) )
+ {
+ _mali_osk_pmm_power_down_done( 0 );
+ break;
+ }
+ }
+ _mali_osk_pmm_power_down_done( 0 );
+ break;
+ default:
+ break;
+ }
+ break;
+
+ /**************** POWER UP ****************/
+ case MALI_PMM_STATUS_OS_POWER_UP:
+ case MALI_PMM_STATUS_POLICY_POWER_UP:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ /* Make sure the cores acknowledging power up match what we expect */
+ MALI_DEBUG_ASSERT( cores == pmm->cores_pend_up );
+ pmm_cores_set_up_ack( pmm, cores );
+
+ if( pmm_invoke_power_up( pmm ) )
+ {
+ if( pmm->status == MALI_PMM_STATUS_OS_POWER_UP )
+ {
+ /* Get the OS data and respond to the power up */
+ _mali_osk_pmm_power_up_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ }
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ /**************** POWER DOWN ****************/
+ case MALI_PMM_STATUS_OS_POWER_DOWN:
+ case MALI_PMM_STATUS_POLICY_POWER_DOWN:
+ switch( event->id )
+ {
+
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+
+ pmm_cores_set_down_ack( pmm, cores );
+
+ if ( pmm->is_dvfs_active == 1 )
+ {
+ if( pmm_power_down_okay( pmm ) )
+ {
+ pmm->is_dvfs_active = 0;
+ pmm->status = MALI_PMM_STATUS_DVFS_PAUSE;
+ _mali_osk_pmm_dvfs_operation_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ break;
+ }
+
+ /* Now check if we can power down */
+ if( pmm_invoke_power_down( pmm ) )
+ {
+ if( pmm->status == MALI_PMM_STATUS_OS_POWER_DOWN )
+ {
+ /* Get the OS data and respond to the power down */
+ _mali_osk_pmm_power_down_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ }
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ case MALI_PMM_STATUS_OS_WAITING:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ cores_subset = pmm_cores_to_power_up( pmm, cores );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering up */
+ if( !pmm_invoke_power_up( pmm ) )
+ {
+ /* Need to wait until power up complete */
+ pmm->status = MALI_PMM_STATUS_OS_POWER_UP;
+ /* Save the OS data to respond later */
+ pmm_save_os_event_data( pmm, event->data );
+ /* Exit this case - as we have to wait */
+ break;
+ }
+ }
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ /* All cores now up - respond to OS power up event */
+ _mali_osk_pmm_power_up_done( event->data );
+ break;
+
+ default:
+ /* All other messages are ignored in this state */
+ break;
+ }
+ break;
+
+ default:
+ /* Unexpected state */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Set inactivity latency timer - if required */
+ job_control_timeout_setup( pmm, &data_job_control->latency );
+
+ /* Update the PMM state */
+ pmm_update_system_state( pmm );
+#if MALI_STATE_TRACKING
+ pmm->mali_new_event_status = event->id;
+#endif /* MALI_STATE_TRACKING */
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Job control policy process end - status=%d and event=%d\n", pmm->status,event->id) );
+
+ MALI_SUCCESS;
+}
+
+void pmm_policy_check_job_control()
+{
+ MALI_DEBUG_ASSERT_POINTER(data_job_control);
+
+ /* Latency timer must have expired - raise the event */
+ pmm_policy_timer_raise_event(&data_job_control->latency);
+}
+
+
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h
new file mode 100644
index 00000000000..455234b80bc
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_jobcontrol.h
+ * Defines the power management module job control policy
+ */
+
+#ifndef __MALI_PMM_POLICY_JOBCONTROL_H__
+#define __MALI_PMM_POLICY_JOBCONTROL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief The jobcontrol policy inactivity latency timeout (in ticks)
+ * before the hardware is switched off
+ *
+ * @note Setting this low whilst tracing or producing debug output can
+ * cause a lot of timeouts to fire, which can affect the PMM behaviour
+ */
+#define MALI_PMM_POLICY_JOBCONTROL_INACTIVITY_TIMEOUT 50
+
+/** @brief Job control policy initialization
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init_job_control(_mali_pmm_internal_state_t *pmm);
+
+/** @brief Job control policy termination
+ */
+void pmm_policy_term_job_control(void);
+
+/** @brief Job control policy state changer
+ *
+ * Given the next available event message, this routine processes it
+ * for the policy and changes state as needed.
+ *
+ * Job control policy depends on events from the Mali cores, and will
+ * power down all cores after an inactivity latency timeout. It will
+ * power the cores back on again when a job is scheduled to run.
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process_job_control( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @brief Job control policy checker
+ *
+ * The latency timer has fired and we need to raise the correct event to
+ * handle it
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_policy_check_job_control(void);
+
+/** @} */ /* End group pmmapi_policy */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_JOBCONTROL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c
new file mode 100644
index 00000000000..a0ac1c7790c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_state.c
+ * Implementation of the power management module internal state
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_system.h"
+
+#include "mali_kernel_core.h"
+#include "mali_platform.h"
+
+#define SIZEOF_CORES_LIST 6
+
+/* NOTE: L2 *MUST* be first on the list so that it
+ * is correctly powered on first and powered off last
+ */
+static mali_pmm_core_id cores_list[] = { MALI_PMM_CORE_L2,
+ MALI_PMM_CORE_GP,
+ MALI_PMM_CORE_PP0,
+ MALI_PMM_CORE_PP1,
+ MALI_PMM_CORE_PP2,
+ MALI_PMM_CORE_PP3 };
+
+
+
+void pmm_update_system_state( _mali_pmm_internal_state_t *pmm )
+{
+ mali_pmm_state state;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ if( pmm->cores_registered == 0 )
+ {
+ state = MALI_PMM_STATE_UNAVAILABLE;
+ }
+ else if( pmm->cores_powered == 0 )
+ {
+ state = MALI_PMM_STATE_SYSTEM_OFF;
+ }
+ else if( pmm->cores_powered == pmm->cores_registered )
+ {
+ state = MALI_PMM_STATE_SYSTEM_ON;
+ }
+ else
+ {
+ /* Some other state where not everything is on or off */
+ state = MALI_PMM_STATE_SYSTEM_TRANSITION;
+ }
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_state_change( pmm->state, state );
+#endif
+ pmm->state = state;
+}
+
+mali_pmm_core_mask pmm_cores_from_event_data( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ mali_pmm_core_mask cores;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* All cores - the system */
+ cores = pmm->cores_registered;
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+ /* Currently the main event data is only the cores
+ * for these messages
+ */
+ cores = (mali_pmm_core_mask)event->data;
+ if( cores == MALI_PMM_CORE_SYSTEM )
+ {
+ cores = pmm->cores_registered;
+ }
+ else if( cores == MALI_PMM_CORE_PP_ALL )
+ {
+ /* Get the subset of registered PP cores */
+ cores = (pmm->cores_registered & MALI_PMM_CORE_PP_ALL);
+ }
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+ break;
+
+ default:
+ /* Assume timeout messages - report cores still powered */
+ cores = pmm->cores_powered;
+ break;
+ }
+
+ return cores;
+}
+
+mali_pmm_core_mask pmm_cores_to_power_up( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ mali_pmm_core_mask cores_subset;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check that cores aren't pending power down when asked for power up */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_down == 0 );
+
+ cores_subset = (~(pmm->cores_powered) & cores);
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering up */
+ pmm->cores_pend_up = cores_subset;
+ }
+
+ return cores_subset;
+}
+
+mali_pmm_core_mask pmm_cores_to_power_down( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores, mali_bool immediate_only )
+{
+ mali_pmm_core_mask cores_subset;
+ _mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check that cores aren't pending power up when asked for power down */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_up == 0 );
+
+ cores_subset = (pmm->cores_powered & cores);
+ if( cores_subset != 0 )
+ {
+ int n;
+ volatile mali_pmm_core_mask *ppowered = &(pmm->cores_powered);
+
+ /* There are some cores that need powering down, but we may
+ * need to wait until they are idle
+ */
+ for( n = SIZEOF_CORES_LIST-1; n >= 0; n-- )
+ {
+ if( (cores_list[n] & cores_subset) != 0 )
+ {
+ /* Core is to be powered down */
+ pmm->cores_pend_down |= cores_list[n];
+
+ /* We can't hold the power lock while accessing the subsystem mutex via
+ * the core power call, because driver termination takes the subsystem
+ * mutex and then the power lock to unregister a core.
+ * This does mean that the following function could fail
+ * as the core is unregistered before we tell it to power
+ * down, but that does not matter as we are terminating
+ */
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ /* Signal the core to power down
+ * If it is busy (not idle) it will set a pending power down flag
+ * (as long as we don't want to only immediately power down).
+ * If it isn't busy it will move out of the idle queue right
+ * away
+ */
+ err = mali_core_signal_power_down( cores_list[n], immediate_only );
+ MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ /* Re-read cores_subset in case it has changed */
+ cores_subset = (*ppowered & cores);
+
+ if( err == _MALI_OSK_ERR_OK )
+ {
+ /* We moved an idle core to the power down queue
+ * which means it is now acknowledged (if it is still
+ * registered)
+ */
+ pmm->cores_ack_down |= (cores_list[n] & cores_subset);
+ }
+ else
+ {
+ MALI_DEBUG_ASSERT( err == _MALI_OSK_ERR_BUSY ||
+ (err == _MALI_OSK_ERR_FAULT &&
+ (*ppowered & cores_list[n]) == 0) );
+ /* If we didn't move a core - it must be active, so
+ * leave it pending, so we get an acknowledgement (when
+ * not in immediate only mode)
+ * Alternatively we are shutting down and the core has
+ * been unregistered
+ */
+ }
+ }
+ }
+ }
+
+ return cores_subset;
+}
+
+void pmm_power_down_cancel( _mali_pmm_internal_state_t *pmm )
+{
+ int n;
+ mali_pmm_core_mask pd, ad;
+ _mali_osk_errcode_t err;
+ volatile mali_pmm_core_mask *pregistered;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Cancelling power down\n") );
+
+ pd = pmm->cores_pend_down;
+ ad = pmm->cores_ack_down;
+ /* Clear the pending cores so that they don't move to the off
+ * queue if they haven't already
+ */
+ pmm->cores_pend_down = 0;
+ pmm->cores_ack_down = 0;
+ pregistered = &(pmm->cores_registered);
+
+ /* Power up all the pending power down cores - just so
+ * we make sure the system is in a known state, as a
+ * pending core might have sent an acknowledgement message
+ * which hasn't been read yet.
+ */
+ for( n = 0; n < SIZEOF_CORES_LIST; n++ )
+ {
+ if( (cores_list[n] & pd) != 0 )
+ {
+ /* We can't hold the power lock while accessing the subsystem mutex via
+ * the core power call, because driver termination takes the subsystem
+ * mutex and then the power lock to unregister a core.
+ * This does mean that the following power up function could fail
+ * as the core is unregistered before we tell it to power
+ * up, but that does not matter as we are terminating
+ */
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ /* As we are cancelling - only move the cores back to the queue -
+ * no reset needed
+ */
+ err = mali_core_signal_power_up( cores_list[n], MALI_TRUE );
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ /* Update pending list with the current registered cores */
+ pd &= (*pregistered);
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_BUSY &&
+ ((cores_list[n] & ad) == 0)) ||
+ (err == _MALI_OSK_ERR_FAULT &&
+ (*pregistered & cores_list[n]) == 0) );
+ /* If we didn't power up a core - it must be active and
+ * hasn't actually tried to power down - this is expected
+ * for cores that haven't acknowledged
+ * Alternatively we are shutting down and the core has
+ * been unregistered
+ */
+ }
+ }
+ }
+ /* Only used in debug builds */
+ MALI_IGNORE(ad);
+}
+
+
+mali_bool pmm_power_down_okay( _mali_pmm_internal_state_t *pmm )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return ( pmm->cores_pend_down == pmm->cores_ack_down ? MALI_TRUE : MALI_FALSE );
+}
+
+mali_bool pmm_invoke_power_down( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ /* Check that cores are pending power down during power down invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_down != 0 );
+ /* Check that cores are not pending power up during power down invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_up == 0 );
+
+ if( !pmm_power_down_okay( pmm ) )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: Waiting for cores to go idle for power off - 0x%08x / 0x%08x\n",
+ pmm->cores_pend_down, pmm->cores_ack_down) );
+ return MALI_FALSE;
+ }
+ else
+ {
+#if !MALI_PMM_NO_PMU
+ err = mali_platform_powerdown( pmm->cores_pend_down );
+#else
+ err = _MALI_OSK_ERR_OK;
+#endif
+
+ if( err == _MALI_OSK_ERR_OK )
+ {
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+ /* Remove powered down cores from idle and powered list */
+ pmm->cores_powered &= ~(pmm->cores_pend_down);
+ pmm->cores_idle &= ~(pmm->cores_pend_down);
+ /* Reset pending/acknowledged status */
+ pmm->cores_pend_down = 0;
+ pmm->cores_ack_down = 0;
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ }
+ else
+ {
+ MALI_PRINT_ERROR( ("PMM: Failed to get PMU to power down cores - (0x%x) %s",
+ pmm->cores_pend_down, pmm_trace_get_core_name(pmm->cores_pend_down)) );
+ pmm->fatal_power_err = MALI_TRUE;
+ }
+ }
+
+ return MALI_TRUE;
+}
+
+
+mali_bool pmm_power_up_okay( _mali_pmm_internal_state_t *pmm )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return ( pmm->cores_pend_up == pmm->cores_ack_up ? MALI_TRUE : MALI_FALSE );
+}
+
+
+mali_bool pmm_invoke_power_up( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ /* Check that cores are pending power up during power up invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_up != 0 );
+ /* Check that cores are not pending power down during power up invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_down == 0 );
+
+ if( pmm_power_up_okay( pmm ) )
+ {
+ /* Power up has completed - sort out subsystem core status */
+
+ int n;
+ /* Use volatile to access, so that it is updated if any cores are unregistered */
+ volatile mali_pmm_core_mask *ppendup = &(pmm->cores_pend_up);
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+ /* Move cores into idle queues */
+ for( n = 0; n < SIZEOF_CORES_LIST; n++ )
+ {
+ if( (cores_list[n] & (*ppendup)) != 0 )
+ {
+ /* We can't hold the power lock while accessing the subsystem mutex via
+ * the core power call, because driver termination takes the subsystem
+ * mutex and then the power lock to unregister a core.
+ * This does mean that the following function could fail
+ * as the core is unregistered before we tell it to power
+ * up, but that does not matter as we are terminating
+ */
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ err = mali_core_signal_power_up( cores_list[n], MALI_FALSE );
+ MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_FAULT &&
+ (*ppendup & cores_list[n]) == 0) );
+ /* We only expect this to fail when we are shutting down
+ * and the core has been unregistered
+ */
+ }
+ }
+ }
+ /* Finished power up - add cores to idle and powered list */
+ pmm->cores_powered |= (*ppendup);
+ pmm->cores_idle |= (*ppendup);
+ /* Reset pending/acknowledge status */
+ pmm->cores_pend_up = 0;
+ pmm->cores_ack_up = 0;
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ return MALI_TRUE;
+ }
+ else
+ {
+#if !MALI_PMM_NO_PMU
+ /* Power up must now be done */
+ err = mali_platform_powerup( pmm->cores_pend_up );
+#else
+ err = _MALI_OSK_ERR_OK;
+#endif
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_PRINT_ERROR( ("PMM: Failed to get PMU to power up cores - (0x%x) %s",
+ pmm->cores_pend_up, pmm_trace_get_core_name(pmm->cores_pend_up)) );
+ pmm->fatal_power_err = MALI_TRUE;
+ }
+ else
+ {
+ /* TBD - Update core status immediately rather than use event message */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK,
+ 0 };
+ /* All the cores that were pending power up, have now completed power up */
+ event.data = pmm->cores_pend_up;
+ _mali_ukk_pmm_event_message( &event );
+ MALIPMM_DEBUG_PRINT( ("PMM: Sending ACK to power up") );
+ }
+ }
+
+ /* Always return false, as we need an interrupt to acknowledge
+ * when power up is complete
+ */
+ return MALI_FALSE;
+}
+
+mali_pmm_core_mask pmm_cores_set_active( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ pmm->cores_idle &= (~cores);
+ return pmm->cores_idle;
+}
+
+mali_pmm_core_mask pmm_cores_set_idle( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ pmm->cores_idle |= (cores);
+ return pmm->cores_idle;
+}
+
+mali_pmm_core_mask pmm_cores_set_down_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check the core is pending a power down */
+ MALI_DEBUG_ASSERT( (pmm->cores_pend_down & cores) != 0 );
+ /* Check core has not acknowledged power down more than once */
+ MALI_DEBUG_ASSERT( (pmm->cores_ack_down & cores) == 0 );
+
+ pmm->cores_ack_down |= (cores);
+
+ return pmm->cores_ack_down;
+}
+
+void pmm_fatal_reset( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_osk_notification_t *msg = NULL;
+ mali_pmm_status status;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALIPMM_DEBUG_PRINT( ("PMM: Fatal Reset called") );
+
+ MALI_DEBUG_ASSERT( pmm->status != MALI_PMM_STATUS_OFF );
+
+ /* Reset the common status */
+ pmm->waiting = 0;
+ pmm->missed = 0;
+ pmm->fatal_power_err = MALI_FALSE;
+ pmm->no_events = 0;
+ pmm->check_policy = MALI_FALSE;
+ pmm->cores_pend_down = 0;
+ pmm->cores_pend_up = 0;
+ pmm->cores_ack_down = 0;
+ pmm->cores_ack_up = 0;
+ pmm->is_dvfs_active = 0;
+#if MALI_PMM_TRACE
+ pmm->messages_sent = 0;
+ pmm->messages_received = 0;
+ pmm->imessages_sent = 0;
+ pmm->imessages_received = 0;
+ MALI_PRINT( ("PMM Trace: *** Fatal reset occurred ***") );
+#endif
+
+ /* Set that we are unavailable whilst resetting */
+ pmm->state = MALI_PMM_STATE_UNAVAILABLE;
+ status = pmm->status;
+ pmm->status = MALI_PMM_STATUS_OFF;
+
+ /* We want all cores powered */
+ pmm->cores_powered = pmm->cores_registered;
+ /* The cores may not be idle, but this state will be rectified later */
+ pmm->cores_idle = pmm->cores_registered;
+
+ /* So power on any cores that are registered */
+ if( pmm->cores_registered != 0 )
+ {
+ int n;
+ volatile mali_pmm_core_mask *pregistered = &(pmm->cores_registered);
+#if !MALI_PMM_NO_PMU
+ err = mali_platform_powerup( pmm->cores_registered );
+#endif
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ /* This is very bad as we can't even be certain the cores are now
+ * powered up
+ */
+ MALI_PRINT_ERROR( ("PMM: Failed to perform PMM reset!\n") );
+ /* TBD driver exit? */
+ }
+
+ for( n = SIZEOF_CORES_LIST-1; n >= 0; n-- )
+ {
+ if( (cores_list[n] & (*pregistered)) != 0 )
+ {
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ /* Core is now active - so try putting it in the idle queue */
+ err = mali_core_signal_power_up( cores_list[n], MALI_FALSE );
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ /* We either succeeded, or we were not off anyway, or we have
+ * just been deregistered
+ */
+ MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_OK) ||
+ (err == _MALI_OSK_ERR_BUSY) ||
+ (err == _MALI_OSK_ERR_FAULT &&
+ (*pregistered & cores_list[n]) == 0) );
+ }
+ }
+ }
+
+ /* Unblock any pending OS event */
+ if( status == MALI_PMM_STATUS_OS_POWER_UP )
+ {
+ /* Get the OS data and respond to the power up */
+ _mali_osk_pmm_power_up_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ if( status == MALI_PMM_STATUS_OS_POWER_DOWN )
+ {
+ /* Get the OS data and respond to the power down
+ * NOTE: We are not powered down at this point due to power problems,
+ * so we are lying to the system, but something bad has already
+ * happened and we are trying to unstick things
+ * TBD - Add busy loop to power down cores?
+ */
+ _mali_osk_pmm_power_down_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+
+ /* Purge the event queues */
+ /* Drain the internal queue until it is empty */
+ do
+ {
+ if( _mali_osk_notification_queue_dequeue( pmm->iqueue, &msg ) != _MALI_OSK_ERR_OK )
+ {
+ break;
+ }
+ _mali_osk_notification_delete ( msg );
+ } while (MALI_TRUE);
+
+ /* Drain the external queue until it is empty */
+ do
+ {
+ if( _mali_osk_notification_queue_dequeue( pmm->queue, &msg ) != _MALI_OSK_ERR_OK )
+ {
+ break;
+ }
+ _mali_osk_notification_delete ( msg );
+ } while (MALI_TRUE);
+
+ /* Return status/state to normal */
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ pmm_update_system_state(pmm);
+}
+
+mali_pmm_core_mask pmm_cores_set_up_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check the core is pending a power up */
+ MALI_DEBUG_ASSERT( (pmm->cores_pend_up & cores) != 0 );
+ /* Check core has not acknowledged power up more than once */
+ MALI_DEBUG_ASSERT( (pmm->cores_ack_up & cores) == 0 );
+
+ pmm->cores_ack_up |= (cores);
+
+ return pmm->cores_ack_up;
+}
+
+void pmm_save_os_event_data(_mali_pmm_internal_state_t *pmm, mali_pmm_message_data data)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ /* Check that there is no saved data */
+ MALI_DEBUG_ASSERT( pmm->os_data == 0 );
+ /* Can't store zero data - as retrieve check will fail */
+ MALI_DEBUG_ASSERT( data != 0 );
+
+ pmm->os_data = data;
+}
+
+mali_pmm_message_data pmm_retrieve_os_event_data(_mali_pmm_internal_state_t *pmm)
+{
+ mali_pmm_message_data data;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ /* Check that there is saved data */
+ MALI_DEBUG_ASSERT( pmm->os_data != 0 );
+
+ /* Get data, and clear the saved version */
+ data = pmm->os_data;
+ pmm->os_data = 0;
+
+ return data;
+}
+
+/* Create list of core names to look up
+ * We are doing it this way to avoid the need for
+ * either string allocation, or stack space, so we
+ * use constant strings instead
+ */
+typedef struct pmm_trace_corelist
+{
+ mali_pmm_core_mask id;
+ const char *name;
+} pmm_trace_corelist_t;
+
+static pmm_trace_corelist_t pmm_trace_cores[] = {
+ { MALI_PMM_CORE_SYSTEM, "SYSTEM" },
+ { MALI_PMM_CORE_GP, "GP" },
+ { MALI_PMM_CORE_L2, "L2" },
+ { MALI_PMM_CORE_PP0, "PP0" },
+ { MALI_PMM_CORE_PP1, "PP1" },
+ { MALI_PMM_CORE_PP2, "PP2" },
+ { MALI_PMM_CORE_PP3, "PP3" },
+ { MALI_PMM_CORE_PP_ALL, "PP (all)" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP0),
+ "GP+L2+PP0" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0),
+ "GP+PP0" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP0 | MALI_PMM_CORE_PP1),
+ "GP+L2+PP0+PP1" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0 | MALI_PMM_CORE_PP1),
+ "GP+PP0+PP1" },
+ { 0, NULL } /* Terminator of list */
+};
+
+const char *pmm_trace_get_core_name( mali_pmm_core_mask cores )
+{
+ const char *dname = NULL;
+ int cl;
+
+ /* Look up name in corelist */
+ cl = 0;
+ while( pmm_trace_cores[cl].name != NULL )
+ {
+ if( pmm_trace_cores[cl].id == cores )
+ {
+ dname = pmm_trace_cores[cl].name;
+ break;
+ }
+ cl++;
+ }
+
+ if( dname == NULL )
+ {
+ /* We don't know a good short-hand for the configuration */
+ dname = "[multi-core]";
+ }
+
+ return dname;
+}
+
+#endif /* USING_MALI_PMM */
+
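The pending/acknowledge handshake implemented above is plain mask arithmetic. The standalone fragment below mirrors (but does not call) pmm_cores_to_power_down() and pmm_power_down_okay() with made-up example masks; it is illustrative only.

#include "mali_pmm.h"

/* Hedged sketch: example masks only, not driver code. */
static void example_power_down_handshake( void )
{
	mali_pmm_core_mask powered = MALI_PMM_CORE_L2 | MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0;
	mali_pmm_core_mask request = MALI_PMM_CORE_PP0 | MALI_PMM_CORE_PP1;

	/* Only cores that are actually powered become pending: here just PP0 */
	mali_pmm_core_mask pend_down = powered & request;
	mali_pmm_core_mask ack_down = 0;

	/* Each busy core later acknowledges, e.g. via pmm_cores_set_down_ack() */
	ack_down |= MALI_PMM_CORE_PP0;

	/* pmm_power_down_okay() allows the PMU call once the masks match */
	if( pend_down == ack_down )
	{
		/* safe to call mali_platform_powerdown( pend_down ) */
	}
}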
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h
new file mode 100644
index 00000000000..3c8c31f066d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_state.h
+ * Defines the internal power management module state
+ */
+
+#ifndef __MALI_PMM_STATE_H__
+#define __MALI_PMM_STATE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_state Power Management Module State
+ *
+ * @{
+ */
+
+/* Check that the subset is really a subset of cores */
+#define MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( cores, subset ) \
+ MALI_DEBUG_ASSERT( ((~(cores)) & (subset)) == 0 )
+
+
+/* Locking macros */
+#define MALI_PMM_LOCK(pmm) \
+ _mali_osk_lock_wait( pmm->lock, _MALI_OSK_LOCKMODE_RW )
+#define MALI_PMM_UNLOCK(pmm) \
+ _mali_osk_lock_signal( pmm->lock, _MALI_OSK_LOCKMODE_RW )
+#define MALI_PMM_LOCK_TERM(pmm) \
+ _mali_osk_lock_term( pmm->lock )
+
+/* Notification type for messages */
+#define MALI_PMM_NOTIFICATION_TYPE 0
+
+/** @brief Status of the PMM state machine
+ */
+typedef enum mali_pmm_status_tag
+{
+ MALI_PMM_STATUS_IDLE, /**< PMM is waiting for the next event */
+ MALI_PMM_STATUS_POLICY_POWER_DOWN, /**< Policy initiated power down */
+ MALI_PMM_STATUS_POLICY_POWER_UP, /**< Policy initiated power up */
+ MALI_PMM_STATUS_OS_WAITING, /**< PMM is waiting for OS power up */
+ MALI_PMM_STATUS_OS_POWER_DOWN, /**< OS initiated power down */
+ MALI_PMM_STATUS_RUNTIME_IDLE_IN_PROGRESS,
+ MALI_PMM_STATUS_DVFS_PAUSE, /**< PMM DVFS Status Pause */
+ MALI_PMM_STATUS_OS_POWER_UP, /**< OS initiated power up */
+ MALI_PMM_STATUS_OFF, /**< PMM is not active */
+} mali_pmm_status;
+
+
+/** @brief Internal state of the PMM
+ */
+typedef struct _mali_pmm_internal_state
+{
+ mali_pmm_status status; /**< PMM state machine */
+ mali_pmm_policy policy; /**< PMM policy */
+ mali_bool check_policy; /**< PMM policy needs checking */
+ mali_pmm_state state; /**< PMM state */
+ mali_pmm_core_mask cores_registered; /**< Bitmask of cores registered */
+ mali_pmm_core_mask cores_powered; /**< Bitmask of cores powered up */
+ mali_pmm_core_mask cores_idle; /**< Bitmask of cores idle */
+ mali_pmm_core_mask cores_pend_down; /**< Bitmask of cores pending power down */
+ mali_pmm_core_mask cores_pend_up; /**< Bitmask of cores pending power up */
+ mali_pmm_core_mask cores_ack_down; /**< Bitmask of cores acknowledged power down */
+ mali_pmm_core_mask cores_ack_up; /**< Bitmask of cores acknowledged power up */
+
+ _mali_osk_notification_queue_t *queue; /**< PMM event queue */
+ _mali_osk_notification_queue_t *iqueue; /**< PMM internal event queue */
+ _mali_osk_irq_t *irq; /**< PMM irq handler */
+ _mali_osk_lock_t *lock; /**< PMM lock */
+
+ mali_pmm_message_data os_data; /**< OS data sent via the OS events */
+
+ mali_bool pmu_initialized; /**< PMU initialized */
+
+ _mali_osk_atomic_t messages_queued; /**< PMM event messages queued */
+ u32 waiting; /**< PMM waiting events - due to busy */
+ u32 no_events; /**< PMM called to process when no events */
+
+ u32 missed; /**< PMM missed events due to OOM */
+ mali_bool fatal_power_err; /**< PMM has had a fatal power error? */
+ u32 is_dvfs_active; /**< PMM DVFS activity */
+
+#if (defined(DEBUG) || MALI_STATE_TRACKING)
+ u32 mali_last_pmm_status;
+ u32 mali_new_event_status;
+ u32 mali_pmm_lock_acquired;
+#endif
+
+#if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ u32 messages_sent; /**< Total event messages sent */
+ u32 messages_received; /**< Total event messages received */
+ u32 imessages_sent; /**< Total event internal messages sent */
+ u32 imessages_received; /**< Total event internal messages received */
+#endif
+} _mali_pmm_internal_state_t;
+
+/** @brief Sets that a policy needs a check before processing events
+ *
+ * Called when a timer or other asynchronous source has expired and the policy needs re-checking
+ */
+void malipmm_set_policy_check(void);
+
+/** @brief Update the PMM externally viewable state depending on the current PMM internal state
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_update_system_state( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Returns the core mask from the event data - if applicable
+ *
+ * @param pmm internal PMM state
+ * @param event event message to get the core mask from
+ * @return mask of cores that is relevant to this event message
+ */
+mali_pmm_core_mask pmm_cores_from_event_data( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @brief Sort out which cores need to be powered up from the given core mask
+ *
+ * All cores that can be powered up will be put into a pending state
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to check if they need to be powered up
+ * @return mask of cores that need to be powered up, this can be 0 if all cores
+ * are powered up already
+ */
+mali_pmm_core_mask pmm_cores_to_power_up( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Sort out which cores need to be powered down from the given core mask
+ *
+ * All cores that can be powered down will be put into a pending state. If they
+ * can be powered down immediately they will also be acknowledged that they can be
+ * powered down. If the immediate_only flag is set, then only those cores that
+ * can be acknowledged for power down will be put into a pending state.
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to check if they need to be powered down
+ * @param immediate_only MALI_TRUE means that only cores that can power down now will
+ * be put into a pending state
+ * @return mask of cores that need to be powered down, this can be 0 if all cores
+ * are powered down already
+ */
+mali_pmm_core_mask pmm_cores_to_power_down( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores, mali_bool immediate_only );
+
+/** @brief Cancel an invocation of power down (pmm_invoke_power_down)
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_power_down_cancel( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Check if a call to invoke power down should succeed, or fail
+ *
+ * This will report MALI_FALSE if some of the cores are still active and need
+ * to acknowledge that they are ready to power down
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores to power down have acknowledged they
+ * can power down, else MALI_FALSE
+ */
+mali_bool pmm_power_down_okay( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Try to make all the pending cores power down
+ *
+ * If all the pending cores have acknowledged they can power down, this will call the
+ * PMU power down function to turn them off
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores have been powered down, else MALI_FALSE
+ */
+mali_bool pmm_invoke_power_down( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Check if all the pending cores to power up have done so
+ *
+ * This will report MALI_FALSE if some of the cores are still powered off
+ * and have not acknowledged that they have powered up
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores to power up have acknowledged they
+ * are now powered up, else MALI_FALSE
+ */
+mali_bool pmm_power_up_okay( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Try to make all the pending cores power up
+ *
+ * If all the pending cores have acknowledged they have powered up, this will
+ * make the cores start processing jobs again, else this will call the PMU
+ * power up function to turn them on, and the PMM is then expected to wait for an
+ * interrupt to acknowledge the power up
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores have been powered up, else MALI_FALSE
+ */
+mali_bool pmm_invoke_power_up( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Set the cores that are now active in the system
+ *
+ * Updates which cores are active and returns which cores are still idle
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to set to active
+ * @return mask of all the cores that are idle
+ */
+mali_pmm_core_mask pmm_cores_set_active( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that are now idle in the system
+ *
+ * Updates which cores are idle and returns which cores are still idle
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to set to idle
+ * @return mask of all the cores that are idle
+ */
+mali_pmm_core_mask pmm_cores_set_idle( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that have acknowledged a pending power down
+ *
+ * Updates which cores have acknowledged the pending power down and are now ready
+ * to be turned off
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores that have acknowledged the pending power down
+ * @return mask of all the cores that have acknowledged the power down
+ */
+mali_pmm_core_mask pmm_cores_set_down_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that have acknowledged a pending power up
+ *
+ * Updates which cores have acknowledged the pending power up and are now
+ * fully powered and ready to run jobs
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores that have acknowledged the pending power up
+ * @return mask of all the cores that have acknowledged the power up
+ */
+mali_pmm_core_mask pmm_cores_set_up_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+
+/** @brief Tries to reset the PMM and PMU hardware to a known state after any fatal issues
+ *
+ * This will try to power up all the cores and reset the PMM state
+ * to its initial state after core registration - all cores powered but not
+ * pending or active.
+ * All events in the event queues will be thrown away.
+ *
+ * @note: Any pending power down will be cancelled including the OS calling for power down
+ */
+void pmm_fatal_reset( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Save the OS specific data for an OS power up/down event
+ *
+ * @param pmm internal PMM state
+ * @param data OS specific event data
+ */
+void pmm_save_os_event_data(_mali_pmm_internal_state_t *pmm, mali_pmm_message_data data);
+
+/** @brief Retrieve the OS specific data for an OS power up/down event
+ *
+ * This will clear the stored OS data, as well as return it.
+ *
+ * @param pmm internal PMM state
+ * @return OS specific event data that was saved previously
+ */
+mali_pmm_message_data pmm_retrieve_os_event_data(_mali_pmm_internal_state_t *pmm);
+
+
+/** @brief Get a human readable name for the cores in a core mask
+ *
+ * @param core the core mask
+ * @return string containing a name relating to the given core mask
+ */
+const char *pmm_trace_get_core_name( mali_pmm_core_mask core );
+
+/** @} */ /* End group pmmapi_state */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_STATE_H__ */
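The MALI_PMM_DEBUG_ASSERT_CORES_SUBSET macro declared above reduces to a single expression: a subset may only contain bits already present in the full mask. A small illustration follows; the helper name and the masks used in the comment are examples, not driver code.

#include "mali_pmm.h"

/* Hedged illustration of the subset test used by
 * MALI_PMM_DEBUG_ASSERT_CORES_SUBSET.
 */
static mali_bool example_cores_are_subset( mali_pmm_core_mask cores,
                                           mali_pmm_core_mask subset )
{
	return ( ((~cores) & subset) == 0 ) ? MALI_TRUE : MALI_FALSE;
}

/* example_cores_are_subset( MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0, MALI_PMM_CORE_PP0 )
 *   -> MALI_TRUE
 * example_cores_are_subset( MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0, MALI_PMM_CORE_PP1 )
 *   -> MALI_FALSE (PP1 is not part of the registered mask)
 */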
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h
new file mode 100644
index 00000000000..f5106479e33
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_system.h
+ * Defines the power management module system functions
+ */
+
+#ifndef __MALI_PMM_SYSTEM_H__
+#define __MALI_PMM_SYSTEM_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_system Power Management Module System Functions
+ *
+ * @{
+ */
+
+extern struct mali_kernel_subsystem mali_subsystem_pmm;
+
+/** @brief Register a core with the PMM, which will power up
+ * the core
+ *
+ * @param core the core to register with the PMM
+ * @return error if the core cannot be powered up
+ */
+_mali_osk_errcode_t malipmm_core_register( mali_pmm_core_id core );
+
+/** @brief Unregister a core with the PMM
+ *
+ * @param core the core to unregister with the PMM
+ */
+void malipmm_core_unregister( mali_pmm_core_id core );
+
+/** @brief Acknowledge that a power down is okay to happen
+ *
+ * A core should not be running a job, or be in the idle queue when this
+ * is called.
+ *
+ * @param core the core that can now be powered down
+ */
+void malipmm_core_power_down_okay( mali_pmm_core_id core );
+
+/** @} */ /* End group pmmapi_system */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_SYSTEM_H__ */
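As a rough usage sketch of the functions declared above: the calling context (a core subsystem's load/unload path) and the wrapper function are assumptions; only the malipmm_* calls come from this header.

#include "mali_pmm.h"
#include "mali_pmm_system.h"

/* Hedged sketch of core registration with the PMM. */
static _mali_osk_errcode_t example_gp_subsystem_setup( void )
{
	/* Registering also powers the core up through the PMM */
	_mali_osk_errcode_t err = malipmm_core_register( MALI_PMM_CORE_GP );
	if( err != _MALI_OSK_ERR_OK ) return err;

	/* ... jobs run; when asked to power down and idle, the core replies
	 * with malipmm_core_power_down_okay( MALI_PMM_CORE_GP ) ... */

	malipmm_core_unregister( MALI_PMM_CORE_GP );
	return _MALI_OSK_ERR_OK;
}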
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h
new file mode 100644
index 00000000000..e9e5e55a082
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c
new file mode 100644
index 00000000000..1c1bf306688
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c
@@ -0,0 +1,82 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+#if USING_MALI_PMM
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+#include "mali_linux_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_pmm.h"
+#include "mali_kernel_license.h"
+#ifdef CONFIG_PM
+#if MALI_LICENSE_IS_GPL
+
+/* Mali Pause Resume APIs */
+int mali_dev_pause()
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_SUSPEND) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ || (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+#else
+ )
+#endif
+ {
+ err = -EPERM;
+ }
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) && (!err))
+ {
+ mali_device_suspend(MALI_PMM_EVENT_DVFS_PAUSE, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_SUSPEND;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_pause);
+
+int mali_dev_resume()
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ || (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+#else
+ )
+#endif
+ {
+ err = -EPERM;
+ }
+ if (!err)
+ {
+ mali_device_resume(MALI_PMM_EVENT_DVFS_RESUME, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_resume);
+
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
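The exported pair above is intended for other in-kernel users. A minimal sketch follows, assuming a hypothetical caller that needs the GPU quiescent around some reconfiguration; only mali_dev_pause()/mali_dev_resume() come from the driver.

#include "mali_device_pause_resume.h"

/* Hedged sketch: the wrapper and the reconfiguration step are illustrative. */
static int example_with_mali_paused( void )
{
	int err = mali_dev_pause();
	if (err)
		return err;	/* -EPERM if the device is already suspended */

	/* ... GPU is idle here; perform the reconfiguration ... */

	return mali_dev_resume();
}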
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h
new file mode 100644
index 00000000000..5362f88cdbd
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DEVICE_PAUSE_RESUME_H__
+#define __MALI_DEVICE_PAUSE_RESUME_H__
+
+#if USING_MALI_PMM
+int mali_dev_pause(void);
+int mali_dev_resume(void);
+#endif /* USING_MALI_PMM */
+
+#endif /* __MALI_DEVICE_PAUSE_RESUME_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h
new file mode 100644
index 00000000000..30a6fa041db
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_IOCTL_H__
+#define __MALI_KERNEL_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h> /* file system operations */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @file mali_kernel_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE 0x82
+#define MALI_IOC_CORE_BASE (_MALI_UK_CORE_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE (_MALI_UK_MEMORY_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE (_MALI_UK_PP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE (_MALI_UK_GP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_GET_SYSTEM_INFO_SIZE _IOR (MALI_IOC_CORE_BASE, _MALI_UK_GET_SYSTEM_INFO_SIZE, _mali_uk_get_system_info_s *)
+#define MALI_IOC_GET_SYSTEM_INFO _IOR (MALI_IOC_CORE_BASE, _MALI_UK_GET_SYSTEM_INFO, _mali_uk_get_system_info_s *)
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
+#define MALI_IOC_MEM_GET_BIG_BLOCK _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, _mali_uk_get_big_block_s *)
+#define MALI_IOC_MEM_FREE_BIG_BLOCK _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, _mali_uk_free_big_block_s *)
+#define MALI_IOC_MEM_INIT _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, _mali_uk_init_mem_s *)
+#define MALI_IOC_MEM_TERM _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, _mali_uk_term_mem_s *)
+#define MALI_IOC_MEM_MAP_EXT _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
+#define MALI_IOC_MEM_UNMAP_EXT _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
+#define MALI_IOC_MEM_ATTACH_UMP _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
+#define MALI_IOC_MEM_RELEASE_UMP _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
+#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
+#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
+#define MALI_IOC_PP_ABORT_JOB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_ABORT_JOB, _mali_uk_pp_abort_job_s * )
+#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
+#define MALI_IOC_GP2_ABORT_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_ABORT_JOB, _mali_uk_gp_abort_job_s *)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
+#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+#define MALI_IOC_PROFILING_START _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
+#define MALI_IOC_PROFILING_STOP _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
+#define MALI_IOC_PROFILING_GET_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
+#define MALI_IOC_PROFILING_CLEAR _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
+#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_IOCTL_H__ */
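From user space these commands are issued with ioctl() on the Mali character device. The sketch below is illustrative only: the device node path and the simplified stand-in for _mali_uk_get_api_version_s are assumptions, since the uk type definitions live outside this hunk, and compiling it would also need this ioctl header and its uk dependencies.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "mali_kernel_ioctl.h"	/* MALI_IOC_* (plus the uk headers it needs) */

/* Assumed, simplified stand-in for _mali_uk_get_api_version_s */
struct example_get_api_version { void *ctx; unsigned int version; unsigned int compatible; };

int main(void)
{
	struct example_get_api_version args = { 0 };
	int fd = open("/dev/mali", O_RDWR);	/* assumed node name */
	if (fd < 0) { perror("open"); return 1; }

	if (ioctl(fd, MALI_IOC_GET_API_VERSION, &args) < 0)
		perror("MALI_IOC_GET_API_VERSION");
	else
		printf("Mali driver API version: 0x%x\n", args.version);

	close(fd);
	return 0;
}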
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c
new file mode 100644
index 00000000000..a5144faae2b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c
@@ -0,0 +1,565 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/mm.h> /* memory manager definitions */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/device.h>
+#include <linux/proc_fs.h>
+
+/* the mali kernel subsystem types */
+#include "mali_kernel_subsystem.h"
+
+/* A memory subsystem always exists, so no need to conditionally include it */
+#include "mali_kernel_common.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_core.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_kernel_ioctl.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_pm.h"
+
+/* */
+#include "mali_kernel_license.h"
+
+/* Setting this parameter will override memory settings in arch/config.h */
+char *mali_mem = "";
+module_param(mali_mem, charp, S_IRUSR | S_IWUSR | S_IWGRP | S_IROTH); /* rw-r--r-- */
+MODULE_PARM_DESC(mali_mem, "Override mali memory configuration. e.g. 32M@224M or 64M@OS_MEMORY");
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int mali_major = 0;
+module_param(mali_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(mali_major, "Device major number");
+
+int mali_benchmark = 0;
+module_param(mali_benchmark, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_benchmark, "Bypass Mali hardware when non-zero");
+
+extern int mali_hang_check_interval;
+module_param(mali_hang_check_interval, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_hang_check_interval, "Interval at which to check for progress after the hw watchdog has been triggered");
+
+extern int mali_max_job_runtime;
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+#if defined(USING_MALI400_L2_CACHE)
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+#endif
+
+struct mali_dev
+{
+ struct cdev cdev;
+#if MALI_LICENSE_IS_GPL
+ struct class * mali_class;
+#endif
+};
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call require non-const */
+
+/* the mali device */
+static struct mali_dev device;
+
+#if MALI_STATE_TRACKING
+static struct proc_dir_entry *proc_entry;
+static int mali_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data);
+#endif
+
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma);
+
+/* Linux char file operations provided by the Mali module */
+struct file_operations mali_fops =
+{
+ .owner = THIS_MODULE,
+ .open = mali_open,
+ .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mali_ioctl,
+#else
+ .ioctl = mali_ioctl,
+#endif
+ .mmap = mali_mmap
+};
+
+
+int mali_driver_init(void)
+{
+ int err;
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ err = _mali_dev_platform_register();
+ if (err)
+ {
+ return err;
+ }
+#endif
+#endif
+#endif
+ err = mali_kernel_constructor();
+ if (_MALI_OSK_ERR_OK != err)
+ {
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ _mali_dev_platform_unregister();
+#endif
+#endif
+#endif
+ MALI_PRINT(("Failed to initialize driver (error %d)\n", err));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void mali_driver_exit(void)
+{
+
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+
+ _mali_osk_pmm_dev_activate();
+#endif
+#endif
+#endif
+#endif
+ mali_kernel_destructor();
+
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ _mali_osk_pmm_dev_idle();
+#endif
+#endif
+#endif
+#endif
+
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ _mali_dev_platform_unregister();
+#endif
+#endif
+#endif
+}
+
+/* called from _mali_osk_init */
+int initialize_kernel_device(void)
+{
+ int err;
+ dev_t dev = 0;
+ if (0 == mali_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0/*first minor*/, 1/*count*/, mali_dev_name);
+ mali_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(mali_major, 0);
+ err = register_chrdev_region(dev, 1/*count*/, mali_dev_name);
+ }
+
+ if (0 == err)
+ {
+ memset(&device, 0, sizeof(device));
+
+ /* initialize our char dev data */
+ cdev_init(&device.cdev, &mali_fops);
+ device.cdev.owner = THIS_MODULE;
+ device.cdev.ops = &mali_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&device.cdev, dev, 1/*count*/);
+
+ if (0 == err)
+ {
+#if MALI_STATE_TRACKING
+ proc_entry = create_proc_entry(mali_dev_name, 0444, NULL);
+ if (proc_entry != NULL)
+ {
+ proc_entry->read_proc = mali_proc_read;
+#endif
+#if MALI_LICENSE_IS_GPL
+ device.mali_class = class_create(THIS_MODULE, mali_dev_name);
+ if (IS_ERR(device.mali_class))
+ {
+ err = PTR_ERR(device.mali_class);
+ }
+ else
+ {
+ struct device * mdev;
+ mdev = device_create(device.mali_class, NULL, dev, NULL, mali_dev_name);
+ if (!IS_ERR(mdev))
+ {
+ return 0;
+ }
+
+ err = PTR_ERR(mdev);
+ }
+ cdev_del(&device.cdev);
+#else
+ return 0;
+#endif
+#if MALI_STATE_TRACKING
+ remove_proc_entry(mali_dev_name, NULL);
+ }
+ else
+ {
+ err = -EFAULT;
+ }
+#endif
+ }
+ unregister_chrdev_region(dev, 1/*count*/);
+ }
+
+
+ return err;
+}
+
+/* called from _mali_osk_term */
+void terminate_kernel_device(void)
+{
+ dev_t dev = MKDEV(mali_major, 0);
+
+#if MALI_LICENSE_IS_GPL
+ device_destroy(device.mali_class, dev);
+ class_destroy(device.mali_class);
+#endif
+
+#if MALI_STATE_TRACKING
+ remove_proc_entry(mali_dev_name, NULL);
+#endif
+
+ /* unregister char device */
+ cdev_del(&device.cdev);
+ /* free major */
+ unregister_chrdev_region(dev, 1/*count*/);
+ return;
+}
+
+/** @note munmap handler is done by vma close handler */
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ struct mali_session_data * session_data;
+ _mali_uk_mem_mmap_s args = {0, };
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X\n", (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT), (unsigned int)(vma->vm_end - vma->vm_start)) );
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = vma->vm_pgoff << PAGE_SHIFT;
+ args.size = vma->vm_end - vma->vm_start;
+ args.ukk_private = vma;
+
+ /* Call the common mmap handler */
+ MALI_CHECK(_MALI_OSK_ERR_OK ==_mali_ukk_mem_mmap( &args ), -EFAULT);
+
+ return 0;
+}
+
+static int mali_open(struct inode *inode, struct file *filp)
+{
+ struct mali_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) return -ENODEV;
+
+ /* allocated struct to track this session */
+ err = _mali_ukk_open((void **)&session_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* initialize file pointer */
+ filp->f_pos = 0;
+
+ /* link in our session data */
+ filp->private_data = (void*)session_data;
+
+ return 0;
+}
+
+static int mali_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) return -ENODEV;
+
+ err = _mali_ukk_close((void **)&filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err;
+ struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ /* inode not used */
+ (void)inode;
+#endif
+
+ MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+ return -ENOTTY;
+ }
+ if (NULL == (void *)arg)
+ {
+ MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+ return -ENOTTY;
+ }
+
+ switch(cmd)
+ {
+ case MALI_IOC_GET_SYSTEM_INFO_SIZE:
+ err = get_system_info_size_wrapper(session_data, (_mali_uk_get_system_info_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_SYSTEM_INFO:
+ err = get_system_info_wrapper(session_data, (_mali_uk_get_system_info_s __user *)arg);
+ break;
+
+ case MALI_IOC_WAIT_FOR_NOTIFICATION:
+ err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION:
+ err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_POST_NOTIFICATION:
+ err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+ break;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ case MALI_IOC_PROFILING_START:
+ err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_ADD_EVENT:
+ err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_STOP:
+ err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_EVENT:
+ err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_CLEAR:
+ err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
+ break;
+#endif
+
+ case MALI_IOC_MEM_INIT:
+ err = mem_init_wrapper(session_data, (_mali_uk_init_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_TERM:
+ err = mem_term_wrapper(session_data, (_mali_uk_term_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_MAP_EXT:
+ err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_UNMAP_EXT:
+ err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+ err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+ err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_GET_BIG_BLOCK:
+ err = mem_get_big_block_wrapper(filp, (_mali_uk_get_big_block_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_FREE_BIG_BLOCK:
+ err = mem_free_big_block_wrapper(session_data, (_mali_uk_free_big_block_s __user *)arg);
+ break;
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RELEASE_UMP:
+ err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
+ break;
+
+#else
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+ case MALI_IOC_PP_START_JOB:
+ err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_ABORT_JOB:
+ err = pp_abort_job_wrapper(session_data, (_mali_uk_pp_abort_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+ err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_CORE_VERSION_GET:
+ err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_START_JOB:
+ err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_ABORT_JOB:
+ err = gp_abort_job_wrapper(session_data, (_mali_uk_gp_abort_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+ err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_CORE_VERSION_GET:
+ err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_SUSPEND_RESPONSE:
+ err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+ break;
+
+ case MALI_IOC_VSYNC_EVENT_REPORT:
+ err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+ err = -ENOTTY;
+ }
+
+ return err;
+}
+
+#if MALI_STATE_TRACKING
+static int mali_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = 0;
+
+ MALI_DEBUG_PRINT(1, ("mali_proc_read(page=%p, start=%p, off=%u, count=%d, eof=%p, data=%p)\n", page, start, off, count, eof, data));
+
+ if (off > 0)
+ {
+ return 0;
+ }
+
+ if (count < 1024)
+ {
+ return 0;
+ }
+
+ len = sprintf(page + len, "Mali device driver %s\n", SVN_REV_STRING);
+ len += sprintf(page + len, "License: %s\n", MALI_KERNEL_LINUX_LICENSE);
+
+ /*
+ * A more elegant solution would be to gather information from all subsystems and
+ * then report it all in the /proc/mali file, but this would require a bit more work.
+ * Use MALI_PRINT for now so we get the information in the dmesg log at least.
+ */
+ _mali_kernel_core_dump_state();
+
+ return len;
+}
+#endif
+
+
+module_init(mali_driver_init);
+module_exit(mali_driver_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h
new file mode 100644
index 00000000000..969449d8ef4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+_mali_osk_errcode_t initialize_kernel_device(void);
+void terminate_kernel_device(void);
+
+void mali_osk_low_level_mem_init(void);
+void mali_osk_low_level_mem_term(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c
new file mode 100644
index 00000000000..aaafc151077
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c
@@ -0,0 +1,786 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_pm.c
+ * Implementation of the Linux Power Management for Mali GPU kernel driver
+ */
+#undef CONFIG_HAS_EARLYSUSPEND /*ARM will remove .early_suspend support for r2p2_rel*/
+#if USING_MALI_PMM
+#include <linux/sched.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <asm/current.h>
+#include <asm/delay.h>
+#include <linux/suspend.h>
+
+#include "mali_platform.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_pmm.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_linux_pm.h"
+
+#if MALI_GPU_UTILIZATION
+#include "mali_kernel_utilization.h"
+#endif /* MALI_GPU_UTILIZATION */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+#ifdef CONFIG_PM
+#include "mali_linux_pm_testsuite.h"
+unsigned int pwr_mgmt_status_reg = 0;
+#endif /* CONFIG_PM */
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+#if MALI_STATE_TRACKING
+ int is_os_pmm_thread_waiting = -1;
+#endif /* MALI_STATE_TRACKING */
+
+/* kernel should be configured with power management support */
+#ifdef CONFIG_PM
+
+/* License should be GPL */
+#if MALI_LICENSE_IS_GPL
+
+/* Linux kernel major version */
+#define LINUX_KERNEL_MAJOR_VERSION 2
+
+/* Linux kernel minor version */
+#define LINUX_KERNEL_MINOR_VERSION 6
+
+/* Linux kernel development version */
+#define LINUX_KERNEL_DEVELOPMENT_VERSION 29
+
+#ifdef CONFIG_PM_DEBUG
+static const char* const mali_states[_MALI_MAX_DEBUG_OPERATIONS] = {
+ [_MALI_DEVICE_SUSPEND] = "suspend",
+ [_MALI_DEVICE_RESUME] = "resume",
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ [_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB] = "early_suspend_level_disable_framebuffer",
+ [_MALI_DEVICE_LATERESUME] = "late_resume",
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ [_MALI_DVFS_PAUSE_EVENT] = "dvfs_pause",
+ [_MALI_DVFS_RESUME_EVENT] = "dvfs_resume",
+};
+
+#endif /* CONFIG_PM_DEBUG */
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+extern void set_mali_parent_power_domain(struct platform_device* dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifndef CONFIG_HAS_EARLYSUSPEND
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy);
+
+static struct notifier_block mali_pwr_notif_block = {
+ .notifier_call = mali_pwr_suspend_notifier
+};
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/* Power management thread pointer */
+struct task_struct *pm_thread;
+
+/* dvfs power management thread */
+struct task_struct *dvfs_pm_thread;
+
+/* is wake up needed */
+short is_wake_up_needed = 0;
+int timeout_fired = 2;
+unsigned int is_mali_pmm_testsuite_enabled = 0;
+
+_mali_device_power_states mali_device_state = _MALI_DEVICE_RESUME;
+_mali_device_power_states mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+_mali_osk_lock_t *lock;
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+const char* const mali_pmm_recording_events[_MALI_DEVICE_MAX_PMM_EVENTS] = {
+ [_MALI_DEVICE_PMM_TIMEOUT_EVENT] = "timeout",
+ [_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS] = "job_scheduling",
+ [_MALI_DEVICE_PMM_REGISTERED_CORES] = "cores",
+
+};
+
+unsigned int mali_timeout_event_recording_on = 0;
+unsigned int mali_job_scheduling_events_recording_on = 0;
+unsigned int is_mali_pmu_present = 0;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+/* Function prototypes */
+static int mali_pm_probe(struct platform_device *pdev);
+static int mali_pm_remove(struct platform_device *pdev);
+
+/* Mali device suspend function */
+static int mali_pm_suspend(struct device *dev);
+
+/* Mali device resume function */
+static int mali_pm_resume(struct device *dev);
+
+/* Run time suspend and resume functions */
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+static int mali_device_runtime_suspend(struct device *dev);
+static int mali_device_runtime_resume(struct device *dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/* Early suspend functions */
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void mali_pm_early_suspend(struct early_suspend *mali_dev);
+static void mali_pm_late_resume(struct early_suspend *mali_dev);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+/* OS suspend and resume callbacks */
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifndef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_suspend(struct platform_device *pdev, pm_message_t state);
+#else
+static int mali_pm_os_suspend(struct device *dev);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_resume(struct platform_device *pdev);
+#else
+static int mali_pm_os_resume(struct device *dev);
+#endif
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+/* OS Hibernation suspend callback */
+static int mali_pm_os_suspend_on_hibernation(struct device *dev);
+
+/* OS Hibernation resume callback */
+static int mali_pm_os_resume_on_hibernation(struct device *dev);
+
+static void _mali_release_pm(struct device* device);
+
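+/* For kernels 2.6.29 and newer the PM callbacks are provided through struct
+ * dev_pm_ops; older kernels use the legacy suspend/resume hooks in struct
+ * platform_driver together with struct pm_ext_ops below. */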
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static const struct dev_pm_ops mali_dev_pm_ops = {
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .runtime_suspend = mali_device_runtime_suspend,
+ .runtime_resume = mali_device_runtime_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .suspend = mali_pm_os_suspend,
+ .resume = mali_pm_os_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+ .freeze = mali_pm_os_suspend_on_hibernation,
+ .poweroff = mali_pm_os_suspend_on_hibernation,
+ .thaw = mali_pm_os_resume_on_hibernation,
+ .restore = mali_pm_os_resume_on_hibernation,
+};
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+struct pm_ext_ops mali_pm_operations = {
+ .base = {
+ .freeze = mali_pm_os_suspend_on_hibernation,
+ .thaw = mali_pm_os_resume_on_hibernation,
+ .poweroff = mali_pm_os_suspend_on_hibernation,
+ .restore = mali_pm_os_resume_on_hibernation,
+ },
+};
+#endif
+
+static struct platform_driver mali_plat_driver = {
+ .probe = mali_pm_probe,
+ .remove = mali_pm_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .suspend = mali_pm_os_suspend,
+ .resume = mali_pm_os_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+ .pm = &mali_pm_operations,
+#endif
+
+ .driver = {
+ .name = "mali_dev",
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+ .pm = &mali_dev_pm_ops,
+#endif
+ },
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/* Early suspend hooks */
+static struct early_suspend mali_dev_early_suspend = {
+ .suspend = mali_pm_early_suspend,
+ .resume = mali_pm_late_resume,
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
+};
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+/* Mali GPU platform device */
+struct platform_device mali_gpu_device = {
+ .name = "mali_dev",
+ .id = 0,
+ .dev.release = _mali_release_pm
+};
+
+/** This function is called when the platform device is unregistered. It must be
+ * provided as the device's release callback.
+ */
+static void _mali_release_pm(struct device *device)
+{
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI Platform device removed\n" ));
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+void mali_is_pmu_present(void)
+{
+ int temp = pmu_get_power_up_down_info();
+ if (4095 == temp)
+ {
+ is_mali_pmu_present = 0;
+ }
+ else
+ {
+ is_mali_pmu_present = 1;
+ }
+}
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+#endif /* MALI_LICENSE_IS_GPL */
+
+#if MALI_LICENSE_IS_GPL
+
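+/* Put the calling suspend/resume thread to sleep until the power management
+ * policy wakes it up (is_wake_up_needed is set) or a signal is pending. */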
+static int mali_wait_for_power_management_policy_event(void)
+{
+ int err = 0;
+ for (;;)
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ {
+ err = -EINTR;
+ break;
+ }
+ if (is_wake_up_needed == 1)
+ {
+ break;
+ }
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ is_wake_up_needed = 0;
+ return err;
+}
+
+/** This function is invoked when the mali device is being suspended.
+ */
+int mali_device_suspend(unsigned int event_id, struct task_struct **pwr_mgmt_thread)
+{
+ int err = 0;
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ event_id,
+ timeout_fired};
+ *pwr_mgmt_thread = current;
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI device is being suspended\n" ));
+ _mali_ukk_pmm_event_message(&event);
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 1;
+#endif /* MALI_STATE_TRACKING */
+ err = mali_wait_for_power_management_policy_event();
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 0;
+#endif /* MALI_STATE_TRACKING */
+ return err;
+}
+
+/** This function is called when the operating system wants to power down
+ * the mali GPU device.
+ */
+static int mali_pm_suspend(struct device *dev)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+#if MALI_GPU_UTILIZATION
+ mali_utilization_suspend();
+#endif /* MALI_GPU_UTILIZATION */
+ if ((mali_device_state == _MALI_DEVICE_SUSPEND)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ || mali_device_state == (_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+#else
+ )
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+ }
+ mali_device_state = _MALI_DEVICE_SUSPEND_IN_PROGRESS;
+ err = mali_device_suspend(MALI_PMM_EVENT_OS_POWER_DOWN, &pm_thread);
+ mali_device_state = _MALI_DEVICE_SUSPEND;
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_suspend(struct platform_device *pdev, pm_message_t state)
+#else
+static int mali_pm_os_suspend(struct device *dev)
+#endif
+{
+ int err = 0;
+ err = mali_pm_suspend(NULL);
+ return err;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifndef CONFIG_HAS_EARLYSUSPEND
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy)
+{
+ int err = 0;
+ switch (event)
+ {
+ case PM_SUSPEND_PREPARE:
+ err = mali_pm_suspend(NULL);
+ break;
+
+ case PM_POST_SUSPEND:
+ err = mali_pm_resume(NULL);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/** This function is called when the mali GPU device is to be resumed.
+ */
+int mali_device_resume(unsigned int event_id, struct task_struct **pwr_mgmt_thread)
+{
+ int err = 0;
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ event_id,
+ timeout_fired};
+ *pwr_mgmt_thread = current;
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI device is being resumed\n" ));
+ _mali_ukk_pmm_event_message(&event);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI Power up event is scheduled\n" ));
+
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ err = mali_wait_for_power_management_policy_event();
+
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ return err;
+}
+
+/** This function handles resume of the mali GPU device.
+ */
+
+static int mali_pm_resume(struct device *dev)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if (mali_device_state == _MALI_DEVICE_RESUME)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+ }
+ err = mali_device_resume(MALI_PMM_EVENT_OS_POWER_UP, &pm_thread);
+ mali_device_state = _MALI_DEVICE_RESUME;
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_resume(struct platform_device *pdev)
+#else
+static int mali_pm_os_resume(struct device *dev)
+#endif
+{
+ int err = 0;
+ err = mali_pm_resume(NULL);
+ return err;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+static int mali_pm_os_suspend_on_hibernation(struct device *dev)
+{
+ int err = 0;
+ err = mali_pm_suspend(NULL);
+ return err;
+}
+
+static int mali_pm_os_resume_on_hibernation(struct device *dev)
+{
+ int err = 0;
+ err = mali_pm_resume(NULL);
+ return err;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+/** This function is called when runtime suspend of the mali device is required.
+ */
+static int mali_device_runtime_suspend(struct device *dev)
+{
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Mali device Run time suspended \n" ));
+ return 0;
+}
+
+/** This function is called when runtime resume of the mali device is required.
+ */
+static int mali_device_runtime_resume(struct device *dev)
+{
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Mali device Run time Resumed \n" ));
+ return 0;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+
+/* This function is called from the Android framework's early suspend path.
+ */
+static void mali_pm_early_suspend(struct early_suspend *mali_dev)
+{
+ switch(mali_dev->level)
+ {
+ /* Screen should be turned off but framebuffer will be accessible */
+ case EARLY_SUSPEND_LEVEL_BLANK_SCREEN:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Screen is off\n" ));
+ break;
+
+ case EARLY_SUSPEND_LEVEL_STOP_DRAWING:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Suspend level stop drawing\n" ));
+ break;
+
+ /* Turn off the framebuffer. In our case No Mali GPU operation */
+ case EARLY_SUSPEND_LEVEL_DISABLE_FB:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Suspend level Disable framebuffer\n" ));
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+#if MALI_GPU_UTILIZATION
+ mali_utilization_suspend();
+#endif /* MALI_GPU_UTILIZATION */
+ if ((mali_device_state == _MALI_DEVICE_SUSPEND) || (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return;
+ }
+ mali_device_suspend(MALI_PMM_EVENT_OS_POWER_DOWN, &pm_thread);
+ mali_device_state = _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB;
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Invalid Suspend Mode\n" ));
+ break;
+ }
+}
+
+/* This function is invoked from the Android framework when the mali device needs to be
+ * resumed.
+ */
+static void mali_pm_late_resume(struct early_suspend *mali_dev)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if (mali_device_state == _MALI_DEVICE_RESUME)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return;
+ }
+ if (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB)
+ {
+ mali_device_resume(MALI_PMM_EVENT_OS_POWER_UP, &pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ mali_device_state = _MALI_DEVICE_RESUME;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef CONFIG_PM_DEBUG
+
+/** This function is used for debugging purposes, when the user wants to see
+ * which power management operations are supported for the mali device.
+ */
+static ssize_t show_file(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char *str = buf;
+#if !MALI_POWER_MGMT_TEST_SUITE
+ int pm_counter = 0;
+ for (pm_counter = 0; pm_counter<_MALI_MAX_DEBUG_OPERATIONS; pm_counter++)
+ {
+ str += sprintf(str, "%s ", mali_states[pm_counter]);
+ }
+#else
+ str += sprintf(str, "%d ",pwr_mgmt_status_reg);
+#endif
+ if (str != buf)
+ {
+ *(str-1) = '\n';
+ }
+ return (str-buf);
+}
+
+/** This function is called when the user wants to suspend the mali GPU device in
+ * order to simulate power up and power down events.
+ */
+static ssize_t store_file(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ int err = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend mali_dev;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+ int test_flag_dvfs = 0;
+ pwr_mgmt_status_reg = 0;
+ mali_is_pmu_present();
+
+#endif
+ if (!strncmp(buf,mali_states[_MALI_DEVICE_SUSPEND],strlen(mali_states[_MALI_DEVICE_SUSPEND])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI suspend Power operation is scheduled\n" ));
+ err = mali_pm_suspend(NULL);
+ }
+
+#if MALI_POWER_MGMT_TEST_SUITE
+ else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_REGISTERED_CORES],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_REGISTERED_CORES])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Device get number of registerd cores\n" ));
+ pwr_mgmt_status_reg = _mali_pmm_cores_list();
+ return count;
+ }
+ else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_TIMEOUT_EVENT],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_TIMEOUT_EVENT])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI timeout event recording is enabled\n" ));
+ mali_timeout_event_recording_on = 1;
+ }
+ else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Job scheduling events recording is enabled\n" ));
+ mali_job_scheduling_events_recording_on = 1;
+ }
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+ else if (!strncmp(buf,mali_states[_MALI_DEVICE_RESUME],strlen(mali_states[_MALI_DEVICE_RESUME])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Resume Power operation is scheduled\n" ));
+ err = mali_pm_resume(NULL);
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ else if (!strncmp(buf,mali_states[_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB],strlen(mali_states[_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB])))
+ {
+ mali_dev.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Android early suspend operation is scheduled\n" ));
+ mali_pm_early_suspend(&mali_dev);
+ }
+ else if (!strncmp(buf,mali_states[_MALI_DEVICE_LATERESUME],strlen(mali_states[_MALI_DEVICE_LATERESUME])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Resume Power operation is scheduled\n" ));
+ mali_pm_late_resume(NULL);
+ }
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ else if (!strncmp(buf,mali_states[_MALI_DVFS_PAUSE_EVENT],strlen(mali_states[_MALI_DVFS_PAUSE_EVENT])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI DVFS Pause Power operation is scheduled\n" ));
+ err = mali_dev_pause();
+#if MALI_POWER_MGMT_TEST_SUITE
+ test_flag_dvfs = 1;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ }
+ else if (!strncmp(buf,mali_states[_MALI_DVFS_RESUME_EVENT],strlen(mali_states[_MALI_DVFS_RESUME_EVENT])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI DVFS Resume Power operation is scheduled\n" ));
+ err = mali_dev_resume();
+#if MALI_POWER_MGMT_TEST_SUITE
+ test_flag_dvfs = 1;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Invalid Power Mode Operation selected\n" ));
+ }
+#if MALI_POWER_MGMT_TEST_SUITE
+ if (test_flag_dvfs == 1)
+ {
+ if (err)
+ {
+ pwr_mgmt_status_reg = 2;
+ }
+ else
+ {
+ pwr_mgmt_status_reg = 1;
+ }
+ }
+ else
+ {
+ if (1 == is_mali_pmu_present)
+ {
+ pwr_mgmt_status_reg = pmu_get_power_up_down_info();
+ }
+ }
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ return count;
+}
+
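+/* Debug interface: reading the attribute lists the supported power management
+ * operations (or, in test suite builds, the power management status register),
+ * while writing one of the state strings (e.g. "suspend" or "resume") simulates
+ * the corresponding power event. The attribute is created on the "mali_dev"
+ * platform device in mali_pm_probe(). */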
+/* Device attribute file */
+static DEVICE_ATTR(file, 0644, show_file, store_file);
+#endif /* CONFIG_PM_DEBUG */
+
+static int mali_pm_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_DEBUG
+ device_remove_file(&mali_gpu_device.dev, &dev_attr_file);
+#endif /* CONFIG_PM_DEBUG */
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ pm_runtime_disable(&pdev->dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+ return 0;
+}
+
+/** This function is called when the device is probed */
+static int mali_pm_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_DEBUG
+ int err;
+ err = device_create_file(&mali_gpu_device.dev, &dev_attr_file);
+ if (err)
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Error in creating device file\n" ));
+ }
+#endif /* CONFIG_PM_DEBUG */
+ return 0;
+}
+
+/** This function is called when the Mali GPU device is initialized.
+ */
+int _mali_dev_platform_register(void)
+{
+ int err;
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ set_mali_parent_power_domain(&mali_gpu_device);
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ err = register_pm_notifier(&mali_pwr_notif_block);
+ if (err)
+ {
+ return err;
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_PM_RUNTIME */
+ err = platform_device_register(&mali_gpu_device);
+ lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)( _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 0);
+ if (!err)
+ {
+ err = platform_driver_register(&mali_plat_driver);
+ if (!err)
+ {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ register_early_suspend(&mali_dev_early_suspend);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ }
+ else
+ {
+ _mali_osk_lock_term(lock);
+#ifdef CONFIG_PM_RUNTIME
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ unregister_pm_notifier(&mali_pwr_notif_block);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_PM_RUNTIME */
+ platform_device_unregister(&mali_gpu_device);
+ }
+ }
+ return err;
+}
+
+/** This function is called when the Mali GPU device is unloaded.
+ */
+void _mali_dev_platform_unregister(void)
+{
+ _mali_osk_lock_term(lock);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&mali_dev_early_suspend);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef CONFIG_PM_RUNTIME
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ unregister_pm_notifier(&mali_pwr_notif_block);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_PM_RUNTIME */
+
+ platform_driver_unregister(&mali_plat_driver);
+ platform_device_unregister(&mali_gpu_device);
+}
+
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+
+#if MALI_STATE_TRACKING
+void mali_pmm_dump_os_thread_state( void )
+{
+ MALI_PRINTF(("\nOSPMM: The OS PMM thread state is %d\n", is_os_pmm_thread_waiting));
+}
+#endif /* MALI_STATE_TRACKING */
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h
new file mode 100644
index 00000000000..6e879fea447
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PM_H__
+#define __MALI_KERNEL_PM_H__
+
+#ifdef USING_MALI_PMM
+int _mali_dev_platform_register(void);
+void _mali_dev_platform_unregister(void);
+#endif /* USING_MALI_PMM */
+
+#endif /* __MALI_KERNEL_PM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c
new file mode 100644
index 00000000000..3d60d18e005
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c
@@ -0,0 +1,72 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_linux_dvfs_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+#if USING_MALI_PMM
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+#include "mali_linux_pm.h"
+#include "mali_linux_dvfs_pause_resume.h"
+#include "mali_pmm.h"
+#include "mali_kernel_license.h"
+#ifdef CONFIG_PM
+#if MALI_LICENSE_IS_GPL
+
+/* Mali Pause Resume APIs */
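+/* Exported pause/resume entry points. Judging by the DVFS event ids used below,
+ * they are intended to let platform DVFS code pause GPU job processing (for
+ * example around a frequency change) and resume it afterwards. */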
+int mali_dev_dvfs_pause(void)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_SUSPEND) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND))
+ {
+ err = -EPERM;
+ }
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) && (!err))
+ {
+ mali_device_suspend(MALI_PMM_EVENT_DVFS_PAUSE, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_SUSPEND;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_dvfs_pause);
+
+int mali_dev_dvfs_resume(void)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND))
+ {
+ err = -EPERM;
+ }
+ if (!err)
+ {
+ mali_device_resume(MALI_PMM_EVENT_DVFS_RESUME, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_dvfs_resume);
+
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h
new file mode 100644
index 00000000000..614d5e1deee
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h
@@ -0,0 +1,57 @@
+
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_LINUX_PM_H__
+#define __MALI_LINUX_PM_H__
+
+#if USING_MALI_PMM
+
+#ifdef CONFIG_PM
+/* Power states used when powering the device up and down */
+typedef enum
+{
+ _MALI_DEVICE_SUSPEND, /* Suspend */
+ _MALI_DEVICE_RESUME, /* Resume */
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB, /* Early suspend */
+ _MALI_DEVICE_LATERESUME, /* Late resume */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ _MALI_DEVICE_SUSPEND_IN_PROGRESS, /* Suspend in progress */
+ _MALI_DEVICE_MAX_POWER_STATES, /* Maximum power states */
+} _mali_device_power_states;
+
+/* DVFS events */
+typedef enum
+{
+ _MALI_DVFS_PAUSE_EVENT = _MALI_DEVICE_MAX_POWER_STATES-1, /* DVFS Pause event */
+ _MALI_DVFS_RESUME_EVENT, /* DVFS Resume event */
+ _MALI_MAX_DEBUG_OPERATIONS,
+} _mali_device_dvfs_events;
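+/* Note: the DVFS event values continue the _mali_device_power_states numbering so
+ * that both enums can index the shared mali_states[] debug string table; as a
+ * consequence _MALI_DVFS_PAUSE_EVENT shares its value with
+ * _MALI_DEVICE_SUSPEND_IN_PROGRESS, which has no debug string of its own. */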
+
+extern _mali_device_power_states mali_device_state;
+extern _mali_device_power_states mali_dvfs_device_state;
+extern _mali_osk_lock_t *lock;
+extern short is_wake_up_needed;
+extern int timeout_fired;
+extern struct platform_device mali_gpu_device;
+
+/* dvfs pm thread */
+extern struct task_struct *dvfs_pm_thread;
+
+/* Power management thread */
+extern struct task_struct *pm_thread;
+
+int mali_device_suspend(u32 event_id, struct task_struct **pwr_mgmt_thread);
+int mali_device_resume(u32 event_id, struct task_struct **pwr_mgmt_thread);
+
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
+#endif /* __MALI_LINUX_PM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h
new file mode 100644
index 00000000000..c80b0b0a5e3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __MALI_LINUX_PM_TESTSUITE_H__
+#define __MALI_LINUX_PM_TESTSUITE_H__
+
+#if USING_MALI_PMM
+#if MALI_POWER_MGMT_TEST_SUITE
+#ifdef CONFIG_PM
+
+typedef enum
+{
+ _MALI_DEVICE_PMM_TIMEOUT_EVENT,
+ _MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS,
+ _MALI_DEVICE_PMM_REGISTERED_CORES,
+ _MALI_DEVICE_MAX_PMM_EVENTS
+
+} _mali_device_pmm_recording_events;
+
+extern unsigned int mali_timeout_event_recording_on;
+extern unsigned int mali_job_scheduling_events_recording_on;
+extern unsigned int pwr_mgmt_status_reg;
+extern unsigned int is_mali_pmm_testsuite_enabled;
+extern unsigned int is_mali_pmu_present;
+
+#endif /* CONFIG_PM */
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+#endif /* USING_MALI_PMM */
+#endif /* __MALI_LINUX_PM_TESTSUITE_H__ */
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c
new file mode 100644
index 00000000000..cc029c4a8f8
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom )
+{
+ atomic_dec((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom )
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom )
+{
+ atomic_inc((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom )
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val )
+{
+ MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+ atomic_set((atomic_t *)&atom->u.val, val);
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_read((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom )
+{
+ MALI_IGNORE(atom);
+}
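+
+/*
+ * Illustrative sketch (not part of the original driver sources): typical use of
+ * the OSK atomic wrappers defined above, e.g. for a simple counter.
+ */
+#if 0 /* example only */
+static _mali_osk_atomic_t example_counter;
+
+static void example_atomic_usage( void )
+{
+ if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init( &example_counter, 0 ) ) return;
+
+ _mali_osk_atomic_inc( &example_counter ); /* counter is now 1 */
+
+ if ( 0 == _mali_osk_atomic_dec_return( &example_counter ) )
+ {
+ /* counter dropped back to 0 */
+ }
+
+ _mali_osk_atomic_term( &example_counter );
+}
+#endif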
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c
new file mode 100644
index 00000000000..b60bf825897
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * @file mali_osk_indir_mmap.c
+ * Implementation of per-OS Kernel level specifics
+ */
+
+_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args )
+{
+ /* args->ctx ignored here; args->ukk_private required instead */
+ /* we need to lock the mmap semaphore before calling the do_mmap function */
+ down_write(&current->mm->mmap_sem);
+
+ args->mapping = (void __user *)do_mmap(
+ (struct file *)args->ukk_private,
+ 0, /* start mapping from any address after NULL */
+ args->size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ args->phys_addr
+ );
+
+ /* and unlock it after the call */
+ up_write(&current->mm->mmap_sem);
+
+ /* No cookie required here */
+ args->cookie = 0;
+ /* uku_private meaningless, so zero */
+ args->uku_private = NULL;
+
+ if ( (NULL == args->mapping) || IS_ERR((void *)args->mapping) )
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Success */
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _mali_osk_specific_indirect_munmap( _mali_uk_mem_munmap_s *args )
+{
+ /* args->ctx and args->cookie ignored here */
+
+ if ((NULL != current) && (NULL != current->mm))
+ {
+ /* remove mapping of mali memory from the process' view */
+ /* lock mmap_sem before calling do_munmap */
+ down_write(&current->mm->mmap_sem);
+ do_munmap(
+ current->mm,
+ (unsigned long)args->mapping,
+ args->size
+ );
+ /* and unlock after call */
+ up_write(&current->mm->mmap_sem);
+ MALI_DEBUG_PRINT(5, ("unmapped\n"));
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(2, ("Freeing of a big block while no user process attached, assuming crash cleanup in progress\n"));
+ }
+
+ return _MALI_OSK_ERR_OK; /* always succeeds */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h
new file mode 100644
index 00000000000..f87739bb8f9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_indir_mmap.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_INDIR_MMAP_H__
+#define __MALI_OSK_INDIR_MMAP_H__
+
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Linux specific means for calling _mali_ukk_mem_mmap/munmap
+ *
+ * The presence of _MALI_OSK_SPECIFIC_INDIRECT_MMAP indicates that
+ * _mali_osk_specific_indirect_mmap and _mali_osk_specific_indirect_munmap
+ * should be used instead of _mali_ukk_mem_mmap/_mali_ukk_mem_munmap.
+ *
+ * The arguments are the same as _mali_ukk_mem_mmap/_mali_ukk_mem_munmap.
+ *
+ * In ALL operating systems other than Linux, it is expected that common code
+ * should be able to call _mali_ukk_mem_mmap/_mali_ukk_mem_munmap directly.
+ * Such systems should NOT define _MALI_OSK_SPECIFIC_INDIRECT_MMAP.
+ */
+_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args );
+_mali_osk_errcode_t _mali_osk_specific_indirect_munmap( _mali_uk_mem_munmap_s *args );
+
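+/*
+ * Illustrative sketch (not part of the original driver sources): how common code
+ * is expected to choose between the direct and the indirect mmap paths when this
+ * port defines _MALI_OSK_SPECIFIC_INDIRECT_MMAP as described above.
+ */
+#if 0 /* example only */
+static _mali_osk_errcode_t example_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+#ifdef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+ return _mali_osk_specific_indirect_mmap( args );
+#else
+ return _mali_ukk_mem_mmap( args );
+#endif
+}
+#endif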
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_INDIR_MMAP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c
new file mode 100644
index 00000000000..1a4bbcecdce
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/workqueue.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "linux/interrupt.h"
+
+typedef struct _mali_osk_irq_t_struct
+{
+ u32 irqnum;
+ void *data;
+ _mali_osk_irq_uhandler_t uhandler;
+ _mali_osk_irq_bhandler_t bhandler;
+ struct work_struct work_queue_irq_handle; /* Workqueue for the bottom half of the IRQ-handling. This job is activated when this core gets an IRQ.*/
+} mali_osk_irq_object_t;
+
+#if MALI_LICENSE_IS_GPL
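+/* Dedicated single-threaded workqueue used for the PMM bottom half (see
+ * _mali_osk_irq_init() and _mali_osk_irq_schedulework() below); only available
+ * in GPL licensed builds. */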
+static struct workqueue_struct *pmm_wq = NULL;
+#endif
+
+typedef void (*workqueue_func_t)(void *);
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ); /* , struct pt_regs *regs*/
+
+#if defined(INIT_DELAYED_WORK)
+static void irq_handler_bottom_half ( struct work_struct *work );
+#else
+static void irq_handler_bottom_half ( void * input );
+#endif
+
+/**
+ * Newer Linux kernels have deprecated SA_SHIRQ in favour of IRQF_SHARED.
+ * This fallback handles older kernels which have not made that switch yet.
+ */
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif /* IRQF_SHARED */
+
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description )
+{
+ mali_osk_irq_object_t *irq_object;
+
+ irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+ if (NULL == irq_object) return NULL;
+
+ /* workqueue API changed in 2.6.20, support both versions: */
+#if defined(INIT_DELAYED_WORK)
+ /* New syntax: INIT_WORK( struct work_struct *work, void (*function)(struct work_struct *)) */
+ INIT_WORK( &irq_object->work_queue_irq_handle, irq_handler_bottom_half);
+#else
+ /* Old syntax: INIT_WORK( struct work_struct *work, void (*function)(void *), void *data) */
+ INIT_WORK( &irq_object->work_queue_irq_handle, irq_handler_bottom_half, irq_object);
+#endif /* defined(INIT_DELAYED_WORK) */
+
+ if (-1 == irqnum)
+ {
+ /* Probe for IRQ */
+ if ( (NULL != trigger_func) && (NULL != ack_func) )
+ {
+ unsigned long probe_count = 3;
+ _mali_osk_errcode_t err;
+ int irq;
+
+ MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+ do
+ {
+ unsigned long mask;
+
+ mask = probe_irq_on();
+ trigger_func(data);
+
+ _mali_osk_time_ubusydelay(5);
+
+ irq = probe_irq_off(mask);
+ err = ack_func(data);
+ }
+ while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+ if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+ else irqnum = irq;
+ }
+ else irqnum = -1; /* no probe functions, fault */
+
+ if (-1 != irqnum)
+ {
+ /* found an irq */
+ MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+ }
+ }
+
+ irq_object->irqnum = irqnum;
+ irq_object->uhandler = uhandler;
+ irq_object->bhandler = bhandler;
+ irq_object->data = data;
+
+ /* Is this a real IRQ handler we need? */
+ if (!mali_benchmark && irqnum != _MALI_OSK_IRQ_NUMBER_FAKE && irqnum != _MALI_OSK_IRQ_NUMBER_PMM)
+ {
+ if (-1 == irqnum)
+ {
+ MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+
+ if (0 != request_irq(irqnum, irq_handler_upper_half, IRQF_SHARED, description, irq_object))
+ {
+ MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+ }
+
+#if MALI_LICENSE_IS_GPL
+ if ( _MALI_OSK_IRQ_NUMBER_PMM == irqnum )
+ {
+ pmm_wq = create_singlethread_workqueue("mali-pmm-wq");
+ }
+#endif
+
+ return irq_object;
+}
+
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq )
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+#if MALI_LICENSE_IS_GPL
+ if ( irq_object->irqnum == _MALI_OSK_IRQ_NUMBER_PMM )
+ {
+ queue_work(pmm_wq,&irq_object->work_queue_irq_handle);
+ }
+ else
+ {
+#endif
+ schedule_work(&irq_object->work_queue_irq_handle);
+#if MALI_LICENSE_IS_GPL
+ }
+#endif
+}
+
+void _mali_osk_irq_term( _mali_osk_irq_t *irq )
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+
+#if MALI_LICENSE_IS_GPL
+ if(irq_object->irqnum == _MALI_OSK_IRQ_NUMBER_PMM )
+ {
+ flush_workqueue(pmm_wq);
+ destroy_workqueue(pmm_wq);
+ }
+#endif
+ if (!mali_benchmark)
+ {
+ free_irq(irq_object->irqnum, irq_object);
+ }
+ kfree(irq_object);
+ flush_scheduled_work();
+}
+
+
+/** This function is called directly in interrupt context from the OS just after
+ * the CPU gets the hw-irq from mali, or from other devices on the same IRQ channel.
+ * One of these handlers is registered for each mali core, so when an interrupt
+ * arrives this function is called as many times as there are registered mali cores.
+ * That means we only check one mali core per call, and the core to check
+ * is given by the \a dev_id argument.
+ * If we detect a pending interrupt on the given core, we mask the interrupt
+ * out by setting the core's IRQ_MASK register to zero.
+ * Then we schedule the bottom half handler to run as a high priority
+ * work queue job.
+ */
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ) /* , struct pt_regs *regs*/
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+ if (irq_object->uhandler(irq_object->data) == _MALI_OSK_ERR_OK)
+ {
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/* Executed as a work queue job after an interrupt has occurred on one core */
+/* workqueue API changed in 2.6.20, support both versions: */
+#if defined(INIT_DELAYED_WORK)
+static void irq_handler_bottom_half ( struct work_struct *work )
+#else
+static void irq_handler_bottom_half ( void * input )
+#endif
+{
+ mali_osk_irq_object_t *irq_object;
+
+#if defined(INIT_DELAYED_WORK)
+ irq_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_irq_object_t, work_queue_irq_handle);
+#else
+ if ( NULL == input )
+ {
+ MALI_PRINT_ERROR(("IRQ: Null pointer! Illegal!"));
+ return; /* Error */
+ }
+ irq_object = (mali_osk_irq_object_t *) input;
+#endif
+
+ irq_object->bhandler(irq_object->data);
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c
new file mode 100644
index 00000000000..11285efc682
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* These are all the locks we implement: */
+typedef enum
+{
+ _MALI_OSK_INTERNAL_LOCKTYPE_SPIN, /* Mutex, implicitly non-interruptable, use spin_lock/spin_unlock */
+ _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ, /* Mutex, IRQ version of spinlock, use spin_lock_irqsave/spin_unlock_irqrestore */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX, /* Interruptable, use up()/down_interruptible() */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT, /* Non-Interruptable, use up()/down() */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW, /* Non-interruptable, Reader/Writer, use {up,down}{read,write}() */
+
+ /* Linux supports, but we do not support:
+ * Non-Interruptable Reader/Writer spinlock mutexes - RW optimization will be switched off
+ */
+
+ /* Linux does not support:
+ * One-locks, of any sort - no optimization for this fact will be made.
+ */
+
+} _mali_osk_internal_locktype;
+
+struct _mali_osk_lock_t_struct
+{
+ _mali_osk_internal_locktype type;
+ unsigned long flags;
+ union
+ {
+ spinlock_t spinlock;
+ struct semaphore sema;
+ struct rw_semaphore rw_sema;
+ } obj;
+ MALI_DEBUG_CODE(
+ /** original flags for debug checking */
+ _mali_osk_lock_flags_t orig_flags;
+ _mali_osk_lock_mode_t locked_as;
+ ); /* MALI_DEBUG_CODE */
+};
+
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order )
+{
+ _mali_osk_lock_t *lock = NULL;
+
+ /* Validate parameters: */
+ /* Flags acceptable */
+ MALI_DEBUG_ASSERT( 0 == ( flags & ~(_MALI_OSK_LOCKFLAG_SPINLOCK
+ | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ
+ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
+ | _MALI_OSK_LOCKFLAG_READERWRITER
+ | _MALI_OSK_LOCKFLAG_ORDERED
+ | _MALI_OSK_LOCKFLAG_ONELOCK )) );
+ /* Spinlocks are always non-interruptable */
+ MALI_DEBUG_ASSERT( (((flags & _MALI_OSK_LOCKFLAG_SPINLOCK) || (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ)) && (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE))
+ || !(flags & _MALI_OSK_LOCKFLAG_SPINLOCK));
+ /* Parameter initial SBZ - for future expansion */
+ MALI_DEBUG_ASSERT( 0 == initial );
+
+ lock = kmalloc(sizeof(_mali_osk_lock_t), GFP_KERNEL);
+
+ if ( NULL == lock )
+ {
+ return lock;
+ }
+
+ /* Determine type of mutex: */
+ /* defaults to interruptable mutex if no flags are specified */
+
+ if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK) )
+ {
+ /* Non-interruptable Spinlocks override all others */
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN;
+ spin_lock_init( &lock->obj.spinlock );
+ }
+ else if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ ) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ;
+ lock->flags = 0;
+ spin_lock_init( &lock->obj.spinlock );
+ }
+ else if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE)
+ && (flags & _MALI_OSK_LOCKFLAG_READERWRITER) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW;
+ init_rwsem( &lock->obj.rw_sema );
+ }
+ else
+ {
+ /* Usual mutex types */
+ if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT;
+ }
+ else
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX;
+ }
+
+ /* Initially unlocked */
+ sema_init( &lock->obj.sema, 1 );
+ }
+
+ MALI_DEBUG_CODE(
+ /* Debug tracking of flags */
+ lock->orig_flags = flags;
+ lock->locked_as = _MALI_OSK_LOCKMODE_UNDEF;
+ ); /* MALI_DEBUG_CODE */
+
+ return lock;
+}
+
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || _MALI_OSK_LOCKMODE_RO == mode );
+
+ /* Only allow RO locks when the initial object was a Reader/Writer lock
+ * Since information is lost on the internal locktype, we use the original
+ * information, which is only stored when built for DEBUG */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+ switch ( lock->type )
+ {
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+ spin_lock(&lock->obj.spinlock);
+ break;
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+ spin_lock_irqsave(&lock->obj.spinlock, lock->flags);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+ if ( down_interruptible(&lock->obj.sema) )
+ {
+ err = _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+ down(&lock->obj.sema);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+ if (mode == _MALI_OSK_LOCKMODE_RO)
+ {
+ down_read(&lock->obj.rw_sema);
+ }
+ else
+ {
+ down_write(&lock->obj.rw_sema);
+ }
+ break;
+
+ default:
+		/* Reaching this point indicates a programming error; the message below
+		 * is only compiled in on DEBUG builds */
+ MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+ break;
+ }
+
+ /* DEBUG tracking of previously locked state - occurs after lock obtained */
+ MALI_DEBUG_CODE(
+ if ( _MALI_OSK_ERR_OK == err )
+ {
+ /* Assert that this is not currently locked */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_UNDEF == lock->locked_as );
+
+ lock->locked_as = mode;
+ }
+ ); /* MALI_DEBUG_CODE */
+
+ return err;
+}
+
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode )
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || _MALI_OSK_LOCKMODE_RO == mode );
+
+	/* Only allow RO locking when the object was created as a reader/writer lock.
+	 * The internal lock type does not preserve this information, so we check the
+	 * original flags, which are only stored in DEBUG builds */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+ /* For DEBUG only, assert that we previously locked this, and in the same way (RW/RO) */
+ MALI_DEBUG_ASSERT( mode == lock->locked_as );
+
+ /* DEBUG tracking of previously locked state - occurs before lock released */
+ MALI_DEBUG_CODE( lock->locked_as = _MALI_OSK_LOCKMODE_UNDEF );
+
+ switch ( lock->type )
+ {
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+ spin_unlock(&lock->obj.spinlock);
+ break;
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+ spin_unlock_irqrestore(&lock->obj.spinlock, lock->flags);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+ /* FALLTHROUGH */
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+ up(&lock->obj.sema);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+ if (mode == _MALI_OSK_LOCKMODE_RO)
+ {
+ up_read(&lock->obj.rw_sema);
+ }
+ else
+ {
+ up_write(&lock->obj.rw_sema);
+ }
+ break;
+
+ default:
+		/* Reaching this point indicates a programming error; the message below
+		 * is only compiled in on DEBUG builds */
+ MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+ break;
+ }
+}
+
+void _mali_osk_lock_term( _mali_osk_lock_t *lock )
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ /* For DEBUG only, assert that this is not currently locked */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_UNDEF == lock->locked_as );
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+}
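A minimal usage sketch of the lock API above, assuming only the mali_osk.h declarations; the example_* names are invented for illustration and are not part of the patch.

static _mali_osk_lock_t *example_lock;

static _mali_osk_errcode_t example_setup(void)
{
	/* No flags: the default is an interruptible mutex; 'initial' must be 0 */
	example_lock = _mali_osk_lock_init(0, 0, 0);
	return (NULL == example_lock) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK;
}

static _mali_osk_errcode_t example_critical_section(void)
{
	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(example_lock, _MALI_OSK_LOCKMODE_RW))
	{
		return _MALI_OSK_ERR_RESTARTSYSCALL; /* interrupted by a signal */
	}
	/* ... protected work ... */
	_mali_osk_lock_signal(example_lock, _MALI_OSK_LOCKMODE_RW);
	return _MALI_OSK_ERR_OK;
}

static void example_teardown(void)
{
	_mali_osk_lock_term(example_lock); /* the lock must not be held here */
}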
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c
new file mode 100644
index 00000000000..6454709caaf
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h" /* required to hook in _mali_ukk_mem_mmap handling */
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma);
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+
+typedef struct mali_vma_usage_tracker
+{
+ int references;
+ u32 cookie;
+} mali_vma_usage_tracker;
+
+
+/* Linked list structure to hold details of all OS allocations in a particular
+ * mapping
+ */
+struct AllocationList
+{
+ struct AllocationList *next;
+ u32 offset;
+ u32 physaddr;
+};
+
+typedef struct AllocationList AllocationList;
+
+/* Private structure to store details of a mapping region returned
+ * from _mali_osk_mem_mapregion_init
+ */
+struct MappingInfo
+{
+ struct vm_area_struct *vma;
+ struct AllocationList *list;
+};
+
+typedef struct MappingInfo MappingInfo;
+
+
+static u32 _kernel_page_allocate(void);
+static void _kernel_page_release(u32 physical_address);
+static AllocationList * _allocation_list_item_get(void);
+static void _allocation_list_item_release(AllocationList * item);
+
+
+/* Variable declarations */
+spinlock_t allocation_list_spinlock;
+static AllocationList * pre_allocated_memory = (AllocationList*) NULL ;
+static int pre_allocated_memory_size_current = 0;
+#ifdef MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB
+ static int pre_allocated_memory_size_max = MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 1024 * 1024;
+#else
+ static int pre_allocated_memory_size_max = 6 * 1024 * 1024; /* 6 MiB */
+#endif
+
+static struct vm_operations_struct mali_kernel_vm_ops =
+{
+ .open = mali_kernel_memory_vma_open,
+ .close = mali_kernel_memory_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ .fault = mali_kernel_memory_cpu_page_fault_handler
+#else
+ .nopfn = mali_kernel_memory_cpu_page_fault_handler
+#endif
+};
+
+
+void mali_osk_low_level_mem_init(void)
+{
+ spin_lock_init( &allocation_list_spinlock );
+ pre_allocated_memory = (AllocationList*) NULL ;
+}
+
+void mali_osk_low_level_mem_term(void)
+{
+ while ( NULL != pre_allocated_memory )
+ {
+ AllocationList *item;
+ item = pre_allocated_memory;
+ pre_allocated_memory = item->next;
+ _kernel_page_release(item->physaddr);
+ _mali_osk_free( item );
+ }
+ pre_allocated_memory_size_current = 0;
+}
+
+static u32 _kernel_page_allocate(void)
+{
+ struct page *new_page;
+ u32 linux_phys_addr;
+
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+
+ if ( NULL == new_page )
+ {
+ return 0;
+ }
+
+ /* Ensure page is flushed from CPU caches. */
+ linux_phys_addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ return linux_phys_addr;
+}
+
+static void _kernel_page_release(u32 physical_address)
+{
+ struct page *unmap_page;
+
+	dma_unmap_page(NULL, physical_address, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ unmap_page = pfn_to_page( physical_address >> PAGE_SHIFT );
+ MALI_DEBUG_ASSERT_POINTER( unmap_page );
+ __free_page( unmap_page );
+}
+
+static AllocationList * _allocation_list_item_get(void)
+{
+ AllocationList *item = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&allocation_list_spinlock,flags);
+ if ( pre_allocated_memory )
+ {
+ item = pre_allocated_memory;
+ pre_allocated_memory = pre_allocated_memory->next;
+ pre_allocated_memory_size_current -= PAGE_SIZE;
+
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+ return item;
+ }
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+ item = _mali_osk_malloc( sizeof(AllocationList) );
+ if ( NULL == item)
+ {
+ return NULL;
+ }
+
+ item->physaddr = _kernel_page_allocate();
+ if ( 0 == item->physaddr )
+ {
+ /* Non-fatal error condition, out of memory. Upper levels will handle this. */
+ _mali_osk_free( item );
+ return NULL;
+ }
+ return item;
+}
+
+static void _allocation_list_item_release(AllocationList * item)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&allocation_list_spinlock,flags);
+ if ( pre_allocated_memory_size_current < pre_allocated_memory_size_max)
+ {
+ item->next = pre_allocated_memory;
+ pre_allocated_memory = item;
+ pre_allocated_memory_size_current += PAGE_SIZE;
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+ return;
+ }
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+ _kernel_page_release(item->physaddr);
+ _mali_osk_free( item );
+}
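A brief sketch of how the two helpers above are intended to be used by the mapping code later in this file; the wrapping function is hypothetical.

static _mali_osk_errcode_t example_take_and_return_page(void)
{
	AllocationList *item = _allocation_list_item_get();
	if (NULL == item)
	{
		return _MALI_OSK_ERR_NOMEM; /* out of memory: treated as non-fatal by callers */
	}

	/* item->physaddr is a CPU-flushed, page-sized physical allocation,
	 * ready to be remapped into a user VMA */

	/* On teardown the item returns to the pool, or is freed once the pool
	 * has reached pre_allocated_memory_size_max */
	_allocation_list_item_release(item);
	return _MALI_OSK_ERR_OK;
}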
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ void __user * address;
+ address = vmf->virtual_address;
+#endif
+ /*
+ * We always fail the call since all memory is pre-faulted when assigned to the process.
+ * Only the Mali cores can use page faults to extend buffers.
+ */
+
+ MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
+ MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ return VM_FAULT_SIGBUS;
+#else
+ return NOPFN_SIGBUS;
+#endif
+}
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma)
+{
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+ vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+ vma_usage_tracker->references++;
+
+ return;
+}
+
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma)
+{
+ _mali_uk_mem_munmap_s args = {0, };
+ mali_memory_allocation * descriptor;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
+
+ vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+
+ BUG_ON(!vma_usage_tracker);
+ BUG_ON(0 == vma_usage_tracker->references);
+
+ vma_usage_tracker->references--;
+
+ if (0 != vma_usage_tracker->references)
+ {
+		MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exist\n", vma_usage_tracker->references));
+ return;
+ }
+
+	/** @note args.ctx is unused and left initialized to 0.
+	 * The memory session is instead obtained via the cookie */
+
+ descriptor = (mali_memory_allocation *)vma_usage_tracker->cookie;
+
+ args.cookie = (u32)descriptor;
+ args.mapping = descriptor->mapping;
+ args.size = descriptor->size;
+
+ _mali_ukk_mem_munmap( &args );
+
+	/* vma_usage_tracker is freed by _mali_osk_mem_mapregion_term(), which, in the
+	 * case of the memory engine, is invoked as the release function registered with the engine */
+}
+
+
+void _mali_osk_mem_barrier( void )
+{
+ mb();
+}
+
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
+{
+ return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
+{
+ iounmap((void*)virt);
+}
+
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size )
+{
+ void * virt;
+ MALI_DEBUG_ASSERT_POINTER( phys );
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+ MALI_DEBUG_ASSERT( 0 != size );
+
+	/* dma_alloc_* uses a limited region of address space. On most architectures,
+	 * 2 to 14 MiB is available. This should be enough for the page tables, which
+	 * are currently the only user of this function. */
+ virt = dma_alloc_coherent(NULL, size, phys, GFP_KERNEL | GFP_DMA );
+
+ MALI_DEBUG_PRINT(3, ("Page table virt: 0x%x = dma_alloc_coherent(size:%d, phys:0x%x, )\n", virt, size, phys));
+
+ if ( NULL == virt )
+ {
+ MALI_DEBUG_PRINT(1, ("allocioregion: Failed to allocate Pagetable memory, size=0x%.8X\n", size ));
+ MALI_DEBUG_PRINT(1, ("Solution: When configuring and building linux kernel, set CONSISTENT_DMA_SIZE to be 14 MB.\n"));
+ return 0;
+ }
+
+ MALI_DEBUG_ASSERT( 0 == (*phys & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ return (mali_io_address)virt;
+}
+
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address virt )
+{
+ MALI_DEBUG_ASSERT_POINTER( (void*)virt );
+ MALI_DEBUG_ASSERT( 0 != size );
+ MALI_DEBUG_ASSERT( 0 == (phys & ( (1 << PAGE_SHIFT) - 1 )) );
+
+ dma_free_coherent(NULL, size, virt, phys);
+}
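A small sketch of the expected allocate/free pairing, assuming _MALI_OSK_CPU_PAGE_SIZE from mali_osk.h; the caller is hypothetical (the MMU page table code is the intended user).

static void example_pagetable_buffer(void)
{
	u32 pt_phys;
	mali_io_address pt_virt;

	/* size must be non-zero and a multiple of the CPU page size */
	pt_virt = _mali_osk_mem_allocioregion(&pt_phys, _MALI_OSK_CPU_PAGE_SIZE);
	if (NULL == pt_virt)
	{
		return; /* out of DMA-coherent memory (see the CONSISTENT_DMA_SIZE hint above) */
	}

	/* ... write page table entries through pt_virt, hand pt_phys to the MMU ... */

	_mali_osk_mem_freeioregion(pt_phys, _MALI_OSK_CPU_PAGE_SIZE, pt_virt);
}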
+
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
+{
+ return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+}
+
+void inline _mali_osk_mem_unreqregion( u32 phys, u32 size )
+{
+ release_mem_region(phys, size);
+}
+
+u32 inline _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset )
+{
+ return ioread32(((u8*)addr) + offset);
+}
+
+void inline _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val )
+{
+ iowrite32(val, ((u8*)addr) + offset);
+}
+
+void _mali_osk_cache_flushall( void )
+{
+ /** @note Cached memory is not currently supported in this implementation */
+}
+
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
+{
+ wmb();
+}
+
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor )
+{
+ struct vm_area_struct *vma;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ vma = (struct vm_area_struct*)descriptor->process_addr_mapping_info;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ /* Re-write the process_addr_mapping_info */
+ mappingInfo = _mali_osk_calloc( 1, sizeof(MappingInfo) );
+
+ if ( NULL == mappingInfo ) return _MALI_OSK_ERR_FAULT;
+
+ vma_usage_tracker = _mali_osk_calloc( 1, sizeof(mali_vma_usage_tracker) );
+
+ if (NULL == vma_usage_tracker)
+ {
+ MALI_DEBUG_PRINT(2, ("Failed to allocate memory to track memory usage\n"));
+ _mali_osk_free( mappingInfo );
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mappingInfo->vma = vma;
+ descriptor->process_addr_mapping_info = mappingInfo;
+
+ /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+ descriptor->mapping = (void __user*)vma->vm_start;
+ /* list member is already NULL */
+
+	/* Set flags which indicate that:
+	 *  - the memory is IO memory: no paging is performed and it is excluded from crash dumps
+	 *  - the memory is reserved: it is always present and can never be paged out
+	 *  - the mapping must not be copied on fork (VM_DONTCOPY)
+	 */
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTCOPY;
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
+
+ vma_usage_tracker->references = 1; /* set initial reference count to be 1 as vma_open won't be called for the first mmap call */
+ vma_usage_tracker->cookie = (u32)descriptor; /* cookie for munmap */
+
+ vma->vm_private_data = vma_usage_tracker;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor )
+{
+ struct vm_area_struct* vma;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ /* Linux does the right thing as part of munmap to remove the mapping
+ * All that remains is that we remove the vma_usage_tracker setup in init() */
+ vma = mappingInfo->vma;
+
+ MALI_DEBUG_ASSERT_POINTER( vma );
+
+ /* ASSERT that there are no allocations on the list. Unmap should've been
+ * called on all OS allocations. */
+ MALI_DEBUG_ASSERT( NULL == mappingInfo->list );
+
+ vma_usage_tracker = vma->vm_private_data;
+
+ /* We only get called if mem_mapregion_init succeeded */
+ _mali_osk_free(vma_usage_tracker);
+
+ _mali_osk_free( mappingInfo );
+ return;
+}
+
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size )
+{
+ struct vm_area_struct *vma;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK));
+
+ if (NULL == descriptor->mapping) return _MALI_OSK_ERR_INVALID_ARGS;
+
+ if (size > (descriptor->size - offset))
+ {
+		MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_map: virtual memory area not large enough to map physical 0x%x size %x into area 0x%x at offset 0x%x\n",
+ *phys_addr, size, descriptor->mapping, offset));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ vma = mappingInfo->vma;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_PRINT(7, ("Process map: mapping 0x%08X to process address 0x%08lX length 0x%08X\n", *phys_addr, (long unsigned int)(descriptor->mapping + offset), size));
+
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == *phys_addr )
+ {
+ _mali_osk_errcode_t ret;
+ AllocationList *alloc_item;
+ u32 linux_phys_frame_num;
+
+		alloc_item = _allocation_list_item_get();
+		if ( NULL == alloc_item )
+		{
+			/* Out of memory: non-fatal, the upper layers handle it */
+			return _MALI_OSK_ERR_NOMEM;
+		}
+
+		linux_phys_frame_num = alloc_item->physaddr >> PAGE_SHIFT;
+
+		ret = ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, linux_phys_frame_num, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+ if ( ret != _MALI_OSK_ERR_OK)
+ {
+ _allocation_list_item_release(alloc_item);
+ return ret;
+ }
+
+ /* Put our alloc_item into the list of allocations on success */
+ alloc_item->next = mappingInfo->list;
+ alloc_item->offset = offset;
+
+ /*alloc_item->physaddr = linux_phys_addr;*/
+ mappingInfo->list = alloc_item;
+
+ /* Write out new physical address on success */
+ *phys_addr = alloc_item->physaddr;
+
+ return ret;
+ }
+
+	/* Otherwise, use the supplied physical address */
+
+ /* ASSERT that supplied phys_addr is page aligned */
+ MALI_DEBUG_ASSERT( 0 == ((*phys_addr) & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ return ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, *phys_addr >> PAGE_SHIFT, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+}
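An illustrative caller of _mali_osk_mem_mapregion_map(); the function below is hypothetical, and the descriptor/offset are assumed to come from the memory engine.

static _mali_osk_errcode_t example_map_one_os_page(mali_memory_allocation *descriptor, u32 offset)
{
	/* The magic value asks this layer to allocate an OS page itself;
	 * on success, phys is rewritten with the real physical address */
	u32 phys = MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC;

	return _mali_osk_mem_mapregion_map(descriptor, offset, &phys, _MALI_OSK_CPU_PAGE_SIZE);
}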
+
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags )
+{
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ if (NULL == descriptor->mapping) return;
+
+ if (size > (descriptor->size - offset))
+ {
+ MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_unmap: virtual memory area not large enough to unmap size %x from area 0x%x at offset 0x%x\n",
+ size, descriptor->mapping, offset));
+ return;
+ }
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ if ( 0 != (flags & _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR) )
+ {
+ /* This physical RAM was allocated in _mali_osk_mem_mapregion_map and
+ * so needs to be unmapped
+ */
+ while (size)
+ {
+ /* First find the allocation in the list of allocations */
+ AllocationList *alloc = mappingInfo->list;
+ AllocationList **prev = &(mappingInfo->list);
+ while (NULL != alloc && alloc->offset != offset)
+ {
+ prev = &(alloc->next);
+ alloc = alloc->next;
+ }
+ if (alloc == NULL) {
+ MALI_DEBUG_PRINT(1, ("Unmapping memory that isn't mapped\n"));
+ size -= _MALI_OSK_CPU_PAGE_SIZE;
+ offset += _MALI_OSK_CPU_PAGE_SIZE;
+ continue;
+ }
+
+ _kernel_page_release(alloc->physaddr);
+
+ /* Remove the allocation from the list */
+ *prev = alloc->next;
+ _mali_osk_free( alloc );
+
+ /* Move onto the next allocation */
+ size -= _MALI_OSK_CPU_PAGE_SIZE;
+ offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+ }
+
+ /* Linux does the right thing as part of munmap to remove the mapping */
+
+ return;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c
new file mode 100644
index 00000000000..694d8eeaabc
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Added support for overriding memory settings in arch_configuration using
+ * mali_mem module parameter.
+ *
+ * Author: Magnus Wendt <magnus.wendt@stericsson.com> for
+ * ST-Ericsson.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h" /* kernel side OS functions */
+#include "mali_uk_types.h"
+#include "mali_kernel_linux.h" /* declares initialize_kernel_device(), terminate_kernel_device(), mali_osk_low_level_mem_init() and mali_osk_low_level_mem_term() */
+#include "arch/config.h" /* contains the configuration of the arch we are compiling for */
+
+extern char* mali_mem;
+
+/* is called from mali_kernel_constructor in common code */
+_mali_osk_errcode_t _mali_osk_init( void )
+{
+ if (0 != initialize_kernel_device()) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ mali_osk_low_level_mem_init();
+
+ MALI_SUCCESS;
+}
+
+/* is called from mali_kernel_deconstructor in common code */
+void _mali_osk_term( void )
+{
+ mali_osk_low_level_mem_term();
+ terminate_kernel_device();
+}
+
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources )
+{
+ *num_resources = sizeof(arch_configuration) / sizeof(arch_configuration[0]);
+ *arch_config = arch_configuration;
+
+ /* override the MEMORY resource if a value has been supplied from the command line */
+ if ('\0' != mali_mem[0]) {
+ char *p = mali_mem;
+ _mali_osk_resource_type_t mem_type = MEMORY;
+ unsigned long mem_base = 0;
+ unsigned long mem_size = memparse(p, &p);
+ if (*p == '@') {
+ if ((*(p + 1) == 'O') || (*(p + 1) == 'o')) { /* as in OS. e.g. mali_mem=64M@OS_MEMORY */
+ mem_type = OS_MEMORY;
+ mem_base = 0;
+ } else { /* parse the base address */
+ mem_type = MEMORY;
+ mem_base = memparse(p + 1, &p);
+ }
+ }
+
+ /* change the first memory entry in the architecture config. */
+ if (0 < mem_size) {
+ int i;
+ for (i = 0; i < *num_resources; ++i) {
+ if ((MEMORY == arch_configuration[i].type) ||
+ (OS_MEMORY == arch_configuration[i].type)) {
+ MALI_DEBUG_PRINT( 1, ("Overriding arch resource[%d] :\n",i));
+ MALI_DEBUG_PRINT( 1, ("Type: %s, base: %x, size %x\n",
+ (OS_MEMORY==mem_type?"OS_MEMORY":"MEMORY"),mem_base,mem_size));
+ arch_configuration[i].type = mem_type;
+ arch_configuration[i].base = mem_base;
+ arch_configuration[i].size = mem_size;
+ break;
+ }
+ }
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources )
+{
+ /* Nothing to do */
+}
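For reference, a sketch of how the mali_mem module parameter is interpreted by the parser above (the sizes and addresses are illustrative):

/*
 *   mali_mem=64M             -> type MEMORY,    base 0x0,        size 64 MiB
 *   mali_mem=64M@0x38000000  -> type MEMORY,    base 0x38000000, size 64 MiB
 *   mali_mem=32M@OS_MEMORY   -> type OS_MEMORY, base 0x0,        size 32 MiB
 */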
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c
new file mode 100644
index 00000000000..2f7fc15847b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+u32 inline _mali_osk_clz( u32 input )
+{
+ return 32-fls(input);
+}
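A few sanity-check values for the helper above, following directly from fls() semantics:

/*
 *   _mali_osk_clz(0x80000000) == 0
 *   _mali_osk_clz(0x00000001) == 31
 *   _mali_osk_clz(0x00000000) == 32   (fls(0) is defined to return 0)
 */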
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c
new file mode 100644
index 00000000000..bf6679f7bac
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+
+void inline *_mali_osk_calloc( u32 n, u32 size )
+{
+ return kcalloc(n, size, GFP_KERNEL);
+}
+
+void inline *_mali_osk_malloc( u32 size )
+{
+ return kmalloc(size, GFP_KERNEL);
+}
+
+void inline _mali_osk_free( void *ptr )
+{
+ kfree(ptr);
+}
+
+void inline *_mali_osk_memcpy( void *dst, const void *src, u32 len )
+{
+ return memcpy(dst, src, len);
+}
+
+void inline *_mali_osk_memset( void *s, u32 c, u32 n )
+{
+ return memset(s, c, n);
+}
+
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated )
+{
+ /* No need to prevent an out-of-memory dialogue appearing on Linux,
+ * so we always return MALI_TRUE.
+ */
+ return MALI_TRUE;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c
new file mode 100644
index 00000000000..3afd2da736c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+void _mali_osk_dbgmsg( const char *fmt, ... )
+{
+ va_list args;
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+}
+
+void _mali_osk_abort(void)
+{
+ /* make a simple fault by dereferencing a NULL pointer */
+ *(int *)0 = 0;
+}
+
+void _mali_osk_break(void)
+{
+ _mali_osk_abort();
+}
+
+u32 _mali_osk_get_pid(void)
+{
+ /* Thread group ID is the process ID on Linux */
+ return (u32)current->tgid;
+}
+
+u32 _mali_osk_get_tid(void)
+{
+	/* On Linux, the pid field actually identifies the thread */
+ return (u32)current->pid;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c
new file mode 100644
index 00000000000..2efc140f60a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+/**
+ * Declaration of the notification queue object type
+ * Contains a linked list of notifications pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl.
+ * When a new notification is posted, a single waiting thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct
+{
+ struct semaphore mutex; /**< Mutex protecting the list */
+ wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+ struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+typedef struct _mali_osk_notification_wrapper_t_struct
+{
+ struct list_head list; /**< Internal linked list variable */
+ _mali_osk_notification_t data; /**< Notification data */
+} _mali_osk_notification_wrapper_t;
+
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void )
+{
+ _mali_osk_notification_queue_t * result;
+
+ result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+ if (NULL == result) return NULL;
+
+ sema_init(&result->mutex, 1);
+ init_waitqueue_head(&result->receive_queue);
+ INIT_LIST_HEAD(&result->head);
+
+ return result;
+}
+
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
+{
+	/* Possible optimization: recycle notification objects instead of allocating each one */
+ _mali_osk_notification_wrapper_t *notification;
+
+ notification = (_mali_osk_notification_wrapper_t *)kmalloc( sizeof(_mali_osk_notification_wrapper_t) + size, GFP_KERNEL );
+ if (NULL == notification)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+ return NULL;
+ }
+
+ /* Init the list */
+ INIT_LIST_HEAD(&notification->list);
+
+ if (0 != size)
+ {
+ notification->data.result_buffer = ((u8*)notification) + sizeof(_mali_osk_notification_wrapper_t);
+ }
+ else
+ {
+ notification->data.result_buffer = NULL;
+ }
+
+ /* set up the non-allocating fields */
+ notification->data.notification_type = type;
+ notification->data.result_buffer_size = size;
+
+ /* all ok */
+ return &(notification->data);
+}
+
+void _mali_osk_notification_delete( _mali_osk_notification_t *object )
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER( object );
+
+ notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+ /* Remove from the list */
+ list_del(&notification->list);
+ /* Free the container */
+ kfree(notification);
+}
+
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue )
+{
+ MALI_DEBUG_ASSERT_POINTER( queue );
+
+ /* not much to do, just free the memory */
+ kfree(queue);
+}
+
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object )
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_ASSERT_POINTER( object );
+
+ notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+ /* lock queue access */
+ down(&queue->mutex);
+ /* add to list */
+ list_add_tail(&notification->list, &queue->head);
+ /* unlock the queue */
+ up(&queue->mutex);
+
+ /* and wake up one possible exclusive waiter */
+ wake_up(&queue->receive_queue);
+}
+
+static int _mali_notification_queue_is_empty( _mali_osk_notification_queue_t *queue )
+{
+ int ret;
+
+ down(&queue->mutex);
+ ret = list_empty(&queue->head);
+ up(&queue->mutex);
+ return ret;
+}
+
+#if MALI_STATE_TRACKING
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue )
+{
+ return _mali_notification_queue_is_empty(queue) ? MALI_TRUE : MALI_FALSE;
+}
+#endif
+
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ _mali_osk_notification_wrapper_t *wrapper_object;
+
+ down(&queue->mutex);
+
+ if (!list_empty(&queue->head))
+ {
+ wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+ *result = &(wrapper_object->data);
+ list_del_init(&wrapper_object->list);
+ ret = _MALI_OSK_ERR_OK;
+ }
+
+ up(&queue->mutex);
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_ASSERT_POINTER( result );
+
+ /* default result */
+ *result = NULL;
+
+ while (_MALI_OSK_ERR_OK != _mali_osk_notification_queue_dequeue(queue, result))
+ {
+ if (wait_event_interruptible(queue->receive_queue, !_mali_notification_queue_is_empty(queue)))
+ {
+ return _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
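A short producer/consumer sketch of the queue API above; the payload struct and the notification type value are invented for illustration.

struct example_payload { u32 job_id; u32 status; };

static void example_post(_mali_osk_notification_queue_t *queue, u32 job_id, u32 status)
{
	_mali_osk_notification_t *n;
	struct example_payload *p;

	n = _mali_osk_notification_create(1 /* hypothetical type */, sizeof(struct example_payload));
	if (NULL == n) return;

	p = (struct example_payload *)n->result_buffer;
	p->job_id = job_id;
	p->status = status;

	_mali_osk_notification_queue_send(queue, n); /* wakes one waiter */
}

static _mali_osk_errcode_t example_receive(_mali_osk_notification_queue_t *queue)
{
	_mali_osk_notification_t *n = NULL;
	_mali_osk_errcode_t err;

	err = _mali_osk_notification_queue_receive(queue, &n); /* blocks; may return _MALI_OSK_ERR_RESTARTSYSCALL */
	if (_MALI_OSK_ERR_OK != err) return err;

	/* ... inspect n->notification_type and n->result_buffer ... */

	_mali_osk_notification_delete(n);
	return _MALI_OSK_ERR_OK;
}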
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c
new file mode 100644
index 00000000000..04fcd4572df
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+
+#include <linux/platform_device.h>
+
+#include "mali_platform.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_pmm.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_linux_pm.h"
+#include "mali_linux_pm_testsuite.h"
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifdef CONFIG_PM_RUNTIME
+static int is_runtime =0;
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+#ifdef CONFIG_PM
+unsigned int mali_pmm_events_triggered_mask = 0;
+#endif /* CONFIG_PM */
+
+void _mali_osk_pmm_policy_events_notifications(mali_pmm_event_id mali_pmm_event)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+
+ switch (mali_pmm_event)
+ {
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<0);
+ }
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<1);
+ }
+ break;
+
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<2);
+ mali_job_scheduling_events_recording_on = 0;
+ pwr_mgmt_status_reg = mali_pmm_events_triggered_mask;
+ }
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ if (mali_timeout_event_recording_on == 1)
+ {
+ pwr_mgmt_status_reg = (1<<3);
+ mali_timeout_event_recording_on = 0;
+ }
+ break;
+
+ default:
+
+ break;
+
+ }
+#endif /* CONFIG_PM */
+
+#endif /* MALI_LICENSE_IS_GPL */
+}
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+/** This function is called when the Mali device has completed its power-up
+ * operation.
+ */
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+ wake_up_process(pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI OSK Power up Done\n" ));
+ return;
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+/** This function is called when the Mali device has completed its power-down
+ * operation.
+ */
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+#if MALI_POWER_MGMT_TEST_SUITE
+ if (is_mali_pmu_present == 0)
+ {
+ pwr_mgmt_status_reg = _mali_pmm_cores_list();
+ }
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ wake_up_process(pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI Power down Done\n" ));
+ return;
+
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+/** This function is invoked when the Mali device is idle.
+*/
+_mali_osk_errcode_t _mali_osk_pmm_dev_idle(void)
+{
+ _mali_osk_errcode_t err = 0;
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+
+ err = pm_runtime_put_sync(&(mali_gpu_device.dev));
+ if(err)
+ {
+ MALI_DEBUG_PRINT(4, ("OSPMM: Error in _mali_osk_pmm_dev_idle\n" ));
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_LICENSE_IS_GPL */
+ return err;
+}
+
+/** This function is invoked when the Mali device needs to be activated.
+*/
+void _mali_osk_pmm_dev_activate(void)
+{
+
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ int err = 0;
+ if(is_runtime == 0)
+ {
+ pm_suspend_ignore_children(&(mali_gpu_device.dev), true);
+ pm_runtime_enable(&(mali_gpu_device.dev));
+ pm_runtime_get_sync(&(mali_gpu_device.dev));
+ is_runtime = 1;
+ }
+ else
+ {
+ err = pm_runtime_get_sync(&(mali_gpu_device.dev));
+ }
+ if(err)
+ {
+ MALI_DEBUG_PRINT(4, ("OSPMM: Error in _mali_osk_pmm_dev_activate\n" ));
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+void _mali_osk_pmm_dvfs_operation_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+ wake_up_process(dvfs_pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI OSK DVFS Operation done\n" ));
+ return;
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h
new file mode 100644
index 00000000000..54acfdd138c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c
new file mode 100644
index 00000000000..aa7962380e6
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+int _mali_osk_time_after( u32 ticka, u32 tickb )
+{
+ return time_after((unsigned long)ticka, (unsigned long)tickb);
+}
+
+u32 _mali_osk_time_mstoticks( u32 ms )
+{
+ return msecs_to_jiffies(ms);
+}
+
+u32 _mali_osk_time_tickstoms( u32 ticks )
+{
+ return jiffies_to_msecs(ticks);
+}
+
+u32 _mali_osk_time_tickcount( void )
+{
+ return jiffies;
+}
+
+void _mali_osk_time_ubusydelay( u32 usecs )
+{
+ udelay(usecs);
+}
+
+u64 _mali_osk_time_get_ns( void )
+{
+ struct timespec tsval;
+ getnstimeofday(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
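A small sketch of how the tick helpers above combine into a timeout check; the wrapping function is hypothetical.

static int example_timed_out(u32 start_tick, u32 timeout_ms)
{
	u32 deadline = start_tick + _mali_osk_time_mstoticks(timeout_ms);
	return _mali_osk_time_after(_mali_osk_time_tickcount(), deadline);
}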
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c
new file mode 100644
index 00000000000..3dfba76bcb3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_timer_t_struct
+{
+ struct timer_list timer;
+};
+
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+ _mali_osk_timer_t *t = (_mali_osk_timer_t*)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+ if (NULL != t) init_timer(&t->timer);
+ return t;
+}
+
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.expires = _mali_osk_time_tickcount() + ticks_to_expire;
+ add_timer(&(tim->timer));
+}
+
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ mod_timer(&(tim->timer), expiry_tick);
+}
+
+void _mali_osk_timer_del( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer_sync(&(tim->timer));
+}
+
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.data = (unsigned long)data;
+ tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+void _mali_osk_timer_term( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ kfree(tim);
+}
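A minimal lifecycle sketch for the timer wrappers above; the callback name is invented, and its signature is assumed to match _mali_osk_timer_callback_t, i.e. void (*)(void *).

static void example_timeout_cb(void *data)
{
	/* runs in timer (softirq) context - keep it short */
}

static void example_timer_lifecycle(void)
{
	_mali_osk_timer_t *t = _mali_osk_timer_init();
	if (NULL == t) return;

	_mali_osk_timer_setcallback(t, example_timeout_cb, NULL);
	_mali_osk_timer_add(t, _mali_osk_time_mstoticks(100)); /* fire in roughly 100 ms */

	/* ... */

	_mali_osk_timer_del(t);  /* synchronously cancel if still pending */
	_mali_osk_timer_term(t); /* free; the timer must not be pending here */
}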
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c
new file mode 100644
index 00000000000..0f745492613
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <linux/slab.h>       /* memory allocation functions */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+ _mali_uk_get_api_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_api_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+int get_system_info_size_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_size_s __user *uargs)
+{
+ _mali_uk_get_system_info_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_system_info_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+int get_system_info_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_s __user *uargs)
+{
+ _mali_uk_get_system_info_s kargs;
+ _mali_osk_errcode_t err;
+ _mali_system_info *system_info_user;
+ _mali_system_info *system_info_kernel;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.system_info, &uargs->system_info)) return -EFAULT;
+ if (0 != get_user(kargs.size, &uargs->size)) return -EFAULT;
+
+	/* A temporary kernel buffer for the system_info data structure is passed through the system_info
+	 * member. The ukk_private member points to the user space destination of this buffer, so that
+	 * _mali_ukk_get_system_info() can fix up the pointers inside system_info for user space.
+	 */
+ system_info_kernel = kmalloc(kargs.size, GFP_KERNEL);
+ if (NULL == system_info_kernel) return -EFAULT;
+
+ system_info_user = kargs.system_info;
+ kargs.system_info = system_info_kernel;
+ kargs.ukk_private = (u32)system_info_user;
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_get_system_info(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ kfree(system_info_kernel);
+ return map_errcode(err);
+ }
+
+ if (0 != copy_to_user(system_info_user, system_info_kernel, kargs.size))
+ {
+ kfree(system_info_kernel);
+ return -EFAULT;
+ }
+
+ kfree(system_info_kernel);
+ return 0;
+}
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+ _mali_uk_wait_for_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_wait_for_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if(_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type)
+ {
+		kargs.ctx = NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+ }
+ else
+ {
+ if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+ }
+
+ return 0;
+}
+
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+ _mali_uk_post_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ if (0 != get_user(kargs.type, &uargs->type))
+ {
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_post_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
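The wrappers in this file, and in the other mali_ukk_*.c files that follow, share one shape; a condensed, hypothetical example, where _mali_uk_example_s, _mali_ukk_example() and the result field stand in for the real types and calls.

int example_wrapper(struct mali_session_data *session_data, _mali_uk_example_s __user *uargs)
{
	_mali_uk_example_s kargs;
	_mali_osk_errcode_t err;

	MALI_CHECK_NON_NULL(uargs, -EINVAL);

	/* copy the request from user space (or use get_user() per field) */
	if (0 != copy_from_user(&kargs, uargs, sizeof(kargs))) return -EFAULT;

	kargs.ctx = session_data;               /* kernel-side session pointer */
	err = _mali_ukk_example(&kargs);        /* call into the common driver core */
	if (_MALI_OSK_ERR_OK != err) return map_errcode(err);

	kargs.ctx = NULL;                       /* never return kernel pointers to user space */
	if (0 != put_user(kargs.result, &uargs->result)) return -EFAULT;

	return 0;
}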
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c
new file mode 100644
index 00000000000..a6f355fd459
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+ _mali_uk_gp_start_job_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (!access_ok(VERIFY_WRITE, uargs, sizeof(_mali_uk_gp_start_job_s)))
+ {
+ return -EFAULT;
+ }
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_start_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_start_job(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+	kargs.ctx = NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_gp_start_job_s)))
+ {
+	    /*
+	     * If this happens, user space will not know that the job was actually started, and if the
+	     * job was queued it will still believe it is queued, which typically leads to a deadlock
+	     * in user space. This can only happen if user space deliberately passes a buffer which
+	     * passes the access_ok(VERIFY_WRITE) check but is not fully writable at the time of
+	     * copy_to_user(). The official Mali driver never does that, and kernel space is not
+	     * affected, so no complex rollback is attempted for this extremely rare case.
+	     */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int gp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_abort_job_s __user *uargs)
+{
+ _mali_uk_gp_abort_job_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_abort_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ _mali_ukk_gp_abort_job(&kargs);
+
+ return 0;
+}
+
+
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+ _mali_uk_get_gp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+ _mali_uk_gp_suspend_response_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_suspend_response(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+ /* no known transactions to roll-back */
+ return 0;
+}
+
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_gp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c
new file mode 100644
index 00000000000..3f67a1e3328
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs)
+{
+ _mali_uk_init_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_init_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.mali_address_base, &uargs->mali_address_base)) goto mem_init_rollback;
+ if (0 != put_user(kargs.memory_size, &uargs->memory_size)) goto mem_init_rollback;
+
+ return 0;
+
+mem_init_rollback:
+ {
+ _mali_uk_term_mem_s kargs;
+ kargs.ctx = session_data;
+ err = _mali_ukk_term_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+			MALI_DEBUG_PRINT(4, ("rollback of _mali_ukk_init_mem (via _mali_ukk_term_mem) after a failed put_user() also failed\n"));
+ }
+ }
+ return -EFAULT;
+}
+
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs)
+{
+ _mali_uk_term_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_term_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument)
+{
+ _mali_uk_map_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+	/* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_map_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_map_external_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback */
+ _mali_uk_unmap_external_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+				MALI_DEBUG_PRINT(4, ("rollback of _mali_ukk_map_external_mem (triggered by a failing put_user()) failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+	/* Return the error that _mali_ukk_map_external_mem produced */
+ return map_errcode(err_code);
+}
+
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument)
+{
+ _mali_uk_unmap_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+	/* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_unmap_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args );
+
+	/* Return the error that _mali_ukk_unmap_external_mem produced */
+ return map_errcode(err_code);
+}
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument)
+{
+ _mali_uk_release_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+	/* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_release_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_release_ump_mem( &uk_args );
+
+	/* Return the error that _mali_ukk_release_ump_mem produced */
+ return map_errcode(err_code);
+}
+
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument)
+{
+ _mali_uk_attach_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+	/* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_attach_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_attach_ump_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback */
+ _mali_uk_release_ump_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_release_ump_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+				MALI_DEBUG_PRINT(4, ("rollback of _mali_ukk_attach_ump_mem (triggered by a failing put_user()) failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+	/* Return the error that _mali_ukk_attach_ump_mem produced */
+ return map_errcode(err_code);
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs)
+{
+ _mali_uk_query_mmu_page_table_dump_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs)
+{
+ _mali_uk_dump_mmu_page_table_s kargs;
+ _mali_osk_errcode_t err;
+ void *buffer;
+ int rc = -EFAULT;
+
+ /* validate input */
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ /* the session_data pointer was validated by caller */
+
+ kargs.buffer = NULL;
+
+ /* get location of user buffer */
+ if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
+ /* get size of mmu page table info buffer from user space */
+ if ( 0 != get_user(kargs.size, &uargs->size) ) goto err_exit;
+ /* verify we can access the whole of the user buffer */
+ if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+
+ /* allocate temporary buffer (kernel side) to store mmu page table info */
+ kargs.buffer = _mali_osk_malloc(kargs.size);
+ if (NULL == kargs.buffer)
+ {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_dump_mmu_page_table(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ rc = map_errcode(err);
+ goto err_exit;
+ }
+
+ /* copy mmu page table info back to user space and update pointers */
+ if (0 != copy_to_user(uargs->buffer, kargs.buffer, kargs.size) ) goto err_exit;
+ if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->register_writes)) goto err_exit;
+ if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->page_table_dump)) goto err_exit;
+ if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
+ if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+ rc = 0;
+
+err_exit:
+ if (kargs.buffer) _mali_osk_free(kargs.buffer);
+ return rc;
+}
+
+
+
+int mem_get_big_block_wrapper( struct file * filp, _mali_uk_get_big_block_s __user * argument )
+{
+ _mali_uk_get_big_block_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+	/* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_get_big_block_s)) )
+ {
+ return -EFAULT;
+ }
+
+ /* This interface inserts something into the ukk_private word */
+ uk_args.ukk_private = (u32)filp;
+ uk_args.ctx = filp->private_data;
+ err_code = _mali_ukk_get_big_block( &uk_args );
+
+ /* Do not leak the private word back into user space */
+ uk_args.ukk_private = 0;
+
+ if ( _MALI_OSK_ERR_OK != err_code )
+ {
+ return map_errcode(err_code);
+ }
+
+ /* From this point on, we must roll-back any failing action to preserve the
+ * meaning of the U/K interface (e.g. when excluded) */
+
+ /* transfer response back to user space */
+ if ( 0 != copy_to_user(argument, &uk_args, sizeof(_mali_uk_get_big_block_s)) )
+ {
+ /* Roll-back - the _mali_uk_get_big_block call succeeded, so all
+ * values in uk_args will be correct */
+ _mali_uk_free_big_block_s uk_args_rollback = {0, };
+
+ uk_args_rollback.ctx = uk_args.ctx;
+ uk_args_rollback.cookie = uk_args.cookie;
+ err_code = _mali_ukk_free_big_block( &uk_args_rollback );
+
+ if ( _MALI_OSK_ERR_OK != err_code )
+ {
+ /* error in DEBUG and RELEASE */
+ MALI_PRINT_ERROR( ("Failed to rollback get_big_block: %.8X\n", (u32)err_code) );
+ }
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int mem_free_big_block_wrapper(struct mali_session_data *session_data, _mali_uk_free_big_block_s __user * argument)
+{
+ _mali_uk_free_big_block_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL );
+
+ /* get call arguments from user space. get_user returns 0 on success */
+ if ( 0 != get_user(uk_args.cookie, &argument->cookie) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_free_big_block( &uk_args );
+
+ /* Return the error that _mali_ukk_free_big_block produced */
+ return map_errcode(err_code);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c
new file mode 100644
index 00000000000..f77a177d865
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+ _mali_uk_pp_start_job_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (!access_ok(VERIFY_WRITE, uargs, sizeof(_mali_uk_pp_start_job_s)))
+ {
+ return -EFAULT;
+ }
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_start_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_pp_start_job(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.returned_user_job_ptr, &uargs->returned_user_job_ptr) ||
+ 0 != put_user(kargs.status, &uargs->status))
+ {
+		/*
+		 * If this happens, user space will not know that the job was actually started,
+		 * and if the job was queued, user space will still believe it is queued.
+		 * That typically leads to a deadlock in user space.
+		 * It can only happen if user space deliberately passes a buffer that passes the
+		 * access_ok(VERIFY_WRITE) check but is no longer writable at the time of the
+		 * put_user() calls above. The official Mali driver never does that, and kernel
+		 * space is not affected, so we do not bother with a complex rollback in this
+		 * extremely rare case.
+		 */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int pp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_abort_job_s __user *uargs)
+{
+ _mali_uk_pp_abort_job_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_abort_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ _mali_ukk_pp_abort_job(&kargs);
+
+ return 0;
+}
+
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_pp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_pp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
+
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+ _mali_uk_get_pp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_pp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c
new file mode 100644
index 00000000000..636bd03580e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
+{
+ _mali_uk_profiling_start_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_start(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.limit, &uargs->limit))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+ _mali_uk_profiling_add_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_add_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
+{
+ _mali_uk_profiling_stop_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_stop(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.count, &uargs->count))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
+{
+ _mali_uk_profiling_get_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.index, &uargs->index))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_profiling_get_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s)))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
+{
+ _mali_uk_profiling_clear_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_clear(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c
new file mode 100644
index 00000000000..965ee411535
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+ _mali_uk_vsync_event_report_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_vsync_event_report(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h
new file mode 100644
index 00000000000..54e3f656b37
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+int get_system_info_size_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_size_s __user *uargs);
+int get_system_info_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_s __user *uargs);
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs);
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs);
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument);
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs);
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument);
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+int mem_get_big_block_wrapper( struct file * filp, _mali_uk_get_big_block_s __user * argument );
+int mem_free_big_block_wrapper( struct mali_session_data *session_data, _mali_uk_free_big_block_s __user * argument);
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_abort_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_abort_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+int map_errcode( _mali_osk_errcode_t err );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c
new file mode 100644
index 00000000000..36301a93a2c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for a default platform
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+ MALI_SUCCESS;
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+u32 pmu_get_power_up_down_info(void)
+{
+ return 4095;
+
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c
new file mode 100644
index 00000000000..b8c47094177
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for a default platform
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+#include "mali_pmm.h"
+
+static int is_run_time = 0;
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+	if (is_run_time == 1)
+	{
+		_mali_osk_pmm_dev_idle();
+		is_run_time = 0;
+	}
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+	if (is_run_time == 0)
+	{
+		_mali_osk_pmm_dev_activate();
+		is_run_time = 1;
+	}
+ MALI_SUCCESS;
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+u32 pmu_get_power_up_down_info(void)
+{
+ return 4095;
+
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c
new file mode 100644
index 00000000000..cf95b8ae09e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+#if USING_MALI_PMM
+
+#include "mali_pmm.h"
+
+/* Internal test on/off */
+#define PMU_TEST 0
+
+/** @brief PMU hardware info
+ */
+typedef struct platform_pmu
+{
+ u32 reg_base_addr; /**< PMU registers base address */
+ u32 reg_size; /**< PMU registers size */
+ const char *name; /**< PMU name */
+ u32 irq_num; /**< PMU irq number */
+
+ mali_io_address reg_mapped; /**< IO-mapped pointer to registers */
+} platform_pmu_t;
+
+static platform_pmu_t *pmu_info = NULL;
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */
+ PMU_REG_ADDR_MGMT_INT_STAT = 0x14, /*< Interrupt status register */
+ PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */
+ PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Software delay register */
+ PMU_REG_ADDR_MGMT_MASTER_PWR_UP = 0x24, /*< Master power up register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+/* Internal functions */
+u32 pmu_reg_read(platform_pmu_t *pmu, u32 relative_address);
+void pmu_reg_write(platform_pmu_t *pmu, u32 relative_address, u32 new_val);
+mali_pmm_core_mask pmu_translate_cores_to_pmu(mali_pmm_core_mask cores);
+#if PMU_TEST
+void pmu_dump_regs( platform_pmu_t *pmu );
+void pmu_test( platform_pmu_t *pmu, u32 cores );
+#endif
+
+#endif /* USING_MALI_PMM */
+
+
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+#if USING_MALI_PMM
+ if( resource == NULL )
+ {
+ /* Nothing to set up for the system */
+ }
+ else if( resource->type == PMU )
+ {
+ if( (resource->base == 0) ||
+ (resource->description == NULL) )
+ {
+ /* NOTE: We currently don't care about any other resource settings */
+			MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Missing PMU setup information\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_DEBUG_ASSERT( pmu_info == NULL );
+ pmu_info = (platform_pmu_t *)_mali_osk_malloc(sizeof(*pmu_info));
+ MALI_CHECK_NON_NULL( pmu_info, _MALI_OSK_ERR_NOMEM );
+
+ /* All values get 0 as default */
+ _mali_osk_memset(pmu_info, 0, sizeof(*pmu_info));
+
+ pmu_info->reg_base_addr = resource->base;
+ pmu_info->reg_size = (u32)PMU_REGISTER_ADDRESS_SPACE_SIZE;
+ pmu_info->name = resource->description;
+ pmu_info->irq_num = resource->irq;
+
+ if( _MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->name) )
+ {
+ MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Could not request register region (0x%08X - 0x%08X) for %s\n",
+ pmu_info->reg_base_addr, pmu_info->reg_base_addr + pmu_info->reg_size - 1, pmu_info->name));
+ goto cleanup;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: request_mem_region: (0x%08X - 0x%08X) for %s\n",
+ pmu_info->reg_base_addr, pmu_info->reg_base_addr + pmu_info->reg_size - 1, pmu_info->name));
+ }
+
+ pmu_info->reg_mapped = _mali_osk_mem_mapioregion( pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->name );
+
+ if( 0 == pmu_info->reg_mapped )
+ {
+			MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Could not ioremap registers for %s.\n", pmu_info->name));
+ _mali_osk_mem_unreqregion( pmu_info->reg_base_addr, pmu_info->reg_size );
+ goto cleanup;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: ioremap_nocache: Internal ptr: (0x%08X - 0x%08X) for %s\n",
+ (u32) pmu_info->reg_mapped,
+ ((u32)pmu_info->reg_mapped)+ pmu_info->reg_size - 1,
+ pmu_info->name));
+ }
+
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: Mapping registers to %s\n", pmu_info->name));
+
+#if PMU_TEST
+ pmu_test(pmu_info, (MALI_PMM_CORE_GP));
+ pmu_test(pmu_info, (MALI_PMM_CORE_GP|MALI_PMM_CORE_L2|MALI_PMM_CORE_PP0));
+#endif
+
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Initialized - %s\n", pmu_info->name) );
+ }
+ else
+ {
+ /* Didn't expect a different resource */
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_SUCCESS;
+
+cleanup:
+ _mali_osk_free(pmu_info);
+ pmu_info = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+#else
+	/* Nothing to do when not using PMM - Mali is already powered on */
+ MALI_SUCCESS;
+#endif
+
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+#if USING_MALI_PMM
+ if( type == NULL )
+ {
+ /* Nothing to tear down for the system */
+ }
+ else if (*type == PMU)
+ {
+ if( pmu_info )
+ {
+ _mali_osk_mem_unmapioregion(pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->reg_mapped);
+ _mali_osk_mem_unreqregion(pmu_info->reg_base_addr, pmu_info->reg_size);
+ _mali_osk_free(pmu_info);
+ pmu_info = NULL;
+
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Terminated PMU\n") );
+ }
+ }
+ else
+ {
+ /* Didn't expect a different resource */
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_SUCCESS;
+
+#else
+ /* Nothing to do when not using PMM */
+ MALI_SUCCESS;
+#endif
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+#if USING_MALI_PMM
+ u32 stat;
+ u32 timeout;
+ u32 cores_pmu;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu_info);
+ MALI_DEBUG_ASSERT( cores != 0 ); /* Shouldn't receive zero from PMM */
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: power down (0x%x)\n", cores) );
+
+ cores_pmu = pmu_translate_cores_to_pmu(cores);
+ pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_POWER_DOWN, cores_pmu );
+
+ /* Wait for cores to be powered down */
+ timeout = 10; /* 10ms */
+ do
+ {
+ /* Get status of sleeping cores */
+ stat = pmu_reg_read( pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ stat &= cores_pmu;
+ if( stat == cores_pmu ) break; /* All cores we wanted are now asleep */
+ _mali_osk_time_ubusydelay(1000); /* 1ms */
+ timeout--;
+ } while( timeout > 0 );
+
+ if( timeout == 0 ) MALI_ERROR(_MALI_OSK_ERR_TIMEOUT);
+
+ MALI_SUCCESS;
+
+#else
+ /* Nothing to do when not using PMM */
+ MALI_SUCCESS;
+#endif
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+#if USING_MALI_PMM
+ u32 cores_pmu;
+ u32 stat;
+ u32 timeout;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu_info);
+ MALI_DEBUG_ASSERT( cores != 0 ); /* Shouldn't receive zero from PMM */
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: power up (0x%x)\n", cores) );
+
+ /* Don't use interrupts - just poll status */
+ pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_INT_MASK, 0 );
+ cores_pmu = pmu_translate_cores_to_pmu(cores);
+ pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_POWER_UP, cores_pmu );
+
+ timeout = 10; /* 10ms */
+ do
+ {
+ /* Get status of sleeping cores */
+ stat = pmu_reg_read( pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ stat &= cores_pmu;
+ if( stat == 0 ) break; /* All cores we wanted are now awake */
+ _mali_osk_time_ubusydelay(1000); /* 1ms */
+ timeout--;
+ } while( timeout > 0 );
+
+ if( timeout == 0 ) MALI_ERROR(_MALI_OSK_ERR_TIMEOUT);
+
+ MALI_SUCCESS;
+
+#else
+ /* Nothing to do when not using PMM */
+ MALI_SUCCESS;
+#endif
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+#if USING_MALI_PMM
+
+/***** INTERNAL *****/
+
+/** @brief Internal PMU function to translate the cores bit mask
+ * into something the hardware PMU understands
+ *
+ * @param cores PMM cores bitmask
+ * @return PMU hardware cores bitmask
+ */
+u32 pmu_translate_cores_to_pmu(mali_pmm_core_mask cores)
+{
+ /* For Mali 400 PMU the cores mask is already the same as what
+ * the hardware PMU expects.
+ * For other hardware, some translation can be done here, by
+ * translating the MALI_PMM_CORE_* bits into specific hardware
+ * bits
+ */
+ return cores;
+}
+
+/** @brief Internal PMU function to read a PMU register
+ *
+ * @param pmu handle that identifies the PMU hardware
+ * @param relative_address relative PMU hardware address to read from
+ * @return 32-bit value that was read from the address
+ */
+u32 pmu_reg_read(platform_pmu_t *pmu, u32 relative_address)
+{
+ u32 read_val;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+ MALI_DEBUG_ASSERT(relative_address < pmu->reg_size);
+
+ read_val = _mali_osk_mem_ioread32(pmu->reg_mapped, relative_address);
+
+ MALI_DEBUG_PRINT( 5, ("PMU: reg_read: %s Addr:0x%04X Val:0x%08x\n",
+ pmu->name, relative_address, read_val));
+
+ return read_val;
+}
+
+/** @brief Internal PMU function to write to a PMU register
+ *
+ * @param pmu handle that identifies the PMU hardware
+ * @param relative_address relative PMU hardware address to write to
+ * @param new_val new 32-bit value to write into the address
+ */
+void pmu_reg_write(platform_pmu_t *pmu, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+ MALI_DEBUG_ASSERT(relative_address < pmu->reg_size);
+
+ MALI_DEBUG_PRINT( 5, ("PMU: reg_write: %s Addr:0x%04X Val:0x%08x\n",
+ pmu->name, relative_address, new_val));
+
+ _mali_osk_mem_iowrite32(pmu->reg_mapped, relative_address, new_val);
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+u32 pmu_get_power_up_down_info(void)
+{
+ return pmu_reg_read(pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS);
+}
+
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+#endif /* USING_MALI_PMM */
+
+
+#if USING_MALI_PMM && PMU_TEST
+
+/***** TEST *****/
+
+void pmu_dump_regs( platform_pmu_t *pmu )
+{
+ u32 addr;
+ for( addr = 0x0; addr < PMU_REGISTER_ADDRESS_SPACE_SIZE; addr += 0x4 )
+ {
+ MALI_PRINT( ("PMU_REG: 0x%08x: 0x%04x\n", (addr + pmu->reg_base_addr), pmu_reg_read( pmu, addr ) ) );
+ }
+}
+
+/* This function is an internal test for the PMU without any Mali h/w interaction */
+void pmu_test( platform_pmu_t *pmu, u32 cores )
+{
+ u32 stat;
+ u32 timeout;
+
+ MALI_PRINT( ("PMU_TEST: Start\n") );
+
+ pmu_dump_regs( pmu );
+
+ MALI_PRINT( ("PMU_TEST: Power down cores: 0x%x\n", cores) );
+ _mali_pmm_pmu_power_down( pmu, cores, MALI_TRUE );
+
+ stat = pmu_reg_read( pmu, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ MALI_PRINT( ("PMU_TEST: %s\n", (stat & cores) == cores ? "SUCCESS" : "FAIL" ) );
+
+ pmu_dump_regs( pmu );
+
+ MALI_PRINT( ("PMU_TEST: Power up cores: 0x%x\n", cores) );
+ _mali_pmm_pmu_power_up( pmu, cores, MALI_FALSE );
+
+ MALI_PRINT( ("PMU_TEST: Waiting for power up...\n") );
+ timeout = 1000; /* 1 sec */
+ while( !_mali_pmm_pmu_irq_power_up(pmu) && timeout > 0 )
+ {
+ _mali_osk_time_ubusydelay(1000); /* 1ms */
+ timeout--;
+ }
+
+ MALI_PRINT( ("PMU_TEST: Waited %dms for interrupt\n", (1000-timeout)) );
+ stat = pmu_reg_read( pmu, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ MALI_PRINT( ("PMU_TEST: %s\n", (stat & cores) == 0 ? "SUCCESS" : "FAIL" ) );
+
+ _mali_pmm_pmu_irq_power_up_clear(pmu);
+
+ pmu_dump_regs( pmu );
+
+ MALI_PRINT( ("PMU_TEST: Finish\n") );
+}
+#endif /* USING_MALI_PMM && PMU_TEST */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h
new file mode 100644
index 00000000000..575c1fb6113
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.h
+ * Platform specific Mali driver functions
+ */
+
+#include "mali_osk.h"
+
+#if USING_MALI_PMM
+#include "mali_pmm.h"
+#endif
+
+#if !USING_MALI_PMM
+/* @brief System power up/down cores that can be passed into mali_platform_powerdown/up() */
+#define MALI_PLATFORM_SYSTEM 0
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ * When using PMM, it is also called from the PMM start up to initialise the
+ * system PMU
+ *
+ * @param resource This is NULL when called on first driver start up, else it will
+ * be a pointer to a PMU resource
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ * When using PMM, it is also called from the PMM termination code to clean up the
+ * system PMU
+ *
+ * @param type This is NULL when called on driver exit, else it will
+ * be a pointer to a PMU resource type (not the full resource)
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Called as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ *
+ * @param cores This is MALI_PLATFORM_SYSTEM when called without PMM, else it will
+ * be a mask of cores to power down based on the mali_pmm_core_id enum
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores);
+
+/** @brief Platform specific powerup sequence of MALI
+ *
+ * Called as part of platform deinit if there is no PMM support, else the
+ * PMM will call it.
+ *
+ * @param cores This is MALI_PLATFORM_SYSTEM when called without PMM, else it will
+ * be a mask of cores to power up based on the mali_pmm_core_id enum
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_powerup(u32 cores);
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(u32 utilization);
+
+#if USING_MALI_PMM
+#if MALI_POWER_MGMT_TEST_SUITE
+/** @brief Function to get the power up/down status of the individual cores
+ *
+ * This function is used by the power management test suite to read which cores
+ * are currently powered up or down.
+ * @return Bitmask describing the power up/down status of the cores
+ */
+u32 pmu_get_power_up_down_info(void);
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c
new file mode 100644
index 00000000000..481e6e76c54
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Mali related Ux500 platform initialization
+ *
+ * Author: Marta Lofstedt <marta.lofstedt@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for ST-Ericsson's Ux500 platforms
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+
+#define MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT 64
+#define MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT 192
+
+static int is_running;
+static struct regulator *regulator;
+static struct clk *clk_sga;
+static u32 last_utilization;
+static struct work_struct mali_utilization_work;
+static struct workqueue_struct *mali_utilization_workqueue;
+
+/* Rationale behind the values for:
+* MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT and MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT
+* When operating at half clock frequency a faster clock is requested when
+* reaching 75% utilization. When operating at full clock frequency a slower
+* clock is requested when reaching 25% utilization. There is a margin of 25%
+* at the high range of the slow clock to avoid complete saturation of the
+* hardware and there is some overlap to avoid an oscillating situation where
+* the clock goes back and forth from high to low.
+*
+* Utilization on full speed clock
+* 0 64 128 192 255
+* |---------------|---------------|---------------|---------------|
+* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+* | ^
+* V |
+* XXXXXXXXXXXXXXXXXXXXXXXXX
+* 0 64 128 192 255
+* |-------|-------|-------|-------|
+* Utilization on half speed clock
+*/
+void mali_utilization_function(struct work_struct *ptr)
+{
+	/* By default, the platform starts with 50% APE OPP and 25% DDR OPP */
+ static u32 has_requested_low = 1;
+
+ if (last_utilization > MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT) {
+ if (has_requested_low) {
+ MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u SIGNAL_HIGH\n", last_utilization));
+ /*Request 100% APE_OPP.*/
+ if (prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "mali", 100)) {
+ MALI_DEBUG_PRINT(2, ("MALI 100% APE_OPP failed\n"));
+ return;
+ }
+ /*
+ * Since the utilization values will be reported higher
+ * if DDR_OPP is lowered, we also request 100% DDR_OPP.
+ */
+ if (prcmu_qos_add_requirement(PRCMU_QOS_DDR_OPP, "mali", 100)) {
+ MALI_DEBUG_PRINT(2, ("MALI 100% DDR_OPP failed\n"));
+ return;
+ }
+ has_requested_low = 0;
+ }
+ } else {
+ if (last_utilization < MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT) {
+ if (!has_requested_low) {
+ /*Remove APE_OPP and DDR_OPP requests*/
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "mali");
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP, "mali");
+ MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u SIGNAL_LOW\n", last_utilization));
+ has_requested_low = 1;
+ }
+ }
+ }
+ MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u\n", last_utilization));
+}
+
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+ is_running = 0;
+ last_utilization = 0;
+
+ mali_utilization_workqueue = create_singlethread_workqueue("mali_utilization_workqueue");
+ if (NULL == mali_utilization_workqueue) {
+		MALI_DEBUG_PRINT(2, ("%s: Failed to create workqueue %s\n", __func__, "mali_utilization_workqueue"));
+ goto error;
+ }
+ INIT_WORK(&mali_utilization_work, mali_utilization_function);
+
+ regulator = regulator_get(NULL, "v-mali");
+ if (IS_ERR(regulator)) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to get regulator %s\n", __func__, "v-mali"));
+ goto error;
+ }
+
+ clk_sga = clk_get_sys("mali", NULL);
+ if (IS_ERR(clk_sga)) {
+ regulator_put(regulator);
+ MALI_DEBUG_PRINT(2, ("%s: Failed to get clock %s\n", __func__, "mali"));
+ goto error;
+ }
+
+ MALI_SUCCESS;
+error:
+ MALI_DEBUG_PRINT(1, ("SGA initialization failed.\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+ destroy_workqueue(mali_utilization_workqueue);
+ regulator_put(regulator);
+ clk_put(clk_sga);
+ MALI_DEBUG_PRINT(2, ("SGA terminated.\n"));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+ if (is_running) {
+ clk_disable(clk_sga);
+ if (regulator) {
+ int ret = regulator_disable(regulator);
+ if (ret < 0) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to disable regulator %s\n", __func__, "v-mali"));
+ is_running = 0;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ }
+ is_running = 0;
+ }
+ MALI_DEBUG_PRINT(4, ("mali_platform_powerdown is_running: %u cores: %u\n", is_running, cores));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+ if (!is_running) {
+ int ret = regulator_enable(regulator);
+ if (ret < 0) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to enable regulator %s\n", __func__, "v-mali"));
+ goto error;
+ }
+
+ ret = clk_enable(clk_sga);
+ if (ret < 0) {
+ regulator_disable(regulator);
+ MALI_DEBUG_PRINT(2, ("%s: Failed to enable clock %s\n", __func__, "mali"));
+ goto error;
+ }
+ is_running = 1;
+ }
+ MALI_DEBUG_PRINT(4, ("mali_platform_powerup is_running:%u cores: %u\n", is_running, cores));
+ MALI_SUCCESS;
+error:
+ MALI_DEBUG_PRINT(1, ("Failed to power up.\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+ last_utilization = utilization;
+	/*
+	 * There is no need to cancel a previously queued work item that has not
+	 * yet run in favor of a new one: mali_utilization_function evaluates the
+	 * current utilization value when it runs, not the value at the time the
+	 * work was scheduled.
+	 */
+ queue_work(mali_utilization_workqueue, &mali_utilization_work);
+}
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c
new file mode 100644
index 00000000000..b321b504839
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Magnus Wendt <magnus.wendt@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "ump_kernel_types.h"
+#include "mali_kernel_common.h"
+
+#include <linux/hwmem.h>
+#include <linux/err.h>
+
+
+/* The UMP kernel API for hwmem has been mapped so that
+ * ump_dd_handle == hwmem_alloc
+ * ump_secure_id == hwmem global name
+ *
+ * The current implementation is limited to contiguous memory
+ */
+
+ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
+{
+ int hwmem_name = hwmem_get_name((struct hwmem_alloc *) memh);
+
+ if (unlikely(hwmem_name < 0)) {
+ MALI_DEBUG_PRINT(1, ("%s: Invalid Alloc 0x%x\n",__func__, memh));
+ return UMP_INVALID_SECURE_ID;
+ }
+
+ return (ump_secure_id)hwmem_name;
+}
+
+
+
+ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+ struct hwmem_alloc *alloc;
+ enum hwmem_mem_type mem_type;
+ enum hwmem_access access;
+
+ alloc = hwmem_resolve_by_name((int) secure_id);
+
+ if (IS_ERR(alloc)) {
+ MALI_DEBUG_PRINT(1, ("%s: Invalid UMP id %d\n",__func__, secure_id));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ hwmem_get_info(alloc, NULL, &mem_type, &access);
+
+ if (unlikely((access & (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | HWMEM_ACCESS_IMPORT)) !=
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | HWMEM_ACCESS_IMPORT))) {
+ MALI_DEBUG_PRINT(1, ("%s: Access denied on UMP id %d, (access==%d)\n",
+ __func__, secure_id, access));
+ hwmem_release(alloc);
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ if (unlikely(HWMEM_MEM_CONTIGUOUS_SYS != mem_type)) {
+ MALI_DEBUG_PRINT(1, ("%s: UMP id %d is non-contiguous! (not supported)\n",
+ __func__, secure_id));
+ hwmem_release(alloc);
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ return (ump_dd_handle)alloc;
+}
+
+
+
+unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
+{
+ return 1;
+}
+
+
+
+ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh,
+ ump_dd_physical_block * blocks,
+ unsigned long num_blocks)
+{
+ struct hwmem_mem_chunk hwmem_mem_chunk;
+ size_t hwmem_mem_chunk_length = 1;
+
+ int hwmem_result;
+ struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;
+
+ if (unlikely(blocks == NULL)) {
+ MALI_DEBUG_PRINT(1, ("%s: blocks == NULL\n",__func__));
+ return UMP_DD_INVALID;
+ }
+
+ if (unlikely(1 != num_blocks)) {
+ MALI_DEBUG_PRINT(1, ("%s: num_blocks == %d (!= 1)\n",__func__, num_blocks));
+ return UMP_DD_INVALID;
+ }
+
+ MALI_DEBUG_PRINT(5, ("Returning physical block information. Alloc: 0x%x\n", memh));
+
+ /* It might not look natural to pin here, but it matches the usage by the mali kernel module */
+ hwmem_result = hwmem_pin(alloc, &hwmem_mem_chunk, &hwmem_mem_chunk_length);
+
+ if (unlikely(hwmem_result < 0)) {
+ MALI_DEBUG_PRINT(1, ("%s: Pin failed. Alloc: 0x%x\n",__func__, memh));
+ return UMP_DD_INVALID;
+ }
+
+ blocks[0].addr = hwmem_mem_chunk.paddr;
+ blocks[0].size = hwmem_mem_chunk.size;
+
+ hwmem_set_domain(alloc, HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
+ HWMEM_DOMAIN_SYNC, NULL);
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh,
+ unsigned long index,
+ ump_dd_physical_block * block)
+{
+ if (unlikely(0 != index)) {
+ MALI_DEBUG_PRINT(1, ("%s: index == %d (!= 0)\n",__func__, index));
+ return UMP_DD_INVALID;
+ }
+ return ump_dd_phys_blocks_get(memh, block, 1);
+}
+
+
+
+unsigned long ump_dd_size_get(ump_dd_handle memh)
+{
+ struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;
+ int size;
+
+ hwmem_get_info(alloc, &size, NULL, NULL);
+
+ return size;
+}
+
+
+
+void ump_dd_reference_add(ump_dd_handle memh)
+{
+	/* This is never called from the Mali kernel driver */
+}
+
+
+
+void ump_dd_reference_release(ump_dd_handle memh)
+{
+ struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;
+
+ hwmem_unpin(alloc);
+ hwmem_release(alloc);
+
+ return;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt
new file mode 100644
index 00000000000..af40fddd20f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt
@@ -0,0 +1,28 @@
+Building the Mali Device Driver for Linux
+-----------------------------------------
+
+Build the Mali Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> USING_MMU=<mmu_option> USING_UMP=<ump_option> \
+BUILD=<build_option> CONFIG=<your_config> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ mmu_option: 1 = Mali MMU(s) on
+ 0 = Mali MMU(s) off
+ ump_option: 1 = Enable UMP support(*)
+ 0 = disable UMP support
+ build_option: debug = debug build of driver
+ release = release build of driver
+ your_config: Name of the sub-folder to find the required config.h(**) file
+ ("arch-" will be prepended)
+
+(*) For newer Linux Kernels, the Module.symvers file for the UMP device driver
+ must be available. The UMP_SYMVERS_FILE variable in the Makefile should
+ point to this file. This file is generated when the UMP driver is built.
+
+(**) The config.h file contains the configuration parameters needed, like where the
+ Mali GPU is located, interrupts it uses, memory and so on.
+
+The result will be a mali.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
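+
+As an illustration only (the kernel path and config name below are placeholders
+and must be adapted to your environment), a release build with MMU and UMP
+support enabled could be invoked like this:
+
+KDIR=/home/user/linux USING_MMU=1 USING_UMP=1 \
+BUILD=release CONFIG=ux500 make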
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h
new file mode 100644
index 00000000000..6b30802bb2d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ * Enum for management register addresses.
+ */
+enum mali200_mgmt_reg
+{
+ MALI200_REG_ADDR_MGMT_VERSION = 0x1000,
+ MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x1004,
+ MALI200_REG_ADDR_MGMT_STATUS = 0x1008,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT = 0x100c,
+
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x1020,
+ MALI200_REG_ADDR_MGMT_INT_CLEAR = 0x1024,
+ MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028,
+ MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c,
+
+ MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW = 0x1044,
+
+ MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x10a4,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x10ac,
+
+ MALI200_REG_SIZEOF_REGISTER_BANK = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+enum mali200_mgmt_ctrl_mgmt {
+ MALI200_REG_VAL_CTRL_MGMT_STOP_BUS = (1<<0),
+#if defined(USING_MALI200)
+ MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES = (1<<3),
+#endif
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET = (1<<5),
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING = (1<<6),
+#if defined(USING_MALI400)
+ MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET = (1<<7),
+#endif
+};
+
+enum mali200_mgmt_irq {
+ MALI200_REG_VAL_IRQ_END_OF_FRAME = (1<<0),
+ MALI200_REG_VAL_IRQ_END_OF_TILE = (1<<1),
+ MALI200_REG_VAL_IRQ_HANG = (1<<2),
+ MALI200_REG_VAL_IRQ_FORCE_HANG = (1<<3),
+ MALI200_REG_VAL_IRQ_BUS_ERROR = (1<<4),
+ MALI200_REG_VAL_IRQ_BUS_STOP = (1<<5),
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT = (1<<6),
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT = (1<<7),
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR = (1<<8),
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1<<9),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW = (1<<10),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW = (1<<11),
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED = (1<<12),
+};
+
+#if defined USING_MALI200
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR))
+#elif defined USING_MALI400
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW |\
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+#else
+#error "No supported mali core defined"
+#endif
+
+#if defined USING_MALI200
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR))
+#elif defined USING_MALI400
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+#else
+#error "No supported mali core defined"
+#endif
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
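+
+/*
+ * Illustrative note (not part of the original driver): a typical interrupt
+ * setup sequence using these masks would first clear any pending interrupts
+ * and then unmask only the interrupts the driver actually handles, e.g.:
+ *
+ *   write_reg(MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ *   write_reg(MALI200_REG_ADDR_MGMT_INT_MASK,  MALI200_REG_VAL_IRQ_MASK_USED);
+ *
+ * write_reg() is a placeholder for whatever register accessor the OS
+ * abstraction layer provides; it is not defined in this file.
+ */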
+
+enum mali200_mgmt_status {
+ MALI200_REG_VAL_STATUS_RENDERING_ACTIVE = (1<<0),
+ MALI200_REG_VAL_STATUS_BUS_STOPPED = (1<<4),
+};
+
+enum mali200_render_unit
+{
+ MALI200_REG_ADDR_FRAME = 0x0000,
+};
+
+#if defined USING_MALI200
+#define MALI200_NUM_REGS_FRAME ((0x04C/4)+1)
+#elif defined USING_MALI400
+#define MALI200_NUM_REGS_FRAME ((0x058/4)+1)
+#else
+#error "No supported mali core defined"
+#endif
+
+enum mali200_wb_unit {
+ MALI200_REG_ADDR_WB0 = 0x0100,
+ MALI200_REG_ADDR_WB1 = 0x0200,
+ MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+/** The number of registers in one single writeback unit */
+#ifndef MALI200_NUM_REGS_WBx
+#define MALI200_NUM_REGS_WBx ((0x02C/4)+1)
+#endif
+
+/* This value should appear in the top 16 bits of the version register of the Mali PP */
+#if defined USING_MALI200
+#define MALI_PP_PRODUCT_ID 0xC807
+#elif defined USING_MALI400
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI_PP_PRODUCT_ID MALI400_PP_PRODUCT_ID
+#else
+#error "No supported mali core defined"
+#endif
+
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h
new file mode 100644
index 00000000000..11eb55c424e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONROL_REGS_H_
+#define _MALIGP2_CONROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * They are used to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are relative offsets of 32-bit registers.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR = 0x00,
+ MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR = 0x04,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR = 0x08,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR = 0x0c,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR = 0x10,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR = 0x14,
+ MALIGP2_REG_ADDR_MGMT_CMD = 0x20,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT = 0x24,
+ MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C,
+ MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30,
+ MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW = 0x34,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50,
+ MALIGP2_REG_ADDR_MGMT_STATUS = 0x68,
+ MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80,
+ MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ = 0x84,
+ MALIGP2_CONTR_AXI_BUS_ERROR_STAT = 0x94,
+ MALIGP2_REGISTER_ADDRESS_SPACE_SIZE = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ * @see MALIGP2_CTRL_REG_CMD
+ */
+typedef enum
+{
+ MALIGP2_REG_VAL_CMD_START_VS = (1<< 0),
+ MALIGP2_REG_VAL_CMD_START_PLBU = (1<< 1),
+ MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC = (1<< 4),
+ MALIGP2_REG_VAL_CMD_RESET = (1<< 5),
+ MALIGP2_REG_VAL_CMD_FORCE_HANG = (1<< 6),
+ MALIGP2_REG_VAL_CMD_STOP_BUS = (1<< 9),
+#if defined(USING_MALI400)
+ MALI400GP_REG_VAL_CMD_SOFT_RESET = (1<<10),
+#endif
+} mgp_contr_reg_val_cmd;
+
+
+/** @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ * @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ * MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR (1 << 11)
+#if defined USING_MALI400
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS (1 << 22)
+#elif !defined USING_MALI200
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining all IRQs in MaliGP2 */
+#if defined USING_MALI200
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR)
+#elif defined USING_MALI400
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining the IRQs in MaliGP2 which we use*/
+#if defined USING_MALI200
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR)
+#elif defined USING_MALI400
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Mask with no IRQs set on MaliGP2 */
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** @} end of defgroup MALIGP2_IRQ */
+
+/** @defgroup MALIGP2_STATUS
+ * The different status values of the geometry processor.
+ * @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE 0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED 0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE 0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR 0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR 0x0100
+/** @} end of defgroup MALIGP2_STATUS */
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+ MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+ MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+ MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+ MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+
+/* This value should appear in the top 16 bits of the version register of the GP. */
+#if defined(USING_MALI200)
+#define MALI_GP_PRODUCT_ID 0xA07
+#elif defined(USING_MALI400)
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI_GP_PRODUCT_ID MALI400_GP_PRODUCT_ID
+#else
+#error "No supported mali core defined"
+#endif
+
+/**
+ * The different performance counter sources on the geometry processor.
+ * @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c
new file mode 100644
index 00000000000..a6b1d76d567
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h
new file mode 100644
index 00000000000..96b709bc166
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ /*
+ * reset counters and overflow flags
+ */
+
+ u32 mask = (1 << 0) | /* enable all three counters */
+ (0 << 1) | /* reset both Count Registers to 0x0 */
+ (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+ (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+ (0 << 4) | /* Count Register 0 interrupt enable */
+ (0 << 5) | /* Count Register 1 interrupt enable */
+ (0 << 6) | /* Cycle Counter interrupt enable */
+ (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+ (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+ (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+ __asm__ __volatile__ ("MCR p15, 0, %0, c15, c12, 0" : : "r" (mask) );
+
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ u32 result;
+
+ /* this is for the clock cycles */
+ __asm__ __volatile__ ("MRC p15, 0, %0, c15, c12, 1" : "=r" (result));
+
+ return (u64)result;
+}
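+
+/*
+ * Illustrative usage sketch (not part of the original driver): the cycle
+ * counter read above can be used to time a code section, assuming the
+ * counter was reset beforehand:
+ *
+ *   u64 start, elapsed_cycles;
+ *   _mali_timestamp_reset();
+ *   start = _mali_timestamp_get();
+ *   ... work to be measured ...
+ *   elapsed_cycles = _mali_timestamp_get() - start;
+ *
+ * Note that the value wraps when the 32-bit cycle counter overflows.
+ */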
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c
new file mode 100644
index 00000000000..a6b1d76d567
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h
new file mode 100644
index 00000000000..11031d675ca
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ return _mali_osk_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile
new file mode 100644
index 00000000000..97225bbbf0b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile
@@ -0,0 +1,115 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+UMP_FILE_PREFIX =
+UDD_FILE_PREFIX = ../mali/
+
+ifneq ($(KBUILD_EXTMOD),)
+include $(KBUILD_EXTMOD)/Makefile.common
+else
+include ./Makefile.common
+endif
+
+# For each arch check: CROSS_COMPILE , KDIR , CFLAGS += -DARCH
+
+ARCH ?= arm
+## @note Should allow overriding of building UMP for non-debug:
+EXTRA_CFLAGS += -DDEBUG -DMALI_STATE_TRACKING=0
+
+# linux build system integration
+
+ifneq ($(KERNELRELEASE),)
+# Inside the kernel build system
+
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD) -I$(KBUILD_EXTMOD)/common -I$(KBUILD_EXTMOD)/linux -I$(KBUILD_EXTMOD)/../mali/common -I$(KBUILD_EXTMOD)/../mali/linux -I$(KBUILD_EXTMOD)/../../ump/include/ump
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(KBUILD_EXTMOD)/linux/license/gpl/*),)
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD)/linux/license/proprietary
+else
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD)/linux/license/gpl
+endif
+
+SRC += $(UMP_FILE_PREFIX)linux/ump_kernel_linux.c \
+ $(UMP_FILE_PREFIX)linux/ump_kernel_memory_backend_os.c \
+ $(UMP_FILE_PREFIX)linux/ump_kernel_memory_backend_dedicated.c \
+ $(UMP_FILE_PREFIX)linux/ump_memory_backend.c \
+ $(UMP_FILE_PREFIX)linux/ump_ukk_wrappers.c \
+ $(UMP_FILE_PREFIX)linux/ump_ukk_ref_wrappers.c \
+ $(UMP_FILE_PREFIX)linux/ump_osk_atomics.c \
+ $(UMP_FILE_PREFIX)linux/ump_osk_low_level_mem.c \
+ $(UMP_FILE_PREFIX)linux/ump_osk_misc.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_atomics.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_locks.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_memory.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_math.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_misc.c
+
+# Selecting files to compile by parsing the config file
+
+MODULE:=ump.ko
+
+obj-m := $(MODULE:.ko=.o)
+$(MODULE:.ko=-y) := $(SRC:.c=.o)
+
+else
+# Outside the kernel build system
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+ # when compiling for ARM we're cross compiling
+ export CROSS_COMPILE ?= arm-none-linux-gnueabi-
+ # default to Virtex5
+ CONFIG ?= pb-virtex5
+else
+ # Compiling for the host
+ CONFIG ?= $(shell uname -m)
+endif
+
+# default cpu to select
+CPU ?= ct11mp
+
+# look up KDIR based on CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+# Validate selected config
+ifneq ($(shell [ -d arch-$(CONFIG) ] && [ -f arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L arch ] && rm arch)
+$(shell ln -sf arch-$(CONFIG) arch)
+$(shell touch arch/config.h)
+endif
+
+all:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) kernelrelease
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR)/../mali clean
+
+endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common
new file mode 100644
index 00000000000..f797c85a6f2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common
@@ -0,0 +1,19 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+SRC = $(UMP_FILE_PREFIX)common/ump_kernel_common.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_descriptor_mapping.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_api.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_ref_drv.c
+
+# Get the subversion revision number, falling back to MALI_RELEASE_NAME if no svn info is available
+SVN_REV:=$(shell ((svnversion | grep -qvE '(exported|Unversioned)' && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+EXTRA_CFLAGS += -DSVN_REV=$(SVN_REV)
+EXTRA_CFLAGS += -DSVN_REV_STRING=\"$(SVN_REV)\"
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h
new file mode 100644
index 00000000000..38ae1eeb3c2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 0
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0xCE000000
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT (32UL * 1024UL * 1024UL)
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c
new file mode 100644
index 00000000000..719370c3de7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_common.h"
+
+
+
+/* ---------------- UMP kernel space API functions follow ---------------- */
+
+
+
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ DBG_MSG(5, ("Returning secure ID. ID: %u\n", mem->secure_id));
+
+ return mem->secure_id;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+ ump_dd_mem * mem;
+
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
+ if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ ump_dd_reference_add(mem);
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ return (ump_dd_handle)mem;
+}
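+
+/*
+ * Illustrative usage sketch (not part of the original driver): a kernel-space
+ * client that has received a secure ID from user space would typically use
+ * the functions in this file like so:
+ *
+ *   ump_dd_handle h = ump_dd_handle_create_from_secure_id(id);
+ *   if (UMP_DD_HANDLE_INVALID != h)
+ *   {
+ *       unsigned long n = ump_dd_phys_block_count_get(h);
+ *       ... look up each block with ump_dd_phys_block_get() ...
+ *       ump_dd_reference_release(h); // drops the reference taken by create
+ *   }
+ */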
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*) memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ return mem->nr_blocks;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ if (blocks == NULL)
+ {
+ DBG_MSG(1, ("NULL parameter in ump_dd_phys_blocks_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ if (mem->nr_blocks != num_blocks)
+ {
+ DBG_MSG(1, ("Specified number of blocks do not match actual number of blocks\n"));
+ return UMP_DD_INVALID;
+ }
+
+ DBG_MSG(5, ("Returning physical block information. ID: %u\n", mem->secure_id));
+
+ _mali_osk_memcpy(blocks, mem->block_array, sizeof(ump_dd_physical_block) * mem->nr_blocks);
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block * block)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ if (block == NULL)
+ {
+ DBG_MSG(1, ("NULL parameter in ump_dd_phys_block_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ if (index >= mem->nr_blocks)
+ {
+ DBG_MSG(5, ("Invalid index specified in ump_dd_phys_block_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ DBG_MSG(5, ("Returning physical block information. ID: %u, index: %lu\n", mem->secure_id, index));
+
+ *block = mem->block_array[index];
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ DBG_MSG(5, ("Returning size. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+ return mem->size_bytes;
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+ int new_ref;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ new_ref = _ump_osk_atomic_inc_and_read(&mem->ref_count);
+
+ DBG_MSG(4, ("Memory reference incremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
+{
+ int new_ref;
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ /* We must hold this mutex while doing the atomic_dec_and_read, to ensure
+ that the elements in the ump_descriptor_mapping table are always valid. If they
+ are not, userspace may accidentally map in a secure_id right before it is freed,
+ giving a mapped backdoor into unallocated memory. */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
+
+ DBG_MSG(4, ("Memory reference decremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+
+ if (0 == new_ref)
+ {
+ DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
+
+ ump_descriptor_mapping_free(device.secure_id_map, (int)mem->secure_id);
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ mem->release_func(mem->ctx, mem);
+ _mali_osk_free(mem);
+ }
+ else
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
+
+
+
+/* --------------- Handling of user space requests follows --------------- */
+
+
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
+{
+ ump_session_data * session_data;
+
+ DEBUG_ASSERT_POINTER( args );
+ DEBUG_ASSERT_POINTER( args->ctx );
+
+ session_data = (ump_session_data *)args->ctx;
+
+ /* check compatibility */
+ if (args->version == UMP_IOCTL_API_VERSION)
+ {
+ DBG_MSG(3, ("API version set to newest %d (compatible)\n", GET_VERSION(args->version)));
+ args->compatible = 1;
+ session_data->api_version = args->version;
+ }
+ else if (args->version == MAKE_VERSION_ID(1))
+ {
+ DBG_MSG(2, ("API version set to depricated: %d (compatible)\n", GET_VERSION(args->version)));
+ args->compatible = 1;
+ session_data->api_version = args->version;
+ }
+ else
+ {
+ DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n", GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+ args->compatible = 0;
+ args->version = UMP_IOCTL_API_VERSION; /* report our version */
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
+{
+ ump_session_memory_list_element * session_memory_element;
+ ump_session_memory_list_element * tmp;
+ ump_session_data * session_data;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
+ int secure_id;
+
+ DEBUG_ASSERT_POINTER( release_info );
+ DEBUG_ASSERT_POINTER( release_info->ctx );
+
+ /* Retrieve the session data */
+ session_data = (ump_session_data*)release_info->ctx;
+
+ /* If there are many items in the memory session list we
+ * could be dereferencing this pointer a lot, so keep a local copy
+ */
+ secure_id = release_info->secure_id;
+
+ DBG_MSG(4, ("Releasing memory with IOCTL, ID: %u\n", secure_id));
+
+ /* Iterate through the memory list looking for the requested secure ID */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+ {
+ if ( session_memory_element->mem->secure_id == secure_id)
+ {
+ ump_dd_mem *release_mem;
+
+ release_mem = session_memory_element->mem;
+ _mali_osk_list_del(&session_memory_element->list);
+ ump_dd_reference_release(release_mem);
+ _mali_osk_free(session_memory_element);
+
+ ret = _MALI_OSK_ERR_OK;
+ break;
+ }
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG_IF(1, _MALI_OSK_ERR_OK != ret, ("UMP memory with ID %u does not belong to this session.\n", secure_id));
+
+ DBG_MSG(4, ("_ump_ukk_release() returning 0x%x\n", ret));
+ return ret;
+}
+
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction )
+{
+ ump_dd_mem * mem;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+ DEBUG_ASSERT_POINTER( user_interaction );
+
+ /* We lock the mappings so things don't get removed while we are looking for the memory */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)user_interaction->secure_id, (void**)&mem))
+ {
+ user_interaction->size = mem->size_bytes;
+ DBG_MSG(4, ("Returning size. ID: %u, size: %lu ", (ump_secure_id)user_interaction->secure_id, (unsigned long)user_interaction->size));
+ ret = _MALI_OSK_ERR_OK;
+ }
+ else
+ {
+ user_interaction->size = 0;
+ DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n", (ump_secure_id)user_interaction->secure_id));
+ }
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ return ret;
+}
+
+
+
+void _ump_ukk_msync( _ump_uk_msync_s *args )
+{
+ ump_dd_mem * mem = NULL;
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (NULL==mem)
+ {
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n", (ump_secure_id)args->secure_id));
+ return;
+ }
+
+ /* Return the cache setting back to userspace */
+ args->is_cached=mem->is_cached;
+
+ /* If this flag is the only one set, we should not do the actual flush, only the readout */
+ if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED==args->op )
+ {
+ DBG_MSG(3, ("_ump_ukk_msync READOUT ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
+ return;
+ }
+
+ /* Nothing to do if the memory is not cached */
+ if ( 0==mem->is_cached )
+ {
+ DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+ return ;
+ }
+ DBG_MSG(3, ("_ump_ukk_msync FLUSHING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+
+ /* The actual cache flush - Implemented for each OS*/
+ _ump_osk_msync( mem , args->op);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c
new file mode 100644
index 00000000000..b99c3e7c7d2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+/**
+ * Define the initial and maximum number of secure_ids on the system
+ */
+#define UMP_SECURE_ID_TABLE_ENTRIES_INITIAL (128 )
+#define UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM (4096 )
+
+
+/**
+ * Define the initial and maximum size of the ump_session_data::cookies_map,
+ * which is a \ref ump_descriptor_mapping. This limits how many secure_ids
+ * may be mapped into a particular process using _ump_ukk_map_mem().
+ */
+
+#define UMP_COOKIES_PER_SESSION_INITIAL (UMP_SECURE_ID_TABLE_ENTRIES_INITIAL )
+#define UMP_COOKIES_PER_SESSION_MAXIMUM (UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM)
+
+struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void)
+{
+ _mali_osk_errcode_t err;
+
+ /* Perform OS Specific initialization */
+ err = _ump_osk_init();
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("Failed to initiaze the UMP Device Driver"));
+ return err;
+ }
+
+ /* Init the global device */
+ _mali_osk_memset(&device, 0, sizeof(device) );
+
+ /* Create the descriptor map, which will be used for mapping secure ID to ump_dd_mem structs */
+ device.secure_id_map_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0 , 0);
+ if (NULL == device.secure_id_map_lock)
+ {
+ MSG_ERR(("Failed to create OSK lock for secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ device.secure_id_map = ump_descriptor_mapping_create(UMP_SECURE_ID_TABLE_ENTRIES_INITIAL, UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM);
+ if (NULL == device.secure_id_map)
+ {
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ MSG_ERR(("Failed to create secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Init memory backend */
+ device.backend = ump_memory_backend_create();
+ if (NULL == device.backend)
+ {
+ MSG_ERR(("Failed to create memory backend\n"));
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ ump_descriptor_mapping_destroy(device.secure_id_map);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void ump_kernel_destructor(void)
+{
+ DEBUG_ASSERT_POINTER(device.secure_id_map);
+ DEBUG_ASSERT_POINTER(device.secure_id_map_lock);
+
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ device.secure_id_map_lock = NULL;
+
+ ump_descriptor_mapping_destroy(device.secure_id_map);
+ device.secure_id_map = NULL;
+
+ device.backend->shutdown(device.backend);
+ device.backend = NULL;
+
+ ump_memory_backend_destroy();
+
+ _ump_osk_term();
+}
+
+/** Creates a new UMP session
+ */
+_mali_osk_errcode_t _ump_ukk_open( void** context )
+{
+ struct ump_session_data * session_data;
+
+ /* allocate a struct to track this session */
+ session_data = (struct ump_session_data *)_mali_osk_malloc(sizeof(struct ump_session_data));
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Failed to allocate ump_session_data in ump_file_open()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0);
+ if( NULL == session_data->lock )
+ {
+ MSG_ERR(("Failed to initialize lock for ump_session_data in ump_file_open()\n"));
+ _mali_osk_free(session_data);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->cookies_map = ump_descriptor_mapping_create( UMP_COOKIES_PER_SESSION_INITIAL, UMP_COOKIES_PER_SESSION_MAXIMUM );
+
+ if ( NULL == session_data->cookies_map )
+ {
+ MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
+
+ _mali_osk_lock_term( session_data->lock );
+ _mali_osk_free( session_data );
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_list);
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_mappings_list);
+
+ /* Since the initial version of the UMP interface did not use the API_VERSION ioctl, we have to assume
+ that it is this version, and not the "latest" one (UMP_IOCTL_API_VERSION).
+ Current and later API versions make an additional call to this IOCTL and update this variable
+ to the correct one. */
+ session_data->api_version = MAKE_VERSION_ID(1);
+
+ *context = (void*)session_data;
+
+ DBG_MSG(2, ("New session opened\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_close( void** context )
+{
+ struct ump_session_data * session_data;
+ ump_session_memory_list_element * item;
+ ump_session_memory_list_element * tmp;
+
+ session_data = (struct ump_session_data *)*context;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_close()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ /* Unmap any descriptors mapped in. */
+ if (0 == _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list))
+ {
+ ump_memory_allocation *descriptor;
+ ump_memory_allocation *temp;
+
+ DBG_MSG(1, ("Memory mappings found on session usage list during session termination\n"));
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->list_head_session_memory_mappings_list, ump_memory_allocation, list)
+ {
+ _ump_uk_unmap_mem_s unmap_args;
+ DBG_MSG(4, ("Freeing block with phys address 0x%x size 0x%x mapped in user space at 0x%x\n",
+ descriptor->phys_addr, descriptor->size, descriptor->mapping));
+ unmap_args.ctx = (void*)session_data;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.size = descriptor->size;
+ unmap_args._ukk_private = NULL; /* NOTE: unused */
+ unmap_args.cookie = descriptor->cookie;
+
+ /* NOTE: This modifies the list_head_session_memory_mappings_list */
+ _ump_ukk_unmap_mem( &unmap_args );
+ }
+ }
+
+ /* ASSERT that we really did free everything, because _ump_ukk_unmap_mem()
+ * can fail silently. */
+ DEBUG_ASSERT( _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list) );
+
+ _MALI_OSK_LIST_FOREACHENTRY(item, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+ {
+ _mali_osk_list_del(&item->list);
+ DBG_MSG(2, ("Releasing UMP memory %u as part of file close\n", item->mem->secure_id));
+ ump_dd_reference_release(item->mem);
+ _mali_osk_free(item);
+ }
+
+ ump_descriptor_mapping_destroy( session_data->cookies_map );
+
+ _mali_osk_lock_term(session_data->lock);
+ _mali_osk_free(session_data);
+
+ DBG_MSG(2, ("Session closed\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
+{
+ struct ump_session_data * session_data;
+ ump_memory_allocation * descriptor; /* Describes current mapping of memory */
+ _mali_osk_errcode_t err;
+ unsigned long offset = 0;
+ unsigned long left;
+ ump_dd_handle handle; /* The real UMP handle for this memory. Its real datatype is ump_dd_mem* */
+ ump_dd_mem * mem; /* The real UMP memory. It is equal to the handle, but with exposed struct */
+ u32 block;
+ int map_id;
+
+ session_data = (ump_session_data *)args->ctx;
+ if( NULL == session_data )
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ descriptor = (ump_memory_allocation*) _mali_osk_calloc( 1, sizeof(ump_memory_allocation));
+ if (NULL == descriptor)
+ {
+ MSG_ERR(("ump_ukk_map_mem: descriptor allocation failed\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ handle = ump_dd_handle_create_from_secure_id(args->secure_id);
+ if ( UMP_DD_HANDLE_INVALID == handle)
+ {
+ _mali_osk_free(descriptor);
+ DBG_MSG(1, ("Trying to map unknown secure ID %u\n", args->secure_id));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mem = (ump_dd_mem*)handle;
+ DEBUG_ASSERT(mem);
+ if (mem->size_bytes != args->size)
+ {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("Trying to map too much or little. ID: %u, virtual size=%lu, UMP size: %lu\n", args->secure_id, args->size, mem->size_bytes));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ map_id = ump_descriptor_mapping_allocate_mapping( session_data->cookies_map, (void*) descriptor );
+
+ if (map_id < 0)
+ {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("ump_ukk_map_mem: unable to allocate a descriptor_mapping for return cookie\n"));
+
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ descriptor->size = args->size;
+ descriptor->handle = handle;
+ descriptor->phys_addr = args->phys_addr;
+ descriptor->process_mapping_info = args->_ukk_private;
+ descriptor->ump_session = session_data;
+ descriptor->cookie = (u32)map_id;
+
+ if ( mem->is_cached )
+ {
+ descriptor->is_cached = 1;
+ args->is_cached = 1;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as cached.\n", args->secure_id));
+ }
+ else
+ {
+ descriptor->is_cached = 0;
+ args->is_cached = 0;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as Uncached.\n", args->secure_id));
+ }
+
+ _mali_osk_list_init( &descriptor->list );
+
+ err = _ump_osk_mem_mapregion_init( descriptor );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ DBG_MSG(1, ("Failed to initialize memory mapping in _ump_ukk_map_mem(). ID: %u\n", args->secure_id));
+ ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(mem);
+ return err;
+ }
+
+ DBG_MSG(4, ("Mapping virtual to physical memory: ID: %u, size:%lu, first physical addr: 0x%08lx, number of regions: %lu\n",
+ mem->secure_id,
+ mem->size_bytes,
+ ((NULL != mem->block_array) ? mem->block_array->addr : 0),
+ mem->nr_blocks));
+
+ left = descriptor->size;
+ /* loop over all blocks and map them in */
+ for (block = 0; block < mem->nr_blocks; block++)
+ {
+ unsigned long size_to_map;
+
+ if (left > mem->block_array[block].size)
+ {
+ size_to_map = mem->block_array[block].size;
+ }
+ else
+ {
+ size_to_map = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *)&(mem->block_array[block].addr), size_to_map ) )
+ {
+ DBG_MSG(1, ("WARNING: _ump_ukk_map_mem failed to map memory into userspace\n"));
+ ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ ump_dd_reference_release(mem);
+ _ump_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ left -= size_to_map;
+ offset += size_to_map;
+ }
+
+ /* Add to the ump_memory_allocation tracking list */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_add( &descriptor->list, &session_data->list_head_session_memory_mappings_list );
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->mapping = descriptor->mapping;
+ args->cookie = descriptor->cookie;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args )
+{
+ struct ump_session_data * session_data;
+ ump_memory_allocation * descriptor;
+ ump_dd_handle handle;
+
+ session_data = (ump_session_data *)args->ctx;
+
+ if( NULL == session_data )
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return;
+ }
+
+ if (0 != ump_descriptor_mapping_get( session_data->cookies_map, (int)args->cookie, (void**)&descriptor) )
+ {
+ MSG_ERR(("_ump_ukk_map_mem: cookie 0x%X not found for this session\n", args->cookie ));
+ return;
+ }
+
+ DEBUG_ASSERT_POINTER(descriptor);
+
+ handle = descriptor->handle;
+ if ( UMP_DD_HANDLE_INVALID == handle)
+ {
+ DBG_MSG(1, ("WARNING: Trying to unmap unknown handle: UNKNOWN\n"));
+ return;
+ }
+
+ /* Remove the ump_memory_allocation from the list of tracked mappings */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_del( &descriptor->list );
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ ump_descriptor_mapping_free( session_data->cookies_map, (int)args->cookie );
+
+ ump_dd_reference_release(handle);
+
+ _ump_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h
new file mode 100644
index 00000000000..0c55b14bc94
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_H__
+#define __UMP_KERNEL_H__
+
+#include "ump_kernel_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+#ifdef DEBUG
+ extern int ump_debug_level;
+ #define UMP_DEBUG_PRINT(args) _mali_osk_dbgmsg args
+ #define UMP_DEBUG_CODE(args) args
+ #define DBG_MSG(level,args) do { /* args should be in brackets */ \
+ ((level) <= ump_debug_level)?\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")), \
+ UMP_DEBUG_PRINT(args):0; \
+ } while (0)
+
+ #define DBG_MSG_IF(level,condition,args) /* args should be in brackets */ \
+ if((condition)&&((level) <= ump_debug_level)) {\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+ #define DBG_MSG_ELSE(level,args) /* args should be in brackets */ \
+ else if((level) <= ump_debug_level) { \
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+ #define DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) MSG_ERR(("NULL pointer " #pointer)); } while(0)
+ #define DEBUG_ASSERT(condition) do {if(!(condition)) MSG_ERR(("ASSERT failed: " #condition)); } while(0)
+#else /* DEBUG */
+ #define UMP_DEBUG_PRINT(args) do {} while(0)
+ #define UMP_DEBUG_CODE(args)
+ #define DBG_MSG(level,args) do {} while(0)
+ #define DBG_MSG_IF(level,condition,args) do {} while(0)
+ #define DBG_MSG_ELSE(level,args) do {} while(0)
+ #define DEBUG_ASSERT(condition) do {} while(0)
+ #define DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#endif /* DEBUG */
+
+#define MSG_ERR(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
+ _mali_osk_dbgmsg( " %s()%4d\n", __FUNCTION__, __LINE__) ; \
+ _mali_osk_dbgmsg args ; \
+ _mali_osk_dbgmsg("\n"); \
+ } while(0)
+
+#define MSG(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: "); \
+ _mali_osk_dbgmsg args; \
+ } while (0)
+
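+/*
+ * Illustrative usage (not part of the original driver): the debug macros
+ * above take their printf-style arguments in an extra set of brackets, e.g.:
+ *
+ *   DBG_MSG(3, ("Mapping ID %u, size %lu\n", id, size));
+ *   MSG_ERR(("Allocation failed\n"));
+ *
+ * where "id" and "size" are just example variables.
+ */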
+
+
+/*
+ * This struct is used to store per session data.
+ * A session is created when someone open() the device, and
+ * closed when someone close() it or the user space application terminates.
+ */
+typedef struct ump_session_data
+{
+ _mali_osk_list_t list_head_session_memory_list; /**< List of ump allocations made by the process (elements are ump_session_memory_list_element) */
+ _mali_osk_list_t list_head_session_memory_mappings_list; /**< List of ump_memory_allocations mapped in */
+ int api_version;
+ _mali_osk_lock_t * lock;
+ ump_descriptor_mapping * cookies_map; /**< Secure mapping of cookies from _ump_ukk_map_mem() */
+} ump_session_data;
+
+
+
+/*
+ * This struct is used to track the UMP memory references a session has.
+ * We need to track this in order to be able to clean up after user space processes
+ * which don't do it themselves (e.g. due to a crash or premature termination).
+ */
+typedef struct ump_session_memory_list_element
+{
+ struct ump_dd_mem * mem;
+ _mali_osk_list_t list;
+} ump_session_memory_list_element;
+
+
+
+/*
+ * Device specific data, created when device driver is loaded, and then kept as the global variable device.
+ */
+typedef struct ump_dev
+{
+ _mali_osk_lock_t * secure_id_map_lock;
+ ump_descriptor_mapping * secure_id_map;
+ ump_memory_backend * backend;
+} ump_dev;
+
+
+
+extern int ump_debug_level;
+extern struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void);
+void ump_kernel_destructor(void);
+int map_errcode( _mali_osk_errcode_t err );
+
+/**
+ * Variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this, so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __UMP_KERNEL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c
new file mode 100644
index 00000000000..2531f802127
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static ump_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(ump_descriptor_table * table);
+
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ ump_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping) );
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map)
+ {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table)
+ {
+ map->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_READERWRITER, 0 , 0);
+ if ( NULL != map->lock )
+ {
+ _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic from kicking in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
+
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_lock_term( map->lock );
+ _mali_osk_free(map);
+}
+
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target)
+{
+ int descriptor = -1;/*-EFAULT;*/
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+ if (descriptor == map->current_nr_mappings)
+ {
+ int nr_mappings_new;
+ /* no free descriptor, try to expand the table */
+ ump_descriptor_table * new_table;
+ ump_descriptor_table * old_table = map->table;
+ nr_mappings_new= map->current_nr_mappings *2;
+
+ if (map->current_nr_mappings >= map->max_nr_mappings_allowed)
+ {
+ descriptor = -1;
+ goto unlock_and_exit;
+ }
+
+ new_table = descriptor_table_alloc(nr_mappings_new);
+ if (NULL == new_table)
+ {
+ descriptor = -1;
+ goto unlock_and_exit;
+ }
+
+ _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+ _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+ map->table = new_table;
+ map->current_nr_mappings = nr_mappings_new;
+ descriptor_table_free(old_table);
+ }
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(descriptor, map->table->usage);
+ map->table->mappings[descriptor] = target;
+
+unlock_and_exit:
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ return descriptor;
+}
+
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target)
+{
+ int result = -1;/*-EFAULT;*/
+ DEBUG_ASSERT(map);
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ *target = map->table->mappings[descriptor];
+ result = 0;
+ }
+ else *target = NULL;
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ return result;
+}
+
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target)
+{
+ int result = -1;/*-EFAULT;*/
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = target;
+ result = 0;
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ return result;
+}
+
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor)
+{
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static ump_descriptor_table * descriptor_table_alloc(int count)
+{
+ ump_descriptor_table * table;
+
+ table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count) );
+
+ if (NULL != table)
+ {
+ table->usage = (u32*)((u8*)table + sizeof(ump_descriptor_table));
+ table->mappings = (void**)((u8*)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ }
+
+ return table;
+}
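+
+/*
+ * Layout note (added for clarity): descriptor_table_alloc() above packs the
+ * table header, the usage bitmap and the mappings array into one allocation:
+ *
+ *   [ump_descriptor_table][usage bitmap: count bits][mappings: count x void*]
+ *
+ * The usage and mappings pointers are then set to point inside that block.
+ */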
+
+static void descriptor_table_free(ump_descriptor_table * table)
+{
+ _mali_osk_free(table);
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h
new file mode 100644
index 00000000000..92bbe54bd35
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_descriptor_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct ump_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} ump_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct ump_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ ump_descriptor_table * table; /**< Pointer to the current mapping table */
+} ump_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries entries, growable up to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Maximum number of entries to support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return The descriptor allocated, a negative value on error
+ */
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor);
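+
+/*
+ * Minimal usage sketch (illustrative only; the entry counts are arbitrary example
+ * values and my_object is any caller-owned pointer):
+ *
+ *   ump_descriptor_mapping * map = ump_descriptor_mapping_create(64, 4096);
+ *   int id = ump_descriptor_mapping_allocate_mapping(map, my_object);
+ *   void * obj = NULL;
+ *   if (0 == ump_descriptor_mapping_get(map, id, &obj))
+ *           ... use obj ...
+ *   ump_descriptor_mapping_free(map, id);
+ *   ump_descriptor_mapping_destroy(map);
+ */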
+
+#endif /* __UMP_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h
new file mode 100644
index 00000000000..02a64707036
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_H__
+
+#include "ump_kernel_interface.h"
+#include "ump_kernel_types.h"
+
+
+typedef struct ump_memory_allocation
+{
+ void * phys_addr;
+ void * mapping;
+ unsigned long size;
+ ump_dd_handle handle;
+ void * process_mapping_info;
+ u32 cookie; /**< necessary on some U/K interface implementations */
+ struct ump_session_data * ump_session; /**< Session that this allocation belongs to */
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+ u32 is_cached;
+} ump_memory_allocation;
+
+typedef struct ump_memory_backend
+{
+ int (*allocate)(void* ctx, ump_dd_mem * descriptor);
+ void (*release)(void* ctx, ump_dd_mem * descriptor);
+ void (*shutdown)(struct ump_memory_backend * backend);
+ int (*pre_allocate_physical_check)(void *ctx, u32 size);
+ u32 (*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);
+ void * ctx;
+} ump_memory_backend;
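+
+/* A backend's allocate() callback is expected to fill in descriptor->block_array,
+ * nr_blocks and size_bytes and return non-zero on success (zero on failure);
+ * release() undoes a successful allocate(). This matches the OS and dedicated
+ * backends provided with this driver. */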
+
+ump_memory_backend * ump_memory_backend_create ( void );
+void ump_memory_backend_destroy( void );
+
+#endif /*__UMP_KERNEL_MEMORY_BACKEND_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c
new file mode 100644
index 00000000000..5a997e222a7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define UMP_MINIMUM_SIZE 4096
+#define UMP_MINIMUM_SIZE_MASK (~(UMP_MINIMUM_SIZE-1))
+#define UMP_SIZE_ALIGN(x) (((x)+UMP_MINIMUM_SIZE-1)&UMP_MINIMUM_SIZE_MASK)
+#define UMP_ADDR_ALIGN_OFFSET(x) ((x)&(UMP_MINIMUM_SIZE-1))
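+/* Examples (illustrative): UMP_SIZE_ALIGN(1) == 4096, UMP_SIZE_ALIGN(4097) == 8192,
+ * UMP_ADDR_ALIGN_OFFSET(0x1234) == 0x234 (non-zero means the address is not 4kB aligned). */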
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor);
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+ ump_dd_mem * mem;
+ unsigned long size_total = 0;
+ int map_id;
+ u32 i;
+
+ /* Go through the input blocks and verify that they are sane */
+ for (i=0; i < num_blocks; i++)
+ {
+ unsigned long addr = blocks[i].addr;
+ unsigned long size = blocks[i].size;
+
+ DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
+ size_total += blocks[i].size;
+
+ if (0 != UMP_ADDR_ALIGN_OFFSET(addr))
+ {
+ MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ if (0 != UMP_ADDR_ALIGN_OFFSET(size))
+ {
+ MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
+ return UMP_DD_HANDLE_INVALID;
+ }
+ }
+
+ /* Allocate the ump_dd_mem struct for this allocation */
+ mem = _mali_osk_malloc(sizeof(*mem));
+ if (NULL == mem)
+ {
+ DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Find a secure ID for this allocation */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);
+
+ if (map_id < 0)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Now, make a copy of the block information supplied by the user */
+ mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
+ if (NULL == mem->block_array)
+ {
+ ump_descriptor_mapping_free(device.secure_id_map, map_id);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ _mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);
+
+ /* And setup the rest of the ump_dd_mem struct */
+ _mali_osk_atomic_init(&mem->ref_count, 1);
+ mem->secure_id = (ump_secure_id)map_id;
+ mem->size_bytes = size_total;
+ mem->nr_blocks = num_blocks;
+ mem->backend_info = NULL;
+ mem->ctx = NULL;
+ mem->release_func = phys_blocks_release;
+ /* For now, UMP handles created by ump_dd_handle_create_from_phys_blocks() are forced to be uncached */
+ mem->is_cached = 0;
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+ return (ump_dd_handle)mem;
+}
+
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor)
+{
+ _mali_osk_free(descriptor->block_array);
+ descriptor->block_array = NULL;
+}
+
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
+{
+ ump_session_data * session_data = NULL;
+ ump_dd_mem *new_allocation = NULL;
+ ump_session_memory_list_element * session_memory_element = NULL;
+ int map_id;
+
+ DEBUG_ASSERT_POINTER( user_interaction );
+ DEBUG_ASSERT_POINTER( user_interaction->ctx );
+
+ session_data = (ump_session_data *) user_interaction->ctx;
+
+ session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
+ if (NULL == session_memory_element)
+ {
+ DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+
+ new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
+ if (NULL==new_allocation)
+ {
+ _mali_osk_free(session_memory_element);
+ DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Create a secure ID for this allocation */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);
+
+ if (map_id < 0)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(session_memory_element);
+ _mali_osk_free(new_allocation);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
+ return - _MALI_OSK_ERR_INVALID_FUNC;
+ }
+
+ /* Initialize the part of the new_allocation that we know so far */
+ new_allocation->secure_id = (ump_secure_id)map_id;
+ _mali_osk_atomic_init(&new_allocation->ref_count,1);
+ if ( 0==(UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints) )
+ new_allocation->is_cached = 0;
+ else new_allocation->is_cached = 1;
+
+ /* Special-case a size of 0: emulate what malloc() does, i.e. return a valid pointer that must be freed but cannot be dereferenced */
+ if (0 == user_interaction->size)
+ {
+ user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
+ }
+
+ new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
+
+ /* Now, ask the active memory backend to do the actual memory allocation */
+ if (!device.backend->allocate( device.backend->ctx, new_allocation ) )
+ {
+ DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
+ ump_descriptor_mapping_free(device.secure_id_map, map_id);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(new_allocation);
+ _mali_osk_free(session_memory_element);
+ return _MALI_OSK_ERR_INVALID_FUNC;
+ }
+
+ new_allocation->ctx = device.backend->ctx;
+ new_allocation->release_func = device.backend->release;
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Initialize the session_memory_element, and add it to the session object */
+ session_memory_element->mem = new_allocation;
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ user_interaction->secure_id = new_allocation->secure_id;
+ user_interaction->size = new_allocation->size_bytes;
+ DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h
new file mode 100644
index 00000000000..dc79b6f289b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_TYPES_H__
+#define __UMP_KERNEL_TYPES_H__
+
+#include "ump_kernel_interface.h"
+#include "mali_osk.h"
+
+/*
+ * This struct is what is "behind" a ump_dd_handle
+ */
+typedef struct ump_dd_mem
+{
+ ump_secure_id secure_id;
+ _mali_osk_atomic_t ref_count;
+ unsigned long size_bytes;
+ unsigned long nr_blocks;
+ ump_dd_physical_block * block_array;
+ void (*release_func)(void * ctx, struct ump_dd_mem * descriptor);
+ void * ctx;
+ void * backend_info;
+ int is_cached;
+} ump_dd_mem;
+
+
+
+#endif /* __UMP_KERNEL_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h
new file mode 100644
index 00000000000..73284f02b47
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk.h
+ * Defines the OS abstraction layer for the UMP kernel device driver (OSK)
+ */
+
+#ifndef __UMP_OSK_H__
+#define __UMP_OSK_H__
+
+#include <mali_osk.h>
+#include <ump_kernel_memory_backend.h>
+#include <ump_uk_types.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+_mali_osk_errcode_t _ump_osk_init( void );
+
+_mali_osk_errcode_t _ump_osk_term( void );
+
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom );
+
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation *descriptor );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size );
+
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor );
+
+void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h
new file mode 100644
index 00000000000..b08335f61f1
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __UMP_UK_TYPES_H__
+#define __UMP_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Helpers for API version handling */
+#define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+#define IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+#define GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+#define IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * So for version 1 the value would be 0x00010001
+ */
+#define UMP_IOCTL_API_VERSION MAKE_VERSION_ID(2)
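+/* Example (illustrative): MAKE_VERSION_ID(2) == 0x00020002, GET_VERSION(0x00020002) == 2,
+ * IS_VERSION_ID(0x00020002) holds, and IS_API_MATCH(0x00020002, 0x00010001) is false
+ * because the stored versions (2 and 1) differ. */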
+
+typedef enum
+{
+ _UMP_IOC_QUERY_API_VERSION = 1,
+ _UMP_IOC_ALLOCATE,
+ _UMP_IOC_RELEASE,
+ _UMP_IOC_SIZE_GET,
+ _UMP_IOC_MAP_MEM, /* not used in Linux */
+ _UMP_IOC_UNMAP_MEM, /* not used in Linux */
+ _UMP_IOC_MSYNC,
+}_ump_uk_functions;
+
+typedef enum
+{
+ UMP_REF_DRV_UK_CONSTRAINT_NONE = 0,
+ UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR = 1,
+ UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE = 4,
+} ump_uk_alloc_constraints;
+
+typedef enum
+{
+ _UMP_UK_MSYNC_CLEAN = 0,
+ _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
+ _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
+} ump_uk_msync_op;
+
+/**
+ * Get API version ([in,out] u32 api_version, [out] u32 compatible)
+ */
+typedef struct _ump_uk_api_version_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 version; /**< Set to the user space version on entry, stores the device driver version on exit */
+ u32 compatible; /**< Non-zero if the device is compatible with the client */
+} _ump_uk_api_version_s;
+
+/**
+ * ALLOCATE ([out] u32 secure_id, [in,out] u32 size, [in] constraints)
+ */
+typedef struct _ump_uk_allocate_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Return value from DD to Userdriver */
+ u32 size; /**< [in,out] Requested size on input; actual (aligned) size on output */
+ ump_uk_alloc_constraints constraints; /**< Only input to Devicedriver */
+} _ump_uk_allocate_s;
+
+/**
+ * SIZE_GET ([in] u32 secure_id, [out] size)
+ */
+typedef struct _ump_uk_size_get_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+ u32 size; /**< Returned size; output */
+} _ump_uk_size_get_s;
+
+/**
+ * Release ([in] u32 secure_id)
+ */
+typedef struct _ump_uk_release_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+} _ump_uk_release_s;
+
+typedef struct _ump_uk_map_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ void *phys_addr; /**< [in] physical address */
+ unsigned long size; /**< [in] size */
+ u32 secure_id; /**< [in] secure_id to assign to mapping */
+ void * _ukk_private; /**< Only used inside linux port between kernel frontend and common part to store vma */
+ u32 cookie;
+ u32 is_cached; /**< [in,out] caching of CPU mappings */
+} _ump_uk_map_mem_s;
+
+typedef struct _ump_uk_unmap_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping;
+ u32 size;
+ void * _ukk_private;
+ u32 cookie;
+} _ump_uk_unmap_mem_s;
+
+typedef struct _ump_uk_msync_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] mapping addr */
+ void *address; /**< [in] flush start addr */
+ u32 size; /**< [in] size to flush */
+ ump_uk_msync_op op; /**< [in] flush operation */
+ u32 cookie; /**< [in] cookie stored with reference to the kernel mapping internals */
+ u32 secure_id; /**< [in] secure ID of the UMP memory to sync */
+ u32 is_cached; /**< [out] caching of CPU mappings */
+} _ump_uk_msync_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UK_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h
new file mode 100644
index 00000000000..a3317fc7b21
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __UMP_UKK_H__
+#define __UMP_UKK_H__
+
+#include "mali_osk.h"
+#include "ump_uk_types.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+_mali_osk_errcode_t _ump_ukk_open( void** context );
+
+_mali_osk_errcode_t _ump_ukk_close( void** context );
+
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction );
+
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info );
+
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction );
+
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args );
+
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args );
+
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args );
+
+void _ump_ukk_msync( _ump_uk_msync_s *args );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h
new file mode 100644
index 00000000000..17b930d2c57
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __UMP_KERNEL_LICENSE_H__
+#define __UMP_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define UMP_KERNEL_LINUX_LICENSE "GPL"
+#define UMP_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h
new file mode 100644
index 00000000000..b11429816ab
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_IOCTL_H__
+#define __UMP_IOCTL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include "../common/ump_uk_types.h"
+
+#ifndef __user
+#define __user
+#endif
+
+
+/**
+ * @file ump_ioctl.h
+ * This file describes the interface needed to use the Linux device driver.
+ * The interface is used by the userspace UMP driver.
+ */
+
+#define UMP_IOCTL_NR 0x90
+
+
+#define UMP_IOC_QUERY_API_VERSION _IOR(UMP_IOCTL_NR, _UMP_IOC_QUERY_API_VERSION, _ump_uk_api_version_s)
+#define UMP_IOC_ALLOCATE _IOWR(UMP_IOCTL_NR, _UMP_IOC_ALLOCATE, _ump_uk_allocate_s)
+#define UMP_IOC_RELEASE _IOR(UMP_IOCTL_NR, _UMP_IOC_RELEASE, _ump_uk_release_s)
+#define UMP_IOC_SIZE_GET _IOWR(UMP_IOCTL_NR, _UMP_IOC_SIZE_GET, _ump_uk_size_get_s)
+#define UMP_IOC_MSYNC _IOW(UMP_IOCTL_NR, _UMP_IOC_MSYNC, _ump_uk_size_get_s)
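+
+/*
+ * Illustrative user-space sequence (assumes a device node such as /dev/ump; error
+ * handling omitted):
+ *
+ *   _ump_uk_allocate_s args = {0};
+ *   int fd = open("/dev/ump", O_RDWR);
+ *   args.size = 64 * 1024;
+ *   args.constraints = UMP_REF_DRV_UK_CONSTRAINT_NONE;
+ *   if (0 == ioctl(fd, UMP_IOC_ALLOCATE, &args))
+ *           printf("secure id %u, size %u\n", args.secure_id, args.size);
+ */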
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_IOCTL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c
new file mode 100644
index 00000000000..76d6b2f913a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/ioport.h> /* request_mem_region */
+#include <linux/mm.h> /* memory management functions and types */
+#include <asm/uaccess.h> /* user space access */
+#include <asm/atomic.h>
+#include <linux/device.h>
+#include "arch/config.h" /* Configuration for current platform. The symlinc for arch is set by Makefile */
+#include "ump_ioctl.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+#include "ump_kernel_license.h"
+
+#include "ump_osk.h"
+#include "ump_ukk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk_wrappers.h"
+#include "ump_ukk_ref_wrappers.h"
+
+
+/* Module parameter to control log level */
+int ump_debug_level = 2;
+module_param(ump_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(ump_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int ump_major = 0;
+module_param(ump_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_major, "Device major number");
+
+/* Name of the UMP device driver */
+static char ump_dev_name[] = "ump"; /* should be const, but the functions we call require non-const */
+
+
+
+/*
+ * The data which we attached to each virtual memory mapping request we get.
+ * Each memory mapping has a reference to the UMP memory it maps.
+ * We release this reference when the last memory mapping is unmapped.
+ */
+typedef struct ump_vma_usage_tracker
+{
+ int references;
+ ump_dd_handle handle;
+} ump_vma_usage_tracker;
+
+struct ump_device
+{
+ struct cdev cdev;
+#if UMP_LICENSE_IS_GPL
+ struct class * ump_class;
+#endif
+};
+
+/* The global variable containing the global device data */
+static struct ump_device ump_device;
+
+
+/* Forward declare static functions */
+static int ump_file_open(struct inode *inode, struct file *filp);
+static int ump_file_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma);
+
+
+/* This variable defines the file operations this UMP device driver offer */
+static struct file_operations ump_fops =
+{
+ .owner = THIS_MODULE,
+ .open = ump_file_open,
+ .release = ump_file_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = ump_file_ioctl,
+#else
+ .ioctl = ump_file_ioctl,
+#endif
+ .mmap = ump_file_mmap
+};
+
+
+/* This function is called by Linux to initialize this module.
+ * All we do is initialize the UMP device driver.
+ */
+static int ump_initialize_module(void)
+{
+ _mali_osk_errcode_t err;
+
+ DBG_MSG(2, ("Inserting UMP device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__));
+
+ err = ump_kernel_constructor();
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("UMP device driver init failed\n"));
+ return map_errcode(err);
+ }
+
+ MSG(("UMP device driver %s loaded\n", SVN_REV_STRING));
+ return 0;
+}
+
+
+
+/*
+ * This function is called by Linux to unload/terminate/exit/cleanup this module.
+ * All we do is terminate the UMP device driver.
+ */
+static void ump_cleanup_module(void)
+{
+ DBG_MSG(2, ("Unloading UMP device driver\n"));
+ ump_kernel_destructor();
+ DBG_MSG(2, ("Module unloaded\n"));
+}
+
+
+
+/*
+ * Initialize the UMP device driver.
+ */
+int ump_kernel_device_initialize(void)
+{
+ int err;
+ dev_t dev = 0;
+
+ if (0 == ump_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0, 1, ump_dev_name);
+ ump_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(ump_major, 0);
+ err = register_chrdev_region(dev, 1, ump_dev_name);
+ }
+
+ if (0 == err)
+ {
+ memset(&ump_device, 0, sizeof(ump_device));
+
+ /* initialize our char dev data */
+ cdev_init(&ump_device.cdev, &ump_fops);
+ ump_device.cdev.owner = THIS_MODULE;
+ ump_device.cdev.ops = &ump_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&ump_device.cdev, dev, 1/*count*/);
+ if (0 == err)
+ {
+
+#if UMP_LICENSE_IS_GPL
+ ump_device.ump_class = class_create(THIS_MODULE, ump_dev_name);
+ if (IS_ERR(ump_device.ump_class))
+ {
+ err = PTR_ERR(ump_device.ump_class);
+ }
+ else
+ {
+ struct device * mdev;
+ mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name);
+ if (!IS_ERR(mdev))
+ {
+ return 0;
+ }
+
+ err = PTR_ERR(mdev);
+ }
+ cdev_del(&ump_device.cdev);
+#else
+ return 0;
+#endif
+ }
+
+ unregister_chrdev_region(dev, 1);
+ }
+
+ return err;
+}
+
+
+
+/*
+ * Terminate the UMP device driver
+ */
+void ump_kernel_device_terminate(void)
+{
+ dev_t dev = MKDEV(ump_major, 0);
+
+#if UMP_LICENSE_IS_GPL
+ device_destroy(ump_device.ump_class, dev);
+ class_destroy(ump_device.ump_class);
+#endif
+
+ /* unregister char device */
+ cdev_del(&ump_device.cdev);
+
+ /* free major */
+ unregister_chrdev_region(dev, 1);
+}
+
+/*
+ * Open a new session. User space has called open() on us.
+ */
+static int ump_file_open(struct inode *inode, struct file *filp)
+{
+ struct ump_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev))
+ {
+ MSG_ERR(("Minor not zero in ump_file_open()\n"));
+ return -ENODEV;
+ }
+
+ /* Call the OS-Independent UMP Open function */
+ err = _ump_ukk_open((void**) &session_data );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("Ump failed to open a new session\n"));
+ return map_errcode( err );
+ }
+
+ filp->private_data = (void*)session_data;
+ filp->f_pos = 0;
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Close a session. User space has called close() or crashed/terminated.
+ */
+static int ump_file_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ err = _ump_ukk_close((void**) &filp->private_data );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ return map_errcode( err );
+ }
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Handle IOCTL requests.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err = -ENOTTY;
+ void __user * argument;
+ struct ump_session_data * session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ (void)inode; /* inode not used */
+#endif
+
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("No session data attached to file object\n"));
+ return -ENOTTY;
+ }
+
+ /* interpret the argument as a user pointer to something */
+ argument = (void __user *)arg;
+
+ switch (cmd)
+ {
+ case UMP_IOC_QUERY_API_VERSION:
+ err = ump_get_api_version_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_ALLOCATE :
+ err = ump_allocate_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_RELEASE:
+ err = ump_release_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_SIZE_GET:
+ err = ump_size_get_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_MSYNC:
+ err = ump_msync_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ default:
+ DBG_MSG(1, ("No handler for IOCTL. cmd: 0x%08x, arg: 0x%08lx\n", cmd, arg));
+ err = -EFAULT;
+ break;
+ }
+
+ return err;
+}
+
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+
+/*
+ * Handler called by the OS to map the specified virtual memory range to the specified UMP memory.
+ */
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ _ump_uk_map_mem_s args;
+ _mali_osk_errcode_t err;
+ struct ump_session_data * session_data;
+
+ /* Validate the session data */
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("mmap() called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ /* Re-pack the arguments that mmap() packed for us */
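+ /* The secure ID arrives in the mmap() offset: user space passes secure_id * PAGE_SIZE,
+ * which the kernel stores in vma->vm_pgoff (offset >> PAGE_SHIFT). */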
+ args.ctx = session_data;
+ args.phys_addr = 0;
+ args.size = vma->vm_end - vma->vm_start;
+ args._ukk_private = vma;
+ args.secure_id = vma->vm_pgoff;
+ args.is_cached = 0;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ {
+ args.is_cached = 1;
+ vma->vm_flags = vma->vm_flags | VM_SHARED | VM_MAYSHARE ;
+ DBG_MSG(3, ("UMP Map function: Forcing the CPU to use cache\n"));
+ }
+
+ DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags ));
+
+ /* Call the common mmap handler */
+ err = _ump_ukk_map_mem( &args );
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("_ump_ukk_map_mem() failed in function ump_file_mmap()"));
+ return map_errcode( err );
+ }
+
+ return 0; /* success */
+}
+
+/* Export UMP kernel space API functions */
+EXPORT_SYMBOL(ump_dd_secure_id_get);
+EXPORT_SYMBOL(ump_dd_handle_create_from_secure_id);
+EXPORT_SYMBOL(ump_dd_phys_block_count_get);
+EXPORT_SYMBOL(ump_dd_phys_block_get);
+EXPORT_SYMBOL(ump_dd_phys_blocks_get);
+EXPORT_SYMBOL(ump_dd_size_get);
+EXPORT_SYMBOL(ump_dd_reference_add);
+EXPORT_SYMBOL(ump_dd_reference_release);
+
+/* Export our own extended kernel space allocator */
+EXPORT_SYMBOL(ump_dd_handle_create_from_phys_blocks);
+
+/* Setup init and exit functions for this module */
+module_init(ump_initialize_module);
+module_exit(ump_cleanup_module);
+
+/* And some module information */
+MODULE_LICENSE(UMP_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h
new file mode 100644
index 00000000000..464c035974b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_H__
+#define __UMP_KERNEL_H__
+
+int ump_kernel_device_initialize(void);
+void ump_kernel_device_terminate(void);
+
+
+#endif /* __UMP_KERNEL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c
new file mode 100644
index 00000000000..5c8a7f3783f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+#define UMP_BLOCK_SIZE (256UL * 1024UL) /* 256kB, remember to keep the ()s */
+
+
+
+typedef struct block_info
+{
+ struct block_info * next;
+} block_info;
+
+
+
+typedef struct block_allocator
+{
+ struct semaphore mutex;
+ block_info * all_blocks;
+ block_info * first_free;
+ u32 base;
+ u32 num_blocks;
+ u32 num_free;
+} block_allocator;
+
+
+static void block_allocator_shutdown(ump_memory_backend * backend);
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem);
+static void block_allocator_release(void * ctx, ump_dd_mem * handle);
+static inline u32 get_phys(block_allocator * allocator, block_info * block);
+
+
+
+/*
+ * Create dedicated memory backend
+ */
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size)
+{
+ ump_memory_backend * backend;
+ block_allocator * allocator;
+ u32 usable_size;
+ u32 num_blocks;
+
+ usable_size = (size + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1);
+ num_blocks = usable_size / UMP_BLOCK_SIZE;
+
+ if (0 == usable_size)
+ {
+ DBG_MSG(1, ("Memory block of size %u is unusable\n", size));
+ return NULL;
+ }
+
+ DBG_MSG(5, ("Creating dedicated UMP memory backend. Base address: 0x%08x, size: 0x%08x\n", base_address, size));
+ DBG_MSG(6, ("%u usable bytes which becomes %u blocks\n", usable_size, num_blocks));
+
+ backend = kzalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL != backend)
+ {
+ allocator = kmalloc(sizeof(block_allocator), GFP_KERNEL);
+ if (NULL != allocator)
+ {
+ allocator->all_blocks = kmalloc(sizeof(block_info) * num_blocks, GFP_KERNEL);
+ if (NULL != allocator->all_blocks)
+ {
+ int i;
+
+ allocator->first_free = NULL;
+ allocator->num_blocks = num_blocks;
+ allocator->num_free = num_blocks;
+ allocator->base = base_address;
+ sema_init(&allocator->mutex, 1);
+
+ for (i = 0; i < num_blocks; i++)
+ {
+ allocator->all_blocks[i].next = allocator->first_free;
+ allocator->first_free = &allocator->all_blocks[i];
+ }
+
+ backend->ctx = allocator;
+ backend->allocate = block_allocator_allocate;
+ backend->release = block_allocator_release;
+ backend->shutdown = block_allocator_shutdown;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+
+ return backend;
+ }
+ kfree(allocator);
+ }
+ kfree(backend);
+ }
+
+ return NULL;
+}
+
+
+
+/*
+ * Destroy specified dedicated memory backend
+ */
+static void block_allocator_shutdown(ump_memory_backend * backend)
+{
+ block_allocator * allocator;
+
+ BUG_ON(!backend);
+ BUG_ON(!backend->ctx);
+
+ allocator = (block_allocator*)backend->ctx;
+
+ DBG_MSG_IF(1, allocator->num_free != allocator->num_blocks, ("%u blocks still in use during shutdown\n", allocator->num_blocks - allocator->num_free));
+
+ kfree(allocator->all_blocks);
+ kfree(allocator);
+ kfree(backend);
+}
+
+
+
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
+{
+ block_allocator * allocator;
+ u32 left;
+ block_info * last_allocated = NULL;
+ int i = 0;
+
+ BUG_ON(!ctx);
+ BUG_ON(!mem);
+
+ allocator = (block_allocator*)ctx;
+ left = mem->size_bytes;
+
+ BUG_ON(!left);
+ BUG_ON(!&allocator->mutex);
+
+ mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
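+ /* Example: with UMP_BLOCK_SIZE == 256kB a 300kB request rounds up to 2 blocks (512kB) */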
+ mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
+ if (NULL == mem->block_array)
+ {
+ MSG_ERR(("Failed to allocate block array\n"));
+ return 0;
+ }
+
+ if (down_interruptible(&allocator->mutex))
+ {
+ MSG_ERR(("Could not get mutex to do block_allocate\n"));
+ vfree(mem->block_array);
+ mem->block_array = NULL;
+ return 0;
+ }
+
+ mem->size_bytes = 0;
+
+ while ((left > 0) && (allocator->first_free))
+ {
+ block_info * block;
+
+ block = allocator->first_free;
+ allocator->first_free = allocator->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+ allocator->num_free--;
+
+ mem->block_array[i].addr = get_phys(allocator, block);
+ mem->block_array[i].size = UMP_BLOCK_SIZE;
+ mem->size_bytes += UMP_BLOCK_SIZE;
+
+ i++;
+
+ if (left < UMP_BLOCK_SIZE) left = 0;
+ else left -= UMP_BLOCK_SIZE;
+ }
+
+ if (left)
+ {
+ block_info * block;
+ /* release all memory back to the pool */
+ while (last_allocated)
+ {
+ block = last_allocated->next;
+ last_allocated->next = allocator->first_free;
+ allocator->first_free = last_allocated;
+ last_allocated = block;
+ allocator->num_free++;
+ }
+
+ vfree(mem->block_array);
+ mem->backend_info = NULL;
+ mem->block_array = NULL;
+
+ DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
+ up(&allocator->mutex);
+
+ return 0;
+ }
+
+ mem->backend_info = last_allocated;
+
+ up(&allocator->mutex);
+ mem->is_cached=0;
+
+ return 1;
+}
+
+
+
+static void block_allocator_release(void * ctx, ump_dd_mem * handle)
+{
+ block_allocator * allocator;
+ block_info * block, * next;
+
+ BUG_ON(!ctx);
+ BUG_ON(!handle);
+
+ allocator = (block_allocator*)ctx;
+ block = (block_info*)handle->backend_info;
+ BUG_ON(!block);
+
+ if (down_interruptible(&allocator->mutex))
+ {
+ MSG_ERR(("Allocator release: Failed to get mutex - memory leak\n"));
+ return;
+ }
+
+ while (block)
+ {
+ next = block->next;
+
+ BUG_ON( (block < allocator->all_blocks) || (block >= (allocator->all_blocks + allocator->num_blocks)));
+
+ block->next = allocator->first_free;
+ allocator->first_free = block;
+ allocator->num_free++;
+
+ block = next;
+ }
+ DBG_MSG(3, ("%d blocks free after release call\n", allocator->num_free));
+ up(&allocator->mutex);
+
+ vfree(handle->block_array);
+ handle->block_array = NULL;
+}
+
+
+
+/*
+ * Helper function for calculating the physical base address of a memory block
+ */
+static inline u32 get_phys(block_allocator * allocator, block_info * block)
+{
+ return allocator->base + ((block - allocator->all_blocks) * UMP_BLOCK_SIZE);
+}
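+
+/* Example (illustrative): with base == 0x80000000 and UMP_BLOCK_SIZE == 256kB,
+ * block index 0 maps to 0x80000000, index 1 to 0x80040000 and index 4 to 0x80100000. */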
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h
new file mode 100644
index 00000000000..58ebe15e5a2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_dedicated.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c
new file mode 100644
index 00000000000..99c9c223036
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+typedef struct os_allocator
+{
+ struct semaphore mutex;
+ u32 num_pages_max; /**< Maximum number of pages to allocate from the OS */
+ u32 num_pages_allocated; /**< Number of pages allocated from the OS */
+} os_allocator;
+
+
+
+static void os_free(void* ctx, ump_dd_mem * descriptor);
+static int os_allocate(void* ctx, ump_dd_mem * descriptor);
+static void os_memory_backend_destroy(ump_memory_backend * backend);
+
+
+
+/*
+ * Create OS memory backend
+ */
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
+{
+ ump_memory_backend * backend;
+ os_allocator * info;
+
+ info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
+ if (NULL == info)
+ {
+ return NULL;
+ }
+
+ info->num_pages_max = max_allocation >> PAGE_SHIFT;
+ info->num_pages_allocated = 0;
+
+ sema_init(&info->mutex, 1);
+
+ backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL == backend)
+ {
+ kfree(info);
+ return NULL;
+ }
+
+ backend->ctx = info;
+ backend->allocate = os_allocate;
+ backend->release = os_free;
+ backend->shutdown = os_memory_backend_destroy;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+
+ return backend;
+}
+
+
+
+/*
+ * Destroy specified OS memory backend
+ */
+static void os_memory_backend_destroy(ump_memory_backend * backend)
+{
+ os_allocator * info = (os_allocator*)backend->ctx;
+
+ DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
+
+ kfree(info);
+ kfree(backend);
+}
+
+
+
+/*
+ * Allocate UMP memory
+ */
+static int os_allocate(void* ctx, ump_dd_mem * descriptor)
+{
+ u32 left;
+ os_allocator * info;
+ int pages_allocated = 0;
+ int is_cached;
+
+ BUG_ON(!descriptor);
+ BUG_ON(!ctx);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size_bytes;
+ is_cached = descriptor->is_cached;
+
+ if (down_interruptible(&info->mutex))
+ {
+ DBG_MSG(1, ("Failed to get mutex in os_free\n"));
+ return 0; /* failure */
+ }
+
+ descriptor->backend_info = NULL;
+ descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+ DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));
+
+ descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
+ if (NULL == descriptor->block_array)
+ {
+ up(&info->mutex);
+ DBG_MSG(1, ("Block array could not be allocated\n"));
+ return 0; /* failure */
+ }
+
+ while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max))
+ {
+ struct page * new_page;
+
+ if (is_cached)
+ {
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
+ } else
+ {
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+ }
+ if (NULL == new_page)
+ {
+ break;
+ }
+
+ /* For uncached allocations dma_map_page() below also cleans/invalidates the CPU cache; cached allocations just record the physical address */
+ if ( is_cached )
+ {
+ descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
+ descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+ } else
+ {
+ descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+ descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+ }
+
+ DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));
+
+ if (left < PAGE_SIZE)
+ {
+ left = 0;
+ }
+ else
+ {
+ left -= PAGE_SIZE;
+ }
+
+ pages_allocated++;
+ }
+
+ DBG_MSG(5, ("Alloce for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id, pages_allocated));
+
+ if (left)
+ {
+ DBG_MSG(1, ("Failed to allocate needed pages\n"));
+
+ while(pages_allocated)
+ {
+ pages_allocated--;
+ if ( !is_cached )
+ {
+ dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
+ }
+
+ up(&info->mutex);
+
+ return 0; /* failure */
+ }
+
+ info->num_pages_allocated += pages_allocated;
+
+ DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ up(&info->mutex);
+
+ return 1; /* success*/
+}
+
+
+/*
+ * Free specified UMP memory
+ */
+static void os_free(void* ctx, ump_dd_mem * descriptor)
+{
+ os_allocator * info;
+ int i;
+
+ BUG_ON(!ctx);
+ BUG_ON(!descriptor);
+
+ info = (os_allocator*)ctx;
+
+ BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
+
+ if (down_interruptible(&info->mutex))
+ {
+ DBG_MSG(1, ("Failed to get mutex in os_free\n"));
+ return;
+ }
+
+ DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));
+
+ info->num_pages_allocated -= descriptor->nr_blocks;
+
+ up(&info->mutex);
+
+ for ( i = 0; i < descriptor->nr_blocks; i++)
+ {
+ DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
+ if ( ! descriptor->is_cached)
+ {
+ dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ __free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
+ }
+
+ vfree(descriptor->block_array);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h
new file mode 100644
index 00000000000..d6083477d72
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_os.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_OS_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c
new file mode 100644
index 00000000000..1c1190a537f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/ioport.h> /* request_mem_region */
+
+#include "arch/config.h" /* Configuration for current platform. The symlink for arch is set by Makefile */
+
+#include "ump_osk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+
+/* Configure which dynamic memory allocator to use */
+int ump_backend = ARCH_UMP_BACKEND_DEFAULT;
+module_param(ump_backend, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_backend, "0 = dedicated memory backend (default), 1 = OS memory backend");
+
+/* The base address of the memory block for the dedicated memory backend */
+unsigned int ump_memory_address = ARCH_UMP_MEMORY_ADDRESS_DEFAULT;
+module_param(ump_memory_address, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_address, "The physical address to map for the dedicated memory backend");
+
+/* The size of the memory block for the dedicated memory backend */
+unsigned int ump_memory_size = ARCH_UMP_MEMORY_SIZE_DEFAULT;
+module_param(ump_memory_size, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_size, "The size of fixed memory to map in the dedicated memory backend");
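+
+/*
+ * Illustrative load line (values are placeholders; the module name depends on the build):
+ *
+ *   insmod ump.ko ump_backend=1 ump_memory_size=0x01000000
+ *
+ * selects the OS memory backend with a 16MB allocation limit; ump_backend=0 together
+ * with ump_memory_address/ump_memory_size selects the dedicated backend instead.
+ */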
+
+ump_memory_backend* ump_memory_backend_create ( void )
+{
+ ump_memory_backend * backend = NULL;
+
+ /* Create the dynamic memory allocator backend */
+ if (0 == ump_backend)
+ {
+ DBG_MSG(2, ("Using dedicated memory backend\n"));
+
+ DBG_MSG(2, ("Requesting dedicated memory: 0x%08x, size: %u\n", ump_memory_address, ump_memory_size));
+ /* Ask the OS if we can use the specified physical memory */
+ if (NULL == request_mem_region(ump_memory_address, ump_memory_size, "UMP Memory"))
+ {
+ MSG_ERR(("Failed to request memory region (0x%08X - 0x%08X). Is Mali DD already loaded?\n", ump_memory_address, ump_memory_address + ump_memory_size - 1));
+ return NULL;
+ }
+ backend = ump_block_allocator_create(ump_memory_address, ump_memory_size);
+ }
+ else if (1 == ump_backend)
+ {
+ DBG_MSG(2, ("Using OS memory backend, allocation limit: %d\n", ump_memory_size));
+ backend = ump_os_memory_backend_create(ump_memory_size);
+ }
+
+ return backend;
+}
+
+void ump_memory_backend_destroy( void )
+{
+ if (0 == ump_backend)
+ {
+ DBG_MSG(2, ("Releasing dedicated memory: 0x%08x\n", ump_memory_address));
+ release_mem_region(ump_memory_address, ump_memory_size);
+ }
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c
new file mode 100644
index 00000000000..b3300ab691a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_atomics.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+#include "ump_osk.h"
+#include <asm/atomic.h>
+
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c
new file mode 100644
index 00000000000..7a1c5e97886
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/memory.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+typedef struct ump_vma_usage_tracker
+{
+ atomic_t references;
+ ump_memory_allocation *descriptor;
+} ump_vma_usage_tracker;
+
+static void ump_vma_open(struct vm_area_struct * vma);
+static void ump_vma_close(struct vm_area_struct * vma);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+static struct vm_operations_struct ump_vm_ops =
+{
+ .open = ump_vma_open,
+ .close = ump_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ .fault = ump_cpu_page_fault_handler
+#else
+ .nopfn = ump_cpu_page_fault_handler
+#endif
+};
+
+/*
+ * Page fault for VMA region
+ * This should never happen since we always map in the entire virtual memory range.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ void __user * address;
+ address = vmf->virtual_address;
+#endif
+ MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
+ MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ return VM_FAULT_SIGBUS;
+#else
+ return NOPFN_SIGBUS;
+#endif
+}
+
+static void ump_vma_open(struct vm_area_struct * vma)
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ int new_val;
+
+ vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ BUG_ON(NULL == vma_usage_tracker);
+
+ new_val = atomic_inc_return(&vma_usage_tracker->references);
+
+ DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+}
+
+static void ump_vma_close(struct vm_area_struct * vma)
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ _ump_uk_unmap_mem_s args;
+ int new_val;
+
+ vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ BUG_ON(NULL == vma_usage_tracker);
+
+ new_val = atomic_dec_return(&vma_usage_tracker->references);
+
+ DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+
+ if (0 == new_val)
+ {
+ ump_memory_allocation * descriptor;
+
+ descriptor = vma_usage_tracker->descriptor;
+
+ args.ctx = descriptor->ump_session;
+ args.cookie = descriptor->cookie;
+ args.mapping = descriptor->mapping;
+ args.size = descriptor->size;
+
+ args._ukk_private = NULL; /** @note unused */
+
+ DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
+ _ump_ukk_unmap_mem( & args );
+
+ /* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
+ }
+}
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ struct vm_area_struct *vma;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
+ if (NULL == vma_usage_tracker)
+ {
+ DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _ump_osk_mem_mapregion_init()\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ vma->vm_private_data = vma_usage_tracker;
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_RESERVED;
+
+ if (0==descriptor->is_cached)
+ {
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+ DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));
+
+ /* Setup the functions which handle further VMA handling */
+ vma->vm_ops = &ump_vm_ops;
+
+ /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+ descriptor->mapping = (void __user*)vma->vm_start;
+
+ atomic_set(&vma_usage_tracker->references, 1); /* this can later be increased if the process is forked, see ump_vma_open() */
+ vma_usage_tracker->descriptor = descriptor;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
+{
+ struct vm_area_struct* vma;
+ ump_vma_usage_tracker * vma_usage_tracker;
+
+ if (NULL == descriptor) return;
+
+ /* Linux does the right thing as part of munmap to remove the mapping
+ * All that remains is that we remove the vma_usage_tracker setup in init() */
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+ vma_usage_tracker = vma->vm_private_data;
+
+ /* We only get called if mem_mapregion_init succeeded */
+ kfree(vma_usage_tracker);
+ return;
+}
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
+{
+ struct vm_area_struct *vma;
+ _mali_osk_errcode_t retval;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+ DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
+ ump_dd_secure_id_get(descriptor->handle),
+ (unsigned long)vma,
+ (unsigned long)(vma->vm_start + offset),
+ (unsigned long)*phys_addr,
+ size,
+ (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
+
+ return retval;
+}
+
+
+void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op )
+{
+ int i;
+ DBG_MSG(3, ("Flushing nr of blocks: %u. First: paddr: 0x%08x vaddr: 0x%08x size:%dB\n", mem->nr_blocks, mem->block_array[0].addr, phys_to_virt(mem->block_array[0].addr), mem->block_array[0].size));
+
+ /* TODO: Use args->size and args->address to select a subrange of this allocation to flush */
+ for (i=0 ; i<mem->nr_blocks; i++)
+ {
+ /* TODO: Find out which flush method is best of 1)Dma OR 2)Normal flush functions */
+ /* TODO: Use args->op to select the flushing method: CLEAN_AND_INVALIDATE or CLEAN */
+ /*#define USING_DMA_FLUSH*/
+ #ifdef USING_DMA_FLUSH
+ DEBUG_ASSERT( (PAGE_SIZE==mem->block_array[i].size));
+ dma_map_page(NULL, pfn_to_page(mem->block_array[i].addr >> PAGE_SHIFT), 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+ /*dma_unmap_page(NULL, mem->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);*/
+ #else
+ /* Normal style flush */
+ ump_dd_physical_block *block;
+ u32 start_p, end_p;
+ const void *start_v, *end_v;
+ block = &mem->block_array[i];
+
+ start_p = (u32)block->addr;
+ start_v = phys_to_virt( start_p ) ;
+
+ end_p = start_p + block->size-1;
+ end_v = phys_to_virt( end_p ) ;
+
+ dmac_flush_range(start_v, end_v);
+ outer_flush_range(start_p, end_p);
+ #endif
+ }
+
+ return;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c
new file mode 100644
index 00000000000..78569c4457b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_misc.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+
+#include "ump_osk.h"
+
+#include <linux/kernel.h>
+#include "ump_kernel_linux.h"
+
+/* is called from ump_kernel_constructor in common code */
+_mali_osk_errcode_t _ump_osk_init( void )
+{
+ if (0 != ump_kernel_device_initialize())
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_osk_term( void )
+{
+ ump_kernel_device_terminate();
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c
new file mode 100644
index 00000000000..155635026aa
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Allocate UMP memory
+ */
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_allocate_s user_interaction;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
+ return -ENOTTY;
+ }
+
+ /* Copy the user space memory to kernel space (so we safely can read it) */
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+
+ err = _ump_ukk_allocate( &user_interaction );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ DBG_MSG(1, ("_ump_ukk_allocate() failed in ump_ioctl_allocate()\n"));
+ return map_errcode(err);
+ }
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ /* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */
+ _ump_uk_release_s release_args;
+
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));
+
+ release_args.ctx = (void *) session_data;
+ release_args.secure_id = user_interaction.secure_id;
+
+ err = _ump_ukk_release( &release_args );
+ if(_MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("_ump_ukk_release() also failed when trying to release newly allocated memory in ump_ioctl_allocate()\n"));
+ }
+
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h
new file mode 100644
index 00000000000..b2bef1152c3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+#ifndef __UMP_UKK_REF_WRAPPERS_H__
+#define __UMP_UKK_REF_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_REF_WRAPPERS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c
new file mode 100644
index 00000000000..d14b631246c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Negotiate version of IOCTL API
+ */
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_api_version_s version_info;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_get_api_version()\n"));
+ return -ENOTTY;
+ }
+
+ /* Copy the user space memory to kernel space (so we safely can read it) */
+ if (0 != copy_from_user(&version_info, argument, sizeof(version_info)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n"));
+ return -EFAULT;
+ }
+
+ version_info.ctx = (void*) session_data;
+ err = _ump_uku_get_api_version( &version_info );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n"));
+ return map_errcode(err);
+ }
+
+ version_info.ctx = NULL;
+
+ /* Copy output data back to user space */
+ if (0 != copy_to_user(argument, &version_info, sizeof(version_info)))
+ {
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_get_api_version()\n"));
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
+
+
+/*
+ * IOCTL operation; Release reference to specified UMP memory.
+ */
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_release_s release_args;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_release()\n"));
+ return -ENOTTY;
+ }
+
+ /* Copy the user space memory to kernel space (so we safely can read it) */
+ if (0 != copy_from_user(&release_args, argument, sizeof(release_args)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_release()\n"));
+ return -EFAULT;
+ }
+
+ release_args.ctx = (void*) session_data;
+ err = _ump_ukk_release( &release_args );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n"));
+ return map_errcode(err);
+ }
+
+ return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Return size for specified UMP memory.
+ */
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_size_get_s user_interaction;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+ return -ENOTTY;
+ }
+
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_size_get()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+ err = _ump_ukk_size_get( &user_interaction );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n"));
+ return map_errcode(err);
+ }
+
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_size_get()\n"));
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Synchronize the CPU cache for the specified UMP memory.
+ */
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_msync_s user_interaction;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_msync()\n"));
+ return -ENOTTY;
+ }
+
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_msync()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+
+ _ump_ukk_msync( &user_interaction );
+
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_msync()\n"));
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h
new file mode 100644
index 00000000000..31afe2dca56
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#ifndef __UMP_UKK_WRAPPERS_H__
+#define __UMP_UKK_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* __UMP_UKK_WRAPPERS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt
new file mode 100644
index 00000000000..bf1bf61d60e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt
@@ -0,0 +1,17 @@
+Building the UMP Device Driver for Linux
+----------------------------------------
+
+Build the UMP Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> CONFIG=<your_config> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ your_config: Name of the sub-folder to find the required config.h file
+ ("arch-" will be prepended)
+
+The config.h file contains the configuration parameters needed, like the
+memory backend to use, and the amount of memory.
+
+The result will be a ump.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
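+
+Example (placeholder values - substitute your own kernel path and platform
+name):
+
+KDIR=/path/to/linux CONFIG=myplatform make
+insmod ump.ko
+
+Here "myplatform" means the required configuration is read from the
+sub-folder arch-myplatform.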
diff --git a/drivers/gpu/mali/mali400ko/mali.spec b/drivers/gpu/mali/mali400ko/mali.spec
new file mode 100644
index 00000000000..62e8fa0e1b4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/mali.spec
@@ -0,0 +1,57 @@
+%define kernel_target u8500
+%define kernel_version %(find /lib/modules -name "*%{kernel_target}" | cut -c 14-)
+
+Name: mali400ko
+License: GPL
+Summary: Mali400 kernel module
+Version: 1.0
+Release: 1
+URL: http://stericsson.com
+BuildRoot: %{_tmppath}/%{name}
+Requires: kernel
+BuildRequires: kernel-u8500-devel
+Requires(post): ldconfig
+Requires(postun): ldconfig
+
+Source0: mali400ko.tar.gz
+
+%description
+Mali400 kernel module.
+
+%files
+%defattr(-,root,root)
+/lib/modules/%{kernel_version}/extra/mali.ko
+/lib/modules/%{kernel_version}/extra/mali_drm.ko
+
+%prep
+%setup -q
+
+%build
+export CROSS_COMPILE=""
+export KERNELDIR=/usr/src/kernels/%{kernel_version}
+export KERNEL_BUILD_DIR=/usr/src/kernels/%{kernel_version}
+export USING_UMP=1
+export USING_HWMEM=1
+#make V=0 mali-devicedrv
+
+%install
+export CROSS_COMPILE=""
+export KERNELDIR=/usr/src/kernels/%{kernel_version}
+export KERNEL_BUILD_DIR=/usr/src/kernels/%{kernel_version}
+export USING_UMP=1
+export USING_HWMEM=1
+export INSTALL_MOD_DIR=extra
+export INSTALL_MOD_PATH=$RPM_BUILD_ROOT
+make V=0 install-mali install-mali_drm
+
+# Remove kernel modules.* files
+rm -f $RPM_BUILD_ROOT/lib/modules/%{kernel_version}/modules.*
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%post
+
+%postun
+
+
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt b/drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt
new file mode 100644
index 00000000000..47009fe7686
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt
@@ -0,0 +1,24 @@
+Notes on integrating the Mali DRM module:
+
+The Mali DRM is a platform device, meaning that you have to add an entry for it in your kernel architecture specification.
+
+Example: (arch/arm/mach-<platform>/mach-<platform>.c)
+
+#ifdef CONFIG_DRM_MALI
+static struct platform_device <platform>_device_mali_drm = {
+ .name = "mali_drm",
+ .id = -1,
+};
+#endif
+
+static struct platform_device *<platform>_devices[] __initdata = {
+...
+#ifdef CONFIG_DRM_MALI
+ &<platform>_device_mali_drm,
+#endif
+...
+};
+
+Where <platform> is substituted with the selected platform.
+
+The "mali" folder should be placed under drivers/gpu/drm/
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile
new file mode 100644
index 00000000000..0f3ace966df
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile
@@ -0,0 +1,17 @@
+#
+# * Copyright (C) 2010 ARM Limited. All rights reserved.
+# *
+# * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+# *
+# * A copy of the licence is included with the program, and can also be obtained from Free Software
+# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+#
+# Makefile for the Mali DRM device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+
+obj-y += mali_drm.o
+mali_drm-objs := mali_drv.o
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD) -I$(KBUILD_EXTMOD)/include -I$(KBUILD_EXTMOD)/../drm/include/
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c
new file mode 100644
index 00000000000..571ab04e1e2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c
@@ -0,0 +1,158 @@
+/**
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_drv.c
+ * Implementation of the Linux device driver entrypoints for Mali DRM
+ */
+#include <linux/module.h>
+#include <linux/vermagic.h>
+#include <drm/drmP.h>
+#include "mali_drv.h"
+
+static struct platform_device *dev0;
+
+void mali_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+}
+
+void mali_drm_lastclose(struct drm_device *dev)
+{
+}
+
+static int mali_drm_suspend(struct drm_device *dev, pm_message_t state)
+{
+ return 0;
+}
+
+static int mali_drm_resume(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int mali_drm_load(struct drm_device *dev, unsigned long chipset)
+{
+ return 0;
+}
+
+static int mali_drm_unload(struct drm_device *dev)
+{
+ return 0;
+}
+
+static const struct file_operations mali_driver_fops =
+{
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+};
+
+static struct drm_driver driver =
+{
+ .load = mali_drm_load,
+ .unload = mali_drm_unload,
+ .context_dtor = NULL,
+ .reclaim_buffers = NULL,
+ .reclaim_buffers_idlelocked = NULL,
+ .preclose = mali_drm_preclose,
+ .lastclose = mali_drm_lastclose,
+ .suspend = mali_drm_suspend,
+ .resume = mali_drm_resume,
+ .ioctls = NULL,
+ .fops = &mali_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct platform_driver platform_drm_driver;
+
+int mali_drm_init(struct platform_device *dev)
+{
+ printk(KERN_INFO "Mali DRM initializing, driver name: %s, version %d.%d\n", DRIVER_NAME, DRIVER_MAJOR, DRIVER_MINOR);
+ if (dev == dev0) {
+ driver.num_ioctls = 0;
+ return drm_platform_init(&driver, dev0);
+ }
+ return 0;
+
+}
+
+void mali_drm_exit(struct platform_device *dev)
+{
+ if (dev0 == dev)
+ drm_platform_exit(&driver, dev);
+}
+
+static int __devinit mali_platform_drm_probe(struct platform_device *dev)
+{
+ return mali_drm_init(dev);
+}
+
+static int mali_platform_drm_remove(struct platform_device *dev)
+{
+ mali_drm_exit(dev);
+
+ return 0;
+}
+
+static int mali_platform_drm_suspend(struct platform_device *dev, pm_message_t state)
+{
+ return 0;
+}
+
+static int mali_platform_drm_resume(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver platform_drm_driver = {
+ .probe = mali_platform_drm_probe,
+ .remove = __devexit_p(mali_platform_drm_remove),
+ .suspend = mali_platform_drm_suspend,
+ .resume = mali_platform_drm_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init mali_platform_drm_init(void)
+{
+ dev0 = platform_device_register_simple("mali_drm", 0, NULL, 0);
+ return platform_driver_register(&platform_drm_driver);
+}
+
+static void __exit mali_platform_drm_exit(void)
+{
+ platform_driver_unregister(&platform_drm_driver);
+ platform_device_unregister(dev0);
+}
+
+#ifdef MODULE
+module_init(mali_platform_drm_init);
+#else
+late_initcall(mali_platform_drm_init);
+#endif
+module_exit(mali_platform_drm_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_ALIAS(DRIVER_ALIAS);
+MODULE_INFO(vermagic, VERMAGIC_STRING);
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h
new file mode 100644
index 00000000000..188f427ee6d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h
@@ -0,0 +1,25 @@
+/**
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI_DRV_H_
+#define _MALI_DRV_H_
+
+#define DRIVER_AUTHOR "ARM Ltd."
+#define DRIVER_NAME "mali_drm"
+#define DRIVER_DESC "DRM module for Mali-200, Mali-400"
+#define DRIVER_LICENSE "GPL v2"
+#define DRIVER_ALIAS "platform:mali_drm"
+#define DRIVER_DATE "20101111"
+#define DRIVER_VERSION "0.2"
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 1
+
+#endif /* _MALI_DRV_H_ */
diff --git a/drivers/hsi/Kconfig b/drivers/hsi/Kconfig
new file mode 100644
index 00000000000..2c76de438eb
--- /dev/null
+++ b/drivers/hsi/Kconfig
@@ -0,0 +1,20 @@
+#
+# HSI driver configuration
+#
+menuconfig HSI
+ tristate "HSI support"
+ ---help---
+ The "High Speed Synchronous Serial Interface" is a
+ synchronous serial interface used mainly to connect
+ application engines and cellular modems.
+
+if HSI
+
+config HSI_BOARDINFO
+ bool
+ default y
+
+source "drivers/hsi/controllers/Kconfig"
+source "drivers/hsi/clients/Kconfig"
+
+endif # HSI
diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile
new file mode 100644
index 00000000000..d47ca5de18c
--- /dev/null
+++ b/drivers/hsi/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for HSI
+#
+obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o
+obj-$(CONFIG_HSI) += hsi.o
+obj-y += controllers/ clients/
diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig
new file mode 100644
index 00000000000..46eef1f77fd
--- /dev/null
+++ b/drivers/hsi/clients/Kconfig
@@ -0,0 +1,19 @@
+#
+# HSI clients configuration
+#
+
+comment "HSI clients"
+
+config HSI_CHAR
+ tristate "HSI/SSI character driver"
+ depends on HSI
+ ---help---
+ If you say Y here, you will enable the HSI/SSI character driver.
+ This driver provides a simple character device interface for
+ serial communication with the cellular modem over the HSI/SSI bus.
+
+config HSI_CAIF
+ tristate "CAIF HSI driver"
+ depends on HSI
+ default n
+ ---help---
+ Provides HSI-CAIF glue layer
diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile
new file mode 100644
index 00000000000..dfe33584975
--- /dev/null
+++ b/drivers/hsi/clients/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for HSI clients
+#
+
+obj-$(CONFIG_HSI_CHAR) += hsi_char.o
+obj-$(CONFIG_HSI_CAIF) += cfhsi.o
diff --git a/drivers/hsi/clients/cfhsi.c b/drivers/hsi/clients/cfhsi.c
new file mode 100644
index 00000000000..cf7ce0cb1cb
--- /dev/null
+++ b/drivers/hsi/clients/cfhsi.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Daniel Martensson <Daniel.Martensson@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include <net/caif/caif_hsi.h>
+
+#include <linux/hsi/hsi.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
+MODULE_DESCRIPTION("CAIF HSI V3 glue");
+
+#define NR_OF_CAIF_HSI_CHANNELS 2
+
+struct cfhsi_v3 {
+ struct list_head list;
+ struct cfhsi_dev dev;
+ struct platform_device pdev;
+ struct hsi_msg *tx_msg;
+ struct hsi_msg *rx_msg;
+};
+
+/* TODO: Lists are not protected with regards to device removal. */
+static LIST_HEAD(cfhsi_dev_list);
+
+static struct hsi_client *cfhsi_client;
+
+static int cfhsi_tx(u8 *ptr, int len, struct cfhsi_dev *dev)
+{
+ int res;
+ struct cfhsi_v3 *cfhsi = NULL;
+
+ /* Check length and alignment. */
+ BUG_ON(((int)ptr)%4);
+ BUG_ON(len%4);
+
+ cfhsi = container_of(dev, struct cfhsi_v3, dev);
+
+ sg_init_one(cfhsi->tx_msg->sgt.sgl, (const void *)ptr,
+ (unsigned int)len);
+
+ /* Write on HSI device. */
+ res = hsi_async_write(cfhsi_client, cfhsi->tx_msg);
+
+ return res;
+}
+
+static int cfhsi_rx(u8 *ptr, int len, struct cfhsi_dev *dev)
+{
+ int res;
+ struct cfhsi_v3 *cfhsi = NULL;
+
+ /* Check length and alignment. */
+ BUG_ON(((int)ptr)%4);
+ BUG_ON(len%4);
+
+ cfhsi = container_of(dev, struct cfhsi_v3, dev);
+
+ sg_init_one(cfhsi->rx_msg->sgt.sgl, (const void *)ptr,
+ (unsigned int)len);
+
+ /* Read from HSI device. */
+ res = hsi_async_read(cfhsi_client, cfhsi->rx_msg);
+
+ return res;
+}
+
+void cfhsi_v3_release(struct device *dev)
+{
+ pr_warning("%s:%d cfhsi_v3_release called\n", __FILE__, __LINE__);
+}
+
+static inline void cfhsi_v3_destructor(struct hsi_msg *msg)
+{
+ pr_warning("%s:%d cfhsi_v3_destructor called\n", __FILE__, __LINE__);
+}
+
+static inline void cfhsi_v3_read_cb(struct hsi_msg *msg)
+{
+ struct cfhsi_v3 *cfhsi = (struct cfhsi_v3 *)msg->context;
+
+ /* TODO: Error checking. */
+ BUG_ON(!cfhsi->dev.drv);
+ BUG_ON(!cfhsi->dev.drv->rx_done_cb);
+
+ cfhsi->dev.drv->rx_done_cb(cfhsi->dev.drv);
+}
+
+static inline void cfhsi_v3_write_cb(struct hsi_msg *msg)
+{
+ struct cfhsi_v3 *cfhsi = (struct cfhsi_v3 *)msg->context;
+
+ /* TODO: Error checking. */
+ BUG_ON(!cfhsi->dev.drv);
+ BUG_ON(!cfhsi->dev.drv->tx_done_cb);
+
+ cfhsi->dev.drv->tx_done_cb(cfhsi->dev.drv);
+}
+
+static int hsi_proto_probe(struct device *dev)
+{
+ int res;
+ int i;
+ struct cfhsi_v3 *cfhsi = NULL;
+
+ if (cfhsi_client)
+ return -ENODEV; /* TODO: Not correct return. */
+
+ cfhsi_client = to_hsi_client(dev);
+
+ res = hsi_claim_port(cfhsi_client, 0);
+ if (res) {
+ pr_warning("hsi_proto_probe: hsi_claim_port:%d.\n", res);
+ goto err_hsi_claim;
+ }
+
+ /* Right now we don't care about AC_WAKE (No power management). */
+ cfhsi_client->hsi_start_rx = NULL;
+ cfhsi_client->hsi_stop_rx = NULL;
+
+ /* CAIF HSI TX configuration. */
+ cfhsi_client->tx_cfg.mode = HSI_MODE_STREAM;
+ cfhsi_client->tx_cfg.flow = HSI_FLOW_SYNC;
+ cfhsi_client->tx_cfg.channels = NR_OF_CAIF_HSI_CHANNELS;
+ cfhsi_client->tx_cfg.speed = 100000; /* TODO: What speed should be used. */
+ cfhsi_client->tx_cfg.arb_mode = HSI_ARB_RR;
+
+ /* CAIF HSI RX configuration. */
+ cfhsi_client->rx_cfg.mode = HSI_MODE_STREAM;
+ cfhsi_client->rx_cfg.flow = HSI_FLOW_SYNC;
+ cfhsi_client->rx_cfg.channels = NR_OF_CAIF_HSI_CHANNELS;
+ cfhsi_client->rx_cfg.speed = 200000; /* TODO: What speed should be used. */
+ cfhsi_client->rx_cfg.arb_mode = HSI_ARB_RR;
+
+ res = hsi_setup(cfhsi_client);
+ if (res) {
+ pr_warning("hsi_proto_probe: hsi_setup:%d.\n", res);
+ goto err_hsi_setup;
+ }
+
+ /* Make sure that AC_WAKE is high (No power management). */
+ res = hsi_start_tx(cfhsi_client);
+ if (res) {
+ pr_warning("hsi_proto_probe: hsi_start_tx:%d.\n", res);
+ goto err_hsi_start_tx;
+ }
+
+ /* Connect channels to CAIF HSI devices. */
+ for (i = 0; i < NR_OF_CAIF_HSI_CHANNELS; i++) {
+ cfhsi = kzalloc(sizeof(struct cfhsi_v3), GFP_KERNEL);
+ if (!cfhsi) {
+ res = -ENOMEM;
+ /* TODO: Error handling. */
+ }
+
+ /* Assign HSI client to this CAIF HSI device. */
+ cfhsi->dev.cfhsi_tx = cfhsi_tx;
+ cfhsi->dev.cfhsi_rx = cfhsi_rx;
+
+ /* Allocate HSI messages. */
+ cfhsi->tx_msg = hsi_alloc_msg(1, GFP_KERNEL);
+ cfhsi->rx_msg = hsi_alloc_msg(1, GFP_KERNEL);
+ if (!cfhsi->tx_msg || !cfhsi->rx_msg) {
+ res = -ENOMEM;
+ /* TODO: Error handling. */
+ }
+
+ /* Set up TX message. */
+ cfhsi->tx_msg->cl = cfhsi_client;
+ cfhsi->tx_msg->context = (void *)cfhsi;
+ cfhsi->tx_msg->complete = cfhsi_v3_write_cb;
+ cfhsi->tx_msg->destructor = cfhsi_v3_destructor;
+ cfhsi->tx_msg->channel = i;
+ cfhsi->tx_msg->ttype = HSI_MSG_WRITE;
+ cfhsi->tx_msg->break_frame = 0; /* No break frame. */
+
+ /* Set up RX message. */
+ cfhsi->rx_msg->cl = cfhsi_client;
+ cfhsi->rx_msg->context = (void *)cfhsi;
+ cfhsi->rx_msg->complete = cfhsi_v3_read_cb;
+ cfhsi->rx_msg->destructor = cfhsi_v3_destructor;
+ cfhsi->rx_msg->channel = i;
+ cfhsi->rx_msg->ttype = HSI_MSG_READ;
+ cfhsi->rx_msg->break_frame = 0; /* No break frame. */
+
+ /* Initialize CAIF HSI platform device. */
+ cfhsi->pdev.name = "cfhsi";
+ cfhsi->pdev.dev.platform_data = &cfhsi->dev;
+ cfhsi->pdev.dev.release = cfhsi_v3_release;
+ /* Use channel number as id. */
+ cfhsi->pdev.id = i;
+ /* Register platform device. */
+ res = platform_device_register(&cfhsi->pdev);
+ if (res) {
+ pr_warning("hsi_proto_probe: plat_dev_reg:%d.\n", res);
+ res = -ENODEV;
+ /* TODO: Error handling. */
+ }
+
+ /* Add HSI device to device list. */
+ list_add_tail(&cfhsi->list, &cfhsi_dev_list);
+ }
+
+ return res;
+
+ err_hsi_start_tx:
+ err_hsi_setup:
+ hsi_release_port(cfhsi_client);
+ err_hsi_claim:
+ cfhsi_client = NULL;
+
+ return res;
+}
+
+static int hsi_proto_remove(struct device *dev)
+{
+ struct cfhsi_v3 *cfhsi = NULL;
+ struct list_head *list_node;
+ struct list_head *n;
+
+ if (!cfhsi_client)
+ return -ENODEV;
+
+ list_for_each_safe(list_node, n, &cfhsi_dev_list) {
+ cfhsi = list_entry(list_node, struct cfhsi_v3, list);
+ /* Remove from list. */
+ list_del(list_node);
+ /* Our HSI device is gone, unregister CAIF HSI device. */
+ platform_device_del(&cfhsi->pdev);
+ hsi_free_msg(cfhsi->tx_msg);
+ hsi_free_msg(cfhsi->rx_msg);
+ /* Free memory. */
+ kfree(cfhsi);
+ }
+
+ hsi_stop_tx(cfhsi_client);
+ hsi_release_port(cfhsi_client);
+
+ cfhsi_client = NULL;
+
+ return 0;
+}
+
+static int hsi_proto_suspend(struct device *dev, pm_message_t mesg)
+{
+ /* Not handled. */
+ pr_info("hsi_proto_suspend.\n");
+
+ return 0;
+}
+
+static int hsi_proto_resume(struct device *dev)
+{
+ /* Not handled. */
+ pr_info("hsi_proto_resume.\n");
+
+ return 0;
+}
+
+static struct hsi_client_driver cfhsi_v3_driver = {
+ .driver = {
+ .name = "cfhsi_v3_driver",
+ .owner = THIS_MODULE,
+ .probe = hsi_proto_probe,
+ .remove = __devexit_p(hsi_proto_remove),
+ .suspend = hsi_proto_suspend,
+ .resume = hsi_proto_resume,
+ },
+};
+
+static int __init cfhsi_v3_init(void)
+{
+ int res;
+
+ /* Register protocol driver for HSI interface. */
+ res = hsi_register_client_driver(&cfhsi_v3_driver);
+ if (res)
+ pr_warning("Failed to register CAIF HSI V3 driver.\n");
+
+ return res;
+}
+
+static void __exit cfhsi_v3_exit(void)
+{
+ struct cfhsi_v3 *cfhsi = NULL;
+ struct list_head *list_node;
+ struct list_head *n;
+
+ /* Unregister driver. */
+ hsi_unregister_client_driver(&cfhsi_v3_driver);
+
+ if (!cfhsi_client)
+ return;
+
+ list_for_each_safe(list_node, n, &cfhsi_dev_list) {
+ cfhsi = list_entry(list_node, struct cfhsi_v3, list);
+ platform_device_del(&cfhsi->pdev);
+ hsi_free_msg(cfhsi->tx_msg);
+ hsi_free_msg(cfhsi->rx_msg);
+ kfree(cfhsi);
+ }
+
+ hsi_stop_tx(cfhsi_client);
+ hsi_release_port(cfhsi_client);
+
+ cfhsi_client = NULL;
+}
+
+module_init(cfhsi_v3_init);
+module_exit(cfhsi_v3_exit);
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
new file mode 100644
index 00000000000..6d5bf8d0012
--- /dev/null
+++ b/drivers/hsi/clients/hsi_char.c
@@ -0,0 +1,1118 @@
+/*
+ * hsi_char.c
+ *
+ * HSI character device driver, implements the character device
+ * interface.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Andras Domokos <andras.domokos@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <asm/atomic.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/ioctl.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/scatterlist.h>
+#include <linux/hsi/hsi.h>
+#include <linux/hsi/hsi_char.h>
+
+#define HSI_CHAR_CHANNELS 8
+#define HSI_CHAR_DEVS 8
+#define HSI_CHAR_MSGS 4
+
+#define HSI_CHST_UNAVAIL 0 /* SBZ! */
+#define HSI_CHST_AVAIL 1
+
+#define HSI_CHST_CLOSED (0 << 4)
+#define HSI_CHST_CLOSING (1 << 4)
+#define HSI_CHST_OPENING (2 << 4)
+#define HSI_CHST_OPENED (3 << 4)
+
+#define HSI_CHST_READOFF (0 << 8)
+#define HSI_CHST_READON (1 << 8)
+#define HSI_CHST_READING (2 << 8)
+
+#define HSI_CHST_WRITEOFF (0 << 12)
+#define HSI_CHST_WRITEON (1 << 12)
+#define HSI_CHST_WRITING (2 << 12)
+
+#define HSI_CHST_OC_MASK 0xf0
+#define HSI_CHST_RD_MASK 0xf00
+#define HSI_CHST_WR_MASK 0xf000
+
+#define HSI_CHST_OC(c) ((c)->state & HSI_CHST_OC_MASK)
+#define HSI_CHST_RD(c) ((c)->state & HSI_CHST_RD_MASK)
+#define HSI_CHST_WR(c) ((c)->state & HSI_CHST_WR_MASK)
+
+#define HSI_CHST_OC_SET(c, v) \
+ do { \
+ (c)->state &= ~HSI_CHST_OC_MASK; \
+ (c)->state |= v; \
+ } while (0)
+
+#define HSI_CHST_RD_SET(c, v) \
+ do { \
+ (c)->state &= ~HSI_CHST_RD_MASK; \
+ (c)->state |= v; \
+ } while (0)
+
+#define HSI_CHST_WR_SET(c, v) \
+ do { \
+ (c)->state &= ~HSI_CHST_WR_MASK; \
+ (c)->state |= v; \
+ } while (0)
+
+#define HSI_CHAR_POLL_RST (-1)
+#define HSI_CHAR_POLL_OFF 0
+#define HSI_CHAR_POLL_ON 1
+
+#define HSI_CHAR_RX 0
+#define HSI_CHAR_TX 1
+
+struct hsi_char_channel {
+ unsigned int ch;
+ unsigned int state;
+ int wlrefcnt;
+ int rxpoll;
+ struct hsi_client *cl;
+ struct list_head free_msgs_list;
+ struct list_head rx_msgs_queue;
+ struct list_head tx_msgs_queue;
+ int poll_event;
+ spinlock_t lock;
+ struct fasync_struct *async_queue;
+ wait_queue_head_t rx_wait;
+ wait_queue_head_t tx_wait;
+};
+
+struct hsi_char_client_data {
+ atomic_t refcnt;
+ int attached;
+ atomic_t breq;
+ struct hsi_char_channel channels[HSI_CHAR_DEVS];
+};
+
+static unsigned int max_data_size = 0x1000;
+module_param(max_data_size, uint, 1);
+MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)");
+
+static int channels_map[HSI_CHAR_DEVS] = {0, -1, -1, -1, -1, -1, -1, -1};
+module_param_array(channels_map, int, NULL, 0);
+MODULE_PARM_DESC(channels_map, "Array of HSI channels ([0...7]) to be probed");
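+
+/*
+ * Example (illustrative values, not part of this driver): probe HSI channels
+ * 0 and 1 and allow reads/writes of up to 8 KiB:
+ *
+ *   insmod hsi_char.ko channels_map=0,1 max_data_size=0x2000
+ */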
+
+static dev_t hsi_char_dev;
+static struct hsi_char_client_data hsi_char_cl_data;
+
+static inline void hsi_char_msg_free(struct hsi_msg *msg)
+{
+ msg->complete = NULL;
+ msg->destructor = NULL;
+ kfree(sg_virt(msg->sgt.sgl));
+ hsi_free_msg(msg);
+}
+
+static inline void hsi_char_msgs_free(struct hsi_char_channel *channel)
+{
+ struct hsi_msg *msg, *tmp;
+
+ list_for_each_entry_safe(msg, tmp, &channel->free_msgs_list, link) {
+ list_del(&msg->link);
+ hsi_char_msg_free(msg);
+ }
+ list_for_each_entry_safe(msg, tmp, &channel->rx_msgs_queue, link) {
+ list_del(&msg->link);
+ hsi_char_msg_free(msg);
+ }
+ list_for_each_entry_safe(msg, tmp, &channel->tx_msgs_queue, link) {
+ list_del(&msg->link);
+ hsi_char_msg_free(msg);
+ }
+}
+
+static inline struct hsi_msg *hsi_char_msg_alloc(unsigned int alloc_size)
+{
+ struct hsi_msg *msg;
+ void *buf;
+
+ msg = hsi_alloc_msg(1, GFP_KERNEL);
+ if (!msg)
+ goto out;
+ buf = kmalloc(alloc_size, GFP_KERNEL);
+ if (!buf) {
+ hsi_free_msg(msg);
+ goto out;
+ }
+ sg_init_one(msg->sgt.sgl, buf, alloc_size);
+ msg->context = buf;
+ return msg;
+out:
+ return NULL;
+}
+
+static inline int hsi_char_msgs_alloc(struct hsi_char_channel *channel)
+{
+ struct hsi_msg *msg;
+ int i;
+
+ for (i = 0; i < HSI_CHAR_MSGS; i++) {
+ msg = hsi_char_msg_alloc(max_data_size);
+ if (!msg)
+ goto out;
+ msg->channel = channel->ch;
+ list_add_tail(&msg->link, &channel->free_msgs_list);
+ }
+ return 0;
+out:
+ hsi_char_msgs_free(channel);
+
+ return -ENOMEM;
+}
+
+static int _hsi_char_release(struct hsi_char_channel *channel, int remove)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(channel->cl);
+ int ret = 0, refcnt;
+
+ spin_lock_bh(&channel->lock);
+ if (HSI_CHST_OC(channel) != HSI_CHST_OPENED)
+ goto out;
+ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSING);
+ spin_unlock_bh(&channel->lock);
+
+ while (channel->wlrefcnt > 0) {
+ hsi_stop_tx(channel->cl);
+ channel->wlrefcnt--;
+ }
+
+ if (channel->rxpoll == HSI_CHAR_POLL_ON)
+ channel->poll_event |= POLLERR;
+
+ wake_up_interruptible(&channel->rx_wait);
+ wake_up_interruptible(&channel->tx_wait);
+
+ refcnt = atomic_dec_return(&cl_data->refcnt);
+ if (!refcnt) {
+ hsi_flush(channel->cl);
+ hsi_release_port(channel->cl);
+ cl_data->attached = 0;
+ }
+ hsi_char_msgs_free(channel);
+
+ spin_lock_bh(&channel->lock);
+ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED);
+ HSI_CHST_RD_SET(channel, HSI_CHST_READOFF);
+ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
+out:
+ if (remove)
+ channel->cl = NULL;
+ spin_unlock_bh(&channel->lock);
+
+ return ret;
+}
+
+static struct hsi_client_driver hsi_char_driver;
+static struct cdev hsi_char_cdev;
+static const struct file_operations hsi_char_fops;
+static struct class *hsi_char_class;
+
+static int __devinit hsi_char_probe(struct device *dev)
+{
+ struct hsi_char_client_data *cl_data = &hsi_char_cl_data;
+ struct hsi_char_channel *channel = cl_data->channels;
+ struct hsi_client *cl = to_hsi_client(dev);
+ char devname[] = "hsi_char";
+ int i;
+ int ret;
+
+ for (i = 0; i < HSI_CHAR_DEVS; i++, channel++) {
+ if (channel->state == HSI_CHST_AVAIL)
+ channel->cl = cl;
+ }
+ cl->hsi_start_rx = NULL;
+ cl->hsi_stop_rx = NULL;
+ atomic_set(&cl_data->refcnt, 0);
+ atomic_set(&cl_data->breq, 1);
+ cl_data->attached = 0;
+ hsi_client_set_drvdata(cl, cl_data);
+
+ ret = alloc_chrdev_region(&hsi_char_dev, 0, HSI_CHAR_DEVS, devname);
+ if (ret < 0) {
+ hsi_unregister_client_driver(&hsi_char_driver);
+ return ret;
+ }
+
+ cdev_init(&hsi_char_cdev, &hsi_char_fops);
+ ret = cdev_add(&hsi_char_cdev, hsi_char_dev, HSI_CHAR_DEVS);
+ if (ret) {
+ unregister_chrdev_region(hsi_char_dev, HSI_CHAR_DEVS);
+ hsi_unregister_client_driver(&hsi_char_driver);
+ return ret;
+ }
+
+ hsi_char_class = class_create(THIS_MODULE, "hsi");
+ if (IS_ERR(hsi_char_class))
+ pr_err("ERROR: hsi class creation failed!\n");
+
+ device_create(hsi_char_class, NULL, hsi_char_cdev.dev, NULL, devname);
+
+ return 0;
+}
+
+static int __devexit hsi_char_remove(struct device *dev)
+{
+ struct hsi_client *cl = to_hsi_client(dev);
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl);
+ struct hsi_char_channel *channel = cl_data->channels;
+ int i;
+
+ for (i = 0; i < HSI_CHAR_DEVS; i++, channel++) {
+ if (!(channel->state & HSI_CHST_AVAIL))
+ continue;
+ _hsi_char_release(channel, 1);
+ }
+
+ return 0;
+}
+
+static inline unsigned int hsi_char_msg_len_get(struct hsi_msg *msg)
+{
+ return msg->sgt.sgl->length;
+}
+
+static inline void hsi_char_msg_len_set(struct hsi_msg *msg, unsigned int len)
+{
+ msg->sgt.sgl->length = len;
+}
+
+static void hsi_char_data_available(struct hsi_msg *msg)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
+ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
+ int ret;
+
+ if (msg->status == HSI_STATUS_ERROR) {
+ ret = hsi_async_read(channel->cl, msg);
+ if (ret < 0) {
+ spin_lock_bh(&channel->lock);
+ list_add_tail(&msg->link, &channel->free_msgs_list);
+ channel->rxpoll = HSI_CHAR_POLL_OFF;
+ spin_unlock_bh(&channel->lock);
+ }
+ } else {
+ spin_lock_bh(&channel->lock);
+ channel->rxpoll = HSI_CHAR_POLL_OFF;
+ channel->poll_event |= (POLLIN | POLLRDNORM);
+ list_add_tail(&msg->link, &channel->free_msgs_list);
+ spin_unlock_bh(&channel->lock);
+ wake_up_interruptible(&channel->rx_wait);
+ }
+}
+
+static void hsi_char_rx_completed(struct hsi_msg *msg)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
+ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
+
+ spin_lock_bh(&channel->lock);
+ list_add_tail(&msg->link, &channel->rx_msgs_queue);
+ spin_unlock_bh(&channel->lock);
+ wake_up_interruptible(&channel->rx_wait);
+}
+
+static void hsi_char_rx_msg_destructor(struct hsi_msg *msg)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
+ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
+
+ spin_lock_bh(&channel->lock);
+ list_add_tail(&msg->link, &channel->free_msgs_list);
+ HSI_CHST_RD_SET(channel, HSI_CHST_READOFF);
+ spin_unlock_bh(&channel->lock);
+}
+
+static void hsi_char_rx_poll_destructor(struct hsi_msg *msg)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
+ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
+
+ spin_lock_bh(&channel->lock);
+ list_add_tail(&msg->link, &channel->free_msgs_list);
+ channel->rxpoll = HSI_CHAR_POLL_RST;
+ spin_unlock_bh(&channel->lock);
+}
+
+static int hsi_char_rx_poll(struct hsi_char_channel *channel)
+{
+ struct hsi_msg *msg;
+ int ret = 0;
+
+ spin_lock_bh(&channel->lock);
+ if (list_empty(&channel->free_msgs_list)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (channel->rxpoll == HSI_CHAR_POLL_ON)
+ goto out;
+ msg = list_first_entry(&channel->free_msgs_list, struct hsi_msg, link);
+ list_del(&msg->link);
+ channel->rxpoll = HSI_CHAR_POLL_ON;
+ spin_unlock_bh(&channel->lock);
+ hsi_char_msg_len_set(msg, 0);
+ msg->complete = hsi_char_data_available;
+ msg->destructor = hsi_char_rx_poll_destructor;
+ /* don't touch msg->context! */
+ ret = hsi_async_read(channel->cl, msg);
+ spin_lock_bh(&channel->lock);
+ if (ret < 0) {
+ list_add_tail(&msg->link, &channel->free_msgs_list);
+ channel->rxpoll = HSI_CHAR_POLL_OFF;
+ goto out;
+ }
+out:
+ spin_unlock_bh(&channel->lock);
+
+ return ret;
+}
+
+static void hsi_char_tx_completed(struct hsi_msg *msg)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
+ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
+
+ spin_lock_bh(&channel->lock);
+ list_add_tail(&msg->link, &channel->tx_msgs_queue);
+ channel->poll_event |= (POLLOUT | POLLWRNORM);
+ spin_unlock_bh(&channel->lock);
+ wake_up_interruptible(&channel->tx_wait);
+}
+
+static void hsi_char_tx_msg_destructor(struct hsi_msg *msg)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
+ struct hsi_char_channel *channel = cl_data->channels + msg->channel;
+
+ spin_lock_bh(&channel->lock);
+ list_add_tail(&msg->link, &channel->free_msgs_list);
+ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
+ spin_unlock_bh(&channel->lock);
+}
+
+static void hsi_char_rx_poll_rst(struct hsi_client *cl)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl);
+ struct hsi_char_channel *channel = cl_data->channels;
+ int i;
+
+ for (i = 0; i < HSI_CHAR_DEVS; i++, channel++) {
+ if ((HSI_CHST_OC(channel) == HSI_CHST_OPENED) &&
+ (channel->rxpoll == HSI_CHAR_POLL_RST))
+ hsi_char_rx_poll(channel);
+ }
+}
+
+static void hsi_char_reset(struct hsi_client *cl)
+{
+ hsi_flush(cl);
+ hsi_char_rx_poll_rst(cl);
+}
+
+static void hsi_char_rx_cancel(struct hsi_char_channel *channel)
+{
+ hsi_flush(channel->cl);
+ hsi_char_rx_poll_rst(channel->cl);
+}
+
+static void hsi_char_tx_cancel(struct hsi_char_channel *channel)
+{
+ hsi_flush(channel->cl);
+ hsi_char_rx_poll_rst(channel->cl);
+}
+
+static void hsi_char_bcast_break(struct hsi_client *cl)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl);
+ struct hsi_char_channel *channel = cl_data->channels;
+ int i;
+
+ for (i = 0; i < HSI_CHAR_DEVS; i++, channel++) {
+ if (HSI_CHST_OC(channel) != HSI_CHST_OPENED)
+ continue;
+ channel->poll_event |= POLLPRI;
+ wake_up_interruptible(&channel->rx_wait);
+ wake_up_interruptible(&channel->tx_wait);
+ }
+}
+
+static void hsi_char_break_received(struct hsi_msg *msg)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
+ int ret;
+
+ hsi_char_bcast_break(msg->cl);
+ ret = hsi_async_read(msg->cl, msg);
+ if (ret < 0) {
+ hsi_free_msg(msg);
+ atomic_inc(&cl_data->breq);
+ }
+}
+
+static void hsi_char_break_req_destructor(struct hsi_msg *msg)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl);
+
+ hsi_free_msg(msg);
+ atomic_inc(&cl_data->breq);
+}
+
+static int hsi_char_break_request(struct hsi_client *cl)
+{
+ struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl);
+ struct hsi_msg *msg;
+ int ret = 0;
+
+ if (!atomic_dec_and_test(&cl_data->breq)) {
+ atomic_inc(&cl_data->breq);
+ return -EBUSY;
+ }
+ msg = hsi_alloc_msg(0, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->break_frame = 1;
+ msg->complete = hsi_char_break_received;
+ msg->destructor = hsi_char_break_req_destructor;
+ ret = hsi_async_read(cl, msg);
+ if (ret < 0)
+ hsi_free_msg(msg);
+
+ return ret;
+}
+
+static int hsi_char_break_send(struct hsi_client *cl)
+{
+ struct hsi_msg *msg;
+ int ret = 0;
+
+ msg = hsi_alloc_msg(0, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+ msg->break_frame = 1;
+ msg->complete = hsi_free_msg;
+ msg->destructor = hsi_free_msg;
+ ret = hsi_async_write(cl, msg);
+ if (ret < 0)
+ hsi_free_msg(msg);
+
+ return ret;
+}
+
+static inline int ssi_check_common_cfg(struct hsi_config *cfg)
+{
+ if ((cfg->mode != HSI_MODE_STREAM) && (cfg->mode != HSI_MODE_FRAME))
+ return -EINVAL;
+ if ((cfg->channels == 0) || (cfg->channels > HSI_CHAR_CHANNELS))
+ return -EINVAL;
+ if (cfg->channels & (cfg->channels - 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int ssi_check_rx_cfg(struct hsi_config *cfg)
+{
+ int ret;
+
+ ret = ssi_check_common_cfg(cfg);
+ if (ret < 0)
+ return ret;
+ if ((cfg->flow != HSI_FLOW_SYNC) && (cfg->flow != HSI_FLOW_PIPE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int ssi_check_tx_cfg(struct hsi_config *cfg)
+{
+ int ret;
+
+ ret = ssi_check_common_cfg(cfg);
+ if (ret < 0)
+ return ret;
+ if ((cfg->arb_mode != HSI_ARB_RR) && (cfg->arb_mode != HSI_ARB_PRIO))
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int hsi_char_cfg_set(struct hsi_client *cl,
+ struct hsi_config *cfg, int dir)
+{
+ struct hsi_config *rxtx_cfg;
+ int ret = 0;
+
+ if (dir == HSI_CHAR_RX) {
+ rxtx_cfg = &cl->rx_cfg;
+ ret = ssi_check_rx_cfg(cfg);
+ } else {
+ rxtx_cfg = &cl->tx_cfg;
+ ret = ssi_check_tx_cfg(cfg);
+ }
+ if (ret < 0)
+ return ret;
+
+ *rxtx_cfg = *cfg;
+ ret = hsi_setup(cl);
+ if (ret < 0)
+ return ret;
+
+ if ((dir == HSI_CHAR_RX) && (cfg->mode == HSI_MODE_FRAME))
+ hsi_char_break_request(cl);
+
+ return ret;
+}
+
+static inline void hsi_char_cfg_get(struct hsi_client *cl,
+ struct hsi_config *cfg, int dir)
+{
+ struct hsi_config *rxtx_cfg;
+
+ if (dir == HSI_CHAR_RX)
+ rxtx_cfg = &cl->rx_cfg;
+ else
+ rxtx_cfg = &cl->tx_cfg;
+ *cfg = *rxtx_cfg;
+}
+
+static inline void hsi_char_rx2icfg(struct hsi_config *cfg,
+ struct hsc_rx_config *rx_cfg)
+{
+ cfg->mode = rx_cfg->mode;
+ cfg->flow = rx_cfg->flow;
+ cfg->channels = rx_cfg->channels;
+ cfg->speed = 0;
+}
+
+static inline void hsi_char_tx2icfg(struct hsi_config *cfg,
+ struct hsc_tx_config *tx_cfg)
+{
+ int ch;
+
+ cfg->mode = tx_cfg->mode;
+ cfg->channels = tx_cfg->channels;
+ cfg->speed = tx_cfg->speed;
+ cfg->arb_mode = tx_cfg->arb_mode;
+ for (ch = 0; ch < HSI_MAX_CHANNELS; ch++)
+ cfg->ch_prio[ch] = (tx_cfg->priority >> ch) & 1;
+}
+
+static inline void hsi_char_rx2ecfg(struct hsc_rx_config *rx_cfg,
+ struct hsi_config *cfg)
+{
+ rx_cfg->mode = cfg->mode;
+ rx_cfg->flow = cfg->flow;
+ rx_cfg->channels = cfg->channels;
+}
+
+static inline void hsi_char_tx2ecfg(struct hsc_tx_config *tx_cfg,
+ struct hsi_config *cfg)
+{
+ int ch;
+
+ tx_cfg->mode = cfg->mode;
+ tx_cfg->channels = cfg->channels;
+ tx_cfg->speed = cfg->speed;
+ tx_cfg->arb_mode = cfg->arb_mode;
+ tx_cfg->priority = 0;
+ for (ch = 0; ch < HSI_MAX_CHANNELS; ch++)
+ if (cfg->ch_prio[ch])
+ tx_cfg->priority |= (1 << ch);
+}
+
+static ssize_t hsi_char_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct hsi_char_channel *channel = file->private_data;
+ struct hsi_msg *msg = NULL;
+ ssize_t ret;
+
+ if (len == 0) {
+ channel->poll_event &= ~POLLPRI;
+ return 0;
+ }
+ channel->poll_event &= ~POLLPRI;
+
+ if (!IS_ALIGNED(len, sizeof(u32)))
+ return -EINVAL;
+
+ if (len > max_data_size)
+ len = max_data_size;
+
+ spin_lock_bh(&channel->lock);
+ if (HSI_CHST_OC(channel) != HSI_CHST_OPENED) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (HSI_CHST_RD(channel) != HSI_CHST_READOFF) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (channel->ch >= channel->cl->rx_cfg.channels) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (list_empty(&channel->free_msgs_list)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ msg = list_first_entry(&channel->free_msgs_list, struct hsi_msg, link);
+ list_del(&msg->link);
+ spin_unlock_bh(&channel->lock);
+ hsi_char_msg_len_set(msg, len);
+ msg->complete = hsi_char_rx_completed;
+ msg->destructor = hsi_char_rx_msg_destructor;
+ ret = hsi_async_read(channel->cl, msg);
+ spin_lock_bh(&channel->lock);
+ if (ret < 0)
+ goto out;
+ HSI_CHST_RD_SET(channel, HSI_CHST_READING);
+ msg = NULL;
+
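+ /* Block until the RX completion callback queues a message, a signal arrives, or the channel goes down */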
+ for ( ; ; ) {
+ DEFINE_WAIT(wait);
+
+ if (!list_empty(&channel->rx_msgs_queue)) {
+ msg = list_first_entry(&channel->rx_msgs_queue,
+ struct hsi_msg, link);
+ HSI_CHST_RD_SET(channel, HSI_CHST_READOFF);
+ channel->poll_event &= ~(POLLIN | POLLRDNORM);
+ list_del(&msg->link);
+ spin_unlock_bh(&channel->lock);
+ if (msg->status == HSI_STATUS_ERROR) {
+ ret = -EIO;
+ } else {
+ ret = copy_to_user((void __user *)buf,
+ msg->context,
+ hsi_char_msg_len_get(msg));
+ if (ret)
+ ret = -EFAULT;
+ else
+ ret = hsi_char_msg_len_get(msg);
+ }
+ spin_lock_bh(&channel->lock);
+ break;
+ } else if (signal_pending(current)) {
+ spin_unlock_bh(&channel->lock);
+ hsi_char_rx_cancel(channel);
+ spin_lock_bh(&channel->lock);
+ HSI_CHST_RD_SET(channel, HSI_CHST_READOFF);
+ ret = -EINTR;
+ break;
+ } else if (HSI_CHST_OC(channel) == HSI_CHST_CLOSING) {
+ ret = -EIO;
+ break;
+ }
+ prepare_to_wait(&channel->rx_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&channel->lock);
+
+ schedule();
+
+ spin_lock_bh(&channel->lock);
+ finish_wait(&channel->rx_wait, &wait);
+ }
+out:
+ if (msg)
+ list_add_tail(&msg->link, &channel->free_msgs_list);
+ spin_unlock_bh(&channel->lock);
+
+ return ret;
+}
+
+static ssize_t hsi_char_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct hsi_char_channel *channel = file->private_data;
+ struct hsi_msg *msg = NULL;
+ ssize_t ret;
+
+ if ((len == 0) || !IS_ALIGNED(len, sizeof(u32)))
+ return -EINVAL;
+
+ if (len > max_data_size)
+ len = max_data_size;
+
+ spin_lock_bh(&channel->lock);
+ if (HSI_CHST_OC(channel) != HSI_CHST_OPENED) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (HSI_CHST_WR(channel) != HSI_CHST_WRITEOFF) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (channel->ch >= channel->cl->tx_cfg.channels) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (list_empty(&channel->free_msgs_list)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ msg = list_first_entry(&channel->free_msgs_list, struct hsi_msg, link);
+ list_del(&msg->link);
+ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEON);
+ spin_unlock_bh(&channel->lock);
+
+ if (copy_from_user(msg->context, (void __user *)buf, len)) {
+ spin_lock_bh(&channel->lock);
+ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ hsi_char_msg_len_set(msg, len);
+ msg->complete = hsi_char_tx_completed;
+ msg->destructor = hsi_char_tx_msg_destructor;
+ channel->poll_event &= ~(POLLOUT | POLLWRNORM);
+ ret = hsi_async_write(channel->cl, msg);
+ spin_lock_bh(&channel->lock);
+ if (ret < 0) {
+ channel->poll_event |= (POLLOUT | POLLWRNORM);
+ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
+ goto out;
+ }
+ HSI_CHST_WR_SET(channel, HSI_CHST_WRITING);
+ msg = NULL;
+
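+ /* Block until the TX completion callback queues the message back, a signal arrives, or the channel goes down */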
+ for ( ; ; ) {
+ DEFINE_WAIT(wait);
+
+ if (!list_empty(&channel->tx_msgs_queue)) {
+ msg = list_first_entry(&channel->tx_msgs_queue,
+ struct hsi_msg, link);
+ list_del(&msg->link);
+ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
+ if (msg->status == HSI_STATUS_ERROR)
+ ret = -EIO;
+ else
+ ret = hsi_char_msg_len_get(msg);
+ break;
+ } else if (signal_pending(current)) {
+ spin_unlock_bh(&channel->lock);
+ hsi_char_tx_cancel(channel);
+ spin_lock_bh(&channel->lock);
+ HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF);
+ ret = -EINTR;
+ break;
+ } else if (HSI_CHST_OC(channel) == HSI_CHST_CLOSING) {
+ ret = -EIO;
+ break;
+ }
+ prepare_to_wait(&channel->tx_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&channel->lock);
+
+ schedule();
+
+ spin_lock_bh(&channel->lock);
+ finish_wait(&channel->tx_wait, &wait);
+ }
+out:
+ if (msg)
+ list_add_tail(&msg->link, &channel->free_msgs_list);
+
+ spin_unlock_bh(&channel->lock);
+
+ return ret;
+}
+
+static unsigned int hsi_char_poll(struct file *file, poll_table *wait)
+{
+ struct hsi_char_channel *channel = file->private_data;
+ unsigned int ret;
+
+ spin_lock_bh(&channel->lock);
+ if ((HSI_CHST_OC(channel) != HSI_CHST_OPENED) ||
+ (channel->ch >= channel->cl->rx_cfg.channels)) {
+ spin_unlock_bh(&channel->lock);
+ return -ENODEV;
+ }
+ poll_wait(file, &channel->rx_wait, wait);
+ poll_wait(file, &channel->tx_wait, wait);
+ ret = channel->poll_event;
+ spin_unlock_bh(&channel->lock);
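+ /* Re-arm the RX poll machinery so newly received data can be reported on the next poll */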
+ hsi_char_rx_poll(channel);
+
+ return ret;
+}
+
+static long hsi_char_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct hsi_char_channel *channel = file->private_data;
+ unsigned int state;
+ struct hsi_config cfg;
+ struct hsc_rx_config rx_cfg;
+ struct hsc_tx_config tx_cfg;
+ long ret = 0;
+
+ if (HSI_CHST_OC(channel) != HSI_CHST_OPENED)
+ return -ENODEV;
+
+ switch (cmd) {
+ case HSC_RESET:
+ hsi_char_reset(channel->cl);
+ break;
+ case HSC_SET_PM:
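+ /* HSC_PM_DISABLE keeps the TX wake line asserted; HSC_PM_ENABLE drops that reference */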
+ if (copy_from_user(&state, (void __user *)arg, sizeof(state)))
+ return -EFAULT;
+ if (state == HSC_PM_DISABLE) {
+ ret = hsi_start_tx(channel->cl);
+ if (!ret)
+ channel->wlrefcnt++;
+ } else if ((state == HSC_PM_ENABLE)
+ && (channel->wlrefcnt > 0)) {
+ ret = hsi_stop_tx(channel->cl);
+ if (!ret)
+ channel->wlrefcnt--;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case HSC_SEND_BREAK:
+ return hsi_char_break_send(channel->cl);
+ case HSC_SET_RX:
+ if (copy_from_user(&rx_cfg, (void __user *)arg, sizeof(rx_cfg)))
+ return -EFAULT;
+ hsi_char_rx2icfg(&cfg, &rx_cfg);
+ return hsi_char_cfg_set(channel->cl, &cfg, HSI_CHAR_RX);
+ case HSC_GET_RX:
+ hsi_char_cfg_get(channel->cl, &cfg, HSI_CHAR_RX);
+ hsi_char_rx2ecfg(&rx_cfg, &cfg);
+ if (copy_to_user((void __user *)arg, &rx_cfg, sizeof(rx_cfg)))
+ return -EFAULT;
+ break;
+ case HSC_SET_TX:
+ if (copy_from_user(&tx_cfg, (void __user *)arg, sizeof(tx_cfg)))
+ return -EFAULT;
+ hsi_char_tx2icfg(&cfg, &tx_cfg);
+ return hsi_char_cfg_set(channel->cl, &cfg, HSI_CHAR_TX);
+ case HSC_GET_TX:
+ hsi_char_cfg_get(channel->cl, &cfg, HSI_CHAR_TX);
+ hsi_char_tx2ecfg(&tx_cfg, &cfg);
+ if (copy_to_user((void __user *)arg, &tx_cfg, sizeof(tx_cfg)))
+ return -EFAULT;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return ret;
+}
+
+static int hsi_char_open(struct inode *inode, struct file *file)
+{
+ struct hsi_char_client_data *cl_data = &hsi_char_cl_data;
+ struct hsi_char_channel *channel = cl_data->channels + iminor(inode);
+ int ret = 0, refcnt;
+
+ spin_lock_bh(&channel->lock);
+ if ((channel->state == HSI_CHST_UNAVAIL) || (!channel->cl)) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (HSI_CHST_OC(channel) != HSI_CHST_CLOSED) {
+ ret = -EBUSY;
+ goto out;
+ }
+ HSI_CHST_OC_SET(channel, HSI_CHST_OPENING);
+ spin_unlock_bh(&channel->lock);
+
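+ /* The first opener claims and sets up the HSI port; later openers only attach to the already-claimed port */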
+ refcnt = atomic_inc_return(&cl_data->refcnt);
+ if (refcnt == 1) {
+ if (cl_data->attached) {
+ atomic_dec(&cl_data->refcnt);
+ spin_lock_bh(&channel->lock);
+ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED);
+ ret = -EBUSY;
+ goto out;
+ }
+ ret = hsi_claim_port(channel->cl, 0);
+ if (ret < 0) {
+ atomic_dec(&cl_data->refcnt);
+ spin_lock_bh(&channel->lock);
+ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED);
+ goto out;
+ }
+ hsi_setup(channel->cl);
+ } else if (!cl_data->attached) {
+ atomic_dec(&cl_data->refcnt);
+ spin_lock_bh(&channel->lock);
+ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED);
+ ret = -ENODEV;
+ goto out;
+ }
+ ret = hsi_char_msgs_alloc(channel);
+
+ if (ret < 0) {
+ refcnt = atomic_dec_return(&cl_data->refcnt);
+ if (!refcnt)
+ hsi_release_port(channel->cl);
+ spin_lock_bh(&channel->lock);
+ HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED);
+ goto out;
+ }
+ if (refcnt == 1)
+ cl_data->attached = 1;
+ channel->wlrefcnt = 0;
+ channel->rxpoll = HSI_CHAR_POLL_OFF;
+ channel->poll_event = (POLLOUT | POLLWRNORM);
+ file->private_data = channel;
+ spin_lock_bh(&channel->lock);
+ HSI_CHST_OC_SET(channel, HSI_CHST_OPENED);
+out:
+ spin_unlock_bh(&channel->lock);
+
+ return ret;
+}
+
+static int hsi_char_release(struct inode *inode, struct file *file)
+{
+ struct hsi_char_channel *channel = file->private_data;
+
+ return _hsi_char_release(channel, 0);
+}
+
+static int hsi_char_fasync(int fd, struct file *file, int on)
+{
+ struct hsi_char_channel *channel = file->private_data;
+
+ if (fasync_helper(fd, file, on, &channel->async_queue) < 0)
+ return -EIO;
+
+ return 0;
+}
+
+static const struct file_operations hsi_char_fops = {
+ .owner = THIS_MODULE,
+ .read = hsi_char_read,
+ .write = hsi_char_write,
+ .poll = hsi_char_poll,
+ .unlocked_ioctl = hsi_char_ioctl,
+ .open = hsi_char_open,
+ .release = hsi_char_release,
+ .fasync = hsi_char_fasync,
+};
+
+static struct hsi_client_driver hsi_char_driver = {
+ .driver = {
+ .name = "hsi_char",
+ .owner = THIS_MODULE,
+ .probe = hsi_char_probe,
+ .remove = hsi_char_remove,
+ },
+};
+
+static inline void hsi_char_channel_init(struct hsi_char_channel *channel)
+{
+ channel->state = HSI_CHST_AVAIL;
+ INIT_LIST_HEAD(&channel->free_msgs_list);
+ init_waitqueue_head(&channel->rx_wait);
+ init_waitqueue_head(&channel->tx_wait);
+ spin_lock_init(&channel->lock);
+ INIT_LIST_HEAD(&channel->rx_msgs_queue);
+ INIT_LIST_HEAD(&channel->tx_msgs_queue);
+}
+
+static struct cdev hsi_char_cdev;
+
+static int __init hsi_char_init(void)
+{
+ struct hsi_char_client_data *cl_data = &hsi_char_cl_data;
+ struct hsi_char_channel *channel = cl_data->channels;
+ unsigned long ch_mask = 0;
+ unsigned int i;
+ int ret;
+
+ if ((max_data_size < 4) || (max_data_size > 0x10000) ||
+ (max_data_size & (max_data_size - 1))) {
+ pr_err("Invalid max read/write data size");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HSI_CHAR_DEVS && channels_map[i] >= 0; i++) {
+ if (channels_map[i] >= HSI_CHAR_DEVS) {
+ pr_err("Invalid HSI/SSI channel specified");
+ return -EINVAL;
+ }
+ set_bit(channels_map[i], &ch_mask);
+ }
+
+ if (i == 0) {
+ pr_err("No HSI channels available");
+ return -EINVAL;
+ }
+
+ memset(cl_data->channels, 0, sizeof(cl_data->channels));
+ for (i = 0; i < HSI_CHAR_DEVS; i++, channel++) {
+ channel->ch = i;
+ channel->state = HSI_CHST_UNAVAIL;
+ if (test_bit(i, &ch_mask))
+ hsi_char_channel_init(channel);
+ }
+
+ ret = hsi_register_client_driver(&hsi_char_driver);
+ if (ret) {
+ pr_err("Error while registering HSI/SSI driver %d", ret);
+ return ret;
+ }
+
+ pr_info("HSI/SSI char device loaded\n");
+
+ return 0;
+}
+module_init(hsi_char_init);
+
+static void __exit hsi_char_exit(void)
+{
+ device_destroy(hsi_char_class, hsi_char_cdev.dev);
+ class_destroy(hsi_char_class);
+ cdev_del(&hsi_char_cdev);
+ unregister_chrdev_region(hsi_char_dev, HSI_CHAR_DEVS);
+ hsi_unregister_client_driver(&hsi_char_driver);
+ pr_info("HSI char device removed\n");
+}
+module_exit(hsi_char_exit);
+
+MODULE_AUTHOR("Andras Domokos <andras.domokos@nokia.com>");
+MODULE_ALIAS("hsi:hsi_char");
+MODULE_DESCRIPTION("HSI character device");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hsi/controllers/Kconfig b/drivers/hsi/controllers/Kconfig
new file mode 100644
index 00000000000..76d339eaf32
--- /dev/null
+++ b/drivers/hsi/controllers/Kconfig
@@ -0,0 +1,33 @@
+#
+# HSI controllers configuration
+#
+comment "HSI controllers"
+
+config STE_HSI
+ tristate "STE HSI controller driver"
+ depends on (ARCH_U8500 || ARCH_NOMADIK) && HSI
+ default n
+ help
+ ST-Ericsson HSI controller.
+ If you say Y here, you will enable the U8500 HSI hardware driver.
+
+ If unsure, say N.
+
+config OMAP_SSI
+ tristate "OMAP SSI hardware driver"
+ depends on ARCH_OMAP && HSI
+ default n
+ ---help---
+ SSI is a legacy version of HSI. It is usually used to connect
+ an application engine with a cellular modem.
+ If you say Y here, you will enable the OMAP SSI hardware driver.
+
+ If unsure, say N.
+
+if OMAP_SSI
+
+config OMAP_SSI_CONFIG
+ boolean
+ default y
+
+endif # OMAP_SSI
diff --git a/drivers/hsi/controllers/Makefile b/drivers/hsi/controllers/Makefile
new file mode 100644
index 00000000000..475637a0f23
--- /dev/null
+++ b/drivers/hsi/controllers/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for HSI controllers drivers
+#
+
+obj-$(CONFIG_STE_HSI) += ste_hsi.o
+obj-$(CONFIG_OMAP_SSI) += omap_ssi.o
diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c
new file mode 100644
index 00000000000..a82ea0e13cc
--- /dev/null
+++ b/drivers/hsi/controllers/omap_ssi.c
@@ -0,0 +1,1853 @@
+/*
+ * omap_ssi.c
+ *
+ * Implements the OMAP SSI driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/hsi/hsi.h>
+#include <linux/debugfs.h>
+#include <plat/omap-pm.h>
+#include <plat/clock.h>
+#include <plat/ssi.h>
+
+#define SSI_MAX_CHANNELS 8
+#define SSI_MAX_GDD_LCH 8
+#define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
+
+/**
+ * struct ssi_clk_res - Device resource data for the SSI clocks
+ * @clk: Pointer to the clock
+ * @nb: Pointer to the clock notifier for clk, if any
+ */
+struct ssi_clk_res {
+ struct clk *clk;
+ struct notifier_block *nb;
+};
+
+/**
+ * struct gdd_trn - GDD transaction data
+ * @msg: Pointer to the HSI message being served
+ * @sg: Pointer to the current sg entry being served
+ */
+struct gdd_trn {
+ struct hsi_msg *msg;
+ struct scatterlist *sg;
+};
+
+/**
+ * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context
+ * @mode: Bit transmission mode
+ * @channels: Number of channels
+ * @frame_size: Frame size in bits
+ * @timeout: RX frame timeout
+ * @divisor: TX divider
+ * @arb_mode: Arbitration mode for TX frame (Round robin, priority)
+ */
+struct omap_ssm_ctx {
+ u32 mode;
+ u32 channels;
+ u32 frame_size;
+ union {
+ u32 timeout; /* Rx Only */
+ struct {
+ u32 arb_mode;
+ u32 divisor;
+ }; /* Tx only */
+ };
+};
+
+/**
+ * struct omap_ssi_port - OMAP SSI port data
+ * @dev: device associated to the port (HSI port)
+ * @sst_dma: SSI transmitter physical base address
+ * @ssr_dma: SSI receiver physical base address
+ * @sst_base: SSI transmitter base address
+ * @ssr_base: SSI receiver base address
+ * @wk_lock: spin lock to serialize access to the wake lines
+ * @lock: Spin lock to serialize access to the SSI port
+ * @channels: Current number of channels configured (1,2,4 or 8)
+ * @txqueue: TX message queues
+ * @rxqueue: RX message queues
+ * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode)
+ * @irq: IRQ number
+ * @wake_irq: IRQ number for incoming wake line (-1 if none)
+ * @pio_tasklet: Bottom half for PIO transfers and events
+ * @wake_tasklet: Bottom half for incoming wake events
+ * @wkin_cken: Keep track of clock references due to the incoming wake line
+ * @wk_refcount: Reference count for output wake line
+ * @sys_mpu_enable: Context for the interrupt enable register for irq 0
+ * @sst: Context for the synchronous serial transmitter
+ * @ssr: Context for the synchronous serial receiver
+ */
+struct omap_ssi_port {
+ struct device *dev;
+ dma_addr_t sst_dma;
+ dma_addr_t ssr_dma;
+ void __iomem *sst_base;
+ void __iomem *ssr_base;
+ spinlock_t wk_lock;
+ spinlock_t lock;
+ unsigned int channels;
+ struct list_head txqueue[SSI_MAX_CHANNELS];
+ struct list_head rxqueue[SSI_MAX_CHANNELS];
+ struct list_head brkqueue;
+ unsigned int irq;
+ int wake_irq;
+ struct tasklet_struct pio_tasklet;
+ struct tasklet_struct wake_tasklet;
+ unsigned int wkin_cken:1; /* Workaround */
+ int wk_refcount;
+ /* OMAP SSI port context */
+ u32 sys_mpu_enable; /* We use only one irq */
+ struct omap_ssm_ctx sst;
+ struct omap_ssm_ctx ssr;
+};
+
+/**
+ * struct omap_ssi_controller - OMAP SSI controller data
+ * @dev: device associated to the controller (HSI controller)
+ * @sys: SSI I/O base address
+ * @gdd: GDD I/O base address
+ * @ick: SSI interconnect clock
+ * @fck: SSI functional clock
+ * @ck_refcount: References count for clocks
+ * @gdd_irq: IRQ line for GDD
+ * @gdd_tasklet: bottom half for DMA transfers
+ * @gdd_trn: Array of GDD transaction data for ongoing GDD transfers
+ * @lock: lock to serialize access to GDD
+ * @ck_lock: lock to serialize access to the clocks
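+ * @fck_rate: SSI functional clock rate (KHz)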
+ * @loss_count: To follow if we need to restore context or not
+ * @max_speed: Maximum TX speed (Kb/s) set by the clients.
+ * @sysconfig: SSI controller saved context
+ * @gdd_gcr: SSI GDD saved context
+ * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any
+ * @port: Array of pointers of the ports of the controller
+ * @dir: Debugfs SSI root directory
+ */
+struct omap_ssi_controller {
+ struct device *dev;
+ void __iomem *sys;
+ void __iomem *gdd;
+ struct clk *ick;
+ struct clk *fck;
+ int ck_refcount;
+ unsigned int gdd_irq;
+ struct tasklet_struct gdd_tasklet;
+ struct gdd_trn gdd_trn[SSI_MAX_GDD_LCH];
+ spinlock_t lock;
+ spinlock_t ck_lock;
+ unsigned long fck_rate;
+ int loss_count;
+ u32 max_speed;
+ /* OMAP SSI Controller context */
+ u32 sysconfig;
+ u32 gdd_gcr;
+ int (*get_loss)(struct device *dev);
+ struct omap_ssi_port **port;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dir;
+#endif
+};
+
+static inline unsigned int ssi_wakein(struct hsi_port *port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+
+ return gpio_get_value(irq_to_gpio(omap_port->wake_irq));
+}
+
+static int ssi_for_each_port(struct hsi_controller *ssi, void *data,
+ int (*fn)(struct omap_ssi_port *p, void *data))
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ unsigned int i = 0;
+ int err = 0;
+
+ for (i = 0; ((i < ssi->num_ports) && !err); i++)
+ err = (*fn)(omap_ssi->port[i], data);
+
+ return err;
+}
+
+static int ssi_set_port_mode(struct omap_ssi_port *omap_port, void *data)
+{
+ u32 *mode = data;
+
+ __raw_writel(*mode, omap_port->sst_base + SSI_SST_MODE_REG);
+ __raw_writel(*mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
+ /* OCP barrier */
+ *mode = __raw_readl(omap_port->ssr_base + SSI_SSR_MODE_REG);
+
+ return 0;
+}
+
+static inline void ssi_set_mode(struct hsi_controller *ssi, u32 mode)
+{
+ ssi_for_each_port(ssi, &mode, ssi_set_port_mode);
+}
+
+static int ssi_restore_port_mode(struct omap_ssi_port *omap_port,
+ void *data __maybe_unused)
+{
+ u32 mode;
+
+ __raw_writel(omap_port->sst.mode,
+ omap_port->sst_base + SSI_SST_MODE_REG);
+ __raw_writel(omap_port->ssr.mode,
+ omap_port->ssr_base + SSI_SSR_MODE_REG);
+ /* OCP barrier */
+ mode = __raw_readl(omap_port->ssr_base + SSI_SSR_MODE_REG);
+
+ return 0;
+}
+
+static int ssi_restore_divisor(struct omap_ssi_port *omap_port,
+ void *data __maybe_unused)
+{
+ __raw_writel(omap_port->sst.divisor,
+ omap_port->sst_base + SSI_SST_DIVISOR_REG);
+
+ return 0;
+}
+
+static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port,
+ void *data __maybe_unused)
+{
+ struct hsi_port *port = to_hsi_port(omap_port->dev);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *base = omap_port->sst_base;
+
+ __raw_writel(omap_port->sys_mpu_enable,
+ omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ /* SST context */
+ __raw_writel(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
+ __raw_writel(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
+ __raw_writel(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);
+ /* SSR context */
+ base = omap_port->ssr_base;
+ __raw_writel(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
+ __raw_writel(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
+ __raw_writel(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);
+
+ return 0;
+}
+
+static int ssi_save_port_ctx(struct omap_ssi_port *omap_port,
+ void *data __maybe_unused)
+{
+ struct hsi_port *port = to_hsi_port(omap_port->dev);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ omap_port->sys_mpu_enable = __raw_readl(omap_ssi->sys +
+ SSI_MPU_ENABLE_REG(port->num, 0));
+
+ return 0;
+}
+
+static int ssi_clk_enable(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ int err = 0;
+
+ spin_lock_bh(&omap_ssi->ck_lock);
+ if (omap_ssi->ck_refcount++)
+ goto out;
+ err = clk_enable(omap_ssi->fck);
+ if (unlikely(err < 0))
+ goto out;
+ err = clk_enable(omap_ssi->ick);
+ if (unlikely(err < 0)) {
+ clk_disable(omap_ssi->fck);
+ goto out;
+ }
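+ /* If no context loss happened while the clocks were off, skip the full register restore */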
+ if ((omap_ssi->get_loss) && (omap_ssi->loss_count ==
+ (*omap_ssi->get_loss)(ssi->device.parent)))
+ goto mode; /* We always need to restore the mode & TX divisor */
+
+ __raw_writel(omap_ssi->sysconfig, omap_ssi->sys + SSI_SYSCONFIG_REG);
+ __raw_writel(omap_ssi->gdd_gcr, omap_ssi->gdd + SSI_GDD_GCR_REG);
+
+ ssi_for_each_port(ssi, NULL, ssi_restore_port_ctx);
+mode:
+ ssi_for_each_port(ssi, NULL, ssi_restore_divisor);
+ ssi_for_each_port(ssi, NULL, ssi_restore_port_mode);
+out:
+ spin_unlock_bh(&omap_ssi->ck_lock);
+
+ return err;
+}
+
+static void ssi_clk_disable(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ spin_lock_bh(&omap_ssi->ck_lock);
+ WARN_ON(omap_ssi->ck_refcount <= 0);
+ if (--omap_ssi->ck_refcount)
+ goto out;
+
+ ssi_set_mode(ssi, SSI_MODE_SLEEP);
+
+ if (omap_ssi->get_loss)
+ omap_ssi->loss_count =
+ (*omap_ssi->get_loss)(ssi->device.parent);
+
+ ssi_for_each_port(ssi, NULL, ssi_save_port_ctx);
+ clk_disable(omap_ssi->ick);
+ clk_disable(omap_ssi->fck);
+out:
+ spin_unlock_bh(&omap_ssi->ck_lock);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int ssi_debug_show(struct seq_file *m, void *p __maybe_unused)
+{
+ struct hsi_controller *ssi = m->private;
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *sys = omap_ssi->sys;
+
+ ssi_clk_enable(ssi);
+ seq_printf(m, "REVISION\t: 0x%08x\n",
+ __raw_readl(sys + SSI_REVISION_REG));
+ seq_printf(m, "SYSCONFIG\t: 0x%08x\n",
+ __raw_readl(sys + SSI_SYSCONFIG_REG));
+ seq_printf(m, "SYSSTATUS\t: 0x%08x\n",
+ __raw_readl(sys + SSI_SYSSTATUS_REG));
+ ssi_clk_disable(ssi);
+
+ return 0;
+}
+
+static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
+{
+ struct hsi_port *port = m->private;
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *base = omap_ssi->sys;
+ unsigned int ch;
+
+ ssi_clk_enable(ssi);
+ if (omap_port->wake_irq > 0)
+ seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
+ seq_printf(m, "WAKE\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_WAKE_REG(port->num)));
+ seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
+ __raw_readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
+ seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
+ __raw_readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
+ /* SST */
+ base = omap_port->sst_base;
+ seq_printf(m, "\nSST\n===\n");
+ seq_printf(m, "ID SST\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SST_ID_REG));
+ seq_printf(m, "MODE\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SST_MODE_REG));
+ seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
+ __raw_readl(base + SSI_SST_FRAMESIZE_REG));
+ seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SST_DIVISOR_REG));
+ seq_printf(m, "CHANNELS\t: 0x%08x\n",
+ __raw_readl(base + SSI_SST_CHANNELS_REG));
+ seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SST_ARBMODE_REG));
+ seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SST_TXSTATE_REG));
+ seq_printf(m, "BUFSTATE\t: 0x%08x\n",
+ __raw_readl(base + SSI_SST_BUFSTATE_REG));
+ seq_printf(m, "BREAK\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SST_BREAK_REG));
+ for (ch = 0; ch < omap_port->channels; ch++) {
+ seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
+ __raw_readl(base + SSI_SST_BUFFER_CH_REG(ch)));
+ }
+ /* SSR */
+ base = omap_port->ssr_base;
+ seq_printf(m, "\nSSR\n===\n");
+ seq_printf(m, "ID SSR\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SSR_ID_REG));
+ seq_printf(m, "MODE\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SSR_MODE_REG));
+ seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
+ __raw_readl(base + SSI_SSR_FRAMESIZE_REG));
+ seq_printf(m, "CHANNELS\t: 0x%08x\n",
+ __raw_readl(base + SSI_SSR_CHANNELS_REG));
+ seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SSR_TIMEOUT_REG));
+ seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SSR_RXSTATE_REG));
+ seq_printf(m, "BUFSTATE\t: 0x%08x\n",
+ __raw_readl(base + SSI_SSR_BUFSTATE_REG));
+ seq_printf(m, "BREAK\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SSR_BREAK_REG));
+ seq_printf(m, "ERROR\t\t: 0x%08x\n",
+ __raw_readl(base + SSI_SSR_ERROR_REG));
+ seq_printf(m, "ERRORACK\t: 0x%08x\n",
+ __raw_readl(base + SSI_SSR_ERRORACK_REG));
+ for (ch = 0; ch < omap_port->channels; ch++) {
+ seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
+ __raw_readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
+ }
+ ssi_clk_disable(ssi);
+
+ return 0;
+}
+
+static int ssi_debug_gdd_show(struct seq_file *m, void *p __maybe_unused)
+{
+ struct hsi_controller *ssi = m->private;
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *gdd = omap_ssi->gdd;
+ int lch;
+
+ ssi_clk_enable(ssi);
+ seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n",
+ __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG));
+ seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n",
+ __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG));
+ seq_printf(m, "HW_ID\t\t: 0x%08x\n",
+ __raw_readl(gdd + SSI_GDD_HW_ID_REG));
+ seq_printf(m, "PPORT_ID\t: 0x%08x\n",
+ __raw_readl(gdd + SSI_GDD_PPORT_ID_REG));
+ seq_printf(m, "MPORT_ID\t: 0x%08x\n",
+ __raw_readl(gdd + SSI_GDD_MPORT_ID_REG));
+ seq_printf(m, "TEST\t\t: 0x%08x\n",
+ __raw_readl(gdd + SSI_GDD_TEST_REG));
+ seq_printf(m, "GCR\t\t: 0x%08x\n",
+ __raw_readl(gdd + SSI_GDD_GCR_REG));
+
+ for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
+ seq_printf(m, "\nGDD LCH %d\n=========\n", lch);
+ seq_printf(m, "CSDP\t\t: 0x%04x\n",
+ __raw_readw(gdd + SSI_GDD_CSDP_REG(lch)));
+ seq_printf(m, "CCR\t\t: 0x%04x\n",
+ __raw_readw(gdd + SSI_GDD_CCR_REG(lch)));
+ seq_printf(m, "CICR\t\t: 0x%04x\n",
+ __raw_readw(gdd + SSI_GDD_CICR_REG(lch)));
+ seq_printf(m, "CSR\t\t: 0x%04x\n",
+ __raw_readw(gdd + SSI_GDD_CSR_REG(lch)));
+ seq_printf(m, "CSSA\t\t: 0x%08x\n",
+ __raw_readl(gdd + SSI_GDD_CSSA_REG(lch)));
+ seq_printf(m, "CDSA\t\t: 0x%08x\n",
+ __raw_readl(gdd + SSI_GDD_CDSA_REG(lch)));
+ seq_printf(m, "CEN\t\t: 0x%04x\n",
+ __raw_readw(gdd + SSI_GDD_CEN_REG(lch)));
+ seq_printf(m, "CSAC\t\t: 0x%04x\n",
+ __raw_readw(gdd + SSI_GDD_CSAC_REG(lch)));
+ seq_printf(m, "CDAC\t\t: 0x%04x\n",
+ __raw_readw(gdd + SSI_GDD_CDAC_REG(lch)));
+ seq_printf(m, "CLNK_CTRL\t: 0x%04x\n",
+ __raw_readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch)));
+ }
+ ssi_clk_disable(ssi);
+
+ return 0;
+}
+
+static int ssi_div_get(void *data, u64 *val)
+{
+ struct hsi_port *port = data;
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+
+ ssi_clk_enable(ssi);
+ *val = __raw_readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
+ ssi_clk_disable(ssi);
+
+ return 0;
+}
+
+static int ssi_div_set(void *data, u64 val)
+{
+ struct hsi_port *port = data;
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+
+ if (val > 127)
+ return -EINVAL;
+
+ ssi_clk_enable(ssi);
+ __raw_writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
+ omap_port->sst.divisor = val;
+ ssi_clk_disable(ssi);
+
+ return 0;
+}
+
+static int ssi_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ssi_debug_show, inode->i_private);
+}
+
+static int ssi_port_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ssi_debug_port_show, inode->i_private);
+}
+
+static int ssi_gdd_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ssi_debug_gdd_show, inode->i_private);
+}
+
+static const struct file_operations ssi_regs_fops = {
+ .open = ssi_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations ssi_port_regs_fops = {
+ .open = ssi_port_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations ssi_gdd_regs_fops = {
+ .open = ssi_gdd_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");
+
+static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
+ void *data)
+{
+ struct hsi_port *port = to_hsi_port(omap_port->dev);
+ struct dentry *dir = data;
+
+ dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
+ dir = debugfs_create_dir("sst", dir);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
+ &ssi_sst_div_fops);
+
+ return 0;
+}
+
+static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct dentry *dir;
+ int err;
+
+ /* SSI controller */
+ omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL);
+ if (IS_ERR(omap_ssi->dir))
+ return PTR_ERR(omap_ssi->dir);
+
+ debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi,
+ &ssi_regs_fops);
+ /* SSI GDD (DMA) */
+ dir = debugfs_create_dir("gdd", omap_ssi->dir);
+ if (IS_ERR(dir)) {
+ err = PTR_ERR(dir);
+ goto rback;
+ }
+ debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops);
+ /* SSI ports */
+ err = ssi_for_each_port(ssi, omap_ssi->dir, ssi_debug_add_port);
+ if (err < 0)
+ goto rback;
+
+ return 0;
+rback:
+ debugfs_remove_recursive(omap_ssi->dir);
+
+ return err;
+}
+
+static void ssi_debug_remove_ctrl(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ debugfs_remove_recursive(omap_ssi->dir);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int ssi_claim_lch(struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ int lch;
+
+ for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
+ if (!omap_ssi->gdd_trn[lch].msg) {
+ omap_ssi->gdd_trn[lch].msg = msg;
+ omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
+ return lch;
+ }
+
+ return -EBUSY;
+}
+
+static int ssi_start_pio(struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ u32 val;
+
+ ssi_clk_enable(ssi);
+ if (msg->ttype == HSI_MSG_WRITE) {
+ val = SSI_DATAACCEPT(msg->channel);
+ ssi_clk_enable(ssi); /* Hold clocks for pio writes */
+ } else {
+ val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
+ }
+ dev_dbg(&port->device, "Single %s transfer\n",
+ msg->ttype ? "write" : "read");
+ val |= __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ ssi_clk_disable(ssi);
+ msg->actual_len = 0;
+ msg->status = HSI_STATUS_PROCEEDING;
+
+ return 0;
+}
+
+static int ssi_start_dma(struct hsi_msg *msg, int lch)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *gdd = omap_ssi->gdd;
+ int err;
+ u16 csdp;
+ u16 ccr;
+ u32 s_addr;
+ u32 d_addr;
+ u32 tmp;
+
+ if (msg->ttype == HSI_MSG_READ) {
+ err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
+ DMA_FROM_DEVICE);
+ if (err <= 0) {
+ /* dma_map_sg() returns 0 on failure, never a negative value */
+ dev_dbg(&ssi->device, "DMA map SG failed!\n");
+ return -EIO;
+ }
+ csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
+ SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
+ SSI_DATA_TYPE_S32;
+ ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
+ ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
+ SSI_CCR_ENABLE;
+ s_addr = omap_port->ssr_dma +
+ SSI_SSR_BUFFER_CH_REG(msg->channel);
+ d_addr = sg_dma_address(msg->sgt.sgl);
+ } else {
+ err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
+ DMA_TO_DEVICE);
+ if (err <= 0) {
+ /* dma_map_sg() returns 0 on failure, never a negative value */
+ dev_dbg(&ssi->device, "DMA map SG failed!\n");
+ return -EIO;
+ }
+ csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
+ SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
+ SSI_DATA_TYPE_S32;
+ ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
+ ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
+ SSI_CCR_ENABLE;
+ s_addr = sg_dma_address(msg->sgt.sgl);
+ d_addr = omap_port->sst_dma +
+ SSI_SST_BUFFER_CH_REG(msg->channel);
+ }
+ dev_dbg(&ssi->device, "lch %d cdsp %08x ccr %04x s_addr %08x"
+ " d_addr %08x\n", lch, csdp, ccr, s_addr, d_addr);
+ ssi_clk_enable(ssi); /* Hold clocks during the transfer */
+ __raw_writew(csdp, gdd + SSI_GDD_CSDP_REG(lch));
+ __raw_writew(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
+ __raw_writel(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
+ __raw_writel(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
+ __raw_writew(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
+ gdd + SSI_GDD_CEN_REG(lch));
+
+ spin_lock_bh(&omap_ssi->lock);
+ tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ tmp |= SSI_GDD_LCH(lch);
+ __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ spin_unlock_bh(&omap_ssi->lock);
+ __raw_writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
+ msg->status = HSI_STATUS_PROCEEDING;
+
+ return 0;
+}
+
+static int ssi_start_transfer(struct list_head *queue)
+{
+ struct hsi_msg *msg;
+ int lch = -1;
+
+ if (list_empty(queue))
+ return 0;
+ msg = list_first_entry(queue, struct hsi_msg, link);
+ if (msg->status != HSI_STATUS_QUEUED)
+ return 0;
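+ /* Use DMA only for transfers longer than one word; otherwise, or if no GDD channel is free, fall back to PIO */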
+ if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
+ lch = ssi_claim_lch(msg);
+ if (lch >= 0)
+ return ssi_start_dma(msg, lch);
+ else
+ return ssi_start_pio(msg);
+}
+
+static void ssi_transfer(struct omap_ssi_port *omap_port,
+ struct list_head *queue)
+{
+ struct hsi_msg *msg;
+ int err = -1;
+
+ spin_lock_bh(&omap_port->lock);
+ while (err < 0) {
+ err = ssi_start_transfer(queue);
+ if (err < 0) {
+ msg = list_first_entry(queue, struct hsi_msg, link);
+ msg->status = HSI_STATUS_ERROR;
+ msg->actual_len = 0;
+ list_del(&msg->link);
+ spin_unlock_bh(&omap_port->lock);
+ msg->complete(msg);
+ spin_lock_bh(&omap_port->lock);
+ }
+ }
+ spin_unlock_bh(&omap_port->lock);
+}
+
+static u32 ssi_calculate_div(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ u32 tx_fckrate = (u32) omap_ssi->fck_rate;
+
+ /* / 2 : SSI TX clock is always half of the SSI functional clock */
+ tx_fckrate >>= 1;
+ /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
+ tx_fckrate--;
+ dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n",
+ tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
+ omap_ssi->max_speed);
+
+ return tx_fckrate / omap_ssi->max_speed;
+}
+
+static void ssi_error(struct hsi_port *port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg;
+ unsigned int i;
+ u32 err;
+ u32 val;
+ u32 tmp;
+
+ /* ACK error */
+ err = __raw_readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
+ dev_err(&port->device, "SSI error: 0x%02x\n", err);
+ if (!err) {
+ dev_dbg(&port->device, "spurious SSI error ignored!\n");
+ return;
+ }
+ spin_lock(&omap_ssi->lock);
+ /* Cancel all GDD read transfers */
+ for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
+ msg = omap_ssi->gdd_trn[i].msg;
+ if ((msg) && (msg->ttype == HSI_MSG_READ)) {
+ __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
+ val |= (1 << i);
+ omap_ssi->gdd_trn[i].msg = NULL;
+ }
+ }
+ tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ tmp &= ~val;
+ __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ spin_unlock(&omap_ssi->lock);
+ /* Cancel all PIO read transfers */
+ spin_lock(&omap_port->lock);
+ tmp = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
+ __raw_writel(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ /* ACK error */
+ __raw_writel(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
+ __raw_writel(SSI_ERROROCCURED,
+ omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
+ /* Signal the error to all currently pending read requests */
+ for (i = 0; i < omap_port->channels; i++) {
+ if (list_empty(&omap_port->rxqueue[i]))
+ continue;
+ msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
+ link);
+ list_del(&msg->link);
+ msg->status = HSI_STATUS_ERROR;
+ spin_unlock(&omap_port->lock);
+ msg->complete(msg);
+ /* Now restart queued reads if any */
+ ssi_transfer(omap_port, &omap_port->rxqueue[i]);
+ spin_lock(&omap_port->lock);
+ }
+ spin_unlock(&omap_port->lock);
+}
+
+static void ssi_break_complete(struct hsi_port *port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg;
+ struct hsi_msg *tmp;
+ u32 val;
+
+ dev_dbg(&port->device, "HWBREAK received\n");
+
+ spin_lock(&omap_port->lock);
+ val = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ val &= ~SSI_BREAKDETECTED;
+ __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ __raw_writel(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
+ __raw_writel(SSI_BREAKDETECTED,
+ omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
+ spin_unlock(&omap_port->lock);
+
+ list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
+ msg->status = HSI_STATUS_COMPLETED;
+ spin_lock(&omap_port->lock);
+ list_del(&msg->link);
+ spin_unlock(&omap_port->lock);
+ msg->complete(msg);
+ }
+}
+
+static int ssi_async_break(struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ int err = 0;
+ u32 tmp;
+
+ ssi_clk_enable(ssi);
+ if (msg->ttype == HSI_MSG_WRITE) {
+ if (omap_port->sst.mode != SSI_MODE_FRAME) {
+ err = -EINVAL;
+ goto out;
+ }
+ __raw_writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
+ msg->status = HSI_STATUS_COMPLETED;
+ msg->complete(msg);
+ } else {
+ if (omap_port->ssr.mode != SSI_MODE_FRAME) {
+ err = -EINVAL;
+ goto out;
+ }
+ spin_lock_bh(&omap_port->lock);
+ tmp = __raw_readl(omap_ssi->sys +
+ SSI_MPU_ENABLE_REG(port->num, 0));
+ __raw_writel(tmp | SSI_BREAKDETECTED,
+ omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ msg->status = HSI_STATUS_PROCEEDING;
+ list_add_tail(&msg->link, &omap_port->brkqueue);
+ spin_unlock_bh(&omap_port->lock);
+ }
+out:
+ ssi_clk_disable(ssi);
+
+ return err;
+}
+
+static int ssi_async(struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct list_head *queue;
+ int err = 0;
+
+ BUG_ON(!msg);
+
+ if (msg->sgt.nents > 1)
+ return -ENOSYS; /* TODO: Add sg support */
+
+ if (msg->break_frame)
+ return ssi_async_break(msg);
+
+ if (msg->ttype) {
+ BUG_ON(msg->channel >= omap_port->sst.channels);
+ queue = &omap_port->txqueue[msg->channel];
+ } else {
+ BUG_ON(msg->channel >= omap_port->ssr.channels);
+ queue = &omap_port->rxqueue[msg->channel];
+ }
+ msg->status = HSI_STATUS_QUEUED;
+ spin_lock_bh(&omap_port->lock);
+ list_add_tail(&msg->link, queue);
+ err = ssi_start_transfer(queue);
+ if (err < 0) {
+ list_del(&msg->link);
+ msg->status = HSI_STATUS_ERROR;
+ }
+ spin_unlock_bh(&omap_port->lock);
+ dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
+ msg->status, msg->ttype, msg->channel);
+
+ return err;
+}
+
+static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
+{
+ struct list_head *node, *tmp;
+ struct hsi_msg *msg;
+
+ list_for_each_safe(node, tmp, queue) {
+ msg = list_entry(node, struct hsi_msg, link);
+ if ((cl) && (cl != msg->cl))
+ continue;
+ list_del(node);
+ pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
+ msg->channel, msg, msg->sgt.sgl->length,
+ msg->ttype, msg->context);
+ if (msg->destructor)
+ msg->destructor(msg);
+ else
+ hsi_free_msg(msg);
+ }
+}
+
+static int ssi_setup(struct hsi_client *cl)
+{
+ struct hsi_port *port = to_hsi_port(cl->device.parent);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *sst = omap_port->sst_base;
+ void __iomem *ssr = omap_port->ssr_base;
+ u32 div;
+ u32 val;
+ int err = 0;
+
+ ssi_clk_enable(ssi);
+ spin_lock_bh(&omap_port->lock);
+ if (cl->tx_cfg.speed)
+ omap_ssi->max_speed = cl->tx_cfg.speed;
+ div = ssi_calculate_div(ssi);
+ if (div > SSI_MAX_DIVISOR) {
+ dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
+ cl->tx_cfg.speed, div);
+ err = -EINVAL;
+ goto out;
+ }
+ /* Set TX/RX module to sleep to stop TX/RX during cfg update */
+ __raw_writel(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
+ __raw_writel(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
+ /* Flush posted write */
+ val = __raw_readl(ssr + SSI_SSR_MODE_REG);
+ /* TX */
+ __raw_writel(31, sst + SSI_SST_FRAMESIZE_REG);
+ __raw_writel(div, sst + SSI_SST_DIVISOR_REG);
+ __raw_writel(cl->tx_cfg.channels, sst + SSI_SST_CHANNELS_REG);
+ __raw_writel(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
+ __raw_writel(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
+ /* RX */
+ __raw_writel(31, ssr + SSI_SSR_FRAMESIZE_REG);
+ __raw_writel(cl->rx_cfg.channels, ssr + SSI_SSR_CHANNELS_REG);
+ __raw_writel(0, ssr + SSI_SSR_TIMEOUT_REG);
+ /* Cleanup the break queue if we leave FRAME mode */
+ if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
+ (cl->rx_cfg.mode != SSI_MODE_FRAME))
+ ssi_flush_queue(&omap_port->brkqueue, cl);
+ __raw_writel(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
+ omap_port->channels = max(cl->rx_cfg.channels, cl->tx_cfg.channels);
+ /* Shadow registering for OFF mode */
+ /* SST */
+ omap_port->sst.divisor = div;
+ omap_port->sst.frame_size = 31;
+ omap_port->sst.channels = cl->tx_cfg.channels;
+ omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
+ omap_port->sst.mode = cl->tx_cfg.mode;
+ /* SSR */
+ omap_port->ssr.frame_size = 31;
+ omap_port->ssr.timeout = 0;
+ omap_port->ssr.channels = cl->rx_cfg.channels;
+ omap_port->ssr.mode = cl->rx_cfg.mode;
+out:
+ spin_unlock_bh(&omap_port->lock);
+ ssi_clk_disable(ssi);
+
+ return err;
+}
+
+static void ssi_cleanup_queues(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg;
+ unsigned int i;
+ u32 rxbufstate = 0;
+ u32 txbufstate = 0;
+ u32 status = SSI_ERROROCCURED;
+ u32 tmp;
+
+ ssi_flush_queue(&omap_port->brkqueue, cl);
+ if (list_empty(&omap_port->brkqueue))
+ status |= SSI_BREAKDETECTED;
+
+ for (i = 0; i < omap_port->channels; i++) {
+ if (list_empty(&omap_port->txqueue[i]))
+ continue;
+ msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
+ link);
+ if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
+ txbufstate |= (1 << i);
+ status |= SSI_DATAACCEPT(i);
+ /* Release the clocks writes, also GDD ones */
+ ssi_clk_disable(ssi);
+ }
+ ssi_flush_queue(&omap_port->txqueue[i], cl);
+ }
+ for (i = 0; i < omap_port->channels; i++) {
+ if (list_empty(&omap_port->rxqueue[i]))
+ continue;
+ msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
+ link);
+ if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
+ rxbufstate |= (1 << i);
+ status |= SSI_DATAAVAILABLE(i);
+ }
+ ssi_flush_queue(&omap_port->rxqueue[i], cl);
+ /* Check if we keep the error detection interrupt armed */
+ if (!list_empty(&omap_port->rxqueue[i]))
+ status &= ~SSI_ERROROCCURED;
+ }
+ /* Cleanup write buffers */
+ tmp = __raw_readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
+ tmp &= ~txbufstate;
+ __raw_writel(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
+ /* Cleanup read buffers */
+ tmp = __raw_readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
+ tmp &= ~rxbufstate;
+ __raw_writel(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
+ /* Disarm and ack pending interrupts */
+ tmp = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ tmp &= ~status;
+ __raw_writel(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ __raw_writel(status, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
+}
+
+static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg;
+ unsigned int i;
+ u32 val = 0;
+ u32 tmp;
+
+ for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
+ msg = omap_ssi->gdd_trn[i].msg;
+ if ((!msg) || (msg->cl != cl))
+ continue;
+ __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
+ val |= (1 << i);
+ /*
+ * Clock references for write will be handled in
+ * ssi_cleanup_queues
+ */
+ if (msg->ttype == HSI_MSG_READ)
+ ssi_clk_disable(ssi);
+ omap_ssi->gdd_trn[i].msg = NULL;
+ }
+ tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ tmp &= ~val;
+ __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ __raw_writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
+}
+
+static int ssi_release(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ spin_lock_bh(&omap_port->lock);
+ ssi_clk_enable(ssi);
+ /* Stop all the pending DMA requests for that client */
+ ssi_cleanup_gdd(ssi, cl);
+ /* Now cleanup all the queues */
+ ssi_cleanup_queues(cl);
+ ssi_clk_disable(ssi);
+ /* If it is the last client of the port, do extra checks and cleanup */
+ if (port->claimed <= 1) {
+ /*
+ * Drop the clock reference for the incoming wake line
+ * if it is still kept high by the other side.
+ */
+ if (omap_port->wkin_cken) {
+ ssi_clk_disable(ssi);
+ omap_port->wkin_cken = 0;
+ }
+ ssi_clk_enable(ssi);
+ /* Stop any SSI TX/RX without a client */
+ ssi_set_mode(ssi, SSI_MODE_SLEEP);
+ omap_port->sst.mode = SSI_MODE_SLEEP;
+ omap_port->ssr.mode = SSI_MODE_SLEEP;
+ ssi_clk_disable(ssi);
+ WARN_ON(omap_port->wk_refcount != 0);
+ WARN_ON(omap_ssi->ck_refcount != 0);
+ }
+ spin_unlock_bh(&omap_port->lock);
+
+ return 0;
+}
+
+static int ssi_flush(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg;
+ void __iomem *sst = omap_port->sst_base;
+ void __iomem *ssr = omap_port->ssr_base;
+ unsigned int i;
+ u32 err;
+
+ ssi_clk_enable(ssi);
+ spin_lock_bh(&omap_port->lock);
+ /* Stop all DMA transfers */
+ for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
+ msg = omap_ssi->gdd_trn[i].msg;
+ if (!msg || (port != hsi_get_port(msg->cl)))
+ continue;
+ __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
+ if (msg->ttype == HSI_MSG_READ)
+ ssi_clk_disable(ssi);
+ omap_ssi->gdd_trn[i].msg = NULL;
+ }
+ /* Flush all SST buffers */
+ __raw_writel(0, sst + SSI_SST_BUFSTATE_REG);
+ __raw_writel(0, sst + SSI_SST_TXSTATE_REG);
+ /* Flush all SSR buffers */
+ __raw_writel(0, ssr + SSI_SSR_RXSTATE_REG);
+ __raw_writel(0, ssr + SSI_SSR_BUFSTATE_REG);
+ /* Flush all errors */
+ err = __raw_readl(ssr + SSI_SSR_ERROR_REG);
+ __raw_writel(err, ssr + SSI_SSR_ERRORACK_REG);
+ /* Flush break */
+ __raw_writel(0, ssr + SSI_SSR_BREAK_REG);
+ /* Clear interrupts */
+ __raw_writel(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ __raw_writel(0xffffff00,
+ omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
+ __raw_writel(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ __raw_writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
+ /* Dequeue all pending requests */
+ for (i = 0; i < omap_port->channels; i++) {
+ /* Release write clocks */
+ if (!list_empty(&omap_port->txqueue[i]))
+ ssi_clk_disable(ssi);
+ ssi_flush_queue(&omap_port->txqueue[i], NULL);
+ ssi_flush_queue(&omap_port->rxqueue[i], NULL);
+ }
+ ssi_flush_queue(&omap_port->brkqueue, NULL);
+ spin_unlock_bh(&omap_port->lock);
+ ssi_clk_disable(ssi);
+
+ return 0;
+}
+
+static int ssi_start_tx(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);
+
+ spin_lock_bh(&omap_port->wk_lock);
+ if (omap_port->wk_refcount++) {
+ spin_unlock_bh(&omap_port->wk_lock);
+ return 0;
+ }
+ ssi_clk_enable(ssi); /* Grab clocks */
+ __raw_writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
+ spin_unlock_bh(&omap_port->wk_lock);
+
+ return 0;
+}
+
+static int ssi_stop_tx(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);
+
+ spin_lock_bh(&omap_port->wk_lock);
+ BUG_ON(!omap_port->wk_refcount);
+ if (--omap_port->wk_refcount) {
+ spin_unlock_bh(&omap_port->wk_lock);
+ return 0;
+ }
+ __raw_writel(SSI_WAKE(0),
+ omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
+ ssi_clk_disable(ssi); /* Release clocks */
+ spin_unlock_bh(&omap_port->wk_lock);
+
+ return 0;
+}
+
+static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
+{
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct hsi_msg *msg;
+ u32 *buf;
+ u32 reg;
+ u32 val;
+
+ spin_lock(&omap_port->lock);
+ msg = list_first_entry(queue, struct hsi_msg, link);
+ if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
+ msg->actual_len = 0;
+ msg->status = HSI_STATUS_PENDING;
+ }
+ if (msg->ttype == HSI_MSG_WRITE)
+ val = SSI_DATAACCEPT(msg->channel);
+ else
+ val = SSI_DATAAVAILABLE(msg->channel);
+ if (msg->status == HSI_STATUS_PROCEEDING) {
+ buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
+ if (msg->ttype == HSI_MSG_WRITE)
+ __raw_writel(*buf, omap_port->sst_base +
+ SSI_SST_BUFFER_CH_REG(msg->channel));
+ else
+ *buf = __raw_readl(omap_port->ssr_base +
+ SSI_SSR_BUFFER_CH_REG(msg->channel));
+ dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
+ msg->ttype, *buf);
+ msg->actual_len += sizeof(*buf);
+ if (msg->actual_len >= msg->sgt.sgl->length)
+ msg->status = HSI_STATUS_COMPLETED;
+ /*
+ * Wait for the last written frame to be really sent before
+ * we call the complete callback
+ */
+ if ((msg->status == HSI_STATUS_PROCEEDING) ||
+ ((msg->status == HSI_STATUS_COMPLETED) &&
+ (msg->ttype == HSI_MSG_WRITE))) {
+ __raw_writel(val, omap_ssi->sys +
+ SSI_MPU_STATUS_REG(port->num, 0));
+ spin_unlock(&omap_port->lock);
+
+ return;
+ }
+ }
+ /* Transfer completed at this point */
+ reg = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ if (msg->ttype == HSI_MSG_WRITE)
+ ssi_clk_disable(ssi); /* Release clocks for write transfer */
+ reg &= ~val;
+ __raw_writel(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ __raw_writel(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
+ list_del(&msg->link);
+ spin_unlock(&omap_port->lock);
+ msg->complete(msg);
+ ssi_transfer(omap_port, queue);
+}
+
+static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct hsi_msg *msg = omap_ssi->gdd_trn[lch].msg;
+ struct hsi_port *port = to_hsi_port(msg->cl->device.parent);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ unsigned int dir;
+ u32 csr;
+ u32 val;
+
+ spin_lock(&omap_ssi->lock);
+
+ val = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+ val &= ~SSI_GDD_LCH(lch);
+ __raw_writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
+
+ if (msg->ttype == HSI_MSG_READ) {
+ dir = DMA_FROM_DEVICE;
+ val = SSI_DATAAVAILABLE(msg->channel);
+ ssi_clk_disable(ssi);
+ } else {
+ dir = DMA_TO_DEVICE;
+ val = SSI_DATAACCEPT(msg->channel);
+ /* Keep clocks reference for write pio event */
+ }
+ dma_unmap_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, dir);
+ csr = __raw_readw(omap_ssi->gdd + SSI_GDD_CSR_REG(lch));
+ omap_ssi->gdd_trn[lch].msg = NULL; /* release GDD lch */
+ dev_dbg(&port->device, "DMA completed ch %d ttype %d\n",
+ msg->channel, msg->ttype);
+ spin_unlock(&omap_ssi->lock);
+ if (csr & SSI_CSR_TOUR) { /* Timeout error */
+ msg->status = HSI_STATUS_ERROR;
+ msg->actual_len = 0;
+ spin_lock(&omap_port->lock);
+ list_del(&msg->link); /* Dequeue msg */
+ spin_unlock(&omap_port->lock);
+ msg->complete(msg);
+ return;
+ }
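+	/* Re-enable the channel interrupt so the PIO tasklet can finalize the message */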
+ spin_lock(&omap_port->lock);
+ val |= __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ spin_unlock(&omap_port->lock);
+
+ msg->status = HSI_STATUS_COMPLETED;
+ msg->actual_len = sg_dma_len(msg->sgt.sgl);
+}
+
+static void ssi_gdd_tasklet(unsigned long dev)
+{
+ struct hsi_controller *ssi = (struct hsi_controller *)dev;
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *sys = omap_ssi->sys;
+ unsigned int lch;
+ u32 status_reg;
+
+ ssi_clk_enable(ssi);
+
+ status_reg = __raw_readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG);
+ for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
+ if (status_reg & SSI_GDD_LCH(lch))
+ ssi_gdd_complete(ssi, lch);
+ }
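+	/* Ack the handled events, then check whether new ones were raised meanwhile */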
+ __raw_writel(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG);
+ status_reg = __raw_readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG);
+ ssi_clk_disable(ssi);
+ if (status_reg)
+ tasklet_hi_schedule(&omap_ssi->gdd_tasklet);
+ else
+ enable_irq(omap_ssi->gdd_irq);
+
+}
+
+static irqreturn_t ssi_gdd_isr(int irq, void *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ tasklet_hi_schedule(&omap_ssi->gdd_tasklet);
+ disable_irq_nosync(irq);
+
+ return IRQ_HANDLED;
+}
+
+static void ssi_pio_tasklet(unsigned long ssi_port)
+{
+ struct hsi_port *port = (struct hsi_port *)ssi_port;
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ void __iomem *sys = omap_ssi->sys;
+ unsigned int ch;
+ u32 status_reg;
+
+ ssi_clk_enable(ssi);
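+	/* Handle only the events that are both pending and enabled */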
+ status_reg = __raw_readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
+ status_reg &= __raw_readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
+
+ for (ch = 0; ch < omap_port->channels; ch++) {
+ if (status_reg & SSI_DATAACCEPT(ch))
+ ssi_pio_complete(port, &omap_port->txqueue[ch]);
+ if (status_reg & SSI_DATAAVAILABLE(ch))
+ ssi_pio_complete(port, &omap_port->rxqueue[ch]);
+ }
+ if (status_reg & SSI_BREAKDETECTED)
+ ssi_break_complete(port);
+ if (status_reg & SSI_ERROROCCURED)
+ ssi_error(port);
+
+ status_reg = __raw_readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
+ status_reg &= __raw_readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
+ ssi_clk_disable(ssi);
+
+ if (status_reg)
+ tasklet_hi_schedule(&omap_port->pio_tasklet);
+ else
+ enable_irq(omap_port->irq);
+}
+
+static irqreturn_t ssi_pio_isr(int irq, void *port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+
+ tasklet_hi_schedule(&omap_port->pio_tasklet);
+ disable_irq_nosync(irq);
+
+ return IRQ_HANDLED;
+}
+
+static void ssi_wake_tasklet(unsigned long ssi_port)
+{
+ struct hsi_port *port = (struct hsi_port *)ssi_port;
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+
+ if (ssi_wakein(port)) {
+		/*
+		 * The line can make a quick high-low-high transition. With
+		 * long interrupt latencies we can miss the low event or get
+		 * the high event twice. This workaround avoids breaking the
+		 * clock reference count when that occurs.
+		 */
+ spin_lock(&omap_port->lock);
+ if (!omap_port->wkin_cken) {
+ omap_port->wkin_cken = 1;
+ ssi_clk_enable(ssi);
+ }
+ spin_unlock(&omap_port->lock);
+ dev_dbg(&ssi->device, "Wake in high\n");
+ hsi_event(port, HSI_EVENT_START_RX);
+ } else {
+ dev_dbg(&ssi->device, "Wake in low\n");
+ hsi_event(port, HSI_EVENT_STOP_RX);
+ spin_lock(&omap_port->lock);
+ if (omap_port->wkin_cken) {
+ ssi_clk_disable(ssi);
+ omap_port->wkin_cken = 0;
+ }
+ spin_unlock(&omap_port->lock);
+ }
+}
+
+static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);
+
+ tasklet_hi_schedule(&omap_port->wake_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static int __init ssi_port_irq(struct hsi_port *port,
+ struct platform_device *pd)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct resource *irq;
+ int err;
+
+ irq = platform_get_resource(pd, IORESOURCE_IRQ, (port->num * 3) + 1);
+ if (!irq) {
+ dev_err(&port->device, "Port IRQ resource missing\n");
+ return -ENXIO;
+ }
+ omap_port->irq = irq->start;
+ tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
+ (unsigned long)port);
+ err = devm_request_irq(&pd->dev, omap_port->irq, ssi_pio_isr,
+ IRQF_DISABLED, irq->name, port);
+ if (err < 0)
+ dev_err(&port->device, "Request IRQ %d failed (%d)\n",
+ omap_port->irq, err);
+ return err;
+}
+
+static int __init ssi_wake_irq(struct hsi_port *port,
+ struct platform_device *pd)
+{
+ struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ struct resource *irq;
+ int err;
+
+ irq = platform_get_resource(pd, IORESOURCE_IRQ, (port->num * 3) + 3);
+ if (!irq) {
+		dev_err(&port->device, "Wake in IRQ resource missing\n");
+ return -ENXIO;
+ }
+ if (irq->flags & IORESOURCE_UNSET) {
+ dev_info(&port->device, "No Wake in support\n");
+ omap_port->wake_irq = -1;
+ return 0;
+ }
+ omap_port->wake_irq = irq->start;
+ tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
+ (unsigned long)port);
+ err = devm_request_irq(&pd->dev, omap_port->wake_irq, ssi_wake_isr,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ irq->name, port);
+	if (err < 0) {
+		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
+			omap_port->wake_irq, err);
+		return err;
+	}
+	err = enable_irq_wake(omap_port->wake_irq);
+	if (err < 0)
+		dev_err(&port->device,
+			"Enable wake on the wake in IRQ %d failed %d\n",
+			omap_port->wake_irq, err);
+
+ return err;
+}
+
+static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
+{
+ unsigned int ch;
+
+ for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
+ INIT_LIST_HEAD(&omap_port->txqueue[ch]);
+ INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
+ }
+ INIT_LIST_HEAD(&omap_port->brkqueue);
+}
+
+static int __init ssi_get_iomem(struct platform_device *pd,
+ unsigned int num, void __iomem **pbase, dma_addr_t *phy)
+{
+ struct resource *mem;
+ struct resource *ioarea;
+ void __iomem *base;
+
+ mem = platform_get_resource(pd, IORESOURCE_MEM, num);
+ if (!mem) {
+ dev_err(&pd->dev, "IO memory region missing (%d)\n", num);
+ return -ENXIO;
+ }
+ ioarea = devm_request_mem_region(&pd->dev, mem->start,
+ resource_size(mem), dev_name(&pd->dev));
+ if (!ioarea) {
+ dev_err(&pd->dev, "%s IO memory region request failed\n",
+ mem->name);
+ return -ENXIO;
+ }
+ base = devm_ioremap(&pd->dev, mem->start, resource_size(mem));
+ if (!base) {
+ dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
+ return -ENXIO;
+ }
+ *pbase = base;
+
+ if (phy)
+ *phy = mem->start;
+
+ return 0;
+}
+
+static int __init ssi_ports_init(struct hsi_controller *ssi,
+ struct platform_device *pd)
+{
+ struct hsi_port *port;
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ struct omap_ssi_port *omap_port;
+ unsigned int i;
+ int err;
+
+ omap_ssi->port = devm_kzalloc(&pd->dev,
+ sizeof(omap_port) * ssi->num_ports, GFP_KERNEL);
+ if (!omap_ssi->port)
+ return -ENOMEM;
+
+ for (i = 0; i < ssi->num_ports; i++) {
+ port = &ssi->port[i];
+ omap_port = devm_kzalloc(&pd->dev, sizeof(*omap_port),
+ GFP_KERNEL);
+ if (!omap_port)
+ return -ENOMEM;
+ port->async = ssi_async;
+ port->setup = ssi_setup;
+ port->flush = ssi_flush;
+ port->start_tx = ssi_start_tx;
+ port->stop_tx = ssi_stop_tx;
+ port->release = ssi_release;
+ hsi_port_set_drvdata(port, omap_port);
+		/* Get SST base addresses */
+ err = ssi_get_iomem(pd, ((i * 2) + 2), &omap_port->sst_base,
+ &omap_port->sst_dma);
+ if (err < 0)
+ return err;
+ /* Get SSR base addresses */
+ err = ssi_get_iomem(pd, ((i * 2) + 3), &omap_port->ssr_base,
+ &omap_port->ssr_dma);
+ if (err < 0)
+ return err;
+ err = ssi_port_irq(port, pd);
+ if (err < 0)
+ return err;
+ err = ssi_wake_irq(port, pd);
+ if (err < 0)
+ return err;
+ ssi_queues_init(omap_port);
+ spin_lock_init(&omap_port->lock);
+ spin_lock_init(&omap_port->wk_lock);
+ omap_port->dev = &port->device;
+ omap_ssi->port[i] = omap_port;
+ }
+
+ return 0;
+}
+
+static void ssi_ports_exit(struct hsi_controller *ssi)
+{
+ struct omap_ssi_port *omap_port;
+ unsigned int i;
+
+ for (i = 0; i < ssi->num_ports; i++) {
+ omap_port = hsi_port_drvdata(&ssi->port[i]);
+ tasklet_kill(&omap_port->wake_tasklet);
+ tasklet_kill(&omap_port->pio_tasklet);
+ }
+}
+
+static void ssi_clk_release(struct device *dev __maybe_unused, void *res)
+{
+ struct ssi_clk_res *r = res;
+
+ clk_put(r->clk);
+}
+
+static struct clk *__init ssi_devm_clk_get(struct device *dev, const char *id)
+{
+ struct ssi_clk_res *pclk;
+ struct clk *clk;
+
+ pclk = devres_alloc(ssi_clk_release, sizeof(*pclk), GFP_KERNEL);
+ if (!pclk) {
+ dev_err(dev, "Could not allocate the device resource entry\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ clk = clk_get(dev, id);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "clock get %s failed %li\n", id, PTR_ERR(clk));
+ devres_free(pclk);
+ } else {
+ pclk->clk = clk;
+ devres_add(dev, pclk);
+ }
+
+ return clk;
+}
+
+static int __init ssi_add_controller(struct hsi_controller *ssi,
+ struct platform_device *pd)
+{
+ struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data;
+ struct omap_ssi_controller *omap_ssi;
+ struct resource *irq;
+ int err;
+
+ omap_ssi = devm_kzalloc(&pd->dev, sizeof(*omap_ssi), GFP_KERNEL);
+ if (!omap_ssi) {
+ dev_err(&pd->dev, "not enough memory for omap ssi\n");
+ return -ENOMEM;
+ }
+ ssi->id = pd->id;
+ ssi->owner = THIS_MODULE;
+ ssi->device.parent = &pd->dev;
+ dev_set_name(&ssi->device, "ssi%d", ssi->id);
+ hsi_controller_set_drvdata(ssi, omap_ssi);
+ omap_ssi->dev = &ssi->device;
+ err = ssi_get_iomem(pd, 0, &omap_ssi->sys, NULL);
+ if (err < 0)
+ return err;
+ err = ssi_get_iomem(pd, 1, &omap_ssi->gdd, NULL);
+ if (err < 0)
+ return err;
+ irq = platform_get_resource(pd, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pd->dev, "GDD IRQ resource missing\n");
+ return -ENXIO;
+ }
+ omap_ssi->gdd_irq = irq->start;
+ tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet,
+ (unsigned long)ssi);
+ err = devm_request_irq(&pd->dev, omap_ssi->gdd_irq, ssi_gdd_isr,
+ IRQF_DISABLED, irq->name, ssi);
+ if (err < 0) {
+		dev_err(&ssi->device, "Request GDD IRQ %d failed (%d)\n",
+ omap_ssi->gdd_irq, err);
+ return err;
+ }
+ err = ssi_ports_init(ssi, pd);
+ if (err < 0)
+ return err;
+ omap_ssi->get_loss = omap_ssi_pdata->get_dev_context_loss_count;
+ omap_ssi->max_speed = UINT_MAX;
+ spin_lock_init(&omap_ssi->lock);
+ spin_lock_init(&omap_ssi->ck_lock);
+ omap_ssi->ick = ssi_devm_clk_get(&pd->dev, "ssi_ick");
+ if (IS_ERR(omap_ssi->ick))
+ return PTR_ERR(omap_ssi->ick);
+ omap_ssi->fck = ssi_devm_clk_get(&pd->dev, "ssi_ssr_fck");
+ if (IS_ERR(omap_ssi->fck))
+ return PTR_ERR(omap_ssi->fck);
+ err = hsi_register_controller(ssi);
+
+ return err;
+}
+
+static int __init ssi_hw_init(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ unsigned int i;
+ u32 val;
+ int err;
+
+ err = ssi_clk_enable(ssi);
+ if (err < 0) {
+ dev_err(&ssi->device, "Failed to enable the clocks %d\n", err);
+ return err;
+ }
+	/* Resetting the SSI controller */
+ __raw_writel(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG);
+ val = __raw_readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
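+	/* Poll for the reset to complete (at most 20 x 20 ms) */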
+ for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) {
+ msleep(20);
+ val = __raw_readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
+ }
+ if (!(val & SSI_RESETDONE)) {
+ dev_err(&ssi->device, "SSI HW reset failed\n");
+ ssi_clk_disable(ssi);
+ return -EIO;
+ }
+	/* Resetting the GDD */
+ __raw_writel(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG);
+ /* Get FCK rate */
+	omap_ssi->fck_rate = clk_get_rate(omap_ssi->fck) / 1000; /* kHz */
+	dev_dbg(&ssi->device, "SSI fck rate %lu kHz\n", omap_ssi->fck_rate);
+ /* Set default PM settings */
+ val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART;
+ __raw_writel(val, omap_ssi->sys + SSI_SYSCONFIG_REG);
+ omap_ssi->sysconfig = val;
+ __raw_writel(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG);
+ omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON;
+ ssi_clk_disable(ssi);
+
+ return 0;
+}
+
+static void ssi_remove_controller(struct hsi_controller *ssi)
+{
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ ssi_ports_exit(ssi);
+ tasklet_kill(&omap_ssi->gdd_tasklet);
+ hsi_unregister_controller(ssi);
+}
+
+static int __init ssi_probe(struct platform_device *pd)
+{
+ struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data;
+ struct hsi_controller *ssi;
+ int err;
+
+ if (!omap_ssi_pdata) {
+ dev_err(&pd->dev, "No OMAP SSI platform data\n");
+ return -EINVAL;
+ }
+ ssi = hsi_alloc_controller(omap_ssi_pdata->num_ports, GFP_KERNEL);
+ if (!ssi) {
+ dev_err(&pd->dev, "No memory for controller\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pd, ssi);
+ err = ssi_add_controller(ssi, pd);
+ if (err < 0)
+ goto out1;
+ err = ssi_hw_init(ssi);
+ if (err < 0)
+ goto out2;
+#ifdef CONFIG_DEBUG_FS
+ err = ssi_debug_add_ctrl(ssi);
+ if (err < 0)
+ goto out2;
+#endif
+ return err;
+out2:
+ ssi_remove_controller(ssi);
+out1:
+ platform_set_drvdata(pd, NULL);
+ hsi_free_controller(ssi);
+
+ return err;
+}
+
+static int __exit ssi_remove(struct platform_device *pd)
+{
+ struct hsi_controller *ssi = platform_get_drvdata(pd);
+
+#ifdef CONFIG_DEBUG_FS
+ ssi_debug_remove_ctrl(ssi);
+#endif
+ ssi_remove_controller(ssi);
+ platform_set_drvdata(pd, NULL);
+ hsi_free_controller(ssi);
+
+ return 0;
+}
+
+static struct platform_driver ssi_pdriver = {
+ .remove = __exit_p(ssi_remove),
+ .driver = {
+ .name = "omap_ssi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_ssi_init(void)
+{
+ pr_info("OMAP SSI hw driver loaded\n");
+ return platform_driver_probe(&ssi_pdriver, ssi_probe);
+}
+module_init(omap_ssi_init);
+
+static void __exit omap_ssi_exit(void)
+{
+ platform_driver_unregister(&ssi_pdriver);
+ pr_info("OMAP SSI driver removed\n");
+}
+module_exit(omap_ssi_exit);
+
+MODULE_ALIAS("platform:omap_ssi");
+MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
+MODULE_DESCRIPTION("Synchronous Serial Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hsi/controllers/ste_hsi.c b/drivers/hsi/controllers/ste_hsi.c
new file mode 100644
index 00000000000..beae4ff95b1
--- /dev/null
+++ b/drivers/hsi/controllers/ste_hsi.c
@@ -0,0 +1,1843 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com> for ST-Ericsson
+ * Author: Lukasz Baj <lukasz.baj@tieto.com> for ST-Ericsson
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/hsi/hsi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#ifdef CONFIG_STE_DMA40
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#endif
+
+#include <mach/hsi.h>
+
+/*
+ * Copy of HSIR/HSIT context for restoring after HW reset (Vape power off).
+ */
+struct ste_hsi_hw_context {
+ unsigned int tx_mode;
+ unsigned int tx_divisor;
+ unsigned int tx_channels;
+ unsigned int tx_priority;
+ unsigned int rx_mode;
+ unsigned int rx_channels;
+};
+
+/**
+ * struct ste_hsi_controller - STE HSI controller data
+ * @dev: device associated to STE HSI controller
+ * @tx_dma_base: HSI TX peripheral physical address
+ * @rx_dma_base: HSI RX peripheral physical address
+ * @rx_base: HSI RX peripheral virtual address
+ * @tx_base: HSI TX peripheral virtual address
+ * @regulator: STE HSI Vape consumer regulator
+ * @context: copy of client-configured HSI TX / HSI RX registers
+ * @tx_clk: HSI TX core clock (HSITXCLK)
+ * @rx_clk: HSI RX core clock (HSIRXCLK)
+ * @ssitx_clk: HSI TX host clock (HCLK)
+ * @ssirx_clk: HSI RX host clock (HCLK)
+ * @clk_work: structure for delayed HSI clock disabling
+ * @overrun_irq: HSI channels overrun IRQ table
+ * @ck_refcount: reference count for clock enable operation
+ * @ck_lock: locking primitive for HSI clocks
+ * @lock: locking primitive for HSI controller
+ * @use_dma: flag for DMA enabled
+ * @ck_on: flag for HSI clocks enabled
+ */
+struct ste_hsi_controller {
+ struct device *dev;
+ dma_addr_t tx_dma_base;
+ dma_addr_t rx_dma_base;
+ unsigned char __iomem *rx_base;
+ unsigned char __iomem *tx_base;
+ struct regulator *regulator;
+ struct ste_hsi_hw_context *context;
+ struct clk *tx_clk;
+ struct clk *rx_clk;
+ struct clk *ssitx_clk;
+ struct clk *ssirx_clk;
+ struct delayed_work clk_work;
+ int overrun_irq[STE_HSI_MAX_CHANNELS];
+ int ck_refcount;
+ spinlock_t ck_lock;
+ spinlock_t lock;
+ unsigned int use_dma:1;
+ unsigned int ck_on:1;
+};
+
+#ifdef CONFIG_STE_DMA40
+struct ste_hsi_channel_dma {
+ struct dma_chan *dma_chan;
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+};
+#endif
+
+struct ste_hsi_port {
+ struct device *dev;
+ struct list_head txqueue[STE_HSI_MAX_CHANNELS];
+ struct list_head rxqueue[STE_HSI_MAX_CHANNELS];
+ struct list_head brkqueue;
+ int cawake_irq;
+ int acwake_gpio;
+ int tx_irq;
+ int rx_irq;
+ int excep_irq;
+ struct tasklet_struct cawake_tasklet;
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+ struct tasklet_struct exception_tasklet;
+ struct tasklet_struct overrun_tasklet;
+ unsigned char channels;
+#ifdef CONFIG_STE_DMA40
+ struct ste_hsi_channel_dma tx_dma[STE_HSI_MAX_CHANNELS];
+ struct ste_hsi_channel_dma rx_dma[STE_HSI_MAX_CHANNELS];
+#endif
+};
+
+#define hsi_to_ste_port(port) (hsi_port_drvdata(port))
+#define hsi_to_ste_controller(con) (hsi_controller_drvdata(con))
+#define client_to_ste_port(cl) (hsi_port_drvdata(hsi_get_port(cl)))
+#define client_to_hsi(cl) \
+ (to_hsi_controller(hsi_get_port(cl)->device.parent))
+#define client_to_ste_controller(cl) \
+ (hsi_controller_drvdata(client_to_hsi(cl)))
+#define ste_port_to_ste_controller(port) \
+ ((struct ste_hsi_controller *)hsi_controller_drvdata( \
+ to_hsi_controller(port->dev->parent)))
+
+static u32 ste_hsir_periphid[8] = { 0x2C, 0, 0x8, 0x18, 0xD, 0xF0, 0x5, 0xB1 };
+static u32 ste_hsit_periphid[8] = { 0x2B, 0, 0x8, 0x18, 0xD, 0xF0, 0x5, 0xB1 };
+
+/*
+ * The linux/amba/bus.h macros cannot be used here, because eight ID values
+ * are validated: PERIPHID0..3 and PCELLID0..3, for both HSIR and HSIT.
+ * Returns 0 when the first 'count' words of both arrays match.
+ */
+static inline int compare_periphid(u32 *id1, u32 *id2, int count)
+{
+ while (count && *id1++ == *id2++)
+ count--;
+
+ return count;
+}
+
+static void ste_hsi_clk_free(struct clk **pclk)
+{
+	if (!IS_ERR_OR_NULL(*pclk))
+ clk_put(*pclk);
+ *pclk = NULL;
+}
+
+static void ste_hsi_init_registers(struct ste_hsi_controller *ste_hsi)
+{
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_BUFSTATE);
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_FLUSHBITS);
+	/* TODO: TX channel priorities will be implemented later */
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_PRIORITY);
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_DATASWAP);
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_DMAEN);
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKID);
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIC);
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM);
+
+ /* 0x23 is reset value per DB8500 Design Spec */
+ writel(0x23, ste_hsi->rx_base + STE_HSI_RX_THRESHOLD);
+
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_BUFSTATE);
+
+ /* HSIR clock recovery mode */
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_DETECTOR);
+
+ /* Bits 0,1,2 set to 1 to clear exception flags */
+ writel(0x07, ste_hsi->rx_base + STE_HSI_RX_ACK);
+
+ /* Bits 0..7 set to 1 to clear OVERRUN IRQ */
+ writel(0xFF, ste_hsi->rx_base + STE_HSI_RX_OVERRUNACK);
+
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_DMAEN);
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIC);
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM);
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM);
+
+ /* Flush all errors */
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEP);
+
+ /* 2 is Flush state, no RX exception generated afterwards */
+ writel(2, ste_hsi->rx_base + STE_HSI_RX_STATE);
+
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEPIM);
+}
+
+static void ste_hsi_setup_registers(struct ste_hsi_controller *ste_hsi)
+{
+ unsigned int buffers, i;
+ struct ste_hsi_hw_context *pcontext = ste_hsi->context;
+
+ /*
+ * Configure TX
+ */
+ writel(pcontext->tx_mode, ste_hsi->tx_base + STE_HSI_TX_MODE);
+ writel(pcontext->tx_divisor, ste_hsi->tx_base + STE_HSI_TX_DIVISOR);
+ writel(pcontext->tx_channels, ste_hsi->tx_base + STE_HSI_TX_CHANNELS);
+ writel(pcontext->tx_priority, ste_hsi->tx_base + STE_HSI_TX_PRIORITY);
+
+	/* Calculate the number of buffers per channel */
+ buffers = STE_HSI_MAX_BUFFERS / pcontext->tx_channels;
+ for (i = 0; i < pcontext->tx_channels; i++) {
+ /* Set 32 bit long frames */
+ writel(31, ste_hsi->tx_base + STE_HSI_TX_FRAMELENX + 4 * i);
+ writel(buffers * i,
+ ste_hsi->tx_base + STE_HSI_TX_BASEX + 4 * i);
+ writel(buffers - 1,
+ ste_hsi->tx_base + STE_HSI_TX_SPANX + 4 * i);
+
+ /*
+ * The DMA burst request and the buffer occupation interrupt are
+ * asserted when the free space in the corresponding channel buffer
+ * is greater than the value programmed in TX_WATERMARKX field.
+ * The field value must be less than the corresponding SPAN value.
+ */
+#ifdef CONFIG_STE_DMA40
+ writel(STE_HSI_DMA_MAX_BURST-1,
+ ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i);
+#else /* IRQ mode */
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i);
+#endif
+ }
+
+ /*
+ * The value read from this register gives the synchronized status
+ * of the transmitter state and this synchronization takes 2 HSITCLK
+ * cycles plus 3 HCLK cycles.
+ */
+ while (STE_HSI_STATE_IDLE != readl(ste_hsi->tx_base + STE_HSI_TX_STATE))
+ cpu_relax();
+
+ /*
+ * Configure RX
+ */
+ writel(pcontext->rx_mode, ste_hsi->rx_base + STE_HSI_RX_MODE);
+
+ if (STE_HSI_MODE_PIPELINED == pcontext->rx_mode)
+ /*
+ * 0xFF: The READY line is negated after the start of the
+ * 256th frame reception in PIPELINED mode.
+ */
+ writel(0xFF, ste_hsi->rx_base + STE_HSI_RX_FRAMEBURSTCNT);
+ else
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_FRAMEBURSTCNT);
+
+ writel(pcontext->rx_channels, ste_hsi->rx_base + STE_HSI_RX_CHANNELS);
+	/* Calculate the number of buffers per channel */
+ buffers = STE_HSI_MAX_BUFFERS / pcontext->rx_channels;
+ for (i = 0; i < pcontext->rx_channels; i++) {
+ /* Set 32 bit long frames */
+ writel(31, ste_hsi->rx_base + STE_HSI_RX_FRAMELENX + 4 * i);
+ writel(buffers * i,
+ ste_hsi->rx_base + STE_HSI_RX_BASEX + 4 * i);
+ writel(buffers - 1,
+ ste_hsi->rx_base + STE_HSI_RX_SPANX + 4 * i);
+
+ /*
+ * The DMA burst request and the buffer occupation interrupt are
+ * asserted when the busy space in the corresponding channel buffer
+ * is greater than the value programmed in RX_WATERMARKX field.
+ * The field value must be less than the corresponding SPAN value.
+ */
+#ifdef CONFIG_STE_DMA40
+ writel(STE_HSI_DMA_MAX_BURST-1,
+ ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i);
+#else /* IRQ mode */
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i);
+#endif
+ }
+
+ /*
+ * The value read from this register gives the synchronized status
+ * of the receiver state and this synchronization takes 2 HSIRCLK
+ * cycles plus 3 HCLK cycles.
+ */
+	while (STE_HSI_STATE_IDLE != readl(ste_hsi->rx_base + STE_HSI_RX_STATE))
+ cpu_relax();
+}
+
+/*
+ * When the cpuidle framework enters the sleep or deep sleep state, Vape is
+ * powered off. This resets the HSIT/HSIR registers to their default (idle)
+ * values. ste_hsi_context() detects this and restores the HSI registers to
+ * the values set by the HSI client through ste_hsi_setup().
+ */
+static void ste_hsi_context(struct ste_hsi_controller *ste_hsi)
+{
+ unsigned int tx_channels;
+ unsigned int rx_channels;
+
+
+ tx_channels = readl(ste_hsi->tx_base + STE_HSI_TX_CHANNELS);
+ rx_channels = readl(ste_hsi->rx_base + STE_HSI_RX_CHANNELS);
+
+ /*
+ * Checking if the context was lost.
+ * The target config is at least 2 channels for both TX and RX.
+ * TX and RX channels are set to 1 after HW reset.
+ */
+ if ((ste_hsi->context->tx_channels != tx_channels) ||
+ (ste_hsi->context->rx_channels != rx_channels)) {
+ /*
+		 * TODO: remove this dev_info() after thorough testing.
+		 * It is left in to gather statistics on how frequently the
+		 * context is lost during regular HSI operation.
+ */
+ dev_info(ste_hsi->dev, "context\n");
+
+ ste_hsi_init_registers(ste_hsi);
+ ste_hsi_setup_registers(ste_hsi);
+ }
+}
+
+static void ste_hsi_clks_free(struct ste_hsi_controller *ste_hsi)
+{
+ ste_hsi_clk_free(&ste_hsi->rx_clk);
+ ste_hsi_clk_free(&ste_hsi->tx_clk);
+ ste_hsi_clk_free(&ste_hsi->ssirx_clk);
+ ste_hsi_clk_free(&ste_hsi->ssitx_clk);
+}
+
+static int ste_hsi_clock_enable(struct hsi_controller *hsi)
+{
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ int err = 0;
+
+ spin_lock_bh(&ste_hsi->ck_lock);
+ if (ste_hsi->ck_refcount++ || ste_hsi->ck_on)
+ goto out;
+
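+	/* First user while the clocks are off: enable the host (HCLK) and core clocks */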
+ err = clk_enable(ste_hsi->ssirx_clk);
+ if (unlikely(err))
+ goto out;
+
+ err = clk_enable(ste_hsi->ssitx_clk);
+ if (unlikely(err)) {
+ clk_disable(ste_hsi->ssirx_clk);
+ goto out;
+ }
+
+ err = clk_enable(ste_hsi->rx_clk);
+ if (unlikely(err)) {
+ clk_disable(ste_hsi->ssitx_clk);
+ clk_disable(ste_hsi->ssirx_clk);
+ goto out;
+ }
+
+ err = clk_enable(ste_hsi->tx_clk);
+ if (unlikely(err)) {
+ clk_disable(ste_hsi->rx_clk);
+ clk_disable(ste_hsi->ssitx_clk);
+ clk_disable(ste_hsi->ssirx_clk);
+ goto out;
+ }
+
+ ste_hsi->ck_on = 1;
+out:
+ if (err)
+ ste_hsi->ck_refcount--;
+
+ spin_unlock_bh(&ste_hsi->ck_lock);
+
+ return err;
+}
+
+static void ste_hsi_delayed_disable_clock(struct work_struct *work)
+{
+ struct ste_hsi_controller *ste_hsi;
+ ste_hsi = container_of(work, struct ste_hsi_controller, clk_work.work);
+
+ spin_lock_bh(&ste_hsi->ck_lock);
+
+ /*
+	 * If the clocks should not be off (enable was called in the meantime)
+	 * or they are already off, there is nothing to do.
+ */
+ if (ste_hsi->ck_refcount || !ste_hsi->ck_on)
+ goto out;
+
+ if (readl(ste_hsi->tx_base + STE_HSI_TX_STATE) != STE_HSI_STATE_IDLE ||
+ readl(ste_hsi->rx_base + STE_HSI_RX_STATE)
+ != STE_HSI_STATE_IDLE ||
+ readl(ste_hsi->rx_base + STE_HSI_RX_BUFSTATE) != 0) {
+ /* Try again later */
+ int err = schedule_delayed_work(&ste_hsi->clk_work, HZ);
+ if (err < 0)
+ dev_err(ste_hsi->dev, "Error scheduling work\n");
+ goto out;
+ }
+
+ /* Actual clocks disable */
+ clk_disable(ste_hsi->tx_clk);
+ clk_disable(ste_hsi->rx_clk);
+ clk_disable(ste_hsi->ssitx_clk);
+ clk_disable(ste_hsi->ssirx_clk);
+ ste_hsi->ck_on = 0;
+
+out:
+ spin_unlock_bh(&ste_hsi->ck_lock);
+}
+
+static void ste_hsi_clock_disable(struct hsi_controller *hsi)
+{
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+
+ spin_lock_bh(&ste_hsi->ck_lock);
+
+	/* Sanity check */
+	WARN_ON(ste_hsi->ck_refcount <= 0);
+
+	/* Do the clocks need to be disabled now? */
+ if (--ste_hsi->ck_refcount)
+ goto out;
+
+ /*
+	 * If the receiver or transmitter is in the middle of something,
+	 * delay switching the clocks off.
+ */
+ if (readl(ste_hsi->tx_base + STE_HSI_TX_STATE) != STE_HSI_STATE_IDLE ||
+ readl(ste_hsi->rx_base + STE_HSI_RX_STATE)
+ != STE_HSI_STATE_IDLE ||
+ readl(ste_hsi->rx_base + STE_HSI_RX_BUFSTATE) != 0) {
+ int err = schedule_delayed_work(&ste_hsi->clk_work, HZ);
+ if (err < 0)
+ dev_err(&hsi->device, "Error scheduling work\n");
+
+ goto out;
+ }
+
+ /* Actual clocks disabled */
+ clk_disable(ste_hsi->tx_clk);
+ clk_disable(ste_hsi->rx_clk);
+ clk_disable(ste_hsi->ssitx_clk);
+ clk_disable(ste_hsi->ssirx_clk);
+ ste_hsi->ck_on = 0;
+
+out:
+ spin_unlock_bh(&ste_hsi->ck_lock);
+}
+
+static int ste_hsi_start_irq(struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ u32 val;
+ int err;
+
+ err = ste_hsi_clock_enable(hsi);
+ if (unlikely(err))
+ return err;
+
+ ste_hsi_context(ste_hsi);
+
+ msg->actual_len = 0;
+ msg->status = HSI_STATUS_PROCEEDING;
+
+ if (msg->ttype == HSI_MSG_WRITE) {
+ val = readl(ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM) |
+ (1 << msg->channel);
+ writel(val, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM);
+ } else {
+ val = readl(ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM) |
+ (1 << msg->channel);
+ writel(val, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM);
+
+ val = readl(ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM) |
+ (1 << msg->channel);
+ writel(val, ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM);
+ }
+
+ return 0;
+}
+
+static int ste_hsi_start_transfer(struct ste_hsi_port *ste_port,
+ struct list_head *queue);
+#ifdef CONFIG_STE_DMA40
+static void ste_hsi_dma_callback(void *dma_async_param)
+{
+ struct hsi_msg *msg = dma_async_param;
+ struct hsi_controller *hsi = client_to_hsi(msg->cl);
+ struct ste_hsi_port *ste_port = client_to_ste_port(msg->cl);
+ struct ste_hsi_controller *ste_hsi = client_to_ste_controller(msg->cl);
+ struct list_head *queue;
+ struct dma_chan *chan;
+ struct ste_hsi_channel_dma *hsi_dma_chan;
+ char *dma_enable_address;
+ enum dma_data_direction direction;
+ u32 dma_mask;
+
+ /* Message finished, remove from list and notify client */
+ spin_lock_bh(&ste_hsi->lock);
+ list_del(&msg->link);
+
+ if (msg->ttype == HSI_MSG_WRITE) {
+ queue = &ste_port->txqueue[msg->channel];
+ direction = DMA_TO_DEVICE;
+ dma_enable_address = ste_hsi->tx_base + STE_HSI_TX_DMAEN;
+ hsi_dma_chan = &ste_port->tx_dma[msg->channel];
+ } else {
+ queue = &ste_port->rxqueue[msg->channel];
+ direction = DMA_FROM_DEVICE;
+ dma_enable_address = ste_hsi->rx_base + STE_HSI_RX_DMAEN;
+ hsi_dma_chan = &ste_port->rx_dma[msg->channel];
+ }
+
+ dma_sync_sg_for_cpu(&hsi->device, msg->sgt.sgl,
+ msg->sgt.nents, direction);
+ chan = hsi_dma_chan->dma_chan;
+
+ /* disable DMA channel on HSI controller */
+ dma_mask = readl(dma_enable_address);
+ writel(dma_mask & ~(1 << msg->channel), dma_enable_address);
+
+ hsi_dma_chan->desc = NULL;
+
+ dma_unmap_sg(&hsi->device, msg->sgt.sgl, msg->sgt.nents, direction);
+
+ msg->status = HSI_STATUS_COMPLETED;
+ msg->actual_len = sg_dma_len(msg->sgt.sgl);
+
+ spin_unlock_bh(&ste_hsi->lock);
+
+ msg->complete(msg);
+
+ ste_hsi_clock_disable(hsi);
+
+ spin_lock_bh(&ste_hsi->lock);
+ ste_hsi_start_transfer(ste_port, queue);
+ spin_unlock_bh(&ste_hsi->lock);
+}
+
+static void dma_device_control(struct ste_hsi_channel_dma *chan,
+ enum dma_ctrl_cmd cmd, unsigned long arg)
+{
+ chan->dma_chan->device->device_control(chan->dma_chan, cmd, arg);
+}
+
+static void ste_hsi_terminate_dma_chan(struct ste_hsi_channel_dma *chan)
+{
+ if (chan->desc) {
+ dma_device_control(chan, DMA_TERMINATE_ALL, 0);
+ chan->desc = NULL;
+ }
+ chan->cookie = 0;
+}
+
+static void ste_hsi_terminate_dma(struct ste_hsi_port *ste_port)
+{
+ int i;
+
+ for (i = 0; i < ste_port->channels; ++i) {
+ ste_hsi_terminate_dma_chan(&ste_port->tx_dma[i]);
+ ste_hsi_terminate_dma_chan(&ste_port->rx_dma[i]);
+ }
+}
+
+static int ste_hsi_start_dma(struct hsi_msg *msg)
+{
+ struct hsi_controller *hsi = client_to_hsi(msg->cl);
+ struct ste_hsi_port *ste_port = client_to_ste_port(msg->cl);
+ struct ste_hsi_controller *ste_hsi = client_to_ste_controller(msg->cl);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan;
+ struct ste_hsi_channel_dma *hsi_dma_chan;
+ char *dma_enable_address;
+ enum dma_data_direction direction;
+ u32 dma_mask;
+ int err;
+
+ err = ste_hsi_clock_enable(hsi);
+ if (unlikely(err))
+ return err;
+
+ ste_hsi_context(ste_hsi);
+
+ if (msg->ttype == HSI_MSG_WRITE) {
+ direction = DMA_TO_DEVICE;
+ dma_enable_address = ste_hsi->tx_base + STE_HSI_TX_DMAEN;
+ hsi_dma_chan = &ste_port->tx_dma[msg->channel];
+ } else {
+ u32 val;
+ direction = DMA_FROM_DEVICE;
+ dma_enable_address = ste_hsi->rx_base + STE_HSI_RX_DMAEN;
+ hsi_dma_chan = &ste_port->rx_dma[msg->channel];
+
+ /* enable overrun for this channel */
+ val = readl(ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM) |
+ (1 << msg->channel);
+ writel(val, ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM);
+ }
+
+ chan = hsi_dma_chan->dma_chan;
+
+ if (0 == dma_map_sg(&hsi->device, msg->sgt.sgl, msg->sgt.nents,
+ direction)) {
+ dev_dbg(&hsi->device, "DMA map SG failed !\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ /* Prepare the scatterlist */
+ desc = chan->device->device_prep_slave_sg(chan,
+ msg->sgt.sgl,
+ msg->sgt.nents,
+ direction,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+
+ if (!desc) {
+ dma_unmap_sg(&hsi->device, msg->sgt.sgl, msg->sgt.nents,
+ direction);
+		/* "Complete" the DMA (error path) */
+ ste_hsi_terminate_dma_chan(hsi_dma_chan);
+ err = -EBUSY;
+ goto out;
+ }
+ desc->callback = ste_hsi_dma_callback;
+ desc->callback_param = msg;
+ hsi_dma_chan->cookie = desc->tx_submit(desc);
+ hsi_dma_chan->desc = desc;
+
+ /* Fire the DMA transaction */
+ chan->device->device_issue_pending(chan);
+
+ /* Enable DMA channel on HSI controller */
+ dma_mask = readl(dma_enable_address);
+ writel(dma_mask | 1 << msg->channel, dma_enable_address);
+
+out:
+ if (unlikely(err))
+ ste_hsi_clock_disable(hsi);
+
+ return err;
+}
+
+static void __init ste_hsi_init_dma(struct ste_hsi_platform_data *data,
+ struct hsi_controller *hsi)
+{
+ struct hsi_port *port;
+ struct ste_hsi_port *ste_port;
+ struct ste_hsi_controller *ste_hsi = hsi_to_ste_controller(hsi);
+ dma_cap_mask_t mask;
+ int i, ch;
+
+ ste_hsi->use_dma = 1;
+ /* Try to acquire a generic DMA engine slave channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ for (i = 0; i < hsi->num_ports; ++i) {
+ port = &hsi->port[i];
+ ste_port = hsi_port_drvdata(port);
+
+ for (ch = 0; ch < STE_HSI_MAX_CHANNELS; ++ch) {
+ ste_port->tx_dma[ch].dma_chan =
+ dma_request_channel(mask,
+ data->port_cfg[i].dma_filter,
+ &data->port_cfg[i].
+ dma_tx_cfg[ch]);
+
+ ste_port->rx_dma[ch].dma_chan =
+ dma_request_channel(mask,
+ data->port_cfg[i].dma_filter,
+ &data->port_cfg[i].
+ dma_rx_cfg[ch]);
+ }
+ }
+}
+
+static int ste_hsi_setup_dma(struct hsi_client *cl)
+{
+ int i;
+ struct hsi_port *port = to_hsi_port(cl->device.parent);
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ struct dma_slave_config rx_conf = {
+ .src_addr = 0, /* dynamic data */
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .direction = DMA_FROM_DEVICE,
+ .src_maxburst = STE_HSI_DMA_MAX_BURST,
+ };
+ struct dma_slave_config tx_conf = {
+ .dst_addr = 0, /* dynamic data */
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .direction = DMA_TO_DEVICE,
+ .dst_maxburst = STE_HSI_DMA_MAX_BURST,
+ };
+
+ if (!ste_hsi->use_dma)
+ return 0;
+
+ for (i = 0; i < ste_port->channels; ++i) {
+ struct dma_chan *chan;
+
+ chan = ste_port->tx_dma[i].dma_chan;
+ tx_conf.dst_addr = (dma_addr_t) ste_hsi->tx_dma_base +
+ STE_HSI_TX_BUFFERX + 4 * i;
+ chan->device->device_control(chan,
+ DMA_SLAVE_CONFIG,
+ (unsigned long)&tx_conf);
+
+ chan = ste_port->rx_dma[i].dma_chan;
+ rx_conf.src_addr = (dma_addr_t) ste_hsi->rx_dma_base +
+ STE_HSI_RX_BUFFERX + 4 * i;
+ chan->device->device_control(chan,
+ DMA_SLAVE_CONFIG,
+ (unsigned long)&rx_conf);
+ }
+
+ return 0;
+}
+
+#else
+#define ste_hsi_init_dma(data, hsi) do { } while (0)
+#define ste_hsi_start_dma ste_hsi_start_irq
+#define ste_hsi_terminate_dma(ste_port) do { } while (0)
+#define ste_hsi_setup_dma(cl) do { } while (0)
+#endif
+
+static int ste_hsi_start_transfer(struct ste_hsi_port *ste_port,
+ struct list_head *queue)
+{
+ struct hsi_msg *msg;
+ int err;
+
+ if (list_empty(queue))
+ return 0;
+
+ msg = list_first_entry(queue, struct hsi_msg, link);
+ if (msg->status != HSI_STATUS_QUEUED)
+ return 0;
+
+ msg->actual_len = 0;
+ msg->status = HSI_STATUS_PROCEEDING;
+
+ if (ste_port_to_ste_controller(ste_port)->use_dma)
+ err = ste_hsi_start_dma(msg);
+ else
+ err = ste_hsi_start_irq(msg);
+
+ return err;
+}
+
+static void ste_hsi_receive_data(struct hsi_port *port, unsigned int channel)
+{
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ struct list_head *queue = &ste_port->rxqueue[channel];
+ struct hsi_msg *msg;
+ char *bufferx;
+ u8 *buf;
+ int span;
+
+ spin_lock_bh(&ste_hsi->lock);
+
+ if (list_empty(queue))
+ goto out;
+
+ msg = list_first_entry(queue, struct hsi_msg, link);
+ if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
+ msg->actual_len = 0;
+ msg->status = HSI_STATUS_PENDING;
+ }
+
+ if (msg->status == HSI_STATUS_PROCEEDING && msg->ttype == HSI_MSG_READ) {
+ unsigned char len;
+ bufferx = ste_hsi->rx_base + STE_HSI_RX_BUFFERX + 4 * channel;
+
+ len = readl(ste_hsi->rx_base + STE_HSI_RX_GAUGEX + 4 * channel);
+ buf = sg_virt(msg->sgt.sgl);
+ buf += msg->actual_len;
+ while (len--) {
+ *(u32 *) buf = readl(bufferx);
+ buf += 4;
+ msg->actual_len += 4;
+ if (msg->actual_len >= msg->sgt.sgl->length) {
+ msg->status = HSI_STATUS_COMPLETED;
+ break;
+ }
+ }
+ }
+
+	/* Re-enable the interrupt condition by toggling the watermark */
+ span = readl(ste_hsi->rx_base + STE_HSI_RX_SPANX + 4 * channel);
+ writel(span, ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * channel);
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * channel);
+
+ /*
+	 * If the message was not received completely, re-enable the
+	 * interrupt to continue the transfer later.
+ */
+ if (msg->status == HSI_STATUS_PROCEEDING) {
+ u32 val;
+ val = readl(ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM) |
+ (1 << channel);
+ writel(val, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM);
+ goto out;
+ }
+
+ /* Message finished, remove from list and notify client */
+ list_del(&msg->link);
+ spin_unlock_bh(&ste_hsi->lock);
+ msg->complete(msg);
+
+ ste_hsi_clock_disable(hsi);
+
+ spin_lock_bh(&ste_hsi->lock);
+
+ ste_hsi_start_transfer(ste_port, queue);
+out:
+ spin_unlock_bh(&ste_hsi->lock);
+}
+
+static void ste_hsi_transmit_data(struct hsi_port *port, unsigned int channel)
+{
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ struct list_head *queue = &ste_port->txqueue[channel];
+ struct hsi_msg *msg;
+ u8 *buf;
+ int span;
+
+ if (list_empty(queue))
+ return;
+
+ spin_lock_bh(&ste_hsi->lock);
+ msg = list_first_entry(queue, struct hsi_msg, link);
+ if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
+ msg->actual_len = 0;
+ msg->status = HSI_STATUS_PENDING;
+ }
+
+ if (msg->status == HSI_STATUS_PROCEEDING &&
+ msg->ttype == HSI_MSG_WRITE) {
+ unsigned char free_space;
+
+ free_space = readl(ste_hsi->tx_base +
+ STE_HSI_TX_GAUGEX + 4 * channel);
+ buf = sg_virt(msg->sgt.sgl);
+ buf += msg->actual_len;
+ while (free_space--) {
+ writel(*(u32 *) buf, ste_hsi->tx_base +
+ STE_HSI_TX_BUFFERX + 4 * channel);
+ buf += 4;
+ msg->actual_len += 4;
+ if (msg->actual_len >= msg->sgt.sgl->length) {
+ msg->status = HSI_STATUS_COMPLETED;
+ break;
+ }
+ }
+ }
+
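+	/* Re-enable the interrupt condition by toggling the watermark */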
+ span = readl(ste_hsi->tx_base + STE_HSI_TX_SPANX + 4 * channel);
+ writel(span, ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * channel);
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * channel);
+
+ if (msg->status == HSI_STATUS_PROCEEDING) {
+ u32 val;
+ val = readl(ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM) |
+ (1 << channel);
+ writel(val, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM);
+ goto out;
+ }
+
+ list_del(&msg->link);
+ spin_unlock_bh(&ste_hsi->lock);
+ msg->complete(msg);
+
+ ste_hsi_clock_disable(hsi);
+
+ spin_lock_bh(&ste_hsi->lock);
+ ste_hsi_start_transfer(ste_port, queue);
+out:
+ spin_unlock_bh(&ste_hsi->lock);
+}
+
+static void ste_hsi_cawake_tasklet(unsigned long data)
+{
+ struct hsi_port *port = (struct hsi_port *)data;
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ u32 prcm_line_value;
+ int level;
+
+ prcm_line_value = prcmu_read(DB8500_PRCM_LINE_VALUE);
+ level = (prcm_line_value & DB8500_PRCM_LINE_VALUE_HSI_CAWAKE0) ? 1 : 0;
+
+ dev_info(ste_hsi->dev, "cawake %s\n", level ? "HIGH" : "LOW");
+ hsi_event(hsi->port, level ? HSI_EVENT_START_RX : HSI_EVENT_STOP_RX);
+ enable_irq(ste_port->cawake_irq);
+}
+
+static irqreturn_t ste_hsi_cawake_isr(int irq, void *data)
+{
+ struct hsi_port *port = data;
+
+ /* IRQ processed only if device initialized */
+	if (port && port->device.parent) {
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+
+ disable_irq_nosync(irq);
+ tasklet_hi_schedule(&ste_port->cawake_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void ste_hsi_rx_tasklet(unsigned long data)
+{
+ struct hsi_port *port = (struct hsi_port *)data;
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ u32 irq_status, irq_mask;
+ unsigned int i;
+
+ irq_status = readl(ste_hsi->rx_base + STE_HSI_RX_WATERMARKMIS);
+ if (!irq_status)
+ goto out;
+
+ irq_mask = readl(ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM);
+ writel(irq_mask & ~irq_status,
+ ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM);
+ writel(irq_mask, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIC);
+
+ for (i = 0; i < STE_HSI_MAX_CHANNELS; ++i) {
+ if (irq_status & (1 << i))
+ ste_hsi_receive_data(port, i);
+ }
+out:
+ enable_irq(ste_port->rx_irq);
+}
+
+static irqreturn_t ste_hsi_rx_isr(int irq, void *data)
+{
+ struct hsi_port *port = data;
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+
+ disable_irq_nosync(irq);
+ tasklet_hi_schedule(&ste_port->rx_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ste_hsi_tx_isr(int irq, void *data)
+{
+ struct hsi_port *port = data;
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+
+ disable_irq_nosync(irq);
+ tasklet_hi_schedule(&ste_port->tx_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void ste_hsi_tx_tasklet(unsigned long data)
+{
+ struct hsi_port *port = (struct hsi_port *)data;
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ u32 irq_status, irq_mask;
+ unsigned int i;
+
+ irq_status = readl(ste_hsi->tx_base + STE_HSI_TX_WATERMARKMIS);
+ if (!irq_status)
+ goto out;
+
+ irq_mask = readl(ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM);
+ writel(irq_mask & ~irq_status,
+ ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM);
+ writel(irq_mask, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIC);
+
+ for (i = 0; i < STE_HSI_MAX_CHANNELS; ++i) {
+ if (irq_status & (1 << i))
+ ste_hsi_transmit_data(port, i);
+ }
+out:
+ enable_irq(ste_port->tx_irq);
+}
+
+static void ste_hsi_break_complete(struct hsi_port *port,
+ struct ste_hsi_controller *ste_hsi)
+{
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ struct hsi_msg *msg, *tmp;
+ u32 mask;
+
+ dev_dbg(port->device.parent, "HWBREAK received\n");
+
+ spin_lock_bh(&ste_hsi->lock);
+
+ mask = readl(ste_hsi->rx_base + STE_HSI_RX_EXCEPIM);
+ writel(mask & ~STE_HSI_EXCEP_BREAK,
+ ste_hsi->rx_base + STE_HSI_RX_EXCEPIM);
+
+ spin_unlock_bh(&ste_hsi->lock);
+
+ list_for_each_entry_safe(msg, tmp, &ste_port->brkqueue, link) {
+ msg->status = HSI_STATUS_COMPLETED;
+ list_del(&msg->link);
+ msg->complete(msg);
+ }
+}
+
+static void ste_hsi_error(struct hsi_port *port)
+{
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ struct hsi_msg *msg;
+ unsigned int i;
+
+ for (i = 0; i < ste_port->channels; i++) {
+ if (list_empty(&ste_port->rxqueue[i]))
+ continue;
+ msg = list_first_entry(&ste_port->rxqueue[i], struct hsi_msg,
+ link);
+ list_del(&msg->link);
+ msg->status = HSI_STATUS_ERROR;
+ msg->complete(msg);
+ /* Now restart queued reads if any */
+ ste_hsi_start_transfer(ste_port, &ste_port->rxqueue[i]);
+ }
+}
+
+static void ste_hsi_exception_tasklet(unsigned long data)
+{
+ struct hsi_port *port = (struct hsi_port *)data;
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ u32 error_status;
+ u32 error_interrupts;
+
+ error_status = readl(ste_hsi->rx_base + STE_HSI_RX_EXCEP);
+ /*
+	 * Sometimes the interrupt that caused this tasklet to run is already
+	 * inactive, so base the exception handling on the masked interrupt
+	 * status rather than on the exception state register.
+ */
+ error_interrupts = readl(ste_hsi->rx_base + STE_HSI_RX_EXCEPMIS);
+
+ if (error_interrupts & STE_HSI_EXCEP_BREAK)
+ ste_hsi_break_complete(port, ste_hsi);
+
+ if (error_interrupts & STE_HSI_EXCEP_TIMEOUT)
+ dev_err(&hsi->device, "timeout exception occurred\n");
+ if (error_interrupts & STE_HSI_EXCEP_OVERRUN)
+ dev_err(&hsi->device, "overrun exception occurred\n");
+ if (error_interrupts & STE_HSI_EXCEP_PARITY)
+ dev_err(&hsi->device, "parity exception occurred\n");
+
+ if (error_interrupts & ~STE_HSI_EXCEP_BREAK)
+ ste_hsi_error(port);
+
+ /* Acknowledge exception interrupts */
+ writel(error_status, ste_hsi->rx_base + STE_HSI_RX_ACK);
+
+ enable_irq(ste_port->excep_irq);
+}
+
+static irqreturn_t ste_hsi_exception_isr(int irq, void *data)
+{
+ struct hsi_port *port = data;
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+
+ disable_irq_nosync(irq);
+ tasklet_hi_schedule(&ste_port->exception_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void ste_hsi_overrun_tasklet(unsigned long data)
+{
+	struct hsi_port *hsi_port = (struct hsi_port *)data;
+	struct hsi_controller *hsi = to_hsi_controller(hsi_port->device.parent);
+	struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+	struct ste_hsi_port *ste_port = hsi_port_drvdata(hsi_port);
+ struct hsi_msg *msg;
+
+ unsigned int channel;
+ u8 rised_overrun;
+ u8 mask;
+ u8 blocked = 0;
+
+ rised_overrun = (u8) readl(ste_hsi->rx_base + STE_HSI_RX_OVERRUNMIS);
+ mask = rised_overrun;
+ for (channel = 0; mask; ++channel, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ do {
+ /*
+ * No more messages, block interrupt
+ */
+ if (list_empty(&ste_port->rxqueue[channel])) {
+ blocked |= 1 << channel;
+ break;
+ }
+ /*
+ * Complete with error
+ */
+ msg = list_first_entry(&ste_port->rxqueue[channel],
+ struct hsi_msg, link);
+ list_del(&msg->link);
+ msg->status = HSI_STATUS_ERROR;
+ msg->complete(msg);
+
+			/*
+			 * Now restart queued reads, if any.
+			 * If start_transfer fails, try with the next message.
+			 */
+			if (ste_hsi_start_transfer(ste_port,
+						&ste_port->rxqueue[channel]))
+				continue;
+
+			break;
+		} while (1);
+ }
+
+ /* Overrun acknowledge */
+ writel(rised_overrun, ste_hsi->rx_base + STE_HSI_RX_OVERRUNACK);
+ writel(~blocked & readl(ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM),
+ ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM);
+
+ /*
+ * Enable all that should not be blocked
+ */
+ mask = rised_overrun & ~blocked;
+	for (channel = 0; mask; ++channel, mask >>= 1) {
+		if (mask & 1)
+			enable_irq(ste_hsi->overrun_irq[channel]);
+	}
+}
+
+static irqreturn_t ste_hsi_overrun_isr(int irq, void *data)
+{
+ struct hsi_port *port = data;
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+
+ disable_irq_nosync(irq);
+ tasklet_hi_schedule(&ste_port->overrun_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void __init ste_hsi_queues_init(struct ste_hsi_port *ste_port)
+{
+ unsigned int ch;
+
+ for (ch = 0; ch < STE_HSI_MAX_CHANNELS; ch++) {
+ INIT_LIST_HEAD(&ste_port->txqueue[ch]);
+ INIT_LIST_HEAD(&ste_port->rxqueue[ch]);
+ }
+ INIT_LIST_HEAD(&ste_port->brkqueue);
+}
+
+static int __init ste_hsi_get_iomem(struct platform_device *pdev,
+ const char *res_name,
+ unsigned char __iomem **base,
+ dma_addr_t *phy)
+{
+ struct resource *mem;
+ struct resource *ioarea;
+
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
+ if (!mem) {
+ dev_err(&pdev->dev, "IO memory region missing!\n");
+ return -ENXIO;
+ }
+
+ ioarea = devm_request_mem_region(&pdev->dev, mem->start,
+ resource_size(mem),
+ dev_name(&pdev->dev));
+ if (!ioarea) {
+ dev_err(&pdev->dev, "Can't request IO memory region!\n");
+ return -ENXIO;
+ }
+
+ *base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+	if (!*base) {
+ dev_err(&pdev->dev, "%s IO remap failed!\n", mem->name);
+ return -ENXIO;
+ }
+ if (phy)
+ *phy = (dma_addr_t) mem->start;
+
+ return 0;
+}
+
+static int __init ste_hsi_acwake_gpio_init(struct platform_device *pdev,
+ int *gpio)
+{
+ int err = 0;
+ const char *gpio_name = "hsi0_acwake";
+ struct resource *resource;
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IO, gpio_name);
+ if (unlikely(!resource)) {
+ dev_err(&pdev->dev, "hsi0_acwake does not exist\n");
+ return -EINVAL;
+ }
+
+ *gpio = resource->start;
+ err = gpio_request(*gpio, gpio_name);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't request GPIO %d\n", *gpio);
+ return err;
+ }
+
+ /* Initial level set to 0 (LOW) */
+ err = gpio_direction_output(*gpio, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't init GPIO %d\n", *gpio);
+ gpio_free(*gpio);
+ }
+
+ return err;
+}
+
+static int __init ste_hsi_get_irq(struct platform_device *pdev,
+ const char *res_name,
+ irqreturn_t(*isr) (int, void *), void *data,
+ int *irq_number)
+{
+ struct resource *irq;
+ int err;
+
+ irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
+ if (!irq) {
+		dev_err(&pdev->dev, "IRQ resource missing!\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(&pdev->dev, irq->start, isr,
+ IRQF_DISABLED, irq->name, data);
+ if (err)
+ dev_err(&pdev->dev, "%s IRQ request failed!\n", irq->name);
+
+ if (irq_number)
+ *irq_number = irq->start;
+
+ return err;
+}
+
+static void ste_hsi_flush_queue(struct list_head *queue, struct hsi_client *cl)
+{
+ struct list_head *node, *tmp;
+ struct hsi_msg *msg;
+
+ list_for_each_safe(node, tmp, queue) {
+ msg = list_entry(node, struct hsi_msg, link);
+ if ((cl) && (cl != msg->cl))
+ continue;
+ list_del(node);
+
+ if (msg->destructor)
+ msg->destructor(msg);
+ else
+ hsi_free_msg(msg);
+ }
+}
+
+static int ste_hsi_async_break(struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(msg->cl);
+ struct ste_hsi_port *ste_port = hsi_to_ste_port(port);
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ int err;
+
+ err = ste_hsi_clock_enable(hsi);
+ if (unlikely(err))
+ return err;
+
+ if (msg->ttype == HSI_MSG_WRITE) {
+ if (port->tx_cfg.mode != HSI_MODE_FRAME) {
+ err = -EINVAL;
+ goto out;
+ }
+ writel(1, ste_hsi->tx_base + STE_HSI_TX_BREAK);
+ msg->status = HSI_STATUS_COMPLETED;
+ msg->complete(msg);
+ } else {
+ u32 mask;
+ if (port->rx_cfg.mode != HSI_MODE_FRAME) {
+ err = -EINVAL;
+ goto out;
+ }
+ spin_lock_bh(&ste_hsi->lock);
+ msg->status = HSI_STATUS_PROCEEDING;
+ mask = readl(ste_hsi->rx_base + STE_HSI_RX_EXCEPIM);
+ /* Enable break exception on controller */
+ if (!(mask & STE_HSI_EXCEP_BREAK))
+ writel(mask | STE_HSI_EXCEP_BREAK,
+ ste_hsi->rx_base + STE_HSI_RX_EXCEPIM);
+
+ list_add_tail(&msg->link, &ste_port->brkqueue);
+ spin_unlock_bh(&ste_hsi->lock);
+ }
+
+out:
+ ste_hsi_clock_disable(hsi);
+ return err;
+}
+
+static int ste_hsi_async(struct hsi_msg *msg)
+{
+ struct ste_hsi_controller *ste_hsi;
+ struct ste_hsi_port *ste_port;
+ struct list_head *queue;
+ int err = 0;
+
+ if (unlikely(!msg))
+ return -ENOSYS;
+
+ if (msg->sgt.nents > 1)
+ return -ENOSYS;
+
+ if (unlikely(msg->break_frame))
+ return ste_hsi_async_break(msg);
+
+ ste_port = client_to_ste_port(msg->cl);
+ ste_hsi = client_to_ste_controller(msg->cl);
+
+ if (msg->ttype == HSI_MSG_WRITE) {
+ /* TX transfer */
+ BUG_ON(msg->channel >= ste_port->channels);
+ queue = &ste_port->txqueue[msg->channel];
+ } else {
+ /* RX transfer */
+ queue = &ste_port->rxqueue[msg->channel];
+ }
+
+ spin_lock_bh(&ste_hsi->lock);
+ list_add_tail(&msg->link, queue);
+ msg->status = HSI_STATUS_QUEUED;
+
+ err = ste_hsi_start_transfer(ste_port, queue);
+ if (err)
+ list_del(&msg->link);
+
+ spin_unlock_bh(&ste_hsi->lock);
+
+ return err;
+}
+
+static int ste_hsi_setup(struct hsi_client *cl)
+{
+ struct hsi_port *port = to_hsi_port(cl->device.parent);
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ int err;
+ u32 div = 0;
+ int ch;
+
+ if (ste_hsi->regulator)
+ regulator_enable(ste_hsi->regulator);
+
+	err = ste_hsi_clock_enable(hsi);
+	if (unlikely(err)) {
+		if (ste_hsi->regulator)
+			regulator_disable(ste_hsi->regulator);
+		return err;
+	}
+
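+	/* TX divisor: (HSITXCLK rate in kHz / tx speed in kbit/s) - 1 */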
+ if (cl->tx_cfg.speed) {
+ div = clk_get_rate(ste_hsi->tx_clk) / 1000 / cl->tx_cfg.speed;
+ if (div)
+ --div;
+ }
+
+	if (!ste_hsi->context)
+		ste_hsi->context = kzalloc(sizeof(struct ste_hsi_hw_context),
+					   GFP_KERNEL);
+	if (!ste_hsi->context) {
+		dev_err(ste_hsi->dev, "Not enough memory for context!\n");
+		ste_hsi_clock_disable(hsi);
+		if (ste_hsi->regulator)
+			regulator_disable(ste_hsi->regulator);
+		return -ENOMEM;
+	}
+
+	/* Save HSI context */
+	ste_hsi->context->tx_mode = cl->tx_cfg.mode;
+	ste_hsi->context->tx_divisor = div;
+	ste_hsi->context->tx_channels = cl->tx_cfg.channels;
+	ste_hsi->context->tx_priority = 0;
+	if (HSI_ARB_PRIO == cl->tx_cfg.arb_mode)
+		for (ch = 0; ch < STE_HSI_MAX_CHANNELS; ch++)
+			if (cl->tx_cfg.ch_prio[ch])
+				ste_hsi->context->tx_priority |= (1 << ch);
+
+	if ((HSI_FLOW_PIPE == cl->rx_cfg.flow) &&
+	    (HSI_MODE_FRAME == cl->rx_cfg.mode))
+		ste_hsi->context->rx_mode = STE_HSI_MODE_PIPELINED;
+	else
+		ste_hsi->context->rx_mode = cl->rx_cfg.mode;
+
+	ste_hsi->context->rx_channels = cl->rx_cfg.channels;
+
+ port->tx_cfg = cl->tx_cfg;
+ port->rx_cfg = cl->rx_cfg;
+
+ ste_hsi_setup_registers(ste_hsi);
+
+ ste_port->channels = max(cl->tx_cfg.channels, cl->rx_cfg.channels);
+
+ ste_hsi_setup_dma(cl);
+
+ ste_hsi_clock_disable(hsi);
+
+ if (ste_hsi->regulator)
+ regulator_disable(ste_hsi->regulator);
+
+ return err;
+}
+
+static int ste_hsi_flush(struct hsi_client *cl)
+{
+ struct hsi_port *port = to_hsi_port(cl->device.parent);
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ int i;
+
+ ste_hsi_clock_enable(hsi);
+
+ /* Enter sleep mode */
+ writel(STE_HSI_MODE_SLEEP, ste_hsi->rx_base + STE_HSI_RX_MODE);
+
+ /* Disable DMA, and terminate all outstanding jobs */
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_DMAEN);
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_DMAEN);
+ ste_hsi_terminate_dma(ste_port);
+
+ /* Flush HSIT buffers */
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_STATE);
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_BUFSTATE);
+
+ /* Flush HSIR pipeline and channel buffers */
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_STATE);
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_PIPEGAUGE);
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_BUFSTATE);
+
+ /* Flush all errors */
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEP);
+
+ /* Clear interrupts */
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM);
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIC);
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM);
+ writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIC);
+ writel(0xFF, ste_hsi->rx_base + STE_HSI_RX_OVERRUNACK);
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM);
+ writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEPIM);
+ writel(0x0F, ste_hsi->rx_base + STE_HSI_RX_ACK);
+
+ /* Dequeue all pending requests */
+ for (i = 0; i < ste_port->channels; i++) {
+ /* Release write clocks */
+ if (!list_empty(&ste_port->txqueue[i]))
+ ste_hsi_clock_disable(hsi);
+ if (!list_empty(&ste_port->rxqueue[i]))
+ ste_hsi_clock_disable(hsi);
+ ste_hsi_flush_queue(&ste_port->txqueue[i], NULL);
+ ste_hsi_flush_queue(&ste_port->rxqueue[i], NULL);
+ }
+ ste_hsi_flush_queue(&ste_port->brkqueue, NULL);
+
+ ste_hsi_clock_disable(hsi);
+
+ return 0;
+}
+
+static int ste_hsi_start_tx(struct hsi_client *cl)
+{
+ struct hsi_port *port = to_hsi_port(cl->device.parent);
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+
+ if (ste_hsi->regulator)
+ regulator_enable(ste_hsi->regulator);
+
+ gpio_set_value(ste_port->acwake_gpio, 1); /* HIGH */
+
+ return 0;
+}
+
+static int ste_hsi_stop_tx(struct hsi_client *cl)
+{
+ struct hsi_port *port = to_hsi_port(cl->device.parent);
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+ struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+
+ gpio_set_value(ste_port->acwake_gpio, 0); /* LOW */
+
+ if (ste_hsi->regulator)
+ regulator_disable(ste_hsi->regulator);
+
+ return 0;
+}
+
+static int ste_hsi_release(struct hsi_client *cl)
+{
+ int err;
+ struct ste_hsi_controller *ste_hsi = client_to_ste_controller(cl);
+
+ err = ste_hsi_flush(cl);
+ cancel_delayed_work(&ste_hsi->clk_work);
+
+ return err;
+}
+
+static int ste_hsi_ports_init(struct hsi_controller *hsi,
+ struct platform_device *pdev)
+{
+ struct hsi_port *port;
+ struct ste_hsi_port *ste_port;
+ unsigned int i;
+ char irq_name[20];
+ int err;
+
+ for (i = 0; i < hsi->num_ports; i++) {
+ ste_port = devm_kzalloc(&pdev->dev, sizeof *ste_port,
+ GFP_KERNEL);
+ if (!ste_port)
+ return -ENOMEM;
+
+ port = &hsi->port[i];
+ port->async = ste_hsi_async;
+ port->setup = ste_hsi_setup;
+ port->flush = ste_hsi_flush;
+ port->start_tx = ste_hsi_start_tx;
+ port->stop_tx = ste_hsi_stop_tx;
+ port->release = ste_hsi_release;
+ hsi_port_set_drvdata(port, ste_port);
+ ste_port->dev = &port->device;
+
+ err = ste_hsi_acwake_gpio_init(pdev, &ste_port->acwake_gpio);
+ if (err)
+ return err;
+
+ sprintf(irq_name, "hsi0_cawake");
+ err = ste_hsi_get_irq(pdev, irq_name, ste_hsi_cawake_isr, port,
+ &ste_port->cawake_irq);
+ if (err)
+ return err;
+
+ sprintf(irq_name, "hsi_rx_irq%d", i);
+ err = ste_hsi_get_irq(pdev, irq_name, ste_hsi_rx_isr, port,
+ &ste_port->rx_irq);
+ if (err)
+ return err;
+
+ sprintf(irq_name, "hsi_tx_irq%d", i);
+ err = ste_hsi_get_irq(pdev, irq_name, ste_hsi_tx_isr, port,
+ &ste_port->tx_irq);
+ if (err)
+ return err;
+
+ tasklet_init(&ste_port->cawake_tasklet, ste_hsi_cawake_tasklet,
+ (unsigned long)port);
+
+ tasklet_init(&ste_port->rx_tasklet, ste_hsi_rx_tasklet,
+ (unsigned long)port);
+
+ tasklet_init(&ste_port->tx_tasklet, ste_hsi_tx_tasklet,
+ (unsigned long)port);
+
+ tasklet_init(&ste_port->exception_tasklet,
+ ste_hsi_exception_tasklet, (unsigned long)port);
+
+ tasklet_init(&ste_port->overrun_tasklet,
+ ste_hsi_overrun_tasklet, (unsigned long)port);
+
+ sprintf(irq_name, "hsi_rx_excep%d", i);
+ err = ste_hsi_get_irq(pdev, irq_name, ste_hsi_exception_isr,
+ port, &ste_port->excep_irq);
+ if (err)
+ return err;
+
+ ste_hsi_queues_init(ste_port);
+ }
+ return 0;
+}
+
+static int __init ste_hsi_hw_init(struct hsi_controller *hsi)
+{
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ int err;
+
+ err = ste_hsi_clock_enable(hsi);
+ if (unlikely(err))
+ return err;
+
+ ste_hsi_init_registers(ste_hsi);
+
+ ste_hsi_clock_disable(hsi);
+
+ return err;
+}
+
+static int __init ste_hsi_add_controller(struct hsi_controller *hsi,
+ struct platform_device *pdev)
+{
+ struct ste_hsi_controller *ste_hsi;
+ char overrun_name[] = "hsi_rx_overrun_chxxx";
+ unsigned char i;
+ int err;
+ unsigned long rate;
+
+ ste_hsi = kzalloc(sizeof(struct ste_hsi_controller), GFP_KERNEL);
+ if (!ste_hsi) {
+ dev_err(&pdev->dev, "Not enough memory for ste_hsi!\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&ste_hsi->lock);
+ spin_lock_init(&ste_hsi->ck_lock);
+ INIT_DELAYED_WORK(&ste_hsi->clk_work, ste_hsi_delayed_disable_clock);
+
+ hsi->id = pdev->id;
+ hsi->device.parent = &pdev->dev;
+ dev_set_name(&hsi->device, "ste-hsi.%d", hsi->id);
+ ste_hsi->dev = &hsi->device;
+ hsi_controller_set_drvdata(hsi, ste_hsi);
+
+ /* Get and enable regulator */
+ ste_hsi->regulator = regulator_get(&pdev->dev, "v-hsi");
+ if (IS_ERR(ste_hsi->regulator)) {
+ dev_err(&pdev->dev, "could not get v-hsi regulator\n");
+ ste_hsi->regulator = NULL;
+ } else {
+ regulator_enable(ste_hsi->regulator);
+ }
+
+ /* Get and reserve resources for receiver */
+ err = ste_hsi_get_iomem(pdev, "hsi_rx_base", &ste_hsi->rx_base,
+ &ste_hsi->rx_dma_base);
+ if (err)
+ goto err_free_mem;
+ dev_info(&pdev->dev, "hsi_rx_base = %p\n", ste_hsi->rx_base);
+
+ /* Get and reserve resources for transmitter */
+ err = ste_hsi_get_iomem(pdev, "hsi_tx_base", &ste_hsi->tx_base,
+ &ste_hsi->tx_dma_base);
+ if (err)
+ goto err_free_mem;
+ dev_info(&pdev->dev, "hsi_tx_base = %p\n", ste_hsi->tx_base);
+
+ /* Get HSIT HSITXCLK clock */
+ ste_hsi->tx_clk = clk_get(&pdev->dev, "hsit_hsitxclk");
+ if (IS_ERR(ste_hsi->tx_clk)) {
+ dev_err(&hsi->device, "Couldn't get HSIT HSITXCLK clock\n");
+ err = PTR_ERR(ste_hsi->tx_clk);
+ goto err_free_mem;
+ }
+
+ /* Get HSIR HSIRXCLK clock */
+ ste_hsi->rx_clk = clk_get(&pdev->dev, "hsir_hsirxclk");
+ if (IS_ERR(ste_hsi->rx_clk)) {
+ dev_err(&hsi->device, "Couldn't get HSIR HSIRXCLK clock\n");
+ err = PTR_ERR(ste_hsi->rx_clk);
+ goto err_clk_free;
+ }
+
+ /* Get HSIT HCLK clock */
+ ste_hsi->ssitx_clk = clk_get(&pdev->dev, "hsit_hclk");
+ if (IS_ERR(ste_hsi->ssitx_clk)) {
+ dev_err(&hsi->device, "Couldn't get HSIT HCLK clock\n");
+ err = PTR_ERR(ste_hsi->ssitx_clk);
+ goto err_clk_free;
+ }
+
+ /* Get HSIR HCLK clock */
+ ste_hsi->ssirx_clk = clk_get(&pdev->dev, "hsir_hclk");
+ if (IS_ERR(ste_hsi->ssirx_clk)) {
+ dev_err(&hsi->device, "Couldn't get HSIR HCLK clock\n");
+ err = PTR_ERR(ste_hsi->ssirx_clk);
+ goto err_clk_free;
+ }
+
+ /* Set HSITXCLK rate to 100 MHz */
+ rate = clk_round_rate(ste_hsi->tx_clk, 100000000);
+ err = clk_set_rate(ste_hsi->tx_clk, rate);
+ if (unlikely(err)) {
+ dev_err(&hsi->device, "Couldn't set HSIT clock rate\n");
+ goto err_clk_free;
+ }
+
+ /* Set HSIRXCLK rate to 200 MHz */
+ rate = clk_round_rate(ste_hsi->rx_clk, 200000000);
+ err = clk_set_rate(ste_hsi->rx_clk, rate);
+ if (unlikely(err)) {
+ dev_err(&hsi->device, "Couldn't set HSIR clock rate\n");
+ goto err_clk_free;
+ }
+
+ err = ste_hsi_clock_enable(hsi);
+ if (unlikely(err))
+ goto err_clk_free;
+
+ /* Check if controller is at specified address */
+ if (compare_periphid(ste_hsir_periphid,
+ (u32 *) (ste_hsi->rx_base + 0xFE0), 8)) {
+ dev_err(&pdev->dev, "No hsir controller at = %p\n",
+ ste_hsi->rx_base);
+ err = -ENXIO;
+ goto err_clk_free;
+ }
+
+ /* Check if controller is at specified address */
+ if (compare_periphid(ste_hsit_periphid,
+ (u32 *) (ste_hsi->tx_base + 0xFE0), 8)) {
+ dev_err(&pdev->dev, "No hsit controller at = %p\n",
+ ste_hsi->tx_base);
+ err = -ENXIO;
+ goto err_clk_free;
+ }
+ ste_hsi_clock_disable(hsi);
+
+ err = ste_hsi_hw_init(hsi);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init HSI controller!\n");
+ goto err_clk_free;
+ }
+
+ for (i = 0; i < STE_HSI_MAX_CHANNELS; i++) {
+ sprintf(overrun_name, "hsi_rx_overrun_ch%d", i);
+ err = ste_hsi_get_irq(pdev, overrun_name, ste_hsi_overrun_isr,
+ hsi, &ste_hsi->overrun_irq[i]);
+ if (err)
+ goto err_clk_free;
+ }
+
+ err = ste_hsi_ports_init(hsi, pdev);
+ if (err)
+ goto err_clk_free;
+
+ err = hsi_register_controller(hsi);
+
+ if (ste_hsi->regulator)
+ regulator_disable(ste_hsi->regulator);
+
+ if (err)
+ goto err_clk_free;
+
+ return 0;
+
+err_clk_free:
+ ste_hsi_clks_free(ste_hsi);
+err_free_mem:
+ kfree(ste_hsi);
+ return err;
+}
+
+static int ste_hsi_remove_controller(struct hsi_controller *hsi,
+ struct platform_device *pdev)
+{
+ struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+ struct hsi_port *port = to_hsi_port(&pdev->dev);
+ struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+
+ if (ste_hsi->regulator)
+ regulator_put(ste_hsi->regulator);
+
+ gpio_free(ste_port->acwake_gpio);
+
+ ste_hsi_clks_free(ste_hsi);
+ hsi_unregister_controller(hsi);
+
+ kfree(ste_hsi->context);
+ kfree(ste_hsi);
+
+ return 0;
+}
+
+static int __init ste_hsi_probe(struct platform_device *pdev)
+{
+ struct hsi_controller *hsi;
+ struct ste_hsi_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No HSI platform data!\n");
+ return -EINVAL;
+ }
+
+ hsi = hsi_alloc_controller(pdata->num_ports, GFP_KERNEL);
+ if (!hsi) {
+ dev_err(&pdev->dev, "No memory to allocate HSI controller!\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, hsi);
+
+ err = ste_hsi_add_controller(hsi, pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't add HSI controller!\n");
+ goto err_free_controller;
+ }
+
+ if (pdata->use_dma)
+ ste_hsi_init_dma(pdata, hsi);
+
+ return 0;
+
+err_free_controller:
+ platform_set_drvdata(pdev, NULL);
+ hsi_free_controller(hsi);
+
+ return err;
+}
+
+static int __exit ste_hsi_remove(struct platform_device *pdev)
+{
+ struct hsi_controller *hsi = platform_get_drvdata(pdev);
+
+ ste_hsi_remove_controller(hsi, pdev);
+ hsi_free_controller(hsi);
+
+ return 0;
+}
+
+static struct platform_driver ste_hsi_driver __refdata = {
+ .driver = {
+ .name = "ste_hsi",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(ste_hsi_remove),
+};
+
+static int __init ste_hsi_init(void)
+{
+ return platform_driver_probe(&ste_hsi_driver, ste_hsi_probe);
+}
+module_init(ste_hsi_init)
+
+static void __exit ste_hsi_exit(void)
+{
+ platform_driver_unregister(&ste_hsi_driver);
+}
+module_exit(ste_hsi_exit)
+
+MODULE_AUTHOR("Lukasz Baj <lukasz.baj@tieto.com");
+MODULE_DESCRIPTION("STE HSI driver.");
+MODULE_LICENSE("GPL");
+
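For context, the probe above pairs with board code that registers a platform device named "ste_hsi". A minimal sketch of that glue follows; the platform-data field names (num_ports, use_dma) and the resource/IRQ names ("hsi_rx_base", "hsi_tx_base", "hsi0_cawake", ...) are the ones ste_hsi_probe(), ste_hsi_get_iomem() and ste_hsi_ports_init() actually request, while the example_* identifiers, the addresses and the exact resource layout are placeholders, not values taken from this patch.

static struct ste_hsi_platform_data example_hsi_pdata = {
	.num_ports = 1,	/* passed to hsi_alloc_controller() by the probe */
	.use_dma   = 1,	/* makes the probe call ste_hsi_init_dma() */
};

static struct resource example_hsi_resources[] = {
	{
		/* looked up by name in ste_hsi_get_iomem() */
		.name  = "hsi_rx_base",
		.start = 0xa0000000,	/* placeholder address */
		.end   = 0xa0000fff,
		.flags = IORESOURCE_MEM,
	},
	/*
	 * "hsi_tx_base", "hsi0_cawake", "hsi_rx_irq0", "hsi_tx_irq0",
	 * "hsi_rx_excep0" and the "hsi_rx_overrun_ch<n>" lines follow the
	 * same pattern, as IORESOURCE_MEM or IORESOURCE_IRQ entries.
	 */
};

static struct platform_device example_hsi_device = {
	.name = "ste_hsi",	/* must match ste_hsi_driver.driver.name */
	.id = 0,
	.dev = {
		.platform_data = &example_hsi_pdata,
	},
	.resource = example_hsi_resources,
	.num_resources = ARRAY_SIZE(example_hsi_resources),
};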
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
new file mode 100644
index 00000000000..06b574394cd
--- /dev/null
+++ b/drivers/hsi/hsi.c
@@ -0,0 +1,496 @@
+/*
+ * hsi.c
+ *
+ * HSI core.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/hsi/hsi.h>
+#include <linux/compiler.h>
+#include <linux/rwsem.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "hsi_core.h"
+
+static struct device_type hsi_ctrl = {
+ .name = "hsi_controller",
+};
+
+static struct device_type hsi_cl = {
+ .name = "hsi_client",
+};
+
+static struct device_type hsi_port = {
+ .name = "hsi_port",
+};
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *a __maybe_unused, char *buf)
+{
+ return sprintf(buf, "hsi:%s\n", dev_name(dev));
+}
+
+static struct device_attribute hsi_bus_dev_attrs[] = {
+ __ATTR_RO(modalias),
+ __ATTR_NULL,
+};
+
+static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ if (dev->type == &hsi_cl)
+ add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
+
+ return 0;
+}
+
+static int hsi_bus_match(struct device *dev, struct device_driver *driver)
+{
+ return strcmp(dev_name(dev), driver->name) == 0;
+}
+
+static struct bus_type hsi_bus_type = {
+ .name = "hsi",
+ .dev_attrs = hsi_bus_dev_attrs,
+ .match = hsi_bus_match,
+ .uevent = hsi_bus_uevent,
+};
+
+static void hsi_client_release(struct device *dev)
+{
+ kfree(to_hsi_client(dev));
+}
+
+static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
+{
+ struct hsi_client *cl;
+ unsigned long flags;
+
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return;
+ cl->device.type = &hsi_cl;
+ cl->tx_cfg = info->tx_cfg;
+ cl->rx_cfg = info->rx_cfg;
+ cl->device.bus = &hsi_bus_type;
+ cl->device.parent = &port->device;
+ cl->device.release = hsi_client_release;
+ dev_set_name(&cl->device, "%s", info->name);
+ cl->device.platform_data = info->platform_data;
+ spin_lock_irqsave(&port->clock, flags);
+ list_add_tail(&cl->link, &port->clients);
+ spin_unlock_irqrestore(&port->clock, flags);
+ if (info->archdata)
+ cl->device.archdata = *info->archdata;
+ if (device_register(&cl->device) < 0) {
+ pr_err("hsi: failed to register client: %s\n", info->name);
+ kfree(cl);
+ }
+}
+
+static void hsi_scan_board_info(struct hsi_controller *hsi)
+{
+ struct hsi_cl_info *cl_info;
+ struct hsi_port *p;
+
+ list_for_each_entry(cl_info, &hsi_board_list, list)
+ if (cl_info->info.hsi_id == hsi->id) {
+ p = hsi_find_port_num(hsi, cl_info->info.port);
+ if (!p)
+ continue;
+ hsi_new_client(p, &cl_info->info);
+ }
+}
+
+static int hsi_remove_client(struct device *dev, void *data __maybe_unused)
+{
+ struct hsi_client *cl = to_hsi_client(dev);
+ struct hsi_port *port = to_hsi_port(dev->parent);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->clock, flags);
+ list_del(&cl->link);
+ spin_unlock_irqrestore(&port->clock, flags);
+ device_unregister(dev);
+
+ return 0;
+}
+
+static int hsi_remove_port(struct device *dev, void *data __maybe_unused)
+{
+ device_for_each_child(dev, NULL, hsi_remove_client);
+ device_unregister(dev);
+
+ return 0;
+}
+
+static void hsi_controller_release(struct device *dev __maybe_unused)
+{
+}
+
+static void hsi_port_release(struct device *dev __maybe_unused)
+{
+}
+
+/**
+ * hsi_unregister_controller - Unregister an HSI controller
+ * @hsi: The HSI controller to register
+ */
+void hsi_unregister_controller(struct hsi_controller *hsi)
+{
+ device_for_each_child(&hsi->device, NULL, hsi_remove_port);
+ device_unregister(&hsi->device);
+}
+EXPORT_SYMBOL_GPL(hsi_unregister_controller);
+
+/**
+ * hsi_register_controller - Register an HSI controller and its ports
+ * @hsi: The HSI controller to register
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_register_controller(struct hsi_controller *hsi)
+{
+ unsigned int i;
+ int err;
+
+ hsi->device.type = &hsi_ctrl;
+ hsi->device.bus = &hsi_bus_type;
+ hsi->device.release = hsi_controller_release;
+ err = device_register(&hsi->device);
+ if (err < 0)
+ return err;
+ for (i = 0; i < hsi->num_ports; i++) {
+ hsi->port[i].device.parent = &hsi->device;
+ hsi->port[i].device.bus = &hsi_bus_type;
+ hsi->port[i].device.release = hsi_port_release;
+ hsi->port[i].device.type = &hsi_port;
+ INIT_LIST_HEAD(&hsi->port[i].clients);
+ spin_lock_init(&hsi->port[i].clock);
+ err = device_register(&hsi->port[i].device);
+ if (err < 0)
+ goto out;
+ }
+ /* Populate HSI bus with HSI clients */
+ hsi_scan_board_info(hsi);
+
+ return 0;
+out:
+ hsi_unregister_controller(hsi);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hsi_register_controller);
+
+/**
+ * hsi_register_client_driver - Register an HSI client to the HSI bus
+ * @drv: HSI client driver to register
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_register_client_driver(struct hsi_client_driver *drv)
+{
+ drv->driver.bus = &hsi_bus_type;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(hsi_register_client_driver);
+
+static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
+{
+ return 0;
+}
+
+static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
+{
+ return 0;
+}
+
+/**
+ * hsi_alloc_controller - Allocate an HSI controller and its ports
+ * @n_ports: Number of ports on the HSI controller
+ * @flags: Kernel allocation flags
+ *
+ * Return NULL on failure or a pointer to an hsi_controller on success.
+ */
+struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags)
+{
+ struct hsi_controller *hsi;
+ struct hsi_port *port;
+ unsigned int i;
+
+ if (!n_ports)
+ return NULL;
+
+ port = kzalloc(sizeof(*port)*n_ports, flags);
+ if (!port)
+ return NULL;
+ hsi = kzalloc(sizeof(*hsi), flags);
+ if (!hsi)
+ goto out;
+ for (i = 0; i < n_ports; i++) {
+ dev_set_name(&port[i].device, "port%d", i);
+ port[i].num = i;
+ port[i].async = hsi_dummy_msg;
+ port[i].setup = hsi_dummy_cl;
+ port[i].flush = hsi_dummy_cl;
+ port[i].start_tx = hsi_dummy_cl;
+ port[i].stop_tx = hsi_dummy_cl;
+ port[i].release = hsi_dummy_cl;
+ mutex_init(&port[i].lock);
+ }
+ hsi->num_ports = n_ports;
+ hsi->port = port;
+
+ return hsi;
+out:
+ kfree(port);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hsi_alloc_controller);
+
+/**
+ * hsi_free_controller - Free an HSI controller
+ * @hsi: Pointer to HSI controller
+ */
+void hsi_free_controller(struct hsi_controller *hsi)
+{
+ if (!hsi)
+ return;
+
+ kfree(hsi->port);
+ kfree(hsi);
+}
+EXPORT_SYMBOL_GPL(hsi_free_controller);
+
+/**
+ * hsi_free_msg - Free an HSI message
+ * @msg: Pointer to the HSI message
+ *
+ * The client is responsible for freeing the buffers pointed to by the scatterlists.
+ */
+void hsi_free_msg(struct hsi_msg *msg)
+{
+ if (!msg)
+ return;
+ sg_free_table(&msg->sgt);
+ kfree(msg);
+}
+EXPORT_SYMBOL_GPL(hsi_free_msg);
+
+/**
+ * hsi_alloc_msg - Allocate an HSI message
+ * @nents: Number of memory entries
+ * @flags: Kernel allocation flags
+ *
+ * nents can be 0. This mainly makes sense for read transfers.
+ * In that case, HSI drivers will call the complete callback when
+ * there is data to be read, without consuming it.
+ *
+ * Return NULL on failure or a pointer to an hsi_msg on success.
+ */
+struct hsi_msg *hsi_alloc_msg(unsigned int nents, gfp_t flags)
+{
+ struct hsi_msg *msg;
+ int err;
+
+ msg = kzalloc(sizeof(*msg), flags);
+ if (!msg)
+ return NULL;
+
+ if (!nents)
+ return msg;
+
+ err = sg_alloc_table(&msg->sgt, nents, flags);
+ if (unlikely(err)) {
+ kfree(msg);
+ msg = NULL;
+ }
+
+ return msg;
+}
+EXPORT_SYMBOL_GPL(hsi_alloc_msg);
+
+/**
+ * hsi_async - Submit an HSI transfer to the controller
+ * @cl: HSI client sending the transfer
+ * @msg: The HSI transfer passed to controller
+ *
+ * The HSI message must have the channel, ttype, complete and destructor
+ * fields set beforehand. If nents > 0 then the client must also initialize
+ * the scatterlists to point to the buffers to write to or read from.
+ *
+ * HSI controllers rely on pre-allocated buffers from their clients and
+ * do not allocate buffers on their own.
+ *
+ * Once the HSI message transfer finishes, the HSI controller calls the
+ * complete callback with the status and actual_len fields of the HSI message
+ * updated. The complete callback can be called before returning from
+ * hsi_async.
+ *
+ * Returns -errno on failure or 0 on success
+ */
+int hsi_async(struct hsi_client *cl, struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+
+ WARN_ON_ONCE(!msg->destructor || !msg->complete);
+ msg->cl = cl;
+
+ return port->async(msg);
+}
+EXPORT_SYMBOL_GPL(hsi_async);
+
+/**
+ * hsi_claim_port - Claim the HSI client's port
+ * @cl: HSI client that wants to claim its port
+ * @share: Flag to indicate if the client wants to share the port or not.
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_claim_port(struct hsi_client *cl, unsigned int share)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ int err = 0;
+
+ mutex_lock(&port->lock);
+ if ((port->claimed) && (!port->shared || !share)) {
+ err = -EBUSY;
+ goto out;
+ }
+ if (!try_module_get(to_hsi_controller(port->device.parent)->owner)) {
+ err = -ENODEV;
+ goto out;
+ }
+ port->claimed++;
+ port->shared = !!share;
+ cl->pclaimed = 1;
+out:
+ mutex_unlock(&port->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hsi_claim_port);
+
+/**
+ * hsi_release_port - Release the HSI client's port
+ * @cl: HSI client which previously claimed its port
+ */
+void hsi_release_port(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+
+ mutex_lock(&port->lock);
+ /* Allow HW driver to do some cleanup */
+ port->release(cl);
+ if (cl->pclaimed)
+ port->claimed--;
+ BUG_ON(port->claimed < 0);
+ cl->pclaimed = 0;
+ if (!port->claimed)
+ port->shared = 0;
+ module_put(to_hsi_controller(port->device.parent)->owner);
+ mutex_unlock(&port->lock);
+}
+EXPORT_SYMBOL_GPL(hsi_release_port);
+
+static int hsi_start_rx(struct hsi_client *cl, void *data __maybe_unused)
+{
+ if (cl->hsi_start_rx)
+ (*cl->hsi_start_rx)(cl);
+
+ return 0;
+}
+
+static int hsi_stop_rx(struct hsi_client *cl, void *data __maybe_unused)
+{
+ if (cl->hsi_stop_rx)
+ (*cl->hsi_stop_rx)(cl);
+
+ return 0;
+}
+
+static int hsi_port_for_each_client(struct hsi_port *port, void *data,
+ int (*fn)(struct hsi_client *cl, void *data))
+{
+ struct hsi_client *cl;
+
+ spin_lock(&port->clock);
+ list_for_each_entry(cl, &port->clients, link) {
+ spin_unlock(&port->clock);
+ (*fn)(cl, data);
+ spin_lock(&port->clock);
+ }
+ spin_unlock(&port->clock);
+
+ return 0;
+}
+
+/**
+ * hsi_event - Notifies clients about port events
+ * @port: Port where the event occurred
+ * @event: The event type
+ *
+ * Clients should not be concerned about wake line behavior. However, due
+ * to a race condition in the HSI HW protocol, clients need to be notified
+ * about wake line changes, so they can implement a workaround for it.
+ *
+ * Events:
+ * HSI_EVENT_START_RX - Incoming wake line high
+ * HSI_EVENT_STOP_RX - Incoming wake line down
+ */
+void hsi_event(struct hsi_port *port, unsigned int event)
+{
+ int (*fn)(struct hsi_client *cl, void *data);
+
+ switch (event) {
+ case HSI_EVENT_START_RX:
+ fn = hsi_start_rx;
+ break;
+ case HSI_EVENT_STOP_RX:
+ fn = hsi_stop_rx;
+ break;
+ default:
+ return;
+ }
+ hsi_port_for_each_client(port, NULL, fn);
+}
+EXPORT_SYMBOL_GPL(hsi_event);
+
+static int __init hsi_init(void)
+{
+ return bus_register(&hsi_bus_type);
+}
+postcore_initcall(hsi_init);
+
+static void __exit hsi_exit(void)
+{
+ bus_unregister(&hsi_bus_type);
+}
+module_exit(hsi_exit);
+
+MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
+MODULE_DESCRIPTION("High-speed Synchronous Serial Interface (HSI) framework");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hsi/hsi_boardinfo.c b/drivers/hsi/hsi_boardinfo.c
new file mode 100644
index 00000000000..3a9e4e86652
--- /dev/null
+++ b/drivers/hsi/hsi_boardinfo.c
@@ -0,0 +1,64 @@
+/*
+ * hsi_boardinfo.c
+ *
+ * HSI clients registration interface
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/hsi/hsi.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "hsi_core.h"
+
+/*
+ * hsi_board_list is only used internally by the HSI framework.
+ * No one else is allowed to make use of it.
+ */
+LIST_HEAD(hsi_board_list);
+EXPORT_SYMBOL_GPL(hsi_board_list);
+
+/**
+ * hsi_register_board_info - Register HSI clients information
+ * @info: Array of HSI clients on the board
+ * @len: Length of the array
+ *
+ * HSI clients are statically declared and registered in board files.
+ *
+ * HSI clients will be automatically registered to the HSI bus once the
+ * controller and the port where the client wishes to attach are registered
+ * to it.
+ *
+ * Return -errno on failure, 0 on success.
+ */
+int __init hsi_register_board_info(struct hsi_board_info const *info,
+ unsigned int len)
+{
+ struct hsi_cl_info *cl_info;
+
+ cl_info = kzalloc(sizeof(*cl_info) * len, GFP_KERNEL);
+ if (!cl_info)
+ return -ENOMEM;
+
+ for (; len; len--, info++, cl_info++) {
+ cl_info->info = *info;
+ list_add_tail(&cl_info->list, &hsi_board_list);
+ }
+
+ return 0;
+}
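As the kernel-doc above notes, clients are declared in board files; a hedged sketch of such a declaration is shown below. The hsi_board_info fields (name, hsi_id, port, tx_cfg, rx_cfg) and the hsi_config members (mode, channels) are the ones read by hsi_new_client()/hsi_scan_board_info() and by the ste_hsi setup code earlier in this patch; HSI_MODE_FRAME is assumed to come from <linux/hsi/hsi.h>, and the client name and configuration values are illustrative only.

static struct hsi_board_info example_hsi_clients[] __initdata = {
	{
		.name   = "example-hsi-client",	/* matched by hsi_bus_match() */
		.hsi_id = 0,			/* controller id (hsi->id) */
		.port   = 0,			/* port number on that controller */
		.tx_cfg = {
			.mode     = HSI_MODE_FRAME,
			.channels = 2,
		},
		.rx_cfg = {
			.mode     = HSI_MODE_FRAME,
			.channels = 2,
		},
	},
};

static int __init example_board_hsi_init(void)
{
	/*
	 * Entries end up on hsi_board_list and become devices when
	 * hsi_register_controller() runs hsi_scan_board_info().
	 */
	return hsi_register_board_info(example_hsi_clients,
				       ARRAY_SIZE(example_hsi_clients));
}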
diff --git a/drivers/hsi/hsi_core.h b/drivers/hsi/hsi_core.h
new file mode 100644
index 00000000000..8005509a28b
--- /dev/null
+++ b/drivers/hsi/hsi_core.h
@@ -0,0 +1,37 @@
+/*
+ * hsi_core.h
+ *
+ * HSI framework internal interfaces.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_HSI_CORE_H__
+#define __LINUX_HSI_CORE_H__
+
+#include <linux/hsi/hsi.h>
+
+struct hsi_cl_info {
+ struct list_head list;
+ struct hsi_board_info info;
+};
+
+extern struct list_head hsi_board_list;
+
+#endif /* __LINUX_HSI_CORE_H__ */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index dad895fec62..36829af0fbd 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -39,6 +39,44 @@ config HWMON_DEBUG_CHIP
comment "Native drivers"
+config SENSORS_AB8500
+ tristate "AB8500 thermal monitoring"
+ depends on AB8500_GPADC
+ default n
+ help
+ If you say yes here you get support for the thermal sensor part
+ of the AB8500 chip. The driver includes thermal management for
+ AB8500 die and two GPADC channels. The GPADC channels are preferably
+ used to access sensors outside the AB8500 chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called abx500-temp.
+
+config SENSORS_AB5500
+ tristate "AB5500 thermal monitoring"
+ depends on AB5500_GPADC
+ default n
+ help
+ If you say yes here you get support for the thermal sensor part
+ of the AB5500 chip. The driver includes thermal management for
+ AB5500 die, PCB and RF XTAL temperatures.
+
+ This driver can also be built as a module. If so, the module
+ will be called abx500-temp.
+
+config SENSORS_DBX500
+ tristate "DBX500 thermal monitoring"
+ depends on MFD_DB8500_PRCMU || MFD_DB5500_PRCMU
+ default n
+ help
+ If you say yes here you get support for the thermal sensor part
+ of the DBX500 chip. The driver includes thermal management for
+ DBX500 die.
+
+ This driver can also be built as a module. If so, the module
+ will be called dbx500_temp.
+
+
config SENSORS_ABITUGURU
tristate "Abit uGuru (rev 1 & 2)"
depends on X86 && DMI && EXPERIMENTAL
@@ -685,6 +723,54 @@ config SENSORS_LTC4151
This driver can also be built as a module. If so, the module will
be called ltc4151.
+config SENSORS_LSM303DLH
+ tristate "ST LSM303DLH 3-axis accelerometer and 3-axis magnetometer"
+ depends on I2C
+ default n
+ help
+ This driver provides support for the LSM303DLH chip which includes a
+ 3-axis accelerometer and a 3-axis magnetometer.
+
+ This driver can also be built as modules. If so, the module for
+ accelerometer will be called lsm303dlh_a and for magnetometer it will
+ be called lsm303dlh_m.
+
+ Say Y here if you have a device containing the lsm303dlh chip.
+
+config SENSORS_LSM303DLH_INPUT_DEVICE
+ bool "ST LSM303DLH INPUT DEVICE"
+ depends on SENSORS_LSM303DLH
+ default n
+ help
+ This driver allows the device to be used as an input device with
+ interrupts; it needs to be enabled only when input device support
+ is required.
+
+config SENSORS_LSM303DLHC
+ tristate "ST LSM303DLHC 3-axis accelerometer and 3-axis magnetometer"
+ depends on I2C
+ default n
+ help
+ This driver provides support for the LSM303DLHC chip which includes a
+ 3-axis accelerometer and a 3-axis magnetometer.
+
+ This driver can also be built as modules. If so, the module for
+ accelerometer will be called lsm303dlhc_a and for magnetometer it will
+ be called lsm303dlh_m.
+
+ Say Y here if you have a device containing the lsm303dlhc chip.
+
+config SENSORS_L3G4200D
+ tristate "ST L3G4200D 3-axis gyroscope"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for the 3-axis gyroscope device
+ L3G4200D.
+
+ This driver can also be built as a module. If so, the module
+ will be called l3g4200d.
+
config SENSORS_LTC4215
tristate "Linear Technology LTC4215"
depends on I2C && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 8251ce8cd03..e6dda2a5a66 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -19,6 +19,9 @@ obj-$(CONFIG_SENSORS_W83795) += w83795.o
obj-$(CONFIG_SENSORS_W83781D) += w83781d.o
obj-$(CONFIG_SENSORS_W83791D) += w83791d.o
+obj-$(CONFIG_SENSORS_AB8500) += abx500.o ab8500.o
+obj-$(CONFIG_SENSORS_AB5500) += abx500.o ab5500.o
+obj-$(CONFIG_SENSORS_DBX500) += dbx500.o
obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
obj-$(CONFIG_SENSORS_AD7314) += ad7314.o
@@ -84,6 +87,9 @@ obj-$(CONFIG_SENSORS_LM93) += lm93.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
+obj-$(CONFIG_SENSORS_LSM303DLH) += lsm303dlh_a.o lsm303dlh_m.o
+obj-$(CONFIG_SENSORS_LSM303DLHC)+= lsm303dlhc_a.o
+obj-$(CONFIG_SENSORS_L3G4200D) += l3g4200d.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
diff --git a/drivers/hwmon/ab5500.c b/drivers/hwmon/ab5500.c
new file mode 100644
index 00000000000..cafadeba51c
--- /dev/null
+++ b/drivers/hwmon/ab5500.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Persson <martin.persson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Note:
+ *
+ * If/when the AB5500 thermal warning temperature is reached (the 125C
+ * threshold cannot be changed by SW), an interrupt is set and the driver
+ * notifies user space via a sysfs event. If a shutdown is not
+ * triggered by user space and the temperature goes beyond the critical
+ * limit (130C), pm_power_off is called.
+ *
+ * If/when AB5500 thermal shutdown temperature is reached a hardware
+ * shutdown of the AB5500 will occur.
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include "abx500.h"
+#include <asm/mach-types.h>
+
+/* AB5500 driver monitors GPADC - XTAL_TEMP, PCB_TEMP,
+ * BTEMP_BALL, BAT_CTRL and DIE_TEMP
+ */
+#define NUM_MONITORED_SENSORS 5
+
+#define SHUTDOWN_AUTO_MIN_LIMIT -25
+#define SHUTDOWN_AUTO_MAX_LIMIT 130
+
+static int ab5500_output_convert(int val, u8 sensor)
+{
+ int res = val;
+ /* GPADC returns the die temperature in Celsius;
+ * convert it to millidegrees Celsius
+ */
+ if (sensor == DIE_TEMP)
+ res = val * 1000;
+
+ return res;
+}
+
+static int ab5500_read_sensor(struct abx500_temp *data, u8 sensor)
+{
+ int val;
+ /*
+ * Special treatment for BAT_CTRL node, since this
+ * temperature measurement is more complex than just
+ * an ADC readout
+ */
+ if (sensor == BAT_CTRL)
+ val = ab5500_btemp_get_batctrl_temp(data->ab5500_btemp);
+ else
+ val = ab5500_gpadc_convert(data->ab5500_gpadc, sensor);
+
+ if (val < 0)
+ return val;
+ else
+ return ab5500_output_convert(val, sensor);
+}
+
+static ssize_t ab5500_show_name(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "ab5500\n");
+}
+
+static ssize_t ab5500_show_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ char *name;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int index = attr->index;
+
+ /*
+ * Make sure these labels correspond to the attribute indexes
+ * used when calling SENSOR_DEVICE_ATTR.
+ * Temperature sensors outside ab8500 (read via GPADC) are marked
+ * with prefix ext_
+ */
+ switch (index) {
+ case 1:
+ name = "xtal_temp";
+ break;
+ case 2:
+ name = "pcb_temp";
+ break;
+ case 3:
+ name = "bat_temp";
+ break;
+ case 4:
+ name = "bat_ctrl";
+ break;
+ case 5:
+ name = "ab5500";
+ break;
+ default:
+ return -EINVAL;
+ }
+ return sprintf(buf, "%s\n", name);
+}
+
+static int temp_shutdown_trig(int mux)
+{
+ pm_power_off();
+ return 0;
+}
+
+static int ab5500_temp_shutdown_auto(struct abx500_temp *data)
+{
+ int ret;
+ struct adc_auto_input *auto_ip;
+
+ auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL);
+ if (!auto_ip) {
+ dev_err(&data->pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ auto_ip->mux = DIE_TEMP;
+ auto_ip->freq = MS500;
+ /*
+ * As per product specification, voltage decreases as
+ * temperature increases. Hence the min and max values
+ * should be passed in reverse order.
+ */
+ auto_ip->min = SHUTDOWN_AUTO_MAX_LIMIT;
+ auto_ip->max = SHUTDOWN_AUTO_MIN_LIMIT;
+ auto_ip->auto_adc_callback = temp_shutdown_trig;
+ data->gpadc_auto = auto_ip;
+ ret = ab5500_gpadc_convert_auto(data->ab5500_gpadc,
+ data->gpadc_auto);
+ if (ret < 0)
+ kfree(auto_ip);
+
+ return ret;
+}
+
+static int ab5500_is_visible(struct attribute *attr, int n)
+{
+ return attr->mode;
+}
+
+static int ab5500_temp_irq_handler(int irq, struct abx500_temp *data)
+{
+ /*
+ * Make sure the magic numbers below correspond to the node
+ * used for the AB5500 thermal warning from HW.
+ */
+ mutex_lock(&data->lock);
+ data->crit_alarm[4] = 1;
+ mutex_unlock(&data->lock);
+ sysfs_notify(&data->pdev->dev.kobj, NULL, "temp5_crit_alarm");
+ dev_info(&data->pdev->dev, "ABX500 thermal warning,"
+ " power off system now!\n");
+ return 0;
+}
+
+int __init ab5500_hwmon_init(struct abx500_temp *data)
+{
+ int err;
+
+ data->ab5500_gpadc = ab5500_gpadc_get("ab5500-adc.0");
+ if (IS_ERR(data->ab5500_gpadc))
+ return PTR_ERR(data->ab5500_gpadc);
+
+ data->ab5500_btemp = ab5500_btemp_get();
+ if (IS_ERR(data->ab5500_btemp))
+ return PTR_ERR(data->ab5500_btemp);
+
+ err = ab5500_temp_shutdown_auto(data);
+ if (err < 0) {
+ dev_err(&data->pdev->dev, "Failed to register"
+ " auto trigger(%d)\n", err);
+ return err;
+ }
+
+ /*
+ * Setup HW defined data.
+ *
+ * Reference hardware (HREF):
+ *
+ * XTAL_TEMP, PCB_TEMP, BTEMP_BALL refer to millivolts and
+ * BAT_CTRL and DIE_TEMP refer to millidegrees
+ *
+ * Make sure indexes correspond to the attribute indexes
+ * used when calling SENSOR_DEVICE_ATTR
+ */
+ data->gpadc_addr[0] = XTAL_TEMP;
+ data->gpadc_addr[1] = PCB_TEMP;
+ data->gpadc_addr[2] = BTEMP_BALL;
+ data->gpadc_addr[3] = BAT_CTRL;
+ data->gpadc_addr[4] = DIE_TEMP;
+ data->monitored_sensors = NUM_MONITORED_SENSORS;
+
+ data->ops.read_sensor = ab5500_read_sensor;
+ data->ops.irq_handler = ab5500_temp_irq_handler;
+ data->ops.show_name = ab5500_show_name;
+ data->ops.show_label = ab5500_show_label;
+ data->ops.is_visible = ab5500_is_visible;
+
+ return 0;
+}
diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c
new file mode 100644
index 00000000000..a652f32dc75
--- /dev/null
+++ b/drivers/hwmon/ab8500.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Persson <martin.persson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Note:
+ *
+ * If/when the AB8500 thermal warning temperature is reached (threshold
+ * cannot be changed by SW), an interrupt is set and the driver
+ * notifies user space via a sysfs event. If a shutdown is not
+ * triggered by user space within a certain time frame,
+ * pm_power_off is called.
+ *
+ * If/when AB8500 thermal shutdown temperature is reached a hardware
+ * shutdown of the AB8500 will occur.
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/ab8500/bm.h>
+#include "abx500.h"
+#include <asm/mach-types.h>
+
+#define DEFAULT_POWER_OFF_DELAY 10000
+
+/*
+ * The driver monitors GPADC - ADC_AUX1, ADC_AUX2, BTEMP_BALL
+ * and BAT_CTRL.
+ */
+#define NUM_MONITORED_SENSORS 4
+
+static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor)
+{
+ int val;
+ /*
+ * Special treatment for the BAT_CTRL node, since this
+ * temperature measurement is more complex than just
+ * an ADC readout
+ */
+ if (sensor == BAT_CTRL)
+ val = ab8500_btemp_get_batctrl_temp(data->ab8500_btemp);
+ else
+ val = ab8500_gpadc_convert(data->ab8500_gpadc, sensor);
+
+ return val;
+}
+
+static void ab8500_thermal_power_off(struct work_struct *work)
+{
+ struct abx500_temp *data = container_of(work, struct abx500_temp,
+ power_off_work.work);
+
+ dev_warn(&data->pdev->dev, "Power off due to AB8500 thermal warning\n");
+ pm_power_off();
+}
+
+static ssize_t ab8500_show_name(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "ab8500\n");
+}
+
+static ssize_t ab8500_show_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ char *name;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int index = attr->index;
+
+ /*
+ * Make sure these labels correspond to the attribute indexes
+ * used when calling SENSOR_DEVICE_ATTR.
+ * Temperature sensors outside ab8500 (read via GPADC) are marked
+ * with prefix ext_
+ */
+ switch (index) {
+ case 1:
+ name = "ext_rtc_xtal";
+ break;
+ case 2:
+ name = "ext_db8500";
+ break;
+ case 3:
+ name = "bat_temp";
+ break;
+ case 4:
+ name = "bat_ctrl";
+ break;
+ case 5:
+ name = "ab8500";
+ break;
+ default:
+ return -EINVAL;
+ }
+ return sprintf(buf, "%s\n", name);
+}
+
+static int ab8500_is_visible(struct attribute *attr, int n)
+{
+ if (!strcmp(attr->name, "temp5_input") ||
+ !strcmp(attr->name, "temp5_min") ||
+ !strcmp(attr->name, "temp5_max") ||
+ !strcmp(attr->name, "temp5_max_hyst") ||
+ !strcmp(attr->name, "temp5_min_alarm") ||
+ !strcmp(attr->name, "temp5_max_alarm") ||
+ !strcmp(attr->name, "temp5_max_hyst_alarm"))
+ return 0;
+
+ return attr->mode;
+}
+
+static int ab8500_temp_irq_handler(int irq, struct abx500_temp *data)
+{
+ unsigned long delay_in_jiffies;
+ /*
+ * Make sure the magic numbers below correspond to the node
+ * used for the AB8500 thermal warning from HW.
+ */
+ mutex_lock(&data->lock);
+ data->crit_alarm[4] = 1;
+ mutex_unlock(&data->lock);
+
+ hwmon_notify(data->crit_alarm[4], NULL);
+ sysfs_notify(&data->pdev->dev.kobj, NULL, "temp5_crit_alarm");
+ dev_info(&data->pdev->dev, "AB8500 thermal warning,"
+ " power off in %lu s\n", data->power_off_delay);
+ delay_in_jiffies = msecs_to_jiffies(data->power_off_delay);
+ schedule_delayed_work(&data->power_off_work, delay_in_jiffies);
+ return 0;
+}
+
+int __init ab8500_hwmon_init(struct abx500_temp *data)
+{
+ data->ab8500_gpadc = ab8500_gpadc_get();
+ if (IS_ERR(data->ab8500_gpadc))
+ return PTR_ERR(data->ab8500_gpadc);
+
+ data->ab8500_btemp = ab8500_btemp_get();
+ if (IS_ERR(data->ab8500_btemp))
+ return PTR_ERR(data->ab8500_btemp);
+
+ INIT_DELAYED_WORK(&data->power_off_work, ab8500_thermal_power_off);
+
+ /*
+ * Setup HW defined data.
+ *
+ * Reference hardware (HREF):
+ *
+ * GPADC - ADC_AUX1, connected to NTC R2148 next to RTC_XTAL on HREF
+ * GPADC - ADC_AUX2, connected to NTC R2150 near DB8500 on HREF
+ * Hence, temp#_min/max/max_hyst refer to millivolts and not
+ * millidegrees.
+ * This is not the case for BAT_CTRL, where millidegrees is used.
+ *
+ * HREF HW does not support reading AB8500 temperature. BUT an
+ * AB8500 IRQ will be launched if die crit temp limit is reached.
+ *
+ * Make sure indexes correspond to the attribute indexes
+ * used when calling SENSOR_DEVICE_ATTR
+ */
+ data->gpadc_addr[0] = ADC_AUX1;
+ data->gpadc_addr[1] = ADC_AUX2;
+ data->gpadc_addr[2] = BTEMP_BALL;
+ data->gpadc_addr[3] = BAT_CTRL;
+ data->gpadc_addr[4] = DIE_TEMP;
+ data->power_off_delay = DEFAULT_POWER_OFF_DELAY;
+ data->monitored_sensors = NUM_MONITORED_SENSORS;
+
+ data->ops.read_sensor = ab8500_read_sensor;
+ data->ops.irq_handler = ab8500_temp_irq_handler;
+ data->ops.show_name = ab8500_show_name;
+ data->ops.show_label = ab8500_show_label;
+ data->ops.is_visible = ab8500_is_visible;
+
+ return 0;
+}
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
new file mode 100644
index 00000000000..7aa9994c54a
--- /dev/null
+++ b/drivers/hwmon/abx500.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Persson <martin.persson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Note:
+ *
+ * ABX500 does not provide auto ADC, so to monitor the required
+ * temperatures, a periodic work is used. It is more important
+ * to not wake up the CPU than to perform this job, hence the use
+ * of a deferred delay.
+ *
+ * A deferred delay for thermal monitor is considered safe because:
+ * If the chip gets too hot during a sleep state it's most likely
+ * due to external factors, such as the surrounding temperature.
+ * I.e. no SW decisions will make any difference.
+ *
+ * If/when the ABX500 thermal warning temperature is reached (threshold
+ * cannot be changed by SW), an interrupt is set and the driver
+ * notifies user space via a sysfs event.
+ *
+ * If/when ABX500 thermal shutdown temperature is reached a hardware
+ * shutdown of the ABX500 will occur.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <asm/mach-types.h>
+
+#include "abx500.h"
+
+#define DEFAULT_MONITOR_DELAY 1000
+
+/*
+ * Thresholds are considered inactive if set to 0.
+ * To avoid confusion for user space applications,
+ * the temp monitor delay is set to 0 if all thresholds
+ * are 0.
+ */
+static bool find_active_thresholds(struct abx500_temp *data)
+{
+ int i;
+ for (i = 0; i < data->monitored_sensors; i++)
+ if (data->max[i] != 0 || data->max_hyst[i] != 0
+ || data->min[i] != 0)
+ return true;
+
+ dev_dbg(&data->pdev->dev, "No active thresholds,"
+ "cancel deferred job (if it exists)"
+ "and reset temp monitor delay\n");
+ cancel_delayed_work_sync(&data->work);
+ return false;
+}
+
+static inline void schedule_monitor(struct abx500_temp *data)
+{
+ unsigned long delay_in_jiffies;
+ delay_in_jiffies = msecs_to_jiffies(data->gpadc_monitor_delay);
+ schedule_delayed_work(&data->work, delay_in_jiffies);
+}
+
+static inline void gpadc_monitor_exit(struct abx500_temp *data)
+{
+ cancel_delayed_work_sync(&data->work);
+}
+
+static void gpadc_monitor(struct work_struct *work)
+{
+ unsigned long delay_in_jiffies;
+ int val, i, ret;
+ /* Container for alarm node name */
+ char alarm_node[30];
+
+ bool updated_min_alarm = false;
+ bool updated_max_alarm = false;
+ bool updated_max_hyst_alarm = false;
+ struct abx500_temp *data = container_of(work, struct abx500_temp,
+ work.work);
+
+ for (i = 0; i < data->monitored_sensors; i++) {
+ /* Thresholds are considered inactive if set to 0 */
+ if (data->max[i] == 0 && data->max_hyst[i] == 0
+ && data->min[i] == 0)
+ continue;
+
+ val = data->ops.read_sensor(data, data->gpadc_addr[i]);
+ if (val < 0) {
+ dev_err(&data->pdev->dev, "GPADC read failed\n");
+ continue;
+ }
+
+ mutex_lock(&data->lock);
+ if (data->min[i] != 0) {
+ if (val < data->min[i]) {
+ if (data->min_alarm[i] == 0) {
+ data->min_alarm[i] = 1;
+ updated_min_alarm = true;
+ }
+ } else {
+ if (data->min_alarm[i] == 1) {
+ data->min_alarm[i] = 0;
+ updated_min_alarm = true;
+ }
+ }
+
+ }
+ if (data->max[i] != 0) {
+ if (val > data->max[i]) {
+ if (data->max_alarm[i] == 0) {
+ data->max_alarm[i] = 1;
+ updated_max_alarm = true;
+ }
+ } else {
+ if (data->max_alarm[i] == 1) {
+ data->max_alarm[i] = 0;
+ updated_max_alarm = true;
+ }
+ }
+
+ }
+ if (data->max_hyst[i] != 0) {
+ if (val > data->max_hyst[i]) {
+ if (data->max_hyst_alarm[i] == 0) {
+ data->max_hyst_alarm[i] = 1;
+ updated_max_hyst_alarm = true;
+ }
+ } else {
+ if (data->max_hyst_alarm[i] == 1) {
+ data->max_hyst_alarm[i] = 0;
+ updated_max_hyst_alarm = true;
+ }
+ }
+ }
+ mutex_unlock(&data->lock);
+
+ /* hwmon attr index starts at 1, thus "i+1" below */
+ if (updated_min_alarm) {
+ ret = snprintf(alarm_node, 16, "temp%d_min_alarm",
+ (i + 1));
+ if (ret < 0) {
+ dev_err(&data->pdev->dev,
+ "Unable to update alarm node (%d)",
+ ret);
+ break;
+ }
+ sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+ }
+ if (updated_max_alarm) {
+ ret = snprintf(alarm_node, 16, "temp%d_max_alarm",
+ (i + 1));
+ if (ret < 0) {
+ dev_err(&data->pdev->dev,
+ "Unable to update alarm node (%d)",
+ ret);
+ break;
+ }
+ hwmon_notify(data->max_alarm[i], NULL);
+ sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+ }
+ if (updated_max_hyst_alarm) {
+ ret = snprintf(alarm_node, 21, "temp%d_max_hyst_alarm",
+ (i + 1));
+ if (ret < 0) {
+ dev_err(&data->pdev->dev,
+ "Unable to update alarm node (%d)",
+ ret);
+ break;
+ }
+ sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+ }
+ }
+ delay_in_jiffies = msecs_to_jiffies(data->gpadc_monitor_delay);
+ schedule_delayed_work(&data->work, delay_in_jiffies);
+}
+
+static ssize_t set_temp_monitor_delay(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int res;
+ unsigned long delay_in_s;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+
+ res = strict_strtoul(buf, 10, &delay_in_s);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ data->gpadc_monitor_delay = delay_in_s * 1000;
+
+ if (find_active_thresholds(data))
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_temp_power_off_delay(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int res;
+ unsigned long delay_in_s;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+
+ res = strict_strtoul(buf, 10, &delay_in_s);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ data->power_off_delay = delay_in_s * 1000;
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t show_temp_monitor_delay(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ /* return time in s, not ms */
+ return sprintf(buf, "%lu\n", (data->gpadc_monitor_delay) / 1000);
+}
+
+static ssize_t show_temp_power_off_delay(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ /* return time in s, not ms */
+ return sprintf(buf, "%lu\n", (data->power_off_delay) / 1000);
+}
+
+/* HWMON sysfs interface */
+static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ /*
+ * To avoid confusion between sensor label and chip name, the function
+ * "show_label" is not used to return the chip name.
+ */
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ return data->ops.show_name(dev, devattr, buf);
+}
+
+static ssize_t show_label(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ return data->ops.show_label(dev, devattr, buf);
+}
+
+static ssize_t show_input(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ u8 gpadc_addr = data->gpadc_addr[attr->index - 1];
+
+ val = data->ops.read_sensor(data, gpadc_addr);
+ if (val < 0) {
+ dev_err(&data->pdev->dev, "GPADC read failed\n");
+ return val;
+ }
+
+ return sprintf(buf, "%d\n", val);
+}
+
+/* set functions (RW nodes) */
+static ssize_t set_min(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ /*
+ * Threshold is considered inactive if set to 0
+ * hwmon attr index starts at 1, thus "attr->index-1" below
+ */
+ if (val == 0)
+ data->min_alarm[attr->index - 1] = 0;
+
+ data->min[attr->index - 1] = val;
+
+ if (val == 0)
+ (void) find_active_thresholds(data);
+ else
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ /*
+ * Threshold is considered inactive if set to 0
+ * hwmon attr index starts at 1, thus "attr->index-1" below
+ */
+ if (val == 0)
+ data->max_alarm[attr->index - 1] = 0;
+
+ data->max[attr->index - 1] = val;
+
+ if (val == 0)
+ (void) find_active_thresholds(data);
+ else
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_max_hyst(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ /*
+ * Threshold is considered inactive if set to 0
+ * hwmon attr index starts at 1, thus "attr->index-1" below
+ */
+ if (val == 0)
+ data->max_hyst_alarm[attr->index - 1] = 0;
+
+ data->max_hyst[attr->index - 1] = val;
+
+ if (val == 0)
+ (void) find_active_thresholds(data);
+ else
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+/*
+ * show functions (RO nodes)
+ */
+static ssize_t show_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->min[attr->index - 1]);
+}
+
+static ssize_t show_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max[attr->index - 1]);
+}
+
+static ssize_t show_max_hyst(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max_hyst[attr->index - 1]);
+}
+
+/* Alarms */
+static ssize_t show_min_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->min_alarm[attr->index - 1]);
+}
+
+static ssize_t show_max_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max_alarm[attr->index - 1]);
+}
+
+static ssize_t show_max_hyst_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max_hyst_alarm[attr->index - 1]);
+}
+
+static ssize_t show_crit_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->crit_alarm[attr->index - 1]);
+}
+
+static mode_t abx500_attrs_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ return data->ops.is_visible(a, n);
+}
+
+static SENSOR_DEVICE_ATTR(temp_monitor_delay, S_IRUGO | S_IWUSR,
+ show_temp_monitor_delay, set_temp_monitor_delay, 0);
+static SENSOR_DEVICE_ATTR(temp_power_off_delay, S_IRUGO | S_IWUSR,
+ show_temp_power_off_delay,
+ set_temp_power_off_delay, 0);
+
+/* Chip name, required by hwmon*/
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+
+/* GPADC - SENSOR1 */
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_min, set_min, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 1);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_min_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_max_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 1);
+
+/* GPADC - SENSOR2 */
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min, set_min, 2);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max, set_max, 2);
+static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 2);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_min_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_max_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 2);
+
+/* GPADC - SENSOR3 */
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, show_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_input, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min, set_min, 3);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max, 3);
+static SENSOR_DEVICE_ATTR(temp3_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 3);
+static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_min_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_max_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 3);
+
+/* GPADC - SENSOR4 */
+static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, show_label, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_input, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_min, S_IWUSR | S_IRUGO, show_min, set_min, 4);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_max, set_max, 4);
+static SENSOR_DEVICE_ATTR(temp4_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 4);
+static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_min_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_max_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 4);
+
+/* GPADC - SENSOR5 */
+static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, show_label, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_input, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_min, S_IWUSR | S_IRUGO, show_min, set_min, 5);
+static SENSOR_DEVICE_ATTR(temp5_max, S_IWUSR | S_IRUGO, show_max, set_max, 5);
+static SENSOR_DEVICE_ATTR(temp5_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 5);
+static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_min_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_max_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO,
+ show_crit_alarm, NULL, 5);
+
+struct attribute *abx500_temp_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_temp_monitor_delay.dev_attr.attr,
+ &sensor_dev_attr_temp_power_off_delay.dev_attr.attr,
+ /* GPADC SENSOR1 */
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR2 */
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR3 */
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR4 */
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_min.dev_attr.attr,
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR5 */
+ &sensor_dev_attr_temp5_label.dev_attr.attr,
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_min.dev_attr.attr,
+ &sensor_dev_attr_temp5_max.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp5_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_hyst_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group abx500_temp_group = {
+ .attrs = abx500_temp_attributes,
+ .is_visible = abx500_attrs_visible,
+};
+
+static irqreturn_t abx500_temp_irq_handler(int irq, void *irq_data)
+{
+ struct platform_device *pdev = irq_data;
+ struct abx500_temp *data = platform_get_drvdata(pdev);
+ data->ops.irq_handler(irq, data);
+ return IRQ_HANDLED;
+}
+
+static int setup_irqs(struct platform_device *pdev)
+{
+ int ret;
+ int irq = platform_get_irq_byname(pdev, "ABX500_TEMP_WARM");
+
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Get irq by name failed\n");
+ return irq;
+ }
+
+ ret = request_threaded_irq(irq, NULL, abx500_temp_irq_handler,
+ IRQF_NO_SUSPEND, "abx500-temp", pdev);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int __devinit abx500_temp_probe(struct platform_device *pdev)
+{
+ struct abx500_temp *data;
+ int err;
+
+ data = kzalloc(sizeof(struct abx500_temp), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+ mutex_init(&data->lock);
+
+ /* Chip specific initialization */
+ if (!machine_is_u5500())
+ err = ab8500_hwmon_init(data);
+ else
+ err = ab5500_hwmon_init(data);
+ if (err < 0) {
+ dev_err(&pdev->dev, "abx500 init failed");
+ goto exit;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
+ goto exit;
+ }
+
+ INIT_DELAYED_WORK_DEFERRABLE(&data->work, gpadc_monitor);
+ data->gpadc_monitor_delay = DEFAULT_MONITOR_DELAY;
+
+ platform_set_drvdata(pdev, data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &abx500_temp_group);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err);
+ goto exit_platform_data;
+ }
+
+ err = setup_irqs(pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "irq setup failed (%d)\n", err);
+ goto exit_sysfs_group;
+ }
+ return 0;
+
+exit_sysfs_group:
+ sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
+exit_platform_data:
+ hwmon_device_unregister(data->hwmon_dev);
+ platform_set_drvdata(pdev, NULL);
+exit:
+ kfree(data->gpadc_auto);
+ kfree(data);
+ return err;
+}
+
+static int __devexit abx500_temp_remove(struct platform_device *pdev)
+{
+ struct abx500_temp *data = platform_get_drvdata(pdev);
+
+ gpadc_monitor_exit(data);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
+ platform_set_drvdata(pdev, NULL);
+ kfree(data->gpadc_auto);
+ kfree(data);
+ return 0;
+}
+
+/* No action required in suspend/resume, thus the lack of functions */
+static struct platform_driver abx500_temp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "abx500-temp",
+ },
+ .probe = abx500_temp_probe,
+ .remove = __devexit_p(abx500_temp_remove),
+};
+
+static int __init abx500_temp_init(void)
+{
+ return platform_driver_register(&abx500_temp_driver);
+}
+
+static void __exit abx500_temp_exit(void)
+{
+ platform_driver_unregister(&abx500_temp_driver);
+}
+
+MODULE_AUTHOR("Martin Persson <martin.persson@stericsson.com>");
+MODULE_DESCRIPTION("ABX500 temperature driver");
+MODULE_LICENSE("GPL");
+
+module_init(abx500_temp_init)
+module_exit(abx500_temp_exit)
diff --git a/drivers/hwmon/abx500.h b/drivers/hwmon/abx500.h
new file mode 100644
index 00000000000..9fe28dac28f
--- /dev/null
+++ b/drivers/hwmon/abx500.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * License terms: GNU General Public License v2
+ * Author: Martin Persson <martin.persson@stericsson.com>
+ */
+
+#ifndef _ABX500_H
+#define _ABX500_H
+
+#define NUM_SENSORS 5
+
+struct ab8500_gpadc;
+struct ab5500_gpadc;
+struct ab8500_btemp;
+struct ab5500_btemp;
+struct adc_auto_input;
+struct abx500_temp;
+
+/**
+ * struct abx500_temp_ops - abx500 chip specific ops
+ * @read_sensor: reads gpadc output
+ * @irq_handler: irq handler
+ * @show_name: hwmon device name
+ * @show_label: hwmon attribute label
+ * @is_visible: is attribute visible
+ */
+struct abx500_temp_ops {
+ int (*read_sensor)(struct abx500_temp *, u8);
+ int (*irq_handler)(int, struct abx500_temp *);
+ ssize_t (*show_name)(struct device *,
+ struct device_attribute *, char *);
+ ssize_t (*show_label) (struct device *,
+ struct device_attribute *, char *);
+ int (*is_visible)(struct attribute *, int);
+};
+
+/**
+ * struct abx500_temp - representation of temp mon device
+ * @pdev: platform device
+ * @hwmon_dev: hwmon device
+ * @ab8500_gpadc: gpadc interface for ab8500
+ * @ab5500_gpadc: gpadc interface for ab5500
+ * @btemp: battery temperature interface for ab8500
+ * @adc_auto_input: gpadc auto trigger
+ * @gpadc_addr: gpadc channel address
+ * @temp: sensor temperature input value
+ * @min: sensor temperature min value
+ * @max: sensor temperature max value
+ * @max_hyst: sensor temperature hysteresis value for max limit
+ * @crit: sensor temperature critical value
+ * @min_alarm: sensor temperature min alarm
+ * @max_alarm: sensor temperature max alarm
+ * @max_hyst_alarm: sensor temperature hysteresis alarm
+ * @crit_alarm: sensor temperature critical value alarm
+ * @work: delayed work scheduled to monitor temperature periodically
+ * @power_off_work: delayed work scheduled to power off the system
+ * when critical temperature is reached
+ * @lock: mutex
+ * @gpadc_monitor_delay: delay between temperature readings in ms
+ * @power_off_delay: delay before power off in ms
+ * @monitored_sensors: number of monitored sensors
+ */
+struct abx500_temp {
+ struct platform_device *pdev;
+ struct device *hwmon_dev;
+ struct ab8500_gpadc *ab8500_gpadc;
+ struct ab5500_gpadc *ab5500_gpadc;
+ struct ab8500_btemp *ab8500_btemp;
+ struct ab5500_btemp *ab5500_btemp;
+ struct adc_auto_input *gpadc_auto;
+ struct abx500_temp_ops ops;
+ u8 gpadc_addr[NUM_SENSORS];
+ unsigned long temp[NUM_SENSORS];
+ unsigned long min[NUM_SENSORS];
+ unsigned long max[NUM_SENSORS];
+ unsigned long max_hyst[NUM_SENSORS];
+ unsigned long crit[NUM_SENSORS];
+ unsigned long min_alarm[NUM_SENSORS];
+ unsigned long max_alarm[NUM_SENSORS];
+ unsigned long max_hyst_alarm[NUM_SENSORS];
+ unsigned long crit_alarm[NUM_SENSORS];
+ struct delayed_work work;
+ struct delayed_work power_off_work;
+ struct mutex lock;
+ /* Delay (ms) between temperature readings */
+ unsigned long gpadc_monitor_delay;
+ /* Delay (ms) before power off */
+ unsigned long power_off_delay;
+ int monitored_sensors;
+};
+
+int ab8500_hwmon_init(struct abx500_temp *data) __init;
+int ab5500_hwmon_init(struct abx500_temp *data) __init;
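+
+/*
+ * Illustrative sketch only, not part of the original patch: a chip-specific
+ * init such as ab8500_hwmon_init() is expected to wire up the ops table and
+ * record how many sensors it monitors. The helper name below is hypothetical.
+ */
+static inline void abx500_temp_fill_ops_example(struct abx500_temp *data,
+ const struct abx500_temp_ops *chip_ops, int num_monitored)
+{
+ data->ops = *chip_ops;
+ data->monitored_sensors = num_monitored;
+}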
+
+#endif /* _ABX500_H */
diff --git a/drivers/hwmon/dbx500.c b/drivers/hwmon/dbx500.c
new file mode 100644
index 00000000000..c034b48f8dd
--- /dev/null
+++ b/drivers/hwmon/dbx500.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ *
+ * Author: WenHai Fang <wenhai.h.fang@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+
+/*
+ * Default measure period to 0xFF x cycle32k
+ */
+#define DEFAULT_MEASURE_TIME 0xFF
+
+/*
+ * Default critical sensor temperature
+ */
+#define DEFAULT_CRITICAL_TEMP 85
+
+/* This driver monitors DB thermal */
+#define NUM_SENSORS 1
+
+struct dbx500_temp {
+ struct platform_device *pdev;
+ struct device *hwmon_dev;
+ unsigned char min[NUM_SENSORS];
+ unsigned char max[NUM_SENSORS];
+ unsigned char crit[NUM_SENSORS];
+ unsigned char min_alarm[NUM_SENSORS];
+ unsigned char max_alarm[NUM_SENSORS];
+ unsigned short measure_time;
+ bool monitoring_active;
+ struct mutex lock;
+};
+
+static inline void start_temp_monitoring(struct dbx500_temp *data,
+ const int index)
+{
+ unsigned int i;
+
+ /* determine if there are any sensors worth monitoring */
+ for (i = 0; i < NUM_SENSORS; i++)
+ if (data->min[i] || data->max[i])
+ goto start_monitoring;
+
+ return;
+
+start_monitoring:
+ /* kick off the monitor job */
+ data->min_alarm[index] = 0;
+ data->max_alarm[index] = 0;
+
+ (void) prcmu_start_temp_sense(data->measure_time);
+ data->monitoring_active = true;
+}
+
+static inline void stop_temp_monitoring(struct dbx500_temp *data)
+{
+ if (data->monitoring_active) {
+ (void) prcmu_stop_temp_sense();
+ data->monitoring_active = false;
+ }
+}
+
+/* HWMON sysfs interface */
+static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "dbx500\n");
+}
+
+static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ return show_name(dev, devattr, buf);
+}
+
+/* set functions (RW nodes) */
+static ssize_t set_min(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ val &= 0xFF;
+ if (val > data->max[attr->index - 1])
+ val = data->max[attr->index - 1];
+
+ data->min[attr->index - 1] = val;
+
+ stop_temp_monitoring(data);
+
+ (void) prcmu_config_hotmon(data->min[attr->index - 1],
+ data->max[attr->index - 1]);
+
+ start_temp_monitoring(data, (attr->index - 1));
+
+ mutex_unlock(&data->lock);
+ return count;
+}
+
+static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ val &= 0xFF;
+ if (val < data->min[attr->index - 1])
+ val = data->min[attr->index - 1];
+
+ data->max[attr->index - 1] = val;
+
+ stop_temp_monitoring(data);
+
+ (void) prcmu_config_hotmon(data->min[attr->index - 1],
+ data->max[attr->index - 1]);
+
+ start_temp_monitoring(data, (attr->index - 1));
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_crit(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ val &= 0xFF;
+ data->crit[attr->index - 1] = val;
+ (void) prcmu_config_hotdog(data->crit[attr->index - 1]);
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+/*
+ * show functions (RO nodes)
+ * Notice that min/max/crit refer to whole degrees Celsius
+ */
+static ssize_t show_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->min[attr->index - 1]);
+}
+
+static ssize_t show_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->max[attr->index - 1]);
+}
+
+static ssize_t show_crit(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->crit[attr->index - 1]);
+}
+
+/* Alarms */
+static ssize_t show_min_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->min_alarm[attr->index - 1]);
+}
+
+static ssize_t show_max_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->max_alarm[attr->index - 1]);
+}
+
+/* Chip name, required by hwmon */
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_min, set_min, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 1);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+ show_crit, set_crit, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_min_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_max_alarm, NULL, 1);
+
+static struct attribute *dbx500_temp_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dbx500_temp_group = {
+ .attrs = dbx500_temp_attributes,
+};
+
+static irqreturn_t prcmu_hotmon_low_irq_handler(int irq, void *irq_data)
+{
+ struct platform_device *pdev = irq_data;
+ struct dbx500_temp *data = platform_get_drvdata(pdev);
+
+ mutex_lock(&data->lock);
+ data->min_alarm[0] = 1;
+ mutex_unlock(&data->lock);
+
+ sysfs_notify(&pdev->dev.kobj, NULL, "temp1_min_alarm");
+ dev_dbg(&pdev->dev, "DBX500 thermal low warning\n");
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prcmu_hotmon_high_irq_handler(int irq, void *irq_data)
+{
+ struct platform_device *pdev = irq_data;
+ struct dbx500_temp *data = platform_get_drvdata(pdev);
+
+ mutex_lock(&data->lock);
+ data->max_alarm[0] = 1;
+ mutex_unlock(&data->lock);
+
+ hwmon_notify(data->max_alarm[0], NULL);
+ sysfs_notify(&pdev->dev.kobj, NULL, "temp1_max_alarm");
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit dbx500_temp_probe(struct platform_device *pdev)
+{
+ struct dbx500_temp *data;
+ int err = 0, i;
+ int irq;
+
+ dev_dbg(&pdev->dev, "dbx500_temp: Function dbx500_temp_probe.\n");
+
+ data = kzalloc(sizeof(struct dbx500_temp), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Get IRQ_HOTMON_LOW failed\n");
+ err = irq;
+ goto exit;
+ }
+
+ err = request_threaded_irq(irq, NULL,
+ prcmu_hotmon_low_irq_handler,
+ IRQF_NO_SUSPEND,
+ "dbx500_temp_low", pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "dbx500: Failed allocate HOTMON_LOW.\n");
+ goto exit;
+ } else {
+ dev_dbg(&pdev->dev, "dbx500: Succeed allocate HOTMON_LOW.\n");
+ }
+
+ irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Get IRQ_HOTMON_HIGH failed\n");
+ err = irq;
+ goto exit;
+ }
+
+ err = request_threaded_irq(irq, NULL,
+ prcmu_hotmon_high_irq_handler,
+ IRQF_NO_SUSPEND,
+ "dbx500_temp_high", pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "dbx500: Failed allocate HOTMON_HIGH.\n");
+ goto exit;
+ } else {
+ dev_dbg(&pdev->dev, "dbx500: Succeed allocate HOTMON_HIGH.\n");
+ }
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
+ goto exit;
+ }
+
+ for (i = 0; i < NUM_SENSORS; i++) {
+ data->min[i] = 0;
+ data->max[i] = 0;
+ data->crit[i] = DEFAULT_CRITICAL_TEMP;
+ data->min_alarm[i] = 0;
+ data->max_alarm[i] = 0;
+ }
+
+ mutex_init(&data->lock);
+
+ data->pdev = pdev;
+ data->measure_time = DEFAULT_MEASURE_TIME;
+ data->monitoring_active = false;
+
+ /* set PRCMU to disable platform when we get to the critical temp */
+ (void) prcmu_config_hotdog(DEFAULT_CRITICAL_TEMP);
+
+ platform_set_drvdata(pdev, data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &dbx500_temp_group);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err);
+ goto exit_platform_data;
+ }
+
+ return 0;
+
+exit_platform_data:
+ platform_set_drvdata(pdev, NULL);
+exit:
+ kfree(data);
+ return err;
+}
+
+static int __devexit dbx500_temp_remove(struct platform_device *pdev)
+{
+ struct dbx500_temp *data = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &dbx500_temp_group);
+ platform_set_drvdata(pdev, NULL);
+ kfree(data);
+ return 0;
+}
+
+/* No action required in suspend/resume, thus the lack of functions */
+static struct platform_driver dbx500_temp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "dbx500_temp",
+ },
+ .probe = dbx500_temp_probe,
+ .remove = __devexit_p(dbx500_temp_remove),
+};
+
+static int __init dbx500_temp_init(void)
+{
+ return platform_driver_register(&dbx500_temp_driver);
+}
+
+static void __exit dbx500_temp_exit(void)
+{
+ platform_driver_unregister(&dbx500_temp_driver);
+}
+
+MODULE_AUTHOR("WenHai Fang <wenhai.h.fang@stericsson.com>");
+MODULE_DESCRIPTION("DBX500 temperature driver");
+MODULE_LICENSE("GPL");
+
+module_init(dbx500_temp_init)
+module_exit(dbx500_temp_exit)
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 6460487e41b..ac718a57b88 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -21,6 +21,7 @@
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/notifier.h>
#define HWMON_ID_PREFIX "hwmon"
#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
@@ -29,6 +30,8 @@ static struct class *hwmon_class;
static DEFINE_IDA(hwmon_ida);
+static BLOCKING_NOTIFIER_HEAD(hwmon_notifier_list);
+
/**
* hwmon_device_register - register w/ hwmon
* @dev: the device to register
@@ -73,6 +76,24 @@ void hwmon_device_unregister(struct device *dev)
"hwmon_device_unregister() failed: bad class ID!\n");
}
+int hwmon_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&hwmon_notifier_list, nb);
+}
+EXPORT_SYMBOL(hwmon_notifier_register);
+
+int hwmon_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&hwmon_notifier_list, nb);
+}
+EXPORT_SYMBOL(hwmon_notifier_unregister);
+
+void hwmon_notify(unsigned long val, void *v)
+{
+ blocking_notifier_call_chain(&hwmon_notifier_list, val, v);
+}
+EXPORT_SYMBOL(hwmon_notify);
+
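+/*
+ * Usage sketch (illustrative only, not taken from this file): a client
+ * driver would subscribe to events posted through hwmon_notify() roughly
+ * as follows; "my_hwmon_event" is a hypothetical callback.
+ *
+ * static int my_hwmon_event(struct notifier_block *nb,
+ * unsigned long val, void *data)
+ * {
+ * pr_info("hwmon event: %lu\n", val);
+ * return NOTIFY_OK;
+ * }
+ *
+ * static struct notifier_block my_nb = {
+ * .notifier_call = my_hwmon_event,
+ * };
+ *
+ * hwmon_notifier_register(&my_nb);
+ */
+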
static void __init hwmon_pci_quirks(void)
{
#if defined CONFIG_X86 && defined CONFIG_PCI
diff --git a/drivers/hwmon/l3g4200d.c b/drivers/hwmon/l3g4200d.c
new file mode 100644
index 00000000000..ffe3e1a9730
--- /dev/null
+++ b/drivers/hwmon/l3g4200d.c
@@ -0,0 +1,717 @@
+/*
+ * ST L3G4200D 3-Axis Gyroscope Driver
+ *
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Chethan Krishna N <chethan.krishna@stericsson.com> for ST-Ericsson
+ * Licence terms: GNU General Public Licence (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+
+#include <linux/l3g4200d.h>
+#include <linux/regulator/consumer.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+/* l3g4200d gyroscope registers */
+
+#define WHO_AM_I 0x0F
+
+#define CTRL_REG1 0x20 /* CTRL REG1 */
+#define CTRL_REG2 0x21 /* CTRL REG2 */
+#define CTRL_REG3 0x22 /* CTRL_REG3 */
+#define CTRL_REG4 0x23 /* CTRL_REG4 */
+#define CTRL_REG5 0x24 /* CTRL_REG5 */
+#define OUT_TEMP 0x26 /* OUT_TEMP */
+
+#define AXISDATA_REG 0x28
+
+/** Registers Contents */
+
+#define WHOAMI_L3G4200D 0x00D3 /* Expected content for WAI register */
+
+/* CTRL_REG1 */
+#define PM_OFF 0x00
+#define PM_ON 0x01
+#define ENABLE_ALL_AXES 0x07
+#define BW00 0x00
+#define BW01 0x10
+#define BW10 0x20
+#define BW11 0x30
+#define ODR00 0x00 /* ODR = 100Hz */
+#define ODR01 0x40 /* ODR = 200Hz */
+#define ODR10 0x80 /* ODR = 400Hz */
+#define ODR11 0xC0 /* ODR = 800Hz */
+#define L3G4200D_PM_BIT 3
+#define L3G4200D_PM_MASK (0x01 << L3G4200D_PM_BIT)
+#define L3G4200D_ODR_BIT 4
+#define L3G4200D_ODR_MASK (0x0F << L3G4200D_ODR_BIT)
+#define L3G4200D_ODR_MIN_VAL 0x00
+#define L3G4200D_ODR_MAX_VAL 0x0F
+
+/* CTRL_REG4 */
+#define FS250 0x00
+#define FS500 0x01
+#define FS2000 0x03
+#define BDU_ENABLE 0x80
+#define L3G4200D_FS_BIT 6
+#define L3G4200D_FS_MASK (0x3 << L3G4200D_FS_BIT)
+
+/* multiple byte transfer enable */
+#define MULTIPLE_I2C_TR 0x80
+
+/* device status defines */
+#define DEVICE_OFF 0
+#define DEVICE_ON 1
+#define DEVICE_SUSPENDED 2
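+
+/*
+ * Illustrative helper, not used by the original driver: composes a CTRL_REG1
+ * value from unshifted power-mode and output-data-rate fields using the
+ * bit/mask definitions above.
+ */
+static inline u8 l3g4200d_ctrl_reg1_value(u8 powermode, u8 odr)
+{
+ return ((powermode << L3G4200D_PM_BIT) & L3G4200D_PM_MASK) |
+ ((odr << L3G4200D_ODR_BIT) & L3G4200D_ODR_MASK) |
+ ENABLE_ALL_AXES;
+}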
+
+/*
+ * L3G4200D gyroscope data
+ * brief structure containing gyroscope values for yaw, pitch and roll in
+ * signed short
+ */
+
+struct l3g4200d_gyro_values {
+ short x; /* x-axis angular rate data. */
+ short y; /* y-axis angular rate data. */
+ short z; /* z-axis angular rate data. */
+};
+
+struct l3g4200d_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct l3g4200d_gyro_values data;
+ struct l3g4200d_gyr_platform_data pdata;
+ struct regulator *regulator;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+ unsigned char powermode;
+ unsigned char odr;
+ unsigned char range;
+ int device_status;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void l3g4200d_early_suspend(struct early_suspend *ddata);
+static void l3g4200d_late_resume(struct early_suspend *ddata);
+#endif
+
+static int l3g4200d_write(struct l3g4200d_data *ddata, u8 reg,
+ u8 val, char *msg)
+{
+ int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_write_byte_data failed error %d\
+ Register (%s)\n", ret, msg);
+ return ret;
+}
+
+static int l3g4200d_read(struct l3g4200d_data *ddata, u8 reg, char *msg)
+{
+ int ret = i2c_smbus_read_byte_data(ddata->client, reg);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_read_byte_data failed error %d\
+ Register (%s)\n", ret, msg);
+ return ret;
+}
+
+static int l3g4200d_readdata(struct l3g4200d_data *ddata)
+{
+ unsigned char gyro_data[6];
+ short data[3];
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(ddata->client,
+ AXISDATA_REG | MULTIPLE_I2C_TR, 6, gyro_data);
+ if (ret < 0) {
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_read_byte_data failed error %d\
+ Register AXISDATA_REG\n", ret);
+ return ret;
+ }
+
+ data[0] = (short) (((gyro_data[1]) << 8) | gyro_data[0]);
+ data[1] = (short) (((gyro_data[3]) << 8) | gyro_data[2]);
+ data[2] = (short) (((gyro_data[5]) << 8) | gyro_data[4]);
+
+ data[ddata->pdata.axis_map_x] = ddata->pdata.negative_x ?
+ -data[ddata->pdata.axis_map_x] : data[ddata->pdata.axis_map_x];
+ data[ddata->pdata.axis_map_y] = ddata->pdata.negative_y ?
+ -data[ddata->pdata.axis_map_y] : data[ddata->pdata.axis_map_y];
+ data[ddata->pdata.axis_map_z] = ddata->pdata.negative_z ?
+ -data[ddata->pdata.axis_map_z] : data[ddata->pdata.axis_map_z];
+
+ ddata->data.x = data[ddata->pdata.axis_map_x];
+ ddata->data.y = data[ddata->pdata.axis_map_y];
+ ddata->data.z = data[ddata->pdata.axis_map_z];
+
+ return ret;
+}
+
+static ssize_t l3g4200d_show_gyrodata(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct l3g4200d_data *ddata = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->powermode == PM_OFF ||
+ ddata->device_status == DEVICE_SUSPENDED) {
+ mutex_unlock(&ddata->lock);
+ return ret;
+ }
+
+ ret = l3g4200d_readdata(ddata);
+
+ if (ret < 0) {
+ mutex_unlock(&ddata->lock);
+ return ret;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return sprintf(buf, "%8x:%8x:%8x\n", ddata->data.x, ddata->data.y,
+ ddata->data.z);
+}
+
+static ssize_t l3g4200d_show_range(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct l3g4200d_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->range);
+}
+
+static ssize_t l3g4200d_store_range(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct l3g4200d_data *ddata = platform_get_drvdata(pdev);
+ long received_value;
+ unsigned char value;
+ int error;
+
+ error = strict_strtol(buf, 0, &received_value);
+ if (error)
+ return error;
+
+ /* check if the received range is in valid range */
+ if (received_value < FS250 || received_value > FS2000)
+ return -EINVAL;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->powermode == PM_OFF) {
+ dev_info(&ddata->client->dev,
+ "The device is switched off, turn it ON using powermode\n");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ /* enable the BDU bit */
+ value = BDU_ENABLE;
+ value |= ((received_value << L3G4200D_FS_BIT) & L3G4200D_FS_MASK);
+
+ ddata->range = received_value;
+
+ error = l3g4200d_write(ddata, CTRL_REG4, value, "CTRL_REG4");
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ mutex_unlock(&ddata->lock);
+ return count;
+}
+
+static ssize_t l3g4200d_show_datarate(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct l3g4200d_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->odr >> L3G4200D_ODR_BIT);
+}
+
+static ssize_t l3g4200d_store_datarate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct l3g4200d_data *ddata = platform_get_drvdata(pdev);
+ long received_value;
+ unsigned char value;
+ int error;
+
+ error = strict_strtol(buf, 0, &received_value);
+ if (error)
+ return error;
+
+ /* check if the received output datarate value is in valid range */
+ if (received_value < L3G4200D_ODR_MIN_VAL ||
+ received_value > L3G4200D_ODR_MAX_VAL)
+ return -EINVAL;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->powermode == PM_OFF) {
+ dev_info(&ddata->client->dev,
+ "The device is switched off, turn it ON using powermode\n");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ /*
+ * read the current contents of CTRL_REG1
+ * retain any bits set other than the odr bits
+ */
+ error = l3g4200d_read(ddata, CTRL_REG1, "CTRL_REG1");
+
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ } else
+ value = error;
+
+ value &= ~L3G4200D_ODR_MASK;
+ value |= ((received_value << L3G4200D_ODR_BIT) & L3G4200D_ODR_MASK);
+
+ ddata->odr = received_value << L3G4200D_ODR_BIT;
+
+ error = l3g4200d_write(ddata, CTRL_REG1, value, "CTRL_REG1");
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ mutex_unlock(&ddata->lock);
+ return count;
+}
+
+static ssize_t l3g4200d_show_powermode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct l3g4200d_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->powermode);
+}
+
+static ssize_t l3g4200d_store_powermode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct l3g4200d_data *ddata = platform_get_drvdata(pdev);
+ long received_value;
+ unsigned char value;
+ int error;
+
+ error = strict_strtol(buf, 0, &received_value);
+ if (error)
+ return error;
+
+ /* check if the received power mode is either 0 or 1 */
+ if (received_value < PM_OFF || received_value > PM_ON)
+ return -EINVAL;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->device_status == DEVICE_SUSPENDED &&
+ received_value == PM_OFF) {
+ ddata->powermode = received_value;
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ /* if sent value is same as current value do nothing */
+ if (ddata->powermode == received_value) {
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ /* turn on the power supply if it was turned off previously */
+ if (ddata->regulator && ddata->powermode == PM_OFF
+ && (ddata->device_status == DEVICE_OFF
+ || ddata->device_status == DEVICE_SUSPENDED)) {
+ regulator_enable(ddata->regulator);
+ ddata->device_status = DEVICE_ON;
+ }
+
+ /*
+ * read the current contents of CTRL_REG1
+ * retain any bits set other than the power bit
+ */
+ error = l3g4200d_read(ddata, CTRL_REG1, "CTRL_REG1");
+
+ if (error < 0) {
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ mutex_unlock(&ddata->lock);
+ return error;
+ } else
+ value = error;
+
+ value &= ~L3G4200D_PM_MASK;
+ value |= ((received_value << L3G4200D_PM_BIT) & L3G4200D_PM_MASK);
+
+ ddata->powermode = received_value;
+
+ error = l3g4200d_write(ddata, CTRL_REG1, value, "CTRL_REG1");
+ if (error < 0) {
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ if (received_value == PM_OFF) {
+ /* set the other configuration values to defaults */
+ ddata->odr = ODR00 | BW00;
+ ddata->range = FS250;
+
+ /* turn off the power supply */
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ }
+ mutex_unlock(&ddata->lock);
+ return count;
+}
+
+static ssize_t l3g4200d_show_gyrotemp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct l3g4200d_data *ddata = platform_get_drvdata(pdev);
+ int ret;
+
+ if (ddata->powermode == PM_OFF ||
+ ddata->device_status == DEVICE_SUSPENDED)
+ return -EINVAL;
+
+ ret = l3g4200d_read(ddata, OUT_TEMP, "OUT_TEMP");
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ret);
+}
+
+static DEVICE_ATTR(gyrodata, S_IRUGO, l3g4200d_show_gyrodata, NULL);
+
+static DEVICE_ATTR(range, S_IRUGO | S_IWUSR,
+ l3g4200d_show_range, l3g4200d_store_range);
+
+static DEVICE_ATTR(datarate, S_IRUGO | S_IWUSR,
+ l3g4200d_show_datarate, l3g4200d_store_datarate);
+
+static DEVICE_ATTR(powermode, S_IRUGO | S_IWUSR,
+ l3g4200d_show_powermode, l3g4200d_store_powermode);
+
+static DEVICE_ATTR(gyrotemp, S_IRUGO, l3g4200d_show_gyrotemp, NULL);
+
+static struct attribute *l3g4200d_attributes[] = {
+ &dev_attr_gyrodata.attr,
+ &dev_attr_range.attr,
+ &dev_attr_datarate.attr,
+ &dev_attr_powermode.attr,
+ &dev_attr_gyrotemp.attr,
+ NULL
+};
+
+static const struct attribute_group l3g4200d_attr_group = {
+ .attrs = l3g4200d_attributes,
+};
+
+static int __devinit l3g4200d_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ int ret = -1;
+ struct l3g4200d_data *ddata = NULL;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+ goto exit;
+
+ ddata = kzalloc(sizeof(struct l3g4200d_data), GFP_KERNEL);
+ if (ddata == NULL) {
+ ret = -ENOMEM;
+ goto error_op_failed;
+ }
+
+ ddata->client = client;
+ i2c_set_clientdata(client, ddata);
+
+ memcpy(&ddata->pdata, client->dev.platform_data, sizeof(ddata->pdata));
+ /* store default values in the data structure */
+ ddata->odr = ODR00 | BW00;
+ ddata->range = FS250;
+ ddata->powermode = PM_OFF;
+ ddata->device_status = DEVICE_OFF;
+
+ dev_set_name(&client->dev, ddata->pdata.name_gyr);
+
+ ddata->regulator = regulator_get(&client->dev, "vdd");
+ if (IS_ERR(ddata->regulator)) {
+ dev_err(&client->dev, "failed to get regulator\n");
+ ret = PTR_ERR(ddata->regulator);
+ ddata->regulator = NULL;
+ }
+
+ if (ddata->regulator) {
+ regulator_enable(ddata->regulator);
+ ddata->device_status = DEVICE_ON;
+ }
+
+ ret = l3g4200d_read(ddata, WHO_AM_I, "WHO_AM_I");
+ if (ret < 0)
+ goto exit_free_regulator;
+
+ if (ret == WHOAMI_L3G4200D)
+ dev_info(&client->dev, "3-Axis Gyroscope device identification: %d\n", ret);
+ else
+ dev_info(&client->dev, "Gyroscope identification did not match\n");
+
+ mutex_init(&ddata->lock);
+
+ ret = sysfs_create_group(&client->dev.kobj, &l3g4200d_attr_group);
+ if (ret)
+ goto exit_free_regulator;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ddata->early_suspend.level =
+ EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ddata->early_suspend.suspend = l3g4200d_early_suspend;
+ ddata->early_suspend.resume = l3g4200d_late_resume;
+ register_early_suspend(&ddata->early_suspend);
+#endif
+
+ /*
+ * turn off the supplies until somebody turns on the device
+ * using l3g4200d_store_powermode
+ */
+ if (ddata->device_status == DEVICE_ON && ddata->regulator) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+
+ return ret;
+
+exit_free_regulator:
+ if (ddata->device_status == DEVICE_ON && ddata->regulator) {
+ regulator_disable(ddata->regulator);
+ regulator_put(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+error_op_failed:
+ kfree(ddata);
+exit:
+ dev_err(&client->dev, "probe function failed %x\n", ret);
+ return ret;
+}
+
+static int __devexit l3g4200d_remove(struct i2c_client *client)
+{
+ struct l3g4200d_data *ddata;
+ ddata = i2c_get_clientdata(client);
+ sysfs_remove_group(&client->dev.kobj, &l3g4200d_attr_group);
+
+ /* safer to turn off the device */
+ if (ddata->powermode != PM_OFF) {
+ l3g4200d_write(ddata, CTRL_REG1, PM_OFF, "CONTROL");
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ regulator_put(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ }
+
+ i2c_set_clientdata(client, NULL);
+ kfree(ddata);
+
+ return 0;
+}
+#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_PM)
+
+static int l3g4200d_do_suspend(struct l3g4200d_data *ddata)
+{
+ int ret;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->powermode == PM_OFF) {
+ mutex_unlock(&ddata->lock);
+ return 0;
+ }
+
+ ret = l3g4200d_write(ddata, CTRL_REG1, PM_OFF, "CONTROL");
+
+ /* turn off the power when suspending the device */
+ if (ddata->regulator)
+ regulator_disable(ddata->regulator);
+
+ ddata->device_status = DEVICE_SUSPENDED;
+
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
+
+static int l3g4200d_do_resume(struct l3g4200d_data *ddata)
+{
+ unsigned char range_value;
+ unsigned char shifted_powermode = (ddata->powermode << L3G4200D_PM_BIT);
+ /* ddata->odr already holds the ODR/BW bits in register position */
+ unsigned char shifted_odr = ddata->odr;
+ unsigned char context = (shifted_powermode | shifted_odr | ENABLE_ALL_AXES);
+ int ret = 0;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->device_status == DEVICE_ON)
+ goto fail;
+
+ /* in correct mode, no need to change it */
+ if (ddata->powermode == PM_OFF) {
+ ddata->device_status = DEVICE_OFF;
+ goto fail;
+ } else {
+ ddata->device_status = DEVICE_ON;
+ }
+
+ /* turn on the power when resuming the device */
+ if (ddata->regulator)
+ regulator_enable(ddata->regulator);
+
+ ret = l3g4200d_write(ddata, CTRL_REG1, context, "CONTROL");
+ if (ret < 0)
+ goto fail;
+
+ range_value = ddata->range;
+ range_value <<= L3G4200D_FS_BIT;
+ range_value |= BDU_ENABLE;
+
+ ret = l3g4200d_write(ddata, CTRL_REG4, range_value, "RANGE");
+
+fail:
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
+#endif
+
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#ifdef CONFIG_PM
+static int l3g4200d_suspend(struct device *dev)
+{
+ struct l3g4200d_data *ddata;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+
+ ret = l3g4200d_do_suspend(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device\n");
+
+ return ret;
+}
+
+static int l3g4200d_resume(struct device *dev)
+{
+ struct l3g4200d_data *ddata;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+
+ ret = l3g4200d_do_resume(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops l3g4200d_dev_pm_ops = {
+ .suspend = l3g4200d_suspend,
+ .resume = l3g4200d_resume,
+};
+#endif
+#else
+static void l3g4200d_early_suspend(struct early_suspend *data)
+{
+ struct l3g4200d_data *ddata =
+ container_of(data, struct l3g4200d_data, early_suspend);
+ int ret;
+
+ ret = l3g4200d_do_suspend(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device\n");
+}
+
+static void l3g4200d_late_resume(struct early_suspend *data)
+{
+ struct l3g4200d_data *ddata =
+ container_of(data, struct l3g4200d_data, early_suspend);
+ int ret;
+
+ ret = l3g4200d_do_resume(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device\n");
+}
+#endif
+
+static const struct i2c_device_id l3g4200d_id[] = {
+ {"l3g4200d", 0 },
+ { },
+};
+
+static struct i2c_driver l3g4200d_driver = {
+ .driver = {
+ .name = "l3g4200d",
+#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
+ .pm = &l3g4200d_dev_pm_ops,
+#endif
+ },
+ .probe = l3g4200d_probe,
+ .remove = l3g4200d_remove,
+ .id_table = l3g4200d_id,
+};
+
+static int __init l3g4200d_init(void)
+{
+ return i2c_add_driver(&l3g4200d_driver);
+}
+
+static void __exit l3g4200d_exit(void)
+{
+ i2c_del_driver(&l3g4200d_driver);
+}
+
+module_init(l3g4200d_init);
+module_exit(l3g4200d_exit);
+
+MODULE_DESCRIPTION("l3g4200d digital gyroscope driver");
+MODULE_AUTHOR("Chethan Krishna N");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lsm303dlh_a.c b/drivers/hwmon/lsm303dlh_a.c
new file mode 100644
index 00000000000..a6e724facc3
--- /dev/null
+++ b/drivers/hwmon/lsm303dlh_a.c
@@ -0,0 +1,1371 @@
+/*
+ * lsm303dlh_a.c
+ * ST 3-Axis Accelerometer Driver
+ *
+ * Copyright (C) 2010 STMicroelectronics
+ * Author: Carmine Iascone (carmine.iascone@st.com)
+ * Author: Matteo Dameno (matteo.dameno@st.com)
+ *
+ * Copyright (C) 2010 STEricsson
+ * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * Updated:Preetham Rao Kaskurthi <preetham.rao@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <mach/gpio.h>
+#endif
+
+#include <linux/lsm303dlh.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#include <linux/regulator/consumer.h>
+
+ /* lsm303dlh accelerometer registers */
+ #define WHO_AM_I 0x0F
+
+ /* ctrl 1: pm2 pm1 pm0 dr1 dr0 zenable yenable xenable */
+ #define CTRL_REG1 0x20 /* power control reg */
+ #define CTRL_REG2 0x21 /* power control reg */
+ #define CTRL_REG3 0x22 /* power control reg */
+ #define CTRL_REG4 0x23 /* interrupt control reg */
+ #define CTRL_REG5 0x24 /* interrupt control reg */
+
+ #define STATUS_REG 0x27 /* status register */
+
+ #define AXISDATA_REG 0x28 /* axis data */
+
+ #define INT1_CFG 0x30 /* interrupt 1 configuration */
+ #define INT1_SRC 0x31 /* interrupt 1 source reg */
+ #define INT1_THS 0x32 /* interrupt 1 threshold */
+ #define INT1_DURATION 0x33 /* interrupt 1 duration */
+
+ #define INT2_CFG 0x34 /* interrupt 2 configuration */
+ #define INT2_SRC 0x35 /* interrupt 2 source reg */
+ #define INT2_THS 0x36 /* interrupt 2 threshold */
+ #define INT2_DURATION 0x37 /* interrupt 2 duration */
+
+ /* Sensitivity adjustment */
+ #define SHIFT_ADJ_2G 4 /* 1/16*/
+ #define SHIFT_ADJ_4G 3 /* 2/16*/
+ #define SHIFT_ADJ_8G 2 /* ~3.9/16*/
+
+ /* Control register 1 */
+ #define LSM303DLH_A_CR1_PM_BIT 5
+ #define LSM303DLH_A_CR1_PM_MASK (0x7 << LSM303DLH_A_CR1_PM_BIT)
+ #define LSM303DLH_A_CR1_DR_BIT 3
+ #define LSM303DLH_A_CR1_DR_MASK (0x3 << LSM303DLH_A_CR1_DR_BIT)
+ #define LSM303DLH_A_CR1_EN_BIT 0
+ #define LSM303DLH_A_CR1_EN_MASK (0x7 << LSM303DLH_A_CR1_EN_BIT)
+ #define LSM303DLH_A_CR1_AXIS_ENABLE 7
+
+ /* Control register 4 */
+ #define LSM303DLH_A_CR4_ST_BIT 1
+ #define LSM303DLH_A_CR4_ST_MASK (0x1 << LSM303DLH_A_CR4_ST_BIT)
+ #define LSM303DLH_A_CR4_STS_BIT 3
+ #define LSM303DLH_A_CR4_STS_MASK (0x1 << LSM303DLH_A_CR4_STS_BIT)
+ #define LSM303DLH_A_CR4_FS_BIT 4
+ #define LSM303DLH_A_CR4_FS_MASK (0x3 << LSM303DLH_A_CR4_FS_BIT)
+ #define LSM303DLH_A_CR4_BLE_BIT 6
+ #define LSM303DLH_A_CR4_BLE_MASK (0x3 << LSM303DLH_A_CR4_BLE_BIT)
+ #define LSM303DLH_A_CR4_BDU_BIT 7
+ #define LSM303DLH_A_CR4_BDU_MASK (0x1 << LSM303DLH_A_CR4_BDU_BIT)
+
+ /* Control register 3 */
+ #define LSM303DLH_A_CR3_I1_BIT 0
+ #define LSM303DLH_A_CR3_I1_MASK (0x3 << LSM303DLH_A_CR3_I1_BIT)
+ #define LSM303DLH_A_CR3_LIR1_BIT 2
+ #define LSM303DLH_A_CR3_LIR1_MASK (0x1 << LSM303DLH_A_CR3_LIR1_BIT)
+ #define LSM303DLH_A_CR3_I2_BIT 3
+ #define LSM303DLH_A_CR3_I2_MASK (0x3 << LSM303DLH_A_CR3_I2_BIT)
+ #define LSM303DLH_A_CR3_LIR2_BIT 5
+ #define LSM303DLH_A_CR3_LIR2_MASK (0x1 << LSM303DLH_A_CR3_LIR2_BIT)
+ #define LSM303DLH_A_CR3_PPOD_BIT 6
+ #define LSM303DLH_A_CR3_PPOD_MASK (0x1 << LSM303DLH_A_CR3_PPOD_BIT)
+ #define LSM303DLH_A_CR3_IHL_BIT 7
+ #define LSM303DLH_A_CR3_IHL_MASK (0x1 << LSM303DLH_A_CR3_IHL_BIT)
+
+ #define LSM303DLH_A_CR3_I_SELF 0x0
+ #define LSM303DLH_A_CR3_I_OR 0x1
+ #define LSM303DLH_A_CR3_I_DATA 0x2
+ #define LSM303DLH_A_CR3_I_BOOT 0x3
+
+ #define LSM303DLH_A_CR3_LIR_LATCH 0x1
+
+ /* Range */
+ #define LSM303DLH_A_RANGE_2G 0x00
+ #define LSM303DLH_A_RANGE_4G 0x01
+ #define LSM303DLH_A_RANGE_8G 0x03
+
+ /* Mode */
+ #define LSM303DLH_A_MODE_OFF 0x00
+ #define LSM303DLH_A_MODE_NORMAL 0x01
+ #define LSM303DLH_A_MODE_LP_HALF 0x02
+ #define LSM303DLH_A_MODE_LP_1 0x03
+ #define LSM303DLH_A_MODE_LP_2 0x02
+ #define LSM303DLH_A_MODE_LP_5 0x05
+ #define LSM303DLH_A_MODE_LP_10 0x06
+
+ /* Rate */
+ #define LSM303DLH_A_RATE_50 0x00
+ #define LSM303DLH_A_RATE_100 0x01
+ #define LSM303DLH_A_RATE_400 0x02
+ #define LSM303DLH_A_RATE_1000 0x03
+
+ /* Sleep & Wake */
+ #define LSM303DLH_A_SLEEPWAKE_DISABLE 0x00
+ #define LSM303DLH_A_SLEEPWAKE_ENABLE 0x3
+
+/* Multiple byte transfer enable */
+#define MULTIPLE_I2C_TR 0x80
+
+/* device status defines */
+#define DEVICE_OFF 0
+#define DEVICE_ON 1
+#define DEVICE_SUSPENDED 2
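+
+/*
+ * Illustrative helper, not part of the original driver: maps a full-scale
+ * range selection to the sensitivity shift that lsm303dlh_a_readdata()
+ * applies to the raw axis data.
+ */
+static inline int lsm303dlh_a_range_to_shift(unsigned char range)
+{
+ switch (range) {
+ case LSM303DLH_A_RANGE_8G:
+ return SHIFT_ADJ_8G;
+ case LSM303DLH_A_RANGE_4G:
+ return SHIFT_ADJ_4G;
+ case LSM303DLH_A_RANGE_2G:
+ default:
+ return SHIFT_ADJ_2G;
+ }
+}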
+
+/* Range -2048 to 2047 */
+struct lsm303dlh_a_t {
+ short x;
+ short y;
+ short z;
+};
+
+/**
+ * struct lsm303dlh_a_data - data structure used by lsm303dlh_a driver
+ * @client: i2c client
+ * @lock: mutex lock for sysfs operations
+ * @data: lsm303dlh_a_t struct containing x, y and z values
+ * @input_dev: input device
+ * @input_dev2: input device
+ * @pdata: lsm303dlh platform data
+ * @regulator: regulator
+ * @range: current range value of accelerometer
+ * @mode: current mode of operation
+ * @rate: current sampling rate
+ * @sleep_wake: sleep wake setting
+ * @shift_adjust: current shift adjust value set according to range
+ * @interrupt_control: interrupt control settings
+ * @interrupt_channel: interrupt channel 0 or 1
+ * @interrupt_configure: interrupt configurations for two channels
+ * @interrupt_duration: interrupt duration for two channels
+ * @interrupt_threshold: interrupt threshold for two channels
+ * @early_suspend: early suspend structure
+ * @device_status: device is ON, OFF or SUSPENDED
+ * @id: accelerometer device id
+ */
+struct lsm303dlh_a_data {
+ struct i2c_client *client;
+ /* lock for sysfs operations */
+ struct mutex lock;
+ struct lsm303dlh_a_t data;
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ struct input_dev *input_dev;
+ struct input_dev *input_dev2;
+#endif
+
+ struct lsm303dlh_platform_data pdata;
+ struct regulator *regulator;
+
+ unsigned char range;
+ unsigned char mode;
+ unsigned char rate;
+ unsigned char sleep_wake;
+ int shift_adjust;
+
+ unsigned char interrupt_control;
+ unsigned int interrupt_channel;
+
+ unsigned char interrupt_configure[2];
+ unsigned char interrupt_duration[2];
+ unsigned char interrupt_threshold[2];
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+ int device_status;
+ int id;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void lsm303dlh_a_early_suspend(struct early_suspend *data);
+static void lsm303dlh_a_late_resume(struct early_suspend *data);
+#endif
+
+static int lsm303dlh_a_write(struct lsm303dlh_a_data *ddata, u8 reg,
+ u8 val, char *msg)
+{
+ int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_write_byte_data failed error %d\
+ Register (%s)\n", ret, msg);
+ return ret;
+}
+
+static int lsm303dlh_a_read(struct lsm303dlh_a_data *ddata, u8 reg, char *msg)
+{
+ int ret = i2c_smbus_read_byte_data(ddata->client, reg);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_read_byte_data failed error %d\
+ Register (%s)\n", ret, msg);
+ return ret;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_PM)
+static int lsm303dlh_a_do_suspend(struct lsm303dlh_a_data *ddata)
+{
+ int ret;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_A_MODE_OFF) {
+ mutex_unlock(&ddata->lock);
+ return 0;
+ }
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ disable_irq(gpio_to_irq(ddata->pdata.irq_a1));
+ disable_irq(gpio_to_irq(ddata->pdata.irq_a2));
+#endif
+
+ ret = lsm303dlh_a_write(ddata, CTRL_REG1,
+ LSM303DLH_A_MODE_OFF, "CONTROL");
+
+ if (ddata->regulator)
+ regulator_disable(ddata->regulator);
+
+ ddata->device_status = DEVICE_SUSPENDED;
+
+ mutex_unlock(&ddata->lock);
+
+ return ret;
+}
+
+static int lsm303dlh_a_restore(struct lsm303dlh_a_data *ddata)
+{
+ unsigned char reg;
+ unsigned char shifted_mode = (ddata->mode << LSM303DLH_A_CR1_PM_BIT);
+ unsigned char shifted_rate = (ddata->rate << LSM303DLH_A_CR1_DR_BIT);
+ unsigned char context = (shifted_mode | shifted_rate);
+ int ret = 0;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->device_status == DEVICE_ON) {
+ mutex_unlock(&ddata->lock);
+ return 0;
+ }
+
+ /* in correct mode, no need to change it */
+ if (ddata->mode == LSM303DLH_A_MODE_OFF) {
+ ddata->device_status = DEVICE_OFF;
+ mutex_unlock(&ddata->lock);
+ return 0;
+ } else
+ ddata->device_status = DEVICE_ON;
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ enable_irq(gpio_to_irq(ddata->pdata.irq_a1));
+ enable_irq(gpio_to_irq(ddata->pdata.irq_a2));
+#endif
+
+ if (ddata->regulator)
+ regulator_enable(ddata->regulator);
+
+ /* BDU should be enabled by default/recommended */
+ reg = ddata->range;
+ reg |= LSM303DLH_A_CR4_BDU_MASK;
+ context |= LSM303DLH_A_CR1_AXIS_ENABLE;
+
+ ret = lsm303dlh_a_write(ddata, CTRL_REG1, context,
+ "CTRL_REG1");
+ if (ret < 0)
+ goto fail;
+
+ ret = lsm303dlh_a_write(ddata, CTRL_REG4, reg, "CTRL_REG4");
+
+ if (ret < 0)
+ goto fail;
+
+ /* write to the boot bit to reboot memory content */
+ ret = lsm303dlh_a_write(ddata, CTRL_REG2, 0x80, "CTRL_REG2");
+
+ if (ret < 0)
+ goto fail;
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ ret = lsm303dlh_a_write(ddata, CTRL_REG3, ddata->interrupt_control,
+ "CTRL_REG3");
+
+ if (ret < 0)
+ goto fail;
+
+ ret = lsm303dlh_a_write(ddata, INT1_CFG, ddata->interrupt_configure[0],
+ "INT1_CFG");
+
+ if (ret < 0)
+ goto fail;
+
+ ret = lsm303dlh_a_write(ddata, INT2_CFG, ddata->interrupt_configure[1],
+ "INT2_CFG");
+
+ if (ret < 0)
+ goto fail;
+
+ ret = lsm303dlh_a_write(ddata, INT1_THS, ddata->interrupt_threshold[0],
+ "INT1_THS");
+
+ if (ret < 0)
+ goto fail;
+
+ ret = lsm303dlh_a_write(ddata, INT2_THS, ddata->interrupt_threshold[1],
+ "INT2_THS");
+
+ if (ret < 0)
+ goto fail;
+
+ ret = lsm303dlh_a_write(ddata, INT1_DURATION,
+ ddata->interrupt_duration[0], "INT1_DURATION");
+
+ if (ret < 0)
+ goto fail;
+
+ ret = lsm303dlh_a_write(ddata, INT1_DURATION,
+ ddata->interrupt_duration[1], "INT1_DURATION");
+
+ if (ret < 0)
+ goto fail;
+#endif
+
+fail:
+ if (ret < 0)
+ dev_err(&ddata->client->dev, "could not restore the device %d\n", ret);
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
+#endif
+
+static int lsm303dlh_a_readdata(struct lsm303dlh_a_data *ddata)
+{
+ unsigned char acc_data[6];
+ short data[3];
+
+ int ret = i2c_smbus_read_i2c_block_data(ddata->client,
+ AXISDATA_REG | MULTIPLE_I2C_TR, 6, acc_data);
+ if (ret < 0) {
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_read_byte_data failed error %d\
+ Register AXISDATA_REG \n", ret);
+ return ret;
+ }
+
+ data[0] = (short) (((acc_data[1]) << 8) | acc_data[0]);
+ data[1] = (short) (((acc_data[3]) << 8) | acc_data[2]);
+ data[2] = (short) (((acc_data[5]) << 8) | acc_data[4]);
+
+ data[0] >>= ddata->shift_adjust;
+ data[1] >>= ddata->shift_adjust;
+ data[2] >>= ddata->shift_adjust;
+
+ /* taking position and orientation of x, y, z axes into account */
+
+ data[ddata->pdata.axis_map_x] = ddata->pdata.negative_x ?
+ -data[ddata->pdata.axis_map_x] : data[ddata->pdata.axis_map_x];
+ data[ddata->pdata.axis_map_y] = ddata->pdata.negative_y ?
+ -data[ddata->pdata.axis_map_y] : data[ddata->pdata.axis_map_y];
+ data[ddata->pdata.axis_map_z] = ddata->pdata.negative_z ?
+ -data[ddata->pdata.axis_map_z] : data[ddata->pdata.axis_map_z];
+
+ ddata->data.x = data[ddata->pdata.axis_map_x];
+ ddata->data.y = data[ddata->pdata.axis_map_y];
+ ddata->data.z = data[ddata->pdata.axis_map_z];
+
+ return ret;
+}
+
+static ssize_t lsm303dlh_a_show_data(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_A_MODE_OFF ||
+ ddata->device_status == DEVICE_SUSPENDED) {
+ mutex_unlock(&ddata->lock);
+ return ret;
+ }
+
+ ret = lsm303dlh_a_readdata(ddata);
+
+ if (ret < 0) {
+ mutex_unlock(&ddata->lock);
+ return ret;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return sprintf(buf, "%8x:%8x:%8x\n", ddata->data.x, ddata->data.y,
+ ddata->data.z);
+}
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+static irqreturn_t lsm303dlh_a_gpio_irq(int irq, void *device_data)
+{
+
+ struct lsm303dlh_a_data *ddata = device_data;
+ int ret;
+ unsigned char reg;
+ struct input_dev *input;
+
+ /* know your interrupt source */
+ if (irq == gpio_to_irq(ddata->pdata.irq_a1)) {
+ reg = INT1_SRC;
+ input = ddata->input_dev;
+ } else if (irq == gpio_to_irq(ddata->pdata.irq_a2)) {
+ reg = INT2_SRC;
+ input = ddata->input_dev2;
+ } else {
+ dev_err(&ddata->client->dev, "spurious interrupt");
+ return IRQ_HANDLED;
+ }
+
+ /* read the axis */
+ ret = lsm303dlh_a_readdata(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "reading data of xyz failed error %d\n", ret);
+
+ input_report_abs(input, ABS_X, ddata->data.x);
+ input_report_abs(input, ABS_Y, ddata->data.y);
+ input_report_abs(input, ABS_Z, ddata->data.z);
+ input_sync(input);
+
+ /* clear the value by reading it */
+ ret = lsm303dlh_a_read(ddata, reg, "INTERRUPT SOURCE");
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "clearing interrupt source failed error %d\n", ret);
+
+ return IRQ_HANDLED;
+
+}
+
+static ssize_t lsm303dlh_a_show_interrupt_control(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->interrupt_control);
+}
+
+static ssize_t lsm303dlh_a_store_interrupt_control(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 0, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_A_MODE_OFF) {
+ dev_info(&ddata->client->dev,
+ "device is switched off,make it ON using MODE");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ ddata->interrupt_control = val;
+
+ error = lsm303dlh_a_write(ddata, CTRL_REG3, val, "CTRL_REG3");
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static ssize_t lsm303dlh_a_show_interrupt_channel(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->interrupt_channel);
+}
+
+static ssize_t lsm303dlh_a_store_interrupt_channel(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 0, &val);
+ if (error)
+ return error;
+
+ ddata->interrupt_channel = val;
+
+ return count;
+}
+
+static ssize_t lsm303dlh_a_show_interrupt_configure(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n",
+ ddata->interrupt_configure[ddata->interrupt_channel]);
+}
+
+static ssize_t lsm303dlh_a_store_interrupt_configure(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 0, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_A_MODE_OFF) {
+ dev_info(&ddata->client->dev,
+ "device is switched off,make it ON using MODE");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ ddata->interrupt_configure[ddata->interrupt_channel] = val;
+
+ if (ddata->interrupt_channel == 0x0)
+ error = lsm303dlh_a_write(ddata, INT1_CFG, val, "INT1_CFG");
+ else
+ error = lsm303dlh_a_write(ddata, INT2_CFG, val, "INT2_CFG");
+
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static ssize_t lsm303dlh_a_show_interrupt_duration(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n",
+ ddata->interrupt_duration[ddata->interrupt_channel]);
+}
+
+static ssize_t lsm303dlh_a_store_interrupt_duration(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 0, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_A_MODE_OFF) {
+ dev_info(&ddata->client->dev,
+ "device is switched off,make it ON using MODE");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ ddata->interrupt_duration[ddata->interrupt_channel] = val;
+
+ if (ddata->interrupt_channel == 0x0)
+ error = lsm303dlh_a_write(ddata, INT1_DURATION, val,
+ "INT1_DURATION");
+ else
+ error = lsm303dlh_a_write(ddata, INT2_DURATION, val,
+ "INT2_DURATION");
+
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static ssize_t lsm303dlh_a_show_interrupt_threshold(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n",
+ ddata->interrupt_threshold[ddata->interrupt_channel]);
+}
+
+static ssize_t lsm303dlh_a_store_interrupt_threshold(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 0, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_A_MODE_OFF) {
+ dev_info(&ddata->client->dev,
+ "device is switched off,make it ON using MODE");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ ddata->interrupt_threshold[ddata->interrupt_channel] = val;
+
+ if (ddata->interrupt_channel == 0x0)
+ error = lsm303dlh_a_write(ddata, INT1_THS, val, "INT1_THS");
+ else
+ error = lsm303dlh_a_write(ddata, INT2_THS, val, "INT2_THS");
+
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+#endif
+
+static ssize_t lsm303dlh_a_show_range(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->range >> LSM303DLH_A_CR4_FS_BIT);
+}
+
+static ssize_t lsm303dlh_a_store_range(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+ long val;
+ unsigned long bdu_enabled_val;
+ int error;
+
+ error = strict_strtol(buf, 0, &val);
+ if (error)
+ return error;
+
+ if (val < LSM303DLH_A_RANGE_2G || val > LSM303DLH_A_RANGE_8G)
+ return -EINVAL;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_A_MODE_OFF) {
+ dev_info(&ddata->client->dev,
+ "device is switched off,make it ON using MODE");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ ddata->range = val;
+ ddata->range <<= LSM303DLH_A_CR4_FS_BIT;
+
+ /*
+ * Block data update (BDU) is recommended so that the output
+ * registers are not updated until both data bytes are read.
+ */
+ bdu_enabled_val = ddata->range;
+ bdu_enabled_val |= LSM303DLH_A_CR4_BDU_MASK;
+
+ error = lsm303dlh_a_write(ddata, CTRL_REG4, bdu_enabled_val,
+ "CTRL_REG4");
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ switch (val) {
+ case LSM303DLH_A_RANGE_2G:
+ ddata->shift_adjust = SHIFT_ADJ_2G;
+ break;
+ case LSM303DLH_A_RANGE_4G:
+ ddata->shift_adjust = SHIFT_ADJ_4G;
+ break;
+ case LSM303DLH_A_RANGE_8G:
+ ddata->shift_adjust = SHIFT_ADJ_8G;
+ break;
+ default:
+ mutex_unlock(&ddata->lock);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static ssize_t lsm303dlh_a_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->mode);
+}
+
+static ssize_t lsm303dlh_a_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+ long val;
+ unsigned char data;
+ int error;
+ bool set_boot_bit = false;
+
+ error = strict_strtol(buf, 0, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ddata->lock);
+
+ /* reject values outside the supported mode range */
+ if (val < LSM303DLH_A_MODE_OFF || val > LSM303DLH_A_MODE_LP_10) {
+ mutex_unlock(&ddata->lock);
+ return -EINVAL;
+ }
+
+ if (ddata->device_status == DEVICE_SUSPENDED) {
+ if (val == LSM303DLH_A_MODE_OFF) {
+ ddata->mode = val;
+ mutex_unlock(&ddata->lock);
+ return count;
+ } else {
+ /* device is turning on after suspend, reset memory */
+ set_boot_bit = true;
+ }
+ }
+
+ /* if same mode as existing, return */
+ if (ddata->mode == val) {
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ /* turn on the supplies if already off */
+ if (ddata->regulator && ddata->mode == LSM303DLH_A_MODE_OFF
+ && (ddata->device_status == DEVICE_OFF
+ || ddata->device_status == DEVICE_SUSPENDED)) {
+ regulator_enable(ddata->regulator);
+ ddata->device_status = DEVICE_ON;
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ enable_irq(gpio_to_irq(ddata->pdata.irq_a1));
+ enable_irq(gpio_to_irq(ddata->pdata.irq_a2));
+#endif
+ }
+
+ data = lsm303dlh_a_read(ddata, CTRL_REG1, "CTRL_REG1");
+
+ /*
+ * The x, y and z axis enable bits can get cleared across
+ * suspend/resume, so set them again to keep axis data
+ * available.
+ */
+ data |= LSM303DLH_A_CR1_AXIS_ENABLE;
+ data &= ~LSM303DLH_A_CR1_PM_MASK;
+
+ ddata->mode = val;
+ data |= ((val << LSM303DLH_A_CR1_PM_BIT) & LSM303DLH_A_CR1_PM_MASK);
+
+ error = lsm303dlh_a_write(ddata, CTRL_REG1, data, "CTRL_REG1");
+ if (error < 0) {
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ /*
+ * On a power-on request while the device is suspended, write
+ * the boot bit in CTRL_REG2 to reboot the memory content and
+ * ensure correct device behaviour after resume.
+ */
+ if (set_boot_bit) {
+ error = lsm303dlh_a_write(ddata, CTRL_REG2, 0x80, "CTRL_REG2");
+ if (error < 0) {
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+ }
+
+ if (val == LSM303DLH_A_MODE_OFF) {
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ disable_irq(gpio_to_irq(ddata->pdata.irq_a1));
+ disable_irq(gpio_to_irq(ddata->pdata.irq_a2));
+#endif
+ /*
+ * No need to save context here; this is not the
+ * suspend/resume path, so fall back to the default
+ * values.
+ */
+ ddata->rate = LSM303DLH_A_RATE_50;
+ ddata->range = LSM303DLH_A_RANGE_2G;
+ ddata->shift_adjust = SHIFT_ADJ_2G;
+
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ }
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static ssize_t lsm303dlh_a_show_rate(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->rate);
+}
+
+static ssize_t lsm303dlh_a_store_rate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+ long val;
+ unsigned char data;
+ int error;
+
+ error = strict_strtol(buf, 0, &val);
+ if (error)
+ return error;
+
+ if (val < LSM303DLH_A_RATE_50 || val > LSM303DLH_A_RATE_1000)
+ return -EINVAL;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_A_MODE_OFF) {
+ dev_info(&ddata->client->dev,
+ "device is switched off,make it ON using MODE");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ data = lsm303dlh_a_read(ddata, CTRL_REG1, "CTRL_REG1");
+
+ data &= ~LSM303DLH_A_CR1_DR_MASK;
+
+ ddata->rate = val;
+
+ data |= ((val << LSM303DLH_A_CR1_DR_BIT) & LSM303DLH_A_CR1_DR_MASK);
+
+ error = lsm303dlh_a_write(ddata, CTRL_REG1, data, "CTRL_REG1");
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static ssize_t lsm303dlh_a_show_sleepwake(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->sleep_wake);
+}
+
+static ssize_t lsm303dlh_a_store_sleepwake(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+ long val;
+ int error;
+
+ if (ddata->mode == LSM303DLH_A_MODE_OFF) {
+ dev_info(&ddata->client->dev,
+ "device is switched off,make it ON using MODE");
+ return count;
+ }
+
+ error = strict_strtoul(buf, 0, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ddata->lock);
+
+ ddata->sleep_wake = val;
+
+ error = lsm303dlh_a_write(ddata, CTRL_REG5, ddata->sleep_wake,
+ "CTRL_REG5");
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static ssize_t lsm303dlh_a_show_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->id);
+}
+
+static DEVICE_ATTR(id, S_IRUGO, lsm303dlh_a_show_id, NULL);
+
+static DEVICE_ATTR(data, S_IRUGO, lsm303dlh_a_show_data, NULL);
+
+static DEVICE_ATTR(range, S_IWUSR | S_IRUGO,
+ lsm303dlh_a_show_range, lsm303dlh_a_store_range);
+
+static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO,
+ lsm303dlh_a_show_mode, lsm303dlh_a_store_mode);
+
+static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO,
+ lsm303dlh_a_show_rate, lsm303dlh_a_store_rate);
+
+static DEVICE_ATTR(sleep_wake, S_IWUSR | S_IRUGO,
+ lsm303dlh_a_show_sleepwake, lsm303dlh_a_store_sleepwake);
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+static DEVICE_ATTR(interrupt_control, S_IWUGO | S_IRUGO,
+ lsm303dlh_a_show_interrupt_control,
+ lsm303dlh_a_store_interrupt_control);
+
+static DEVICE_ATTR(interrupt_channel, S_IWUGO | S_IRUGO,
+ lsm303dlh_a_show_interrupt_channel,
+ lsm303dlh_a_store_interrupt_channel);
+
+static DEVICE_ATTR(interrupt_configure, S_IWUGO | S_IRUGO,
+ lsm303dlh_a_show_interrupt_configure,
+ lsm303dlh_a_store_interrupt_configure);
+
+static DEVICE_ATTR(interrupt_duration, S_IWUGO | S_IRUGO,
+ lsm303dlh_a_show_interrupt_duration,
+ lsm303dlh_a_store_interrupt_duration);
+
+static DEVICE_ATTR(interrupt_threshold, S_IWUGO | S_IRUGO,
+ lsm303dlh_a_show_interrupt_threshold,
+ lsm303dlh_a_store_interrupt_threshold);
+#endif
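+
+/*
+ * Note on the interrupt attributes above: interrupt_channel only selects
+ * which of the two interrupt blocks (0 -> INT1_*, 1 -> INT2_*) later
+ * writes to interrupt_configure, interrupt_duration and
+ * interrupt_threshold are routed to, while interrupt_control programs
+ * CTRL_REG3 directly. Illustrative user-space ordering (the values are
+ * examples only, not recommended settings):
+ *
+ *   echo 1 > interrupt_channel      # select the INT2 block
+ *   echo 16 > interrupt_threshold   # INT2_THS
+ *   echo 2 > interrupt_duration     # INT2_DURATION
+ *   echo 10 > interrupt_configure   # INT2_CFG
+ */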
+
+static struct attribute *lsm303dlh_a_attributes[] = {
+ &dev_attr_id.attr,
+ &dev_attr_data.attr,
+ &dev_attr_range.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_rate.attr,
+ &dev_attr_sleep_wake.attr,
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ &dev_attr_interrupt_control.attr,
+ &dev_attr_interrupt_channel.attr,
+ &dev_attr_interrupt_configure.attr,
+ &dev_attr_interrupt_duration.attr,
+ &dev_attr_interrupt_threshold.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group lsm303dlh_a_attr_group = {
+ .attrs = lsm303dlh_a_attributes,
+};
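+
+/*
+ * Illustrative sysfs usage for the attribute group above. The directory
+ * name comes from the platform-supplied name_a, so the exact path depends
+ * on the board; run the commands from the device's sysfs directory:
+ *
+ *   echo 1 > mode    # any value in LSM303DLH_A_MODE_OFF..LSM303DLH_A_MODE_LP_10
+ *   echo 0 > range   # LSM303DLH_A_RANGE_2G
+ *   echo 0 > rate    # LSM303DLH_A_RATE_50
+ *   cat data         # current x:y:z sample
+ */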
+
+static int __devinit lsm303dlh_a_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lsm303dlh_a_data *ddata = NULL;
+
+ ddata = kzalloc(sizeof(struct lsm303dlh_a_data), GFP_KERNEL);
+ if (ddata == NULL) {
+ ret = -ENOMEM;
+ goto err_op_failed;
+ }
+
+ ddata->client = client;
+ i2c_set_clientdata(client, ddata);
+
+ /* copy platform specific data */
+ memcpy(&ddata->pdata, client->dev.platform_data, sizeof(ddata->pdata));
+ ddata->mode = LSM303DLH_A_MODE_OFF;
+ ddata->rate = LSM303DLH_A_RATE_50;
+ ddata->range = LSM303DLH_A_RANGE_2G;
+ ddata->sleep_wake = LSM303DLH_A_SLEEPWAKE_DISABLE;
+ ddata->shift_adjust = SHIFT_ADJ_2G;
+ ddata->device_status = DEVICE_OFF;
+ dev_set_name(&client->dev, "%s", ddata->pdata.name_a);
+
+ ddata->regulator = regulator_get(&client->dev, "vdd");
+ if (IS_ERR(ddata->regulator)) {
+ dev_err(&client->dev, "failed to get regulator\n");
+ ret = PTR_ERR(ddata->regulator);
+ ddata->regulator = NULL;
+ }
+
+ if (ddata->regulator) {
+ /*
+ * 0.83 milliamps typical with magnetic sensor setting ODR =
+ * 7.5 Hz, Accelerometer sensor ODR = 50 Hz. Double for
+ * safety.
+ */
+ regulator_set_optimum_mode(ddata->regulator, 830 * 2);
+ regulator_enable(ddata->regulator);
+ ddata->device_status = DEVICE_ON;
+ }
+
+ ret = lsm303dlh_a_read(ddata, WHO_AM_I, "WHO_AM_I");
+ if (ret < 0)
+ goto exit_free_regulator;
+
+ dev_info(&client->dev, "3-Axis Accelerometer, ID : %d\n",
+ ret);
+ ddata->id = ret;
+
+ mutex_init(&ddata->lock);
+
+ ret = sysfs_create_group(&client->dev.kobj, &lsm303dlh_a_attr_group);
+ if (ret)
+ goto exit_free_regulator;
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+
+ /*
+ * The accelerometer has two interrupt channels, each with its
+ * own threshold, duration and source registers, and can drive
+ * two input devices.
+ */
+
+ ddata->input_dev = input_allocate_device();
+ if (!ddata->input_dev) {
+ ret = -ENOMEM;
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ goto exit_free_regulator;
+ }
+
+ ddata->input_dev2 = input_allocate_device();
+ if (!ddata->input_dev2) {
+ ret = -ENOMEM;
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ goto err_input_alloc_failed;
+ }
+
+ set_bit(EV_ABS, ddata->input_dev->evbit);
+ set_bit(EV_ABS, ddata->input_dev2->evbit);
+
+ /* x-axis acceleration */
+ input_set_abs_params(ddata->input_dev, ABS_X, -32768, 32767, 0, 0);
+ input_set_abs_params(ddata->input_dev2, ABS_X, -32768, 32767, 0, 0);
+ /* y-axis acceleration */
+ input_set_abs_params(ddata->input_dev, ABS_Y, -32768, 32767, 0, 0);
+ input_set_abs_params(ddata->input_dev2, ABS_Y, -32768, 32767, 0, 0);
+ /* z-axis acceleration */
+ input_set_abs_params(ddata->input_dev, ABS_Z, -32768, 32767, 0, 0);
+ input_set_abs_params(ddata->input_dev2, ABS_Z, -32768, 32767, 0, 0);
+
+ ddata->input_dev->name = "accelerometer";
+ ddata->input_dev2->name = "motion";
+
+ ret = input_register_device(ddata->input_dev);
+ if (ret) {
+ dev_err(&client->dev, "Unable to register input device: %s\n",
+ ddata->input_dev->name);
+ goto err_input_register_failed;
+ }
+
+ ret = input_register_device(ddata->input_dev2);
+ if (ret) {
+ dev_err(&client->dev, "Unable to register input device: %s\n",
+ ddata->input_dev2->name);
+ goto err_input_register_failed2;
+ }
+
+ /* Register interrupt */
+ ret = request_threaded_irq(gpio_to_irq(ddata->pdata.irq_a1), NULL,
+ lsm303dlh_a_gpio_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "lsm303dlh_a", ddata);
+ if (ret) {
+ dev_err(&client->dev, "request irq1 failed\n");
+ goto err_input_failed;
+ }
+
+ ret = request_threaded_irq(gpio_to_irq(ddata->pdata.irq_a2), NULL,
+ lsm303dlh_a_gpio_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "lsm303dlh_a", ddata);
+ if (ret) {
+ dev_err(&client->dev, "request irq2 failed\n");
+ free_irq(gpio_to_irq(ddata->pdata.irq_a1), ddata);
+ goto err_input_failed;
+ }
+
+ /* only mode can enable it */
+ disable_irq(gpio_to_irq(ddata->pdata.irq_a1));
+ disable_irq(gpio_to_irq(ddata->pdata.irq_a2));
+
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ddata->early_suspend.level =
+ EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ddata->early_suspend.suspend = lsm303dlh_a_early_suspend;
+ ddata->early_suspend.resume = lsm303dlh_a_late_resume;
+ register_early_suspend(&ddata->early_suspend);
+#endif
+
+ if (ddata->device_status == DEVICE_ON && ddata->regulator) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ return ret;
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+err_input_failed:
+ /* unregistering also frees a registered input device */
+ input_unregister_device(ddata->input_dev2);
+ input_unregister_device(ddata->input_dev);
+ goto exit_free_regulator;
+err_input_register_failed2:
+ input_unregister_device(ddata->input_dev);
+ input_free_device(ddata->input_dev2);
+ goto exit_free_regulator;
+err_input_register_failed:
+ input_free_device(ddata->input_dev2);
+err_input_alloc_failed:
+ input_free_device(ddata->input_dev);
+#endif
+exit_free_regulator:
+ if (ddata->device_status == DEVICE_ON && ddata->regulator) {
+ regulator_disable(ddata->regulator);
+ regulator_put(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+err_op_failed:
+ kfree(ddata);
+ dev_err(&client->dev, "probe function fails %x", ret);
+ return ret;
+}
+
+static int __devexit lsm303dlh_a_remove(struct i2c_client *client)
+{
+ int ret;
+ struct lsm303dlh_a_data *ddata;
+
+ ddata = i2c_get_clientdata(client);
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ /* input_unregister_device() drops the last reference, no explicit free */
+ input_unregister_device(ddata->input_dev);
+ input_unregister_device(ddata->input_dev2);
+#endif
+ sysfs_remove_group(&client->dev.kobj, &lsm303dlh_a_attr_group);
+
+ /* safer to make device off */
+ if (ddata->mode != LSM303DLH_A_MODE_OFF) {
+ ret = lsm303dlh_a_write(ddata, CTRL_REG1, 0, "CONTROL");
+
+ if (ret < 0) {
+ dev_err(&client->dev, "could not turn off the device %d", ret);
+ return ret;
+ }
+
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ regulator_put(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ }
+
+ i2c_set_clientdata(client, NULL);
+ kfree(ddata);
+
+ return 0;
+}
+
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#ifdef CONFIG_PM
+static int lsm303dlh_a_suspend(struct device *dev)
+{
+ struct lsm303dlh_a_data *ddata;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+
+ ret = lsm303dlh_a_do_suspend(ddata);
+
+ return ret;
+}
+
+static int lsm303dlh_a_resume(struct device *dev)
+{
+ struct lsm303dlh_a_data *ddata;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+
+ ret = lsm303dlh_a_restore(ddata);
+
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device");
+
+ return ret;
+}
+static const struct dev_pm_ops lsm303dlh_a_dev_pm_ops = {
+ .suspend = lsm303dlh_a_suspend,
+ .resume = lsm303dlh_a_resume,
+};
+#endif
+#else
+static void lsm303dlh_a_early_suspend(struct early_suspend *data)
+{
+ struct lsm303dlh_a_data *ddata =
+ container_of(data, struct lsm303dlh_a_data, early_suspend);
+ int ret;
+
+ ret = lsm303dlh_a_do_suspend(ddata);
+}
+
+static void lsm303dlh_a_late_resume(struct early_suspend *data)
+{
+ struct lsm303dlh_a_data *ddata =
+ container_of(data, struct lsm303dlh_a_data, early_suspend);
+ int ret;
+
+ ret = lsm303dlh_a_restore(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "lsm303dlh_a late resume failed\n");
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+static const struct i2c_device_id lsm303dlh_a_id[] = {
+ { "lsm303dlh_a", 0 },
+ { },
+};
+
+static struct i2c_driver lsm303dlh_a_driver = {
+ .probe = lsm303dlh_a_probe,
+ .remove = lsm303dlh_a_remove,
+ .id_table = lsm303dlh_a_id,
+ .driver = {
+ .name = "lsm303dlh_a",
+#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
+ .pm = &lsm303dlh_a_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init lsm303dlh_a_init(void)
+{
+ return i2c_add_driver(&lsm303dlh_a_driver);
+}
+
+static void __exit lsm303dlh_a_exit(void)
+{
+ i2c_del_driver(&lsm303dlh_a_driver);
+}
+
+module_init(lsm303dlh_a_init);
+module_exit(lsm303dlh_a_exit);
+
+MODULE_DESCRIPTION("lSM303DLH 3-Axis Accelerometer Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("STMicroelectronics");
diff --git a/drivers/hwmon/lsm303dlh_m.c b/drivers/hwmon/lsm303dlh_m.c
new file mode 100644
index 00000000000..11815e3470f
--- /dev/null
+++ b/drivers/hwmon/lsm303dlh_m.c
@@ -0,0 +1,924 @@
+/*
+ * lsm303dlh_m.c
+ * ST 3-Axis Magnetometer Driver
+ *
+ * Copyright (C) 2010 STMicroelectronics
+ * Author: Carmine Iascone (carmine.iascone@st.com)
+ * Author: Matteo Dameno (matteo.dameno@st.com)
+ *
+ * Copyright (C) 2010 STEricsson
+ * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * Updated:Preetham Rao Kaskurthi <preetham.rao@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <mach/gpio.h>
+#endif
+
+#include <linux/lsm303dlh.h>
+#include <linux/regulator/consumer.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#include <linux/kernel.h>
+
+/* lsm303dlh magnetometer registers */
+#define IRA_REG_M 0x0A
+
+/* Magnetometer registers */
+#define CRA_REG_M 0x00 /* Configuration register A */
+#define CRB_REG_M 0x01 /* Configuration register B */
+#define MR_REG_M 0x02 /* Mode register */
+#define SR_REG_M 0x09 /* Status register */
+
+/* Output register start address*/
+#define OUT_X_M 0x03
+#define OUT_Y_M 0x05
+#define OUT_Z_M 0x07
+
+/* Magnetometer X-Y gain */
+#define XY_GAIN_1_3 1055 /* XY gain at 1.3G */
+#define XY_GAIN_1_9 795 /* XY gain at 1.9G */
+#define XY_GAIN_2_5 635 /* XY gain at 2.5G */
+#define XY_GAIN_4_0 430 /* XY gain at 4.0G */
+#define XY_GAIN_4_7 375 /* XY gain at 4.7G */
+#define XY_GAIN_5_6 320 /* XY gain at 5.6G */
+#define XY_GAIN_8_1 230 /* XY gain at 8.1G */
+
+/* Magnetometer Z gain */
+#define Z_GAIN_1_3 950 /* Z gain at 1.3G */
+#define Z_GAIN_1_9 710 /* Z gain at 1.9G */
+#define Z_GAIN_2_5 570 /* Z gain at 2.5G */
+#define Z_GAIN_4_0 385 /* Z gain at 4.0G */
+#define Z_GAIN_4_7 335 /* Z gain at 4.7G */
+#define Z_GAIN_5_6 285 /* Z gain at 5.6G */
+#define Z_GAIN_8_1 205 /* Z gain at 8.1G */
+
+/* Control A register. */
+#define LSM303DLH_M_CRA_DO_BIT 2
+#define LSM303DLH_M_CRA_DO_MASK (0x7 << LSM303DLH_M_CRA_DO_BIT)
+#define LSM303DLH_M_CRA_MS_BIT 0
+#define LSM303DLH_M_CRA_MS_MASK (0x3 << LSM303DLH_M_CRA_MS_BIT)
+
+/* Control B register. */
+#define LSM303DLH_M_CRB_GN_BIT 5
+#define LSM303DLH_M_CRB_GN_MASK (0x7 << LSM303DLH_M_CRB_GN_BIT)
+
+/* Control Mode register. */
+#define LSM303DLH_M_MR_MD_BIT 0
+#define LSM303DLH_M_MR_MD_MASK (0x3 << LSM303DLH_M_MR_MD_BIT)
+
+/* Control Status register. */
+#define LSM303DLH_M_SR_RDY_BIT 0
+#define LSM303DLH_M_SR_RDY_MASK (0x1 << LSM303DLH_M_SR_RDY_BIT)
+#define LSM303DLH_M_SR_LOC_BIT 1
+#define LSM303DLH_M_SR_LOC_MASK (0x1 << LSM303DLH_M_SR_LOC_BIT)
+#define LSM303DLH_M_SR_REN_BIT 2
+#define LSM303DLH_M_SR_REN_MASK (0x1 << LSM303DLH_M_SR_REN_BIT)
+
+/* Magnetometer gain setting */
+#define LSM303DLH_M_RANGE_1_3G 0x01
+#define LSM303DLH_M_RANGE_1_9G 0x02
+#define LSM303DLH_M_RANGE_2_5G 0x03
+#define LSM303DLH_M_RANGE_4_0G 0x04
+#define LSM303DLH_M_RANGE_4_7G 0x05
+#define LSM303DLH_M_RANGE_5_6G 0x06
+#define LSM303DLH_M_RANGE_8_1G 0x07
+
+/* Magnetometer capturing mode */
+#define LSM303DLH_M_MODE_CONTINUOUS 0
+#define LSM303DLH_M_MODE_SINGLE 1
+#define LSM303DLH_M_MODE_SLEEP 3
+
+/* Magnetometer output data rate */
+#define LSM303DLH_M_RATE_00_75 0x00
+#define LSM303DLH_M_RATE_01_50 0x01
+#define LSM303DLH_M_RATE_03_00 0x02
+#define LSM303DLH_M_RATE_07_50 0x03
+#define LSM303DLH_M_RATE_15_00 0x04
+#define LSM303DLH_M_RATE_30_00 0x05
+#define LSM303DLH_M_RATE_75_00 0x06
+
+#ifdef CONFIG_SENSORS_LSM303DLHC
+#define LSM303DLH_M_RATE_220_00 0x07
+#endif
+
+/* Multiple byte transfer enable */
+#define MULTIPLE_I2C_TR 0x80
+
+/* device status defines */
+#define DEVICE_OFF 0
+#define DEVICE_ON 1
+#define DEVICE_SUSPENDED 2
+
+/* device CHIP ID defines */
+#define LSM303DLHC_CHIP_ID 51
+
+/**
+ * struct lsm303dlh_m_data - data structure used by lsm303dlh_m driver
+ * @client: i2c client
+ * @lock: mutex lock for sysfs operations
+ * @input_dev: input device
+ * @regulator: regulator
+ * @pdata: lsm303dlh platform data
+ * @gain: x, y and z axes gain
+ * @data: Magnetic field values of x, y and z axes
+ * @mode: current mode of operation
+ * @rate: current sampling rate
+ * @range: current range value of magnetometer
+ * @early_suspend: early suspend structure
+ * @device_status: device is ON, OFF or SUSPENDED
+ */
+struct lsm303dlh_m_data {
+ struct i2c_client *client;
+ /* lock for sysfs operations */
+ struct mutex lock;
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ struct input_dev *input_dev;
+#endif
+ struct regulator *regulator;
+ struct lsm303dlh_platform_data pdata;
+
+ short gain[3];
+ short data[3];
+ unsigned char mode;
+ unsigned char rate;
+ unsigned char range;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+ int device_status;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void lsm303dlh_m_early_suspend(struct early_suspend *data);
+static void lsm303dlh_m_late_resume(struct early_suspend *data);
+#endif
+
+static int lsm303dlh_m_set_mode(struct lsm303dlh_m_data *ddata,
+ unsigned char mode);
+static int lsm303dlh_m_write(struct lsm303dlh_m_data *ddata,
+ u8 reg, u8 val, char *msg)
+{
+ int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_write_byte_data failed error %d\
+ Register (%s)\n", ret, msg);
+ return ret;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_PM)
+static int lsm303dlh_m_do_suspend(struct lsm303dlh_m_data *ddata)
+{
+ int ret;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_M_MODE_SLEEP) {
+ mutex_unlock(&ddata->lock);
+ return 0;
+ }
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ disable_irq(gpio_to_irq(ddata->pdata.irq_m));
+#endif
+
+ ret = lsm303dlh_m_set_mode(ddata, LSM303DLH_M_MODE_SLEEP);
+
+ if (ddata->regulator)
+ regulator_disable(ddata->regulator);
+
+ ddata->device_status = DEVICE_SUSPENDED;
+
+ mutex_unlock(&ddata->lock);
+
+ return ret;
+}
+
+static int lsm303dlh_m_restore(struct lsm303dlh_m_data *ddata)
+{
+ int ret = 0;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->device_status == DEVICE_ON) {
+ mutex_unlock(&ddata->lock);
+ return 0;
+ }
+
+ /* nothing to restore if the device was left in sleep mode */
+ if (ddata->mode == LSM303DLH_M_MODE_SLEEP) {
+ ddata->device_status = DEVICE_OFF;
+ mutex_unlock(&ddata->lock);
+ return 0;
+ } else
+ ddata->device_status = DEVICE_ON;
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ enable_irq(gpio_to_irq(ddata->pdata.irq_m));
+#endif
+
+ if (ddata->regulator)
+ regulator_enable(ddata->regulator);
+
+ ret = lsm303dlh_m_write(ddata, CRB_REG_M, ddata->range, "SET RANGE");
+
+ if (ret < 0)
+ goto fail;
+
+ ret = lsm303dlh_m_write(ddata, CRA_REG_M, ddata->rate, "SET RATE");
+
+ if (ret < 0)
+ goto fail;
+
+ ret = lsm303dlh_m_set_mode(ddata, ddata->mode);
+
+ if (ret < 0)
+ goto fail;
+
+fail:
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
+#endif
+
+static int lsm303dlh_m_read_multi(struct lsm303dlh_m_data *ddata, u8 reg,
+ u8 count, u8 *val, char *msg)
+{
+ int ret = i2c_smbus_read_i2c_block_data(ddata->client,
+ reg | MULTIPLE_I2C_TR, count, val);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_read_i2c_block_data failed error %d\
+ Register (%s)\n", ret, msg);
+ return ret;
+}
+
+static ssize_t lsm303dlh_m_show_rate(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_m_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->rate >> LSM303DLH_M_CRA_DO_BIT);
+}
+
+/* set lsm303dlh magnetometer bandwidth */
+static ssize_t lsm303dlh_m_store_rate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_m_data *ddata = platform_get_drvdata(pdev);
+ unsigned long val;
+ unsigned char data;
+ int error;
+
+ error = strict_strtoul(buf, 0, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ddata->lock);
+ if (ddata->mode == LSM303DLH_M_MODE_SLEEP) {
+ dev_info(&ddata->client->dev,
+ "device is switched off,make it ON using MODE");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ data = ((val << LSM303DLH_M_CRA_DO_BIT) & LSM303DLH_M_CRA_DO_MASK);
+ ddata->rate = data;
+
+ error = lsm303dlh_m_write(ddata, CRA_REG_M, data, "SET RATE");
+
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static int lsm303dlh_m_xyz_read(struct lsm303dlh_m_data *ddata)
+{
+ unsigned char xyz_data[6];
+ short temp;
+ int ret = lsm303dlh_m_read_multi(ddata, OUT_X_M,
+ 6, xyz_data, "OUT_X_M");
+ if (ret < 0)
+ return -EINVAL;
+
+ /* MSB is at lower address */
+ ddata->data[0] = (short)
+ (((xyz_data[0]) << 8) | xyz_data[1]);
+ ddata->data[1] = (short)
+ (((xyz_data[2]) << 8) | xyz_data[3]);
+ ddata->data[2] = (short)
+ (((xyz_data[4]) << 8) | xyz_data[5]);
+
+ /* check if the chip is an LSM303DLHC */
+ if (ddata->pdata.chip_id == LSM303DLHC_CHIP_ID) {
+ /*
+ * the out registers are in x, z and y order
+ * so swap y and z values
+ */
+ temp = ddata->data[1];
+ ddata->data[1] = ddata->data[2];
+ ddata->data[2] = temp;
+ }
+ /* take the orientation of the x, y and z axes into account */
+
+ ddata->data[ddata->pdata.axis_map_x] = ddata->pdata.negative_x ?
+ -ddata->data[ddata->pdata.axis_map_x] :
+ ddata->data[ddata->pdata.axis_map_x];
+ ddata->data[ddata->pdata.axis_map_y] = ddata->pdata.negative_y ?
+ -ddata->data[ddata->pdata.axis_map_y] :
+ ddata->data[ddata->pdata.axis_map_y];
+ ddata->data[ddata->pdata.axis_map_z] = ddata->pdata.negative_z ?
+ -ddata->data[ddata->pdata.axis_map_z] :
+ ddata->data[ddata->pdata.axis_map_z];
+
+ return ret;
+}
+
+static ssize_t lsm303dlh_m_gain(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_m_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%8x:%8x:%8x\n",
+ ddata->gain[ddata->pdata.axis_map_x],
+ ddata->gain[ddata->pdata.axis_map_y],
+ ddata->gain[ddata->pdata.axis_map_z]);
+}
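+
+/*
+ * The gain attribute reports the per-axis scale factors picked for the
+ * current range (the XY_GAIN_* and Z_GAIN_* values above). Assuming these
+ * are the sensor's nominal counts-per-gauss figures, user space converts a
+ * raw sample roughly as field_gauss = raw / gain (illustrative only).
+ */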
+
+static ssize_t lsm303dlh_m_values(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_m_data *ddata = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_M_MODE_SLEEP ||
+ ddata->device_status == DEVICE_SUSPENDED) {
+ mutex_unlock(&ddata->lock);
+ return ret;
+ }
+
+ ret = lsm303dlh_m_xyz_read(ddata);
+
+ if (ret < 0) {
+ mutex_unlock(&ddata->lock);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ /* take the orientation of the x, y and z axes into account */
+
+ return sprintf(buf, "%8x:%8x:%8x\n",
+ ddata->data[ddata->pdata.axis_map_x],
+ ddata->data[ddata->pdata.axis_map_y],
+ ddata->data[ddata->pdata.axis_map_z]);
+}
+
+static int lsm303dlh_m_set_mode(struct lsm303dlh_m_data *ddata,
+ unsigned char mode)
+{
+ int ret;
+
+ mode = (mode << LSM303DLH_M_MR_MD_BIT);
+
+ ret = i2c_smbus_write_byte_data(ddata->client, MR_REG_M, mode);
+
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_write_byte_data failed error %d\
+ Register (%s)\n", ret, "MODE CONTROL");
+
+ return ret;
+}
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+
+static irqreturn_t lsm303dlh_m_gpio_irq(int irq, void *device_data)
+{
+ struct lsm303dlh_m_data *ddata = device_data;
+ int ret;
+
+ ret = lsm303dlh_m_xyz_read(ddata);
+
+ if (ret < 0) {
+ dev_err(&ddata->client->dev,
+ "reading data of xyz failed error %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* take the orientation of the x, y and z axes into account */
+
+ input_report_abs(ddata->input_dev, ABS_X,
+ ddata->data[ddata->pdata.axis_map_x]);
+ input_report_abs(ddata->input_dev, ABS_Y,
+ ddata->data[ddata->pdata.axis_map_y]);
+ input_report_abs(ddata->input_dev, ABS_Z,
+ ddata->data[ddata->pdata.axis_map_z]);
+ input_sync(ddata->input_dev);
+
+ return IRQ_HANDLED;
+}
+#endif
+
+static ssize_t lsm303dlh_m_show_range(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_m_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->range >> LSM303DLH_M_CRB_GN_BIT);
+}
+
+static ssize_t lsm303dlh_m_store_range(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_m_data *ddata = platform_get_drvdata(pdev);
+ short xy_gain;
+ short z_gain;
+ unsigned long range;
+ int error;
+
+ error = strict_strtoul(buf, 0, &range);
+
+ if (error)
+ return error;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLH_M_MODE_SLEEP) {
+ dev_info(&ddata->client->dev,
+ "device is switched off,make it ON using MODE");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ switch (range) {
+ case LSM303DLH_M_RANGE_1_3G:
+ xy_gain = XY_GAIN_1_3;
+ z_gain = Z_GAIN_1_3;
+ break;
+ case LSM303DLH_M_RANGE_1_9G:
+ xy_gain = XY_GAIN_1_9;
+ z_gain = Z_GAIN_1_9;
+ break;
+ case LSM303DLH_M_RANGE_2_5G:
+ xy_gain = XY_GAIN_2_5;
+ z_gain = Z_GAIN_2_5;
+ break;
+ case LSM303DLH_M_RANGE_4_0G:
+ xy_gain = XY_GAIN_4_0;
+ z_gain = Z_GAIN_4_0;
+ break;
+ case LSM303DLH_M_RANGE_4_7G:
+ xy_gain = XY_GAIN_4_7;
+ z_gain = Z_GAIN_4_7;
+ break;
+ case LSM303DLH_M_RANGE_5_6G:
+ xy_gain = XY_GAIN_5_6;
+ z_gain = Z_GAIN_5_6;
+ break;
+ case LSM303DLH_M_RANGE_8_1G:
+ xy_gain = XY_GAIN_8_1;
+ z_gain = Z_GAIN_8_1;
+ break;
+ default:
+ mutex_unlock(&ddata->lock);
+ return -EINVAL;
+ }
+
+ ddata->gain[ddata->pdata.axis_map_x] = xy_gain;
+ ddata->gain[ddata->pdata.axis_map_y] = xy_gain;
+ ddata->gain[ddata->pdata.axis_map_z] = z_gain;
+
+ range <<= LSM303DLH_M_CRB_GN_BIT;
+ range &= LSM303DLH_M_CRB_GN_MASK;
+
+ ddata->range = range;
+
+ error = lsm303dlh_m_write(ddata, CRB_REG_M, range, "SET RANGE");
+ mutex_unlock(&ddata->lock);
+
+ if (error < 0)
+ return error;
+
+ return count;
+}
+
+static ssize_t lsm303dlh_m_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_m_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->mode);
+}
+
+static ssize_t lsm303dlh_m_store_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlh_m_data *ddata = platform_get_drvdata(pdev);
+ unsigned long mode;
+ int error;
+
+ error = strict_strtoul(buf, 0, &mode);
+ if (error)
+ return error;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->device_status == DEVICE_SUSPENDED &&
+ mode == LSM303DLH_M_MODE_SLEEP) {
+ ddata->mode = (mode >> LSM303DLH_M_MR_MD_BIT);
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ /* if same mode as existing, return */
+ if (ddata->mode == mode) {
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ /* turn on the supplies if already off */
+ if (ddata->mode == LSM303DLH_M_MODE_SLEEP && ddata->regulator
+ && (ddata->device_status == DEVICE_OFF
+ || ddata->device_status == DEVICE_SUSPENDED)) {
+ regulator_enable(ddata->regulator);
+ ddata->device_status = DEVICE_ON;
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ enable_irq(gpio_to_irq(ddata->pdata.irq_m));
+#endif
+ }
+
+ error = lsm303dlh_m_set_mode(ddata, mode);
+
+ ddata->mode = (mode >> LSM303DLH_M_MR_MD_BIT);
+ if (error < 0) {
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ if (mode == LSM303DLH_M_MODE_SLEEP) {
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ disable_irq(gpio_to_irq(ddata->pdata.irq_m));
+#endif
+
+ /*
+ * No need to save context here; this is not the
+ * suspend/resume path, so fall back to default values.
+ */
+ ddata->rate = LSM303DLH_M_RATE_00_75;
+ ddata->range = LSM303DLH_M_RANGE_1_3G;
+ ddata->range <<= LSM303DLH_M_CRB_GN_BIT;
+ ddata->range &= LSM303DLH_M_CRB_GN_MASK;
+ ddata->gain[ddata->pdata.axis_map_x] = XY_GAIN_1_3;
+ ddata->gain[ddata->pdata.axis_map_y] = XY_GAIN_1_3;
+ ddata->gain[ddata->pdata.axis_map_z] = Z_GAIN_1_3;
+
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ }
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(gain, S_IRUGO, lsm303dlh_m_gain, NULL);
+
+static DEVICE_ATTR(data, S_IRUGO, lsm303dlh_m_values, NULL);
+
+static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO,
+ lsm303dlh_m_show_mode, lsm303dlh_m_store_mode);
+
+static DEVICE_ATTR(range, S_IWUSR | S_IRUGO,
+ lsm303dlh_m_show_range, lsm303dlh_m_store_range);
+
+static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO,
+ lsm303dlh_m_show_rate, lsm303dlh_m_store_rate);
+
+static struct attribute *lsm303dlh_m_attributes[] = {
+ &dev_attr_gain.attr,
+ &dev_attr_data.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_range.attr,
+ &dev_attr_rate.attr,
+ NULL
+};
+
+static const struct attribute_group lsm303dlh_m_attr_group = {
+ .attrs = lsm303dlh_m_attributes,
+};
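+
+/*
+ * Sketch of the intended user-space ordering for the group above: take the
+ * device out of LSM303DLH_M_MODE_SLEEP first, since the range and rate
+ * stores deliberately return without touching the hardware while the
+ * device is asleep.
+ *
+ *   echo 0 > mode    # LSM303DLH_M_MODE_CONTINUOUS
+ *   echo 1 > range   # LSM303DLH_M_RANGE_1_3G
+ *   echo 4 > rate    # LSM303DLH_M_RATE_15_00
+ *   cat data gain
+ */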
+
+static int __devinit lsm303dlh_m_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lsm303dlh_m_data *ddata = NULL;
+ unsigned char version[3];
+
+ ddata = kzalloc(sizeof(struct lsm303dlh_m_data), GFP_KERNEL);
+ if (ddata == NULL) {
+ ret = -ENOMEM;
+ goto err_op_failed;
+ }
+
+ ddata->client = client;
+ i2c_set_clientdata(client, ddata);
+
+ /* copy platform specific data */
+ memcpy(&ddata->pdata, client->dev.platform_data, sizeof(ddata->pdata));
+
+ ddata->mode = LSM303DLH_M_MODE_SLEEP;
+ ddata->rate = LSM303DLH_M_RATE_00_75;
+ ddata->range = LSM303DLH_M_RANGE_1_3G;
+ ddata->range <<= LSM303DLH_M_CRB_GN_BIT;
+ ddata->range &= LSM303DLH_M_CRB_GN_MASK;
+ ddata->gain[ddata->pdata.axis_map_x] = XY_GAIN_1_3;
+ ddata->gain[ddata->pdata.axis_map_y] = XY_GAIN_1_3;
+ ddata->gain[ddata->pdata.axis_map_z] = Z_GAIN_1_3;
+ ddata->device_status = DEVICE_OFF;
+ dev_set_name(&client->dev, "%s", ddata->pdata.name_m);
+ ddata->regulator = regulator_get(&client->dev, "vdd");
+
+ if (IS_ERR(ddata->regulator)) {
+ dev_err(&client->dev, "failed to get regulator\n");
+ ret = PTR_ERR(ddata->regulator);
+ ddata->regulator = NULL;
+ }
+
+ if (ddata->regulator) {
+ /*
+ * 0.83 milliamps typical with magnetic sensor setting ODR =
+ * 7.5 Hz, Accelerometer sensor ODR = 50 Hz. Double for
+ * safety.
+ */
+ regulator_set_optimum_mode(ddata->regulator, 830 * 2);
+ regulator_enable(ddata->regulator);
+ ddata->device_status = DEVICE_ON;
+ }
+
+ ret = lsm303dlh_m_read_multi(ddata, IRA_REG_M, 3, version, "IRA_REG_M");
+ if (ret < 0)
+ goto exit_free_regulator;
+
+ dev_info(&client->dev, "Magnetometer, ID : %x:%x:%x",
+ version[0], version[1], version[2]);
+
+ mutex_init(&ddata->lock);
+
+ ret = sysfs_create_group(&client->dev.kobj, &lsm303dlh_m_attr_group);
+ if (ret)
+ goto exit_free_regulator;
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+
+ ddata->input_dev = input_allocate_device();
+ if (!ddata->input_dev) {
+ ret = -ENOMEM;
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ goto exit_free_regulator;
+ }
+
+ set_bit(EV_ABS, ddata->input_dev->evbit);
+
+ /* x-axis acceleration */
+ input_set_abs_params(ddata->input_dev, ABS_X, -32768, 32767, 0, 0);
+ /* y-axis acceleration */
+ input_set_abs_params(ddata->input_dev, ABS_Y, -32768, 32767, 0, 0);
+ /* z-axis acceleration */
+ input_set_abs_params(ddata->input_dev, ABS_Z, -32768, 32767, 0, 0);
+
+ ddata->input_dev->name = "magnetometer";
+
+ ret = input_register_device(ddata->input_dev);
+ if (ret) {
+ dev_err(&client->dev, "Unable to register input device: %s\n",
+ ddata->input_dev->name);
+ goto err_input_register_failed;
+ }
+
+ /* register interrupt */
+ ret = request_threaded_irq(gpio_to_irq(ddata->pdata.irq_m), NULL,
+ lsm303dlh_m_gpio_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "lsm303dlh_m",
+ ddata);
+ if (ret) {
+ dev_err(&client->dev, "request irq EGPIO_PIN_1 failed\n");
+ goto err_input_failed;
+ }
+
+ disable_irq(gpio_to_irq(ddata->pdata.irq_m));
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ddata->early_suspend.level =
+ EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ddata->early_suspend.suspend = lsm303dlh_m_early_suspend;
+ ddata->early_suspend.resume = lsm303dlh_m_late_resume;
+ register_early_suspend(&ddata->early_suspend);
+#endif
+
+ if (ddata->device_status == DEVICE_ON && ddata->regulator) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+
+ return ret;
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+err_input_failed:
+ /* unregistering also frees a registered input device */
+ input_unregister_device(ddata->input_dev);
+ goto exit_free_regulator;
+err_input_register_failed:
+ input_free_device(ddata->input_dev);
+#endif
+exit_free_regulator:
+ if (ddata->device_status == DEVICE_ON && ddata->regulator) {
+ regulator_disable(ddata->regulator);
+ regulator_put(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+err_op_failed:
+ dev_err(&client->dev, "lsm303dlh_m_probe failed %x", ret);
+ kfree(ddata);
+ return ret;
+}
+
+static int __devexit lsm303dlh_m_remove(struct i2c_client *client)
+{
+ struct lsm303dlh_m_data *ddata;
+
+ ddata = i2c_get_clientdata(client);
+
+#ifdef CONFIG_SENSORS_LSM303DLH_INPUT_DEVICE
+ /* input_unregister_device() drops the last reference, no explicit free */
+ input_unregister_device(ddata->input_dev);
+#endif
+
+ sysfs_remove_group(&client->dev.kobj, &lsm303dlh_m_attr_group);
+
+ /* safer to make device off */
+ if (ddata->mode != LSM303DLH_M_MODE_SLEEP) {
+ lsm303dlh_m_set_mode(ddata, LSM303DLH_M_MODE_SLEEP);
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ regulator_put(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ }
+
+ i2c_set_clientdata(client, NULL);
+ kfree(ddata);
+
+ return 0;
+}
+
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#ifdef CONFIG_PM
+static int lsm303dlh_m_suspend(struct device *dev)
+{
+ struct lsm303dlh_m_data *ddata;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+
+ ret = lsm303dlh_m_do_suspend(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device");
+
+ return ret;
+}
+
+static int lsm303dlh_m_resume(struct device *dev)
+{
+ struct lsm303dlh_m_data *ddata;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+
+ ret = lsm303dlh_m_restore(ddata);
+
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device");
+
+ return ret;
+}
+static const struct dev_pm_ops lsm303dlh_m_dev_pm_ops = {
+ .suspend = lsm303dlh_m_suspend,
+ .resume = lsm303dlh_m_resume,
+};
+#endif
+#else
+static void lsm303dlh_m_early_suspend(struct early_suspend *data)
+{
+ struct lsm303dlh_m_data *ddata =
+ container_of(data, struct lsm303dlh_m_data, early_suspend);
+ int ret;
+
+ ret = lsm303dlh_m_do_suspend(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device");
+}
+
+static void lsm303dlh_m_late_resume(struct early_suspend *data)
+{
+ struct lsm303dlh_m_data *ddata =
+ container_of(data, struct lsm303dlh_m_data, early_suspend);
+ int ret;
+
+ ret = lsm303dlh_m_restore(ddata);
+
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "lsm303dlh_m late resume failed\n");
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+static const struct i2c_device_id lsm303dlh_m_id[] = {
+ { "lsm303dlh_m", 0 },
+ { },
+};
+
+static struct i2c_driver lsm303dlh_m_driver = {
+ .probe = lsm303dlh_m_probe,
+ .remove = lsm303dlh_m_remove,
+ .id_table = lsm303dlh_m_id,
+ .driver = {
+ .name = "lsm303dlh_m",
+#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
+ .pm = &lsm303dlh_m_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init lsm303dlh_m_init(void)
+{
+ return i2c_add_driver(&lsm303dlh_m_driver);
+}
+
+static void __exit lsm303dlh_m_exit(void)
+{
+ i2c_del_driver(&lsm303dlh_m_driver);
+}
+
+module_init(lsm303dlh_m_init);
+module_exit(lsm303dlh_m_exit);
+
+MODULE_DESCRIPTION("lSM303DLH 3-Axis Magnetometer Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("STMicroelectronics");
diff --git a/drivers/hwmon/lsm303dlhc_a.c b/drivers/hwmon/lsm303dlhc_a.c
new file mode 100644
index 00000000000..f4012442b68
--- /dev/null
+++ b/drivers/hwmon/lsm303dlhc_a.c
@@ -0,0 +1,704 @@
+/*
+ * ST LSM303DLHC 3-Axis Accelerometer Driver
+ *
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Chethan Krishna N <chethan.krishna@stericsson.com> for ST-Ericsson
+ * Licence terms: GNU General Public Licence (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+
+#include <linux/lsm303dlh.h>
+#include <linux/regulator/consumer.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#define WHO_AM_I 0x0F
+
+/* lsm303dlhc accelerometer registers */
+#define CTRL_REG1 0x20
+#define CTRL_REG2 0x21
+#define CTRL_REG3 0x22
+#define CTRL_REG4 0x23
+#define CTRL_REG5 0x24
+#define CTRL_REG6 0x25
+
+/* lsm303dlhc accelerometer defines */
+#define LSM303DLHC_A_MODE_OFF 0x00
+#define LSM303DLHC_A_MODE_ON 0x04
+#define LSM303DLHC_A_MODE_MAX 0x09
+#define LSM303DLHC_A_CR1_MODE_BIT 4
+#define LSM303DLHC_A_CR1_MODE_MASK (0xF << LSM303DLHC_A_CR1_MODE_BIT)
+#define LSM303DLHC_A_CR1_AXIS_ENABLE 7
+
+/* Range */
+#define LSM303DLHC_A_RANGE_2G 0x00
+#define LSM303DLHC_A_RANGE_4G 0x01
+#define LSM303DLHC_A_RANGE_8G 0x02
+#define LSM303DLHC_A_RANGE_16G 0x03
+#define LSM303DLHC_A_CR4_FS_BIT 4
+
+/* Sensitivity adjustment */
+#define SHIFT_ADJ_2G 4 /* 1/16*/
+#define SHIFT_ADJ_4G 3 /* 2/16*/
+#define SHIFT_ADJ_8G 2 /* ~3.9/16*/
+#define SHIFT_ADJ_16G 1 /* ~3.9/16*/
+
+#define AXISDATA_REG 0x28 /* axis data */
+
+/* lsm303dlh magnetometer registers */
+#define IRA_REG_M 0x0A
+
+/* multiple byte transfer enable */
+#define MULTIPLE_I2C_TR 0x80
+
+/* device status defines */
+#define DEVICE_OFF 0
+#define DEVICE_ON 1
+#define DEVICE_SUSPENDED 2
+
+struct lsm303dlhc_a_t {
+ short x;
+ short y;
+ short z;
+};
+
+/**
+ * struct lsm303dlhc_a_data - data structure used by lsm303dlhc_a driver
+ * @client: i2c client
+ * @lock: mutex lock for sysfs operations
+ * @data: lsm303dlhc_a_t struct containing x, y and z values
+ * @pdata: lsm303dlh platform data
+ * @regulator: regulator
+ * @range: current range value of accelerometer
+ * @mode: current mode of operation
+ * @rate: current sampling rate
+ * @shift_adjust: current shift adjust value set according to range
+ * @early_suspend: early suspend structure
+ * @device_status: device is ON, OFF or SUSPENDED
+ * @id: accelerometer device id
+ */
+struct lsm303dlhc_a_data {
+ struct i2c_client *client;
+ /* lock for sysfs operations */
+ struct mutex lock;
+ struct lsm303dlhc_a_t data;
+ struct lsm303dlh_platform_data pdata;
+ struct regulator *regulator;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+ unsigned char range;
+ unsigned char mode;
+ unsigned char rate;
+ int shift_adjust;
+ int device_status;
+ int id;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void lsm303dlhc_a_early_suspend(struct early_suspend *data);
+static void lsm303dlhc_a_late_resume(struct early_suspend *data);
+#endif
+
+static int lsm303dlhc_a_write(struct lsm303dlhc_a_data *ddata, u8 reg,
+ u8 val, char *msg)
+{
+ int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_write_byte_data failed error %d\
+ Register (%s)\n", ret, msg);
+ return ret;
+}
+
+static int lsm303dlhc_a_read(struct lsm303dlhc_a_data *ddata, u8 reg, char *msg)
+{
+ int ret = i2c_smbus_read_byte_data(ddata->client, reg);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_read_byte_data failed error %d\
+ Register (%s)\n", ret, msg);
+ return ret;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_PM)
+static int lsm303dlhc_a_do_suspend(struct lsm303dlhc_a_data *ddata)
+{
+ int ret;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLHC_A_MODE_OFF) {
+ ret = 0;
+ goto exit;
+ }
+
+ ret = lsm303dlhc_a_write(ddata, CTRL_REG1,
+ LSM303DLHC_A_MODE_OFF, "CONTROL");
+
+ if (ddata->regulator)
+ regulator_disable(ddata->regulator);
+
+ ddata->device_status = DEVICE_SUSPENDED;
+
+exit:
+ mutex_unlock(&ddata->lock);
+
+ return ret;
+}
+
+static int lsm303dlhc_a_restore(struct lsm303dlhc_a_data *ddata)
+{
+ unsigned char reg;
+ unsigned char shifted_mode = (ddata->mode << LSM303DLHC_A_CR1_MODE_BIT);
+ int ret = 0;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->device_status == DEVICE_ON) {
+ mutex_unlock(&ddata->lock);
+ return 0;
+ }
+
+ /* nothing to restore if the device was left switched off */
+ if (ddata->mode == LSM303DLHC_A_MODE_OFF) {
+ ddata->device_status = DEVICE_OFF;
+ goto fail;
+ } else
+ ddata->device_status = DEVICE_ON;
+
+ if (ddata->regulator)
+ regulator_enable(ddata->regulator);
+
+ /* BDU should be enabled by default (recommended) */
+ reg = ddata->range;
+ shifted_mode |= LSM303DLHC_A_CR1_AXIS_ENABLE;
+
+ ret = lsm303dlhc_a_write(ddata, CTRL_REG1, shifted_mode,
+ "CTRL_REG1");
+ if (ret < 0)
+ goto fail;
+
+ ret = lsm303dlhc_a_write(ddata, CTRL_REG4, reg, "CTRL_REG4");
+
+ if (ret < 0)
+ goto fail;
+
+ /* write to the boot bit to reboot memory content */
+ ret = lsm303dlhc_a_write(ddata, CTRL_REG5, 0x80, "CTRL_REG5");
+
+ if (ret < 0)
+ goto fail;
+
+fail:
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "could not restore the device %d\n", ret);
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
+#endif
+
+static int lsm303dlhc_a_readdata(struct lsm303dlhc_a_data *ddata)
+{
+ unsigned char acc_data[6];
+ short data[3];
+
+ int ret = i2c_smbus_read_i2c_block_data(ddata->client,
+ AXISDATA_REG | MULTIPLE_I2C_TR, 6, acc_data);
+ if (ret < 0) {
+ dev_err(&ddata->client->dev,
+ "i2c_smbus_read_byte_data failed error %d\
+ Register AXISDATA_REG \n", ret);
+ return ret;
+ }
+
+ data[0] = (short) (((acc_data[1]) << 8) | acc_data[0]);
+ data[1] = (short) (((acc_data[3]) << 8) | acc_data[2]);
+ data[2] = (short) (((acc_data[5]) << 8) | acc_data[4]);
+
+ data[0] >>= ddata->shift_adjust;
+ data[1] >>= ddata->shift_adjust;
+ data[2] >>= ddata->shift_adjust;
+
+ /* take the position and orientation of the x, y and z axes into account */
+
+ data[ddata->pdata.axis_map_x] = ddata->pdata.negative_x ?
+ -data[ddata->pdata.axis_map_x] : data[ddata->pdata.axis_map_x];
+ data[ddata->pdata.axis_map_y] = ddata->pdata.negative_y ?
+ -data[ddata->pdata.axis_map_y] : data[ddata->pdata.axis_map_y];
+ data[ddata->pdata.axis_map_z] = ddata->pdata.negative_z ?
+ -data[ddata->pdata.axis_map_z] : data[ddata->pdata.axis_map_z];
+
+ ddata->data.x = data[ddata->pdata.axis_map_x];
+ ddata->data.y = data[ddata->pdata.axis_map_y];
+ ddata->data.z = data[ddata->pdata.axis_map_z];
+
+ return ret;
+}
+
+static ssize_t lsm303dlhc_a_show_data(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlhc_a_data *ddata = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLHC_A_MODE_OFF ||
+ ddata->device_status == DEVICE_SUSPENDED) {
+ mutex_unlock(&ddata->lock);
+ return ret;
+ }
+
+ ret = lsm303dlhc_a_readdata(ddata);
+
+ if (ret < 0) {
+ mutex_unlock(&ddata->lock);
+ return ret;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return sprintf(buf, "%8x:%8x:%8x\n", ddata->data.x, ddata->data.y,
+ ddata->data.z);
+}
+
+static ssize_t lsm303dlhc_a_show_range(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlhc_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->range >> LSM303DLHC_A_CR4_FS_BIT);
+}
+
+static ssize_t lsm303dlhc_a_store_range(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlhc_a_data *ddata = platform_get_drvdata(pdev);
+ long val;
+ int error;
+
+ error = strict_strtol(buf, 0, &val);
+ if (error)
+ return error;
+
+ if (val < LSM303DLHC_A_RANGE_2G || val > LSM303DLHC_A_RANGE_16G)
+ return -EINVAL;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->mode == LSM303DLHC_A_MODE_OFF) {
+ dev_info(&ddata->client->dev,
+ "device is switched off,make it ON using MODE");
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ ddata->range = val;
+ ddata->range <<= LSM303DLHC_A_CR4_FS_BIT;
+
+ error = lsm303dlhc_a_write(ddata, CTRL_REG4, ddata->range,
+ "CTRL_REG4");
+ if (error < 0) {
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ switch (val) {
+ case LSM303DLHC_A_RANGE_2G:
+ ddata->shift_adjust = SHIFT_ADJ_2G;
+ break;
+ case LSM303DLHC_A_RANGE_4G:
+ ddata->shift_adjust = SHIFT_ADJ_4G;
+ break;
+ case LSM303DLHC_A_RANGE_8G:
+ ddata->shift_adjust = SHIFT_ADJ_8G;
+ break;
+ case LSM303DLHC_A_RANGE_16G:
+ ddata->shift_adjust = SHIFT_ADJ_16G;
+ break;
+ default:
+ mutex_unlock(&ddata->lock);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static ssize_t lsm303dlhc_a_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlhc_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->mode);
+}
+
+static ssize_t lsm303dlhc_a_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlhc_a_data *ddata = platform_get_drvdata(pdev);
+ long val;
+ unsigned char data;
+ int error;
+ bool set_boot_bit = false;
+
+ error = strict_strtol(buf, 0, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&ddata->lock);
+
+ /* reject values outside the supported mode range */
+ if (val < LSM303DLHC_A_MODE_OFF || val > LSM303DLHC_A_MODE_MAX) {
+ mutex_unlock(&ddata->lock);
+ return -EINVAL;
+ }
+
+ if (ddata->device_status == DEVICE_SUSPENDED) {
+ if (val == LSM303DLHC_A_MODE_OFF) {
+ ddata->mode = val;
+ mutex_unlock(&ddata->lock);
+ return count;
+ } else {
+ /* device is turning on after suspend, reset memory */
+ set_boot_bit = true;
+ }
+ }
+
+ /* if same mode as existing, return */
+ if (ddata->mode == val) {
+ mutex_unlock(&ddata->lock);
+ return count;
+ }
+
+ /* turn on the supplies if already off */
+ if (ddata->regulator && ddata->mode == LSM303DLHC_A_MODE_OFF
+ && (ddata->device_status == DEVICE_OFF
+ || ddata->device_status == DEVICE_SUSPENDED)) {
+ regulator_enable(ddata->regulator);
+ ddata->device_status = DEVICE_ON;
+ }
+
+ data = lsm303dlhc_a_read(ddata, CTRL_REG1, "CTRL_REG1");
+
+ /*
+ * The x, y and z axis enable bits can get cleared across
+ * suspend/resume, so set them again to keep axis data
+ * available.
+ */
+ data |= LSM303DLHC_A_CR1_AXIS_ENABLE;
+
+ data &= ~LSM303DLHC_A_CR1_MODE_MASK;
+
+ ddata->mode = val;
+
+ data |= ((val << LSM303DLHC_A_CR1_MODE_BIT)
+ & LSM303DLHC_A_CR1_MODE_MASK);
+
+ error = lsm303dlhc_a_write(ddata, CTRL_REG1, data, "CTRL_REG1");
+ if (error < 0) {
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+
+ /*
+ * On a power-on request while the device is suspended, write
+ * the boot bit in CTRL_REG5 to reboot the memory content and
+ * ensure correct device behaviour after resume.
+ */
+ if (set_boot_bit) {
+ error = lsm303dlhc_a_write(ddata, CTRL_REG5, 0x80, "CTRL_REG5");
+ if (error < 0) {
+ if (ddata->regulator &&
+ ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ mutex_unlock(&ddata->lock);
+ return error;
+ }
+ }
+
+ if (val == LSM303DLHC_A_MODE_OFF) {
+
+ /*
+ * No need to save context here; this is not the
+ * suspend/resume path, so fall back to the default
+ * values.
+ */
+ ddata->range = LSM303DLHC_A_RANGE_2G;
+ ddata->shift_adjust = SHIFT_ADJ_2G;
+
+ if (ddata->regulator && ddata->device_status == DEVICE_ON) {
+ regulator_disable(ddata->regulator);
+ ddata->device_status = DEVICE_OFF;
+ }
+ }
+ mutex_unlock(&ddata->lock);
+
+ return count;
+}
+
+static ssize_t lsm303dlhc_a_show_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lsm303dlhc_a_data *ddata = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%d\n", ddata->id);
+}
+
+static DEVICE_ATTR(id, S_IRUGO, lsm303dlhc_a_show_id, NULL);
+
+static DEVICE_ATTR(data, S_IRUGO, lsm303dlhc_a_show_data, NULL);
+
+static DEVICE_ATTR(range, S_IWUSR | S_IRUGO,
+ lsm303dlhc_a_show_range, lsm303dlhc_a_store_range);
+
+static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO,
+ lsm303dlhc_a_show_mode, lsm303dlhc_a_store_mode);
+
+static struct attribute *lsm303dlhc_a_attributes[] = {
+ &dev_attr_data.attr,
+ &dev_attr_range.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_id.attr,
+ NULL
+};
+
+static const struct attribute_group lsm303dlhc_a_attr_group = {
+ .attrs = lsm303dlhc_a_attributes,
+};
+
+static int __devinit lsm303dlhc_a_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lsm303dlhc_a_data *adata = NULL;
+
+ adata = kzalloc(sizeof(struct lsm303dlhc_a_data), GFP_KERNEL);
+ if (adata == NULL) {
+ ret = -ENOMEM;
+ goto err_op_failed;
+ }
+
+ adata->client = client;
+ i2c_set_clientdata(client, adata);
+
+ /* copy platform specific data */
+ memcpy(&adata->pdata, client->dev.platform_data, sizeof(adata->pdata));
+ adata->mode = LSM303DLHC_A_MODE_OFF;
+ adata->range = LSM303DLHC_A_RANGE_2G;
+ adata->shift_adjust = SHIFT_ADJ_2G;
+ adata->device_status = DEVICE_OFF;
+	dev_set_name(&client->dev, "%s", adata->pdata.name_a);
+
+ adata->regulator = regulator_get(&client->dev, "vdd");
+ if (IS_ERR(adata->regulator)) {
+ dev_err(&client->dev, "failed to get regulator\n");
+ ret = PTR_ERR(adata->regulator);
+ adata->regulator = NULL;
+ }
+
+ if (adata->regulator) {
+ /*
+ * 130 microamps typical with magnetic sensor setting ODR = 7.5
+ * Hz, Accelerometer sensor ODR = 50 Hz. Double for safety.
+ */
+ regulator_set_optimum_mode(adata->regulator, 130 * 2);
+ regulator_enable(adata->regulator);
+ adata->device_status = DEVICE_ON;
+ }
+
+ ret = lsm303dlhc_a_read(adata, WHO_AM_I, "WHO_AM_I");
+ if (ret < 0)
+ goto exit_free_regulator;
+
+ dev_info(&client->dev, "3-Axis Accelerometer, ID : %d\n",
+ ret);
+ adata->id = ret;
+
+ mutex_init(&adata->lock);
+
+ ret = sysfs_create_group(&client->dev.kobj, &lsm303dlhc_a_attr_group);
+ if (ret)
+ goto exit_free_regulator;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ adata->early_suspend.level =
+ EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ adata->early_suspend.suspend = lsm303dlhc_a_early_suspend;
+ adata->early_suspend.resume = lsm303dlhc_a_late_resume;
+ register_early_suspend(&adata->early_suspend);
+#endif
+
+ if (adata->device_status == DEVICE_ON && adata->regulator) {
+ regulator_disable(adata->regulator);
+ adata->device_status = DEVICE_OFF;
+ }
+
+ return ret;
+
+exit_free_regulator:
+ if (adata->device_status == DEVICE_ON && adata->regulator) {
+ regulator_disable(adata->regulator);
+ regulator_put(adata->regulator);
+ adata->device_status = DEVICE_OFF;
+ }
+err_op_failed:
+ kfree(adata);
+	dev_err(&client->dev, "probe failed with error %d\n", ret);
+ return ret;
+}
+
+static int __devexit lsm303dlhc_a_remove(struct i2c_client *client)
+{
+ int ret;
+ struct lsm303dlhc_a_data *adata;
+
+ adata = i2c_get_clientdata(client);
+ sysfs_remove_group(&client->dev.kobj, &lsm303dlhc_a_attr_group);
+
+ /* safer to make device off */
+ if (adata->mode != LSM303DLHC_A_MODE_OFF) {
+ ret = lsm303dlhc_a_write(adata, CTRL_REG1, 0, "CONTROL");
+
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "could not turn off the device %d",
+ ret);
+ return ret;
+ }
+
+ if (adata->regulator && adata->device_status == DEVICE_ON) {
+ regulator_disable(adata->regulator);
+ regulator_put(adata->regulator);
+ adata->device_status = DEVICE_OFF;
+ }
+ }
+
+ i2c_set_clientdata(client, NULL);
+ kfree(adata);
+
+ return 0;
+}
+
+#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
+static int lsm303dlhc_a_suspend(struct device *dev)
+{
+ struct lsm303dlhc_a_data *ddata;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+
+ ret = lsm303dlhc_a_do_suspend(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device");
+
+ return ret;
+}
+
+static int lsm303dlhc_a_resume(struct device *dev)
+{
+ struct lsm303dlhc_a_data *ddata;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+
+ ret = lsm303dlhc_a_restore(ddata);
+
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device");
+
+ return ret;
+}
+static const struct dev_pm_ops lsm303dlhc_a_dev_pm_ops = {
+ .suspend = lsm303dlhc_a_suspend,
+ .resume = lsm303dlhc_a_resume,
+};
+#else
+static void lsm303dlhc_a_early_suspend(struct early_suspend *data)
+{
+ struct lsm303dlhc_a_data *ddata =
+ container_of(data, struct lsm303dlhc_a_data, early_suspend);
+ int ret;
+
+	ret = lsm303dlhc_a_do_suspend(ddata);
+	if (ret < 0)
+		dev_err(&ddata->client->dev,
+			"lsm303dlhc_a early suspend failed\n");
+}
+
+static void lsm303dlhc_a_late_resume(struct early_suspend *data)
+{
+ struct lsm303dlhc_a_data *ddata =
+ container_of(data, struct lsm303dlhc_a_data, early_suspend);
+ int ret;
+
+ ret = lsm303dlhc_a_restore(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "lsm303dlhc_a late resume failed\n");
+}
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id lsm303dlhc_a_id[] = {
+ { "lsm303dlhc_a", 0 },
+ { },
+};
+
+static struct i2c_driver lsm303dlhc_a_driver = {
+ .probe = lsm303dlhc_a_probe,
+ .remove = lsm303dlhc_a_remove,
+ .id_table = lsm303dlhc_a_id,
+ .driver = {
+ .name = "lsm303dlhc_a",
+ #if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
+ .pm = &lsm303dlhc_a_dev_pm_ops,
+ #endif
+ },
+};
+
+static int __init lsm303dlhc_a_init(void)
+{
+ return i2c_add_driver(&lsm303dlhc_a_driver);
+}
+
+static void __exit lsm303dlhc_a_exit(void)
+{
+ i2c_del_driver(&lsm303dlhc_a_driver);
+}
+
+module_init(lsm303dlhc_a_init)
+module_exit(lsm303dlhc_a_exit)
+
+MODULE_DESCRIPTION("LSM303DLHC 3-Axis Accelerometer Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("STMicroelectronics");
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 5267ab93d55..9ddf2c97d26 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -431,7 +431,7 @@ static int read_i2c(struct nmk_i2c_dev *dev)
if (timeout == 0) {
/* Controller timed out */
- dev_err(&dev->pdev->dev, "read from slave 0x%x timed out\n",
+ dev_err(&dev->pdev->dev, "Read from Slave 0x%x timed out\n",
dev->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -518,7 +518,7 @@ static int write_i2c(struct nmk_i2c_dev *dev)
if (timeout == 0) {
/* Controller timed out */
- dev_err(&dev->pdev->dev, "write to slave 0x%x timed out\n",
+ dev_err(&dev->pdev->dev, "Write to slave 0x%x timed out\n",
dev->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -628,12 +628,8 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
dev->busy = true;
- if (dev->regulator)
- regulator_enable(dev->regulator);
pm_runtime_get_sync(&dev->pdev->dev);
- clk_enable(dev->clk);
-
status = init_hw(dev);
if (status)
goto out;
@@ -666,10 +662,8 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
}
out:
- clk_disable(dev->clk);
- pm_runtime_put_sync(&dev->pdev->dev);
- if (dev->regulator)
- regulator_disable(dev->regulator);
+
+ pm_runtime_put(&dev->pdev->dev);
dev->busy = false;
@@ -859,9 +853,9 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
#ifdef CONFIG_PM
-static int nmk_i2c_suspend(struct device *dev)
+
+static int nmk_i2c_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct platform_device *pdev = to_platform_device(dev);
struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev);
if (nmk_i2c->busy)
@@ -870,23 +864,53 @@ static int nmk_i2c_suspend(struct device *dev)
return 0;
}
-static int nmk_i2c_resume(struct device *dev)
+static int nmk_i2c_suspend_noirq(struct device *dev)
{
+ struct nmk_i2c_dev *nmk_i2c =
+ platform_get_drvdata(to_platform_device(dev));
+
+ if (nmk_i2c->busy)
+ return -EBUSY;
+
return 0;
}
+
#else
#define nmk_i2c_suspend NULL
-#define nmk_i2c_resume NULL
+#define nmk_i2c_suspend_noirq NULL
#endif
+static int nmk_i2c_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev);
+
+ clk_disable(nmk_i2c->clk);
+ if (nmk_i2c->regulator)
+ regulator_disable(nmk_i2c->regulator);
+ return 0;
+}
+
+static int nmk_i2c_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev);
+
+ if (nmk_i2c->regulator)
+ regulator_enable(nmk_i2c->regulator);
+ clk_enable(nmk_i2c->clk);
+ return 0;
+}
+
/*
* We use noirq so that we suspend late and resume before the wakeup interrupt
* to ensure that we do the !pm_runtime_suspended() check in resume before
* there has been a regular pm runtime resume (via pm_runtime_get_sync()).
*/
static const struct dev_pm_ops nmk_i2c_pm = {
- .suspend_noirq = nmk_i2c_suspend,
- .resume_noirq = nmk_i2c_resume,
+ SET_RUNTIME_PM_OPS(nmk_i2c_runtime_suspend, nmk_i2c_runtime_resume,
+ NULL)
+ .suspend_noirq = nmk_i2c_suspend_noirq,
};
static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap)
@@ -1047,6 +1071,7 @@ static struct platform_driver nmk_i2c_driver = {
},
.probe = nmk_i2c_probe,
.remove = __devexit_p(nmk_i2c_remove),
+ .suspend = nmk_i2c_suspend,
};
static int __init nmk_i2c_init(void)
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index cdc385b2cf7..1cfd730f212 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -151,6 +151,16 @@ config KEYBOARD_BFIN
To compile this driver as a module, choose M here: the
module will be called bf54x-keys.
+config KEYBOARD_DB5500
+ tristate "DB5500 keyboard"
+ depends on UX500_SOC_DB5500
+ help
+ Say Y here to enable the on-chip keypad controller on the
+ ST-Ericsson U5500 platform.
+
+ To compile this driver as a module, choose M here: the
+ module will be called db5500_keypad.
+
config KEYBOARD_LKKBD
tristate "DECstation/VAXstation LK201/LK401 keyboard"
select SERIO
@@ -381,7 +391,7 @@ config KEYBOARD_NEWTON
To compile this driver as a module, choose M here: the
module will be called newtonkbd.
-config KEYBOARD_NOMADIK
+config KEYBOARD_NOMADIK_SKE
tristate "ST-Ericsson Nomadik SKE keyboard"
depends on PLAT_NOMADIK
help
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index df7061f1291..90a01405e51 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
+obj-$(CONFIG_KEYBOARD_DB5500) += db5500_keypad.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
@@ -31,7 +32,7 @@ obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o
obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o
obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
-obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o
+obj-$(CONFIG_KEYBOARD_NOMADIK_SKE) += nomadik-ske-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
diff --git a/drivers/input/keyboard/db5500_keypad.c b/drivers/input/keyboard/db5500_keypad.c
new file mode 100644
index 00000000000..729775d99e8
--- /dev/null
+++ b/drivers/input/keyboard/db5500_keypad.c
@@ -0,0 +1,799 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License, version 2
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <mach/db5500-keypad.h>
+#include <linux/regulator/consumer.h>
+
+#define KEYPAD_CTR 0x0
+#define KEYPAD_IRQ_CLEAR 0x4
+#define KEYPAD_INT_ENABLE 0x8
+#define KEYPAD_INT_STATUS 0xC
+#define KEYPAD_ARRAY_01 0x18
+
+#define KEYPAD_NUM_ARRAY_REGS 5
+
+#define KEYPAD_CTR_WRITE_IRQ_ENABLE (1 << 10)
+#define KEYPAD_CTR_WRITE_CONTROL (1 << 8)
+#define KEYPAD_CTR_SCAN_ENABLE (1 << 7)
+
+#define KEYPAD_ARRAY_CHANGEBIT (1 << 15)
+
+#define KEYPAD_DEBOUNCE_PERIOD_MIN 5 /* ms */
+#define KEYPAD_DEBOUNCE_PERIOD_MAX 80 /* ms */
+
+#define KEYPAD_GND_ROW 8
+
+#define KEYPAD_ROW_SHIFT 3
+#define KEYPAD_KEYMAP_SIZE \
+ (KEYPAD_MAX_ROWS * KEYPAD_MAX_COLS)
+
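+/* delay, in jiffies, before a key press seen in GPIO mode is reported */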
+#define KEY_PRESSED_DELAY 10
+/**
+ * struct db5500_keypad - data structure used by keypad driver
+ * @irq: irq number
+ * @base: keypad registers base address
+ * @input: pointer to input device object
+ * @board: keypad platform data
+ * @keymap: matrix scan code table for keycodes
+ * @clk: clock structure pointer
+ * @regulator: regulator used by keypad
+ * @switch_work: delayed work variable for switching to gpio mode
+ * @gpio_work: delayed work variable for reporting key event in gpio mode
+ * @previous_set: previous set of registers
+ * @enable: flag to enable the driver event
+ * @enable_on_resume: set if keypad should be enabled on resume
+ * @valid_key: holds the state of a valid key press
+ * @db5500_rows: rows gpio array for db5500 keypad
+ * @db5500_cols: cols gpio array for db5500 keypad
+ * @gpio_input_irq: array for gpio irqs
+ * @gpio_row: gpio row
+ * @gpio_col: gpio column
+ */
+struct db5500_keypad {
+ int irq;
+ void __iomem *base;
+ struct input_dev *input;
+ const struct db5500_keypad_platform_data *board;
+ unsigned short keymap[KEYPAD_KEYMAP_SIZE];
+ struct clk *clk;
+ struct regulator *regulator;
+ struct delayed_work switch_work;
+ struct delayed_work gpio_work;
+ u8 previous_set[KEYPAD_MAX_ROWS];
+ bool enable;
+ bool enable_on_resume;
+ bool valid_key;
+ int db5500_rows[KEYPAD_MAX_ROWS];
+ int db5500_cols[KEYPAD_MAX_COLS];
+ int gpio_input_irq[KEYPAD_MAX_ROWS];
+ int gpio_row;
+ int gpio_col;
+};
+
+/**
+ * db5500_keypad_report() - reports the keypad event
+ * @keypad: pointer to device structure
+ * @row: row value of keypad
+ * @curr: current event
+ * @previous: previous event
+ *
+ * Reports keypad key press and release events to the input subsystem.
+ *
+ * By default all column reads are 1111 1111b. Any press will pull the column
+ * down, leading to a 0 in any of these locations. We invert these values so
+ * that a 1 means "column pressed".
+ * If a bit in curr changes from 0 to 1 relative to previous, we report a key
+ * press; if it changes from 1 to 0, we report a key release.
+ */
+static void db5500_keypad_report(struct db5500_keypad *keypad, int row,
+ u8 curr, u8 previous)
+{
+ struct input_dev *input = keypad->input;
+ u8 changed = curr ^ previous;
+
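+	/* each set bit in 'changed' is a column that differs from the last scan */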
+ while (changed) {
+ int col = __ffs(changed);
+ bool press = curr & BIT(col);
+ int code = MATRIX_SCAN_CODE(row, col, KEYPAD_ROW_SHIFT);
+
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], press);
+ input_sync(input);
+
+ changed &= ~BIT(col);
+ }
+}
+
+static void db5500_keypad_scan(struct db5500_keypad *keypad)
+{
+ u8 current_set[ARRAY_SIZE(keypad->previous_set)];
+ int tries = 100;
+ bool changebit;
+ u32 data_reg;
+ u8 allrows;
+ u8 common;
+ int i;
+
+ writel(0x1, keypad->base + KEYPAD_IRQ_CLEAR);
+
+again:
+ if (!tries--) {
+ dev_warn(&keypad->input->dev, "values failed to stabilize\n");
+ return;
+ }
+
+ changebit = readl(keypad->base + KEYPAD_ARRAY_01)
+ & KEYPAD_ARRAY_CHANGEBIT;
+
+ for (i = 0; i < KEYPAD_NUM_ARRAY_REGS; i++) {
+ data_reg = readl(keypad->base + KEYPAD_ARRAY_01 + 4 * i);
+
+ /* If the change bit changed, we need to reread the data */
+ if (changebit != !!(data_reg & KEYPAD_ARRAY_CHANGEBIT))
+ goto again;
+
+ current_set[2 * i] = ~(data_reg & 0xff);
+
+ /* Last array reg has only one valid set of columns */
+ if (i != KEYPAD_NUM_ARRAY_REGS - 1)
+ current_set[2 * i + 1] = ~((data_reg & 0xff0000) >> 16);
+ }
+
+ allrows = current_set[KEYPAD_GND_ROW];
+
+ /*
+ * Sometimes during a GND row release, an incorrect report is received
+ * where the ARRAY8 all rows setting does not match the other ARRAY*
+ * rows. Ignore this report; the correct one has been observed to
+ * follow it.
+ */
+ common = 0xff;
+ for (i = 0; i < KEYPAD_GND_ROW; i++)
+ common &= current_set[i];
+
+ if ((allrows & common) != common)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(current_set); i++) {
+ /*
+ * If there is an allrows press (GND row), we need to ignore
+		 * the allrows values from the rest of the ARRAYs.
+ */
+ if (i < KEYPAD_GND_ROW && allrows)
+ current_set[i] &= ~allrows;
+
+ if (keypad->previous_set[i] == current_set[i])
+ continue;
+
+ db5500_keypad_report(keypad, i, current_set[i],
+ keypad->previous_set[i]);
+ }
+
+ /* update the reference set of array registers */
+ memcpy(keypad->previous_set, current_set, sizeof(keypad->previous_set));
+
+ return;
+}
+
+/**
+ * db5500_keypad_writel() - write into keypad registers
+ * @keypad: pointer to device structure
+ * @val: value to write into register
+ * @reg: register offset
+ *
+ * Writes a value into a keypad register once the hardware indicates that
+ * the register may be written.
+ */
+static void db5500_keypad_writel(struct db5500_keypad *keypad, u32 val, u32 reg)
+{
+ int timeout = 4;
+ int allowedbit;
+
+ switch (reg) {
+ case KEYPAD_CTR:
+ allowedbit = KEYPAD_CTR_WRITE_CONTROL;
+ break;
+ case KEYPAD_INT_ENABLE:
+ allowedbit = KEYPAD_CTR_WRITE_IRQ_ENABLE;
+ break;
+ default:
+ BUG();
+ }
+
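+	/* wait until the hardware allows writes to this register */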
+ do {
+ u32 ctr = readl(keypad->base + KEYPAD_CTR);
+
+ if (ctr & allowedbit)
+ break;
+
+ udelay(50);
+ } while (--timeout);
+
+	/* Five 32 kHz clock cycles (~153 us) required; we waited 4 * 50 us = 200 us */
+ WARN_ON(!timeout);
+
+ writel(val, keypad->base + reg);
+}
+
+/**
+ * db5500_keypad_chip_init() - initialize the keypad chip
+ * @keypad: pointer to device structure
+ *
+ * Initializes the keypad controller. Returns 0 on success.
+ */
+static int db5500_keypad_chip_init(struct db5500_keypad *keypad)
+{
+ int debounce = keypad->board->debounce_ms;
+ int debounce_hits = 0;
+
+ if (debounce < KEYPAD_DEBOUNCE_PERIOD_MIN)
+ debounce = KEYPAD_DEBOUNCE_PERIOD_MIN;
+
+ if (debounce > KEYPAD_DEBOUNCE_PERIOD_MAX) {
+ debounce_hits = DIV_ROUND_UP(debounce,
+ KEYPAD_DEBOUNCE_PERIOD_MAX) - 1;
+ debounce = KEYPAD_DEBOUNCE_PERIOD_MAX;
+ }
+
+	/* Convert the debounce period in ms to 5 ms steps, minus one */
+ debounce = DIV_ROUND_UP(debounce, KEYPAD_DEBOUNCE_PERIOD_MIN) - 1;
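+	/*
+	 * Example: debounce_ms = 100 gives debounce_hits = 1 and a register
+	 * debounce value of 15 (80 ms expressed in 5 ms steps).
+	 */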
+
+ clk_enable(keypad->clk);
+
+ db5500_keypad_writel(keypad,
+ KEYPAD_CTR_SCAN_ENABLE
+ | ((debounce_hits & 0x7) << 4)
+ | debounce,
+ KEYPAD_CTR);
+
+ db5500_keypad_writel(keypad, 0x1, KEYPAD_INT_ENABLE);
+
+ return 0;
+}
+
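+/*
+ * Switch between keypad-controller scanning (enable) and the GPIO
+ * wake-up mode (disable).
+ */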
+static void db5500_mode_enable(struct db5500_keypad *keypad, bool enable)
+{
+ int i;
+
+ if (!enable) {
+ db5500_keypad_writel(keypad, 0, KEYPAD_CTR);
+ db5500_keypad_writel(keypad, 0, KEYPAD_INT_ENABLE);
+ if (keypad->board->exit)
+ keypad->board->exit();
+ for (i = 0; i < keypad->board->krow; i++) {
+ enable_irq(keypad->gpio_input_irq[i]);
+ enable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+ } else {
+ regulator_enable(keypad->regulator);
+ clk_enable(keypad->clk);
+ for (i = 0; i < keypad->board->krow; i++) {
+ disable_irq_nosync(keypad->gpio_input_irq[i]);
+ disable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ if (keypad->board->init)
+ keypad->board->init();
+ db5500_keypad_chip_init(keypad);
+ }
+}
+
+static void db5500_gpio_switch_work(struct work_struct *work)
+{
+ struct db5500_keypad *keypad = container_of(work,
+ struct db5500_keypad, switch_work.work);
+
+ db5500_mode_enable(keypad, false);
+ keypad->enable = false;
+}
+
+static void db5500_gpio_release_work(struct work_struct *work)
+{
+ int code;
+ struct db5500_keypad *keypad = container_of(work,
+ struct db5500_keypad, gpio_work.work);
+ struct input_dev *input = keypad->input;
+
+ code = MATRIX_SCAN_CODE(keypad->gpio_col, keypad->gpio_row,
+ KEYPAD_ROW_SHIFT);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], 1);
+ input_sync(input);
+ input_report_key(input, keypad->keymap[code], 0);
+ input_sync(input);
+}
+
+static int db5500_read_get_gpio_row(struct db5500_keypad *keypad)
+{
+ int row;
+ int value = 0;
+ int ret;
+
+ /* read all rows GPIO data register values */
+ for (row = 0; row < keypad->board->krow; row++) {
+ ret = gpio_get_value(keypad->db5500_rows[row]);
+ value += (1 << row) * ret;
+ }
+
+ /* get the exact row */
+ for (row = 0; row < keypad->board->krow; row++) {
+ if (((1 << row) & value) == 0)
+ return row;
+ }
+
+ return -1;
+}
+
+static void db5500_set_cols(struct db5500_keypad *keypad, int col)
+{
+ int i, ret;
+ int value;
+
+ /*
+	 * Drive all column output pins high except the requested
+	 * column, which is driven low
+ */
+ for (i = 0; i < keypad->board->kcol; i++) {
+ if (i == col)
+ value = 0;
+ else
+ value = 1;
+ ret = gpio_request(keypad->db5500_cols[i], "db5500-kpd");
+
+ if (ret < 0) {
+ pr_err("db5500_set_cols: gpio request failed\n");
+ continue;
+ }
+
+ gpio_direction_output(keypad->db5500_cols[i], value);
+ gpio_free(keypad->db5500_cols[i]);
+ }
+}
+
+static void db5500_free_cols(struct db5500_keypad *keypad)
+{
+ int i, ret;
+
+ for (i = 0; i < keypad->board->kcol; i++) {
+ ret = gpio_request(keypad->db5500_cols[i], "db5500-kpd");
+
+ if (ret < 0) {
+ pr_err("db5500_free_cols: gpio request failed\n");
+ continue;
+ }
+
+ gpio_direction_output(keypad->db5500_cols[i], 0);
+ gpio_free(keypad->db5500_cols[i]);
+ }
+}
+
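+/*
+ * Drive each column low in turn and check the row GPIOs to locate the
+ * pressed key.
+ */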
+static void db5500_manual_scan(struct db5500_keypad *keypad)
+{
+ int row;
+ int col;
+
+ keypad->valid_key = false;
+
+ for (col = 0; col < keypad->board->kcol; col++) {
+ db5500_set_cols(keypad, col);
+ row = db5500_read_get_gpio_row(keypad);
+ if (row >= 0) {
+ keypad->valid_key = true;
+ keypad->gpio_row = row;
+ keypad->gpio_col = col;
+ break;
+ }
+ }
+ db5500_free_cols(keypad);
+}
+
+static irqreturn_t db5500_keypad_gpio_irq(int irq, void *dev_id)
+{
+ struct db5500_keypad *keypad = dev_id;
+
+ if (!gpio_get_value(IRQ_TO_GPIO(irq))) {
+ db5500_manual_scan(keypad);
+ if (!keypad->enable) {
+ keypad->enable = true;
+ db5500_mode_enable(keypad, true);
+ }
+
+ /*
+		 * Schedule delayed work to report the key press if it is
+		 * not detected in keypad mode.
+ */
+ if (keypad->valid_key) {
+ schedule_delayed_work(&keypad->gpio_work,
+ KEY_PRESSED_DELAY);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t db5500_keypad_irq(int irq, void *dev_id)
+{
+ struct db5500_keypad *keypad = dev_id;
+
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->switch_work);
+ db5500_keypad_scan(keypad);
+
+ /*
+ * Schedule the work queue to change it to
+ * GPIO mode, if there is no activity in keypad mode
+ */
+ if (keypad->enable)
+ schedule_delayed_work(&keypad->switch_work,
+ keypad->board->switch_delay);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * db5500_keypad_probe() - Initialize the keypad driver
+ * @pdev: pointer to platform device structure
+ *
+ * This function will allocate and initialize the instance
+ * data and request the irq and register to input subsystem driver.
+ */
+static int __devinit db5500_keypad_probe(struct platform_device *pdev)
+{
+ struct db5500_keypad_platform_data *plat;
+ struct db5500_keypad *keypad;
+ struct resource *res;
+ struct input_dev *input;
+ void __iomem *base;
+ struct clk *clk;
+ int ret;
+ int irq;
+ int i;
+
+ plat = pdev->dev.platform_data;
+ if (!plat) {
+ dev_err(&pdev->dev, "invalid keypad platform data\n");
+ ret = -EINVAL;
+ goto out_ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get keypad irq\n");
+ ret = -EINVAL;
+ goto out_ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "missing platform resources\n");
+ ret = -EINVAL;
+ goto out_ret;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ ret = -EBUSY;
+ goto out_ret;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto out_freerequest_memregions;
+ }
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to clk_get\n");
+ ret = PTR_ERR(clk);
+ goto out_iounmap;
+ }
+
+ keypad = kzalloc(sizeof(struct db5500_keypad), GFP_KERNEL);
+ if (!keypad) {
+ dev_err(&pdev->dev, "failed to allocate keypad memory\n");
+ ret = -ENOMEM;
+ goto out_freeclk;
+ }
+
+ input = input_allocate_device();
+ if (!input) {
+ dev_err(&pdev->dev, "failed to input_allocate_device\n");
+ ret = -ENOMEM;
+ goto out_freekeypad;
+ }
+
+ keypad->regulator = regulator_get(&pdev->dev, "v-ape");
+ if (IS_ERR(keypad->regulator)) {
+ dev_err(&pdev->dev, "regulator_get failed\n");
+ keypad->regulator = NULL;
+ ret = -EINVAL;
+ goto out_regulator_get;
+ } else {
+ ret = regulator_enable(keypad->regulator);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "regulator_enable failed\n");
+ goto out_regulator_enable;
+ }
+ }
+
+ input->id.bustype = BUS_HOST;
+ input->name = "db5500-keypad";
+ input->dev.parent = &pdev->dev;
+
+ input->keycode = keypad->keymap;
+ input->keycodesize = sizeof(keypad->keymap[0]);
+ input->keycodemax = ARRAY_SIZE(keypad->keymap);
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+
+ __set_bit(EV_KEY, input->evbit);
+ if (!plat->no_autorepeat)
+ __set_bit(EV_REP, input->evbit);
+
+ matrix_keypad_build_keymap(plat->keymap_data, KEYPAD_ROW_SHIFT,
+ input->keycode, input->keybit);
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", ret);
+ goto out_freeinput;
+ }
+
+ keypad->irq = irq;
+ keypad->board = plat;
+ keypad->input = input;
+ keypad->base = base;
+ keypad->clk = clk;
+
+ INIT_DELAYED_WORK(&keypad->switch_work, db5500_gpio_switch_work);
+ INIT_DELAYED_WORK(&keypad->gpio_work, db5500_gpio_release_work);
+
+ clk_enable(keypad->clk);
+	if (!keypad->board->init) {
+		dev_err(&pdev->dev, "init function not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (keypad->board->init() < 0) {
+ dev_err(&pdev->dev, "keyboard init config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (!keypad->board->exit) {
+		dev_err(&pdev->dev, "exit function not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (keypad->board->exit() < 0) {
+ dev_err(&pdev->dev, "keyboard exit config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ for (i = 0; i < keypad->board->krow; i++) {
+ keypad->db5500_rows[i] = *plat->gpio_input_pins;
+ keypad->gpio_input_irq[i] =
+ GPIO_TO_IRQ(keypad->db5500_rows[i]);
+ plat->gpio_input_pins++;
+ }
+
+ for (i = 0; i < keypad->board->kcol; i++) {
+ keypad->db5500_cols[i] = *plat->gpio_output_pins;
+ plat->gpio_output_pins++;
+ }
+
+ for (i = 0; i < keypad->board->krow; i++) {
+ ret = request_threaded_irq(keypad->gpio_input_irq[i],
+ NULL, db5500_keypad_gpio_irq,
+ IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND,
+ "db5500-keypad-gpio", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "allocate gpio irq %d failed\n",
+ keypad->gpio_input_irq[i]);
+ goto out_unregisterinput;
+ }
+ enable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+
+ ret = request_threaded_irq(keypad->irq, NULL, db5500_keypad_irq,
+ IRQF_ONESHOT, "db5500-keypad", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
+ goto out_unregisterinput;
+ }
+
+ platform_set_drvdata(pdev, keypad);
+
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+ return 0;
+
+out_unregisterinput:
+ input_unregister_device(input);
+ input = NULL;
+ clk_disable(keypad->clk);
+out_freeinput:
+	input_free_device(input);
+	input = NULL;
+out_regulator_enable:
+ regulator_put(keypad->regulator);
+out_regulator_get:
+ input_free_device(input);
+out_freekeypad:
+ kfree(keypad);
+out_freeclk:
+ clk_put(clk);
+out_iounmap:
+ iounmap(base);
+out_freerequest_memregions:
+ release_mem_region(res->start, resource_size(res));
+out_ret:
+ return ret;
+}
+
+/**
+ * db5500_keypad_remove() - Removes the keypad driver
+ * @pdev: pointer to platform device structure
+ *
+ * Removes the keypad driver and releases its resources. Returns 0.
+ */
+static int __devexit db5500_keypad_remove(struct platform_device *pdev)
+{
+ struct db5500_keypad *keypad = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->switch_work);
+ free_irq(keypad->irq, keypad);
+ input_unregister_device(keypad->input);
+
+ clk_disable(keypad->clk);
+ clk_put(keypad->clk);
+
+ if (keypad->board->exit)
+ keypad->board->exit();
+
+ regulator_put(keypad->regulator);
+
+ iounmap(keypad->base);
+
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(keypad);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * db5500_keypad_suspend() - suspend the keypad controller
+ * @dev: pointer to device structure
+ *
+ * Suspends the keypad controller. Returns 0.
+ */
+static int db5500_keypad_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct db5500_keypad *keypad = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(irq);
+ else {
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->switch_work);
+ disable_irq(irq);
+ keypad->enable_on_resume = keypad->enable;
+ if (keypad->enable) {
+ db5500_mode_enable(keypad, false);
+ keypad->enable = false;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * db5500_keypad_resume() - resume the keypad controller
+ * @dev: pointer to device structure
+ *
+ * Resumes the keypad controller. Returns 0.
+ */
+static int db5500_keypad_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct db5500_keypad *keypad = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(irq);
+ else {
+ if (keypad->enable_on_resume && !keypad->enable) {
+ keypad->enable = true;
+ db5500_mode_enable(keypad, true);
+ /*
+ * Schedule the work queue to change it to GPIO mode
+			 * if there is no activity in keypad mode
+ */
+ schedule_delayed_work(&keypad->switch_work,
+ keypad->board->switch_delay);
+ }
+ enable_irq(irq);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops db5500_keypad_dev_pm_ops = {
+ .suspend = db5500_keypad_suspend,
+ .resume = db5500_keypad_resume,
+};
+#endif
+
+static struct platform_driver db5500_keypad_driver = {
+ .driver = {
+ .name = "db5500-keypad",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &db5500_keypad_dev_pm_ops,
+#endif
+ },
+ .probe = db5500_keypad_probe,
+ .remove = __devexit_p(db5500_keypad_remove),
+};
+
+/**
+ * db5500_keypad_init() - Initialize the keypad driver
+ *
+ * Registers the db5500 keypad platform driver. Returns 0 on success.
+ */
+static int __init db5500_keypad_init(void)
+{
+ return platform_driver_register(&db5500_keypad_driver);
+}
+module_init(db5500_keypad_init);
+
+/**
+ * db5500_keypad_exit() - De-initialize the keypad driver
+ *
+ * Unregisters the db5500 keypad platform driver.
+ */
+static void __exit db5500_keypad_exit(void)
+{
+ platform_driver_unregister(&db5500_keypad_driver);
+}
+module_exit(db5500_keypad_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_DESCRIPTION("DB5500 Keypad Driver");
+MODULE_ALIAS("platform:db5500-keypad");
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index ed1ed469d08..a6d4ae1a23a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -28,6 +28,7 @@
#include <linux/gpio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
struct gpio_button_data {
struct gpio_keys_button *button;
@@ -42,6 +43,8 @@ struct gpio_keys_drvdata {
struct input_dev *input;
struct mutex disable_lock;
unsigned int n_buttons;
+ bool enabled;
+ bool enable_after_suspend;
int (*enable)(struct device *dev);
void (*disable)(struct device *dev);
struct gpio_button_data data[0];
@@ -437,6 +440,8 @@ static int gpio_keys_open(struct input_dev *input)
{
struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
+ pm_runtime_get_sync(input->dev.parent);
+ ddata->enabled = true;
return ddata->enable ? ddata->enable(input->dev.parent) : 0;
}
@@ -446,6 +451,8 @@ static void gpio_keys_close(struct input_dev *input)
if (ddata->disable)
ddata->disable(input->dev.parent);
+ ddata->enabled = false;
+ pm_runtime_put(input->dev.parent);
}
/*
@@ -578,6 +585,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
ddata->n_buttons = pdata->nbuttons;
ddata->enable = pdata->enable;
ddata->disable = pdata->disable;
+ ddata->enabled = false;
mutex_init(&ddata->disable_lock);
platform_set_drvdata(pdev, ddata);
@@ -594,6 +602,8 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0100;
+ pm_runtime_enable(&pdev->dev);
+
/* Enable auto repeat feature of Linux input subsystem */
if (pdata->rep)
__set_bit(EV_REP, input->evbit);
@@ -667,6 +677,8 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
struct input_dev *input = ddata->input;
int i;
+ pm_runtime_disable(&pdev->dev);
+
sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group);
device_init_wakeup(&pdev->dev, 0);
@@ -709,6 +721,10 @@ static int gpio_keys_suspend(struct device *dev)
enable_irq_wake(irq);
}
}
+ } else {
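+		/* not a wakeup source: remember open state and stop the hardware */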
+ ddata->enable_after_suspend = ddata->enabled;
+ if (ddata->enabled && ddata->disable)
+ ddata->disable(dev);
}
return 0;
@@ -729,6 +745,11 @@ static int gpio_keys_resume(struct device *dev)
gpio_keys_report_event(&ddata->data[i]);
}
+
+ if (!device_may_wakeup(dev) && ddata->enable_after_suspend
+ && ddata->enable)
+ ddata->enable(dev);
+
input_sync(ddata->input);
return 0;
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index e35566aa102..f74775f6006 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -2,7 +2,7 @@
* Copyright (C) ST-Ericsson SA 2010
*
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * co-Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
*
* License terms:GNU General Public License (GPL) version 2
*
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -19,8 +20,10 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <plat/ske.h>
+#include <linux/gpio/nomadik.h>
/* SKE_CR bits */
#define SKE_KPMLT (0x1 << 6)
@@ -48,17 +51,38 @@
#define SKE_ASR3 0x2C
#define SKE_NUM_ASRX_REGISTERS (4)
+#define KEY_PRESSED_DELAY 10
+
+
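+/*
+ * Per-key scan state: KEY_PRESSED is set while a key is down,
+ * KEY_REPORTED once the press has been sent to the input layer.
+ */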
+#define KEY_REPORTED 1
+#define KEY_PRESSED 2
/**
* struct ske_keypad - data structure used by keypad driver
- * @irq: irq no
- * @reg_base: ske regsiters base address
- * @input: pointer to input device object
- * @board: keypad platform device
- * @keymap: matrix scan code table for keycodes
- * @clk: clock structure pointer
+ * @dev: Pointer to the structure device
+ * @irq: irq no
+ * @reg_base: ske registers base address
+ * @input: pointer to input device object
+ * @board: keypad platform device
+ * @keymap: matrix scan code table for keycodes
+ * @clk: clock structure pointer
+ * @ske_keypad_lock: lock used while writing into registers
+ * @enable: flag to enable the driver event
+ * @enable_on_resume: set if keypad should be enabled on resume
+ * @regulator: pointer to the regulator used for the ske keypad
+ * @gpio_input_irq: array for gpio irqs
+ * @key_pressed: hold the key state
+ * @work: delayed work variable for gpio switch
+ * @ske_rows: rows gpio array for ske
+ * @ske_cols: columns gpio array for ske
+ * @gpio_row: gpio row
+ * @gpio_col: gpio column
+ * @gpio_work: delayed work variable for release gpio key
+ * @keys: matrix holding key status
+ * @scan_work: delayed work for scanning new key actions
*/
struct ske_keypad {
+ struct device *dev;
int irq;
void __iomem *reg_base;
struct input_dev *input;
@@ -66,6 +90,19 @@ struct ske_keypad {
unsigned short keymap[SKE_KPD_KEYMAP_SIZE];
struct clk *clk;
spinlock_t ske_keypad_lock;
+ bool enable;
+ bool enable_on_resume;
+ struct regulator *regulator;
+ int gpio_input_irq[SKE_KPD_MAX_ROWS];
+ int key_pressed;
+ struct delayed_work work;
+ int ske_rows[SKE_KPD_MAX_ROWS];
+ int ske_cols[SKE_KPD_MAX_COLS];
+ int gpio_row;
+ int gpio_col;
+ struct delayed_work gpio_work;
+ u8 keys[SKE_KPD_MAX_ROWS][SKE_KPD_MAX_COLS];
+ struct delayed_work scan_work;
};
static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
@@ -83,15 +120,15 @@ static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
spin_unlock(&keypad->ske_keypad_lock);
}
-/*
+/**
* ske_keypad_chip_init: init keypad controller configuration
- *
+ * @keypad: pointer to device structure
* Enable Multi key press detection, auto scan mode
*/
static int __devinit ske_keypad_chip_init(struct ske_keypad *keypad)
{
u32 value;
- int timeout = 50;
+ int timeout = keypad->board->debounce_ms;
/* check SKE_RIS to be 0 */
while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--)
@@ -100,7 +137,7 @@ static int __devinit ske_keypad_chip_init(struct ske_keypad *keypad)
if (!timeout)
return -EINVAL;
- /*
+ /**
* set debounce value
* keypad dbounce is configured in DBCR[15:8]
* dbounce value in steps of 32/32.768 ms
@@ -115,7 +152,7 @@ static int __devinit ske_keypad_chip_init(struct ske_keypad *keypad)
/* enable multi key detection */
ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT);
- /*
+ /**
* set up the number of columns
* KPCN[5:3] defines no. of keypad columns to be auto scanned
*/
@@ -134,14 +171,121 @@ static int __devinit ske_keypad_chip_init(struct ske_keypad *keypad)
return 0;
}
+static void ske_mode_enable(struct ske_keypad *keypad, bool enable)
+{
+ int i;
+
+ if (!enable) {
+ dev_dbg(keypad->dev, "%s disable keypad\n", __func__);
+ writel(0, keypad->reg_base + SKE_CR);
+ if (keypad->board->exit)
+ keypad->board->exit();
+ for (i = 0; i < keypad->board->krow; i++) {
+ enable_irq(keypad->gpio_input_irq[i]);
+ enable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+ } else {
+ dev_dbg(keypad->dev, "%s enable keypad\n", __func__);
+ regulator_enable(keypad->regulator);
+ clk_enable(keypad->clk);
+ for (i = 0; i < keypad->board->krow; i++) {
+ disable_irq_nosync(keypad->gpio_input_irq[i]);
+ disable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ if (keypad->board->init)
+ keypad->board->init();
+ ske_keypad_chip_init(keypad);
+ }
+}
+static void ske_enable(struct ske_keypad *keypad, bool enable)
+{
+ keypad->enable = enable;
+ if (keypad->enable) {
+ enable_irq(keypad->irq);
+ ske_mode_enable(keypad, true);
+ } else {
+ ske_mode_enable(keypad, false);
+ disable_irq(keypad->irq);
+ }
+}
+
+static ssize_t ske_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ske_keypad *keypad = platform_get_drvdata(pdev);
+ return sprintf(buf, "%d\n", keypad->enable);
+}
+
+static ssize_t ske_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ske_keypad *keypad = platform_get_drvdata(pdev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (keypad->enable != val) {
+ keypad->enable = val ? true : false;
+ ske_enable(keypad, keypad->enable);
+ }
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ ske_show_attr_enable, ske_store_attr_enable);
+
+static struct attribute *ske_keypad_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group ske_attr_group = {
+ .attrs = ske_keypad_attrs,
+};
+
+static void ske_keypad_report(struct ske_keypad *keypad, u8 status, int col)
+{
+ int row = 0, code, pos;
+ u32 ske_ris;
+ int num_of_rows;
+
+ /* find out the row */
+ num_of_rows = hweight8(status);
+ do {
+ pos = __ffs(status);
+ row = pos;
+ status &= ~(1 << pos);
+
+ code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
+ ske_ris = readl(keypad->reg_base + SKE_RIS);
+ keypad->key_pressed = ske_ris & SKE_KPRISA;
+
+ dev_dbg(keypad->dev,
+ "%s key_pressed:%d code:%d row:%d col:%d\n",
+ __func__, keypad->key_pressed, code, row, col);
+
+ if (keypad->key_pressed)
+ keypad->keys[row][col] |= KEY_PRESSED;
+
+ num_of_rows--;
+ } while (num_of_rows);
+}
+
static void ske_keypad_read_data(struct ske_keypad *keypad)
{
- struct input_dev *input = keypad->input;
- u16 status;
- int col = 0, row = 0, code;
- int ske_asr, ske_ris, key_pressed, i;
+ u8 status;
+ int col = 0;
+ int ske_asr, i;
- /*
+ /**
* Read the auto scan registers
*
* Each SKE_ASRx (x=0 to x=3) contains two row values.
@@ -153,59 +297,266 @@ static void ske_keypad_read_data(struct ske_keypad *keypad)
if (!ske_asr)
continue;
- /* now that ASRx is zero, find out the column x and row y*/
- if (ske_asr & 0xff) {
+		/* now that ASRx is zero, find out the column x and row y */
+ status = ske_asr & 0xff;
+ if (status) {
col = i * 2;
- status = ske_asr & 0xff;
- } else {
+ ske_keypad_report(keypad, status, col);
+ }
+ status = (ske_asr & 0xff00) >> 8;
+ if (status) {
col = (i * 2) + 1;
- status = (ske_asr & 0xff00) >> 8;
+ ske_keypad_report(keypad, status, col);
}
+ }
+}
- /* find out the row */
- row = __ffs(status);
+static void ske_keypad_scan_work(struct work_struct *work)
+{
+ int timeout = 10;
+ int i, j, code;
+ struct ske_keypad *keypad = container_of(work,
+ struct ske_keypad, scan_work.work);
+ struct input_dev *input = keypad->input;
- code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
- ske_ris = readl(keypad->reg_base + SKE_RIS);
- key_pressed = ske_ris & SKE_KPRISA;
+ /* Wait for autoscan to complete */
+ while (readl(keypad->reg_base + SKE_CR) & SKE_KPASON)
+ cpu_relax();
- input_event(input, EV_MSC, MSC_SCAN, code);
- input_report_key(input, keypad->keymap[code], key_pressed);
- input_sync(input);
+ /* SKEx registers are stable and can be read */
+ ske_keypad_read_data(keypad);
+
+ /* Check for key actions */
+ for (i = 0; i < SKE_KPD_MAX_ROWS; i++) {
+ for (j = 0; j < SKE_KPD_MAX_COLS; j++) {
+ switch (keypad->keys[i][j]) {
+ case KEY_REPORTED:
+ /**
+ * Key was reported but is no longer pressed,
+ * report it as released.
+ */
+ code = MATRIX_SCAN_CODE(i, j,
+ SKE_KEYPAD_ROW_SHIFT);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code],
+ 0);
+ input_sync(input);
+ keypad->keys[i][j] = 0;
+ dev_dbg(keypad->dev,
+ "%s Key release reported, code:%d\n",
+ __func__, code);
+ break;
+ case KEY_PRESSED:
+ /* Key pressed but not yet reported, report */
+ code = MATRIX_SCAN_CODE(i, j,
+ SKE_KEYPAD_ROW_SHIFT);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code],
+ 1);
+ input_sync(input);
+ dev_dbg(keypad->dev,
+ "%s Key press reported, code:%d\n",
+ __func__, code);
+				/* Intentional fall-through */
+ case (KEY_REPORTED | KEY_PRESSED):
+ /**
+ * Key pressed and reported, just reset
+ * KEY_PRESSED for next scan
+ */
+ keypad->keys[i][j] = KEY_REPORTED;
+ break;
+ }
+ }
}
+
+ if (keypad->key_pressed) {
+ /*
+		 * Key still pressed, schedule work to poll for changes in
+		 * 100 ms. After increasing the delay from 50 ms to 100 ms
+		 * the average CPU load is 2% to 3%.
+ */
+ schedule_delayed_work(&keypad->scan_work,
+ msecs_to_jiffies(100));
+ } else {
+		/* For safety, clear the interrupt once more */
+ ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA);
+
+ /* Wait for raw interrupt to clear */
+ while ((readl(keypad->reg_base + SKE_RIS) & SKE_KPRISA) &&
+ --timeout) {
+ udelay(10);
+ }
+
+ if (!timeout)
+ dev_err(keypad->dev,
+				"%s Timed out waiting for irq to clear\n",
+ __func__);
+
+ /* enable auto scan interrupts */
+ ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+
+ /**
+ * Schedule the work queue to change it to GPIO mode
+ * if there is no activity in SKE mode
+ */
+ if (!keypad->key_pressed && keypad->enable)
+ schedule_delayed_work(&keypad->work,
+ keypad->board->switch_delay);
+ }
+}
+
+static void ske_gpio_switch_work(struct work_struct *work)
+{
+ struct ske_keypad *keypad = container_of(work,
+ struct ske_keypad, work.work);
+
+ ske_mode_enable(keypad, false);
+ keypad->enable = false;
}
+static void ske_gpio_release_work(struct work_struct *work)
+{
+ int code;
+ struct ske_keypad *keypad = container_of(work,
+ struct ske_keypad, gpio_work.work);
+ struct input_dev *input = keypad->input;
+
+ code = MATRIX_SCAN_CODE(keypad->gpio_row, keypad->gpio_col,
+ SKE_KEYPAD_ROW_SHIFT);
+
+ dev_dbg(keypad->dev, "%s Key press reported, code:%d\n",
+ __func__, code);
+
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], 1);
+ input_sync(input);
+ input_report_key(input, keypad->keymap[code], 0);
+ input_sync(input);
+}
+
+static int ske_read_get_gpio_row(struct ske_keypad *keypad)
+{
+ int row;
+ int value = 0;
+ int ret;
+
+ /* read all rows GPIO data register values */
+ for (row = 0; row < SKE_KPD_MAX_ROWS ; row++) {
+ ret = gpio_get_value(keypad->ske_rows[row]);
+ value += (1 << row) * ret;
+ }
+
+ /* get the exact row */
+ for (row = 0; row < keypad->board->krow; row++) {
+ if (((1 << row) & value) == 0)
+ return row;
+ }
+
+ return -1;
+}
+
+static void ske_set_cols(struct ske_keypad *keypad, int col)
+{
+ int i ;
+ int value;
+
+ /**
+	 * Drive all column output pins high except the requested
+	 * column, which is driven low
+ */
+ for (i = 0; i < SKE_KPD_MAX_COLS; i++) {
+ if (i == col)
+ value = 0;
+ else
+ value = 1;
+ gpio_request(keypad->ske_cols[i], "ske-kp");
+ gpio_direction_output(keypad->ske_cols[i], value);
+ gpio_free(keypad->ske_cols[i]);
+ }
+}
+
+static void ske_free_cols(struct ske_keypad *keypad)
+{
+ int i ;
+
+ for (i = 0; i < SKE_KPD_MAX_COLS; i++) {
+ gpio_request(keypad->ske_cols[i], "ske-kp");
+ gpio_direction_output(keypad->ske_cols[i], 0);
+ gpio_free(keypad->ske_cols[i]);
+ }
+}
+
+static void ske_manual_scan(struct ske_keypad *keypad)
+{
+ int row;
+ int col;
+
+ for (col = 0; col < keypad->board->kcol; col++) {
+ ske_set_cols(keypad, col);
+ row = ske_read_get_gpio_row(keypad);
+ if (row >= 0) {
+ keypad->key_pressed = 1;
+ keypad->gpio_row = row;
+ keypad->gpio_col = col;
+ break;
+ }
+ }
+ ske_free_cols(keypad);
+}
+
+static irqreturn_t ske_keypad_gpio_irq(int irq, void *dev_id)
+{
+ struct ske_keypad *keypad = dev_id;
+
+ if (!gpio_get_value(NOMADIK_IRQ_TO_GPIO(irq))) {
+ ske_manual_scan(keypad);
+ if (!keypad->enable) {
+ keypad->enable = true;
+ ske_mode_enable(keypad, true);
+ /**
+ * Schedule the work queue to change it back to GPIO
+ * mode if there is no activity in SKE mode
+ */
+ schedule_delayed_work(&keypad->work,
+ keypad->board->switch_delay);
+ }
+ /**
+ * Schedule delayed work to report key press if it is not
+ * detected in SKE mode.
+ */
+ if (keypad->key_pressed)
+ schedule_delayed_work(&keypad->gpio_work,
+ KEY_PRESSED_DELAY);
+ }
+
+ return IRQ_HANDLED;
+}
static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
{
struct ske_keypad *keypad = dev_id;
- int retries = 20;
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->work);
/* disable auto scan interrupt; mask the interrupt generated */
- ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
+ ske_keypad_set_bits(keypad, SKE_IMSC, SKE_KPIMA, 0x0);
ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA);
- while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --retries)
- msleep(5);
-
- if (retries) {
- /* SKEx registers are stable and can be read */
- ske_keypad_read_data(keypad);
- }
-
- /* enable auto scan interrupts */
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+ schedule_delayed_work(&keypad->scan_work, 0);
return IRQ_HANDLED;
}
static int __devinit ske_keypad_probe(struct platform_device *pdev)
{
- const struct ske_keypad_platform_data *plat = pdev->dev.platform_data;
struct ske_keypad *keypad;
+ struct resource *res = NULL;
struct input_dev *input;
- struct resource *res;
+ struct clk *clk;
+ void __iomem *reg_base;
+ int ret = 0;
int irq;
- int error;
+ int i;
+ struct ske_keypad_platform_data *plat = pdev->dev.platform_data;
if (!plat) {
dev_err(&pdev->dev, "invalid keypad platform data\n");
@@ -219,42 +570,56 @@ static int __devinit ske_keypad_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
+ if (res == NULL) {
dev_err(&pdev->dev, "missing platform resources\n");
- return -EINVAL;
+ return -ENXIO;
}
- keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL);
- input = input_allocate_device();
- if (!keypad || !input) {
- dev_err(&pdev->dev, "failed to allocate keypad memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ return -EBUSY;
}
- keypad->irq = irq;
- keypad->board = plat;
- keypad->input = input;
- spin_lock_init(&keypad->ske_keypad_lock);
+ reg_base = ioremap(res->start, resource_size(res));
+ if (!reg_base) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto out_freerequest_memregions;
+ }
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto err_free_mem;
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to clk_get\n");
+ ret = PTR_ERR(clk);
+ goto out_freeioremap;
}
- keypad->reg_base = ioremap(res->start, resource_size(res));
- if (!keypad->reg_base) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -ENXIO;
- goto err_free_mem_region;
+ /* resources are sane; we begin allocation */
+ keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL);
+ if (!keypad) {
+ dev_err(&pdev->dev, "failed to allocate keypad memory\n");
+ goto out_freeclk;
}
+ keypad->dev = &pdev->dev;
- keypad->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(keypad->clk)) {
- dev_err(&pdev->dev, "failed to get clk\n");
- error = PTR_ERR(keypad->clk);
- goto err_iounmap;
+ input = input_allocate_device();
+ if (!input) {
+ dev_err(&pdev->dev, "failed to input_allocate_device\n");
+ ret = -ENOMEM;
+ goto out_freekeypad;
+ }
+ keypad->regulator = regulator_get(&pdev->dev, "v-ape");
+ if (IS_ERR(keypad->regulator)) {
+ dev_err(&pdev->dev, "regulator_get failed\n");
+ keypad->regulator = NULL;
+ goto out_regulator_get;
+ } else {
+ ret = regulator_enable(keypad->regulator);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "regulator_enable failed\n");
+ goto out_regulator_enable;
+ }
}
input->id.bustype = BUS_HOST;
@@ -266,38 +631,91 @@ static int __devinit ske_keypad_probe(struct platform_device *pdev)
input->keycodemax = ARRAY_SIZE(keypad->keymap);
input_set_capability(input, EV_MSC, MSC_SCAN);
+ input_set_drvdata(input, keypad);
__set_bit(EV_KEY, input->evbit);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
matrix_keypad_build_keymap(plat->keymap_data, SKE_KEYPAD_ROW_SHIFT,
- input->keycode, input->keybit);
+ input->keycode, input->keybit);
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", ret);
+ goto out_freeinput;
+ }
+
+ keypad->irq = irq;
+ keypad->board = plat;
+ keypad->input = input;
+ keypad->reg_base = reg_base;
+ keypad->clk = clk;
+ INIT_DELAYED_WORK(&keypad->work, ske_gpio_switch_work);
+ INIT_DELAYED_WORK(&keypad->gpio_work, ske_gpio_release_work);
+ INIT_DELAYED_WORK(&keypad->scan_work, ske_keypad_scan_work);
+
+ /* allocations are sane, we begin HW initialization */
clk_enable(keypad->clk);
- /* go through board initialization helpers */
- if (keypad->board->init)
- keypad->board->init();
+ if (!keypad->board->init) {
+		dev_err(&pdev->dev, "init function not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (keypad->board->init() < 0) {
+ dev_err(&pdev->dev, "keyboard init config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (!keypad->board->exit) {
+		dev_err(&pdev->dev, "exit function not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (keypad->board->exit() < 0) {
+ dev_err(&pdev->dev, "keyboard exit config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+ for (i = 0; i < SKE_KPD_MAX_ROWS; i++) {
+ keypad->ske_rows[i] = *plat->gpio_input_pins;
+ keypad->ske_cols[i] = *plat->gpio_output_pins;
+ keypad->gpio_input_irq[i] =
+ NOMADIK_GPIO_TO_IRQ(keypad->ske_rows[i]);
+ plat->gpio_input_pins++;
+ plat->gpio_output_pins++;
+ }
- error = ske_keypad_chip_init(keypad);
- if (error) {
- dev_err(&pdev->dev, "unable to init keypad hardware\n");
- goto err_clk_disable;
+ for (i = 0; i < keypad->board->krow; i++) {
+ ret = request_threaded_irq(keypad->gpio_input_irq[i],
+ NULL, ske_keypad_gpio_irq,
+ IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND,
+ "ske-keypad-gpio", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "allocate gpio irq %d failed\n",
+ keypad->gpio_input_irq[i]);
+ goto out_unregisterinput;
+ }
+ enable_irq_wake(keypad->gpio_input_irq[i]);
}
- error = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq,
- IRQF_ONESHOT, "ske-keypad", keypad);
- if (error) {
+ ret = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq,
+ IRQF_ONESHOT, "ske-keypad", keypad);
+ if (ret) {
dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
- goto err_clk_disable;
+ goto out_unregisterinput;
}
- error = input_register_device(input);
- if (error) {
- dev_err(&pdev->dev,
- "unable to register input device: %d\n", error);
- goto err_free_irq;
+	/* sysfs entries to dynamically enable/disable the input event */
+ ret = sysfs_create_group(&pdev->dev.kobj, &ske_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create sysfs entries\n");
+ goto out_free_irq;
}
if (plat->wakeup_enable)
@@ -305,21 +723,32 @@ static int __devinit ske_keypad_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, keypad);
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+
return 0;
-err_free_irq:
+out_free_irq:
free_irq(keypad->irq, keypad);
-err_clk_disable:
+out_unregisterinput:
+ input_unregister_device(input);
+ input = NULL;
clk_disable(keypad->clk);
- clk_put(keypad->clk);
-err_iounmap:
- iounmap(keypad->reg_base);
-err_free_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
+out_freeinput:
+ regulator_disable(keypad->regulator);
+out_regulator_enable:
+ regulator_put(keypad->regulator);
+out_regulator_get:
input_free_device(input);
+out_freekeypad:
kfree(keypad);
- return error;
+out_freeclk:
+ clk_put(keypad->clk);
+out_freeioremap:
+ iounmap(reg_base);
+out_freerequest_memregions:
+ release_mem_region(res->start, resource_size(res));
+ return ret;
}
static int __devexit ske_keypad_remove(struct platform_device *pdev)
@@ -327,16 +756,22 @@ static int __devexit ske_keypad_remove(struct platform_device *pdev)
struct ske_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->work);
+ cancel_delayed_work_sync(&keypad->scan_work);
free_irq(keypad->irq, keypad);
input_unregister_device(keypad->input);
+ sysfs_remove_group(&pdev->dev.kobj, &ske_attr_group);
clk_disable(keypad->clk);
clk_put(keypad->clk);
if (keypad->board->exit)
keypad->board->exit();
+ regulator_put(keypad->regulator);
+
iounmap(keypad->reg_base);
release_mem_region(res->start, resource_size(res));
kfree(keypad);
@@ -353,8 +788,19 @@ static int ske_keypad_suspend(struct device *dev)
if (device_may_wakeup(dev))
enable_irq_wake(irq);
- else
- ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
+ else {
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->work);
+ cancel_delayed_work_sync(&keypad->scan_work);
+ disable_irq(irq);
+
+ keypad->enable_on_resume = keypad->enable;
+
+ if (keypad->enable) {
+ ske_mode_enable(keypad, false);
+ keypad->enable = false;
+ }
+ }
return 0;
}
@@ -367,8 +813,20 @@ static int ske_keypad_resume(struct device *dev)
if (device_may_wakeup(dev))
disable_irq_wake(irq);
- else
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+ else {
+ if (keypad->enable_on_resume && !keypad->enable) {
+ keypad->enable = true;
+ ske_mode_enable(keypad, true);
+ /*
+ * Schedule the work queue to change it to GPIO mode
+ * if there is no activity in SKE mode
+ */
+ if (!keypad->key_pressed)
+ schedule_delayed_work(&keypad->work,
+ keypad->board->switch_delay);
+ }
+ enable_irq(irq);
+ }
return 0;
}
@@ -393,7 +851,7 @@ static struct platform_driver ske_keypad_driver = {
static int __init ske_keypad_init(void)
{
- return platform_driver_probe(&ske_keypad_driver, ske_keypad_probe);
+ return platform_driver_register(&ske_keypad_driver);
}
module_init(ske_keypad_init);
@@ -404,6 +862,6 @@ static void __exit ske_keypad_exit(void)
module_exit(ske_keypad_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com>");
MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver");
MODULE_ALIAS("platform:nomadik-ske-keypad");
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index 9397cf9c625..892335275dd 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -108,10 +108,52 @@ struct stmpe_keypad {
unsigned int rows;
unsigned int cols;
+ bool enable;
unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
};
+static ssize_t stmpe_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ return sprintf(buf, "%d\n", keypad->enable);
+}
+
+static ssize_t stmpe_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = keypad->stmpe;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (keypad->enable != val) {
+ keypad->enable = val;
+ if (!val)
+ stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
+ else
+ stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
+ }
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ stmpe_show_attr_enable, stmpe_store_attr_enable);
+
+static struct attribute *stmpe_keypad_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group stmpe_attr_group = {
+ .attrs = stmpe_keypad_attrs,
+};
+
static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
{
const struct stmpe_keypad_variant *variant = keypad->variant;
@@ -285,7 +327,7 @@ static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
goto out_freekeypad;
}
- input->name = "STMPE keypad";
+ input->name = "STMPE-keypad";
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
@@ -332,10 +374,20 @@ static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
goto out_unregisterinput;
}
+ /* sysfs interface for enabling/disabling the input event at runtime */
+ ret = sysfs_create_group(&pdev->dev.kobj, &stmpe_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create sysfs entries\n");
+ goto out_free_irq;
+ }
+
+ keypad->enable = true;
platform_set_drvdata(pdev, keypad);
return 0;
+out_free_irq:
+ free_irq(irq, keypad);
out_unregisterinput:
input_unregister_device(input);
input = NULL;
@@ -354,6 +406,7 @@ static int __devexit stmpe_keypad_remove(struct platform_device *pdev)
stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
+ sysfs_remove_group(&pdev->dev.kobj, &stmpe_attr_group);
free_irq(irq, keypad);
input_unregister_device(keypad->input);
platform_set_drvdata(pdev, NULL);
@@ -362,9 +415,43 @@ static int __devexit stmpe_keypad_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int stmpe_keypad_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = keypad->stmpe;
+
+ if (!device_may_wakeup(stmpe->dev))
+ stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
+
+ return 0;
+}
+
+static int stmpe_keypad_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = keypad->stmpe;
+
+ if (!device_may_wakeup(stmpe->dev))
+ stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
+
+ return 0;
+}
+
+static const struct dev_pm_ops stmpe_keypad_dev_pm_ops = {
+ .suspend = stmpe_keypad_suspend,
+ .resume = stmpe_keypad_resume,
+};
+#endif
+
static struct platform_driver stmpe_keypad_driver = {
.driver.name = "stmpe-keypad",
.driver.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .driver.pm = &stmpe_keypad_dev_pm_ops,
+#endif
.probe = stmpe_keypad_probe,
.remove = __devexit_p(stmpe_keypad_remove),
};
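
The new "enable" attribute gives user space a runtime switch for the keypad block. A minimal user-space sketch, assuming a hypothetical sysfs path (the real path depends on how the stmpe-keypad platform device is named on a given board):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path -- adjust to the actual device on the target */
	const char *attr = "/sys/devices/platform/stmpe-keypad/enable";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* write "0" to mask keypad events, "1" to re-enable them */
	if (write(fd, "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
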
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7b46781c30c..cd69fd6164c 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -22,12 +22,26 @@ config INPUT_88PM860X_ONKEY
To compile this driver as a module, choose M here: the module
will be called 88pm860x_onkey.
+config INPUT_AB8500_ACCDET
+ bool "AB8500 AV Accessory detection"
+ depends on AB8500_CORE && AB8500_GPADC && GPIO_AB8500
+ help
+ Say Y here to enable AV accessory detection features for ST-Ericsson's
+ AB8500 Mix-Sig PMIC.
+
+config INPUT_AB5500_ACCDET
+ bool "AB5500 AV Accessory detection"
+ depends on AB5500_CORE && AB5500_GPADC
+ help
+ Say Y here to enable AV accessory detection features for ST-Ericsson's
+ AB5500 Mix-Sig PMIC.
+
config INPUT_AB8500_PONKEY
- tristate "AB8500 Pon (PowerOn) Key"
- depends on AB8500_CORE
+ tristate "AB5500/AB8500 Pon (PowerOn) Key"
+ depends on AB5500_CORE || AB8500_CORE
help
- Say Y here to use the PowerOn Key for ST-Ericsson's AB8500
- Mix-Sig PMIC.
+ Say Y here to use the PowerOn Key for ST-Ericsson's AB5500/AB8500
+ Mix-Sig PMICs.
To compile this driver as a module, choose M here: the module
will be called ab8500-ponkey.
@@ -300,6 +314,22 @@ config INPUT_KXTJ9_POLLED_MODE
help
Say Y here if you need accelerometer to work in polling mode.
+config INPUT_LPS001WP
+ tristate "LPS0001WP pressure sensor from ST Micro"
+ default y if MACH_U8500
+ help
+ This is a pressure sensor connected over I2C, mounted on the
+ Snowball and other ST-Ericsson boards.
+
+config LPS001WP_INPUT_DEVICE
+ bool "ST LPS001WP INPUT DEVICE"
+ depends on INPUT_LPS001WP
+ default n
+ help
+ This option allows the device to be used as an input device.
+ It needs to be enabled only when input device support is
+ required.
+
config INPUT_POWERMATE
tristate "Griffin PowerMate and Contour Jog support"
depends on USB_ARCH_HAS_HCD
@@ -569,4 +599,14 @@ config INPUT_XEN_KBDDEV_FRONTEND
To compile this driver as a module, choose M here: the
module will be called xen-kbdfront.
+config INPUT_STE_FF_VIBRA
+ tristate "ST-Ericsson Force Feedback Vibrator"
+ depends on STE_AUDIO_IO_DEV
+ select INPUT_FF_MEMLESS
+ help
+ This option enables support for ST-Ericsson's Vibrator which
+ registers as an input force feedback driver.
+
+ To compile this driver as a module, choose M here. The module will
+ be called ste_ff_vibra.
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 46671a875b9..3a19186a7b1 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -5,6 +5,8 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o
+obj-$(CONFIG_INPUT_AB8500_ACCDET) += abx500-accdet.o ab8500-accdet.o
+obj-$(CONFIG_INPUT_AB5500_ACCDET) += abx500-accdet.o ab5500-accdet.o
obj-$(CONFIG_INPUT_AB8500_PONKEY) += ab8500-ponkey.o
obj-$(CONFIG_INPUT_AD714X) += ad714x.o
obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o
@@ -28,6 +30,7 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
+obj-$(CONFIG_INPUT_LPS001WP) += lps001wp_prs.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
@@ -53,3 +56,4 @@ obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
+obj-$(CONFIG_INPUT_STE_FF_VIBRA) += ste_ff_vibra.o
diff --git a/drivers/input/misc/ab5500-accdet.c b/drivers/input/misc/ab5500-accdet.c
new file mode 100644
index 00000000000..7622f3b45e9
--- /dev/null
+++ b/drivers/input/misc/ab5500-accdet.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GPL V2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <mach/abx500-accdet.h>
+
+/*
+ * Register definition for accessory detection.
+ */
+#define AB5500_REGU_CTRL1_SPARE_REG 0x84
+#define AB5500_ACC_DET_DB1_REG 0x20
+#define AB5500_ACC_DET_DB2_REG 0x21
+#define AB5500_ACC_DET_CTRL_REG 0x23
+#define AB5500_VDENC_CTRL0 0x80
+
+/* REGISTER: AB8500_ACC_DET_CTRL_REG */
+#define BITS_ACCDETCTRL2_ENA (0x20 | 0x10 | 0x08)
+#define BITS_ACCDETCTRL1_ENA (0x02 | 0x01)
+
+/* REGISTER: AB8500_REGU_CTRL1_SPARE_REG */
+#define BIT_REGUCTRL1SPARE_VAMIC1_GROUND 0x01
+
+/* REGISTER: AB8500_IT_SOURCE5_REG */
+#define BIT_ITSOURCE5_ACCDET1 0x02
+
+static struct accessory_irq_descriptor ab5500_irq_desc[] = {
+ {
+ .irq = PLUG_IRQ,
+ .name = "acc_detedt1db_falling",
+ .isr = plug_irq_handler,
+ },
+ {
+ .irq = UNPLUG_IRQ,
+ .name = "acc_detedt1db_rising",
+ .isr = unplug_irq_handler,
+ },
+ {
+ .irq = BUTTON_PRESS_IRQ,
+ .name = "acc_detedt21db_falling",
+ .isr = button_press_irq_handler,
+ },
+ {
+ .irq = BUTTON_RELEASE_IRQ,
+ .name = "acc_detedt21db_rising",
+ .isr = button_release_irq_handler,
+ },
+};
+
+static struct accessory_regu_descriptor ab5500_regu_desc[] = {
+ {
+ .id = REGULATOR_VAMIC1,
+ .name = "v-amic",
+ },
+};
+
+
+/*
+ * configures accdet2 input on/off
+ */
+static void ab5500_config_accdetect2_hw(struct abx500_ad *dd, int enable)
+{
+ int ret = 0;
+
+ if (!dd->accdet2_th_set) {
+ /* Configure accdetect21+22 thresholds */
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_DB2_REG,
+ dd->pdata->accdet2122_th);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ goto out;
+ } else {
+ dd->accdet2_th_set = 1;
+ }
+ }
+
+ /* Enable/Disable accdetect21 comparators + pullup */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL2_ENA,
+ enable ? BITS_ACCDETCTRL2_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev, "%s: Failed to update reg (%d).\n",
+ __func__, ret);
+out:
+ return;
+}
+
+/*
+ * configures accdet1 input on/off
+ */
+static void ab5500_config_accdetect1_hw(struct abx500_ad *dd, int enable)
+{
+ int ret;
+
+ if (!dd->accdet1_th_set) {
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_DB1_REG,
+ dd->pdata->accdet1_dbth);
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ else
+ dd->accdet1_th_set = 1;
+ }
+
+ /* enable accdetect1 comparator */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL1_ENA,
+ enable ? BITS_ACCDETCTRL1_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+}
+
+/*
+ * returns the high level status whether some accessory is connected (1|0).
+ */
+static int ab5500_detect_plugged_in(struct abx500_ad *dd)
+{
+ u8 value = 0;
+
+ int status = abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_IT,
+ AB5500_IT_SOURCE3_REG,
+ &value);
+ if (status < 0) {
+ dev_err(&dd->pdev->dev, "%s: reg read failed (%d).\n",
+ __func__, status);
+ return 0;
+ }
+
+ if (dd->pdata->is_detection_inverted)
+ return value & BIT_ITSOURCE5_ACCDET1 ? 1 : 0;
+ else
+ return value & BIT_ITSOURCE5_ACCDET1 ? 0 : 1;
+}
+
+/*
+ * ab5500_meas_voltage_stable - measures a relatively stable voltage from the
+ * specified input
+ */
+static int ab5500_meas_voltage_stable(struct abx500_ad *dd)
+{
+ int iterations = 2;
+ int v1, v2, dv;
+
+ v1 = ab5500_gpadc_convert((struct ab5500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ do {
+ msleep(1);
+ --iterations;
+ v2 = ab5500_gpadc_convert((struct ab5500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ dv = abs(v2 - v1);
+ v1 = v2;
+ } while (iterations > 0 && dv > MAX_VOLT_DIFF);
+
+ return v1;
+}
+
+/*
+ * not implemented
+ */
+static int ab5500_meas_alt_voltage_stable(struct abx500_ad *dd)
+{
+ return -1;
+}
+
+/*
+ * configures HW so that it is possible to make decision whether
+ * accessory is connected or not.
+ */
+static void ab5500_config_hw_test_plug_connected(struct abx500_ad *dd,
+ int enable)
+{
+ dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable);
+
+ /* enable mic BIAS2 */
+ if (enable)
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+}
+
+/*
+ * configures HW so that carkit/headset detection can be accomplished.
+ */
+static void ab5500_config_hw_test_basic_carkit(struct abx500_ad *dd, int enable)
+{
+ /* drop the mic bias (VAMIC1) while testing for a carkit */
+ if (enable)
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1);
+}
+
+static u8 acc_det_ctrl_suspend_val;
+
+static void ab5500_turn_off_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn off AccDetect comparators and pull-up */
+ (void) abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ &acc_det_ctrl_suspend_val);
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ 0);
+}
+
+static void ab5500_turn_on_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn on AccDetect comparators and pull-up */
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ acc_det_ctrl_suspend_val);
+}
+
+static void *ab5500_accdet_abx500_gpadc_get(void)
+{
+ return ab5500_gpadc_get("ab5500-adc.0");
+}
+
+struct abx500_accdet_platform_data *
+ ab5500_get_platform_data(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+
+struct abx500_ad ab5500_accessory_det_callbacks = {
+ .irq_desc_norm = ab5500_irq_desc,
+ .irq_desc_inverted = NULL,
+ .no_irqs = ARRAY_SIZE(ab5500_irq_desc),
+ .regu_desc = ab5500_regu_desc,
+ .no_of_regu_desc = ARRAY_SIZE(ab5500_regu_desc),
+ .config_accdetect2_hw = ab5500_config_accdetect2_hw,
+ .config_accdetect1_hw = ab5500_config_accdetect1_hw,
+ .detect_plugged_in = ab5500_detect_plugged_in,
+ .meas_voltage_stable = ab5500_meas_voltage_stable,
+ .meas_alt_voltage_stable = ab5500_meas_alt_voltage_stable,
+ .config_hw_test_basic_carkit = ab5500_config_hw_test_basic_carkit,
+ .turn_off_accdet_comparator = ab5500_turn_off_accdet_comparator,
+ .turn_on_accdet_comparator = ab5500_turn_on_accdet_comparator,
+ .accdet_abx500_gpadc_get = ab5500_accdet_abx500_gpadc_get,
+ .config_hw_test_plug_connected = ab5500_config_hw_test_plug_connected,
+ .set_av_switch = NULL,
+ .get_platform_data = ab5500_get_platform_data,
+};
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/ab8500-accdet.c b/drivers/input/misc/ab8500-accdet.c
new file mode 100644
index 00000000000..0fe60364d54
--- /dev/null
+++ b/drivers/input/misc/ab8500-accdet.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GPL V2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/abx500.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/abx500/ab8500-gpio.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <mach/abx500-accdet.h>
+#ifdef CONFIG_SND_SOC_UX500_AB8500
+#include <sound/ux500_ab8500_ext.h>
+#endif
+
+#define MAX_DET_COUNT 10
+#define MAX_VOLT_DIFF 30
+#define MIN_MIC_POWER -100
+
+/* Unique value used to identify Headset button input device */
+#define BTN_INPUT_UNIQUE_VALUE "AB8500HsBtn"
+#define BTN_INPUT_DEV_NAME "AB8500 Hs Button"
+
+#define DEBOUNCE_PLUG_EVENT_MS 100
+#define DEBOUNCE_PLUG_RETEST_MS 25
+#define DEBOUNCE_UNPLUG_EVENT_MS 0
+
+/*
+ * Register definition for accessory detection.
+ */
+#define AB8500_REGU_CTRL1_SPARE_REG 0x84
+#define AB8500_ACC_DET_DB1_REG 0x80
+#define AB8500_ACC_DET_DB2_REG 0x81
+#define AB8500_ACC_DET_CTRL_REG 0x82
+#define AB8500_IT_SOURCE5_REG 0x04
+
+/* REGISTER: AB8500_ACC_DET_CTRL_REG */
+#define BITS_ACCDETCTRL2_ENA (0x20 | 0x10 | 0x08)
+#define BITS_ACCDETCTRL1_ENA (0x02 | 0x01)
+
+/* REGISTER: AB8500_REGU_CTRL1_SPARE_REG */
+#define BIT_REGUCTRL1SPARE_VAMIC1_GROUND 0x01
+
+/* REGISTER: AB8500_IT_SOURCE5_REG */
+#define BIT_ITSOURCE5_ACCDET1 0x04
+
+/* After being loaded, how fast the first check is to be made */
+#define INIT_DELAY_MS 3000
+
+/* Voltage limits (mV) for various types of AV Accessories */
+#define ACCESSORY_DET_VOL_DONTCARE -1
+#define ACCESSORY_HEADPHONE_DET_VOL_MIN 0
+#define ACCESSORY_HEADPHONE_DET_VOL_MAX 40
+#define ACCESSORY_CVIDEO_DET_VOL_MIN 41
+#define ACCESSORY_CVIDEO_DET_VOL_MAX 105
+#define ACCESSORY_CARKIT_DET_VOL_MIN 1100
+#define ACCESSORY_CARKIT_DET_VOL_MAX 1300
+#define ACCESSORY_HEADSET_DET_VOL_MIN 0
+#define ACCESSORY_HEADSET_DET_VOL_MAX 200
+#define ACCESSORY_OPENCABLE_DET_VOL_MIN 1730
+#define ACCESSORY_OPENCABLE_DET_VOL_MAX 2150
+
+/* Static data initialization */
+
+static struct accessory_regu_descriptor ab8500_regu_desc[3] = {
+ {
+ .id = REGULATOR_VAUDIO,
+ .name = "v-audio",
+ },
+ {
+ .id = REGULATOR_VAMIC1,
+ .name = "v-amic1",
+ },
+ {
+ .id = REGULATOR_AVSWITCH,
+ .name = "vcc-N2158",
+ },
+};
+
+static struct accessory_irq_descriptor ab8500_irq_desc_norm[] = {
+ {
+ .irq = PLUG_IRQ,
+ .name = "ACC_DETECT_1DB_F",
+ .isr = plug_irq_handler,
+ },
+ {
+ .irq = UNPLUG_IRQ,
+ .name = "ACC_DETECT_1DB_R",
+ .isr = unplug_irq_handler,
+ },
+ {
+ .irq = BUTTON_PRESS_IRQ,
+ .name = "ACC_DETECT_22DB_F",
+ .isr = button_press_irq_handler,
+ },
+ {
+ .irq = BUTTON_RELEASE_IRQ,
+ .name = "ACC_DETECT_22DB_R",
+ .isr = button_release_irq_handler,
+ },
+};
+
+static struct accessory_irq_descriptor ab8500_irq_desc_inverted[] = {
+ {
+ .irq = PLUG_IRQ,
+ .name = "ACC_DETECT_1DB_R",
+ .isr = plug_irq_handler,
+ },
+ {
+ .irq = UNPLUG_IRQ,
+ .name = "ACC_DETECT_1DB_F",
+ .isr = unplug_irq_handler,
+ },
+ {
+ .irq = BUTTON_PRESS_IRQ,
+ .name = "ACC_DETECT_22DB_R",
+ .isr = button_press_irq_handler,
+ },
+ {
+ .irq = BUTTON_RELEASE_IRQ,
+ .name = "ACC_DETECT_22DB_F",
+ .isr = button_release_irq_handler,
+ },
+};
+
+/*
+ * configures accdet2 input on/off
+ */
+static void ab8500_config_accdetect2_hw(struct abx500_ad *dd, int enable)
+{
+ int ret = 0;
+
+ if (!dd->accdet2_th_set) {
+ /* Configure accdetect21+22 thresholds */
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_DB2_REG,
+ dd->pdata->accdet2122_th);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ goto out;
+ } else {
+ dd->accdet2_th_set = 1;
+ }
+ }
+
+ /* Enable/Disable accdetect21 comparators + pullup */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL2_ENA,
+ enable ? BITS_ACCDETCTRL2_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev, "%s: Failed to update reg (%d).\n",
+ __func__, ret);
+
+out:
+ return;
+}
+
+/*
+ * configures accdet1 input on/off
+ */
+static void ab8500_config_accdetect1_hw(struct abx500_ad *dd, int enable)
+{
+ int ret;
+
+ if (!dd->accdet1_th_set) {
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_DB1_REG,
+ dd->pdata->accdet1_dbth);
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ else
+ dd->accdet1_th_set = 1;
+ }
+
+ /* enable accdetect1 comparator */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL1_ENA,
+ enable ? BITS_ACCDETCTRL1_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+}
+
+/*
+ * returns the high level status whether some accessory is connected (1|0).
+ */
+static int ab8500_detect_plugged_in(struct abx500_ad *dd)
+{
+ u8 value = 0;
+
+ int status = abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_INTERRUPT,
+ AB8500_IT_SOURCE5_REG,
+ &value);
+ if (status < 0) {
+ dev_err(&dd->pdev->dev, "%s: reg read failed (%d).\n",
+ __func__, status);
+ return 0;
+ }
+
+ if (dd->pdata->is_detection_inverted)
+ return value & BIT_ITSOURCE5_ACCDET1 ? 1 : 0;
+ else
+ return value & BIT_ITSOURCE5_ACCDET1 ? 0 : 1;
+}
+
+#ifdef CONFIG_SND_SOC_UX500_AB8500
+
+/*
+ * meas_voltage_stable - measures a relatively stable voltage from the specified input
+ */
+static int ab8500_meas_voltage_stable(struct abx500_ad *dd)
+{
+ int ret, mv;
+
+ ret = ux500_ab8500_audio_gpadc_measure((struct ab8500_gpadc *)dd->gpadc,
+ ACC_DETECT2, false, &mv);
+
+ return (ret < 0) ? ret : mv;
+}
+
+/*
+ * meas_alt_voltage_stable - measures a relatively stable voltage from the specified input
+ */
+static int ab8500_meas_alt_voltage_stable(struct abx500_ad *dd)
+{
+ int ret, mv;
+
+ ret = ux500_ab8500_audio_gpadc_measure((struct ab8500_gpadc *)dd->gpadc,
+ ACC_DETECT2, true, &mv);
+
+ return (ret < 0) ? ret : mv;
+}
+
+#else
+
+/*
+ * meas_voltage_stable - measures a relatively stable voltage from the specified input
+ */
+static int ab8500_meas_voltage_stable(struct abx500_ad *dd)
+{
+ int iterations = 2;
+ int v1, v2, dv;
+
+ v1 = ab8500_gpadc_convert((struct ab8500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ do {
+ msleep(1);
+ --iterations;
+ v2 = ab8500_gpadc_convert((struct ab8500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ dv = abs(v2 - v1);
+ v1 = v2;
+ } while (iterations > 0 && dv > MAX_VOLT_DIFF);
+
+ return v1;
+}
+
+/*
+ * not implemented for non-SoC setups
+ */
+static int ab8500_meas_alt_voltage_stable(struct abx500_ad *dd)
+{
+ return -1;
+}
+
+#endif
+
+/*
+ * configures HW so that it is possible to make decision whether
+ * accessory is connected or not.
+ */
+static void ab8500_config_hw_test_plug_connected(struct abx500_ad *dd,
+ int enable)
+{
+ int ret;
+
+ dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable);
+
+ ret = ab8500_config_pulldown(&dd->pdev->dev,
+ dd->pdata->video_ctrl_gpio, !enable);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+ return;
+ }
+
+ if (enable)
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+}
+
+/*
+ * configures HW so that carkit/headset detection can be accomplished.
+ */
+static void ab8500_config_hw_test_basic_carkit(struct abx500_ad *dd, int enable)
+{
+ int ret;
+
+ dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable);
+
+ if (enable)
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1);
+
+ /* Un-Ground the VAMic1 output when enabled */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_REGU_CTRL1,
+ AB8500_REGU_CTRL1_SPARE_REG,
+ BIT_REGUCTRL1SPARE_VAMIC1_GROUND,
+ enable ? BIT_REGUCTRL1SPARE_VAMIC1_GROUND : 0);
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+}
+
+/*
+ * sets the av switch direction - audio-in vs video-out
+ */
+static void ab8500_set_av_switch(struct abx500_ad *dd,
+ enum accessory_avcontrol_dir dir)
+{
+ int ret;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (%d)\n", __func__, dir);
+ if (dir == NOT_SET) {
+ ret = gpio_direction_input(dd->pdata->video_ctrl_gpio);
+ dd->gpio35_dir_set = 0;
+ ret = gpio_direction_output(dd->pdata->video_ctrl_gpio, 0);
+ } else if (!dd->gpio35_dir_set) {
+ ret = gpio_direction_output(dd->pdata->video_ctrl_gpio,
+ dir == AUDIO_IN ? 1 : 0);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Output video ctrl signal failed (%d).\n",
+ __func__, ret);
+ } else {
+ dd->gpio35_dir_set = 1;
+ dev_dbg(&dd->pdev->dev, "AV-SWITCH: %s\n",
+ dir == AUDIO_IN ? "AUDIO_IN" : "VIDEO_OUT");
+ }
+ } else {
+ gpio_set_value(dd->pdata->video_ctrl_gpio,
+ dir == AUDIO_IN ? 1 : 0);
+ }
+}
+
+static u8 acc_det_ctrl_suspend_val;
+
+static void ab8500_turn_off_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn off AccDetect comparators and pull-up */
+ (void) abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ &acc_det_ctrl_suspend_val);
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ 0);
+
+}
+
+static void ab8500_turn_on_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn on AccDetect comparators and pull-up */
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ acc_det_ctrl_suspend_val);
+
+}
+
+static void *ab8500_accdet_abx500_gpadc_get(void)
+{
+ return ab8500_gpadc_get();
+}
+
+struct abx500_accdet_platform_data *
+ ab8500_get_platform_data(struct platform_device *pdev)
+{
+ struct ab8500_platform_data *plat;
+
+ plat = dev_get_platdata(pdev->dev.parent);
+
+ if (!plat || !plat->accdet) {
+ dev_err(&pdev->dev, "%s: Failed to get accdet plat data.\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return plat->accdet;
+}
+
+struct abx500_ad ab8500_accessory_det_callbacks = {
+ .irq_desc_norm = ab8500_irq_desc_norm,
+ .irq_desc_inverted = ab8500_irq_desc_inverted,
+ .no_irqs = ARRAY_SIZE(ab8500_irq_desc_norm),
+ .regu_desc = ab8500_regu_desc,
+ .no_of_regu_desc = ARRAY_SIZE(ab8500_regu_desc),
+ .config_accdetect2_hw = ab8500_config_accdetect2_hw,
+ .config_accdetect1_hw = ab8500_config_accdetect1_hw,
+ .detect_plugged_in = ab8500_detect_plugged_in,
+ .meas_voltage_stable = ab8500_meas_voltage_stable,
+ .meas_alt_voltage_stable = ab8500_meas_alt_voltage_stable,
+ .config_hw_test_basic_carkit = ab8500_config_hw_test_basic_carkit,
+ .turn_off_accdet_comparator = ab8500_turn_off_accdet_comparator,
+ .turn_on_accdet_comparator = ab8500_turn_on_accdet_comparator,
+ .accdet_abx500_gpadc_get = ab8500_accdet_abx500_gpadc_get,
+ .config_hw_test_plug_connected = ab8500_config_hw_test_plug_connected,
+ .set_av_switch = ab8500_set_av_switch,
+ .get_platform_data = ab8500_get_platform_data,
+};
+
+MODULE_DESCRIPTION("AB8500 AV Accessory detection driver");
+MODULE_ALIAS("platform:ab8500-acc-det");
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 350fd0c385d..c3c3c51d302 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -6,7 +6,6 @@
*
* AB8500 Power-On Key handler
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -14,128 +13,208 @@
#include <linux/interrupt.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/slab.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+/* Ponkey time control bits */
+#define AB5500_MCB 0x2F
+#define AB5500_PONKEY_10SEC 0x0
+#define AB5500_PONKEY_5SEC 0x1
+#define AB5500_PONKEY_DISABLE 0x2
+#define AB5500_PONKEY_TMR_MASK 0x1
+#define AB5500_PONKEY_TR_MASK 0x2
+
+static int ab5500_ponkey_hw_init(struct platform_device *);
+
+struct ab8500_ponkey_variant {
+ const char *irq_falling;
+ const char *irq_rising;
+ int (*hw_init)(struct platform_device *);
+};
+
+static const struct ab8500_ponkey_variant ab5500_onswa = {
+ .irq_falling = "ONSWAn_falling",
+ .irq_rising = "ONSWAn_rising",
+ .hw_init = ab5500_ponkey_hw_init,
+};
+
+static const struct ab8500_ponkey_variant ab8500_ponkey = {
+ .irq_falling = "ONKEY_DBF",
+ .irq_rising = "ONKEY_DBR",
+};
/**
- * struct ab8500_ponkey - ab8500 ponkey information
+ * struct ab8500_ponkey_info - ab8500 ponkey information
* @input_dev: pointer to input device
- * @ab8500: ab8500 parent
* @irq_dbf: irq number for falling transition
* @irq_dbr: irq number for rising transition
*/
-struct ab8500_ponkey {
+struct ab8500_ponkey_info {
struct input_dev *idev;
- struct ab8500 *ab8500;
int irq_dbf;
int irq_dbr;
};
+static int ab5500_ponkey_hw_init(struct platform_device *pdev)
+{
+ u8 val;
+ struct ab5500_ponkey_platform_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata) {
+ switch (pdata->shutdown_secs) {
+ case 0:
+ val = AB5500_PONKEY_DISABLE;
+ break;
+ case 5:
+ val = AB5500_PONKEY_5SEC;
+ break;
+ case 10:
+ val = AB5500_PONKEY_10SEC;
+ break;
+ default:
+ val = AB5500_PONKEY_10SEC;
+ }
+ } else {
+ val = AB5500_PONKEY_10SEC;
+ }
+ return abx500_mask_and_set(
+ &pdev->dev,
+ AB5500_BANK_STARTUP,
+ AB5500_MCB,
+ AB5500_PONKEY_TMR_MASK | AB5500_PONKEY_TR_MASK,
+ val);
+}
+
/* AB8500 gives us an interrupt when ONKEY is held */
static irqreturn_t ab8500_ponkey_handler(int irq, void *data)
{
- struct ab8500_ponkey *ponkey = data;
+ struct ab8500_ponkey_info *info = data;
- if (irq == ponkey->irq_dbf)
- input_report_key(ponkey->idev, KEY_POWER, true);
- else if (irq == ponkey->irq_dbr)
- input_report_key(ponkey->idev, KEY_POWER, false);
+ if (irq == info->irq_dbf)
+ input_report_key(info->idev, KEY_POWER, true);
+ else if (irq == info->irq_dbr)
+ input_report_key(info->idev, KEY_POWER, false);
- input_sync(ponkey->idev);
+ input_sync(info->idev);
return IRQ_HANDLED;
}
static int __devinit ab8500_ponkey_probe(struct platform_device *pdev)
{
- struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_ponkey *ponkey;
- struct input_dev *input;
- int irq_dbf, irq_dbr;
- int error;
+ const struct ab8500_ponkey_variant *variant;
+ struct ab8500_ponkey_info *info;
+ int irq_dbf, irq_dbr, ret;
+
+ variant = (const struct ab8500_ponkey_variant *)
+ pdev->id_entry->driver_data;
+
+ if (variant->hw_init) {
+ ret = variant->hw_init(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to init hw");
+ return ret;
+ }
+ }
- irq_dbf = platform_get_irq_byname(pdev, "ONKEY_DBF");
+ irq_dbf = platform_get_irq_byname(pdev, variant->irq_falling);
if (irq_dbf < 0) {
- dev_err(&pdev->dev, "No IRQ for ONKEY_DBF, error=%d\n", irq_dbf);
+ dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+ variant->irq_falling, irq_dbf);
return irq_dbf;
}
- irq_dbr = platform_get_irq_byname(pdev, "ONKEY_DBR");
+ irq_dbr = platform_get_irq_byname(pdev, variant->irq_rising);
if (irq_dbr < 0) {
- dev_err(&pdev->dev, "No IRQ for ONKEY_DBR, error=%d\n", irq_dbr);
+ dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+ variant->irq_rising, irq_dbr);
return irq_dbr;
}
- ponkey = kzalloc(sizeof(struct ab8500_ponkey), GFP_KERNEL);
- input = input_allocate_device();
- if (!ponkey || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ info = kzalloc(sizeof(struct ab8500_ponkey_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- ponkey->idev = input;
- ponkey->ab8500 = ab8500;
- ponkey->irq_dbf = irq_dbf;
- ponkey->irq_dbr = irq_dbr;
+ info->irq_dbf = irq_dbf;
+ info->irq_dbr = irq_dbr;
- input->name = "AB8500 POn(PowerOn) Key";
- input->dev.parent = &pdev->dev;
+ info->idev = input_allocate_device();
+ if (!info->idev) {
+ dev_err(&pdev->dev, "Failed to allocate input dev\n");
+ ret = -ENOMEM;
+ goto out;
+ }
- input_set_capability(input, EV_KEY, KEY_POWER);
+ info->idev->name = "AB8500 POn(PowerOn) Key";
+ info->idev->dev.parent = &pdev->dev;
+ info->idev->evbit[0] = BIT_MASK(EV_KEY);
+ info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
- error = request_any_context_irq(ponkey->irq_dbf, ab8500_ponkey_handler,
- 0, "ab8500-ponkey-dbf", ponkey);
- if (error < 0) {
- dev_err(ab8500->dev, "Failed to request dbf IRQ#%d: %d\n",
- ponkey->irq_dbf, error);
- goto err_free_mem;
+ ret = input_register_device(info->idev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
+ goto out_unfreedevice;
}
- error = request_any_context_irq(ponkey->irq_dbr, ab8500_ponkey_handler,
- 0, "ab8500-ponkey-dbr", ponkey);
- if (error < 0) {
- dev_err(ab8500->dev, "Failed to request dbr IRQ#%d: %d\n",
- ponkey->irq_dbr, error);
- goto err_free_dbf_irq;
+ ret = request_threaded_irq(info->irq_dbf, NULL, ab8500_ponkey_handler,
+ IRQF_NO_SUSPEND, "ab8500-ponkey-dbf",
+ info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
+ info->irq_dbf, ret);
+ goto out_unregisterdevice;
}
- error = input_register_device(ponkey->idev);
- if (error) {
- dev_err(ab8500->dev, "Can't register input device: %d\n", error);
- goto err_free_dbr_irq;
+ ret = request_threaded_irq(info->irq_dbr, NULL, ab8500_ponkey_handler,
+ IRQF_NO_SUSPEND, "ab8500-ponkey-dbr",
+ info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
+ info->irq_dbr, ret);
+ goto out_irq_dbf;
}
- platform_set_drvdata(pdev, ponkey);
- return 0;
+ platform_set_drvdata(pdev, info);
-err_free_dbr_irq:
- free_irq(ponkey->irq_dbr, ponkey);
-err_free_dbf_irq:
- free_irq(ponkey->irq_dbf, ponkey);
-err_free_mem:
- input_free_device(input);
- kfree(ponkey);
+ return 0;
- return error;
+out_irq_dbf:
+ free_irq(info->irq_dbf, info);
+out_unregisterdevice:
+ input_unregister_device(info->idev);
+ info->idev = NULL;
+out_unfreedevice:
+ input_free_device(info->idev);
+out:
+ kfree(info);
+ return ret;
}
static int __devexit ab8500_ponkey_remove(struct platform_device *pdev)
{
- struct ab8500_ponkey *ponkey = platform_get_drvdata(pdev);
-
- free_irq(ponkey->irq_dbf, ponkey);
- free_irq(ponkey->irq_dbr, ponkey);
- input_unregister_device(ponkey->idev);
- kfree(ponkey);
-
- platform_set_drvdata(pdev, NULL);
+ struct ab8500_ponkey_info *info = platform_get_drvdata(pdev);
+ free_irq(info->irq_dbf, info);
+ free_irq(info->irq_dbr, info);
+ input_unregister_device(info->idev);
+ kfree(info);
return 0;
}
+static struct platform_device_id ab8500_ponkey_id_table[] = {
+ { "ab5500-onswa", (kernel_ulong_t)&ab5500_onswa, },
+ { "ab8500-poweron-key", (kernel_ulong_t)&ab8500_ponkey, },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, ab8500_ponkey_id_table);
+
static struct platform_driver ab8500_ponkey_driver = {
.driver = {
.name = "ab8500-poweron-key",
.owner = THIS_MODULE,
},
+ .id_table = ab8500_ponkey_id_table,
.probe = ab8500_ponkey_probe,
.remove = __devexit_p(ab8500_ponkey_remove),
};
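
The new id_table lets the same driver bind to either "ab8500-poweron-key" or "ab5500-onswa" devices, with the AB5500 variant taking its long-press shutdown time from struct ab5500_ponkey_platform_data. A hedged board-side sketch; the struct is redefined locally only to keep the sketch self-contained (its real definition lives in a platform header), and on ux500 the device is normally created by the AB5500 MFD core rather than registered by hand:

#include <linux/platform_device.h>
#include <linux/types.h>

/* illustrative stand-in for the platform header's definition */
struct ab5500_ponkey_platform_data {
	u8 shutdown_secs;
};

static struct ab5500_ponkey_platform_data example_ponkey_pdata = {
	.shutdown_secs = 10,	/* 0 = disable, otherwise 5 or 10 seconds */
};

static struct platform_device example_ponkey_dev = {
	.name	= "ab5500-onswa",	/* matches the id_table entry above */
	.id	= -1,
	.dev	= {
		.platform_data = &example_ponkey_pdata,
	},
};

/* a board init path would then call platform_device_register(&example_ponkey_dev); */
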
diff --git a/drivers/input/misc/abx500-accdet.c b/drivers/input/misc/abx500-accdet.c
new file mode 100644
index 00000000000..d716a1d8870
--- /dev/null
+++ b/drivers/input/misc/abx500-accdet.c
@@ -0,0 +1,1011 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GPL V2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/mfd/abx500.h>
+#include <linux/interrupt.h>
+#include <sound/jack.h>
+#include <mach/abx500-accdet.h>
+#ifdef CONFIG_SND_SOC_UX500_AB8500
+#include <sound/ux500_ab8500.h>
+#else
+#define ux500_ab8500_jack_report(i)
+#endif
+
+/* Unique value used to identify Headset button input device */
+#define BTN_INPUT_UNIQUE_VALUE "AB8500HsBtn"
+#define BTN_INPUT_DEV_NAME "AB8500 Hs Button"
+
+#define DEBOUNCE_PLUG_EVENT_MS 100
+#define DEBOUNCE_PLUG_RETEST_MS 25
+#define DEBOUNCE_UNPLUG_EVENT_MS 0
+
+/* Delay after the driver is loaded before the first detection pass runs */
+#define INIT_DELAY_MS 3000
+
+/* Voltage limits (mV) for various types of AV Accessories */
+#define ACCESSORY_DET_VOL_DONTCARE -1
+#define ACCESSORY_HEADPHONE_DET_VOL_MIN 0
+#define ACCESSORY_HEADPHONE_DET_VOL_MAX 40
+#define ACCESSORY_U_HEADSET_DET_VOL_MIN 47
+#define ACCESSORY_U_HEADSET_DET_VOL_MAX 732
+#define ACCESSORY_U_HEADSET_ALT_DET_VOL_MIN 25
+#define ACCESSORY_U_HEADSET_ALT_DET_VOL_MAX 50
+#define ACCESSORY_CVIDEO_DET_VOL_MIN 41
+#define ACCESSORY_CVIDEO_DET_VOL_MAX 105
+#define ACCESSORY_CARKIT_DET_VOL_MIN 1100
+#define ACCESSORY_CARKIT_DET_VOL_MAX 1300
+#define ACCESSORY_HEADSET_DET_VOL_MIN 1301
+#define ACCESSORY_HEADSET_DET_VOL_MAX 2000
+#define ACCESSORY_OPENCABLE_DET_VOL_MIN 2001
+#define ACCESSORY_OPENCABLE_DET_VOL_MAX 2150
+
+
+/* Macros */
+
+/*
+ * Convenience macros to check jack characteristics.
+ */
+#define jack_supports_mic(type) \
+ (type == JACK_TYPE_HEADSET || type == JACK_TYPE_CARKIT)
+#define jack_supports_spkr(type) \
+ ((type != JACK_TYPE_DISCONNECTED) && (type != JACK_TYPE_CONNECTED))
+#define jack_supports_buttons(type) \
+ ((type == JACK_TYPE_HEADSET) ||\
+ (type == JACK_TYPE_CARKIT) ||\
+ (type == JACK_TYPE_OPENCABLE) ||\
+ (type == JACK_TYPE_CONNECTED))
+
+
+/* Forward declarations */
+static void config_accdetect(struct abx500_ad *dd);
+static enum accessory_jack_type detect(struct abx500_ad *dd, int *required_det);
+
+/* Static data initialization */
+static struct accessory_detect_task detect_ops[] = {
+ {
+ .type = JACK_TYPE_DISCONNECTED,
+ .typename = "DISCONNECTED",
+ .meas_mv = 1,
+ .req_det_count = 1,
+ .minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .maxvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_HEADPHONE,
+ .typename = "HEADPHONE",
+ .meas_mv = 1,
+ .req_det_count = 1,
+ .minvol = ACCESSORY_HEADPHONE_DET_VOL_MIN,
+ .maxvol = ACCESSORY_HEADPHONE_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_UNSUPPORTED_HEADSET,
+ .typename = "UNSUPPORTED HEADSET",
+ .meas_mv = 1,
+ .req_det_count = 2,
+ .minvol = ACCESSORY_U_HEADSET_DET_VOL_MIN,
+ .maxvol = ACCESSORY_U_HEADSET_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_U_HEADSET_ALT_DET_VOL_MIN,
+ .alt_maxvol = ACCESSORY_U_HEADSET_ALT_DET_VOL_MAX
+ },
+ {
+ .type = JACK_TYPE_CVIDEO,
+ .typename = "CVIDEO",
+ .meas_mv = 0,
+ .req_det_count = 4,
+ .minvol = ACCESSORY_CVIDEO_DET_VOL_MIN,
+ .maxvol = ACCESSORY_CVIDEO_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_OPENCABLE,
+ .typename = "OPENCABLE",
+ .meas_mv = 0,
+ .req_det_count = 4,
+ .minvol = ACCESSORY_OPENCABLE_DET_VOL_MIN,
+ .maxvol = ACCESSORY_OPENCABLE_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_CARKIT,
+ .typename = "CARKIT",
+ .meas_mv = 1,
+ .req_det_count = 1,
+ .minvol = ACCESSORY_CARKIT_DET_VOL_MIN,
+ .maxvol = ACCESSORY_CARKIT_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_HEADSET,
+ .typename = "HEADSET",
+ .meas_mv = 0,
+ .req_det_count = 2,
+ .minvol = ACCESSORY_HEADSET_DET_VOL_MIN,
+ .maxvol = ACCESSORY_HEADSET_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_CONNECTED,
+ .typename = "CONNECTED",
+ .meas_mv = 0,
+ .req_det_count = 4,
+ .minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .maxvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ }
+};
+
+static struct accessory_irq_descriptor *abx500_accdet_irq_desc;
+
+/*
+ * textual representation of the accessory type
+ */
+static const char *accessory_str(enum accessory_jack_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(detect_ops); i++)
+ if (type == detect_ops[i].type)
+ return detect_ops[i].typename;
+
+ return "UNKNOWN?";
+}
+
+/*
+ * enables regulator but only if it has not been enabled earlier.
+ */
+void accessory_regulator_enable(struct abx500_ad *dd,
+ enum accessory_regulator reg)
+{
+ int i;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ if (reg & dd->regu_desc[i].id) {
+ if (!dd->regu_desc[i].enabled) {
+ if (!regulator_enable(dd->regu_desc[i].handle))
+ dd->regu_desc[i].enabled = 1;
+ }
+ }
+ }
+}
+
+/*
+ * disables regulator but only if it has been previously enabled.
+ */
+void accessory_regulator_disable(struct abx500_ad *dd,
+ enum accessory_regulator reg)
+{
+ int i;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ if (reg & dd->regu_desc[i].id) {
+ if (dd->regu_desc[i].enabled) {
+ if (!regulator_disable(dd->regu_desc[i].handle))
+ dd->regu_desc[i].enabled = 0;
+ }
+ }
+ }
+}
+
+/*
+ * frees previously retrieved regulators.
+ */
+static void free_regulators(struct abx500_ad *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ if (dd->regu_desc[i].handle) {
+ regulator_put(dd->regu_desc[i].handle);
+ dd->regu_desc[i].handle = NULL;
+ }
+ }
+}
+
+/*
+ * gets required regulators.
+ */
+static int create_regulators(struct abx500_ad *dd)
+{
+ int i;
+ int status = 0;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ struct regulator *regu =
+ regulator_get(&dd->pdev->dev, dd->regu_desc[i].name);
+ if (IS_ERR(regu)) {
+ status = PTR_ERR(regu);
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to get supply '%s' (%d).\n",
+ __func__, dd->regu_desc[i].name, status);
+ free_regulators(dd);
+ goto out;
+ } else {
+ dd->regu_desc[i].handle = regu;
+ }
+ }
+
+out:
+ return status;
+}
+
+/*
+ * create input device for button press reporting
+ */
+static int create_btn_input_dev(struct abx500_ad *dd)
+{
+ int err;
+
+ dd->btn_input_dev = input_allocate_device();
+ if (!dd->btn_input_dev) {
+ dev_err(&dd->pdev->dev, "%s: Failed to allocate input dev.\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ input_set_capability(dd->btn_input_dev,
+ EV_KEY,
+ dd->pdata->btn_keycode);
+
+ dd->btn_input_dev->name = BTN_INPUT_DEV_NAME;
+ dd->btn_input_dev->uniq = BTN_INPUT_UNIQUE_VALUE;
+ dd->btn_input_dev->dev.parent = &dd->pdev->dev;
+
+ err = input_register_device(dd->btn_input_dev);
+ if (err) {
+ dev_err(&dd->pdev->dev,
+ "%s: register_input_device failed (%d).\n", __func__,
+ err);
+ input_free_device(dd->btn_input_dev);
+ dd->btn_input_dev = NULL;
+ goto out;
+ }
+out:
+ return err;
+}
+
+/*
+ * reports jack status
+ */
+void report_jack_status(struct abx500_ad *dd)
+{
+ int value = 0;
+
+ /* Never report possible open cable */
+ if (dd->jack_type == JACK_TYPE_OPENCABLE)
+ goto out;
+
+ /* Never report same state twice in a row */
+ if (dd->jack_type == dd->reported_jack_type)
+ goto out;
+ dd->reported_jack_type = dd->jack_type;
+
+ dev_info(&dd->pdev->dev, "Accessory: %s\n",
+ accessory_str(dd->jack_type));
+
+ /* Never report unsupported headset */
+ if (dd->jack_type == JACK_TYPE_UNSUPPORTED_HEADSET)
+ goto out;
+
+ if (dd->jack_type != JACK_TYPE_DISCONNECTED &&
+ dd->jack_type != JACK_TYPE_UNSPECIFIED)
+ value |= SND_JACK_MECHANICAL;
+ if (jack_supports_mic(dd->jack_type))
+ value |= SND_JACK_MICROPHONE;
+ if (jack_supports_spkr(dd->jack_type))
+ value |= (SND_JACK_HEADPHONE | SND_JACK_LINEOUT);
+ if (dd->jack_type == JACK_TYPE_CVIDEO) {
+ value |= SND_JACK_VIDEOOUT;
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, VIDEO_OUT);
+ }
+ ux500_ab8500_jack_report(value);
+
+out: return;
+}
+
+/*
+ * worker routine to handle accessory unplug case
+ */
+void unplug_irq_handler_work(struct work_struct *work)
+{
+ struct abx500_ad *dd = container_of(work,
+ struct abx500_ad, unplug_irq_work.work);
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ dd->jack_type = dd->jack_type_temp = JACK_TYPE_DISCONNECTED;
+ dd->jack_det_count = dd->total_jack_det_count = 0;
+ dd->btn_state = BUTTON_UNK;
+ config_accdetect(dd);
+
+ accessory_regulator_disable(dd, REGULATOR_ALL);
+
+ report_jack_status(dd);
+}
+
+/*
+ * interrupt service routine for accessory unplug.
+ */
+irqreturn_t unplug_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq);
+
+ queue_delayed_work(dd->irq_work_queue, &dd->unplug_irq_work,
+ msecs_to_jiffies(DEBOUNCE_UNPLUG_EVENT_MS));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * interrupt service routine for accessory plug.
+ */
+irqreturn_t plug_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n",
+ __func__, irq);
+
+ switch (dd->jack_type) {
+ case JACK_TYPE_DISCONNECTED:
+ case JACK_TYPE_UNSPECIFIED:
+ queue_delayed_work(dd->irq_work_queue, &dd->detect_work,
+ msecs_to_jiffies(DEBOUNCE_PLUG_EVENT_MS));
+ break;
+
+ default:
+ dev_err(&dd->pdev->dev, "%s: Unexpected plug IRQ\n", __func__);
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * worker routine to perform detection.
+ */
+static void detect_work(struct work_struct *work)
+{
+ int req_det_count = 1;
+ enum accessory_jack_type new_type;
+ struct abx500_ad *dd = container_of(work,
+ struct abx500_ad, detect_work.work);
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, AUDIO_IN);
+
+ new_type = detect(dd, &req_det_count);
+
+ dd->total_jack_det_count++;
+ if (dd->jack_type_temp == new_type) {
+ dd->jack_det_count++;
+ } else {
+ dd->jack_det_count = 1;
+ dd->jack_type_temp = new_type;
+ }
+
+ if (dd->total_jack_det_count >= MAX_DET_COUNT) {
+ dev_err(&dd->pdev->dev,
+ "%s: MAX_DET_COUNT(=%d) reached. Bailing out.\n",
+ __func__, MAX_DET_COUNT);
+ queue_delayed_work(dd->irq_work_queue, &dd->unplug_irq_work,
+ msecs_to_jiffies(DEBOUNCE_UNPLUG_EVENT_MS));
+ } else if (dd->jack_det_count >= req_det_count) {
+ dd->total_jack_det_count = dd->jack_det_count = 0;
+ dd->jack_type = new_type;
+ dd->detect_jiffies = jiffies;
+ report_jack_status(dd);
+ config_accdetect(dd);
+ } else {
+ queue_delayed_work(dd->irq_work_queue,
+ &dd->detect_work,
+ msecs_to_jiffies(DEBOUNCE_PLUG_RETEST_MS));
+ }
+}
+
+/*
+ * reports a button event (pressed, released).
+ */
+static void report_btn_event(struct abx500_ad *dd, int down)
+{
+ input_report_key(dd->btn_input_dev, dd->pdata->btn_keycode, down);
+ input_sync(dd->btn_input_dev);
+
+ dev_dbg(&dd->pdev->dev, "HS-BTN: %s\n", down ? "PRESSED" : "RELEASED");
+}
+
+/*
+ * interrupt service routine invoked when hs button is pressed down.
+ */
+irqreturn_t button_press_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ unsigned long accept_jiffies = dd->detect_jiffies +
+ msecs_to_jiffies(1000);
+ if (time_before(jiffies, accept_jiffies)) {
+ dev_dbg(&dd->pdev->dev, "%s: Skipped spurious btn press.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq);
+
+ if (dd->jack_type == JACK_TYPE_OPENCABLE) {
+ /* Something got connected to the open cable -> run detection */
+ dd->config_accdetect2_hw(dd, 0);
+ queue_delayed_work(dd->irq_work_queue, &dd->detect_work,
+ msecs_to_jiffies(DEBOUNCE_PLUG_EVENT_MS));
+ return IRQ_HANDLED;
+ }
+
+ if (dd->btn_state == BUTTON_PRESSED)
+ return IRQ_HANDLED;
+
+ if (jack_supports_buttons(dd->jack_type)) {
+ dd->btn_state = BUTTON_PRESSED;
+ report_btn_event(dd, 1);
+ } else {
+ dd->btn_state = BUTTON_UNK;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * interrupt service routine invoked when the hs button is released.
+ */
+irqreturn_t button_release_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq);
+
+ if (dd->jack_type == JACK_TYPE_OPENCABLE)
+ return IRQ_HANDLED;
+
+ if (dd->btn_state != BUTTON_PRESSED)
+ return IRQ_HANDLED;
+
+ if (jack_supports_buttons(dd->jack_type)) {
+ report_btn_event(dd, 0);
+ dd->btn_state = BUTTON_RELEASED;
+ } else {
+ dd->btn_state = BUTTON_UNK;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * checks whether measured voltage is in given range. depending on arguments,
+ * voltage might be re-measured or previously measured voltage is reused.
+ */
+static int mic_vol_in_range(struct abx500_ad *dd,
+ int lo, int hi, int alt_lo, int alt_hi, int force_read)
+{
+ static int mv = MIN_MIC_POWER;
+ static int alt_mv = MIN_MIC_POWER;
+
+ if (mv == MIN_MIC_POWER || force_read)
+ mv = dd->meas_voltage_stable(dd);
+
+ if (mv < lo || mv > hi)
+ return 0;
+
+ if (ACCESSORY_DET_VOL_DONTCARE == alt_lo &&
+ ACCESSORY_DET_VOL_DONTCARE == alt_hi)
+ return 1;
+
+ if (alt_mv == MIN_MIC_POWER || force_read)
+ alt_mv = dd->meas_alt_voltage_stable(dd);
+
+ if (alt_mv < alt_lo || alt_mv > alt_hi)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * checks whether the currently connected HW is of given type.
+ */
+static int detect_hw(struct abx500_ad *dd,
+ struct accessory_detect_task *task)
+{
+ int status;
+
+ switch (task->type) {
+ case JACK_TYPE_DISCONNECTED:
+ dd->config_hw_test_plug_connected(dd, 1);
+ status = !dd->detect_plugged_in(dd);
+ break;
+ case JACK_TYPE_CONNECTED:
+ dd->config_hw_test_plug_connected(dd, 1);
+ status = dd->detect_plugged_in(dd);
+ break;
+ case JACK_TYPE_CARKIT:
+ case JACK_TYPE_HEADPHONE:
+ case JACK_TYPE_CVIDEO:
+ case JACK_TYPE_HEADSET:
+ case JACK_TYPE_UNSUPPORTED_HEADSET:
+ case JACK_TYPE_OPENCABLE:
+ status = mic_vol_in_range(dd,
+ task->minvol,
+ task->maxvol,
+ task->alt_minvol,
+ task->alt_maxvol,
+ task->meas_mv);
+ break;
+ default:
+ status = 0;
+ }
+
+ return status;
+}
+
+/*
+ * Tries to detect the currently attached accessory
+ */
+static enum accessory_jack_type detect(struct abx500_ad *dd,
+ int *req_det_count)
+{
+ enum accessory_jack_type type = JACK_TYPE_DISCONNECTED;
+ int i;
+
+ accessory_regulator_enable(dd, REGULATOR_VAUDIO | REGULATOR_AVSWITCH);
+ /* enable the VAMIC1 regulator */
+ dd->config_hw_test_basic_carkit(dd, 0);
+
+ for (i = 0; i < ARRAY_SIZE(detect_ops); ++i) {
+ if (detect_hw(dd, &detect_ops[i])) {
+ type = detect_ops[i].type;
+ *req_det_count = detect_ops[i].req_det_count;
+ break;
+ }
+ }
+
+ dd->config_hw_test_plug_connected(dd, 0);
+
+ if (jack_supports_buttons(type))
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+ else
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1 |
+ REGULATOR_AVSWITCH);
+
+ accessory_regulator_disable(dd, REGULATOR_VAUDIO);
+
+ return type;
+}
+
+/*
+ * registers to specific interrupt
+ */
+static void claim_irq(struct abx500_ad *dd, enum accessory_irq irq_id)
+{
+ int ret;
+ int irq;
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ if (abx500_accdet_irq_desc[irq_id].registered)
+ return;
+
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+ if (irq < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to get irq %s\n", __func__,
+ abx500_accdet_irq_desc[irq_id].name);
+ return;
+ }
+
+ ret = request_threaded_irq(irq,
+ NULL,
+ abx500_accdet_irq_desc[irq_id].isr,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ abx500_accdet_irq_desc[irq_id].name,
+ dd);
+ if (ret != 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to claim irq %s (%d)\n",
+ __func__,
+ abx500_accdet_irq_desc[irq_id].name,
+ ret);
+ } else {
+ abx500_accdet_irq_desc[irq_id].registered = 1;
+ dev_dbg(&dd->pdev->dev, "%s: %s\n",
+ __func__, abx500_accdet_irq_desc[irq_id].name);
+ }
+}
+
+/*
+ * releases specific interrupt
+ */
+static void release_irq(struct abx500_ad *dd, enum accessory_irq irq_id)
+{
+ int irq;
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ if (!abx500_accdet_irq_desc[irq_id].registered)
+ return;
+
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+ if (irq < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to get irq %s (%d)\n",
+ __func__,
+ abx500_accdet_irq_desc[irq_id].name, irq);
+ } else {
+ free_irq(irq, dd);
+ abx500_accdet_irq_desc[irq_id].registered = 0;
+ dev_dbg(&dd->pdev->dev, "%s: %s\n",
+ __func__, abx500_accdet_irq_desc[irq_id].name);
+ }
+}
+
+/*
+ * configures interrupts + detection hardware to meet the requirements
+ * set by currently attached accessory type.
+ */
+static void config_accdetect(struct abx500_ad *dd)
+{
+ switch (dd->jack_type) {
+ case JACK_TYPE_UNSPECIFIED:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 0);
+
+ release_irq(dd, PLUG_IRQ);
+ release_irq(dd, UNPLUG_IRQ);
+ release_irq(dd, BUTTON_PRESS_IRQ);
+ release_irq(dd, BUTTON_RELEASE_IRQ);
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, NOT_SET);
+ break;
+
+ case JACK_TYPE_DISCONNECTED:
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, NOT_SET);
+ case JACK_TYPE_HEADPHONE:
+ case JACK_TYPE_CVIDEO:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 0);
+
+ claim_irq(dd, PLUG_IRQ);
+ claim_irq(dd, UNPLUG_IRQ);
+ release_irq(dd, BUTTON_PRESS_IRQ);
+ release_irq(dd, BUTTON_RELEASE_IRQ);
+ break;
+
+ case JACK_TYPE_UNSUPPORTED_HEADSET:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 1);
+
+ release_irq(dd, PLUG_IRQ);
+ claim_irq(dd, UNPLUG_IRQ);
+ release_irq(dd, BUTTON_PRESS_IRQ);
+ release_irq(dd, BUTTON_RELEASE_IRQ);
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, NOT_SET);
+ break;
+
+ case JACK_TYPE_CONNECTED:
+ case JACK_TYPE_HEADSET:
+ case JACK_TYPE_CARKIT:
+ case JACK_TYPE_OPENCABLE:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 1);
+
+ release_irq(dd, PLUG_IRQ);
+ claim_irq(dd, UNPLUG_IRQ);
+ claim_irq(dd, BUTTON_PRESS_IRQ);
+ claim_irq(dd, BUTTON_RELEASE_IRQ);
+ break;
+
+ default:
+ dev_err(&dd->pdev->dev, "%s: Unknown type: %d\n",
+ __func__, dd->jack_type);
+ }
+}
+
+/*
+ * Deferred initialization, run from a delayed work item.
+ */
+static void init_work(struct work_struct *work)
+{
+ struct abx500_ad *dd = container_of(work,
+ struct abx500_ad, init_work.work);
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ dd->jack_type = dd->reported_jack_type = JACK_TYPE_UNSPECIFIED;
+ config_accdetect(dd);
+ queue_delayed_work(dd->irq_work_queue,
+ &dd->detect_work,
+ msecs_to_jiffies(0));
+}
+
+/*
+ * performs platform device initialization
+ */
+static int abx500_accessory_init(struct platform_device *pdev)
+{
+ int ret;
+ struct abx500_ad *dd = (struct abx500_ad *)pdev->id_entry->driver_data;
+
+ dev_dbg(&pdev->dev, "Enter: %s\n", __func__);
+
+ dd->pdev = pdev;
+ dd->pdata = dd->get_platform_data(pdev);
+ if (IS_ERR(dd->pdata))
+ return PTR_ERR(dd->pdata);
+
+ if (dd->pdata->video_ctrl_gpio) {
+ ret = gpio_is_valid(dd->pdata->video_ctrl_gpio);
+ if (!ret) {
+ dev_err(&pdev->dev,
+ "%s: Video ctrl GPIO invalid (%d).\n", __func__,
+ dd->pdata->video_ctrl_gpio);
+
+ return -EINVAL;
+ }
+ ret = gpio_request(dd->pdata->video_ctrl_gpio,
+ "Video Control");
+ if (ret) {
+ dev_err(&pdev->dev, "%s: Get video ctrl GPIO"
+ "failed.\n", __func__);
+ return ret;
+ }
+ }
+
+ ret = create_btn_input_dev(dd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: create_button_input_dev failed.\n",
+ __func__);
+ goto fail_no_btn_input_dev;
+ }
+
+ ret = create_regulators(dd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: failed to create regulators\n",
+ __func__);
+ goto fail_no_regulators;
+ }
+ dd->btn_state = BUTTON_UNK;
+
+ dd->irq_work_queue = create_singlethread_workqueue("abx500_accdet_wq");
+ if (!dd->irq_work_queue) {
+ dev_err(&pdev->dev, "%s: Failed to create wq\n", __func__);
+ ret = -ENOMEM;
+ goto fail_no_mem_for_wq;
+ }
+
+ dd->gpadc = dd->accdet_abx500_gpadc_get();
+
+ INIT_DELAYED_WORK(&dd->detect_work, detect_work);
+ INIT_DELAYED_WORK(&dd->unplug_irq_work, unplug_irq_handler_work);
+ INIT_DELAYED_WORK(&dd->init_work, init_work);
+
+ /* Deferred init/detect since no use for the info early in boot */
+ queue_delayed_work(dd->irq_work_queue,
+ &dd->init_work,
+ msecs_to_jiffies(INIT_DELAY_MS));
+
+ platform_set_drvdata(pdev, dd);
+
+ return 0;
+fail_no_mem_for_wq:
+ free_regulators(dd);
+fail_no_regulators:
+ input_unregister_device(dd->btn_input_dev);
+fail_no_btn_input_dev:
+ if (dd->pdata->video_ctrl_gpio)
+ gpio_free(dd->pdata->video_ctrl_gpio);
+ return ret;
+}
+
+/*
+ * Performs platform device cleanup
+ */
+static void abx500_accessory_cleanup(struct abx500_ad *dd)
+{
+ dev_dbg(&dd->pdev->dev, "Enter: %s\n", __func__);
+
+ dd->jack_type = JACK_TYPE_UNSPECIFIED;
+ config_accdetect(dd);
+
+ gpio_free(dd->pdata->video_ctrl_gpio);
+ input_unregister_device(dd->btn_input_dev);
+ free_regulators(dd);
+
+ cancel_delayed_work(&dd->detect_work);
+ cancel_delayed_work(&dd->unplug_irq_work);
+ cancel_delayed_work(&dd->init_work);
+ flush_workqueue(dd->irq_work_queue);
+ destroy_workqueue(dd->irq_work_queue);
+
+ kfree(dd);
+}
+
+static int __devinit abx500_acc_detect_probe(struct platform_device *pdev)
+{
+
+ return abx500_accessory_init(pdev);
+}
+
+static int __devexit abx500_acc_detect_remove(struct platform_device *pdev)
+{
+ abx500_accessory_cleanup(platform_get_drvdata(pdev));
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+static int abx500_acc_detect_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+ int irq_id, irq;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ cancel_delayed_work_sync(&dd->unplug_irq_work);
+ cancel_delayed_work_sync(&dd->detect_work);
+ cancel_delayed_work_sync(&dd->init_work);
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ for (irq_id = 0; irq_id < dd->no_irqs; irq_id++) {
+ if (abx500_accdet_irq_desc[irq_id].registered == 1) {
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+
+ disable_irq(irq);
+ }
+ }
+
+ dd->turn_off_accdet_comparator(pdev);
+
+ if (dd->jack_type == JACK_TYPE_HEADSET)
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1);
+
+ return 0;
+}
+
+static int abx500_acc_detect_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+ int irq_id, irq;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ if (dd->jack_type == JACK_TYPE_HEADSET)
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+
+ dd->turn_on_accdet_comparator(pdev);
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ for (irq_id = 0; irq_id < dd->no_irqs; irq_id++) {
+ if (abx500_accdet_irq_desc[irq_id].registered == 1) {
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+
+ enable_irq(irq);
+ }
+ }
+
+ /* After resume, reinitialize */
+ dd->gpio35_dir_set = dd->accdet1_th_set = dd->accdet2_th_set = 0;
+ queue_delayed_work(dd->irq_work_queue, &dd->init_work, 0);
+
+ return 0;
+}
+#else
+#define abx500_acc_detect_suspend NULL
+#define abx500_acc_detect_resume NULL
+#endif
+
+static struct platform_device_id abx500_accdet_ids[] = {
+#ifdef CONFIG_INPUT_AB5500_ACCDET
+ { "ab5500-acc-det", (kernel_ulong_t)&ab5500_accessory_det_callbacks, },
+#endif
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+ { "ab8500-acc-det", (kernel_ulong_t)&ab8500_accessory_det_callbacks, },
+#endif
+ { },
+};
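+
+/*
+ * The per-chip callback tables travel in driver_data;
+ * abx500_accessory_init() picks them up through
+ * pdev->id_entry->driver_data, so the same platform driver serves
+ * both the AB5500 and the AB8500 variant.
+ */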
+
+static const struct dev_pm_ops abx_ops = {
+ .suspend = abx500_acc_detect_suspend,
+ .resume = abx500_acc_detect_resume,
+};
+
+static struct platform_driver abx500_acc_detect_platform_driver = {
+ .driver = {
+ .name = "abx500-acc-det",
+ .owner = THIS_MODULE,
+ .pm = &abx_ops,
+ },
+ .probe = abx500_acc_detect_probe,
+ .id_table = abx500_accdet_ids,
+ .remove = __devexit_p(abx500_acc_detect_remove),
+};
+
+static int __init abx500_acc_detect_init(void)
+{
+ return platform_driver_register(&abx500_acc_detect_platform_driver);
+}
+
+static void __exit abx500_acc_detect_exit(void)
+{
+ platform_driver_unregister(&abx500_acc_detect_platform_driver);
+}
+
+module_init(abx500_acc_detect_init);
+module_exit(abx500_acc_detect_exit);
+
+MODULE_DESCRIPTION("ABx500 AV Accessory detection driver");
+MODULE_ALIAS("platform:abx500-acc-det");
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/lps001wp_prs.c b/drivers/input/misc/lps001wp_prs.c
new file mode 100644
index 00000000000..45a322729ff
--- /dev/null
+++ b/drivers/input/misc/lps001wp_prs.c
@@ -0,0 +1,1453 @@
+
+/******************** (C) COPYRIGHT 2010 STMicroelectronics ********************
+*
+* File Name : lps001wp_prs.c
+* Authors : MSH - Motion Mems BU - Application Team
+* : Matteo Dameno (matteo.dameno@st.com)
+* : Carmine Iascone (carmine.iascone@st.com)
+* : Both authors are willing to be considered the contact
+* : and update points for the driver.
+* Version : V 1.1.1
+* Date : 2010/11/22
+* Description : LPS001WP pressure temperature sensor driver
+*
+********************************************************************************
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THE PRESENT SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
+* OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, FOR THE SOLE
+* PURPOSE TO SUPPORT YOUR APPLICATION DEVELOPMENT.
+* AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
+* INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
+* CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
+* INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
+*
+******************************************************************************
+
+ Revision 0.9.0 01/10/2010:
+ first beta release
+ Revision 1.1.0 05/11/2010:
+ add sysfs management
+ Revision 1.1.1 22/11/2010:
+ moved to input/misc
+******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/input/lps001wp.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#define DEBUG 1
+
+#define PR_ABS_MAX 0xffff
+#define PR_ABS_MIN 0x0000
+#define PR_DLT_MAX 0x7ffff
+#define PR_DLT_MIN -0x80000 /* 16-bit signed value */
+#define TEMP_MAX 0x7fff
+#define TEMP_MIN -0x80000 /* 16-bit signed value */
+
+
+#define SENSITIVITY_T_SHIFT 6 /** = 64 LSB/degrC */
+#define SENSITIVITY_P_SHIFT 4 /** = 16 LSB/mbar */
+
+
+#define OUTDATA_REG 0x28
+#define REF_PRESS_REG 0x30
+
+#define WHOAMI_LPS001WP_PRS 0xBA /* Expected content for WHO_AM_I */
+
+/* CONTROL REGISTERS */
+#define WHO_AM_I 0x0F /* WhoAmI register */
+#define CTRL_REG1 0x20 /* power / ODR control reg */
+#define CTRL_REG2 0x21 /* boot reg */
+#define CTRL_REG3 0x22 /* interrupt control reg */
+
+#define STATUS_REG 0x27 /* status reg */
+
+#define PRESS_OUT_L OUTDATA_REG
+
+
+#define REF_P_L REF_PRESS_REG /* pressure reference */
+#define REF_P_H 0x31 /* pressure reference */
+#define THS_P_L 0x32 /* pressure threshold */
+#define THS_P_H 0x33 /* pressure threshold */
+
+#define INT_CFG 0x34 /* interrupt config */
+#define INT_SRC 0x35 /* interrupt source */
+#define INT_ACK 0x36 /* interrupt acknowledge */
+/* end CONTROL REGISTERS */
+
+
+/* Barometer and thermometer output data rate (ODR) */
+#define LPS001WP_PRS_ODR_MASK 0x30 /* Mask to access odr bits only */
+#define LPS001WP_PRS_ODR_7_1 0x00 /* 7Hz baro and 1Hz term ODR */
+#define LPS001WP_PRS_ODR_7_7 0x10 /* 7Hz baro and 7Hz term ODR */
+#define LPS001WP_PRS_ODR_12_12 0x30 /* 12.5Hz baro and 12.5Hz term ODR */
+
+#define LPS001WP_PRS_ENABLE_MASK 0x40
+#define LPS001WP_PRS_DIFF_MASK 0x08
+#define LPS001WP_PRS_LPOW_MASK 0x80
+
+#define LPS001WP_PRS_DIFF_ON 0x08
+#define LPS001WP_PRS_DIFF_OFF 0x00
+
+#define LPS001WP_PRS_LPOW_ON 0x80
+#define LPS001WP_PRS_LPOW_OFF 0x00
+
+#define FUZZ 0
+#define FLAT 0
+#define I2C_RETRY_DELAY 5
+#define I2C_RETRIES 5
+#define I2C_AUTO_INCREMENT 0x80
+
+/* RESUME STATE INDICES */
+#define RES_CTRL_REG1 0
+#define RES_CTRL_REG2 1
+#define RES_CTRL_REG3 2
+#define RES_REF_P_L 3
+#define RES_REF_P_H 4
+#define RES_THS_P_L 5
+#define RES_THS_P_H 6
+#define RES_INT_CFG 7
+
+#define RESUME_ENTRIES 8
+/* end RESUME STATE INDICES */
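+
+/*
+ * The RES_* indices above address the resume_state[] cache; the cached
+ * values are written back to the corresponding registers by
+ * lps001wp_prs_hw_init() whenever the device is powered on again.
+ */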
+
+/* Pressure Sensor Operating Mode */
+#define LPS001WP_PRS_DIFF_ENABLE 1
+#define LPS001WP_PRS_DIFF_DISABLE 0
+#define LPS001WP_PRS_LPOWER_EN 1
+#define LPS001WP_PRS_LPOWER_DIS 0
+
+static const struct {
+ unsigned int cutoff_ms;
+ unsigned int mask;
+} lps001wp_prs_odr_table[] = {
+ {80, LPS001WP_PRS_ODR_12_12 },
+ {143, LPS001WP_PRS_ODR_7_7 },
+ {1000, LPS001WP_PRS_ODR_7_1 },
+};
+
+/**
+ * struct lps001wp_prs_data - data structure used by lps001wp_prs driver
+ * @client: i2c client
+ * @pdata: lps001wp platform data
+ * @lock: mutex lock for sysfs operations
+ * @input_work: work queue to read sensor data
+ * @input_dev: input device
+ * @regulator: regulator
+ * @early_suspend: early suspend structure
+ * @hw_initialized: saves hw initialisation status
+ * @hw_working: saves hw status
+ * @diff_enabled: store value of diff enable
+ * @lpowmode_enabled: flag to set lowpower mode
+ * @enabled: to store mode of device
+ * @on_before_suspend: to store status of device during suspend
+ * @resume_state: stores register values for restore on resume
+ * @reg_addr: stores register address for debug access
+ */
+struct lps001wp_prs_data {
+ struct i2c_client *client;
+ struct lps001wp_prs_platform_data *pdata;
+
+ struct mutex lock;
+#ifdef CONFIG_LPS001WP_INPUT_DEVICE
+ struct delayed_work input_work;
+ struct input_dev *input_dev;
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+
+ int hw_initialized;
+ /* hw_working=-1 means not tested yet */
+ int hw_working;
+ u8 diff_enabled;
+ u8 lpowmode_enabled;
+
+ atomic_t enabled;
+ int on_before_suspend;
+
+ u8 resume_state[RESUME_ENTRIES];
+
+ struct regulator *regulator;
+
+#ifdef DEBUG
+ u8 reg_addr;
+#endif
+};
+
+struct outputdata {
+ u16 abspress;
+ s16 temperature;
+ s16 deltapress;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void lps001wp_prs_early_suspend(struct early_suspend *data);
+static void lps001wp_prs_late_resume(struct early_suspend *data);
+#endif
+
+
+static int lps001wp_prs_i2c_read(struct lps001wp_prs_data *prs,
+ u8 *buf, int len)
+{
+ int err;
+ int tries = 0;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = prs->client->addr,
+ .flags = prs->client->flags & I2C_M_TEN,
+ .len = 1,
+ .buf = buf,
+ },
+ {
+ .addr = prs->client->addr,
+ .flags = (prs->client->flags & I2C_M_TEN) | I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ },
+ };
+
+ do {
+ err = i2c_transfer(prs->client->adapter, msgs, 2);
+ if (err != 2)
+ msleep_interruptible(I2C_RETRY_DELAY);
+ } while ((err != 2) && (++tries < I2C_RETRIES));
+
+ if (err != 2) {
+ dev_err(&prs->client->dev, "read transfer error\n");
+ err = -EIO;
+ } else {
+ err = 0;
+ }
+
+ return err;
+}
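+
+/*
+ * Note: on entry buf[0] holds the register address (optionally OR'ed
+ * with I2C_AUTO_INCREMENT for multi-byte transfers, as in hw_init and
+ * lps001wp_prs_get_presstemp_data()); on return it is overwritten with
+ * the data read back.
+ */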
+
+static int lps001wp_prs_i2c_write(struct lps001wp_prs_data *prs,
+ u8 *buf, int len)
+{
+ int err;
+ int tries = 0;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = prs->client->addr,
+ .flags = prs->client->flags & I2C_M_TEN,
+ .len = len + 1,
+ .buf = buf,
+ },
+ };
+
+ do {
+ err = i2c_transfer(prs->client->adapter, msgs, 1);
+ if (err != 1)
+ msleep_interruptible(I2C_RETRY_DELAY);
+ } while ((err != 1) && (++tries < I2C_RETRIES));
+
+ if (err != 1) {
+ dev_err(&prs->client->dev, "write transfer error\n");
+ err = -EIO;
+ } else {
+ err = 0;
+ }
+
+ return err;
+}
+
+static int lps001wp_prs_register_write(struct lps001wp_prs_data *prs, u8 *buf,
+ u8 reg_address, u8 new_value)
+{
+ int err = -EINVAL;
+
+ /* Sets configuration register at reg_address
+ * NOTE: this is a straight overwrite */
+ buf[0] = reg_address;
+ buf[1] = new_value;
+ err = lps001wp_prs_i2c_write(prs, buf, 1);
+ return err;
+}
+
+static int lps001wp_prs_register_read(struct lps001wp_prs_data *prs, u8 *buf,
+ u8 reg_address)
+{
+
+ int err = -EINVAL;
+ buf[0] = reg_address;
+ err = lps001wp_prs_i2c_read(prs, buf, 1);
+
+ return err;
+}
+
+static int lps001wp_prs_register_update(struct lps001wp_prs_data *prs, u8 *buf,
+ u8 reg_address, u8 mask, u8 new_bit_values)
+{
+ int err = -EINVAL;
+ u8 init_val;
+ u8 updated_val;
+ err = lps001wp_prs_register_read(prs, buf, reg_address);
+ if (!(err < 0)) {
+ init_val = buf[0];
+ updated_val = ((mask & new_bit_values) | ((~mask) & init_val));
+ err = lps001wp_prs_register_write(prs, buf, reg_address,
+ updated_val);
+ }
+ return err;
+}
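+
+/*
+ * Worked example of the read-modify-write above: with
+ * mask = LPS001WP_PRS_DIFF_MASK (0x08), new_bit_values =
+ * LPS001WP_PRS_DIFF_ON (0x08) and a current register value of 0x84,
+ * the byte written back is (0x08 & 0x08) | (~0x08 & 0x84) = 0x8c,
+ * i.e. only the DIFF bit changes.
+ */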
+
+
+
+static int lps001wp_prs_hw_init(struct lps001wp_prs_data *prs)
+{
+ int err = -EINVAL;
+ u8 buf[6];
+
+ dev_dbg(&prs->client->dev, "%s: hw init start\n",
+ LPS001WP_PRS_DEV_NAME);
+
+ buf[0] = WHO_AM_I;
+ err = lps001wp_prs_i2c_read(prs, buf, 1);
+ if (err < 0)
+ goto error_firstread;
+ else
+ prs->hw_working = 1;
+ if (buf[0] != WHOAMI_LPS001WP_PRS) {
+ err = -EINVAL; /* TODO: choose the right error code */
+ goto error_unknown_device;
+ }
+
+
+ buf[0] = (I2C_AUTO_INCREMENT | REF_PRESS_REG);
+ buf[1] = prs->resume_state[RES_REF_P_L];
+ buf[2] = prs->resume_state[RES_REF_P_H];
+ buf[3] = prs->resume_state[RES_THS_P_L];
+ buf[4] = prs->resume_state[RES_THS_P_H];
+ err = lps001wp_prs_i2c_write(prs, buf, 4);
+ if (err < 0)
+ goto error1;
+
+ buf[0] = (I2C_AUTO_INCREMENT | CTRL_REG1);
+ buf[1] = prs->resume_state[RES_CTRL_REG1];
+ buf[2] = prs->resume_state[RES_CTRL_REG2];
+ buf[3] = prs->resume_state[RES_CTRL_REG3];
+ err = lps001wp_prs_i2c_write(prs, buf, 3);
+ if (err < 0)
+ goto error1;
+
+ buf[0] = INT_CFG;
+ buf[1] = prs->resume_state[RES_INT_CFG];
+ err = lps001wp_prs_i2c_write(prs, buf, 1);
+ if (err < 0)
+ goto error1;
+
+
+ prs->hw_initialized = 1;
+ dev_dbg(&prs->client->dev, "%s: hw init done\n", LPS001WP_PRS_DEV_NAME);
+ return 0;
+
+error_firstread:
+ prs->hw_working = 0;
+ dev_warn(&prs->client->dev, "Error reading WHO_AM_I: is device "
+ "available/working?\n");
+ goto error1;
+error_unknown_device:
+ dev_err(&prs->client->dev,
+ "device unknown. Expected: 0x%x,"
+ " Replies: 0x%x\n", WHOAMI_LPS001WP_PRS, buf[0]);
+error1:
+ prs->hw_initialized = 0;
+ dev_err(&prs->client->dev, "hw init error 0x%x,0x%x: %d\n", buf[0],
+ buf[1], err);
+ return err;
+}
+
+static void lps001wp_prs_device_power_off(struct lps001wp_prs_data *prs)
+{
+ int err;
+ u8 buf[2] = { CTRL_REG1, LPS001WP_PRS_PM_OFF };
+
+ err = lps001wp_prs_i2c_write(prs, buf, 1);
+ if (err < 0)
+ dev_err(&prs->client->dev, "soft power off failed: %d\n", err);
+
+ /* disable regulator */
+ if (prs->regulator) {
+ err = regulator_disable(prs->regulator);
+ if (err < 0)
+ dev_err(&prs->client->dev, "failed to disable regulator\n");
+ }
+
+ if (prs->hw_initialized)
+ prs->hw_initialized = 0;
+
+}
+
+static int lps001wp_prs_device_power_on(struct lps001wp_prs_data *prs)
+{
+ int err = -EINVAL;
+
+ /* get the regulator the first time */
+ if (!prs->regulator) {
+ prs->regulator = regulator_get(&prs->client->dev, "vdd");
+ if (IS_ERR(prs->regulator)) {
+ err = PTR_ERR(prs->regulator);
+ dev_err(&prs->client->dev, "failed to get regulator\n");
+ prs->regulator = NULL;
+ return err;
+ }
+ }
+
+ /* enable it also */
+ err = regulator_enable(prs->regulator);
+ if (err < 0) {
+ dev_err(&prs->client->dev, "failed to enable regulator\n");
+ return err;
+ }
+
+ if (!prs->hw_initialized) {
+ err = lps001wp_prs_hw_init(prs);
+ if (prs->hw_working == 1 && err < 0) {
+ lps001wp_prs_device_power_off(prs);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+
+
+static int lps001wp_prs_update_odr(struct lps001wp_prs_data *prs,
+ int poll_interval_ms)
+{
+ int err = -EINVAL;
+ int i;
+
+ u8 buf[2];
+ u8 updated_val;
+ u8 init_val;
+ u8 new_val;
+ u8 mask = LPS001WP_PRS_ODR_MASK;
+
+ /*
+ * Scan the odr_table from the end (longest cutoff interval)
+ * backwards towards the shortest one and pick the first entry
+ * whose cutoff does not exceed the requested poll interval,
+ * i.e. the longest ODR period that still fits poll_interval_ms.
+ */
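+ /*
+ * Example with the table above: a requested interval of 200 ms
+ * selects the 143 ms entry (7 Hz / 7 Hz), while 100 ms falls
+ * through to the 80 ms entry (12.5 Hz / 12.5 Hz).
+ */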
+ for (i = ARRAY_SIZE(lps001wp_prs_odr_table) - 1; i >= 0; i--) {
+ if (lps001wp_prs_odr_table[i].cutoff_ms <= poll_interval_ms)
+ break;
+ }
+
+ /* Requested interval shorter than the fastest ODR: use index 0 */
+ if (i < 0)
+ i = 0;
+ new_val = lps001wp_prs_odr_table[i].mask;
+
+ /* If device is currently enabled, we need to write new
+ * configuration out to it */
+ if (atomic_read(&prs->enabled)) {
+ buf[0] = CTRL_REG1;
+ err = lps001wp_prs_i2c_read(prs, buf, 1);
+ if (err < 0)
+ goto error;
+ init_val = buf[0];
+ prs->resume_state[RES_CTRL_REG1] = init_val;
+
+ updated_val = ((mask & new_val) | ((~mask) & init_val));
+ buf[0] = CTRL_REG1;
+ buf[1] = updated_val;
+ err = lps001wp_prs_i2c_write(prs, buf, 1);
+ if (err < 0)
+ goto error;
+ prs->resume_state[RES_CTRL_REG1] = updated_val;
+ }
+ return err;
+
+error:
+ dev_err(&prs->client->dev, "update odr failed 0x%x,0x%x: %d\n",
+ buf[0], buf[1], err);
+
+ return err;
+}
+
+static int lps001wp_prs_set_press_reference(struct lps001wp_prs_data *prs,
+ u16 new_reference)
+{
+ int err = -EINVAL;
+ u8 const reg_addressL = REF_P_L;
+ u8 const reg_addressH = REF_P_H;
+ u8 bit_valuesL, bit_valuesH;
+ u8 buf[2];
+ /*
+ * We need to set new configurations, only if device
+ * is currently enabled
+ */
+ if (!atomic_read(&prs->enabled))
+ return err;
+ bit_valuesL = (u8) (new_reference & 0x00FF);
+ bit_valuesH = (u8)((new_reference & 0xFF00) >> 8);
+
+ err = lps001wp_prs_register_write(prs, buf, reg_addressL,
+ bit_valuesL);
+ if (err < 0)
+ return err;
+ err = lps001wp_prs_register_write(prs, buf, reg_addressH,
+ bit_valuesH);
+ if (err < 0) {
+ lps001wp_prs_register_write(prs, buf, reg_addressL,
+ prs->resume_state[RES_REF_P_L]);
+ return err;
+ }
+ prs->resume_state[RES_REF_P_L] = bit_valuesL;
+ prs->resume_state[RES_REF_P_H] = bit_valuesH;
+ return err;
+}
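+
+/*
+ * Example: a reference of 0x1234 stores 0x34 in REF_P_L and 0x12 in
+ * REF_P_H; lps001wp_prs_get_press_reference() below reassembles the
+ * same 16-bit value from the two registers.
+ */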
+
+static int lps001wp_prs_get_press_reference(struct lps001wp_prs_data *prs,
+ u16 *buf16)
+{
+ int err = -EINVAL;
+
+ u8 bit_valuesL, bit_valuesH;
+ u8 buf[2] = {0};
+ u16 temp = 0;
+ /*
+ * We need to read configurations, only if device
+ * is currently enabled
+ */
+ if (!atomic_read(&prs->enabled))
+ return err;
+ err = lps001wp_prs_register_read(prs, buf, REF_P_L);
+ if (err < 0)
+ return err;
+ bit_valuesL = buf[0];
+ err = lps001wp_prs_register_read(prs, buf, REF_P_H);
+ if (err < 0)
+ return err;
+ bit_valuesH = buf[0];
+
+ temp = (((u16) bit_valuesH) << 8);
+ *buf16 = (temp | ((u16) bit_valuesL));
+
+ return err;
+}
+
+static int lps001wp_prs_lpow_manage(struct lps001wp_prs_data *prs, u8 control)
+{
+ int err = -EINVAL;
+ u8 buf[2] = {0x00, 0x00};
+ u8 const mask = LPS001WP_PRS_LPOW_MASK;
+ u8 bit_values = LPS001WP_PRS_LPOW_OFF;
+
+ /*
+ * We need to set new configurations, only if device
+ * is currently enabled
+ */
+ if (!atomic_read(&prs->enabled))
+ return err;
+ if (control >= LPS001WP_PRS_LPOWER_EN) {
+ bit_values = LPS001WP_PRS_LPOW_ON;
+ }
+
+ err = lps001wp_prs_register_update(prs, buf, CTRL_REG1,
+ mask, bit_values);
+
+ if (err < 0)
+ return err;
+ prs->resume_state[RES_CTRL_REG1] = ((mask & bit_values) |
+ (~mask & prs->resume_state[RES_CTRL_REG1]));
+ if (bit_values == LPS001WP_PRS_LPOW_ON)
+ prs->lpowmode_enabled = 1;
+ else
+ prs->lpowmode_enabled = 0;
+ return err;
+}
+
+static int lps001wp_prs_diffen_manage(struct lps001wp_prs_data *prs, u8 control)
+{
+ int err = -EINVAL;
+ u8 buf[2] = {0x00, 0x00};
+ u8 const mask = LPS001WP_PRS_DIFF_MASK;
+ u8 bit_values = LPS001WP_PRS_DIFF_OFF;
+
+ /*
+ * We need to set new configurations, only if device
+ * is currently enabled
+ */
+ if (!atomic_read(&prs->enabled))
+ return err;
+ if (control >= LPS001WP_PRS_DIFF_ENABLE) {
+ bit_values = LPS001WP_PRS_DIFF_ON;
+ }
+
+ err = lps001wp_prs_register_update(prs, buf, CTRL_REG1,
+ mask, bit_values);
+
+ if (err < 0)
+ return err;
+ prs->resume_state[RES_CTRL_REG1] = ((mask & bit_values) |
+ (~mask & prs->resume_state[RES_CTRL_REG1]));
+ if (bit_values == LPS001WP_PRS_DIFF_ON)
+ prs->diff_enabled = 1;
+ else
+ prs->diff_enabled = 0;
+ return err;
+}
+
+
+static int lps001wp_prs_get_presstemp_data(struct lps001wp_prs_data *prs,
+ struct outputdata *out)
+{
+ int err = -EINVAL;
+ /* Data bytes from hardware PRESS_OUT_L, PRESS_OUT_H,
+ * TEMP_OUT_L, TEMP_OUT_H,
+ * DELTA_L, DELTA_H */
+ u8 prs_data[6];
+
+ u16 abspr;
+ s16 temperature, deltapr;
+ int regToRead = 4;
+ prs_data[4] = 0;
+ prs_data[5] = 0;
+
+ if (prs->diff_enabled)
+ regToRead = 6;
+
+ prs_data[0] = (I2C_AUTO_INCREMENT | OUTDATA_REG);
+ err = lps001wp_prs_i2c_read(prs, prs_data, regToRead);
+ if (err < 0)
+ return err;
+
+ abspr = ((((u16) prs_data[1] << 8) | ((u16) prs_data[0])));
+ temperature = ((s16) (((u16) prs_data[3] << 8) | ((u16)prs_data[2])));
+
+ out->abspress = (abspr >> SENSITIVITY_P_SHIFT);
+ out->temperature = (temperature >> SENSITIVITY_T_SHIFT);
+
+ deltapr = ((s16) (((u16) prs_data[5] << 8) | ((u16)prs_data[4])));
+ out->deltapress = deltapr;
+
+ return err;
+}
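+
+/*
+ * Example of the scaling above: a raw pressure word of 16000 becomes
+ * 16000 >> 4 = 1000 mbar (16 LSB/mbar) and a raw temperature word of
+ * 1600 becomes 1600 >> 6 = 25 degC (64 LSB/degC); the delta pressure
+ * is reported unscaled.
+ */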
+
+#ifdef CONFIG_LPS001WP_INPUT_DEVICE
+static void lps001wp_prs_report_values(struct lps001wp_prs_data *prs,
+ struct outputdata *out)
+{
+ input_report_abs(prs->input_dev, ABS_PR, out->abspress);
+ input_report_abs(prs->input_dev, ABS_TEMP, out->temperature);
+ input_report_abs(prs->input_dev, ABS_DLTPR, out->deltapress);
+ input_sync(prs->input_dev);
+}
+#endif
+
+static int lps001wp_prs_enable(struct lps001wp_prs_data *prs)
+{
+ int err;
+
+ if (!atomic_cmpxchg(&prs->enabled, 0, 1)) {
+ if (prs->regulator)
+ regulator_enable(prs->regulator);
+ err = lps001wp_prs_device_power_on(prs);
+ if (err < 0) {
+ atomic_set(&prs->enabled, 0);
+ return err;
+ }
+#ifdef CONFIG_LPS001WP_INPUT_DEVICE
+ schedule_delayed_work(&prs->input_work,
+ msecs_to_jiffies(prs->pdata->poll_interval));
+#endif
+ }
+
+ return 0;
+}
+
+static int lps001wp_prs_disable(struct lps001wp_prs_data *prs)
+{
+ if (atomic_cmpxchg(&prs->enabled, 1, 0)) {
+#ifdef CONFIG_LPS001WP_INPUT_DEVICE
+ cancel_delayed_work_sync(&prs->input_work);
+#endif
+ lps001wp_prs_device_power_off(prs);
+ if (prs->regulator)
+ regulator_disable(prs->regulator);
+ }
+
+ return 0;
+}
+
+static ssize_t attr_get_polling_rate(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int val;
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ mutex_lock(&prs->lock);
+ val = prs->pdata->poll_interval;
+ mutex_unlock(&prs->lock);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t attr_set_polling_rate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ unsigned long interval_ms;
+ int err = -EINVAL;
+
+ if (strict_strtoul(buf, 10, &interval_ms))
+ return -EINVAL;
+ if (!interval_ms)
+ return -EINVAL;
+ mutex_lock(&prs->lock);
+ prs->pdata->poll_interval = interval_ms;
+ err = lps001wp_prs_update_odr(prs, interval_ms);
+ if (err < 0) {
+ dev_err(&prs->client->dev, "failed to update odr %ld\n",
+ interval_ms);
+ size = err;
+ }
+ mutex_unlock(&prs->lock);
+ return size;
+}
+
+static ssize_t attr_get_diff_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 val;
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ mutex_lock(&prs->lock);
+ val = prs->diff_enabled;
+ mutex_unlock(&prs->lock);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t attr_set_diff_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ unsigned long val;
+ int err = -EINVAL;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ mutex_lock(&prs->lock);
+ err = lps001wp_prs_diffen_manage(prs, (u8) val);
+ if (err < 0) {
+ dev_err(&prs->client->dev, "failed to diff enable %ld\n", val);
+ mutex_unlock(&prs->lock);
+ return err;
+ }
+ mutex_unlock(&prs->lock);
+ return size;
+}
+
+static ssize_t attr_get_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ int val = atomic_read(&prs->enabled);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t attr_set_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val)
+ lps001wp_prs_enable(prs);
+ else
+ lps001wp_prs_disable(prs);
+
+ return size;
+}
+
+static ssize_t attr_get_press_ref(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int err = -EINVAL;
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ u16 val = 0;
+
+ mutex_lock(&prs->lock);
+ err = lps001wp_prs_get_press_reference(prs, &val);
+ mutex_unlock(&prs->lock);
+ if (err < 0) {
+ dev_err(&prs->client->dev, "failed to get ref press\n");
+ return err;
+ }
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t attr_set_press_ref(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = -EINVAL;
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ unsigned long val = 0;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val < PR_ABS_MIN || val > PR_ABS_MAX)
+ return -EINVAL;
+
+ mutex_lock(&prs->lock);
+ err = lps001wp_prs_set_press_reference(prs, val);
+ mutex_unlock(&prs->lock);
+ if (err < 0) {
+ dev_err(&prs->client->dev, "failed to set ref press %ld\n",
+ val);
+ return err;
+ }
+ return size;
+}
+
+
+static ssize_t attr_get_lowpowmode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 val;
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ mutex_lock(&prs->lock);
+ val = prs->lpowmode_enabled;
+ mutex_unlock(&prs->lock);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t attr_set_lowpowmode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = -EINVAL;
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ mutex_lock(&prs->lock);
+ err = lps001wp_prs_lpow_manage(prs, (u8) val);
+ mutex_unlock(&prs->lock);
+ if (err < 0) {
+ dev_err(&prs->client->dev, "failed to set low powermode\n");
+ return err;
+ }
+ return size;
+}
+static ssize_t lps001wp_prs_get_press_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ struct outputdata out;
+ int err = -EINVAL;
+ mutex_lock(&prs->lock);
+ /*
+ * If device is currently enabled, we need to read
+ * data from it.
+ */
+ if (!atomic_read(&prs->enabled))
+ goto err_unlock;
+ err = lps001wp_prs_get_presstemp_data(prs, &out);
+ if (err < 0) {
+ dev_err(&prs->client->dev, "get_pressure_data failed\n");
+ goto err_unlock;
+ }
+ mutex_unlock(&prs->lock);
+ return sprintf(buf, "%d", out.abspress);
+out:
+ mutex_unlock(&prs->lock);
+ return err;
+}
+
+static ssize_t lps001wp_prs_get_deltapr_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ struct outputdata out;
+ int err = -EINVAL;
+ mutex_lock(&prs->lock);
+ /*
+ * If device is currently enabled, we need to read
+ * data from it.
+ */
+ if (!atomic_read(&prs->enabled)) {
+ mutex_unlock(&prs->lock);
+ return err;
+ }
+ err = lps001wp_prs_get_presstemp_data(prs, &out);
+ if (err < 0) {
+ dev_err(&prs->client->dev, "get_deltapress_data failed\n");
+ mutex_unlock(&prs->lock);
+ return err;
+ }
+ mutex_unlock(&prs->lock);
+ return sprintf(buf, "%d", out.deltapress);
+}
+
+static ssize_t lps001wp_prs_get_temp_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ struct outputdata out;
+ int err = -EINVAL;
+ mutex_lock(&prs->lock);
+ /*
+ * If device is currently enabled, we need to read
+ * data from it.
+ */
+ if (!atomic_read(&prs->enabled)) {
+ mutex_unlock(&prs->lock);
+ return err;
+ }
+ err = lps001wp_prs_get_presstemp_data(prs, &out);
+ if (err < 0) {
+ dev_err(&prs->client->dev, "get_temperature_data failed\n");
+ mutex_unlock(&prs->lock);
+ return err;
+ }
+ mutex_unlock(&prs->lock);
+ return sprintf(buf, "%d", out.temperature);
+}
+#ifdef DEBUG
+static ssize_t attr_reg_set(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int rc;
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ u8 x[2];
+ unsigned long val;
+
+ if (strict_strtoul(buf, 16, &val))
+ return -EINVAL;
+ mutex_lock(&prs->lock);
+ x[0] = prs->reg_addr;
+ mutex_unlock(&prs->lock);
+ x[1] = val;
+ rc = lps001wp_prs_i2c_write(prs, x, 1);
+ /*TODO: error need to be managed */
+ return size;
+}
+
+static ssize_t attr_reg_get(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret;
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ int rc;
+ u8 data;
+
+ mutex_lock(&prs->lock);
+ data = prs->reg_addr;
+ mutex_unlock(&prs->lock);
+ rc = lps001wp_prs_i2c_read(prs, &data, 1);
+ /*TODO: error need to be managed */
+ ret = sprintf(buf, "0x%02x\n", data);
+ return ret;
+}
+
+static ssize_t attr_addr_set(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ unsigned long val;
+ if (strict_strtoul(buf, 16, &val))
+ return -EINVAL;
+ mutex_lock(&prs->lock);
+ prs->reg_addr = val;
+ mutex_unlock(&prs->lock);
+ return size;
+}
+#endif
+
+
+
+static struct device_attribute attributes[] = {
+ __ATTR(pollrate_ms, S_IWUSR | S_IRUGO, attr_get_polling_rate,
+ attr_set_polling_rate),
+ __ATTR(enable, S_IWUSR | S_IRUGO, attr_get_enable, attr_set_enable),
+ __ATTR(diff_enable, S_IWUSR | S_IRUGO, attr_get_diff_enable,
+ attr_set_diff_enable),
+ __ATTR(press_reference, S_IWUSR | S_IRUGO, attr_get_press_ref,
+ attr_set_press_ref),
+ __ATTR(lowpow_enable, S_IWUSR | S_IRUGO, attr_get_lowpowmode,
+ attr_set_lowpowmode),
+ __ATTR(press_data, S_IRUGO, lps001wp_prs_get_press_data, NULL),
+ __ATTR(temp_data, S_IRUGO, lps001wp_prs_get_temp_data, NULL),
+ __ATTR(deltapr_data, S_IRUGO, lps001wp_prs_get_deltapr_data, NULL),
+#ifdef DEBUG
+ __ATTR(reg_value, S_IWUSR | S_IRUGO, attr_reg_get, attr_reg_set),
+ __ATTR(reg_addr, S_IWUSR, NULL, attr_addr_set),
+#endif
+};
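+
+/*
+ * The attributes above appear in the I2C client's sysfs directory.
+ * A typical user-space sequence (device path shown only as an example):
+ *
+ * echo 1 > /sys/bus/i2c/devices/<bus-addr>/enable
+ * echo 200 > /sys/bus/i2c/devices/<bus-addr>/pollrate_ms
+ * cat /sys/bus/i2c/devices/<bus-addr>/press_data
+ */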
+
+static int create_sysfs_interfaces(struct device *dev)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(attributes); i++)
+ if (device_create_file(dev, attributes + i))
+ goto error;
+ return 0;
+
+error:
+ for ( ; i >= 0; i--)
+ device_remove_file(dev, attributes + i);
+ dev_err(dev, "%s:Unable to create interface\n", __func__);
+ return -1;
+}
+
+static int remove_sysfs_interfaces(struct device *dev)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(attributes); i++)
+ device_remove_file(dev, attributes + i);
+ return 0;
+}
+
+#ifdef CONFIG_LPS001WP_INPUT_DEVICE
+static void lps001wp_prs_input_work_func(struct work_struct *work)
+{
+ struct lps001wp_prs_data *prs;
+
+ struct outputdata output;
+ struct outputdata *out = &output;
+ int err;
+
+ prs = container_of(to_delayed_work(work),
+ struct lps001wp_prs_data, input_work);
+
+ mutex_lock(&prs->lock);
+ err = lps001wp_prs_get_presstemp_data(prs, out);
+ if (err < 0)
+ dev_err(&prs->client->dev, "get_pressure_data failed\n");
+ else
+ lps001wp_prs_report_values(prs, out);
+
+ schedule_delayed_work(&prs->input_work,
+ msecs_to_jiffies(prs->pdata->poll_interval));
+ mutex_unlock(&prs->lock);
+}
+
+static int lps001wp_prs_input_open(struct input_dev *input)
+{
+ struct lps001wp_prs_data *prs = input_get_drvdata(input);
+
+ return lps001wp_prs_enable(prs);
+}
+
+static void lps001wp_prs_input_close(struct input_dev *dev)
+{
+ struct lps001wp_prs_data *prs = input_get_drvdata(dev);
+
+ lps001wp_prs_disable(prs);
+}
+#endif
+
+static int lps001wp_prs_validate_pdata(struct lps001wp_prs_data *prs)
+{
+ prs->pdata->poll_interval = max(prs->pdata->poll_interval,
+ prs->pdata->min_interval);
+
+ /* Enforce minimum polling interval */
+ if (prs->pdata->poll_interval < prs->pdata->min_interval) {
+ dev_err(&prs->client->dev, "minimum poll interval violated\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#ifdef CONFIG_LPS001WP_INPUT_DEVICE
+static int lps001wp_prs_input_init(struct lps001wp_prs_data *prs)
+{
+ int err;
+ INIT_DELAYED_WORK(&prs->input_work, lps001wp_prs_input_work_func);
+ prs->input_dev = input_allocate_device();
+ if (!prs->input_dev) {
+ err = -ENOMEM;
+ dev_err(&prs->client->dev, "input device allocate failed\n");
+ goto err0;
+ }
+
+ prs->input_dev->open = lps001wp_prs_input_open;
+ prs->input_dev->close = lps001wp_prs_input_close;
+ prs->input_dev->name = LPS001WP_PRS_DEV_NAME;
+ prs->input_dev->id.bustype = BUS_I2C;
+ prs->input_dev->dev.parent = &prs->client->dev;
+
+ input_set_drvdata(prs->input_dev, prs);
+
+ set_bit(EV_ABS, prs->input_dev->evbit);
+
+ input_set_abs_params(prs->input_dev, ABS_PR,
+ PR_ABS_MIN, PR_ABS_MAX, FUZZ, FLAT);
+ input_set_abs_params(prs->input_dev, ABS_TEMP,
+ TEMP_MIN, TEMP_MAX, FUZZ, FLAT);
+ input_set_abs_params(prs->input_dev, ABS_DLTPR,
+ PR_DLT_MIN, PR_DLT_MAX, FUZZ, FLAT);
+
+
+ prs->input_dev->name = "LPS001WP barometer";
+
+ err = input_register_device(prs->input_dev);
+ if (err) {
+ dev_err(&prs->client->dev,
+ "unable to register input polled device %s\n",
+ prs->input_dev->name);
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ input_free_device(prs->input_dev);
+err0:
+ return err;
+}
+
+static void lps001wp_prs_input_cleanup(struct lps001wp_prs_data *prs)
+{
+ input_unregister_device(prs->input_dev);
+ input_free_device(prs->input_dev);
+}
+#endif
+static int lps001wp_prs_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lps001wp_prs_data *prs;
+ int err = -EINVAL;
+ int tempvalue;
+
+ pr_info("%s: probe start.\n", LPS001WP_PRS_DEV_NAME);
+
+ if (client->dev.platform_data == NULL) {
+ dev_err(&client->dev, "platform data is NULL. exiting.\n");
+ err = -ENODEV;
+ goto exit_check_functionality_failed;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "client not i2c capable\n");
+ err = -ENODEV;
+ goto exit_check_functionality_failed;
+ }
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(&client->dev, "client not smb-i2c capable:2\n");
+ err = -EIO;
+ goto exit_check_functionality_failed;
+ }
+
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK)){
+ dev_err(&client->dev, "client not smb-i2c capable:3\n");
+ err = -EIO;
+ goto exit_check_functionality_failed;
+ }
+
+
+ prs = kzalloc(sizeof(struct lps001wp_prs_data), GFP_KERNEL);
+ if (prs == NULL) {
+ err = -ENOMEM;
+ dev_err(&client->dev,
+ "failed to allocate memory for module data: "
+ "%d\n", err);
+ goto exit_alloc_data_failed;
+ }
+
+ mutex_init(&prs->lock);
+ mutex_lock(&prs->lock);
+
+ prs->client = client;
+ i2c_set_clientdata(client, prs);
+
+ prs->regulator = regulator_get(&client->dev, "vdd");
+ if (IS_ERR(prs->regulator)) {
+ dev_err(&client->dev, "failed to get regulator\n");
+ err = PTR_ERR(prs->regulator);
+ prs->regulator = NULL;
+ }
+ if (prs->regulator)
+ regulator_enable(prs->regulator);
+
+ if (i2c_smbus_read_byte(client) < 0) {
+ dev_err(&client->dev, "i2c_smbus_read_byte error!!\n");
+ goto err_mutexunlockfreedata;
+ } else {
+ dev_dbg(&client->dev, "%s Device detected!\n",
+ LPS001WP_PRS_DEV_NAME);
+ }
+
+ /* read chip id */
+ tempvalue = i2c_smbus_read_word_data(client, WHO_AM_I);
+ if ((tempvalue & 0x00FF) == WHOAMI_LPS001WP_PRS) {
+ dev_dbg(&client->dev, "%s I2C driver registered!\n",
+ LPS001WP_PRS_DEV_NAME);
+ } else {
+ prs->client = NULL;
+ dev_dbg(&client->dev, "I2C driver not registered!"
+ " Device unknown\n");
+ goto err_mutexunlockfreedata;
+ }
+
+ prs->pdata = kmalloc(sizeof(*prs->pdata), GFP_KERNEL);
+ if (prs->pdata == NULL) {
+ err = -ENOMEM;
+ dev_err(&client->dev,
+ "failed to allocate memory for pdata: %d\n",
+ err);
+ goto err_mutexunlockfreedata;
+ }
+
+ memcpy(prs->pdata, client->dev.platform_data, sizeof(*prs->pdata));
+
+ err = lps001wp_prs_validate_pdata(prs);
+ if (err < 0) {
+ dev_err(&client->dev, "failed to validate platform data\n");
+ goto exit_kfree_pdata;
+ }
+
+
+ if (prs->pdata->init) {
+ err = prs->pdata->init();
+ if (err < 0) {
+ dev_err(&client->dev, "init failed: %d\n", err);
+ goto err2;
+ }
+ }
+
+ memset(prs->resume_state, 0, sizeof(prs->resume_state));
+
+ prs->resume_state[RES_CTRL_REG1] = LPS001WP_PRS_PM_NORMAL;
+ prs->resume_state[RES_CTRL_REG2] = 0x00;
+ prs->resume_state[RES_CTRL_REG3] = 0x00;
+ prs->resume_state[RES_REF_P_L] = 0x00;
+ prs->resume_state[RES_REF_P_H] = 0x00;
+ prs->resume_state[RES_THS_P_L] = 0x00;
+ prs->resume_state[RES_THS_P_H] = 0x00;
+ prs->resume_state[RES_INT_CFG] = 0x00;
+
+ err = lps001wp_prs_device_power_on(prs);
+ if (err < 0) {
+ dev_err(&client->dev, "power on failed: %d\n", err);
+ goto err2;
+ }
+
+ prs->diff_enabled = 0;
+ prs->lpowmode_enabled = 0;
+ atomic_set(&prs->enabled, 1);
+
+ err = lps001wp_prs_update_odr(prs, prs->pdata->poll_interval);
+ if (err < 0) {
+ dev_err(&client->dev, "update_odr failed\n");
+ goto err_power_off;
+ }
+#ifdef CONFIG_LPS001WP_INPUT_DEVICE
+ err = lps001wp_prs_input_init(prs);
+ if (err < 0) {
+ dev_err(&client->dev, "input init failed\n");
+ goto err_power_off;
+ }
+#endif
+
+ err = create_sysfs_interfaces(&client->dev);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "device %s sysfs register failed\n",
+ LPS001WP_PRS_DEV_NAME);
+ goto err_input_cleanup;
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ prs->early_suspend.level =
+ EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ prs->early_suspend.suspend = lps001wp_prs_early_suspend;
+ prs->early_suspend.resume = lps001wp_prs_late_resume;
+ register_early_suspend(&prs->early_suspend);
+#endif
+
+
+ lps001wp_prs_device_power_off(prs);
+
+ if (prs->regulator)
+ regulator_disable(prs->regulator);
+
+ /* As default, do not report information */
+ atomic_set(&prs->enabled, 0);
+
+
+ mutex_unlock(&prs->lock);
+
+ dev_info(&client->dev, "%s: probed\n", LPS001WP_PRS_DEV_NAME);
+
+ return 0;
+
+/*
+remove_sysfs_int:
+ remove_sysfs_interfaces(&client->dev);
+*/
+err_input_cleanup:
+#ifdef CONFIG_LPS001WP_INPUT_DEVICE
+ lps001wp_prs_input_cleanup(prs);
+#endif
+err_power_off:
+ lps001wp_prs_device_power_off(prs);
+err2:
+ if (prs->pdata->exit)
+ prs->pdata->exit();
+exit_kfree_pdata:
+ kfree(prs->pdata);
+
+err_mutexunlockfreedata:
+ mutex_unlock(&prs->lock);
+ if (prs->regulator) {
+ regulator_disable(prs->regulator);
+ regulator_put(prs->regulator);
+ }
+ kfree(prs);
+exit_alloc_data_failed:
+exit_check_functionality_failed:
+ dev_err(&client->dev, "%s: Driver Init failed\n",
+ LPS001WP_PRS_DEV_NAME);
+ return err;
+}
+
+static int __devexit lps001wp_prs_remove(struct i2c_client *client)
+{
+ struct lps001wp_prs_data *prs = i2c_get_clientdata(client);
+
+#ifdef CONFIG_LPS001WP_INPUT_DEVICE
+ lps001wp_prs_input_cleanup(prs);
+#endif
+ lps001wp_prs_device_power_off(prs);
+ remove_sysfs_interfaces(&client->dev);
+ if (prs->regulator) {
+ /* Disable the regulator if device is enabled. */
+ if (atomic_read(&prs->enabled))
+ regulator_disable(prs->regulator);
+ regulator_put(prs->regulator);
+ }
+
+ if (prs->pdata->exit)
+ prs->pdata->exit();
+
+ kfree(prs->pdata);
+ kfree(prs);
+
+ return 0;
+}
+#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
+static int lps001wp_prs_resume(struct device *dev)
+{
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+
+ if (prs->on_before_suspend)
+ return lps001wp_prs_enable(prs);
+ return 0;
+}
+
+static int lps001wp_prs_suspend(struct device *dev)
+{
+ struct lps001wp_prs_data *prs = dev_get_drvdata(dev);
+ prs->on_before_suspend = atomic_read(&prs->enabled);
+ return lps001wp_prs_disable(prs);
+}
+
+static const struct dev_pm_ops lps001wp_prs_dev_pm_ops = {
+ .suspend = lps001wp_prs_suspend,
+ .resume = lps001wp_prs_resume,
+};
+#else
+static void lps001wp_prs_early_suspend(struct early_suspend *data)
+{
+ struct lps001wp_prs_data *prs =
+ container_of(data, struct lps001wp_prs_data, early_suspend);
+ prs->on_before_suspend = atomic_read(&prs->enabled);
+ lps001wp_prs_disable(prs);
+}
+
+static void lps001wp_prs_late_resume(struct early_suspend *data)
+{
+ struct lps001wp_prs_data *prs =
+ container_of(data, struct lps001wp_prs_data, early_suspend);
+ if (prs->on_before_suspend)
+ lps001wp_prs_enable(prs);
+}
+#endif
+static const struct i2c_device_id lps001wp_prs_id[] = {
+ { LPS001WP_PRS_DEV_NAME, 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, lps001wp_prs_id);
+
+static struct i2c_driver lps001wp_prs_driver = {
+ .driver = {
+ .name = LPS001WP_PRS_DEV_NAME,
+ .owner = THIS_MODULE,
+#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
+ .pm = &lps001wp_prs_dev_pm_ops,
+#endif
+ },
+ .probe = lps001wp_prs_probe,
+ .remove = __devexit_p(lps001wp_prs_remove),
+ .id_table = lps001wp_prs_id,
+};
+
+static int __init lps001wp_prs_init(void)
+{
+ printk(KERN_DEBUG "%s barometer driver: init\n",
+ LPS001WP_PRS_DEV_NAME);
+ return i2c_add_driver(&lps001wp_prs_driver);
+}
+
+static void __exit lps001wp_prs_exit(void)
+{
+ #if DEBUG
+ printk(KERN_DEBUG "%s barometer driver exit\n",
+ LPS001WP_PRS_DEV_NAME);
+ #endif
+ i2c_del_driver(&lps001wp_prs_driver);
+ return;
+}
+
+module_init(lps001wp_prs_init);
+module_exit(lps001wp_prs_exit);
+
+MODULE_DESCRIPTION("STMicrolelectronics lps001wp pressure sensor sysfs driver");
+MODULE_AUTHOR("Matteo Dameno, Carmine Iascone, STMicroelectronics");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/input/misc/ste_ff_vibra.c b/drivers/input/misc/ste_ff_vibra.c
new file mode 100644
index 00000000000..9038e6be046
--- /dev/null
+++ b/drivers/input/misc/ste_ff_vibra.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>
+ * for ST-Ericsson
+ * License Terms: GNU General Public License v2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <mach/ste_audio_io_vibrator.h>
+
+#define FF_VIBRA_DOWN 0x0000 /* 0 degrees */
+#define FF_VIBRA_LEFT 0x4000 /* 90 degrees */
+#define FF_VIBRA_UP 0x8000 /* 180 degrees */
+#define FF_VIBRA_RIGHT 0xC000 /* 270 degrees */
+
+/**
+ * struct vibra_info - Vibrator information structure
+ * @idev: Pointer to input device structure
+ * @vibra_workqueue: Pointer to vibrator workqueue structure
+ * @vibra_work: Vibrator work
+ * @direction: Vibration direction
+ * @speed: Vibration speed
+ *
+ * Structure vibra_info holds vibrator information
+ **/
+struct vibra_info {
+ struct input_dev *idev;
+ struct workqueue_struct *vibra_workqueue;
+ struct work_struct vibra_work;
+ int direction;
+ unsigned char speed;
+};
+
+/**
+ * vibra_play_work() - Vibrator work, sets speed and direction
+ * @work: Pointer to work structure
+ *
+ * This function is called from workqueue, turns on/off vibrator
+ **/
+static void vibra_play_work(struct work_struct *work)
+{
+ struct vibra_info *vinfo = container_of(work,
+ struct vibra_info, vibra_work);
+ struct ste_vibra_speed left_speed = {
+ .positive = 0,
+ .negative = 0,
+ };
+ struct ste_vibra_speed right_speed = {
+ .positive = 0,
+ .negative = 0,
+ };
+
+ /* Divide by 2 because the range supported by the PWM is 0-100 */
+ vinfo->speed /= 2;
+
+ if ((vinfo->direction > FF_VIBRA_DOWN) &&
+ (vinfo->direction < FF_VIBRA_UP)) {
+ /* 1 - 179 degrees, turn on left vibrator */
+ left_speed.positive = vinfo->speed;
+ } else if (vinfo->direction > FF_VIBRA_UP) {
+ /* more than 180 degrees, turn on right vibrator */
+ right_speed.positive = vinfo->speed;
+ } else {
+ /* 0 (down) or 180 (up) degrees, turn on 2 vibrators */
+ left_speed.positive = vinfo->speed;
+ right_speed.positive = vinfo->speed;
+ }
+
+ ste_audioio_vibrator_pwm_control(STE_AUDIOIO_CLIENT_FF_VIBRA,
+ left_speed, right_speed);
+}
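+
+/*
+ * Example of the mapping above: an ff_effect with direction 0x4000
+ * (90 degrees) and u.rumble.strong_magnitude 0xC800 gives a speed of
+ * 0xC8 = 200, scaled to 200 / 2 = 100, i.e. full PWM on the left
+ * vibrator only; direction 0x8000 (180 degrees, "up") drives both
+ * vibrators.
+ */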
+
+/**
+ * vibra_play() - Memless device control function
+ * @idev: Pointer to input device structure
+ * @data: Pointer to private data (not used)
+ * @effect: Pointer to force feedback effect structure
+ *
+ * This function controls memless device
+ *
+ * Returns:
+ * 0 - success
+ **/
+static int vibra_play(struct input_dev *idev, void *data,
+ struct ff_effect *effect)
+{
+ struct vibra_info *vinfo = input_get_drvdata(idev);
+
+ vinfo->direction = effect->direction;
+ vinfo->speed = effect->u.rumble.strong_magnitude >> 8;
+ if (!vinfo->speed)
+ /* Shift weak magnitude so it is still perceptible on the vibrator */
+ vinfo->speed = effect->u.rumble.weak_magnitude >> 9;
+
+ queue_work(vinfo->vibra_workqueue, &vinfo->vibra_work);
+
+ return 0;
+}
+
+/**
+ * ste_ff_vibra_open() - Input device open function
+ * @idev: Pointer to input device structure
+ *
+ * This function is called on opening input device
+ *
+ * Returns:
+ * -ENOMEM - no memory left
+ * 0 - success
+ **/
+static int ste_ff_vibra_open(struct input_dev *idev)
+{
+ struct vibra_info *vinfo = input_get_drvdata(idev);
+
+ vinfo->vibra_workqueue =
+ create_singlethread_workqueue("ste_ff-ff-vibra");
+ if (!vinfo->vibra_workqueue) {
+ dev_err(&idev->dev, "couldn't create vibra workqueue\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * ste_ff_vibra_close() - Input device close function
+ * @idev: Pointer to input device structure
+ *
+ * This function is called on closing input device
+ **/
+static void ste_ff_vibra_close(struct input_dev *idev)
+{
+ struct vibra_info *vinfo = input_get_drvdata(idev);
+
+ cancel_work_sync(&vinfo->vibra_work);
+ INIT_WORK(&vinfo->vibra_work, vibra_play_work);
+ destroy_workqueue(vinfo->vibra_workqueue);
+ vinfo->vibra_workqueue = NULL;
+}
+
+static int __devinit ste_ff_vibra_probe(struct platform_device *pdev)
+{
+ struct vibra_info *vinfo;
+ int ret;
+
+ vinfo = kmalloc(sizeof *vinfo, GFP_KERNEL);
+ if (!vinfo) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ vinfo->idev = input_allocate_device();
+ if (!vinfo->idev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ ret = -ENOMEM;
+ goto exit_vinfo_free;
+ }
+
+ vinfo->idev->name = "ste-ff-vibra";
+ vinfo->idev->dev.parent = pdev->dev.parent;
+ vinfo->idev->open = ste_ff_vibra_open;
+ vinfo->idev->close = ste_ff_vibra_close;
+ INIT_WORK(&vinfo->vibra_work, vibra_play_work);
+ __set_bit(FF_RUMBLE, vinfo->idev->ffbit);
+
+ ret = input_ff_create_memless(vinfo->idev, NULL, vibra_play);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create memless device\n");
+ goto exit_idev_free;
+ }
+
+ ret = input_register_device(vinfo->idev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ goto exit_destroy_memless;
+ }
+
+ input_set_drvdata(vinfo->idev, vinfo);
+ platform_set_drvdata(pdev, vinfo);
+ return 0;
+
+exit_destroy_memless:
+ input_ff_destroy(vinfo->idev);
+exit_idev_free:
+ input_free_device(vinfo->idev);
+exit_vinfo_free:
+ kfree(vinfo);
+ return ret;
+}
+
+static int __devexit ste_ff_vibra_remove(struct platform_device *pdev)
+{
+ struct vibra_info *vinfo = platform_get_drvdata(pdev);
+
+ /*
+ * Function device_release() will call input_dev_release()
+ * which will free ff and input device. No need to call
+ * input_ff_destroy() and input_free_device() explicitly.
+ */
+ input_unregister_device(vinfo->idev);
+ kfree(vinfo);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ste_ff_vibra_driver = {
+ .driver = {
+ .name = "ste_ff_vibra",
+ .owner = THIS_MODULE,
+ },
+ .probe = ste_ff_vibra_probe,
+ .remove = __devexit_p(ste_ff_vibra_remove)
+};
+
+static int __init ste_ff_vibra_init(void)
+{
+ return platform_driver_register(&ste_ff_vibra_driver);
+}
+module_init(ste_ff_vibra_init);
+
+static void __exit ste_ff_vibra_exit(void)
+{
+ platform_driver_unregister(&ste_ff_vibra_driver);
+}
+module_exit(ste_ff_vibra_exit);
+
+MODULE_AUTHOR("Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>");
+MODULE_DESCRIPTION("STE Force Feedback Vibrator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 4af2a18eb3b..643b969ae61 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -135,6 +135,23 @@ config TOUCHSCREEN_BU21013
To compile this driver as a module, choose M here: the
module will be called bu21013_ts.
+config TOUCHSCREEN_CYTTSP_CORE
+ tristate "Cypress TTSP touchscreen core"
+ help
+ Core support required by the Cypress TTSP bus-specific touchscreen
+ drivers (such as the SPI driver below).
+
+config TOUCHSCREEN_CYTTSP_SPI
+ tristate "Cypress TTSP spi touchscreen"
+ depends on SPI_MASTER && TOUCHSCREEN_CYTTSP_CORE
+ help
+ Say Y here if you have a Cypress TTSP touchscreen
+ connected to your system with an SPI interface.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cyttsp-spi.
+
config TOUCHSCREEN_CY8CTMG110
tristate "cy8ctmg110 touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 496091e8846..5b8bdb04f8c 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -17,7 +17,9 @@ obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
-obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
+obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
+obj-$(CONFIG_TOUCHSCREEN_CYTTSP_SPI) += cyttsp_spi.o
+obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o
obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 902c7214e88..857a21db0eb 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST-Ericsson SA 2009
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
* License terms:GNU General Public License (GPL) version 2
*/
@@ -12,13 +12,14 @@
#include <linux/input.h>
#include <linux/input/bu21013.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#define PEN_DOWN_INTR 0
-#define MAX_FINGERS 2
#define RESET_DELAY 30
-#define PENUP_TIMEOUT (10)
+#define PENUP_TIMEOUT 2 /* 2msecs */
+#define SCALE_FACTOR 1000
#define DELTA_MIN 16
#define MASK_BITS 0x03
#define SHIFT_8 8
@@ -131,7 +132,7 @@
#define BU21013_NUMBER_OF_X_SENSORS (6)
#define BU21013_NUMBER_OF_Y_SENSORS (11)
-#define DRIVER_TP "bu21013_tp"
+#define DRIVER_TP "bu21013_ts"
/**
* struct bu21013_ts_data - touch panel data structure
@@ -142,6 +143,12 @@
* @in_dev: pointer to the input device structure
* @intr_pin: interrupt pin value
* @regulator: pointer to the Regulator used for touch screen
+ * @enable: variable to indicate the enable/disable of touch screen
+ * @ext_clk_enable: true if running on ext clk
+ * @ext_clk_state: Saved state for suspend/resume of ext clk
+ * @factor_x: x scale factor
+ * @factor_y: y scale factor
+ * @tpclk: pointer to clock structure
*
* Touch panel device data structure
*/
@@ -149,12 +156,226 @@ struct bu21013_ts_data {
struct i2c_client *client;
wait_queue_head_t wait;
bool touch_stopped;
- const struct bu21013_platform_device *chip;
+ struct bu21013_platform_device *chip;
struct input_dev *in_dev;
unsigned int intr_pin;
struct regulator *regulator;
+ bool enable;
+ bool ext_clk_enable;
+ bool ext_clk_state;
+ unsigned int factor_x;
+ unsigned int factor_y;
+ struct clk *tpclk;
};
+static int bu21013_init_chip(struct bu21013_ts_data *data, bool on_ext_clk);
+
+/**
+ * bu21013_ext_clk() - enable/disable the external clock
+ * @pdata: touch screen data
+ * @enable: enable external clock
+ * @reconfig: reconfigure chip upon external clock off.
+ *
+ * This function is used to enable or disable the external clock and,
+ * if requested, reconfigure the controller afterwards.
+ */
+static int bu21013_ext_clk(struct bu21013_ts_data *pdata, bool enable,
+ bool reconfig)
+{
+ int retval = 0;
+
+ if (!pdata->tpclk || pdata->ext_clk_enable == enable)
+ return retval;
+
+ if (enable) {
+ pdata->ext_clk_enable = true;
+ clk_enable(pdata->tpclk);
+ retval = bu21013_init_chip(pdata, true);
+ } else {
+ pdata->ext_clk_enable = false;
+ if (reconfig)
+ retval = bu21013_init_chip(pdata, false);
+ clk_disable(pdata->tpclk);
+ }
+ return retval;
+}
+
+/**
+ * bu21013_enable() - enable the touch driver event
+ * @pdata: touch screen data
+ *
+ * This function is used to enable the touch panel and returns an integer
+ */
+static int bu21013_enable(struct bu21013_ts_data *pdata)
+{
+ int retval;
+
+ if (pdata->regulator)
+ regulator_enable(pdata->regulator);
+
+ if (pdata->chip->cs_en) {
+ retval = pdata->chip->cs_en(pdata->chip->cs_pin);
+ if (retval < 0) {
+ dev_err(&pdata->client->dev, "enable hw failed\n");
+ return retval;
+ }
+ }
+
+ if (pdata->ext_clk_state)
+ retval = bu21013_ext_clk(pdata, true, true);
+ else
+ retval = bu21013_init_chip(pdata, false);
+
+ if (retval < 0) {
+ dev_err(&pdata->client->dev, "enable hw failed\n");
+ return retval;
+ }
+ pdata->touch_stopped = false;
+ enable_irq(pdata->chip->irq);
+
+ return 0;
+}
+
+/**
+ * bu21013_disable() - disable the touch driver event
+ * @pdata: touch screen data
+ *
+ * This function is used to disable the touch panel.
+ */
+static void bu21013_disable(struct bu21013_ts_data *pdata)
+{
+ pdata->touch_stopped = true;
+
+ pdata->ext_clk_state = pdata->ext_clk_enable;
+ (void) bu21013_ext_clk(pdata, false, false);
+
+ disable_irq(pdata->chip->irq);
+ if (pdata->chip->cs_dis)
+ pdata->chip->cs_dis(pdata->chip->cs_pin);
+ if (pdata->regulator)
+ regulator_disable(pdata->regulator);
+}
+
+/**
+ * bu21013_show_attr_enable() - show the touch screen controller status
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ *
+ * This function is used to show whether the touch screen is enabled or
+ * disabled
+ */
+static ssize_t bu21013_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", pdata->enable);
+}
+
+/**
+ * bu21013_store_attr_enable() - Enable/Disable the touchscreen.
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ * @count: number of parameters
+ *
+ * This function is used to enable or disable the touch screen controller.
+ */
+static ssize_t bu21013_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ unsigned long val;
+
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (pdata->enable != val) {
+ pdata->enable = val ? true : false;
+ if (pdata->enable) {
+ ret = bu21013_enable(pdata);
+ if (ret < 0)
+ return ret;
+ } else
+ bu21013_disable(pdata);
+ }
+ return count;
+}
+
+/**
+ * bu21013_show_attr_extclk() - shows the external clock status
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ *
+ * This function is used to show whether the external clock for the touch
+ * screen is enabled or disabled.
+ */
+static ssize_t bu21013_show_attr_extclk(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", pdata->ext_clk_enable);
+}
+
+/**
+ * bu21013_store_attr_extclk() - Enable/Disable the external clock
+ * for the touch screen controller.
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ * @count: number of parameters
+ *
+ * This function is used to enable or disable the external clock for the touch
+ * screen controller.
+ */
+static ssize_t bu21013_store_attr_extclk(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval = 0;
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (pdata->chip->has_ext_clk) {
+ if (pdata->enable)
+ retval = bu21013_ext_clk(pdata, val, true);
+ else
+ pdata->ext_clk_state = val;
+ if (retval < 0)
+ return retval;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ bu21013_show_attr_enable, bu21013_store_attr_enable);
+
+static DEVICE_ATTR(ext_clk, S_IWUSR | S_IRUGO,
+ bu21013_show_attr_extclk, bu21013_store_attr_extclk);
+
+
+static struct attribute *bu21013_attribute[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_ext_clk.attr,
+ NULL,
+};
+
+static struct attribute_group bu21013_attr_group = {
+ .attrs = bu21013_attribute,
+};
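+/*
+ * This attribute group is registered on the I2C client device in
+ * bu21013_probe(); writing 0/1 to the resulting "enable" and "ext_clk"
+ * sysfs files switches the controller and its external clock at runtime.
+ */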
+
+
/**
* bu21013_read_block_data(): read the touch co-ordinates
* @data: bu21013_ts_data structure pointer
@@ -204,12 +425,14 @@ static int bu21013_do_touch_report(struct bu21013_ts_data *data)
if (!has_x_sensors || !has_y_sensors)
return 0;
- for (i = 0; i < MAX_FINGERS; i++) {
+ for (i = 0; i < 2; i++) {
const u8 *p = &buf[4 * i + 3];
unsigned int x = p[0] << SHIFT_2 | (p[1] & MASK_BITS);
unsigned int y = p[2] << SHIFT_2 | (p[3] & MASK_BITS);
if (x == 0 || y == 0)
continue;
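+ /* scale raw controller coordinates to the display resolution
+ * using the fixed-point factors computed in bu21013_init_chip() */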
+ x = x * data->factor_x / SCALE_FACTOR;
+ y = y * data->factor_y / SCALE_FACTOR;
pos_x[finger_down_count] = x;
pos_y[finger_down_count] = y;
finger_down_count++;
@@ -217,21 +440,21 @@ static int bu21013_do_touch_report(struct bu21013_ts_data *data)
if (finger_down_count) {
if (finger_down_count == 2 &&
- (abs(pos_x[0] - pos_x[1]) < DELTA_MIN ||
- abs(pos_y[0] - pos_y[1]) < DELTA_MIN)) {
+ (abs(pos_x[0] - pos_x[1]) < DELTA_MIN ||
+ abs(pos_y[0] - pos_y[1]) < DELTA_MIN))
return 0;
- }
for (i = 0; i < finger_down_count; i++) {
- if (data->chip->x_flip)
- pos_x[i] = data->chip->touch_x_max - pos_x[i];
- if (data->chip->y_flip)
- pos_y[i] = data->chip->touch_y_max - pos_y[i];
-
- input_report_abs(data->in_dev,
- ABS_MT_POSITION_X, pos_x[i]);
- input_report_abs(data->in_dev,
- ABS_MT_POSITION_Y, pos_y[i]);
+ if (data->chip->portrait && data->chip->x_flip)
+ pos_x[i] = data->chip->x_max_res - pos_x[i];
+ if (data->chip->portrait && data->chip->y_flip)
+ pos_y[i] = data->chip->y_max_res - pos_y[i];
+ input_report_abs(data->in_dev, ABS_MT_TOUCH_MAJOR,
+ max(pos_x[i], pos_y[i]));
+ input_report_abs(data->in_dev, ABS_MT_POSITION_X,
+ pos_x[i]);
+ input_report_abs(data->in_dev, ABS_MT_POSITION_Y,
+ pos_y[i]);
input_mt_sync(data->in_dev);
}
} else
@@ -261,24 +484,23 @@ static irqreturn_t bu21013_gpio_irq(int irq, void *device_data)
dev_err(&i2c->dev, "bu21013_do_touch_report failed\n");
return IRQ_NONE;
}
-
data->intr_pin = data->chip->irq_read_val();
if (data->intr_pin == PEN_DOWN_INTR)
wait_event_timeout(data->wait, data->touch_stopped,
- msecs_to_jiffies(2));
+ msecs_to_jiffies(PENUP_TIMEOUT));
} while (!data->intr_pin && !data->touch_stopped);
-
return IRQ_HANDLED;
}
/**
* bu21013_init_chip() - power on sequence for the bu21013 controller
* @data: device structure pointer
+ * @on_ext_clk: Run on external clock
*
* This function is used to power on
* the bu21013 controller and returns integer.
*/
-static int bu21013_init_chip(struct bu21013_ts_data *data)
+static int bu21013_init_chip(struct bu21013_ts_data *data, bool on_ext_clk)
{
int retval;
struct i2c_client *i2c = data->client;
@@ -297,28 +519,24 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_SENSOR_0_7 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_8_15_REG,
BU21013_SENSORS_EN_8_15);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_SENSOR_8_15 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_16_23_REG,
BU21013_SENSORS_EN_16_23);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_SENSOR_16_23 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE1_REG,
(BU21013_POS_MODE1_0 | BU21013_POS_MODE1_1));
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_POS_MODE1 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE2_REG,
(BU21013_POS_MODE2_ZERO | BU21013_POS_MODE2_AVG1 |
BU21013_POS_MODE2_AVG2 | BU21013_POS_MODE2_EN_RAW |
@@ -327,8 +545,7 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_POS_MODE2 reg write failed\n");
return retval;
}
-
- if (data->chip->ext_clk)
+ if (on_ext_clk)
retval = i2c_smbus_write_byte_data(i2c, BU21013_CLK_MODE_REG,
(BU21013_CLK_MODE_EXT | BU21013_CLK_MODE_CALIB));
else
@@ -338,21 +555,18 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_CLK_MODE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_IDLE_REG,
(BU21013_IDLET_0 | BU21013_IDLE_INTERMIT_EN));
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_IDLE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_INT_MODE_REG,
BU21013_INT_MODE_LEVEL);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_INT_MODE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_FILTER_REG,
(BU21013_DELTA_0_6 |
BU21013_FILTER_EN));
@@ -367,14 +581,12 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_TH_ON reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_TH_OFF_REG,
BU21013_TH_OFF_4 | BU21013_TH_OFF_3);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_TH_OFF reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_GAIN_REG,
(BU21013_GAIN_0 | BU21013_GAIN_1));
if (retval < 0) {
@@ -388,7 +600,6 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_OFFSET_MODE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_XY_EDGE_REG,
(BU21013_X_EDGE_0 | BU21013_X_EDGE_2 |
BU21013_Y_EDGE_1 | BU21013_Y_EDGE_3));
@@ -396,7 +607,6 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_XY_EDGE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_DONE_REG,
BU21013_DONE);
if (retval < 0) {
@@ -404,25 +614,15 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
return retval;
}
- return 0;
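+ /* precompute fixed-point scale factors (in units of 1/SCALE_FACTOR)
+ * that map raw touch coordinates onto the panel's x/y resolution */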
+ data->factor_x = (data->chip->x_max_res * SCALE_FACTOR /
+ data->chip->touch_x_max);
+ data->factor_y = (data->chip->y_max_res * SCALE_FACTOR /
+ data->chip->touch_y_max);
+ return retval;
}
/**
- * bu21013_free_irq() - frees IRQ registered for touchscreen
- * @bu21013_data: device structure pointer
- *
- * This function signals interrupt thread to stop processing and
- * frees interrupt.
- */
-static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
-{
- bu21013_data->touch_stopped = true;
- wake_up(&bu21013_data->wait);
- free_irq(bu21013_data->chip->irq, bu21013_data);
-}
-
-/**
- * bu21013_probe() - initializes the i2c-client touchscreen driver
+ * bu21013_probe() - initializes the i2c-client touchscreen driver
* @client: i2c client structure pointer
* @id: i2c device id pointer
*
@@ -432,11 +632,11 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
static int __devinit bu21013_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ int retval;
struct bu21013_ts_data *bu21013_data;
struct input_dev *in_dev;
- const struct bu21013_platform_device *pdata =
+ struct bu21013_platform_device *pdata =
client->dev.platform_data;
- int error;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -446,53 +646,72 @@ static int __devinit bu21013_probe(struct i2c_client *client,
if (!pdata) {
dev_err(&client->dev, "platform data not defined\n");
- return -EINVAL;
+ retval = -EINVAL;
+ return retval;
}
bu21013_data = kzalloc(sizeof(struct bu21013_ts_data), GFP_KERNEL);
- in_dev = input_allocate_device();
- if (!bu21013_data || !in_dev) {
+ if (!bu21013_data) {
dev_err(&client->dev, "device memory alloc failed\n");
- error = -ENOMEM;
- goto err_free_mem;
+ retval = -ENOMEM;
+ return retval;
+ }
+ /* allocate input device */
+ in_dev = input_allocate_device();
+ if (!in_dev) {
+ dev_err(&client->dev, "input device memory alloc failed\n");
+ retval = -ENOMEM;
+ goto err_alloc;
}
bu21013_data->in_dev = in_dev;
bu21013_data->chip = pdata;
bu21013_data->client = client;
- bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
+ bu21013_data->regulator = regulator_get(&client->dev, "avdd");
if (IS_ERR(bu21013_data->regulator)) {
- dev_err(&client->dev, "regulator_get failed\n");
- error = PTR_ERR(bu21013_data->regulator);
- goto err_free_mem;
+ dev_warn(&client->dev, "regulator_get failed\n");
+ bu21013_data->regulator = NULL;
}
-
- error = regulator_enable(bu21013_data->regulator);
- if (error < 0) {
- dev_err(&client->dev, "regulator enable failed\n");
- goto err_put_regulator;
- }
-
- bu21013_data->touch_stopped = false;
- init_waitqueue_head(&bu21013_data->wait);
+ if (bu21013_data->regulator)
+ regulator_enable(bu21013_data->regulator);
/* configure the gpio pins */
if (pdata->cs_en) {
- error = pdata->cs_en(pdata->cs_pin);
- if (error < 0) {
+ retval = pdata->cs_en(pdata->cs_pin);
+ if (retval < 0) {
dev_err(&client->dev, "chip init failed\n");
- goto err_disable_regulator;
+ goto err_init_cs;
+ }
+ }
+
+ if (pdata->has_ext_clk) {
+ bu21013_data->tpclk = clk_get(&client->dev, NULL);
+ if (IS_ERR(bu21013_data->tpclk)) {
+ dev_warn(&client->dev, "get extern clock failed\n");
+ bu21013_data->tpclk = NULL;
+ }
+ }
+
+ if (pdata->enable_ext_clk && bu21013_data->tpclk) {
+ retval = clk_enable(bu21013_data->tpclk);
+ if (retval < 0) {
+ dev_err(&client->dev, "clock enable failed\n");
+ goto err_ext_clk;
}
+ bu21013_data->ext_clk_enable = true;
}
/* configure the touch panel controller */
- error = bu21013_init_chip(bu21013_data);
- if (error) {
+ retval = bu21013_init_chip(bu21013_data, bu21013_data->ext_clk_enable);
+ if (retval < 0) {
dev_err(&client->dev, "error in bu21013 config\n");
- goto err_cs_disable;
+ goto err_init_config;
}
+ init_waitqueue_head(&bu21013_data->wait);
+ bu21013_data->touch_stopped = false;
+
/* register the device to input subsystem */
in_dev->name = DRIVER_TP;
in_dev->id.bustype = BUS_I2C;
@@ -503,44 +722,63 @@ static int __devinit bu21013_probe(struct i2c_client *client,
__set_bit(EV_ABS, in_dev->evbit);
input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0,
- pdata->touch_x_max, 0, 0);
+ pdata->x_max_res, 0, 0);
input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0,
- pdata->touch_y_max, 0, 0);
+ pdata->y_max_res, 0, 0);
+ input_set_abs_params(in_dev, ABS_MT_TOUCH_MAJOR, 0,
+ max(pdata->x_max_res, pdata->y_max_res), 0, 0);
input_set_drvdata(in_dev, bu21013_data);
-
- error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
- IRQF_TRIGGER_FALLING | IRQF_SHARED,
- DRIVER_TP, bu21013_data);
- if (error) {
+ retval = input_register_device(in_dev);
+ if (retval)
+ goto err_input_register;
+
+ retval = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
+ (IRQF_TRIGGER_FALLING | IRQF_SHARED),
+ DRIVER_TP, bu21013_data);
+ if (retval) {
dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
- goto err_cs_disable;
+ goto err_init_irq;
}
+ bu21013_data->enable = true;
+ i2c_set_clientdata(client, bu21013_data);
- error = input_register_device(in_dev);
- if (error) {
- dev_err(&client->dev, "failed to register input device\n");
- goto err_free_irq;
+ /* sysfs implementation for dynamic enable/disable the input event */
+ retval = sysfs_create_group(&client->dev.kobj, &bu21013_attr_group);
+ if (retval) {
+ dev_err(&client->dev, "failed to create sysfs entries\n");
+ goto err_sysfs_create;
}
- device_init_wakeup(&client->dev, pdata->wakeup);
- i2c_set_clientdata(client, bu21013_data);
-
- return 0;
+ return retval;
-err_free_irq:
- bu21013_free_irq(bu21013_data);
-err_cs_disable:
- pdata->cs_dis(pdata->cs_pin);
-err_disable_regulator:
- regulator_disable(bu21013_data->regulator);
-err_put_regulator:
- regulator_put(bu21013_data->regulator);
-err_free_mem:
- input_free_device(in_dev);
+err_sysfs_create:
+ free_irq(pdata->irq, bu21013_data);
+ i2c_set_clientdata(client, NULL);
+err_init_irq:
+ input_unregister_device(bu21013_data->in_dev);
+err_input_register:
+ wake_up(&bu21013_data->wait);
+err_init_config:
+ if (bu21013_data->tpclk) {
+ if (bu21013_data->ext_clk_enable)
+ clk_disable(bu21013_data->tpclk);
+ clk_put(bu21013_data->tpclk);
+ }
+err_ext_clk:
+ if (pdata->cs_dis)
+ pdata->cs_dis(pdata->cs_pin);
+err_init_cs:
+ if (bu21013_data->regulator) {
+ regulator_disable(bu21013_data->regulator);
+ regulator_put(bu21013_data->regulator);
+ }
+ input_free_device(bu21013_data->in_dev);
+err_alloc:
kfree(bu21013_data);
- return error;
+ return retval;
}
+
/**
* bu21013_remove() - removes the i2c-client touchscreen driver
* @client: i2c client structure pointer
@@ -552,19 +790,24 @@ static int __devexit bu21013_remove(struct i2c_client *client)
{
struct bu21013_ts_data *bu21013_data = i2c_get_clientdata(client);
- bu21013_free_irq(bu21013_data);
-
+ bu21013_data->touch_stopped = true;
+ sysfs_remove_group(&client->dev.kobj, &bu21013_attr_group);
+ wake_up(&bu21013_data->wait);
+ free_irq(bu21013_data->chip->irq, bu21013_data);
bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
-
input_unregister_device(bu21013_data->in_dev);
- regulator_disable(bu21013_data->regulator);
- regulator_put(bu21013_data->regulator);
-
+ if (bu21013_data->tpclk) {
+ if (bu21013_data->ext_clk_enable)
+ clk_disable(bu21013_data->tpclk);
+ clk_put(bu21013_data->tpclk);
+ }
+ if (bu21013_data->regulator) {
+ regulator_disable(bu21013_data->regulator);
+ regulator_put(bu21013_data->regulator);
+ }
kfree(bu21013_data);
- device_init_wakeup(&client->dev, false);
-
return 0;
}
@@ -579,15 +822,8 @@ static int __devexit bu21013_remove(struct i2c_client *client)
static int bu21013_suspend(struct device *dev)
{
struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev);
- struct i2c_client *client = bu21013_data->client;
- bu21013_data->touch_stopped = true;
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(bu21013_data->chip->irq);
- else
- disable_irq(bu21013_data->chip->irq);
-
- regulator_disable(bu21013_data->regulator);
+ bu21013_disable(bu21013_data);
return 0;
}
@@ -602,29 +838,8 @@ static int bu21013_suspend(struct device *dev)
static int bu21013_resume(struct device *dev)
{
struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev);
- struct i2c_client *client = bu21013_data->client;
- int retval;
- retval = regulator_enable(bu21013_data->regulator);
- if (retval < 0) {
- dev_err(&client->dev, "bu21013 regulator enable failed\n");
- return retval;
- }
-
- retval = bu21013_init_chip(bu21013_data);
- if (retval < 0) {
- dev_err(&client->dev, "bu21013 controller config failed\n");
- return retval;
- }
-
- bu21013_data->touch_stopped = false;
-
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(bu21013_data->chip->irq);
- else
- enable_irq(bu21013_data->chip->irq);
-
- return 0;
+ return bu21013_enable(bu21013_data);
}
static const struct dev_pm_ops bu21013_dev_pm_ops = {
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
new file mode 100755
index 00000000000..a154056caa3
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -0,0 +1,2247 @@
+/* Source for:
+ * Cypress TrueTouch(TM) Standard Product touchscreen driver.
+ * drivers/input/touchscreen/cyttsp_core.c
+ *
+ * Copyright (C) 2009-2011 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/byteorder/generic.h>
+#include <linux/bitops.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#include <linux/cyttsp.h>
+#include <linux/ctype.h>
+#include <linux/regulator/consumer.h>
+#include "cyttsp_core.h"
+
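+/*
+ * Debug print wrappers: they expand to nothing by default, so all
+ * DBG()/DBG2()/DBG3() statements compile out; redefine as (x) to get
+ * verbose tracing.
+ */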
+#define DBG(x)
+#define DBG2(x)
+#define DBG3(x)
+
+/* rely on kernel input.h to define Multi-Touch capability */
+#ifndef ABS_MT_TRACKING_ID
+/* define only if not defined already by system; */
+/* value based on linux kernel 2.6.30.10 */
+#define ABS_MT_TRACKING_ID (ABS_MT_BLOB_ID + 1)
+#endif /* ABS_MT_TRACKING_ID */
+
+#define TOUCHSCREEN_TIMEOUT (msecs_to_jiffies(28))
+/* Bootloader File 0 offset */
+#define CY_BL_FILE0 0x00
+/* Bootloader command directive */
+#define CY_BL_CMD 0xFF
+/* Bootloader Enter Loader mode */
+#define CY_BL_ENTER 0x38
+/* Bootloader Write a Block */
+#define CY_BL_WRITE_BLK 0x39
+/* Bootloader Terminate Loader mode */
+#define CY_BL_TERMINATE 0x3B
+/* Bootloader Exit and Verify Checksum command */
+#define CY_BL_EXIT 0xA5
+/* Bootloader default keys */
+#define CY_BL_KEY0 0
+#define CY_BL_KEY1 1
+#define CY_BL_KEY2 2
+#define CY_BL_KEY3 3
+#define CY_BL_KEY4 4
+#define CY_BL_KEY5 5
+#define CY_BL_KEY6 6
+#define CY_BL_KEY7 7
+
+#define CY_DIFF(m, n) ((m) != (n))
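+/* touch12_id/touch34_id pack two 4-bit track IDs per byte (high/low nibble) */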
+#define GET_NUM_TOUCHES(x) ((x) & 0x0F)
+#define GET_TOUCH1_ID(x) (((x) & 0xF0) >> 4)
+#define GET_TOUCH2_ID(x) ((x) & 0x0F)
+#define GET_TOUCH3_ID(x) (((x) & 0xF0) >> 4)
+#define GET_TOUCH4_ID(x) ((x) & 0x0F)
+#define IS_LARGE_AREA(x) (((x) & 0x10) >> 4)
+#define IS_BAD_PKT(x) ((x) & 0x20)
+#define FLIP_DATA_FLAG 0x01
+#define REVERSE_X_FLAG 0x02
+#define REVERSE_Y_FLAG 0x04
+#define FLIP_DATA(flags) ((flags) & FLIP_DATA_FLAG)
+#define REVERSE_X(flags) ((flags) & REVERSE_X_FLAG)
+#define REVERSE_Y(flags) ((flags) & REVERSE_Y_FLAG)
+#define FLIP_XY(x, y) {typeof(x) tmp; tmp = (x); (x) = (y); (y) = tmp; }
+#define INVERT_X(x, xmax) ((xmax) - (x))
+#define INVERT_Y(y, ymax) ((ymax) - (y))
+#define SET_HSTMODE(reg, mode) ((reg) & (mode))
+#define GET_HSTMODE(reg) ((reg & 0x70) >> 4)
+#define GET_BOOTLOADERMODE(reg) ((reg & 0x10) >> 4)
+
+/* Watchdog timeout to check if device is reset and no interrupts running */
+#define CY_WDG_TIMEOUT msecs_to_jiffies(3000)
+#define CY_MODE_ERROR(ps, hst_mode, tt_mode) \
+ ((ps == CY_ACTIVE_STATE && GET_HSTMODE(hst_mode) != CY_OPERATE_MODE) ||\
+ GET_BOOTLOADERMODE(tt_mode))
+
+/* maximum number of concurrent ST track IDs */
+#define CY_NUM_ST_TCH_ID 2
+/* maximum number of concurrent MT track IDs */
+#define CY_NUM_MT_TCH_ID 4
+/* maximum number of track IDs */
+#define CY_NUM_TRK_ID 16
+/*
+ * maximum number of concurrent touches
+ * (only CY_NUM_MT_TCH_ID have coord data)
+ */
+#define CY_MAX_TCH 10
+/*
+ * maximum number of touch reports with
+ * current touches=0 before performing Driver reset
+ */
+#define CY_MAX_NTCH 10
+
+#define CY_NTCH 0 /* lift off */
+#define CY_TCH 1 /* touch down */
+#define CY_ST_FNGR1_IDX 0
+#define CY_ST_FNGR2_IDX 1
+#define CY_MT_TCH1_IDX 0
+#define CY_MT_TCH2_IDX 1
+#define CY_MT_TCH3_IDX 2
+#define CY_MT_TCH4_IDX 3
+#define CY_XPOS 0
+#define CY_YPOS 1
+#define CY_IGNR_TCH (-1)
+#define CY_SMALL_TOOL_WIDTH 10
+#define CY_LARGE_TOOL_WIDTH 255
+#define CY_REG_BASE 0x00
+#define CY_REG_GEST_SET 0x1E
+#define CY_REG_SCN_TYP 0x1C
+#define CY_REG_ACT_INTRVL 0x1D
+#define CY_REG_TCH_TMOUT (CY_REG_ACT_INTRVL+1)
+#define CY_REG_LP_INTRVL (CY_REG_TCH_TMOUT+1)
+#define CY_SOFT_RESET (1 << 0)
+#define CY_DEEP_SLEEP (1 << 1)
+#define CY_LOW_POWER (1 << 2)
+#define CY_MAXZ 255
+#define CY_OK 0
+#define CY_INIT 1
+#define CY_DELAY_DFLT 10 /* ms */
+#define CY_DELAY_MAX (500/CY_DELAY_DFLT) /* half second */
+#define CY_DELAY_SYSINFO 20 /* ms */
+#define CY_MODE_CHANGE_DELAY 30 /* ms */
+#define CY_DELAY_BL 300
+#define CY_DELAY_DNLOAD 100 /* ms */
+#define CY_HNDSHK_BIT 0x80
+/* device mode bits */
+#define CY_OPERATE_MODE 0x00
+#define CY_SYSINFO_MODE 0x10
+/* power mode select bits */
+#define CY_SOFT_RESET_MODE 0x01 /* return to Bootloader mode */
+#define CY_DEEP_SLEEP_MODE 0x02
+#define CY_LOW_POWER_MODE 0x04
+#define CY_NUM_KEY 8
+
+/* TrueTouch Standard Product Gen3 (Txx3xx) interface definition */
+struct cyttsp_xydata {
+ u8 hst_mode;
+ u8 tt_mode;
+ u8 tt_stat;
+ u16 x1 __attribute__ ((packed));
+ u16 y1 __attribute__ ((packed));
+ u8 z1;
+ u8 touch12_id;
+ u16 x2 __attribute__ ((packed));
+ u16 y2 __attribute__ ((packed));
+ u8 z2;
+ u8 gest_cnt;
+ u8 gest_id;
+ u16 x3 __attribute__ ((packed));
+ u16 y3 __attribute__ ((packed));
+ u8 z3;
+ u8 touch34_id;
+ u16 x4 __attribute__ ((packed));
+ u16 y4 __attribute__ ((packed));
+ u8 z4;
+ u8 tt_undef[3];
+ u8 gest_set;
+ u8 tt_reserved;
+};
+
+struct cyttsp_xydata_gen2 {
+ u8 hst_mode;
+ u8 tt_mode;
+ u8 tt_stat;
+ u16 x1 __attribute__ ((packed));
+ u16 y1 __attribute__ ((packed));
+ u8 z1;
+ u8 evnt_idx;
+ u16 x2 __attribute__ ((packed));
+ u16 y2 __attribute__ ((packed));
+ u8 tt_undef1;
+ u8 gest_cnt;
+ u8 gest_id;
+ u8 tt_undef[14];
+ u8 gest_set;
+ u8 tt_reserved;
+};
+
+/* TrueTouch Standard Product Gen2 (Txx2xx) interface definition */
+enum cyttsp_gen2_std {
+ CY_GEN2_NOTOUCH = 0x03, /* Both touches removed */
+ CY_GEN2_GHOST = 0x02, /* ghost */
+ CY_GEN2_2TOUCH = 0x03, /* 2 touch; no ghost */
+ CY_GEN2_1TOUCH = 0x01, /* 1 touch only */
+ CY_GEN2_TOUCH2 = 0x01, /* 1st touch removed; 2nd touch remains */
+};
+
+/* TTSP System Information interface definition */
+struct cyttsp_sysinfo_data {
+ u8 hst_mode;
+ u8 mfg_stat;
+ u8 mfg_cmd;
+ u8 cid[3];
+ u8 tt_undef1;
+ u8 uid[8];
+ u8 bl_verh;
+ u8 bl_verl;
+ u8 tts_verh;
+ u8 tts_verl;
+ u8 app_idh;
+ u8 app_idl;
+ u8 app_verh;
+ u8 app_verl;
+ u8 tt_undef[5];
+ u8 scn_typ; /* Gen3 only: scan type [0:Mutual, 1:Self] */
+ u8 act_intrvl;
+ u8 tch_tmout;
+ u8 lp_intrvl;
+};
+
+/* TTSP Bootloader Register Map interface definition */
+#define CY_BL_CHKSUM_OK 0x01
+struct cyttsp_bootloader_data {
+ u8 bl_file;
+ u8 bl_status;
+ u8 bl_error;
+ u8 blver_hi;
+ u8 blver_lo;
+ u8 bld_blver_hi;
+ u8 bld_blver_lo;
+ u8 ttspver_hi;
+ u8 ttspver_lo;
+ u8 appid_hi;
+ u8 appid_lo;
+ u8 appver_hi;
+ u8 appver_lo;
+ u8 cid_0;
+ u8 cid_1;
+ u8 cid_2;
+};
+
+#define cyttsp_wake_data cyttsp_xydata
+
+struct cyttsp {
+ struct device *pdev;
+ int irq;
+ struct input_dev *input;
+ struct work_struct work;
+ struct timer_list timer;
+ struct mutex mutex;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+ char phys[32];
+ struct cyttsp_platform_data *platform_data;
+ struct cyttsp_bootloader_data bl_data;
+ struct cyttsp_sysinfo_data sysinfo_data;
+ u8 num_prv_st_tch;
+ u16 act_trk[CY_NUM_TRK_ID];
+ u16 prv_mt_tch[CY_NUM_MT_TCH_ID];
+ u16 prv_st_tch[CY_NUM_ST_TCH_ID];
+ u16 prv_mt_pos[CY_NUM_TRK_ID][2];
+ struct cyttsp_bus_ops *bus_ops;
+ struct regulator *regulator;
+ unsigned fw_loader_mode:1;
+ unsigned suspended:1;
+ struct timer_list to_timer;
+ bool to_timeout;
+ struct completion int_running;
+ bool bl_ready;
+ u8 reg_id;
+ u8 ntch_count;
+};
+
+struct cyttsp_track_data {
+ u8 prv_tch;
+ u8 cur_tch;
+ u16 tmp_trk[CY_NUM_MT_TCH_ID];
+ u16 snd_trk[CY_NUM_MT_TCH_ID];
+ u16 cur_trk[CY_NUM_TRK_ID];
+ u16 cur_st_tch[CY_NUM_ST_TCH_ID];
+ u16 cur_mt_tch[CY_NUM_MT_TCH_ID];
+ /* if NOT CY_USE_TRACKING_ID then only */
+ /* uses CY_NUM_MT_TCH_ID positions */
+ u16 cur_mt_pos[CY_NUM_TRK_ID][2];
+ /* if NOT CY_USE_TRACKING_ID then only */
+ /* uses CY_NUM_MT_TCH_ID positions */
+ u8 cur_mt_z[CY_NUM_TRK_ID];
+ u8 tool_width;
+ u16 st_x1;
+ u16 st_y1;
+ u8 st_z1;
+ u16 st_x2;
+ u16 st_y2;
+ u8 st_z2;
+};
+
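+/*
+ * Bootloader "exit and verify checksum" command block: file 0, the
+ * command directive, the exit opcode and the eight default keys.
+ */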
+static const u8 bl_cmd[] = {
+ CY_BL_FILE0, CY_BL_CMD, CY_BL_EXIT,
+ CY_BL_KEY0, CY_BL_KEY1, CY_BL_KEY2,
+ CY_BL_KEY3, CY_BL_KEY4, CY_BL_KEY5,
+ CY_BL_KEY6, CY_BL_KEY7
+};
+
+#define LOCK(m) do { \
+ DBG(printk(KERN_INFO "%s: lock\n", __func__);) \
+ mutex_lock(&(m)); \
+} while (0);
+
+#define UNLOCK(m) do { \
+ DBG(printk(KERN_INFO "%s: unlock\n", __func__);) \
+ mutex_unlock(&(m)); \
+} while (0);
+
+DBG(
+static void print_data_block(const char *func, u8 command,
+ u8 length, void *data)
+{
+ char buf[1024];
+ unsigned buf_len = sizeof(buf);
+ char *p = buf;
+ int i;
+ int l;
+
+ l = snprintf(p, buf_len, "cmd 0x%x: ", command);
+ buf_len -= l;
+ p += l;
+ for (i = 0; i < length && buf_len; i++, p += l, buf_len -= l)
+ l = snprintf(p, buf_len, "%02x ", *((char *)data + i));
+ printk(KERN_DEBUG "%s: %s\n", func, buf);
+})
+
+static int cyttsp_soft_reset(struct cyttsp *ts);
+static int cyttsp_set_operational_mode(struct cyttsp *ts);
+static int cyttsp_exit_bl_mode(struct cyttsp *ts);
+static int cyttsp_power_on(struct cyttsp *ts);
+static void cyttsp_init_tch(struct cyttsp *ts)
+{
+ /* init the touch structures */
+ ts->num_prv_st_tch = CY_NTCH;
+ memset(ts->act_trk, CY_NTCH, sizeof(ts->act_trk));
+ memset(ts->prv_mt_pos, CY_NTCH, sizeof(ts->prv_mt_pos));
+ memset(ts->prv_mt_tch, CY_IGNR_TCH, sizeof(ts->prv_mt_tch));
+ memset(ts->prv_st_tch, CY_IGNR_TCH, sizeof(ts->prv_st_tch));
+ ts->ntch_count = 0;
+}
+
+static u8 ttsp_convert_gen2(u8 cur_tch, struct cyttsp_xydata *pxy_data)
+{
+ struct cyttsp_xydata_gen2 *pxy_data_gen2;
+ pxy_data_gen2 = (struct cyttsp_xydata_gen2 *)(pxy_data);
+
+ if (pxy_data_gen2->evnt_idx == CY_GEN2_NOTOUCH) {
+ cur_tch = 0;
+ } else if (cur_tch == CY_GEN2_GHOST) {
+ cur_tch = 0;
+ } else if (cur_tch == CY_GEN2_2TOUCH) {
+ /* stuff artificial track ID1 and ID2 */
+ pxy_data->touch12_id = 0x12;
+ pxy_data->z1 = CY_MAXZ;
+ pxy_data->z2 = CY_MAXZ;
+ cur_tch--; /* 2 touches */
+ } else if (cur_tch == CY_GEN2_1TOUCH) {
+ /* stuff artificial track ID1 and ID2 */
+ pxy_data->touch12_id = 0x12;
+ pxy_data->z1 = CY_MAXZ;
+ pxy_data->z2 = CY_NTCH;
+ if (pxy_data_gen2->evnt_idx == CY_GEN2_TOUCH2) {
+ /* push touch 2 data into touch1
+ * (first finger up; second finger down) */
+ /* stuff artificial track ID1 for touch2 info */
+ pxy_data->touch12_id = 0x20;
+ /* stuff touch 1 with touch 2 coordinate data */
+ pxy_data->x1 = pxy_data->x2;
+ pxy_data->y1 = pxy_data->y2;
+ }
+ } else {
+ cur_tch = 0;
+ }
+ return cur_tch;
+}
+
+static int ttsp_read_block_data(struct cyttsp *ts, u8 command,
+ u8 length, void *buf)
+{
+ int rc;
+ int tries;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ if (!buf || !length) {
+ printk(KERN_ERR "%s: Error, buf:%s len:%u\n",
+ __func__, !buf ? "NULL" : "OK", length);
+ return -EIO;
+ }
+
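+ /* retry the bus read up to CY_NUM_RETRY times, sleeping
+ * CY_DELAY_DFLT ms between attempts */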
+ for (tries = 0, rc = -1; tries < CY_NUM_RETRY && (rc < 0); tries++) {
+ rc = ts->bus_ops->read(ts->bus_ops, command, length, buf);
+ if (rc)
+ msleep(CY_DELAY_DFLT);
+ }
+
+ if (rc < 0)
+ printk(KERN_ERR "%s: error %d\n", __func__, rc);
+ DBG(print_data_block(__func__, command, length, buf);)
+ return rc;
+}
+
+static int ttsp_write_block_data(struct cyttsp *ts, u8 command,
+ u8 length, void *buf)
+{
+ int rc;
+ int tries;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ if (!buf || !length) {
+ printk(KERN_ERR "%s: Error, buf:%s len:%u\n",
+ __func__, !buf ? "NULL" : "OK", length);
+ return -EIO;
+ }
+
+ for (tries = 0, rc = -1; tries < CY_NUM_RETRY && (rc < 0); tries++) {
+ rc = ts->bus_ops->write(ts->bus_ops, command, length, buf);
+ if (rc)
+ msleep(CY_DELAY_DFLT);
+ }
+
+ if (rc < 0)
+ printk(KERN_ERR "%s: error %d\n", __func__, rc);
+ DBG(print_data_block(__func__, command, length, buf);)
+ return rc;
+}
+
+static int ttsp_tch_ext(struct cyttsp *ts, void *buf)
+{
+ int rc;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ if (!buf) {
+ printk(KERN_ERR "%s: Error, buf:%s\n",
+ __func__, !buf ? "NULL" : "OK");
+ return -EIO;
+ }
+ rc = ts->bus_ops->ext(ts->bus_ops, buf);
+ if (rc < 0)
+ printk(KERN_ERR "%s: error %d\n", __func__, rc);
+ return rc;
+}
+
+/* ************************************************************************
+ * The cyttsp_xy_worker function reads the XY coordinates and sends them to
+ * the input layer. It is called from the Touch Interrupt.
+ * *************************************************************************/
+static int cyttsp_inlist(u16 prev_track[], u8 cur_trk_id, u8 *prev_loc,
+ u8 num_touches)
+{
+ u8 id = 0;
+
+ DBG(printk(KERN_INFO"%s: IN p[%d]=%d c=%d n=%d loc=%d\n",
+ __func__, id, prev_track[id], cur_trk_id,
+ num_touches, *prev_loc);)
+
+ for (*prev_loc = CY_IGNR_TCH; id < num_touches; id++) {
+ DBG(printk(KERN_INFO"%s: p[%d]=%d c=%d n=%d loc=%d\n",
+ __func__, id, prev_track[id], cur_trk_id,
+ num_touches, *prev_loc);)
+ if (prev_track[id] == cur_trk_id) {
+ *prev_loc = id;
+ break;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: OUT p[%d]=%d c=%d n=%d loc=%d\n", __func__,
+ id, prev_track[id], cur_trk_id, num_touches, *prev_loc);)
+
+ return *prev_loc < CY_NUM_TRK_ID;
+}
+
+static int cyttsp_next_avail_inlist(u16 cur_trk[], u8 *new_loc,
+ u8 num_touches)
+{
+ u8 id = 0;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ for (*new_loc = CY_IGNR_TCH; id < num_touches; id++) {
+ if (cur_trk[id] > CY_NUM_TRK_ID) {
+ *new_loc = id;
+ break;
+ }
+ }
+ return *new_loc < CY_NUM_TRK_ID;
+}
+
+static void handle_single_touch(struct cyttsp_xydata *xy,
+ struct cyttsp_track_data *t, struct cyttsp *ts)
+{
+ u8 id;
+ u8 use_trk_id = ts->platform_data->use_trk_id;
+
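+ /* map up to two of the current multi-touch contacts onto the legacy
+ * single-touch finger slots so ABS_X/ABS_Y and BTN_2 stay consistent
+ * with the previously reported fingers */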
+ DBG(printk(KERN_INFO"%s: ST STEP 0 - ST1 ID=%d ST2 ID=%d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR1_IDX],
+ t->cur_st_tch[CY_ST_FNGR2_IDX]);)
+
+ if (t->cur_st_tch[CY_ST_FNGR1_IDX] > CY_NUM_TRK_ID) {
+ /* reassign finger 1 and 2 positions to new tracks */
+ if (t->cur_tch > 0) {
+ /* reassign st finger1 */
+ if (use_trk_id) {
+ id = CY_MT_TCH1_IDX;
+ t->cur_st_tch[CY_ST_FNGR1_IDX] =
+ t->cur_mt_tch[id];
+ } else {
+ id = GET_TOUCH1_ID(xy->touch12_id);
+ t->cur_st_tch[CY_ST_FNGR1_IDX] = id;
+ }
+ t->st_x1 = t->cur_mt_pos[id][CY_XPOS];
+ t->st_y1 = t->cur_mt_pos[id][CY_YPOS];
+ t->st_z1 = t->cur_mt_z[id];
+
+ DBG(printk(KERN_INFO"%s: ST STEP 1 - ST1 ID=%3d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR1_IDX]);)
+
+ if ((t->cur_tch > 1) &&
+ (t->cur_st_tch[CY_ST_FNGR2_IDX] >
+ CY_NUM_TRK_ID)) {
+ /* reassign st finger2 */
+ if (use_trk_id) {
+ id = CY_MT_TCH2_IDX;
+ t->cur_st_tch[CY_ST_FNGR2_IDX] =
+ t->cur_mt_tch[id];
+ } else {
+ id = GET_TOUCH2_ID(xy->touch12_id);
+ t->cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ t->st_x2 = t->cur_mt_pos[id][CY_XPOS];
+ t->st_y2 = t->cur_mt_pos[id][CY_YPOS];
+ t->st_z2 = t->cur_mt_z[id];
+
+ DBG(
+ printk(KERN_INFO"%s: ST STEP 2 - ST2 ID=%3d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR2_IDX]);)
+ }
+ }
+ } else if (t->cur_st_tch[CY_ST_FNGR2_IDX] > CY_NUM_TRK_ID) {
+ if (t->cur_tch > 1) {
+ /* reassign st finger2 */
+ if (use_trk_id) {
+ /* reassign st finger2 */
+ id = CY_MT_TCH2_IDX;
+ t->cur_st_tch[CY_ST_FNGR2_IDX] =
+ t->cur_mt_tch[id];
+ } else {
+ /* reassign st finger2 */
+ id = GET_TOUCH2_ID(xy->touch12_id);
+ t->cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ t->st_x2 = t->cur_mt_pos[id][CY_XPOS];
+ t->st_y2 = t->cur_mt_pos[id][CY_YPOS];
+ t->st_z2 = t->cur_mt_z[id];
+
+ DBG(printk(KERN_INFO"%s: ST STEP 3 - ST2 ID=%3d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR2_IDX]);)
+ }
+ }
+ /* if the 1st touch is missing and there is a 2nd touch,
+ * then set the 1st touch to 2nd touch and terminate 2nd touch
+ */
+ if ((t->cur_st_tch[CY_ST_FNGR1_IDX] > CY_NUM_TRK_ID) &&
+ (t->cur_st_tch[CY_ST_FNGR2_IDX] < CY_NUM_TRK_ID)) {
+ t->st_x1 = t->st_x2;
+ t->st_y1 = t->st_y2;
+ t->st_z1 = t->st_z2;
+ t->cur_st_tch[CY_ST_FNGR1_IDX] = t->cur_st_tch[CY_ST_FNGR2_IDX];
+ t->cur_st_tch[CY_ST_FNGR2_IDX] = CY_IGNR_TCH;
+ }
+ /* if the 2nd touch ends up equal to the 1st touch,
+ * then just report a single touch */
+ if (t->cur_st_tch[CY_ST_FNGR1_IDX] == t->cur_st_tch[CY_ST_FNGR2_IDX])
+ t->cur_st_tch[CY_ST_FNGR2_IDX] = CY_IGNR_TCH;
+
+ /* set Single Touch current event signals */
+ if (t->cur_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+ input_report_abs(ts->input, ABS_X, t->st_x1);
+ input_report_abs(ts->input, ABS_Y, t->st_y1);
+ input_report_abs(ts->input, ABS_PRESSURE, t->st_z1);
+ input_report_key(ts->input, BTN_TOUCH, CY_TCH);
+ input_report_abs(ts->input, ABS_TOOL_WIDTH, t->tool_width);
+
+ DBG2(printk(KERN_INFO"%s:ST->F1:%3d X:%3d Y:%3d Z:%3d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR1_IDX],
+ t->st_x1, t->st_y1, t->st_z1);)
+
+ if (t->cur_st_tch[CY_ST_FNGR2_IDX] < CY_NUM_TRK_ID) {
+ input_report_key(ts->input, BTN_2, CY_TCH);
+ input_report_abs(ts->input, ABS_HAT0X, t->st_x2);
+ input_report_abs(ts->input, ABS_HAT0Y, t->st_y2);
+
+ DBG2(printk(KERN_INFO
+ "%s:ST->F2:%3d X:%3d Y:%3d Z:%3d\n",
+ __func__, t->cur_st_tch[CY_ST_FNGR2_IDX],
+ t->st_x2, t->st_y2, t->st_z2);)
+ } else {
+ input_report_key(ts->input, BTN_2, CY_NTCH);
+ }
+ } else {
+ input_report_abs(ts->input, ABS_PRESSURE, CY_NTCH);
+ input_report_key(ts->input, BTN_TOUCH, CY_NTCH);
+ input_report_key(ts->input, BTN_2, CY_NTCH);
+ }
+ /* update platform data for the current single touch info */
+ ts->prv_st_tch[CY_ST_FNGR1_IDX] = t->cur_st_tch[CY_ST_FNGR1_IDX];
+ ts->prv_st_tch[CY_ST_FNGR2_IDX] = t->cur_st_tch[CY_ST_FNGR2_IDX];
+
+}
+
+static void handle_multi_touch(struct cyttsp_track_data *t, struct cyttsp *ts)
+{
+
+ u8 id;
+ u8 i, loc;
+ void (*mt_sync_func)(struct input_dev *) = ts->platform_data->mt_sync;
+
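+ /* two reporting paths: with firmware track IDs each contact is
+ * reported directly; without them, current touches are packed into
+ * the slots used in the previous frame to keep IDs stable */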
+ if (!ts->platform_data->use_trk_id)
+ goto no_track_id;
+
+ /* terminate any previous touch where the track
+ * is missing from the current event */
+ for (id = 0; id < CY_NUM_TRK_ID; id++) {
+ if ((ts->act_trk[id] == CY_NTCH) || (t->cur_trk[id] != CY_NTCH))
+ continue;
+
+ input_report_abs(ts->input, ABS_MT_TRACKING_ID, id);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, CY_NTCH);
+ input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR, t->tool_width);
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ ts->prv_mt_pos[id][CY_XPOS]);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ ts->prv_mt_pos[id][CY_YPOS]);
+ if (mt_sync_func)
+ mt_sync_func(ts->input);
+ ts->act_trk[id] = CY_NTCH;
+ ts->prv_mt_pos[id][CY_XPOS] = 0;
+ ts->prv_mt_pos[id][CY_YPOS] = 0;
+ }
+ /* set Multi-Touch current event signals */
+ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ if (t->cur_mt_tch[id] >= CY_NUM_TRK_ID)
+ continue;
+
+ input_report_abs(ts->input, ABS_MT_TRACKING_ID,
+ t->cur_mt_tch[id]);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ t->cur_mt_z[id]);
+ input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR,
+ t->tool_width);
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ t->cur_mt_pos[id][CY_XPOS]);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ t->cur_mt_pos[id][CY_YPOS]);
+ if (mt_sync_func)
+ mt_sync_func(ts->input);
+
+ ts->act_trk[id] = CY_TCH;
+ ts->prv_mt_pos[id][CY_XPOS] = t->cur_mt_pos[id][CY_XPOS];
+ ts->prv_mt_pos[id][CY_YPOS] = t->cur_mt_pos[id][CY_YPOS];
+ }
+ return;
+no_track_id:
+
+ /* set temporary track array elements to voids */
+ memset(t->tmp_trk, CY_IGNR_TCH, sizeof(t->tmp_trk));
+ memset(t->snd_trk, CY_IGNR_TCH, sizeof(t->snd_trk));
+
+ /* get what is currently active */
+ for (i = id = 0; id < CY_NUM_TRK_ID && i < CY_NUM_MT_TCH_ID; id++) {
+ if (t->cur_trk[id] == CY_TCH) {
+ /* only incr counter if track found */
+ t->tmp_trk[i] = id;
+ i++;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: T1: t0=%d, t1=%d, t2=%d, t3=%d\n", __func__,
+ t->tmp_trk[0], t->tmp_trk[1],
+ t->tmp_trk[2], t->tmp_trk[3]);)
+ DBG(printk(KERN_INFO"%s: T1: p0=%d, p1=%d, p2=%d, p3=%d\n", __func__,
+ ts->prv_mt_tch[0], ts->prv_mt_tch[1],
+ ts->prv_mt_tch[2], ts->prv_mt_tch[3]);)
+
+ /* pack in still active previous touches */
+ for (id = t->prv_tch = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ if (t->tmp_trk[id] >= CY_NUM_TRK_ID)
+ continue;
+
+ if (cyttsp_inlist(ts->prv_mt_tch, t->tmp_trk[id], &loc,
+ CY_NUM_MT_TCH_ID)) {
+ loc &= CY_NUM_MT_TCH_ID - 1;
+ t->snd_trk[loc] = t->tmp_trk[id];
+ t->prv_tch++;
+ DBG(printk(KERN_INFO"%s: in list s[%d]=%d "
+ "t[%d]=%d, loc=%d p=%d\n", __func__,
+ loc, t->snd_trk[loc],
+ id, t->tmp_trk[id],
+ loc, t->prv_tch);)
+ } else {
+ DBG(printk(KERN_INFO"%s: is not in list s[%d]=%d"
+ " t[%d]=%d loc=%d\n", __func__,
+ id, t->snd_trk[id],
+ id, t->tmp_trk[id],
+ loc);)
+ }
+ }
+ DBG(printk(KERN_INFO"%s: S1: s0=%d, s1=%d, s2=%d, s3=%d p=%d\n",
+ __func__,
+ t->snd_trk[0], t->snd_trk[1], t->snd_trk[2],
+ t->snd_trk[3], t->prv_tch);)
+
+ /* pack in new touches */
+ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ if (t->tmp_trk[id] >= CY_NUM_TRK_ID)
+ continue;
+
+ if (!cyttsp_inlist(t->snd_trk, t->tmp_trk[id], &loc,
+ CY_NUM_MT_TCH_ID)) {
+
+ DBG(
+ printk(KERN_INFO"%s: not in list t[%d]=%d, loc=%d\n",
+ __func__,
+ id, t->tmp_trk[id], loc);)
+
+ if (cyttsp_next_avail_inlist(t->snd_trk, &loc,
+ CY_NUM_MT_TCH_ID)) {
+ loc &= CY_NUM_MT_TCH_ID - 1;
+ t->snd_trk[loc] = t->tmp_trk[id];
+ DBG(printk(KERN_INFO "%s: put in list s[%d]=%d"
+ " t[%d]=%d\n", __func__,
+ loc,
+ t->snd_trk[loc], id, t->tmp_trk[id]);
+ )
+ }
+ } else {
+ DBG(printk(KERN_INFO"%s: is in list s[%d]=%d "
+ "t[%d]=%d loc=%d\n", __func__,
+ id, t->snd_trk[id], id, t->tmp_trk[id], loc);)
+ }
+ }
+ DBG(printk(KERN_INFO"%s: S2: s0=%d, s1=%d, s2=%d, s3=%d\n", __func__,
+ t->snd_trk[0], t->snd_trk[1],
+ t->snd_trk[2], t->snd_trk[3]);)
+
+ /* sync motion event signals for each current touch */
+ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ /* z will either be 0 (NOTOUCH) or
+ * some pressure (TOUCH)
+ */
+ DBG(printk(KERN_INFO "%s: MT0 prev[%d]=%d "
+ "temp[%d]=%d send[%d]=%d\n",
+ __func__, id, ts->prv_mt_tch[id],
+ id, t->tmp_trk[id], id, t->snd_trk[id]);)
+
+ if (ts->platform_data->invert) {
+ t->cur_mt_pos[t->snd_trk[id]][CY_XPOS] =
+ INVERT_X(t->cur_mt_pos[t->snd_trk[id]]
+ [CY_XPOS], ts->platform_data->maxx);
+ t->cur_mt_pos[t->snd_trk[id]][CY_YPOS] =
+ INVERT_X(t->cur_mt_pos[t->snd_trk[id]]
+ [CY_YPOS], ts->platform_data->maxy);
+ }
+ if (t->snd_trk[id] < CY_NUM_TRK_ID) {
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ t->cur_mt_z[t->snd_trk[id]]);
+ input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR,
+ t->tool_width);
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ t->cur_mt_pos[t->snd_trk[id]][CY_XPOS]);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ t->cur_mt_pos[t->snd_trk[id]][CY_YPOS]);
+ input_report_key(ts->input, BTN_TOUCH, 1);
+ if (mt_sync_func)
+ mt_sync_func(ts->input);
+
+ DBG2(printk(KERN_INFO"%s: MT1 -> TID:"
+ "%3d X:%3d Y:%3d Z:%3d\n", __func__,
+ t->snd_trk[id],
+ t->cur_mt_pos[t->snd_trk[id]][CY_XPOS],
+ t->cur_mt_pos[t->snd_trk[id]][CY_YPOS],
+ t->cur_mt_z[t->snd_trk[id]]);)
+
+ } else if (ts->prv_mt_tch[id] < CY_NUM_TRK_ID) {
+ /* void out this touch */
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ CY_NTCH);
+ input_report_abs(ts->input, ABS_MT_WIDTH_MAJOR,
+ t->tool_width);
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_XPOS]);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_YPOS]);
+ input_report_key(ts->input, BTN_TOUCH, 0);
+
+ if (mt_sync_func)
+ mt_sync_func(ts->input);
+
+ DBG2(printk(KERN_INFO"%s: "
+ "MT2->TID:%2d X:%3d Y:%3d Z:%3d liftoff-sent\n",
+ __func__, ts->prv_mt_tch[id],
+ ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_XPOS],
+ ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_YPOS],
+ CY_NTCH);)
+ } else {
+ /* do not send any signals for touches that were void
+ * in both the previous and the current frame
+ */
+ DBG(printk(KERN_INFO"%s: "
+ "MT3->send[%d]=%d - No touch - NOT sent\n",
+ __func__, id, t->snd_trk[id]);)
+ }
+ }
+
+ /* save current posted tracks to
+ * previous track memory */
+ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ ts->prv_mt_tch[id] = t->snd_trk[id];
+ if (t->snd_trk[id] < CY_NUM_TRK_ID) {
+ ts->prv_mt_pos[t->snd_trk[id]][CY_XPOS] =
+ t->cur_mt_pos[t->snd_trk[id]][CY_XPOS];
+ ts->prv_mt_pos[t->snd_trk[id]][CY_YPOS] =
+ t->cur_mt_pos[t->snd_trk[id]][CY_YPOS];
+ DBG(printk(KERN_INFO"%s: "
+ "MT4->TID:%2d X:%3d Y:%3d Z:%3d save for prv\n",
+ __func__, t->snd_trk[id],
+ ts->prv_mt_pos[t->snd_trk[id]][CY_XPOS],
+ ts->prv_mt_pos[t->snd_trk[id]][CY_YPOS],
+ CY_NTCH);)
+ }
+ }
+ memset(ts->act_trk, CY_NTCH, sizeof(ts->act_trk));
+ for (id = 0; id < CY_NUM_MT_TCH_ID; id++) {
+ if (t->snd_trk[id] < CY_NUM_TRK_ID)
+ ts->act_trk[t->snd_trk[id]] = CY_TCH;
+ }
+}
+
+static int cyttsp_reset_controller(struct cyttsp *ts)
+{
+ int ret;
+
+ ret = gpio_request(ts->platform_data->rst_gpio, "reset_pin");
+ if (ret) {
+ printk(KERN_ERR "cyttsp_reset_controller: touch gpio fail\n");
+ return ret;
+ }
+ ret = gpio_direction_output(ts->platform_data->rst_gpio, 1);
+ if (ret < 0) {
+ printk(KERN_ERR "cyttsp_reset_controller: reset gpio direction fail\n");
+ goto out;
+ }
+ /*
+ * The start-up procedure:
+ * Set the RESET pin to low
+ * Wait for a period of 1 millisecond
+ * Set the RESET pin to high
+ * Wait for a period of 5 milliseconds
+ * Start the initial sequence
+ * (the delays below are longer: 10-20 ms low, then 50-60 ms)
+ */
+ gpio_set_value(ts->platform_data->rst_gpio, 0);
+ usleep_range(10000, 20000);
+ gpio_set_value(ts->platform_data->rst_gpio, 1);
+ usleep_range(50000, 60000);
+out:
+ gpio_free(ts->platform_data->rst_gpio);
+ return ret;
+}
+
+static void cyttsp_xy_worker(struct cyttsp *ts)
+{
+ struct cyttsp_xydata xy_data;
+ u8 id, tilt, rev_x, rev_y;
+ struct cyttsp_track_data trc;
+ s32 retval;
+
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+ /* get event data from CYTTSP device */
+ retval = ttsp_read_block_data(ts, CY_REG_BASE,
+ sizeof(xy_data), &xy_data);
+
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error, fail to read device on host interface bus\n",
+ __func__);
+ goto exit_xy_worker;
+ }
+
+ if (CY_MODE_ERROR(ts->platform_data->power_state,
+ xy_data.hst_mode, xy_data.tt_mode)) {
+ /* TTSP device has switched to non-operational mode */
+ printk(KERN_ERR "%s: Error, bad mode ps=%d hm=%02X tm=%02X\n",
+ __func__, ts->platform_data->power_state,
+ xy_data.hst_mode, xy_data.tt_mode);
+ retval = cyttsp_reset_controller(ts);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error, conroller reset fail\n",
+ __func__);
+ goto exit_xy_worker;
+ }
+ retval = cyttsp_power_on(ts);
+ if (retval < 0)
+ printk(KERN_ERR "%s: Error, power on fail\n", __func__);
+ goto exit_xy_worker;
+ }
+
+ /* touch extension handling */
+ retval = ttsp_tch_ext(ts, &xy_data);
+
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error, touch extension handling\n",
+ __func__);
+ goto exit_xy_worker;
+ } else if (retval > 0) {
+ DBG(printk(KERN_INFO "%s: Touch extension handled\n",
+ __func__);)
+ goto exit_xy_worker;
+ }
+
+ /* provide flow control handshake */
+ if (ts->irq) {
+ if (ts->platform_data->use_hndshk) {
+ u8 cmd;
+ cmd = xy_data.hst_mode & CY_HNDSHK_BIT ?
+ xy_data.hst_mode & ~CY_HNDSHK_BIT :
+ xy_data.hst_mode | CY_HNDSHK_BIT;
+ retval = ttsp_write_block_data(ts, CY_REG_BASE,
+ sizeof(cmd), (u8 *)&cmd);
+ }
+ }
+ trc.cur_tch = GET_NUM_TOUCHES(xy_data.tt_stat);
+
+ if (IS_LARGE_AREA(xy_data.tt_stat) == 1) {
+ /* terminate all active tracks */
+ trc.cur_tch = CY_NTCH;
+ DBG(printk(KERN_INFO "%s: Large area detected\n",
+ __func__);)
+ } else if (trc.cur_tch > CY_MAX_TCH) {
+ /* terminate all active tracks */
+ trc.cur_tch = CY_NTCH;
+ DBG(printk(KERN_INFO "%s: Num touch error detected\n",
+ __func__);)
+ } else if (IS_BAD_PKT(xy_data.tt_mode)) {
+ /* terminate all active tracks */
+ trc.cur_tch = CY_NTCH;
+ DBG(printk(KERN_INFO "%s: Invalid buffer detected\n",
+ __func__);)
+ }
+
+ /* set tool size */
+ trc.tool_width = CY_SMALL_TOOL_WIDTH;
+
+ if (ts->platform_data->gen == CY_GEN2) {
+ /* translate Gen2 interface data into comparable Gen3 data */
+ trc.cur_tch = ttsp_convert_gen2(trc.cur_tch, &xy_data);
+ }
+
+ /* clear current active track ID array and count previous touches */
+ for (id = 0, trc.prv_tch = CY_NTCH; id < CY_NUM_TRK_ID; id++) {
+ trc.cur_trk[id] = CY_NTCH;
+ trc.prv_tch += ts->act_trk[id];
+ }
+
+ /*
+ * send no events if there were no
+ * previous touches and no new touches
+ */
+ if ((trc.prv_tch == CY_NTCH) && (trc.cur_tch == CY_NTCH)) {
+ if (++ts->ntch_count > CY_MAX_NTCH) {
+ trc.cur_tch = CY_NTCH;
+ }
+ } else
+ ts->ntch_count = 0;
+
+ DBG(printk(KERN_INFO "%s: prev=%d curr=%d\n", __func__,
+ trc.prv_tch, trc.cur_tch);)
+
+ /* clear current single-touch array */
+ memset(trc.cur_st_tch, CY_IGNR_TCH, sizeof(trc.cur_st_tch));
+
+ /* clear single touch positions */
+ trc.st_x1 = trc.st_y1 = trc.st_z1 =
+ trc.st_x2 = trc.st_y2 = trc.st_z2 = CY_NTCH;
+
+ /* clear current multi-touch arrays */
+ memset(trc.cur_mt_tch, CY_IGNR_TCH, sizeof(trc.cur_mt_tch));
+ memset(trc.cur_mt_pos, CY_NTCH, sizeof(trc.cur_mt_pos));
+ memset(trc.cur_mt_z, CY_NTCH, sizeof(trc.cur_mt_z));
+
+ DBG(
+ if (trc.cur_tch) {
+ unsigned i;
+ u8 *pdata = (u8 *)&xy_data;
+
+ printk(KERN_INFO "%s: TTSP data_pack: ", __func__);
+ for (i = 0; i < sizeof(struct cyttsp_xydata); i++)
+ printk(KERN_INFO "[%d]=0x%x ", i, pdata[i]);
+ printk(KERN_INFO "\n");
+ })
+
+ /* Determine if display is tilted */
+ tilt = !!FLIP_DATA(ts->platform_data->flags);
+ /* Check for switch in origin */
+ rev_x = !!REVERSE_X(ts->platform_data->flags);
+ rev_y = !!REVERSE_Y(ts->platform_data->flags);
+
+ /* process the touches */
+ switch (trc.cur_tch) {
+ case 4:
+ xy_data.x4 = be16_to_cpu(xy_data.x4);
+ xy_data.y4 = be16_to_cpu(xy_data.y4);
+ if (tilt)
+ FLIP_XY(xy_data.x4, xy_data.y4);
+
+ if (rev_x)
+ xy_data.x4 = INVERT_X(xy_data.x4,
+ ts->platform_data->maxx);
+ if (rev_y)
+ xy_data.y4 = INVERT_X(xy_data.y4,
+ ts->platform_data->maxy);
+
+ id = GET_TOUCH4_ID(xy_data.touch34_id);
+ if (ts->platform_data->use_trk_id) {
+ trc.cur_mt_pos[CY_MT_TCH4_IDX][CY_XPOS] = xy_data.x4;
+ trc.cur_mt_pos[CY_MT_TCH4_IDX][CY_YPOS] = xy_data.y4;
+ trc.cur_mt_z[CY_MT_TCH4_IDX] = xy_data.z4;
+ } else {
+ trc.cur_mt_pos[id][CY_XPOS] = xy_data.x4;
+ trc.cur_mt_pos[id][CY_YPOS] = xy_data.y4;
+ trc.cur_mt_z[id] = xy_data.z4;
+ }
+ trc.cur_mt_tch[CY_MT_TCH4_IDX] = id;
+ trc.cur_trk[id] = CY_TCH;
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+ trc.st_x1 = xy_data.x4;
+ trc.st_y1 = xy_data.y4;
+ trc.st_z1 = xy_data.z4;
+ trc.cur_st_tch[CY_ST_FNGR1_IDX] = id;
+ } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+ trc.st_x2 = xy_data.x4;
+ trc.st_y2 = xy_data.y4;
+ trc.st_z2 = xy_data.z4;
+ trc.cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: 4th XYZ:% 3d,% 3d,% 3d ID:% 2d\n\n",
+ __func__, xy_data.x4, xy_data.y4, xy_data.z4,
+ (xy_data.touch34_id & 0x0F));)
+
+ /* do not break */
+ case 3:
+ xy_data.x3 = be16_to_cpu(xy_data.x3);
+ xy_data.y3 = be16_to_cpu(xy_data.y3);
+ if (tilt)
+ FLIP_XY(xy_data.x3, xy_data.y3);
+
+ if (rev_x)
+ xy_data.x3 = INVERT_X(xy_data.x3,
+ ts->platform_data->maxx);
+ if (rev_y)
+ xy_data.y3 = INVERT_X(xy_data.y3,
+ ts->platform_data->maxy);
+
+ id = GET_TOUCH3_ID(xy_data.touch34_id);
+ if (ts->platform_data->use_trk_id) {
+ trc.cur_mt_pos[CY_MT_TCH3_IDX][CY_XPOS] = xy_data.x3;
+ trc.cur_mt_pos[CY_MT_TCH3_IDX][CY_YPOS] = xy_data.y3;
+ trc.cur_mt_z[CY_MT_TCH3_IDX] = xy_data.z3;
+ } else {
+ trc.cur_mt_pos[id][CY_XPOS] = xy_data.x3;
+ trc.cur_mt_pos[id][CY_YPOS] = xy_data.y3;
+ trc.cur_mt_z[id] = xy_data.z3;
+ }
+ trc.cur_mt_tch[CY_MT_TCH3_IDX] = id;
+ trc.cur_trk[id] = CY_TCH;
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+ trc.st_x1 = xy_data.x3;
+ trc.st_y1 = xy_data.y3;
+ trc.st_z1 = xy_data.z3;
+ trc.cur_st_tch[CY_ST_FNGR1_IDX] = id;
+ } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+ trc.st_x2 = xy_data.x3;
+ trc.st_y2 = xy_data.y3;
+ trc.st_z2 = xy_data.z3;
+ trc.cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: 3rd XYZ:% 3d,% 3d,% 3d ID:% 2d\n",
+ __func__, xy_data.x3, xy_data.y3, xy_data.z3,
+ ((xy_data.touch34_id >> 4) & 0x0F));)
+
+ /* do not break */
+ case 2:
+ xy_data.x2 = be16_to_cpu(xy_data.x2);
+ xy_data.y2 = be16_to_cpu(xy_data.y2);
+ if (tilt)
+ FLIP_XY(xy_data.x2, xy_data.y2);
+
+ if (rev_x)
+ xy_data.x2 = INVERT_X(xy_data.x2,
+ ts->platform_data->maxx);
+ if (rev_y)
+ xy_data.y2 = INVERT_X(xy_data.y2,
+ ts->platform_data->maxy);
+ id = GET_TOUCH2_ID(xy_data.touch12_id);
+ if (ts->platform_data->use_trk_id) {
+ trc.cur_mt_pos[CY_MT_TCH2_IDX][CY_XPOS] = xy_data.x2;
+ trc.cur_mt_pos[CY_MT_TCH2_IDX][CY_YPOS] = xy_data.y2;
+ trc.cur_mt_z[CY_MT_TCH2_IDX] = xy_data.z2;
+ } else {
+ trc.cur_mt_pos[id][CY_XPOS] = xy_data.x2;
+ trc.cur_mt_pos[id][CY_YPOS] = xy_data.y2;
+ trc.cur_mt_z[id] = xy_data.z2;
+ }
+ trc.cur_mt_tch[CY_MT_TCH2_IDX] = id;
+ trc.cur_trk[id] = CY_TCH;
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+ trc.st_x1 = xy_data.x2;
+ trc.st_y1 = xy_data.y2;
+ trc.st_z1 = xy_data.z2;
+ trc.cur_st_tch[CY_ST_FNGR1_IDX] = id;
+ } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+ trc.st_x2 = xy_data.x2;
+ trc.st_y2 = xy_data.y2;
+ trc.st_z2 = xy_data.z2;
+ trc.cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: 2nd XYZ:% 3d,% 3d,% 3d ID:% 2d\n",
+ __func__, xy_data.x2, xy_data.y2, xy_data.z2,
+ (xy_data.touch12_id & 0x0F));)
+
+ /* do not break */
+ case 1:
+ xy_data.x1 = be16_to_cpu(xy_data.x1);
+ xy_data.y1 = be16_to_cpu(xy_data.y1);
+ if (tilt)
+ FLIP_XY(xy_data.x1, xy_data.y1);
+
+ if (rev_x)
+ xy_data.x1 = INVERT_X(xy_data.x1,
+ ts->platform_data->maxx);
+ if (rev_y)
+ xy_data.y1 = INVERT_X(xy_data.y1,
+ ts->platform_data->maxy);
+
+ id = GET_TOUCH1_ID(xy_data.touch12_id);
+ if (ts->platform_data->use_trk_id) {
+ trc.cur_mt_pos[CY_MT_TCH1_IDX][CY_XPOS] = xy_data.x1;
+ trc.cur_mt_pos[CY_MT_TCH1_IDX][CY_YPOS] = xy_data.y1;
+ trc.cur_mt_z[CY_MT_TCH1_IDX] = xy_data.z1;
+ } else {
+ trc.cur_mt_pos[id][CY_XPOS] = xy_data.x1;
+ trc.cur_mt_pos[id][CY_YPOS] = xy_data.y1;
+ trc.cur_mt_z[id] = xy_data.z1;
+ }
+ trc.cur_mt_tch[CY_MT_TCH1_IDX] = id;
+ trc.cur_trk[id] = CY_TCH;
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) {
+ if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) {
+ trc.st_x1 = xy_data.x1;
+ trc.st_y1 = xy_data.y1;
+ trc.st_z1 = xy_data.z1;
+ trc.cur_st_tch[CY_ST_FNGR1_IDX] = id;
+ } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) {
+ trc.st_x2 = xy_data.x1;
+ trc.st_y2 = xy_data.y1;
+ trc.st_z2 = xy_data.z1;
+ trc.cur_st_tch[CY_ST_FNGR2_IDX] = id;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: S1st XYZ:% 3d,% 3d,% 3d ID:% 2d\n",
+ __func__, xy_data.x1, xy_data.y1, xy_data.z1,
+ ((xy_data.touch12_id >> 4) & 0x0F));)
+
+ break;
+ case 0:
+ default:
+ break;
+ }
+
+ if (ts->platform_data->use_st)
+ handle_single_touch(&xy_data, &trc, ts);
+
+ if (ts->platform_data->use_mt)
+ handle_multi_touch(&trc, ts);
+
+ /* handle gestures */
+ if (ts->platform_data->use_gestures && xy_data.gest_id) {
+ input_report_key(ts->input, BTN_3, CY_TCH);
+ input_report_abs(ts->input, ABS_HAT1X, xy_data.gest_id);
+ input_report_abs(ts->input, ABS_HAT1Y, xy_data.gest_cnt);
+ }
+
+ /* signal the view motion event */
+ input_sync(ts->input);
+
+ /* update platform data for the current multi-touch information */
+ memcpy(ts->act_trk, trc.cur_trk, CY_NUM_TRK_ID);
+
+exit_xy_worker:
+ /* reset the watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ DBG(printk(KERN_INFO"%s: finished.\n", __func__);)
+ return;
+}
+
+static irqreturn_t cyttsp_irq(int irq, void *handle)
+{
+ struct cyttsp *ts = (struct cyttsp *)handle;
+
+ DBG(printk(KERN_INFO"%s: Got IRQ!\n", __func__);)
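+ /* handling depends on the driver state: during bootloader and
+ * sysinfo transitions the interrupt just completes a wait, in loader
+ * state it flags that the bootloader is ready for the next block,
+ * and in active state it reads and reports touch data */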
+ switch (ts->platform_data->power_state) {
+ case CY_BL_STATE:
+ case CY_SYSINFO_STATE:
+ complete(&ts->int_running);
+ break;
+ case CY_LDR_STATE:
+ ts->bl_ready = true;
+ break;
+ case CY_ACTIVE_STATE:
+ cyttsp_xy_worker(ts);
+ break;
+ case CY_READY_STATE:
+ default:
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Timer function used as watchdog */
+static void cyttsp_timer(unsigned long handle)
+{
+ struct cyttsp *ts = (struct cyttsp *)handle;
+
+ DBG(printk(KERN_INFO"%s: Watchdog timer event\n", __func__);)
+ if (!work_pending(&ts->work))
+ schedule_work(&ts->work);
+ return;
+}
+/*
+ ************************************************************************
+ * Probe initialization functions
+ ************************************************************************
+ */
+static int cyttsp_load_bl_regs(struct cyttsp *ts)
+{
+ int retval;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ memset(&ts->bl_data, 0, sizeof(ts->bl_data));
+
+ retval = ttsp_read_block_data(ts, CY_REG_BASE,
+ sizeof(ts->bl_data), &(ts->bl_data));
+
+ if (retval < 0) {
+ DBG2(printk(KERN_INFO "%s: Failed reading block data, err:%d\n",
+ __func__, retval);)
+ goto fail;
+ }
+
+ DBG({
+ int i;
+ u8 *bl_data = (u8 *)&(ts->bl_data);
+ for (i = 0; i < sizeof(struct cyttsp_bootloader_data); i++)
+ printk(KERN_INFO "%s bl_data[%d]=0x%x\n",
+ __func__, i, bl_data[i]);
+ })
+ DBG2(printk(KERN_INFO "%s: bl f=%02X s=%02X e=%02X\n",
+ __func__,
+ ts->bl_data.bl_file,
+ ts->bl_data.bl_status,
+ ts->bl_data.bl_error);)
+
+ return 0;
+fail:
+ return retval;
+}
+
+static bool cyttsp_bl_app_valid(struct cyttsp *ts)
+{
+ int retval;
+
+ ts->bl_data.bl_status = 0x00;
+
+ retval = cyttsp_load_bl_regs(ts);
+
+ if (retval < 0)
+ return false;
+
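+ /* bl_status 0x11: bootloader with a valid application image
+ * (normal boot); 0x10: bootloader present but no application,
+ * so firmware must be loaded */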
+ if (ts->bl_data.bl_status == 0x11) {
+ printk(KERN_INFO "%s: App found; normal boot\n", __func__);
+ return true;
+ } else if (ts->bl_data.bl_status == 0x10) {
+ printk(KERN_INFO "%s: NO APP; load firmware!!\n", __func__);
+ return false;
+ } else {
+ if (ts->bl_data.bl_file == CY_OPERATE_MODE) {
+ int invalid_op_mode_status;
+ invalid_op_mode_status = ts->bl_data.bl_status & 0x3f;
+ if (invalid_op_mode_status)
+ return false;
+ else {
+ if (ts->platform_data->power_state ==
+ CY_ACTIVE_STATE)
+ printk(KERN_INFO "%s: Operational\n",
+ __func__);
+ else
+ printk(KERN_INFO "%s: No bootloader\n",
+ __func__);
+ }
+ }
+ return true;
+ }
+}
+
+static int cyttsp_exit_bl_mode(struct cyttsp *ts)
+{
+ int retval;
+ int tries = 0;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ /* check if in bootloader mode;
+ * if in operational mode then just return without fail
+ */
+ cyttsp_load_bl_regs(ts);
+ if (!GET_BOOTLOADERMODE(ts->bl_data.bl_status))
+ return 0;
+
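+ /* write the exit command block, then re-read bl_status until the
+ * device reports it has left bootloader mode */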
+ retval = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(bl_cmd),
+ (void *)bl_cmd);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Failed writing block data, err:%d\n",
+ __func__, retval);
+ goto fail;
+ }
+ do {
+ msleep(10);
+ cyttsp_load_bl_regs(ts);
+ } while (GET_BOOTLOADERMODE(ts->bl_data.bl_status) && tries++ < 1);
+
+ DBG2(printk(KERN_INFO "%s: read tries=%d\n", __func__, tries);)
+
+ DBG(printk(KERN_INFO"%s: Exit "
+ "f=%02X s=%02X e=%02X\n",
+ __func__,
+ ts->bl_data.bl_file, ts->bl_data.bl_status,
+ ts->bl_data.bl_error);)
+
+ return 0;
+fail:
+ return retval;
+}
+
+static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
+{
+ int retval;
+ u8 cmd = CY_SYSINFO_MODE;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ memset(&(ts->sysinfo_data), 0, sizeof(struct cyttsp_sysinfo_data));
+
+ /* switch to sysinfo mode */
+ retval = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Failed writing block data, err:%d\n",
+ __func__, retval);
+ goto exit_sysinfo_mode;
+ }
+
+ if (!(retval < 0)) {
+ /* wait for interrupt to set ready completion */
+ ts->platform_data->power_state = CY_SYSINFO_STATE;
+ INIT_COMPLETION(ts->int_running);
+ retval = wait_for_completion_interruptible_timeout(
+ &ts->int_running,
+ msecs_to_jiffies(CY_DELAY_DFLT * CY_DELAY_MAX));
+ ts->platform_data->power_state = CY_READY_STATE;
+ }
+
+ if (retval < 0) {
+ ts->platform_data->power_state = CY_IDLE_STATE;
+ printk(KERN_ERR "%s: reset timeout fail (ret=%d)\n",
+ __func__, retval);
+ }
+
+exit_sysinfo_mode:
+ /* read sysinfo registers */
+ retval = ttsp_read_block_data(ts, CY_REG_BASE,
+ sizeof(ts->sysinfo_data), &(ts->sysinfo_data));
+
+ DBG(printk(KERN_INFO"%s:SI2: hst_mode=0x%02X mfg_cmd=0x%02X "
+ "mfg_stat=0x%02X\n", __func__, ts->sysinfo_data.hst_mode,
+ ts->sysinfo_data.mfg_cmd,
+ ts->sysinfo_data.mfg_stat);)
+
+ DBG(printk(KERN_INFO"%s:SI2: bl_ver=0x%02X%02X\n",
+ __func__, ts->sysinfo_data.bl_verh, ts->sysinfo_data.bl_verl);)
+
+ DBG(printk(KERN_INFO"%s:SI2: sysinfo act_intrvl=0x%02X "
+ "tch_tmout=0x%02X lp_intrvl=0x%02X\n",
+ __func__, ts->sysinfo_data.act_intrvl,
+ ts->sysinfo_data.tch_tmout,
+ ts->sysinfo_data.lp_intrvl);)
+
+ printk(KERN_INFO"%s:SI2:tts_ver=%02X%02X app_id=%02X%02X "
+ "app_ver=%02X%02X c_id=%02X%02X%02X "
+ "u_id=%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ __func__,
+ ts->sysinfo_data.tts_verh, ts->sysinfo_data.tts_verl,
+ ts->sysinfo_data.app_idh, ts->sysinfo_data.app_idl,
+ ts->sysinfo_data.app_verh, ts->sysinfo_data.app_verl,
+ ts->sysinfo_data.cid[0], ts->sysinfo_data.cid[1],
+ ts->sysinfo_data.cid[2],
+ ts->sysinfo_data.uid[0], ts->sysinfo_data.uid[1],
+ ts->sysinfo_data.uid[2], ts->sysinfo_data.uid[3],
+ ts->sysinfo_data.uid[4], ts->sysinfo_data.uid[5],
+ ts->sysinfo_data.uid[6], ts->sysinfo_data.uid[7]);
+
+ return retval;
+}
+
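+/*
+ * Program the system-information registers from platform data: the scan
+ * type, and the active interval / touch timeout / low-power interval
+ * block, but only when they differ from the firmware defaults.
+ */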
+static int cyttsp_set_sysinfo_regs(struct cyttsp *ts)
+{
+ int retval = 0;
+
+ if (ts->platform_data->scn_typ != CY_SCN_TYP_DFLT) {
+ u8 scn_typ = ts->platform_data->scn_typ;
+
+ retval = ttsp_write_block_data(ts,
+ CY_REG_SCN_TYP,
+ sizeof(scn_typ), &scn_typ);
+ }
+
+ if (retval < 0)
+ return retval;
+
+ if ((ts->platform_data->act_intrvl != CY_ACT_INTRVL_DFLT) ||
+ (ts->platform_data->tch_tmout != CY_TCH_TMOUT_DFLT) ||
+ (ts->platform_data->lp_intrvl != CY_LP_INTRVL_DFLT)) {
+
+ u8 intrvl_ray[3];
+
+ intrvl_ray[0] = ts->platform_data->act_intrvl;
+ intrvl_ray[1] = ts->platform_data->tch_tmout;
+ intrvl_ray[2] = ts->platform_data->lp_intrvl;
+
+ /* set intrvl registers */
+ retval = ttsp_write_block_data(ts,
+ CY_REG_ACT_INTRVL,
+ sizeof(intrvl_ray), intrvl_ray);
+
+ msleep(CY_DELAY_SYSINFO);
+ }
+
+ return retval;
+}
+
+static int cyttsp_set_operational_mode(struct cyttsp *ts)
+{
+ int retval;
+ int tries;
+ u8 cmd = CY_OPERATE_MODE;
+ u8 gest_default;
+ struct cyttsp_xydata xy_data;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ retval = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Failed writing block data, err:%d\n",
+ __func__, retval);
+ goto write_block_data_fail;
+ }
+
+ /* wait for TTSP Device to complete switch to Operational mode */
+ msleep(10);
+
+ tries = 0;
+ gest_default =
+ CY_GEST_GRP1 + CY_GEST_GRP2 +
+ CY_GEST_GRP3 + CY_GEST_GRP4 +
+ CY_ACT_DIST_DFLT;
+ do {
+ msleep(CY_DELAY_DFLT);
+ memset(&xy_data, 0, sizeof(xy_data));
+ retval = ttsp_read_block_data(ts, CY_REG_BASE,
+ sizeof(struct cyttsp_xydata), &xy_data);
+ } while (!((retval == 0) &&
+ ((xy_data.gest_set & CY_ACT_DIST_BITS) ==
+ (CY_ACT_DIST_DFLT & CY_ACT_DIST_BITS))) &&
+ (tries++ < CY_DELAY_MAX));
+
+ DBG2(printk(KERN_INFO "%s: read tries=%d\n", __func__, tries);)
+
+ return 0;
+write_block_data_fail:
+ return retval;
+}
+
+static int cyttsp_soft_reset(struct cyttsp *ts)
+{
+ int retval;
+ u8 cmd = CY_SOFT_RESET_MODE;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ /* reset TTSP Device back to bootloader mode */
+ ts->platform_data->power_state = CY_BL_STATE;
+ retval = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
+
+ if (!(retval < 0)) {
+ /* wait for interrupt to set ready completion */
+ INIT_COMPLETION(ts->int_running);
+ retval = wait_for_completion_interruptible_timeout(
+ &ts->int_running,
+ msecs_to_jiffies(CY_DELAY_DFLT * CY_DELAY_MAX));
+ }
+
+ if (retval < 0) {
+ ts->platform_data->power_state = CY_IDLE_STATE;
+ printk(KERN_ERR "%s: reset timeout fail (ret=%d)\n",
+ __func__, retval);
+ }
+
+ if (retval > 0)
+ retval = 0;
+
+ return retval;
+}
+
+static int cyttsp_gesture_setup(struct cyttsp *ts)
+{
+ int retval;
+ u8 gesture_setup;
+
+ DBG(printk(KERN_INFO"%s: Init gesture; active distance setup\n",
+ __func__);)
+
+ gesture_setup = ts->platform_data->gest_set;
+ retval = ttsp_write_block_data(ts, CY_REG_GEST_SET,
+ sizeof(gesture_setup), &gesture_setup);
+
+ return retval;
+}
+
+#ifdef CY_INCLUDE_LOAD_RECS
+#include "cyttsp_ldr.h"
+#else
+static bool cyttsp_bl_status(struct cyttsp *ts)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ return ((ts->bl_data.bl_status == 0x10) ||
+ (ts->bl_data.bl_status == 0x11));
+}
+
+static int cyttsp_loader(struct cyttsp *ts)
+{
+ void *fptr = cyttsp_bl_status; /* kill warning */
+
+ if (ts) {
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ DBG(printk(KERN_INFO"%s: Exit\n", __func__);)
+ }
+
+ if (!fptr)
+ return -EIO;
+ else
+ return 0;
+}
+#endif
+
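+/*
+ * Bring the controller to operational state: restart the watchdog, soft
+ * reset into the bootloader, optionally run the firmware loader, verify
+ * and exit the bootloader application, read the system-information block,
+ * program the interval and gesture registers, then switch back to
+ * operational mode.  Sets power_state to ACTIVE on success, IDLE on error.
+ */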
+static int cyttsp_power_on(struct cyttsp *ts)
+{
+ int retval = 0;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ /* (re-)start watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+
+ cyttsp_init_tch(ts);
+ retval = cyttsp_soft_reset(ts);
+ DBG2(printk(KERN_INFO"%s: after softreset r=%d\n", __func__, retval);)
+ if (retval < 0)
+ goto bypass;
+
+ if (ts->platform_data->use_load_file)
+ retval = cyttsp_loader(ts);
+
+ if (!cyttsp_bl_app_valid(ts)) {
+ retval = 1;
+ goto bypass;
+ }
+
+ retval = cyttsp_exit_bl_mode(ts);
+ DBG2(printk(KERN_INFO"%s: after exit bl r=%d\n", __func__, retval);)
+
+ if (retval < 0)
+ goto bypass;
+
+ /* switch to System Information mode to read */
+ /* versions and set interval registers */
+ ts->platform_data->power_state = CY_READY_STATE;
+ retval = cyttsp_set_sysinfo_mode(ts);
+ if (retval < 0)
+ goto bypass;
+
+ retval = cyttsp_set_sysinfo_regs(ts);
+ if (retval < 0)
+ goto bypass;
+
+ /* init gesture setup; required for active distance */
+ cyttsp_gesture_setup(ts);
+
+ /* switch back to Operational mode */
+ DBG2(printk(KERN_INFO"%s: switch back to operational mode\n",
+ __func__);)
+ retval = cyttsp_set_operational_mode(ts);
+ if (retval < 0)
+ goto bypass;
+
+bypass:
+ if (retval)
+ ts->platform_data->power_state = CY_IDLE_STATE;
+ else
+ ts->platform_data->power_state = CY_ACTIVE_STATE;
+
+ printk(KERN_INFO"%s: Power state is %s\n",
+ __func__, (ts->platform_data->power_state == CY_ACTIVE_STATE) ?
+ "ACTIVE" : "IDLE");
+ return retval;
+}
+
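+/*
+ * Watchdog worker: read the host/TT mode bytes and, if the device has
+ * fallen out of the expected mode, re-run the power-on sequence.  The
+ * watchdog timer is re-armed in every case.
+ */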
+static void cyttsp_check_bl(struct work_struct *work)
+{
+ struct cyttsp *ts = container_of(work, struct cyttsp, work);
+ s32 retval;
+ struct cyttsp_xydata xy_data;
+
+ retval = ttsp_read_block_data(ts, CY_REG_BASE, 2, &xy_data);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error, fail to read device\n", __func__);
+ goto reserve_next;
+ }
+
+ if (CY_MODE_ERROR(ts->platform_data->power_state,
+ xy_data.hst_mode, xy_data.tt_mode)) {
+ printk(KERN_ERR "%s: Error, mode error detected "
+ "on watchdog timeout ps=%d mode=%02X bl_mode=%02X\n",
+ __func__, ts->platform_data->power_state,
+ xy_data.hst_mode, xy_data.tt_mode);
+ retval = cyttsp_power_on(ts);
+ if (retval < 0)
+ printk(KERN_ERR "%s: Error, power on fail\n", __func__);
+ }
+
+reserve_next:
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+}
+
+static int cyttsp_resume(struct cyttsp *ts)
+{
+ int retval = 0;
+ struct cyttsp_xydata xydata;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ if (ts->platform_data->use_sleep && (ts->platform_data->power_state !=
+ CY_ACTIVE_STATE)) {
+ if (ts->platform_data->wakeup) {
+ retval = ts->platform_data->wakeup();
+ if (retval < 0)
+ printk(KERN_ERR "%s: Error, wakeup failed!\n",
+ __func__);
+ } else {
+ printk(KERN_ERR "%s: Error, wakeup not implemented "
+ "(check board file).\n", __func__);
+ retval = -ENOSYS;
+ }
+ if (!(retval < 0)) {
+ retval = ttsp_read_block_data(ts, CY_REG_BASE,
+ sizeof(xydata), &xydata);
+ if (!(retval < 0) && !GET_HSTMODE(xydata.hst_mode))
+ ts->platform_data->power_state =
+ CY_ACTIVE_STATE;
+ }
+ }
+ DBG(printk(KERN_INFO"%s: Wake Up %s\n", __func__,
+ (retval < 0) ? "FAIL" : "PASS");)
+ return retval;
+}
+
+static int cyttsp_suspend(struct cyttsp *ts)
+{
+ u8 sleep_mode = 0;
+ int retval = 0;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ if (ts->platform_data->use_sleep &&
+ (ts->platform_data->power_state == CY_ACTIVE_STATE)) {
+ sleep_mode = CY_DEEP_SLEEP_MODE;
+ retval = ttsp_write_block_data(ts,
+ CY_REG_BASE, sizeof(sleep_mode), &sleep_mode);
+ if (!(retval < 0))
+ ts->platform_data->power_state = CY_SLEEP_STATE;
+ msleep(CY_MODE_CHANGE_DELAY);
+ }
+ DBG(printk(KERN_INFO"%s: Sleep Power state is %s\n", __func__,
+ (ts->platform_data->power_state == CY_ACTIVE_STATE) ?
+ "ACTIVE" :
+ ((ts->platform_data->power_state == CY_SLEEP_STATE) ?
+ "SLEEP" : "LOW POWER"));)
+ return retval;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void cyttsp_ts_early_suspend(struct early_suspend *h)
+{
+ struct cyttsp *ts = container_of(h, struct cyttsp, early_suspend);
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ LOCK(ts->mutex);
+ if (!ts->fw_loader_mode) {
+ disable_irq_nosync(ts->irq);
+ ts->suspended = 1;
+ /* kill watchdog */
+ del_timer(&ts->timer);
+ cancel_work_sync(&ts->work);
+ cyttsp_suspend(ts);
+ }
+ regulator_disable(ts->regulator);
+ UNLOCK(ts->mutex);
+}
+
+static void cyttsp_ts_late_resume(struct early_suspend *h)
+{
+ struct cyttsp *ts = container_of(h, struct cyttsp, early_suspend);
+
+ regulator_enable(ts->regulator);
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ LOCK(ts->mutex);
+ if (!ts->fw_loader_mode && ts->suspended) {
+ ts->suspended = 0;
+ if (cyttsp_resume(ts) < 0)
+ printk(KERN_ERR "%s: Error, cyttsp_resume.\n",
+ __func__);
+ enable_irq(ts->irq);
+ /* resume watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ }
+ UNLOCK(ts->mutex);
+}
+#endif
+
+static int cyttsp_wr_reg(struct cyttsp *ts, u8 reg_id, u8 reg_data)
+{
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ return ttsp_write_block_data(ts,
+ CY_REG_BASE + reg_id, sizeof(u8), &reg_data);
+}
+
+static int cyttsp_rd_reg(struct cyttsp *ts, u8 reg_id, u8 *reg_data)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ return ttsp_read_block_data(ts,
+ CY_REG_BASE + reg_id, sizeof(u8), reg_data);
+}
+
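+/*
+ * sysfs "firmware" write handler.  Outside loader mode the 16-bit value
+ * selects a register (high byte) and data (low byte); writes to register
+ * 0 switch the host mode and adjust the IRQ/watchdog state accordingly,
+ * while a register id with bit 7 set only selects the register for a
+ * subsequent read.  In loader mode the buffer is streamed to the
+ * bootloader, retrying on bus errors.
+ */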
+static ssize_t firmware_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct cyttsp *ts = dev_get_drvdata(dev);
+ LOCK(ts->mutex);
+ if (!ts->fw_loader_mode) {
+ unsigned short val = *(unsigned short *)buf;
+ u8 reg_data = val & 0xff;
+ ts->reg_id = (val & 0xff00) >> 8;
+ if (!(ts->reg_id & 0x80)) {
+ /* write user specified operational register */
+ if (ts->reg_id == 0x00) {
+ switch (GET_HSTMODE(reg_data)) {
+ case GET_HSTMODE(CY_OPERATE_MODE):
+ cyttsp_wr_reg(ts, ts->reg_id, reg_data);
+ ts->platform_data->power_state =
+ CY_ACTIVE_STATE;
+ enable_irq(ts->irq);
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ printk(KERN_INFO "%s: "
+ "Switch to Operational Mode "
+ "ps=%d\n", __func__,
+ ts->platform_data->power_state);
+ break;
+ case GET_HSTMODE(CY_SYSINFO_MODE):
+ ts->platform_data->power_state =
+ CY_READY_STATE;
+ disable_irq_nosync(ts->irq);
+ /* kill watchdog */
+ del_timer(&ts->timer);
+ cancel_work_sync(&ts->work);
+ cyttsp_wr_reg(ts, ts->reg_id, reg_data);
+ printk(KERN_INFO "%s: "
+ "Switch to SysInfo Mode "
+ "ps=%d\n", __func__,
+ ts->platform_data->power_state);
+ break;
+ default:
+ cyttsp_wr_reg(ts, ts->reg_id, reg_data);
+ break;
+ }
+ } else
+ cyttsp_wr_reg(ts, ts->reg_id, reg_data);
+ printk(KERN_INFO "%s: "
+ "write(reg=%02X(%d) dat=0x%02X(%d))\n",
+ __func__,
+ ts->reg_id & ~0x80, ts->reg_id & ~0x80,
+ reg_data, reg_data);
+ } else {
+ /* save user specified operational read register */
+ DBG2(printk(KERN_INFO "%s: read(r=0x%02X)\n",
+ __func__, ts->reg_id);)
+ }
+ } else {
+ int retval = 0;
+ int tries = 0;
+ DBG({
+ char str[128];
+ char *p = str;
+ int i;
+ for (i = 0; i < size; i++, p += 2)
+ sprintf(p, "%02x", (unsigned char)buf[i]);
+ printk(KERN_DEBUG "%s: size %zu, pos %ld payload %s\n",
+ __func__, size, (long)pos, str);
+ })
+ do {
+ retval = ttsp_write_block_data(ts,
+ CY_REG_BASE, size, buf);
+ if (retval < 0)
+ msleep(500);
+ } while ((retval < 0) && (tries++ < 10));
+ }
+ UNLOCK(ts->mutex);
+ return size;
+}
+
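+/*
+ * sysfs "firmware" read handler.  Outside loader mode it returns the
+ * register previously selected through a write; in loader mode it
+ * returns the bootloader status and error bytes.
+ */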
+static ssize_t firmware_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *ba,
+ char *buf, loff_t pos, size_t size)
+{
+ int count = 0;
+ u8 reg_data;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct cyttsp *ts = dev_get_drvdata(dev);
+
+ DBG2(printk(KERN_INFO"%s: Enter (mode=%d)\n",
+ __func__, ts->fw_loader_mode);)
+
+ LOCK(ts->mutex);
+ if (!ts->fw_loader_mode) {
+ /* read user specified operational register */
+ cyttsp_rd_reg(ts, ts->reg_id & ~0x80, &reg_data);
+ *(unsigned short *)buf = reg_data << 8;
+ count = sizeof(unsigned short);
+ printk(KERN_INFO "%s: read(reg=%02X(%d) dat=0x%02X(%d))\n",
+ __func__, ts->reg_id & ~0x80, ts->reg_id & ~0x80,
+ reg_data, reg_data);
+ } else {
+ int retval = 0;
+ int tries = 0;
+
+ do {
+ retval = cyttsp_load_bl_regs(ts);
+ if (retval < 0)
+ msleep(500);
+ } while ((retval < 0) && (tries++ < 10));
+
+ if (retval < 0) {
+ printk(KERN_ERR "%s: error reading status\n", __func__);
+ count = 0;
+ } else {
+ *(unsigned short *)buf = ts->bl_data.bl_status << 8 |
+ ts->bl_data.bl_error;
+ count = sizeof(unsigned short);
+ }
+
+ DBG2(printk(KERN_INFO
+ "%s:bl_f=0x%02X bl_s=0x%02X bl_e=0x%02X\n",
+ __func__,
+ ts->bl_data.bl_file,
+ ts->bl_data.bl_status,
+ ts->bl_data.bl_error);)
+ }
+ UNLOCK(ts->mutex);
+ return count;
+}
+
+static struct bin_attribute cyttsp_firmware = {
+ .attr = {
+ .name = "firmware",
+ .mode = 0644,
+ },
+ .size = 128,
+ .read = firmware_read,
+ .write = firmware_write,
+};
+
+static ssize_t attr_fwloader_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cyttsp *ts = dev_get_drvdata(dev);
+ return sprintf(buf, "0x%02X%02X 0x%02X%02X 0x%02X%02X 0x%02X%02X%02X\n",
+ ts->sysinfo_data.tts_verh, ts->sysinfo_data.tts_verl,
+ ts->sysinfo_data.app_idh, ts->sysinfo_data.app_idl,
+ ts->sysinfo_data.app_verh, ts->sysinfo_data.app_verl,
+ ts->sysinfo_data.cid[0], ts->sysinfo_data.cid[1],
+ ts->sysinfo_data.cid[2]);
+}
+
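+/*
+ * fwloader control attribute: 1 enters firmware-load mode (IRQ and
+ * watchdog off, bin file created, device soft reset), 0 leaves it and
+ * re-powers the device, 2/3 create/remove the bin file for plain
+ * register access.
+ */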
+static ssize_t attr_fwloader_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ char *p;
+ int ret;
+ struct cyttsp *ts = dev_get_drvdata(dev);
+ unsigned val = simple_strtoul(buf, &p, 10);
+
+ ret = p - buf;
+ if (*p && isspace(*p))
+ ret++;
+ DBG2(printk(KERN_DEBUG "%s: %u\n", __func__, val);)
+
+ LOCK(ts->mutex);
+ if (val == 3) {
+ sysfs_remove_bin_file(&dev->kobj, &cyttsp_firmware);
+ DBG2(printk(KERN_INFO "%s: FW loader closed for reg r/w\n",
+ __func__);)
+ } else if (val == 2) {
+ if (sysfs_create_bin_file(&dev->kobj, &cyttsp_firmware))
+ printk(KERN_ERR "%s: unable to create file\n",
+ __func__);
+ DBG2(printk(KERN_INFO "%s: FW loader opened for reg r/w\n",
+ __func__);)
+ if (ts->suspended) {
+ cyttsp_resume(ts);
+ ts->suspended = 0;
+ enable_irq(ts->irq);
+ /* resume watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ }
+ } else if ((val == 1) && !ts->fw_loader_mode) {
+ ts->fw_loader_mode = 1;
+ if (ts->suspended) {
+ cyttsp_resume(ts);
+ ts->suspended = 0;
+ enable_irq(ts->irq);
+ /* resume watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ }
+ disable_irq_nosync(ts->irq);
+ /* kill watchdog */
+ del_timer(&ts->timer);
+ cancel_work_sync(&ts->work);
+ if (sysfs_create_bin_file(&dev->kobj, &cyttsp_firmware))
+ printk(KERN_ERR "%s: unable to create file\n",
+ __func__);
+ DBG2(printk(KERN_INFO
+ "%s: FW loader opened for start load: ps=%d mode=%d\n",
+ __func__,
+ ts->platform_data->power_state, ts->fw_loader_mode);)
+ cyttsp_soft_reset(ts);
+ printk(KERN_INFO "%s: FW loader started.\n", __func__);
+ ts->platform_data->power_state = CY_LDR_STATE;
+ } else if (!val && ts->fw_loader_mode) {
+ sysfs_remove_bin_file(&dev->kobj, &cyttsp_firmware);
+ ts->fw_loader_mode = 0;
+ printk(KERN_INFO "%s: FW loader finished.\n", __func__);
+ enable_irq(ts->irq);
+ ret = cyttsp_power_on(ts);
+ if (ret < 0)
+ printk(KERN_ERR "%s: Error, power on fail\n", __func__);
+ /* resume watchdog */
+ mod_timer(&ts->timer, jiffies + CY_WDG_TIMEOUT);
+ }
+ UNLOCK(ts->mutex);
+ return ret == size ? ret : -EINVAL;
+}
+
+static void cyttsp_close(struct input_dev *dev)
+{
+ struct cyttsp *ts = dev_get_drvdata(&dev->dev);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&ts->early_suspend);
+#endif
+ del_timer_sync(&ts->timer);
+ cancel_work_sync(&ts->work);
+ free_irq(ts->irq, ts);
+ input_unregister_device(ts->input);
+ input_free_device(ts->input);
+ if (ts->platform_data->init)
+ ts->platform_data->init(0);
+ regulator_disable(ts->regulator);
+ kfree(ts);
+}
+
+static int cyttsp_open(struct input_dev *dev)
+{
+ struct cyttsp *ts = dev_get_drvdata(&dev->dev);
+ int ret = 0;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ ts->regulator = regulator_get(ts->pdev, "vcpin");
+ if (IS_ERR(ts->regulator)) {
+ printk(KERN_ERR "%s: Error, regulator_get failed\n", __func__);
+ ret = PTR_ERR(ts->regulator);
+ ts->regulator = NULL;
+ goto error_regulator_get;
+ }
+ ret = regulator_enable(ts->regulator);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: regulator enable failed\n",__func__);
+ goto error_regulator;
+ }
+ /* enable interrupts */
+ ts->irq = gpio_to_irq(ts->platform_data->irq_gpio);
+ ret = request_threaded_irq(ts->irq, NULL, cyttsp_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ ts->input->name, ts);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: IRQ request failed r=%d\n",
+ __func__, ret);
+ ts->platform_data->power_state = CY_INVALID_STATE;
+ goto error_gpio_irq;
+ }
+ ret = cyttsp_reset_controller(ts);
+ if (ret < 0) {
+ printk(KERN_ERR "controller reset failed\n");
+ goto error_reset;
+ }
+ DBG(printk(KERN_INFO "%s: call power_on\n", __func__);)
+
+ ret = cyttsp_power_on(ts);
+ if (ret < 0)
+ printk(KERN_ERR "%s: Error, power on failed!\n", __func__);
+
+ return ret;
+
+error_reset:
+ if (ts->irq >= 0)
+ free_irq(ts->irq, ts);
+error_gpio_irq:
+ regulator_disable(ts->regulator);
+error_regulator:
+ regulator_put(ts->regulator);
+error_regulator_get:
+ return ret;
+}
+
+static struct device_attribute fwloader =
+ __ATTR(fwloader, 0644, attr_fwloader_show, attr_fwloader_store);
+
+void *cyttsp_core_init(struct cyttsp_bus_ops *bus_ops, struct device *pdev)
+{
+ struct input_dev *input_device;
+ struct cyttsp *ts;
+ int retval = 0;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (ts == NULL) {
+ printk(KERN_ERR "%s: Error, kzalloc\n", __func__);
+ goto error_alloc_data_failed;
+ }
+ mutex_init(&ts->mutex);
+ ts->pdev = pdev;
+ ts->platform_data = pdev->platform_data;
+ ts->bus_ops = bus_ops;
+ init_completion(&ts->int_running);
+
+ if (ts->platform_data->init)
+ retval = ts->platform_data->init(1);
+ if (retval) {
+ printk(KERN_ERR "%s: platform init failed!\n", __func__);
+ goto error_init;
+ }
+
+ /* Create the input device and register it. */
+ input_device = input_allocate_device();
+ if (!input_device) {
+ retval = -ENOMEM;
+ printk(KERN_ERR "%s: Error, failed to allocate input device\n",
+ __func__);
+ goto error_input_allocate_device;
+ }
+ ts->input = input_device;
+ input_device->name = ts->platform_data->name;
+ input_device->phys = ts->phys;
+ input_device->dev.parent = ts->pdev;
+ input_set_drvdata(ts->input, ts);
+ input_device->open = cyttsp_open;
+ input_device->close = cyttsp_close;
+
+ /* setup watchdog */
+ INIT_WORK(&ts->work, cyttsp_check_bl);
+ setup_timer(&ts->timer, cyttsp_timer, (unsigned long) ts);
+
+ set_bit(EV_SYN, input_device->evbit);
+ set_bit(EV_KEY, input_device->evbit);
+ set_bit(EV_ABS, input_device->evbit);
+ set_bit(BTN_TOUCH, input_device->keybit);
+ set_bit(BTN_2, input_device->keybit);
+ if (ts->platform_data->use_gestures)
+ set_bit(BTN_3, input_device->keybit);
+
+ input_set_abs_params(input_device, ABS_X, 0, ts->platform_data->maxx,
+ 0, 0);
+ input_set_abs_params(input_device, ABS_Y, 0, ts->platform_data->maxy,
+ 0, 0);
+ input_set_abs_params(input_device, ABS_TOOL_WIDTH, 0,
+ CY_LARGE_TOOL_WIDTH, 0, 0);
+ input_set_abs_params(input_device, ABS_PRESSURE, 0, CY_MAXZ, 0, 0);
+ input_set_abs_params(input_device, ABS_HAT0X, 0,
+ ts->platform_data->maxx, 0, 0);
+ input_set_abs_params(input_device, ABS_HAT0Y, 0,
+ ts->platform_data->maxy, 0, 0);
+ if (ts->platform_data->use_gestures) {
+ input_set_abs_params(input_device, ABS_HAT1X, 0, CY_MAXZ,
+ 0, 0);
+ input_set_abs_params(input_device, ABS_HAT1Y, 0, CY_MAXZ,
+ 0, 0);
+ }
+ if (ts->platform_data->use_mt) {
+ input_set_abs_params(input_device, ABS_MT_POSITION_X, 0,
+ ts->platform_data->maxx, 0, 0);
+ input_set_abs_params(input_device, ABS_MT_POSITION_Y, 0,
+ ts->platform_data->maxy, 0, 0);
+ input_set_abs_params(input_device, ABS_MT_TOUCH_MAJOR, 0,
+ CY_MAXZ, 0, 0);
+ input_set_abs_params(input_device, ABS_MT_WIDTH_MAJOR, 0,
+ CY_LARGE_TOOL_WIDTH, 0, 0);
+ if (ts->platform_data->use_trk_id)
+ input_set_abs_params(input_device, ABS_MT_TRACKING_ID,
+ 0, CY_NUM_TRK_ID, 0, 0);
+ }
+
+ if (ts->platform_data->use_virtual_keys)
+ input_set_capability(input_device, EV_KEY, KEY_PROG1);
+
+ retval = input_register_device(input_device);
+ if (retval) {
+ printk(KERN_ERR "%s: Error, failed to register input device\n",
+ __func__);
+ goto error_input_register_device;
+ }
+ DBG(printk(KERN_INFO "%s: Registered input device %s\n",
+ __func__, input_device->name);)
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ts->early_suspend.suspend = cyttsp_ts_early_suspend;
+ ts->early_suspend.resume = cyttsp_ts_late_resume;
+ register_early_suspend(&ts->early_suspend);
+#endif
+ retval = device_create_file(pdev, &fwloader);
+ if (retval) {
+ printk(KERN_ERR "%s: Error, could not create attribute\n",
+ __func__);
+ goto device_create_error;
+ }
+
+ return ts;
+
+device_create_error:
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&ts->early_suspend);
+#endif
+ input_unregister_device(input_device);
+ input_device = NULL;
+error_input_register_device:
+ /* if registration failed, the device was never registered: free it */
+ if (input_device)
+ input_free_device(input_device);
+ cancel_work_sync(&ts->work);
+ del_timer_sync(&ts->timer);
+error_input_allocate_device:
+ if (ts->platform_data->init)
+ ts->platform_data->init(0);
+error_init:
+ kfree(ts);
+error_alloc_data_failed:
+ return NULL;
+}
+
+/* registered in driver struct */
+void cyttsp_core_release(void *handle)
+{
+ struct cyttsp *ts = handle;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&ts->early_suspend);
+#endif
+ del_timer_sync(&ts->timer);
+ cancel_work_sync(&ts->work);
+ free_irq(ts->irq, ts);
+ input_unregister_device(ts->input);
+ input_free_device(ts->input);
+ if (ts->platform_data->init)
+ ts->platform_data->init(0);
+ kfree(ts);
+}
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen driver core");
+MODULE_AUTHOR("Cypress");
+
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
new file mode 100755
index 00000000000..6af486177a0
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -0,0 +1,44 @@
+/* Header file for:
+ * Cypress TrueTouch(TM) Standard Product I2C touchscreen driver.
+ * drivers/input/touchscreen/cyttsp_core.h
+ *
+ * Copyright (C) 2009-2011 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+
+
+#ifndef __CYTTSP_CORE_H__
+#define __CYTTSP_CORE_H__
+
+#include <linux/kernel.h>
+
+struct cyttsp_bus_ops {
+ s32 (*write)(void *handle, u8 addr, u8 length, const void *values);
+ s32 (*read)(void *handle, u8 addr, u8 length, void *values);
+ s32 (*ext)(void *handle, void *values);
+};
+
+void *cyttsp_core_init(struct cyttsp_bus_ops *bus_ops, struct device *pdev);
+void cyttsp_core_release(void *handle);
+
+#endif /* __CYTTSP_CORE_H__ */
diff --git a/drivers/input/touchscreen/cyttsp_ldr.h b/drivers/input/touchscreen/cyttsp_ldr.h
new file mode 100755
index 00000000000..95db89d0d13
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp_ldr.h
@@ -0,0 +1,333 @@
+/*
+ * Source for:
+ * Cypress TrueTouch(TM) Standard Product touchscreen driver.
+ *
+ * Copyright (C) 2009-2011 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+/*
+ ************************************************************************
+ * Compiled image bootloader functions
+ ************************************************************************
+ */
+#include "cyttsp_fw.h"
+#define CY_BL_PAGE_SIZE 16
+#define CY_BL_NUM_PAGES 5
+#define CY_MAX_DATA_LEN (CY_BL_PAGE_SIZE * 2)
+
+/* Timeout timer */
+static int cyttsp_check_polling(struct cyttsp *ts)
+{
+ return ts->platform_data->use_timer;
+}
+
+static void cyttsp_to_timer(unsigned long handle)
+{
+ struct cyttsp *ts = (struct cyttsp *)handle;
+
+ DBG(printk(KERN_INFO"%s: TTSP timeout timer event!\n", __func__);)
+ ts->to_timeout = true;
+ return;
+}
+
+static void cyttsp_setup_to_timer(struct cyttsp *ts)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ setup_timer(&ts->to_timer, cyttsp_to_timer, (unsigned long) ts);
+}
+
+static void cyttsp_kill_to_timer(struct cyttsp *ts)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ del_timer(&ts->to_timer);
+}
+
+static void cyttsp_start_to_timer(struct cyttsp *ts, int ms)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ ts->to_timeout = false;
+ mod_timer(&ts->to_timer, jiffies + msecs_to_jiffies(ms));
+}
+
+static bool cyttsp_timeout(struct cyttsp *ts)
+{
+ if (cyttsp_check_polling(ts))
+ return false;
+ else
+ return ts->to_timeout;
+}
+
+static void cyttsp_set_bl_ready(struct cyttsp *ts, bool set)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ ts->bl_ready = set;
+ DBG(printk(KERN_INFO"%s: bl_ready=%d\n", __func__, (int)ts->bl_ready);)
+}
+
+static bool cyttsp_check_bl_ready(struct cyttsp *ts)
+{
+ if (cyttsp_check_polling(ts))
+ return true;
+ else
+ return ts->bl_ready;
+}
+
+static bool cyttsp_bl_err_status(struct cyttsp *ts)
+{
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ return (((ts->bl_data.bl_status == 0x10) &&
+ (ts->bl_data.bl_error == 0x20)) ||
+ ((ts->bl_data.bl_status == 0x11) &&
+ (ts->bl_data.bl_error == 0x20)));
+}
+
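+/*
+ * Wait for the bootloader to become ready: in polling mode sleep and
+ * re-read the bootloader registers until done() reports success; in
+ * interrupt mode wait for the ISR to flag bl_ready under the timeout
+ * timer.  Returns true on timeout.
+ */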
+static bool cyttsp_wait_bl_ready(struct cyttsp *ts,
+ int pre_delay, int loop_delay, int max_try,
+ bool (*done)(struct cyttsp *ts))
+{
+ int tries;
+ bool rdy = false, tmo = false;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ DBG(printk(KERN_INFO"%s: pre-dly=%d loop-dly=%d, max-try=%d\n",
+ __func__, pre_delay, loop_delay, max_try);)
+
+ tries = 0;
+ ts->bl_data.bl_file = 0;
+ ts->bl_data.bl_status = 0;
+ ts->bl_data.bl_error = 0;
+ if (cyttsp_check_polling(ts)) {
+ msleep(pre_delay);
+ do {
+ msleep(abs(loop_delay));
+ cyttsp_load_bl_regs(ts);
+ } while (!done(ts) &&
+ tries++ < max_try);
+ DBG(printk(KERN_INFO"%s: polling mode tries=%d\n",
+ __func__, tries);)
+ } else {
+ cyttsp_start_to_timer(ts, abs(loop_delay) * max_try);
+ while (!rdy && !tmo) {
+ rdy = cyttsp_check_bl_ready(ts);
+ tmo = cyttsp_timeout(ts);
+ if (loop_delay < 0)
+ udelay(abs(loop_delay));
+ else
+ msleep(abs(loop_delay));
+ tries++;
+ }
+ DBG2(printk(KERN_INFO"%s: irq mode tries=%d rdy=%d tmo=%d\n",
+ __func__, tries, (int)rdy, (int)tmo);)
+ cyttsp_load_bl_regs(ts);
+ }
+
+ if (tries >= max_try || tmo)
+ return true; /* timeout */
+ else
+ return false;
+}
+
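+/*
+ * Write one firmware record in bootloader page-sized chunks.  The first
+ * chunk already carries the page offset; each following chunk prepends
+ * the offset and waits for the bootloader to report ready before the
+ * next write.
+ */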
+static int cyttsp_wr_blk_chunks(struct cyttsp *ts, u8 cmd,
+ u8 length, const u8 *values)
+{
+ int retval = 0;
+ int block = 1;
+ bool timeout;
+
+ u8 dataray[CY_MAX_DATA_LEN];
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ /* first page already includes the bl page offset */
+ memcpy(dataray, values, CY_BL_PAGE_SIZE + 1);
+ cyttsp_set_bl_ready(ts, false);
+ retval = ttsp_write_block_data(ts, cmd, CY_BL_PAGE_SIZE + 1, dataray);
+ values += CY_BL_PAGE_SIZE + 1;
+ length -= CY_BL_PAGE_SIZE + 1;
+ if (retval)
+ return retval;
+
+ /* remaining blocks require bl page offset stuffing */
+ while (length && (block < CY_BL_NUM_PAGES) && !(retval < 0)) {
+ dataray[0] = CY_BL_PAGE_SIZE * block;
+ timeout = cyttsp_wait_bl_ready(ts,
+ 1, -100, 100, cyttsp_bl_err_status);
+ if (timeout)
+ return -EIO;
+ memcpy(&dataray[1], values, length >= CY_BL_PAGE_SIZE ?
+ CY_BL_PAGE_SIZE : length);
+ cyttsp_set_bl_ready(ts, false);
+ retval = ttsp_write_block_data(ts, cmd,
+ length >= CY_BL_PAGE_SIZE ?
+ CY_BL_PAGE_SIZE + 1 : length + 1, dataray);
+ values += CY_BL_PAGE_SIZE;
+ length = length >= CY_BL_PAGE_SIZE ?
+ length - CY_BL_PAGE_SIZE : 0;
+ block++;
+ }
+
+ return retval;
+}
+
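+/*
+ * Download the compiled-in firmware image: send the bootloader-enter
+ * record, stream every write-block record through cyttsp_wr_blk_chunks(),
+ * send the terminate record and soft reset the device.
+ */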
+static int cyttsp_load_app(struct cyttsp *ts)
+{
+ int retval = 0;
+ int rec;
+ bool timeout;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ printk(KERN_INFO "%s: "
+ "load file - tver=0x%02X%02X a_id=0x%02X%02X aver=0x%02X%02X\n",
+ __func__,
+ cyttsp_fw_tts_verh, cyttsp_fw_tts_verl,
+ cyttsp_fw_app_idh, cyttsp_fw_app_idl,
+ cyttsp_fw_app_verh, cyttsp_fw_app_verl);
+
+ /* download new TTSP Application to the Bootloader */
+ rec = 0;
+
+ /* send bootload initiation command */
+ printk(KERN_INFO"%s: Send BL Enter\n", __func__);
+ cyttsp_set_bl_ready(ts, false);
+ retval = ttsp_write_block_data(ts, CY_REG_BASE,
+ cyttsp_fw[rec].Length, cyttsp_fw[rec].Block);
+ rec++;
+ if (retval)
+ return retval;
+ timeout = cyttsp_wait_bl_ready(ts, 1, 100, 100, cyttsp_bl_err_status);
+ DBG(printk(KERN_INFO "%s: BL ENTER f=%02X s=%02X e=%02X t=%d\n",
+ __func__,
+ ts->bl_data.bl_file, ts->bl_data.bl_status,
+ ts->bl_data.bl_error, timeout);)
+ if (timeout)
+ goto loader_exit;
+
+ /* send bootload firmware load blocks */
+ printk(KERN_INFO"%s: Send BL Blocks\n", __func__);
+ while (cyttsp_fw[rec].Command == CY_BL_WRITE_BLK) {
+ DBG2(printk(KERN_INFO "%s:"
+ "BL DNLD Rec=% 3d Len=% 3d Addr=%04X\n",
+ __func__,
+ cyttsp_fw[rec].Record, cyttsp_fw[rec].Length,
+ cyttsp_fw[rec].Address);
+ )
+ retval = cyttsp_wr_blk_chunks(ts, CY_REG_BASE,
+ cyttsp_fw[rec].Length, cyttsp_fw[rec].Block);
+ if (retval < 0) {
+ DBG(printk(KERN_INFO "%s:"
+ "BL fail Rec=%3d retval=%d\n",
+ __func__,
+ cyttsp_fw[rec].Record, retval);
+ )
+ break;
+ } else {
+ cyttsp_wait_bl_ready(ts, 10, 1, 1000,
+ cyttsp_bl_err_status);
+ DBG(printk(KERN_INFO "%s: BL _LOAD "
+ "f=%02X s=%02X e=%02X\n",
+ __func__,
+ ts->bl_data.bl_file, ts->bl_data.bl_status,
+ ts->bl_data.bl_error);)
+ }
+ rec++;
+ }
+ if (retval < 0)
+ goto loader_exit;
+
+ /* send bootload terminate command */
+ printk(KERN_INFO"%s: Send BL Terminate\n", __func__);
+ cyttsp_set_bl_ready(ts, false);
+ retval = ttsp_write_block_data(ts, CY_REG_BASE,
+ cyttsp_fw[rec].Length, cyttsp_fw[rec].Block);
+ if (retval < 0)
+ goto loader_exit;
+ else
+ cyttsp_wait_bl_ready(ts, 1, 100, 100, cyttsp_bl_err_status);
+
+loader_exit:
+ /* reset TTSP Device back to bootloader mode */
+ retval = cyttsp_soft_reset(ts);
+
+ return retval;
+}
+
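+/*
+ * Compare the bootloader-reported TTSP version, application id/version
+ * and customer id against the compiled-in image and load the new
+ * application if any of them differ or a forced update is configured.
+ */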
+static int cyttsp_loader(struct cyttsp *ts)
+{
+ int retval;
+
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ retval = cyttsp_load_bl_regs(ts);
+ if (retval < 0)
+ return retval;
+
+ printk(KERN_INFO "%s:"
+ "blttsp=0x%02X%02X flttsp=0x%02X%02X force=%d\n",
+ __func__,
+ ts->bl_data.ttspver_hi, ts->bl_data.ttspver_lo,
+ cyttsp_fw_tts_verh, cyttsp_fw_tts_verl,
+ ts->platform_data->use_force_fw_update);
+ printk(KERN_INFO "%s:"
+ "blappid=0x%02X%02X flappid=0x%02X%02X\n",
+ __func__,
+ ts->bl_data.appid_hi, ts->bl_data.appid_lo,
+ cyttsp_fw_app_idh, cyttsp_fw_app_idl);
+ printk(KERN_INFO "%s:"
+ "blappver=0x%02X%02X flappver=0x%02X%02X\n",
+ __func__,
+ ts->bl_data.appver_hi, ts->bl_data.appver_lo,
+ cyttsp_fw_app_verh, cyttsp_fw_app_verl);
+ printk(KERN_INFO "%s:"
+ "blcid=0x%02X%02X%02X flcid=0x%02X%02X%02X\n",
+ __func__,
+ ts->bl_data.cid_0, ts->bl_data.cid_1, ts->bl_data.cid_2,
+ cyttsp_fw_cid_0, cyttsp_fw_cid_1, cyttsp_fw_cid_2);
+
+ if (CY_DIFF(ts->bl_data.ttspver_hi, cyttsp_fw_tts_verh) ||
+ CY_DIFF(ts->bl_data.ttspver_lo, cyttsp_fw_tts_verl) ||
+ CY_DIFF(ts->bl_data.appid_hi, cyttsp_fw_app_idh) ||
+ CY_DIFF(ts->bl_data.appid_lo, cyttsp_fw_app_idl) ||
+ CY_DIFF(ts->bl_data.appver_hi, cyttsp_fw_app_verh) ||
+ CY_DIFF(ts->bl_data.appver_lo, cyttsp_fw_app_verl) ||
+ CY_DIFF(ts->bl_data.cid_0, cyttsp_fw_cid_0) ||
+ CY_DIFF(ts->bl_data.cid_1, cyttsp_fw_cid_1) ||
+ CY_DIFF(ts->bl_data.cid_2, cyttsp_fw_cid_2) ||
+ ts->platform_data->use_force_fw_update) {
+ /* load new app into TTSP Device */
+ cyttsp_setup_to_timer(ts);
+ ts->platform_data->power_state = CY_LDR_STATE;
+ retval = cyttsp_load_app(ts);
+ cyttsp_kill_to_timer(ts);
+
+ } else {
+ /* firmware file is a match with firmware in the TTSP device */
+ DBG(printk(KERN_INFO "%s: FW matches - no loader\n", __func__);)
+ }
+
+ return retval;
+}
+
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
new file mode 100755
index 00000000000..d4f7ffeed1b
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -0,0 +1,302 @@
+/* Source for:
+ * Cypress TrueTouch(TM) Standard Product I2C touchscreen driver.
+ * drivers/input/touchscreen/cyttsp_spi.c
+ *
+ * Copyright (C) 2009-2011 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/cyttsp.h>
+#include "cyttsp_core.h"
+
+#define DBG(x)
+
+#define CY_SPI_WR_OP 0x00 /* r/~w */
+#define CY_SPI_RD_OP 0x01
+#define CY_SPI_CMD_BYTES 4
+#define CY_SPI_SYNC_BYTE 2
+#define CY_SPI_SYNC_ACK1 0x62
+#define CY_SPI_SYNC_ACK2 0x9D
+#define CY_SPI_DATA_SIZE 128
+#define CY_SPI_DATA_BUF_SIZE (CY_SPI_CMD_BYTES + CY_SPI_DATA_SIZE)
+#define CY_SPI_BITS_PER_WORD 8
+
+struct cyttsp_spi {
+ struct cyttsp_bus_ops ops;
+ struct spi_device *spi_client;
+ void *ttsp_client;
+ u8 wr_buf[CY_SPI_DATA_BUF_SIZE];
+ u8 rd_buf[CY_SPI_DATA_BUF_SIZE];
+};
+
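+/*
+ * Raw SPI transfer: a four-byte command (two fixed header bytes, the
+ * register index and the read/write flag) is followed by the payload for
+ * writes, or by a second transfer that clocks the data in for reads.
+ * The device acknowledges the command with the two sync bytes 0x62/0x9D
+ * in the returned command-phase buffer.
+ */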
+static int cyttsp_spi_xfer_(u8 op, struct cyttsp_spi *ts_spi,
+ u8 reg, u8 *buf, int length)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ u8 *wr_buf = ts_spi->wr_buf;
+ u8 *rd_buf = ts_spi->rd_buf;
+ int retval;
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+ if (length > CY_SPI_DATA_SIZE) {
+ printk(KERN_ERR "%s: length %d is too big.\n",
+ __func__, length);
+ return -EINVAL;
+ }
+ DBG(printk(KERN_INFO "%s: OP=%s length=%d\n", __func__,
+ op == CY_SPI_RD_OP ? "Read" : "Write", length);)
+
+ memset(wr_buf, 0, CY_SPI_DATA_BUF_SIZE);
+ memset(rd_buf, 0, CY_SPI_DATA_BUF_SIZE);
+
+ wr_buf[0] = 0x00; /* header byte 0 */
+ wr_buf[1] = 0xFF; /* header byte 1 */
+ wr_buf[2] = reg; /* reg index */
+ wr_buf[3] = op; /* r/~w */
+ if (op == CY_SPI_WR_OP)
+ memcpy(wr_buf + CY_SPI_CMD_BYTES, buf, length);
+ DBG(
+ if (op == CY_SPI_RD_OP)
+ memset(rd_buf, CY_SPI_SYNC_NACK, CY_SPI_DATA_BUF_SIZE);)
+ DBG(
+ for (i = 0; i < (length + CY_SPI_CMD_BYTES); i++) {
+ if ((op == CY_SPI_RD_OP) && (i < CY_SPI_CMD_BYTES))
+ printk(KERN_INFO "%s: read op. wr[%d]:0x%02x\n",
+ __func__, i, wr_buf[i]);
+ if (op == CY_SPI_WR_OP)
+ printk(KERN_INFO "%s: write op. wr[%d]:0x%02x\n",
+ __func__, i, wr_buf[i]);
+ })
+
+ memset((void *)xfer, 0, sizeof(xfer));
+ spi_message_init(&msg);
+ xfer[0].tx_buf = wr_buf;
+ xfer[0].rx_buf = rd_buf;
+ if (op == CY_SPI_WR_OP) {
+ xfer[0].len = length + CY_SPI_CMD_BYTES;
+ spi_message_add_tail(&xfer[0], &msg);
+ } else if (op == CY_SPI_RD_OP) {
+ xfer[0].len = CY_SPI_CMD_BYTES;
+ spi_message_add_tail(&xfer[0], &msg);
+
+ xfer[1].rx_buf = buf;
+ xfer[1].len = length;
+ spi_message_add_tail(&xfer[1], &msg);
+ }
+
+ retval = spi_sync(ts_spi->spi_client, &msg);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: spi sync error %d, len=%d, op=%d\n",
+ __func__, retval, xfer[1].len, op);
+ retval = 0;
+ }
+
+ if ((rd_buf[CY_SPI_SYNC_BYTE] == CY_SPI_SYNC_ACK1) &&
+ (rd_buf[CY_SPI_SYNC_BYTE+1] == CY_SPI_SYNC_ACK2))
+ retval = 0;
+ else {
+ DBG(
+ for (i = 0; i < (CY_SPI_CMD_BYTES); i++)
+ printk(KERN_INFO "%s: test rd_buf[%d]:0x%02x\n",
+ __func__, i, rd_buf[i]);
+ for (i = 0; i < (length); i++)
+ printk(KERN_INFO "%s: test buf[%d]:0x%02x\n",
+ __func__, i, buf[i]);)
+ retval = 1;
+ }
+ return retval;
+}
+
+static int cyttsp_spi_xfer(u8 op, struct cyttsp_spi *ts,
+ u8 reg, u8 *buf, int length)
+{
+ int tries;
+ int retval;
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+
+ if (op == CY_SPI_RD_OP) {
+ for (tries = CY_NUM_RETRY; tries; tries--) {
+ retval = cyttsp_spi_xfer_(op, ts, reg, buf, length);
+ if (retval == 0)
+ break;
+ else
+ msleep(10);
+ }
+ } else {
+ retval = cyttsp_spi_xfer_(op, ts, reg, buf, length);
+ }
+ return retval;
+}
+
+static s32 ttsp_spi_read_block_data(void *handle, u8 addr,
+ u8 length, void *data)
+{
+ int retval;
+ struct cyttsp_spi *ts = container_of(handle, struct cyttsp_spi, ops);
+
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+
+ retval = cyttsp_spi_xfer(CY_SPI_RD_OP, ts, addr, data, length);
+ if (retval < 0)
+ printk(KERN_ERR "%s: ttsp_spi_read_block_data failed\n",
+ __func__);
+
+ /* Do not print the above error if the data sync bytes were not
+ * found.  This is a normal condition during bootloader startup and
+ * the caller needs to retry until the sync bytes are found.
+ */
+ if (retval > 0)
+ retval = -1; /* now signal fail; so retry can be done */
+
+ return retval;
+}
+
+static s32 ttsp_spi_write_block_data(void *handle, u8 addr,
+ u8 length, const void *data)
+{
+ int retval;
+ struct cyttsp_spi *ts = container_of(handle, struct cyttsp_spi, ops);
+
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+
+ retval = cyttsp_spi_xfer(CY_SPI_WR_OP, ts, addr, (void *)data, length);
+ if (retval < 0)
+ printk(KERN_ERR "%s: ttsp_spi_write_block_data failed\n",
+ __func__);
+
+ /* Do not print the above error if the data sync bytes were not
+ * found.  This is a normal condition during bootloader startup and
+ * the caller needs to retry until the sync bytes are found.
+ */
+ if (retval > 0)
+ retval = -1; /* now signal fail; so retry can be done */
+
+ return retval;
+}
+
+static s32 ttsp_spi_tch_ext(void *handle, void *values)
+{
+ int retval = 0;
+ struct cyttsp_spi *ts = container_of(handle, struct cyttsp_spi, ops);
+
+ DBG(printk(KERN_INFO "%s: Enter\n", __func__);)
+
+ /* Add custom touch extension handling code here */
+ /* set: retval < 0 for any returned system errors,
+ retval = 0 if normal touch handling is required,
+ retval > 0 if normal touch handling is *not* required */
+ if (!ts || !values)
+ retval = -EIO;
+
+ return retval;
+}
+
+static int __devinit cyttsp_spi_probe(struct spi_device *spi)
+{
+ struct cyttsp_spi *ts_spi;
+ int retval;
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+
+ /* Set up SPI*/
+ spi->bits_per_word = CY_SPI_BITS_PER_WORD;
+ spi->mode = SPI_MODE_0;
+ retval = spi_setup(spi);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: SPI setup error %d\n", __func__, retval);
+ return retval;
+ }
+ ts_spi = kzalloc(sizeof(*ts_spi), GFP_KERNEL);
+ if (ts_spi == NULL) {
+ printk(KERN_ERR "%s: Error, kzalloc\n", __func__);
+ retval = -ENOMEM;
+ goto error_alloc_data_failed;
+ }
+ ts_spi->spi_client = spi;
+ dev_set_drvdata(&spi->dev, ts_spi);
+ ts_spi->ops.write = ttsp_spi_write_block_data;
+ ts_spi->ops.read = ttsp_spi_read_block_data;
+ ts_spi->ops.ext = ttsp_spi_tch_ext;
+
+ ts_spi->ttsp_client = cyttsp_core_init(&ts_spi->ops, &spi->dev);
+ if (!ts_spi->ttsp_client) {
+ retval = -ENODEV;
+ goto ttsp_core_err;
+ }
+ printk(KERN_INFO "%s: Successful registration %s\n",
+ __func__, CY_SPI_NAME);
+
+ return 0;
+
+ttsp_core_err:
+ kfree(ts_spi);
+error_alloc_data_failed:
+ return retval;
+}
+
+/* registered in driver struct */
+static int __devexit cyttsp_spi_remove(struct spi_device *spi)
+{
+ struct cyttsp_spi *ts_spi = dev_get_drvdata(&spi->dev);
+ DBG(printk(KERN_INFO"%s: Enter\n", __func__);)
+ cyttsp_core_release(ts_spi->ttsp_client);
+ kfree(ts_spi);
+ return 0;
+}
+
+static struct spi_driver cyttsp_spi_driver = {
+ .driver = {
+ .name = CY_SPI_NAME,
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = cyttsp_spi_probe,
+ .remove = __devexit_p(cyttsp_spi_remove),
+};
+
+static int __init cyttsp_spi_init(void)
+{
+ int err;
+
+ err = spi_register_driver(&cyttsp_spi_driver);
+ printk(KERN_INFO "%s: Cypress TrueTouch(R) Standard Product SPI "
+ "Touchscreen Driver (Built %s @ %s) returned %d\n",
+ __func__, __DATE__, __TIME__, err);
+
+ return err;
+}
+module_init(cyttsp_spi_init);
+
+static void __exit cyttsp_spi_exit(void)
+{
+ spi_unregister_driver(&cyttsp_spi_driver);
+ printk(KERN_INFO "%s: module exit\n", __func__);
+}
+module_exit(cyttsp_spi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product SPI driver");
+MODULE_AUTHOR("Cypress");
+
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi.c b/drivers/input/touchscreen/synaptics_i2c_rmi.c
new file mode 100644
index 00000000000..5729602cbb6
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi.c
@@ -0,0 +1,675 @@
+/* drivers/input/keyboard/synaptics_i2c_rmi.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/hrtimer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/synaptics_i2c_rmi.h>
+
+static struct workqueue_struct *synaptics_wq;
+
+struct synaptics_ts_data {
+ uint16_t addr;
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ int use_irq;
+ bool has_relative_report;
+ struct hrtimer timer;
+ struct work_struct work;
+ uint16_t max[2];
+ int snap_state[2][2];
+ int snap_down_on[2];
+ int snap_down_off[2];
+ int snap_up_on[2];
+ int snap_up_off[2];
+ int snap_down[2];
+ int snap_up[2];
+ uint32_t flags;
+ int reported_finger_count;
+ int8_t sensitivity_adjust;
+ int (*power)(int on);
+ struct early_suspend early_suspend;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h);
+static void synaptics_ts_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_init_panel(struct synaptics_ts_data *ts)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_page_select_failed;
+ }
+ ret = i2c_smbus_write_byte_data(ts->client, 0x41, 0x04); /* Set "No Clip Z" */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for No Clip Z\n");
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0x44,
+ ts->sensitivity_adjust);
+ if (ret < 0)
+ pr_err("synaptics_ts: failed to set Sensitivity Adjust\n");
+
+err_page_select_failed:
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x04); /* page select = 0x04 */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf0, 0x81); /* normal operation, 80 reports per second */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume: i2c_smbus_write_byte_data failed\n");
+ return ret;
+}
+
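+/*
+ * Read the 13/15-byte data block, decode up to two finger positions
+ * (applying the flip/swap/snap corrections from platform data) and
+ * report them as both legacy single-touch and MT events; re-enable the
+ * interrupt when running in IRQ mode.
+ */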
+static void synaptics_ts_work_func(struct work_struct *work)
+{
+ int i;
+ int ret;
+ int bad_data = 0;
+ struct i2c_msg msg[2];
+ uint8_t start_reg;
+ uint8_t buf[15];
+ struct synaptics_ts_data *ts = container_of(work, struct synaptics_ts_data, work);
+ int buf_len = ts->has_relative_report ? 15 : 13;
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &start_reg;
+ start_reg = 0x00;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = buf_len;
+ msg[1].buf = buf;
+
+ /* printk("synaptics_ts_work_func\n"); */
+ for (i = 0; i < ((ts->use_irq && !bad_data) ? 1 : 10); i++) {
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_work_func: i2c_transfer failed\n");
+ bad_data = 1;
+ } else {
+ /* printk("synaptics_ts_work_func: %x %x %x %x %x %x" */
+ /* " %x %x %x %x %x %x %x %x %x, ret %d\n", */
+ /* buf[0], buf[1], buf[2], buf[3], */
+ /* buf[4], buf[5], buf[6], buf[7], */
+ /* buf[8], buf[9], buf[10], buf[11], */
+ /* buf[12], buf[13], buf[14], ret); */
+ if ((buf[buf_len - 1] & 0xc0) != 0x40) {
+ printk(KERN_WARNING "synaptics_ts_work_func:"
+ " bad read %x %x %x %x %x %x %x %x %x"
+ " %x %x %x %x %x %x, ret %d\n",
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], ret);
+ if (bad_data)
+ synaptics_init_panel(ts);
+ bad_data = 1;
+ continue;
+ }
+ bad_data = 0;
+ if ((buf[buf_len - 1] & 1) == 0) {
+ /* printk("read %d coordinates\n", i); */
+ break;
+ } else {
+ int pos[2][2];
+ int f, a;
+ int base;
+ /* int x = buf[3] | (uint16_t)(buf[2] & 0x1f) << 8; */
+ /* int y = buf[5] | (uint16_t)(buf[4] & 0x1f) << 8; */
+ int z = buf[1];
+ int w = buf[0] >> 4;
+ int finger = buf[0] & 7;
+
+ /* int x2 = buf[3+6] | (uint16_t)(buf[2+6] & 0x1f) << 8; */
+ /* int y2 = buf[5+6] | (uint16_t)(buf[4+6] & 0x1f) << 8; */
+ /* int z2 = buf[1+6]; */
+ /* int w2 = buf[0+6] >> 4; */
+ /* int finger2 = buf[0+6] & 7; */
+
+ /* int dx = (int8_t)buf[12]; */
+ /* int dy = (int8_t)buf[13]; */
+ int finger2_pressed;
+
+ /* printk("x %4d, y %4d, z %3d, w %2d, F %d, 2nd: x %4d, y %4d, z %3d, w %2d, F %d, dx %4d, dy %4d\n", */
+ /* x, y, z, w, finger, */
+ /* x2, y2, z2, w2, finger2, */
+ /* dx, dy); */
+
+ base = 2;
+ for (f = 0; f < 2; f++) {
+ uint32_t flip_flag = SYNAPTICS_FLIP_X;
+ for (a = 0; a < 2; a++) {
+ int p = buf[base + 1];
+ p |= (uint16_t)(buf[base] & 0x1f) << 8;
+ if (ts->flags & flip_flag)
+ p = ts->max[a] - p;
+ if (ts->flags & SYNAPTICS_SNAP_TO_INACTIVE_EDGE) {
+ if (ts->snap_state[f][a]) {
+ if (p <= ts->snap_down_off[a])
+ p = ts->snap_down[a];
+ else if (p >= ts->snap_up_off[a])
+ p = ts->snap_up[a];
+ else
+ ts->snap_state[f][a] = 0;
+ } else {
+ if (p <= ts->snap_down_on[a]) {
+ p = ts->snap_down[a];
+ ts->snap_state[f][a] = 1;
+ } else if (p >= ts->snap_up_on[a]) {
+ p = ts->snap_up[a];
+ ts->snap_state[f][a] = 1;
+ }
+ }
+ }
+ pos[f][a] = p;
+ base += 2;
+ flip_flag <<= 1;
+ }
+ base += 2;
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(pos[f][0], pos[f][1]);
+ }
+ if (z) {
+ input_report_abs(ts->input_dev, ABS_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_Y, pos[0][1]);
+ }
+ input_report_abs(ts->input_dev, ABS_PRESSURE, z);
+ input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, w);
+ input_report_key(ts->input_dev, BTN_TOUCH, finger);
+ finger2_pressed = finger > 1 && finger != 7;
+ input_report_key(ts->input_dev, BTN_2, finger2_pressed);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_HAT0X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_HAT0Y, pos[1][1]);
+ }
+
+ if (!finger)
+ z = 0;
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[0][1]);
+ input_mt_sync(ts->input_dev);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[1][1]);
+ input_mt_sync(ts->input_dev);
+ } else if (ts->reported_finger_count > 1) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
+ input_mt_sync(ts->input_dev);
+ }
+ ts->reported_finger_count = finger;
+ input_sync(ts->input_dev);
+ }
+ }
+ }
+ if (ts->use_irq)
+ enable_irq(ts->client->irq);
+}
+
+static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer)
+{
+ struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer);
+ /* printk("synaptics_ts_timer_func\n"); */
+
+ queue_work(synaptics_wq, &ts->work);
+
+ hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t synaptics_ts_irq_handler(int irq, void *dev_id)
+{
+ struct synaptics_ts_data *ts = dev_id;
+
+ /* printk("synaptics_ts_irq_handler\n"); */
+ disable_irq_nosync(ts->client->irq);
+ queue_work(synaptics_wq, &ts->work);
+ return IRQ_HANDLED;
+}
+
+static int synaptics_ts_probe(
+ struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct synaptics_ts_data *ts;
+ uint8_t buf0[4];
+ uint8_t buf1[8];
+ struct i2c_msg msg[2];
+ int ret = 0;
+ uint16_t max_x, max_y;
+ int fuzz_x, fuzz_y, fuzz_p, fuzz_w;
+ struct synaptics_i2c_rmi_platform_data *pdata;
+ unsigned long irqflags;
+ int inactive_area_left;
+ int inactive_area_right;
+ int inactive_area_top;
+ int inactive_area_bottom;
+ int snap_left_on;
+ int snap_left_off;
+ int snap_right_on;
+ int snap_right_off;
+ int snap_top_on;
+ int snap_top_off;
+ int snap_bottom_on;
+ int snap_bottom_off;
+ uint32_t panel_version;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ printk(KERN_ERR "synaptics_ts_probe: need I2C_FUNC_I2C\n");
+ ret = -ENODEV;
+ goto err_check_functionality_failed;
+ }
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (ts == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_data_failed;
+ }
+ INIT_WORK(&ts->work, synaptics_ts_work_func);
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+ pdata = client->dev.platform_data;
+ if (pdata)
+ ts->power = pdata->power;
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_probe power on failed\n");
+ goto err_power_failed;
+ }
+ }
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf4, 0x01); /* device command = reset */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ /* fail? */
+ }
+ {
+ int retry = 10;
+ while (retry-- > 0) {
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe4);
+ if (ret >= 0)
+ break;
+ msleep(100);
+ }
+ }
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Major Version %x\n", ret);
+ panel_version = ret << 8;
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe5);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Minor Version %x\n", ret);
+ panel_version |= ret;
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe3);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: product property %x\n", ret);
+
+ if (pdata) {
+ while (pdata->version > panel_version)
+ pdata++;
+ ts->flags = pdata->flags;
+ ts->sensitivity_adjust = pdata->sensitivity_adjust;
+ irqflags = pdata->irqflags;
+ inactive_area_left = pdata->inactive_left;
+ inactive_area_right = pdata->inactive_right;
+ inactive_area_top = pdata->inactive_top;
+ inactive_area_bottom = pdata->inactive_bottom;
+ snap_left_on = pdata->snap_left_on;
+ snap_left_off = pdata->snap_left_off;
+ snap_right_on = pdata->snap_right_on;
+ snap_right_off = pdata->snap_right_off;
+ snap_top_on = pdata->snap_top_on;
+ snap_top_off = pdata->snap_top_off;
+ snap_bottom_on = pdata->snap_bottom_on;
+ snap_bottom_off = pdata->snap_bottom_off;
+ fuzz_x = pdata->fuzz_x;
+ fuzz_y = pdata->fuzz_y;
+ fuzz_p = pdata->fuzz_p;
+ fuzz_w = pdata->fuzz_w;
+ } else {
+ irqflags = 0;
+ inactive_area_left = 0;
+ inactive_area_right = 0;
+ inactive_area_top = 0;
+ inactive_area_bottom = 0;
+ snap_left_on = 0;
+ snap_left_off = 0;
+ snap_right_on = 0;
+ snap_right_off = 0;
+ snap_top_on = 0;
+ snap_top_off = 0;
+ snap_bottom_on = 0;
+ snap_bottom_off = 0;
+ fuzz_x = 0;
+ fuzz_y = 0;
+ fuzz_p = 0;
+ fuzz_w = 0;
+ }
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf0);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: device control %x\n", ret);
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf1);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: interrupt enable %x\n", ret);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ goto err_detect_failed;
+ }
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = buf0;
+ buf0[0] = 0xe0;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 8;
+ msg[1].buf = buf1;
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_transfer failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: 0xe0: %x %x %x %x %x %x %x %x\n",
+ buf1[0], buf1[1], buf1[2], buf1[3],
+ buf1[4], buf1[5], buf1[6], buf1[7]);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_detect_failed;
+ }
+ ret = i2c_smbus_read_word_data(ts->client, 0x02);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->has_relative_report = !(ret & 0x100);
+ printk(KERN_INFO "synaptics_ts_probe: Sensor properties %x\n", ret);
+ ret = i2c_smbus_read_word_data(ts->client, 0x04);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[0] = max_x = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ ret = i2c_smbus_read_word_data(ts->client, 0x06);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[1] = max_y = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(max_x, max_y);
+
+ ret = synaptics_init_panel(ts); /* will also switch back to page 0x04 */
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_init_panel failed\n");
+ goto err_detect_failed;
+ }
+
+ ts->input_dev = input_allocate_device();
+ if (ts->input_dev == NULL) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "synaptics_ts_probe: Failed to allocate input device\n");
+ goto err_input_dev_alloc_failed;
+ }
+ ts->input_dev->name = "synaptics-rmi-touchscreen";
+ set_bit(EV_SYN, ts->input_dev->evbit);
+ set_bit(EV_KEY, ts->input_dev->evbit);
+ set_bit(BTN_TOUCH, ts->input_dev->keybit);
+ set_bit(BTN_2, ts->input_dev->keybit);
+ set_bit(EV_ABS, ts->input_dev->evbit);
+ inactive_area_left = inactive_area_left * max_x / 0x10000;
+ inactive_area_right = inactive_area_right * max_x / 0x10000;
+ inactive_area_top = inactive_area_top * max_y / 0x10000;
+ inactive_area_bottom = inactive_area_bottom * max_y / 0x10000;
+ snap_left_on = snap_left_on * max_x / 0x10000;
+ snap_left_off = snap_left_off * max_x / 0x10000;
+ snap_right_on = snap_right_on * max_x / 0x10000;
+ snap_right_off = snap_right_off * max_x / 0x10000;
+ snap_top_on = snap_top_on * max_y / 0x10000;
+ snap_top_off = snap_top_off * max_y / 0x10000;
+ snap_bottom_on = snap_bottom_on * max_y / 0x10000;
+ snap_bottom_off = snap_bottom_off * max_y / 0x10000;
+ fuzz_x = fuzz_x * max_x / 0x10000;
+ fuzz_y = fuzz_y * max_y / 0x10000;
+ ts->snap_down[!!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_left;
+ ts->snap_up[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x + inactive_area_right;
+ ts->snap_down[!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_top;
+ ts->snap_up[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y + inactive_area_bottom;
+ ts->snap_down_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_on;
+ ts->snap_down_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_off;
+ ts->snap_up_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_on;
+ ts->snap_up_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_off;
+ ts->snap_down_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_on;
+ ts->snap_down_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_off;
+ ts->snap_up_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_on;
+ ts->snap_up_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_off;
+ printk(KERN_INFO "synaptics_ts_probe: max_x %d, max_y %d\n", max_x, max_y);
+ printk(KERN_INFO "synaptics_ts_probe: inactive_x %d %d, inactive_y %d %d\n",
+ inactive_area_left, inactive_area_right,
+ inactive_area_top, inactive_area_bottom);
+ printk(KERN_INFO "synaptics_ts_probe: snap_x %d-%d %d-%d, snap_y %d-%d %d-%d\n",
+ snap_left_on, snap_left_off, snap_right_on, snap_right_off,
+ snap_top_on, snap_top_off, snap_bottom_on, snap_bottom_off);
+ input_set_abs_params(ts->input_dev, ABS_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH, 0, 15, fuzz_w, 0);
+ input_set_abs_params(ts->input_dev, ABS_HAT0X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_HAT0Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 15, fuzz_w, 0);
+ /* ts->input_dev->name = ts->keypad_info->name; */
+ ret = input_register_device(ts->input_dev);
+ if (ret) {
+ printk(KERN_ERR "synaptics_ts_probe: Unable to register %s input device\n", ts->input_dev->name);
+ goto err_input_register_device_failed;
+ }
+ if (client->irq) {
+ ret = request_irq(client->irq, synaptics_ts_irq_handler, irqflags, client->name, ts);
+ if (ret == 0) {
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+ if (ret)
+ free_irq(client->irq, ts);
+ }
+ if (ret == 0)
+ ts->use_irq = 1;
+ else
+ dev_err(&client->dev, "request_irq failed\n");
+ }
+ if (!ts->use_irq) {
+ hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ts->timer.function = synaptics_ts_timer_func;
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ts->early_suspend.suspend = synaptics_ts_early_suspend;
+ ts->early_suspend.resume = synaptics_ts_late_resume;
+ register_early_suspend(&ts->early_suspend);
+#endif
+
+ printk(KERN_INFO "synaptics_ts_probe: Start touchscreen %s in %s mode\n", ts->input_dev->name, ts->use_irq ? "interrupt" : "polling");
+
+ return 0;
+
+err_input_register_device_failed:
+ input_free_device(ts->input_dev);
+
+err_input_dev_alloc_failed:
+err_detect_failed:
+err_power_failed:
+ kfree(ts);
+err_alloc_data_failed:
+err_check_functionality_failed:
+ return ret;
+}
+
+static int synaptics_ts_remove(struct i2c_client *client)
+{
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+ unregister_early_suspend(&ts->early_suspend);
+ if (ts->use_irq)
+ free_irq(client->irq, ts);
+ else
+ hrtimer_cancel(&ts->timer);
+ input_unregister_device(ts->input_dev);
+ kfree(ts);
+ return 0;
+}
+
+static int synaptics_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->use_irq)
+ disable_irq(client->irq);
+ else
+ hrtimer_cancel(&ts->timer);
+ ret = cancel_work_sync(&ts->work);
+ if (ret && ts->use_irq) /* if work was pending disable-count is now 2 */
+ enable_irq(client->irq);
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+
+ ret = i2c_smbus_write_byte_data(client, 0xf0, 0x86); /* deep sleep */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+ if (ts->power) {
+ ret = ts->power(0);
+ if (ret < 0)
+			printk(KERN_ERR "synaptics_ts_suspend power off failed\n");
+ }
+ return 0;
+}
+
+static int synaptics_ts_resume(struct i2c_client *client)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power on failed\n");
+ }
+
+ synaptics_init_panel(ts);
+
+ if (ts->use_irq)
+ enable_irq(client->irq);
+
+ if (!ts->use_irq)
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ else
+ i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+
+ return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_suspend(ts->client, PMSG_SUSPEND);
+}
+
+static void synaptics_ts_late_resume(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_resume(ts->client);
+}
+#endif
+
+static const struct i2c_device_id synaptics_ts_id[] = {
+ { SYNAPTICS_I2C_RMI_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver synaptics_ts_driver = {
+ .probe = synaptics_ts_probe,
+ .remove = synaptics_ts_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ .suspend = synaptics_ts_suspend,
+ .resume = synaptics_ts_resume,
+#endif
+ .id_table = synaptics_ts_id,
+ .driver = {
+ .name = SYNAPTICS_I2C_RMI_NAME,
+ },
+};
+
+static int __init synaptics_ts_init(void)
+{
+ synaptics_wq = create_singlethread_workqueue("synaptics_wq");
+ if (!synaptics_wq)
+ return -ENOMEM;
+ return i2c_add_driver(&synaptics_ts_driver);
+}
+
+static void __exit synaptics_ts_exit(void)
+{
+ i2c_del_driver(&synaptics_ts_driver);
+ if (synaptics_wq)
+ destroy_workqueue(synaptics_wq);
+}
+
+module_init(synaptics_ts_init);
+module_exit(synaptics_ts_exit);
+
+MODULE_DESCRIPTION("Synaptics Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9ca28fced2b..26bab398840 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -50,6 +50,14 @@ config LEDS_LM3530
controlled manually or using PWM input or using ambient
light automatically.
+config LEDS_AB5500
+ tristate "HVLED driver for AB5500"
+ depends on AB5500_CORE
+ help
+	  This option enables support for the HVLED in the AB5500
+	  multi-function device. Currently the LEDs on the AB5500 v1.0
+	  chip are supported.
+
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 1fc6875a8b2..c7b4880a63f 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
+obj-$(CONFIG_LEDS_AB5500) += leds-ab5500.o
obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
diff --git a/drivers/leds/leds-ab5500.c b/drivers/leds/leds-ab5500.c
new file mode 100644
index 00000000000..294551b1962
--- /dev/null
+++ b/drivers/leds/leds-ab5500.c
@@ -0,0 +1,811 @@
+/*
+ * leds-ab5500.c - driver for High Voltage (HV) LED in ST-Ericsson AB5500 chip
+ *
+ * Copyright (C) 2011 ST-Ericsson SA.
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
+ */
+
+/*
+ * Driver for HVLED in ST-Ericsson AB5500 analog baseband controller
+ *
+ * This chip can drive up to 3 LEDs, each with up to 40 mA of LED sink
+ * current. These LEDs can be programmed to blink between two intensities
+ * with a fading delay of half, one or two seconds.
+ *
+ * The LEDs can be controlled via sysfs entries in
+ * "/sys/class/leds/< red | green | blue >"
+ *
+ * For each led,
+ *
+ * Modes of operation:
+ * - manual: echo 0 > fade_auto (default, no auto blinking)
+ * - auto: echo 1 > fade_auto
+ *
+ * Soft scaling delay between two intensities:
+ * - 1/2 sec: echo 1 > fade_delay
+ * - 1 sec: echo 2 > fade_delay
+ * - 2 sec: echo 3 > fade_delay
+ *
+ * Possible sequence of operation:
+ * - continuous glow: set brightness (brt)
+ * - blink between LED_OFF and LED_FULL:
+ * set fade delay -> set fade auto
+ * - blink between the previous two brightness values (only for LED-1):
+ * set brt1 -> set brt2 -> set fade auto
+ *
+ * The delay can be set at any step; its effect is seen when switching mode.
+ *
+ * Note: the blink/fade feature is supported from AB5500 v2 onwards.
+ *
+ */
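The sysfs sequence documented in the comment above can be driven from user space as in the following illustrative sketch. It is not part of this patch, and the LED name "red" (coming from platform data) is an assumption here.

#include <stdio.h>

/* Write a string value to one sysfs attribute of the LED. */
static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* 1 second soft scaling between the two intensities */
	write_attr("/sys/class/leds/red/fade_delay", "2");
	/* blink between LED_OFF and LED_FULL */
	write_attr("/sys/class/leds/red/fade_auto", "1");
	/* back to manual mode: continuous glow at half intensity */
	write_attr("/sys/class/leds/red/fade_auto", "0");
	write_attr("/sys/class/leds/red/brightness", "128");
	return 0;
}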
+
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/leds-ab5500.h>
+#include <linux/types.h>
+
+#include <mach/hardware.h>
+
+#define AB5500LED_NAME "ab5500-leds"
+#define AB5500_LED_MAX 0x03
+
+/* Register offsets */
+#define AB5500_LED_REG_ENABLE 0x03
+#define AB5500_LED_FADE_CTRL 0x0D
+
+/* LED-0 Register Addr. Offsets */
+#define AB5500_LED0_PWM_DUTY 0x01
+#define AB5500_LED0_PWMFREQ 0x02
+#define AB5500_LED0_SINKCTL 0x0A
+#define AB5500_LED0_FADE_HI 0x11
+#define AB5500_LED0_FADE_LO 0x17
+
+/* LED-1 Register Addr. Offsets */
+#define AB5500_LED1_PWM_DUTY 0x05
+#define AB5500_LED1_PWMFREQ 0x06
+#define AB5500_LED1_SINKCTL 0x0B
+#define AB5500_LED1_FADE_HI 0x13
+#define AB5500_LED1_FADE_LO 0x19
+
+/* LED-2 Register Addr. Offsets */
+#define AB5500_LED2_PWM_DUTY 0x08
+#define AB5500_LED2_PWMFREQ 0x09
+#define AB5500_LED2_SINKCTL 0x0C
+#define AB5500_LED2_FADE_HI 0x15
+#define AB5500_LED2_FADE_LO 0x1B
+
+/* led-0/1/2 enable bit */
+#define AB5500_LED_ENABLE_MASK 0x04
+
+/* led intensity */
+#define AB5500_LED_INTENSITY_OFF 0x0
+#define AB5500_LED_INTENSITY_MAX 0x3FF
+#define AB5500_LED_INTENSITY_STEP (AB5500_LED_INTENSITY_MAX/LED_FULL)
+
+/* pwm frequency */
+#define AB5500_LED_PWMFREQ_MAX 0x0F /* 373.39 @sysclk=26MHz */
+#define AB5500_LED_PWMFREQ_SHIFT 4
+
+/* LED sink current control */
+#define AB5500_LED_SINKCURR_MAX 0x0F /* 40mA MAX */
+#define AB5500_LED_SINKCURR_SHIFT 4
+
+/* fade Control shift and masks */
+#define AB5500_FADE_DELAY_SHIFT 0x00
+#define AB5500_FADE_MODE_MASK 0x80
+#define AB5500_FADE_DELAY_MASK 0x03
+#define AB5500_FADE_START_MASK 0x04
+#define AB5500_FADE_ON_MASK 0x70
+#define AB5500_LED_FADE_ENABLE(ledid) (0x40 >> (ledid))
+
+struct ab5500_led {
+ u8 id;
+ u8 max_current;
+ u16 brt_val;
+ u16 fade_hi;
+ u16 fade_lo;
+ bool led_on;
+ struct led_classdev led_cdev;
+ struct work_struct led_work;
+};
+
+struct ab5500_hvleds {
+ struct mutex lock;
+ struct device *dev;
+ struct ab5500_hvleds_platform_data *pdata;
+ struct ab5500_led leds[AB5500_HVLEDS_MAX];
+ bool hw_fade;
+ bool fade_auto;
+ enum ab5500_fade_delay fade_delay;
+};
+
+static u8 ab5500_led_pwmduty_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_PWM_DUTY,
+ AB5500_LED1_PWM_DUTY,
+ AB5500_LED2_PWM_DUTY,
+};
+
+static u8 ab5500_led_pwmfreq_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_PWMFREQ,
+ AB5500_LED1_PWMFREQ,
+ AB5500_LED2_PWMFREQ,
+};
+
+static u8 ab5500_led_sinkctl_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_SINKCTL,
+ AB5500_LED1_SINKCTL,
+ AB5500_LED2_SINKCTL
+};
+
+static u8 ab5500_led_fade_hi_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_FADE_HI,
+ AB5500_LED1_FADE_HI,
+ AB5500_LED2_FADE_HI,
+};
+
+static u8 ab5500_led_fade_lo_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_FADE_LO,
+ AB5500_LED1_FADE_LO,
+ AB5500_LED2_FADE_LO,
+};
+
+#define to_led(_x) container_of(_x, struct ab5500_led, _x)
+
+static inline struct ab5500_hvleds *led_to_hvleds(struct ab5500_led *led)
+{
+ return container_of(led, struct ab5500_hvleds, leds[led->id]);
+}
+
+static int ab5500_led_enable(struct ab5500_hvleds *hvleds,
+ unsigned int led_id)
+{
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id],
+ AB5500_LED_ENABLE_MASK,
+ AB5500_LED_ENABLE_MASK);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmduty_reg[led_id], ret);
+
+ return ret;
+
+}
+
+static int ab5500_led_start_manual(struct ab5500_hvleds *hvleds)
+{
+ int ret;
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_mask_and_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_FADE_CTRL, AB5500_FADE_START_MASK,
+ AB5500_FADE_START_MASK);
+ if (ret < 0)
+ dev_err(hvleds->dev, "update reg 0x%x failed - %d\n",
+ AB5500_LED_FADE_CTRL, ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_disable(struct ab5500_hvleds *hvleds,
+ unsigned int led_id)
+{
+ int ret;
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id] - 1, 0);
+ ret |= abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id], 0);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmduty_reg[led_id], ret);
+
+ return ret;
+}
+
+static int ab5500_led_pwmduty_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, u16 val)
+{
+ int ret;
+ u8 val_lsb = val & 0xFF;
+ u8 val_msb = (val & 0x300) >> 8;
+
+ mutex_lock(&hvleds->lock);
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val = %d\n"
+ "reg[%d] w val = %d\n",
+ ab5500_led_pwmduty_reg[led_id] - 1, val_lsb,
+ ab5500_led_pwmduty_reg[led_id], val_msb);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id] - 1, val_lsb);
+ ret |= abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id], val_msb);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmduty_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_pwmfreq_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, u8 val)
+{
+ int ret;
+
+ val = (val & 0x0F) << AB5500_LED_PWMFREQ_SHIFT;
+
+ mutex_lock(&hvleds->lock);
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val=%d\n",
+ ab5500_led_pwmfreq_reg[led_id], val);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmfreq_reg[led_id], val);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmfreq_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_sinkctl_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, u8 val)
+{
+ int ret;
+
+ if (val > AB5500_LED_SINKCURR_MAX)
+ val = AB5500_LED_SINKCURR_MAX;
+
+ val = (val << AB5500_LED_SINKCURR_SHIFT);
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val=%d\n",
+ ab5500_led_sinkctl_reg[led_id], val);
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_sinkctl_reg[led_id], val);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_sinkctl_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_fade_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, bool on, u16 val)
+{
+ int ret;
+ int val_lsb = val & 0xFF;
+ int val_msb = (val & 0x300) >> 8;
+ u8 *fade_reg;
+
+ if (on)
+ fade_reg = ab5500_led_fade_hi_reg;
+ else
+ fade_reg = ab5500_led_fade_lo_reg;
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val = %d\n"
+ "reg[%d] w val = %d\n",
+ fade_reg[led_id] - 1, val_lsb,
+ fade_reg[led_id], val_msb);
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ fade_reg[led_id] - 1, val_lsb);
+ ret |= abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ fade_reg[led_id], val_msb);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ fade_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_sinkctl_read(struct ab5500_hvleds *hvleds,
+ unsigned int led_id)
+{
+ int ret;
+ u8 val;
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_get_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_sinkctl_reg[led_id], &val);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] r failed: %d\n",
+ ab5500_led_sinkctl_reg[led_id], ret);
+ mutex_unlock(&hvleds->lock);
+ return ret;
+ }
+
+ val = (val & 0xF0) >> AB5500_LED_SINKCURR_SHIFT;
+
+ mutex_unlock(&hvleds->lock);
+
+ return val;
+}
+
+static void ab5500_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct ab5500_led *led = to_led(led_cdev);
+
+	/* mask to 0..LED_FULL and scale to the 10-bit intensity range */
+ brt_val &= LED_FULL;
+ led->brt_val = brt_val * AB5500_LED_INTENSITY_STEP;
+
+ schedule_work(&led->led_work);
+}
+
+static void ab5500_led_work(struct work_struct *led_work)
+{
+ struct ab5500_led *led = to_led(led_work);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (led->led_on == true) {
+ ab5500_led_pwmduty_write(hvleds, led->id, led->brt_val);
+ if (hvleds->hw_fade && led->brt_val) {
+ ab5500_led_enable(hvleds, led->id);
+ ab5500_led_start_manual(hvleds);
+ }
+ }
+}
+
+static ssize_t ab5500_led_show_current(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int led_curr = 0;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ led_curr = ab5500_led_sinkctl_read(hvleds, led->id);
+
+ if (led_curr < 0)
+ return led_curr;
+
+ return sprintf(buf, "%d\n", led_curr);
+}
+
+static ssize_t ab5500_led_store_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ int ret;
+ unsigned long led_curr;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (strict_strtoul(buf, 0, &led_curr))
+ return -EINVAL;
+
+ if (led_curr > led->max_current)
+ led_curr = led->max_current;
+
+ ret = ab5500_led_sinkctl_write(hvleds, led->id, led_curr);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static ssize_t ab5500_led_store_fade_auto(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ int ret;
+ u8 fade_ctrl = 0;
+ unsigned long fade_auto;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (strict_strtoul(buf, 0, &fade_auto))
+ return -EINVAL;
+
+ if (fade_auto > 1) {
+ dev_err(hvleds->dev, "invalid mode\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_get_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_FADE_CTRL, &fade_ctrl);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ AB5500_LED_FADE_CTRL, ret);
+ goto unlock_and_return;
+ }
+
+ /* manual mode */
+ if (fade_auto == false) {
+ fade_ctrl &= ~(AB5500_LED_FADE_ENABLE(led->id));
+ if (!(fade_ctrl & AB5500_FADE_ON_MASK))
+ fade_ctrl = 0;
+
+ ret = ab5500_led_disable(hvleds, led->id);
+ if (ret < 0)
+ goto unlock_and_return;
+ } else {
+ /* set led auto enable bit */
+ fade_ctrl |= AB5500_FADE_MODE_MASK;
+ fade_ctrl |= AB5500_LED_FADE_ENABLE(led->id);
+
+ /* set fade delay */
+ fade_ctrl &= ~AB5500_FADE_DELAY_MASK;
+ fade_ctrl |= hvleds->fade_delay << AB5500_FADE_DELAY_SHIFT;
+
+ /* set fade start manual */
+ fade_ctrl |= AB5500_FADE_START_MASK;
+
+		/* enable the corresponding LED */
+ ret = ab5500_led_enable(hvleds, led->id);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ }
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_FADE_CTRL, fade_ctrl);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ AB5500_LED_FADE_CTRL, ret);
+ goto unlock_and_return;
+ }
+
+ hvleds->fade_auto = fade_auto;
+
+ ret = len;
+
+unlock_and_return:
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static ssize_t ab5500_led_show_fade_auto(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ return sprintf(buf, "%d\n", hvleds->fade_auto);
+}
+
+static ssize_t ab5500_led_store_fade_delay(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ unsigned long fade_delay;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (strict_strtoul(buf, 0, &fade_delay))
+ return -EINVAL;
+
+ if (fade_delay > AB5500_FADE_DELAY_TWOSEC) {
+ dev_err(hvleds->dev, "invalid mode\n");
+ return -EINVAL;
+ }
+
+ hvleds->fade_delay = fade_delay;
+
+ return len;
+}
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO,
+ ab5500_led_show_current, ab5500_led_store_current);
+static DEVICE_ATTR(fade_auto, S_IRUGO | S_IWUGO,
+ ab5500_led_show_fade_auto, ab5500_led_store_fade_auto);
+static DEVICE_ATTR(fade_delay, S_IWUGO,
+		   NULL, ab5500_led_store_fade_delay);
+
+static int ab5500_led_init_registers(struct ab5500_hvleds *hvleds)
+{
+ int ret = 0;
+ unsigned int led_id;
+
+ /* fade - manual : dur mid : pwm duty mid */
+ if (!hvleds->hw_fade) {
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_REG_ENABLE, true);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ AB5500_LED_REG_ENABLE, ret);
+ return ret;
+ }
+ }
+
+ for (led_id = 0; led_id < AB5500_HVLEDS_MAX; led_id++) {
+ if (hvleds->leds[led_id].led_on == false)
+ continue;
+
+ ret = ab5500_led_sinkctl_write(
+ hvleds, led_id,
+ hvleds->leds[led_id].max_current);
+ if (ret < 0)
+ return ret;
+
+ if (hvleds->hw_fade) {
+ ret = ab5500_led_pwmfreq_write(
+ hvleds, led_id,
+ AB5500_LED_PWMFREQ_MAX / 2);
+ if (ret < 0)
+ return ret;
+
+ /* fade high intensity */
+ ret = ab5500_led_fade_write(
+ hvleds, led_id, true,
+ hvleds->leds[led_id].fade_hi);
+ if (ret < 0)
+ return ret;
+
+ /* fade low intensity */
+ ret = ab5500_led_fade_write(
+ hvleds, led_id, false,
+ hvleds->leds[led_id].fade_lo);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* init led off */
+ ret |= ab5500_led_pwmduty_write(
+ hvleds, led_id, AB5500_LED_INTENSITY_OFF);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ab5500_led_register_leds(struct device *dev,
+ struct ab5500_hvleds_platform_data *pdata,
+ struct ab5500_hvleds *hvleds)
+{
+ int i_led;
+ int ret = 0;
+ struct ab5500_led_conf *pled;
+ struct ab5500_led *led;
+
+ hvleds->dev = dev;
+ hvleds->pdata = pdata;
+
+ if (abx500_get_chip_id(dev) == AB5500_2_0)
+ hvleds->hw_fade = true;
+ else
+ hvleds->hw_fade = false;
+
+ for (i_led = 0; i_led < AB5500_HVLEDS_MAX; i_led++) {
+ pled = &pdata->leds[i_led];
+ led = &hvleds->leds[i_led];
+
+ INIT_WORK(&led->led_work, ab5500_led_work);
+
+ led->id = pled->led_id;
+ led->max_current = pled->max_current;
+ led->led_on = pled->led_on;
+ led->led_cdev.name = pled->name;
+ led->led_cdev.brightness_set = ab5500_led_brightness_set;
+
+ /* Provide interface only for enabled LEDs */
+ if (led->led_on == false)
+ continue;
+
+ if (hvleds->hw_fade) {
+ led->fade_hi = (pled->fade_hi & LED_FULL);
+ led->fade_hi *= AB5500_LED_INTENSITY_STEP;
+ led->fade_lo = (pled->fade_lo & LED_FULL);
+ led->fade_lo *= AB5500_LED_INTENSITY_STEP;
+ }
+
+ ret = led_classdev_register(dev, &led->led_cdev);
+ if (ret < 0) {
+ dev_err(dev, "Register led class failed: %d\n", ret);
+ goto bailout1;
+ }
+
+ ret = device_create_file(led->led_cdev.dev,
+ &dev_attr_led_current);
+ if (ret < 0) {
+ dev_err(dev, "sysfs device creation failed: %d\n", ret);
+ goto bailout2;
+ }
+
+ if (hvleds->hw_fade) {
+ ret = device_create_file(led->led_cdev.dev,
+ &dev_attr_fade_auto);
+ if (ret < 0) {
+ dev_err(dev, "sysfs device "
+ "creation failed: %d\n", ret);
+ goto bailout3;
+ }
+
+ ret = device_create_file(led->led_cdev.dev,
+ &dev_attr_fade_delay);
+ if (ret < 0) {
+ dev_err(dev, "sysfs device "
+ "creation failed: %d\n", ret);
+ goto bailout4;
+ }
+ }
+ }
+
+ return ret;
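+	/*
+	 * Error unwind: the loop below is entered only through the
+	 * bailout labels used on failure above.
+	 */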
+ for (; i_led >= 0; i_led--) {
+ if (hvleds->leds[i_led].led_on == false)
+ continue;
+
+ if (hvleds->hw_fade) {
+ device_remove_file(hvleds->leds[i_led].led_cdev.dev,
+ &dev_attr_fade_delay);
+bailout4:
+ device_remove_file(hvleds->leds[i_led].led_cdev.dev,
+ &dev_attr_fade_auto);
+ }
+bailout3:
+ device_remove_file(hvleds->leds[i_led].led_cdev.dev,
+ &dev_attr_led_current);
+bailout2:
+ led_classdev_unregister(&hvleds->leds[i_led].led_cdev);
+bailout1:
+ cancel_work_sync(&hvleds->leds[i_led].led_work);
+ }
+ return ret;
+}
+
+static int __devinit ab5500_hvleds_probe(struct platform_device *pdev)
+{
+ struct ab5500_hvleds_platform_data *pdata = pdev->dev.platform_data;
+ struct ab5500_hvleds *hvleds = NULL;
+ int ret = 0, i;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data required\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ hvleds = kzalloc(sizeof(struct ab5500_hvleds), GFP_KERNEL);
+ if (hvleds == NULL) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ mutex_init(&hvleds->lock);
+
+ /* init leds data and register led_classdev */
+ ret = ab5500_led_register_leds(&pdev->dev, pdata, hvleds);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "leds registration failed\n");
+ goto err_out;
+ }
+
+ /* init device registers and set initial led current */
+ ret = ab5500_led_init_registers(hvleds);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "reg init failed: %d\n", ret);
+ goto err_reg_init;
+ }
+
+	platform_set_drvdata(pdev, hvleds);
+
+	if (hvleds->hw_fade)
+ dev_info(&pdev->dev, "v2 enabled\n");
+ else
+ dev_info(&pdev->dev, "v1 enabled\n");
+
+ return ret;
+
+err_reg_init:
+ for (i = 0; i < AB5500_HVLEDS_MAX; i++) {
+ struct ab5500_led *led = &hvleds->leds[i];
+
+ if (led->led_on == false)
+ continue;
+
+ device_remove_file(led->led_cdev.dev, &dev_attr_led_current);
+ if (hvleds->hw_fade) {
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_auto);
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_delay);
+ }
+ led_classdev_unregister(&led->led_cdev);
+ cancel_work_sync(&led->led_work);
+ }
+err_out:
+ kfree(hvleds);
+ return ret;
+}
+
+static int __devexit ab5500_hvleds_remove(struct platform_device *pdev)
+{
+ struct ab5500_hvleds *hvleds = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < AB5500_HVLEDS_MAX; i++) {
+ struct ab5500_led *led = &hvleds->leds[i];
+
+ if (led->led_on == false)
+ continue;
+
+ device_remove_file(led->led_cdev.dev, &dev_attr_led_current);
+ if (hvleds->hw_fade) {
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_auto);
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_delay);
+ }
+ led_classdev_unregister(&led->led_cdev);
+ cancel_work_sync(&led->led_work);
+ }
+ kfree(hvleds);
+ return 0;
+}
+
+static struct platform_driver ab5500_hvleds_driver = {
+ .driver = {
+ .name = AB5500LED_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_hvleds_probe,
+ .remove = __devexit_p(ab5500_hvleds_remove),
+};
+
+static int __init ab5500_hvleds_module_init(void)
+{
+ return platform_driver_register(&ab5500_hvleds_driver);
+}
+
+static void __exit ab5500_hvleds_module_exit(void)
+{
+ platform_driver_unregister(&ab5500_hvleds_driver);
+}
+
+module_init(ab5500_hvleds_module_init);
+module_exit(ab5500_hvleds_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>");
+MODULE_DESCRIPTION("Driver for AB5500 HVLED");
+
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index e59c166a0ce..e0c034fc226 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
+#include <linux/gpio.h>
#define LM3530_LED_DEV "lcd-backlight"
#define LM3530_NAME "lm3530-led"
@@ -99,6 +100,7 @@ static struct lm3530_mode_map mode_map[] = {
* @mode: mode of operation - manual, ALS, PWM
* @regulator: regulator
* @brighness: previous brightness value
+ * @hw_en_gpio: GPIO line for LM3530 HWEN
* @enable: regulator is enabled
*/
struct lm3530_data {
@@ -108,6 +110,7 @@ struct lm3530_data {
enum lm3530_mode mode;
struct regulator *regulator;
enum led_brightness brightness;
+ int hw_en_gpio;
bool enable;
};
@@ -150,7 +153,7 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
u8 als_imp_sel = 0;
u8 brightness;
u8 reg_val[LM3530_REG_MAX];
- u8 zones[LM3530_ALS_ZB_MAX];
+ u8 zones[LM3530_ALS_ZB_MAX] = {0};
u32 als_vmin, als_vmax, als_vstep;
struct lm3530_platform_data *pltfm = drvdata->pdata;
struct i2c_client *client = drvdata->client;
@@ -224,6 +227,8 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
reg_val[14] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
if (!drvdata->enable) {
+ if (drvdata->hw_en_gpio != LM3530_NO_HWEN_GPIO)
+ gpio_set_value(drvdata->hw_en_gpio, 1);
ret = regulator_enable(drvdata->regulator);
if (ret) {
dev_err(&drvdata->client->dev,
@@ -276,6 +281,8 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev,
if (err)
dev_err(&drvdata->client->dev,
"Disable regulator failed\n");
+ if (drvdata->hw_en_gpio != LM3530_NO_HWEN_GPIO)
+ gpio_set_value(drvdata->hw_en_gpio, 0);
drvdata->enable = false;
}
break;
@@ -377,12 +384,22 @@ static int __devinit lm3530_probe(struct i2c_client *client,
drvdata->client = client;
drvdata->pdata = pdata;
drvdata->brightness = LED_OFF;
+ drvdata->hw_en_gpio = pdata->hw_en_gpio;
drvdata->enable = false;
drvdata->led_dev.name = LM3530_LED_DEV;
drvdata->led_dev.brightness_set = lm3530_brightness_set;
i2c_set_clientdata(client, drvdata);
+ if (gpio_is_valid(drvdata->hw_en_gpio)) {
+ err = gpio_request_one(drvdata->hw_en_gpio, GPIOF_OUT_INIT_HIGH,
+ "lm3530_hw_en");
+ if (err < 0) {
+ dev_err(&client->dev, "lm3530 hw_en gpio failed: %d\n", err);
+ goto err_gpio_request;
+ }
+ }
+
drvdata->regulator = regulator_get(&client->dev, "vin");
if (IS_ERR(drvdata->regulator)) {
dev_err(&client->dev, "regulator get failed\n");
@@ -422,6 +439,10 @@ err_class_register:
err_reg_init:
regulator_put(drvdata->regulator);
err_regulator_get:
+ if (gpio_is_valid(drvdata->hw_en_gpio))
+ gpio_free(drvdata->hw_en_gpio);
+err_gpio_request:
+ i2c_set_clientdata(client, NULL);
kfree(drvdata);
err_out:
return err;
@@ -436,6 +457,8 @@ static int __devexit lm3530_remove(struct i2c_client *client)
if (drvdata->enable)
regulator_disable(drvdata->regulator);
regulator_put(drvdata->regulator);
+ if (gpio_is_valid(drvdata->hw_en_gpio))
+ gpio_free(drvdata->hw_en_gpio);
led_classdev_unregister(&drvdata->led_dev);
kfree(drvdata);
return 0;
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index d62a7982a5e..0c13a08766b 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -361,7 +361,12 @@ static int lp5521_do_store_load(struct lp5521_engine *engine,
while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
/* separate sscanfs because length is working only for %s */
ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
- if (ret != 2)
+ /*
+		 * Execution of a %n directive does not always
+		 * increment the assignment count returned on
+		 * completion, so ret may be either 1 or 2.
+ */
+ if ((ret != 1) && (ret != 2))
goto fail;
ret = sscanf(c, "%2x", &cmd);
if (ret != 1)
@@ -695,6 +700,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
if (buf != LP5521_REG_R_CURR_DEFAULT) {
dev_err(&client->dev, "error in reseting chip\n");
+ ret = -EIO;
goto fail2;
}
usleep_range(10000, 20000);
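The sscanf()/%n behaviour referenced in the comment of the first hunk above can be checked with a small standalone program. This is only an illustrative sketch, not part of the patch, and the input string is invented.

#include <stdio.h>

int main(void)
{
	char c[3];
	int nrchars = -1;
	int ret;

	/* Same directive pattern as lp5521_do_store_load(): read two
	 * characters, then record the number of bytes consumed via %n. */
	ret = sscanf("4a 62", "%2s%n ", c, &nrchars);

	/* C99 does not count %n in the value returned by sscanf(), so
	 * ret is normally 1 here; some C libraries have returned 2. */
	printf("ret=%d token=%s consumed=%d\n", ret, c, nrchars);
	return 0;
}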
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 3ed92f34bd4..4d086e87bc1 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -27,6 +27,7 @@ struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
unsigned int active_low;
+ unsigned int lth_brightness;
unsigned int period;
};
@@ -42,7 +43,10 @@ static void led_pwm_set(struct led_classdev *led_cdev,
pwm_config(led_dat->pwm, 0, period);
pwm_disable(led_dat->pwm);
} else {
- pwm_config(led_dat->pwm, brightness * period / max, period);
+ brightness = led_dat->lth_brightness + (brightness *
+ (led_dat->period - led_dat->lth_brightness) / max);
+ pwm_config(led_dat->pwm, brightness, led_dat->period);
+
pwm_enable(led_dat->pwm);
}
}
@@ -79,6 +83,8 @@ static int led_pwm_probe(struct platform_device *pdev)
led_dat->cdev.default_trigger = cur_led->default_trigger;
led_dat->active_low = cur_led->active_low;
led_dat->period = cur_led->pwm_period_ns;
+ led_dat->lth_brightness = cur_led->lth_brightness *
+ (cur_led->pwm_period_ns / cur_led->max_brightness);
led_dat->cdev.brightness_set = led_pwm_set;
led_dat->cdev.brightness = LED_OFF;
led_dat->cdev.max_brightness = cur_led->max_brightness;
diff --git a/drivers/media/radio/CG2900/Makefile b/drivers/media/radio/CG2900/Makefile
new file mode 100755
index 00000000000..60b12dd9c35
--- /dev/null
+++ b/drivers/media/radio/CG2900/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the CG2900 FM Radio Driver
+#
+
+radio_cg2900-objs := radio-cg2900.o cg2900_fm_api.o cg2900_fm_driver.o
+
+obj-$(CONFIG_RADIO_CG2900) += radio_cg2900.o
+
+ccflags-y := \
+	-Idrivers/staging/cg2900/include
+
+
diff --git a/drivers/media/radio/CG2900/cg2900_fm_api.c b/drivers/media/radio/CG2900/cg2900_fm_api.c
new file mode 100644
index 00000000000..c18caffdaa7
--- /dev/null
+++ b/drivers/media/radio/CG2900/cg2900_fm_api.c
@@ -0,0 +1,3205 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Linux FM Host API's for ST-Ericsson FM Chip.
+ *
+ * Author: Hemant Gupta <hemant.gupta@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include "cg2900_fm_driver.h"
+
+#define CG2900_FM_BT_SRC_COEFF_INFO_FILE "cg2900_fm_bt_src_coeff_info.fw"
+#define CG2900_FM_EXT_SRC_COEFF_INFO_FILE "cg2900_fm_ext_src_coeff_info.fw"
+#define CG2900_FM_FM_COEFF_INFO_FILE "cg2900_fm_fm_coeff_info.fw"
+#define CG2900_FM_FM_PROG_INFO_FILE "cg2900_fm_fm_prog_info.fw"
+#define CG2900_FM_LINE_BUFFER_LENGTH 128
+#define CG2900_FM_FILENAME_MAX 128
+#define FW_FILE_PARAM_LEN 3
+/* RDS Tx PTY set to Other music */
+#define OTHER_MUSIC 15
+#define DEFAULT_AUDIO_DEVIATION 0x1AA9
+#define DEFAULT_NOTIFICATION_HOLD_OFF_TIME 0x000A
+
+static bool fm_rds_status;
+static bool fm_prev_rds_status;
+static u16 program_identification_code;
+static u16 default_program_identification_code = 0x1234;
+static u16 program_type_code;
+static u16 default_program_type_code = OTHER_MUSIC;
+static char program_service[MAX_PSN_SIZE];
+static char default_program_service[MAX_PSN_SIZE] = "FM-Xmit ";
+static char radio_text[MAX_RT_SIZE];
+static char default_radio_text[MAX_RT_SIZE] = "Default Radio Text "
+ "Default Radio Text Default Radio Text Default";
+static bool a_b_flag;
+u8 fm_event;
+static struct mutex rds_mutex;
+struct cg2900_fm_rds_buf fm_rds_buf[MAX_RDS_BUFFER][MAX_RDS_GROUPS];
+struct cg2900_fm_rds_info fm_rds_info;
+static enum cg2900_fm_state fm_state;
+static enum cg2900_fm_mode fm_mode;
+static struct cg2900_version_info version_info;
+
+/**
+ * cg2900_fm_get_one_line_of_text()- Get one line of text from a buffer.
+ *
+ * Replacement for the stdio function fgets. This function extracts one
+ * line of text from the input buffer.
+ *
+ * @wr_buffer: Buffer to copy text to.
+ * @max_nbr_of_bytes: Max number of bytes to copy, i.e. size of wr_buffer.
+ * @rd_buffer: Data to parse.
+ * @bytes_copied: Number of bytes copied to wr_buffer.
+ *
+ * Returns:
+ * Pointer to next data to read.
+ */
+static char *cg2900_fm_get_one_line_of_text(
+ char *wr_buffer,
+ int max_nbr_of_bytes,
+ char *rd_buffer,
+ int *bytes_copied
+ )
+{
+ char *curr_wr = wr_buffer;
+ char *curr_rd = rd_buffer;
+ char in_byte;
+
+ *bytes_copied = 0;
+
+ do {
+ *curr_wr = *curr_rd;
+ in_byte = *curr_wr;
+ curr_wr++;
+ curr_rd++;
+ (*bytes_copied)++;
+ } while ((*bytes_copied <= max_nbr_of_bytes) && (in_byte != '\0')
+ && (in_byte != '\n'));
+ *curr_wr = '\0';
+ return curr_rd;
+}
+
+/**
+ * cg2900_fm_get_file_to_load() - Parse info file and find correct target file.
+ *
+ * @fw: Firmware structure containing file data.
+ * @file_name: (out) Pointer to name of requested file.
+ *
+ * Returns:
+ * True, if target file was found,
+ * False, otherwise.
+ */
+static bool cg2900_fm_get_file_to_load(
+ const struct firmware *fw,
+ char **file_name
+ )
+{
+ char *line_buffer;
+ char *curr_file_buffer;
+ int bytes_left_to_parse = fw->size;
+ int bytes_read = 0;
+ bool file_found = false;
+
+ curr_file_buffer = (char *)&(fw->data[0]);
+
+ line_buffer = kmalloc(CG2900_FM_LINE_BUFFER_LENGTH,
+ GFP_KERNEL);
+
+ if (line_buffer == NULL) {
+ FM_ERR_REPORT("Failed to allocate:"
+ "file_name 0x%X, line_buffer 0x%X",
+ (unsigned int)file_name,
+ (unsigned int)line_buffer);
+ goto error;
+ }
+
+ while (!file_found) {
+ /* Get one line of text from the file to parse */
+ curr_file_buffer =
+ cg2900_fm_get_one_line_of_text(line_buffer,
+ min
+ (CG2900_FM_LINE_BUFFER_LENGTH,
+ (int)(fw->size -
+ bytes_read)),
+ curr_file_buffer,
+ &bytes_read);
+
+ bytes_left_to_parse -= bytes_read;
+ if (bytes_left_to_parse <= 0) {
+ /* End of file => Leave while loop */
+ FM_ERR_REPORT("Reached end of file."
+ "No file found!");
+ break;
+ }
+
+ /*
+ * Check if the line of text is a comment
+ * or not, comments begin with '#'
+ */
+ if (*line_buffer != '#') {
+ u32 hci_rev = 0;
+ u32 lmp_sub = 0;
+
+ FM_DEBUG_REPORT("Found a valid line <%s>",
+ line_buffer);
+
+ /*
+ * Check if we can find the correct
+ * HCI revision and LMP subversion
+ * as well as a file name in the text line
+ * Store the filename if the actual file can
+ * be found in the file system
+ */
+ if (sscanf(line_buffer, "%x%x%s",
+ (unsigned int *)&hci_rev,
+ (unsigned int *)&lmp_sub,
+ *file_name) == FW_FILE_PARAM_LEN
+ && hci_rev == version_info.revision
+ && lmp_sub == version_info.sub_version) {
+ FM_INFO_REPORT("File name = %s "
+ "HCI Revision"
+ "= 0x%04X LMP "
+ "Subversion = 0x%04X",
+ *file_name,
+ (unsigned int)hci_rev,
+ (unsigned int)lmp_sub);
+
+ /*
+ * Name has already been stored above.
+ * Nothing more to do
+ */
+ file_found = true;
+ } else {
+				/*
+				 * Zero the name buffer so it is
+				 * clean for the next read
+				 */
+ memset(*file_name, 0x00,
+ CG2900_FM_FILENAME_MAX);
+ }
+ }
+ }
+ kfree(line_buffer);
+error:
+ return file_found;
+}
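For illustration only (not part of the patch): the *_info.fw files parsed by cg2900_fm_get_file_to_load() are expected to contain '#' comment lines and data lines of the form "<hci_rev> <lmp_subversion> <file name>". The standalone sketch below walks such a buffer with the same sscanf() pattern; the revision values and file name are invented.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *info =
		"# HCI rev  LMP sub  patch file\n"
		"0x0101 0x0005 cg2900_fm_fm_prog_example.fw\n";
	const char *p = info;
	char line[128];
	char name[128];
	unsigned int hci_rev, lmp_sub;
	size_t n;

	while (*p) {
		/* copy one line, as cg2900_fm_get_one_line_of_text() does */
		n = strcspn(p, "\n");
		if (n >= sizeof(line))
			n = sizeof(line) - 1;
		memcpy(line, p, n);
		line[n] = '\0';
		p += n;
		if (*p == '\n')
			p++;

		if (line[0] == '#')
			continue;	/* comment line */
		if (sscanf(line, "%x%x%127s", &hci_rev, &lmp_sub, name) == 3)
			printf("rev=0x%04X sub=0x%04X file=%s\n",
			       hci_rev, lmp_sub, name);
	}
	return 0;
}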
+
+/**
+ * cg2900_fm_load_firmware() - Loads the FM coefficients and firmware file(s)
+ *
+ * @device: Pointer to char device requesting the operation.
+ *
+ * Returns:
+ * 0, if firmware download is successful
+ * -ENOENT, file not found.
+ * -ENOMEM, out of memory
+ */
+static int cg2900_fm_load_firmware(
+ struct device *device
+ )
+{
+ int err;
+ bool file_found;
+ int result = 0;
+ const struct firmware *bt_src_coeff_info;
+ const struct firmware *ext_src_coeff_info;
+ const struct firmware *fm_coeff_info;
+ const struct firmware *fm_prog_info;
+ char *bt_src_coeff_file_name = NULL;
+ char *ext_src_coeff_file_name = NULL;
+ char *fm_coeff_file_name = NULL;
+ char *fm_prog_file_name = NULL;
+
+ FM_INFO_REPORT("+cg2900_fm_load_firmware");
+
+ /* Open bt_src_coeff info file. */
+ err = request_firmware(&bt_src_coeff_info,
+ CG2900_FM_BT_SRC_COEFF_INFO_FILE, device);
+ if (err) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't get bt_src_coeff info file");
+ result = -ENOENT;
+ goto error;
+ }
+
+ /*
+ * Now we have the bt_src_coeff info file.
+ * See if we can find the right bt_src_coeff file as well
+ */
+ bt_src_coeff_file_name = kmalloc(CG2900_FM_FILENAME_MAX,
+ GFP_KERNEL);
+ if (bt_src_coeff_file_name == NULL) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't allocate memory for "
+ "bt_src_coeff_file_name");
+ release_firmware(bt_src_coeff_info);
+ result = -ENOMEM;
+ goto error;
+ }
+ file_found = cg2900_fm_get_file_to_load(bt_src_coeff_info,
+ &bt_src_coeff_file_name);
+
+ /* Now we are finished with the bt_src_coeff info file */
+ release_firmware(bt_src_coeff_info);
+
+ if (!file_found) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't find bt_src_coeff file!! "
+ "Major error!!!");
+ result = -ENOENT;
+ goto error;
+ }
+
+ /* Open ext_src_coeff info file. */
+ err = request_firmware(&ext_src_coeff_info,
+ CG2900_FM_EXT_SRC_COEFF_INFO_FILE, device);
+ if (err) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't get ext_src_coeff_info info file");
+ result = -ENOENT;
+ goto error;
+ }
+
+ /*
+ * Now we have the ext_src_coeff info file. See if we can
+ * find the right ext_src_coeff file as well
+ */
+ ext_src_coeff_file_name = kmalloc(CG2900_FM_FILENAME_MAX,
+ GFP_KERNEL);
+ if (ext_src_coeff_file_name == NULL) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't allocate memory for "
+ "ext_src_coeff_file_name");
+ release_firmware(ext_src_coeff_info);
+ result = -ENOMEM;
+ goto error;
+ }
+ file_found = cg2900_fm_get_file_to_load(ext_src_coeff_info,
+ &ext_src_coeff_file_name);
+
+ /* Now we are finished with the ext_src_coeff info file */
+ release_firmware(ext_src_coeff_info);
+
+ if (!file_found) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't find ext_src_coeff_info "
+ "file!!! Major error!");
+ result = -ENOENT;
+ goto error;
+ }
+
+ /* Open fm_coeff info file. */
+ err = request_firmware(&fm_coeff_info,
+ CG2900_FM_FM_COEFF_INFO_FILE, device);
+ if (err) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't get fm_coeff info file");
+ result = -ENOENT;
+ goto error;
+ }
+
+ /*
+ * Now we have the fm_coeff_info info file.
+ * See if we can find the right fm_coeff_info file as well
+ */
+ fm_coeff_file_name = kmalloc(CG2900_FM_FILENAME_MAX,
+ GFP_KERNEL);
+ if (fm_coeff_file_name == NULL) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't allocate memory for "
+ "fm_coeff_file_name");
+ release_firmware(fm_coeff_info);
+ result = -ENOMEM;
+ goto error;
+ }
+ file_found = cg2900_fm_get_file_to_load(fm_coeff_info,
+ &fm_coeff_file_name);
+
+ /* Now we are finished with the fm_coeff info file */
+ release_firmware(fm_coeff_info);
+
+ if (!file_found) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't find fm_coeff file!!! "
+ "Major error!");
+ result = -ENOENT;
+ goto error;
+ }
+
+ /* Open fm_prog info file. */
+ err = request_firmware(&fm_prog_info,
+ CG2900_FM_FM_PROG_INFO_FILE, device);
+ if (err) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't get fm_prog_info info file");
+ result = -ENOENT;
+ goto error;
+ }
+
+ /*
+ * Now we have the fm_prog info file.
+ * See if we can find the right fm_prog file as well
+ */
+ fm_prog_file_name = kmalloc(CG2900_FM_FILENAME_MAX,
+ GFP_KERNEL);
+ if (fm_prog_file_name == NULL) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't allocate memory for "
+ "fm_prog_file_name");
+ release_firmware(fm_prog_info);
+ result = -ENOMEM;
+ goto error;
+ }
+ file_found = cg2900_fm_get_file_to_load(fm_prog_info,
+ &fm_prog_file_name);
+
+ /* Now we are finished with fm_prog patch info file */
+ release_firmware(fm_prog_info);
+
+ if (!file_found) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't find fm_prog_info file!!! "
+ "Major error!");
+ result = -ENOENT;
+ goto error;
+ }
+
+ /* OK. Now it is time to download the firmware */
+ err = request_firmware(&bt_src_coeff_info,
+ bt_src_coeff_file_name, device);
+ if (err < 0) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't get bt_src_coeff file, err = %d", err);
+ result = -ENOENT;
+ goto error;
+ }
+
+ FM_INFO_REPORT("cg2900_fm_load_firmware: Downloading %s of %d bytes",
+ bt_src_coeff_file_name, bt_src_coeff_info->size);
+ if (fmd_send_fm_firmware((u8 *) bt_src_coeff_info->data,
+ bt_src_coeff_info->size)) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: Error in "
+ "downloading %s", bt_src_coeff_file_name);
+ release_firmware(bt_src_coeff_info);
+ result = -ENOENT;
+ goto error;
+ }
+
+	/* Now we are finished with the bt_src_coeff file */
+ release_firmware(bt_src_coeff_info);
+ err = request_firmware(&ext_src_coeff_info,
+ ext_src_coeff_file_name, device);
+ if (err < 0) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't get ext_src_coeff file, err = %d", err);
+ result = -ENOENT;
+ goto error;
+ }
+
+ FM_INFO_REPORT("cg2900_fm_load_firmware: Downloading %s of %d bytes",
+ ext_src_coeff_file_name, ext_src_coeff_info->size);
+ if (fmd_send_fm_firmware((u8 *) ext_src_coeff_info->data,
+ ext_src_coeff_info->size)) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: Error in "
+ "downloading %s", ext_src_coeff_file_name);
+ release_firmware(ext_src_coeff_info);
+ result = -ENOENT;
+ goto error;
+ }
+
+	/* Now we are finished with the ext_src_coeff file */
+ release_firmware(ext_src_coeff_info);
+
+ err = request_firmware(&fm_coeff_info, fm_coeff_file_name, device);
+ if (err < 0) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't get fm_coeff file, err = %d", err);
+ result = -ENOENT;
+ goto error;
+ }
+
+ FM_INFO_REPORT("cg2900_fm_load_firmware: Downloading %s of %d bytes",
+ fm_coeff_file_name, fm_coeff_info->size);
+ if (fmd_send_fm_firmware((u8 *) fm_coeff_info->data,
+ fm_coeff_info->size)) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: Error in "
+ "downloading %s", fm_coeff_file_name);
+ release_firmware(fm_coeff_info);
+ result = -ENOENT;
+ goto error;
+ }
+
+	/* Now we are finished with the fm_coeff file */
+ release_firmware(fm_coeff_info);
+
+ err = request_firmware(&fm_prog_info, fm_prog_file_name, device);
+ if (err < 0) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: "
+ "Couldn't get fm_prog file, err = %d", err);
+ result = -ENOENT;
+ goto error;
+ }
+
+ FM_INFO_REPORT("cg2900_fm_load_firmware: Downloading %s of %d bytes",
+ fm_prog_file_name, fm_prog_info->size);
+ if (fmd_send_fm_firmware((u8 *) fm_prog_info->data,
+ fm_prog_info->size)) {
+ FM_ERR_REPORT("cg2900_fm_load_firmware: Error in "
+ "downloading %s", fm_prog_file_name);
+ release_firmware(fm_prog_info);
+ result = -ENOENT;
+ goto error;
+ }
+
+	/* Now we are finished with the fm_prog file */
+ release_firmware(fm_prog_info);
+
+error:
+ /* Free Allocated memory */
+ if (bt_src_coeff_file_name != NULL)
+ kfree(bt_src_coeff_file_name);
+ if (ext_src_coeff_file_name != NULL)
+ kfree(ext_src_coeff_file_name);
+ if (fm_coeff_file_name != NULL)
+ kfree(fm_coeff_file_name);
+ if (fm_prog_file_name != NULL)
+ kfree(fm_prog_file_name);
+ FM_DEBUG_REPORT("-cg2900_fm_load_firmware: returning %d",
+ result);
+ return result;
+}
+
+/**
+ * cg2900_fm_transmit_rds_groups()- Transmits the RDS Groups.
+ *
+ * Stores the RDS groups in the chip's buffer; each group is
+ * transmitted every 87.6 ms.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise
+ */
+static int cg2900_fm_transmit_rds_groups(void)
+{
+ int result = 0;
+ u16 group_position = 0;
+ u8 block1[2];
+ u8 block2[2];
+ u8 block3[2];
+ u8 block4[2];
+ int index1 = 0;
+ int index2 = 0;
+ int group_0B_count = 0;
+ int group_2A_count = 0;
+
+ FM_INFO_REPORT("cg2900_fm_transmit_rds_groups");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_transmit_rds_groups: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ while (group_position < 20 && result == 0) {
+ if (group_position < 4) {
+ /* Transmit PSN in Group 0B */
+ block1[0] = program_identification_code;
+ block1[1] = program_identification_code >> 8;
+ /* M/S bit set to Music */
+ if (group_0B_count % 4 == 0) {
+ /* Manipulate DI bit */
+ block2[0] =
+ (0x08 | ((program_type_code & 0x07)
+ << 5))
+ + group_0B_count;
+ } else {
+ block2[0] =
+ (0x0C | ((program_type_code & 0x07)
+ << 5))
+ + group_0B_count;
+ }
+ block2[1] =
+ 0x08 | ((program_type_code & 0x18) >> 3);
+ block3[0] = program_identification_code;
+ block3[1] = program_identification_code >> 8;
+ block4[0] = program_service[index1 + 1];
+ block4[1] = program_service[index1 + 0];
+ index1 += 2;
+ group_0B_count++;
+ } else {
+ /* Transmit RT in Group 2A */
+ block1[0] = program_identification_code;
+ block1[1] = program_identification_code >> 8;
+ if (a_b_flag)
+ block2[0] = (0x10 |
+ ((program_type_code & 0x07)
+ << 5)) + group_2A_count;
+ else
+ block2[0] = (0x00 |
+ ((program_type_code & 0x07)
+ << 5)) + group_2A_count;
+ block2[1] = 0x20 | ((program_type_code & 0x18)
+ >> 3);
+ block3[0] = radio_text[index2 + 1];
+ block3[1] = radio_text[index2 + 0];
+ block4[0] = radio_text[index2 + 3];
+ block4[1] = radio_text[index2 + 2];
+ index2 += 4;
+ group_2A_count++;
+ }
+ FM_DEBUG_REPORT("%02x%02x "
+ "%02x%02x "
+ "%02x%02x "
+ "%02x%02x ",
+ block1[1], block1[0],
+ block2[1], block2[0],
+ block3[1], block3[0],
+ block4[1], block4[0]);
+ result = fmd_tx_set_group(
+ group_position,
+ block1,
+ block2,
+ block3,
+ block4);
+ group_position++;
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_transmit_rds_groups: "
+ "fmd_tx_set_group failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ break;
+ }
+ }
+ a_b_flag = !a_b_flag;
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_transmit_rds_groups: returning %d",
+ result);
+ return result;
+}
+
+/**
+ * cg2900_fm_check_rds_status()- Checks whether RDS was On previously
+ *
+ * This function is called on receiving the interrupt for seek completion,
+ * scan completion or block scan completion. It checks whether RDS was
+ * forcefully disabled before the operation started and, if RDS was
+ * previously enabled, turns RDS back on.
+ */
+static void cg2900_fm_check_rds_status(void)
+{
+ FM_INFO_REPORT("cg2900_fm_check_rds_status");
+ if (fm_prev_rds_status) {
+ /* Restart RDS if it was active previously */
+ cg2900_fm_rds_on();
+ fm_prev_rds_status = false;
+ }
+}
+
+/**
+ * cg2900_fm_driver_callback()- Callback function indicating the event.
+ *
+ * This callback function is called on receiving irpt_CommandSucceeded,
+ * irpt_CommandFailed, irpt_bufferFull, etc from FM chip.
+ * @event: event for which the callback function was called
+ * from FM Driver.
+ * @event_successful: Signifying whether the event is called from FM Driver
+ * on receiving irpt_OperationSucceeded or irpt_OperationFailed.
+ */
+static void cg2900_fm_driver_callback(
+ u8 event,
+ bool event_successful
+ )
+{
+ struct sk_buff *skb;
+
+ FM_INFO_REPORT("cg2900_fm_driver_callback: "
+ "event = %02x, event_successful = %x",
+ event, event_successful);
+
+ switch (event) {
+ case FMD_EVENT_GEN_POWERUP:
+ FM_DEBUG_REPORT("FMD_EVENT_GEN_POWERUP");
+ break;
+ case FMD_EVENT_ANTENNA_STATUS_CHANGED:
+ FM_DEBUG_REPORT("FMD_EVENT_ANTENNA_STATUS_CHANGED");
+ break;
+ case FMD_EVENT_FREQUENCY_CHANGED:
+ FM_DEBUG_REPORT("FMD_EVENT_FREQUENCY_CHANGED ");
+ break;
+ case FMD_EVENT_SEEK_STOPPED:
+ FM_DEBUG_REPORT("FMD_EVENT_SEEK_STOPPED");
+ skb = alloc_skb(SKB_FM_INTERRUPT_DATA,
+ GFP_KERNEL);
+ if (!skb) {
+ FM_ERR_REPORT("cg2900_fm_driver_callback: "
+ "Unable to Allocate Memory");
+ return;
+ }
+ skb->data[0] = CG2900_EVENT_SCAN_CANCELLED;
+ skb->data[1] = event_successful;
+ skb_queue_tail(&fm_interrupt_queue, skb);
+ wake_up_poll_queue();
+ break;
+ case FMD_EVENT_SEEK_COMPLETED:
+ FM_DEBUG_REPORT("FMD_EVENT_SEEK_COMPLETED");
+ cg2900_fm_check_rds_status();
+ skb = alloc_skb(SKB_FM_INTERRUPT_DATA,
+ GFP_KERNEL);
+ if (!skb) {
+ FM_ERR_REPORT("cg2900_fm_driver_callback: "
+ "Unable to Allocate Memory");
+ return;
+ }
+ skb->data[0] = CG2900_EVENT_SEARCH_CHANNEL_FOUND;
+ skb->data[1] = event_successful;
+ skb_queue_tail(&fm_interrupt_queue, skb);
+ wake_up_poll_queue();
+ break;
+ case FMD_EVENT_SCAN_BAND_COMPLETED:
+ FM_DEBUG_REPORT("FMD_EVENT_SCAN_BAND_COMPLETED");
+ cg2900_fm_check_rds_status();
+ skb = alloc_skb(SKB_FM_INTERRUPT_DATA,
+ GFP_KERNEL);
+ if (!skb) {
+ FM_ERR_REPORT("cg2900_fm_driver_callback: "
+ "Unable to Allocate Memory");
+ return;
+ }
+ skb->data[0] = CG2900_EVENT_SCAN_CHANNELS_FOUND;
+ skb->data[1] = event_successful;
+ skb_queue_tail(&fm_interrupt_queue, skb);
+ wake_up_poll_queue();
+ break;
+ case FMD_EVENT_BLOCK_SCAN_COMPLETED:
+ FM_DEBUG_REPORT("FMD_EVENT_BLOCK_SCAN_COMPLETED");
+ cg2900_fm_check_rds_status();
+ skb = alloc_skb(SKB_FM_INTERRUPT_DATA,
+ GFP_KERNEL);
+ if (!skb) {
+ FM_ERR_REPORT("cg2900_fm_driver_callback: "
+ "Unable to Allocate Memory");
+ return;
+ }
+ skb->data[0] = CG2900_EVENT_BLOCK_SCAN_CHANNELS_FOUND;
+ skb->data[1] = event_successful;
+ skb_queue_tail(&fm_interrupt_queue, skb);
+ wake_up_poll_queue();
+ break;
+ case FMD_EVENT_AF_UPDATE_SWITCH_COMPLETE:
+ FM_DEBUG_REPORT("FMD_EVENT_AF_UPDATE_SWITCH_COMPLETE");
+ break;
+ case FMD_EVENT_RDSGROUP_RCVD:
+ FM_DEBUG_REPORT("FMD_EVENT_RDSGROUP_RCVD");
+ /*
+ * Release the rds semaphore, poll queue
+ * will be woken-up in rds callback
+ */
+ fmd_set_rds_sem();
+ break;
+ case FMD_EVENT_MONO_STEREO_TRANSITION_COMPLETE:
+ FM_ERR_REPORT(
+ "FMD_EVENT_MONO_STEREO_TRANSITION_COMPLETE");
+ skb = alloc_skb(SKB_FM_INTERRUPT_DATA,
+ GFP_KERNEL);
+ if (!skb) {
+ FM_ERR_REPORT("cg2900_fm_driver_callback: "
+ "Unable to Allocate Memory");
+ return;
+ }
+ skb->data[0] = CG2900_EVENT_MONO_STEREO_TRANSITION;
+ skb->data[1] = event_successful;
+ skb_queue_tail(&fm_interrupt_queue, skb);
+ wake_up_poll_queue();
+ break;
+ default:
+ FM_INFO_REPORT("cg2900_fm_driver_callback: "
+ "Unknown event = %x", event);
+ break;
+ }
+}
+
+/**
+ * cg2900_fm_rds_callback()- Function to retrieve the RDS groups.
+ *
+ * This is called when the chip has received enough RDS groups
+ * so an interrupt irpt_BufferFull is generated to read the groups.
+ */
+static void cg2900_fm_rds_callback(void)
+{
+ u8 index = 0;
+ u16 rds_local_buf_count;
+ int result;
+ struct sk_buff *skb;
+
+ FM_INFO_REPORT("cg2900_fm_rds_callback");
+
+ /*
+	 * Wait till the RDS buffer full
+	 * interrupt is received
+ */
+ fmd_get_rds_sem();
+
+ if (!fm_rds_status)
+ return;
+
+ /* RDS Data available, Read the Groups */
+ mutex_lock(&rds_mutex);
+ result = fmd_int_bufferfull(&rds_local_buf_count);
+
+ if (0 != result)
+ goto error;
+
+ while (index < rds_local_buf_count) {
+ /*
+		 * Statuses are in reverse order because of the endianness
+		 * of the status bytes received from the chip
+ */
+ result = fmd_rx_get_low_level_rds_groups(
+ index,
+ &fm_rds_buf[fm_rds_info.rds_head][index].block1,
+ &fm_rds_buf[fm_rds_info.rds_head][index].block2,
+ &fm_rds_buf[fm_rds_info.rds_head][index].block3,
+ &fm_rds_buf[fm_rds_info.rds_head][index].block4,
+ &fm_rds_buf[fm_rds_info.rds_head][index].status2,
+ &fm_rds_buf[fm_rds_info.rds_head][index].status1,
+ &fm_rds_buf[fm_rds_info.rds_head][index].status4,
+ &fm_rds_buf[fm_rds_info.rds_head][index].status3);
+ FM_INFO_REPORT("%04x %04x %04x %04x %02x %02x %02x %02x",
+ fm_rds_buf[fm_rds_info.rds_head][index].block1,
+ fm_rds_buf[fm_rds_info.rds_head][index].block2,
+ fm_rds_buf[fm_rds_info.rds_head][index].block3,
+ fm_rds_buf[fm_rds_info.rds_head][index].block4,
+ fm_rds_buf[fm_rds_info.rds_head][index].status1,
+ fm_rds_buf[fm_rds_info.rds_head][index].status2,
+ fm_rds_buf[fm_rds_info.rds_head][index].status3,
+ fm_rds_buf[fm_rds_info.rds_head][index].status4);
+
+		if (0 != result)
+			goto error;
+
+ if (!fm_rds_status)
+			goto error;
+
+ index++;
+ }
+ fm_rds_info.rds_head++;
+ if (fm_rds_info.rds_head == MAX_RDS_BUFFER)
+ fm_rds_info.rds_head = 0;
+
+ /* Queue the RDS event */
+ skb = alloc_skb(SKB_FM_INTERRUPT_DATA,
+ GFP_KERNEL);
+ if (!skb) {
+ FM_ERR_REPORT("cg2900_fm_rds_callback: "
+ "Unable to Allocate Memory");
+ goto error;
+ }
+ skb->data[0] = CG2900_EVENT_RDS_EVENT;
+ skb->data[1] = true;
+ skb_queue_tail(&fm_interrupt_queue, skb);
+
+ /* Wake up the poll queue */
+ wake_up_poll_queue();
+error:
+ mutex_unlock(&rds_mutex);
+}
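+
+/*
+ * Note on the ring buffer filled above: cg2900_fm_rds_callback() writes one
+ * row of fm_rds_buf[] per buffer-full interrupt and then advances rds_head,
+ * wrapping at MAX_RDS_BUFFER. A minimal consumer sketch (hypothetical, not
+ * part of this file; process_rds_row() is a placeholder) would drain rows
+ * from rds_tail the same way:
+ *
+ *	while (fm_rds_info.rds_tail != fm_rds_info.rds_head) {
+ *		process_rds_row(fm_rds_buf[fm_rds_info.rds_tail]);
+ *		if (++fm_rds_info.rds_tail == MAX_RDS_BUFFER)
+ *			fm_rds_info.rds_tail = 0;
+ *	}
+ */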
+
+int cg2900_fm_init(void)
+{
+ int result = 0;
+
+ FM_INFO_REPORT("cg2900_fm_init");
+
+ if (CG2900_FM_STATE_DEINITIALIZED != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_init: Already Initialized");
+ result = -EINVAL;
+ goto error;
+ }
+
+ mutex_init(&rds_mutex);
+
+ memset(&fm_rds_info, 0, sizeof(struct cg2900_fm_rds_info));
+ memset(&version_info, 0, sizeof(struct cg2900_version_info));
+ memset(
+ fm_rds_buf,
+ 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+
+ /* Initialize the Driver */
+ if (fmd_init() != 0) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Register the callback */
+ if (fmd_register_callback(
+ (fmd_radio_cb) cg2900_fm_driver_callback) != 0) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* initialize global variables */
+ fm_event = CG2900_EVENT_NO_EVENT;
+ fm_state = CG2900_FM_STATE_INITIALIZED;
+ fm_mode = CG2900_FM_IDLE_MODE;
+ fm_prev_rds_status = false;
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_init: returning %d",
+ result);
+ return result;
+
+}
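+
+/*
+ * Typical bring-up order implied by the state checks in this file (sketch
+ * only; error handling omitted, and freq_hz/band/grid/dev are caller-chosen
+ * placeholder values, with freq_hz given in Hz):
+ *
+ *	cg2900_fm_init();
+ *	cg2900_fm_switch_on(dev);
+ *	cg2900_fm_set_rx_default_settings(freq_hz, band, grid, true, true);
+ */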
+
+int cg2900_fm_deinit(void)
+{
+ int result = 0;
+
+ FM_INFO_REPORT("cg2900_fm_deinit");
+
+ if (CG2900_FM_STATE_INITIALIZED != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_deinit: Already de-Initialized");
+ result = -EINVAL;
+ goto error;
+ }
+ fmd_exit();
+ mutex_destroy(&rds_mutex);
+ fm_state = CG2900_FM_STATE_DEINITIALIZED;
+ fm_mode = CG2900_FM_IDLE_MODE;
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_deinit: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_switch_on(
+ struct device *device
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_switch_on");
+
+ if (CG2900_FM_STATE_INITIALIZED != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_switch_on: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ /* Enable FM IP */
+ FM_DEBUG_REPORT("cg2900_fm_switch_on: " "Sending FM IP Enable");
+
+ if (fmd_send_fm_ip_enable()) {
+ FM_ERR_REPORT("cg2900_fm_switch_on: "
+ "Error in fmd_send_fm_ip_enable");
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Now Download the Coefficient Files and FM Firmware */
+ if (cg2900_fm_load_firmware(device) != 0) {
+ FM_ERR_REPORT("cg2900_fm_switch_on: "
+ "Error in downloading firmware");
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Power up FM */
+ result = fmd_power_up();
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_switch_on: "
+ "fmd_power_up failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Switch Mode To Idle */
+ result = fmd_set_mode(FMD_MODE_IDLE);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_switch_on: "
+ "fmd_set_mode failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ fm_state = CG2900_FM_STATE_SWITCHED_ON;
+ fm_mode = CG2900_FM_IDLE_MODE;
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_switch_on: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_switch_off(void)
+{
+ int result = 0;
+
+ FM_INFO_REPORT("cg2900_fm_switch_off");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state &&
+ CG2900_FM_STATE_STAND_BY != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_switch_off: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ /* Stop the RDS Thread if it is running */
+ if (fm_rds_status) {
+ fm_rds_status = false;
+ fmd_stop_rds_thread();
+ }
+ if (CG2900_FM_STATE_STAND_BY == fm_state) {
+ /* Power up FM */
+ result = fmd_power_up();
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_switch_off: "
+ "fmd_power_up failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ } else
+ fm_state = CG2900_FM_STATE_SWITCHED_ON;
+ }
+ if (fmd_send_fm_ip_disable()) {
+ FM_ERR_REPORT("cg2900_fm_switch_off: "
+ "Problem in fmd_send_fm_ip_"
+ "disable");
+ result = -EINVAL;
+ goto error;
+ }
+ if (0 == result) {
+ fm_state = CG2900_FM_STATE_INITIALIZED;
+ fm_mode = CG2900_FM_IDLE_MODE;
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ /* Remove all Interrupts from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_switch_off: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_standby(void)
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_standby");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_standby: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ /* Remove all Interrupts from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+ result = fmd_goto_standby();
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_standby: "
+ "FMLGotoStandby failed, "
+ "err = %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ fm_state = CG2900_FM_STATE_STAND_BY;
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_standby: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_power_up_from_standby(void)
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_power_up_from_standby");
+
+ if (CG2900_FM_STATE_STAND_BY != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_power_up_from_standby: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ /* Power up FM */
+ result = fmd_power_up();
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_power_up_from_standby: "
+ "fmd_power_up failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ } else {
+ fm_state = CG2900_FM_STATE_SWITCHED_ON;
+ if (CG2900_FM_TX_MODE == fm_mode) {
+ /* Enable the PA */
+ result = fmd_tx_set_pa(true);
+ if (0 != result) {
+ FM_ERR_REPORT(
+ "cg2900_fm_power_up_from_standby: "
+ "fmd_tx_set_pa failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ }
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_power_up_from_standby: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_set_rx_default_settings(
+ u32 freq,
+ u8 band,
+ u8 grid,
+ bool enable_rds,
+ bool enable_stereo
+ )
+{
+ int result;
+ u8 vol_in_percentage;
+
+ FM_INFO_REPORT("cg2900_fm_set_rx_default_settings: freq = %d Hz, "
+ "band = %d, grid = %d, RDS = %d, Stereo Mode = %d",
+ freq, band, grid, enable_rds, enable_stereo);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state &&
+ CG2900_FM_STATE_STAND_BY != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_set_rx_default_settings: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ if (CG2900_FM_STATE_STAND_BY == fm_state) {
+ /* Power up FM */
+ result = fmd_power_up();
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_rx_default_settings: "
+ "fmd_power_up failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ } else
+ fm_state = CG2900_FM_STATE_SWITCHED_ON;
+ }
+ fm_mode = CG2900_FM_RX_MODE;
+
+ FM_DEBUG_REPORT("cg2900_fm_set_rx_default_settings: "
+ "Sending Set mode to Rx");
+ result = fmd_set_mode(FMD_MODE_RX);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_rx_default_settings: "
+ "fmd_set_mode failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Set the Grid */
+ FM_DEBUG_REPORT("cg2900_fm_set_rx_default_settings: "
+ "Sending fmd_rx_set_grid ");
+ result = fmd_rx_set_grid(grid);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_rx_default_settings: "
+ "fmd_rx_set_grid failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Set the Band */
+ FM_DEBUG_REPORT("cg2900_fm_set_rx_default_settings: "
+ "Sending Set fmd_set_freq_range");
+ result = fmd_set_freq_range(band);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_rx_default_settings: "
+ "fmd_set_freq_range failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Set the Frequency */
+ FM_DEBUG_REPORT("cg2900_fm_set_rx_default_settings: "
+ "Sending Set fmd_rx_set_frequency");
+ result = fmd_rx_set_frequency(
+ freq / FREQUENCY_CONVERTOR_KHZ_HZ);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_rx_default_settings: "
+ "fmd_rx_set_frequency failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ FM_DEBUG_REPORT("cg2900_fm_set_rx_default_settings: "
+ "SetFrequency interrupt received, "
+ "Sending Set fmd_rx_set_stereo_mode");
+
+ if (enable_stereo) {
+ /* Set the Stereo Blending mode */
+ result = fmd_rx_set_stereo_mode(
+ FMD_STEREOMODE_BLENDING);
+ } else {
+ /* Set the Mono mode */
+ result = fmd_rx_set_stereo_mode(
+ FMD_STEREOMODE_MONO);
+ }
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_rx_default_settings: "
+ "fmd_rx_set_stereo_mode "
+ "failed %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Remove all Interrupt from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+
+ FM_DEBUG_REPORT("cg2900_fm_set_rx_default_settings: "
+ "Sending Set rds");
+
+ if (enable_rds) {
+ /* Enable RDS */
+ a_b_flag = false;
+ result = cg2900_fm_rds_on();
+ } else {
+ /* Disable RDS */
+ result = cg2900_fm_rds_off();
+ }
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_rx_default_settings: "
+ "cg2900_fm_rds_on "
+ "failed %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Set the Analog Out Volume to Max */
+ vol_in_percentage = (u8)
+ (((u16) (MAX_ANALOG_VOLUME) * 100)
+ / MAX_ANALOG_VOLUME);
+ result = fmd_set_volume(vol_in_percentage);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_switch_on: "
+ "FMRSetVolume failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_set_rx_default_settings: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_set_tx_default_settings(
+ u32 freq,
+ u8 band,
+ u8 grid,
+ bool enable_rds,
+ bool enable_stereo
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_set_tx_default_settings: freq = %d Hz, "
+ "band = %d, grid = %d, RDS = %d, Stereo Mode = %d",
+ freq, band, grid, enable_rds, enable_stereo);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state &&
+ CG2900_FM_STATE_STAND_BY != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_set_tx_default_settings: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ if (CG2900_FM_STATE_STAND_BY == fm_state) {
+ /* Power up FM */
+ result = fmd_power_up();
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_tx_default_settings: "
+ "fmd_power_up failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ } else
+ fm_state = CG2900_FM_STATE_SWITCHED_ON;
+ }
+ fm_mode = CG2900_FM_TX_MODE;
+ if (fm_rds_status) {
+ fm_rds_status = false;
+ fmd_stop_rds_thread();
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ /* Give a 50 ms delay to let the RDS thread exit */
+ schedule_timeout_interruptible(msecs_to_jiffies(50));
+ }
+ /* Remove all Interrupt from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+
+ /* Switch To Tx mode */
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: "
+ "Sending Set mode to Tx");
+ result = fmd_set_mode(FMD_MODE_TX);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_tx_default_settings: "
+ "fmd_set_mode failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Sets the Limiter Values */
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: "
+ "Sending fmd_limiter_setcontrol");
+ result = fmd_limiter_setcontrol(
+ DEFAULT_AUDIO_DEVIATION,
+ DEFAULT_NOTIFICATION_HOLD_OFF_TIME);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_tx_default_settings: "
+ "fmd_limiter_setcontrol failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Set the Grid */
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: "
+ "Sending fmd_tx_set_grid ");
+ result = fmd_tx_set_grid(grid);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_tx_default_settings: "
+ "fmd_tx_set_grid failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Set the Band */
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: "
+ "Sending fmd_tx_set_freq_range");
+ result = fmd_tx_set_freq_range(band);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_tx_default_settings: "
+ "fmd_tx_set_freq_range failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Set the Preemphasis */
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: "
+ "Sending fmd_tx_set_preemphasis");
+ result = fmd_tx_set_preemphasis(FMD_EMPHASIS_75US);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_switch_on: "
+ "fmd_tx_set_preemphasis failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Set the Frequency */
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: "
+ "Sending Set fmd_tx_set_frequency");
+ result = fmd_tx_set_frequency(
+ freq / FREQUENCY_CONVERTOR_KHZ_HZ);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_switch_on: "
+ "fmd_tx_set_frequency failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: "
+ "SetFrequency interrupt received, "
+ "Sending Set fmd_tx_enable_stereo_mode");
+
+ /* Set the Stereo mode */
+ result = fmd_tx_enable_stereo_mode(enable_stereo);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_tx_default_settings: "
+ "fmd_tx_enable_stereo_mode "
+ "failed %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: "
+ "Sending Set fmd_tx_set_pa");
+
+ /* Enable the PA */
+ result = fmd_tx_set_pa(true);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_tx_default_settings: "
+ "fmd_tx_set_pa "
+ "failed %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: "
+ "set PA interrupt received, "
+ "Sending Set fmd_tx_set_signal_strength");
+
+ /* Set the Signal Strength to Max */
+ result = fmd_tx_set_signal_strength(
+ MAX_POWER_LEVEL);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_tx_default_settings: "
+ "fmd_tx_set_signal_strength "
+ "failed %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Enable Tx RDS */
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: "
+ "Sending Set cg2900_fm_tx_rds");
+ result = cg2900_fm_tx_rds(enable_rds);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_tx_default_settings: "
+ "cg2900_fm_tx_rds "
+ "failed %x", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_set_tx_default_settings: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_set_grid(
+ u8 grid
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_set_grid: Grid = %d", grid);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_set_grid: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_rx_set_grid(grid);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_grid: "
+ "fmd_rx_set_grid failed");
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_set_grid: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_set_band(
+ u8 band
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_set_band: Band = %d", band);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_set_band: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_set_freq_range(band);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_band: "
+ "fmd_set_freq_range failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_set_band: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_search_up_freq(void)
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_search_up_freq");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_search_up_freq: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ if (fm_rds_status) {
+ /* Stop RDS if it is active */
+ result = cg2900_fm_rds_off();
+ fm_prev_rds_status = true;
+ } else {
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ /* Remove all Interrupts from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+ }
+ result = fmd_rx_seek(CG2900_DIR_UP);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_search_up_freq: "
+ "Error Code %d", (unsigned int)result);
+ cg2900_fm_check_rds_status();
+ result = -EINVAL;
+ goto error;
+ }
+ result = 0;
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_search_up_freq: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_search_down_freq(void)
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_search_down_freq");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_search_down_freq: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ if (fm_rds_status) {
+ /* Stop RDS if it is active */
+ result = cg2900_fm_rds_off();
+ fm_prev_rds_status = true;
+ } else {
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ /* Remove all Interrupts from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+ }
+ result = fmd_rx_seek(CG2900_DIR_DOWN);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_search_down_freq: "
+ "Error Code %d", (unsigned int)result);
+ cg2900_fm_check_rds_status();
+ result = -EINVAL;
+ goto error;
+ }
+ result = 0;
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_search_down_freq: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_start_band_scan(void)
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_start_band_scan");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_start_band_scan: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ if (fm_rds_status) {
+ /* Stop RDS if it is active */
+ result = cg2900_fm_rds_off();
+ fm_prev_rds_status = true;
+ } else {
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ /* Remove all Interrupts from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+ }
+ result = fmd_rx_scan_band(DEFAULT_CHANNELS_TO_SCAN);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_start_band_scan: "
+ "Error Code %d", (unsigned int)result);
+ cg2900_fm_check_rds_status();
+ result = -EINVAL;
+ goto error;
+ }
+ result = 0;
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_start_band_scan: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_stop_scan(void)
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_stop_scan");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_stop_scan: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_rx_stop_seeking();
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_stop_scan: "
+ "Error Code %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ result = 0;
+ if (fm_prev_rds_status) {
+ /* Restart RDS if it was active earlier */
+ cg2900_fm_rds_on();
+ fm_prev_rds_status = false;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_stop_scan: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_get_scan_result(
+ u16 *num_of_scanfreq,
+ u32 *scan_freq,
+ u32 *scan_freq_rssi_level
+ )
+{
+ int result;
+ u32 cnt;
+ u32 index;
+ u32 minfreq;
+ u32 maxfreq;
+ u16 channels[3];
+ u16 rssi[3];
+ u8 freq_range;
+ u8 max_channels = 0;
+
+ FM_INFO_REPORT("cg2900_fm_get_scan_result");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_get_scan_result: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_get_freq_range(&freq_range);
+
+ if (0 != result) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ result = fmd_get_freq_range_properties(
+ freq_range,
+ &minfreq,
+ &maxfreq);
+
+ if (0 != result) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ result = fmd_rx_get_max_channels_to_scan(&max_channels);
+
+ if (0 != result) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* In one iteration we can retrieve at most 3 channels */
+ cnt = (max_channels / 3) + 1;
+ while ((cnt--) && (result == 0)) {
+ /*
+ * Get all channels, including empty ones.
+ * In one iteration at most 3 channels can be found.
+ */
+ result = fmd_rx_get_scan_band_info(cnt * 3,
+ num_of_scanfreq,
+ channels, rssi);
+ if (0 == result) {
+ index = cnt * 3;
+ /* Convert Freq to Hz from channel number */
+ scan_freq[index] = (minfreq +
+ channels[0] *
+ CHANNEL_FREQ_CONVERTER_MHZ) *
+ FREQUENCY_CONVERTOR_KHZ_HZ;
+ scan_freq_rssi_level[index] = rssi[0];
+ /* Convert Freq to Hz from channel number */
+ scan_freq[index + 1] = (minfreq +
+ channels[1] *
+ CHANNEL_FREQ_CONVERTER_MHZ) *
+ FREQUENCY_CONVERTOR_KHZ_HZ;
+ scan_freq_rssi_level[index + 1] = rssi[1];
+ /* Make sure we do not overwrite the array */
+ if (cnt < (max_channels / 3)) {
+ /* Convert Freq to Hz from channel number */
+ scan_freq[index + 2] = (minfreq +
+ channels[2] *
+ CHANNEL_FREQ_CONVERTER_MHZ) *
+ FREQUENCY_CONVERTOR_KHZ_HZ;
+ scan_freq_rssi_level[index + 2]
+ = rssi[2];
+ }
+ }
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_get_scan_result: returning %d",
+ result);
+ return result;
+
+}
+
+int cg2900_fm_start_block_scan(
+ u32 start_freq,
+ u32 end_freq
+ )
+{
+ int result;
+ u8 antenna;
+
+ FM_INFO_REPORT("cg2900_fm_start_block_scan");
+
+ FM_DEBUG_REPORT("cg2900_fm_start_block_scan: Start Freq = %d, "
+ "End Freq = %d", start_freq, end_freq);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_start_block_scan: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ if (fm_rds_status) {
+ /* Stop RDS if it is active */
+ result = cg2900_fm_rds_off();
+ fm_prev_rds_status = true;
+ } else {
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ /* Remove all Interrupts from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+ }
+ result = fmd_get_antenna(&antenna);
+ result = fmd_block_scan(
+ start_freq/FREQUENCY_CONVERTOR_KHZ_HZ,
+ end_freq/FREQUENCY_CONVERTOR_KHZ_HZ,
+ antenna);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_start_block_scan: "
+ "Error Code %d", (unsigned int)result);
+ cg2900_fm_check_rds_status();
+ result = -EINVAL;
+ goto error;
+ }
+ result = 0;
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_start_block_scan: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_get_block_scan_result(
+ u16 *num_of_scanchan,
+ u16 *scan_freq_rssi_level
+ )
+{
+ int result = 0;
+ u32 cnt;
+ u32 index;
+ u16 rssi[6];
+
+ FM_INFO_REPORT("cg2900_fm_get_block_scan_result");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_get_block_scan_result: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ cnt = 33;
+ while ((cnt--) && (result == 0)) {
+ /* Get all channels, including empty ones */
+ result = fmd_get_block_scan_result(
+ cnt * 6,
+ num_of_scanchan,
+ rssi);
+ if (0 == result) {
+ index = cnt * 6;
+ scan_freq_rssi_level[index]
+ = rssi[0];
+ scan_freq_rssi_level[index + 1]
+ = rssi[1];
+ scan_freq_rssi_level[index + 2]
+ = rssi[2];
+ scan_freq_rssi_level[index + 3]
+ = rssi[3];
+ scan_freq_rssi_level[index + 4]
+ = rssi[4];
+ scan_freq_rssi_level[index + 5]
+ = rssi[5];
+ }
+ }
+ if (CG2900_FM_TX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_get_block_scan_result:"
+ " Sending Set fmd_tx_set_pa");
+
+ /* Enable the PA */
+ result = fmd_tx_set_pa(true);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_get_block_scan_result:"
+ " fmd_tx_set_pa "
+ "failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_get_block_scan_result: returning %d",
+ result);
+ return result;
+
+}
+
+int cg2900_fm_tx_rds(
+ bool enable_rds
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_rds: enable_rds = %d", enable_rds);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_rds: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ if (enable_rds) {
+ /* Set the Tx Buffer Size */
+ result = fmd_tx_buffer_set_size(
+ MAX_RDS_GROUPS - 2);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_rds: "
+ "fmd_tx_buffer_set_size "
+ "failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ } else {
+ result = fmd_tx_set_rds(true);
+ }
+
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_rds: "
+ "fmd_tx_set_rds "
+ "failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ program_identification_code =
+ default_program_identification_code;
+ program_type_code = default_program_type_code;
+ memcpy(program_service,
+ default_program_service,
+ MAX_PSN_SIZE);
+ memcpy(radio_text,
+ default_radio_text, MAX_RT_SIZE);
+ radio_text[strlen(radio_text)] = 0x0D;
+ cg2900_fm_transmit_rds_groups();
+ result = 0;
+ } else {
+ result = fmd_tx_set_rds(false);
+
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_rds: "
+ "fmd_tx_set_rds "
+ "failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_rds: returning %d",
+ result);
+
+ return result;
+}
+
+int cg2900_fm_tx_set_pi_code(
+ u16 pi_code
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_set_pi_code: PI = %04x", pi_code);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_pi_code: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ program_identification_code = pi_code;
+ result = cg2900_fm_transmit_rds_groups();
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_set_pi_code: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_set_pty_code(
+ u16 pty_code
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_set_pty_code: PTY = %04x", pty_code);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_pty_code: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ program_type_code = pty_code;
+ result = cg2900_fm_transmit_rds_groups();
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_set_pty_code: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_set_program_station_name(
+ char *psn,
+ u8 len
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_set_program_station_name: PSN = %s",
+ psn);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_program_station_name: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ if (len < (MAX_PSN_SIZE - 1)) {
+ int count = len;
+ while (count < (MAX_PSN_SIZE - 1))
+ psn[count++] = ' ';
+ }
+ memcpy(program_service, psn, MAX_PSN_SIZE);
+ result = cg2900_fm_transmit_rds_groups();
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_set_program_station_name: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_set_radio_text(
+ char *rt,
+ u8 len
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_set_radio_text: RT = %s", rt);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_radio_text: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ rt[len] = 0x0D;
+ memcpy(radio_text, rt, len + 1);
+
+ result = cg2900_fm_transmit_rds_groups();
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_set_radio_text: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_get_rds_deviation(
+ u16 *deviation
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_get_rds_deviation");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_get_rds_deviation: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_tx_get_rds_deviation(deviation);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_get_rds_deviation: "
+ "fmd_tx_get_rds_deviation failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_get_rds_deviation: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_set_rds_deviation(
+ u16 deviation
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_set_rds_deviation: deviation = %d",
+ deviation);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_rds_deviation: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_tx_set_rds_deviation(deviation);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_rds_deviation: "
+ "fmd_tx_set_rds_deviation failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_set_rds_deviation: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_get_pilot_tone_status(
+ bool *enable
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_get_pilot_tone_status");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_get_pilot_tone_status: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_tx_get_stereo_mode(enable);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_get_pilot_tone_status: "
+ "fmd_tx_get_stereo_mode failed %d",
+ result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_get_pilot_tone_status: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_set_pilot_tone_status(
+ bool enable
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_set_pilot_tone_status: enable = %d",
+ enable);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_pilot_tone_status: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_tx_enable_stereo_mode(enable);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_pilot_tone_status: "
+ "fmd_tx_enable_stereo_mode failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_set_pilot_tone_status: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_get_pilot_deviation(
+ u16 *deviation
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_get_pilot_deviation");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_get_pilot_deviation: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_tx_get_pilot_deviation(deviation);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_get_pilot_deviation: "
+ "fmd_tx_get_pilot_deviation failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_get_pilot_deviation: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_set_pilot_deviation(
+ u16 deviation
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_set_pilot_deviation: deviation = %d",
+ deviation);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_pilot_deviation: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_tx_set_pilot_deviation(deviation);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_pilot_deviation: "
+ "fmd_tx_set_pilot_deviation failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_set_pilot_deviation: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_get_preemphasis(
+ u8 *preemphasis
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_get_preemphasis");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_get_preemphasis: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_tx_get_preemphasis(preemphasis);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_get_preemphasis: "
+ "fmd_tx_get_preemphasis failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_get_preemphasis: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_set_preemphasis(
+ u8 preemphasis
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_set_preemphasis: preemphasis = %d",
+ preemphasis);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_preemphasis: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_tx_set_preemphasis(preemphasis);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_preemphasis: "
+ "fmd_tx_set_preemphasis failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_set_preemphasis: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_rx_set_deemphasis(
+ u8 deemphasis
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_rx_set_deemphasis: deemphasis = %02x",
+ deemphasis);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_rx_set_deemphasis: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_rx_set_deemphasis(deemphasis);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_rx_set_deemphasis: "
+ "fmd_rx_set_deemphasis failed %d",
+ result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_rx_set_deemphasis: returning %d", result);
+ return result;
+}
+
+int cg2900_fm_tx_get_power_level(
+ u16 *power_level
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_get_power_level");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_get_power_level: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_tx_get_signal_strength(power_level);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_get_power_level: "
+ "fmd_tx_get_signal_strength failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_get_power_level: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_tx_set_power_level(
+ u16 power_level
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_tx_set_power_level: power_level = %d",
+ power_level);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_power_level: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_tx_set_signal_strength(power_level);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_tx_set_power_level: "
+ "fmd_tx_set_preemphasis failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_tx_set_power_level: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_set_audio_balance(
+ s8 balance
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_set_audio_balance, balance = %d", balance);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_set_audio_balance: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_set_balance(balance);
+ if (0 != result) {
+ FM_ERR_REPORT("FMRSetAudioBalance : "
+ "Failed in fmd_set_balance, err = %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_set_audio_balance: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_set_volume(
+ u8 vol_level
+ )
+{
+ int result;
+ u8 vol_in_percentage;
+
+ FM_INFO_REPORT("cg2900_fm_set_volume: Volume Level = %d", vol_level);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_set_volume: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
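+ /* Scale the 0..MAX_ANALOG_VOLUME level to the percentage fmd_set_volume() expects */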
+ vol_in_percentage =
+ (u8) (((u16) (vol_level) * 100) / MAX_ANALOG_VOLUME);
+ result = fmd_set_volume(vol_in_percentage);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_increase_volume: "
+ "FMRSetVolume failed, err = %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_set_volume: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_get_volume(
+ u8 *vol_level
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_get_volume");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_get_volume: "
+ "Invalid state of FM Driver = %d", fm_state);
+ *vol_level = 0;
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_get_volume(vol_level);
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_get_volume: returning %d, VolLevel = %d",
+ result, *vol_level);
+ return result;
+}
+
+int cg2900_fm_rds_off(void)
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_rds_off");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_rds_off: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ result = fmd_rx_set_rds(FMD_SWITCH_OFF_RDS);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_rds_off: fmd_rx_set_rds failed, "
+ "err = %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ if (fm_rds_status) {
+ /* Stop the RDS Thread */
+ FM_DEBUG_REPORT("cg2900_fm_rds_off: "
+ "Stopping RDS Thread");
+ fmd_stop_rds_thread();
+ fm_rds_status = false;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_rds_off: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_rds_on(void)
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_rds_on");
+ if (fm_rds_status) {
+ result = 0;
+ FM_DEBUG_REPORT("cg2900_fm_rds_on: rds is on "
+ "return result = %d", result);
+ return result;
+ }
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_rds_on: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ FM_DEBUG_REPORT("cg2900_fm_rds_on:"
+ " Sending fmd_rx_buffer_set_size");
+ result = fmd_rx_buffer_set_size(MAX_RDS_GROUPS);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_rds_on: fmd_rx_buffer_set_size"
+ "failed, err = %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ FM_DEBUG_REPORT("cg2900_fm_rds_on: Sending "
+ "fmd_rx_buffer_set_threshold");
+ result = fmd_rx_buffer_set_threshold(MAX_RDS_GROUPS - 1);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_rds_on: fmd_rx_buffer_set_threshold "
+ "failed, err = %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ FM_DEBUG_REPORT("cg2900_fm_rds_on: Sending fmd_rx_set_rds");
+ result = fmd_rx_set_rds(FMD_SWITCH_ON_RDS_ENHANCED_MODE);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_rds_on: fmd_rx_set_rds failed, "
+ "err = %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ /* Start the RDS Thread to read the RDS Buffers */
+ fm_rds_status = true;
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ fmd_start_rds_thread(cg2900_fm_rds_callback);
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_rds_on: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_get_rds_status(
+ bool *rds_status
+ )
+{
+ int result = 0;
+
+ FM_INFO_REPORT("cg2900_fm_get_rds_status");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_get_rds_status: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ if (CG2900_FM_RX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_get_rds_status: "
+ "fmd_rx_get_rds");
+ result = fmd_rx_get_rds(rds_status);
+ } else if (CG2900_FM_TX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_get_rds_status: "
+ "fmd_tx_get_rds");
+ result = fmd_tx_get_rds(rds_status);
+ }
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_get_rds_status: "
+ "fmd_get_rds failed, Error Code %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_get_rds_status: returning %d, "
+ "rds_status = %d", result,
+ *rds_status);
+ return result;
+}
+
+int cg2900_fm_mute(void)
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_mute");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_mute: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+
+ /* Mute Analog DAC */
+ result = fmd_set_mute(true);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_mute: "
+ "fmd_set_mute failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ /* Mute Ext Src */
+ result = fmd_ext_set_mute(true);
+ if (0 != result) {
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_mute: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_unmute(void)
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_unmute");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_unmute: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ /* Unmute Analog DAC */
+ result = fmd_set_mute(false);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_mute: "
+ "fmd_set_mute failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ /* Unmute Ext Src */
+ result = fmd_ext_set_mute(false);
+ if (0 != result) {
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_unmute: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_get_frequency(
+ u32 *freq
+ )
+{
+ int result = 0;
+ u32 current_freq = 0;
+
+ FM_INFO_REPORT("cg2900_fm_get_frequency");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_get_frequency: "
+ "Invalid state of FM Driver = %d", fm_state);
+ *freq = 0;
+ result = -EINVAL;
+ goto error;
+ }
+ if (CG2900_FM_RX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_get_frequency: "
+ "fmd_rx_get_frequency");
+ result = fmd_rx_get_frequency(&current_freq);
+ } else if (CG2900_FM_TX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_get_frequency: "
+ "fmd_tx_get_frequency");
+ result = fmd_tx_get_frequency(&current_freq);
+ }
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_get_frequency: "
+ "fmd_rx_get_frequency failed %d",
+ (unsigned int)result);
+ *freq = 0;
+ result = -EINVAL;
+ goto error;
+ }
+ /* Convert To Hz */
+ *freq = current_freq * FREQUENCY_CONVERTOR_KHZ_HZ;
+ FM_DEBUG_REPORT("cg2900_fm_get_frequency: "
+ "Current Frequency = %d Hz", *freq);
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_get_frequency: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_set_frequency(
+ u32 new_freq
+ )
+{
+ int result = 0;
+
+ FM_INFO_REPORT("cg2900_fm_set_frequency, new_freq = %d",
+ (int)new_freq);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_set_frequency: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ /* Check if RDS needs to be disabled before Setting Frequency */
+ if (fm_rds_status) {
+ /* Stop RDS if it is active */
+ result = cg2900_fm_rds_off();
+ fm_prev_rds_status = true;
+ } else {
+ memset(&fm_rds_info, 0,
+ sizeof(struct cg2900_fm_rds_info));
+ memset(fm_rds_buf, 0,
+ sizeof(struct cg2900_fm_rds_buf) *
+ MAX_RDS_BUFFER * MAX_RDS_GROUPS);
+ /* Remove all Interrupts from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+ }
+
+ if (CG2900_FM_RX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_set_frequency: "
+ "fmd_rx_set_frequency");
+ result = fmd_rx_set_frequency(
+ new_freq / FREQUENCY_CONVERTOR_KHZ_HZ);
+ } else if (CG2900_FM_TX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_set_frequency: "
+ "fmd_tx_set_frequency");
+ result = fmd_tx_set_frequency(
+ new_freq / FREQUENCY_CONVERTOR_KHZ_HZ);
+ }
+ if (fm_prev_rds_status) {
+ /* Restart RDS if it was active earlier */
+ cg2900_fm_rds_on();
+ fm_prev_rds_status = false;
+ }
+ if (result != 0) {
+ FM_ERR_REPORT("cg2900_fm_set_frequency: "
+ "fmd_rx_set_frequency failed %x",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+ if (CG2900_FM_TX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_set_frequency:"
+ " Sending Set" "fmd_tx_set_pa");
+
+ /* Enable the PA */
+ result = fmd_tx_set_pa(true);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_frequency:"
+ " fmd_tx_set_pa "
+ "failed %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ result = 0;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_set_frequency: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_get_signal_strength(
+ u16 *signal_strength
+ )
+{
+ int result = 0;
+
+ FM_INFO_REPORT("cg2900_fm_get_signal_strength");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_get_signal_strength: "
+ "Invalid state of FM Driver = %d", fm_state);
+ *signal_strength = 0;
+ result = -EINVAL;
+ goto error;
+ }
+ if (CG2900_FM_RX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_get_signal_strength: "
+ "fmd_rx_get_signal_strength");
+ result = fmd_rx_get_signal_strength(
+ signal_strength);
+ } else if (CG2900_FM_TX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_get_signal_strength: "
+ "fmd_tx_get_signal_strength");
+ result = fmd_tx_get_signal_strength(
+ signal_strength);
+ }
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_get_signal_strength: "
+ "Error Code %d", (unsigned int)result);
+ *signal_strength = 0;
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_get_signal_strength: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_af_update_get_result(
+ u16 *af_update_rssi
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_af_update_get_result");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_af_update_get_result: "
+ "Invalid state of FM Driver = %d", fm_state);
+ *af_update_rssi = 0;
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_rx_get_af_update_result(af_update_rssi);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_af_update_get_result: "
+ "Error Code %d", (unsigned int)result);
+ *af_update_rssi = 0;
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_af_update_get_result: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_af_update_start(
+ u32 af_freq
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_af_update_start");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_af_update_start: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_rx_af_update_start(
+ af_freq / FREQUENCY_CONVERTOR_KHZ_HZ);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_af_update_start: "
+ "Error Code %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_af_update_start: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_af_switch_get_result(
+ u16 *af_switch_conclusion
+ )
+{
+ int result;
+ u16 af_rssi;
+ u16 af_pi;
+
+ FM_INFO_REPORT("cg2900_fm_af_switch_get_result");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_af_switch_get_result: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_rx_get_af_switch_results(
+ af_switch_conclusion,
+ &af_rssi, &af_pi);
+
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_af_switch_get_result: "
+ "Error Code %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+ FM_DEBUG_REPORT("cg2900_fm_af_switch_get_result: "
+ "AF Switch conclusion = %d "
+ "AF Switch RSSI level = %d "
+ "AF Switch PI code = %d ",
+ *af_switch_conclusion, af_rssi, af_pi);
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_af_switch_get_result: returning %d",
+ result);
+ return result;
+
+}
+
+int cg2900_fm_af_switch_start(
+ u32 af_switch_freq,
+ u16 af_switch_pi
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_af_switch_start");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_af_switch_start: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_rx_af_switch_start(
+ af_switch_freq / FREQUENCY_CONVERTOR_KHZ_HZ,
+ af_switch_pi);
+
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_af_switch_start: "
+ "Error Code %d", (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_af_switch_start: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_get_mode(
+ u8 *cur_mode
+ )
+{
+ int result = 0;
+ bool stereo_mode;
+
+ FM_INFO_REPORT("cg2900_fm_get_mode");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_get_mode: "
+ "Invalid state of FM Driver = %d", fm_state);
+ *cur_mode = CG2900_MODE_MONO;
+ result = -EINVAL;
+ goto error;
+ }
+ if (CG2900_FM_RX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_get_mode: "
+ "fmd_rx_get_stereo_mode");
+ result = fmd_rx_get_stereo_mode(cur_mode);
+ FM_DEBUG_REPORT("cg2900_fm_get_mode: cur_mode = %x", *cur_mode);
+ } else if (CG2900_FM_TX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_get_mode: "
+ "fmd_tx_get_stereo_mode");
+ result = fmd_tx_get_stereo_mode(&stereo_mode);
+ if (stereo_mode)
+ *cur_mode = CG2900_MODE_STEREO;
+ else
+ *cur_mode = CG2900_MODE_MONO;
+ }
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_get_mode: "
+ "fmd_get_stereo_mode failed, "
+ "Error Code %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_get_mode: returning %d, mode = %d",
+ result, *cur_mode);
+ return result;
+}
+
+int cg2900_fm_set_mode(
+ u8 mode
+ )
+{
+ int result = 0;
+ bool enable_stereo_mode = false;
+
+ FM_INFO_REPORT("cg2900_fm_set_mode: mode = %d", mode);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_set_mode: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ if (CG2900_FM_RX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_set_mode: "
+ "fmd_rx_set_stereo_mode");
+ result = fmd_rx_set_stereo_mode(mode);
+ } else if (CG2900_FM_TX_MODE == fm_mode) {
+ FM_DEBUG_REPORT("cg2900_fm_set_mode: "
+ "fmd_tx_set_stereo_mode");
+ if (mode == CG2900_MODE_STEREO)
+ enable_stereo_mode = true;
+ result =
+ fmd_tx_enable_stereo_mode(
+ enable_stereo_mode);
+ }
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_mode: "
+ "fmd_rx_set_stereo_mode failed, "
+ "Error Code %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_set_mode: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_select_antenna(
+ u8 antenna
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_select_antenna: Antenna = %d", antenna);
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_select_antenna: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_set_antenna(antenna);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_select_antenna: "
+ "fmd_set_antenna failed, Error Code %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_select_antenna: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_get_antenna(
+ u8 *antenna
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_get_antenna");
+
+ if (CG2900_FM_STATE_SWITCHED_ON != fm_state) {
+ FM_ERR_REPORT("cg2900_fm_get_antenna: "
+ "Invalid state of FM Driver = %d", fm_state);
+ result = -EINVAL;
+ goto error;
+ }
+ result = fmd_get_antenna(antenna);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_get_antenna: "
+ "fmd_get_antenna failed, Error Code %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_get_antenna: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_get_rssi_threshold(
+ u16 *rssi_threshold
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_get_rssi_threshold");
+
+ result = fmd_rx_get_stop_level(rssi_threshold);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_get_rssi_threshold: "
+ "fmd_rx_get_stop_level failed, Error Code %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_get_rssi_threshold: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_set_rssi_threshold(
+ u16 rssi_threshold
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_set_rssi_threshold: "
+ "RssiThresold = %d", rssi_thresold);
+
+ result = fmd_rx_set_stop_level(rssi_threshold);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_rssi_threshold: "
+ "fmd_rx_set_stop_level failed, Error Code %d",
+ (unsigned int)result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_set_rssi_threshold: returning %d",
+ result);
+ return result;
+}
+
+void cg2900_fm_set_chip_version(
+ u16 revision,
+ u16 sub_version
+ )
+{
+ version_info.revision = revision;
+ version_info.sub_version = sub_version;
+}
+
+int cg2900_fm_set_test_tone_generator(
+ u8 test_tone_status
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_set_test_tone_generator: "
+ "test_tone_status = %02x", test_tone_status);
+
+ result = fmd_set_test_tone_generator_status(test_tone_status);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_set_test_tone_generator: "
+ "fmd_set_test_tone_generator_status failed"
+ ", Error Code %d", result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_set_test_tone_generator: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_test_tone_connect(
+ u8 left_audio_mode,
+ u8 right_audio_mode
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_test_tone_connect: "
+ "left_audio_mode = %02x right_audio_mode = %02x",
+ left_audio_mode, right_audio_mode);
+
+ result = fmd_test_tone_connect(left_audio_mode, right_audio_mode);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_test_tone_connect: "
+ "fmd_set_test_tone_connect failed, Error Code %d",
+ result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_test_tone_connect: returning %d",
+ result);
+ return result;
+}
+
+int cg2900_fm_test_tone_set_params(
+ u8 tone_gen,
+ u16 frequency,
+ u16 volume,
+ u16 phase_offset,
+ u16 dc,
+ u8 waveform
+ )
+{
+ int result;
+
+ FM_INFO_REPORT("cg2900_fm_test_tone_set_params: "
+ "tone_gen = %02x frequency = %04x "
+ "volume = %04x phase_offset = %04x "
+ "dc offset = %04x waveform = %02x",
+ tone_gen, frequency,
+ volume, phase_offset,
+ dc, waveform);
+
+ result = fmd_test_tone_set_params(
+ tone_gen,
+ frequency,
+ volume,
+ phase_offset,
+ dc,
+ waveform);
+ if (0 != result) {
+ FM_ERR_REPORT("cg2900_fm_test_tone_set_params: "
+ "fmd_test_tone_set_params failed, Error Code %d",
+ result);
+ result = -EINVAL;
+ goto error;
+ }
+
+error:
+ FM_DEBUG_REPORT("cg2900_fm_test_tone_set_params: returning %d",
+ result);
+ return result;
+}
+
+MODULE_AUTHOR("Hemant Gupta");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/radio/CG2900/cg2900_fm_api.h b/drivers/media/radio/CG2900/cg2900_fm_api.h
new file mode 100644
index 00000000000..ec9e6e86f77
--- /dev/null
+++ b/drivers/media/radio/CG2900/cg2900_fm_api.h
@@ -0,0 +1,1077 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Linux FM Host APIs for the ST-Ericsson FM Chip.
+ *
+ * Author: Hemant Gupta <hemant.gupta@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef CG2900_FM_API_H
+#define CG2900_FM_API_H
+
+#include <linux/device.h>
+#include <linux/skbuff.h>
+
+/* Callback function to receive RDS Data. */
+typedef void (*cg2900_fm_rds_cb)(void);
+
+extern struct sk_buff_head fm_interrupt_queue;
+
+/**
+ * struct cg2900_fm_rds_buf - RDS Group Receiving Structure
+ *
+ * @block1: RDS Block A
+ * @block2: RDS Block B
+ * @block3: RDS Block C
+ * @block4: RDS Block D
+ * @status1: Status of received RDS Block A
+ * @status2: Status of received RDS Block B
+ * @status3: Status of received RDS Block C
+ * @status4: Status of received RDS Block D
+ *
+ * Structure for receiving the RDS Group from FM Chip.
+ */
+struct cg2900_fm_rds_buf {
+ u16 block1;
+ u16 block2;
+ u16 block3;
+ u16 block4;
+ u8 status1;
+ u8 status2;
+ u8 status3;
+ u8 status4;
+};
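+
+/*
+ * Illustrative decode of a stored group (sketch only, not part of the API):
+ * per the RDS specification, Block A carries the PI code and the upper bits
+ * of Block B carry the group type and version. Given a filled entry "grp":
+ *
+ *	u16 pi_code    = grp->block1;
+ *	u8  group_type = (grp->block2 >> 12) & 0x0F;
+ *	bool version_b = grp->block2 & BIT(11);
+ */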
+
+/**
+ * struct cg2900_fm_rds_info - RDS Information Structure
+ *
+ * @rds_head: RDS Queue Head for storing next valid data.
+ * @rds_tail: RDS Queue Tail for retrieving next valid data.
+ * @rds_group_sent: Number of RDS Groups sent to Application.
+ * @rds_block_sent: Number of RDS Blocks sent to Application.
+ *
+ * Structure for storing the RDS data queue information.
+ */
+struct cg2900_fm_rds_info {
+ u8 rds_head;
+ u8 rds_tail;
+ u8 rds_group_sent;
+ u8 rds_block_sent;
+};
+
+/**
+ * struct cg2900_version_info - Chip HCI Version Info
+ *
+ * @revision: Revision of the controller, e.g. to indicate that it is
+ * a CG2900 controller.
+ * @sub_version: Subversion of the controller, e.g. to indicate a certain
+ * tape-out of the controller.
+ *
+ * Structure for storing the HCI Version Information of the Controller.
+ */
+struct cg2900_version_info {
+ u16 revision;
+ u16 sub_version;
+};
+
+/**
+ * enum cg2900_fm_state - States of FM Driver.
+ *
+ * @CG2900_FM_STATE_DEINITIALIZED: FM driver is not initialized.
+ * @CG2900_FM_STATE_INITIALIZED: FM driver is initialized.
+ * @CG2900_FM_STATE_SWITCHED_ON: FM driver is switched on and in active state.
+ * @CG2900_FM_STATE_STAND_BY: FM Radio is switched on but not in active state.
+ *
+ * Various states of FM Driver.
+ */
+enum cg2900_fm_state {
+ CG2900_FM_STATE_DEINITIALIZED,
+ CG2900_FM_STATE_INITIALIZED,
+ CG2900_FM_STATE_SWITCHED_ON,
+ CG2900_FM_STATE_STAND_BY
+};
+
+/**
+ * enum cg2900_fm_mode - FM Driver Mode.
+ *
+ * @CG2900_FM_IDLE_MODE: FM Radio is in Idle Mode.
+ * @CG2900_FM_RX_MODE: FM Radio is configured in Rx mode.
+ * @CG2900_FM_TX_MODE: FM Radio is configured in Tx mode.
+ *
+ * Various Modes of the FM Radio.
+ */
+enum cg2900_fm_mode {
+ CG2900_FM_IDLE_MODE,
+ CG2900_FM_RX_MODE,
+ CG2900_FM_TX_MODE
+};
+
+/**
+ * enum cg2900_fm_band - Various Frequency bands supported.
+ *
+ * @CG2900_FM_BAND_US_EU: European / US Band.
+ * @CG2900_FM_BAND_JAPAN: Japan Band.
+ * @CG2900_FM_BAND_CHINA: China Band.
+ * @CG2900_FM_BAND_CUSTOM: Custom Band.
+ *
+ * Various Frequency bands supported.
+ */
+enum cg2900_fm_band {
+ CG2900_FM_BAND_US_EU,
+ CG2900_FM_BAND_JAPAN,
+ CG2900_FM_BAND_CHINA,
+ CG2900_FM_BAND_CUSTOM
+};
+
+/**
+ * enum cg2900_fm_grid - Various Frequency grids supported.
+ *
+ * @CG2900_FM_GRID_50: 50 kHz spacing.
+ * @CG2900_FM_GRID_100: 100 kHz spacing.
+ * @CG2900_FM_GRID_200: 200 kHz spacing.
+ *
+ * Various Frequency grids supported.
+ */
+enum cg2900_fm_grid {
+ CG2900_FM_GRID_50,
+ CG2900_FM_GRID_100,
+ CG2900_FM_GRID_200
+};
+
+/**
+ * enum cg2900_fm_event - Various Events reported by FM API layer.
+ *
+ * @CG2900_EVENT_NO_EVENT: No Event.
+ * @CG2900_EVENT_SEARCH_CHANNEL_FOUND: Seek operation is completed.
+ * @CG2900_EVENT_SCAN_CHANNELS_FOUND: Band Scan is completed.
+ * @CG2900_EVENT_BLOCK_SCAN_CHANNELS_FOUND: Block Scan is completed.
+ * @CG2900_EVENT_SCAN_CANCELLED: Scan/Seek is cancelled.
+ * @CG2900_EVENT_MONO_STEREO_TRANSITION: Mono/Stereo Transition has taken place.
+ * @CG2900_EVENT_DEVICE_RESET: CG2900 has been reset by some other IP.
+ * @CG2900_EVENT_RDS_EVENT: RDS data interrupt has been received from chip.
+ *
+ * Various Events reported by FM API layer.
+ */
+enum cg2900_fm_event {
+ CG2900_EVENT_NO_EVENT,
+ CG2900_EVENT_SEARCH_CHANNEL_FOUND,
+ CG2900_EVENT_SCAN_CHANNELS_FOUND,
+ CG2900_EVENT_BLOCK_SCAN_CHANNELS_FOUND,
+ CG2900_EVENT_SCAN_CANCELLED,
+ CG2900_EVENT_MONO_STEREO_TRANSITION,
+ CG2900_EVENT_DEVICE_RESET,
+ CG2900_EVENT_RDS_EVENT
+};
+
+/**
+ * enum cg2900_fm_direction - Directions used while seeking.
+ *
+ * @CG2900_DIR_DOWN: Search in downwards direction.
+ * @CG2900_DIR_UP: Search in upwards direction.
+ *
+ * Directions used while seeking.
+ */
+enum cg2900_fm_direction {
+ CG2900_DIR_DOWN,
+ CG2900_DIR_UP
+};
+
+/**
+ * enum cg2900_fm_stereo_mode - Stereo Modes.
+ *
+ * @CG2900_MODE_MONO: Mono Mode.
+ * @CG2900_MODE_STEREO: Stereo Mode.
+ *
+ * Stereo Modes.
+ */
+enum cg2900_fm_stereo_mode {
+ CG2900_MODE_MONO,
+ CG2900_MODE_STEREO
+};
+
+#define CG2900_FM_DEFAULT_RSSI_THRESHOLD 100
+#define MAX_RDS_BUFFER 10
+#define MAX_RDS_GROUPS 22
+#define MIN_ANALOG_VOLUME 0
+#define MAX_ANALOG_VOLUME 20
+#define NUM_OF_RDS_BLOCKS 4
+#define RDS_BLOCK_MASK 0x1C
+#define RDS_ERROR_STATUS_MASK 0x03
+#define RDS_UPTO_TWO_BITS_CORRECTED 0x01
+#define RDS_UPTO_FIVE_BITS_CORRECTED 0x02
+#define MAX_RT_SIZE 65
+#define MAX_PSN_SIZE 9
+#define DEFAULT_CHANNELS_TO_SCAN 32
+#define MAX_CHANNELS_TO_SCAN 99
+#define MAX_CHANNELS_FOR_BLOCK_SCAN 198
+#define SKB_FM_INTERRUPT_DATA 2
+
+extern u8 fm_event;
+extern struct cg2900_fm_rds_buf fm_rds_buf[MAX_RDS_BUFFER][MAX_RDS_GROUPS];
+extern struct cg2900_fm_rds_info fm_rds_info;
+
+/**
+ * cg2900_fm_init()- Initializes FM Radio.
+ *
+ * Initializes the Variables and structures required for FM Driver.
+ * It also registers the callback to receive the events for command
+ * completion, etc
+ *
+ * Returns:
+ * 0, if Initialization successful
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_init(void);
+
+/**
+ * cg2900_fm_deinit()- De-initializes FM Radio.
+ *
+ * De-initializes the Variables and structures required for FM Driver.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_deinit(void);
+
+/**
+ * cg2900_fm_switch_on()- Start up procedure of the FM radio.
+ *
+ * @device: Character device requesting the operation.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_switch_on(
+ struct device *device
+ );
+
+/**
+ * cg2900_fm_switch_off()- Switches off FM radio
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_switch_off(void);
+
+/**
+ * cg2900_fm_standby()- Makes the FM Radio Go in Standby mode.
+ *
+ * The FM Radio memorizes the last state, i.e. volume, last
+ * tuned station, etc., which helps in resuming quickly to the previous state.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_standby(void);
+
+/**
+ * cg2900_fm_power_up_from_standby()- Power Up FM Radio from Standby mode.
+ *
+ * It returns the FM radio to the same state as it was before
+ * going to Standby.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_power_up_from_standby(void);
+
+/**
+ * cg2900_fm_set_rx_default_settings()- Loads FM Rx Default Settings.
+ *
+ * @freq: Frequency in Hz to be set on the FM Radio.
+ * @band: Band To be Set.
+ * (0: US/EU, 1: Japan, 2: China, 3: Custom)
+ * @grid: Grid specifying Spacing.
+ * (0: 50 kHz, 1: 100 kHz, 2: 200 kHz)
+ * @enable_rds: Flag indicating whether to enable or disable RDS.
+ * @enable_stereo: Flag indicating whether to enable or disable stereo mode.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_set_rx_default_settings(
+ u32 freq,
+ u8 band,
+ u8 grid,
+ bool enable_rds,
+ bool enable_stereo
+ );
+
+/**
+ * cg2900_fm_set_tx_default_settings()- Loads FM Tx Default Settings.
+ *
+ * @freq: Frequency in Hz to be set on the FM Radio.
+ * @band: Band To be Set.
+ * (0: US/EU, 1: Japan, 2: China, 3: Custom)
+ * @grid: Grid specifying Spacing.
+ * (0: 50 kHz, 1: 100 kHz, 2: 200 kHz)
+ * @enable_rds: Flag indicating whether to enable or disable RDS transmission.
+ * @enable_stereo: Flag indicating whether to enable or disable stereo mode.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_set_tx_default_settings(
+ u32 freq,
+ u8 band,
+ u8 grid,
+ bool enable_rds,
+ bool enable_stereo
+ );
+
+/**
+ * cg2900_fm_set_grid()- Sets the Grid on the FM Radio.
+ *
+ * @grid: Grid specifying Spacing.
+ * (0: 50 kHz, 1: 100 kHz, 2: 200 kHz)
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_set_grid(
+ u8 grid
+ );
+
+/**
+ * cg2900_fm_set_band()- Sets the Band on the FM Radio.
+ *
+ * @band: Band specifying Region.
+ * (0: US/EU, 1: Japan, 2: China, 3: Custom)
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_set_band(
+ u8 band
+ );
+
+/**
+ * cg2900_fm_search_up_freq()- seek Up.
+ *
+ * Searches the next available station in Upward Direction
+ * starting from the Current freq.
+ *
+ * If the operation is started successfully, the chip will generate the
+ * irpt_OperationSucceeded interrupt when the operation is completed
+ * and will tune to the next available frequency.
+ * If no station is found, the chip is still tuned to the original station
+ * before starting the search.
+ * Till the interrupt is received, no more APIs should be called
+ * except cg2900_fm_stop_scan.
+ *
+ * Returns:
+ * 0, if operation started successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_search_up_freq(void);
+
+/**
+ * cg2900_fm_search_down_freq()- seek Down.
+ *
+ * Searches the next available station in Downward Direction
+ * starting from the Current freq.
+ *
+ * If the operation is started successfully, the chip will generate
+ * the irpt_OperationSucceeded interrupt when the operation is completed
+ * and will tune to the next available frequency. If no station is found,
+ * the chip is still tuned to the original station before starting the search.
+ * Till the interrupt is received, no more APIs should be called
+ * except cg2900_fm_stop_scan.
+ *
+ * Returns:
+ * 0, if operation started successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_search_down_freq(void);
+
+/**
+ * cg2900_fm_start_band_scan()- Band Scan.
+ *
+ * Searches for available Stations in the entire Band starting from
+ * current freq.
+ * If the operation is started successfully, the chip will generate
+ * the irpt_OperationSucceeded interrupt when the operation is completed.
+ * After completion the chip will still be tuned to the original station
+ * before starting the scan. On reception of the interrupt, the host should
+ * call the API cg2900_fm_get_scan_result() to retrieve the stations and the
+ * corresponding RSSI of the stations found in the band.
+ * Till the interrupt is received, no more APIs should be called
+ * except cg2900_fm_stop_scan, cg2900_fm_switch_off, cg2900_fm_standby and
+ * cg2900_fm_get_frequency.
+ *
+ * Returns:
+ * 0, if operation started successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_start_band_scan(void);
+
+/**
+ * cg2900_fm_stop_scan()- Stops an active ongoing seek or Band Scan.
+ *
+ * If the operation is started successfully, the chip will generate the
+ * irpt_OperationSucceeded interrupt when the operation is completed.
+ * Till the interrupt is received, no more APIs should be called.
+ *
+ * Returns:
+ * 0, if operation started successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_stop_scan(void);
+
+/**
+ * cg2900_fm_get_scan_result()- Retrieves Band Scan Result
+ *
+ * Retrieves the Band Scan results for the stations found and
+ * the corresponding RSSI values of the stations.
+ * @num_of_scanfreq: (out) Number of Stations found
+ * during Scanning.
+ * @scan_freq: (out) Frequency of Stations in Hz
+ * found during Scanning.
+ * @scan_freq_rssi_level: (out) RSSI level of Stations
+ * found during Scanning.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_get_scan_result(
+ u16 *num_of_scanfreq,
+ u32 *scan_freq,
+ u32 *scan_freq_rssi_level
+ );
+
+/**
+ * cg2900_fm_start_block_scan()- Block Scan.
+ *
+ * Searches for RSSI level of all the channels between the start and stop
+ * channels. If the operation is started successfully, the chip will generate
+ * the irpt_OperationSucceeded interrupt when the operation is completed.
+ * After completion the chip will still be tuned to the original station
+ * before starting the scan. On reception of the interrupt, the host should
+ * call the API cg2900_fm_get_block_scan_result() to retrieve the RSSI of
+ * the channels. Till the interrupt is received, no more APIs should be
+ * called from the Host
+ * except cg2900_fm_stop_scan, cg2900_fm_switch_off, cg2900_fm_standby and
+ * cg2900_fm_get_frequency.
+ * @start_freq: Start channel block scan Frequency.
+ * @end_freq: End channel block scan Frequency
+ *
+ * Returns:
+ * 0, if operation started successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_start_block_scan(
+ u32 start_freq,
+ u32 end_freq
+ );
+
+/**
+ * cg2900_fm_get_block_scan_result()- Retrieves Block Scan Result
+ *
+ * Retrieves the Block Scan results, i.e. the RSSI values of the
+ * channels scanned between the start and end frequencies.
+ * @num_of_scanchan: (out) Number of Stations found
+ * during Scanning.
+ * @scan_freq_rssi_level: (out) RSSI level of Stations
+ * found during Scanning.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_get_block_scan_result(
+ u16 *num_of_scanchan,
+ u16 *scan_freq_rssi_level
+ );
+
+/**
+ * cg2900_fm_tx_get_rds_deviation()- Gets RDS Deviation.
+ *
+ * Retrieves the RDS Deviation level set for FM Tx.
+ * @deviation: (out) Rds Deviation.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_get_rds_deviation(
+ u16 *deviation
+ );
+
+/**
+ * cg2900_fm_tx_set_rds_deviation()- Sets RDS Deviation.
+ *
+ * Sets the RDS Deviation level on FM Tx.
+ * @deviation: Rds Deviation to set on FM Tx.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_set_rds_deviation(
+ u16 deviation
+ );
+
+/**
+ * cg2900_fm_tx_set_pi_code()- Sets PI code for RDS Transmission.
+ *
+ * Sets the Program Identification code to be transmitted.
+ * @pi_code: PI code to be transmitted.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_set_pi_code(
+ u16 pi_code
+ );
+
+/**
+ * cg2900_fm_tx_set_pty_code()- Sets PTY code for RDS Transmission.
+ *
+ * Sets the Program Type code to be transmitted.
+ * @pty_code: PTY code to be transmitted.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_set_pty_code(
+ u16 pty_code
+ );
+
+/**
+ * cg2900_fm_tx_set_program_station_name()- Sets PSN for RDS Transmission.
+ *
+ * Sets the Program Station Name to be transmitted.
+ * @psn: Program Station Name to be transmitted.
+ * @len: Length of Program Station Name to be transmitted.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_set_program_station_name(
+ char *psn,
+ u8 len
+ );
+
+/**
+ * cg2900_fm_tx_set_radio_text()- Sets RT for RDS Transmission.
+ *
+ * Sets the radio text to be transmitted.
+ * @rt: Radio Text to be transmitted.
+ * @len: Length of Radio Text to be transmitted.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_set_radio_text(
+ char *rt,
+ u8 len
+ );
+
+/**
+ * cg2900_fm_tx_get_pilot_tone_status()- Gets Pilot Tone status
+ *
+ * Gets the current status of pilot tone for FM Tx.
+ * @enable: (out) Flag indicating Pilot Tone is enabled or disabled.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_get_pilot_tone_status(
+ bool *enable
+ );
+
+/**
+ * cg2900_fm_tx_set_pilot_tone_status()- Enables/Disables Pilot Tone.
+ *
+ * Enables or disables the pilot tone for FM Tx.
+ * @enable: Flag indicating enabling or disabling Pilot Tone.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_set_pilot_tone_status(
+ bool enable
+ );
+
+/**
+ * cg2900_fm_tx_get_pilot_deviation()- Gets Pilot Deviation.
+ *
+ * Retrieves the Pilot Tone Deviation level set for FM Tx.
+ * @deviation: (out) Pilot Tone Deviation.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_get_pilot_deviation(
+ u16 *deviation
+ );
+
+/**
+ * cg2900_fm_tx_set_pilot_deviation()- Sets Pilot Deviation.
+ *
+ * Sets the Pilot Tone Deviation level on FM Tx.
+ * @deviation: Pilot Tone Deviation to set.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_set_pilot_deviation(
+ u16 deviation
+ );
+
+/**
+ * cg2900_fm_tx_get_preemphasis()- Gets Pre-emphasis level.
+ *
+ * Retrieves the Preemphasis level set for FM Tx.
+ * @preemphasis: (out) Preemphasis level.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_get_preemphasis(
+ u8 *preemphasis
+ );
+
+/**
+ * cg2900_fm_tx_set_preemphasis()- Sets Pre-emphasis level.
+ *
+ * Sets the Preemphasis level on FM Tx.
+ * @preemphasis: Preemphasis level.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_set_preemphasis(
+ u8 preemphasis
+ );
+
+/**
+ * cg2900_fm_tx_get_power_level()- Gets Power level.
+ *
+ * Retrieves the Power level set for FM Tx.
+ * @power_level: (out) Power level.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_get_power_level(
+ u16 *power_level
+ );
+
+/**
+ * cg2900_fm_tx_set_power_level()- Sets Power level.
+ *
+ * Sets the Power level for FM Tx.
+ * @power_level: Power level.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_set_power_level(
+ u16 power_level
+ );
+
+/**
+ * cg2900_fm_tx_rds()- Enable or disable Tx RDS.
+ *
+ * Enable or disable RDS transmission.
+ * @enable_rds: Flag indicating enabling or disabling RDS.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_tx_rds(
+ bool enable_rds
+ );
+
+/**
+ * cg2900_fm_set_audio_balance()- Sets Audio Balance.
+ *
+ * @balance: Audio Balance to be set in percentage.
+ * (-100: Right Mute.... 0: Both on.... 100: Left Mute)
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_set_audio_balance(
+ s8 balance
+ );
+
+/**
+ * cg2900_fm_set_volume()- Sets the Analog Out Gain of FM Chip.
+ *
+ * @vol_level: Volume Level to be set on Tuner (0-20).
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_set_volume(
+ u8 vol_level
+ );
+
+/**
+ * cg2900_fm_get_volume()- Gets the currently set Analog Out Gain of FM Chip.
+ *
+ * @vol_level: (out) Volume Level set on Tuner (0-20).
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_get_volume(
+ u8 *vol_level
+ );
+
+/**
+ * cg2900_fm_rds_off()- Disables the RDS decoding algorithm in FM chip
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_rds_off(void);
+
+/**
+ * cg2900_fm_rds_on()- Enables the RDS decoding algorithm in FM chip
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_rds_on(void);
+
+/**
+ * cg2900_fm_get_rds_status()- Retrieves whether RDS is enabled or not.
+ *
+ * @rds_status: (out) Status of RDS
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_get_rds_status(
+ bool *rds_status
+ );
+
+/**
+ * cg2900_fm_mute()- Mutes the Audio output from FM Chip
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_mute(void);
+
+/**
+ * cg2900_fm_unmute()- Unmutes the Audio output from FM Chip
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_unmute(void);
+
+/**
+ * cg2900_fm_get_frequency()- Gets the Currently tuned Frequency on FM Radio
+ *
+ * @freq: (out) Frequency in Hz set on the FM Radio.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_get_frequency(
+ u32 *freq
+ );
+
+/**
+ * cg2900_fm_set_frequency()- Sets the frequency on FM Radio
+ *
+ * @new_freq: Frequency in Hz to be set on the FM Radio.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_set_frequency(
+ u32 new_freq
+ );
+
+/**
+ * cg2900_fm_get_signal_strength()- Gets the RSSI level.
+ *
+ * @signal_strength: (out) RSSI level of the currently
+ * tuned frequency.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_get_signal_strength(
+ u16 *signal_strength
+ );
+
+/**
+ * cg2900_fm_af_update_get_result()- Retrieves results of AF Update
+ *
+ * @af_update_rssi: (out) RSSI level of the Alternative frequency.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_af_update_get_result(
+ u16 *af_update_rssi
+ );
+
+/**
+ * cg2900_fm_af_update_start()- Performs AF Update.
+ *
+ * @af_freq: AF frequency in Hz whose RSSI is to be retrieved.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_af_update_start(
+ u32 af_freq
+ );
+
+/**
+ * cg2900_fm_af_switch_get_result()- Retrieves the AF switch result.
+ *
+ * @af_switch_conclusion: (out) Conclusion of the AF Switch.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_af_switch_get_result(
+ u16 *af_switch_conclusion
+ );
+
+/**
+ * cg2900_fm_af_switch_start()- Performs AF switch.
+ *
+ * @af_switch_freq: Alternate Frequency in Hz to be switched.
+ * @af_switch_pi: PI code of the Alternative frequency.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_af_switch_start(
+ u32 af_switch_freq,
+ u16 af_switch_pi
+ );
+
+/**
+ * cg2900_fm_get_mode()- Gets the mode of the Radio tuner.
+ *
+ * @cur_mode: (out) Current mode set on FM Radio
+ * (0: Stereo, 1: Mono, 2: Blending, 3: Switching).
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_get_mode(
+ u8 *cur_mode
+ );
+
+/**
+ * cg2900_fm_set_mode()- Sets the mode on the Radio tuner.
+ *
+ * @mode: mode to be set on FM Radio
+ * (0: Stereo, 1: Mono, 2: Blending, 3: Switching.)
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_set_mode(
+ u8 mode
+ );
+
+/**
+ * cg2900_fm_select_antenna()- Selects the Antenna of the Radio tuner.
+ *
+ * @antenna: (0: Embedded, 1: Wired.)
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_select_antenna(
+ u8 antenna
+ );
+
+/**
+ * cg2900_fm_get_antenna()- Retrieves the currently selected antenna.
+ *
+ * @antenna: (out) (0: Embedded, 1: Wired).
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_get_antenna(
+ u8 *antenna
+ );
+
+/**
+ * cg2900_fm_get_rssi_threshold()- Gets the RSSI threshold currently
+ * set on the FM radio.
+ *
+ * @rssi_thresold: (out) Current RSSI threshold set.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_get_rssi_threshold(
+ u16 *rssi_thresold
+ );
+
+/**
+ * cg2900_fm_set_rssi_threshold()- Sets the RSSI threshold to be used
+ * during Band Scan and Seek.
+ *
+ * @rssi_thresold: RSSI threshold to be set.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_set_rssi_threshold(
+ u16 rssi_thresold
+ );
+
+/**
+ * cg2900_handle_device_reset()- Handles the reset of the device.
+ */
+void cg2900_handle_device_reset(void);
+
+/**
+ * wake_up_poll_queue()- Wakes up the Task waiting on Poll Queue.
+ * This function is called when Scan Band or seek has completed.
+ */
+void wake_up_poll_queue(void);
+
+/**
+ * cg2900_fm_set_chip_version()- Sets the Version of the Controller.
+ *
+ * This function is used to update the Chip Version information at the time
+ * of initialization of the FM driver.
+ * @revision: Revision of the controller, e.g. to indicate that it is
+ * a CG2900 controller.
+ * @sub_version: Subversion of the controller, e.g. to indicate a certain
+ * tape-out of the controller.
+ */
+void cg2900_fm_set_chip_version(
+ u16 revision,
+ u16 sub_version
+ );
+
+/**
+ * cg2900_fm_rx_set_deemphasis()- Sets de-emphasis level.
+ *
+ * Sets the Deemphasis level on FM Rx.
+ * @deemphasis: Deemphasis level.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_rx_set_deemphasis(
+ u8 deemphasis
+ );
+
+/**
+ * cg2900_fm_set_test_tone_generator()- Sets the Test Tone Generator.
+ *
+ * This function is used to enable/disable the Internal Tone Generator of
+ * CG2900.
+ * @test_tone_status: Status of tone generator.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_set_test_tone_generator(
+ u8 test_tone_status
+ );
+
+/**
+ * cg2900_fm_test_tone_connect()- Connect Audio outputs/inputs.
+ *
+ * This function connects the audio outputs/inputs of the Internal Tone
+ * Generator of CG2900.
+ * @left_audio_mode: Left Audio Output Mode.
+ * @right_audio_mode: Right Audio Output Mode.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_test_tone_connect(
+ u8 left_audio_mode,
+ u8 right_audio_mode
+ );
+
+/**
+ * cg2900_fm_test_tone_set_params()- Sets the Test Tone Parameters.
+ *
+ * This function is used to set the parameters of the Internal Tone Generator of
+ * CG2900.
+ * @tone_gen: Tone to be configured (Tone 1 or Tone 2)
+ * @frequency: Frequency of the tone.
+ * @volume: Volume of the tone.
+ * @phase_offset: Phase offset of the tone.
+ * @dc: DC to add to tone.
+ * @waveform: Waveform to generate, sine or pulse.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int cg2900_fm_test_tone_set_params(
+ u8 tone_gen,
+ u16 frequency,
+ u16 volume,
+ u16 phase_offset,
+ u16 dc,
+ u8 waveform
+ );
+
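+/*
+ * Illustrative usage sketch (not part of this header): an upper layer, e.g.
+ * a V4L2 radio interface or a character device, might bring the radio up
+ * roughly as follows. The device pointer "dev" and the example frequency
+ * are hypothetical; every call returns 0 on success and -EINVAL otherwise,
+ * as documented above.
+ *
+ *	if (cg2900_fm_init())
+ *		return -EINVAL;
+ *	if (cg2900_fm_switch_on(dev))
+ *		return -EINVAL;
+ *
+ *	e.g. 104.5 MHz (in Hz per the docs above), US/EU band, 100 kHz grid,
+ *	RDS and stereo enabled:
+ *	cg2900_fm_set_rx_default_settings(104500000, CG2900_FM_BAND_US_EU,
+ *					  CG2900_FM_GRID_100, true, true);
+ *	cg2900_fm_set_volume(MAX_ANALOG_VOLUME);
+ *	...
+ *	cg2900_fm_switch_off();
+ *	cg2900_fm_deinit();
+ */
+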
+#endif /* CG2900_FM_API_H */
diff --git a/drivers/media/radio/CG2900/cg2900_fm_driver.c b/drivers/media/radio/CG2900/cg2900_fm_driver.c
new file mode 100644
index 00000000000..4b1d39839ae
--- /dev/null
+++ b/drivers/media/radio/CG2900/cg2900_fm_driver.c
@@ -0,0 +1,4922 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Linux FM Driver for CG2900 FM Chip
+ *
+ * Author: Hemant Gupta <hemant.gupta@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/device.h>
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <asm-generic/errno-base.h>
+#include "cg2900.h"
+#include "cg2900_fm_driver.h"
+
+/*
+ * Macro for printing the HCI Packet received from Protocol Driver
+ * to FM Driver.
+ */
+#define CG2900_HEX_READ_PACKET_DUMP \
+ if (cg2900_fm_debug_level == FM_HCI_PACKET_LOGS) \
+ fmd_hexdump('<', skb->data, skb->len);
+
+/* Macro for printing the HCI Packet sent to Protocol Driver from FM Driver */
+#define CG2900_HEX_WRITE_PACKET_DUMP \
+ if (cg2900_fm_debug_level == FM_HCI_PACKET_LOGS) \
+ fmd_hexdump('>', send_buffer, num_bytes);
+
+/* Converts the given hex nibble to its ASCII character */
+#define ASCVAL(x) (((x) <= 9) ? (x) + '0' : (x) - 10 + 'a')
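+/* For example, ASCVAL(0x0) = '0', ASCVAL(0x9) = '9' and ASCVAL(0xA) = 'a'. */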
+
+/* The received packet's 1st byte indicates the packet length */
+#define FM_GET_PKT_LEN(__data) __data[0]
+
+/* The received packet's following bytes are the actual data. */
+#define FM_GET_RSP_PKT_ADDR(__data) (&__data[1])
+
+/*
+ * The LSB is formed by shifting the command ID left by 3 bits
+ * and ORing it with the number of parameters of the
+ * command.
+ */
+#define FM_CMD_GET_LSB(__cmd_id, __num_param) \
+ ((u8)(((__cmd_id << 3) & 0x00FF) | __num_param))
+
+/* The MSB is formed by shifting the command ID right by 5 bits. */
+#define FM_CMD_GET_MSB(__cmd_id) \
+ ((u8)(__cmd_id >> 5))
+
+/*
+ * The command ID is recovered by shifting the MSB left by 5 bits
+ * and ORing it with the LSB shifted right by 3 bits.
+ */
+#define FM_GET_CMD_ID(__data) \
+ ((u16)((__data[2] << 5) | __data[1] >> 3))
+
+/*
+ * Number of parameters in the response packet are the last 3 bits
+ * of the 1st byte of the received packet.
+ */
+#define FM_GET_NUM_PARAMS(__data) \
+ ((u16)((__data[1] & 0x07)))
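+
+/*
+ * Worked example of the packing above (illustrative values only):
+ * for a command ID of 0x64 with 3 parameters,
+ *	FM_CMD_GET_LSB(0x64, 3) = ((0x64 << 3) & 0x00FF) | 3 = 0x23
+ *	FM_CMD_GET_MSB(0x64)    = 0x64 >> 5                  = 0x03
+ * and, with __data[1] = 0x23 and __data[2] = 0x03 in the response,
+ *	FM_GET_CMD_ID(__data)     = (0x03 << 5) | (0x23 >> 3) = 0x64
+ *	FM_GET_NUM_PARAMS(__data) = 0x23 & 0x07               = 3
+ */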
+
+/* Function Id is mapped to the 1st byte of the received packet */
+#define FM_GET_FUNCTION_ID(__data) __data[0]
+
+/*
+ * Block Id of the FM Firmware downloaded is mapped to the
+ * 2nd byte of the received packet.
+ */
+#define FM_GET_BLOCK_ID(__data) __data[1]
+
+/* Status of the received packet is mapped to the 4th byte. */
+#define FM_GET_STATUS(__data) __data[3]
+
+/*
+ * For PG1 of CG2900, the FM Interrupt is mapped
+ * to the 3rd and 4th byte of the received packet.
+ */
+#define FM_GET_PGI_INTERRUPT(__data) \
+ ((u16)(__data[3] << 8 | __data[2]))
+
+/*
+ * For PG2 of CG2900, the FM Interrupt is mapped
+ * to the 5th and 6th byte of the received packet.
+ */
+#define FM_GET_PG2_INTERRUPT(__data) \
+ ((u16)(__data[5] << 8 | __data[4]))
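+
+/*
+ * Worked example (illustrative bytes only): for a PG2 interrupt event where
+ * __data[4] = 0x01 and __data[5] = 0x80, FM_GET_PG2_INTERRUPT() yields
+ * 0x8001, i.e. bits 0 and 15 set. Assuming the IRPT_* masks follow the bit
+ * ordering of the interrupt_name table below, those bit positions correspond
+ * to IRPT_OPERATION_SUCCEEDED and IRPT_COLD_BOOT_READY.
+ */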
+
+#define FM_GET_NUM_RDS_GRPS(__data) __data[0]
+
+/* The response buffer starts at the 4th byte of the received packet */
+#define FM_GET_RSP_BUFFER_ADDR(__data) (&__data[3])
+
+/* The FM function buffer starts at the 5th byte of the received packet */
+#define FM_GET_FUNCTION_ADDR(__data) (&__data[4])
+
+/*
+ * Maximum time for chip to respond including the Command
+ * Competion interrupts for some commands. This time has been
+ * adjusted to cater to increased communication time with chip
+ * when debug level is set to 4.
+ */
+#define MAX_RESPONSE_TIME_IN_MS 5000
+
+/*
+ * enum fmd_gocmd - FM Driver Command state.
+ *
+ * @FMD_STATE_NONE: FM Driver in Idle state
+ * @FMD_STATE_MODE: FM Driver in setmode state
+ * @FMD_STATE_FREQUENCY: FM Driver in Set frequency state.
+ * @FMD_STATE_PA: FM Driver in SetPA state.
+ * @FMD_STATE_PA_LEVEL: FM Driver in Set PA Level state.
+ * @FMD_STATE_ANTENNA: FM Driver in Setantenna state
+ * @FMD_STATE_MUTE: FM Driver in Setmute state
+ * @FMD_STATE_SEEK: FM Driver in seek mode
+ * @FMD_STATE_SEEK_STOP: FM Driver in seek stop level state.
+ * @FMD_STATE_SCAN_BAND: FM Driver in Scanband mode
+ * @FMD_STATE_TX_SET_CTRL: FM Driver in RDS control state
+ * @FMD_STATE_TX_SET_THRSHLD: FM Driver in RDS threshold state
+ * @FMD_STATE_GEN_POWERUP: FM Driver in Power UP state.
+ * @FMD_STATE_SELECT_REF_CLK: FM Driver in Select Reference clock state.
+ * @FMD_STATE_SET_REF_CLK_PLL: FM Driver in Set Reference Freq state.
+ * @FMD_STATE_BLOCK_SCAN: FM Driver in Block Scan state.
+ * @FMD_STATE_AF_UPDATE: FM Driver in AF Update State.
+ * @FMD_STATE_AF_SWITCH: FM Driver in AF Switch State.
+ * @FMD_STATE_LAST: Last State of FM Driver
+ *
+ * Various states of the FM driver.
+ */
+enum fmd_gocmd {
+ FMD_STATE_NONE,
+ FMD_STATE_MODE,
+ FMD_STATE_FREQUENCY,
+ FMD_STATE_PA,
+ FMD_STATE_PA_LEVEL,
+ FMD_STATE_ANTENNA,
+ FMD_STATE_MUTE,
+ FMD_STATE_SEEK,
+ FMD_STATE_SEEK_STOP,
+ FMD_STATE_SCAN_BAND,
+ FMD_STATE_TX_SET_CTRL,
+ FMD_STATE_TX_SET_THRSHLD,
+ FMD_STATE_GEN_POWERUP,
+ FMD_STATE_SELECT_REF_CLK,
+ FMD_STATE_SET_REF_CLK_PLL,
+ FMD_STATE_BLOCK_SCAN,
+ FMD_STATE_AF_UPDATE,
+ FMD_STATE_AF_SWITCH,
+ FMD_STATE_LAST
+};
+
+/**
+ * struct fmd_rdsgroup_t - Rds group structure.
+ *
+ * @block: Array for RDS Block(s) received.
+ * @status: Array of Status of corresponding RDS block(s).
+ *
+ * It stores the value and status of a particular RDS group
+ * received.
+ */
+struct fmd_rds_group {
+ u16 block[NUM_OF_RDS_BLOCKS];
+ u8 status[NUM_OF_RDS_BLOCKS];
+};
+
+/**
+ * struct fmd_states_info - Main FM state info structure.
+ *
+ * @fmd_initialized: Flag indicating FM Driver is initialized or not
+ * @rx_freq_range: Receiver freq range
+ * @rx_volume: Receiver volume level
+ * @rx_antenna: Receiver Antenna
+ * @rx_seek_stop_level: RDS seek stop Level
+ * @rx_rds_on: Receiver RDS ON
+ * @rx_stereo_mode: Receiver Stereo mode
+ * @max_channels_to_scan: Maximum Number of channels to Scan.
+ * @tx_freq_range: Transmitter freq Range
+ * @tx_preemphasis: Transmitter Pre-emphasis level
+ * @tx_stereo_mode: Transmitter stereo mode
+ * @tx_rds_on: Enable RDS
+ * @tx_pilot_dev: Pilot freq deviation
+ * @tx_rds_dev: RDS deviation
+ * @tx_strength: TX Signal Strength
+ * @irq_index: Index where last interrupt is added to Interrupt queue
+ * @interrupt_available_for_processing: Flag indicating if interrupt is
+ * available for processing or not.
+ * @interrupt_queue: Circular Queue to store the received interrupt from chip.
+ * @gocmd: Command which is in progress.
+ * @mode: Current Mode of FM Radio.
+ * @rds_group: Array of RDS group Buffer
+ * @callback: Callback registered by upper layers.
+ */
+struct fmd_states_info {
+ bool fmd_initialized;
+ u8 rx_freq_range;
+ u8 rx_volume;
+ u8 rx_antenna;
+ u16 rx_seek_stop_level;
+ bool rx_rds_on;
+ u8 rx_stereo_mode;
+ u8 tx_freq_range;
+ u8 tx_preemphasis;
+ bool tx_stereo_mode;
+ u8 max_channels_to_scan;
+ bool tx_rds_on;
+ u16 tx_pilot_dev;
+ u16 tx_rds_dev;
+ u16 tx_strength;
+ u8 irq_index;
+ bool interrupt_available_for_processing;
+ u16 interrupt_queue[MAX_COUNT_OF_IRQS];
+ enum fmd_gocmd gocmd;
+ enum fmd_mode mode;
+ struct fmd_rds_group rds_group[MAX_RDS_GROUPS];
+ fmd_radio_cb callback;
+};
+
+/**
+ * struct fmd_data - Main structure for FM data exchange.
+ *
+ * @cmd_id: Command Id of the command being exchanged.
+ * @num_parameters: Number of parameters
+ * @parameters: FM data parameters.
+ */
+struct fmd_data {
+ u32 cmd_id;
+ u16 num_parameters;
+ u8 *parameters;
+};
+
+static struct fmd_states_info fmd_state_info;
+static struct fmd_data fmd_data;
+static struct semaphore cmd_sem;
+static struct semaphore rds_sem;
+static struct semaphore interrupt_sem;
+static struct task_struct *rds_thread_task;
+static struct task_struct *irq_thread_task;
+static struct device *cg2900_fm_dev;
+static struct mutex write_mutex;
+static struct mutex send_cmd_mutex;
+static spinlock_t fmd_spinlock;
+static spinlock_t fmd_spinlock_read;
+
+/* Debug Level
+ * 1: Only Error Logs
+ * 2: Info Logs
+ * 3: Debug Logs
+ * 4: HCI Logs
+ */
+unsigned short cg2900_fm_debug_level = FM_ERROR_LOGS;
+EXPORT_SYMBOL(cg2900_fm_debug_level);
+
+static cg2900_fm_rds_cb cb_rds_func;
+static bool rds_thread_required;
+static bool irq_thread_required;
+
+static char event_name[FMD_EVENT_LAST_ELEMENT][MAX_NAME_SIZE] = {
+ "FMD_EVENT_OPERATION_COMPLETED",
+ "FMD_EVENT_ANTENNA_STATUS_CHANGED",
+ "FMD_EVENT_FREQUENCY_CHANGED",
+ "FMD_EVENT_SEEK_COMPLETED",
+ "FMD_EVENT_SCAN_BAND_COMPLETED",
+ "FMD_EVENT_BLOCK_SCAN_COMPLETED",
+ "FMD_EVENT_AF_UPDATE_SWITCH_COMPLETE",
+ "FMD_EVENT_MONO_STEREO_TRANSITION_COMPLETE",
+ "FMD_EVENT_SEEK_STOPPED",
+ "FMD_EVENT_GEN_POWERUP",
+ "FMD_EVENT_RDSGROUP_RCVD",
+};
+
+static char interrupt_name[MAX_COUNT_OF_IRQS][MAX_NAME_SIZE] = {
+ "IRPT_OPERATION_SUCCEEDED",
+ "IRPT_OPERATION_FAILED",
+ "IRPT_NOT_DEFINED",
+ "IRPT_RX_BUFFER_FULL_OR_TX_BUFFER_EMPTY",
+ "IRPT_RX_SIGNAL_QUALITY_LOW_OR_TX_MUTE_STATUS_CHANGED",
+ "IRPT_MONO_STEREO_TRANSITION",
+ "IRPT_RX_RDS_SYNC_FOUND_OR_TX_INPUT_OVERDRIVE",
+ "IRPT_RDS_SYNC_LOST",
+ "IRPT_PI_CODE_CHANGED",
+ "IRPT_REQUESTED_BLOCK_AVAILABLE",
+ "IRPT_NOT_DEFINED",
+ "IRPT_NOT_DEFINED",
+ "IRPT_NOT_DEFINED",
+ "IRPT_NOT_DEFINED",
+ "IRPT_WARM_BOOT_READY",
+ "IRPT_COLD_BOOT_READY",
+};
+
+static void fmd_hexdump(
+ char prompt,
+ u8 *buffer,
+ int num_bytes
+ );
+static u8 fmd_get_event(
+ enum fmd_gocmd gocmd
+ );
+static void fmd_event_name(
+ u8 event,
+ char *event_name
+ );
+static char *fmd_get_fm_function_name(
+ u8 fm_function
+ );
+static void fmd_interrupt_name(
+ u16 interrupt,
+ char *interrupt_name
+ );
+static void fmd_add_interrupt_to_queue(
+ u16 interrupt
+ );
+static void fmd_process_interrupt(
+ u16 interrupt
+ );
+static void fmd_callback(
+ u8 event,
+ bool event_successful
+ );
+static int fmd_rx_frequency_to_channel(
+ u32 freq,
+ u16 *channel
+ );
+static int fmd_rx_channel_to_frequency(
+ u16 channel_number,
+ u32 *frequency
+ );
+static int fmd_tx_frequency_to_channel(
+ u32 freq,
+ u16 *channel
+ );
+static int fmd_tx_channel_to_frequency(
+ u16 channel_number,
+ u32 *frequency
+ );
+static bool fmd_go_cmd_busy(void);
+static int fmd_send_cmd_and_read_resp(
+ const u16 cmd_id,
+ const u16 num_parameters,
+ const u16 *parameters,
+ u16 *resp_num_parameters,
+ u16 *resp_parameters
+ );
+static int fmd_send_cmd(
+ const u16 cmd_id,
+ const u16 num_parameters,
+ const u16 *parameters
+ );
+static int fmd_read_resp(
+ u16 *cmd_id,
+ u16 *num_parameters,
+ u16 *parameters
+ );
+static void fmd_process_fm_function(
+ u8 *packet_buffer
+ );
+static int fmd_write_file_block(
+ u32 file_block_id,
+ u8 *file_block,
+ u16 file_block_length
+ );
+static void fmd_receive_data(
+ u16 packet_length,
+ u8 *packet_buffer
+ );
+static int fmd_rds_thread(
+ void *data
+ );
+static void fmd_start_irq_thread(void);
+static void fmd_stop_irq_thread(void);
+static int fmd_irq_thread(
+ void *data
+ );
+static int fmd_send_packet(
+ u16 num_bytes,
+ u8 *send_buffer
+ );
+static int fmd_get_cmd_sem(void);
+static void fmd_set_cmd_sem(void);
+static void fmd_get_interrupt_sem(void);
+static void fmd_set_interrupt_sem(void);
+static bool fmd_driver_init(void);
+static void fmd_driver_exit(void);
+
+/* structure declared in time.h */
+struct timespec time_spec;
+
+
+/**
+ * fmd_hexdump() - Displays the HCI Data Bytes exchanged with FM Chip.
+ *
+ * @prompt: Prompt signifying the direction '<' for Rx '>' for Tx
+ * @buffer: Buffer to be displayed.
+ * @num_bytes: Number of bytes of the buffer.
+ */
+static void fmd_hexdump(
+ char prompt,
+ u8 *buffer,
+ int num_bytes
+ )
+{
+ int i;
+ u8 tmp_val;
+ struct timespec time;
+ static u8 pkt_write[MAX_BUFFER_SIZE], *pkt_ptr;
+
+ getnstimeofday(&time);
+ sprintf(pkt_write, "\n[%08x:%08x] [%04x] %c",
+ (unsigned int)time.tv_sec,
+ (unsigned int)time.tv_nsec,
+ num_bytes, prompt);
+
+ pkt_ptr = pkt_write + strlen(pkt_write);
+ if (buffer == NULL)
+ return;
+
+ /* Copy the buffer only if the input buffer is not NULL */
+ for (i = 0; i < num_bytes; i++) {
+ *pkt_ptr++ = ' ';
+ tmp_val = buffer[i] >> 4;
+ *pkt_ptr++ = ASCVAL(tmp_val);
+ tmp_val = buffer[i] & 0x0F;
+ *pkt_ptr++ = ASCVAL(tmp_val);
+ if (i > 20) {
+ /* Dump at most the first 22 bytes */
+ break;
+ }
+ }
+ *pkt_ptr++ = '\0';
+ FM_HEX_REPORT("%s", pkt_write);
+}
+
+/**
+ * fmd_get_event() - Returns the Event based on FM Driver State.
+ *
+ * @gocmd: Pending FM Command
+ *
+ * Returns: Corresponding Event
+ */
+static u8 fmd_get_event(
+ enum fmd_gocmd gocmd
+ )
+{
+ u8 event = FMD_EVENT_OPERATION_COMPLETED;
+ switch (gocmd) {
+ case FMD_STATE_ANTENNA:
+ event = FMD_EVENT_ANTENNA_STATUS_CHANGED;
+ break;
+ case FMD_STATE_FREQUENCY:
+ event = FMD_EVENT_FREQUENCY_CHANGED;
+ break;
+ case FMD_STATE_SEEK:
+ event = FMD_EVENT_SEEK_COMPLETED;
+ break;
+ case FMD_STATE_SCAN_BAND:
+ event = FMD_EVENT_SCAN_BAND_COMPLETED;
+ break;
+ case FMD_STATE_BLOCK_SCAN:
+ event = FMD_EVENT_BLOCK_SCAN_COMPLETED;
+ break;
+ case FMD_STATE_AF_UPDATE:
+ /* Fall through */
+ case FMD_STATE_AF_SWITCH:
+ event = FMD_EVENT_AF_UPDATE_SWITCH_COMPLETE;
+ break;
+ case FMD_STATE_SEEK_STOP:
+ event = FMD_EVENT_SEEK_STOPPED;
+ break;
+ default:
+ event = FMD_EVENT_OPERATION_COMPLETED;
+ break;
+ }
+ return event;
+}
+
+/**
+ * fmd_event_name() - Converts the event to a displayable string.
+ *
+ * @event: Event that has occurred.
+ * @eventname: (out) Buffer to store event name.
+ */
+static void fmd_event_name(
+ u8 event,
+ char *eventname
+ )
+{
+ if (eventname == NULL) {
+ FM_ERR_REPORT("fmd_event_name: Output Buffer is NULL");
+ return;
+ }
+ if (event < FMD_EVENT_LAST_ELEMENT)
+ strcpy(eventname, event_name[event]);
+ else
+ strcpy(eventname, "FMD_EVENT_UNKNOWN");
+}
+
+/**
+ * fmd_get_fm_function_name() - Returns the FM Function name.
+ *
+ * @fm_function: Function whose name is to be retrieved.
+ *
+ * Returns FM Function Name.
+ */
+static char *fmd_get_fm_function_name(
+ u8 fm_function
+ )
+{
+ switch (fm_function) {
+ case FM_FUNCTION_ENABLE:
+ return "FM_FUNCTION_ENABLE";
+ case FM_FUNCTION_DISABLE:
+ return "FM_FUNCTION_DISABLE";
+ case FM_FUNCTION_RESET:
+ return "FM_FUNCTION_RESET";
+ case FM_FUNCTION_WRITE_COMMAND:
+ return "FM_FUNCTION_WRITE_COMMAND";
+ case FM_FUNCTION_SET_INT_MASK_ALL:
+ return "FM_FUNCTION_SET_INT_MASK_ALL";
+ case FM_FUNCTION_GET_INT_MASK_ALL:
+ return "FM_FUNCTION_GET_INT_MASK_ALL";
+ case FM_FUNCTION_SET_INT_MASK:
+ return "FM_FUNCTION_SET_INT_MASK";
+ case FM_FUNCTION_GET_INT_MASK:
+ return "FM_FUNCTION_GET_INT_MASK";
+ case FM_FUNCTION_FIRMWARE_DOWNLOAD:
+ return "FM_FUNCTION_FIRMWARE_DOWNLOAD";
+ default:
+ return "FM_FUNCTION_UNKNOWN";
+ }
+}
+
+/**
+ * fmd_interrupt_name() - Converts the interrupt to a displayable string.
+ *
+ * @interrupt: interrupt received from FM Chip
+ * @interruptname: (out) Buffer to store interrupt name.
+ */
+static void fmd_interrupt_name(
+ u16 interrupt,
+ char *interruptname
+ )
+{
+ int index;
+
+ if (interruptname == NULL) {
+ FM_ERR_REPORT("fmd_interrupt_name: Output Buffer is NULL!!!");
+ return;
+ }
+ /* Convert Interrupt to Bit */
+ for (index = 0; index < MAX_COUNT_OF_IRQS; index++) {
+ if (interrupt & (1 << index)) {
+ /* Match found, break the loop */
+ break;
+ }
+ }
+ if (index < MAX_COUNT_OF_IRQS)
+ strcpy(interruptname, interrupt_name[index]);
+ else
+ strcpy(interruptname, "IRPT_UNKNOWN");
+}
+
+/**
+ * fmd_add_interrupt_to_queue() - Add interrupt to IRQ Queue.
+ *
+ * @interrupt: interrupt received from FM Chip
+ */
+static void fmd_add_interrupt_to_queue(
+ u16 interrupt
+ )
+{
+ FM_DEBUG_REPORT("fmd_add_interrupt_to_queue : "
+ "Interrupt Received = %04x", (u16) interrupt);
+
+ /* Reset the index if it reaches the array limit */
+ if (fmd_state_info.irq_index > MAX_COUNT_OF_IRQS - 1) {
+ spin_lock(&fmd_spinlock);
+ fmd_state_info.irq_index = 0;
+ spin_unlock(&fmd_spinlock);
+ }
+
+ spin_lock(&fmd_spinlock);
+ fmd_state_info.interrupt_queue[fmd_state_info.irq_index] = interrupt;
+ fmd_state_info.irq_index++;
+ spin_unlock(&fmd_spinlock);
+ if (!fmd_state_info.interrupt_available_for_processing) {
+ spin_lock(&fmd_spinlock);
+ fmd_state_info.interrupt_available_for_processing = true;
+ spin_unlock(&fmd_spinlock);
+ fmd_set_interrupt_sem();
+ }
+}
+
+/**
+ * fmd_process_interrupt() - Processes the Interrupt.
+ *
+ * This function processes the interrupt received from FM Chip
+ * and calls the corresponding callback registered by upper layers with
+ * proper parameters.
+ * @interrupt: interrupt received from FM Chip
+ */
+static void fmd_process_interrupt(
+ u16 interrupt
+ )
+{
+ char irpt_name[MAX_NAME_SIZE];
+
+ fmd_interrupt_name(interrupt, irpt_name);
+ FM_DEBUG_REPORT("%s", irpt_name);
+ if ((interrupt & IRPT_OPERATION_SUCCEEDED) ||
+ (interrupt & IRPT_OPERATION_FAILED)) {
+ bool event_status = (interrupt & IRPT_OPERATION_SUCCEEDED);
+ u8 event = fmd_get_event(fmd_state_info.gocmd);
+
+ switch (fmd_state_info.gocmd) {
+ case FMD_STATE_MODE:
+ /* Mode has been changed. */
+ case FMD_STATE_MUTE:
+ /* FM radio is Muted or Unmuted */
+ case FMD_STATE_PA:
+ /* Power Amplifier has been enabled/disabled */
+ case FMD_STATE_PA_LEVEL:
+ /* Power Amplifier Level has been changed. */
+ case FMD_STATE_SELECT_REF_CLK:
+ /* Reference Clock has been selected. */
+ case FMD_STATE_SET_REF_CLK_PLL:
+ /* Reference Clock frequency has been changed. */
+ case FMD_STATE_TX_SET_CTRL:
+ /* Tx Control has been set. */
+ case FMD_STATE_TX_SET_THRSHLD:
+ /* Tx Threshold has been set. */
+ /* Set State to None and set the waiting semaphore. */
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ fmd_set_cmd_sem();
+ break;
+ case FMD_STATE_ANTENNA:
+ /* Antenna status has been changed. */
+ case FMD_STATE_SEEK_STOP:
+ /* Band scan, seek or block scan has completed. */
+ case FMD_STATE_AF_UPDATE:
+ /* AF Update has completed. */
+ case FMD_STATE_AF_SWITCH:
+ /* AF Switch has completed. */
+ case FMD_STATE_FREQUENCY:
+ /* Frequency has been changed. */
+ /*
+ * Set State to None, set the waiting semaphore,
+ * and inform upper layer.
+ */
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ fmd_set_cmd_sem();
+ fmd_callback(
+ event,
+ event_status);
+ break;
+ case FMD_STATE_SEEK:
+ /* Seek has completed. */
+ case FMD_STATE_SCAN_BAND:
+ /* Band scan has completed. */
+ case FMD_STATE_BLOCK_SCAN:
+ /* Block scan has completed. */
+ /*
+ * Set State to None. No need to set the
+ * semaphore since this is an asynchronous event.
+ */
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ /* Inform Upper layer. */
+ fmd_callback(event, event_status);
+ break;
+ default:
+ /* Do Nothing */
+ FM_ERR_REPORT("Default %s case of "\
+ "interrupt processing", event_status ? \
+ "Success" : "Failed");
+ break;
+ }
+ }
+
+ if (interrupt & IRPT_RX_BUFFERFULL_TX_BUFFEREMPTY) {
+ /*
+ * RDS Buffer Full or RDS Buffer Empty
+ * interrupt received from chip, indicating
+ * that RDS data is available if chip
+ * is in Rx mode or RDS data can be sent
+ * to chip in case of Tx mode. Inform the
+ * upper layers about this interrupt.
+ */
+ fmd_callback(
+ FMD_EVENT_RDSGROUP_RCVD,
+ true);
+ }
+
+ if (interrupt & IRPT_RX_MONO_STEREO_TRANSITION) {
+ /*
+ * Mono Stereo Transition interrupt
+ * received from chip, inform the
+ * upper layers about it.
+ */
+ fmd_callback(
+ FMD_EVENT_MONO_STEREO_TRANSITION_COMPLETE,
+ true);
+ }
+
+ if ((interrupt & IRPT_COLD_BOOT_READY) ||
+ (interrupt & IRPT_WARM_BOOT_READY)) {
+ switch (fmd_state_info.gocmd) {
+ case FMD_STATE_GEN_POWERUP:
+ /*
+ * Cold Boot/ Warm Boot Interrupt received from
+ * chip, indicating transition from
+ * power off/standby state to active state.
+ * Inform the upper layers about it.
+ */
+ fmd_callback(
+ FMD_EVENT_GEN_POWERUP,
+ true);
+ /* Set State to None and set the waiting semaphore. */
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ fmd_set_cmd_sem();
+ break;
+ default:
+ /* Do Nothing */
+ break;
+ }
+ }
+}
+
+/**
+ * fmd_callback() - Callback function for upper layers.
+ *
+ * Callback function that calls the registered callback of upper
+ * layers with proper parameters.
+ * @event: event for which the callback function was called
+ * from FM Driver.
+ * @event_successful: Signifying whether the event is called from FM
+ * Driver on receiving irpt_Operation_Succeeded or irpt_Operation_Failed.
+ */
+static void fmd_callback(
+ u8 event,
+ bool event_successful
+ )
+{
+ char event_name_string[MAX_NAME_SIZE];
+
+ fmd_event_name(event, event_name_string);
+
+ FM_DEBUG_REPORT("%s %x, %d", event_name_string,
+ (unsigned int)event , (unsigned int)event_successful);
+
+ if (fmd_state_info.callback)
+ fmd_state_info.callback(
+ event,
+ event_successful);
+}
+
+/**
+ * fmd_rx_frequency_to_channel() - Converts Rx frequency to channel number.
+ *
+ * Converts the Frequency in kHz to corresponding Channel number.
+ * This is used for FM Rx.
+ * @freq: Frequency in kHz.
+ * @channel: Channel Number corresponding to the given Frequency.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameters are not valid.
+ *
+ */
+static int fmd_rx_frequency_to_channel(
+ u32 freq,
+ u16 *channel
+ )
+{
+ u8 range;
+ int result;
+ u32 min_freq;
+ u32 max_freq;
+
+ if (channel == NULL) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ result = fmd_get_freq_range(
+ &range);
+
+ if (result != 0)
+ goto error;
+
+ result = fmd_get_freq_range_properties(
+ range,
+ &min_freq,
+ &max_freq);
+
+ if (result != 0)
+ goto error;
+
+ if (freq > max_freq)
+ freq = max_freq;
+ else if (freq < min_freq)
+ freq = min_freq;
+
+ /*
+ * Frequency in kHz needs to be divided by 50 kHz to get the
+ * channel number for all FM Bands
+ */
+ *channel = (u16)((freq - min_freq) / CHANNEL_FREQ_CONVERTER_MHZ);
+ result = 0;
+error:
+ return result;
+}
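+
+/*
+ * Worked example for the conversions above and below (illustrative band
+ * limits assumed): if the current range reports min_freq = 87500 kHz and
+ * max_freq = 108000 kHz (a typical US/EU band) and CHANNEL_FREQ_CONVERTER_MHZ
+ * is the 50 kHz step mentioned in the comments, then 104500 kHz maps to
+ * channel (104500 - 87500) / 50 = 340, and channel 340 maps back to
+ * 87500 + 340 * 50 = 104500 kHz.
+ */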
+
+/**
+ * fmd_rx_channel_to_frequency() - Converts Rx Channel number to frequency.
+ *
+ * Converts the Channel Number to corresponding Frequency in kHz.
+ * This is used for FM Rx.
+ * @channel_number: Channel Number to be converted.
+ * @frequency: Frequency corresponding to the corresponding channel in kHz.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameters are not valid.
+ *
+ */
+static int fmd_rx_channel_to_frequency(
+ u16 channel_number,
+ u32 *frequency
+ )
+{
+ u8 range;
+ int result;
+ u32 min_freq;
+ u32 max_freq;
+
+ if (frequency == NULL) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ result = fmd_get_freq_range(
+ &range);
+
+ if (result != 0)
+ goto error;
+
+ result = fmd_get_freq_range_properties(
+ range,
+ &min_freq,
+ &max_freq);
+
+ if (result != 0)
+ goto error;
+
+ /*
+ * Channel Number needs to be multiplied by 50 kHz to get the
+ * frequency in kHz for all FM Bands
+ */
+ *frequency = min_freq + (channel_number * CHANNEL_FREQ_CONVERTER_MHZ);
+
+ if (*frequency > max_freq)
+ *frequency = max_freq;
+ else if (*frequency < min_freq)
+ *frequency = min_freq;
+
+error:
+ return result;
+}
+
+/**
+ * fmd_tx_frequency_to_channel() - Converts Tx frequency to channel number.
+ *
+ * Converts the Frequency in kHz to corresponding Channel number.
+ * This is used for FM Tx.
+ * @freq: Frequency in kHz.
+ * @channel: (out)Channel Number corresponding to the given Frequency.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameters are not valid.
+ */
+static int fmd_tx_frequency_to_channel(
+ u32 freq,
+ u16 *channel
+ )
+{
+ u8 range;
+ int result;
+ u32 min_freq;
+ u32 max_freq;
+
+ if (channel == NULL) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ result = fmd_tx_get_freq_range(
+ &range);
+
+ if (result != 0)
+ goto error;
+
+ result = fmd_get_freq_range_properties(
+ range,
+ &min_freq,
+ &max_freq);
+
+ if (result != 0)
+ goto error;
+
+ if (freq > max_freq)
+ freq = max_freq;
+ else if (freq < min_freq)
+ freq = min_freq;
+
+ /*
+ * Frequency in kHz needs to be divided by 50 kHz to get the
+ * channel number for all FM Bands
+ */
+ *channel = (u16)((freq - min_freq) / CHANNEL_FREQ_CONVERTER_MHZ);
+ result = 0;
+error:
+ return result;
+}
+
+/**
+ * fmd_tx_channel_to_frequency() - Converts Tx Channel number to frequency.
+ *
+ * Converts the Channel Number to corresponding Frequency in kHz.
+ * This is used for FM Tx.
+ * @channel_number: Channel Number to be converted.
+ * @frequency: Frequency corresponding to the corresponding channel
+ * in kHz.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameters are not valid.
+ */
+static int fmd_tx_channel_to_frequency(
+ u16 channel_number,
+ u32 *frequency
+ )
+{
+ u8 range;
+ int result;
+ u32 min_freq;
+ u32 max_freq;
+
+ if (frequency == NULL) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ result = fmd_tx_get_freq_range(
+ &range);
+
+ if (result != 0)
+ goto error;
+
+ result = fmd_get_freq_range_properties(
+ range,
+ &min_freq,
+ &max_freq);
+
+ if (result != 0)
+ goto error;
+
+ /*
+ * Channel Number needs to be multiplied by 50 kHz to get the
+ * frequency in kHz for all FM Bands
+ */
+ *frequency = min_freq + (channel_number * CHANNEL_FREQ_CONVERTER_MHZ);
+
+ if (*frequency > max_freq)
+ *frequency = max_freq;
+ else if (*frequency < min_freq)
+ *frequency = min_freq;
+
+ result = 0;
+error:
+ return result;
+}
+
+/**
+ * fmd_go_cmd_busy() - Function to check if FM Driver is busy or idle
+ *
+ * Returns:
+ * false if FM Driver is Idle
+ * true otherwise
+ */
+static bool fmd_go_cmd_busy(void)
+{
+ return (fmd_state_info.gocmd != FMD_STATE_NONE);
+}
+
+/**
+ * fmd_read_cb() - Handle Received Data
+ *
+ * This function handles data received from connectivity protocol driver.
+ * @dev: Device receiving data.
+ * @skb: Buffer with data coming from the device.
+ */
+static void fmd_read_cb(
+ struct cg2900_user_data *dev,
+ struct sk_buff *skb
+ )
+{
+ FM_INFO_REPORT("fmd_read_cb");
+
+ if (skb->data == NULL || skb->len == 0)
+ return;
+
+ spin_lock(&fmd_spinlock_read);
+ CG2900_HEX_READ_PACKET_DUMP;
+ /*
+ * The first byte is the length of the following bytes.
+ * The rest of the bytes are the actual data.
+ */
+ fmd_receive_data(
+ FM_GET_PKT_LEN(skb->data),
+ FM_GET_RSP_PKT_ADDR(skb->data));
+
+ kfree_skb(skb);
+ spin_unlock(&fmd_spinlock_read);
+}
+
+/**
+ * fmd_receive_data() - Processes the FM data received from device.
+ *
+ * @packet_length: Length of received Data Packet
+ * @packet_buffer: Received Data buffer.
+ */
+static void fmd_receive_data(
+ u16 packet_length,
+ u8 *packet_buffer
+ )
+{
+ if (packet_buffer == NULL) {
+ FM_ERR_REPORT("fmd_receive_data: Buffer = NULL");
+ return;
+ }
+
+ if (packet_length == FM_PG1_INTERRUPT_EVENT_LEN &&
+ packet_buffer[0] == FM_CATENA_OPCODE &&
+ packet_buffer[1] == FM_EVENT_ID) {
+ /* PG 1.0 interrupt Handling */
+ u16 interrupt = FM_GET_PGI_INTERRUPT(packet_buffer);
+ FM_DEBUG_REPORT("interrupt = %04x",
+ (unsigned int)interrupt);
+ fmd_add_interrupt_to_queue(interrupt);
+ } else if (packet_length == FM_PG2_INTERRUPT_EVENT_LEN &&
+ packet_buffer[0] == FM_SUCCESS_STATUS &&
+ packet_buffer[1] == FM_CATENA_OPCODE &&
+ packet_buffer[2] == FM_EVENT &&
+ packet_buffer[3] == FM_EVENT_ID) {
+ /* PG 2.0 interrupt Handling */
+ u16 interrupt = FM_GET_PG2_INTERRUPT(packet_buffer);
+ FM_DEBUG_REPORT("interrupt = %04x",
+ (unsigned int)interrupt);
+ fmd_add_interrupt_to_queue(interrupt);
+ } else if (packet_buffer[0] == FM_SUCCESS_STATUS &&
+ packet_buffer[1] == FM_CATENA_OPCODE &&
+ packet_buffer[2] == FM_WRITE) {
+ /* Command Complete or RDS Data Handling */
+ u8 fm_status = FM_GET_STATUS(packet_buffer);
+ switch (fm_status) {
+ case FM_CMD_STATUS_CMD_SUCCESS:
+ fmd_process_fm_function(
+ FM_GET_FUNCTION_ADDR(packet_buffer));
+ break;
+ case FM_CMD_STATUS_HCI_ERR_HW_FAILURE:
+ FM_DEBUG_REPORT(
+ "FM_CMD_STATUS_HCI_ERR_HW_FAILURE");
+ break;
+ case FM_CMD_STATUS_HCI_ERR_INVALID_PARAMETERS:
+ FM_DEBUG_REPORT(
+ "FM_CMD_STATUS_HCI_ERR_INVALID_PARAMETERS");
+ break;
+ case FM_CMD_STATUS_IP_UNINIT:
+ FM_DEBUG_REPORT(
+ "FM_CMD_STATUS_IP_UNINIT");
+ break;
+ case FM_CMD_STATUS_HCI_ERR_UNSPECIFIED_ERROR:
+ FM_DEBUG_REPORT(
+ "FM_CMD_STATUS_HCI_ERR_UNSPECIFIED_ERROR");
+ break;
+ case FM_CMD_STATUS_HCI_ERR_CMD_DISALLOWED:
+ FM_DEBUG_REPORT(
+ "FM_CMD_STATUS_HCI_ERR_CMD_DISALLOWED");
+ break;
+ case FM_CMD_STATUS_WRONG_SEQ_NUM:
+ FM_DEBUG_REPORT(
+ "FM_CMD_STATUS_WRONG_SEQ_NUM");
+ break;
+ case FM_CMD_STATUS_UNKNOWN_FILE_TYPE:
+ FM_DEBUG_REPORT(
+ "FM_CMD_STATUS_UNKNOWN_FILE_TYPE");
+ break;
+ case FM_CMD_STATUS_FILE_VERSION_MISMATCH:
+ FM_DEBUG_REPORT(
+ "FM_CMD_STATUS_FILE_VERSION_MISMATCH");
+ break;
+ default:
+ FM_DEBUG_REPORT(
+ "Unknown Status = %02x", fm_status);
+ break;
+ }
+ }
+}
+
+/**
+ * fmd_reset_cb() - Reset callback function.
+ *
+ * @dev: CPD device resetting.
+ */
+static void fmd_reset_cb(struct cg2900_user_data *dev)
+{
+ FM_INFO_REPORT("fmd_reset_cb: Device Reset");
+ spin_lock(&fmd_spinlock_read);
+ cg2900_handle_device_reset();
+ spin_unlock(&fmd_spinlock_read);
+}
+
+/**
+ * fmd_rds_thread() - Thread for receiving RDS data from Chip.
+ *
+ * @data: Data being passed as parameter on starting the thread.
+ */
+static int fmd_rds_thread(
+ void *data
+ )
+{
+ FM_INFO_REPORT("fmd_rds_thread Created Successfully");
+ while (rds_thread_required) {
+ if (cb_rds_func)
+ cb_rds_func();
+ /* Give 100 ms for context switching */
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+ }
+ /* Always signal the rds_sem semaphore before exiting */
+ fmd_set_rds_sem();
+ FM_DEBUG_REPORT("fmd_rds_thread Exiting!!!");
+ return 0;
+}
+
+/**
+ * fmd_start_irq_thread() - Function for starting Interrupt Thread.
+ */
+static void fmd_start_irq_thread(void)
+{
+ FM_INFO_REPORT("fmd_start_irq_thread");
+ irq_thread_task = kthread_create(fmd_irq_thread, NULL, "irq_thread");
+ if (IS_ERR(irq_thread_task)) {
+ FM_ERR_REPORT("fmd_start_irq_thread: "
+ "Unable to Create irq_thread");
+ irq_thread_task = NULL;
+ return;
+ }
+ wake_up_process(irq_thread_task);
+}
+
+/**
+ * fmd_stop_irq_thread() - Function for stopping Interrupt Thread.
+ */
+static void fmd_stop_irq_thread(void)
+{
+ FM_INFO_REPORT("fmd_stop_irq_thread");
+ kthread_stop(irq_thread_task);
+ irq_thread_task = NULL;
+ FM_DEBUG_REPORT("-fmd_stop_irq_thread");
+}
+
+/**
+ * fmd_irq_thread() - Thread for processing Interrupts received from Chip.
+ *
+ * @data: Data being passed as parameter on starting the thread.
+ */
+static int fmd_irq_thread(
+ void *data
+ )
+{
+ int index;
+
+ FM_INFO_REPORT("fmd_irq_thread Created Successfully");
+
+ while (irq_thread_required) {
+ if (!fmd_state_info.interrupt_available_for_processing) {
+ FM_DEBUG_REPORT("fmd_irq_thread: Waiting on irq sem "
+ "interrupt_available_for_processing = %d "
+ "fmd_state_info.fmd_initialized = %d",
+ fmd_state_info.interrupt_available_for_processing,
+ fmd_state_info.fmd_initialized);
+ fmd_get_interrupt_sem();
+ FM_DEBUG_REPORT("fmd_irq_thread: Waiting on irq sem "
+ "interrupt_available_for_processing = %d "
+ "fmd_state_info.fmd_initialized = %d",
+ fmd_state_info.interrupt_available_for_processing,
+ fmd_state_info.fmd_initialized);
+ }
+ index = 0;
+
+ if (fmd_state_info.interrupt_available_for_processing) {
+ while (index < MAX_COUNT_OF_IRQS) {
+ if (fmd_state_info.interrupt_queue[index]
+ != IRPT_INVALID) {
+ FM_DEBUG_REPORT("fmd_process_interrupt "
+ "Interrupt = %04x",
+ fmd_state_info.
+ interrupt_queue[index]);
+ fmd_process_interrupt(
+ fmd_state_info.interrupt_queue[index]);
+ fmd_state_info.interrupt_queue[index]
+ = IRPT_INVALID;
+ }
+ index++;
+ }
+ }
+ fmd_state_info.interrupt_available_for_processing = false;
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+ }
+ FM_DEBUG_REPORT("fmd_irq_thread Exiting!!!");
+ return 0;
+}
+
+/**
+ * fmd_send_packet() - Sends the FM HCI Packet to the CG2900 Protocol Driver.
+ *
+ * @num_bytes: Number of bytes of Data to be sent including
+ * Channel Identifier (08)
+ * @send_buffer: Buffer containing the Data to be sent to Chip.
+ *
+ * Returns:
+ * 0, if the packet was sent successfully to the
+ * CG2900 Protocol Driver.
+ * -EINVAL if parameters are not valid.
+ * -EIO if there is an Input/Output Error.
+ */
+static int fmd_send_packet(
+ u16 num_bytes,
+ u8 *send_buffer
+ )
+{
+ int err;
+ struct sk_buff *skb;
+ struct cg2900_user_data *pf_data;
+
+ FM_INFO_REPORT("fmd_send_packet");
+
+	if (send_buffer == NULL) {
+		err = -EINVAL;
+		goto error_no_lock;
+	}
+
+	if (!cg2900_fm_dev) {
+		FM_ERR_REPORT("fmd_send_packet: No FM device registered");
+		err = -EIO;
+		goto error_no_lock;
+	}
+
+	pf_data = dev_get_platdata(cg2900_fm_dev);
+	if (!pf_data->opened) {
+		FM_ERR_REPORT("fmd_send_packet: FM channel is not opened");
+		err = -EIO;
+		goto error_no_lock;
+	}
+
+ mutex_lock(&write_mutex);
+ CG2900_HEX_WRITE_PACKET_DUMP;
+
+ skb = pf_data->alloc_skb(num_bytes, GFP_KERNEL);
+ if (!skb) {
+ FM_ERR_REPORT("fmd_send_packet:Couldn't " \
+ "allocate sk_buff with length %d", num_bytes);
+ err = -EIO;
+ goto error;
+ }
+
+ /*
+ * Copy the buffer removing the FM Header as this
+ * would be done by Protocol Driver
+ */
+ memcpy(skb_put(skb, num_bytes), send_buffer, num_bytes);
+
+ err = pf_data->write(pf_data, skb);
+ if (err) {
+ FM_ERR_REPORT("fmd_send_packet: "
+ "Failed to send(%d) bytes using "
+ "cg2900_write, err = %d",
+ num_bytes, err);
+		kfree_skb(skb);
+ err = -EIO;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+	mutex_unlock(&write_mutex);
+error_no_lock:
+	FM_DEBUG_REPORT("fmd_send_packet returning %d", err);
+	return err;
+}
+
+/**
+ * fmd_get_cmd_sem() - Block on Command Semaphore.
+ *
+ * This is required to ensure Flow Control in FM Driver.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ETIME if timeout occurs.
+ */
+static int fmd_get_cmd_sem(void)
+{
+ int ret_val;
+
+ FM_INFO_REPORT("fmd_get_cmd_sem");
+
+ ret_val = down_timeout(&cmd_sem,
+ msecs_to_jiffies(MAX_RESPONSE_TIME_IN_MS));
+
+ if (ret_val)
+ FM_ERR_REPORT("fmd_get_cmd_sem: down_timeout "
+ "returned error = %d", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * fmd_set_cmd_sem() - Unblock on Command Semaphore.
+ *
+ * This is required to ensure Flow Control in FM Driver.
+ */
+static void fmd_set_cmd_sem(void)
+{
+ FM_DEBUG_REPORT("fmd_set_cmd_sem");
+
+ up(&cmd_sem);
+}
+
+/**
+ * fmd_get_interrupt_sem() - Block on Interrupt Semaphore.
+ *
+ * The interrupt task blocks until an interrupt is received.
+ */
+static void fmd_get_interrupt_sem(void)
+{
+ int ret_val;
+
+ FM_DEBUG_REPORT("fmd_get_interrupt_sem");
+
+ ret_val = down_killable(&interrupt_sem);
+
+ if (ret_val)
+ FM_ERR_REPORT("fmd_get_interrupt_sem: down_killable "
+ "returned error = %d", ret_val);
+}
+
+/**
+ * fmd_set_interrupt_sem() - Unblock on Interrupt Semaphore.
+ *
+ * On receiving an interrupt, the interrupt task is unblocked.
+ */
+static void fmd_set_interrupt_sem(void)
+{
+ FM_DEBUG_REPORT("fmd_set_interrupt_sem");
+ up(&interrupt_sem);
+}
+
+/**
+ * fmd_driver_init() - Initializes the mutexes, semaphores, etc. for the FM Driver.
+ *
+ * It also registers FM Driver with the Protocol Driver.
+ *
+ * Returns:
+ * true if initialization is successful
+ * false if initialization fails.
+ */
+static bool fmd_driver_init(void)
+{
+ bool ret_val;
+ struct cg2900_rev_data rev_data;
+ struct cg2900_user_data *pf_data;
+ int err;
+
+ FM_INFO_REPORT("fmd_driver_init");
+
+ if (!cg2900_fm_dev) {
+ FM_ERR_REPORT("No device registered");
+ ret_val = false;
+ goto error;
+ }
+
+ /* Initialize the semaphores */
+ sema_init(&cmd_sem, 0);
+ sema_init(&rds_sem, 0);
+ sema_init(&interrupt_sem, 0);
+ cb_rds_func = NULL;
+ rds_thread_required = false;
+ irq_thread_required = true;
+
+ pf_data = dev_get_platdata(cg2900_fm_dev);
+
+ /* Create Mutex For Reading and Writing */
+ spin_lock_init(&fmd_spinlock_read);
+ mutex_init(&write_mutex);
+ mutex_init(&send_cmd_mutex);
+ spin_lock_init(&fmd_spinlock);
+ fmd_start_irq_thread();
+
+ /* Open the FM channel */
+ err = pf_data->open(pf_data);
+ if (err) {
+ FM_ERR_REPORT("fmd_driver_init: "
+ "Couldn't open FM channel. Either chip is not connected"
+ " or Protocol Driver is not initialized");
+ ret_val = false;
+ goto error;
+ }
+
+ if (!pf_data->get_local_revision(pf_data, &rev_data)) {
+ FM_DEBUG_REPORT("No revision data available");
+ ret_val = false;
+ goto error;
+ }
+
+ FM_DEBUG_REPORT("Read revision data revision %04x "
+ "sub_version %04x",
+ rev_data.revision, rev_data.sub_version);
+ cg2900_fm_set_chip_version(rev_data.revision, rev_data.sub_version);
+ ret_val = true;
+
+error:
+ FM_DEBUG_REPORT("fmd_driver_init: Returning %d", ret_val);
+ return ret_val;
+}
+
+/**
+ * fmd_driver_exit() - Deinitializes the mutex, semaphores, etc.
+ *
+ * It also deregisters FM Driver with the Protocol Driver.
+ *
+ */
+static void fmd_driver_exit(void)
+{
+ struct cg2900_user_data *pf_data;
+
+ FM_INFO_REPORT("fmd_driver_exit");
+ irq_thread_required = false;
+ mutex_destroy(&write_mutex);
+ mutex_destroy(&send_cmd_mutex);
+ fmd_stop_irq_thread();
+ /* Close the FM channel */
+ pf_data = dev_get_platdata(cg2900_fm_dev);
+ if (pf_data->opened)
+ pf_data->close(pf_data);
+}
+
+/**
+ * fmd_send_cmd_and_read_resp() - Send command and read response.
+ *
+ * This function sends the HCI command to the Protocol Driver and
+ * reads back the response packet.
+ * @cmd_id: Command Id to be sent to FM Chip.
+ * @num_parameters: Number of parameters of the command sent.
+ * @parameters: Buffer containing the parameters to be sent.
+ * @resp_num_parameters: (out) Number of parameters of the response packet.
+ * @resp_parameters: (out) Buffer of the response packet.
+ *
+ * Returns:
+ * 0: If the command is sent successfully and the
+ * response received is also correct.
+ * -EINVAL: If parameters are not valid or the received
+ * response does not match the sent command.
+ * -EIO: If there is an input/output error.
+ */
+static int fmd_send_cmd_and_read_resp(
+ const u16 cmd_id,
+ const u16 num_parameters,
+ const u16 *parameters,
+ u16 *resp_num_parameters,
+ u16 *resp_parameters
+ )
+{
+ int result;
+ u16 read_cmd_id = CMD_ID_NONE;
+
+ FM_INFO_REPORT("fmd_send_cmd_and_read_resp");
+
+ mutex_lock(&send_cmd_mutex);
+ result = fmd_send_cmd(
+ cmd_id,
+ num_parameters,
+ parameters);
+
+ if (result != 0)
+ goto error;
+
+ result = fmd_read_resp(
+ &read_cmd_id,
+ resp_num_parameters,
+ resp_parameters);
+
+ if (result != 0)
+ goto error;
+
+ /*
+ * Check that the response belongs to the sent command
+ */
+ if (read_cmd_id != cmd_id)
+ result = -EINVAL;
+
+error:
+ mutex_unlock(&send_cmd_mutex);
+ FM_DEBUG_REPORT("fmd_send_cmd_and_read_resp: "
+ "returning %d", result);
+ return result;
+}
+
+/**
+ * fmd_send_cmd() - This function sends the HCI Command
+ * to Protocol Driver.
+ *
+ * @cmd_id: Command Id to be sent to FM Chip.
+ * @num_parameters: Number of parameters of the command sent.
+ * @parameters: Buffer containing the parameters to be sent.
+ *
+ * Returns:
+ * 0: If the command is sent successfully to Lower Layers.
+ * -EIO: If there is an input/output error.
+ * -EINVAL: If parameters are not valid.
+ */
+static int fmd_send_cmd(
+	const u16 cmd_id,
+ const u16 num_parameters,
+ const u16 *parameters
+ )
+{
+	/*
+	 * Total Length includes 6 bytes of HCI Header
+	 * and remaining bytes depending on the number of parameters.
+	 */
+	u16 total_length = num_parameters * sizeof(u16) + FM_HCI_CMD_HEADER_LEN;
+	/*
+	 * Parameter Length includes 5 bytes of HCI Header
+	 * and remaining bytes depending on the number of parameters.
+	 */
+	u16 param_length = num_parameters * sizeof(u16) + FM_HCI_CMD_PARAM_LEN;
+ u8 *fm_data = kmalloc(total_length, GFP_KERNEL);
+ int err = -EINVAL;
+
+ FM_INFO_REPORT("fmd_send_cmd");
+
+ if (fm_data == NULL) {
+ err = -EIO;
+ goto error;
+ }
+
+ if (num_parameters && parameters == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* HCI encapsulation */
+ fm_data[0] = param_length;
+ fm_data[1] = FM_CATENA_OPCODE;
+ fm_data[2] = FM_WRITE;
+ fm_data[3] = FM_FUNCTION_WRITE_COMMAND;
+ fm_data[4] = FM_CMD_GET_LSB(cmd_id, num_parameters);
+ fm_data[5] = FM_CMD_GET_MSB(cmd_id);
+
+ memcpy(
+ (fm_data + FM_HCI_CMD_HEADER_LEN),
+ (void *)parameters,
+ num_parameters * sizeof(u16));
+
+ /* Send the Packet */
+	err = fmd_send_packet(total_length, fm_data);
+
+error:
+ kfree(fm_data);
+ FM_DEBUG_REPORT("fmd_send_cmd: "
+ "returning %d", err);
+ return err;
+}
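+
+/*
+ * Illustrative sketch of the encapsulation done by fmd_send_cmd() above
+ * (example values only, not taken from the chip specification): for a
+ * hypothetical cmd_id with two u16 parameters {0xAABB, 0xCCDD},
+ * FM_HCI_CMD_PARAM_LEN = 5 and FM_HCI_CMD_HEADER_LEN = 6 give:
+ *
+ *   fm_data[0] = param_length          (5 + 2 * sizeof(u16) = 9)
+ *   fm_data[1] = FM_CATENA_OPCODE
+ *   fm_data[2] = FM_WRITE
+ *   fm_data[3] = FM_FUNCTION_WRITE_COMMAND
+ *   fm_data[4] = FM_CMD_GET_LSB(cmd_id, num_parameters)
+ *   fm_data[5] = FM_CMD_GET_MSB(cmd_id)
+ *   fm_data[6..9] = the two parameters, copied as raw u16 words
+ *
+ * and fmd_send_packet() is then handed total_length = 6 + 4 = 10 bytes.
+ */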
+
+/**
+ * fmd_read_resp() - This function reads the response packet of the previous
+ * command sent to FM Chip and copies it to the buffer provided as parameter.
+ *
+ * @cmd_id: (out) Command Id received from FM Chip.
+ * @num_parameters: (out) Number of parameters of the response packet.
+ * @parameters: (out) Buffer of the response packet.
+ *
+ * Returns:
+ * 0: If the response buffer is copied successfully.
+ * -EINVAL: If parameters are not valid.
+ * -ETIME: If the response is not received within MAX_RESPONSE_TIME_IN_MS.
+ */
+static int fmd_read_resp(
+ u16 *cmd_id,
+ u16 *num_parameters,
+ u16 *parameters
+ )
+{
+	int err;
+
+	FM_INFO_REPORT("fmd_read_resp");
+
+ /* Wait till response of the command is received */
+ if (fmd_get_cmd_sem()) {
+ err = -ETIME;
+ goto error;
+ }
+
+ /* Check if the parameters are valid */
+ if (cmd_id == NULL || (fmd_data.num_parameters &&
+ (num_parameters == NULL || parameters == NULL))) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* Fill the arguments */
+ *cmd_id = fmd_data.cmd_id;
+ if (fmd_data.num_parameters) {
+ *num_parameters = fmd_data.num_parameters;
+ memcpy(
+ parameters,
+ fmd_data.parameters,
+ (*num_parameters * sizeof(u16)));
+ }
+
+ err = 0;
+
+error:
+ FM_DEBUG_REPORT("fmd_read_resp: "
+ "returning %d", err);
+ return err;
+}
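+
+/*
+ * Note on the command/response flow: fmd_send_cmd_and_read_resp() holds
+ * send_cmd_mutex across fmd_send_cmd() and fmd_read_resp(); the receive
+ * path (fmd_process_fm_function() below) stores the decoded command id
+ * and parameters in fmd_data and releases cmd_sem; fmd_read_resp() then
+ * copies the result out, or returns -ETIME if the semaphore is not
+ * obtained within MAX_RESPONSE_TIME_IN_MS.
+ */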
+
+/**
+ * fmd_process_fm_function() - Process FM Function.
+ *
+ * This function processes the response buffer received
+ * from the lower layers for the FM function and parses it
+ * accordingly.
+ * @packet_buffer: Received Buffer.
+ */
+static void fmd_process_fm_function(
+ u8 *packet_buffer
+ )
+{
+ u8 fm_function_id;
+ u8 block_id;
+
+ if (packet_buffer == NULL)
+ return;
+
+ fm_function_id = FM_GET_FUNCTION_ID(packet_buffer);
+ switch (fm_function_id) {
+ case FM_FUNCTION_ENABLE:
+ case FM_FUNCTION_DISABLE:
+ case FM_FUNCTION_RESET:
+ FM_DEBUG_REPORT(
+ "fmd_process_fm_function: "
+ "command success received for %s",
+ fmd_get_fm_function_name(fm_function_id));
+ /* Release the semaphore since response is received */
+ fmd_set_cmd_sem();
+ break;
+ case FM_FUNCTION_WRITE_COMMAND:
+ FM_DEBUG_REPORT(
+ "fmd_process_fm_function: "
+ "command success received for %s",
+ fmd_get_fm_function_name(fm_function_id));
+
+ fmd_data.cmd_id = FM_GET_CMD_ID(packet_buffer);
+ fmd_data.num_parameters =
+ FM_GET_NUM_PARAMS(packet_buffer);
+
+ FM_DEBUG_REPORT(
+ "fmd_process_fm_function: "
+ "Cmd Id = 0x%04x, Num Of Parms = %02x",
+ fmd_data.cmd_id, fmd_data.num_parameters);
+
+ if (fmd_data.num_parameters) {
+ fmd_data.parameters =
+ FM_GET_RSP_BUFFER_ADDR(packet_buffer);
+ memcpy(fmd_data.parameters,
+ FM_GET_RSP_BUFFER_ADDR(packet_buffer),
+ fmd_data.num_parameters * sizeof(u16));
+ }
+ /* Release the semaphore since response is received */
+ fmd_set_cmd_sem();
+ break;
+ case FM_FUNCTION_FIRMWARE_DOWNLOAD:
+ block_id = FM_GET_BLOCK_ID(packet_buffer);
+ FM_DEBUG_REPORT(
+ "fmd_process_fm_function: "
+ "command success received for %s"
+ "block id = %02x",
+ fmd_get_fm_function_name(fm_function_id),
+ block_id);
+ /* Release the semaphore since response is received */
+ fmd_set_cmd_sem();
+ break;
+ default:
+ FM_ERR_REPORT(
+ "fmd_process_fm_function: "
+ "default case: command success received for %s",
+ fmd_get_fm_function_name(fm_function_id));
+ break;
+ }
+}
+
+/**
+ * fmd_write_file_block() - download firmware.
+ *
+ * This function adds the header for downloading
+ * the firmware and coefficient files and sends it to the Protocol Driver.
+ * @file_block_id: Block ID of the F/W to be transmitted to FM Chip
+ * @file_block: Buffer containing the bytes to be sent.
+ * @file_block_length: Size of the Firmware buffer.
+ *
+ * Returns:
+ * 0: If there is no error.
+ * -EINVAL: If parameters are not valid.
+ * -ETIME: If timeout occurs while waiting for the response.
+ */
+static int fmd_write_file_block(
+ u32 file_block_id,
+ u8 *file_block,
+ u16 file_block_length
+ )
+{
+ int err;
+
+ FM_INFO_REPORT("fmd_write_file_block");
+	if (file_block == NULL) {
+		err = -EINVAL;
+		goto error_no_lock;
+	}
+
+ mutex_lock(&send_cmd_mutex);
+ file_block[0] = file_block_length + FM_HCI_WRITE_FILE_BLK_PARAM_LEN;
+ file_block[1] = FM_CATENA_OPCODE;
+ file_block[2] = FM_WRITE;
+ file_block[3] = FM_FUNCTION_FIRMWARE_DOWNLOAD;
+ file_block[4] = file_block_id;
+ /* Send the Packet */
+ err = fmd_send_packet(
+ file_block_length +
+ FM_HCI_WRITE_FILE_BLK_HEADER_LEN,
+ file_block);
+
+ /* wait till response comes */
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+
+error:
+	mutex_unlock(&send_cmd_mutex);
+error_no_lock:
+ FM_DEBUG_REPORT("fmd_write_file_block: "
+ "returning %d", err);
+ return err;
+}
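+
+/*
+ * For reference: the header written by fmd_write_file_block() occupies
+ * the first FM_HCI_WRITE_FILE_BLK_HEADER_LEN bytes of file_block
+ * (length, FM_CATENA_OPCODE, FM_WRITE, FM_FUNCTION_FIRMWARE_DOWNLOAD,
+ * block id), so callers such as fmd_send_fm_firmware() below must place
+ * the payload at offset FM_HCI_WRITE_FILE_BLK_HEADER_LEN.
+ */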
+
+int fmd_init(void)
+{
+ int err;
+
+ if (!fmd_driver_init()) {
+ err = -EIO;
+ goto error;
+ }
+
+ memset(&fmd_state_info, 0, sizeof(fmd_state_info));
+ fmd_state_info.fmd_initialized = true;
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ fmd_state_info.mode = FMD_MODE_IDLE;
+ fmd_state_info.callback = NULL;
+ fmd_state_info.rx_freq_range = FMD_FREQRANGE_EUROAMERICA;
+ fmd_state_info.rx_stereo_mode = FMD_STEREOMODE_BLENDING;
+ fmd_state_info.rx_volume = MAX_ANALOG_VOLUME;
+ fmd_state_info.rx_antenna = FMD_ANTENNA_EMBEDDED;
+ fmd_state_info.rx_rds_on = false;
+ fmd_state_info.rx_seek_stop_level = DEFAULT_RSSI_THRESHOLD;
+ fmd_state_info.tx_freq_range = FMD_FREQRANGE_EUROAMERICA;
+ fmd_state_info.tx_preemphasis = FMD_EMPHASIS_75US;
+ fmd_state_info.tx_pilot_dev = DEFAULT_PILOT_DEVIATION;
+ fmd_state_info.tx_rds_dev = DEFAULT_RDS_DEVIATION;
+ fmd_state_info.tx_strength = MAX_POWER_LEVEL;
+ fmd_state_info.max_channels_to_scan = DEFAULT_CHANNELS_TO_SCAN;
+ fmd_state_info.tx_stereo_mode = true;
+ fmd_state_info.irq_index = 0;
+ spin_lock_init(&fmd_spinlock);
+ err = 0;
+
+error:
+ FM_DEBUG_REPORT("fmd_init returning = %d", err);
+ return err;
+}
+
+void fmd_exit(void)
+{
+ fmd_set_interrupt_sem();
+ fmd_driver_exit();
+ memset(&fmd_state_info, 0, sizeof(fmd_state_info));
+}
+
+int fmd_register_callback(
+ fmd_radio_cb callback
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ fmd_state_info.callback = callback;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_get_version(
+ u16 *version
+ )
+{
+ int err;
+ int io_result;
+ u16 response_count;
+ u16 response_data[CMD_GET_VERSION_RSP_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (version == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_GEN_GET_VERSION,
+ CMD_GET_VERSION_PARAM_LEN,
+ NULL,
+ &response_count,
+ response_data);
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ memcpy(version,
+ response_data,
+ sizeof(u16) * CMD_GET_VERSION_RSP_PARAM_LEN);
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_set_mode(
+ u8 mode
+ )
+{
+ int err;
+ u16 parameters[CMD_GOTO_MODE_PARAM_LEN];
+ int io_result;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (mode > FMD_MODE_TX) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = mode;
+
+ fmd_state_info.gocmd = FMD_STATE_MODE;
+ FM_ERR_REPORT("Sending Set Mode");
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_GEN_GOTO_MODE,
+ CMD_GOTO_MODE_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem()) {
+ err = -ETIME;
+ goto error;
+ }
+ fmd_state_info.mode = mode;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_get_freq_range_properties(
+ u8 range,
+ u32 *min_freq,
+ u32 *max_freq
+ )
+{
+ int err;
+
+ if (min_freq == NULL || max_freq == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ switch (range) {
+ case FMD_FREQRANGE_EUROAMERICA:
+ *min_freq = FMD_EU_US_MIN_FREQ_IN_KHZ;
+ *max_freq = FMD_EU_US_MAX_FREQ_IN_KHZ;
+ break;
+ case FMD_FREQRANGE_JAPAN:
+ *min_freq = FMD_JAPAN_MIN_FREQ_IN_KHZ;
+ *max_freq = FMD_JAPAN_MAX_FREQ_IN_KHZ;
+ break;
+ case FMD_FREQRANGE_CHINA:
+ *min_freq = FMD_CHINA_MIN_FREQ_IN_KHZ;
+ *max_freq = FMD_CHINA_MAX_FREQ_IN_KHZ;
+ break;
+ default:
+ *min_freq = FMD_EU_US_MIN_FREQ_IN_KHZ;
+ *max_freq = FMD_EU_US_MAX_FREQ_IN_KHZ;
+ break;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_set_antenna(
+ u8 antenna
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SET_ANTENNA_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (antenna > FMD_ANTENNA_WIRED) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = antenna;
+
+ fmd_state_info.gocmd = FMD_STATE_ANTENNA;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SET_ANTENNA,
+ CMD_SET_ANTENNA_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.rx_antenna = antenna;
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_get_antenna(
+ u8 *antenna
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ *antenna = fmd_state_info.rx_antenna;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_set_freq_range(
+ u8 range
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_TN_SET_BAND_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ parameters[0] = range;
+ parameters[1] = FMD_MIN_CHANNEL_NUMBER;
+ parameters[2] = FMD_MAX_CHANNEL_NUMBER;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_TN_SET_BAND,
+ CMD_TN_SET_BAND_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+ fmd_state_info.rx_freq_range = range;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_get_freq_range(
+ u8 *range
+ )
+{
+ int err;
+
+ if (range == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ *range = fmd_state_info.rx_freq_range;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_set_grid(
+ u8 grid
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_TN_SET_GRID_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (grid > FMD_GRID_200KHZ) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = grid;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_TN_SET_GRID,
+ CMD_TN_SET_GRID_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_set_frequency(
+ u32 freq
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SP_TUNE_SET_CHANNEL_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (freq > FMD_EU_US_MAX_FREQ_IN_KHZ ||
+ freq < FMD_CHINA_MIN_FREQ_IN_KHZ) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ io_result = fmd_rx_frequency_to_channel(
+ freq,
+ &parameters[0]);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.gocmd = FMD_STATE_FREQUENCY;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_TUNE_SET_CHANNEL,
+ CMD_SP_TUNE_SET_CHANNEL_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_get_frequency(
+ u32 *freq
+ )
+{
+ int err;
+ int io_result;
+ u16 response_count;
+ u16 response_data[CMD_SP_TUNE_GET_CHANNEL_RSP_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (freq == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_TUNE_GET_CHANNEL,
+ CMD_SP_TUNE_GET_CHANNEL_PARAM_LEN,
+ NULL,
+ &response_count,
+ response_data);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ io_result = fmd_rx_channel_to_frequency(
+		response_data[0], /* 1st element is the channel number */
+ freq);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_set_stereo_mode(
+ u8 mode
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_RP_STEREO_SET_MODE_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (mode > FMD_STEREOMODE_BLENDING) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = mode;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_RP_STEREO_SET_MODE,
+ CMD_RP_STEREO_SET_MODE_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.rx_stereo_mode = mode;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_get_stereo_mode(
+ u8 *mode
+ )
+{
+ int err;
+ int io_result;
+ u16 response_count;
+ u16 response_data[CMD_RP_GET_STATE_RSP_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (mode == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_RP_GET_STATE,
+ CMD_RP_GET_STATE_PARAM_LEN,
+ NULL,
+ &response_count,
+ response_data);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ /* 2nd element of response is stereo signal */
+ *mode = response_data[1];
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_get_signal_strength(
+ u16 *strength
+ )
+{
+ int err;
+ int io_result;
+ u16 response_count;
+ u16 response_data[CMD_RP_GET_RSSI_RSP_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (strength == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_RP_GET_RSSI,
+ CMD_RP_GET_RSSI_PARAM_LEN,
+ NULL,
+ &response_count,
+ response_data);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+	*strength = response_data[0]; /* 1st element is the signal strength */
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_set_stop_level(
+ u16 stoplevel
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ fmd_state_info.rx_seek_stop_level = stoplevel;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_get_stop_level(
+ u16 *stop_level
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (stop_level == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *stop_level = fmd_state_info.rx_seek_stop_level;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_seek(
+ bool upwards
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SP_SEARCH_START_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (upwards)
+ parameters[0] = 0x0000;
+ else
+ parameters[0] = 0x0001;
+ parameters[1] = fmd_state_info.rx_seek_stop_level;
+ parameters[2] = DEFAULT_PEAK_NOISE_VALUE;
+ parameters[3] = DEFAULT_AVERAGE_NOISE_MAX_VALUE;
+ fmd_state_info.gocmd = FMD_STATE_SEEK;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_SEARCH_START,
+ CMD_SP_SEARCH_START_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_scan_band(
+ u8 max_channels_to_scan
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SP_SCAN_START_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (max_channels_to_scan > MAX_CHANNELS_TO_SCAN) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = max_channels_to_scan;
+ parameters[1] = fmd_state_info.rx_seek_stop_level;
+ parameters[2] = DEFAULT_PEAK_NOISE_VALUE;
+ parameters[3] = DEFAULT_AVERAGE_NOISE_MAX_VALUE;
+
+ fmd_state_info.gocmd = FMD_STATE_SCAN_BAND;
+ fmd_state_info.max_channels_to_scan = max_channels_to_scan;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_SCAN_START,
+ CMD_SP_SCAN_START_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_get_max_channels_to_scan(
+ u8 *max_channels_to_scan
+ )
+{
+ int err;
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (max_channels_to_scan == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *max_channels_to_scan = fmd_state_info.max_channels_to_scan;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_get_scan_band_info(
+ u32 index,
+ u16 *num_channels,
+ u16 *channels,
+ u16 *rssi
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SP_SCAN_GET_RESULT_PARAM_LEN];
+ u16 response_count;
+ u16 response_data[CMD_SP_SCAN_GET_RESULT_RSP_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (num_channels == NULL || rssi == NULL || channels == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = index;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_SCAN_GET_RESULT,
+ CMD_SP_SCAN_GET_RESULT_PARAM_LEN,
+ parameters,
+ &response_count,
+ response_data);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+	/* 1st element indicates number of channels found */
+	*num_channels = response_data[0];
+	/* 2nd element indicates 1st channel number */
+	channels[0] = response_data[1];
+	/* 3rd element indicates RSSI of corresponding channel */
+	rssi[0] = response_data[2];
+	/* 4th element indicates 2nd channel number */
+	channels[1] = response_data[3];
+	/* 5th element indicates RSSI of corresponding channel */
+	rssi[1] = response_data[4];
+	/* 6th element indicates 3rd channel number */
+	channels[2] = response_data[5];
+	/* 7th element indicates RSSI of corresponding channel */
+	rssi[2] = response_data[6];
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_block_scan(
+ u32 start_freq,
+ u32 stop_freq,
+ u8 antenna
+ )
+{
+ u16 start_channel;
+ u16 stop_channel;
+ int err;
+ int io_result;
+ u16 parameters[CMD_SP_BLOCK_SCAN_START_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (antenna > FMD_ANTENNA_WIRED) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (start_freq > FMD_EU_US_MAX_FREQ_IN_KHZ ||
+ start_freq < FMD_CHINA_MIN_FREQ_IN_KHZ) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (stop_freq > FMD_EU_US_MAX_FREQ_IN_KHZ ||
+ stop_freq < FMD_CHINA_MIN_FREQ_IN_KHZ) {
+ err = -EINVAL;
+ goto error;
+ }
+
+	/* Convert the start frequency to the corresponding channel */
+ switch (fmd_state_info.mode) {
+ case FMD_MODE_RX:
+ io_result = fmd_rx_frequency_to_channel(
+ start_freq,
+ &start_channel);
+ break;
+ case FMD_MODE_TX:
+ io_result = fmd_tx_frequency_to_channel(
+ start_freq,
+ &start_channel);
+ break;
+ default:
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+	/* Convert the end frequency to the corresponding channel */
+ switch (fmd_state_info.mode) {
+ case FMD_MODE_RX:
+ io_result = fmd_rx_frequency_to_channel(
+ stop_freq,
+ &stop_channel);
+ break;
+ case FMD_MODE_TX:
+ io_result = fmd_tx_frequency_to_channel(
+ stop_freq,
+ &stop_channel);
+ break;
+ default:
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ parameters[0] = start_channel;
+ parameters[1] = stop_channel;
+ parameters[2] = antenna;
+
+ fmd_state_info.gocmd = FMD_STATE_BLOCK_SCAN;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_BLOCK_SCAN_START,
+ CMD_SP_BLOCK_SCAN_START_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_get_block_scan_result(
+ u32 index,
+ u16 *num_channels,
+ u16 *rssi
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SP_BLOCK_SCAN_GET_RESULT_PARAM_LEN];
+ u16 response_count;
+ u16 response_data[CMD_SP_BLOCK_SCAN_GET_RESULT_RSP_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (num_channels == NULL || rssi == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = index;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_BLOCK_SCAN_GET_RESULT,
+ CMD_SP_BLOCK_SCAN_GET_RESULT_PARAM_LEN,
+ parameters,
+ &response_count,
+ response_data);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+	/*
+	 * The response packet has the number of channels as its 1st
+	 * element, and the remaining 6 elements are the RSSI values
+	 * of the channels.
+	 */
+ *num_channels = response_data[0];
+ rssi[0] = response_data[1];
+ rssi[1] = response_data[2];
+ rssi[2] = response_data[3];
+ rssi[3] = response_data[4];
+ rssi[4] = response_data[5];
+ rssi[5] = response_data[6];
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_stop_seeking(void)
+{
+ int err;
+ int io_result;
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (!(fmd_state_info.gocmd == FMD_STATE_SEEK ||
+ fmd_state_info.gocmd == FMD_STATE_SCAN_BAND ||
+ fmd_state_info.gocmd == FMD_STATE_BLOCK_SCAN)) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ fmd_state_info.gocmd = FMD_STATE_SEEK_STOP;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_STOP,
+ CMD_SP_STOP_PARAM_LEN,
+ NULL,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_af_update_start(
+ u32 freq
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SP_AF_UPDATE_START_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ io_result = fmd_rx_frequency_to_channel(
+ freq,
+ &parameters[0]);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.gocmd = FMD_STATE_AF_UPDATE;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_AF_UPDATE_START,
+ CMD_SP_AF_UPDATE_START_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_get_af_update_result(
+ u16 *af_level
+ )
+{
+ int err;
+ int io_result;
+ u16 response_count;
+ u16 response_data[CMD_SP_AF_UPDATE_GET_RESULT_RSP_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (af_level == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_AF_UPDATE_GET_RESULT,
+ CMD_SP_AF_UPDATE_GET_RESULT_PARAM_LEN,
+ NULL,
+ &response_count,
+ response_data);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+	/*
+	 * The 1st element of the response packet is the
+	 * RSSI of the AF frequency.
+	 */
+ *af_level = response_data[0];
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_af_switch_start(
+ u32 freq,
+ u16 picode
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SP_AF_SWITCH_START_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ io_result = fmd_rx_frequency_to_channel(
+ freq,
+ &parameters[0]);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ parameters[1] = picode;
+ parameters[2] = 0xFFFF; /* PI Mask */
+ parameters[3] = fmd_state_info.rx_seek_stop_level;
+ parameters[4] = 0x0000; /* Unmute when AF's PI matches expected PI */
+
+ fmd_state_info.gocmd = FMD_STATE_AF_SWITCH;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_AF_SWITCH_START,
+ CMD_SP_AF_SWITCH_START_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_get_af_switch_results(
+ u16 *afs_conclusion,
+ u16 *afs_level,
+ u16 *afs_pi
+ )
+{
+ int err;
+ int io_result;
+ u16 response_count;
+ u16 response_data[CMD_SP_AF_SWITCH_GET_RESULT_RWSP_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (afs_conclusion == NULL ||
+ afs_level == NULL ||
+ afs_pi == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_SP_AF_SWITCH_GET_RESULT,
+ CMD_SP_AF_SWITCH_GET_RESULT_PARAM_LEN,
+ NULL,
+ &response_count,
+ response_data);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ *afs_conclusion = response_data[0];
+ *afs_level = response_data[1];
+ *afs_pi = response_data[2];
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_get_rds(
+ bool *on
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (on == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *on = fmd_state_info.rx_rds_on;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_buffer_set_size(
+ u8 size
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_DP_BUFFER_SET_SIZE_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (size > MAX_RDS_GROUPS) {
+ err = -EIO;
+ goto error;
+ }
+
+ parameters[0] = size;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_DP_BUFFER_SET_SIZE,
+ CMD_DP_BUFFER_SET_SIZE_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_buffer_set_threshold(
+ u8 threshold
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_DP_BUFFER_SET_THRESHOLD_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (threshold > MAX_RDS_GROUPS) {
+ err = -EIO;
+ goto error;
+ }
+
+ parameters[0] = threshold;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_DP_BUFFER_SET_THRESHOLD,
+ CMD_DP_BUFFER_SET_THRESHOLD_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_set_rds(
+ u8 on_off_state
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_DP_SET_CONTROL_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ switch (on_off_state) {
+ case FMD_SWITCH_ON_RDS_SIMULATOR:
+ parameters[0] = 0xFFFF;
+ break;
+ case FMD_SWITCH_OFF_RDS:
+ default:
+ parameters[0] = 0x0000;
+ fmd_state_info.rx_rds_on = false;
+ break;
+ case FMD_SWITCH_ON_RDS:
+ parameters[0] = 0x0001;
+ fmd_state_info.rx_rds_on = true;
+ break;
+ case FMD_SWITCH_ON_RDS_ENHANCED_MODE:
+ parameters[0] = 0x0002;
+ fmd_state_info.rx_rds_on = true;
+ break;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_DP_SET_CONTROL,
+ CMD_DP_SET_CONTROL_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_get_low_level_rds_groups(
+ u8 index,
+ u16 *block1,
+ u16 *block2,
+ u16 *block3,
+ u16 *block4,
+ u8 *status1,
+ u8 *status2,
+ u8 *status3,
+ u8 *status4
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (block1 == NULL ||
+ block2 == NULL ||
+ block3 == NULL ||
+ block4 == NULL ||
+ status1 == NULL ||
+ status2 == NULL ||
+ status3 == NULL ||
+ status4 == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *block1 = fmd_state_info.rds_group[index].block[0];
+ *block2 = fmd_state_info.rds_group[index].block[1];
+ *block3 = fmd_state_info.rds_group[index].block[2];
+ *block4 = fmd_state_info.rds_group[index].block[3];
+ *status1 = fmd_state_info.rds_group[index].status[0];
+ *status2 = fmd_state_info.rds_group[index].status[1];
+ *status3 = fmd_state_info.rds_group[index].status[2];
+ *status4 = fmd_state_info.rds_group[index].status[3];
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_rx_set_deemphasis(
+ u8 deemphasis
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_RP_SET_DEEMPHASIS_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ switch (deemphasis) {
+ case FMD_EMPHASIS_50US:
+ parameters[0] = FMD_EMPHASIS_50US;
+ break;
+
+ case FMD_EMPHASIS_75US:
+ parameters[0] = FMD_EMPHASIS_75US;
+ break;
+
+ case FMD_EMPHASIS_NONE:
+ default:
+ parameters[0] = FMD_EMPHASIS_NONE;
+ break;
+
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_RP_SET_DEEMPHASIS,
+ CMD_RP_SET_DEEMPHASIS_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_set_pa(
+ bool on
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_PA_SET_MODE_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (on)
+ parameters[0] = 0x0001;
+ else
+ parameters[0] = 0x0000;
+
+ fmd_state_info.gocmd = FMD_STATE_PA;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_PA_SET_MODE,
+ CMD_PA_SET_MODE_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_set_signal_strength(
+ u16 strength
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_PA_SET_CONTROL_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if ((strength > MAX_POWER_LEVEL)
+ || (strength < MIN_POWER_LEVEL)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = strength;
+
+ fmd_state_info.gocmd = FMD_STATE_PA_LEVEL;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_PA_SET_CONTROL,
+ CMD_PA_SET_CONTROL_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.tx_strength = strength;
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_get_signal_strength(
+ u16 *strength
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (strength == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *strength = fmd_state_info.tx_strength;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_set_freq_range(
+ u8 range
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_TN_SET_BAND_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (range > FMD_FREQRANGE_CHINA) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = range;
+ parameters[1] = FMD_MIN_CHANNEL_NUMBER;
+ parameters[2] = FMD_MAX_CHANNEL_NUMBER;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_TN_SET_BAND,
+ CMD_TN_SET_BAND_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.tx_freq_range = range;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_get_freq_range(
+ u8 *range
+ )
+{
+ int err;
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (range == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *range = fmd_state_info.tx_freq_range;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_set_grid(
+ u8 grid
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_TN_SET_GRID_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (grid > FMD_GRID_200KHZ) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = grid;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_TN_SET_GRID,
+ CMD_TN_SET_GRID_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_set_preemphasis(
+ u8 preemphasis
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_RP_SET_PREEMPHASIS_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ switch (preemphasis) {
+ case FMD_EMPHASIS_50US:
+ parameters[0] = FMD_EMPHASIS_50US;
+ break;
+ case FMD_EMPHASIS_75US:
+ default:
+ parameters[0] = FMD_EMPHASIS_75US;
+ break;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_RP_SET_PREEMPHASIS,
+ CMD_RP_SET_PREEMPHASIS_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.tx_preemphasis = preemphasis;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_get_preemphasis(
+ u8 *preemphasis
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (preemphasis == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *preemphasis = fmd_state_info.tx_preemphasis;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_set_frequency(
+ u32 freq
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SP_TUNE_SET_CHANNEL_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (freq > FMD_EU_US_MAX_FREQ_IN_KHZ ||
+ freq < FMD_CHINA_MIN_FREQ_IN_KHZ) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ io_result = fmd_tx_frequency_to_channel(
+ freq,
+ &parameters[0]);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.gocmd = FMD_STATE_FREQUENCY;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_SP_TUNE_SET_CHANNEL,
+ CMD_SP_TUNE_SET_CHANNEL_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_get_frequency(
+ u32 *freq
+ )
+{
+ int err;
+ int io_result;
+ u16 response_count;
+ u16 response_data[CMD_SP_TUNE_GET_CHANNEL_RSP_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (freq == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_SP_TUNE_GET_CHANNEL,
+ CMD_SP_TUNE_GET_CHANNEL_PARAM_LEN,
+ NULL,
+ &response_count,
+ response_data);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ io_result = fmd_tx_channel_to_frequency(
+		response_data[0], /* 1st element is the channel number */
+ freq);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_enable_stereo_mode(
+ bool enable_stereo_mode
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_RP_STEREO_SET_MODE_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ parameters[0] = enable_stereo_mode;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_RP_STEREO_SET_MODE,
+ CMD_RP_STEREO_SET_MODE_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.tx_stereo_mode = enable_stereo_mode;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_get_stereo_mode(
+ bool *stereo_mode
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (stereo_mode == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *stereo_mode = fmd_state_info.tx_stereo_mode;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_set_pilot_deviation(
+ u16 deviation
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_RP_SET_PILOT_DEVIATION_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (deviation > MAX_PILOT_DEVIATION) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = deviation;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_RP_SET_PILOT_DEVIATION,
+ CMD_RP_SET_PILOT_DEVIATION_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.tx_pilot_dev = deviation;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_get_pilot_deviation(
+ u16 *deviation
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (deviation == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *deviation = fmd_state_info.tx_pilot_dev;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_set_rds_deviation(
+ u16 deviation
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_RP_SET_RDS_DEVIATION_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (deviation > MAX_RDS_DEVIATION) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = deviation;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_RP_SET_RDS_DEVIATION,
+ CMD_RP_SET_RDS_DEVIATION_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.tx_rds_dev = deviation;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_get_rds_deviation(
+ u16 *deviation
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (deviation == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *deviation = fmd_state_info.tx_rds_dev;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_set_rds(
+ bool on
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_DP_SET_CONTROL_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (on)
+ parameters[0] = 0x0001;
+ else
+ parameters[0] = 0x0000;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_DP_SET_CONTROL,
+ CMD_DP_SET_CONTROL_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.tx_rds_on = on;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_set_group(
+ u16 position,
+ u8 *block1,
+ u8 *block2,
+ u8 *block3,
+ u8 *block4
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_DP_BUFFER_SET_GROUP_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (block1 == NULL ||
+ block2 == NULL ||
+ block3 == NULL ||
+ block4 == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = position;
+ memcpy(&parameters[1], block1, sizeof(u16));
+ memcpy(&parameters[2], block2, sizeof(u16));
+ memcpy(&parameters[3], block3, sizeof(u16));
+ memcpy(&parameters[4], block4, sizeof(u16));
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_DP_BUFFER_SET_GROUP,
+ CMD_DP_BUFFER_SET_GROUP_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_buffer_set_size(
+ u16 buffer_size
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_DP_BUFFER_SET_SIZE_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ parameters[0] = buffer_size;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_DP_BUFFER_SET_SIZE,
+ CMD_DP_BUFFER_SET_SIZE_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_tx_get_rds(
+ bool *on
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (on == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *on = fmd_state_info.tx_rds_on;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_set_balance(
+ s8 balance
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SET_BALANCE_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+	/* Convert balance from a percentage to the chip's internal value */
+ parameters[0] = (((s16)balance) * FMD_MAX_BALANCE) / 100;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_AUP_SET_BALANCE,
+ CMD_SET_BALANCE_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
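+
+/*
+ * Worked example for the conversion above (FMD_MAX_BALANCE value is
+ * hypothetical here): with FMD_MAX_BALANCE = 0x7FFF, a balance of +50
+ * (50% towards one channel) maps to (50 * 0x7FFF) / 100 = 0x3FFF, and
+ * -100 maps to -FMD_MAX_BALANCE. fmd_set_volume() below applies the
+ * same percentage scaling using FMD_MAX_VOLUME.
+ */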
+
+int fmd_set_volume(
+ u8 volume
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SET_VOLUME_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+	/* Convert volume from a percentage to the chip's internal value */
+ parameters[0] = (((u16)volume) * FMD_MAX_VOLUME) / 100;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_AUP_SET_VOLUME,
+ CMD_SET_VOLUME_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ fmd_state_info.rx_volume = volume;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_get_volume(
+ u8 *volume
+ )
+{
+ int err;
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (volume == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ *volume = fmd_state_info.rx_volume;
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_set_mute(
+ bool mute_on
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SET_MUTE_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (!mute_on)
+ parameters[0] = 0x0000;
+ else
+ parameters[0] = 0x0001;
+ parameters[1] = 0x0001;
+
+ fmd_state_info.gocmd = FMD_STATE_MUTE;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_AUP_SET_MUTE,
+ CMD_SET_MUTE_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_ext_set_mute(
+ bool mute_on
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_EXT_SET_MUTE_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (!mute_on)
+ parameters[0] = 0x0000;
+ else
+ parameters[0] = 0x0001;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_AUP_EXT_SET_MUTE,
+ CMD_EXT_SET_MUTE_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_power_up(void)
+{
+ int err;
+ int io_result;
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ fmd_state_info.gocmd = FMD_STATE_GEN_POWERUP;
+ FM_ERR_REPORT("Sending Gen Power Up");
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_GEN_POWERUP,
+ CMD_POWERUP_PARAM_LEN,
+ NULL,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_goto_standby(void)
+{
+ int err;
+ int io_result;
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_GEN_GOTO_STANDBY,
+ CMD_GOTO_STANDBY_PARAM_LEN,
+ NULL,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_goto_power_down(void)
+{
+ int err;
+ int io_result;
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_GEN_GOTO_POWERDOWN,
+ CMD_GOTO_POWERDOWN_PARAM_LEN,
+ NULL,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_select_ref_clk(
+ u16 ref_clk
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SELECT_REFERENCE_CLOCK_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ parameters[0] = ref_clk;
+
+ fmd_state_info.gocmd = FMD_STATE_SELECT_REF_CLK;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_GEN_SELECT_REFERENCE_CLOCK,
+ CMD_SELECT_REFERENCE_CLOCK_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_set_ref_clk_pll(
+ u16 freq
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_SET_REFERENCE_CLOCK_PLL_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ parameters[0] = freq;
+
+ fmd_state_info.gocmd = FMD_STATE_SET_REF_CLK_PLL;
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_GEN_SET_REFERENCE_CLOCK_PLL,
+ CMD_SET_REFERENCE_CLOCK_PLL_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ fmd_state_info.gocmd = FMD_STATE_NONE;
+ err = io_result;
+ goto error;
+ }
+
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+ else
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_send_fm_ip_enable(void)
+{
+ int err;
+ u8 fm_ip_enable_cmd[CMD_IP_ENABLE_CMD_LEN];
+
+ mutex_lock(&send_cmd_mutex);
+ fm_ip_enable_cmd[0] = CMD_IP_ENABLE_PARAM_LEN;
+ fm_ip_enable_cmd[1] = FM_CATENA_OPCODE;
+	fm_ip_enable_cmd[2] = FM_WRITE;
+ fm_ip_enable_cmd[3] = FM_FUNCTION_ENABLE;
+
+ /* Send the Packet */
+ err = fmd_send_packet(
+ CMD_IP_ENABLE_CMD_LEN,
+ fm_ip_enable_cmd);
+
+ /* Check the ErrorCode */
+ if (err != 0)
+ goto error;
+
+ /* wait till response comes */
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+
+error:
+ mutex_unlock(&send_cmd_mutex);
+ return err;
+}
+
+int fmd_send_fm_ip_disable(void)
+{
+ int err;
+ u8 fm_ip_disable_cmd[CMD_IP_DISABLE_CMD_LEN];
+
+ mutex_lock(&send_cmd_mutex);
+ fm_ip_disable_cmd[0] = CMD_IP_DISABLE_PARAM_LEN;
+ fm_ip_disable_cmd[1] = FM_CATENA_OPCODE;
+	fm_ip_disable_cmd[2] = FM_WRITE;
+ fm_ip_disable_cmd[3] = FM_FUNCTION_DISABLE;
+
+ /* Send the Packet */
+ err = fmd_send_packet(
+ CMD_IP_DISABLE_CMD_LEN,
+ fm_ip_disable_cmd);
+
+ /* Check the ErrorCode */
+ if (err != 0)
+ goto error;
+
+ /* wait till response comes */
+ if (fmd_get_cmd_sem())
+ err = -ETIME;
+
+error:
+ mutex_unlock(&send_cmd_mutex);
+ return err;
+}
+
+int fmd_send_fm_firmware(
+ u8 *fw_buffer,
+ u16 fw_size
+ )
+{
+	int err = 0;
+ u16 bytes_to_write = ST_WRITE_FILE_BLK_SIZE -
+ FM_HCI_WRITE_FILE_BLK_PARAM_LEN;
+ u16 bytes_remaining = fw_size;
+ u8 fm_firmware_data[ST_WRITE_FILE_BLK_SIZE + FM_HCI_CMD_HEADER_LEN];
+ u32 block_id = 0;
+
+ if (fw_buffer == NULL) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ while (bytes_remaining > 0) {
+ if (bytes_remaining <
+ (ST_WRITE_FILE_BLK_SIZE -
+ FM_HCI_WRITE_FILE_BLK_PARAM_LEN))
+ bytes_to_write = bytes_remaining;
+
+ /*
+ * Five bytes of HCI Header for FM Firmware
+ * so shift the firmware data by 5 bytes
+ */
+ memcpy(
+ fm_firmware_data + FM_HCI_WRITE_FILE_BLK_HEADER_LEN,
+ fw_buffer, bytes_to_write);
+ err = fmd_write_file_block(
+ block_id,
+ fm_firmware_data,
+ bytes_to_write);
+ if (err) {
+ FM_DEBUG_REPORT("fmd_send_fm_firmware: "
+ "Failed to download %d Block "
+ "error = %d", (unsigned int)block_id, err);
+ goto error;
+ }
+ /*
+ * Increment the Block Id by 1, since one
+ * block is successfully transmitted
+ * to the chip.
+ */
+ block_id++;
+ /*
+ * Increment the next firmware buffer equal
+ * to the number of bytes transmitted.
+ */
+ fw_buffer += bytes_to_write;
+ /*
+ * Decrement the number of bytes remaining
+ * equal to number of bytes transmitted successfully.
+ */
+ bytes_remaining -= bytes_to_write;
+
+ if (block_id == ST_MAX_NUMBER_OF_FILE_BLOCKS)
+ block_id = 0;
+ }
+
+error:
+ return err;
+}
+
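+/*
+ * Example: a typical caller first obtains the firmware image, for instance
+ * with request_firmware() (<linux/firmware.h>), and then streams it down in
+ * blocks through fmd_send_fm_firmware(). This is only an illustrative
+ * sketch; the firmware file name below is hypothetical.
+ *
+ *	const struct firmware *fw;
+ *
+ *	if (!request_firmware(&fw, "cg2900_fm_init.fw", cg2900_fm_dev)) {
+ *		err = fmd_send_fm_firmware((u8 *)fw->data, (u16)fw->size);
+ *		release_firmware(fw);
+ *	}
+ */
+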
+int fmd_int_bufferfull(
+ u16 *number_of_rds_groups
+ )
+{
+ u16 response_count;
+ u16 response_data[CMD_DP_BUFFER_GET_GROUP_COUNT_PARAM_LEN];
+ u16 index = 0;
+ u16 rds_group_count;
+	int result = -ENOEXEC;
+ struct fmd_rds_group rds_group;
+
+ if (!fmd_state_info.rx_rds_on)
+ goto error;
+
+ /* get group count*/
+ result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_DP_BUFFER_GET_GROUP_COUNT,
+ CMD_DP_BUFFER_GET_GROUP_COUNT_PARAM_LEN,
+ NULL,
+ &response_count,
+ response_data);
+
+ if (result != 0)
+ goto error;
+
+ /* read RDS groups */
+ rds_group_count = FM_GET_NUM_RDS_GRPS(response_data);
+ if (rds_group_count > MAX_RDS_GROUPS)
+ rds_group_count = MAX_RDS_GROUPS;
+
+ *number_of_rds_groups = rds_group_count;
+
+ if (rds_group_count) {
+ FM_DEBUG_REPORT("rds_group_count = %d", rds_group_count);
+ while (rds_group_count-- && fmd_state_info.rx_rds_on) {
+ result = fmd_send_cmd_and_read_resp(
+ CMD_FMR_DP_BUFFER_GET_GROUP,
+ CMD_DP_BUFFER_GET_GROUP_PARAM_LEN,
+ NULL,
+ &response_count,
+ (u16 *)&rds_group);
+
+ if (result != 0)
+ goto error;
+
+ if (fmd_state_info.rx_rds_on)
+ fmd_state_info.rds_group[index++] = rds_group;
+ }
+ }
+error:
+ return result;
+}
+
+void fmd_start_rds_thread(
+ cg2900_fm_rds_cb cb_func
+ )
+{
+ FM_INFO_REPORT("fmd_start_rds_thread");
+ cb_rds_func = cb_func;
+ rds_thread_required = true;
+ rds_thread_task = kthread_create(fmd_rds_thread, NULL, "rds_thread");
+ if (IS_ERR(rds_thread_task)) {
+ FM_ERR_REPORT("fmd_start_rds_thread: "
+ "Unable to Create rds_thread");
+ rds_thread_task = NULL;
+ rds_thread_required = false;
+ return;
+ }
+ wake_up_process(rds_thread_task);
+}
+
+void fmd_stop_rds_thread(void)
+{
+ FM_INFO_REPORT("fmd_stop_rds_thread");
+ /* In case thread is waiting, set the rds sem */
+ fmd_set_rds_sem();
+ /* Re-initialize RDS Semaphore to zero */
+ sema_init(&rds_sem, 0);
+ cb_rds_func = NULL;
+ rds_thread_required = false;
+ /* Wait for RDS thread to exit gracefully */
+ fmd_get_rds_sem();
+
+ if (rds_thread_task)
+ rds_thread_task = NULL;
+}
+
+void fmd_get_rds_sem(void)
+{
+ int ret_val;
+
+ FM_DEBUG_REPORT("fmd_get_rds_sem");
+ ret_val = down_killable(&rds_sem);
+
+ if (ret_val)
+ FM_ERR_REPORT("fmd_get_rds_sem: down_killable "
+ "returned error = %d", ret_val);
+}
+
+void fmd_set_rds_sem(void)
+{
+ FM_DEBUG_REPORT("fmd_set_rds_sem");
+ up(&rds_sem);
+}
+
+int fmd_set_dev(struct device *dev)
+{
+ struct cg2900_user_data *pf_data;
+
+ FM_DEBUG_REPORT("fmd_set_dev");
+
+ if (dev && cg2900_fm_dev) {
+ FM_ERR_REPORT("Only one FM device supported");
+ return -EACCES;
+ }
+
+ cg2900_fm_dev = dev;
+
+ if (!dev)
+ return 0;
+
+ pf_data = dev_get_platdata(dev);
+ pf_data->dev = dev;
+ pf_data->read_cb = fmd_read_cb;
+ pf_data->reset_cb = fmd_reset_cb;
+
+ return 0;
+}
+
+int fmd_set_test_tone_generator_status(
+ u8 test_tone_status
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_TST_TONE_ENABLE_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (test_tone_status > FMD_TST_TONE_ON_WO_SRC) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = test_tone_status;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_TST_TONE_ENABLE,
+ CMD_TST_TONE_ENABLE_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_test_tone_connect(
+ u8 left_audio_mode,
+ u8 right_audio_mode
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_TST_TONE_CONNECT_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (left_audio_mode > FMD_TST_TONE_AUDIO_TONE_SUM ||
+ right_audio_mode > FMD_TST_TONE_AUDIO_TONE_SUM) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = left_audio_mode;
+ parameters[1] = right_audio_mode;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_TST_TONE_CONNECT,
+ CMD_TST_TONE_CONNECT_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_test_tone_set_params(
+ u8 tone_gen,
+ u16 frequency,
+ u16 volume,
+ u16 phase_offset,
+ u16 dc,
+ u8 waveform
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_TST_TONE_SET_PARAMS_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (tone_gen > FMD_TST_TONE_2 ||
+ waveform > FMD_TST_TONE_PULSE ||
+ frequency > 0x7FFF ||
+ volume > 0x7FFF) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = tone_gen;
+ parameters[1] = frequency;
+ parameters[2] = volume;
+ parameters[3] = phase_offset;
+ parameters[4] = dc;
+ parameters[5] = waveform;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_TST_TONE_SET_PARAMS,
+ CMD_TST_TONE_SET_PARAMS_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+int fmd_limiter_setcontrol(
+ u16 audio_deviation,
+ u16 notification_hold_off_time
+ )
+{
+ int err;
+ int io_result;
+ u16 parameters[CMD_FMT_RP_LIMITER_SETCONTROL_PARAM_LEN];
+
+ if (fmd_go_cmd_busy()) {
+ err = -EBUSY;
+ goto error;
+ }
+
+ if (!fmd_state_info.fmd_initialized) {
+ err = -ENOEXEC;
+ goto error;
+ }
+
+ if (audio_deviation < MIN_AUDIO_DEVIATION ||
+ audio_deviation > MAX_AUDIO_DEVIATION ||
+ notification_hold_off_time > 0x7FFF) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ parameters[0] = audio_deviation;
+ parameters[1] = notification_hold_off_time;
+
+ io_result = fmd_send_cmd_and_read_resp(
+ CMD_FMT_RP_LIMITER_SETCONTROL,
+ CMD_FMT_RP_LIMITER_SETCONTROL_PARAM_LEN,
+ parameters,
+ NULL,
+ NULL);
+
+ if (io_result != 0) {
+ err = io_result;
+ goto error;
+ }
+
+ err = 0;
+
+error:
+ return err;
+}
+
+MODULE_AUTHOR("Hemant Gupta");
+MODULE_LICENSE("GPL v2");
+
+module_param(cg2900_fm_debug_level, ushort, S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(cg2900_fm_debug_level, "cg2900_fm_debug_level: "
+ " *1: Only Error Logs* "
+ " 2: Info Logs "
+ " 3: Debug Logs "
+ " 4: HCI Logs");
+
diff --git a/drivers/media/radio/CG2900/cg2900_fm_driver.h b/drivers/media/radio/CG2900/cg2900_fm_driver.h
new file mode 100644
index 00000000000..fc5ec682714
--- /dev/null
+++ b/drivers/media/radio/CG2900/cg2900_fm_driver.h
@@ -0,0 +1,1793 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Linux FM Driver for CG2900 FM Chip
+ *
+ * Author: Hemant Gupta <hemant.gupta@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _FMDRIVER_H_
+#define _FMDRIVER_H_
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/semaphore.h>
+#include <linux/version.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mutex.h>
+#include "cg2900_fm_api.h"
+
+/* structure declared in cg2900_fm_driver.c */
+extern struct timespec time_spec;
+
+/* module_param declared in cg2900_fm_driver.c */
+extern unsigned short cg2900_fm_debug_level;
+
+/**
+ * enum fmd_debug_levels - FM Driver Debug Levels.
+ *
+ * @FM_NO_LOGS: No Logs are displayed.
+ * @FM_ERROR_LOGS: Only Error Logs are displayed.
+ * @FM_INFO_LOGS: Function Entry logs are displayed.
+ * @FM_DEBUG_LOGS: Full debugging support.
+ * @FM_HCI_PACKET_LOGS: HCI Packet Sent/received to/by
+ * FM Driver are displayed.
+ *
+ * Various debug levels for FM Driver.
+ */
+enum fmd_debug_levels {
+ FM_NO_LOGS,
+ FM_ERROR_LOGS,
+ FM_INFO_LOGS,
+ FM_DEBUG_LOGS,
+ FM_HCI_PACKET_LOGS
+};
+
+/* Trace macros; wrapped in do { } while (0) so they are safe in if/else. */
+#define FM_HEX_REPORT(fmt, arg...) \
+	do { \
+		if (cg2900_fm_debug_level == FM_HCI_PACKET_LOGS) { \
+			printk(KERN_INFO fmt "\r\n" , ## arg); \
+		} \
+	} while (0)
+
+#define FM_DEBUG_REPORT(fmt, arg...) \
+	do { \
+		if (cg2900_fm_debug_level > FM_INFO_LOGS && \
+				cg2900_fm_debug_level < FM_HCI_PACKET_LOGS) { \
+			getnstimeofday(&time_spec); \
+			printk(KERN_INFO "\n[%08x:%08x] " \
+				"CG2900_FM_Driver: " fmt "\r\n" , \
+				(unsigned int)time_spec.tv_sec, \
+				(unsigned int)time_spec.tv_nsec, ## arg); \
+		} \
+	} while (0)
+
+#define FM_INFO_REPORT(fmt, arg...) \
+	do { \
+		if (cg2900_fm_debug_level > FM_ERROR_LOGS && \
+				cg2900_fm_debug_level < FM_HCI_PACKET_LOGS) { \
+			getnstimeofday(&time_spec); \
+			printk(KERN_INFO "\n[%08x:%08x] " \
+				"CG2900_FM_Driver: " fmt "\r\n" , \
+				(unsigned int)time_spec.tv_sec, \
+				(unsigned int)time_spec.tv_nsec, ## arg); \
+		} \
+	} while (0)
+
+#define FM_ERR_REPORT(fmt, arg...) \
+	do { \
+		if (cg2900_fm_debug_level >= FM_ERROR_LOGS) { \
+			getnstimeofday(&time_spec); \
+			printk(KERN_ERR "\n[%08x:%08x] " \
+				"CG2900_FM_Driver: " fmt "\r\n" , \
+				(unsigned int)time_spec.tv_sec, \
+				(unsigned int)time_spec.tv_nsec, ## arg); \
+		} \
+	} while (0)
+
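+/*
+ * Example: with cg2900_fm_debug_level set to FM_INFO_LOGS or FM_DEBUG_LOGS
+ * (via the cg2900_fm_debug_level module parameter), a driver function
+ * typically traces entry and failures as follows (illustrative sketch only):
+ *
+ *	FM_INFO_REPORT("fmd_power_up");
+ *	...
+ *	if (io_result != 0)
+ *		FM_ERR_REPORT("fmd_power_up: chip returned %d", io_result);
+ */
+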
+#define MAX_COUNT_OF_IRQS 16
+#define MAX_BUFFER_SIZE 512
+#define MAX_NAME_SIZE 100
+/* Minimum Power level for CG2900. The value is in units of dBuV */
+#define MIN_POWER_LEVEL 88
+/* Maximum Power level for CG2900. The value is in units of dBuV */
+#define MAX_POWER_LEVEL 123
+/* Minimum RDS Deviation value for CG2900. The value is in units of 10 Hz */
+#define MIN_RDS_DEVIATION 0
+/* Default RDS Deviation value for CG2900. The value is in units of 10 Hz */
+#define DEFAULT_RDS_DEVIATION 200
+/* Maximum RDS Deviation value for CG2900. The value is in units of 10 Hz */
+#define MAX_RDS_DEVIATION 750
+#define FMD_EU_US_MIN_FREQ_IN_KHZ 87500
+#define FMD_EU_US_MAX_FREQ_IN_KHZ 108000
+#define FMD_JAPAN_MIN_FREQ_IN_KHZ 76000
+#define FMD_JAPAN_MAX_FREQ_IN_KHZ 90000
+#define FMD_CHINA_MIN_FREQ_IN_KHZ 70000
+#define FMD_CHINA_MAX_FREQ_IN_KHZ 108000
+#define FMD_MIN_CHANNEL_NUMBER 0
+#define FMD_MAX_CHANNEL_NUMBER 760
+/*
+ * Maximum supported balance for CG2900. This is just a hexadecimal number
+ * with no units.
+ */
+#define FMD_MAX_BALANCE 0x7FFF
+/*
+ * Maximum supported volume for CG2900. This is just a hexadecimal number
+ * with no units.
+ */
+#define FMD_MAX_VOLUME 0x7FFF
+/* Minimum Program Identification value as per RDS specification */
+#define MIN_PI_VALUE 0x0000
+/* Maximum Program Identification value as per RDS specification */
+#define MAX_PI_VALUE 0xFFFF
+/* Minimum Program Type code value as per RDS specification */
+#define MIN_PTY_VALUE 0
+/* Maximum Program Type code value as per RDS specification */
+#define MAX_PTY_VALUE 31
+/* Minimum Pilot Deviation value for CG2900. The value is in units of 10 Hz */
+#define MIN_PILOT_DEVIATION 0
+/* Default Pilot Deviation value for CG2900. The value is in units of 10 Hz */
+#define DEFAULT_PILOT_DEVIATION 675
+/* Maximum Pilot Deviation value for CG2900. The value is in units of 10 Hz */
+#define MAX_PILOT_DEVIATION 1000
+/*
+ * Default RSSI Threshold for a channel to be considered valid for CG2900.
+ * This is just a hexadecimal number with no units.
+ */
+#define DEFAULT_RSSI_THRESHOLD 0x0100
+/*
+ * Default Peak Noise level for a channel to be considered valid for CG2900.
+ * This is just a hexadecimal number with no units.
+ */
+#define DEFAULT_PEAK_NOISE_VALUE 0x0035
+/*
+ * Default Average Noise level for a channel to be considered valid for CG2900.
+ * This is just a hexadecimal number with no units.
+ */
+#define DEFAULT_AVERAGE_NOISE_MAX_VALUE 0x0030
+/*
+ * Minimum Audio Deviation Level, as per CG2900 FM User Manual.
+ * This is in units of 10 Hz.
+ */
+#define MIN_AUDIO_DEVIATION 0x157C
+/*
+ * Maximum Audio Deviation Level, as per CG2900 FM User Manual.
+ * This is in units of 10 Hz.
+ */
+#define MAX_AUDIO_DEVIATION 0x3840
+#define FREQUENCY_CONVERTOR_KHZ_HZ 1000
+#define CHANNEL_FREQ_CONVERTER_MHZ 50
+/* Interrupt(s) for CG2900 */
+#define IRPT_INVALID 0x0000
+#define IRPT_OPERATION_SUCCEEDED 0x0001
+#define IRPT_OPERATION_FAILED 0x0002
+#define IRPT_RX_BUFFERFULL_TX_BUFFEREMPTY 0x0008
+#define IRPT_RX_SIGNAL_QUALITYLOW_MUTE_STATUS_CHANGED 0x0010
+#define IRPT_RX_MONO_STEREO_TRANSITION 0x0020
+#define IRPT_TX_OVERMODULATION 0x0030
+#define IRPT_RX_RDS_SYNCFOUND_TX_OVERDRIVE 0x0040
+#define IRPT_RDS_SYNC_LOST 0x0080
+#define IRPT_PI_CODE_CHANGED 0x0100
+#define IRPT_REQUESTED_BLOCK_AVAILABLE 0x0200
+#define IRPT_BUFFER_CLEARED 0x2000
+#define IRPT_WARM_BOOT_READY 0x4000
+#define IRPT_COLD_BOOT_READY 0x8000
+/* FM Commands Id */
+#define CMD_ID_NONE 0x0000
+#define CMD_AUP_EXT_SET_MUTE 0x01E2
+#define CMD_AUP_SET_BALANCE 0x0042
+#define CMD_AUP_SET_MUTE 0x0062
+#define CMD_AUP_SET_VOLUME 0x0022
+#define CMD_FMR_DP_BUFFER_GET_GROUP 0x0303
+#define CMD_FMR_DP_BUFFER_GET_GROUP_COUNT 0x0323
+#define CMD_FMR_DP_BUFFER_SET_SIZE 0x0343
+#define CMD_FMR_DP_BUFFER_SET_THRESHOLD 0x06C3
+#define CMD_FMR_DP_SET_CONTROL 0x02A3
+#define CMD_FMR_RP_GET_RSSI 0x0083
+#define CMD_FMR_RP_GET_STATE 0x0063
+#define CMD_FMR_RP_STEREO_SET_MODE 0x0123
+#define CMD_FMR_SET_ANTENNA 0x0663
+#define CMD_FMR_SP_AF_SWITCH_GET_RESULT 0x0603
+#define CMD_FMR_SP_AF_SWITCH_START 0x04A3
+#define CMD_FMR_SP_AF_UPDATE_GET_RESULT 0x0483
+#define CMD_FMR_SP_AF_UPDATE_START 0x0463
+#define CMD_FMR_SP_BLOCK_SCAN_GET_RESULT 0x06A3
+#define CMD_FMR_SP_BLOCK_SCAN_START 0x0683
+#define CMD_FMR_SP_SCAN_GET_RESULT 0x0423
+#define CMD_FMR_SP_SCAN_START 0x0403
+#define CMD_FMR_SP_SEARCH_START 0x03E3
+#define CMD_FMR_SP_STOP 0x0383
+#define CMD_FMR_SP_TUNE_GET_CHANNEL 0x03A3
+#define CMD_FMR_SP_TUNE_SET_CHANNEL 0x03C3
+#define CMD_FMR_TN_SET_BAND 0x0023
+#define CMD_FMR_TN_SET_GRID 0x0043
+#define CMD_FMR_RP_SET_DEEMPHASIS 0x00C3
+#define CMD_FMT_DP_BUFFER_GET_POSITION 0x0204
+#define CMD_FMT_DP_BUFFER_SET_GROUP 0x0244
+#define CMD_FMT_DP_BUFFER_SET_SIZE 0x0224
+#define CMD_FMT_DP_BUFFER_SET_THRESHOLD 0x0284
+#define CMD_FMT_DP_SET_CONTROL 0x0264
+#define CMD_FMT_PA_SET_CONTROL 0x01A4
+#define CMD_FMT_PA_SET_MODE 0x01E4
+#define CMD_FMT_RP_SET_PILOT_DEVIATION 0x02A4
+#define CMD_FMT_RP_SET_PREEMPHASIS 0x00C4
+#define CMD_FMT_RP_SET_RDS_DEVIATION 0x0344
+#define CMD_FMT_RP_STEREO_SET_MODE 0x0164
+#define CMD_FMT_SP_TUNE_GET_CHANNEL 0x0184
+#define CMD_FMT_SP_TUNE_SET_CHANNEL 0x0064
+#define CMD_FMT_TN_SET_BAND 0x0024
+#define CMD_FMT_TN_SET_GRID 0x0044
+#define CMD_GEN_GET_MODE 0x0021
+#define CMD_GEN_GET_REGISTER_VALUE 0x00E1
+#define CMD_GEN_GET_VERSION 0x00C1
+#define CMD_GEN_GOTO_MODE 0x0041
+#define CMD_GEN_GOTO_POWERDOWN 0x0081
+#define CMD_GEN_GOTO_STANDBY 0x0061
+#define CMD_GEN_POWERUP 0x0141
+#define CMD_GEN_SELECT_REFERENCE_CLOCK 0x0201
+#define CMD_GEN_SET_REFERENCE_CLOCK 0x0161
+#define CMD_GEN_SET_REFERENCE_CLOCK_PLL 0x01A1
+#define CMD_GEN_SET_REGISTER_VALUE 0x0101
+#define CMD_TST_TONE_ENABLE 0x0027
+#define CMD_TST_TONE_CONNECT 0x0047
+#define CMD_TST_TONE_SET_PARAMS 0x0067
+#define CMD_FMT_RP_LIMITER_SETCONTROL 0x01C4
+
+/* FM Command Id Parameter Length */
+#define CMD_GET_VERSION_PARAM_LEN 0
+#define CMD_GET_VERSION_RSP_PARAM_LEN 7
+#define CMD_GOTO_MODE_PARAM_LEN 1
+#define CMD_SET_ANTENNA_PARAM_LEN 1
+#define CMD_TN_SET_BAND_PARAM_LEN 3
+#define CMD_TN_SET_GRID_PARAM_LEN 1
+#define CMD_SP_TUNE_SET_CHANNEL_PARAM_LEN 1
+#define CMD_SP_TUNE_GET_CHANNEL_PARAM_LEN 0
+#define CMD_SP_TUNE_GET_CHANNEL_RSP_PARAM_LEN 1
+#define CMD_RP_STEREO_SET_MODE_PARAM_LEN 1
+#define CMD_RP_GET_RSSI_PARAM_LEN 0
+#define CMD_RP_GET_RSSI_RSP_PARAM_LEN 1
+#define CMD_RP_GET_STATE_PARAM_LEN 0
+#define CMD_RP_GET_STATE_RSP_PARAM_LEN 2
+#define CMD_SP_SEARCH_START_PARAM_LEN 4
+#define CMD_SP_SCAN_START_PARAM_LEN 4
+#define CMD_SP_SCAN_GET_RESULT_PARAM_LEN 1
+#define CMD_SP_SCAN_GET_RESULT_RSP_PARAM_LEN 7
+#define CMD_SP_BLOCK_SCAN_START_PARAM_LEN 3
+#define CMD_SP_BLOCK_SCAN_GET_RESULT_PARAM_LEN 1
+#define CMD_SP_BLOCK_SCAN_GET_RESULT_RSP_PARAM_LEN 7
+#define CMD_SP_STOP_PARAM_LEN 0
+#define CMD_SP_AF_UPDATE_START_PARAM_LEN 1
+#define CMD_SP_AF_UPDATE_GET_RESULT_PARAM_LEN 0
+#define CMD_SP_AF_UPDATE_GET_RESULT_RSP_PARAM_LEN 1
+#define CMD_SP_AF_SWITCH_START_PARAM_LEN 5
+#define CMD_SP_AF_SWITCH_GET_RESULT_PARAM_LEN 0
+#define CMD_SP_AF_SWITCH_GET_RESULT_RWSP_PARAM_LEN 3
+#define CMD_DP_BUFFER_SET_SIZE_PARAM_LEN 1
+#define CMD_DP_BUFFER_SET_THRESHOLD_PARAM_LEN 1
+#define CMD_DP_SET_CONTROL_PARAM_LEN 1
+#define CMD_PA_SET_MODE_PARAM_LEN 1
+#define CMD_PA_SET_CONTROL_PARAM_LEN 1
+#define CMD_RP_SET_PREEMPHASIS_PARAM_LEN 1
+#define CMD_RP_SET_DEEMPHASIS_PARAM_LEN 1
+#define CMD_RP_SET_PILOT_DEVIATION_PARAM_LEN 1
+#define CMD_RP_SET_RDS_DEVIATION_PARAM_LEN 1
+#define CMD_DP_BUFFER_SET_GROUP_PARAM_LEN 5
+#define CMD_SET_BALANCE_PARAM_LEN 1
+#define CMD_SET_VOLUME_PARAM_LEN 1
+#define CMD_SET_MUTE_PARAM_LEN 2
+#define CMD_EXT_SET_MUTE_PARAM_LEN 1
+#define CMD_POWERUP_PARAM_LEN 0
+#define CMD_GOTO_STANDBY_PARAM_LEN 0
+#define CMD_GOTO_POWERDOWN_PARAM_LEN 0
+#define CMD_SELECT_REFERENCE_CLOCK_PARAM_LEN 1
+#define CMD_SET_REFERENCE_CLOCK_PLL_PARAM_LEN 1
+#define CMD_DP_BUFFER_GET_GROUP_COUNT_PARAM_LEN 0
+#define CMD_DP_BUFFER_GET_GROUP_PARAM_LEN 0
+#define CMD_IP_ENABLE_CMD_LEN 4
+#define CMD_IP_ENABLE_PARAM_LEN 3
+#define CMD_IP_DISABLE_CMD_LEN 4
+#define CMD_IP_DISABLE_PARAM_LEN 3
+#define CMD_TST_TONE_ENABLE_PARAM_LEN 1
+#define CMD_TST_TONE_CONNECT_PARAM_LEN 2
+#define CMD_TST_TONE_SET_PARAMS_PARAM_LEN 6
+#define CMD_FMT_RP_LIMITER_SETCONTROL_PARAM_LEN 2
+
+/* FM HCI Command and event specific */
+#define FM_WRITE 0x00
+#define FM_READ 0x01
+#define FM_CATENA_OPCODE 0xFE
+#define HCI_CMD_FM 0xFD50
+#define HCI_CMD_VS_WRITE_FILE_BLOCK 0xFC2E
+#define FM_EVENT_ID 0x15
+#define FM_SUCCESS_STATUS 0x00
+#define FM_EVENT 0x01
+#define HCI_COMMAND_COMPLETE_EVENT 0x0E
+#define HCI_VS_DBG_EVENT 0xFF
+#define ST_WRITE_FILE_BLK_SIZE 254
+#define ST_MAX_NUMBER_OF_FILE_BLOCKS 256
+#define FM_PG1_INTERRUPT_EVENT_LEN 0x04
+#define FM_PG2_INTERRUPT_EVENT_LEN 0x06
+#define FM_HCI_CMD_HEADER_LEN 6
+#define FM_HCI_CMD_PARAM_LEN 5
+#define FM_HCI_WRITE_FILE_BLK_HEADER_LEN 5
+#define FM_HCI_WRITE_FILE_BLK_PARAM_LEN 4
+#define HCI_PACKET_INDICATOR_CMD 0x01
+#define HCI_PACKET_INDICATOR_EVENT 0x04
+#define HCI_PACKET_INDICATOR_FM_CMD_EVT 0x08
+/* FM Functions specific to CG2900 */
+#define FM_FUNCTION_ENABLE 0x00
+#define FM_FUNCTION_DISABLE 0x01
+#define FM_FUNCTION_RESET 0x02
+#define FM_FUNCTION_WRITE_COMMAND 0x10
+#define FM_FUNCTION_SET_INT_MASK_ALL 0x20
+#define FM_FUNCTION_GET_INT_MASK_ALL 0x21
+#define FM_FUNCTION_SET_INT_MASK 0x22
+#define FM_FUNCTION_GET_INT_MASK 0x23
+#define FM_FUNCTION_FIRMWARE_DOWNLOAD 0x30
+/* Command succeeded */
+#define FM_CMD_STATUS_CMD_SUCCESS 0x00
+/* HCI_ERR_HW_FAILURE when no response from the IP */
+#define FM_CMD_STATUS_HCI_ERR_HW_FAILURE 0x03
+/* HCI_ERR_INVALID_PARAMETERS. */
+#define FM_CMD_STATUS_HCI_ERR_INVALID_PARAMETERS 0x12
+/* When the host tries to send a command to an IP that hasn't been
+ * initialized.
+ */
+#define FM_CMD_STATUS_IP_UNINIT 0x15
+/* HCI_ERR_UNSPECIFIED_ERROR: any other error */
+#define FM_CMD_STATUS_HCI_ERR_UNSPECIFIED_ERROR 0x1F
+/* HCI_ERR_CMD_DISALLOWED when the host asks for an unauthorized operation
+ * (FM state transition for instance)
+ */
+#define FM_CMD_STATUS_HCI_ERR_CMD_DISALLOWED 0x0C
+/* Wrong sequence number for FM FW download command */
+#define FM_CMD_STATUS_WRONG_SEQ_NUM 0xF1
+/* Unknown file type for FM FW download command */
+#define FM_CMD_STATUS_UNKNOWN_FILE_TYPE 0xF2
+/* File version mismatch for FM FW download command */
+#define FM_CMD_STATUS_FILE_VERSION_MISMATCH 0xF3
+
+
+/**
+ * enum fmd_event - Events received.
+ *
+ * @FMD_EVENT_OPERATION_COMPLETED: Previous operation has been completed.
+ * @FMD_EVENT_ANTENNA_STATUS_CHANGED: Antenna has been changed.
+ * @FMD_EVENT_FREQUENCY_CHANGED: Frequency has been changed.
+ * @FMD_EVENT_SEEK_COMPLETED: Seek operation has completed.
+ * @FMD_EVENT_SCAN_BAND_COMPLETED: Band Scan completed.
+ * @FMD_EVENT_BLOCK_SCAN_COMPLETED: Block Scan completed.
+ * @FMD_EVENT_AF_UPDATE_SWITCH_COMPLETE: AF Update or AF Switch is complete.
+ * @FMD_EVENT_MONO_STEREO_TRANSITION_COMPLETE: Mono stereo transition is
+ * completed.
+ * @FMD_EVENT_SEEK_STOPPED: Previous Seek/Band Scan/ Block Scan operation is
+ * stopped.
+ * @FMD_EVENT_GEN_POWERUP: FM IP has been powered up.
+ * @FMD_EVENT_RDSGROUP_RCVD: RDS Groups Full interrupt.
+ * @FMD_EVENT_LAST_ELEMENT: Last event, used for keeping count of
+ * number of events.
+ *
+ * Various events received from FM driver for Upper Layer(s) processing.
+ */
+enum fmd_event {
+ FMD_EVENT_OPERATION_COMPLETED,
+ FMD_EVENT_ANTENNA_STATUS_CHANGED,
+ FMD_EVENT_FREQUENCY_CHANGED,
+ FMD_EVENT_SEEK_COMPLETED,
+ FMD_EVENT_SCAN_BAND_COMPLETED,
+ FMD_EVENT_BLOCK_SCAN_COMPLETED,
+ FMD_EVENT_AF_UPDATE_SWITCH_COMPLETE,
+ FMD_EVENT_MONO_STEREO_TRANSITION_COMPLETE,
+ FMD_EVENT_SEEK_STOPPED,
+ FMD_EVENT_GEN_POWERUP,
+ FMD_EVENT_RDSGROUP_RCVD,
+ FMD_EVENT_LAST_ELEMENT
+};
+
+/**
+ * enum fmd_mode - FM Driver Modes.
+ *
+ * @FMD_MODE_IDLE: FM Driver in Idle mode.
+ * @FMD_MODE_RX: FM Driver in Rx mode.
+ * @FMD_MODE_TX: FM Driver in Tx mode.
+ *
+ * Various Modes of FM Radio.
+ */
+enum fmd_mode {
+ FMD_MODE_IDLE,
+ FMD_MODE_RX,
+ FMD_MODE_TX
+};
+
+/**
+ * enum fmd_antenna - Antenna selection.
+ *
+ * @FMD_ANTENNA_EMBEDDED: Embedded Antenna.
+ * @FMD_ANTENNA_WIRED: Wired Antenna.
+ *
+ * Antenna to be used for FM Radio.
+ */
+enum fmd_antenna {
+ FMD_ANTENNA_EMBEDDED,
+ FMD_ANTENNA_WIRED
+};
+
+/**
+ * enum fmd_grid - Grid used on FM Radio.
+ *
+ * @FMD_GRID_50KHZ: 50 kHz grid spacing.
+ * @FMD_GRID_100KHZ: 100 kHz grid spacing.
+ * @FMD_GRID_200KHZ: 200 kHz grid spacing.
+ *
+ * Spacing used on FM Radio.
+ */
+enum fmd_grid {
+ FMD_GRID_50KHZ,
+ FMD_GRID_100KHZ,
+ FMD_GRID_200KHZ
+};
+
+/**
+ * enum fmd_emphasis - De-emphasis/Pre-emphasis level.
+ *
+ * @FMD_EMPHASIS_NONE: De-emphasis Disabled.
+ * @FMD_EMPHASIS_50US: 50 us de-emphasis/pre-emphasis level.
+ * @FMD_EMPHASIS_75US: 75 us de-emphasis/pre-emphasis level.
+ *
+ * De-emphasis/Pre-emphasis level used on FM Radio.
+ */
+enum fmd_emphasis {
+ FMD_EMPHASIS_NONE = 0,
+ FMD_EMPHASIS_50US = 1,
+ FMD_EMPHASIS_75US = 2
+};
+
+/**
+ * enum fmd_freq_range - Frequency range.
+ *
+ * @FMD_FREQRANGE_EUROAMERICA: EU/US Range (87.5 - 108 MHz).
+ * @FMD_FREQRANGE_JAPAN: Japan Range (76 - 90 MHz).
+ * @FMD_FREQRANGE_CHINA: China Range (70 - 108 MHz).
+ *
+ * Various Frequency range(s) supported by FM Radio.
+ */
+enum fmd_freq_range {
+ FMD_FREQRANGE_EUROAMERICA,
+ FMD_FREQRANGE_JAPAN,
+ FMD_FREQRANGE_CHINA
+};
+
+/**
+ * enum fmd_stereo_mode - FM Driver Stereo Modes.
+ *
+ * @FMD_STEREOMODE_OFF: Stereo Blending Off.
+ * @FMD_STEREOMODE_MONO: Mono Mode.
+ * @FMD_STEREOMODE_BLENDING: Blending Mode.
+ *
+ * Various Stereo Modes of FM Radio.
+ */
+enum fmd_stereo_mode {
+ FMD_STEREOMODE_OFF,
+ FMD_STEREOMODE_MONO,
+ FMD_STEREOMODE_BLENDING
+};
+
+/**
+ * enum fmd_pilot_tone - Pilot Tone Selection
+ *
+ * @FMD_PILOT_TONE_DISABLED: Pilot Tone to be disabled.
+ * @FMD_PILOT_TONE_ENABLED: Pilot Tone to be enabled.
+ *
+ * Pilot Tone to be enabled or disabled.
+ */
+enum fmd_pilot_tone {
+ FMD_PILOT_TONE_DISABLED,
+ FMD_PILOT_TONE_ENABLED
+};
+
+/**
+ * enum fmd_output - Output of Sample Rate Converter.
+ *
+ * @FMD_OUTPUT_DISABLED: Sample Rate Converter is disabled.
+ * @FMD_OUTPUT_I2S: I2S Output from Sample rate converter.
+ * @FMD_OUTPUT_PARALLEL: Parallel output from sample rate converter.
+ *
+ * Sample Rate Converter's output to be set on Connectivity Controller.
+ */
+enum fmd_output {
+ FMD_OUTPUT_DISABLED,
+ FMD_OUTPUT_I2S,
+ FMD_OUTPUT_PARALLEL
+};
+
+/**
+ * enum fmd_input - Audio Input to Sample Rate Converter.
+ *
+ * @FMD_INPUT_ANALOG: Selects the ADC's as audio source
+ * @FMD_INPUT_DIGITAL: Selects Digital Input as audio source.
+ *
+ * Audio Input source for Sample Rate Converter.
+ */
+enum fmd_input {
+ FMD_INPUT_ANALOG,
+ FMD_INPUT_DIGITAL
+};
+
+/**
+ * enum fmd_rds_mode - RDS Mode to be selected for FM Rx.
+ *
+ * @FMD_SWITCH_OFF_RDS: RDS Decoding disabled in FM Chip.
+ * @FMD_SWITCH_ON_RDS: RDS Decoding enabled in FM Chip.
+ * @FMD_SWITCH_ON_RDS_ENHANCED_MODE: Enhanced RDS Mode.
+ * @FMD_SWITCH_ON_RDS_SIMULATOR: RDS Simulator switched on in FM Chip.
+ *
+ * RDS Mode to be selected for FM Rx.
+ */
+enum fmd_rds_mode {
+ FMD_SWITCH_OFF_RDS,
+ FMD_SWITCH_ON_RDS,
+ FMD_SWITCH_ON_RDS_ENHANCED_MODE,
+ FMD_SWITCH_ON_RDS_SIMULATOR
+};
+
+/**
+ * enum fmd_tst_tone_status - Test Tone Generator Status.
+ *
+ * @FMD_TST_TONE_OFF: Test Tone Generator is off.
+ * @FMD_TST_TONE_ON_W_SRC: Test Tone Gen. is on with Sample Rate Conversion.
+ * @FMD_TST_TONE_ON_WO_SRC: Test Tone Gen. is on without Sample Rate Conversion.
+ *
+ * Test Tone Generator status to be set.
+ */
+enum fmd_tst_tone_status {
+ FMD_TST_TONE_OFF,
+ FMD_TST_TONE_ON_W_SRC,
+ FMD_TST_TONE_ON_WO_SRC
+};
+
+/**
+ * enum fmd_tst_tone_audio_mode - Test Tone Generator Audio Output/Input Mode.
+ *
+ * @FMD_TST_TONE_AUDIO_NORMAL: Normal Audio.
+ * @FMD_TST_TONE_AUDIO_ZERO: Zero.
+ * @FMD_TST_TONE_AUDIO_TONE_1: Tone 1.
+ * @FMD_TST_TONE_AUDIO_TONE_2: Tone 2.
+ * @FMD_TST_TONE_AUDIO_TONE_SUM: Sum of Tone 1 and Tone 2.
+ *
+ * Test Tone Generator Audio Output/Input Modes.
+ */
+enum fmd_tst_tone_audio_mode {
+ FMD_TST_TONE_AUDIO_NORMAL,
+ FMD_TST_TONE_AUDIO_ZERO,
+ FMD_TST_TONE_AUDIO_TONE_1,
+ FMD_TST_TONE_AUDIO_TONE_2,
+ FMD_TST_TONE_AUDIO_TONE_SUM
+};
+
+/**
+ * enum fmd_tst_tone - Test Tone of Internal Tone Generator.
+ *
+ * @FMD_TST_TONE_1: Test Tone 1
+ * @FMD_TST_TONE_2: Test Tone 2
+ *
+ * Test Tone.
+ */
+enum fmd_tst_tone {
+ FMD_TST_TONE_1,
+ FMD_TST_TONE_2
+};
+
+/**
+ * enum fmd_tst_tone_waveform - Test Tone Waveform of Internal Tone Generator.
+ *
+ * @FMD_TST_TONE_SINE: Sine wave
+ * @FMD_TST_TONE_PULSE: Pulse wave
+ *
+ * Test Tone waveform.
+ */
+enum fmd_tst_tone_waveform {
+ FMD_TST_TONE_SINE,
+ FMD_TST_TONE_PULSE
+};
+
+/* Callback function to receive radio events. */
+typedef void(*fmd_radio_cb)(
+ u8 event,
+ bool event_successful
+ );
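+
+/*
+ * A minimal callback sketch (illustrative only; the function name is
+ * hypothetical). The upper layer registers it with fmd_register_callback()
+ * and is then notified of chip events:
+ *
+ *	static void my_radio_cb(u8 event, bool event_successful)
+ *	{
+ *		switch (event) {
+ *		case FMD_EVENT_OPERATION_COMPLETED:
+ *		case FMD_EVENT_SEEK_COMPLETED:
+ *			// handle completion, check event_successful
+ *			break;
+ *		case FMD_EVENT_RDSGROUP_RCVD:
+ *			// new RDS groups are available for reading
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *	}
+ */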
+
+/**
+ * fmd_init() - Initialize the FM Driver internal structures.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EIO, if there is an error.
+ */
+int fmd_init(void);
+
+/**
+ * fmd_exit() - De-initialize the FM Driver.
+ */
+void fmd_exit(void);
+
+/**
+ * fmd_register_callback() - Function to register callback function.
+ *
+ * This function registers the callback function provided by upper layers.
+ * @callback: Fmradio call back Function pointer
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_register_callback(
+ fmd_radio_cb callback
+ );
+
+/**
+ * fmd_get_version() - Retrieves the FM HW and FW version.
+ *
+ * @version: (out) Version Array
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameters are not valid.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_get_version(
+ u16 *version
+ );
+
+/**
+ * fmd_set_mode() - Starts a transition to the given mode.
+ *
+ * @mode: Transition mode
+ *
+ * Returns:
+ * 0, if set mode done successfully.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_set_mode(
+ u8 mode
+ );
+
+/**
+ * fmd_get_freq_range_properties() - Retrieves Freq Range Properties.
+ *
+ * @range: range of freq
+ * @min_freq: (out) Minimum Frequency of the Band in kHz.
+ * @max_freq: (out) Maximum Frequency of the Band in kHz
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ */
+int fmd_get_freq_range_properties(
+ u8 range,
+ u32 *min_freq,
+ u32 *max_freq
+ );
+
+/**
+ * fmd_set_antenna() - Selects the antenna to be used in receive mode.
+ *
+ * @antenna: Antenna type; FMD_ANTENNA_EMBEDDED selects the embedded antenna,
+ * FMD_ANTENNA_WIRED selects the wired antenna.
+ *
+ * Returns:
+ * 0, if set antenna done successfully.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_set_antenna(
+ u8 antenna
+ );
+
+/**
+ * fmd_get_antenna() - Retrieves the currently used antenna type.
+ *
+ * @antenna: (out) Antenna Selected on FM Radio.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_get_antenna(
+ u8 *antenna
+ );
+
+/**
+ * fmd_set_freq_range() - Sets the FM band.
+ *
+ * @range: freq range
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_set_freq_range(
+ u8 range
+ );
+
+/**
+ * fmd_get_freq_range() - Gets the FM band currently in use.
+ *
+ * @range: (out) Frequency Range set on FM Radio.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ */
+int fmd_get_freq_range(
+ u8 *range
+ );
+
+/**
+ * fmd_rx_set_grid() - Sets the tuning grid.
+ *
+ * @grid: Tuning grid size
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_set_grid(
+ u8 grid
+ );
+
+/**
+ * fmd_rx_set_frequency() - Sets the FM Channel.
+ *
+ * @freq: Frequency to set in kHz
+ *
+ * Returns:
+ * 0, if set frequency done successfully.
+ * -EINVAL, if parameters are invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_set_frequency(
+ u32 freq
+ );
+
+/**
+ * fmd_rx_get_frequency() - Gets the currently used FM Channel.
+ *
+ * @freq: (out) Current Frequency set on FM Radio.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameters are invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_get_frequency(
+ u32 *freq
+ );
+
+/**
+ * fmd_rx_set_stereo_mode() - Sets the stereomode functionality.
+ *
+ * @mode: Stereo mode to set; one of FMD_STEREOMODE_OFF,
+ * FMD_STEREOMODE_MONO or FMD_STEREOMODE_BLENDING.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_set_stereo_mode(
+ u8 mode
+ );
+
+/**
+ * fmd_rx_get_stereo_mode() - Gets the currently used FM mode.
+ *
+ * @mode: (out) Mode set on FM Radio; one of FMD_STEREOMODE_OFF,
+ * FMD_STEREOMODE_MONO or FMD_STEREOMODE_BLENDING.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_rx_get_stereo_mode(
+ u8 *mode
+ );
+
+/**
+ * fmd_rx_get_signal_strength() - Gets the RSSI level of current frequency.
+ *
+ * @strength: (out) RSSI level of current channel.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_get_signal_strength(
+ u16 *strength
+ );
+
+/**
+ * fmd_rx_set_stop_level() - Sets the FM Rx Seek stop level.
+ *
+ * @stoplevel: seek stop level
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_rx_set_stop_level(
+ u16 stoplevel
+ );
+
+/**
+ * fmd_rx_get_stop_level() - Gets the current FM Rx Seek stop level.
+ *
+ * @stoplevel: (out) RSSI Threshold set on FM Radio.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_rx_get_stop_level(
+ u16 *stoplevel
+ );
+
+/**
+ * fmd_rx_seek() - Perform FM Seek.
+ *
+ * Starts searching relative to the current channel in a specific
+ * direction, using a stop level and optional noise levels.
+ * @upwards: scan up
+ *
+ * Returns:
+ * 0, if seek started successfully.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_seek(
+ bool upwards
+ );
+
+/**
+ * fmd_rx_stop_seeking() - Stops a currently active seek or scan band.
+ *
+ * Returns:
+ * 0, if stop seek done successfully.
+ * -ENOEXEC, if preconditions are violated.
+ * -ENOEXEC, if FM Driver is not currently in Seek or Scan state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_stop_seeking(void);
+
+/**
+ * fmd_rx_af_update_start() - Perform AF update.
+ *
+ * This is used to briefly tune to an AF frequency,
+ * measure its RSSI and tune back to the original frequency.
+ * @freq: Alternative frequency in kHz to be set for the AF update.
+ *
+ * Returns:
+ * -EBUSY, if FM Driver is not in idle state.
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ */
+int fmd_rx_af_update_start(
+ u32 freq
+ );
+
+/**
+ * fmd_rx_get_af_update_result() - Retrieve result of AF update.
+ *
+ * Retrieve the RSSI level of the Alternative frequency.
+ * @af_level: RSSI level of the Alternative frequency.
+ *
+ * Returns:
+ * -EBUSY, if FM Driver is not in idle state.
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ */
+int fmd_rx_get_af_update_result(
+ u16 *af_level
+ );
+
+/**
+ * fmd_rx_af_switch_start() - Performs AF switch.
+ *
+ * @freq: Frequency to set in kHz.
+ * @picode: Programmable ID, unique for each station.
+ *
+ * Returns:
+ * -EBUSY, if FM Driver is not in idle state.
+ * 0, if no error and if AF switch started successfully.
+ * -ENOEXEC, if preconditions are violated.
+ */
+int fmd_rx_af_switch_start(
+ u32 freq,
+ u16 picode
+ );
+
+/**
+ * fmd_rx_get_af_switch_results() -Retrieves the results of AF Switch.
+ *
+ * @afs_conclusion: Conclusion of AF switch.
+ * @afs_level: RSSI level of the Alternative frequency.
+ * @afs_pi: PI code of the alternative channel (if found).
+ *
+ * Returns:
+ * -EBUSY, if FM Driver is not in idle state.
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ */
+int fmd_rx_get_af_switch_results(
+ u16 *afs_conclusion,
+ u16 *afs_level,
+ u16 *afs_pi
+ );
+
+/**
+ * fmd_rx_scan_band() - Starts Band Scan.
+ *
+ * Starts scanning the active band for the strongest
+ * channels above a threshold.
+ * @max_channels_to_scan: Maximum number of channels to scan.
+ *
+ * Returns:
+ * 0, if scan band started successfully.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_scan_band(
+ u8 max_channels_to_scan
+ );
+
+/**
+ * fmd_rx_get_max_channels_to_scan() - Retrieves the maximum channels.
+ *
+ * Retrieves the maximum number of channels that can be found during
+ * band scan.
+ * @max_channels_to_scan: (out) Maximum number of channels to scan.
+ *
+ * Returns:
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if parameter is invalid.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_get_max_channels_to_scan(
+ u8 *max_channels_to_scan
+ );
+
+/**
+ * fmd_rx_get_scan_band_info() - Retrieves Channels found during scanning.
+ *
+ * Retrieves the scanned active band
+ * for the strongest channels above a threshold.
+ * @index: (out) Index value to retrieve the channels.
+ * @numchannels: (out) Number of channels found during Band Scan.
+ * @channels: (out) Channels found during band scan.
+ * @rssi: (out) Rssi of channels found during Band scan.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_get_scan_band_info(
+ u32 index,
+ u16 *numchannels,
+ u16 *channels,
+ u16 *rssi
+ );
+
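+/*
+ * A plausible band-scan flow (illustrative sketch; how the caller waits for
+ * the completion event is up to the upper layer):
+ *
+ *	fmd_rx_get_max_channels_to_scan(&max_channels);
+ *	err = fmd_rx_scan_band(max_channels);
+ *	// ... wait until the registered fmd_radio_cb reports
+ *	//     FMD_EVENT_SCAN_BAND_COMPLETED ...
+ *	err = fmd_rx_get_scan_band_info(0, &numchannels, channels, rssi);
+ */
+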
+/**
+ * fmd_block_scan() - Starts Block Scan.
+ *
+ * Starts block scan for retrieving the RSSI level of channels
+ * in the given block.
+ * @start_freq: Starting frequency of the block from where scanning has
+ * to be started.
+ * @stop_freq: End frequency of the block to be scanned.
+ * @antenna: Antenna to be used during scanning.
+ *
+ * Returns:
+ * 0, if scan band started successfully.
+ * -EINVAL, if parameters are invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_block_scan(
+ u32 start_freq,
+ u32 stop_freq,
+ u8 antenna
+ );
+
+/**
+ * fmd_get_block_scan_result() - Retrieves RSSI Level of channels.
+ *
+ * Retrieves the RSSI level of the channels in the block.
+ * @index: (out) Index value to retrieve the channels.
+ * @numchannels: (out) Number of channels found during Band Scan.
+ * @rssi: (out) Rssi of channels found during Band scan.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_get_block_scan_result(
+ u32 index,
+ u16 *numchannels,
+ u16 *rssi
+ );
+
+/**
+ * fmd_rx_get_rds() - Gets the current status of RDS transmission.
+ *
+ * @on: (out) RDS status
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_rx_get_rds(
+ bool *on
+ );
+
+/**
+ * fmd_rx_buffer_set_size() - Sets the number of groups that the data buffer
+ * can contain and clears the buffer.
+ *
+ * @size: buffer size
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_buffer_set_size(
+ u8 size
+ );
+
+/**
+ * fmd_rx_buffer_set_threshold() - Sets the RDS buffer threshold level.
+ *
+ * Sets the group number at which the RDS buffer full interrupt must be
+ * generated. The interrupt will be set after reception of the group.
+ * @threshold: threshold level.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_buffer_set_threshold(
+ u8 threshold
+ );
+
+/**
+ * fmd_rx_set_rds() - Enables or disables demodulation of RDS data.
+ *
+ * @on_off_state: Rx RDS ON/OFF control
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_rx_set_rds(
+ u8 on_off_state
+ );
+
+/**
+ * fmd_rx_get_low_level_rds_groups() - Gets Low level RDS group data.
+ *
+ * @index: RDS group index
+ * @block1: (out) RDS Block 1
+ * @block2: (out) RDS Block 2
+ * @block3: (out) RDS Block 3
+ * @block4: (out) RDS Block 4
+ * @status1: (out) RDS data status 1
+ * @status2: (out) RDS data status 2
+ * @status3: (out) RDS data status 3
+ * @status4: (out) RDS data status 4
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_rx_get_low_level_rds_groups(
+ u8 index,
+ u16 *block1,
+ u16 *block2,
+ u16 *block3,
+ u16 *block4,
+ u8 *status1,
+ u8 *status2,
+ u8 *status3,
+ u8 *status4
+ );
+
+/**
+ * fmd_tx_set_pa() - Enables or disables the Power Amplifier.
+ *
+ * @on: Power Amplifier current state to set
+ *
+ * Returns:
+ * 0, if set Power Amplifier done successfully.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_set_pa(
+ bool on
+ );
+
+/**
+ * fmd_tx_set_signal_strength() - Sets the RF-level of the output FM signal.
+ *
+ * @strength: Signal strength to be set for FM Tx in dBuV.
+ *
+ * Returns:
+ * 0, if set RSSI Level done successfully.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_set_signal_strength(
+ u16 strength
+ );
+
+/**
+ * fmd_tx_get_signal_strength() - Retrieves current RSSI of FM Tx.
+ *
+ * @strength: (out) Strength of signal being transmitted in dBuV.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_tx_get_signal_strength(
+ u16 *strength
+ );
+
+/**
+ * fmd_tx_set_freq_range() - Sets the FM band and specifies the custom band.
+ *
+ * @range: Freq range to set on FM Tx.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_set_freq_range(
+ u8 range
+ );
+
+/**
+ * fmd_tx_get_freq_range() - Gets the FM band currently in use.
+ *
+ * @range: (out) Frequency Range set on Fm Tx.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ */
+int fmd_tx_get_freq_range(
+ u8 *range
+ );
+
+/**
+ * fmd_tx_set_grid() - Sets the tuning grid size.
+ *
+ * @grid: FM Grid (50 kHz, 100 kHz, 200 kHz) to be set for FM Tx.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_set_grid(
+ u8 grid
+ );
+
+/**
+ * fmd_tx_get_grid() - Gets the current tuning grid size.
+ *
+ * @grid: (out) FM Grid (50 kHz, 100 kHz, 200 kHz) currently set on FM Tx.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_tx_get_grid(
+ u8 *grid
+ );
+
+/**
+ * fmd_tx_set_preemphasis() - Sets the Preemphasis characteristic of the Tx.
+ *
+ * @preemphasis: Pre-emphasis level to be set for FM Tx.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_set_preemphasis(
+ u8 preemphasis
+ );
+
+/**
+ * fmd_tx_get_preemphasis() - Gets the Preemphasis characteristic of FM Tx.
+ *
+ * @preemphasis: (out) Preemphasis Level used for FM Tx.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_tx_get_preemphasis(
+ u8 *preemphasis
+ );
+
+/**
+ * fmd_tx_set_frequency() - Sets the FM Channel for Tx.
+ *
+ * @freq: Freq to be set for transmission.
+ *
+ * Returns:
+ * 0, if set frequency done successfully.
+ * -EINVAL, if parameters are invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_set_frequency(
+ u32 freq
+ );
+
+/**
+ * fmd_tx_get_frequency() - Gets the currently used Channel for Tx.
+ *
+ * @freq: (out) Frequency set on FM Tx.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameters are invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_get_frequency(
+ u32 *freq
+ );
+
+/**
+ * fmd_tx_enable_stereo_mode() - Sets Stereo mode state for TX.
+ *
+ * @enable_stereo_mode: Flag indicating enabling or disabling Stereo mode.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_enable_stereo_mode(
+ bool enable_stereo_mode
+ );
+
+/**
+ * fmd_tx_get_stereo_mode() - Gets the currently used FM Tx stereo mode.
+ *
+ * @stereo_mode: (out) Stereo Mode state set on FM Tx.
+ *
+ * Returns:
+ * 0, if no error.
+ * -EINVAL, if parameter is invalid.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_tx_get_stereo_mode(
+ bool *stereo_mode
+ );
+
+/**
+ * fmd_tx_set_pilot_deviation() - Sets the pilot deviation in Hz.
+ *
+ * @deviation: Pilot deviation in Hz to set on FM Tx.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_set_pilot_deviation(
+ u16 deviation
+ );
+
+/**
+ * fmd_tx_get_pilot_deviation() - Retrieves the current pilot deviation.
+ *
+ * @deviation: (out) Pilot deviation set on FM Tx.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_tx_get_pilot_deviation(
+ u16 *deviation
+ );
+
+/**
+ * fmd_tx_set_rds_deviation() - Sets the RDS deviation in Hz.
+ *
+ * @deviation: RDS deviation in Hz.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_set_rds_deviation(
+ u16 deviation
+ );
+
+/**
+ * fmd_tx_get_rds_deviation() - Retrieves the current Rds deviation.
+ *
+ * @deviation: (out) RDS deviation currently set.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_tx_get_rds_deviation(
+ u16 *deviation
+ );
+
+/**
+ * fmd_tx_set_rds() - Enables or disables RDS transmission for Tx.
+ *
+ * @on: Boolean - RDS ON
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_set_rds(
+ bool on
+ );
+
+/**
+ * fmd_tx_get_rds() - Gets the current status of RDS transmission for FM Tx.
+ *
+ * @on: (out) Rds enabled or disabled.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_tx_get_rds(
+ bool *on
+ );
+
+/**
+ * fmd_tx_set_group() - Programs a group at a position in the RDS buffer.
+ *
+ * @position: RDS group position
+ * @block1: Data to be transmitted in Block 1
+ * @block2: Data to be transmitted in Block 2
+ * @block3: Data to be transmitted in Block 3
+ * @block4: Data to be transmitted in Block 4
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if parameters are invalid.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_set_group(
+ u16 position,
+ u8 *block1,
+ u8 *block2,
+ u8 *block3,
+ u8 *block4
+ );
+
+/**
+ * fmd_tx_buffer_set_size() - Controls the size of the RDS buffer in groups.
+ *
+ * @buffer_size: RDS buffer size.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_tx_buffer_set_size(
+ u16 buffer_size
+ );
+
+/**
+ * fmd_set_volume() - Sets the receive audio volume.
+ *
+ * @volume: Audio volume level
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_set_volume(
+ u8 volume
+ );
+
+/**
+ * fmd_get_volume() - Retrieves the current audio volume.
+ *
+ * @volume: Analog Volume level.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if parameter is invalid.
+ * -EBUSY, if FM Driver is not in idle state.
+ */
+int fmd_get_volume(
+ u8 *volume
+ );
+
+/**
+ * fmd_set_balance() - Controls the receiver audio balance.
+ *
+ * @balance: Audio balance level
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_set_balance(
+ s8 balance
+ );
+
+/**
+ * fmd_set_mute() - Enables or disables muting of the analog audio(DAC).
+ *
+ * @mute_on: True to mute the analog audio (DAC), false to unmute.
+ *
+ * Returns:
+ * 0, if mute done successfully.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_set_mute(
+ bool mute_on
+ );
+
+/**
+ * fmd_ext_set_mute() - Enables or disables muting of the audio channel.
+ *
+ * @mute_on: True to mute the audio channel, false to unmute.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_ext_set_mute(
+ bool mute_on
+ );
+
+/**
+ * fmd_power_up() - Puts the system in Powerup state.
+ *
+ * Returns:
+ * 0, if power up command sent successfully to chip.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_power_up(void);
+
+/**
+ * fmd_goto_standby() - Puts the system in standby mode.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_goto_standby(void);
+
+/**
+ * fmd_goto_power_down() - Puts the system in Powerdown mode.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_goto_power_down(void);
+
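+/*
+ * A minimal power-state sketch (illustrative only; the exact ordering used
+ * by the driver core is defined elsewhere in this driver, and my_radio_cb is
+ * the hypothetical callback sketched earlier in this header):
+ *
+ *	err = fmd_init();
+ *	err = fmd_register_callback(my_radio_cb);
+ *	err = fmd_power_up();
+ *	err = fmd_set_mode(FMD_MODE_RX);
+ *	// ... use the RX/TX API ...
+ *	err = fmd_goto_power_down();
+ *	fmd_exit();
+ */
+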
+/**
+ * fmd_select_ref_clk() - Selects the FM reference clock.
+ *
+ * @ref_clk: Ref Clock.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_select_ref_clk(
+ u16 ref_clk
+ );
+
+/**
+ * fmd_set_ref_clk_pll() - Sets the frequency of the reference clock.
+ *
+ * Sets frequency and offset correction properties of the external
+ * reference clock of the PLL
+ * @freq: PLL Frequency/ 2 in kHz.
+ *
+ * Returns:
+ * 0, if no error.
+ * -ENOEXEC, if preconditions are violated.
+ * -EBUSY, if FM Driver is not in idle state.
+ * -EINVAL, if wrong response received from chip.
+ */
+int fmd_set_ref_clk_pll(
+ u16 freq
+ );
+
+/**
+ * fmd_send_fm_ip_enable()- Enables the FM IP.
+ *
+ * Returns:
+ * 0: If there is no error.
+ * -ETIME: Otherwise
+ */
+int fmd_send_fm_ip_enable(void);
+
+/**
+ * fmd_send_fm_ip_disable()- Disables the FM IP.
+ *
+ * Returns:
+ * 0, If there is no error.
+ * -ETIME: Otherwise
+ */
+int fmd_send_fm_ip_disable(void);
+
+/**
+ * fmd_send_fm_firmware() - Send the FM Firmware File to Device.
+ *
+ * @fw_buffer: Firmware to be downloaded.
+ * @fw_size: Size of firmware to be downloaded.
+ *
+ * Returns:
+ * 0, If there is no error.
+ * -ETIME: Otherwise
+ */
+int fmd_send_fm_firmware(
+ u8 *fw_buffer,
+ u16 fw_size
+ );
+
+/**
+ * fmd_int_bufferfull() - RDS Groups available for reading by Host.
+ *
+ * Gets the number of groups that are available in the
+ * buffer. This function is called in RX mode to read RDS groups.
+ * @number_of_rds_groups: Number of RDS groups ready to
+ * be read by the Host.
+ *
+ * Returns:
+ * 0, If there is no error.
+ * corresponding error Otherwise
+ */
+int fmd_int_bufferfull(
+ u16 *number_of_rds_groups
+ );
+
+/**
+ * fmd_start_rds_thread() - Starts the RDS Thread for receiving RDS Data.
+ *
+ * This is started by Application when it wants to receive RDS Data.
+ * @cb_func: Callback function for receiving RDS Data
+ */
+void fmd_start_rds_thread(
+ cg2900_fm_rds_cb cb_func
+ );
+/**
+ * fmd_stop_rds_thread() - Stops the RDS Thread when Application does not
+ * want to receive RDS.
+ */
+void fmd_stop_rds_thread(void);
+
+/**
+ * fmd_get_rds_sem() - Block on RDS Semaphore.
+ * Until irpt_BufferFull is received, the RDS Task is blocked.
+ */
+void fmd_get_rds_sem(void);
+
+/**
+ * fmd_set_rds_sem() - Unblock on RDS Semaphore.
+ * On receiving irpt_BufferFull, the RDS Task is unblocked.
+ */
+void fmd_set_rds_sem(void);
+
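+/*
+ * The RDS path is semaphore driven: fmd_set_rds_sem() is called when the
+ * irpt_BufferFull interrupt arrives, and the RDS thread blocks in
+ * fmd_get_rds_sem() until then. A plausible shape of the worker loop
+ * (illustrative sketch only; the real fmd_rds_thread body and the
+ * cg2900_fm_rds_cb prototype live in cg2900_fm_driver.c and
+ * cg2900_fm_api.h):
+ *
+ *	while (rds_thread_required) {
+ *		fmd_get_rds_sem();
+ *		if (rds_thread_required && cb_rds_func)
+ *			cb_rds_func();
+ *	}
+ */
+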
+/**
+ * fmd_set_dev() - Set FM device.
+ *
+ * @dev: FM Device
+ *
+ * Returns:
+ * 0, If there is no error.
+ * corresponding error Otherwise
+ */
+int fmd_set_dev(
+ struct device *dev
+ );
+
+/**
+ * fmd_set_test_tone_generator_status()- Sets the Test Tone Generator.
+ *
+ * This function is used to enable/disable the Internal Tone Generator of
+ * CG2900.
+ * @test_tone_status: Status of tone generator.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int fmd_set_test_tone_generator_status(
+ u8 test_tone_status
+ );
+
+/**
+ * fmd_test_tone_connect()- Connect Audio outputs/inputs.
+ *
+ * This function connects the audio outputs/inputs of the
+ * Internal Tone Generator of CG2900.
+ * @left_audio_mode: Left Audio Output Mode.
+ * @right_audio_mode: Right Audio Output Mode.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int fmd_test_tone_connect(
+ u8 left_audio_mode,
+ u8 right_audio_mode
+ );
+
+/**
+ * fmd_test_tone_set_params()- Sets the Test Tone Parameters.
+ *
+ * This function is used to set the parameters of
+ * the Internal Tone Generator of CG2900.
+ * @tone_gen: Tone to be configured (Tone 1 or Tone 2)
+ * @frequency: Frequency of the tone.
+ * @volume: Volume of the tone.
+ * @phase_offset: Phase offset of the tone.
+ * @dc: DC to add to tone.
+ * @waveform: Waveform to generate, sine or pulse.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int fmd_test_tone_set_params(
+ u8 tone_gen,
+ u16 frequency,
+ u16 volume,
+ u16 phase_offset,
+ u16 dc,
+ u8 waveform
+ );
+
+/**
+ * fmd_rx_set_deemphasis() - Sets the de-emphasis filter.
+ *
+ * This function sets the de-emphasis filter to the
+ * specified de-emphasis level.
+ * @deemphasis: De-emphasis level to set.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int fmd_rx_set_deemphasis(
+ u8 deemphasis
+ );
+
+/**
+ * fmd_limiter_setcontrol()- Sets the Limiter Controls.
+ *
+ * This function sets the limiter control.
+ * @audio_deviation: Limiting level of Audio Deviation.
+ * @notification_hold_off_time: Minimum time between
+ * two limiting interrupts.
+ *
+ * Returns:
+ * 0, if operation completed successfully.
+ * -EINVAL, otherwise.
+ */
+int fmd_limiter_setcontrol(
+ u16 audio_deviation,
+ u16 notification_hold_off_time
+ );
+
+#endif /* _FMDRIVER_H_ */
diff --git a/drivers/media/radio/CG2900/radio-cg2900.c b/drivers/media/radio/CG2900/radio-cg2900.c
new file mode 100644
index 00000000000..9ccb4e6b85d
--- /dev/null
+++ b/drivers/media/radio/CG2900/radio-cg2900.c
@@ -0,0 +1,3024 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Linux Wrapper for V4l2 FM Driver for CG2900.
+ *
+ * Author: Hemant Gupta <hemant.gupta@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/init.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-common.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include "cg2900.h"
+#include "cg2900_fm_driver.h"
+
+#define RADIO_CG2900_VERSION KERNEL_VERSION(1, 1, 0)
+#define BANNER "ST-Ericsson FM Radio Card driver v1.1.0"
+
+#define FMR_HZ_TO_MHZ_CONVERTER 1000000
+#define FMR_EU_US_LOW_FREQ_IN_MHZ 87.5
+#define FMR_EU_US_HIGH_FREQ_IN_MHZ 108
+#define FMR_JAPAN_LOW_FREQ_IN_MHZ 76
+#define FMR_JAPAN_HIGH_FREQ_IN_MHZ 90
+#define FMR_CHINA_LOW_FREQ_IN_MHZ 70
+#define FMR_CHINA_HIGH_FREQ_IN_MHZ 108
+#define FMR_MAX_BLOCK_SCAN_CHANNELS 198
+#define FMR_CHINA_GRID_IN_HZ 50000
+#define FMR_EUROPE_GRID_IN_HZ 100000
+#define FMR_USA_GRID_IN_HZ 200000
+#define FMR_AF_SWITCH_DATA_SIZE 2
+#define FMR_BLOCK_SCAN_DATA_SIZE 2
+#define FMR_GET_INTERRUPT_DATA_SIZE 2
+#define FMR_TEST_TONE_CONNECT_DATA_SIZE 2
+#define FMR_TEST_TONE_SET_PARAMS_DATA_SIZE 6
+
+/* freq in Hz to V4l2 freq (units of 62.5Hz) */
+#define HZ_TO_V4L2(X) (2*(X)/125)
+/* V4l2 freq (units of 62.5Hz) to freq in Hz */
+#define V4L2_TO_HZ(X) (((X)*125)/(2))
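+
+/*
+ * Worked example of the conversion above (illustrative only): a station at
+ * 101.5 MHz is 101500000 Hz, so HZ_TO_V4L2(101500000) = 2 * 101500000 / 125
+ * = 1624000 V4L2 units, and V4L2_TO_HZ(1624000) = 1624000 * 125 / 2
+ * = 101500000 Hz again.
+ */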
+
+static int cg2900_open(
+ struct file *file
+ );
+static int cg2900_release(
+ struct file *file
+ );
+static ssize_t cg2900_read(
+ struct file *file,
+ char __user *data,
+ size_t count,
+ loff_t *pos
+ );
+static unsigned int cg2900_poll(
+ struct file *file,
+ struct poll_table_struct *wait
+ );
+static int vidioc_querycap(
+ struct file *file,
+ void *priv,
+ struct v4l2_capability *query_caps
+ );
+static int vidioc_get_tuner(
+ struct file *file,
+ void *priv,
+ struct v4l2_tuner *tuner
+ );
+static int vidioc_set_tuner(
+ struct file *file,
+ void *priv,
+ struct v4l2_tuner *tuner
+ );
+static int vidioc_get_modulator(
+ struct file *file,
+ void *priv,
+ struct v4l2_modulator *modulator
+ );
+static int vidioc_set_modulator(
+ struct file *file,
+ void *priv,
+ struct v4l2_modulator *modulator
+ );
+static int vidioc_get_frequency(
+ struct file *file,
+ void *priv,
+ struct v4l2_frequency *freq
+ );
+static int vidioc_set_frequency(
+ struct file *file,
+ void *priv,
+ struct v4l2_frequency *freq
+ );
+static int vidioc_query_ctrl(
+ struct file *file,
+ void *priv,
+ struct v4l2_queryctrl *query_ctrl
+ );
+static int vidioc_get_ctrl(
+ struct file *file,
+ void *priv,
+ struct v4l2_control *ctrl
+ );
+static int vidioc_set_ctrl(
+ struct file *file,
+ void *priv,
+ struct v4l2_control *ctrl
+ );
+static int vidioc_get_ext_ctrls(
+ struct file *file,
+ void *priv,
+ struct v4l2_ext_controls *ext_ctrl
+ );
+static int vidioc_set_ext_ctrls(
+ struct file *file,
+ void *priv,
+ struct v4l2_ext_controls *ext_ctrl
+ );
+static int vidioc_set_hw_freq_seek(
+ struct file *file,
+ void *priv,
+ struct v4l2_hw_freq_seek *freq_seek
+ );
+static int vidioc_get_audio(
+ struct file *file,
+ void *priv,
+ struct v4l2_audio *audio
+ );
+static int vidioc_set_audio(
+ struct file *file,
+ void *priv,
+ struct v4l2_audio *audio
+ );
+static int vidioc_get_input(
+ struct file *filp,
+ void *priv,
+ unsigned int *input
+ );
+static int vidioc_set_input(
+ struct file *filp,
+ void *priv,
+ unsigned int input
+ );
+static void cg2900_convert_err_to_v4l2(
+ char status_byte,
+ char *out_byte
+ );
+static int cg2900_map_event_to_v4l2(
+ u8 fm_event
+ );
+
+static u32 freq_low;
+static u32 freq_high;
+
+/* Module Parameters */
+static int radio_nr = -1;
+static int grid;
+static int band;
+
+/* cg2900_poll_queue - Main Wait Queue for polling (Scan/Seek) */
+static wait_queue_head_t cg2900_poll_queue;
+
+struct sk_buff_head fm_interrupt_queue;
+
+/**
+ * enum fm_seek_status - Seek status of FM Radio.
+ *
+ * @FMR_SEEK_NONE: No seek in progress.
+ * @FMR_SEEK_IN_PROGRESS: Seek is in progress.
+ *
+ * Seek status of FM Radio.
+ */
+enum fm_seek_status {
+ FMR_SEEK_NONE,
+ FMR_SEEK_IN_PROGRESS
+};
+
+/**
+ * enum fm_power_state - Power states of FM Radio.
+ *
+ * @FMR_SWITCH_OFF: FM Radio is switched off.
+ * @FMR_SWITCH_ON: FM Radio is switched on.
+ * @FMR_STANDBY: FM Radio in standby state.
+ *
+ * Power states of FM Radio.
+ */
+enum fm_power_state {
+ FMR_SWITCH_OFF,
+ FMR_SWITCH_ON,
+ FMR_STANDBY
+};
+
+/**
+ * struct cg2900_device - Stores FM Device Info.
+ *
+ * @state: state of FM Radio
+ * @muted: FM Radio Mute/Unmute status
+ * @seekstatus: seek status
+ * @rx_rds_enabled: Rds enable/disable status for FM Rx
+ * @tx_rds_enabled: Rds enable/disable status for FM Tx
+ * @rx_stereo_status: Stereo Mode status for FM Rx
+ * @tx_stereo_status: Stereo Mode status for FM Tx
+ * @volume: Analog Volume Gain of FM Radio
+ * @rssi_threshold: RSSI Threshold set on FM Radio
+ * @frequency: Frequency tuned on FM Radio in V4L2 Format
+ * @audiopath: Audio Balance
+ * @wait_on_read_queue: Flag for waiting on read queue.
+ * @fm_mode: Enum for storing the current FM Mode.
+ *
+ * FM Driver Information Structure.
+ */
+struct cg2900_device {
+ u8 state;
+ u8 muted;
+ u8 seekstatus;
+ bool rx_rds_enabled;
+ bool tx_rds_enabled;
+ bool rx_stereo_status;
+ bool tx_stereo_status;
+ int volume;
+ u16 rssi_threshold;
+ u32 frequency;
+ u32 audiopath;
+ bool wait_on_read_queue;
+ enum cg2900_fm_mode fm_mode;
+};
+
+/* Global structure to store and maintain the FM Driver device info */
+static struct cg2900_device cg2900_device;
+
+/* V4l2 File Operation Structure */
+static const struct v4l2_file_operations cg2900_fops = {
+ .owner = THIS_MODULE,
+ .open = cg2900_open,
+ .release = cg2900_release,
+ .read = cg2900_read,
+ .poll = cg2900_poll,
+ .ioctl = video_ioctl2,
+};
+
+/* V4L2 IOCTL Operation Structure */
+static const struct v4l2_ioctl_ops cg2900_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_tuner = vidioc_get_tuner,
+ .vidioc_s_tuner = vidioc_set_tuner,
+ .vidioc_g_modulator = vidioc_get_modulator,
+ .vidioc_s_modulator = vidioc_set_modulator,
+ .vidioc_g_frequency = vidioc_get_frequency,
+ .vidioc_s_frequency = vidioc_set_frequency,
+ .vidioc_queryctrl = vidioc_query_ctrl,
+ .vidioc_g_ctrl = vidioc_get_ctrl,
+ .vidioc_s_ctrl = vidioc_set_ctrl,
+ .vidioc_g_ext_ctrls = vidioc_get_ext_ctrls,
+ .vidioc_s_ext_ctrls = vidioc_set_ext_ctrls,
+ .vidioc_s_hw_freq_seek = vidioc_set_hw_freq_seek,
+ .vidioc_g_audio = vidioc_get_audio,
+ .vidioc_s_audio = vidioc_set_audio,
+ .vidioc_g_input = vidioc_get_input,
+ .vidioc_s_input = vidioc_set_input,
+};
+
+/* V4L2 Video Device Structure */
+static struct video_device cg2900_video_device = {
+ .name = "STE CG2900 FM Rx/Tx Radio",
+ .fops = &cg2900_fops,
+ .ioctl_ops = &cg2900_ioctl_ops,
+ .release = video_device_release_empty,
+};
+
+static u16 no_of_scan_freq;
+static u16 no_of_block_scan_freq;
+static u32 scanfreq_rssi_level[MAX_CHANNELS_TO_SCAN];
+static u16 block_scan_rssi_level[MAX_CHANNELS_FOR_BLOCK_SCAN];
+static u32 scanfreq[MAX_CHANNELS_TO_SCAN];
+static struct mutex fm_mutex;
+static spinlock_t fm_spinlock;
+static int users;
+
+/**
+ * vidioc_querycap()- Query FM Driver Capabilities.
+ *
+ * This function is used to query the capabilities of the
+ * FM Driver. This function is called when the application issues the IOCTL
+ * VIDIOC_QUERYCAP.
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @query_caps: v4l2_capability structure.
+ *
+ * Returns: 0
+ */
+static int vidioc_querycap(
+ struct file *file,
+ void *priv,
+ struct v4l2_capability *query_caps
+ )
+{
+ FM_INFO_REPORT("vidioc_querycap");
+ memset(
+ query_caps,
+ 0,
+ sizeof(*query_caps)
+ );
+ strlcpy(
+ query_caps->driver,
+ "CG2900 Driver",
+ sizeof(query_caps->driver)
+ );
+ strlcpy(
+ query_caps->card,
+ "CG2900 FM Radio",
+ sizeof(query_caps->card)
+ );
+ strcpy(
+ query_caps->bus_info,
+ "platform"
+ );
+ query_caps->version = RADIO_CG2900_VERSION;
+ query_caps->capabilities =
+ V4L2_CAP_TUNER |
+ V4L2_CAP_MODULATOR |
+ V4L2_CAP_RADIO |
+ V4L2_CAP_READWRITE |
+ V4L2_CAP_RDS_CAPTURE |
+ V4L2_CAP_HW_FREQ_SEEK |
+ V4L2_CAP_RDS_OUTPUT;
+ FM_DEBUG_REPORT("vidioc_querycap returning 0");
+ return 0;
+}
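+
+/*
+ * Illustrative sketch (not part of the driver): a minimal user-space caller
+ * of the capability query handled above, assuming the card registers as
+ * /dev/radio0 and that <fcntl.h>, <unistd.h>, <stdio.h>, <sys/ioctl.h> and
+ * <linux/videodev2.h> are included.
+ *
+ *   struct v4l2_capability caps;
+ *   int fd = open("/dev/radio0", O_RDONLY);
+ *
+ *   if (fd >= 0) {
+ *           if (ioctl(fd, VIDIOC_QUERYCAP, &caps) == 0)
+ *                   printf("card: %s caps: 0x%08x\n",
+ *                          caps.card, caps.capabilities);
+ *           close(fd);
+ *   }
+ */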
+
+/**
+ * vidioc_get_tuner()- Get FM Tuner Features.
+ *
+ * This function is used to get the tuner features.
+ * This function is called when the application issues the IOCTL
+ * VIDIOC_G_TUNER
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @tuner: v4l2_tuner structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_get_tuner(
+ struct file *file,
+ void *priv,
+ struct v4l2_tuner *tuner
+ )
+{
+ int status = 0;
+ u8 mode;
+ bool rds_enabled;
+ u16 rssi;
+ int ret_val = -EINVAL;
+
+ FM_INFO_REPORT("vidioc_get_tuner");
+
+ if (tuner->index > 0) {
+ FM_ERR_REPORT("vidioc_get_tuner: Only 1 tuner supported");
+ goto error;
+ }
+
+ memset(tuner, 0, sizeof(*tuner));
+ strcpy(tuner->name, "CG2900 FM Receiver");
+ tuner->type = V4L2_TUNER_RADIO;
+ tuner->rangelow = HZ_TO_V4L2(freq_low);
+ tuner->rangehigh = HZ_TO_V4L2(freq_high);
+ tuner->capability =
+ V4L2_TUNER_CAP_LOW /* Frequency steps = 1/16 kHz */
+ | V4L2_TUNER_CAP_STEREO /* Can receive stereo */
+ | V4L2_TUNER_CAP_RDS; /* Supports RDS Capture */
+
+ if (cg2900_device.fm_mode == CG2900_FM_RX_MODE) {
+
+ status = cg2900_fm_get_mode(&mode);
+
+ FM_DEBUG_REPORT("vidioc_get_tuner: mode = %x, ", mode);
+
+ if (0 != status) {
+ /* Get mode API failed, set mode to mono */
+ tuner->audmode = V4L2_TUNER_MODE_MONO;
+ tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
+ goto error;
+ }
+
+ switch (mode) {
+ case CG2900_MODE_STEREO:
+ tuner->audmode = V4L2_TUNER_MODE_STEREO;
+ tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ break;
+ case CG2900_MODE_MONO:
+ default:
+ tuner->audmode = V4L2_TUNER_MODE_MONO;
+ tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
+ break;
+ }
+
+ status = cg2900_fm_get_rds_status(&rds_enabled);
+
+ if (0 != status) {
+ tuner->rxsubchans &= ~V4L2_TUNER_SUB_RDS;
+ goto error;
+ }
+
+ if (rds_enabled)
+ tuner->rxsubchans |= V4L2_TUNER_SUB_RDS;
+ else
+ tuner->rxsubchans &= ~V4L2_TUNER_SUB_RDS;
+ } else {
+ tuner->audmode = V4L2_TUNER_MODE_MONO;
+ tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
+ }
+
+ if (cg2900_device.fm_mode == CG2900_FM_RX_MODE) {
+ status = cg2900_fm_get_signal_strength(&rssi);
+
+ if (0 != status) {
+ tuner->signal = 0;
+ goto error;
+ }
+ tuner->signal = rssi;
+ } else {
+ tuner->signal = 0;
+ }
+
+ ret_val = 0;
+
+error:
+ FM_DEBUG_REPORT("vidioc_get_tuner: returning %d", ret_val);
+ return ret_val;
+}
+
+/**
+ * vidioc_set_tuner()- Set FM Tuner Features.
+ *
+ * This function is used to set the tuner features.
+ * It also sets the default FM Rx settings.
+ * This function is called when the application issues the IOCTL
+ * VIDIOC_S_TUNER
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @tuner: v4l2_tuner structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_set_tuner(
+ struct file *file,
+ void *priv,
+ struct v4l2_tuner *tuner
+ )
+{
+ bool rds_status = false;
+ bool stereo_status = false;
+ int status = 0;
+ int ret_val = -EINVAL;
+
+ FM_INFO_REPORT("vidioc_set_tuner");
+ if (tuner->index != 0) {
+ FM_ERR_REPORT("vidioc_set_tuner: Only 1 tuner supported");
+ goto error;
+ }
+
+ if (cg2900_device.fm_mode != CG2900_FM_RX_MODE) {
+ /*
+ * FM Rx mode should be configured
+ * as earlier mode was not FM Rx
+ */
+ if (CG2900_FM_BAND_US_EU == band) {
+ freq_low = FMR_EU_US_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ freq_high = FMR_EU_US_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ } else if (CG2900_FM_BAND_JAPAN == band) {
+ freq_low = FMR_JAPAN_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ freq_high = FMR_JAPAN_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ } else if (CG2900_FM_BAND_CHINA == band) {
+ freq_low = FMR_CHINA_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ freq_high = FMR_CHINA_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ }
+ cg2900_device.fm_mode = CG2900_FM_RX_MODE;
+ cg2900_device.rx_rds_enabled =
+ (tuner->rxsubchans & V4L2_TUNER_SUB_RDS) ?
+ true : false;
+ if (tuner->rxsubchans & V4L2_TUNER_SUB_STEREO)
+ stereo_status = true;
+ else if (tuner->rxsubchans & V4L2_TUNER_SUB_MONO)
+ stereo_status = false;
+ cg2900_device.rx_stereo_status = stereo_status;
+ status = cg2900_fm_set_rx_default_settings(freq_low,
+ band,
+ grid,
+ cg2900_device.rx_rds_enabled,
+ cg2900_device.rx_stereo_status);
+
+ if (0 != status) {
+ FM_ERR_REPORT("vidioc_set_tuner: "
+ "cg2900_fm_set_rx_default_settings returned "
+ " %d", status);
+ goto error;
+ }
+ status = cg2900_fm_set_rssi_threshold(
+ cg2900_device.rssi_threshold);
+ if (0 != status) {
+ FM_ERR_REPORT("vidioc_set_tuner: "
+ "cg2900_fm_set_rssi_threshold returned "
+ " %d", status);
+ goto error;
+ }
+ } else {
+ /*
+ * Mode was FM Rx only, change the RDS settings or stereo mode
+ * if they are changed by application
+ */
+ rds_status = (tuner->rxsubchans & V4L2_TUNER_SUB_RDS) ?
+ true : false;
+ if (tuner->rxsubchans & V4L2_TUNER_SUB_STEREO)
+ stereo_status = true;
+ else if (tuner->rxsubchans & V4L2_TUNER_SUB_MONO)
+ stereo_status = false;
+ if (stereo_status != cg2900_device.rx_stereo_status) {
+ cg2900_device.rx_stereo_status = stereo_status;
+ if (stereo_status)
+ status =
+ cg2900_fm_set_mode(
+ FMD_STEREOMODE_BLENDING);
+ else
+ status = cg2900_fm_set_mode(
+ FMD_STEREOMODE_MONO);
+
+ if (0 != status) {
+ FM_ERR_REPORT("vidioc_set_tuner: "
+ "cg2900_fm_set_mode returned "
+ " %d", status);
+ goto error;
+ }
+ }
+ if (rds_status != cg2900_device.rx_rds_enabled) {
+ cg2900_device.rx_rds_enabled = rds_status;
+ if (rds_status)
+ status = cg2900_fm_rds_on();
+ else
+ status = cg2900_fm_rds_off();
+
+ if (0 != status) {
+ FM_ERR_REPORT("vidioc_set_tuner: "
+ "cg2900_fm_rds returned "
+ " %d", status);
+ goto error;
+ }
+ }
+ }
+
+ ret_val = 0;
+
+error:
+ FM_DEBUG_REPORT("vidioc_set_tuner: returning %d", ret_val);
+ return ret_val;
+}
+
+/**
+ * vidioc_get_modulator()- Get FM Modulator Features.
+ *
+ * This function is used to get the modulator features.
+ * This function is called when the application issues the IOCTL
+ * VIDIOC_G_MODULATOR
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @modulator: v4l2_modulator structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_get_modulator(
+ struct file *file,
+ void *priv,
+ struct v4l2_modulator *modulator
+ )
+{
+ int status = 0;
+ bool rds_enabled;
+ u8 mode;
+ int ret_val = -EINVAL;
+
+ FM_INFO_REPORT("vidioc_get_modulator");
+
+ if (modulator->index > 0) {
+ FM_ERR_REPORT("vidioc_get_modulator: Only 1 "
+ "modulator supported");
+ goto error;
+ }
+
+ memset(modulator, 0, sizeof(*modulator));
+ strcpy(modulator->name, "CG2900 FM Transmitter");
+ modulator->rangelow = freq_low;
+ modulator->rangehigh = freq_high;
+ modulator->capability = V4L2_TUNER_CAP_NORM /* Freq steps = 1/16 kHz */
+ | V4L2_TUNER_CAP_STEREO /* Can receive stereo */
+ | V4L2_TUNER_CAP_RDS; /* Supports RDS Capture */
+
+ if (cg2900_device.fm_mode == CG2900_FM_TX_MODE) {
+ status = cg2900_fm_get_mode(&mode);
+ FM_DEBUG_REPORT("vidioc_get_modulator: mode = %x", mode);
+ if (0 != status) {
+ /* Get mode API failed, set mode to mono */
+ modulator->txsubchans = V4L2_TUNER_SUB_MONO;
+ goto error;
+ }
+ switch (mode) {
+ /* Stereo */
+ case CG2900_MODE_STEREO:
+ modulator->txsubchans = V4L2_TUNER_SUB_STEREO;
+ break;
+ /* Mono */
+ case CG2900_MODE_MONO:
+ modulator->txsubchans = V4L2_TUNER_SUB_MONO;
+ break;
+ /* Switching or Blending, set mode as Stereo */
+ default:
+ modulator->txsubchans = V4L2_TUNER_SUB_STEREO;
+ }
+ status = cg2900_fm_get_rds_status(&rds_enabled);
+ if (0 != status) {
+ modulator->txsubchans &= ~V4L2_TUNER_SUB_RDS;
+ goto error;
+ }
+ if (rds_enabled)
+ modulator->txsubchans |= V4L2_TUNER_SUB_RDS;
+ else
+ modulator->txsubchans &= ~V4L2_TUNER_SUB_RDS;
+ } else
+ modulator->txsubchans = V4L2_TUNER_SUB_MONO;
+
+ ret_val = 0;
+
+error:
+ FM_DEBUG_REPORT("vidioc_get_modulator: returning %d",
+ ret_val);
+ return ret_val;
+}
+
+/**
+ * vidioc_set_modulator()- Set FM Modulator Features.
+ *
+ * This function is used to set the Modulator features.
+ * It also sets the default FM Tx settings.
+ * This function is called when the application issues the IOCTL
+ * VIDIOC_S_MODULATOR
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @modulator: v4l2_modulator structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_set_modulator(
+ struct file *file,
+ void *priv,
+ struct v4l2_modulator *modulator
+ )
+{
+ bool rds_status = false;
+ bool stereo_status = false;
+ int status = 0;
+ int ret_val = -EINVAL;
+
+ FM_INFO_REPORT("vidioc_set_modulator");
+ if (modulator->index != 0) {
+ FM_ERR_REPORT("vidioc_set_modulator: Only 1 "
+ "modulator supported");
+ goto error;
+ }
+
+ if (cg2900_device.fm_mode != CG2900_FM_TX_MODE) {
+ /*
+ * FM Tx mode should be configured as
+ * earlier mode was not FM Tx
+ */
+ if (band == CG2900_FM_BAND_US_EU) {
+ freq_low = FMR_EU_US_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ freq_high = FMR_EU_US_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ } else if (band == CG2900_FM_BAND_JAPAN) {
+ freq_low = FMR_JAPAN_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ freq_high = FMR_JAPAN_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ } else if (band == CG2900_FM_BAND_CHINA) {
+ freq_low = FMR_CHINA_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ freq_high = FMR_CHINA_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ }
+ cg2900_device.fm_mode = CG2900_FM_TX_MODE;
+ cg2900_device.rx_rds_enabled = false;
+ cg2900_device.tx_rds_enabled =
+ (modulator->txsubchans & V4L2_TUNER_SUB_RDS) ?
+ true : false;
+ if (modulator->txsubchans & V4L2_TUNER_SUB_STEREO)
+ stereo_status = true;
+ else if (modulator->txsubchans & V4L2_TUNER_SUB_MONO)
+ stereo_status = false;
+ cg2900_device.tx_stereo_status = stereo_status;
+
+ status = cg2900_fm_set_tx_default_settings(freq_low,
+ band,
+ grid,
+ cg2900_device.tx_rds_enabled,
+ cg2900_device.
+ tx_stereo_status);
+
+ if (0 != status) {
+ FM_ERR_REPORT("vidioc_set_modulator: "
+ "cg2900_fm_set_tx_default_settings returned "
+ " %d", status);
+ goto error;
+ }
+ } else {
+ /*
+ * Mode was FM Tx only, change the RDS settings or stereo mode
+ * if they are changed by application
+ */
+ rds_status = (modulator->txsubchans & V4L2_TUNER_SUB_RDS) ?
+ true : false;
+ if (modulator->txsubchans & V4L2_TUNER_SUB_STEREO)
+ stereo_status = true;
+ else if (modulator->txsubchans & V4L2_TUNER_SUB_MONO)
+ stereo_status = false;
+ if (stereo_status != cg2900_device.tx_stereo_status) {
+ cg2900_device.tx_stereo_status = stereo_status;
+ status = cg2900_fm_set_mode(stereo_status);
+ if (0 != status) {
+ FM_ERR_REPORT("vidioc_set_modulator: "
+ "cg2900_fm_set_mode returned "
+ " %d", status);
+ goto error;
+ }
+ }
+ if (rds_status != cg2900_device.tx_rds_enabled) {
+ cg2900_device.tx_rds_enabled = rds_status;
+ status = cg2900_fm_tx_rds(rds_status);
+ if (0 != status) {
+ FM_ERR_REPORT("vidioc_set_modulator: "
+ "cg2900_fm_tx_rds returned "
+ " %d", status);
+ goto error;
+ }
+ }
+ }
+
+ ret_val = 0;
+
+error:
+ FM_DEBUG_REPORT("vidioc_set_modulator: returning %d",
+ ret_val);
+ return ret_val;
+}
+
+/**
+ * vidioc_get_frequency()- Get the Current FM Frequency.
+ *
+ * This function is used to get the currently tuned
+ * frequency on FM Radio. This function is called when the application
+ * issues the IOCTL VIDIOC_G_FREQUENCY
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @freq: v4l2_frequency structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_get_frequency(
+ struct file *file,
+ void *priv,
+ struct v4l2_frequency *freq
+ )
+{
+ int status;
+ u32 frequency;
+ int ret_val = -EINVAL;
+ struct sk_buff *skb;
+
+ FM_INFO_REPORT("vidioc_get_frequency: Status = %d",
+ cg2900_device.seekstatus);
+
+ status = cg2900_fm_get_frequency(&frequency);
+
+ if (0 != status) {
+ freq->frequency = cg2900_device.frequency;
+ goto error;
+ }
+
+ if (cg2900_device.seekstatus == FMR_SEEK_IN_PROGRESS) {
+ if (skb_queue_empty(&fm_interrupt_queue)) {
+ /* No Interrupt, bad case */
+ FM_ERR_REPORT("vidioc_get_frequency: "
+ "No Interrupt to read");
+ fm_event = CG2900_EVENT_NO_EVENT;
+ goto error;
+ }
+ spin_lock(&fm_spinlock);
+ skb = skb_dequeue(&fm_interrupt_queue);
+ spin_unlock(&fm_spinlock);
+ if (!skb) {
+ /* No Interrupt, bad case */
+ FM_ERR_REPORT("vidioc_get_frequency: "
+ "No Interrupt to read");
+ fm_event = CG2900_EVENT_NO_EVENT;
+ goto error;
+ }
+ fm_event = (u8)skb->data[0];
+ FM_DEBUG_REPORT("vidioc_get_frequency: Interrupt = %x",
+ fm_event);
+ /* Check if seek is finished or not */
+ if (CG2900_EVENT_SEARCH_CHANNEL_FOUND == fm_event) {
+ /* seek is finished */
+ spin_lock(&fm_spinlock);
+ cg2900_device.frequency = HZ_TO_V4L2(frequency);
+ freq->frequency = cg2900_device.frequency;
+ cg2900_device.seekstatus = FMR_SEEK_NONE;
+ fm_event = CG2900_EVENT_NO_EVENT;
+ kfree_skb(skb);
+ spin_unlock(&fm_spinlock);
+ } else {
+ /* Some other interrupt, queue it back */
+ spin_lock(&fm_spinlock);
+ skb_queue_head(&fm_interrupt_queue, skb);
+ spin_unlock(&fm_spinlock);
+ }
+ } else {
+ spin_lock(&fm_spinlock);
+ cg2900_device.frequency = HZ_TO_V4L2(frequency);
+ freq->frequency = cg2900_device.frequency;
+ spin_unlock(&fm_spinlock);
+ }
+ ret_val = 0;
+
+error:
+ FM_DEBUG_REPORT("vidioc_get_frequency: returning = %d",
+ ret_val);
+ return ret_val;
+}
+
+/**
+ * vidioc_set_frequency()- Set the FM Frequency.
+ *
+ * This function is used to set the frequency
+ * on FM Radio. This function is called when the application
+ * issues the IOCTL VIDIOC_S_FREQUENCY
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @freq: v4l2_frequency structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_set_frequency(
+ struct file *file,
+ void *priv,
+ struct v4l2_frequency *freq
+ )
+{
+ u32 frequency = freq->frequency;
+ u32 freq_low, freq_high;
+ int status;
+ int ret_val = -EINVAL;
+
+ FM_INFO_REPORT("vidioc_set_frequency: Frequency = "
+ "%d ", V4L2_TO_HZ(frequency));
+
+ /* Check which band is set currently */
+ switch (band) {
+ case CG2900_FM_BAND_US_EU:
+ freq_low = FMR_EU_US_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ freq_high = FMR_EU_US_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ break;
+
+ case CG2900_FM_BAND_CHINA:
+ freq_low = FMR_CHINA_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ freq_high = FMR_CHINA_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ break;
+
+ case CG2900_FM_BAND_JAPAN:
+ freq_low = FMR_JAPAN_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ freq_high = FMR_JAPAN_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ break;
+
+ default:
+ /* Default: widest range, China minimum to US/EU maximum */
+ freq_low = FMR_CHINA_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ freq_high = FMR_EU_US_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ }
+
+ /* Check if the frequency set is out of current band */
+ if ((V4L2_TO_HZ(frequency) < freq_low) ||
+ (V4L2_TO_HZ(frequency) > freq_high))
+ goto error;
+
+ spin_lock(&fm_spinlock);
+ fm_event = CG2900_EVENT_NO_EVENT;
+ no_of_scan_freq = 0;
+ spin_unlock(&fm_spinlock);
+
+ cg2900_device.seekstatus = FMR_SEEK_NONE;
+ cg2900_device.frequency = frequency;
+ status = cg2900_fm_set_frequency(V4L2_TO_HZ(frequency));
+
+ if (0 != status)
+ goto error;
+
+ ret_val = 0;
+
+error:
+ FM_DEBUG_REPORT("vidioc_set_frequency: returning = %d",
+ ret_val);
+ return ret_val;
+}
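+
+/*
+ * Illustrative sketch (not part of the driver): tuning to 97.8 MHz from
+ * user space through the handler above. Because the tuner reports
+ * V4L2_TUNER_CAP_LOW, frequencies are passed in units of 62.5 Hz, so
+ * 97800000 Hz / 62.5 = 1564800. The file descriptor fd is assumed to be
+ * an open handle to the radio device node.
+ *
+ *   struct v4l2_frequency freq;
+ *
+ *   memset(&freq, 0, sizeof(freq));
+ *   freq.tuner = 0;
+ *   freq.type = V4L2_TUNER_RADIO;
+ *   freq.frequency = 1564800;
+ *   if (ioctl(fd, VIDIOC_S_FREQUENCY, &freq) < 0)
+ *           perror("VIDIOC_S_FREQUENCY");
+ */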
+
+/**
+ * vidioc_query_ctrl()- Query the FM Driver control features.
+ *
+ * This function is used to query the control features on FM Radio.
+ * This function is called when the application
+ * issues the IOCTL VIDIOC_QUERYCTRL
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @query_ctrl: v4l2_queryctrl structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_query_ctrl(
+ struct file *file,
+ void *priv,
+ struct v4l2_queryctrl *query_ctrl
+ )
+{
+ int ret_val = -EINVAL;
+
+ FM_INFO_REPORT("vidioc_query_ctrl");
+ /* Check which control is requested */
+ switch (query_ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ FM_DEBUG_REPORT("vidioc_query_ctrl: V4L2_CID_AUDIO_MUTE");
+ query_ctrl->type = V4L2_CTRL_TYPE_BOOLEAN;
+ query_ctrl->minimum = 0;
+ query_ctrl->maximum = 1;
+ query_ctrl->step = 1;
+ query_ctrl->default_value = 0;
+ query_ctrl->flags = 0;
+ strncpy(query_ctrl->name, "CG2900 Mute", 32);
+ ret_val = 0;
+ break;
+
+ case V4L2_CID_AUDIO_VOLUME:
+ FM_DEBUG_REPORT("vidioc_query_ctrl: V4L2_CID_AUDIO_VOLUME");
+
+ strncpy(query_ctrl->name, "CG2900 Volume", 32);
+ query_ctrl->minimum = MIN_ANALOG_VOLUME;
+ query_ctrl->maximum = MAX_ANALOG_VOLUME;
+ query_ctrl->step = 1;
+ query_ctrl->default_value = MAX_ANALOG_VOLUME;
+ query_ctrl->flags = 0;
+ query_ctrl->type = V4L2_CTRL_TYPE_INTEGER;
+ ret_val = 0;
+ break;
+
+ case V4L2_CID_AUDIO_BALANCE:
+ FM_DEBUG_REPORT("vidioc_query_ctrl: V4L2_CID_AUDIO_BALANCE ");
+ strncpy(query_ctrl->name, "CG2900 Audio Balance", 32);
+ query_ctrl->type = V4L2_CTRL_TYPE_INTEGER;
+ query_ctrl->minimum = 0x0000;
+ query_ctrl->maximum = 0xFFFF;
+ query_ctrl->step = 0x0001;
+ query_ctrl->default_value = 0x0000;
+ query_ctrl->flags = 0;
+ ret_val = 0;
+ break;
+
+ case V4L2_CID_AUDIO_BASS:
+ FM_DEBUG_REPORT("vidioc_query_ctrl: "
+ "V4L2_CID_AUDIO_BASS (unsupported)");
+ break;
+
+ case V4L2_CID_AUDIO_TREBLE:
+ FM_DEBUG_REPORT("vidioc_query_ctrl: "
+ "V4L2_CID_AUDIO_TREBLE (unsupported)");
+ break;
+
+ default:
+ FM_DEBUG_REPORT("vidioc_query_ctrl: "
+ "--> unsupported id = %x", query_ctrl->id);
+ break;
+ }
+
+ FM_DEBUG_REPORT("vidioc_query_ctrl: returning = %d",
+ ret_val);
+ return ret_val;
+}
+
+/**
+ * vidioc_get_ctrl()- Get the value of a particular Control.
+ *
+ * This function is used to get the value of a
+ * particular control from the FM Driver. This function is called
+ * when the application issues the IOCTL VIDIOC_G_CTRL
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @ctrl: v4l2_control structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_get_ctrl(
+ struct file *file,
+ void *priv,
+ struct v4l2_control *ctrl
+ )
+{
+ int status;
+ u8 value;
+ u16 rssi;
+ u8 antenna;
+ u16 conclusion;
+ int ret_val = -EINVAL;
+
+ FM_INFO_REPORT("vidioc_get_ctrl");
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ status = cg2900_fm_get_volume(&value);
+ if (0 == status) {
+ ctrl->value = value;
+ cg2900_device.volume = value;
+ ret_val = 0;
+ }
+ break;
+ case V4L2_CID_AUDIO_MUTE:
+ ctrl->value = cg2900_device.muted;
+ ret_val = 0;
+ break;
+ case V4L2_CID_AUDIO_BALANCE:
+ ctrl->value = cg2900_device.audiopath;
+ ret_val = 0;
+ break;
+ case V4L2_CID_CG2900_RADIO_RSSI_THRESHOLD:
+ ctrl->value = cg2900_device.rssi_threshold;
+ ret_val = 0;
+ break;
+ case V4L2_CID_CG2900_RADIO_SELECT_ANTENNA:
+ status = cg2900_fm_get_antenna(&antenna);
+ FM_DEBUG_REPORT("vidioc_get_ctrl: Antenna = %x", antenna);
+ if (0 == status) {
+ ctrl->value = antenna;
+ ret_val = 0;
+ }
+ break;
+ case V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_GET_RESULT:
+ status = cg2900_fm_af_update_get_result(&rssi);
+ FM_DEBUG_REPORT("vidioc_get_ctrl: AF RSSI Level = %x", rssi);
+ if (0 == status) {
+ ctrl->value = rssi;
+ ret_val = 0;
+ }
+ break;
+ case V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_GET_RESULT:
+ status = cg2900_fm_af_switch_get_result(&conclusion);
+ FM_DEBUG_REPORT("vidioc_get_ctrl: AF Switch conclusion = %x",
+ conclusion);
+ if (0 != status)
+ break;
+ if (conclusion == 0) {
+ ctrl->value = conclusion;
+ FM_DEBUG_REPORT("vidioc_get_ctrl: "
+ "AF Switch conclusion = %d",
+ ctrl->value);
+ ret_val = 0;
+ } else {
+ /*
+ * Convert positive error code returned by chip
+ * into negative error codes to be in line with linux.
+ */
+ ctrl->value = -conclusion;
+ FM_ERR_REPORT("vidioc_get_ctrl: "
+ "AF-Switch failed with value %d", ctrl->value);
+ ret_val = 0;
+ }
+ break;
+ default:
+ FM_DEBUG_REPORT("vidioc_get_ctrl: "
+ "unsupported (id = %x)", (int)ctrl->id);
+ ret_val = -EINVAL;
+ }
+ FM_DEBUG_REPORT("vidioc_get_ctrl: returning = %d",
+ ret_val);
+ return ret_val;
+}
+
+/**
+ * vidioc_set_ctrl()- Set the value of a particular Control.
+ *
+ * This function is used to set the value of a
+ * particular control from the FM Driver. This function is called when the
+ * application issues the IOCTL VIDIOC_S_CTRL
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @ctrl: v4l2_control structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -ERANGE when the parameter is out of range.
+ * -EINVAL: otherwise
+ */
+static int vidioc_set_ctrl(
+ struct file *file,
+ void *priv,
+ struct v4l2_control *ctrl
+ )
+{
+ int status;
+ int ret_val = -EINVAL;
+ FM_INFO_REPORT("vidioc_set_ctrl");
+ /* Check which control is requested */
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "V4L2_CID_AUDIO_MUTE, "
+ "value = %d", ctrl->value);
+ if (ctrl->value > 1 || ctrl->value < 0) {
+ ret_val = -ERANGE;
+ break;
+ }
+
+ if (ctrl->value) {
+ FM_DEBUG_REPORT("vidioc_set_ctrl: Ctrl_Id = "
+ "V4L2_CID_AUDIO_MUTE, "
+ "Muting the Radio");
+ status = cg2900_fm_mute();
+ } else {
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "Ctrl_Id = V4L2_CID_AUDIO_MUTE, "
+ "UnMuting the Radio");
+ status = cg2900_fm_unmute();
+ }
+ if (0 == status) {
+ cg2900_device.muted = ctrl->value;
+ ret_val = 0;
+ }
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "V4L2_CID_AUDIO_VOLUME, "
+ "value = %d", ctrl->value);
+ if (ctrl->value > MAX_ANALOG_VOLUME ||
+ ctrl->value < MIN_ANALOG_VOLUME) {
+ ret_val = -ERANGE;
+ break;
+ }
+ status = cg2900_fm_set_volume(ctrl->value);
+ if (0 == status) {
+ cg2900_device.volume = ctrl->value;
+ ret_val = 0;
+ }
+ break;
+ case V4L2_CID_AUDIO_BALANCE:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "V4L2_CID_AUDIO_BALANCE, "
+ "value = %d", ctrl->value);
+ status = cg2900_fm_set_audio_balance(ctrl->value);
+ if (0 == status) {
+ cg2900_device.audiopath = ctrl->value;
+ ret_val = 0;
+ }
+ break;
+ case V4L2_CID_CG2900_RADIO_CHIP_STATE:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "V4L2_CID_CG2900_RADIO_CHIP_STATE, "
+ "value = %d", ctrl->value);
+ if (V4L2_CG2900_RADIO_STANDBY == ctrl->value)
+ status = cg2900_fm_standby();
+ else if (V4L2_CG2900_RADIO_POWERUP == ctrl->value)
+ status = cg2900_fm_power_up_from_standby();
+ else
+ break;
+ if (0 != status)
+ break;
+ if (V4L2_CG2900_RADIO_STANDBY == ctrl->value)
+ cg2900_device.state = FMR_STANDBY;
+ else if (V4L2_CG2900_RADIO_POWERUP == ctrl->value)
+ cg2900_device.state = FMR_SWITCH_ON;
+ ret_val = 0;
+ break;
+ case V4L2_CID_CG2900_RADIO_SELECT_ANTENNA:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "V4L2_CID_CG2900_RADIO_SELECT_ANTENNA, "
+ "value = %d", ctrl->value);
+ status = cg2900_fm_select_antenna(ctrl->value);
+ if (0 == status)
+ ret_val = 0;
+ break;
+ case V4L2_CID_CG2900_RADIO_BANDSCAN:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "V4L2_CID_CG2900_RADIO_BANDSCAN, "
+ "value = %d", ctrl->value);
+ if (V4L2_CG2900_RADIO_BANDSCAN_START == ctrl->value) {
+ cg2900_device.seekstatus = FMR_SEEK_IN_PROGRESS;
+ no_of_scan_freq = 0;
+ status = cg2900_fm_start_band_scan();
+ } else if (V4L2_CG2900_RADIO_BANDSCAN_STOP == ctrl->value) {
+ status = cg2900_fm_stop_scan();
+ cg2900_device.seekstatus = FMR_SEEK_NONE;
+ } else
+ break;
+ if (0 == status)
+ ret_val = 0;
+ break;
+ case V4L2_CID_CG2900_RADIO_RSSI_THRESHOLD:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "V4L2_CID_CG2900_RADIO_RSSI_THRESHOLD "
+ "= %d", ctrl->value);
+ status = cg2900_fm_set_rssi_threshold(ctrl->value);
+ if (0 == status) {
+ cg2900_device.rssi_threshold = ctrl->value;
+ ret_val = 0;
+ }
+ break;
+ case V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_START:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_START "
+ "freq = %d Hz", ctrl->value);
+ status = cg2900_fm_af_update_start(ctrl->value);
+ if (0 == status)
+ ret_val = 0;
+ break;
+ case V4L2_CID_CG2900_RADIO_TEST_TONE_GENERATOR_SET_STATUS:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "V4L2_CID_CG2900_RADIO_TEST_TONE_GENERATOR_SET_STATUS "
+ "state = %d ", ctrl->value);
+ if (ctrl->value < V4L2_CG2900_RADIO_TEST_TONE_GEN_OFF ||
+ ctrl->value >
+ V4L2_CG2900_RADIO_TEST_TONE_GENERATOR_ON_WO_SRC) {
+ FM_ERR_REPORT("Invalid parameter = %d", ctrl->value);
+ break;
+ }
+ status = cg2900_fm_set_test_tone_generator(ctrl->value);
+ if (0 == status)
+ ret_val = 0;
+ break;
+ case V4L2_CID_CG2900_RADIO_TUNE_DEEMPHASIS:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "V4L2_CID_CG2900_RADIO_TUNE_DEEMPHASIS, "
+ "Value = %d", ctrl->value);
+
+ if ((V4L2_CG2900_RADIO_DEEMPHASIS_DISABLED >
+ ctrl->value) ||
+ (V4L2_CG2900_RADIO_DEEMPHASIS_75_uS <
+ ctrl->value)) {
+ FM_ERR_REPORT("Unsupported deemphasis = %d",
+ ctrl->value);
+ break;
+ }
+
+ switch (ctrl->value) {
+ case V4L2_CG2900_RADIO_DEEMPHASIS_50_uS:
+ ctrl->value = FMD_EMPHASIS_50US;
+ break;
+ case V4L2_CG2900_RADIO_DEEMPHASIS_75_uS:
+ ctrl->value = FMD_EMPHASIS_75US;
+ break;
+ case V4L2_CG2900_RADIO_DEEMPHASIS_DISABLED:
+ /* Fall through */
+ default:
+ ctrl->value = FMD_EMPHASIS_NONE;
+ break;
+ }
+ status = cg2900_fm_rx_set_deemphasis(ctrl->value);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ default:
+ FM_DEBUG_REPORT("vidioc_set_ctrl: "
+ "unsupported (id = %x)", ctrl->id);
+ }
+ FM_DEBUG_REPORT("vidioc_set_ctrl: returning = %d",
+ ret_val);
+ return ret_val;
+}
+
+/**
+ * vidioc_get_ext_ctrls()- Get the values of a particular control.
+ *
+ * This function is used to get the value of a
+ * particular control from the FM Driver. This is used when the data to
+ * be received is more than one parameter. This function is called when the
+ * application issues the IOCTL VIDIOC_G_EXT_CTRLS
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @ext_ctrl: v4l2_ext_controls structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -ENOSPC: when there is no space to copy the data into the buffer provided
+ * by application.
+ * -EINVAL: otherwise
+ */
+static int vidioc_get_ext_ctrls(
+ struct file *file,
+ void *priv,
+ struct v4l2_ext_controls *ext_ctrl
+ )
+{
+ u32 *dest_buffer;
+ int index = 0;
+ int count = 0;
+ int ret_val = -EINVAL;
+ int status;
+ struct sk_buff *skb;
+ u8 mode;
+ s8 interrupt_success;
+ int *fm_interrupt_buffer;
+
+ FM_INFO_REPORT("vidioc_get_ext_ctrls: Id = %04x,"
+ "ext_ctrl->ctrl_class = %04x",
+ ext_ctrl->controls->id,
+ ext_ctrl->ctrl_class);
+
+ if (ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_FM_TX &&
+ ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_USER) {
+ FM_ERR_REPORT("vidioc_get_ext_ctrls: Unsupported "
+ "ctrl_class = %04x", ext_ctrl->ctrl_class);
+ goto error;
+ }
+
+ switch (ext_ctrl->controls->id) {
+ case V4L2_CID_CG2900_RADIO_BANDSCAN_GET_RESULTS:
+ if (ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_USER) {
+ FM_ERR_REPORT("vidioc_get_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_BANDSCAN_GET_RESULTS "
+ "Unsupported ctrl_class = %04x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+ if (cg2900_device.seekstatus ==
+ FMR_SEEK_IN_PROGRESS) {
+ spin_lock(&fm_spinlock);
+ skb = skb_dequeue(&fm_interrupt_queue);
+ spin_unlock(&fm_spinlock);
+ if (!skb) {
+ /* No Interrupt, bad case */
+ FM_ERR_REPORT("No Interrupt to read");
+ fm_event = CG2900_EVENT_NO_EVENT;
+ break;
+ }
+ fm_event = (u8)skb->data[0];
+ FM_DEBUG_REPORT(
+ "V4L2_CID_CG2900_RADIO"
+ "_BANDSCAN_GET_RESULTS: "
+ "fm_event = %x", fm_event);
+ if (fm_event ==
+ CG2900_EVENT_SCAN_CHANNELS_FOUND) {
+ /* Check to get Scan Result */
+ status =
+ cg2900_fm_get_scan_result
+ (&no_of_scan_freq, scanfreq,
+ scanfreq_rssi_level);
+ if (0 != status) {
+ FM_ERR_REPORT
+ ("vidioc_get_ext_ctrls: "
+ "cg2900_fm_get_scan_"
+ "result: returned %d",
+ status);
+ kfree_skb(skb);
+ break;
+ }
+ kfree_skb(skb);
+ } else {
+ /* Some other interrupt, Queue it back */
+ spin_lock(&fm_spinlock);
+ skb_queue_head(&fm_interrupt_queue, skb);
+ spin_unlock(&fm_spinlock);
+ }
+ }
+ FM_DEBUG_REPORT("vidioc_get_ext_ctrls: "
+ "SeekStatus = %x, GlobalEvent = %x, "
+ "numchannels = %x",
+ cg2900_device.seekstatus,
+ fm_event, no_of_scan_freq);
+
+ if (ext_ctrl->controls->size == 0 &&
+ ext_ctrl->controls->string == NULL) {
+ if (cg2900_device.seekstatus ==
+ FMR_SEEK_IN_PROGRESS &&
+ CG2900_EVENT_SCAN_CHANNELS_FOUND
+ == fm_event) {
+ spin_lock(&fm_spinlock);
+ ext_ctrl->controls->size =
+ no_of_scan_freq;
+ cg2900_device.seekstatus
+ = FMR_SEEK_NONE;
+ fm_event =
+ CG2900_EVENT_NO_EVENT;
+ spin_unlock(&fm_spinlock);
+ return -ENOSPC;
+ }
+ } else if (ext_ctrl->controls->string != NULL) {
+ dest_buffer =
+ (u32 *) ext_ctrl->controls->string;
+ while (index < no_of_scan_freq) {
+ *(dest_buffer + count + 0) =
+ HZ_TO_V4L2(scanfreq[index]);
+ *(dest_buffer + count + 1) =
+ scanfreq_rssi_level[index];
+ count += 2;
+ index++;
+ }
+ ret_val = 0;
+ }
+ break;
+ case V4L2_CID_CG2900_RADIO_BLOCKSCAN_GET_RESULTS:
+ if (ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_USER) {
+ FM_ERR_REPORT("vidioc_get_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_BLOCKSCAN"
+ "_GET_RESULTS "
+ "Unsupported ctrl_class = %04x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+ if (cg2900_device.seekstatus == FMR_SEEK_IN_PROGRESS) {
+ spin_lock(&fm_spinlock);
+ skb = skb_dequeue(&fm_interrupt_queue);
+ spin_unlock(&fm_spinlock);
+ if (!skb) {
+ /* No Interrupt, bad case */
+ FM_ERR_REPORT("No Interrupt to read");
+ fm_event = CG2900_EVENT_NO_EVENT;
+ break;
+ }
+ fm_event = (u8)skb->data[0];
+ FM_DEBUG_REPORT(
+ "V4L2_CID_CG2900_RADIO_BLOCKSCAN"
+ "GET_RESULTS: "
+ "fm_event = %x", fm_event);
+ if (fm_event ==
+ CG2900_EVENT_BLOCK_SCAN_CHANNELS_FOUND) {
+ /* Check for BlockScan Result */
+ status =
+ cg2900_fm_get_block_scan_result
+ (&no_of_block_scan_freq,
+ block_scan_rssi_level);
+ if (0 != status) {
+ FM_ERR_REPORT
+ ("vidioc_get_ext_ctrls: "
+ "cg2900_fm_get_block_scan_"
+ "result: returned %d",
+ status);
+ kfree_skb(skb);
+ break;
+ }
+ kfree_skb(skb);
+ } else {
+ /* Some other interrupt,
+ Queue it back */
+ spin_lock(&fm_spinlock);
+ skb_queue_head(&fm_interrupt_queue, skb);
+ spin_unlock(&fm_spinlock);
+ }
+ }
+ FM_DEBUG_REPORT("vidioc_get_ext_ctrls: "
+ "SeekStatus = %x, GlobalEvent = %x, "
+ "numchannels = %x",
+ cg2900_device.seekstatus,
+ fm_event, no_of_block_scan_freq);
+ if (ext_ctrl->controls->size == 0 &&
+ ext_ctrl->controls->string == NULL) {
+ if (cg2900_device.seekstatus ==
+ FMR_SEEK_IN_PROGRESS &&
+ CG2900_EVENT_BLOCK_SCAN_CHANNELS_FOUND
+ == fm_event) {
+ spin_lock(&fm_spinlock);
+ ext_ctrl->controls->size =
+ no_of_block_scan_freq;
+ cg2900_device.seekstatus
+ = FMR_SEEK_NONE;
+ fm_event =
+ CG2900_EVENT_NO_EVENT;
+ spin_unlock(&fm_spinlock);
+ return -ENOSPC;
+ }
+ } else if (ext_ctrl->controls->size >=
+ no_of_block_scan_freq &&
+ ext_ctrl->controls->string != NULL) {
+ dest_buffer =
+ (u32 *) ext_ctrl->controls->string;
+ while (index < no_of_block_scan_freq) {
+ *(dest_buffer + index) =
+ block_scan_rssi_level
+ [index];
+ index++;
+ }
+ ret_val = 0;
+ return ret_val;
+ }
+ break;
+ case V4L2_CID_RDS_TX_DEVIATION:
+ FM_DEBUG_REPORT("vidioc_get_ext_ctrls: "
+ "V4L2_CID_RDS_TX_DEVIATION");
+ if (V4L2_CTRL_CLASS_FM_TX != ext_ctrl->ctrl_class) {
+ FM_ERR_REPORT("Invalid Ctrl Class = %x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+ status = cg2900_fm_tx_get_rds_deviation((u16 *) &
+ ext_ctrl->
+ controls->value);
+ if (status == 0)
+ ret_val = 0;
+ break;
+ case V4L2_CID_PILOT_TONE_ENABLED:
+ FM_DEBUG_REPORT("vidioc_get_ext_ctrls: "
+ "V4L2_CID_PILOT_TONE_ENABLED");
+ if (V4L2_CTRL_CLASS_FM_TX != ext_ctrl->ctrl_class) {
+ FM_ERR_REPORT("Invalid Ctrl Class = %x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+ status = cg2900_fm_tx_get_pilot_tone_status(
+ (bool *)&ext_ctrl->controls->value);
+ if (status == 0)
+ ret_val = 0;
+ break;
+ case V4L2_CID_PILOT_TONE_DEVIATION:
+ FM_DEBUG_REPORT("vidioc_get_ext_ctrls: "
+ "V4L2_CID_PILOT_TONE_DEVIATION");
+ if (V4L2_CTRL_CLASS_FM_TX != ext_ctrl->ctrl_class) {
+ FM_ERR_REPORT("Invalid Ctrl Class = %x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+ status = cg2900_fm_tx_get_pilot_deviation(
+ (u16 *)&ext_ctrl->controls->value);
+ if (status == 0)
+ ret_val = 0;
+ break;
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ FM_DEBUG_REPORT("vidioc_get_ext_ctrls: "
+ "V4L2_CID_TUNE_PREEMPHASIS");
+ if (V4L2_CTRL_CLASS_FM_TX != ext_ctrl->ctrl_class) {
+ FM_ERR_REPORT("Invalid Ctrl Class = %x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+ status = cg2900_fm_tx_get_preemphasis(
+ (u8 *)&ext_ctrl->controls->value);
+ if (status == 0)
+ ret_val = 0;
+ break;
+ case V4L2_CID_TUNE_POWER_LEVEL:
+ FM_DEBUG_REPORT("vidioc_get_ext_ctrls: "
+ "V4L2_CID_TUNE_POWER_LEVEL");
+ if (V4L2_CTRL_CLASS_FM_TX != ext_ctrl->ctrl_class) {
+ FM_ERR_REPORT("Invalid Ctrl Class = %x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+ status = cg2900_fm_tx_get_power_level(
+ (u16 *)&ext_ctrl->controls->value);
+ if (status == 0)
+ ret_val = 0;
+ break;
+ case V4L2_CID_CG2900_RADIO_GET_INTERRUPT:
+ if (ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_USER) {
+ FM_ERR_REPORT("vidioc_get_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_GET_INTERRUPT "
+ "Unsupported ctrl_class = %04x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+ if (ext_ctrl->controls->size != FMR_GET_INTERRUPT_DATA_SIZE ||
+ ext_ctrl->controls->string == NULL) {
+ FM_ERR_REPORT("vidioc_get_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_GET_INTERRUPT "
+ "Invalid parameters, ext_ctrl->controls->size = %x "
+ "ext_ctrl->controls->string = %08x",
+ ext_ctrl->controls->size,
+ (unsigned int)ext_ctrl->controls->string);
+ ret_val = -ENOSPC;
+ break;
+ }
+ spin_lock(&fm_spinlock);
+ skb = skb_dequeue(&fm_interrupt_queue);
+ spin_unlock(&fm_spinlock);
+ if (!skb) {
+ /* No Interrupt, bad case */
+ FM_ERR_REPORT("V4L2_CID_CG2900_RADIO_GET_INTERRUPT: "
+ "No Interrupt to read");
+ fm_event = CG2900_EVENT_NO_EVENT;
+ break;
+ }
+ fm_event = (u8)skb->data[0];
+ interrupt_success = (s8)skb->data[1];
+ FM_DEBUG_REPORT("vidioc_get_ctrl: Interrupt = %x "
+ "interrupt_success = %x",
+ fm_event, interrupt_success);
+ fm_interrupt_buffer =
+ (int *) ext_ctrl->controls->string;
+ /* Interrupt that has occurred */
+ *fm_interrupt_buffer = cg2900_map_event_to_v4l2(fm_event);
+
+ /* Interrupt success or failed */
+ if (interrupt_success) {
+ /* Interrupt Success, return 0 */
+ *(fm_interrupt_buffer + 1) = 0;
+ } else {
+ spin_lock(&fm_spinlock);
+ no_of_scan_freq = 0;
+ no_of_block_scan_freq = 0;
+ spin_unlock(&fm_spinlock);
+ cg2900_device.seekstatus = FMR_SEEK_NONE;
+ /* Clear the Interrupt flag */
+ fm_event = CG2900_EVENT_NO_EVENT;
+ kfree_skb(skb);
+ /* Interrupt failed, return negative error */
+ *(fm_interrupt_buffer + 1) = -1;
+ FM_ERR_REPORT("vidioc_get_ext_ctrls: Interrupt = %d "
+ "failed with reason = %d",
+ (*fm_interrupt_buffer),
+ (*(fm_interrupt_buffer + 1)));
+ /*
+ * Update return value, so that application
+ * can read the event failure reason.
+ */
+ ret_val = 0;
+ break;
+ }
+
+ if (CG2900_EVENT_MONO_STEREO_TRANSITION
+ == fm_event) {
+ /*
+ * In case of Mono/Stereo Interrupt,
+ * get the current value from chip
+ */
+ status = cg2900_fm_get_mode(&mode);
+ cg2900_device.rx_stereo_status = (bool)mode;
+ /* Clear the Interrupt flag */
+ fm_event = CG2900_EVENT_NO_EVENT;
+ kfree_skb(skb);
+ } else if (CG2900_EVENT_SCAN_CANCELLED ==
+ fm_event) {
+ /* Scan/Search cancelled by User */
+ spin_lock(&fm_spinlock);
+ no_of_scan_freq = 0;
+ no_of_block_scan_freq = 0;
+ spin_unlock(&fm_spinlock);
+ cg2900_device.seekstatus = FMR_SEEK_NONE;
+ /* Clear the Interrupt flag */
+ fm_event = CG2900_EVENT_NO_EVENT;
+ kfree_skb(skb);
+ } else {
+ /* Queue the interrupt back
+ for later dequeuing */
+ FM_DEBUG_REPORT("V4L2_CID_CG2900"
+ "_RADIO_GET_INTERRUPT: "
+ "Queuing the interrupt"
+ "again to head of list");
+ spin_lock(&fm_spinlock);
+ skb_queue_head(&fm_interrupt_queue, skb);
+ spin_unlock(&fm_spinlock);
+ }
+ ret_val = 0;
+ break;
+ default:
+ FM_DEBUG_REPORT("vidioc_get_ext_ctrls: "
+ "unsupported (id = %x)",
+ ext_ctrl->controls->id);
+ }
+
+error:
+ FM_DEBUG_REPORT("vidioc_get_ext_ctrls: returning = %d", ret_val);
+ return ret_val;
+}
+
+/**
+ * vidioc_set_ext_ctrls()- Set the values of a particular control.
+ *
+ * This function is used to set the value of a
+ * particular control on the FM Driver. This is used when the data to
+ * be set is more than one parameter. This function is called when the
+ * application issues the IOCTL VIDIOC_S_EXT_CTRLS
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @ext_ctrl: v4l2_ext_controls structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -ENOSPC: when there is no space to copy the data into the buffer provided
+ * by application.
+ * -EINVAL: otherwise
+ */
+static int vidioc_set_ext_ctrls(
+ struct file *file,
+ void *priv,
+ struct v4l2_ext_controls *ext_ctrl
+ )
+{
+ int ret_val = -EINVAL;
+ int status;
+
+ FM_INFO_REPORT("vidioc_set_ext_ctrls: Id = %04x, ctrl_class = %04x",
+ ext_ctrl->controls->id, ext_ctrl->ctrl_class);
+
+ if (ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_FM_TX &&
+ ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_USER) {
+ FM_ERR_REPORT("vidioc_set_ext_ctrls: Unsupported "
+ "ctrl_class = %04x", ext_ctrl->ctrl_class);
+ goto error;
+ }
+
+ switch (ext_ctrl->controls->id) {
+ case V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_START:
+ {
+ u32 af_switch_freq;
+ u16 af_switch_pi;
+ u32 *af_switch_buf;
+
+ if (ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_USER) {
+ FM_ERR_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_START "
+ "Unsupported ctrl_class = %04x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+
+ if (ext_ctrl->controls->size !=
+ FMR_AF_SWITCH_DATA_SIZE ||
+ ext_ctrl->controls->string == NULL) {
+ FM_ERR_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_START "
+ "Invalid parameters, size = %x",
+ ext_ctrl->controls->size);
+ break;
+ }
+
+ af_switch_buf = (u32 *) ext_ctrl->controls->string;
+ af_switch_freq = V4L2_TO_HZ(*af_switch_buf);
+ af_switch_pi = *(af_switch_buf + 1);
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_START: "
+ "AF Switch Freq =%d Hz AF Switch PI = %04x",
+ (int)af_switch_freq, af_switch_pi);
+
+ if (af_switch_freq < (FMR_CHINA_LOW_FREQ_IN_MHZ
+ * FMR_HZ_TO_MHZ_CONVERTER) ||
+ af_switch_freq > (FMR_CHINA_HIGH_FREQ_IN_MHZ
+ * FMR_HZ_TO_MHZ_CONVERTER)) {
+ FM_ERR_REPORT("Invalid Freq = %04x",
+ af_switch_freq);
+ break;
+ }
+
+ status = cg2900_fm_af_switch_start(
+ af_switch_freq,
+ af_switch_pi);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_RDS_TX_DEVIATION:
+ {
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_RDS_TX_DEVIATION, "
+ "Value = %d",
+ ext_ctrl->controls->value);
+
+ if (ext_ctrl->controls->value <= MIN_RDS_DEVIATION ||
+ ext_ctrl->controls->value > MAX_RDS_DEVIATION) {
+ FM_ERR_REPORT("Invalid RDS Deviation = %02x",
+ ext_ctrl->controls->value);
+ break;
+ }
+
+ status = cg2900_fm_tx_set_rds_deviation(
+ ext_ctrl->controls->value);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_RDS_TX_PI:
+ {
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_RDS_TX_PI, PI = %04x",
+ ext_ctrl->controls->value);
+
+ if (ext_ctrl->controls->value <= MIN_PI_VALUE ||
+ ext_ctrl->controls->value > MAX_PI_VALUE) {
+ FM_ERR_REPORT("Invalid PI = %04x",
+ ext_ctrl->controls->value);
+ break;
+ }
+
+ status = cg2900_fm_tx_set_pi_code(
+ ext_ctrl->controls->value);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_RDS_TX_PTY:
+ {
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_RDS_TX_PTY, PTY = %d",
+ ext_ctrl->controls->value);
+
+ if (ext_ctrl->controls->value < MIN_PTY_VALUE ||
+ ext_ctrl->controls->value > MAX_PTY_VALUE) {
+ FM_ERR_REPORT("Invalid PTY = %02x",
+ ext_ctrl->controls->value);
+ break;
+ }
+
+ status = cg2900_fm_tx_set_pty_code(
+ ext_ctrl->controls->value);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_RDS_TX_PS_NAME:
+ {
+ if (ext_ctrl->controls->size > MAX_PSN_SIZE
+ || ext_ctrl->controls->string == NULL) {
+ FM_ERR_REPORT("Invalid PSN");
+ break;
+ }
+
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_RDS_TX_PS_NAME, "
+ "PSN = %s, Len = %x",
+ ext_ctrl->controls->string,
+ ext_ctrl->controls->size);
+
+ status = cg2900_fm_tx_set_program_station_name(
+ ext_ctrl->controls->string,
+ ext_ctrl->controls->size);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ {
+ if (ext_ctrl->controls->size >= MAX_RT_SIZE
+ || ext_ctrl->controls->string == NULL) {
+ FM_ERR_REPORT("Invalid RT");
+ break;
+ }
+
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_RDS_TX_RADIO_TEXT, "
+ "RT = %s, Len = %x",
+ ext_ctrl->controls->string,
+ ext_ctrl->controls->size);
+
+ status = cg2900_fm_tx_set_radio_text(
+ ext_ctrl->controls->string,
+ ext_ctrl->controls->size);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_PILOT_TONE_ENABLED:
+ {
+ bool enable;
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_PILOT_TONE_ENABLED, "
+ "Value = %d",
+ ext_ctrl->controls->value);
+
+ if (FMD_PILOT_TONE_ENABLED ==
+ ext_ctrl->controls->value)
+ enable = true;
+ else if (FMD_PILOT_TONE_DISABLED ==
+ ext_ctrl->controls->value)
+ enable = false;
+ else {
+ FM_ERR_REPORT("Unsupported Value = %d",
+ ext_ctrl->controls->value);
+ break;
+ }
+ status = cg2900_fm_tx_set_pilot_tone_status(enable);
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_PILOT_TONE_DEVIATION:
+ {
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_PILOT_TONE_DEVIATION, "
+ "Value = %d",
+ ext_ctrl->controls->value);
+
+ if (ext_ctrl->controls->value <= MIN_PILOT_DEVIATION ||
+ ext_ctrl->controls->value > MAX_PILOT_DEVIATION) {
+ FM_ERR_REPORT("Invalid Pilot Deviation = %02x",
+ ext_ctrl->controls->value);
+ break;
+ }
+
+ status = cg2900_fm_tx_set_pilot_deviation(
+ ext_ctrl->controls->value);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ {
+ u8 preemphasis;
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_TUNE_PREEMPHASIS, "
+ "Value = %d",
+ ext_ctrl->controls->value);
+
+ if ((V4L2_PREEMPHASIS_50_uS >
+ ext_ctrl->controls->value) ||
+ (V4L2_PREEMPHASIS_75_uS <
+ ext_ctrl->controls->value)) {
+ FM_ERR_REPORT("Unsupported Preemphasis = %d",
+ ext_ctrl->controls->value);
+ break;
+ }
+
+ if (V4L2_PREEMPHASIS_50_uS ==
+ ext_ctrl->controls->value) {
+ preemphasis = FMD_EMPHASIS_50US;
+ } else if (V4L2_PREEMPHASIS_75_uS ==
+ ext_ctrl->controls->value) {
+ preemphasis = FMD_EMPHASIS_75US;
+ }
+
+ status = cg2900_fm_tx_set_preemphasis(preemphasis);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_TUNE_POWER_LEVEL:
+ {
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_TUNE_POWER_LEVEL, "
+ "Value = %d",
+ ext_ctrl->controls->value);
+ if (ext_ctrl->controls->value < MIN_POWER_LEVEL ||
+ ext_ctrl->controls->value > MAX_POWER_LEVEL) {
+ FM_ERR_REPORT("Invalid Power Level = %02x",
+ ext_ctrl->controls->value);
+ break;
+ }
+
+ status = cg2900_fm_tx_set_power_level(
+ ext_ctrl->controls->value);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_CG2900_RADIO_BLOCKSCAN_START:
+ {
+ u32 start_freq;
+ u32 end_freq;
+ u32 *block_scan_buf;
+ u32 current_grid;
+ u32 low_freq;
+ u32 high_freq;
+ u32 result_freq;
+ u8 no_of_block_scan_channels;
+
+ /* V4L2 Initial check */
+ if (ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_USER) {
+ FM_ERR_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_BLOCKSCAN_START "
+ "Unsupported ctrl_class = %04x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+
+ if (ext_ctrl->controls->size !=
+ FMR_BLOCK_SCAN_DATA_SIZE ||
+ ext_ctrl->controls->string == NULL) {
+ FM_ERR_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_BLOCKSCAN_START "
+ "Invalid Parameters");
+ break;
+ }
+
+ /* Check for current grid */
+ if (grid == CG2900_FM_GRID_50)
+ current_grid = FMR_CHINA_GRID_IN_HZ;
+ else if (grid == CG2900_FM_GRID_100)
+ current_grid = FMR_EUROPE_GRID_IN_HZ;
+ else
+ current_grid = FMR_USA_GRID_IN_HZ;
+
+ /* Check for current band */
+ if (band == CG2900_FM_BAND_US_EU) {
+ low_freq = FMR_EU_US_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ high_freq = FMR_EU_US_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+
+ } else if (band == CG2900_FM_BAND_JAPAN) {
+ low_freq = FMR_JAPAN_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ high_freq = FMR_JAPAN_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+
+ } else {
+ low_freq = FMR_CHINA_LOW_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ high_freq = FMR_CHINA_HIGH_FREQ_IN_MHZ *
+ FMR_HZ_TO_MHZ_CONVERTER;
+ }
+
+ /* V4L2 Extended control */
+
+ block_scan_buf = (u32 *)ext_ctrl->controls->string;
+ start_freq = V4L2_TO_HZ(*block_scan_buf);
+ end_freq = V4L2_TO_HZ(*(block_scan_buf + 1));
+
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_"
+ "BLOCKSCAN_START: "
+ "Start Freq = %d Hz "
+ "End Freq = %d Hz",
+ (int)start_freq,
+ (int)end_freq);
+
+ result_freq = end_freq - start_freq;
+ no_of_block_scan_channels =
+ (u8)(result_freq / current_grid);
+
+ /* Frequency Check */
+ if (end_freq < start_freq) {
+ FM_ERR_REPORT("Start Freq (%d Hz) "
+ " > End Freq (%d Hz)",
+ (int)start_freq,
+ (int)end_freq);
+ break;
+ }
+
+ if ((start_freq < low_freq) ||
+ (start_freq > high_freq)) {
+ FM_ERR_REPORT("Out of Band Freq: "
+ "Start Freq = %d Hz",
+ (int)start_freq);
+ break;
+ }
+
+ if ((end_freq < low_freq) ||
+ (end_freq > high_freq)) {
+ FM_ERR_REPORT("Out of Band Freq: "
+ "End Freq = %d Hz",
+ (int)end_freq);
+ break;
+ }
+
+ /* Maximum allowed block scan range */
+ if (FMR_MAX_BLOCK_SCAN_CHANNELS <
+ no_of_block_scan_channels) {
+ FM_ERR_REPORT("No of channels (%x)"
+ "exceeds Max Block Scan (%x)",
+ no_of_block_scan_channels,
+ FMR_MAX_BLOCK_SCAN_CHANNELS);
+ break;
+ }
+
+ status = cg2900_fm_start_block_scan(
+ start_freq,
+ end_freq);
+ if (0 == status) {
+ cg2900_device.seekstatus =
+ FMR_SEEK_IN_PROGRESS;
+ ret_val = 0;
+ }
+ break;
+ }
+ case V4L2_CID_CG2900_RADIO_TEST_TONE_CONNECT:
+ {
+ u8 left_audio_mode;
+ u8 right_audio_mode;
+ u8 *test_tone_connect_buf;
+
+ /* V4L2 Initial check */
+ if (ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_USER) {
+ FM_ERR_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_"
+ "TEST_TONE_CONNECT "
+ "Unsupported ctrl_class = %04x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+
+ if (ext_ctrl->controls->size !=
+ FMR_TEST_TONE_CONNECT_DATA_SIZE ||
+ ext_ctrl->controls->string == NULL) {
+ FM_ERR_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_TEST"
+ "_TONE_CONNECT "
+ "Invalid Parameters");
+ break;
+ }
+
+ /* V4L2 Extended control */
+ test_tone_connect_buf =
+ (u8 *)ext_ctrl->controls->string;
+ left_audio_mode = *test_tone_connect_buf;
+ right_audio_mode = *(test_tone_connect_buf + 1);
+
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_TEST_TONE_CONNECT "
+ "left_audio_mode = %02x "
+ "right_audio_mode = %02x",
+ left_audio_mode,
+ right_audio_mode);
+
+ /* Range Check */
+ if (left_audio_mode > \
+ V4L2_CG2900_RADIO_TEST_TONE_TONE_SUM) {
+ FM_ERR_REPORT("Invalid Value of "
+ "left_audio_mode (%02x) ",
+ left_audio_mode);
+ break;
+ }
+
+ if (right_audio_mode > \
+ V4L2_CG2900_RADIO_TEST_TONE_TONE_SUM) {
+ FM_ERR_REPORT("Invalid Value of "
+ "right_audio_mode (%02x) ",
+ right_audio_mode);
+ break;
+ }
+
+ status = cg2900_fm_test_tone_connect(
+ left_audio_mode,
+ right_audio_mode);
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ case V4L2_CID_CG2900_RADIO_TEST_TONE_SET_PARAMS:
+ {
+ u8 tone_gen;
+ u16 frequency;
+ u16 volume;
+ u16 phase_offset;
+ u16 dc;
+ u8 waveform;
+ u16 *test_tone_set_params_buf;
+
+ /* V4L2 Initial check */
+ if (ext_ctrl->ctrl_class != V4L2_CTRL_CLASS_USER) {
+ FM_ERR_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_TEST_TONE_SET_PARAMS "
+ "Unsupported ctrl_class = %04x",
+ ext_ctrl->ctrl_class);
+ break;
+ }
+
+ if (ext_ctrl->controls->size !=
+ FMR_TEST_TONE_SET_PARAMS_DATA_SIZE ||
+ ext_ctrl->controls->string == NULL) {
+ FM_ERR_REPORT("vidioc_set_ext_ctrls: "
+ "FMR_TEST_TONE_SET_PARAMS_DATA_SIZE "
+ "Invalid Parameters");
+ break;
+ }
+
+ /* V4L2 Extended control */
+ test_tone_set_params_buf = \
+ (u16 *)ext_ctrl->controls->string;
+
+ tone_gen = (u8)(*test_tone_set_params_buf);
+ frequency = *(test_tone_set_params_buf + 1);
+ volume = *(test_tone_set_params_buf + 2);
+ phase_offset = *(test_tone_set_params_buf + 3);
+ dc = *(test_tone_set_params_buf + 4);
+ waveform = (u8)(*(test_tone_set_params_buf + 5));
+
+ FM_DEBUG_REPORT("vidioc_set_ext_ctrls: "
+ "V4L2_CID_CG2900_RADIO_TEST_TONE_SET_PARAMS "
+ "tone_gen = %02x frequency = %04x "
+ "volume = %04x phase_offset = %04x "
+ "dc = %04x waveform = %02x",
+ tone_gen, frequency,
+ volume, phase_offset,
+ dc, waveform);
+
+ /* Range Check */
+ if (tone_gen > FMD_TST_TONE_2) {
+ FM_ERR_REPORT("Invalid Value of "
+ "tone_gen (%02x) ",
+ tone_gen);
+ break;
+ }
+
+ if (waveform > FMD_TST_TONE_PULSE) {
+ FM_ERR_REPORT("Invalid Value of "
+ "waveform (%02x) ",
+ waveform);
+ break;
+ }
+
+ if (frequency > 0x7FFF) {
+ FM_ERR_REPORT("Invalid Value of "
+ "frequency (%04x) ",
+ frequency);
+ break;
+ }
+
+ if (volume > 0x7FFF) {
+ FM_ERR_REPORT("Invalid Value of "
+ "volume (%04x) ",
+ volume);
+ break;
+ }
+
+ status = cg2900_fm_test_tone_set_params(
+ tone_gen,
+ frequency,
+ volume,
+ phase_offset,
+ dc,
+ waveform);
+
+ if (0 == status)
+ ret_val = 0;
+ break;
+ }
+ default:
+ {
+ FM_ERR_REPORT("vidioc_set_ext_ctrls: "
+ "Unsupported Id = %04x",
+ ext_ctrl->controls->id);
+ }
+ }
+error:
+ return ret_val;
+}
+
+/**
+ * vidioc_set_hw_freq_seek()- seek Up/Down Frequency.
+ *
+ * This function is used to start a seek
+ * on the FM Radio. The direction of the seek is indicated by the parameter
+ * inside the v4l2_hw_freq_seek structure. This function is called when the
+ * application issues the IOCTL VIDIOC_S_HW_FREQ_SEEK.
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @freq_seek: v4l2_hw_freq_seek structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_set_hw_freq_seek(
+ struct file *file,
+ void *priv,
+ struct v4l2_hw_freq_seek *freq_seek
+ )
+{
+ int status;
+ int ret_val = -EINVAL;
+
+ FM_INFO_REPORT("vidioc_set_hw_freq_seek");
+
+ FM_DEBUG_REPORT("vidioc_set_hw_freq_seek: Status = %x, "
+ "Upwards = %x, Wrap Around = %x",
+ cg2900_device.seekstatus,
+ freq_seek->seek_upward, freq_seek->wrap_around);
+
+ if (cg2900_device.seekstatus == FMR_SEEK_IN_PROGRESS) {
+ FM_ERR_REPORT("vidioc_set_hw_freq_seek: "
+ "VIDIOC_S_HW_FREQ_SEEK, "
+ "freq_seek in progress");
+ goto error;
+ }
+
+ spin_lock(&fm_spinlock);
+ fm_event = CG2900_EVENT_NO_EVENT;
+ no_of_scan_freq = 0;
+ spin_unlock(&fm_spinlock);
+
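+ /*
+ * seek_upward carries the requested direction; values other than
+ * CG2900_DIR_UP and CG2900_DIR_DOWN are rejected.
+ */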
+ if (CG2900_DIR_UP == freq_seek->seek_upward)
+ status = cg2900_fm_search_up_freq();
+ else if (CG2900_DIR_DOWN == freq_seek->seek_upward)
+ status = cg2900_fm_search_down_freq();
+ else
+ goto error;
+
+ if (0 != status)
+ goto error;
+
+ cg2900_device.seekstatus = FMR_SEEK_IN_PROGRESS;
+ ret_val = 0;
+
+error:
+ FM_DEBUG_REPORT("vidioc_set_hw_freq_seek: returning = %d",
+ ret_val);
+ return ret_val;
+}
+
+/**
+ * vidioc_get_audio()- Get Audio features of FM Driver.
+ *
+ * This function is used to get the audio features of FM Driver.
+ * This function is implemented as a dummy function.
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @audio: (out) v4l2_audio structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_get_audio(
+ struct file *file,
+ void *priv,
+ struct v4l2_audio *audio
+ )
+{
+ FM_INFO_REPORT("vidioc_get_audio");
+ strcpy(audio->name, "");
+ audio->capability = 0;
+ audio->mode = 0;
+ return 0;
+}
+
+/**
+ * vidioc_set_audio()- Set Audio features of FM Driver.
+ *
+ * This function is used to set the audio features of FM Driver.
+ * This function is implemented as a dummy function.
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @audio: v4l2_audio structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_set_audio(
+ struct file *file,
+ void *priv,
+ struct v4l2_audio *audio
+ )
+{
+ FM_INFO_REPORT("vidioc_set_audio");
+ if (audio->index != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * vidioc_get_input()- Get the Input Value
+ *
+ * This function is used to get the Input.
+ * This function is implemented as a dummy function.
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @input: (out) Value to be stored.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_get_input(
+ struct file *file,
+ void *priv,
+ unsigned int *input
+ )
+{
+ FM_INFO_REPORT("vidioc_get_input");
+ *input = 0;
+ return 0;
+}
+
+/**
+ * vidioc_set_input()- Set the input value.
+ *
+ * This function is used to set input.
+ * This function is implemented as a dummy function.
+ *
+ * @file: File structure.
+ * @priv: Previous data of file structure.
+ * @input: Value to set
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int vidioc_set_input(
+ struct file *file,
+ void *priv,
+ unsigned int input
+ )
+{
+ FM_INFO_REPORT("vidioc_set_input");
+ if (input != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * cg2900_convert_err_to_v4l2()- Convert Error Bits to V4L2 RDS format.
+ *
+ * This function converts the error bits in RDS Block
+ * as received from Chip into V4L2 RDS data specification.
+ *
+ * @status_byte: The status byte as received in RDS Group for
+ * particular RDS Block
+ * @out_byte: byte to store the modified byte with the err bits
+ * aligned as per the V4L2 RDS specification.
+ */
+static void cg2900_convert_err_to_v4l2(
+ char status_byte,
+ char *out_byte
+ )
+{
+ if ((status_byte & RDS_ERROR_STATUS_MASK) == RDS_ERROR_STATUS_MASK) {
+ /* Uncorrectable Block */
+ *out_byte = (*out_byte | V4L2_RDS_BLOCK_ERROR);
+ } else if (((status_byte & RDS_UPTO_TWO_BITS_CORRECTED)
+ == RDS_UPTO_TWO_BITS_CORRECTED) ||
+ ((status_byte & RDS_UPTO_FIVE_BITS_CORRECTED)
+ == RDS_UPTO_FIVE_BITS_CORRECTED)) {
+ /* Corrected Bits in Block */
+ *out_byte = (*out_byte | V4L2_RDS_BLOCK_CORRECTED);
+ }
+}
+
+/**
+ * cg2900_map_event_to_v4l2()- Maps cg2900 event to v4l2 events .
+ *
+ * This function maps cg2900 events to corresponding v4l2 events.
+ *
+ * @event: This contains the cg2900 event to be converted.
+ *
+ * Returns: Corresponding V4L2 events.
+ */
+static int cg2900_map_event_to_v4l2(
+ u8 event
+ )
+{
+ switch (event) {
+ case CG2900_EVENT_MONO_STEREO_TRANSITION:
+ return V4L2_CG2900_RADIO_INTERRUPT_MONO_STEREO_TRANSITION;
+ case CG2900_EVENT_SEARCH_CHANNEL_FOUND:
+ return V4L2_CG2900_RADIO_INTERRUPT_SEARCH_COMPLETED;
+ case CG2900_EVENT_SCAN_CHANNELS_FOUND:
+ return V4L2_CG2900_RADIO_INTERRUPT_BAND_SCAN_COMPLETED;
+ case CG2900_EVENT_BLOCK_SCAN_CHANNELS_FOUND:
+ return V4L2_CG2900_RADIO_INTERRUPT_BLOCK_SCAN_COMPLETED;
+ case CG2900_EVENT_SCAN_CANCELLED:
+ return V4L2_CG2900_RADIO_INTERRUPT_SCAN_CANCELLED;
+ case CG2900_EVENT_DEVICE_RESET:
+ return V4L2_CG2900_RADIO_INTERRUPT_DEVICE_RESET;
+ case CG2900_EVENT_RDS_EVENT:
+ return V4L2_CG2900_RADIO_INTERRUPT_RDS_RECEIVED;
+ default:
+ return V4L2_CG2900_RADIO_INTERRUPT_UNKNOWN;
+ }
+}
+
+/**
+ * cg2900_open()- This function initializes and switches on FM.
+ *
+ * This is called when the application opens the character device.
+ *
+ * @file: File structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int cg2900_open(
+ struct file *file
+ )
+{
+ int status;
+ int ret_val = -EINVAL;
+ struct video_device *vdev = video_devdata(file);
+
+ mutex_lock(&fm_mutex);
+ users++;
+ FM_INFO_REPORT("cg2900_open: users = %d", users);
+
+ if (users > 1) {
+ FM_INFO_REPORT("cg2900_open: FM already switched on!!!");
+ ret_val = 0;
+ /*
+ * No need to perform the initialization and switch on FM
+ * since it is already done during the first open call to
+ * this driver.
+ */
+ goto done;
+ }
+
+ status = cg2900_fm_init();
+ if (0 != status)
+ goto init_error;
+
+ FM_DEBUG_REPORT("cg2900_open: Switching on FM");
+ status = cg2900_fm_switch_on(&(vdev->dev));
+ if (0 != status)
+ goto switch_on_error;
+
+ cg2900_device.state = FMR_SWITCH_ON;
+ cg2900_device.frequency = HZ_TO_V4L2(freq_low);
+ cg2900_device.rx_rds_enabled = false;
+ cg2900_device.muted = false;
+ cg2900_device.audiopath = 0;
+ cg2900_device.seekstatus = FMR_SEEK_NONE;
+ cg2900_device.rssi_threshold = CG2900_FM_DEFAULT_RSSI_THRESHOLD;
+ fm_event = CG2900_EVENT_NO_EVENT;
+ no_of_scan_freq = 0;
+ cg2900_device.fm_mode = CG2900_FM_IDLE_MODE;
+ ret_val = 0;
+ goto done;
+
+switch_on_error:
+ cg2900_fm_deinit();
+init_error:
+ users--;
+done:
+ mutex_unlock(&fm_mutex);
+ FM_DEBUG_REPORT("cg2900_open: returning %d", ret_val);
+ return ret_val;
+}
+
+/**
+ * cg2900_release()- This function switches off FM.
+ *
+ * This function switches off FM and releases the resources.
+ * This is called when the application closes the character
+ * device.
+ *
+ * @file: File structure.
+ *
+ * Returns:
+ * 0 when no error
+ * -EINVAL: otherwise
+ */
+static int cg2900_release(
+ struct file *file
+ )
+{
+ int status;
+ int ret_val = -EINVAL;
+
+ mutex_lock(&fm_mutex);
+
+ FM_INFO_REPORT("cg2900_release");
+ if (users <= 0) {
+ FM_ERR_REPORT("cg2900_release: No users registered "
+ "with FM Driver");
+ goto done;
+ }
+
+ users--;
+ FM_INFO_REPORT("cg2900_release: users = %d", users);
+
+ if (0 == users) {
+ FM_DEBUG_REPORT("cg2900_release: Switching Off FM");
+ status = cg2900_fm_switch_off();
+ status = cg2900_fm_deinit();
+ if (0 != status)
+ goto done;
+
+ cg2900_device.state = FMR_SWITCH_OFF;
+ cg2900_device.frequency = 0;
+ cg2900_device.rx_rds_enabled = false;
+ cg2900_device.muted = false;
+ cg2900_device.seekstatus = FMR_SEEK_NONE;
+ fm_event = CG2900_EVENT_NO_EVENT;
+ no_of_scan_freq = 0;
+ }
+ ret_val = 0;
+
+done:
+ mutex_unlock(&fm_mutex);
+ FM_DEBUG_REPORT("cg2900_release: returning %d", ret_val);
+ return ret_val;
+}
+
+/**
+ * cg2900_read()- This function is invoked when the application
+ * calls read() to receive RDS Data.
+ *
+ * @file: File structure.
+ * @data: buffer provided by application for receiving the data.
+ * @count: Number of bytes that application wants to read from driver
+ * @pos: offset
+ *
+ * Returns:
+ * Number of bytes copied to the user buffer
+ * -EFAULT: If there is problem in copying data to buffer supplied
+ * by application
+ * -EIO: If the number of bytes to be read are not a multiple of
+ * struct v4l2_rds_data.
+ * -EAGAIN: More than 22 blocks requested to be read or read
+ * was called in non blocking mode and no data was available for reading.
+ * -EINTR: If read was interrupted by a signal before data was available.
+ * 0 when no data available for reading.
+ */
+static ssize_t cg2900_read(
+ struct file *file,
+ char __user *data,
+ size_t count, loff_t *pos
+ )
+{
+ int current_rds_grp;
+ int index = 0;
+ int blocks_to_read;
+ struct v4l2_rds_data rdsbuf[MAX_RDS_GROUPS * NUM_OF_RDS_BLOCKS];
+ struct v4l2_rds_data *rdslocalbuf = rdsbuf;
+ struct sk_buff *skb;
+
+ FM_INFO_REPORT("cg2900_read");
+
+ blocks_to_read = (count / sizeof(struct v4l2_rds_data));
+
+ if (!cg2900_device.rx_rds_enabled) {
+ /* Remove all Interrupts from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+ FM_INFO_REPORT("cg2900_read: returning 0");
+ return 0;
+ }
+
+ if (count % sizeof(struct v4l2_rds_data) != 0) {
+ FM_ERR_REPORT("cg2900_read: Invalid Number of bytes %x "
+ "requested to read", count);
+ return -EIO;
+ }
+
+ if (blocks_to_read > MAX_RDS_GROUPS * NUM_OF_RDS_BLOCKS) {
+ FM_ERR_REPORT("cg2900_read: Too many blocks(%d) "
+ "requested to be read", blocks_to_read);
+ return -EAGAIN;
+ }
+
+ current_rds_grp = fm_rds_info.rds_group_sent;
+
+ if ((fm_rds_info.rds_head == fm_rds_info.rds_tail) ||
+ (fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].block1 == 0x0000)) {
+ /* Remove all Interrupts from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+ FM_INFO_REPORT("cg2900_read: returning 0");
+ return 0;
+ }
+
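+ /*
+ * Copy RDS data block by block: each v4l2_rds_data entry holds one
+ * 16-bit RDS block (lsb/msb), its block id and V4L2 error flags.
+ * fm_rds_buf is indexed by buffer row (rds_tail) and group, with
+ * NUM_OF_RDS_BLOCKS blocks per group.
+ */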
+ spin_lock(&fm_spinlock);
+ while (index < blocks_to_read) {
+ /* Check which Block needs to be transferred next */
+ switch (fm_rds_info.rds_block_sent % NUM_OF_RDS_BLOCKS) {
+ case 0:
+ (rdslocalbuf + index)->lsb =
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].block1;
+ (rdslocalbuf + index)->msb =
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].block1 >> 8;
+ (rdslocalbuf + index)->block =
+ (fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].status1
+ & RDS_BLOCK_MASK) >> 2;
+ cg2900_convert_err_to_v4l2(
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].status1,
+ &(rdslocalbuf + index)->block);
+ break;
+ case 1:
+ (rdslocalbuf + index)->lsb =
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].block2;
+ (rdslocalbuf + index)->msb =
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].block2 >> 8;
+ (rdslocalbuf + index)->block =
+ (fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].status2
+ & RDS_BLOCK_MASK) >> 2;
+ cg2900_convert_err_to_v4l2(
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].status2,
+ &(rdslocalbuf + index)->block);
+ break;
+ case 2:
+ (rdslocalbuf + index)->lsb =
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].block3;
+ (rdslocalbuf + index)->msb =
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].block3 >> 8;
+ (rdslocalbuf + index)->block =
+ (fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].status3
+ & RDS_BLOCK_MASK) >> 2;
+ cg2900_convert_err_to_v4l2(
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].status3,
+ &(rdslocalbuf + index)->block);
+ break;
+ case 3:
+ (rdslocalbuf + index)->lsb =
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].block4;
+ (rdslocalbuf + index)->msb =
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].block4 >> 8;
+ (rdslocalbuf + index)->block =
+ (fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].status4
+ & RDS_BLOCK_MASK) >> 2;
+ cg2900_convert_err_to_v4l2(
+ fm_rds_buf[fm_rds_info.rds_tail]
+ [current_rds_grp].status4,
+ &(rdslocalbuf + index)->block);
+ current_rds_grp++;
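+ /*
+ * Block 4 completes the current group; once every group in
+ * this buffer row has been sent, advance the tail and consume
+ * the RDS interrupt that announced the row.
+ */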
+ if (current_rds_grp == MAX_RDS_GROUPS) {
+ fm_rds_info.rds_tail++;
+ current_rds_grp = 0;
+ /* Dequeue Rds Interrupt here */
+ skb = skb_dequeue(&fm_interrupt_queue);
+ if (!skb) {
+ /* No Interrupt, bad case */
+ FM_ERR_REPORT("cg2900_read: "
+ "skb is NULL. Major error");
+ spin_unlock(&fm_spinlock);
+ return 0;
+ }
+ fm_event = (u8)skb->data[0];
+ if (fm_event != CG2900_EVENT_RDS_EVENT) {
+ /* RDS interrupt not found */
+ FM_ERR_REPORT("cg2900_read:"
+ "RDS interrupt not found"
+ "for de-queuing."
+ "fm_event = %x", fm_event);
+ /* Queue the event back */
+ skb_queue_head(&fm_interrupt_queue,
+ skb);
+ spin_unlock(&fm_spinlock);
+ return 0;
+ }
+ kfree_skb(skb);
+ }
+ break;
+ default:
+ FM_ERR_REPORT("Invalid RDS Group!!!");
+ spin_unlock(&fm_spinlock);
+ return 0;
+ }
+ index++;
+ fm_rds_info.rds_block_sent++;
+ if (fm_rds_info.rds_block_sent == NUM_OF_RDS_BLOCKS)
+ fm_rds_info.rds_block_sent = 0;
+
+ if (!cg2900_device.rx_rds_enabled) {
+ /* Remove all Interrupts from the queue */
+ skb_queue_purge(&fm_interrupt_queue);
+ FM_INFO_REPORT("cg2900_read: returning 0");
+ spin_unlock(&fm_spinlock);
+ return 0;
+ }
+ }
+ /* Update the RDS Group Count Sent to Application */
+ fm_rds_info.rds_group_sent = current_rds_grp;
+ if (fm_rds_info.rds_tail == MAX_RDS_BUFFER)
+ fm_rds_info.rds_tail = 0;
+
+ spin_unlock(&fm_spinlock);
+ if (copy_to_user(data, rdslocalbuf, count)) {
+ FM_ERR_REPORT("cg2900_read: Error "
+ "in copying, returning");
+ return -EFAULT;
+ }
+ return count;
+}
+
+/**
+ * cg2900_poll()- Check if the operation is complete or not.
+ *
+ * This function is invoked by application on calling poll() and is used to
+ * wait until an FM interrupt is received from the chip.
+ * The application then decides which data to read depending on the FM
+ * interrupt.
+ *
+ * @file: File structure.
+ * @wait: poll table
+ *
+ * Returns:
+ * POLLRDNORM|POLLIN whenever FM interrupt has occurred.
+ * 0 whenever the call times out.
+ */
+static unsigned int cg2900_poll(
+ struct file *file,
+ struct poll_table_struct *wait
+ )
+{
+ int ret_val = 0;
+
+ FM_INFO_REPORT("cg2900_poll");
+
+ /* Check if we have some data in queue already */
+ if (skb_queue_empty(&fm_interrupt_queue)) {
+ FM_DEBUG_REPORT("cg2900_poll: Interrupt Queue Empty, waiting");
+ /* No Interrupt, wait for it to occur */
+ poll_wait(file, &cg2900_poll_queue, wait);
+ }
+ /* Check if we now have interrupt to read in queue */
+ if (skb_queue_empty(&fm_interrupt_queue))
+ goto done;
+
+ ret_val = POLLIN | POLLRDNORM;
+
+done:
+ FM_DEBUG_REPORT("poll_wait returning %d", ret_val);
+ return ret_val;
+}
+
+/**
+ * radio_cg2900_probe()- This function registers FM Driver with V4L2 Driver.
+ *
+ * This function is called whenever the driver is probed by the device system,
+ * i.e. when a CG2900 controller has connected. It registers the FM Driver with
+ * Video4Linux as a character device.
+ *
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 on success
+ * -EINVAL on error
+ */
+static int __devinit radio_cg2900_probe(
+ struct platform_device *pdev
+ )
+{
+ int err;
+
+ FM_INFO_REPORT(BANNER);
+
+ err = fmd_set_dev(&pdev->dev);
+ if (err) {
+ FM_ERR_REPORT("Could not set device %s", pdev->name);
+ return err;
+ }
+
+ radio_nr = 0;
+ grid = CG2900_FM_GRID_100;
+ band = CG2900_FM_BAND_US_EU;
+ FM_INFO_REPORT("radio_cg2900_probe: radio_nr= %d.", radio_nr);
+
+ /* Register the radio character device with V4L2 */
+ if (video_register_device(
+ &cg2900_video_device,
+ VFL_TYPE_RADIO,
+ radio_nr) < 0) {
+ FM_ERR_REPORT("radio_cg2900_probe: video_register_device err");
+ return -EINVAL;
+ }
+ mutex_init(&fm_mutex);
+ spin_lock_init(&fm_spinlock);
+ init_waitqueue_head(&cg2900_poll_queue);
+ skb_queue_head_init(&fm_interrupt_queue);
+ users = 0;
+ return 0;
+}
+
+/**
+ * radio_cg2900_remove()- This function removes the FM Driver.
+ *
+ * This function is called whenever the driver is removed by the device system,
+ * i.e. when a CG2900 controller has disconnected. It unregisters the FM Driver
+ * from Video4Linux.
+ *
+ * @pdev: Platform device.
+ *
+ * Returns: 0 on success
+ */
+static int __devexit radio_cg2900_remove(
+ struct platform_device *pdev
+ )
+{
+ FM_INFO_REPORT("radio_cg2900_remove");
+ /* Wake up the poll queue since we are now exiting */
+ wake_up_poll_queue();
+ /* Give some time for application to exit the poll thread */
+ schedule_timeout_interruptible(msecs_to_jiffies(500));
+
+ /* Try to Switch Off FM in case it is still switched on */
+ cg2900_fm_switch_off();
+ cg2900_fm_deinit();
+ skb_queue_purge(&fm_interrupt_queue);
+ mutex_destroy(&fm_mutex);
+ video_unregister_device(&cg2900_video_device);
+ fmd_set_dev(NULL);
+ return 0;
+}
+
+static struct platform_driver radio_cg2900_driver = {
+ .driver = {
+ .name = "cg2900-fm",
+ .owner = THIS_MODULE,
+ },
+ .probe = radio_cg2900_probe,
+ .remove = __devexit_p(radio_cg2900_remove),
+};
+
+/**
+ * radio_cg2900_init() - Initialize module.
+ *
+ * Registers platform driver.
+ */
+static int __init radio_cg2900_init(void)
+{
+ FM_INFO_REPORT("radio_cg2900_init");
+ return platform_driver_register(&radio_cg2900_driver);
+}
+
+/**
+ * radio_cg2900_exit() - Remove module.
+ *
+ * Unregisters platform driver.
+ */
+static void __exit radio_cg2900_exit(void)
+{
+ FM_INFO_REPORT("radio_cg2900_exit");
+ platform_driver_unregister(&radio_cg2900_driver);
+}
+
+void wake_up_poll_queue(void)
+{
+ FM_INFO_REPORT("wake_up_poll_queue");
+ wake_up_interruptible(&cg2900_poll_queue);
+}
+
+void cg2900_handle_device_reset(void)
+{
+ struct sk_buff *skb;
+ FM_INFO_REPORT("cg2900_handle_device_reset");
+ skb = alloc_skb(SKB_FM_INTERRUPT_DATA, GFP_KERNEL);
+ if (!skb) {
+ FM_ERR_REPORT("cg2900_handle_device_reset: "
+ "Unable to Allocate Memory");
+ return;
+ }
+ skb->data[0] = CG2900_EVENT_DEVICE_RESET;
+ skb->data[1] = true;
+ skb_queue_tail(&fm_interrupt_queue, skb);
+ wake_up_poll_queue();
+}
+
+module_init(radio_cg2900_init);
+module_exit(radio_cg2900_exit);
+MODULE_AUTHOR("Hemant Gupta");
+MODULE_LICENSE("GPL v2");
+
+module_param(radio_nr, int, S_IRUGO);
+
+module_param(grid, int, S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(grid, "Grid: "
+ "0=50 kHz, "
+ "*1=100 kHz*, "
+ "2=200 kHz");
+
+module_param(band, int, S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(band, "Band: "
+ "*0=87.5-108 MHz*, "
+ "1=76-90 MHz, "
+ "2=70-108 MHz");
+
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index e954781c90b..82e9090e15b 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -151,6 +151,22 @@ config RADIO_WL1273
To compile this driver as a module, choose M here: the
module will be called radio-wl1273.
+config RADIO_CG2900
+ tristate "ST-Ericsson CG2900 FM Radio support"
+ depends on CG2900 && VIDEO_V4L2
+ ---help---
+ Choose Y here if your platform has an ST-Ericsson CG2900, a BT,
+ FM and GPS combo chip controlled via HCI, and you want FM radio
+ support.
+
+ In order to control your radio card, you will need to use programs
+ that are compatible with the Video For Linux API. Information on
+ this API and pointers to "v4l" programs may be found at
+ <file:Documentation/video4linux/API.html>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-CG2900.
+
+
# TI's ST based wl128x FM radio
source "drivers/media/radio/wl128x/Kconfig"
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index 390daf94d84..d18e01e369b 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -26,5 +26,6 @@ obj-$(CONFIG_RADIO_TEF6862) += tef6862.o
obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
obj-$(CONFIG_RADIO_WL128X) += wl128x/
+obj-$(CONFIG_RADIO_CG2900) += CG2900/
ccflags-y += -Isound
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f147395bac9..2e9c2d81d80 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -307,6 +307,17 @@ config MFD_TC3589X
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_TC35892
+ bool "Support Toshiba TC35892"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Support for the Toshiba TC35892 I/O Expander.
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
config MFD_TMIO
bool
default n
@@ -335,6 +346,27 @@ config MFD_TC6393XB
help
Support for Toshiba Mobile IO Controller TC6393XB
+config AB5500_CORE
+ bool "ST-Ericsson AB5500 Mixed Signal Circuit core functions"
+ select MFD_CORE
+ depends on GENERIC_HARDIRQS && ABX500_CORE
+ help
+ Select this to enable the AB5500 Mixed Signal IC core
+ functionality. This connects to a AB5500 chip on the I2C bus via
+ the Power and Reset Management Unit (PRCMU). It exposes a number
+ of symbols needed for dependent devices to read and write
+ registers and subscribe to events from this multi-functional IC.
+ This is needed to use other features of the AB5500 such as
+ battery-backed RTC, charging control, Regulators, LEDs, vibrator,
+ system power and temperature, power management and ALSA sound.
+
+config AB5500_GPADC
+ bool "AB5500 GPADC driver"
+ depends on AB5500_CORE
+ default y
+ help
+ AB5500 GPADC driver used to convert battery/usb voltage.
+
config PMIC_DA903X
bool "Dialog Semiconductor DA9030/DA9034 PMIC Support"
depends on I2C=y
@@ -654,7 +686,7 @@ config AB8500_CORE
config AB8500_I2C_CORE
bool "AB8500 register access via PRCMU I2C"
- depends on AB8500_CORE && MFD_DB8500_PRCMU
+ depends on AB8500_CORE
default y
help
This enables register access to the AB8500 chip via PRCMU I2C.
@@ -662,6 +694,14 @@ config AB8500_I2C_CORE
the I2C bus is connected to the Power Reset
and Mangagement Unit, PRCMU.
+config AB8500_DENC
+ bool "AB8500_DENC driver support(CVBS)"
+ depends on AB8500_CORE
+ help
+ Select this option to add driver support for analog TV out through
+ AB8500.
+
+
config AB8500_DEBUG
bool "Enable debug info via debugfs"
depends on AB8500_CORE && DEBUG_FS
@@ -672,10 +712,10 @@ config AB8500_DEBUG
config AB8500_GPADC
bool "AB8500 GPADC driver"
- depends on AB8500_CORE && REGULATOR_AB8500
+ depends on AB8500_CORE
default y
help
- AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage
+ AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage.
config MFD_DB8500_PRCMU
bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b953bab934f..2b31af7dc8c 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -2,6 +2,7 @@
# Makefile for multifunction miscellaneous devices
#
+obj-$(CONFIG_AB5500_CORE) += ab5500-core.o ab5500-power.o
88pm860x-objs := 88pm860x-core.o 88pm860x-i2c.o
obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_MFD_STMPE) += stmpe.o
obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o
obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o
obj-$(CONFIG_MFD_TC3589X) += tc3589x.o
+obj-$(CONFIG_MFD_TC35892) += tc35892.o
obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
@@ -90,11 +92,13 @@ obj-$(CONFIG_AB5500_CORE) += ab5500-core.o
obj-$(CONFIG_AB5500_DEBUG) += ab5500-debugfs.o
obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
+obj-$(CONFIG_AB8500_DENC) += ab8500-denc.o
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
# ab8500-i2c need to come after db8500-prcmu (which provides the channel)
obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
obj-$(CONFIG_MFD_DB5500_PRCMU) += db5500-prcmu.o
+obj-$(CONFIG_AB5500_GPADC) += ab5500-gpadc.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
index bd56a764dea..7da5fa3ba35 100644
--- a/drivers/mfd/ab5500-core.c
+++ b/drivers/mfd/ab5500-core.c
@@ -992,6 +992,74 @@ static struct mfd_cell ab5500_devs[AB5500_NUM_DEVICES] = {
},
},
},
+ [AB5500_DEVID_TEMPMON] = {
+ .name = "abx500-temp",
+ .id = AB5500_DEVID_TEMPMON,
+ .num_resources = 1,
+ .resources = (struct resource[]) {
+ {
+ .name = "ABX500_TEMP_WARM",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 2),
+ .end = AB5500_IRQ(2, 2),
+ },
+ },
+ },
+ [AB5500_DEVID_ACCDET] = {
+ .name = "ab5500-acc-det",
+ .id = AB5500_DEVID_ACCDET,
+ .num_resources = 8,
+ .resources = (struct resource[]) {
+ {
+ .name = "acc_detedt22db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 7),
+ .end = AB5500_IRQ(2, 7),
+ },
+ {
+ .name = "acc_detedt21db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 6),
+ .end = AB5500_IRQ(2, 6),
+ },
+ {
+ .name = "acc_detedt21db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 5),
+ .end = AB5500_IRQ(2, 5),
+ },
+ {
+ .name = "acc_detedt3db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 4),
+ .end = AB5500_IRQ(3, 4),
+ },
+ {
+ .name = "acc_detedt3db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 3),
+ .end = AB5500_IRQ(3, 3),
+ },
+ {
+ .name = "acc_detedt1db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 2),
+ .end = AB5500_IRQ(3, 2),
+ },
+ {
+ .name = "acc_detedt1db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 1),
+ .end = AB5500_IRQ(3, 1),
+ },
+ {
+ .name = "acc_detedt22db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 0),
+ .end = AB5500_IRQ(3, 0),
+ },
+ },
+ },
};
/*
@@ -1302,6 +1370,10 @@ static const struct ab_family_id ids[] __initdata = {
.id = AB5500_1_1,
.name = "1.1"
},
+ {
+ .id = AB5500_2_0,
+ .name = "2.0"
+ },
/* Terminator */
{
.id = 0x00,
diff --git a/drivers/mfd/ab5500-gpadc.c b/drivers/mfd/ab5500-gpadc.c
new file mode 100644
index 00000000000..d099f1b9d73
--- /dev/null
+++ b/drivers/mfd/ab5500-gpadc.c
@@ -0,0 +1,1224 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Vijaya Kumar K <vijay.kilari@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+
+/*
+ * Manual mode ADC registers
+ */
+#define AB5500_GPADC_MANUAL_STAT_REG 0x1F
+#define AB5500_GPADC_MANDATAL_REG 0x21
+#define AB5500_GPADC_MANDATAH_REG 0x20
+#define AB5500_GPADC_MANUAL_MUX_CTRL 0x22
+#define AB5500_GPADC_MANUAL_MODE_CTRL 0x23
+#define AB5500_GPADC_MANUAL_MODE_CTRL2 0x24
+/*
+ * Auto/Polling mode ADC registers
+ */
+#define AB5500_GPADC_AUTO_VBAT_MAX 0x26
+#define AB5500_GPADC_AUTO_VBAT_MIN_TXON 0x27
+#define AB5500_GPADC_AUTO_VBAT_MIN_NOTX 0x28
+#define AB5500_GPADC_AUTO_VBAT_AVGH 0x29
+#define AB5500_GPADC_AUTO_VBAT_AVGL 0x2A
+#define AB5500_GPADC_AUTO_ICHAR_MAX 0x2B
+#define AB5500_GPADC_AUTO_ICHAR_MIN 0x2C
+#define AB5500_GPADC_AUTO_ICHAR_AVG 0x2D
+#define AB5500_GPADC_AUTO_CTRL2 0x2F
+#define AB5500_GPADC_AUTO_CTRL1 0x30
+#define AB5500_GPADC_AUTO_PWR_CTRL 0x31
+#define AB5500_GPADC_AUTO_TRIG_VBAT_MIN_TXON 0x32
+#define AB5500_GPADC_AUTO_TRIG_VBAT_MIN_NOTX 0x33
+#define AB5500_GPADC_AUTO_TRIG_ADOUT0_CTRL 0x34
+#define AB5500_GPADC_AUTO_TRIG_ADOUT1_CTRL 0x35
+#define AB5500_GPADC_AUTO_TRIG0_MUX_CTRL 0x37
+#define AB5500_GPADC_AUTO_XTALTEMP_CTRL 0x57
+#define AB5500_GPADC_KELVIN_CTRL 0xFE
+
+/* gpadc constants */
+#define AB5500_INT_ADC_TRIG0 0x0
+#define AB5500_INT_ADC_TRIG1 0x1
+#define AB5500_INT_ADC_TRIG2 0x2
+#define AB5500_INT_ADC_TRIG3 0x3
+#define AB5500_INT_ADC_TRIG4 0x4
+#define AB5500_INT_ADC_TRIG5 0x5
+#define AB5500_INT_ADC_TRIG6 0x6
+#define AB5500_INT_ADC_TRIG7 0x7
+
+#define AB5500_GPADC_AUTO_TRIG_INDEX AB5500_GPADC_AUTO_TRIG0_MUX_CTRL
+#define GPADC_MANUAL_READY 0x01
+#define GPADC_MANUAL_ADOUT0_MASK 0x30
+#define GPADC_MANUAL_ADOUT1_MASK 0xC0
+#define GPADC_MANUAL_ADOUT0_ON 0x10
+#define GPADC_MANUAL_ADOUT1_ON 0x40
+#define MUX_SCALE_GPADC0_MASK 0x08
+#define MUX_SCALE_VBAT_MASK 0x02
+#define MUX_SCALE_45 0x02
+#define MUX_SCALE_BDATA_MASK 0x01
+#define MUX_SCALE_BDATA27 0x00
+#define MUX_SCALE_BDATA18 0x01
+#define MUX_SCALE_ACCDET2_MASK 0x01
+#define MUX_SCALE_ACCDET3_MASK 0x02
+#define GPADC0_SCALE_VOL27 0x00
+#define GPADC0_SCALE_VOL18 0x01
+#define ACCDET2_SCALE_VOL27 0x00
+#define ACCDET3_SCALE_VOL27 0x00
+#define TRIGX_FREQ_MASK 0x07
+#define AUTO_VBAT_MASK 0x10
+#define AUTO_VBAT_ON 0x10
+#define TRIG_VBAT_TXON_ARM_MASK 0x08
+#define TRIG_VBAT_NOTX_ARM_MASK 0x04
+#define TRIGX_ARM_MASK 0x20
+#define TRIGX_ARM 0x20
+#define TRIGX_MUX_SELECT 0x1F
+#define ADC_CAL_OFF_MASK 0x04
+#define ADC_ON_MODE_MASK 0x03
+#define ADC_CAL_ON 0x00
+#define ADC_FULLPWR 0x03
+#define ADC_XTAL_FORCE_MASK 0x80
+#define ADC_XTAL_FORCE_EN 0x80
+#define ADC_XTAL_FORCE_DI 0x00
+#define ADOUT0 0x01
+#define ADOUT1 0x02
+#define MIN_INDEX 0x02
+#define MAX_INDEX 0x03
+#define CTRL_INDEX 0x01
+
+/* GPADC constants from AB5500 spec */
+#define GPADC0_MIN 0
+#define GPADC0_MAX 1800
+#define BTEMP_MIN 0
+#define BTEMP_MAX 1800
+#define BDATA_MIN 0
+#define BDATA_MAX 2750
+#define PCBTEMP_MIN 0
+#define PCBTEMP_MAX 1800
+#define XTALTEMP_MIN 0
+#define XTALTEMP_MAX 1800
+#define DIETEMP_MIN 0
+#define DIETEMP_MAX 1800
+#define VBUS_I_MIN 0
+#define VBUS_I_MAX 1600
+#define VBUS_V_MIN 0
+#define VBUS_V_MAX 20000
+#define ACCDET2_MIN 0
+#define ACCDET2_MAX 2500
+#define ACCDET3_MIN 0
+#define ACCDET3_MAX 2500
+#define VBAT_MIN 2300
+#define VBAT_MAX 4500
+#define BKBAT_MIN 0
+#define BKBAT_MAX 2750
+#define USBID_MIN 0
+#define USBID_MAX 1800
+#define KELVIN_MIN 0
+#define KELVIN_MAX 4500
+
+/* This is used for calibration */
+#define ADC_RESOLUTION 1023
+#define AUTO_ADC_RESOLUTION 255
+
+enum adc_auto_channels {
+ ADC_INPUT_TRIG0 = 0,
+ ADC_INPUT_TRIG1,
+ ADC_INPUT_TRIG2,
+ ADC_INPUT_TRIG3,
+ ADC_INPUT_TRIG4,
+ ADC_INPUT_TRIG5,
+ ADC_INPUT_TRIG6,
+ ADC_INPUT_TRIG7,
+ ADC_INPUT_VBAT_TXOFF,
+ ADC_INPUT_VBAT_TXON,
+ N_AUTO_TRIGGER
+};
+
+/**
+ * struct adc_auto_trigger - AB5500 GPADC auto trigger
+ * @auto_mux:	mux input
+ * @flag:	status of trigger
+ * @freq:	frequency of conversion
+ * @adout:	adout to pull
+ * @trig_min:	trigger minimum value
+ * @trig_max:	trigger maximum value
+ * @auto_callb:	notification callback
+ */
+struct adc_auto_trigger {
+ u8 auto_mux;
+ u8 flag;
+ u8 freq;
+ u8 adout;
+ u8 trig_min;
+ u8 trig_max;
+ int (*auto_callb)(int mux);
+};
+
+/**
+ * struct ab5500_adc_interrupts - ab5500 gpadc interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_adc_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+/**
+ * struct ab5500_gpadc - AB5500 GPADC device information
+ * @chip_id:		ABB chip id
+ * @dev:		pointer to the struct device
+ * @node:		a list of AB5500 GPADCs, hence prepared for reentrance
+ * @ab5500_gpadc_lock:	mutex serializing manual conversions
+ * @regu:		pointer to the struct regulator
+ * @irq:		interrupt number that is used by gpadc
+ * @prev_bdata:		last valid BDATA/BTEMP sample, reused when a raw
+ *			conversion returns an implausibly low code
+ * @gpadc_auto_lock:	spinlock protecting the auto trigger state
+ * @adc_trig:		auto trigger channel descriptors
+ * @gpadc_wq:		work queue serving the auto trigger work items
+ * @gpadc_trigX_work:	work items for the trigger channels
+ */
+struct ab5500_gpadc {
+ u8 chip_id;
+ struct device *dev;
+ struct list_head node;
+ struct mutex ab5500_gpadc_lock;
+ struct regulator *regu;
+ int irq;
+ int prev_bdata;
+ spinlock_t gpadc_auto_lock;
+ struct adc_auto_trigger adc_trig[N_AUTO_TRIGGER];
+ struct workqueue_struct *gpadc_wq;
+ struct work_struct gpadc_trig0_work;
+ struct work_struct gpadc_trig1_work;
+ struct work_struct gpadc_trig2_work;
+ struct work_struct gpadc_trig3_work;
+ struct work_struct gpadc_trig4_work;
+ struct work_struct gpadc_trig5_work;
+ struct work_struct gpadc_trig6_work;
+ struct work_struct gpadc_trig7_work;
+ struct work_struct gpadc_trig_vbat_txon_work;
+ struct work_struct gpadc_trig_vbat_txoff_work;
+};
+
+static LIST_HEAD(ab5500_gpadc_list);
+
+struct adc_data {
+ u8 mux;
+ int min;
+ int max;
+ int adout;
+};
+
+#define ADC_DATA(_id, _mux, _min, _max, _adout) \
+ [_id] = { \
+ .mux = _mux, \
+ .min = _min, \
+ .max = _max, \
+ .adout = _adout \
+ }
+
+struct adc_data adc_tab[] = {
+ ADC_DATA(GPADC0_V, 0x00, GPADC0_MIN, GPADC0_MAX, 0),
+ ADC_DATA(BTEMP_BALL, 0x0D, BTEMP_MIN, BTEMP_MAX, ADOUT0),
+ ADC_DATA(BAT_CTRL, 0x0D, BDATA_MIN, BDATA_MAX, 0),
+ ADC_DATA(MAIN_BAT_V, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+ ADC_DATA(MAIN_BAT_V_TXON, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+ ADC_DATA(VBUS_V, 0x10, VBUS_V_MIN, VBUS_V_MAX, 0),
+ ADC_DATA(USB_CHARGER_C, 0x0A, VBUS_I_MIN, VBUS_I_MAX, 0),
+ ADC_DATA(BK_BAT_V, 0x07, BKBAT_MIN, BKBAT_MAX, 0),
+ ADC_DATA(DIE_TEMP, 0x0F, DIETEMP_MIN, DIETEMP_MAX, ADOUT0),
+ ADC_DATA(PCB_TEMP, 0x13, PCBTEMP_MIN, PCBTEMP_MAX, ADOUT0),
+ ADC_DATA(XTAL_TEMP, 0x06, XTALTEMP_MIN, XTALTEMP_MAX, ADOUT0),
+ ADC_DATA(USB_ID, 0x1A, USBID_MIN, USBID_MAX, 0),
+ ADC_DATA(ACC_DETECT2, 0x18, ACCDET2_MIN, ACCDET2_MAX, 0),
+ ADC_DATA(ACC_DETECT3, 0x19, ACCDET3_MIN, ACCDET3_MAX, 0),
+ ADC_DATA(MAIN_BAT_V_TRIG_MIN, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+ ADC_DATA(MAIN_BAT_V_TXON_TRIG_MIN, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+};
+/**
+ * ab5500_gpadc_get() - look up an AB5500 GPADC instance by device name
+ * @name: device name of the wanted GPADC instance
+ *
+ * Returns the matching instance from the list, or ERR_PTR(-ENOENT)
+ * if no GPADC with that name has been registered.
+ */
+struct ab5500_gpadc *ab5500_gpadc_get(const char *name)
+{
+ struct ab5500_gpadc *gpadc;
+ list_for_each_entry(gpadc, &ab5500_gpadc_list, node) {
+ if (!strcmp(name, dev_name(gpadc->dev)))
+ return gpadc;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(ab5500_gpadc_get);
+
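+/*
+ * Linear scaling of a raw ADC code (0..ADC_RESOLUTION) into the channel's
+ * physical range [min, max]; e.g. a MAIN_BAT_V code of 512 maps to roughly
+ * 2300 + (2200 * 512) / 1023 ~= 3401 mV.
+ */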
+#define CONV(min, max, x)\
+ ((min) + ((((max)-(min))*(x))/ADC_RESOLUTION))
+
+static int ab5500_gpadc_ad_to_voltage(struct ab5500_gpadc *gpadc,
+ u8 in, u16 ad_val)
+{
+ int res;
+
+ switch (in) {
+ case GPADC0_V:
+ case PCB_TEMP:
+ case BTEMP_BALL:
+ case MAIN_BAT_V:
+ case MAIN_BAT_V_TXON:
+ case ACC_DETECT2:
+ case ACC_DETECT3:
+ case VBUS_V:
+ case USB_CHARGER_C:
+ case BK_BAT_V:
+ case XTAL_TEMP:
+ case USB_ID:
+ case BAT_CTRL:
+ res = CONV(adc_tab[in].min, adc_tab[in].max, ad_val);
+ break;
+ case DIE_TEMP:
+ /*
+ * From the AB5500 product specification
+ * T(deg cel) = 27 - ((ADCode - 709)/2.4213)
+ * 27 + 709/2.4213 - ADCode/2.4213
+ * 320 - (ADCode/2.4213)
+ */
+ res = 320 - (((unsigned long)ad_val * 10000) / 24213);
+ break;
+ default:
+ dev_err(gpadc->dev,
+ "unknown channel, not possible to convert\n");
+ res = -EINVAL;
+ break;
+ }
+ return res;
+}
+
+/**
+ * ab5500_gpadc_convert() - gpadc conversion
+ * @gpadc: gpadc instance to use for the conversion
+ * @input: analog input to be converted to digital data
+ *
+ * This function converts the selected analog i/p to digital
+ * data.
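+ *
+ * Illustrative usage only (the instance name below is hypothetical):
+ *   gpadc = ab5500_gpadc_get("ab5500-adc.0");
+ *   vbat = ab5500_gpadc_convert(gpadc, MAIN_BAT_V);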
+ */
+int ab5500_gpadc_convert(struct ab5500_gpadc *gpadc, u8 input)
+{
+ int result, ret = -EINVAL;
+ u16 data = 0;
+ u8 looplimit = 0;
+ u8 status = 0;
+ u8 low_data, high_data, adout_mask, adout_val;
+
+ if (!gpadc)
+ return -ENODEV;
+
+ mutex_lock(&gpadc->ab5500_gpadc_lock);
+
+ switch (input) {
+ case MAIN_BAT_V:
+ case MAIN_BAT_V_TXON:
+ /*
+ * The mux scale voltage depends on the type of battery:
+ * for Li-ion use MUX_SCALE_35 => 2.3-3.5V,
+ * for LiFePO4 use MUX_SCALE_45 => 2.3-4.5V.
+ * TODO: check the battery type from platform data.
+ */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_VBAT_MASK, MUX_SCALE_45);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to read status\n");
+ goto out;
+ }
+ break;
+ case BTEMP_BALL:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_BDATA_MASK, MUX_SCALE_BDATA27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set mux scale\n");
+ goto out;
+ }
+ break;
+ case BAT_CTRL:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_BDATA_MASK, MUX_SCALE_BDATA27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set mux scale\n");
+ goto out;
+ }
+ break;
+ case XTAL_TEMP:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_XTALTEMP_CTRL,
+ ADC_XTAL_FORCE_MASK, ADC_XTAL_FORCE_EN);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set xtaltemp\n");
+ goto out;
+ }
+ break;
+ case GPADC0_V:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_GPADC0_MASK, GPADC0_SCALE_VOL18);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set gpadc0\n");
+ goto out;
+ }
+ break;
+ case ACC_DETECT2:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL2,
+ MUX_SCALE_ACCDET2_MASK, ACCDET2_SCALE_VOL27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set accdet2\n");
+ goto out;
+ }
+ break;
+ case ACC_DETECT3:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL2,
+ MUX_SCALE_ACCDET3_MASK, ACCDET3_SCALE_VOL27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set accdet3\n");
+ goto out;
+ }
+ break;
+ case USB_CHARGER_C:
+ case VBUS_V:
+ case BK_BAT_V:
+ case USB_ID:
+ case PCB_TEMP:
+ case DIE_TEMP:
+ break;
+ default:
+ dev_err(gpadc->dev, "gpadc: Wrong adc\n");
+ goto out;
+ break;
+ }
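+ /*
+ * Some channels need ADOUT0/ADOUT1 pulled during the measurement
+ * (the temperature inputs); enable it before triggering the
+ * conversion and disable it again once the conversion is done.
+ */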
+ if (adc_tab[input].adout) {
+ adout_mask = adc_tab[input].adout == ADOUT0 ?
+ GPADC_MANUAL_ADOUT0_MASK : GPADC_MANUAL_ADOUT1_MASK;
+ adout_val = adc_tab[input].adout == ADOUT0 ?
+ GPADC_MANUAL_ADOUT0_ON : GPADC_MANUAL_ADOUT1_ON;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ adout_mask, adout_val);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set ADOUT\n");
+ goto out;
+ }
+ }
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_MANUAL_MUX_CTRL, adc_tab[input].mux);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: fail to trigger manual conv\n");
+ goto out;
+ }
+ /* wait for completion of conversion */
+ looplimit = 0;
+ do {
+ msleep(1);
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_STAT_REG,
+ &status);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to read status\n");
+ goto out;
+ }
+ if (status & GPADC_MANUAL_READY)
+ break;
+ } while (++looplimit < 2);
+ if (looplimit >= 2) {
+ dev_err(gpadc->dev, "timeout:failed to complete conversion\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Disable ADOUT for measurement
+ */
+ if (adc_tab[input].adout) {
+ adout_mask = adc_tab[input].adout == ADOUT0 ?
+ GPADC_MANUAL_ADOUT0_MASK : GPADC_MANUAL_ADOUT1_MASK;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ adout_mask, 0x0);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to disable ADOUT\n");
+ goto out;
+ }
+ }
+ /*
+ * Disable XTAL TEMP
+ */
+ if (input == XTAL_TEMP) {
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_XTALTEMP_CTRL,
+ ADC_XTAL_FORCE_MASK, ADC_XTAL_FORCE_DI);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: fail to disable xtaltemp\n");
+ goto out;
+ }
+ }
+ /* Read the converted RAW data */
+ ret = abx500_get_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_MANDATAL_REG, &low_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: read low data failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_MANDATAH_REG, &high_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: read high data failed\n");
+ goto out;
+ }
+
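+ /*
+ * Assemble the 10-bit sample: high_data supplies bits 9..2 and
+ * the top two bits of low_data supply bits 1..0.
+ */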
+ data = (high_data << 2) | (low_data >> 6);
+ if (input == BAT_CTRL || input == BTEMP_BALL) {
+ /*
+ * TODO: Re-check with h/w team
+ * discard null or value < 5, as there is some error
+ * in conversion
+ */
+ if (data < 5)
+ data = gpadc->prev_bdata;
+ else
+ gpadc->prev_bdata = data;
+ }
+ result = ab5500_gpadc_ad_to_voltage(gpadc, input, data);
+
+ mutex_unlock(&gpadc->ab5500_gpadc_lock);
+ return result;
+
+out:
+ mutex_unlock(&gpadc->ab5500_gpadc_lock);
+ dev_err(gpadc->dev,
+ "gpadc: Failed to AD convert channel %d\n", input);
+ return ret;
+}
+EXPORT_SYMBOL(ab5500_gpadc_convert);
+
+/**
+ * ab5500_gpadc_program_auto() - program an auto conversion trigger
+ * @gpadc: gpadc instance
+ * @trig: auto trigger channel to program
+ *
+ * This function programs the auto trigger channel registers.
+ */
+static int ab5500_gpadc_program_auto(struct ab5500_gpadc *gpadc, int trig)
+{
+ int ret;
+ u8 adout;
+#define MIN_INDEX 0x02
+#define MAX_INDEX 0x03
+#define CTRL_INDEX 0x01
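+ /*
+ * Each auto trigger owns four consecutive registers starting at
+ * AB5500_GPADC_AUTO_TRIG_INDEX + 4 * trig: +0 mux select (and arm
+ * bit), +1 control (adout/frequency), +2 minimum level, +3 maximum
+ * level.
+ */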
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + MIN_INDEX,
+ gpadc->adc_trig[trig].trig_min);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program min\n");
+ return ret;
+ }
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + MAX_INDEX,
+ gpadc->adc_trig[trig].trig_max);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program max\n");
+ return ret;
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2),
+ TRIGX_MUX_SELECT, gpadc->adc_trig[trig].auto_mux);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to select mux\n");
+ return ret;
+ }
+ if (gpadc->adc_trig[trig].adout) {
+ adout = gpadc->adc_trig[trig].adout == ADOUT0 ?
+ gpadc->adc_trig[trig].adout << 6 :
+ gpadc->adc_trig[trig].adout << 5;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + CTRL_INDEX,
+ adout, adout);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program adout\n");
+ return ret;
+ }
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + CTRL_INDEX,
+ TRIGX_FREQ_MASK, gpadc->adc_trig[trig].freq);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program freq\n");
+ return ret;
+ }
+ return ret;
+
+}
+
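+/*
+ * Inverse of the manual-mode scaling: map a physical trigger level back
+ * onto the 8-bit auto ADC resolution; e.g. a 3400 mV VBAT threshold gives
+ * ((3400 - 2300) * 255) / 2200 = 127.
+ */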
+#define TRIG_V(trigval, min, max) \
+ ((((trigval) - (min)) * AUTO_ADC_RESOLUTION) / ((max) - (min)))
+
+static int ab5500_gpadc_vbat_auto_conf(struct ab5500_gpadc *gpadc,
+ struct adc_auto_input *in)
+{
+ int trig_min, ret;
+ u8 trig_reg, trig_arm;
+
+ /* Scale mux voltage */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_VBAT_MASK, MUX_SCALE_45);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to set vbat scale\n");
+ return ret;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_CTRL1,
+ AUTO_VBAT_MASK, AUTO_VBAT_ON);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to set vbat on\n");
+ return ret;
+ }
+
+ trig_min = TRIG_V(in->min, adc_tab[in->mux].min, adc_tab[in->mux].max);
+
+ if (in->mux == MAIN_BAT_V_TRIG_MIN) {
+ trig_reg = AB5500_GPADC_AUTO_TRIG_VBAT_MIN_NOTX;
+ trig_arm = TRIG_VBAT_NOTX_ARM_MASK;
+ } else {
+ trig_reg = AB5500_GPADC_AUTO_TRIG_VBAT_MIN_TXON;
+ trig_arm = TRIG_VBAT_TXON_ARM_MASK;
+ }
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ trig_reg, trig_min);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program vbat min\n");
+ return ret;
+ }
+ /*
+ * arm the trigger
+ */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_CTRL1, trig_arm, trig_arm);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to trig vbat\n");
+ return ret;
+ }
+ return ret;
+}
+/**
+ * ab5500_gpadc_convert_auto() - gpadc conversion
+ * @gpadc: gpadc instance to use for the conversion
+ * @in: auto trigger input configuration
+ *
+ * This function converts the selected channel from
+ * analog to digital data in auto mode
+ */
+
+int ab5500_gpadc_convert_auto(struct ab5500_gpadc *gpadc,
+ struct adc_auto_input *in)
+{
+ int ret, trig;
+ unsigned long flags;
+
+ if (!gpadc)
+ return -ENODEV;
+ mutex_lock(&gpadc->ab5500_gpadc_lock);
+
+ if (in->mux == MAIN_BAT_V_TXON_TRIG_MIN) {
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ if (gpadc->adc_trig[ADC_INPUT_VBAT_TXON].flag == true) {
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ ret = -EBUSY;
+ dev_err(gpadc->dev, "gpadc: Auto vbat txon busy");
+ goto out;
+ }
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+
+ ret = ab5500_gpadc_vbat_auto_conf(gpadc, in);
+ if (ret < 0)
+ goto out;
+
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXON].auto_mux = in->mux;
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXON].auto_callb =
+ in->auto_adc_callback;
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXON].flag = true;
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ } else if (in->mux == MAIN_BAT_V_TRIG_MIN) {
+
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ if (gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].flag == true) {
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ ret = -EBUSY;
+ dev_err(gpadc->dev, "gpadc: Auto vbat busy");
+ goto out;
+ }
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+
+ ret = ab5500_gpadc_vbat_auto_conf(gpadc, in);
+ if (ret < 0)
+ goto out;
+
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].auto_mux = in->mux;
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].auto_callb =
+ in->auto_adc_callback;
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].flag = true;
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ } else {
+ /*
+ * check if free trigger is available
+ */
+ trig = ADC_INPUT_TRIG0;
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ while (gpadc->adc_trig[trig].flag == true &&
+ trig <= ADC_INPUT_TRIG7)
+ trig++;
+
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ if (trig > ADC_INPUT_TRIG7) {
+ ret = -EBUSY;
+ dev_err(gpadc->dev, "gpadc: no free channel\n");
+ goto out;
+ }
+ switch (in->mux) {
+ case BTEMP_BALL:
+ case MAIN_BAT_V:
+ /*
+ * The mux scale voltage depends on the type of battery:
+ * for Li-ion use MUX_SCALE_35 => 2.3-3.5V,
+ * for LiFePO4 use MUX_SCALE_45 => 2.3-4.5V.
+ * TODO: check the battery type from platform data.
+ */
+ ret = abx500_mask_and_set_register_interruptible(
+ gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_VBAT_MASK, MUX_SCALE_45);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: failed to read status\n");
+ goto out;
+ }
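+ /* fall through - shared trigger level setup below */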
+ case ACC_DETECT2:
+ case ACC_DETECT3:
+ case VBUS_V:
+ case USB_CHARGER_C:
+ case BK_BAT_V:
+ case PCB_TEMP:
+ case USB_ID:
+ case BAT_CTRL:
+ gpadc->adc_trig[trig].trig_min =
+ (u8)TRIG_V(in->min, adc_tab[in->mux].min,
+ adc_tab[in->mux].max);
+ gpadc->adc_trig[trig].trig_max =
+ (u8)TRIG_V(in->max, adc_tab[in->mux].min,
+ adc_tab[in->mux].max);
+ gpadc->adc_trig[trig].adout =
+ adc_tab[in->mux].adout;
+ break;
+ case DIE_TEMP:
+ /*
+ * From the AB5500 product specification
+ * T(deg_cel) = 27 - (ADCode - 709)/2.4213)
+ * ADCode = 709 + (2.4213 * (27 - T))
+ * Auto trigger min/max level is of 8bit precision.
+ * Hence use AB5500_GPADC_MANDATAH_REG value
+ * obtained by 2 bit right shift of ADCode.
+ */
+ gpadc->adc_trig[trig].trig_min =
+ (709 + ((24213 * (27 - in->min))/10000))>>2;
+ gpadc->adc_trig[trig].trig_max =
+ (709 + ((24213 * (27 - in->max))/10000))>>2;
+ gpadc->adc_trig[trig].adout =
+ adc_tab[in->mux].adout;
+ break;
+ default:
+ dev_err(gpadc->dev, "Unknow GPADC request\n");
+ break;
+ }
+ gpadc->adc_trig[trig].freq = in->freq;
+ gpadc->adc_trig[trig].auto_mux =
+ adc_tab[in->mux].mux;
+ gpadc->adc_trig[trig].auto_callb = in->auto_adc_callback;
+
+ ret = ab5500_gpadc_program_auto(gpadc, trig);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: fail to program auto ch\n");
+ goto out;
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig * 4),
+ TRIGX_ARM_MASK, TRIGX_ARM);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to trigger\n");
+ goto out;
+ }
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ gpadc->adc_trig[trig].flag = true;
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ }
+out:
+ mutex_unlock(&gpadc->ab5500_gpadc_lock);
+ return ret;
+
+}
+EXPORT_SYMBOL(ab5500_gpadc_convert_auto);
+
+/* sysfs interface for GPADC0 */
+static ssize_t ab5500_gpadc0_get(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int voltage;
+ struct ab5500_gpadc *gpadc = dev_get_drvdata(dev);
+
+ voltage = ab5500_gpadc_convert(gpadc, GPADC0_V);
+
+ return sprintf(buf, "%d\n", voltage);
+}
+static DEVICE_ATTR(adc0volt, 0444, ab5500_gpadc0_get, NULL);
+
+static void ab5500_gpadc_trigx_work(struct ab5500_gpadc *gp, int trig)
+{
+ unsigned long flags;
+ if (gp->adc_trig[trig].auto_callb != NULL) {
+ gp->adc_trig[trig].auto_callb(gp->adc_trig[trig].auto_mux);
+ spin_lock_irqsave(&gp->gpadc_auto_lock, flags);
+ gp->adc_trig[trig].flag = false;
+ spin_unlock_irqrestore(&gp->gpadc_auto_lock, flags);
+ } else {
+ dev_err(gp->dev, "Unknown trig for %d\n", trig);
+ }
+}
+/**
+ * ab5500_gpadc_trig0_work() - work item for trig0 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 0 auto conversion.
+ */
+static void ab5500_gpadc_trig0_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig0_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG0);
+}
+
+/**
+ * ab5500_gpadc_trig1_work() - work item for trig1 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig1 auto conversion.
+ */
+static void ab5500_gpadc_trig1_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig1_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG1);
+}
+
+/**
+ * ab5500_gpadc_trig2_work() - work item for trig2 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 2 auto conversion.
+ */
+static void ab5500_gpadc_trig2_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig2_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG2);
+}
+
+/**
+ * ab5500_gpadc_trig3_work() - work item for trig3 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 3 auto conversion.
+ */
+static void ab5500_gpadc_trig3_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig3_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG3);
+}
+
+/**
+ * ab5500_gpadc_trig4_work() - work item for trig4 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 4 auto conversion.
+ */
+static void ab5500_gpadc_trig4_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig4_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG4);
+}
+
+/**
+ * ab5500_gpadc_trig5_work() - work item for trig5 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 5 auto conversion.
+ */
+static void ab5500_gpadc_trig5_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig5_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG5);
+}
+
+/**
+ * ab5500_gpadc_trig6_work() - work item for trig6 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 6 auto conversion.
+ */
+static void ab5500_gpadc_trig6_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig6_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG6);
+}
+
+/**
+ * ab5500_gpadc_trig7_work() - work item for trig7 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 7 auto conversion.
+ */
+static void ab5500_gpadc_trig7_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig7_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG7);
+}
+
+/**
+ * ab5500_gpadc_vbat_txon_work() - work item for vbat_txon trigger auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for vbat_txon trigger auto adc.
+ */
+static void ab5500_gpadc_vbat_txon_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig_vbat_txon_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_VBAT_TXON);
+}
+
+/**
+ * ab5500_gpadc_vbat_txoff_work() - work item for vbat_txoff trigger auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for vbat_txoff trigger auto adc.
+ */
+static void ab5500_gpadc_vbat_txoff_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig_vbat_txoff_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_VBAT_TXOFF);
+}
+
+/**
+ * ab5500_adc_trigx_handler() - isr for auto gpadc conversion trigger
+ * @irq: irq number
+ * @_gpadc: pointer to the gpadc instance passed when requesting the irq
+ *
+ * This is an interrupt service routine for auto gpadc conversion.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab5500_adc_trigx_handler(int irq, void *_gpadc)
+{
+ struct ab5500_platform_data *plat;
+ struct ab5500_gpadc *gpadc = _gpadc;
+ int dev_irq;
+
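+ /* Translate the Linux irq number back to the AB5500-local trigger */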
+ plat = dev_get_platdata(gpadc->dev->parent);
+ dev_irq = irq - plat->irq.base;
+
+ switch (dev_irq) {
+ case AB5500_INT_ADC_TRIG0:
+ dev_dbg(gpadc->dev, "Trigger 0 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig0_work);
+ break;
+ case AB5500_INT_ADC_TRIG1:
+ dev_dbg(gpadc->dev, "Trigger 1 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig1_work);
+ break;
+ case AB5500_INT_ADC_TRIG2:
+ dev_dbg(gpadc->dev, "Trigger 2 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig2_work);
+ break;
+ case AB5500_INT_ADC_TRIG3:
+ dev_dbg(gpadc->dev, "Trigger 3 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig3_work);
+ break;
+ case AB5500_INT_ADC_TRIG4:
+ dev_dbg(gpadc->dev, "Trigger 4 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig4_work);
+ break;
+ case AB5500_INT_ADC_TRIG5:
+ dev_dbg(gpadc->dev, "Trigger 5 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig5_work);
+ break;
+ case AB5500_INT_ADC_TRIG6:
+ dev_dbg(gpadc->dev, "Trigger 6 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig6_work);
+ break;
+ case AB5500_INT_ADC_TRIG7:
+ dev_dbg(gpadc->dev, "Trigger 7 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig7_work);
+ break;
+ default:
+ dev_dbg(gpadc->dev, "unknown trigx handler input\n");
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_adc_vbat_txon_handler() - isr for auto vbat_txon conversion trigger
+ * @irq: irq number
+ * @_gpadc: pointer to the gpadc instance passed when requesting the irq
+ *
+ * This is an interrupt service routine for auto vbat_txon conversion.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab5500_adc_vbat_txon_handler(int irq, void *_gpadc)
+{
+ struct ab5500_gpadc *gpadc = _gpadc;
+
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig_vbat_txon_work);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_adc_vbat_txoff_handler() - isr for auto vbat_txoff conversion trigger
+ * @irq: irq number
+ * @_gpadc: pointer to the gpadc data passed when requesting the irq
+ *
+ * This is an interrupt service routine for the auto vbat_txoff conversion.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab5500_adc_vbat_txoff_handler(int irq, void *_gpadc)
+{
+ struct ab5500_gpadc *gpadc = _gpadc;
+
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig_vbat_txoff_work);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_gpadc_configuration() - configure the gpadc
+ * @gpadc: pointer to the ab5500 gpadc structure
+ *
+ * This function configures the gpadc.
+ */
+static int ab5500_gpadc_configuration(struct ab5500_gpadc *gpadc)
+{
+ int ret;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_CTRL2,
+ ADC_CAL_OFF_MASK | ADC_ON_MODE_MASK,
+ ADC_CAL_ON | ADC_FULLPWR);
+ return ret;
+}
+
+/* ab5500 adc driver interrupts and their respective isrs */
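+/*
+ * The names are looked up with platform_get_irq_byname() at probe time and
+ * must therefore match the IRQ resources declared for this platform device.
+ */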
+static struct ab5500_adc_interrupts ab5500_adc_irq[] = {
+ {"TRIGGER-0", ab5500_adc_trigx_handler},
+ {"TRIGGER-1", ab5500_adc_trigx_handler},
+ {"TRIGGER-2", ab5500_adc_trigx_handler},
+ {"TRIGGER-3", ab5500_adc_trigx_handler},
+ {"TRIGGER-4", ab5500_adc_trigx_handler},
+ {"TRIGGER-5", ab5500_adc_trigx_handler},
+ {"TRIGGER-6", ab5500_adc_trigx_handler},
+ {"TRIGGER-7", ab5500_adc_trigx_handler},
+ {"TRIGGER-VBAT-TXON", ab5500_adc_vbat_txon_handler},
+ {"TRIGGER-VBAT", ab5500_adc_vbat_txoff_handler},
+};
+
+static int __devinit ab5500_gpadc_probe(struct platform_device *pdev)
+{
+ int ret, irq, i, j;
+ struct ab5500_gpadc *gpadc;
+
+ gpadc = kzalloc(sizeof(struct ab5500_gpadc), GFP_KERNEL);
+ if (!gpadc) {
+ dev_err(&pdev->dev, "Error: No memory\n");
+ return -ENOMEM;
+ }
+ gpadc->dev = &pdev->dev;
+ mutex_init(&gpadc->ab5500_gpadc_lock);
+ spin_lock_init(&gpadc->gpadc_auto_lock);
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_adc_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab5500_adc_irq[i].isr,
+ IRQF_NO_SUSPEND,
+ ab5500_adc_irq[i].name, gpadc);
+
+ if (ret) {
+ dev_err(gpadc->dev, "failed to request %s IRQ %d: %d\n",
+ ab5500_adc_irq[i].name, irq, ret);
+ goto fail_irq;
+ }
+ dev_dbg(gpadc->dev, "Requested %s IRQ %d: %d\n",
+ ab5500_adc_irq[i].name, irq, ret);
+ }
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(gpadc->dev);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "failed to get chip ID\n");
+ goto fail_irq;
+ }
+ gpadc->chip_id = (u8) ret;
+
+ /* Create a work queue for gpadc auto */
+ gpadc->gpadc_wq =
+ create_singlethread_workqueue("ab5500_gpadc_wq");
+ if (gpadc->gpadc_wq == NULL) {
+ dev_err(gpadc->dev, "failed to create work queue\n");
+ goto fail_irq;
+ }
+
+ INIT_WORK(&gpadc->gpadc_trig0_work, ab5500_gpadc_trig0_work);
+ INIT_WORK(&gpadc->gpadc_trig1_work, ab5500_gpadc_trig1_work);
+ INIT_WORK(&gpadc->gpadc_trig2_work, ab5500_gpadc_trig2_work);
+ INIT_WORK(&gpadc->gpadc_trig3_work, ab5500_gpadc_trig3_work);
+ INIT_WORK(&gpadc->gpadc_trig4_work, ab5500_gpadc_trig4_work);
+ INIT_WORK(&gpadc->gpadc_trig5_work, ab5500_gpadc_trig5_work);
+ INIT_WORK(&gpadc->gpadc_trig6_work, ab5500_gpadc_trig6_work);
+ INIT_WORK(&gpadc->gpadc_trig7_work, ab5500_gpadc_trig7_work);
+ INIT_WORK(&gpadc->gpadc_trig_vbat_txon_work,
+ ab5500_gpadc_vbat_txon_work);
+ INIT_WORK(&gpadc->gpadc_trig_vbat_txoff_work,
+ ab5500_gpadc_vbat_txoff_work);
+
+ for (j = 0; j < N_AUTO_TRIGGER; j++)
+ gpadc->adc_trig[j].flag = false;
+
+ ret = ab5500_gpadc_configuration(gpadc);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: configuration failed\n");
+ goto free_wq;
+ }
+
+ ret = device_create_file(gpadc->dev, &dev_attr_adc0volt);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "File device creation failed: %d\n", ret);
+ ret = -ENODEV;
+ goto fail_sysfs;
+ }
+ list_add_tail(&gpadc->node, &ab5500_gpadc_list);
+
+ platform_set_drvdata(pdev, gpadc);
+
+ return 0;
+fail_sysfs:
+free_wq:
+ destroy_workqueue(gpadc->gpadc_wq);
+fail_irq:
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name);
+ free_irq(irq, gpadc);
+ }
+ kfree(gpadc);
+ gpadc = NULL;
+ return ret;
+}
+
+static int __devexit ab5500_gpadc_remove(struct platform_device *pdev)
+{
+ int i, irq;
+ struct ab5500_gpadc *gpadc = platform_get_drvdata(pdev);
+
+ device_remove_file(gpadc->dev, &dev_attr_adc0volt);
+
+ /* remove this gpadc entry from the list */
+ list_del(&gpadc->node);
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_adc_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name);
+ free_irq(irq, gpadc);
+ }
+ /* Flush work */
+ flush_workqueue(gpadc->gpadc_wq);
+
+ /* Delete the work queue */
+ destroy_workqueue(gpadc->gpadc_wq);
+
+ kfree(gpadc);
+ gpadc = NULL;
+ return 0;
+}
+
+static struct platform_driver ab5500_gpadc_driver = {
+ .probe = ab5500_gpadc_probe,
+ .remove = __devexit_p(ab5500_gpadc_remove),
+ .driver = {
+ .name = "ab5500-adc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_gpadc_init(void)
+{
+ return platform_driver_register(&ab5500_gpadc_driver);
+}
+
+static void __exit ab5500_gpadc_exit(void)
+{
+ platform_driver_unregister(&ab5500_gpadc_driver);
+}
+
+subsys_initcall_sync(ab5500_gpadc_init);
+module_exit(ab5500_gpadc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vijaya Kumar K");
+MODULE_ALIAS("platform:ab5500_adc");
+MODULE_DESCRIPTION("AB5500 GPADC driver");
diff --git a/drivers/mfd/ab5500-power.c b/drivers/mfd/ab5500-power.c
new file mode 100644
index 00000000000..9474c32809b
--- /dev/null
+++ b/drivers/mfd/ab5500-power.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+static struct device *dev;
+
+/* STARTUP */
+#define AB5500_SYSPOR_CONTROL 0x30
+
+/* VINT IO I2C CLOCK */
+#define AB5500_RTC_VINT 0x01
+
+int ab5500_clock_rtc_enable(int num, bool enable)
+{
+ /* RTC_CLK{0,1,2} are bits {4,3,2}, active low */
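+ /* e.g. num == 1 -> mask BIT(3); enable clears the bit (active low), disable sets it */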
+ u8 mask = BIT(4 - num);
+ u8 value = enable ? 0 : mask;
+
+ /* Don't allow RTC_CLK0 to be controlled. */
+ if (num < 1 || num > 2)
+ return -EINVAL;
+
+ if (!dev)
+ return -EAGAIN;
+
+ return abx500_mask_and_set(dev, AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP,
+ AB5500_RTC_VINT, mask, value);
+}
+
+static void ab5500_power_off(void)
+{
+ sigset_t old;
+ sigset_t all;
+
+ sigfillset(&all);
+
+ if (!sigprocmask(SIG_BLOCK, &all, &old)) {
+ /* Clear dbb_on */
+ int ret = abx500_set(dev, AB5500_BANK_STARTUP,
+ AB5500_SYSPOR_CONTROL, 0);
+ WARN_ON(ret);
+ }
+}
+
+static int __devinit ab5500_power_probe(struct platform_device *pdev)
+{
+ struct ab5500_platform_data *plat = dev_get_platdata(pdev->dev.parent);
+
+ dev = &pdev->dev;
+
+ if (plat->pm_power_off)
+ pm_power_off = ab5500_power_off;
+
+ return 0;
+}
+
+static int __devexit ab5500_power_remove(struct platform_device *pdev)
+{
+ struct ab5500_platform_data *plat = dev_get_platdata(pdev->dev.parent);
+
+ if (plat->pm_power_off)
+ pm_power_off = NULL;
+ dev = NULL;
+
+ return 0;
+}
+
+static struct platform_driver ab5500_power_driver = {
+ .driver = {
+ .name = "ab5500-power",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_power_probe,
+ .remove = __devexit_p(ab5500_power_remove),
+};
+
+static int __init ab5500_power_init(void)
+{
+ return platform_driver_register(&ab5500_power_driver);
+}
+
+subsys_initcall(ab5500_power_init);
+
+MODULE_DESCRIPTION("AB5500 power driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index d295941c9a3..2537e7461f1 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -94,6 +94,9 @@
#define AB8500_TURN_ON_STATUS 0x00
+static bool no_bm; /* No battery management */
+module_param(no_bm, bool, S_IRUGO);
+
/*
* Map interrupt numbers to the LATCH and MASK register offsets, Interrupt
* numbers are indexed into this array with (num / 8).
@@ -684,7 +687,7 @@ static struct resource __devinitdata ab8500_usb_resources[] = {
static struct resource __devinitdata ab8500_temp_resources[] = {
{
- .name = "AB8500_TEMP_WARM",
+ .name = "ABX500_TEMP_WARM",
.start = AB8500_INT_TEMP_WARM,
.end = AB8500_INT_TEMP_WARM,
.flags = IORESOURCE_IRQ,
@@ -706,6 +709,9 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
.name = "ab8500-regulator",
},
{
+ .name = "ab8500-regulator-debug",
+ },
+ {
.name = "ab8500-gpio",
.num_resources = ARRAY_SIZE(ab8500_gpio_resources),
.resources = ab8500_gpio_resources,
@@ -721,26 +727,6 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
.resources = ab8500_rtc_resources,
},
{
- .name = "ab8500-charger",
- .num_resources = ARRAY_SIZE(ab8500_charger_resources),
- .resources = ab8500_charger_resources,
- },
- {
- .name = "ab8500-btemp",
- .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
- .resources = ab8500_btemp_resources,
- },
- {
- .name = "ab8500-fg",
- .num_resources = ARRAY_SIZE(ab8500_fg_resources),
- .resources = ab8500_fg_resources,
- },
- {
- .name = "ab8500-chargalg",
- .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
- .resources = ab8500_chargalg_resources,
- },
- {
.name = "ab8500-acc-det",
.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
.resources = ab8500_av_acc_detect_resources,
@@ -775,12 +761,35 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
.name = "ab8500-denc",
},
{
- .name = "ab8500-temp",
+ .name = "abx500-temp",
.num_resources = ARRAY_SIZE(ab8500_temp_resources),
.resources = ab8500_temp_resources,
},
};
+static struct mfd_cell __devinitdata ab8500_bm_devs[] = {
+ {
+ .name = "ab8500-charger",
+ .num_resources = ARRAY_SIZE(ab8500_charger_resources),
+ .resources = ab8500_charger_resources,
+ },
+ {
+ .name = "ab8500-btemp",
+ .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
+ .resources = ab8500_btemp_resources,
+ },
+ {
+ .name = "ab8500-fg",
+ .num_resources = ARRAY_SIZE(ab8500_fg_resources),
+ .resources = ab8500_fg_resources,
+ },
+ {
+ .name = "ab8500-chargalg",
+ .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
+ .resources = ab8500_chargalg_resources,
+ },
+};
+
static ssize_t show_chip_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -946,9 +955,19 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
ARRAY_SIZE(ab8500_devs), NULL,
ab8500->irq_base);
+
if (ret)
goto out_freeirq;
+ if (!no_bm) {
+ /* Add battery management devices */
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
+ ARRAY_SIZE(ab8500_bm_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ dev_err(ab8500->dev, "error adding bm devices\n");
+ }
+
ret = sysfs_create_group(&ab8500->dev->kobj, &ab8500_attr_group);
if (ret)
dev_err(ab8500->dev, "error creating sysfs entries\n");
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 9a0211aa889..9521d738fd0 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -4,6 +4,72 @@
* Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
* License Terms: GNU General Public License v2
*/
+/*
+ * AB8500 register access
+ * ======================
+ *
+ * read:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # echo ADDR > <debugfs>/ab8500/register-address
+ * # cat <debugfs>/ab8500/register-value
+ *
+ * write:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # echo ADDR > <debugfs>/ab8500/register-address
+ * # echo VALUE > <debugfs>/ab8500/register-value
+ *
+ * read all registers from a bank:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # cat <debugfs>/ab8500/all-bank-register
+ *
+ * BANK target AB8500 register bank
+ * ADDR target AB8500 register address
+ * VALUE decimal or 0x-prefixed hexadecimal
+ *
+ *
+ * User Space notification on AB8500 IRQ
+ * =====================================
+ *
+ * Allows a user space entity to be notified when the target AB8500 IRQ occurs.
+ * When subscribed, a sysfs entry is created in the ab8500.i2c platform device.
+ * One can poll this file to get the target IRQ occurrence information.
+ *
+ * subscribe to an AB8500 IRQ:
+ * # echo IRQ > <debugfs>/ab8500/irq-subscribe
+ *
+ * unsubscribe from an AB8500 IRQ:
+ * # echo IRQ > <debugfs>/ab8500/irq-unsubscribe
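+ *
+ * A subscribed IRQ can then be monitored from user space with the usual
+ * sysfs poll pattern (sketch; the actual sysfs path depends on the platform
+ * device name):
+ * fd = open("<sysfs path>/<IRQ>", O_RDONLY);
+ * read(fd, buf, sizeof(buf));
+ * poll(&(struct pollfd){ .fd = fd, .events = POLLPRI | POLLERR }, 1, -1);
+ * pread(fd, buf, sizeof(buf), 0);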
+ *
+ *
+ * AB8500 register formatted read/write access
+ * ==========================================
+ *
+ * Read: read data, data>>SHIFT, data&=MASK, output data
+ * [0xABCDEF98] shift=12 mask=0xFFF => 0x00000CDE
+ * Write: read data, data &= ~(MASK<<SHIFT), data |= (VALUE<<SHIFT), write data
+ * [0xABCDEF98] shift=12 mask=0xFFF value=0x123 => [0xAB123F98]
+ *
+ * Usage:
+ * # echo "CMD [OPTIONS] BANK ADRESS [VALUE]" > $debugfs/ab8500/hwreg
+ *
+ * CMD read read access
+ * write write access
+ *
+ * BANK target reg bank
+ * ADDRESS target reg address
+ * VALUE (write) value to be updated
+ *
+ * OPTIONS
+ * -d|-dec (read) output in decimal
+ * -h|-hex (read) output in 0x-prefixed hex (default)
+ * -l|-w|-b 32bit (default), 16bit or 8bit reg access
+ * -m|-mask MASK 0x-hexa mask (default 0xFFFFFFFF)
+ * -s|-shift SHIFT bit shift value (read: right, write: left)
+ * -o|-offset OFFSET address offset to add to ADDRESS value
+ *
+ * Warning: the bit shift operation is applied to the bit-mask.
+ * Warning: the bit shift direction depends on the read or write command.
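+ *
+ * Example (illustrative bank/address values): read bits [7:4] of register
+ * 0x08 in bank 0x02 and print the result in hex:
+ * # echo "read -hex -m 0x0F -s 4 0x02 0x08" > <debugfs>/ab8500/hwreg
+ * # cat <debugfs>/ab8500/hwreg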
+ */
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -11,13 +77,28 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/string.h>
+#include <linux/ctype.h>
+#endif
static u32 debug_bank;
static u32 debug_address;
+static int irq_first;
+static int irq_last;
+static u32 irq_count[AB8500_NR_IRQS];
+
+static struct device_attribute *dev_attr[AB8500_NR_IRQS];
+static char *event_name[AB8500_NR_IRQS];
+
/**
* struct ab8500_reg_range
* @first: the first address of the range
@@ -42,15 +123,35 @@ struct ab8500_i2c_ranges {
const struct ab8500_reg_range *range;
};
+/* hwreg- "mask" and "shift" entries ressources */
+struct hwreg_cfg {
+ u32 bank; /* target bank */
+ u32 addr; /* target address */
+ uint fmt; /* format */
+ uint mask; /* read/write mask, applied before any bit shift */
+ int shift; /* bit shift (read: right shift, write: left shift) */
+};
+/* fmt bit #0: 0=hex, 1=dec */
+#define REG_FMT_DEC(c) ((c)->fmt & 0x1)
+#define REG_FMT_HEX(c) (!REG_FMT_DEC(c))
+
+static struct hwreg_cfg hwreg_cfg = {
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+};
+
#define AB8500_NAME_STRING "ab8500"
-#define AB8500_NUM_BANKS 22
+#define AB8500_ADC_NAME_STRING "gpadc"
+#define AB8500_NUM_BANKS 24
#define AB8500_REV_REG 0x80
static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
[0x0] = {
.num_ranges = 0,
- .range = 0,
+ .range = NULL,
},
[AB8500_SYS_CTRL1_BLOCK] = {
.num_ranges = 3,
@@ -215,7 +316,7 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
},
[AB8500_CHARGER] = {
- .num_ranges = 8,
+ .num_ranges = 9,
.range = (struct ab8500_reg_range[]) {
{
.first = 0x00,
@@ -249,6 +350,10 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
.first = 0xC0,
.last = 0xC2,
},
+ {
+ .first = 0xf5,
+ .last = 0xf6,
+ },
},
},
[AB8500_GAS_GAUGE] = {
@@ -268,6 +373,24 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
},
},
+ [AB8500_DEVELOPMENT] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x00,
+ },
+ },
+ },
+ [AB8500_DEBUG] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x05,
+ .last = 0x07,
+ },
+ },
+ },
[AB8500_AUDIO] = {
.num_ranges = 1,
.range = (struct ab8500_reg_range[]) {
@@ -354,6 +477,24 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
};
+static irqreturn_t ab8500_debug_handler(int irq, void *data)
+{
+ char buf[16];
+ struct kobject *kobj = (struct kobject *)data;
+ unsigned int irq_abb = irq - irq_first;
+
+ if (irq_abb < AB8500_NR_IRQS)
+ irq_count[irq_abb]++;
+ /*
+ * This makes it possible to use poll for events (POLLPRI | POLLERR)
+ * from userspace on sysfs file named <irq-nr>
+ */
+ sprintf(buf, "%d", irq);
+ sysfs_notify(kobj, NULL, buf);
+
+ return IRQ_HANDLED;
+}
+
static int ab8500_registers_print(struct seq_file *s, void *p)
{
struct device *dev = s->private;
@@ -515,10 +656,732 @@ static ssize_t ab8500_val_write(struct file *file,
printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
return -EINVAL;
}
+ return count;
+}
+
+/*
+ * - HWREG DB8500-style formatted routines
+ */
+static int ab8500_hwreg_print(struct seq_file *s, void *d)
+{
+ struct device *dev = s->private;
+ int ret;
+ u8 regvalue;
+
+ ret = abx500_get_register_interruptible(dev,
+ (u8)hwreg_cfg.bank, (u8)hwreg_cfg.addr, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
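+ /* a positive shift is a right shift on read, a negative one a left shift */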
+ if (hwreg_cfg.shift >= 0)
+ regvalue >>= hwreg_cfg.shift;
+ else
+ regvalue <<= -hwreg_cfg.shift;
+ regvalue &= hwreg_cfg.mask;
+
+ if (REG_FMT_DEC(&hwreg_cfg))
+ seq_printf(s, "%d\n", regvalue);
+ else
+ seq_printf(s, "0x%02X\n", regvalue);
+ return 0;
+}
+
+static int ab8500_hwreg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_hwreg_print, inode->i_private);
+}
+
+static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+{
+ int bat_ctrl_raw;
+ int bat_ctrl_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL);
+ bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ BAT_CTRL, bat_ctrl_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ bat_ctrl_convert, bat_ctrl_raw);
+}
+
+static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_bat_ctrl_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
+ .open = ab8500_gpadc_bat_ctrl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+{
+ int btemp_ball_raw;
+ int btemp_ball_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL);
+ btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
+ btemp_ball_raw);
+
+ return seq_printf(s,
+ "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
+}
+
+static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_btemp_ball_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
+ .open = ab8500_gpadc_btemp_ball_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+{
+ int main_charger_v_raw;
+ int main_charger_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V);
+ main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ MAIN_CHARGER_V, main_charger_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_charger_v_convert, main_charger_v_raw);
+}
+
+static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_charger_v_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
+ .open = ab8500_gpadc_main_charger_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+{
+ int acc_detect1_raw;
+ int acc_detect1_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1);
+ acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1,
+ acc_detect1_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ acc_detect1_convert, acc_detect1_raw);
+}
+
+static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_acc_detect1_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
+ .open = ab8500_gpadc_acc_detect1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+{
+ int acc_detect2_raw;
+ int acc_detect2_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2);
+ acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ ACC_DETECT2, acc_detect2_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ acc_detect2_convert, acc_detect2_raw);
+}
+
+static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_acc_detect2_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
+ .open = ab8500_gpadc_acc_detect2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+{
+ int aux1_raw;
+ int aux1_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1);
+ aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1,
+ aux1_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ aux1_convert, aux1_raw);
+}
+
+static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_aux1_fops = {
+ .open = ab8500_gpadc_aux1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+{
+ int aux2_raw;
+ int aux2_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2);
+ aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2,
+ aux2_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ aux2_convert, aux2_raw);
+}
+
+static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_aux2_fops = {
+ .open = ab8500_gpadc_aux2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+{
+ int main_bat_v_raw;
+ int main_bat_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V);
+ main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
+ main_bat_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_bat_v_convert, main_bat_v_raw);
+}
+
+static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_bat_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
+ .open = ab8500_gpadc_main_bat_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+{
+ int vbus_v_raw;
+ int vbus_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V);
+ vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V,
+ vbus_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ vbus_v_convert, vbus_v_raw);
+}
+
+static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_vbus_v_fops = {
+ .open = ab8500_gpadc_vbus_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+{
+ int main_charger_c_raw;
+ int main_charger_c_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C);
+ main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ MAIN_CHARGER_C, main_charger_c_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_charger_c_convert, main_charger_c_raw);
+}
+
+static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_charger_c_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
+ .open = ab8500_gpadc_main_charger_c_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+{
+ int usb_charger_c_raw;
+ int usb_charger_c_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C);
+ usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ USB_CHARGER_C, usb_charger_c_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ usb_charger_c_convert, usb_charger_c_raw);
+}
+
+static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_usb_charger_c_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
+ .open = ab8500_gpadc_usb_charger_c_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+{
+ int bk_bat_v_raw;
+ int bk_bat_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V);
+ bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ BK_BAT_V, bk_bat_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ bk_bat_v_convert, bk_bat_v_raw);
+}
+
+static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_bk_bat_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
+ .open = ab8500_gpadc_bk_bat_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+{
+ int die_temp_raw;
+ int die_temp_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get();
+ die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP);
+ die_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP,
+ die_temp_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ die_temp_convert, die_temp_raw);
+}
+
+static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_die_temp_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_die_temp_fops = {
+ .open = ab8500_gpadc_die_temp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Return the length of an ASCII numerical value, or 0 if the string is not
+ * a numerical value.
+ * The string shall start at the value's first char.
+ * The string may only be terminated by '\0', space or newline chars.
+ * The value can be decimal or hexadecimal (prefixed 0x or 0X).
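+ *
+ * e.g. strval_len("0x1F ") == 4, strval_len("-12\n") == 3,
+ * strval_len("abc") == 0.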
+ */
+static int strval_len(char *b)
+{
+ char *s = b;
+ if ((*s == '0') && ((*(s+1) == 'x') || (*(s+1) == 'X'))) {
+ s += 2;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isxdigit(*s))
+ return 0;
+ }
+ } else {
+ if (*s == '-')
+ s++;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isdigit(*s))
+ return 0;
+ }
+ }
+ return (int) (s-b);
+}
+
+/*
+ * parse hwreg input data.
+ * update global hwreg_cfg only if input data syntax is ok.
+ */
+static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg,
+ struct device *dev)
+{
+ uint write, val = 0;
+ struct hwreg_cfg loc = {
+ .bank = 0, /* default: invalid phys addr */
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+ };
+
+ /* read or write ? */
+ if (!strncmp(b, "read ", 5)) {
+ write = 0;
+ b += 5;
+ } else if (!strncmp(b, "write ", 6)) {
+ write = 1;
+ b += 6;
+ } else
+ return -EINVAL;
+
+ /* OPTIONS: -d|-dec, -h|-hex, -m|-mask, -s|-shift */
+ while ((*b == ' ') || (*b == '-')) {
+ if (*(b-1) != ' ') {
+ b++;
+ continue;
+ }
+ if ((!strncmp(b, "-d ", 3)) ||
+ (!strncmp(b, "-dec ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt |= (1<<0);
+ } else if ((!strncmp(b, "-h ", 3)) ||
+ (!strncmp(b, "-hex ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt &= ~(1<<0);
+ } else if ((!strncmp(b, "-m ", 3)) ||
+ (!strncmp(b, "-mask ", 6))) {
+ b += (*(b+2) == ' ') ? 3 : 6;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.mask = simple_strtoul(b, &b, 0);
+ } else if ((!strncmp(b, "-s ", 3)) ||
+ (!strncmp(b, "-shift ", 7))) {
+ b += (*(b+2) == ' ') ? 3 : 7;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.shift = simple_strtol(b, &b, 0);
+ } else {
+ return -EINVAL;
+ }
+ }
+ /* get arg BANK and ADDRESS */
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.bank = simple_strtoul(b, &b, 0);
+ while (*b == ' ')
+ b++;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.addr = simple_strtoul(b, &b, 0);
+
+ if (write) {
+ while (*b == ' ')
+ b++;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ val = simple_strtoul(b, &b, 0);
+ }
+
+ /* args are ok, update target cfg (mainly for read) */
+ *cfg = loc;
+
+#ifdef ABB_HWREG_DEBUG
+ pr_warn("HWREG request: %s, %s, addr=0x%08X, mask=0x%X, shift=%d"
+ "value=0x%X\n", (write) ? "write" : "read",
+ REG_FMT_DEC(cfg) ? "decimal" : "hexa",
+ cfg->addr, cfg->mask, cfg->shift, val);
+#endif
+
+ if (write) {
+ u8 regvalue;
+ int ret = abx500_get_register_interruptible(dev,
+ (u8)cfg->bank, (u8)cfg->addr, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ if (cfg->shift >= 0) {
+ regvalue &= ~(cfg->mask << (cfg->shift));
+ val = (val & cfg->mask) << (cfg->shift);
+ } else {
+ regvalue &= ~(cfg->mask >> (-cfg->shift));
+ val = (val & cfg->mask) >> (-cfg->shift);
+ }
+ val = val | regvalue;
+
+ ret = abx500_set_register_interruptible(dev,
+ (u8)cfg->bank, (u8)cfg->addr, (u8)val);
+ if (ret < 0) {
+ pr_err("abx500_set_reg failed %d, %d", ret, __LINE__);
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
+
+static ssize_t ab8500_hwreg_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[128];
+ int buf_size, ret;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* get args and process */
+ ret = hwreg_common_write(buf, &hwreg_cfg, dev);
+ return (ret) ? ret : buf_size;
+}
+
+/*
+ * - irq subscribe/unsubscribe stuff
+ */
+static int ab8500_subscribe_unsubscribe_print(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%d\n", irq_first);
+
+ return 0;
+}
+
+static int ab8500_subscribe_unsubscribe_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_subscribe_unsubscribe_print,
+ inode->i_private);
+}
+
+/*
+ * Userspace should use poll() on this file. When an event occurs,
+ * the blocking poll() is released.
+ */
+static ssize_t show_irq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long name;
+ unsigned int irq_index;
+ int err;
+
+ err = strict_strtoul(attr->attr.name, 0, &name);
+ if (err)
+ return err;
+
+ irq_index = name - irq_first;
+ if (irq_index >= AB8500_NR_IRQS)
+ return -EINVAL;
+ else
+ return sprintf(buf, "%u\n", irq_count[irq_index]);
+}
+
+static ssize_t ab8500_subscribe_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ unsigned int irq_index;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val < irq_first) {
+ dev_err(dev, "debugfs error input < %d\n", irq_first);
+ return -EINVAL;
+ }
+ if (user_val > irq_last) {
+ dev_err(dev, "debugfs error input > %d\n", irq_last);
+ return -EINVAL;
+ }
+
+ irq_index = user_val - irq_first;
+ if (irq_index >= AB8500_NR_IRQS)
+ return -EINVAL;
+
+ /*
+ * This will create a sysfs file named <irq-nr> which userspace can
+ * use to select or poll and get the AB8500 events
+ */
+ dev_attr[irq_index] = kmalloc(sizeof(struct device_attribute),
+ GFP_KERNEL);
+ event_name[irq_index] = kmalloc(buf_size, GFP_KERNEL);
+ sprintf(event_name[irq_index], "%lu", user_val);
+ dev_attr[irq_index]->show = show_irq;
+ dev_attr[irq_index]->store = NULL;
+ dev_attr[irq_index]->attr.name = event_name[irq_index];
+ dev_attr[irq_index]->attr.mode = S_IRUGO;
+ err = sysfs_create_file(&dev->kobj, &dev_attr[irq_index]->attr);
+ if (err < 0) {
+ printk(KERN_ERR "sysfs_create_file failed %d\n", err);
+ return err;
+ }
+
+ err = request_threaded_irq(user_val, NULL, ab8500_debug_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND, "ab8500-debug", &dev->kobj);
+ if (err < 0) {
+ printk(KERN_ERR "request_threaded_irq failed %d, %lu\n",
+ err, user_val);
+ sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
+ return err;
+ }
+
+ return buf_size;
+}
+
+static ssize_t ab8500_unsubscribe_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ unsigned int irq_index;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val < irq_first) {
+ dev_err(dev, "debugfs error input < %d\n", irq_first);
+ return -EINVAL;
+ }
+ if (user_val > irq_last) {
+ dev_err(dev, "debugfs error input > %d\n", irq_last);
+ return -EINVAL;
+ }
+
+ irq_index = user_val - irq_first;
+ if (irq_index >= AB8500_NR_IRQS)
+ return -EINVAL;
+
+ /* Reset the irq count when unsubscribing */
+ irq_count[irq_index] = 0;
+
+ if (dev_attr[irq_index])
+ sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
+
+
+ free_irq(user_val, &dev->kobj);
+ kfree(event_name[irq_index]);
+ kfree(dev_attr[irq_index]);
return count;
}
+/*
+ * - several debugfs node fops
+ */
+
static const struct file_operations ab8500_bank_fops = {
.open = ab8500_bank_open,
.write = ab8500_bank_write,
@@ -546,65 +1409,177 @@ static const struct file_operations ab8500_val_fops = {
.owner = THIS_MODULE,
};
+static const struct file_operations ab8500_subscribe_fops = {
+ .open = ab8500_subscribe_unsubscribe_open,
+ .write = ab8500_subscribe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_unsubscribe_fops = {
+ .open = ab8500_subscribe_unsubscribe_open,
+ .write = ab8500_unsubscribe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_hwreg_fops = {
+ .open = ab8500_hwreg_open,
+ .write = ab8500_hwreg_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static struct dentry *ab8500_dir;
-static struct dentry *ab8500_reg_file;
-static struct dentry *ab8500_bank_file;
-static struct dentry *ab8500_address_file;
-static struct dentry *ab8500_val_file;
+static struct dentry *ab8500_gpadc_dir;
static int __devinit ab8500_debug_probe(struct platform_device *plf)
{
+ struct dentry *file;
debug_bank = AB8500_MISC;
debug_address = AB8500_REV_REG & 0x00FF;
+ irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
+ if (irq_first < 0) {
+ dev_err(&plf->dev, "First irq not found, err %d\n",
+ irq_first);
+ return irq_first;
+ }
+
+ irq_last = platform_get_irq_byname(plf, "IRQ_LAST");
+ if (irq_last < 0) {
+ dev_err(&plf->dev, "Last irq not found, err %d\n",
+ irq_last);
+ return irq_last;
+ }
+
ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
if (!ab8500_dir)
- goto exit_no_debugfs;
+ goto err;
+
+ ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
+ ab8500_dir);
+ if (!ab8500_gpadc_dir)
+ goto err;
+
+ file = debugfs_create_file("all-bank-registers", S_IRUGO,
+ ab8500_dir, &plf->dev, &ab8500_registers_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-bank", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-address", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_address_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-value", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_val_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_subscribe_fops);
+ if (!file)
+ goto err;
- ab8500_reg_file = debugfs_create_file("all-bank-registers",
- S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
- if (!ab8500_reg_file)
- goto exit_destroy_dir;
+ file = debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_unsubscribe_fops);
+ if (!file)
+ goto err;
- ab8500_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops);
- if (!ab8500_bank_file)
- goto exit_destroy_reg;
+ file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_hwreg_fops);
+ if (!file)
+ goto err;
- ab8500_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev,
- &ab8500_address_fops);
- if (!ab8500_address_file)
- goto exit_destroy_bank;
+ file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bat_ctrl_fops);
+ if (!file)
+ goto err;
- ab8500_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops);
- if (!ab8500_val_file)
- goto exit_destroy_address;
+ file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_btemp_ball_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect1_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect2_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux1_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux2_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_bat_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_vbus_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_c_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_charger_c_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bk_bat_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_die_temp_fops);
+ if (!file)
+ goto err;
return 0;
-exit_destroy_address:
- debugfs_remove(ab8500_address_file);
-exit_destroy_bank:
- debugfs_remove(ab8500_bank_file);
-exit_destroy_reg:
- debugfs_remove(ab8500_reg_file);
-exit_destroy_dir:
- debugfs_remove(ab8500_dir);
-exit_no_debugfs:
+err:
+ if (ab8500_dir)
+ debugfs_remove_recursive(ab8500_dir);
dev_err(&plf->dev, "failed to create debugfs entries.\n");
return -ENOMEM;
}
static int __devexit ab8500_debug_remove(struct platform_device *plf)
{
- debugfs_remove(ab8500_val_file);
- debugfs_remove(ab8500_address_file);
- debugfs_remove(ab8500_bank_file);
- debugfs_remove(ab8500_reg_file);
- debugfs_remove(ab8500_dir);
-
+ debugfs_remove_recursive(ab8500_dir);
return 0;
}
diff --git a/drivers/mfd/ab8500-denc.c b/drivers/mfd/ab8500-denc.c
new file mode 100644
index 00000000000..17efee62110
--- /dev/null
+++ b/drivers/mfd/ab8500-denc.c
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson AB8500 DENC base driver
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/ab8500/denc-regs.h>
+#include <linux/mfd/ab8500/denc.h>
+
+#define AB8500_NAME "ab8500"
+#define AB8500_DENC_NAME "ab8500_denc"
+
+struct device_usage {
+ struct list_head list;
+ struct platform_device *pdev;
+ bool taken;
+};
+static LIST_HEAD(device_list);
+
+/* To get rid of the extra bank parameter: */
+#define AB8500_REG_BANK_NR(__reg) ((0xff00 & (__reg)) >> 8)
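+/* e.g. AB8500_GPIO_DIR5 (0x1014) encodes bank 0x10, register address 0x14 */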
+static inline u8 ab8500_rreg(struct device *dev, u32 reg)
+{
+ u8 val;
+ if (abx500_get_register_interruptible(dev, AB8500_REG_BANK_NR(reg),
+ reg, &val) < 0)
+ return 0;
+ else
+ return val;
+}
+
+static inline int ab8500_wreg(struct device *dev, u32 reg, u8 val)
+{
+ return abx500_set_register_interruptible(dev, AB8500_REG_BANK_NR(reg),
+ reg, val);
+}
+
+/* Only for use in the macro below: */
+static inline int _ab8500_wreg_fld(struct device *dev, u32 reg, u8 val,
+ u8 mask, u8 shift)
+{
+ int ret;
+ u8 org_val;
+
+ ret = abx500_get_register_interruptible(dev, AB8500_REG_BANK_NR(reg),
+ reg, &org_val);
+ if (ret < 0)
+ return ret;
+ else
+ ab8500_wreg(dev, reg,
+ (org_val & ~mask) | ((val << shift) & mask));
+ return 0;
+}
+
+#define ab8500_wr_fld(__d, __reg, __fld, __val) \
+ _ab8500_wreg_fld(__d, __reg, __val, __reg##_##__fld##_MASK, \
+ __reg##_##__fld##_SHIFT)
+
+#define ab8500_set_fld(__cur_val, __reg, __fld, __val) \
+ (((__cur_val) & ~__reg##_##__fld##_MASK) | \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK))
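+/*
+ * e.g. ab8500_set_fld(data, AB8500_CTRL3, RESET_DENC_N, 1) expands to use
+ * AB8500_CTRL3_RESET_DENC_N_MASK and AB8500_CTRL3_RESET_DENC_N_SHIFT.
+ */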
+
+#define AB8500_DENC_TRACE(__pd) dev_dbg(&(__pd)->dev, "%s\n", __func__)
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_ab8500_denc_dir;
+static struct dentry *debugfs_ab8500_dump_regs_file;
+static void ab8500_denc_conf_ddr(struct platform_device *pdev);
+static int debugfs_ab8500_open_file(struct inode *inode, struct file *file);
+static ssize_t debugfs_ab8500_dump_regs(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_ab8500_dump_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_ab8500_open_file,
+ .read = debugfs_ab8500_dump_regs,
+};
+#endif /* CONFIG_DEBUG_FS */
+
+static int __devinit ab8500_denc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab8500_platform_data *ab8500_pdata =
+ dev_get_platdata(pdev->dev.parent);
+ struct ab8500_denc_platform_data *pdata;
+ struct device_usage *device_data;
+
+ AB8500_DENC_TRACE(pdev);
+
+ if (ab8500_pdata == NULL) {
+ dev_err(&pdev->dev, "AB8500 platform data missing\n");
+ return -EINVAL;
+ }
+
+ pdata = ab8500_pdata->denc;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "Denc platform data missing\n");
+ return -EINVAL;
+ }
+
+ device_data = kzalloc(sizeof(struct device_usage), GFP_KERNEL);
+ if (!device_data) {
+ dev_err(&pdev->dev, "Failed to allocate device data\n");
+ return -ENOMEM;
+ }
+ device_data->pdev = pdev;
+ list_add_tail(&device_data->list, &device_list);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_ab8500_denc_dir = debugfs_create_dir(pdev->name, NULL);
+ debugfs_ab8500_dump_regs_file = debugfs_create_file(
+ "dumpregs", S_IRUGO,
+ debugfs_ab8500_denc_dir, &pdev->dev,
+ &debugfs_ab8500_dump_regs_fops
+ );
+#endif /* CONFIG_DEBUG_FS */
+ return ret;
+}
+
+static int __devexit ab8500_denc_remove(struct platform_device *pdev)
+{
+ struct list_head *element;
+ struct device_usage *device_data;
+
+ AB8500_DENC_TRACE(pdev);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(debugfs_ab8500_dump_regs_file);
+ debugfs_remove(debugfs_ab8500_denc_dir);
+#endif /* CONFIG_DEBUG_FS */
+
+ list_for_each(element, &device_list) {
+ device_data = list_entry(element, struct device_usage, list);
+ if (device_data->pdev == pdev) {
+ list_del(element);
+ kzfree(device_data);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver ab8500_denc_driver = {
+ .probe = ab8500_denc_probe,
+ .remove = ab8500_denc_remove,
+ .driver = {
+ .name = "ab8500-denc",
+ },
+};
+
+static void setup_27mhz(struct platform_device *pdev, bool enable)
+{
+ u8 data = ab8500_rreg(&pdev->dev, AB8500_SYS_ULP_CLK_CONF);
+
+ AB8500_DENC_TRACE(pdev);
+ /* TODO: check if this field needs to be set */
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_PD_ENA,
+ true);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_BUF_ENA,
+ enable);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_INV,
+ false);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_DE_IN,
+ false);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_STRE,
+ 1);
+ ab8500_wreg(&pdev->dev, AB8500_SYS_ULP_CLK_CONF, data);
+
+ data = ab8500_rreg(&pdev->dev, AB8500_SYS_CLK_CTRL);
+ data = ab8500_set_fld(data, AB8500_SYS_CLK_CTRL, TVOUT_CLK_VALID,
+ enable);
+ data = ab8500_set_fld(data, AB8500_SYS_CLK_CTRL, TVOUT_PLL_ENA,
+ enable);
+ ab8500_wreg(&pdev->dev, AB8500_SYS_CLK_CTRL, data);
+}
+
+static u32 map_tv_std(enum ab8500_denc_TV_std std)
+{
+ switch (std) {
+ case TV_STD_PAL_BDGHI:
+ return AB8500_DENC_CONF0_STD_PAL_BDGHI;
+ case TV_STD_PAL_N:
+ return AB8500_DENC_CONF0_STD_PAL_N;
+ case TV_STD_PAL_M:
+ return AB8500_DENC_CONF0_STD_PAL_M;
+ case TV_STD_NTSC_M:
+ return AB8500_DENC_CONF0_STD_NTSC_M;
+ default:
+ return 0;
+ }
+}
+
+static u32 map_cr_filter(enum ab8500_denc_cr_filter_bandwidth bw)
+{
+ switch (bw) {
+ case TV_CR_NTSC_LOW_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_1MHZ;
+ case TV_CR_PAL_LOW_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_3MHZ;
+ case TV_CR_NTSC_HIGH_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_6MHZ;
+ case TV_CR_PAL_HIGH_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_9MHZ;
+ default:
+ return 0;
+ }
+}
+
+static u32 map_phase_rst_mode(enum ab8500_denc_phase_reset_mode mode)
+{
+ switch (mode) {
+ case TV_PHASE_RST_MOD_DISABLE:
+ return AB8500_DENC_CONF8_PH_RST_MODE_DISABLED;
+ case TV_PHASE_RST_MOD_FROM_PHASE_BUF:
+ return AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_PHASE_BUF;
+ case TV_PHASE_RST_MOD_FROM_INC_DFS:
+ return AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_INC_DFS;
+ case TV_PHASE_RST_MOD_RST:
+ return AB8500_DENC_CONF8_PH_RST_MODE_RESET;
+ default:
+ return 0;
+ }
+}
+
+static u32 map_plug_time(enum ab8500_denc_plug_time time)
+{
+ switch (time) {
+ case TV_PLUG_TIME_0_5S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_0_5S;
+ case TV_PLUG_TIME_1S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_1S;
+ case TV_PLUG_TIME_1_5S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_1_5S;
+ case TV_PLUG_TIME_2S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_2S;
+ case TV_PLUG_TIME_2_5S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_2_5S;
+ case TV_PLUG_TIME_3S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_3S;
+ default:
+ return 0;
+ }
+}
+
+struct platform_device *ab8500_denc_get_device(void)
+{
+ struct list_head *element;
+ struct device_usage *device_data;
+
+ pr_debug("%s\n", __func__);
+ list_for_each(element, &device_list) {
+ device_data = list_entry(element, struct device_usage, list);
+ if (!device_data->taken) {
+ device_data->taken = true;
+ return device_data->pdev;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(ab8500_denc_get_device);
+
+void ab8500_denc_put_device(struct platform_device *pdev)
+{
+ struct list_head *element;
+ struct device_usage *device_data;
+
+ AB8500_DENC_TRACE(pdev);
+ list_for_each(element, &device_list) {
+ device_data = list_entry(element, struct device_usage, list);
+ if (device_data->pdev == pdev)
+ device_data->taken = false;
+ }
+}
+EXPORT_SYMBOL(ab8500_denc_put_device);
+
+void ab8500_denc_reset(struct platform_device *pdev, bool hard)
+{
+ AB8500_DENC_TRACE(pdev);
+ if (hard) {
+ u8 data = ab8500_rreg(&pdev->dev, AB8500_CTRL3);
+ /* reset start */
+ ab8500_wreg(&pdev->dev, AB8500_CTRL3,
+ ab8500_set_fld(data, AB8500_CTRL3, RESET_DENC_N, 0)
+ );
+ /* reset done */
+ ab8500_wreg(&pdev->dev, AB8500_CTRL3,
+ ab8500_set_fld(data, AB8500_CTRL3, RESET_DENC_N, 1)
+ );
+ } else {
+ ab8500_wr_fld(&pdev->dev, AB8500_DENC_CONF6, SOFT_RESET, 1);
+ mdelay(10);
+ }
+}
+EXPORT_SYMBOL(ab8500_denc_reset);
+
+void ab8500_denc_power_up(struct platform_device *pdev)
+{
+ setup_27mhz(pdev, true);
+}
+EXPORT_SYMBOL(ab8500_denc_power_up);
+
+void ab8500_denc_power_down(struct platform_device *pdev)
+{
+ setup_27mhz(pdev, false);
+}
+EXPORT_SYMBOL(ab8500_denc_power_down);
+
+void ab8500_denc_conf(struct platform_device *pdev,
+ struct ab8500_denc_conf *conf)
+{
+ u8 data;
+
+ AB8500_DENC_TRACE(pdev);
+
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF0,
+ AB8500_VAL2REG(AB8500_DENC_CONF0, STD, map_tv_std(conf->TV_std))
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF0, SYNC,
+ conf->test_pattern ? AB8500_DENC_CONF0_SYNC_AUTO_TEST :
+ AB8500_DENC_CONF0_SYNC_F_BASED_SLAVE
+ )
+ );
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF1,
+ AB8500_VAL2REG(AB8500_DENC_CONF1, BLK_LI,
+ !conf->partial_blanking)
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF1, FLT,
+ map_cr_filter(conf->cr_filter))
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF1, CO_KI, conf->suppress_col)
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF1, SETUP_MAIN,
+ conf->black_level_setup)
+ /* TODO: handle cc field: set to 0 now */
+ );
+
+ data = ab8500_rreg(&pdev->dev, AB8500_DENC_CONF2);
+ data = ab8500_set_fld(data, AB8500_DENC_CONF2, N_INTRL,
+ conf->progressive);
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF2, data);
+
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF8,
+ AB8500_VAL2REG(AB8500_DENC_CONF8, PH_RST_MODE,
+ map_phase_rst_mode(conf->phase_reset_mode))
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF8, VAL_422_MUX,
+ conf->act_output)
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF8, BLK_ALL,
+ conf->blank_all)
+ );
+ data = ab8500_rreg(&pdev->dev, AB8500_TVOUT_CTRL);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, DAC_CTRL0,
+ conf->dac_enable);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, DAC_CTRL1,
+ conf->act_dc_output);
+ ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL, data);
+
+ /* no support for DDR in early versions */
+ if (AB8500_REG2VAL(AB8500_REV, FULL_MASK,
+ ab8500_rreg(&pdev->dev, AB8500_REV)) > 0)
+ ab8500_denc_conf_ddr(pdev);
+}
+EXPORT_SYMBOL(ab8500_denc_conf);
+
+void ab8500_denc_conf_plug_detect(struct platform_device *pdev,
+ bool enable, bool load_RC,
+ enum ab8500_denc_plug_time time)
+{
+ u8 data;
+
+ AB8500_DENC_TRACE(pdev);
+ data = ab8500_rreg(&pdev->dev, AB8500_TVOUT_CTRL);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, TV_PLUG_ON, enable);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, TV_LOAD_RC, load_RC);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, PLUG_TV_TIME,
+ map_plug_time(time));
+ ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL, data);
+}
+EXPORT_SYMBOL(ab8500_denc_conf_plug_detect);
+
+void ab8500_denc_mask_int_plug_det(struct platform_device *pdev, bool plug,
+ bool unplug)
+{
+ u8 data = ab8500_rreg(&pdev->dev, AB8500_IT_MASK1);
+
+ AB8500_DENC_TRACE(pdev);
+ data = ab8500_set_fld(data, AB8500_IT_MASK1, PLUG_TV_DET, plug);
+ data = ab8500_set_fld(data, AB8500_IT_MASK1, UNPLUG_TV_DET, unplug);
+ ab8500_wreg(&pdev->dev, AB8500_IT_MASK1, data);
+}
+EXPORT_SYMBOL(ab8500_denc_mask_int_plug_det);
+
+static void ab8500_denc_conf_ddr(struct platform_device *pdev)
+{
+ struct ab8500_platform_data *core_pdata;
+ struct ab8500_denc_platform_data *denc_pdata;
+
+ AB8500_DENC_TRACE(pdev);
+ core_pdata = dev_get_platdata(pdev->dev.parent);
+ denc_pdata = core_pdata->denc;
+ ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL2,
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL2,
+ DENC_DDR, denc_pdata->ddr_enable) |
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL2, SWAP_DDR_DATA_IN,
+ denc_pdata->ddr_little_endian));
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int debugfs_ab8500_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUG_BUF_SIZE 900
+
+#define AB8500_GPIO_DIR5 0x1014
+#define AB8500_GPIO_DIR5_35_SHIFT 2
+#define AB8500_GPIO_DIR5_35_MASK (1 << AB8500_GPIO_DIR5_35_SHIFT)
+#define AB8500_GPIO_OUT5 0x1024
+#define AB8500_GPIO_OUT5_35_SHIFT 2
+#define AB8500_GPIO_OUT5_35_MASK (1 << AB8500_GPIO_OUT5_35_SHIFT)
+#define AB8500_GPIO_OUT5_35_VIDEO 0
+#define AB8500_GPIO_OUT5_35_AUDIO 1
+#define AB8500_GPIO_NPUD5 0x1034
+#define AB8500_GPIO_NPUD5_35_SHIFT 2
+#define AB8500_GPIO_NPUD5_35_MASK (1 << AB8500_GPIO_NPUD5_35_SHIFT)
+#define AB8500_GPIO_NPUD5_35_ACTIVE 0
+#define AB8500_GPIO_NPUD5_35_INACTIVE 1
+
+static ssize_t debugfs_ab8500_dump_regs(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int ret = 0;
+ size_t data_size = 0;
+ char buffer[DEBUG_BUF_SIZE];
+ struct device *dev = file->private_data;
+
+ data_size += sprintf(buffer + data_size,
+ "AB8500 DENC registers:\n"
+ "------Regulators etc ----------\n"
+ "CTRL3 : 0x%04x = 0x%02x\n"
+ "SYSULPCLK_CONF: 0x%04x = 0x%02x\n"
+ "SYSCLK_CTRL : 0x%04x = 0x%02x\n"
+ "REGU_MISC1 : 0x%04x = 0x%02x\n"
+ "VAUX12_REGU : 0x%04x = 0x%02x\n"
+ "VAUX1_SEL1 : 0x%04x = 0x%02x\n"
+ "------TVout only --------------\n"
+ "DENC_CONF0 : 0x%04x = 0x%02x\n"
+ "DENC_CONF1 : 0x%04x = 0x%02x\n"
+ "DENC_CONF2 : 0x%04x = 0x%02x\n"
+ "DENC_CONF6 : 0x%04x = 0x%02x\n"
+ "DENC_CONF8 : 0x%04x = 0x%02x\n"
+ "TVOUT_CTRL : 0x%04x = 0x%02x\n"
+ "TVOUT_CTRL2 : 0x%04x = 0x%02x\n"
+ "IT_MASK1 : 0x%04x = 0x%02x\n"
+ "------AV connector-------------\n"
+ "GPIO_DIR5 : 0x%04x = 0x%02x\n"
+ "GPIO_OUT5 : 0x%04x = 0x%02x\n"
+ "GPIO_NPUD5 : 0x%04x = 0x%02x\n"
+ ,
+ AB8500_CTRL3, ab8500_rreg(dev, AB8500_CTRL3),
+ AB8500_SYS_ULP_CLK_CONF, ab8500_rreg(dev,
+ AB8500_SYS_ULP_CLK_CONF),
+ AB8500_SYS_CLK_CTRL, ab8500_rreg(dev, AB8500_SYS_CLK_CTRL),
+ AB8500_REGU_MISC1, ab8500_rreg(dev, AB8500_REGU_MISC1),
+ AB8500_VAUX12_REGU, ab8500_rreg(dev, AB8500_VAUX12_REGU),
+ AB8500_VAUX1_SEL, ab8500_rreg(dev, AB8500_VAUX1_SEL),
+ AB8500_DENC_CONF0, ab8500_rreg(dev, AB8500_DENC_CONF0),
+ AB8500_DENC_CONF1, ab8500_rreg(dev, AB8500_DENC_CONF1),
+ AB8500_DENC_CONF2, ab8500_rreg(dev, AB8500_DENC_CONF2),
+ AB8500_DENC_CONF6, ab8500_rreg(dev, AB8500_DENC_CONF6),
+ AB8500_DENC_CONF8, ab8500_rreg(dev, AB8500_DENC_CONF8),
+ AB8500_TVOUT_CTRL, ab8500_rreg(dev, AB8500_TVOUT_CTRL),
+ AB8500_TVOUT_CTRL2, ab8500_rreg(dev, AB8500_TVOUT_CTRL2),
+ AB8500_IT_MASK1, ab8500_rreg(dev, AB8500_IT_MASK1),
+ AB8500_GPIO_DIR5, ab8500_rreg(dev, AB8500_GPIO_DIR5),
+ AB8500_GPIO_OUT5, ab8500_rreg(dev, AB8500_GPIO_OUT5),
+ AB8500_GPIO_NPUD5, ab8500_rreg(dev, AB8500_GPIO_NPUD5)
+ );
+ if (data_size >= DEBUG_BUF_SIZE) {
+ printk(KERN_EMERG "AB8500 DENC: Buffer overrun\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* check if read done */
+ if (*f_pos > data_size)
+ goto out;
+
+ if (*f_pos + count > data_size)
+ count = data_size - *f_pos;
+
+ if (copy_to_user(buf, buffer + *f_pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+out:
+ return ret;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/* Module init */
+static int __init ab8500_denc_init(void)
+{
+ return platform_driver_register(&ab8500_denc_driver);
+}
+module_init(ab8500_denc_init);
+
+static void __exit ab8500_denc_exit(void)
+{
+ platform_driver_unregister(&ab8500_denc_driver);
+}
+module_exit(ab8500_denc_exit);
+
+MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson AB8500 DENC driver");
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index c39fc716e1d..515e360fb09 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -130,16 +130,12 @@ static LIST_HEAD(ab8500_gpadc_list);
* ab8500_gpadc_get() - returns a reference to the primary AB8500 GPADC
* (i.e. the first GPADC in the instance list)
*/
-struct ab8500_gpadc *ab8500_gpadc_get(char *name)
+struct ab8500_gpadc *ab8500_gpadc_get(void)
{
struct ab8500_gpadc *gpadc;
+ gpadc = list_first_entry(&ab8500_gpadc_list, struct ab8500_gpadc, node);
- list_for_each_entry(gpadc, &ab8500_gpadc_list, node) {
- if (!strcmp(name, dev_name(gpadc->dev)))
- return gpadc;
- }
-
- return ERR_PTR(-ENOENT);
+ return gpadc;
}
EXPORT_SYMBOL(ab8500_gpadc_get);
@@ -344,7 +340,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
* Delay might be needed for ABB8500 cut 3.0, if not, remove
 * when hardware will be available
*/
- msleep(1);
+ mdelay(1);
break;
}
/* Intentional fallthrough */
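With the signature change above, callers no longer look the GPADC up by device name. A minimal sketch of the updated calling convention follows; the channel value and the error handling are illustrative assumptions, not taken from this patch:

/* Sketch: read one raw sample from the primary (first registered) GPADC. */
static int example_read_gpadc(u8 channel)
{
	struct ab8500_gpadc *gpadc = ab8500_gpadc_get();

	if (IS_ERR_OR_NULL(gpadc))
		return -ENODEV;

	return ab8500_gpadc_read_raw(gpadc, channel);
}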
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
index 087fecd71ce..e05836723ac 100644
--- a/drivers/mfd/ab8500-i2c.c
+++ b/drivers/mfd/ab8500-i2c.c
@@ -13,6 +13,7 @@
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/db8500-prcmu.h>
+
static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
{
int ret;
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index c28d4eb1eff..d5865d41514 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -7,12 +7,114 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reboot.h>
+#include <linux/signal.h>
+#include <linux/power_supply.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-sysctrl.h>
+#include <linux/time.h>
+#include <linux/hwmon.h>
static struct device *sysctrl_dev;
+void ab8500_power_off(void)
+{
+ struct ab8500_platform_data *plat;
+ struct timespec ts;
+ sigset_t old;
+ sigset_t all;
+ static char *pss[] = {"ab8500_ac", "ab8500_usb"};
+ int i;
+ bool charger_present = false;
+ union power_supply_propval val;
+ struct power_supply *psy;
+ int ret;
+
+ /*
+ * If we have a charger connected and we're powering off,
+ * reboot into charge-only mode.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(pss); i++) {
+ psy = power_supply_get_by_name(pss[i]);
+ if (!psy)
+ continue;
+
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
+
+ if (!ret && val.intval) {
+ charger_present = true;
+ break;
+ }
+ }
+
+ if (!charger_present)
+ goto shutdown;
+
+ /* Check if battery is known */
+ psy = power_supply_get_by_name("ab8500_btemp");
+ if (psy) {
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_TECHNOLOGY,
+ &val);
+ if (!ret && val.intval != POWER_SUPPLY_TECHNOLOGY_UNKNOWN) {
+ printk(KERN_INFO
+ "Charger \"%s\" is connected with known battery."
+ " Rebooting.\n",
+ pss[i]);
+ machine_restart("charging");
+ }
+ }
+
+shutdown:
+ sigfillset(&all);
+
+ plat = dev_get_platdata(sysctrl_dev->parent);
+ getnstimeofday(&ts);
+ if (!sigprocmask(SIG_BLOCK, &all, &old)) {
+ if (ts.tv_sec == 0 ||
+ (ts.tv_sec - plat->thermal_set_time_sec >
+ plat->thermal_time_out))
+ plat->thermal_power_off_pending = false;
+ if (!plat->thermal_power_off_pending) {
+ (void)ab8500_sysctrl_set(AB8500_STW4500CTRL1,
+ AB8500_STW4500CTRL1_SWOFF |
+ AB8500_STW4500CTRL1_SWRESET4500N);
+ (void)sigprocmask(SIG_SETMASK, &old, NULL);
+ } else {
+ (void)ab8500_sysctrl_set(AB8500_STW4500CTRL1,
+ AB8500_STW4500CTRL1_THDB8500SWOFF |
+ AB8500_STW4500CTRL1_SWRESET4500N);
+ (void)sigprocmask(SIG_SETMASK, &old, NULL);
+ }
+ }
+}
+
+static int ab8500_notifier_call(struct notifier_block *this,
+ unsigned long val, void *data)
+{
+ struct ab8500_platform_data *plat;
+ static struct timespec ts;
+ if (sysctrl_dev == NULL)
+ return -EAGAIN;
+
+ plat = dev_get_platdata(sysctrl_dev->parent);
+ if (val) {
+ getnstimeofday(&ts);
+ plat->thermal_set_time_sec = ts.tv_sec;
+ plat->thermal_power_off_pending = true;
+ } else {
+ plat->thermal_set_time_sec = 0;
+ plat->thermal_power_off_pending = false;
+ }
+ return 0;
+}
+
+static struct notifier_block ab8500_notifier = {
+ .notifier_call = ab8500_notifier_call,
+};
+
static inline bool valid_bank(u8 bank)
{
return ((bank == AB8500_SYS_CTRL1_BLOCK) ||
@@ -33,6 +135,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value)
return abx500_get_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), value);
}
+EXPORT_SYMBOL(ab8500_sysctrl_read);
int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
{
@@ -48,10 +151,42 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), mask, value);
}
+EXPORT_SYMBOL(ab8500_sysctrl_write);
static int __devinit ab8500_sysctrl_probe(struct platform_device *pdev)
{
+ struct ab8500_platform_data *plat;
+ struct ab8500_sysctrl_platform_data *pdata;
+
sysctrl_dev = &pdev->dev;
+ plat = dev_get_platdata(pdev->dev.parent);
+ if (plat->pm_power_off)
+ pm_power_off = ab8500_power_off;
+ hwmon_notifier_register(&ab8500_notifier);
+
+ pdata = plat->sysctrl;
+
+ if (pdata) {
+ int ret;
+ int i;
+ int j;
+ for (i = AB8500_SYSCLKREQ1RFCLKBUF;
+ i <= AB8500_SYSCLKREQ8RFCLKBUF; i++) {
+ j = i - AB8500_SYSCLKREQ1RFCLKBUF;
+ ret = ab8500_sysctrl_write(i, 0xff,
+ pdata->initial_req_buf_config[j]);
+ dev_dbg(&pdev->dev,
+ "Setting SysClkReq%dRfClkBuf 0x%X\n",
+ j + 1,
+ pdata->initial_req_buf_config[j]);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "unable to set sysClkReq%dRfClkBuf: "
+ "%d\n", j + 1, ret);
+ }
+ }
+ }
+
return 0;
}
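Since ab8500_sysctrl_read() and ab8500_sysctrl_write() are now exported, other modules can access the system-control bank. A hedged sketch using register bits that appear earlier in this patch; the caller context is assumed:

/* Sketch: read CTRL1, then request a software power-off via sysctrl. */
static int example_sysctrl_swoff(void)
{
	u8 ctrl1;
	int ret;

	ret = ab8500_sysctrl_read(AB8500_STW4500CTRL1, &ctrl1);
	if (ret < 0)
		return ret;

	return ab8500_sysctrl_write(AB8500_STW4500CTRL1,
				    AB8500_STW4500CTRL1_SWOFF |
				    AB8500_STW4500CTRL1_SWRESET4500N,
				    AB8500_STW4500CTRL1_SWOFF |
				    AB8500_STW4500CTRL1_SWRESET4500N);
}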
diff --git a/drivers/mfd/db5500-prcmu-regs.h b/drivers/mfd/db5500-prcmu-regs.h
new file mode 100644
index 00000000000..0428b5e95ae
--- /dev/null
+++ b/drivers/mfd/db5500-prcmu-regs.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef __MACH_PRCMU_REGS_DB5500_H
+#define __MACH_PRCMU_REGS_DB5500_H
+
+#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
+
+#define PRCM_TCR 0x1C8
+#define PRCM_TCR_TENSEL_MASK BITS(0, 7)
+#define PRCM_TCR_STOP_TIMERS BIT(16)
+#define PRCM_TCR_DOZE_MODE BIT(17)
+
+/* PRCMU HW semaphore */
+#define PRCM_SEM 0x400
+#define PRCM_SEM_PRCM_SEM BIT(0)
+
+#define DB5500_PRCM_ACLK_MGT 0x004
+#define DB5500_PRCM_SVACLK_MGT 0x008
+#define DB5500_PRCM_SIACLK_MGT 0x00C
+#define DB5500_PRCM_SGACLK_MGT 0x014
+#define DB5500_PRCM_UARTCLK_MGT 0x018
+#define DB5500_PRCM_MSP02CLK_MGT 0x01C
+#define DB5500_PRCM_I2CCLK_MGT 0x020
+#define DB5500_PRCM_SDMMCCLK_MGT 0x024
+#define DB5500_PRCM_PER1CLK_MGT 0x02C
+#define DB5500_PRCM_PER2CLK_MGT 0x030
+#define DB5500_PRCM_PER3CLK_MGT 0x034
+#define DB5500_PRCM_PER5CLK_MGT 0x038
+#define DB5500_PRCM_PER6CLK_MGT 0x03C
+#define DB5500_PRCM_IRDACLK_MGT 0x040
+#define DB5500_PRCM_PWMCLK_MGT 0x044
+#define DB5500_PRCM_SPARE1CLK_MGT 0x048
+#define DB5500_PRCM_IRRCCLK_MGT 0x04C
+#define DB5500_PRCM_HDMICLK_MGT 0x058
+#define DB5500_PRCM_APEATCLK_MGT 0x05C
+#define DB5500_PRCM_APETRACECLK_MGT 0x060
+#define DB5500_PRCM_MCDECLK_MGT 0x064
+#define DB5500_PRCM_DSIALTCLK_MGT 0x06C
+#define DB5500_PRCM_DMACLK_MGT 0x074
+#define DB5500_PRCM_B2R2CLK_MGT 0x078
+#define DB5500_PRCM_TVCLK_MGT 0x07C
+#define DB5500_PRCM_RNGCLK_MGT 0x284
+
+#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
+#define PRCM_CLK_MGT_CLKPLLDIV_SHIFT 0
+#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
+#define PRCM_CLK_MGT_CLKEN BIT(8)
+
+#define PRCM_ARM_IT1_CLEAR 0x48C
+#define PRCM_ARM_IT1_VAL 0x494
+
+/* CPU mailbox registers */
+#define PRCM_MBOX_CPU_VAL 0x0FC
+#define PRCM_MBOX_CPU_SET 0x100
+
+/* System reset register */
+#define PRCM_APE_SOFTRST 0x228
+
+/* PRCMU clock/PLL/reset registers */
+#define PRCM_PLLDSI_FREQ 0x500
+#define PRCM_PLLDSI_ENABLE 0x504
+#define PRCM_PLLDSI_LOCKP 0x508
+#define PRCM_DSI_PLLOUT_SEL 0x530
+#define PRCM_DSITVCLK_DIV 0x52C
+#define PRCM_APE_RESETN_SET 0x1E4
+#define PRCM_APE_RESETN_CLR 0x1E8
+
+/* CLKOUTx SEL0 settings */
+#define CLKOUT_SEL0_REF_CLK 0x01 /* 0b 0001 */
+#define CLKOUT_SEL0_RTC_CLK0 0x02 /* 0b 0010 */
+#define CLKOUT_SEL0_ULP_CLK 0x04 /* 0b 0100 */
+#define CLKOUT_SEL0_SEL_CLK 0x08 /* 0b 1000 */
+
+/* CLKOUTx SEL settings */
+#define CLKOUT_SEL_STATIC0 0x0001 /* 0b 00 0000 0001 */
+#define CLKOUT_SEL_REFCLK 0x0002 /* 0b 00 0000 0010 */
+#define CLKOUT_SEL_ULPCLK 0x0004 /* 0b 00 0000 0100 */
+#define CLKOUT_SEL_ARMCLK 0x0008 /* 0b 00 0000 1000 */
+#define CLKOUT_SEL_SYSACC0CLK 0x0010 /* 0b 00 0001 0000 */
+#define CLKOUT_SEL_SOC0PLLCLK 0x0020 /* 0b 00 0010 0000 */
+#define CLKOUT_SEL_SOC1PLLCLK 0x0040 /* 0b 00 0100 0000 */
+#define CLKOUT_SEL_DDRPLLCLK 0x0080 /* 0b 00 1000 0000 */
+#define CLKOUT_SEL_TVCLK 0x0100 /* 0b 01 0000 0000 */
+#define CLKOUT_SEL_IRDACLK 0x0200 /* 0b 10 0000 0000 */
+
+/* CLKOUTx dividers */
+#define CLKOUT_DIV_2 0x00 /* 0b 000 */
+#define CLKOUT_DIV_4 0x01 /* 0b 001 */
+#define CLKOUT_DIV_8 0x02 /* 0b 010 */
+#define CLKOUT_DIV_16 0x03 /* 0b 011 */
+#define CLKOUT_DIV_32 0x04 /* 0b 100 */
+#define CLKOUT_DIV_64 0x05 /* 0b 101 */
+/* Values 0x06 and 0x07 will also set the CLKOUTx divider to 64. */
+
+/* PRCM_CLKOCR CLKOUTx Control registers */
+#define PRCM_CLKOCR 0x1CC
+#define PRCM_CLKOCR_CLKOUT0_SEL0_SHIFT 0
+#define PRCM_CLKOCR_CLKOUT0_SEL0_MASK BITS(0, 3)
+#define PRCM_CLKOCR_CLKOUT0_SEL_SHIFT 4
+#define PRCM_CLKOCR_CLKOUT0_SEL_MASK BITS(4, 13)
+#define PRCM_CLKOCR_CLKOUT1_SEL0_SHIFT 16
+#define PRCM_CLKOCR_CLKOUT1_SEL0_MASK BITS(16, 19)
+#define PRCM_CLKOCR_CLKOUT1_SEL_SHIFT 20
+#define PRCM_CLKOCR_CLKOUT1_SEL_MASK BITS(20, 29)
+
+/* PRCM_CLKODIV CLKOUTx Dividers */
+#define PRCM_CLKODIV 0x188
+#define PRCM_CLKODIV_CLKOUT0_DIV_SHIFT 0
+#define PRCM_CLKODIV_CLKOUT0_DIV_MASK BITS(0, 2)
+#define PRCM_CLKODIV_CLKOUT1_DIV_SHIFT 16
+#define PRCM_CLKODIV_CLKOUT1_DIV_MASK BITS(16, 18)
+
+#define PRCM_MMIP_LS_CLAMP_SET 0x420
+#define PRCM_MMIP_LS_CLAMP_CLR 0x424
+#define PRCM_DDR_SUBSYS_APE_MINBW 0x438
+
+/* Miscellaneous unit registers */
+#define PRCM_DSI_SW_RESET 0x324
+#define PRCM_RESOUTN_SET_OFFSET 0x214
+#define PRCM_RESOUTN_CLR_OFFSET 0x218
+
+/* APE - Modem Registers */
+#define PRCM_HOSTACCESS_REQ 0x334
+/* APE - Modem register bit manipulation */
+#define PRCM_HOSTACCESS_REQ_BIT BIT(0)
+#define PRCM_APE_ACK 0x49c
+#define PRCM_APE_ACK_BIT 0x01
+
+/* Watchdog - mtimer registers */
+#define PRCM_TIMER0_RTOS_COMP1_OFFSET 0x4C
+#define PRCM_TIMER0_RTOS_COUNTER_OFFSET 0x40
+#define PRCM_TIMER0_IRQ_EN_SET_OFFSET 0x70
+#define PRCM_TIMER0_IRQ_EN_CLR_OFFSET 0x6C
+#define PRCM_TIMER0_IRQ_RTOS1_SET 0x08
+#define PRCM_TIMER0_IRQ_RTOS1_CLR 0x08
+
+#endif
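The BITS(_start, _end) helper above builds an inclusive bit-range mask: BIT(_end) - BIT(_start) covers bits _start.._end-1 and adding BIT(_end) completes the range. A short worked illustration (values only, not code from this patch):

/*
 * Sketch: BITS() expands to an inclusive mask of bits _start.._end.
 *   BITS(0, 7)   == 0x000000ff   (PRCM_TCR_TENSEL_MASK)
 *   BITS(5, 7)   == 0x000000e0   (PRCM_CLK_MGT_CLKPLLSW_MASK)
 *   BITS(16, 18) == 0x00070000   (PRCM_CLKODIV_CLKOUT1_DIV_MASK)
 * A local compile-time check could read:
 *   BUILD_BUG_ON(BITS(0, 7) != 0xff);
 */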
diff --git a/drivers/mfd/db5500-prcmu.c b/drivers/mfd/db5500-prcmu.c
index bb115b2f04e..feb1ad1a328 100644
--- a/drivers/mfd/db5500-prcmu.c
+++ b/drivers/mfd/db5500-prcmu.c
@@ -19,12 +19,21 @@
#include <linux/irq.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/regulator/db5500-prcmu.h>
+#include <linux/regulator/machine.h>
#include <linux/interrupt.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db5500-regs.h>
-#include "dbx500-prcmu-regs.h"
+#include <mach/prcmu-debug.h>
+
+#include "db5500-prcmu-regs.h"
+
+#define PRCMU_FW_VERSION_OFFSET 0xA4
+#define PRCM_SW_RST_REASON (tcdm_base + 0xFF8) /* 2 bytes */
#define _PRCM_MB_HEADER (tcdm_base + 0xFE8)
#define PRCM_REQ_MB0_HEADER (_PRCM_MB_HEADER + 0x0)
@@ -64,6 +73,52 @@
#define PRCM_ACK_MB6 (tcdm_base + 0xF0C)
#define PRCM_ACK_MB7 (tcdm_base + 0xF08)
+/* Share info */
+#define PRCM_SHARE_INFO (tcdm_base + 0xEC8)
+
+#define PRCM_SHARE_INFO_HOTDOG (PRCM_SHARE_INFO + 62)
+
+/* Mailbox 0 REQs */
+#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0)
+#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x1)
+#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x2)
+#define PRCM_REQ_MB0_DDR_STATE (PRCM_REQ_MB0 + 0x3)
+#define PRCM_REQ_MB0_ESRAM0_STATE (PRCM_REQ_MB0 + 0x4)
+#define PRCM_REQ_MB0_WAKEUP_DBB (PRCM_REQ_MB0 + 0x8)
+#define PRCM_REQ_MB0_WAKEUP_ABB (PRCM_REQ_MB0 + 0xC)
+
+/* Mailbox 0 ACKs */
+#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0)
+#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1)
+#define PRCM_ACK_MB0_WAKEUP_0_DBB (PRCM_ACK_MB0 + 0x4)
+#define PRCM_ACK_MB0_WAKEUP_0_ABB (PRCM_ACK_MB0 + 0x8)
+#define PRCM_ACK_MB0_WAKEUP_1_DBB (PRCM_ACK_MB0 + 0x28)
+#define PRCM_ACK_MB0_WAKEUP_1_ABB (PRCM_ACK_MB0 + 0x2C)
+#define PRCM_ACK_MB0_EVENT_ABB_NUMBERS 20
+
+/* Request mailbox 1 fields. */
+#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
+#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
+
+/* Mailbox 1 ACKs */
+#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0)
+#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1)
+#define PRCM_ACK_MB1_ARM_VOLT_STATUS (PRCM_ACK_MB1 + 0x2)
+#define PRCM_ACK_MB1_APE_VOLT_STATUS (PRCM_ACK_MB1 + 0x3)
+
+/* Mailbox 2 REQs */
+#define PRCM_REQ_MB2_EPOD_CLIENT (PRCM_REQ_MB2 + 0x0)
+#define PRCM_REQ_MB2_EPOD_STATE (PRCM_REQ_MB2 + 0x1)
+#define PRCM_REQ_MB2_CLK_CLIENT (PRCM_REQ_MB2 + 0x2)
+#define PRCM_REQ_MB2_CLK_STATE (PRCM_REQ_MB2 + 0x3)
+#define PRCM_REQ_MB2_PLL_CLIENT (PRCM_REQ_MB2 + 0x4)
+#define PRCM_REQ_MB2_PLL_STATE (PRCM_REQ_MB2 + 0x5)
+
+/* Mailbox 2 ACKs */
+#define PRCM_ACK_MB2_EPOD_STATUS (PRCM_ACK_MB2 + 0x2)
+#define PRCM_ACK_MB2_CLK_STATUS (PRCM_ACK_MB2 + 0x6)
+#define PRCM_ACK_MB2_PLL_STATUS (PRCM_ACK_MB2 + 0xA)
+
enum mb_return_code {
RC_SUCCESS,
RC_FAIL,
@@ -71,12 +126,58 @@ enum mb_return_code {
/* Mailbox 0 headers. */
enum mb0_header {
- /* request */
- RMB0H_PWR_STATE_TRANS = 1,
- RMB0H_WAKE_UP_CFG,
- RMB0H_RD_WAKE_UP_ACK,
/* acknowledge */
- AMB0H_WAKE_UP = 1,
+ MB0H_WAKE_UP = 0,
+ /* request */
+ MB0H_PWR_STATE_TRANS,
+ MB0H_WAKE_UP_CFG,
+ MB0H_RD_WAKE_UP_ACK,
+};
+
+/* Mailbox 1 headers.*/
+enum mb1_header {
+ MB1H_ARM_OPP = 1,
+ MB1H_APE_OPP,
+ MB1H_ARM_APE_OPP,
+};
+
+/* Mailbox 2 headers. */
+enum mb2_header {
+ MB2H_EPOD_REQUEST = 1,
+ MB2H_CLK_REQUEST,
+ MB2H_PLL_REQUEST,
+};
+
+/* Mailbox 3 headers. */
+enum mb3_header {
+ MB3H_REFCLK_REQUEST = 1,
+};
+
+enum sysclk_state {
+ SYSCLK_OFF,
+ SYSCLK_ON,
+};
+
+/* Mailbox 4 headers */
+enum mb4_header {
+ MB4H_CFG_HOTDOG = 7,
+ MB4H_CFG_HOTMON = 8,
+ MB4H_CFG_HOTPERIOD = 10,
+ MB4H_CGF_MODEM_RESET = 13,
+ MB4H_CGF_A9WDOG_EN_PREBARK = 14,
+ MB4H_CGF_A9WDOG_EN_NOPREBARK = 15,
+ MB4H_CGF_A9WDOG_DIS = 16,
+};
+
+/* Mailbox 4 ACK headers */
+enum mb4_ack_header {
+ MB4H_ACK_CFG_HOTDOG = 5,
+ MB4H_ACK_CFG_HOTMON = 6,
+ MB4H_ACK_CFG_HOTPERIOD = 8,
+ MB4H_ACK_CFG_MODEM_RESET = 11,
+ MB4H_ACK_CGF_A9WDOG_EN_PREBARK = 12,
+ MB4H_ACK_CGF_A9WDOG_EN_NOPREBARK = 13,
+ MB4H_ACK_CGF_A9WDOG_DIS = 14,
};
/* Mailbox 5 headers. */
@@ -85,6 +186,69 @@ enum mb5_header {
MB5H_I2C_READ,
};
+enum db5500_arm_opp {
+ DB5500_ARM_100_OPP = 1,
+ DB5500_ARM_50_OPP,
+ DB5500_ARM_EXT_OPP,
+};
+
+enum db5500_ape_opp {
+ DB5500_APE_100_OPP = 1,
+ DB5500_APE_50_OPP
+};
+
+enum epod_state {
+ EPOD_OFF,
+ EPOD_ON,
+};
+enum epod_onoffret_state {
+ EPOD_OOR_OFF,
+ EPOD_OOR_RET,
+ EPOD_OOR_ON,
+};
+enum db5500_prcmu_pll {
+ DB5500_PLL_SOC0,
+ DB5500_PLL_SOC1,
+ DB5500_PLL_DDR,
+ DB5500_NUM_PLL_ID,
+};
+
+enum db5500_prcmu_clk {
+ DB5500_MSP1CLK,
+ DB5500_CDCLK,
+ DB5500_IRDACLK,
+ DB5500_TVCLK,
+ DB5500_NUM_CLK_CLIENTS,
+};
+
+enum on_off_ret {
+ OFF_ST,
+ RET_ST,
+ ON_ST,
+};
+
+enum db5500_ap_pwr_state {
+ DB5500_AP_SLEEP = 2,
+ DB5500_AP_DEEP_SLEEP,
+ DB5500_AP_IDLE,
+};
+
+/* Request mailbox 3 fields */
+#define PRCM_REQ_MB3_REFCLK_MGT (PRCM_REQ_MB3 + 0x0)
+
+/* Ack. mailbox 3 fields */
+#define PRCM_ACK_MB3_REFCLK_REQ (PRCM_ACK_MB3 + 0x0)
+
+
+/* Request mailbox 4 fields */
+#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 32)
+#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 34)
+#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 36)
+#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 38)
+
+/* Ack. mailbox 4 field */
+#define PRCM_ACK_MB4_REQUESTS (PRCM_ACK_MB4 + 0x0)
+
/* Request mailbox 5 fields. */
#define PRCM_REQ_MB5_I2C_SLAVE (PRCM_REQ_MB5 + 0)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 1)
@@ -105,11 +269,12 @@ enum mb5_header {
#define PRCMU_RESET_DSIPLL 0x00004000
#define PRCMU_UNCLAMP_DSIPLL 0x00400800
-/* HDMI CLK MGT PLLSW=001 (PLLSOC0), PLLDIV=0x8, = 50 Mhz*/
-#define PRCMU_DSI_CLOCK_SETTING 0x00000128
+/* HDMI CLK MGT PLLSW=001 (PLLSOC0), PLLDIV=0xC, = 33.33 MHz */
+#define PRCMU_DSI_CLOCK_SETTING 0x0000012C
/* TVCLK_MGT PLLSW=001 (PLLSOC0) PLLDIV=0x13, = 19.05 MHZ */
#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000135
-#define PRCMU_PLLDSI_FREQ_SETTING 0x00020121
+/* PRCM_PLLDSI_FREQ R=4, N=1, D= 0x65 */
+#define PRCMU_PLLDSI_FREQ_SETTING 0x00040165
#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000002
#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x03000201
#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00000101
@@ -125,13 +290,176 @@ enum mb5_header {
#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
/*
+ * Wakeups/IRQs
+ */
+
+#define WAKEUP_BIT_RTC BIT(0)
+#define WAKEUP_BIT_RTT0 BIT(1)
+#define WAKEUP_BIT_RTT1 BIT(2)
+#define WAKEUP_BIT_CD_IRQ BIT(3)
+#define WAKEUP_BIT_SRP_TIM BIT(4)
+#define WAKEUP_BIT_APE_REQ BIT(5)
+#define WAKEUP_BIT_USB BIT(6)
+#define WAKEUP_BIT_ABB BIT(7)
+#define WAKEUP_BIT_LOW_POWER_AUDIO BIT(8)
+#define WAKEUP_BIT_TEMP_SENSOR_LOW BIT(9)
+#define WAKEUP_BIT_ARM BIT(10)
+#define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
+#define WAKEUP_BIT_TEMP_SENSOR_HIGH BIT(12)
+#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
+#define WAKEUP_BIT_GPIO0 BIT(23)
+#define WAKEUP_BIT_GPIO1 BIT(24)
+#define WAKEUP_BIT_GPIO2 BIT(25)
+#define WAKEUP_BIT_GPIO3 BIT(26)
+#define WAKEUP_BIT_GPIO4 BIT(27)
+#define WAKEUP_BIT_GPIO5 BIT(28)
+#define WAKEUP_BIT_GPIO6 BIT(29)
+#define WAKEUP_BIT_GPIO7 BIT(30)
+#define WAKEUP_BIT_AC_REL_ACK BIT(30)
+
+/*
+ * This vector maps irq numbers to the bits in the bit field used in
+ * communication with the PRCMU firmware.
+ *
+ * The reason for having this is to keep the irq numbers contiguous even though
+ * the bits in the bit field are not. (The bits also have a tendency to move
+ * around, to further complicate matters.)
+ */
+#define IRQ_INDEX(_name) ((IRQ_DB5500_PRCMU_##_name) - IRQ_DB5500_PRCMU_BASE)
+#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
+static u32 prcmu_irq_bit[NUM_DB5500_PRCMU_WAKEUPS] = {
+ IRQ_ENTRY(RTC),
+ IRQ_ENTRY(RTT0),
+ IRQ_ENTRY(RTT1),
+ IRQ_ENTRY(CD_IRQ),
+ IRQ_ENTRY(SRP_TIM),
+ IRQ_ENTRY(APE_REQ),
+ IRQ_ENTRY(USB),
+ IRQ_ENTRY(ABB),
+ IRQ_ENTRY(LOW_POWER_AUDIO),
+ IRQ_ENTRY(TEMP_SENSOR_LOW),
+ IRQ_ENTRY(TEMP_SENSOR_HIGH),
+ IRQ_ENTRY(ARM),
+ IRQ_ENTRY(AC_WAKE_ACK),
+ IRQ_ENTRY(MODEM_SW_RESET_REQ),
+ IRQ_ENTRY(GPIO0),
+ IRQ_ENTRY(GPIO1),
+ IRQ_ENTRY(GPIO2),
+ IRQ_ENTRY(GPIO3),
+ IRQ_ENTRY(GPIO4),
+ IRQ_ENTRY(GPIO5),
+ IRQ_ENTRY(GPIO6),
+ IRQ_ENTRY(GPIO7),
+ IRQ_ENTRY(AC_REL_ACK),
+};
+
+#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
+#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
+static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
+ WAKEUP_ENTRY(RTC),
+ WAKEUP_ENTRY(RTT0),
+ WAKEUP_ENTRY(RTT1),
+ WAKEUP_ENTRY(CD_IRQ),
+ WAKEUP_ENTRY(USB),
+ WAKEUP_ENTRY(ABB),
+ WAKEUP_ENTRY(ARM)
+};
+
+/*
* mb0_transfer - state needed for mailbox 0 communication.
- * @lock: The transaction lock.
+ * @lock: The transaction lock.
+ * @dbb_irqs_lock: Lock used for (un)masking DBB wakeup interrupts.
+ * @mask_work: Work structure used for (un)masking wakeup interrupts.
+ * @ac_wake_lock: mutex to lock modem_req and modem_rel
+ * @req: Request data that need to persist between requests.
*/
static struct {
spinlock_t lock;
+ spinlock_t dbb_irqs_lock;
+ struct work_struct mask_work;
+ struct mutex ac_wake_lock;
+ struct {
+ u32 dbb_irqs;
+ u32 dbb_wakeups;
+ u32 abb_events;
+ } req;
} mb0_transfer;
+
+/*
+ * mb1_transfer - state needed for mailbox 1 communication.
+ * @lock: The transaction lock.
+ * @work: The transaction completion structure.
+ * @req_arm_opp: Requested ARM OPP.
+ * @req_ape_opp: Requested APE OPP.
+ * @ack: Reply ("acknowledge") data.
+ */
+static struct {
+ struct mutex lock;
+ struct completion work;
+ u8 req_arm_opp;
+ u8 req_ape_opp;
+ struct {
+ u8 header;
+ u8 arm_opp;
+ u8 ape_opp;
+ u8 arm_voltage_st;
+ u8 ape_voltage_st;
+ } ack;
+} mb1_transfer;
+
+/*
+ * mb2_transfer - state needed for mailbox 2 communication.
+ * @lock: The transaction lock.
+ * @work: The transaction completion structure.
+ * @req: Request data that need to persist between requests.
+ * @ack: Reply ("acknowledge") data.
+ */
+static struct {
+ struct mutex lock;
+ struct completion work;
+ struct {
+ u8 epod_st[DB5500_NUM_EPOD_ID];
+ u8 pll_st[DB5500_NUM_PLL_ID];
+ } req;
+ struct {
+ u8 header;
+ u8 status;
+ } ack;
+} mb2_transfer;
+
+/*
+ * mb3_transfer - state needed for mailbox 3 communication.
+ * @sysclk_lock: A lock used to handle concurrent sysclk requests.
+ * @sysclk_work: Work structure used for sysclk requests.
+ * @req_st: Requested clock state.
+ * @ack: Acknowledgement data
+ */
+static struct {
+ struct mutex sysclk_lock;
+ struct completion sysclk_work;
+ enum sysclk_state req_st;
+ struct {
+ u8 header;
+ u8 status;
+ } ack;
+} mb3_transfer;
+
+/*
+ * mb4_transfer - state needed for mailbox 4 communication.
+ * @lock: The transaction lock.
+ * @work: The transaction completion structure.
+ * @ack: Acknowledgement data
+ */
+static struct {
+ struct mutex lock;
+ struct completion work;
+ struct {
+ u8 header;
+ u8 status;
+ } ack;
+} mb4_transfer;
+
/*
* mb5_transfer - state needed for mailbox 5 communication.
* @lock: The transaction lock.
@@ -148,9 +476,825 @@ static struct {
} ack;
} mb5_transfer;
-/* PRCMU TCDM base IO address. */
+/* Spinlocks */
+static DEFINE_SPINLOCK(clkout_lock);
+
+/* PRCMU TCDM base IO address */
static __iomem void *tcdm_base;
+/* PRCMU MTIMER base IO address */
+static __iomem void *mtimer_base;
+
+struct clk_mgt {
+ unsigned int offset;
+ u32 pllsw;
+ u32 div;
+ bool scalable;
+ bool force50;
+};
+
+/* PRCMU Firmware Details */
+static struct {
+ u16 board;
+ u8 fw_version;
+ u8 api_version;
+} prcmu_version;
+
+static struct {
+ u32 timeout;
+ bool enabled;
+} a9wdog_timer;
+
+static DEFINE_SPINLOCK(clk_mgt_lock);
+
+#define CLK_MGT_ENTRY(_name, _scalable)[PRCMU_##_name] = { \
+ .offset = DB5500_PRCM_##_name##_MGT, \
+ .scalable = _scalable, \
+}
+
+static struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
+ CLK_MGT_ENTRY(SGACLK, true),
+ CLK_MGT_ENTRY(UARTCLK, false),
+ CLK_MGT_ENTRY(MSP02CLK, false),
+ CLK_MGT_ENTRY(I2CCLK, false),
+ [PRCMU_SDMMCCLK] = {
+ .offset = DB5500_PRCM_SDMMCCLK_MGT,
+ .force50 = true,
+ .scalable = false,
+ },
+ [PRCMU_SPARE1CLK] = {
+ .offset = DB5500_PRCM_SPARE1CLK_MGT,
+ .force50 = true,
+ .scalable = false,
+ },
+ CLK_MGT_ENTRY(PER1CLK, false),
+ CLK_MGT_ENTRY(PER2CLK, true),
+ CLK_MGT_ENTRY(PER3CLK, true),
+ CLK_MGT_ENTRY(PER5CLK, false), /* used for SPI */
+ CLK_MGT_ENTRY(PER6CLK, true),
+ CLK_MGT_ENTRY(PWMCLK, false),
+ CLK_MGT_ENTRY(IRDACLK, false),
+ CLK_MGT_ENTRY(IRRCCLK, false),
+ CLK_MGT_ENTRY(HDMICLK, false),
+ CLK_MGT_ENTRY(APEATCLK, false),
+ CLK_MGT_ENTRY(APETRACECLK, true),
+ CLK_MGT_ENTRY(MCDECLK, true),
+ CLK_MGT_ENTRY(DSIALTCLK, false),
+ CLK_MGT_ENTRY(DMACLK, true),
+ CLK_MGT_ENTRY(B2R2CLK, true),
+ CLK_MGT_ENTRY(TVCLK, false),
+ CLK_MGT_ENTRY(RNGCLK, false),
+ CLK_MGT_ENTRY(SIACLK, false),
+ CLK_MGT_ENTRY(SVACLK, false),
+ CLK_MGT_ENTRY(ACLK, true),
+};
+
+static atomic_t modem_req_state = ATOMIC_INIT(0);
+
+bool db5500_prcmu_is_modem_requested(void)
+{
+ return (atomic_read(&modem_req_state) != 0);
+}
+
+/**
+ * prcmu_modem_req - APE requests Modem to wake up
+ *
+ * Whenever the APE wants to send a message to the modem, it must call this
+ * function first to make sure that the modem is awake.
+ */
+void prcmu_modem_req(void)
+{
+ u32 val;
+
+ mutex_lock(&mb0_transfer.ac_wake_lock);
+
+ val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
+ if (val & PRCM_HOSTACCESS_REQ_BIT)
+ goto unlock_and_return;
+
+ writel((val | PRCM_HOSTACCESS_REQ_BIT),
+ (_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
+ atomic_set(&modem_req_state, 1);
+
+unlock_and_return:
+ mutex_unlock(&mb0_transfer.ac_wake_lock);
+
+}
+
+/**
+ * prcmu_modem_rel - APE has no more messages to send and hence releases the modem
+ *
+ * APE-to-modem communication is initiated by prcmu_modem_req(); once the
+ * communication is completed, the APE calls prcmu_modem_rel() to complete the
+ * protocol.
+ */
+void prcmu_modem_rel(void)
+{
+ u32 val;
+
+ mutex_lock(&mb0_transfer.ac_wake_lock);
+
+ val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
+ if (!(val & PRCM_HOSTACCESS_REQ_BIT))
+ goto unlock_and_return;
+
+ writel((val & ~PRCM_HOSTACCESS_REQ_BIT),
+ (_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
+
+ atomic_set(&modem_req_state, 0);
+
+unlock_and_return:
+ mutex_unlock(&mb0_transfer.ac_wake_lock);
+}
+
+/**
+ * prcmu_ape_ack - send an acknowledgement to the modem
+ *
+ * When the APE receives an ape_req, it has to acknowledge the interrupt.
+ * This function sends the acknowledgement by writing to the PRCMU register,
+ * which triggers an interrupt to the modem.
+ */
+void prcmu_ape_ack(void)
+{
+ writel(PRCM_APE_ACK_BIT, (_PRCMU_BASE + PRCM_APE_ACK));
+}
+
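The three helpers above form the APE side of the host-access handshake. A hedged sketch of the intended calling order; the message exchange itself is a placeholder and not an API from this patch:

/* Sketch: wake the modem, exchange messages, then release it again. */
static void example_modem_transaction(void)
{
	prcmu_modem_req();	/* assert HOSTACCESS_REQ; the modem wakes up */

	/* ... exchange IPC messages with the modem here (placeholder) ... */

	prcmu_modem_rel();	/* drop HOSTACCESS_REQ when done */
}

prcmu_ape_ack() is meant to be called from the wakeup path once an ape_req interrupt has been handled.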
+/**
+ * db5500_prcmu_modem_reset - assert a reset on the modem
+ *
+ * This function asserts a reset request to the modem. Prior to that,
+ * PRCM_HOSTACCESS_REQ must be '0'.
+ */
+void db5500_prcmu_modem_reset(void)
+{
+ mutex_lock(&mb4_transfer.lock);
+
+ /* PRCM_HOSTACCESS_REQ = 0, before asserting a reset */
+ prcmu_modem_rel();
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writeb(MB4H_CGF_MODEM_RESET, PRCM_REQ_MB4_HEADER);
+ writel(MBOX_BIT(4), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+ wait_for_completion(&mb4_transfer.work);
+ if (mb4_transfer.ack.status != RC_SUCCESS ||
+ mb4_transfer.ack.header != MB4H_CGF_MODEM_RESET)
+ printk(KERN_ERR
+ "ACK not received for modem reset interrupt\n");
+ mutex_unlock(&mb4_transfer.lock);
+}
+
+/**
+ * prcmu_config_clkout - Configure one of the programmable clock outputs.
+ * @clkout: The CLKOUT number (0 or 1).
+ * @source: Clock source.
+ * @div: The divider to be applied.
+ *
+ * Configures one of the programmable clock outputs (CLKOUTs).
+ */
+int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
+{
+ static bool configured[2] = {false, false};
+ int r = 0;
+ unsigned long flags;
+ u32 sel_val;
+ u32 div_val;
+ u32 sel_bits;
+ u32 div_bits;
+ u32 sel_mask;
+ u32 div_mask;
+ u8 sel0 = CLKOUT_SEL0_SEL_CLK;
+ u16 sel = 0;
+
+ BUG_ON(clkout > DB5500_CLKOUT1);
+ BUG_ON(source > DB5500_CLKOUT_IRDACLK);
+ BUG_ON(div > 7);
+
+ switch (source) {
+ case DB5500_CLKOUT_REF_CLK_SEL0:
+ sel0 = CLKOUT_SEL0_REF_CLK;
+ break;
+ case DB5500_CLKOUT_RTC_CLK0_SEL0:
+ sel0 = CLKOUT_SEL0_RTC_CLK0;
+ break;
+ case DB5500_CLKOUT_ULP_CLK_SEL0:
+ sel0 = CLKOUT_SEL0_ULP_CLK;
+ break;
+ case DB5500_CLKOUT_STATIC0:
+ sel = CLKOUT_SEL_STATIC0;
+ break;
+ case DB5500_CLKOUT_REFCLK:
+ sel = CLKOUT_SEL_REFCLK;
+ break;
+ case DB5500_CLKOUT_ULPCLK:
+ sel = CLKOUT_SEL_ULPCLK;
+ break;
+ case DB5500_CLKOUT_ARMCLK:
+ sel = CLKOUT_SEL_ARMCLK;
+ break;
+ case DB5500_CLKOUT_SYSACC0CLK:
+ sel = CLKOUT_SEL_SYSACC0CLK;
+ break;
+ case DB5500_CLKOUT_SOC0PLLCLK:
+ sel = CLKOUT_SEL_SOC0PLLCLK;
+ break;
+ case DB5500_CLKOUT_SOC1PLLCLK:
+ sel = CLKOUT_SEL_SOC1PLLCLK;
+ break;
+ case DB5500_CLKOUT_DDRPLLCLK:
+ sel = CLKOUT_SEL_DDRPLLCLK;
+ break;
+ case DB5500_CLKOUT_TVCLK:
+ sel = CLKOUT_SEL_TVCLK;
+ break;
+ case DB5500_CLKOUT_IRDACLK:
+ sel = CLKOUT_SEL_IRDACLK;
+ break;
+ }
+
+ switch (clkout) {
+ case DB5500_CLKOUT0:
+ sel_mask = PRCM_CLKOCR_CLKOUT0_SEL0_MASK |
+ PRCM_CLKOCR_CLKOUT0_SEL_MASK;
+ sel_bits = ((sel0 << PRCM_CLKOCR_CLKOUT0_SEL0_SHIFT) |
+ (sel << PRCM_CLKOCR_CLKOUT0_SEL_SHIFT));
+ div_mask = PRCM_CLKODIV_CLKOUT0_DIV_MASK;
+ div_bits = div << PRCM_CLKODIV_CLKOUT0_DIV_SHIFT;
+ break;
+ case DB5500_CLKOUT1:
+ sel_mask = PRCM_CLKOCR_CLKOUT1_SEL0_MASK |
+ PRCM_CLKOCR_CLKOUT1_SEL_MASK;
+ sel_bits = ((sel0 << PRCM_CLKOCR_CLKOUT1_SEL0_SHIFT) |
+ (sel << PRCM_CLKOCR_CLKOUT1_SEL_SHIFT));
+ div_mask = PRCM_CLKODIV_CLKOUT1_DIV_MASK;
+ div_bits = div << PRCM_CLKODIV_CLKOUT1_DIV_SHIFT;
+ break;
+ }
+
+ spin_lock_irqsave(&clkout_lock, flags);
+
+ if (configured[clkout]) {
+ r = -EINVAL;
+ goto unlock_and_return;
+ }
+
+ sel_val = readl(_PRCMU_BASE + PRCM_CLKOCR);
+ writel((sel_bits | (sel_val & ~sel_mask)),
+ (_PRCMU_BASE + PRCM_CLKOCR));
+
+ div_val = readl(_PRCMU_BASE + PRCM_CLKODIV);
+ writel((div_bits | (div_val & ~div_mask)),
+ (_PRCMU_BASE + PRCM_CLKODIV));
+
+ configured[clkout] = true;
+
+unlock_and_return:
+ spin_unlock_irqrestore(&clkout_lock, flags);
+
+ return r;
+}
+
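A hedged usage sketch for prcmu_config_clkout(); the source and divider are arbitrary examples built from constants defined earlier in this file:

/* Sketch: route the reference clock to CLKOUT0, divided by 8. */
static int example_setup_clkout0(void)
{
	return prcmu_config_clkout(DB5500_CLKOUT0, DB5500_CLKOUT_REFCLK,
				   CLKOUT_DIV_8);
}

Note that each CLKOUT can only be configured once; a second call for the same output returns -EINVAL.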
+static int request_sysclk(bool enable)
+{
+ int r;
+
+ r = 0;
+ mutex_lock(&mb3_transfer.sysclk_lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
+ cpu_relax();
+
+ if (enable)
+ mb3_transfer.req_st = SYSCLK_ON;
+ else
+ mb3_transfer.req_st = SYSCLK_OFF;
+
+ writeb(mb3_transfer.req_st, (PRCM_REQ_MB3_REFCLK_MGT));
+
+ writeb(MB3H_REFCLK_REQUEST, (PRCM_REQ_MB3_HEADER));
+ writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ /*
+ * The firmware only sends an ACK if we want to enable the
+ * SysClk, and it succeeds.
+ */
+ if (!wait_for_completion_timeout(&mb3_transfer.sysclk_work,
+ msecs_to_jiffies(20000))) {
+ pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
+ __func__);
+ r = -EIO;
+ WARN(1, "Failed to set sysclk");
+ goto unlock_and_return;
+ }
+
+ if ((mb3_transfer.ack.header != MB3H_REFCLK_REQUEST) ||
+ (mb3_transfer.ack.status != mb3_transfer.req_st)) {
+ r = -EIO;
+ }
+
+unlock_and_return:
+ mutex_unlock(&mb3_transfer.sysclk_lock);
+
+ return r;
+}
+
+static int request_timclk(bool enable)
+{
+ u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
+
+ if (!enable)
+ val |= PRCM_TCR_STOP_TIMERS;
+ writel(val, _PRCMU_BASE + PRCM_TCR);
+
+ return 0;
+}
+
+static int request_clk(u8 clock, bool enable)
+{
+ int r = 0;
+
+ BUG_ON(clock >= DB5500_NUM_CLK_CLIENTS);
+
+ mutex_lock(&mb2_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
+ cpu_relax();
+
+ /* fill in mailbox */
+ writeb(clock, PRCM_REQ_MB2_CLK_CLIENT);
+ writeb(enable, PRCM_REQ_MB2_CLK_STATE);
+
+ writeb(MB2H_CLK_REQUEST, PRCM_REQ_MB2_HEADER);
+
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+ if (!wait_for_completion_timeout(&mb2_transfer.work,
+ msecs_to_jiffies(20000))) {
+ pr_err("prcmu: request_clk() failed.\n");
+ r = -EIO;
+ WARN(1, "Failed in request_clk");
+ goto unlock_and_return;
+ }
+ if (mb2_transfer.ack.status != RC_SUCCESS ||
+ mb2_transfer.ack.header != MB2H_CLK_REQUEST)
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb2_transfer.lock);
+ return r;
+}
+
+static int request_reg_clock(u8 clock, bool enable)
+{
+ u32 val;
+ unsigned long flags;
+
+ WARN_ON(!clk_mgt[clock].offset);
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ /* Grab the HW semaphore. */
+ while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
+ if (enable) {
+ val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
+ } else {
+ clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
+ val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
+ }
+ writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
+
+ /* Release the HW semaphore. */
+ writel(0, _PRCMU_BASE + PRCM_SEM);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+
+ return 0;
+}
+
+/*
+ * request_pll() - Request for a pll to be enabled or disabled.
+ * @pll: The pll for which the request is made.
+ * @enable: Whether the clock should be enabled (true) or disabled (false).
+ *
+ * This function should only be used by the clock implementation.
+ * Do not use it from any other place!
+ */
+static int request_pll(u8 pll, bool enable)
+{
+ int r = 0;
+
+ BUG_ON(pll >= DB5500_NUM_PLL_ID);
+ mutex_lock(&mb2_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
+ cpu_relax();
+
+ mb2_transfer.req.pll_st[pll] = enable;
+
+ /* fill in mailbox */
+ writeb(pll, PRCM_REQ_MB2_PLL_CLIENT);
+ writeb(mb2_transfer.req.pll_st[pll], PRCM_REQ_MB2_PLL_STATE);
+
+ writeb(MB2H_PLL_REQUEST, PRCM_REQ_MB2_HEADER);
+
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+ if (!wait_for_completion_timeout(&mb2_transfer.work,
+ msecs_to_jiffies(20000))) {
+ pr_err("prcmu: set_pll() failed.\n");
+ r = -EIO;
+ WARN(1, "Failed to set pll");
+ goto unlock_and_return;
+ }
+ if (mb2_transfer.ack.status != RC_SUCCESS ||
+ mb2_transfer.ack.header != MB2H_PLL_REQUEST)
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb2_transfer.lock);
+
+ return r;
+}
+
+/**
+ * db5500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
+ * @clock: The clock for which the request is made.
+ * @enable: Whether the clock should be enabled (true) or disabled (false).
+ *
+ * This function should only be used by the clock implementation.
+ * Do not use it from any other place!
+ */
+int db5500_prcmu_request_clock(u8 clock, bool enable)
+{
+ /* MSP1 & CD clocks are handled by FW */
+ if (clock == PRCMU_MSP1CLK)
+ return request_clk(DB5500_MSP1CLK, enable);
+ else if (clock == PRCMU_CDCLK)
+ return request_clk(DB5500_CDCLK, enable);
+ else if (clock == PRCMU_IRDACLK)
+ return request_clk(DB5500_IRDACLK, enable);
+ else if (clock < PRCMU_NUM_REG_CLOCKS)
+ return request_reg_clock(clock, enable);
+ else if (clock == PRCMU_TIMCLK)
+ return request_timclk(enable);
+ else if (clock == PRCMU_PLLSOC0)
+ return request_pll(DB5500_PLL_SOC0, enable);
+ else if (clock == PRCMU_PLLSOC1)
+ return request_pll(DB5500_PLL_SOC1, enable);
+ else if (clock == PRCMU_PLLDDR)
+ return request_pll(DB5500_PLL_DDR, enable);
+ else if (clock == PRCMU_SYSCLK)
+ return request_sysclk(enable);
+ else
+ return -EINVAL;
+}
+
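db5500_prcmu_request_clock() dispatches either to the firmware mailboxes (MSP1, CD and IRDA clocks, the PLLs, SysClk) or to a direct PRCM_*CLK_MGT register write. A hedged sketch of a caller, with the surrounding clock framework assumed:

/* Sketch: gate a firmware-managed clock on and off around a block access. */
static int example_use_msp1_clock(void)
{
	int ret;

	ret = db5500_prcmu_request_clock(PRCMU_MSP1CLK, true);
	if (ret)
		return ret;

	/* ... access the MSP1 block here (placeholder) ... */

	return db5500_prcmu_request_clock(PRCMU_MSP1CLK, false);
}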
+/* This function should only be called while mb0_transfer.lock is held. */
+static void config_wakeups(void)
+{
+ static u32 last_dbb_events;
+ static u32 last_abb_events;
+ u32 dbb_events;
+ u32 abb_events;
+
+ dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
+
+ abb_events = mb0_transfer.req.abb_events;
+
+ if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
+ return;
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ cpu_relax();
+
+ writel(dbb_events, PRCM_REQ_MB0_WAKEUP_DBB);
+ writel(abb_events, PRCM_REQ_MB0_WAKEUP_ABB);
+ writeb(MB0H_WAKE_UP_CFG, PRCM_REQ_MB0_HEADER);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ last_dbb_events = dbb_events;
+ last_abb_events = abb_events;
+}
+
+int db5500_prcmu_config_esram0_deep_sleep(u8 state)
+{
+ unsigned long flags;
+
+ if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
+ (state < ESRAM0_DEEP_SLEEP_STATE_OFF))
+ return -EINVAL;
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ if (state == ESRAM0_DEEP_SLEEP_STATE_RET)
+ writeb(RET_ST, PRCM_REQ_MB0_ESRAM0_STATE);
+ else
+ writeb(OFF_ST, PRCM_REQ_MB0_ESRAM0_STATE);
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+
+ return 0;
+}
+
+int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
+{
+ int r = 0;
+ unsigned long flags;
+
+ /* Deep Idle is not supported in DB5500 */
+ BUG_ON((state < PRCMU_AP_SLEEP) || (state >= PRCMU_AP_DEEP_IDLE));
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ cpu_relax();
+
+ switch (state) {
+ case PRCMU_AP_IDLE:
+ writeb(DB5500_AP_IDLE, PRCM_REQ_MB0_AP_POWER_STATE);
+ /* TODO: Can be high latency */
+ writeb(DDR_PWR_STATE_UNCHANGED, PRCM_REQ_MB0_DDR_STATE);
+ break;
+ case PRCMU_AP_SLEEP:
+ writeb(DB5500_AP_SLEEP, PRCM_REQ_MB0_AP_POWER_STATE);
+ break;
+ case PRCMU_AP_DEEP_SLEEP:
+ writeb(DB5500_AP_DEEP_SLEEP, PRCM_REQ_MB0_AP_POWER_STATE);
+ break;
+ default:
+ r = -EINVAL;
+ goto unlock_return;
+ }
+ writeb((keep_ap_pll ? 1 : 0), PRCM_REQ_MB0_AP_PLL_STATE);
+ writeb((keep_ulp_clk ? 1 : 0), PRCM_REQ_MB0_ULP_CLOCK_STATE);
+
+ writeb(MB0H_PWR_STATE_TRANS, PRCM_REQ_MB0_HEADER);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+unlock_return:
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+
+ return r;
+}
+
+u8 db5500_prcmu_get_power_state_result(void)
+{
+ u8 status = readb_relaxed(PRCM_ACK_MB0_AP_PWRSTTR_STATUS);
+
+ /*
+ * Callers expect all the status values to match 8500. Adjust for
+ * PendingReq_Er (0x2b).
+ */
+ if (status == 0x2b)
+ status = PRCMU_PRCMU2ARMPENDINGIT_ER;
+
+ return status;
+}
+
+void db5500_prcmu_enable_wakeups(u32 wakeups)
+{
+ unsigned long flags;
+ u32 bits;
+ int i;
+
+ BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));
+
+ for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
+ if (wakeups & BIT(i)) {
+ if (prcmu_wakeup_bit[i] == 0)
+ WARN(1, "WAKEUP NOT SUPPORTED");
+ else
+ bits |= prcmu_wakeup_bit[i];
+ }
+ }
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ mb0_transfer.req.dbb_wakeups = bits;
+ config_wakeups();
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+}
+
+void db5500_prcmu_config_abb_event_readout(u32 abb_events)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ mb0_transfer.req.abb_events = abb_events;
+ config_wakeups();
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+}
+
+void db5500_prcmu_get_abb_event_buffer(void __iomem **buf)
+{
+ if (readb(PRCM_ACK_MB0_READ_POINTER) & 1)
+ *buf = (PRCM_ACK_MB0_WAKEUP_1_ABB);
+ else
+ *buf = (PRCM_ACK_MB0_WAKEUP_0_ABB);
+}
+
+/* This function should be called with lock */
+static int mailbox4_request(u8 mb4_request, u8 ack_request)
+{
+ int ret = 0;
+
+ writeb(mb4_request, PRCM_REQ_MB4_HEADER);
+ writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+
+ if (!wait_for_completion_timeout(&mb4_transfer.work,
+ msecs_to_jiffies(20000))) {
+ pr_err("prcmu: MB4 request %d failed", mb4_request);
+ ret = -EIO;
+ WARN(1, "prcmu: failed mb4 request");
+ goto failed;
+ }
+
+ if (mb4_transfer.ack.header != ack_request ||
+ mb4_transfer.ack.status != RC_SUCCESS)
+ ret = -EIO;
+failed:
+ return ret;
+}
+
+int db5500_prcmu_get_hotdog(void)
+{
+ return readw(PRCM_SHARE_INFO_HOTDOG);
+}
+
+int db5500_prcmu_config_hotdog(u8 threshold)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writew(threshold, PRCM_REQ_MB4_HOTDOG_THRESHOLD);
+ r = mailbox4_request(MB4H_CFG_HOTDOG, MB4H_ACK_CFG_HOTDOG);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+int db5500_prcmu_config_hotmon(u8 low, u8 high)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writew(low, PRCM_REQ_MB4_HOTMON_LOW);
+ writew(high, PRCM_REQ_MB4_HOTMON_HIGH);
+
+ r = mailbox4_request(MB4H_CFG_HOTMON, MB4H_ACK_CFG_HOTMON);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+static int config_hot_period(u16 val)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writew(val, PRCM_REQ_MB4_HOT_PERIOD);
+ r = mailbox4_request(MB4H_CFG_HOTPERIOD, MB4H_ACK_CFG_HOTPERIOD);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+/*
+ * period is in milliseconds
+ */
+int db5500_prcmu_start_temp_sense(u16 period)
+{
+ if (period == 0xFFFF)
+ return -EINVAL;
+
+ return config_hot_period(period);
+}
+
+int db5500_prcmu_stop_temp_sense(void)
+{
+ return config_hot_period(0xFFFF);
+}
+
+static int prcmu_a9wdog(u8 req, u8 ack)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ r = mailbox4_request(req, ack);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+static void prcmu_a9wdog_set_interrupt(bool enable)
+{
+ if (enable) {
+ writel(PRCM_TIMER0_IRQ_RTOS1_SET,
+ (mtimer_base + PRCM_TIMER0_IRQ_EN_SET_OFFSET));
+ } else {
+ writel(PRCM_TIMER0_IRQ_RTOS1_CLR,
+ (mtimer_base + PRCM_TIMER0_IRQ_EN_CLR_OFFSET));
+ }
+}
+
+static void prcmu_a9wdog_set_timeout(u32 timeout)
+{
+ u32 comp_timeout;
+
+ comp_timeout = readl(mtimer_base + PRCM_TIMER0_RTOS_COUNTER_OFFSET) +
+ timeout;
+ writel(comp_timeout, mtimer_base + PRCM_TIMER0_RTOS_COMP1_OFFSET);
+}
+
+int db5500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
+{
+ /*
+ * Sleep auto off feature is not supported. Resume and
+ * suspend will be handled by watchdog driver.
+ */
+ return 0;
+}
+
+int db5500_prcmu_enable_a9wdog(u8 id)
+{
+ int r = 0;
+
+ if (a9wdog_timer.enabled)
+ return -EPERM;
+
+ prcmu_a9wdog_set_interrupt(true);
+
+ r = prcmu_a9wdog(MB4H_CGF_A9WDOG_EN_PREBARK,
+ MB4H_ACK_CGF_A9WDOG_EN_PREBARK);
+ if (!r)
+ a9wdog_timer.enabled = true;
+ else
+ prcmu_a9wdog_set_interrupt(false);
+
+ return r;
+}
+
+int db5500_prcmu_disable_a9wdog(u8 id)
+{
+ if (!a9wdog_timer.enabled)
+ return -EPERM;
+
+ prcmu_a9wdog_set_interrupt(false);
+
+ a9wdog_timer.enabled = false;
+
+ return prcmu_a9wdog(MB4H_CGF_A9WDOG_DIS,
+ MB4H_ACK_CGF_A9WDOG_DIS);
+}
+
+int db5500_prcmu_kick_a9wdog(u8 id)
+{
+ int r = 0;
+
+ if (a9wdog_timer.enabled)
+ prcmu_a9wdog_set_timeout(a9wdog_timer.timeout);
+ else
+ r = -EPERM;
+
+ return r;
+}
+
+int db5500_prcmu_load_a9wdog(u8 id, u32 timeout)
+{
+ if (a9wdog_timer.enabled)
+ return -EPERM;
+
+ prcmu_a9wdog_set_timeout(timeout);
+ a9wdog_timer.timeout = timeout;
+
+ return 0;
+}
+
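The A9 watchdog helpers above latch a9wdog_timer.enabled, so they have to be called in a fixed order. A hedged sketch of the expected sequence from a watchdog driver; the timeout value is an arbitrary example:

/* Sketch: arm the A9 watchdog, pet it once, then stop it again. */
static int example_a9wdog_sequence(void)
{
	int ret;

	ret = db5500_prcmu_load_a9wdog(0, 0x100000);	/* load while disabled */
	if (ret)
		return ret;

	ret = db5500_prcmu_enable_a9wdog(0);		/* unmask IRQ and enable */
	if (ret)
		return ret;

	db5500_prcmu_kick_a9wdog(0);			/* re-arm the compare value */

	return db5500_prcmu_disable_a9wdog(0);
}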
/**
* db5500_prcmu_abb_read() - Read register value(s) from the ABB.
* @slave: The I2C slave address.
@@ -170,14 +1314,14 @@ int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
mutex_lock(&mb5_transfer.lock);
- while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
cpu_relax();
writeb(slave, PRCM_REQ_MB5_I2C_SLAVE);
writeb(reg, PRCM_REQ_MB5_I2C_REG);
writeb(size, PRCM_REQ_MB5_I2C_SIZE);
writeb(MB5H_I2C_READ, PRCM_REQ_MB5_HEADER);
- writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
+ writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
wait_for_completion(&mb5_transfer.work);
r = 0;
@@ -211,7 +1355,7 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
mutex_lock(&mb5_transfer.lock);
- while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
cpu_relax();
writeb(slave, PRCM_REQ_MB5_I2C_SLAVE);
writeb(reg, PRCM_REQ_MB5_I2C_REG);
@@ -219,7 +1363,7 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
memcpy_toio(PRCM_REQ_MB5_I2C_DATA, value, size);
writeb(MB5H_I2C_WRITE, PRCM_REQ_MB5_HEADER);
- writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
+ writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
wait_for_completion(&mb5_transfer.work);
if ((mb5_transfer.ack.header == MB5H_I2C_WRITE) &&
@@ -233,42 +1377,385 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
return r;
}
+/**
+ * db5500_prcmu_set_arm_opp - set the appropriate ARM OPP
+ * @opp: The new ARM operating point to which transition is to be made
+ * Returns: 0 on success, non-zero on failure
+ *
+ * This function sets the operating point of the ARM.
+ */
+int db5500_prcmu_set_arm_opp(u8 opp)
+{
+ int r;
+ u8 db5500_opp;
+
+ r = 0;
+
+ switch (opp) {
+ case ARM_EXTCLK:
+ db5500_opp = DB5500_ARM_EXT_OPP;
+ break;
+ case ARM_50_OPP:
+ db5500_opp = DB5500_ARM_50_OPP;
+ break;
+ case ARM_100_OPP:
+ db5500_opp = DB5500_ARM_100_OPP;
+ break;
+ default:
+ pr_err("prcmu: %s() received wrong opp value: %d\n",
+ __func__, opp);
+ r = -EINVAL;
+ goto bailout;
+ }
+
+ mutex_lock(&mb1_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ cpu_relax();
+
+ writeb(MB1H_ARM_OPP, PRCM_REQ_MB1_HEADER);
+
+ writeb(db5500_opp, PRCM_REQ_MB1_ARM_OPP);
+ writel(MBOX_BIT(1), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ if (!wait_for_completion_timeout(&mb1_transfer.work,
+ msecs_to_jiffies(20000))) {
+ r = -EIO;
+ WARN(1, "prcmu: failed to set arm opp");
+ goto unlock_and_return;
+ }
+
+ if (mb1_transfer.ack.header != MB1H_ARM_OPP ||
+ (mb1_transfer.ack.arm_opp != db5500_opp) ||
+ (mb1_transfer.ack.arm_voltage_st != RC_SUCCESS))
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb1_transfer.lock);
+bailout:
+ if (!r)
+ prcmu_debug_arm_opp_log(opp);
+ return r;
+}
+
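A hedged sketch of how OPP management code might drive db5500_prcmu_set_arm_opp(); the decision logic is a placeholder, and db5500_prcmu_get_arm_opp() is the read-back helper defined further down in this file:

/* Sketch: drop to the 50% ARM operating point and verify the switch. */
static int example_scale_arm_down(void)
{
	int ret = db5500_prcmu_set_arm_opp(ARM_50_OPP);

	if (ret)
		return ret;

	return (db5500_prcmu_get_arm_opp() == ARM_50_OPP) ? 0 : -EIO;
}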
+static void __init prcmu_ape_clocks_init(void)
+{
+ u8 opp = db5500_prcmu_get_ape_opp();
+ unsigned long flags;
+ int i;
+
+ WARN(opp != APE_100_OPP, "%s: Initial APE OPP (%u) not 100%%?\n",
+ __func__, opp);
+
+ for (i = 0; i < PRCMU_NUM_REG_CLOCKS; i++) {
+ struct clk_mgt *clkmgt = &clk_mgt[i];
+ u32 clkval;
+ u32 div;
+
+ if (!clkmgt->scalable && !clkmgt->force50)
+ continue;
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ clkval = readl(_PRCMU_BASE + clkmgt->offset);
+ div = clkval & PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ div >>= PRCM_CLK_MGT_CLKPLLDIV_SHIFT;
+
+ if (clkmgt->force50) {
+ div *= 2;
+
+ clkval &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ clkval |= div << PRCM_CLK_MGT_CLKPLLDIV_SHIFT;
+ writel(clkval, _PRCMU_BASE + clkmgt->offset);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+ continue;
+ }
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+
+ clkmgt->div = div;
+ if (!div)
+ pr_err("%s: scalable clock at offset %#x has zero divisor\n",
+ __func__, clkmgt->offset);
+ }
+}
+
+static void prcmu_ape_clocks_scale(u8 opp)
+{
+ unsigned long irqflags;
+ unsigned int i;
+ u32 clkval;
+
+ /*
+ * Note: calling printk() under the following lock can cause lock
+ * recursion via clk_enable() for the console UART!
+ */
+ spin_lock_irqsave(&clk_mgt_lock, irqflags);
+
+ /* take a lock on HW (HWSEM)*/
+ while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ for (i = 0; i < PRCMU_NUM_REG_CLOCKS; i++) {
+ u32 divval;
+
+ if (!clk_mgt[i].scalable)
+ continue;
+
+ clkval = readl(_PRCMU_BASE + clk_mgt[i].offset);
+ divval = clk_mgt[i].div;
+
+ pr_debug("PRCMU: reg %#x prev clk = 0x%x stored div = 0x%x\n",
+ clk_mgt[i].offset, clkval, divval);
+
+ if (opp == DB5500_APE_50_OPP)
+ divval *= 2;
+
+ clkval &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ clkval |= divval << PRCM_CLK_MGT_CLKPLLDIV_SHIFT;
+
+ pr_debug("PRCMU: wr 0x%x in reg 0x%x\n",
+ clkval, clk_mgt[i].offset);
+
+ writel(clkval, _PRCMU_BASE + clk_mgt[i].offset);
+ }
+
+ /* release lock */
+ writel(0, (_PRCMU_BASE + PRCM_SEM));
+
+ spin_unlock_irqrestore(&clk_mgt_lock, irqflags);
+}
+
+/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */
+static void request_even_slower_clocks(bool enable)
+{
+ void __iomem *clock_reg[] = {
+ (_PRCMU_BASE + DB5500_PRCM_ACLK_MGT),
+ (_PRCMU_BASE + DB5500_PRCM_DMACLK_MGT)
+ };
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ /* Grab the HW semaphore. */
+ while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ for (i = 0; i < ARRAY_SIZE(clock_reg); i++) {
+ u32 val;
+ u32 div;
+
+ val = readl(clock_reg[i]);
+ div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK);
+ if (enable) {
+ if ((div <= 1) || (div > 15)) {
+ pr_err("prcmu: Bad clock divider %d in %s\n",
+ div, __func__);
+ goto unlock_and_return;
+ }
+ div <<= 1;
+ } else {
+ if (div <= 2)
+ goto unlock_and_return;
+ div >>= 1;
+ }
+ val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) |
+ (div & PRCM_CLK_MGT_CLKPLLDIV_MASK));
+ writel(val, clock_reg[i]);
+ }
+
+unlock_and_return:
+ /* Release the HW semaphore. */
+ writel(0, _PRCMU_BASE + PRCM_SEM);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+}
+
+int db5500_prcmu_set_ape_opp(u8 opp)
+{
+ int ret = 0;
+ u8 db5500_opp;
+
+ if (opp == mb1_transfer.req_ape_opp)
+ return 0;
+
+ switch (opp) {
+ case APE_100_OPP:
+ db5500_opp = DB5500_APE_100_OPP;
+ break;
+ case APE_50_OPP:
+ case APE_50_PARTLY_25_OPP:
+ db5500_opp = DB5500_APE_50_OPP;
+ break;
+ default:
+ pr_err("prcmu: %s() received wrong opp value: %d\n",
+ __func__, opp);
+ ret = -EINVAL;
+ goto bailout;
+ }
+
+ mutex_lock(&mb1_transfer.lock);
+ if (mb1_transfer.req_ape_opp == APE_50_PARTLY_25_OPP)
+ request_even_slower_clocks(false);
+ if ((opp != APE_100_OPP) && (mb1_transfer.req_ape_opp != APE_100_OPP))
+ goto skip_message;
+
+ prcmu_ape_clocks_scale(db5500_opp);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ cpu_relax();
+
+ writeb(MB1H_APE_OPP, PRCM_REQ_MB1_HEADER);
+ writeb(db5500_opp, PRCM_REQ_MB1_APE_OPP);
+ writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+
+ if (!wait_for_completion_timeout(&mb1_transfer.work,
+ msecs_to_jiffies(20000))) {
+ ret = -EIO;
+ WARN(1, "prcmu: failed to set ape opp to %u", opp);
+ goto unlock_and_return;
+ }
+
+ if (mb1_transfer.ack.header != MB1H_APE_OPP ||
+ (mb1_transfer.ack.ape_opp != db5500_opp) ||
+ (mb1_transfer.ack.ape_voltage_st != RC_SUCCESS))
+ ret = -EIO;
+
+skip_message:
+ if ((!ret && (opp == APE_50_PARTLY_25_OPP)) ||
+ (ret && (mb1_transfer.req_ape_opp == APE_50_PARTLY_25_OPP)))
+ request_even_slower_clocks(true);
+ if (!ret)
+ mb1_transfer.req_ape_opp = opp;
+unlock_and_return:
+ mutex_unlock(&mb1_transfer.lock);
+bailout:
+ return ret;
+}
+
+int db5500_prcmu_get_ape_opp(void)
+{
+ u8 opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP);
+
+ switch (opp) {
+ case DB5500_APE_100_OPP:
+ return APE_100_OPP;
+ case DB5500_APE_50_OPP:
+ return APE_50_OPP;
+ default:
+ pr_err("prcmu: %s() read unknown opp value: %d\n",
+ __func__, opp);
+ return APE_100_OPP;
+ }
+}
+
+int db5500_prcmu_get_ddr_opp(void)
+{
+ return readb(_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
+}
+
+int db5500_prcmu_set_ddr_opp(u8 opp)
+{
+ if (opp != DDR_100_OPP && opp != DDR_50_OPP)
+ return -EINVAL;
+
+ writeb(opp, _PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
+
+ return 0;
+}
+
+/**
+ * db5500_prcmu_get_arm_opp - get the current ARM OPP
+ *
+ * Returns: the current ARM OPP
+ */
+int db5500_prcmu_get_arm_opp(void)
+{
+ u8 opp = readb(PRCM_ACK_MB1_CURRENT_ARM_OPP);
+
+ switch (opp) {
+ case DB5500_ARM_EXT_OPP:
+ return ARM_EXTCLK;
+ case DB5500_ARM_50_OPP:
+ return ARM_50_OPP;
+ case DB5500_ARM_100_OPP:
+ return ARM_100_OPP;
+ default:
+ pr_err("prcmu: %s() read unknown opp value: %d\n",
+ __func__, opp);
+ return ARM_100_OPP;
+ }
+}
+
+int prcmu_resetout(u8 resoutn, u8 state)
+{
+ int offset;
+ int pin = -1;
+
+ offset = state > 0 ? PRCM_RESOUTN_SET_OFFSET : PRCM_RESOUTN_CLR_OFFSET;
+
+ switch (resoutn) {
+ case 0:
+ pin = PRCMU_RESOUTN0_PIN;
+ break;
+ case 1:
+ pin = PRCMU_RESOUTN1_PIN;
+ break;
+ case 2:
+ pin = PRCMU_RESOUTN2_PIN;
+ break;
+ default:
+ break;
+ }
+
+ if (pin > 0)
+ writel(pin, _PRCMU_BASE + offset);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
int db5500_prcmu_enable_dsipll(void)
{
int i;
+ int ret = 0;
/* Enable DSIPLL_RESETN resets */
- writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
+ writel(PRCMU_RESET_DSIPLL, _PRCMU_BASE + PRCM_APE_RESETN_CLR);
/* Unclamp DSIPLL in/out */
- writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
+ writel(PRCMU_UNCLAMP_DSIPLL, _PRCMU_BASE + PRCM_MMIP_LS_CLAMP_CLR);
/* Set DSI PLL FREQ */
- writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
+ writel(PRCMU_PLLDSI_FREQ_SETTING, _PRCMU_BASE + PRCM_PLLDSI_FREQ);
writel(PRCMU_DSI_PLLOUT_SEL_SETTING,
- PRCM_DSI_PLLOUT_SEL);
+ _PRCMU_BASE + PRCM_DSI_PLLOUT_SEL);
/* Enable Escape clocks */
- writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
+ writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, _PRCMU_BASE + PRCM_DSITVCLK_DIV);
/* Start DSI PLL */
- writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
+ writel(PRCMU_ENABLE_PLLDSI, _PRCMU_BASE + PRCM_PLLDSI_ENABLE);
/* Reset DSI PLL */
- writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
+ writel(PRCMU_DSI_RESET_SW, _PRCMU_BASE + PRCM_DSI_SW_RESET);
for (i = 0; i < 10; i++) {
- if ((readl(PRCM_PLLDSI_LOCKP) &
+ if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
PRCMU_PLLDSI_LOCKP_LOCKED) == PRCMU_PLLDSI_LOCKP_LOCKED)
break;
udelay(100);
}
+
+ if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
+ PRCMU_PLLDSI_LOCKP_LOCKED)
+ != PRCMU_PLLDSI_LOCKP_LOCKED)
+ ret = -EIO;
/* Release DSIPLL_RESETN */
- writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
- return 0;
+ writel(PRCMU_RESET_DSIPLL, _PRCMU_BASE + PRCM_APE_RESETN_SET);
+ return ret;
}
int db5500_prcmu_disable_dsipll(void)
{
/* Disable dsi pll */
- writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
+ writel(PRCMU_DISABLE_PLLDSI, _PRCMU_BASE + PRCM_PLLDSI_ENABLE);
/* Disable escapeclock */
- writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
+ writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, _PRCMU_BASE + PRCM_DSITVCLK_DIV);
return 0;
}
@@ -276,27 +1763,150 @@ int db5500_prcmu_set_display_clocks(void)
{
/* HDMI and TVCLK Should be handled somewhere else */
/* PLLDIV=8, PLLSW=2, CLKEN=1 */
- writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT);
+ writel(PRCMU_DSI_CLOCK_SETTING, _PRCMU_BASE + DB5500_PRCM_HDMICLK_MGT);
/* PLLDIV=14, PLLSW=2, CLKEN=1 */
- writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
+ writel(PRCMU_DSI_LP_CLOCK_SETTING, _PRCMU_BASE + DB5500_PRCM_TVCLK_MGT);
return 0;
}
+u32 db5500_prcmu_read(unsigned int reg)
+{
+ return readl_relaxed(_PRCMU_BASE + reg);
+}
+
+void db5500_prcmu_write(unsigned int reg, u32 value)
+{
+ writel_relaxed(value, _PRCMU_BASE + reg);
+}
+
+void db5500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(_PRCMU_BASE + reg);
+ val = (val & ~mask) | (value & mask);
+ writel_relaxed(val, _PRCMU_BASE + reg);
+}
+
+/**
+ * db5500_prcmu_system_reset - System reset
+ *
+ * Saves the reset reason code and then sets the APE_SOFTRST register which
+ * fires an interrupt to fw
+ */
+void db5500_prcmu_system_reset(u16 reset_code)
+{
+ writew(reset_code, PRCM_SW_RST_REASON);
+ writel(1, _PRCMU_BASE + PRCM_APE_SOFTRST);
+}
+
+/**
+ * db5500_prcmu_get_reset_code - Retrieve SW reset reason code
+ *
+ * Retrieves the reset reason code stored by db5500_prcmu_system_reset()
+ * before the last restart.
+ */
+u16 db5500_prcmu_get_reset_code(void)
+{
+ return readw(PRCM_SW_RST_REASON);
+}
+
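A hedged sketch of the reset-reason round trip provided by the two helpers above; the reset code value is an arbitrary example and the early-boot caller is assumed:

/* Sketch: request a software reset with an arbitrary example reason code. */
static void example_sw_reset(void)
{
	db5500_prcmu_system_reset(0xbeef);
}

/* After the restart, early platform code can read the stored reason back: */
/*	u16 reason = db5500_prcmu_get_reset_code();	*/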
static void ack_dbb_wakeup(void)
{
unsigned long flags;
spin_lock_irqsave(&mb0_transfer.lock, flags);
- while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
cpu_relax();
- writeb(RMB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER);
- writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
+ writeb(MB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
+int db5500_prcmu_set_epod(u16 epod, u8 epod_state)
+{
+ int r = 0;
+ bool ram_retention = false;
+
+ /* check argument */
+ BUG_ON(epod < DB5500_EPOD_ID_BASE);
+ BUG_ON(epod_state > EPOD_STATE_ON);
+ BUG_ON((epod - DB5500_EPOD_ID_BASE) >= DB5500_NUM_EPOD_ID);
+
+ if (epod == DB5500_EPOD_ID_ESRAM12)
+ ram_retention = true;
+
+ /* check argument */
+ BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);
+
+ /* get lock */
+ mutex_lock(&mb2_transfer.lock);
+
+ /* wait for mailbox */
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
+ cpu_relax();
+
+ /* Retention is allowed only for ESRAM12 */
+ if (epod == DB5500_EPOD_ID_ESRAM12) {
+ switch (epod_state) {
+ case EPOD_STATE_ON:
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OOR_ON;
+ break;
+ case EPOD_STATE_OFF:
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OOR_OFF;
+ break;
+ case EPOD_STATE_RAMRET:
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OOR_RET;
+ break;
+ default:
+ r = -EINVAL;
+ goto unlock_and_return;
+ }
+ } else {
+ if (epod_state == EPOD_STATE_ON)
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_ON;
+ else if (epod_state == EPOD_STATE_OFF)
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OFF;
+ else {
+ r = -EINVAL;
+ goto unlock_and_return;
+ }
+ }
+ /* fill in mailbox */
+ writeb((epod - DB5500_EPOD_ID_BASE), PRCM_REQ_MB2_EPOD_CLIENT);
+ writeb(mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE],
+ PRCM_REQ_MB2_EPOD_STATE);
+
+ writeb(MB2H_EPOD_REQUEST, PRCM_REQ_MB2_HEADER);
+
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ if (!wait_for_completion_timeout(&mb2_transfer.work,
+ msecs_to_jiffies(20000))) {
+ pr_err("prcmu: set_epod() failed.\n");
+ r = -EIO;
+ WARN(1, "Failed to set epod");
+ goto unlock_and_return;
+ }
+
+ if (mb2_transfer.ack.status != RC_SUCCESS ||
+ mb2_transfer.ack.header != MB2H_EPOD_REQUEST)
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb2_transfer.lock);
+ return r;
+}
+
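A minimal sketch of an ePOD request, assuming the DB5500_EPOD_ID_* and EPOD_STATE_* identifiers already used above; error handling is reduced to a log message.

static int example_power_on_esram12(void)
{
	int ret;

	/* ESRAM12 is the only ePOD that also accepts EPOD_STATE_RAMRET. */
	ret = db5500_prcmu_set_epod(DB5500_EPOD_ID_ESRAM12, EPOD_STATE_ON);
	if (ret)
		pr_err("example: powering ESRAM12 failed (%d)\n", ret);
	return ret;
}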
static inline void print_unknown_header_warning(u8 n, u8 header)
{
pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
@@ -306,11 +1916,27 @@ static inline void print_unknown_header_warning(u8 n, u8 header)
static bool read_mailbox_0(void)
{
bool r;
+ u32 ev;
+ unsigned int n;
+
u8 header;
header = readb(PRCM_ACK_MB0_HEADER);
switch (header) {
- case AMB0H_WAKE_UP:
+ case MB0H_WAKE_UP:
+ if (readb(PRCM_ACK_MB0_READ_POINTER) & 1)
+ ev = readl(PRCM_ACK_MB0_WAKEUP_1_DBB);
+ else
+ ev = readl(PRCM_ACK_MB0_WAKEUP_0_DBB);
+
+ ev &= mb0_transfer.req.dbb_irqs;
+
+ for (n = 0; n < NUM_DB5500_PRCMU_WAKEUPS; n++) {
+ if (ev & prcmu_irq_bit[n]) {
+ if (n != IRQ_INDEX(ABB))
+ generic_handle_irq(IRQ_DB5500_PRCMU_BASE + n);
+ }
+ }
r = true;
break;
default:
@@ -318,31 +1944,123 @@ static bool read_mailbox_0(void)
r = false;
break;
}
- writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return r;
}
static bool read_mailbox_1(void)
{
- writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR);
+ u8 header;
+ bool do_complete = true;
+
+ header = mb1_transfer.ack.header = readb(PRCM_ACK_MB1_HEADER);
+
+ switch (header) {
+ case MB1H_ARM_OPP:
+ mb1_transfer.ack.arm_opp = readb(PRCM_ACK_MB1_CURRENT_ARM_OPP);
+ mb1_transfer.ack.arm_voltage_st =
+ readb(PRCM_ACK_MB1_ARM_VOLT_STATUS);
+ break;
+ case MB1H_APE_OPP:
+ mb1_transfer.ack.ape_opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP);
+ mb1_transfer.ack.ape_voltage_st =
+ readb(PRCM_ACK_MB1_APE_VOLT_STATUS);
+ break;
+ case MB1H_ARM_APE_OPP:
+ mb1_transfer.ack.ape_opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP);
+ mb1_transfer.ack.ape_voltage_st =
+ readb(PRCM_ACK_MB1_APE_VOLT_STATUS);
+ break;
+ default:
+ print_unknown_header_warning(1, header);
+ do_complete = false;
+ break;
+ }
+
+ writel(MBOX_BIT(1), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+
+ if (do_complete)
+ complete(&mb1_transfer.work);
+
return false;
}
static bool read_mailbox_2(void)
{
- writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR);
+ u8 header;
+
+ header = readb(PRCM_ACK_MB2_HEADER);
+ mb2_transfer.ack.header = header;
+ switch (header) {
+ case MB2H_EPOD_REQUEST:
+ mb2_transfer.ack.status = readb(PRCM_ACK_MB2_EPOD_STATUS);
+ break;
+ case MB2H_CLK_REQUEST:
+ mb2_transfer.ack.status = readb(PRCM_ACK_MB2_CLK_STATUS);
+ break;
+ case MB2H_PLL_REQUEST:
+ mb2_transfer.ack.status = readb(PRCM_ACK_MB2_PLL_STATUS);
+ break;
+ default:
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+		pr_err("prcmu: Wrong ACK received for MB2 request\n");
+		return false;
+ }
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+ complete(&mb2_transfer.work);
return false;
}
static bool read_mailbox_3(void)
{
- writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR);
+ u8 header;
+
+ header = readb(PRCM_ACK_MB3_HEADER);
+ mb3_transfer.ack.header = header;
+ switch (header) {
+ case MB3H_REFCLK_REQUEST:
+ mb3_transfer.ack.status = readb(PRCM_ACK_MB3_REFCLK_REQ);
+ writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+ complete(&mb3_transfer.sysclk_work);
+ break;
+ default:
+ writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+ pr_err("prcmu: wrong MB3 header\n");
+ break;
+ }
+
return false;
}
static bool read_mailbox_4(void)
{
- writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR);
+ u8 header;
+ bool do_complete = true;
+
+ header = readb(PRCM_ACK_MB4_HEADER);
+ mb4_transfer.ack.header = header;
+ switch (header) {
+ case MB4H_ACK_CFG_HOTDOG:
+ case MB4H_ACK_CFG_HOTMON:
+ case MB4H_ACK_CFG_HOTPERIOD:
+ case MB4H_ACK_CFG_MODEM_RESET:
+ case MB4H_ACK_CGF_A9WDOG_EN_PREBARK:
+ case MB4H_ACK_CGF_A9WDOG_EN_NOPREBARK:
+ case MB4H_ACK_CGF_A9WDOG_DIS:
+ mb4_transfer.ack.status = readb(PRCM_ACK_MB4_REQUESTS);
+ break;
+ default:
+ print_unknown_header_warning(4, header);
+ do_complete = false;
+ break;
+ }
+
+ writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_ARM_IT1_CLEAR));
+
+ if (do_complete)
+ complete(&mb4_transfer.work);
+
return false;
}
@@ -363,19 +2081,19 @@ static bool read_mailbox_5(void)
print_unknown_header_warning(5, header);
break;
}
- writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return false;
}
static bool read_mailbox_6(void)
{
- writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(6), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return false;
}
static bool read_mailbox_7(void)
{
- writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(7), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return false;
}
@@ -396,7 +2114,7 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data)
u8 n;
irqreturn_t r;
- bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
+ bits = (readl(_PRCMU_BASE + PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
if (unlikely(!bits))
return IRQ_NONE;
@@ -413,39 +2131,271 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data)
static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
{
+ u32 ev;
+
+ /*
+ * ABB needs to be handled before the wakeup because
+ * the ping/pong buffers for ABB events could change
+ * after we acknowledge the wakeup.
+ */
+ if (readb(PRCM_ACK_MB0_READ_POINTER) & 1)
+ ev = readl(PRCM_ACK_MB0_WAKEUP_1_DBB);
+ else
+ ev = readl(PRCM_ACK_MB0_WAKEUP_0_DBB);
+
+ ev &= mb0_transfer.req.dbb_irqs;
+ if (ev & WAKEUP_BIT_ABB)
+ handle_nested_irq(IRQ_DB5500_PRCMU_ABB);
+
ack_dbb_wakeup();
+
return IRQ_HANDLED;
}
+static void prcmu_mask_work(struct work_struct *work)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ config_wakeups();
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+}
+
+static void prcmu_irq_mask(struct irq_data *d)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
+
+ mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_DB5500_PRCMU_BASE];
+
+ spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
+ schedule_work(&mb0_transfer.mask_work);
+}
+
+static void prcmu_irq_unmask(struct irq_data *d)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
+
+ mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_DB5500_PRCMU_BASE];
+
+ spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
+ schedule_work(&mb0_transfer.mask_work);
+}
+
+static void noop(struct irq_data *d)
+{
+}
+
+static struct irq_chip prcmu_irq_chip = {
+ .name = "prcmu",
+ .irq_disable = prcmu_irq_mask,
+ .irq_ack = noop,
+ .irq_mask = prcmu_irq_mask,
+ .irq_unmask = prcmu_irq_unmask,
+};
+
void __init db5500_prcmu_early_init(void)
{
+ unsigned int i;
+	void __iomem *tcpm_base = ioremap_nocache(U5500_PRCMU_TCPM_BASE, SZ_4K);
+
+ if (tcpm_base != NULL) {
+ int version_high, version_low;
+
+ version_high = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
+ version_low = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET + 4);
+ prcmu_version.board = (version_high >> 24) & 0xFF;
+ prcmu_version.fw_version = version_high & 0xFF;
+ prcmu_version.api_version = version_low & 0xFF;
+
+ pr_info("PRCMU Firmware Version: 0x%x\n",
+ prcmu_version.fw_version);
+ pr_info("PRCMU API Version: 0x%x\n",
+ prcmu_version.api_version);
+
+ iounmap(tcpm_base);
+ }
+
tcdm_base = __io_address(U5500_PRCMU_TCDM_BASE);
+ mtimer_base = __io_address(U5500_MTIMER_BASE);
spin_lock_init(&mb0_transfer.lock);
+ spin_lock_init(&mb0_transfer.dbb_irqs_lock);
+ mutex_init(&mb0_transfer.ac_wake_lock);
+ mutex_init(&mb1_transfer.lock);
+ init_completion(&mb1_transfer.work);
+ mutex_init(&mb2_transfer.lock);
+ init_completion(&mb2_transfer.work);
+ mutex_init(&mb3_transfer.sysclk_lock);
+ init_completion(&mb3_transfer.sysclk_work);
+ mutex_init(&mb4_transfer.lock);
+ init_completion(&mb4_transfer.work);
mutex_init(&mb5_transfer.lock);
init_completion(&mb5_transfer.work);
+
+ INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
+
+	/* Initialize irqs. */
+ for (i = 0; i < NUM_DB5500_PRCMU_WAKEUPS; i++) {
+ unsigned int irq;
+
+ irq = IRQ_DB5500_PRCMU_BASE + i;
+ irq_set_chip_and_handler(irq, &prcmu_irq_chip,
+ handle_simple_irq);
+ if (irq == IRQ_DB5500_PRCMU_ABB)
+ irq_set_nested_thread(irq, true);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ prcmu_ape_clocks_init();
+}
+
+/*
+ * Power domain switches (ePODs) modeled as regulators for the DB5500 SoC
+ */
+static struct regulator_consumer_supply db5500_vape_consumers[] = {
+ REGULATOR_SUPPLY("v-ape", NULL),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
+ REGULATOR_SUPPLY("vcore", "sdi0"),
+ REGULATOR_SUPPLY("vcore", "sdi1"),
+ REGULATOR_SUPPLY("vcore", "sdi2"),
+ REGULATOR_SUPPLY("vcore", "sdi3"),
+ REGULATOR_SUPPLY("vcore", "sdi4"),
+ REGULATOR_SUPPLY("v-uart", "uart0"),
+ REGULATOR_SUPPLY("v-uart", "uart1"),
+ REGULATOR_SUPPLY("v-uart", "uart2"),
+ REGULATOR_SUPPLY("v-uart", "uart3"),
+ REGULATOR_SUPPLY("v-ape", "db5500-keypad"),
+};
+
+static struct regulator_consumer_supply db5500_sga_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.0"),
+ REGULATOR_SUPPLY("v-mali", NULL),
+};
+
+static struct regulator_consumer_supply db5500_hva_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.1"),
+ REGULATOR_SUPPLY("v-hva", NULL),
+};
+
+static struct regulator_consumer_supply db5500_sia_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.2"),
+ REGULATOR_SUPPLY("v-sia", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply db5500_disp_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.3"),
+ REGULATOR_SUPPLY("vsupply", "b2r2_bus"),
+ REGULATOR_SUPPLY("vsupply", "mcde"),
+};
+
+static struct regulator_consumer_supply db5500_esram12_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.4"),
+ REGULATOR_SUPPLY("v-esram12", "mcde"),
+ REGULATOR_SUPPLY("esram12", "hva"),
+};
+
+#define DB5500_REGULATOR_SWITCH(lower, upper) \
+[DB5500_REGULATOR_SWITCH_##upper] = { \
+ .constraints = { \
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \
+ }, \
+ .consumer_supplies = db5500_##lower##_consumers, \
+ .num_consumer_supplies = ARRAY_SIZE(db5500_##lower##_consumers),\
}
+#define DB5500_REGULATOR_SWITCH_VAPE(lower, upper) \
+[DB5500_REGULATOR_SWITCH_##upper] = { \
+ .supply_regulator = "db5500-vape", \
+ .constraints = { \
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \
+ }, \
+ .consumer_supplies = db5500_##lower##_consumers, \
+ .num_consumer_supplies = ARRAY_SIZE(db5500_##lower##_consumers),\
+}
+
+static struct regulator_init_data db5500_regulators[DB5500_NUM_REGULATORS] = {
+ [DB5500_REGULATOR_VAPE] = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = db5500_vape_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(db5500_vape_consumers),
+ },
+ DB5500_REGULATOR_SWITCH_VAPE(sga, SGA),
+ DB5500_REGULATOR_SWITCH_VAPE(hva, HVA),
+ DB5500_REGULATOR_SWITCH_VAPE(sia, SIA),
+ DB5500_REGULATOR_SWITCH_VAPE(disp, DISP),
+ /*
+ * ESRAM12 is put in retention by the firmware when VAPE is
+ * turned off so there's no need to hold VAPE.
+ */
+ DB5500_REGULATOR_SWITCH(esram12, ESRAM12),
+};
+
+static struct mfd_cell db5500_prcmu_devs[] = {
+ {
+ .name = "db5500-prcmu-regulators",
+ .platform_data = &db5500_regulators,
+ .pdata_size = sizeof(db5500_regulators),
+ },
+ {
+ .name = "cpufreq-u5500",
+ },
+};
+
/**
* prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
*
*/
-int __init db5500_prcmu_init(void)
+static int __init db5500_prcmu_probe(struct platform_device *pdev)
{
- int r = 0;
+ int err = 0;
if (ux500_is_svp() || !cpu_is_u5500())
return -ENODEV;
/* Clean up the mailbox interrupts after pre-kernel code. */
- writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
+ writel(ALL_MBOX_BITS, _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
- r = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler,
- prcmu_irq_thread_fn, 0, "prcmu", NULL);
- if (r < 0) {
+ err = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler,
+ prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
+ if (err < 0) {
pr_err("prcmu: Failed to allocate IRQ_DB5500_PRCMU1.\n");
- return -EBUSY;
+ err = -EBUSY;
+ goto no_irq_return;
}
- return 0;
+
+ err = mfd_add_devices(&pdev->dev, 0, db5500_prcmu_devs,
+ ARRAY_SIZE(db5500_prcmu_devs), NULL,
+ 0);
+
+ if (err)
+ pr_err("prcmu: Failed to add subdevices\n");
+ else
+ pr_info("DB5500 PRCMU initialized\n");
+
+no_irq_return:
+ return err;
+}
+
+static struct platform_driver db5500_prcmu_driver = {
+ .driver = {
+ .name = "db5500-prcmu",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init db5500_prcmu_init(void)
+{
+ return platform_driver_probe(&db5500_prcmu_driver, db5500_prcmu_probe);
}
arch_initcall(db5500_prcmu_init);
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index af8e0efedbe..76f2e24a94e 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -34,16 +34,13 @@
#include <mach/irqs.h>
#include <mach/db8500-regs.h>
#include <mach/id.h>
+#include <mach/prcmu-debug.h>
+
#include "dbx500-prcmu-regs.h"
/* Offset for the firmware version within the TCPM */
#define PRCMU_FW_VERSION_OFFSET 0xA4
-/* PRCMU project numbers, defined by PRCMU FW */
-#define PRCMU_PROJECT_ID_8500V1_0 1
-#define PRCMU_PROJECT_ID_8500V2_0 2
-#define PRCMU_PROJECT_ID_8400V2_0 3
-
/* Index of different voltages to be used when accessing AVSData */
#define PRCM_AVS_BASE 0x2FC
#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0)
@@ -137,6 +134,8 @@
#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
#define PRCM_REQ_MB1_PLL_ON_OFF (PRCM_REQ_MB1 + 0x4)
+#define PLL_SOC0_OFF 0x1
+#define PLL_SOC0_ON 0x2
#define PLL_SOC1_OFF 0x4
#define PLL_SOC1_ON 0x8
@@ -266,6 +265,11 @@
#define WAKEUP_BIT_GPIO7 BIT(30)
#define WAKEUP_BIT_GPIO8 BIT(31)
+static struct {
+ bool valid;
+ struct prcmu_fw_version version;
+} fw_info;
+
/*
* This vector maps irq numbers to the bits in the bit field used in
* communication with the PRCMU firmware.
@@ -341,11 +345,13 @@ static struct {
* mb1_transfer - state needed for mailbox 1 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
+ * @ape_opp: The current APE OPP.
* @ack: Reply ("acknowledge") data.
*/
static struct {
struct mutex lock;
struct completion work;
+ u8 ape_opp;
struct {
u8 header;
u8 arm_opp;
@@ -413,50 +419,102 @@ static struct {
static atomic_t ac_wake_req_state = ATOMIC_INIT(0);
/* Spinlocks */
+static DEFINE_SPINLOCK(prcmu_lock);
static DEFINE_SPINLOCK(clkout_lock);
-static DEFINE_SPINLOCK(gpiocr_lock);
/* Global var to runtime determine TCDM base for v2 or v1 */
static __iomem void *tcdm_base;
struct clk_mgt {
- unsigned int offset;
+ void __iomem *reg;
u32 pllsw;
+ int branch;
+ bool clk38div;
+};
+
+enum {
+ PLL_RAW,
+ PLL_FIX,
+ PLL_DIV
};
static DEFINE_SPINLOCK(clk_mgt_lock);
-#define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT_OFF), 0 }
+#define CLK_MGT_ENTRY(_name, _branch, _clk38div)[PRCMU_##_name] = \
+	{ (PRCM_##_name##_MGT), 0, _branch, _clk38div }
struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
- CLK_MGT_ENTRY(SGACLK),
- CLK_MGT_ENTRY(UARTCLK),
- CLK_MGT_ENTRY(MSP02CLK),
- CLK_MGT_ENTRY(MSP1CLK),
- CLK_MGT_ENTRY(I2CCLK),
- CLK_MGT_ENTRY(SDMMCCLK),
- CLK_MGT_ENTRY(SLIMCLK),
- CLK_MGT_ENTRY(PER1CLK),
- CLK_MGT_ENTRY(PER2CLK),
- CLK_MGT_ENTRY(PER3CLK),
- CLK_MGT_ENTRY(PER5CLK),
- CLK_MGT_ENTRY(PER6CLK),
- CLK_MGT_ENTRY(PER7CLK),
- CLK_MGT_ENTRY(LCDCLK),
- CLK_MGT_ENTRY(BMLCLK),
- CLK_MGT_ENTRY(HSITXCLK),
- CLK_MGT_ENTRY(HSIRXCLK),
- CLK_MGT_ENTRY(HDMICLK),
- CLK_MGT_ENTRY(APEATCLK),
- CLK_MGT_ENTRY(APETRACECLK),
- CLK_MGT_ENTRY(MCDECLK),
- CLK_MGT_ENTRY(IPI2CCLK),
- CLK_MGT_ENTRY(DSIALTCLK),
- CLK_MGT_ENTRY(DMACLK),
- CLK_MGT_ENTRY(B2R2CLK),
- CLK_MGT_ENTRY(TVCLK),
- CLK_MGT_ENTRY(SSPCLK),
- CLK_MGT_ENTRY(RNGCLK),
- CLK_MGT_ENTRY(UICCCLK),
+ CLK_MGT_ENTRY(SGACLK, PLL_DIV, false),
+ CLK_MGT_ENTRY(UARTCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(MSP02CLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(MSP1CLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(I2CCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(SDMMCCLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(SLIMCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(PER1CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(PER2CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(PER3CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(PER5CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(PER6CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(PER7CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(LCDCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(BMLCLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(HSITXCLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(HSIRXCLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(HDMICLK, PLL_FIX, false),
+ CLK_MGT_ENTRY(APEATCLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(APETRACECLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(MCDECLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(IPI2CCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(DSIALTCLK, PLL_FIX, false),
+ CLK_MGT_ENTRY(DMACLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(B2R2CLK, PLL_DIV, true),
+ CLK_MGT_ENTRY(TVCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(SSPCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(RNGCLK, PLL_FIX, true),
+ CLK_MGT_ENTRY(UICCCLK, PLL_FIX, false),
+};
+
+struct dsiclk {
+ u32 divsel_mask;
+ u32 divsel_shift;
+ u32 divsel;
+};
+
+static struct dsiclk dsiclk[2] = {
+ {
+ .divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_MASK,
+ .divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_SHIFT,
+ .divsel = PRCM_DSI_PLLOUT_SEL_PHI,
+ },
+ {
+ .divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_MASK,
+ .divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_SHIFT,
+ .divsel = PRCM_DSI_PLLOUT_SEL_PHI,
+ }
+};
+
+struct dsiescclk {
+ u32 en;
+ u32 div_mask;
+ u32 div_shift;
+};
+
+static struct dsiescclk dsiescclk[3] = {
+ {
+ .en = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_EN,
+ .div_mask = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_MASK,
+ .div_shift = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_SHIFT,
+ },
+ {
+ .en = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_EN,
+ .div_mask = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_MASK,
+ .div_shift = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_SHIFT,
+ },
+ {
+ .en = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_EN,
+ .div_mask = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_MASK,
+ .div_shift = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_SHIFT,
+ }
};
static struct regulator *hwacc_regulator[NUM_HW_ACC];
@@ -503,9 +561,6 @@ static const char *hwacc_ret_regulator_name[NUM_HW_ACC] = {
/* PLLDIV=12, PLLSW=4 (PLLDDR) */
#define PRCMU_DSI_CLOCK_SETTING 0x0000008C
-/* PLLDIV=8, PLLSW=4 (PLLDDR) */
-#define PRCMU_DSI_CLOCK_SETTING_U8400 0x00000088
-
/* DPI 50000000 Hz */
#define PRCMU_DPI_CLOCK_SETTING ((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
(16 << PRCMU_CLK_PLL_DIV_SHIFT))
@@ -514,9 +569,6 @@ static const char *hwacc_ret_regulator_name[NUM_HW_ACC] = {
/* D=101, N=1, R=4, SELDIV2=0 */
#define PRCMU_PLLDSI_FREQ_SETTING 0x00040165
-/* D=70, N=1, R=3, SELDIV2=0 */
-#define PRCMU_PLLDSI_FREQ_SETTING_U8400 0x00030146
-
#define PRCMU_ENABLE_PLLDSI 0x00000001
#define PRCMU_DISABLE_PLLDSI 0x00000000
#define PRCMU_RELEASE_RESET_DSS 0x0000400C
@@ -528,30 +580,17 @@ static const char *hwacc_ret_regulator_name[NUM_HW_ACC] = {
#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
-static struct {
- u8 project_number;
- u8 api_version;
- u8 func_version;
- u8 errata;
-} prcmu_version;
-
-
int db8500_prcmu_enable_dsipll(void)
{
int i;
- unsigned int plldsifreq;
/* Clear DSIPLL_RESETN */
writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
/* Unclamp DSIPLL in/out */
writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
- if (prcmu_is_u8400())
- plldsifreq = PRCMU_PLLDSI_FREQ_SETTING_U8400;
- else
- plldsifreq = PRCMU_PLLDSI_FREQ_SETTING;
/* Set DSI PLL FREQ */
- writel(plldsifreq, PRCM_PLLDSI_FREQ);
+ writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
/* Enable Escape clocks */
writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
@@ -583,12 +622,6 @@ int db8500_prcmu_disable_dsipll(void)
int db8500_prcmu_set_display_clocks(void)
{
unsigned long flags;
- unsigned int dsiclk;
-
- if (prcmu_is_u8400())
- dsiclk = PRCMU_DSI_CLOCK_SETTING_U8400;
- else
- dsiclk = PRCMU_DSI_CLOCK_SETTING;
spin_lock_irqsave(&clk_mgt_lock, flags);
@@ -596,7 +629,7 @@ int db8500_prcmu_set_display_clocks(void)
while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
- writel(dsiclk, PRCM_HDMICLK_MGT);
+ writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT);
writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
writel(PRCMU_DPI_CLOCK_SETTING, PRCM_LCDCLK_MGT);
@@ -608,43 +641,41 @@ int db8500_prcmu_set_display_clocks(void)
return 0;
}
-/**
- * prcmu_enable_spi2 - Enables pin muxing for SPI2 on OtherAlternateC1.
- */
-void prcmu_enable_spi2(void)
+u32 db8500_prcmu_read(unsigned int reg)
+{
+ return readl(_PRCMU_BASE + reg);
+}
+
+void db8500_prcmu_write(unsigned int reg, u32 value)
{
- u32 reg;
unsigned long flags;
- spin_lock_irqsave(&gpiocr_lock, flags);
- reg = readl(PRCM_GPIOCR);
- writel(reg | PRCM_GPIOCR_SPI2_SELECT, PRCM_GPIOCR);
- spin_unlock_irqrestore(&gpiocr_lock, flags);
+ spin_lock_irqsave(&prcmu_lock, flags);
+ writel(value, (_PRCMU_BASE + reg));
+ spin_unlock_irqrestore(&prcmu_lock, flags);
}
-/**
- * prcmu_disable_spi2 - Disables pin muxing for SPI2 on OtherAlternateC1.
- */
-void prcmu_disable_spi2(void)
+void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
{
- u32 reg;
+ u32 val;
unsigned long flags;
- spin_lock_irqsave(&gpiocr_lock, flags);
- reg = readl(PRCM_GPIOCR);
- writel(reg & ~PRCM_GPIOCR_SPI2_SELECT, PRCM_GPIOCR);
- spin_unlock_irqrestore(&gpiocr_lock, flags);
+ spin_lock_irqsave(&prcmu_lock, flags);
+ val = readl(_PRCMU_BASE + reg);
+ val = ((val & ~mask) | (value & mask));
+ writel(val, (_PRCMU_BASE + reg));
+ spin_unlock_irqrestore(&prcmu_lock, flags);
}
-bool prcmu_has_arm_maxopp(void)
+struct prcmu_fw_version *prcmu_get_fw_version(void)
{
- return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
- PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
+ return fw_info.valid ? &fw_info.version : NULL;
}
-bool prcmu_is_u8400(void)
+bool prcmu_has_arm_maxopp(void)
{
- return prcmu_version.project_number == PRCMU_PROJECT_ID_8400V2_0;
+ return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
+ PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
}
/**
@@ -787,6 +818,11 @@ int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
return 0;
}
+u8 db8500_prcmu_get_power_state_result(void)
+{
+ return readb(tcdm_base + PRCM_ACK_MB0_AP_PWRSTTR_STATUS);
+}
+
/* This function should only be called while mb0_transfer.lock is held. */
static void config_wakeups(void)
{
@@ -895,6 +931,8 @@ int db8500_prcmu_set_arm_opp(u8 opp)
mutex_unlock(&mb1_transfer.lock);
+ prcmu_debug_arm_opp_log(opp);
+
return r;
}
@@ -909,23 +947,23 @@ int db8500_prcmu_get_arm_opp(void)
}
/**
- * prcmu_get_ddr_opp - get the current DDR OPP
+ * db8500_prcmu_get_ddr_opp - get the current DDR OPP
*
* Returns: the current DDR OPP
*/
-int prcmu_get_ddr_opp(void)
+int db8500_prcmu_get_ddr_opp(void)
{
return readb(PRCM_DDR_SUBSYS_APE_MINBW);
}
/**
- * set_ddr_opp - set the appropriate DDR OPP
+ * db8500_set_ddr_opp - set the appropriate DDR OPP
* @opp: The new DDR operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
* This function sets the operating point of the DDR.
*/
-int prcmu_set_ddr_opp(u8 opp)
+int db8500_prcmu_set_ddr_opp(u8 opp)
{
if (opp < DDR_100_OPP || opp > DDR_25_OPP)
return -EINVAL;
@@ -935,25 +973,82 @@ int prcmu_set_ddr_opp(u8 opp)
return 0;
}
+
+/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */
+static void request_even_slower_clocks(bool enable)
+{
+ void __iomem *clock_reg[] = {
+ PRCM_ACLK_MGT,
+ PRCM_DMACLK_MGT
+ };
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ /* Grab the HW semaphore. */
+ while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ for (i = 0; i < ARRAY_SIZE(clock_reg); i++) {
+ u32 val;
+ u32 div;
+
+ val = readl(clock_reg[i]);
+ div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK);
+ if (enable) {
+ if ((div <= 1) || (div > 15)) {
+ pr_err("prcmu: Bad clock divider %d in %s\n",
+ div, __func__);
+ goto unlock_and_return;
+ }
+ div <<= 1;
+ } else {
+ if (div <= 2)
+ goto unlock_and_return;
+ div >>= 1;
+ }
+ val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) |
+ (div & PRCM_CLK_MGT_CLKPLLDIV_MASK));
+ writel(val, clock_reg[i]);
+ }
+
+unlock_and_return:
+ /* Release the HW semaphore. */
+ writel(0, PRCM_SEM);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+}
+
/**
- * set_ape_opp - set the appropriate APE OPP
+ * db8500_set_ape_opp - set the appropriate APE OPP
* @opp: The new APE operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
* This function sets the operating point of the APE.
*/
-int prcmu_set_ape_opp(u8 opp)
+int db8500_prcmu_set_ape_opp(u8 opp)
{
int r = 0;
+ if (opp == mb1_transfer.ape_opp)
+ return 0;
+
mutex_lock(&mb1_transfer.lock);
+ if (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)
+ request_even_slower_clocks(false);
+
+ if ((opp != APE_100_OPP) && (mb1_transfer.ape_opp != APE_100_OPP))
+ goto skip_message;
+
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
- writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
+ writeb(((opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp),
+ (tcdm_base + PRCM_REQ_MB1_APE_OPP));
writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
@@ -962,17 +1057,24 @@ int prcmu_set_ape_opp(u8 opp)
(mb1_transfer.ack.ape_opp != opp))
r = -EIO;
+skip_message:
+ if ((!r && (opp == APE_50_PARTLY_25_OPP)) ||
+ (r && (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)))
+ request_even_slower_clocks(true);
+ if (!r)
+ mb1_transfer.ape_opp = opp;
+
mutex_unlock(&mb1_transfer.lock);
return r;
}
/**
- * prcmu_get_ape_opp - get the current APE OPP
+ * db8500_prcmu_get_ape_opp - get the current APE OPP
*
* Returns: the current APE OPP
*/
-int prcmu_get_ape_opp(void)
+int db8500_prcmu_get_ape_opp(void)
{
return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
}
@@ -1056,7 +1158,9 @@ static int request_pll(u8 clock, bool enable)
{
int r = 0;
- if (clock == PRCMU_PLLSOC1)
+ if (clock == PRCMU_PLLSOC0)
+ clock = (enable ? PLL_SOC0_ON : PLL_SOC0_OFF);
+ else if (clock == PRCMU_PLLSOC1)
clock = (enable ? PLL_SOC1_ON : PLL_SOC1_OFF);
else
return -EINVAL;
@@ -1375,7 +1479,7 @@ static int request_timclk(bool enable)
return 0;
}
-static int request_reg_clock(u8 clock, bool enable)
+static int request_clock(u8 clock, bool enable)
{
u32 val;
unsigned long flags;
@@ -1386,14 +1490,14 @@ static int request_reg_clock(u8 clock, bool enable)
while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
- val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
+ val = readl(clk_mgt[clock].reg);
if (enable) {
val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
} else {
clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
}
- writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
+ writel(val, clk_mgt[clock].reg);
/* Release the HW semaphore. */
writel(0, PRCM_SEM);
@@ -1413,7 +1517,7 @@ static int request_sga_clock(u8 clock, bool enable)
writel(val | PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
}
- ret = request_reg_clock(clock, enable);
+ ret = request_clock(clock, enable);
if (!ret && !enable) {
val = readl(PRCM_CGATING_BYPASS);
@@ -1423,6 +1527,78 @@ static int request_sga_clock(u8 clock, bool enable)
return ret;
}
+static inline bool plldsi_locked(void)
+{
+ return (readl(PRCM_PLLDSI_LOCKP) &
+ (PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
+ PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3)) ==
+ (PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
+ PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3);
+}
+
+static int request_plldsi(bool enable)
+{
+ int r = 0;
+ u32 val;
+
+ writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
+ PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI), (enable ?
+ PRCM_MMIP_LS_CLAMP_CLR : PRCM_MMIP_LS_CLAMP_SET));
+
+ val = readl(PRCM_PLLDSI_ENABLE);
+ if (enable)
+ val |= PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
+ else
+ val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
+ writel(val, PRCM_PLLDSI_ENABLE);
+
+ if (enable) {
+ unsigned int i;
+ bool locked = plldsi_locked();
+
+ for (i = 10; !locked && (i > 0); --i) {
+ udelay(100);
+ locked = plldsi_locked();
+ }
+ if (locked) {
+ writel(PRCM_APE_RESETN_DSIPLL_RESETN,
+ PRCM_APE_RESETN_SET);
+ } else {
+ writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
+ PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI),
+ PRCM_MMIP_LS_CLAMP_SET);
+ val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
+ writel(val, PRCM_PLLDSI_ENABLE);
+ r = -EAGAIN;
+ }
+ } else {
+ writel(PRCM_APE_RESETN_DSIPLL_RESETN, PRCM_APE_RESETN_CLR);
+ }
+ return r;
+}
+
+static int request_dsiclk(u8 n, bool enable)
+{
+ u32 val;
+
+ val = readl(PRCM_DSI_PLLOUT_SEL);
+ val &= ~dsiclk[n].divsel_mask;
+ val |= ((enable ? dsiclk[n].divsel : PRCM_DSI_PLLOUT_SEL_OFF) <<
+ dsiclk[n].divsel_shift);
+ writel(val, PRCM_DSI_PLLOUT_SEL);
+ return 0;
+}
+
+static int request_dsiescclk(u8 n, bool enable)
+{
+ u32 val;
+
+ val = readl(PRCM_DSITVCLK_DIV);
+	if (enable)
+		val |= dsiescclk[n].en;
+	else
+		val &= ~dsiescclk[n].en;
+ writel(val, PRCM_DSITVCLK_DIV);
+ return 0;
+}
+
/**
* db8500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
* @clock: The clock for which the request is made.
@@ -1440,16 +1616,434 @@ int db8500_prcmu_request_clock(u8 clock, bool enable)
return request_timclk(enable);
case PRCMU_SYSCLK:
return request_sysclk(enable);
+ case PRCMU_PLLDSI:
+ return request_plldsi(enable);
+ case PRCMU_PLLSOC0:
case PRCMU_PLLSOC1:
return request_pll(clock, enable);
default:
break;
}
if (clock < PRCMU_NUM_REG_CLOCKS)
- return request_reg_clock(clock, enable);
+ return request_clock(clock, enable);
+ else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
+ return request_dsiclk((clock - PRCMU_DSI0CLK), enable);
+ else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
+ return request_dsiescclk((clock - PRCMU_DSI0ESCCLK), enable);
return -EINVAL;
}
+static unsigned long pll_rate(void __iomem *reg, unsigned long src_rate,
+ int branch)
+{
+ u64 rate;
+ u32 val;
+ u32 d;
+ u32 div = 1;
+
+ val = readl(reg);
+
+ rate = src_rate;
+ rate *= ((val & PRCM_PLL_FREQ_D_MASK) >> PRCM_PLL_FREQ_D_SHIFT);
+
+ d = ((val & PRCM_PLL_FREQ_N_MASK) >> PRCM_PLL_FREQ_N_SHIFT);
+ if (d > 1)
+ div *= d;
+
+ d = ((val & PRCM_PLL_FREQ_R_MASK) >> PRCM_PLL_FREQ_R_SHIFT);
+ if (d > 1)
+ div *= d;
+
+ if (val & PRCM_PLL_FREQ_SELDIV2)
+ div *= 2;
+
+ if ((branch == PLL_FIX) || ((branch == PLL_DIV) &&
+ (val & PRCM_PLL_FREQ_DIV2EN) &&
+ ((reg == PRCM_PLLSOC0_FREQ) ||
+ (reg == PRCM_PLLDDR_FREQ))))
+ div *= 2;
+
+ (void)do_div(rate, div);
+
+ return (unsigned long)rate;
+}
+
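A worked example of the rate formula implemented by pll_rate(); the register values are illustrative and not taken from a real DB8500 configuration.

/*
 * Illustrative values: src_rate = 38.4 MHz, D = 100, N = 1, R = 2,
 * SELDIV2 and DIV2EN clear.
 *
 *   PLL_RAW branch:  38.4 MHz * 100 / 2       = 1920 MHz
 *   PLL_FIX branch:  38.4 MHz * 100 / (2 * 2) =  960 MHz
 */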
+#define ROOT_CLOCK_RATE 38400000
+
+static unsigned long clock_rate(u8 clock)
+{
+ u32 val;
+ u32 pllsw;
+ unsigned long rate = ROOT_CLOCK_RATE;
+
+ val = readl(clk_mgt[clock].reg);
+
+ if (val & PRCM_CLK_MGT_CLK38) {
+ if (clk_mgt[clock].clk38div && (val & PRCM_CLK_MGT_CLK38DIV))
+ rate /= 2;
+ return rate;
+ }
+
+ val |= clk_mgt[clock].pllsw;
+ pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
+
+ if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC0)
+ rate = pll_rate(PRCM_PLLSOC0_FREQ, rate, clk_mgt[clock].branch);
+ else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC1)
+ rate = pll_rate(PRCM_PLLSOC1_FREQ, rate, clk_mgt[clock].branch);
+ else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_DDR)
+ rate = pll_rate(PRCM_PLLDDR_FREQ, rate, clk_mgt[clock].branch);
+ else
+ return 0;
+
+ if ((clock == PRCMU_SGACLK) &&
+ (val & PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN)) {
+ u64 r = (rate * 10);
+
+ (void)do_div(r, 25);
+ return (unsigned long)r;
+ }
+ val &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ if (val)
+ return rate / val;
+ else
+ return 0;
+}
+
+static unsigned long dsiclk_rate(u8 n)
+{
+ u32 divsel;
+ u32 div = 1;
+
+ divsel = readl(PRCM_DSI_PLLOUT_SEL);
+ divsel = ((divsel & dsiclk[n].divsel_mask) >> dsiclk[n].divsel_shift);
+
+ if (divsel == PRCM_DSI_PLLOUT_SEL_OFF)
+ divsel = dsiclk[n].divsel;
+
+ switch (divsel) {
+ case PRCM_DSI_PLLOUT_SEL_PHI_4:
+		div *= 2;
+		/* Fall through. */
+	case PRCM_DSI_PLLOUT_SEL_PHI_2:
+		div *= 2;
+		/* Fall through. */
+ case PRCM_DSI_PLLOUT_SEL_PHI:
+ return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
+ PLL_RAW) / div;
+ default:
+ return 0;
+ }
+}
+
+static unsigned long dsiescclk_rate(u8 n)
+{
+ u32 div;
+
+ div = readl(PRCM_DSITVCLK_DIV);
+ div = ((div & dsiescclk[n].div_mask) >> (dsiescclk[n].div_shift));
+ return clock_rate(PRCMU_TVCLK) / max((u32)1, div);
+}
+
+unsigned long prcmu_clock_rate(u8 clock)
+{
+ if (clock < PRCMU_NUM_REG_CLOCKS)
+ return clock_rate(clock);
+ else if (clock == PRCMU_TIMCLK)
+ return ROOT_CLOCK_RATE / 16;
+ else if (clock == PRCMU_SYSCLK)
+ return ROOT_CLOCK_RATE;
+ else if (clock == PRCMU_PLLSOC0)
+ return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
+ else if (clock == PRCMU_PLLSOC1)
+ return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
+ else if (clock == PRCMU_PLLDDR)
+ return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
+ else if (clock == PRCMU_PLLDSI)
+ return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
+ PLL_RAW);
+ else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
+ return dsiclk_rate(clock - PRCMU_DSI0CLK);
+ else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
+ return dsiescclk_rate(clock - PRCMU_DSI0ESCCLK);
+ else
+ return 0;
+}
+
+static unsigned long clock_source_rate(u32 clk_mgt_val, int branch)
+{
+ if (clk_mgt_val & PRCM_CLK_MGT_CLK38)
+ return ROOT_CLOCK_RATE;
+ clk_mgt_val &= PRCM_CLK_MGT_CLKPLLSW_MASK;
+ if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC0)
+ return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, branch);
+ else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC1)
+ return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, branch);
+ else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_DDR)
+ return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, branch);
+ else
+ return 0;
+}
+
+static u32 clock_divider(unsigned long src_rate, unsigned long rate)
+{
+ u32 div;
+
+ div = (src_rate / rate);
+ if (div == 0)
+ return 1;
+ if (rate < (src_rate / div))
+ div++;
+ return div;
+}
+
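A numeric check of the rounding rule in clock_divider(), using made-up rates; the divider is bumped whenever truncation would overshoot the requested rate.

/*
 * Example: src_rate = 400 MHz, rate = 150 MHz.
 *   div = 400 / 150 = 2, but 400 / 2 = 200 MHz > 150 MHz, so div becomes 3.
 *   The resulting clock, 400 / 3 = 133 MHz, never exceeds the request.
 */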
+static long round_clock_rate(u8 clock, unsigned long rate)
+{
+ u32 val;
+ u32 div;
+ unsigned long src_rate;
+ long rounded_rate;
+
+ val = readl(clk_mgt[clock].reg);
+ src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
+ clk_mgt[clock].branch);
+ div = clock_divider(src_rate, rate);
+ if (val & PRCM_CLK_MGT_CLK38) {
+ if (clk_mgt[clock].clk38div) {
+ if (div > 2)
+ div = 2;
+ } else {
+ div = 1;
+ }
+ } else if ((clock == PRCMU_SGACLK) && (div == 3)) {
+ u64 r = (src_rate * 10);
+
+ (void)do_div(r, 25);
+ if (r <= rate)
+ return (unsigned long)r;
+ }
+ rounded_rate = (src_rate / min(div, (u32)31));
+
+ return rounded_rate;
+}
+
+#define MIN_PLL_VCO_RATE 600000000ULL
+#define MAX_PLL_VCO_RATE 1680640000ULL
+
+static long round_plldsi_rate(unsigned long rate)
+{
+ long rounded_rate = 0;
+ unsigned long src_rate;
+ unsigned long rem;
+ u32 r;
+
+ src_rate = clock_rate(PRCMU_HDMICLK);
+ rem = rate;
+
+ for (r = 7; (rem > 0) && (r > 0); r--) {
+ u64 d;
+
+ d = (r * rate);
+ (void)do_div(d, src_rate);
+ if (d < 6)
+ d = 6;
+ else if (d > 255)
+ d = 255;
+ d *= src_rate;
+ if (((2 * d) < (r * MIN_PLL_VCO_RATE)) ||
+ ((r * MAX_PLL_VCO_RATE) < (2 * d)))
+ continue;
+ (void)do_div(d, r);
+ if (rate < d) {
+ if (rounded_rate == 0)
+ rounded_rate = (long)d;
+ break;
+ }
+ if ((rate - d) < rem) {
+ rem = (rate - d);
+ rounded_rate = (long)d;
+ }
+ }
+ return rounded_rate;
+}
+
+static long round_dsiclk_rate(unsigned long rate)
+{
+ u32 div;
+ unsigned long src_rate;
+ long rounded_rate;
+
+ src_rate = pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
+ PLL_RAW);
+ div = clock_divider(src_rate, rate);
+ rounded_rate = (src_rate / ((div > 2) ? 4 : div));
+
+ return rounded_rate;
+}
+
+static long round_dsiescclk_rate(unsigned long rate)
+{
+ u32 div;
+ unsigned long src_rate;
+ long rounded_rate;
+
+ src_rate = clock_rate(PRCMU_TVCLK);
+ div = clock_divider(src_rate, rate);
+ rounded_rate = (src_rate / min(div, (u32)255));
+
+ return rounded_rate;
+}
+
+long prcmu_round_clock_rate(u8 clock, unsigned long rate)
+{
+ if (clock < PRCMU_NUM_REG_CLOCKS)
+ return round_clock_rate(clock, rate);
+ else if (clock == PRCMU_PLLDSI)
+ return round_plldsi_rate(rate);
+ else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
+ return round_dsiclk_rate(rate);
+ else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
+ return round_dsiescclk_rate(rate);
+ else
+ return (long)prcmu_clock_rate(clock);
+}
+
+static void set_clock_rate(u8 clock, unsigned long rate)
+{
+ u32 val;
+ u32 div;
+ unsigned long src_rate;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ /* Grab the HW semaphore. */
+ while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ val = readl(clk_mgt[clock].reg);
+ src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
+ clk_mgt[clock].branch);
+ div = clock_divider(src_rate, rate);
+ if (val & PRCM_CLK_MGT_CLK38) {
+ if (clk_mgt[clock].clk38div) {
+ if (div > 1)
+ val |= PRCM_CLK_MGT_CLK38DIV;
+ else
+ val &= ~PRCM_CLK_MGT_CLK38DIV;
+ }
+ } else if (clock == PRCMU_SGACLK) {
+ val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK |
+ PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN);
+ if (div == 3) {
+ u64 r = (src_rate * 10);
+
+ (void)do_div(r, 25);
+ if (r <= rate) {
+ val |= PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN;
+ div = 0;
+ }
+ }
+ val |= min(div, (u32)31);
+ } else {
+ val &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ val |= min(div, (u32)31);
+ }
+ writel(val, clk_mgt[clock].reg);
+
+ /* Release the HW semaphore. */
+ writel(0, PRCM_SEM);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+}
+
+static int set_plldsi_rate(unsigned long rate)
+{
+ unsigned long src_rate;
+ unsigned long rem;
+ u32 pll_freq = 0;
+ u32 r;
+
+ src_rate = clock_rate(PRCMU_HDMICLK);
+ rem = rate;
+
+ for (r = 7; (rem > 0) && (r > 0); r--) {
+ u64 d;
+ u64 hwrate;
+
+ d = (r * rate);
+ (void)do_div(d, src_rate);
+ if (d < 6)
+ d = 6;
+ else if (d > 255)
+ d = 255;
+ hwrate = (d * src_rate);
+ if (((2 * hwrate) < (r * MIN_PLL_VCO_RATE)) ||
+ ((r * MAX_PLL_VCO_RATE) < (2 * hwrate)))
+ continue;
+ (void)do_div(hwrate, r);
+ if (rate < hwrate) {
+ if (pll_freq == 0)
+ pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
+ (r << PRCM_PLL_FREQ_R_SHIFT));
+ break;
+ }
+ if ((rate - hwrate) < rem) {
+ rem = (rate - hwrate);
+ pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
+ (r << PRCM_PLL_FREQ_R_SHIFT));
+ }
+ }
+ if (pll_freq == 0)
+ return -EINVAL;
+
+ pll_freq |= (1 << PRCM_PLL_FREQ_N_SHIFT);
+ writel(pll_freq, PRCM_PLLDSI_FREQ);
+
+ return 0;
+}
+
+static void set_dsiclk_rate(u8 n, unsigned long rate)
+{
+ u32 val;
+ u32 div;
+
+ div = clock_divider(pll_rate(PRCM_PLLDSI_FREQ,
+ clock_rate(PRCMU_HDMICLK), PLL_RAW), rate);
+
+ dsiclk[n].divsel = (div == 1) ? PRCM_DSI_PLLOUT_SEL_PHI :
+ (div == 2) ? PRCM_DSI_PLLOUT_SEL_PHI_2 :
+ /* else */ PRCM_DSI_PLLOUT_SEL_PHI_4;
+
+ val = readl(PRCM_DSI_PLLOUT_SEL);
+ val &= ~dsiclk[n].divsel_mask;
+ val |= (dsiclk[n].divsel << dsiclk[n].divsel_shift);
+ writel(val, PRCM_DSI_PLLOUT_SEL);
+}
+
+static void set_dsiescclk_rate(u8 n, unsigned long rate)
+{
+ u32 val;
+ u32 div;
+
+ div = clock_divider(clock_rate(PRCMU_TVCLK), rate);
+ val = readl(PRCM_DSITVCLK_DIV);
+ val &= ~dsiescclk[n].div_mask;
+ val |= (min(div, (u32)255) << dsiescclk[n].div_shift);
+ writel(val, PRCM_DSITVCLK_DIV);
+}
+
+int prcmu_set_clock_rate(u8 clock, unsigned long rate)
+{
+ if (clock < PRCMU_NUM_REG_CLOCKS)
+ set_clock_rate(clock, rate);
+ else if (clock == PRCMU_PLLDSI)
+ return set_plldsi_rate(rate);
+ else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
+ set_dsiclk_rate((clock - PRCMU_DSI0CLK), rate);
+ else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
+ set_dsiescclk_rate((clock - PRCMU_DSI0ESCCLK), rate);
+ return 0;
+}
+
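A hedged sketch combining the round and set helpers above; PRCMU_HDMICLK is used because it appears elsewhere in this patch, and the target rate is arbitrary.

static void example_program_hdmi_clock(void)
{
	unsigned long target = 76800000;	/* arbitrary example rate */
	long rounded;

	/* Round to an achievable rate first, then program it. */
	rounded = prcmu_round_clock_rate(PRCMU_HDMICLK, target);
	if (rounded > 0)
		(void)prcmu_set_clock_rate(PRCMU_HDMICLK, (unsigned long)rounded);
}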
int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
@@ -1476,7 +2070,7 @@ int db8500_prcmu_config_esram0_deep_sleep(u8 state)
return 0;
}
-int prcmu_config_hotdog(u8 threshold)
+int db8500_prcmu_config_hotdog(u8 threshold)
{
mutex_lock(&mb4_transfer.lock);
@@ -1494,7 +2088,7 @@ int prcmu_config_hotdog(u8 threshold)
return 0;
}
-int prcmu_config_hotmon(u8 low, u8 high)
+int db8500_prcmu_config_hotmon(u8 low, u8 high)
{
mutex_lock(&mb4_transfer.lock);
@@ -1533,7 +2127,7 @@ static int config_hot_period(u16 val)
return 0;
}
-int prcmu_start_temp_sense(u16 cycles32k)
+int db8500_prcmu_start_temp_sense(u16 cycles32k)
{
if (cycles32k == 0xFFFF)
return -EINVAL;
@@ -1541,7 +2135,7 @@ int prcmu_start_temp_sense(u16 cycles32k)
return config_hot_period(cycles32k);
}
-int prcmu_stop_temp_sense(void)
+int db8500_prcmu_stop_temp_sense(void)
{
return config_hot_period(0xFFFF);
}
@@ -1570,7 +2164,7 @@ static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
}
-int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
+int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
{
BUG_ON(num == 0 || num > 0xf);
return prcmu_a9wdog(MB4H_A9WDOG_CONF, num, 0, 0,
@@ -1578,17 +2172,17 @@ int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
A9WDOG_AUTO_OFF_DIS);
}
-int prcmu_enable_a9wdog(u8 id)
+int db8500_prcmu_enable_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_EN, id, 0, 0, 0);
}
-int prcmu_disable_a9wdog(u8 id)
+int db8500_prcmu_disable_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_DIS, id, 0, 0, 0);
}
-int prcmu_kick_a9wdog(u8 id)
+int db8500_prcmu_kick_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_KICK, id, 0, 0, 0);
}
@@ -1596,16 +2190,8 @@ int prcmu_kick_a9wdog(u8 id)
/*
* timeout is 28 bit, in ms.
*/
-#define MAX_WATCHDOG_TIMEOUT 131000
-int prcmu_load_a9wdog(u8 id, u32 timeout)
+int db8500_prcmu_load_a9wdog(u8 id, u32 timeout)
{
- if (timeout > MAX_WATCHDOG_TIMEOUT)
- /*
- * Due to calculation bug in prcmu fw, timeouts
- * can't be bigger than 131 seconds.
- */
- return -EINVAL;
-
return prcmu_a9wdog(MB4H_A9WDOG_LOAD,
(id & A9WDOG_ID_MASK) |
/*
@@ -1619,41 +2205,6 @@ int prcmu_load_a9wdog(u8 id, u32 timeout)
}
/**
- * prcmu_set_clock_divider() - Configure the clock divider.
- * @clock: The clock for which the request is made.
- * @divider: The clock divider. (< 32)
- *
- * This function should only be used by the clock implementation.
- * Do not use it from any other place!
- */
-int prcmu_set_clock_divider(u8 clock, u8 divider)
-{
- u32 val;
- unsigned long flags;
-
- if ((clock >= PRCMU_NUM_REG_CLOCKS) || (divider < 1) || (31 < divider))
- return -EINVAL;
-
- spin_lock_irqsave(&clk_mgt_lock, flags);
-
- /* Grab the HW semaphore. */
- while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
- cpu_relax();
-
- val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
- val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK);
- val |= (u32)divider;
- writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
-
- /* Release the HW semaphore. */
- writel(0, PRCM_SEM);
-
- spin_unlock_irqrestore(&clk_mgt_lock, flags);
-
- return 0;
-}
-
-/**
* prcmu_abb_read() - Read register value(s) from the ABB.
* @slave: The I2C slave address.
* @reg: The (start) register address.
@@ -1850,9 +2401,9 @@ u16 db8500_prcmu_get_reset_code(void)
}
/**
- * prcmu_reset_modem - ask the PRCMU to reset modem
+ * db8500_prcmu_reset_modem - ask the PRCMU to reset modem
*/
-void prcmu_modem_reset(void)
+void db8500_prcmu_modem_reset(void)
{
mutex_lock(&mb1_transfer.lock);
@@ -2099,6 +2650,22 @@ static struct irq_chip prcmu_irq_chip = {
.irq_unmask = prcmu_irq_unmask,
};
+static char *fw_project_name(u8 project)
+{
+ switch (project) {
+ case PRCMU_FW_PROJECT_U8500:
+ return "U8500";
+ case PRCMU_FW_PROJECT_U8500_C2:
+ return "U8500 C2";
+ case PRCMU_FW_PROJECT_U9500:
+ return "U9500";
+ case PRCMU_FW_PROJECT_U9500_C2:
+ return "U9500 C2";
+ default:
+ return "Unknown";
+ }
+}
+
void __init db8500_prcmu_early_init(void)
{
unsigned int i;
@@ -2108,11 +2675,13 @@ void __init db8500_prcmu_early_init(void)
if (tcpm_base != NULL) {
u32 version;
version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
- prcmu_version.project_number = version & 0xFF;
- prcmu_version.api_version = (version >> 8) & 0xFF;
- prcmu_version.func_version = (version >> 16) & 0xFF;
- prcmu_version.errata = (version >> 24) & 0xFF;
- pr_info("PRCMU firmware version %d.%d.%d\n",
+ fw_info.version.project = version & 0xFF;
+ fw_info.version.api_version = (version >> 8) & 0xFF;
+ fw_info.version.func_version = (version >> 16) & 0xFF;
+ fw_info.version.errata = (version >> 24) & 0xFF;
+ fw_info.valid = true;
+ pr_info("PRCMU firmware: %s, version %d.%d.%d\n",
+ fw_project_name(fw_info.version.project),
(version >> 8) & 0xFF, (version >> 16) & 0xFF,
(version >> 24) & 0xFF);
iounmap(tcpm_base);
@@ -2130,6 +2699,7 @@ void __init db8500_prcmu_early_init(void)
init_completion(&mb0_transfer.ac_wake_work);
mutex_init(&mb1_transfer.lock);
init_completion(&mb1_transfer.work);
+ mb1_transfer.ape_opp = APE_NO_CHANGE;
mutex_init(&mb2_transfer.lock);
init_completion(&mb2_transfer.work);
spin_lock_init(&mb2_transfer.auto_pm_lock);
@@ -2181,24 +2751,22 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
REGULATOR_SUPPLY("vcore", "sdi4"),
REGULATOR_SUPPLY("v-dma", "dma40.0"),
REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
- /* "v-uart" changed to "vcore" in the mainline kernel */
- REGULATOR_SUPPLY("vcore", "uart0"),
- REGULATOR_SUPPLY("vcore", "uart1"),
- REGULATOR_SUPPLY("vcore", "uart2"),
+ REGULATOR_SUPPLY("v-uart", "uart0"),
+ REGULATOR_SUPPLY("v-uart", "uart1"),
+ REGULATOR_SUPPLY("v-uart", "uart2"),
REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
+ REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
+ REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
};
static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
- /* CG2900 and CW1200 power to off-chip peripherals */
- REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
- REGULATOR_SUPPLY("wlan_1v8", "cw1200.0"),
REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
/* AV8100 regulator */
REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
};
static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
- REGULATOR_SUPPLY("vsupply", "b2r2.0"),
+ REGULATOR_SUPPLY("vsupply", "b2r2_bus"),
REGULATOR_SUPPLY("vsupply", "mcde"),
};
@@ -2235,6 +2803,7 @@ static struct regulator_consumer_supply db8500_esram12_consumers[] = {
static struct regulator_consumer_supply db8500_esram34_consumers[] = {
REGULATOR_SUPPLY("v-esram34", "mcde"),
REGULATOR_SUPPLY("esram34", "cm_control"),
+ REGULATOR_SUPPLY("lcla_esram", "dma40.0"),
};
static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
@@ -2291,7 +2860,7 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
},
},
[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
- .supply_regulator = "db8500-vape",
+		/* dependency on u8500-vape is handled outside regulator framework */
.constraints = {
.name = "db8500-sva-mmdsp",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
@@ -2307,7 +2876,7 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
},
},
[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
- .supply_regulator = "db8500-vape",
+		/* dependency on u8500-vape is handled outside regulator framework */
.constraints = {
.name = "db8500-sva-pipe",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
@@ -2316,7 +2885,7 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
.num_consumer_supplies = ARRAY_SIZE(db8500_svapipe_consumers),
},
[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
- .supply_regulator = "db8500-vape",
+		/* dependency on u8500-vape is handled outside regulator framework */
.constraints = {
.name = "db8500-sia-mmdsp",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
@@ -2331,7 +2900,7 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
},
},
[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
- .supply_regulator = "db8500-vape",
+		/* dependency on u8500-vape is handled outside regulator framework */
.constraints = {
.name = "db8500-sia-pipe",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
@@ -2359,7 +2928,10 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
.num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers),
},
[DB8500_REGULATOR_SWITCH_ESRAM12] = {
- .supply_regulator = "db8500-vape",
+		/*
+		 * esram12 is set in retention and supplied by Vsafe when Vape
+		 * is off, so there is no need to hold Vape.
+		 */
.constraints = {
.name = "db8500-esram12",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
@@ -2374,7 +2946,10 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
},
},
[DB8500_REGULATOR_SWITCH_ESRAM34] = {
- .supply_regulator = "db8500-vape",
+		/*
+		 * esram34 is set in retention and supplied by Vsafe when Vape
+		 * is off, so there is no need to hold Vape.
+		 */
.constraints = {
.name = "db8500-esram34",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
diff --git a/drivers/mfd/dbx500-prcmu-regs.h b/drivers/mfd/dbx500-prcmu-regs.h
index ec22e9f15d3..4c5c2478c05 100644
--- a/drivers/mfd/dbx500-prcmu-regs.h
+++ b/drivers/mfd/dbx500-prcmu-regs.h
@@ -17,41 +17,41 @@
#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
-#define PRCM_SVACLK_MGT_OFF 0x008
-#define PRCM_SIACLK_MGT_OFF 0x00C
-#define PRCM_SGACLK_MGT_OFF 0x014
-#define PRCM_UARTCLK_MGT_OFF 0x018
-#define PRCM_MSP02CLK_MGT_OFF 0x01C
-#define PRCM_I2CCLK_MGT_OFF 0x020
-#define PRCM_SDMMCCLK_MGT_OFF 0x024
-#define PRCM_SLIMCLK_MGT_OFF 0x028
-#define PRCM_PER1CLK_MGT_OFF 0x02C
-#define PRCM_PER2CLK_MGT_OFF 0x030
-#define PRCM_PER3CLK_MGT_OFF 0x034
-#define PRCM_PER5CLK_MGT_OFF 0x038
-#define PRCM_PER6CLK_MGT_OFF 0x03C
-#define PRCM_PER7CLK_MGT_OFF 0x040
-#define PRCM_PWMCLK_MGT_OFF 0x044 /* for DB5500 */
-#define PRCM_IRDACLK_MGT_OFF 0x048 /* for DB5500 */
-#define PRCM_IRRCCLK_MGT_OFF 0x04C /* for DB5500 */
-#define PRCM_LCDCLK_MGT_OFF 0x044
-#define PRCM_BMLCLK_MGT_OFF 0x04C
-#define PRCM_HSITXCLK_MGT_OFF 0x050
-#define PRCM_HSIRXCLK_MGT_OFF 0x054
-#define PRCM_HDMICLK_MGT_OFF 0x058
-#define PRCM_APEATCLK_MGT_OFF 0x05C
-#define PRCM_APETRACECLK_MGT_OFF 0x060
-#define PRCM_MCDECLK_MGT_OFF 0x064
-#define PRCM_IPI2CCLK_MGT_OFF 0x068
-#define PRCM_DSIALTCLK_MGT_OFF 0x06C
-#define PRCM_DMACLK_MGT_OFF 0x074
-#define PRCM_B2R2CLK_MGT_OFF 0x078
-#define PRCM_TVCLK_MGT_OFF 0x07C
-#define PRCM_UNIPROCLK_MGT_OFF 0x278
-#define PRCM_SSPCLK_MGT_OFF 0x280
-#define PRCM_RNGCLK_MGT_OFF 0x284
-#define PRCM_UICCCLK_MGT_OFF 0x27C
-#define PRCM_MSP1CLK_MGT_OFF 0x288
+#define PRCM_CLK_MGT(_offset) (void __iomem *)(IO_ADDRESS(U8500_PRCMU_BASE) \
+ + _offset)
+#define PRCM_ACLK_MGT PRCM_CLK_MGT(0x004)
+#define PRCM_SVACLK_MGT PRCM_CLK_MGT(0x008)
+#define PRCM_SIACLK_MGT PRCM_CLK_MGT(0x00C)
+#define PRCM_SGACLK_MGT PRCM_CLK_MGT(0x014)
+#define PRCM_UARTCLK_MGT PRCM_CLK_MGT(0x018)
+#define PRCM_MSP02CLK_MGT PRCM_CLK_MGT(0x01C)
+#define PRCM_I2CCLK_MGT PRCM_CLK_MGT(0x020)
+#define PRCM_SDMMCCLK_MGT PRCM_CLK_MGT(0x024)
+#define PRCM_SLIMCLK_MGT PRCM_CLK_MGT(0x028)
+#define PRCM_PER1CLK_MGT PRCM_CLK_MGT(0x02C)
+#define PRCM_PER2CLK_MGT PRCM_CLK_MGT(0x030)
+#define PRCM_PER3CLK_MGT PRCM_CLK_MGT(0x034)
+#define PRCM_PER5CLK_MGT PRCM_CLK_MGT(0x038)
+#define PRCM_PER6CLK_MGT PRCM_CLK_MGT(0x03C)
+#define PRCM_PER7CLK_MGT PRCM_CLK_MGT(0x040)
+#define PRCM_LCDCLK_MGT PRCM_CLK_MGT(0x044)
+#define PRCM_BMLCLK_MGT PRCM_CLK_MGT(0x04C)
+#define PRCM_HSITXCLK_MGT PRCM_CLK_MGT(0x050)
+#define PRCM_HSIRXCLK_MGT PRCM_CLK_MGT(0x054)
+#define PRCM_HDMICLK_MGT PRCM_CLK_MGT(0x058)
+#define PRCM_APEATCLK_MGT PRCM_CLK_MGT(0x05C)
+#define PRCM_APETRACECLK_MGT PRCM_CLK_MGT(0x060)
+#define PRCM_MCDECLK_MGT PRCM_CLK_MGT(0x064)
+#define PRCM_IPI2CCLK_MGT PRCM_CLK_MGT(0x068)
+#define PRCM_DSIALTCLK_MGT PRCM_CLK_MGT(0x06C)
+#define PRCM_DMACLK_MGT PRCM_CLK_MGT(0x074)
+#define PRCM_B2R2CLK_MGT PRCM_CLK_MGT(0x078)
+#define PRCM_TVCLK_MGT PRCM_CLK_MGT(0x07C)
+#define PRCM_UNIPROCLK_MGT PRCM_CLK_MGT(0x278)
+#define PRCM_SSPCLK_MGT PRCM_CLK_MGT(0x280)
+#define PRCM_RNGCLK_MGT PRCM_CLK_MGT(0x284)
+#define PRCM_UICCCLK_MGT PRCM_CLK_MGT(0x27C)
+#define PRCM_MSP1CLK_MGT PRCM_CLK_MGT(0x288)
#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118)
#define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE 0x3f
@@ -131,20 +131,58 @@
#define PRCM_MMIP_LS_CLAMP_SET (_PRCMU_BASE + 0x420)
#define PRCM_MMIP_LS_CLAMP_CLR (_PRCMU_BASE + 0x424)
+#define PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP BIT(11)
+#define PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI BIT(22)
+
/* PRCMU clock/PLL/reset registers */
+#define PRCM_PLLSOC0_FREQ (_PRCMU_BASE + 0x080)
+#define PRCM_PLLSOC1_FREQ (_PRCMU_BASE + 0x084)
+#define PRCM_PLLDDR_FREQ (_PRCMU_BASE + 0x08C)
+#define PRCM_PLL_FREQ_D_SHIFT 0
+#define PRCM_PLL_FREQ_D_MASK BITS(0, 7)
+#define PRCM_PLL_FREQ_N_SHIFT 8
+#define PRCM_PLL_FREQ_N_MASK BITS(8, 13)
+#define PRCM_PLL_FREQ_R_SHIFT 16
+#define PRCM_PLL_FREQ_R_MASK BITS(16, 18)
+#define PRCM_PLL_FREQ_SELDIV2 BIT(24)
+#define PRCM_PLL_FREQ_DIV2EN BIT(25)
+
#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500)
#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504)
#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
-#define PRCM_LCDCLK_MGT (_PRCMU_BASE + PRCM_LCDCLK_MGT_OFF)
-#define PRCM_MCDECLK_MGT (_PRCMU_BASE + PRCM_MCDECLK_MGT_OFF)
-#define PRCM_HDMICLK_MGT (_PRCMU_BASE + PRCM_HDMICLK_MGT_OFF)
-#define PRCM_TVCLK_MGT (_PRCMU_BASE + PRCM_TVCLK_MGT_OFF)
#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530)
#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C)
#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
#define PRCM_APE_RESETN_SET (_PRCMU_BASE + 0x1E4)
#define PRCM_APE_RESETN_CLR (_PRCMU_BASE + 0x1E8)
+#define PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE BIT(0)
+
+#define PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 BIT(0)
+#define PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3 BIT(1)
+
+#define PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_SHIFT 0
+#define PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_MASK BITS(0, 2)
+#define PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_SHIFT 8
+#define PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_MASK BITS(8, 10)
+
+#define PRCM_DSI_PLLOUT_SEL_OFF 0
+#define PRCM_DSI_PLLOUT_SEL_PHI 1
+#define PRCM_DSI_PLLOUT_SEL_PHI_2 2
+#define PRCM_DSI_PLLOUT_SEL_PHI_4 3
+
+#define PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_SHIFT 0
+#define PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_MASK BITS(0, 7)
+#define PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_SHIFT 8
+#define PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_MASK BITS(8, 15)
+#define PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_SHIFT 16
+#define PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_MASK BITS(16, 23)
+#define PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_EN BIT(24)
+#define PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_EN BIT(25)
+#define PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_EN BIT(26)
+
+#define PRCM_APE_RESETN_DSIPLL_RESETN BIT(14)
+
#define PRCM_CLKOCR (_PRCMU_BASE + 0x1CC)
#define PRCM_CLKOCR_CLKOUT0_REF_CLK (1 << 0)
#define PRCM_CLKOCR_CLKOUT0_MASK BITS(0, 13)
@@ -183,12 +221,21 @@
#define PRCM_CLKOCR_CLKOSEL1_MASK BITS(22, 24)
#define PRCM_CLKOCR_CLK1TYPE BIT(28)
-#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
-#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
-#define PRCM_CLK_MGT_CLKEN BIT(8)
+#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
+#define PRCM_CLK_MGT_CLKPLLSW_SOC0 BIT(5)
+#define PRCM_CLK_MGT_CLKPLLSW_SOC1 BIT(6)
+#define PRCM_CLK_MGT_CLKPLLSW_DDR BIT(7)
+#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
+#define PRCM_CLK_MGT_CLKEN BIT(8)
+#define PRCM_CLK_MGT_CLK38 BIT(9)
+#define PRCM_CLK_MGT_CLK38DIV BIT(11)
+#define PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN BIT(12)
/* GPIOCR register */
#define PRCM_GPIOCR_SPI2_SELECT BIT(23)
+#define PRCM_GPIOCR_DBG_STM_MOD_SELECT BIT(11)
+#define PRCM_GPIOCR_DBG_STM_APE_SELECT BIT(9)
+#define PRCM_GPIOCR_DBG_UARTMOD_SELECT BIT(0)
#define PRCM_DDR_SUBSYS_APE_MINBW (_PRCMU_BASE + 0x438)
#define PRCM_CGATING_BYPASS (_PRCMU_BASE + 0x134)
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index e07947e56b2..71b30b93f17 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -739,7 +739,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
ret = stmpe_block_read(stmpe, israddr, num, isr);
if (ret < 0)
return IRQ_NONE;
-
+back:
for (i = 0; i < num; i++) {
int bank = num - i - 1;
u8 status = isr[i];
@@ -761,6 +761,22 @@ static irqreturn_t stmpe_irq(int irq, void *data)
stmpe_reg_write(stmpe, israddr + i, clear);
}
+	/*
+	 * It may happen that interrupt sources do not show up on the
+	 * first status read, so read the status one more time.
+	 */
+ ret = stmpe_block_read(stmpe, israddr, num, isr);
+ if (ret >= 0) {
+ for (i = 0; i < num; i++) {
+ int bank = num - i - 1;
+ u8 status = isr[i];
+
+ status &= stmpe->ier[bank];
+ if (status)
+ goto back;
+ }
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c
new file mode 100644
index 00000000000..91211f29623
--- /dev/null
+++ b/drivers/mfd/tc35892.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tc35892.h>
+
+#define TC35892_CLKMODE_MODCTL_SLEEP 0x0
+#define TC35892_CLKMODE_MODCTL_OPERATION (1 << 0)
+
+/**
+ * tc35892_reg_read() - read a single TC35892 register
+ * @tc35892: Device to read from
+ * @reg: Register to read
+ */
+int tc35892_reg_read(struct tc35892 *tc35892, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(tc35892->i2c, reg);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_read);
+
+/**
+ * tc35892_reg_write() - write a single TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @data: Value to write
+ */
+int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(tc35892->i2c, reg, data);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_write);
+
+/**
+ * tc35892_block_read() - read multiple TC35892 registers
+ * @tc35892: Device to read from
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Buffer to write to
+ */
+int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(tc35892->i2c, reg, length, values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_read);
+
+/**
+ * tc35892_block_write() - write multiple TC35892 registers
+ * @tc35892: Device to write to
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Values to write
+ */
+int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
+ const u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(tc35892->i2c, reg, length,
+ values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_write);
+
+/**
+ * tc35892_set_bits() - set the value of a bitfield in a TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @mask: Mask of bits to modify
+ * @val: Value to set
+ */
+int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val)
+{
+ int ret;
+
+ mutex_lock(&tc35892->lock);
+
+ ret = tc35892_reg_read(tc35892, reg);
+ if (ret < 0)
+ goto out;
+
+ ret &= ~mask;
+ ret |= val;
+
+ ret = tc35892_reg_write(tc35892, reg, ret);
+
+out:
+ mutex_unlock(&tc35892->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_set_bits);
+
+static struct resource gpio_resources[] = {
+ {
+ .start = TC35892_INT_GPIIRQ,
+ .end = TC35892_INT_GPIIRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell tc35892_devs[] = {
+ {
+ .name = "tc35892-gpio",
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ .resources = &gpio_resources[0],
+ },
+};
+
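+/*
+ * Threaded IRQ handler: read IRQST and dispatch each pending internal
+ * interrupt source to its nested handler.
+ */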
+static irqreturn_t tc35892_irq(int irq, void *data)
+{
+ struct tc35892 *tc35892 = data;
+ int status;
+
+again:
+ status = tc35892_reg_read(tc35892, TC35892_IRQST);
+ if (status < 0)
+ return IRQ_NONE;
+
+ while (status) {
+ int bit = __ffs(status);
+
+ handle_nested_irq(tc35892->irq_base + bit);
+ status &= ~(1 << bit);
+ }
+
+ /*
+ * A dummy read or write (to any register) appears to be necessary to
+ * have the last interrupt clear (for example, GPIO IC write) take
+ * effect. In such a case, recheck for any interrupt which is still
+ * pending.
+ */
+ status = tc35892_reg_read(tc35892, TC35892_IRQST);
+ if (status)
+ goto again;
+
+ return IRQ_HANDLED;
+}
+
+static void tc35892_irq_dummy(struct irq_data *data)
+{
+ /* No mask/unmask at this level */
+}
+
+static struct irq_chip tc35892_irq_chip = {
+ .name = "tc35892",
+ .irq_mask = tc35892_irq_dummy,
+ .irq_unmask = tc35892_irq_dummy,
+};
+
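+/*
+ * Map the expander's internal interrupt sources onto a linear range of
+ * nested Linux IRQs starting at irq_base. Masking is handled by the
+ * sub-drivers, so the irq_chip callbacks above are no-ops.
+ */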
+static int tc35892_irq_init(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+ irq_set_chip_data(irq, tc35892);
+ irq_set_chip_and_handler(irq, &tc35892_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void tc35892_irq_remove(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+ }
+}
+
+static int tc35892_chip_init(struct tc35892 *tc35892)
+{
+ int manf, ver, ret;
+
+ manf = tc35892_reg_read(tc35892, TC35892_MANFCODE);
+ if (manf < 0)
+ return manf;
+
+ ver = tc35892_reg_read(tc35892, TC35892_VERSION);
+ if (ver < 0)
+ return ver;
+
+ if (manf != TC35892_MANFCODE_MAGIC) {
+ dev_err(tc35892->dev, "unknown manufacturer: %#x\n", manf);
+ return -EINVAL;
+ }
+
+ dev_info(tc35892->dev, "manufacturer: %#x, version: %#x\n", manf, ver);
+
+ /*
+ * Put everything except the IRQ module into reset;
+ * also spare the GPIO module for any pin initialization
+ * done during pre-kernel boot
+ */
+ ret = tc35892_reg_write(tc35892, TC35892_RSTCTRL,
+ TC35892_RSTCTRL_TIMRST
+ | TC35892_RSTCTRL_ROTRST
+ | TC35892_RSTCTRL_KBDRST);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the reset interrupt. */
+ return tc35892_reg_write(tc35892, TC35892_RSTINTCLR, 0x1);
+}
+
+static int __devinit tc35892_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tc35892_platform_data *pdata = i2c->dev.platform_data;
+ struct tc35892 *tc35892;
+ int ret;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EIO;
+
+ tc35892 = kzalloc(sizeof(struct tc35892), GFP_KERNEL);
+ if (!tc35892)
+ return -ENOMEM;
+
+ mutex_init(&tc35892->lock);
+
+ tc35892->dev = &i2c->dev;
+ tc35892->i2c = i2c;
+ tc35892->pdata = pdata;
+ tc35892->irq_base = pdata->irq_base;
+ tc35892->num_gpio = id->driver_data;
+
+ i2c_set_clientdata(i2c, tc35892);
+
+ ret = tc35892_chip_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = tc35892_irq_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(tc35892->i2c->irq, NULL, tc35892_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "tc35892", tc35892);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to request IRQ: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = mfd_add_devices(tc35892->dev, -1, tc35892_devs,
+ ARRAY_SIZE(tc35892_devs), NULL,
+ tc35892->irq_base);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to add children\n");
+ goto out_freeirq;
+ }
+
+ return 0;
+
+out_freeirq:
+ free_irq(tc35892->i2c->irq, tc35892);
+out_removeirq:
+ tc35892_irq_remove(tc35892);
+out_free:
+ kfree(tc35892);
+ return ret;
+}
+
+static int __devexit tc35892_remove(struct i2c_client *client)
+{
+ struct tc35892 *tc35892 = i2c_get_clientdata(client);
+
+ mfd_remove_devices(tc35892->dev);
+
+ free_irq(tc35892->i2c->irq, tc35892);
+ tc35892_irq_remove(tc35892);
+
+ kfree(tc35892);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
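+/*
+ * Register values written before entering sleep; the current contents
+ * are saved in sleep_regs_backup and restored on resume.
+ */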
+static u32 sleep_regs[] = {
+ TC35892_IOPC0_L,
+ TC35892_IOPC0_H,
+ TC35892_IOPC1_L,
+ TC35892_IOPC1_H,
+ TC35892_IOPC2_L,
+ TC35892_IOPC2_H,
+ TC35892_DRIVE0_L,
+ TC35892_DRIVE0_H,
+ TC35892_DRIVE1_L,
+ TC35892_DRIVE1_H,
+ TC35892_DRIVE2_L,
+ TC35892_DRIVE2_H,
+ TC35892_DRIVE3,
+ TC35892_GPIODATA0,
+ TC35892_GPIOMASK0,
+ TC35892_GPIODATA1,
+ TC35892_GPIOMASK1,
+ TC35892_GPIODATA2,
+ TC35892_GPIOMASK2,
+ TC35892_GPIODIR0,
+ TC35892_GPIODIR1,
+ TC35892_GPIODIR2,
+ TC35892_GPIOIE0,
+ TC35892_GPIOIE1,
+ TC35892_GPIOIE2,
+ TC35892_RSTCTRL,
+ TC35892_CLKCFG,
+};
+
+static u8 sleep_regs_val[] = {
+ 0x00, /* TC35892_IOPC0_L */
+ 0x00, /* TC35892_IOPC0_H */
+ 0x00, /* TC35892_IOPC1_L */
+ 0x00, /* TC35892_IOPC1_H */
+ 0x00, /* TC35892_IOPC2_L */
+ 0x00, /* TC35892_IOPC2_H */
+ 0xff, /* TC35892_DRIVE0_L */
+ 0xff, /* TC35892_DRIVE0_H */
+ 0xff, /* TC35892_DRIVE1_L */
+ 0xff, /* TC35892_DRIVE1_H */
+ 0xff, /* TC35892_DRIVE2_L */
+ 0xff, /* TC35892_DRIVE2_H */
+ 0x0f, /* TC35892_DRIVE3 */
+ 0x80, /* TC35892_GPIODATA0 */
+ 0x80, /* TC35892_GPIOMASK0 */
+ 0x80, /* TC35892_GPIODATA1 */
+ 0x80, /* TC35892_GPIOMASK1 */
+ 0x06, /* TC35892_GPIODATA2 */
+ 0x06, /* TC35892_GPIOMASK2 */
+ 0xf0, /* TC35892_GPIODIR0 */
+ 0xe0, /* TC35892_GPIODIR1 */
+ 0xee, /* TC35892_GPIODIR2 */
+ 0x0f, /* TC35892_GPIOIE0 */
+ 0x1f, /* TC35892_GPIOIE1 */
+ 0x11, /* TC35892_GPIOIE2 */
+ 0x0f, /* TC35892_RSTCTRL */
+ 0xb0 /* TC35892_CLKCFG */
+};
+
+static u8 sleep_regs_backup[ARRAY_SIZE(sleep_regs)];
+
+static int tc35892_suspend(struct device *dev)
+{
+ struct tc35892 *tc35892 = dev_get_drvdata(dev);
+ struct i2c_client *client = tc35892->i2c;
+ int ret = 0;
+ int i, j;
+ int val;
+
+ /* Put the system to sleep mode */
+ if (!device_may_wakeup(&client->dev)) {
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ val = tc35892_reg_read(tc35892,
+ sleep_regs[i]);
+ if (val < 0)
+ goto out;
+
+ sleep_regs_backup[i] = (u8) (val & 0xff);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ ret = tc35892_reg_write(tc35892,
+ sleep_regs[i],
+ sleep_regs_val[i]);
+ if (ret < 0)
+ goto fail;
+
+ }
+
+ ret = tc35892_reg_write(tc35892,
+ TC35892_CLKMODE,
+ TC35892_CLKMODE_MODCTL_SLEEP);
+ }
+out:
+ return ret;
+fail:
+ for (j = 0; j <= i; j++) {
+ ret = tc35892_reg_write(tc35892,
+ sleep_regs[j],
+ sleep_regs_backup[j]);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+
+static int tc35892_resume(struct device *dev)
+{
+ struct tc35892 *tc35892 = dev_get_drvdata(dev);
+ struct i2c_client *client = tc35892->i2c;
+ int ret = 0;
+ int i;
+
+ /* Bring the system back into operation */
+ if (!device_may_wakeup(&client->dev)) {
+ ret = tc35892_reg_write(tc35892,
+ TC35892_CLKMODE,
+ TC35892_CLKMODE_MODCTL_OPERATION);
+ if (ret < 0)
+ goto out;
+
+ for (i = ARRAY_SIZE(sleep_regs) - 1; i >= 0; i--) {
+ ret = tc35892_reg_write(tc35892,
+ sleep_regs[i],
+ sleep_regs_backup[i]);
+ /* Not much to do here if we fail */
+ if (ret < 0)
+ break;
+ }
+ }
+out:
+ return ret;
+}
+
+static const struct dev_pm_ops tc35892_dev_pm_ops = {
+ .suspend = tc35892_suspend,
+ .resume = tc35892_resume,
+};
+#endif
+
+static const struct i2c_device_id tc35892_id[] = {
+ { "tc35892", 24 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc35892_id);
+
+static struct i2c_driver tc35892_driver = {
+ .driver.name = "tc35892",
+ .driver.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .driver.pm = &tc35892_dev_pm_ops,
+#endif
+ .probe = tc35892_probe,
+ .remove = __devexit_p(tc35892_remove),
+ .id_table = tc35892_id,
+};
+
+static int __init tc35892_init(void)
+{
+ return i2c_add_driver(&tc35892_driver);
+}
+subsys_initcall(tc35892_init);
+
+static void __exit tc35892_exit(void)
+{
+ i2c_del_driver(&tc35892_driver);
+}
+module_exit(tc35892_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TC35892 MFD core driver");
+MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index de979742c6f..0e79fe2d214 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -358,16 +358,114 @@ static int __devexit tc3589x_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
+
+static u32 sleep_regs[] = {
+ TC3589x_IOPC0_L,
+ TC3589x_IOPC0_H,
+ TC3589x_IOPC1_L,
+ TC3589x_IOPC1_H,
+ TC3589x_IOPC2_L,
+ TC3589x_IOPC2_H,
+ TC3589x_DRIVE0_L,
+ TC3589x_DRIVE0_H,
+ TC3589x_DRIVE1_L,
+ TC3589x_DRIVE1_H,
+ TC3589x_DRIVE2_L,
+ TC3589x_DRIVE2_H,
+ TC3589x_DRIVE3,
+ TC3589x_GPIODATA0,
+ TC3589x_GPIOMASK0,
+ TC3589x_GPIODATA1,
+ TC3589x_GPIOMASK1,
+ TC3589x_GPIODATA2,
+ TC3589x_GPIOMASK2,
+ TC3589x_GPIODIR0,
+ TC3589x_GPIODIR1,
+ TC3589x_GPIODIR2,
+ TC3589x_GPIOIE0,
+ TC3589x_GPIOIE1,
+ TC3589x_GPIOIE2,
+ TC3589x_RSTCTRL,
+ TC3589x_CLKCFG,
+};
+
+static u8 sleep_regs_val[] = {
+ 0x00, /* TC3589x_IOPC0_L */
+ 0x00, /* TC3589x_IOPC0_H */
+ 0x00, /* TC3589x_IOPC1_L */
+ 0x00, /* TC3589x_IOPC1_H */
+ 0x00, /* TC3589x_IOPC2_L */
+ 0x00, /* TC3589x_IOPC2_H */
+ 0xff, /* TC3589x_DRIVE0_L */
+ 0xff, /* TC3589x_DRIVE0_H */
+ 0xff, /* TC3589x_DRIVE1_L */
+ 0xff, /* TC3589x_DRIVE1_H */
+ 0xff, /* TC3589x_DRIVE2_L */
+ 0xff, /* TC3589x_DRIVE2_H */
+ 0x0f, /* TC3589x_DRIVE3 */
+ 0x80, /* TC3589x_GPIODATA0 */
+ 0x80, /* TC3589x_GPIOMASK0 */
+ 0x80, /* TC3589x_GPIODATA1 */
+ 0x80, /* TC3589x_GPIOMASK1 */
+ 0x06, /* TC3589x_GPIODATA2 */
+ 0x06, /* TC3589x_GPIOMASK2 */
+ 0xf0, /* TC3589x_GPIODIR0 */
+ 0xe0, /* TC3589x_GPIODIR1 */
+ 0xee, /* TC3589x_GPIODIR2 */
+ 0x0f, /* TC3589x_GPIOIE0 */
+ 0x1f, /* TC3589x_GPIOIE1 */
+ 0x11, /* TC3589x_GPIOIE2 */
+ 0x0f, /* TC3589x_RSTCTRL */
+ 0xb0 /* TC3589x_CLKCFG */
+};
+
+static u8 sleep_regs_backup[ARRAY_SIZE(sleep_regs)];
+
static int tc3589x_suspend(struct device *dev)
{
struct tc3589x *tc3589x = dev_get_drvdata(dev);
struct i2c_client *client = tc3589x->i2c;
int ret = 0;
+ int i, j;
+ int val;
+
+ /* Put the system to sleep mode */
+ if (!device_may_wakeup(&client->dev)) {
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ val = tc3589x_reg_read(tc3589x,
+ sleep_regs[i]);
+ if (val < 0)
+ goto out;
+
+ sleep_regs_backup[i] = (u8) (val & 0xff);
+ }
- /* put the system to sleep mode */
- if (!device_may_wakeup(&client->dev))
- ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE,
- TC3589x_CLKMODE_MODCTL_SLEEP);
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ ret = tc3589x_reg_write(tc3589x,
+ sleep_regs[i],
+ sleep_regs_val[i]);
+ if (ret < 0)
+ goto fail;
+
+ }
+
+ ret = tc3589x_reg_write(tc3589x,
+ TC3589x_CLKMODE,
+ TC3589x_CLKMODE_MODCTL_SLEEP);
+ } else {
+ enable_irq_wake(client->irq);
+ }
+out:
+ return ret;
+fail:
+ for (j = 0; j <= i; j++) {
+ ret = tc3589x_reg_write(tc3589x,
+ sleep_regs[j],
+ sleep_regs_backup[j]);
+ if (ret < 0)
+ break;
+ }
return ret;
}
@@ -377,12 +475,29 @@ static int tc3589x_resume(struct device *dev)
struct tc3589x *tc3589x = dev_get_drvdata(dev);
struct i2c_client *client = tc3589x->i2c;
int ret = 0;
+ int i;
- /* enable the system into operation */
+ /* Bring the system back into operation */
if (!device_may_wakeup(&client->dev))
- ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE,
- TC3589x_CLKMODE_MODCTL_OPERATION);
-
+ {
+ ret = tc3589x_reg_write(tc3589x,
+ TC3589x_CLKMODE,
+ TC3589x_CLKMODE_MODCTL_OPERATION);
+ if (ret < 0)
+ goto out;
+
+ for (i = ARRAY_SIZE(sleep_regs) - 1; i >= 0; i--) {
+ ret = tc3589x_reg_write(tc3589x,
+ sleep_regs[i],
+ sleep_regs_backup[i]);
+ /* Not much to do here if we fail */
+ if (ret < 0)
+ break;
+ }
+ } else {
+ disable_irq_wake(client->irq);
+ }
+out:
return ret;
}
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index a293b978e27..d7b9e0c60ea 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -195,6 +195,7 @@ static int __devinit tps6105x_probe(struct i2c_client *client,
return 0;
fail:
+ i2c_set_clientdata(client, NULL);
kfree(tps6105x);
return ret;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c7795096d43..6c97ebfb5b9 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -451,6 +451,20 @@ config ARM_CHARLCD
line and the Linux version on the second line, but that's
still useful.
+config STE_TRACE_MODEM
+ tristate "DB8500 trace Modem"
+ depends on ARCH_U8500
+ default n
+ help
+ Select this option to enable modem tracing by APE
+
+config DBX500_MLOADER
+ tristate "Modem firmware loader for db8500"
+ default n
+ depends on UX500_SOC_DB8500 || UX500_SOC_DB5500
+ help
+ Provides a user interface to load modem firmware on dbx500 SoCs.
+
config BMP085
tristate "BMP085 digital pressure sensor"
depends on I2C && SYSFS
@@ -461,6 +475,24 @@ config BMP085
To compile this driver as a module, choose M here: the
module will be called bmp085.
+config DISPDEV
+ bool "Display overlay device"
+ depends on FB_MCDE
+ default n
+ help
+ This driver provides a way to use a second overlay for a display (in
+ addition to the framebuffer). The device allows for registration of
+ userspace buffers to be used with the overlay.
+
+config COMPDEV
+ bool "Display composition device"
+ depends on FB_MCDE && HWMEM
+ default n
+ help
+ This driver provides a way to use several overlays for a display.
+ This driver replaces the use of the framebuffer. The device allows
+ for posting userspace buffers to be used with the overlays.
+
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
depends on PCI
@@ -481,6 +513,30 @@ config PCH_PHUB
To compile this driver as a module, choose M here: the module will
be called pch_phub.
+config HWMEM
+ bool "Hardware memory driver"
+ default n
+ help
+ This driver provides a way to allocate contiguous system memory which
+ can be used by hardware. It also enables accessing hwmem allocated
+ memory buffers through a secure id which can be shared across processes.
+
+config U5500_MBOX
+ bool "Mailbox support"
+ depends on (UX500_SOC_DB5500 && U5500_MODEM_IRQ)
+ default y
+ help
+ Add support for U5500 mailbox communication with the modem side.
+
+config U8500_SIM_DETECT
+ bool "Sim hot swap detection support"
+ depends on (MODEM && UX500_SOC_DB8500)
+ default n
+ help
+ Add support for SIM hot swap detection in U8500. The driver
+ wakes up the modem, if it is sleeping, when a SIM hot plug
+ in/out has happened.
+
config USB_SWITCH_FSA9480
tristate "FSA9480 USB Switch"
depends on I2C
@@ -498,6 +554,7 @@ config MAX8997_MUIC
Maxim MAX8997 PMIC.
The MAX8997 MUIC is a USB port accessory detector and switch.
+source "drivers/misc/Kconfig.stm"
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -506,4 +563,5 @@ source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
+source "drivers/misc/modem_audio/Kconfig"
endmenu
diff --git a/drivers/misc/Kconfig.stm b/drivers/misc/Kconfig.stm
new file mode 100644
index 00000000000..d509c85c79f
--- /dev/null
+++ b/drivers/misc/Kconfig.stm
@@ -0,0 +1,120 @@
+menuconfig STM_TRACE
+ bool "STM MIPI Trace driver"
+ depends on ARCH_U8500
+ help
+ Simple System Trace Module driver. It allows the STM to be used and
+ configured, either from kernel space or from user space.
+
+if STM_TRACE
+
+config STM_NUMBER_OF_CHANNEL
+ int
+ default 512 if ARCH_U8500
+ default 256
+ help
+ Maximum number of channels; always a multiple of 256.
+
+config STM_DEFAULT_MASTERS_MODES
+ hex "channel mode"
+ default 0xffffffff
+ help
+ Default config for enabling hardware mode tracing
+
+config STM_PRINTK
+ bool "printk support"
+ depends on STM_TRACE
+ help
+ Duplicate printk output on the STM printk channel and activate stm_printk.
+
+config STM_PRINTK_CHANNEL
+ int "printk channel"
+ range 0 255
+ depends on STM_PRINTK
+ default 255
+ help
+ STM printk channel number
+
+config STM_FTRACE
+ bool "functions tracing"
+ depends on FTRACE
+ default y
+ help
+ Output function tracing on STM dedicated channel
+
+config STM_FTRACE_CHANNEL
+ int "ftrace channel"
+ range 0 255
+ depends on STM_FTRACE
+ default 254
+ help
+ STM ftrace channel number
+
+config STM_CTX_SWITCH
+ bool "Context switch tracing"
+ depends on CONTEXT_SWITCH_TRACER
+ default y
+ help
+ Output scheduler context switch on STM dedicated channel
+
+config STM_CTX_SWITCH_CHANNEL
+ int "Context switch channel"
+ range 0 255
+ depends on STM_CTX_SWITCH
+ default 253
+ help
+ STM Context switch channel number
+
+config STM_WAKEUP
+ bool "Scheduler wakeup tracing"
+ depends on CONTEXT_SWITCH_TRACER
+ default y
+ help
+ Output scheduler wakeup on STM dedicated channel
+
+config STM_WAKEUP_CHANNEL
+ int "Wakeup channel"
+ range 0 255
+ depends on STM_WAKEUP
+ default 252
+ help
+ STM scheduler wakeup channel number
+
+config STM_STACK_TRACE
+ bool "Stack tracing"
+ depends on STACKTRACE
+ default y
+ help
+ Output stack tracing on STM dedicated channel
+
+config STM_STACK_TRACE_CHANNEL
+ int "Stack trace channel"
+ range 0 255
+ depends on STM_STACK_TRACE
+ default 251
+ help
+ STM stack trace channel number
+
+config STM_TRACE_PRINTK
+ bool "trace printk & binary printk support"
+ depends on TRACING
+ default y
+ help
+ Duplicate trace printk output on STM printk channel
+
+config STM_TRACE_PRINTK_CHANNEL
+ int "trace_printk channel"
+ range 0 255
+ depends on TRACING
+ default 250
+ help
+ STM trace_printk channel number
+
+config STM_TRACE_BPRINTK_CHANNEL
+ int "trace_bprintk channel"
+ range 0 255
+ depends on TRACING
+ default 249
+ help
+ STM trace binary printk channel number
+
+endif
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 3e1d80106f0..17c50c1b1d2 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -46,6 +46,15 @@ obj-y += ti-st/
obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
obj-y += lis3lv02d/
obj-y += carma/
+obj-$(CONFIG_STM_TRACE) += stm.o
+obj-$(CONFIG_HWMEM) += hwmem/
+obj-$(CONFIG_DISPDEV) += dispdev/
+obj-$(CONFIG_COMPDEV) += compdev/
+obj-$(CONFIG_STE_TRACE_MODEM) += db8500-modem-trace.o
+obj-$(CONFIG_DBX500_MLOADER) += dbx500-mloader.o
+obj-$(CONFIG_U5500_MBOX) += mbox.o mbox_channels-db5500.o
+obj-$(CONFIG_U8500_SIM_DETECT) += sim_detect.o
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
+obj-y += modem_audio/
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index d7a9aa14e5d..7a928667169 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -8,6 +8,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pwm.h>
+#include <linux/clk.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/module.h>
@@ -27,8 +28,10 @@
struct pwm_device {
struct device *dev;
struct list_head node;
+ struct clk *clk;
const char *label;
unsigned int pwm_id;
+ bool clk_enabled;
};
static LIST_HEAD(pwm_list);
@@ -67,9 +70,17 @@ int pwm_enable(struct pwm_device *pwm)
{
int ret;
+ if (!pwm->clk_enabled) {
+ ret = clk_enable(pwm->clk);
+ if (ret < 0) {
+ dev_err(pwm->dev, "failed to enable clock\n");
+ return ret;
+ }
+ pwm->clk_enabled = true;
+ }
ret = abx500_mask_and_set_register_interruptible(pwm->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (pwm->pwm_id-1), ENABLE_PWM);
+ 1 << (pwm->pwm_id-1), 1 << (pwm->pwm_id-1));
if (ret < 0)
dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
pwm->label, ret);
@@ -84,9 +95,27 @@ void pwm_disable(struct pwm_device *pwm)
ret = abx500_mask_and_set_register_interruptible(pwm->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << (pwm->pwm_id-1), DISABLE_PWM);
+ /*
+ * Workaround to set PWM in disable.
+ * If enable bit is not toggled the PWM might output 50/50 duty cycle
+ * even though it should be disabled
+ */
+ ret &= abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1),
+ ENABLE_PWM << (pwm->pwm_id-1));
+ ret &= abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1), DISABLE_PWM);
+
if (ret < 0)
dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
pwm->label, ret);
+ if (pwm->clk_enabled) {
+ clk_disable(pwm->clk);
+ pwm->clk_enabled = false;
+ }
+
return;
}
EXPORT_SYMBOL(pwm_disable);
@@ -116,6 +145,8 @@ EXPORT_SYMBOL(pwm_free);
static int __devinit ab8500_pwm_probe(struct platform_device *pdev)
{
struct pwm_device *pwm;
+ int ret = 0;
+
/*
* Nothing to be done in probe, this is required to get the
* device which is required for ab8500 read and write
@@ -129,14 +160,24 @@ static int __devinit ab8500_pwm_probe(struct platform_device *pdev)
pwm->pwm_id = pdev->id;
list_add_tail(&pwm->node, &pwm_list);
platform_set_drvdata(pdev, pwm);
+
+ pwm->clk = clk_get(pwm->dev, NULL);
+ if (IS_ERR(pwm->clk)) {
+ dev_err(pwm->dev, "clock request failed\n");
+ ret = PTR_ERR(pwm->clk);
+ kfree(pwm);
+ return ret;
+ }
+ pwm->clk_enabled = false;
dev_dbg(pwm->dev, "pwm probe successful\n");
- return 0;
+ return ret;
}
static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
{
struct pwm_device *pwm = platform_get_drvdata(pdev);
list_del(&pwm->node);
+ clk_put(pwm->clk);
dev_dbg(&pdev->dev, "pwm driver removed\n");
kfree(pwm);
return 0;
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index bfeea9ba702..3dbbf52a126 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -18,11 +18,17 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/i2c.h>
+#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
#define BH1780_REG_CONTROL 0x80
#define BH1780_REG_PARTID 0x8A
@@ -40,11 +46,20 @@
struct bh1780_data {
struct i2c_client *client;
+ struct regulator *regulator;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
int power_state;
/* lock for sysfs operations */
struct mutex lock;
};
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void bh1780_early_suspend(struct early_suspend *ddata);
+static void bh1780_late_resume(struct early_suspend *ddata);
+#endif
+
static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
{
int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
@@ -72,6 +87,9 @@ static ssize_t bh1780_show_lux(struct device *dev,
struct bh1780_data *ddata = platform_get_drvdata(pdev);
int lsb, msb;
+ if (ddata->power_state == BH1780_POFF)
+ return -EINVAL;
+
lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
if (lsb < 0)
return lsb;
@@ -89,13 +107,9 @@ static ssize_t bh1780_show_power_state(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct bh1780_data *ddata = platform_get_drvdata(pdev);
- int state;
-
- state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
- if (state < 0)
- return state;
- return sprintf(buf, "%d\n", state & BH1780_POWMASK);
+ /* we already maintain a sw state */
+ return sprintf(buf, "%d\n", ddata->power_state);
}
static ssize_t bh1780_store_power_state(struct device *dev,
@@ -104,7 +118,7 @@ static ssize_t bh1780_store_power_state(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct bh1780_data *ddata = platform_get_drvdata(pdev);
- unsigned long val;
+ long val;
int error;
error = strict_strtoul(buf, 0, &val);
@@ -114,15 +128,25 @@ static ssize_t bh1780_store_power_state(struct device *dev,
if (val < BH1780_POFF || val > BH1780_PON)
return -EINVAL;
+ if (ddata->power_state == val)
+ return count;
+
mutex_lock(&ddata->lock);
+ if (ddata->power_state == BH1780_POFF)
+ regulator_enable(ddata->regulator);
+
error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
if (error < 0) {
mutex_unlock(&ddata->lock);
+ regulator_disable(ddata->regulator);
return error;
}
- msleep(BH1780_PON_DELAY);
+ if (val == BH1780_POFF)
+ regulator_disable(ddata->regulator);
+
+ mdelay(BH1780_PON_DELAY);
ddata->power_state = val;
mutex_unlock(&ddata->lock);
@@ -131,7 +155,7 @@ static ssize_t bh1780_store_power_state(struct device *dev,
static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
-static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(power_state, S_IWUGO | S_IRUGO,
bh1780_show_power_state, bh1780_store_power_state);
static struct attribute *bh1780_attributes[] = {
@@ -153,21 +177,42 @@ static int __devinit bh1780_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) {
ret = -EIO;
- goto err_op_failed;
+ return ret;
}
ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL);
if (ddata == NULL) {
+ dev_err(&client->dev, "failed to alloc ddata\n");
ret = -ENOMEM;
- goto err_op_failed;
+ return ret;
}
ddata->client = client;
i2c_set_clientdata(client, ddata);
+ ddata->regulator = regulator_get(&client->dev, "vcc");
+ if (IS_ERR(ddata->regulator)) {
+ dev_err(&client->dev, "failed to get regulator\n");
+ ret = PTR_ERR(ddata->regulator);
+ goto free_ddata;
+ }
+
+ regulator_enable(ddata->regulator);
+
ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
- if (ret < 0)
- goto err_op_failed;
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read part ID\n");
+ goto disable_regulator;
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ddata->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ddata->early_suspend.suspend = bh1780_early_suspend;
+ ddata->early_suspend.resume = bh1780_late_resume;
+ register_early_suspend(&ddata->early_suspend);
+#endif
+
+ regulator_disable(ddata->regulator);
+ ddata->power_state = BH1780_POFF;
dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
(ret & BH1780_REVMASK));
@@ -175,12 +220,17 @@ static int __devinit bh1780_probe(struct i2c_client *client,
mutex_init(&ddata->lock);
ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
- if (ret)
- goto err_op_failed;
+ if (ret) {
+ dev_err(&client->dev, "failed to create sysfs group\n");
+ goto put_regulator;
+ }
return 0;
-
-err_op_failed:
+disable_regulator:
+ regulator_disable(ddata->regulator);
+put_regulator:
+ regulator_put(ddata->regulator);
+free_ddata:
kfree(ddata);
return ret;
}
@@ -196,50 +246,106 @@ static int __devexit bh1780_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
-static int bh1780_suspend(struct device *dev)
+#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_PM)
+static int bh1780_do_suspend(struct bh1780_data *ddata)
{
- struct bh1780_data *ddata;
- int state, ret;
- struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
- ddata = i2c_get_clientdata(client);
- state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
- if (state < 0)
- return state;
+ mutex_lock(&ddata->lock);
- ddata->power_state = state & BH1780_POWMASK;
+ if (ddata->power_state == BH1780_POFF)
+ goto unlock;
- ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
- "CONTROL");
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF, "CONTROL");
if (ret < 0)
- return ret;
+ goto unlock;
- return 0;
+ if (ddata->regulator)
+ regulator_disable(ddata->regulator);
+unlock:
+ mutex_unlock(&ddata->lock);
+ return ret;
}
-static int bh1780_resume(struct device *dev)
+static int bh1780_do_resume(struct bh1780_data *ddata)
{
- struct bh1780_data *ddata;
- int state, ret;
- struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
- ddata = i2c_get_clientdata(client);
- state = ddata->power_state;
- ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
- "CONTROL");
+ mutex_lock(&ddata->lock);
+
+ if (ddata->power_state == BH1780_POFF)
+ goto unlock;
+ if (ddata->regulator)
+ regulator_enable(ddata->regulator);
+
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL,
+ ddata->power_state, "CONTROL");
+
+unlock:
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
+#endif
+
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#ifdef CONFIG_PM
+static int bh1780_suspend(struct device *dev)
+{
+ struct bh1780_data *ddata = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = bh1780_do_suspend(ddata);
if (ret < 0)
- return ret;
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device\n");
- return 0;
+ return ret;
}
+
+static int bh1780_resume(struct device *dev)
+{
+ struct bh1780_data *ddata = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = bh1780_do_resume(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device\n");
+
+ return ret;
+}
+
static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
#define BH1780_PMOPS (&bh1780_pm)
+#endif /* CONFIG_PM */
#else
#define BH1780_PMOPS NULL
-#endif /* CONFIG_PM */
+static void bh1780_early_suspend(struct early_suspend *data)
+{
+ struct bh1780_data *ddata =
+ container_of(data, struct bh1780_data, early_suspend);
+ int ret;
+
+ ret = bh1780_do_suspend(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device\n");
+}
+
+static void bh1780_late_resume(struct early_suspend *data)
+{
+ struct bh1780_data *ddata =
+ container_of(data, struct bh1780_data, early_suspend);
+ int ret;
+
+ ret = bh1780_do_resume(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device\n");
+}
+#endif /*!CONFIG_HAS_EARLYSUSPEND */
static const struct i2c_device_id bh1780_id[] = {
{ "bh1780", 0 },
@@ -252,8 +358,10 @@ static struct i2c_driver bh1780_driver = {
.id_table = bh1780_id,
.driver = {
.name = "bh1780",
+#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
.pm = BH1780_PMOPS,
-},
+#endif
+ },
};
static int __init bh1780_init(void)
diff --git a/drivers/misc/compdev/Makefile b/drivers/misc/compdev/Makefile
new file mode 100644
index 00000000000..8d5cd14dc36
--- /dev/null
+++ b/drivers/misc/compdev/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_COMPDEV) += compdev.o
diff --git a/drivers/misc/compdev/compdev.c b/drivers/misc/compdev/compdev.c
new file mode 100644
index 00000000000..4810e20d4cd
--- /dev/null
+++ b/drivers/misc/compdev/compdev.c
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Display overlay compositer device driver
+ *
+ * Author: Anders Bauer <anders.bauer@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * Modified: Per-Daniel Olsson <per-daniel.olsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/ioctl.h>
+
+#include <linux/compdev.h>
+#include <linux/hwmem.h>
+#include <video/mcde_dss.h>
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_list_lock);
+
+struct compdev_buffer {
+ struct hwmem_alloc *alloc;
+ enum compdev_ptr_type type;
+ u32 size;
+ u32 paddr; /* if pinned */
+};
+
+struct compdev {
+ bool open;
+ struct mutex lock;
+ struct miscdevice mdev;
+ struct list_head list;
+ struct mcde_display_device *ddev;
+ struct mcde_overlay *ovly[NUM_COMPDEV_BUFS];
+ struct compdev_buffer ovly_buffer[NUM_COMPDEV_BUFS];
+ struct compdev_size phy_size;
+ enum mcde_display_rotation display_rotation;
+ enum compdev_rotation current_buffer_rotation;
+};
+
+static int compdev_open(struct inode *inode, struct file *file)
+{
+ struct compdev *cd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(cd, &dev_list, list)
+ if (cd->mdev.minor == iminor(inode))
+ break;
+
+ if (&cd->list == &dev_list) {
+ mutex_unlock(&dev_list_lock);
+ return -ENODEV;
+ }
+
+ if (cd->open) {
+ mutex_unlock(&dev_list_lock);
+ return -EBUSY;
+ }
+
+ cd->open = true;
+
+ mutex_unlock(&dev_list_lock);
+
+ file->private_data = cd;
+
+ return 0;
+}
+
+static int disable_overlay(struct mcde_overlay *ovly)
+{
+ struct mcde_overlay_info info;
+
+ mcde_dss_get_overlay_info(ovly, &info);
+ if (info.paddr != 0) {
+ /* Set the pointer to zero to disable the overlay */
+ info.paddr = 0;
+ mcde_dss_apply_overlay(ovly, &info);
+ }
+ return 0;
+}
+
+static int compdev_release(struct inode *inode, struct file *file)
+{
+ struct compdev *cd = NULL;
+ int i;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(cd, &dev_list, list)
+ if (cd->mdev.minor == iminor(inode))
+ break;
+ mutex_unlock(&dev_list_lock);
+
+ if (&cd->list == &dev_list)
+ return -ENODEV;
+
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++) {
+ disable_overlay(cd->ovly[i]);
+ if (cd->ovly_buffer[i].paddr &&
+ cd->ovly_buffer[i].type ==
+ COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET)
+ hwmem_unpin(cd->ovly_buffer[i].alloc);
+
+ cd->ovly_buffer[i].alloc = NULL;
+ cd->ovly_buffer[i].size = 0;
+ cd->ovly_buffer[i].paddr = 0;
+ }
+
+ cd->open = false;
+ return 0;
+}
+
+static enum mcde_ovly_pix_fmt get_ovly_fmt(enum compdev_fmt fmt)
+{
+ switch (fmt) {
+ default:
+ case COMPDEV_FMT_RGB565:
+ return MCDE_OVLYPIXFMT_RGB565;
+ case COMPDEV_FMT_RGB888:
+ return MCDE_OVLYPIXFMT_RGB888;
+ case COMPDEV_FMT_RGBA8888:
+ return MCDE_OVLYPIXFMT_RGBA8888;
+ case COMPDEV_FMT_RGBX8888:
+ return MCDE_OVLYPIXFMT_RGBX8888;
+ case COMPDEV_FMT_YUV422:
+ return MCDE_OVLYPIXFMT_YCbCr422;
+ }
+}
+
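+/*
+ * Resolve and pin the posted buffer (hwmem name or physical address),
+ * fill in the overlay info from the image descriptor and apply it to
+ * the given MCDE overlay.
+ */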
+static int compdev_setup_ovly(struct compdev_img *img,
+ struct compdev_buffer *buffer,
+ struct mcde_overlay *ovly,
+ int z_order,
+ struct compdev *cd)
+{
+ int ret = 0;
+ enum hwmem_mem_type memtype;
+ enum hwmem_access access;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 };
+ struct mcde_overlay_info info;
+
+ if (img->buf.type == COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET) {
+ buffer->type = COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET;
+ buffer->alloc = hwmem_resolve_by_name(img->buf.hwmem_buf_name);
+ if (IS_ERR(buffer->alloc)) {
+ ret = PTR_ERR(buffer->alloc);
+ dev_warn(cd->mdev.this_device,
+ "HWMEM resolve failed, %d\n", ret);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(buffer->alloc, &buffer->size, &memtype,
+ &access);
+
+ if (!(access & HWMEM_ACCESS_READ) ||
+ memtype != HWMEM_MEM_CONTIGUOUS_SYS) {
+ ret = -EACCES;
+ dev_warn(cd->mdev.this_device,
+ "Invalid_mem overlay, %d\n", ret);
+ goto invalid_mem;
+ }
+ ret = hwmem_pin(buffer->alloc, &mem_chunk, &mem_chunk_length);
+ if (ret) {
+ dev_warn(cd->mdev.this_device,
+ "Pin failed, %d\n", ret);
+ goto pin_failed;
+ }
+
+ rgn.size = rgn.end = buffer->size;
+ ret = hwmem_set_domain(buffer->alloc, HWMEM_ACCESS_READ,
+ HWMEM_DOMAIN_SYNC, &rgn);
+ if (ret)
+ dev_warn(cd->mdev.this_device,
+ "Set domain failed, %d\n", ret);
+
+ buffer->paddr = mem_chunk.paddr;
+ } else if (img->buf.type == COMPDEV_PTR_PHYSICAL) {
+ buffer->type = COMPDEV_PTR_PHYSICAL;
+ buffer->alloc = NULL;
+ buffer->size = img->buf.len;
+ buffer->paddr = img->buf.offset;
+ }
+
+ info.stride = img->pitch;
+ info.fmt = get_ovly_fmt(img->fmt);
+ info.src_x = 0;
+ info.src_y = 0;
+ info.dst_x = img->dst_rect.x;
+ info.dst_y = img->dst_rect.y;
+ info.dst_z = z_order;
+ info.w = img->dst_rect.width;
+ info.h = img->dst_rect.height;
+ info.dirty.x = 0;
+ info.dirty.y = 0;
+ info.dirty.w = cd->phy_size.width;
+ info.dirty.h = cd->phy_size.height;
+ info.paddr = buffer->paddr;
+ mcde_dss_apply_overlay(ovly, &info);
+ return ret;
+
+pin_failed:
+invalid_mem:
+ buffer->alloc = NULL;
+ buffer->size = 0;
+ buffer->paddr = 0;
+
+resolve_failed:
+ return ret;
+}
+
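+/*
+ * Update the video mode and rotation of the display: swap xres/yres
+ * when the combined display and buffer rotation is 90 or 270 degrees,
+ * then apply the new settings to the MCDE channel.
+ */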
+static int compdev_update_rotation(struct compdev *cd, enum compdev_rotation rotation)
+{
+ /* Set video mode */
+ struct mcde_video_mode vmode;
+ int ret = 0;
+
+ memset(&vmode, 0, sizeof(struct mcde_video_mode));
+ mcde_dss_get_video_mode(cd->ddev, &vmode);
+ if ((cd->display_rotation + rotation) % 180) {
+ vmode.xres = cd->phy_size.height;
+ vmode.yres = cd->phy_size.width;
+ } else {
+ vmode.xres = cd->phy_size.width;
+ vmode.yres = cd->phy_size.height;
+ }
+
+ ret = mcde_dss_set_video_mode(cd->ddev, &vmode);
+ if (ret != 0)
+ goto exit;
+
+ /* Set rotation */
+ ret = mcde_dss_set_rotation(cd->ddev, (cd->display_rotation + rotation) % 360);
+ if (ret != 0)
+ goto exit;
+
+ /* Apply */
+ ret = mcde_dss_apply_channel(cd->ddev);
+exit:
+ return ret;
+}
+
+static int release_prev_frame(struct compdev *cd)
+{
+ int ret = 0;
+ int i;
+
+ /* Handle unpin of previous buffers */
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++) {
+ if (cd->ovly_buffer[i].type ==
+ COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET &&
+ cd->ovly_buffer[i].paddr != 0) {
+ hwmem_unpin(cd->ovly_buffer[i].alloc);
+ hwmem_release(cd->ovly_buffer[i].alloc);
+ }
+ cd->ovly_buffer[i].alloc = NULL;
+ cd->ovly_buffer[i].size = 0;
+ cd->ovly_buffer[i].paddr = 0;
+ }
+ return ret;
+
+}
+
+static void check_buffer(struct compdev *cd,
+ struct compdev_buffer *overlay_buffer,
+ struct compdev_buf *posted_buffer)
+{
+ if (overlay_buffer->type == COMPDEV_PTR_PHYSICAL &&
+ posted_buffer->type == COMPDEV_PTR_PHYSICAL &&
+ overlay_buffer->paddr == posted_buffer->offset &&
+ overlay_buffer->paddr != 0)
+ dev_warn(cd->mdev.this_device,
+ "Posted buffer has the same address as a current overlay buffer\n");
+}
+
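+/*
+ * Post a new frame: release the previous buffers, clamp the buffer
+ * count, update the rotation if it changed, map each image onto an
+ * overlay in reverse order, disable unused overlays and trigger the
+ * display update.
+ */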
+static int compdev_post_buffers(struct compdev *cd,
+ struct compdev_post_buffers_req *req)
+{
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++)
+ for (j = 0; j < NUM_COMPDEV_BUFS; j++)
+ check_buffer(cd, &cd->ovly_buffer[i],
+ &req->img_buffers[j].buf);
+
+ /* Unpin the previous frame */
+ release_prev_frame(cd);
+
+ /* Validate buffer count */
+ if (req->buffer_count > NUM_COMPDEV_BUFS || req->buffer_count == 0) {
+ dev_warn(cd->mdev.this_device,
+ "Illegal buffer count, will be clamped to %d\n",
+ NUM_COMPDEV_BUFS);
+ req->buffer_count = NUM_COMPDEV_BUFS;
+ }
+
+ /* Set channel rotation */
+ if (req->buffer_count > 0 &&
+ (cd->current_buffer_rotation != req->rotation)) {
+ if (compdev_update_rotation(cd, req->rotation) != 0)
+ dev_warn(cd->mdev.this_device,
+ "Failed to update MCDE rotation (req->rotation = %d), %d\n",
+ req->rotation, ret);
+ else
+ cd->current_buffer_rotation = req->rotation;
+ }
+
+ /* Handle buffers */
+ for (i = 0; i < req->buffer_count; i++) {
+ int overlay_index = req->buffer_count - i - 1;
+ ret = compdev_setup_ovly(&req->img_buffers[i],
+ &cd->ovly_buffer[i], cd->ovly[overlay_index], i, cd);
+ if (ret)
+ dev_warn(cd->mdev.this_device,
+ "Failed to setup overlay[%d], %d\n", i, ret);
+ }
+
+ for (i = NUM_COMPDEV_BUFS; i > req->buffer_count; i--)
+ disable_overlay(cd->ovly[i-1]);
+
+ /* Do the display update */
+ if (req->buffer_count > 0)
+ mcde_dss_update_overlay(cd->ovly[0], false);
+ else
+ dev_warn(cd->mdev.this_device, "No overlays requested\n");
+ return ret;
+}
+
+static long compdev_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct compdev *cd = (struct compdev *)file->private_data;
+ struct compdev_post_buffers_req req;
+
+ mutex_lock(&cd->lock);
+
+ switch (cmd) {
+ case COMPDEV_GET_SIZE_IOC:
+ {
+ struct compdev_size tmp;
+ if ((cd->display_rotation) % 180) {
+ tmp.height = cd->phy_size.width;
+ tmp.width = cd->phy_size.height;
+ } else {
+ tmp.height = cd->phy_size.height;
+ tmp.width = cd->phy_size.width;
+ }
+ ret = copy_to_user((void __user *)arg, &tmp,
+ sizeof(tmp));
+ if (ret)
+ ret = -EFAULT;
+ }
+ break;
+ case COMPDEV_POST_BUFFERS_IOC:
+ /* arg is user pointer to struct compdev_post_buffers_req */
+
+ /* Initialize the structure */
+ memset(&req, 0, sizeof(req));
+
+ /*
+ * The user request is a sub structure of the
+ * kernel request structure.
+ */
+
+ /* Get the user data */
+ if (copy_from_user(&req, (void *)arg, sizeof(req))) {
+ dev_warn(cd->mdev.this_device,
+ "%s: copy_from_user failed\n",
+ __func__);
+ mutex_unlock(&cd->lock);
+ return -EFAULT;
+ }
+
+ ret = compdev_post_buffers(cd, &req);
+
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&cd->lock);
+
+ return ret;
+}
+
+static const struct file_operations compdev_fops = {
+ .open = compdev_open,
+ .release = compdev_release,
+ .unlocked_ioctl = compdev_ioctl,
+};
+
+static void init_compdev(struct compdev *cd, struct mcde_display_device *ddev,
+ const char *name)
+{
+ mutex_init(&cd->lock);
+ INIT_LIST_HEAD(&cd->list);
+ cd->ddev = ddev;
+ cd->mdev.minor = MISC_DYNAMIC_MINOR;
+ cd->mdev.name = name;
+ cd->mdev.fops = &compdev_fops;
+}
+
+int compdev_create(struct mcde_display_device *ddev,
+ struct mcde_overlay *parent_ovly)
+{
+ int ret = 0;
+ int i;
+ struct compdev *cd;
+ struct mcde_video_mode vmode;
+ struct mcde_overlay_info info;
+
+ static int counter;
+ char name[10];
+
+ cd = kzalloc(sizeof(struct compdev), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s%d", COMPDEV_DEFAULT_DEVICE_PREFIX,
+ counter++);
+ init_compdev(cd, ddev, name);
+ mcde_dss_get_video_mode(ddev, &vmode);
+
+ cd->ovly[0] = parent_ovly;
+ if (!cd->ovly[0]) {
+ ret = -ENOMEM;
+ goto fail_create_ovly;
+ }
+
+ for (i = 1; i < NUM_COMPDEV_BUFS; i++) {
+ cd->ovly[i] = mcde_dss_create_overlay(ddev, &info);
+ if (!cd->ovly[i]) {
+ ret = -ENOMEM;
+ goto fail_create_ovly;
+ }
+ mcde_dss_enable_overlay(cd->ovly[i]);
+ disable_overlay(cd->ovly[i]);
+ }
+
+ mcde_dss_get_native_resolution(ddev, &cd->phy_size.width,
+ &cd->phy_size.height);
+
+ cd->display_rotation = mcde_dss_get_rotation(ddev);
+ cd->current_buffer_rotation = 0;
+
+ ret = misc_register(&cd->mdev);
+ if (ret)
+ goto fail_register_misc;
+ mutex_lock(&dev_list_lock);
+ list_add_tail(&cd->list, &dev_list);
+ mutex_unlock(&dev_list_lock);
+
+ goto out;
+
+fail_register_misc:
+fail_create_ovly:
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++) {
+ if (cd->ovly[i])
+ mcde_dss_destroy_overlay(cd->ovly[i]);
+ }
+ kfree(cd);
+out:
+ return ret;
+}
+
+void compdev_destroy(struct mcde_display_device *ddev)
+{
+ struct compdev *cd;
+ struct compdev *tmp;
+ int i;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(cd, tmp, &dev_list, list) {
+ if (cd->ddev == ddev) {
+ list_del(&cd->list);
+ misc_deregister(&cd->mdev);
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++)
+ mcde_dss_destroy_overlay(cd->ovly[i]);
+ kfree(cd);
+ break;
+ }
+ }
+ mutex_unlock(&dev_list_lock);
+}
+
+static void compdev_destroy_all(void)
+{
+ struct compdev *cd;
+ struct compdev *tmp;
+ int i;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(cd, tmp, &dev_list, list) {
+ list_del(&cd->list);
+ misc_deregister(&cd->mdev);
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++)
+ mcde_dss_destroy_overlay(cd->ovly[i]);
+ kfree(cd);
+ }
+ mutex_unlock(&dev_list_lock);
+
+ mutex_destroy(&dev_list_lock);
+}
+
+static int __init compdev_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ mutex_init(&dev_list_lock);
+
+ return 0;
+}
+module_init(compdev_init);
+
+static void __exit compdev_exit(void)
+{
+ compdev_destroy_all();
+ pr_info("%s\n", __func__);
+}
+module_exit(compdev_exit);
+
+MODULE_AUTHOR("Anders Bauer <anders.bauer@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Display overlay device driver");
+
diff --git a/drivers/misc/db8500-modem-trace.c b/drivers/misc/db8500-modem-trace.c
new file mode 100644
index 00000000000..b757b742121
--- /dev/null
+++ b/drivers/misc/db8500-modem-trace.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors: Michel JAOUEN <michel.jaouen@stericsson.com>
+ * Maxime COQUELIN <maxime.coquelin-nonst@stericsson.com>
+ * for ST-Ericsson
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/mman.h>
+#include <linux/db8500-modem-trace.h>
+
+#include <mach/hardware.h>
+
+#define DEVICE_NAME "db8500-modem-trace"
+
+/*
+ * Activation of this flag triggers the initialization of two buffers:
+ * 4 Kbytes with id 0xdeadbeef and 16 Kbytes with id 0xfadafada.
+ * We assume the platform provides at least 20 Kbytes.
+ */
+
+struct trace {
+ u32 start;
+ u32 end;
+ u32 mdm_base;
+ u32 ape_base;
+ void __iomem *area;
+ /* spinlock forbidding concurrent access to the same trace buffer */
+ spinlock_t lock;
+ struct device *dev;
+ struct miscdevice misc_dev;
+};
+
+struct trace_modem {
+ u32 phys_addr;
+ u8 filler;
+};
+
+static struct trace *trace_priv;
+
+
+/* all these definitions are tied to the modem interface */
+#define MODEM_MARKER 0x88
+/* the free marker is also written to the filler */
+#define FREE_MARKER 0xa5
+#define FREE_MARKER_2 0xa5a5
+#define READ_MARKER 0x5a
+
+struct buffer_header {
+ u8 pattern;
+ u8 filler;
+ u16 head_size;
+};
+
+
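+/*
+ * Handle a TM_TRACE_REQ request: translate the modem physical address
+ * into the APE address range, validate it, copy the trace buffer to
+ * userspace and mark the buffer header as free again.
+ */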
+static int trace_read(unsigned long arg)
+{
+ struct modem_trace_req req;
+ struct buffer_header *pt;
+ char tmp_char;
+
+ if (copy_from_user(&req, (struct modem_trace_req *)arg,
+ sizeof(struct modem_trace_req)))
+ return -EFAULT;
+
+ /* convert the modem physical address to the APE physical address range */
+ if (req.phys_addr < trace_priv->mdm_base) {
+ dev_err(trace_priv->dev, "MODEM ADDR incorrect\n");
+ return -EINVAL;
+ }
+ req.phys_addr += trace_priv->ape_base - trace_priv->mdm_base;
+
+ /* check request is in the range and aligned */
+ if ((req.phys_addr % 4 != 0)
+ || (req.phys_addr < trace_priv->start)
+ || (req.phys_addr + req.size) >= trace_priv->end) {
+ dev_err(trace_priv->dev, "req out of range %x %x\n",
+ req.phys_addr, req.size);
+ return -EINVAL;
+ }
+
+ /* perform access to memory area */
+ pt = (struct buffer_header *)((u32)trace_priv->area +
+ req.phys_addr - trace_priv->start);
+
+ /*
+ * In case several requests arrive for the same trace buffer,
+ * take the spinlock.
+ */
+ spin_lock(&trace_priv->lock);
+ if (pt->pattern != MODEM_MARKER) {
+ /* pattern not matching */
+ dev_err(trace_priv->dev,
+ "req not matching filler %x/%x and/or pattern %x\n",
+ req.filler, pt->filler, pt->pattern);
+ spin_unlock(&trace_priv->lock);
+ return -EINVAL;
+ }
+ /* mark pattern as read and unlock spin */
+ pt->pattern = READ_MARKER;
+ spin_unlock(&trace_priv->lock);
+
+ req.size -= copy_to_user(req.buff, pt, req.size);
+
+ pt->pattern = FREE_MARKER;
+ pt->filler = FREE_MARKER;
+ tmp_char = MODEM_MARKER;
+
+ /* Update marker for trace tool */
+ if (copy_to_user(req.buff, &tmp_char, 1))
+ return -EFAULT;
+
+ /* Update effective written size */
+ if (copy_to_user((struct modem_trace_req *)arg, &req,
+ sizeof(struct modem_trace_req)))
+ return -EFAULT;
+
+ return 0;
+}
+
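+/*
+ * mmap() handler for trace dumps: map the whole trace area read-only
+ * into the caller's VMA.
+ */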
+static int trace_mmapdump(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long vma_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if ((vma->vm_end - vma->vm_start) <
+ (trace_priv->end - trace_priv->start))
+ return -EINVAL;
+ if (remap_pfn_range(vma,
+ vma_start,
+ trace_priv->start >> PAGE_SHIFT,
+ trace_priv->end - trace_priv->start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+
+static long trace_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+ void __user *argp = (void __user *)arg;
+ unsigned long size = trace_priv->end-trace_priv->start;
+
+ switch (cmd) {
+ case TM_GET_DUMPINFO:
+ ret = put_user(size, (unsigned long *)argp);
+ break;
+ case TM_TRACE_REQ:
+ ret = trace_read(arg);
+ break;
+
+ default:
+ ret = -EPERM;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations trace_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = trace_ioctl,
+ .mmap = trace_mmapdump
+};
+
+static int trace_probe(struct platform_device *pdev)
+{
+ int rv = 0;
+ struct db8500_trace_platform_data *pdata = pdev->dev.platform_data;
+ /* retrieve the area descriptor from the platform device resource */
+ struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!mem || ((mem->start == 0) && (mem->end == 0))) {
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if ((pdata->ape_base == 0) || (pdata->modem_base == 0)) {
+ rv = -EINVAL;
+ goto out;
+ }
+
+ trace_priv = kzalloc(sizeof(*trace_priv), GFP_ATOMIC);
+ if (!trace_priv) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ trace_priv->dev = &pdev->dev;
+ trace_priv->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ trace_priv->misc_dev.name = DEVICE_NAME;
+ trace_priv->misc_dev.fops = &trace_fops;
+ trace_priv->area = (void __iomem *)ioremap_nocache(mem->start,
+ resource_size(mem));
+ if (!trace_priv->area) {
+ rv = -ENOMEM;
+ goto outfree;
+ }
+
+ trace_priv->start = mem->start;
+ trace_priv->end = mem->end;
+
+ trace_priv->mdm_base = pdata->modem_base;
+ trace_priv->ape_base = pdata->ape_base;
+
+ /* spinlock allowing SMP access when reading/writing the trace buffer header */
+ spin_lock_init(&trace_priv->lock);
+
+ rv = misc_register(&trace_priv->misc_dev);
+ if (rv) {
+ dev_err(&pdev->dev, "can't misc_register\n");
+ goto outunmap;
+ }
+
+ return rv;
+
+outunmap:
+ iounmap(trace_priv->area);
+outfree:
+ kfree(trace_priv);
+out:
+ return rv;
+
+}
+
+static int trace_remove(struct platform_device *pdev)
+{
+ int rv = 0;
+
+ if (trace_priv) {
+ rv = misc_deregister(&trace_priv->misc_dev);
+ iounmap(trace_priv->area);
+ kfree(trace_priv);
+ }
+
+ return rv;
+}
+
+static struct platform_driver trace_driver = {
+ .probe = trace_probe,
+ .remove = trace_remove,
+ .driver = {
+ .name = "db8500-modem-trace",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int trace_init(void)
+{
+ return platform_driver_register(&trace_driver);
+}
+static void trace_exit(void)
+{
+ platform_driver_unregister(&trace_driver);
+}
+module_init(trace_init);
+module_exit(trace_exit);
+
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/dbx500-mloader.c b/drivers/misc/dbx500-mloader.c
new file mode 100644
index 00000000000..c3ec8b67983
--- /dev/null
+++ b/drivers/misc/dbx500-mloader.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Ludovic Barre <ludovic.barre@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/mman.h>
+#include <linux/io.h>
+
+#include <mach/mloader-dbx500.h>
+#include <linux/mloader.h>
+
+#define DEVICE_NAME "dbx500_mloader_fw"
+
+struct mloader_priv {
+ struct platform_device *pdev;
+ struct dbx500_mloader_pdata *pdata;
+ struct miscdevice misc_dev;
+ u32 aeras_size;
+};
+
+static struct mloader_priv *mloader_priv;
+
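+/*
+ * Load one firmware image: request it via request_firmware(), check
+ * that it fits in its destination area, ioremap the area and copy the
+ * image with memcpy_toio().
+ */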
+static int mloader_fw_send(struct dbx500_ml_fw *fw_info)
+{
+ const struct firmware *fw;
+ unsigned long size;
+ unsigned long phys_start;
+ void *fw_data;
+ void *vaddr;
+ void __iomem *ioaddr;
+ int ret;
+
+ ret = request_firmware(&fw, fw_info->name, &mloader_priv->pdev->dev);
+ if (ret) {
+ dev_err(&mloader_priv->pdev->dev, "request firmware failed\n");
+ goto out;
+ }
+
+ if (fw->size > (fw_info->area->size - fw_info->offset)) {
+ dev_err(&mloader_priv->pdev->dev,
+ "fw:%s is too big for:%s\n",
+ fw_info->name, fw_info->area->name);
+ ret = -EINVAL;
+ goto err_fw;
+ }
+
+ size = PAGE_ALIGN(fw->size);
+ phys_start = fw_info->area->start + fw_info->offset;
+ phys_start &= PAGE_MASK;
+ ioaddr = ioremap(phys_start, size);
+ if (!ioaddr) {
+ dev_err(&mloader_priv->pdev->dev,
+ "failed remap memory region.\n");
+ ret = -EINVAL;
+ goto err_fw;
+ }
+
+ vaddr = ioaddr + (fw_info->offset & ~PAGE_MASK);
+ fw_data = (void *)fw->data;
+ memcpy_toio(vaddr, fw_data, fw->size);
+ iounmap(ioaddr);
+
+err_fw:
+ release_firmware(fw);
+out:
+ return ret;
+}
+
+static int mloader_fw_upload(void)
+{
+ int i, ret;
+ struct dbx500_mloader_pdata *pdata = mloader_priv->pdata;
+
+ for (i = 0; i < pdata->nr_fws; i++) {
+ ret = mloader_fw_send(&pdata->fws[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_err(&mloader_priv->pdev->dev,
+ "Failed to upload %s firmware", pdata->fws[i].name);
+ return ret;
+}
+
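+/*
+ * mmap() handler used for modem dumps: map all areas back to back,
+ * read-only, into the caller's VMA.
+ */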
+static int mloader_fw_mmapdump(struct file *file, struct vm_area_struct *vma)
+{
+ int i;
+ unsigned long dump_size = 0;
+ unsigned long vma_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++)
+ dump_size += mloader_priv->pdata->areas[i].size;
+
+ if ((vma->vm_end - vma->vm_start) < dump_size)
+ return -EINVAL;
+
+ for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++) {
+ if (remap_pfn_range(vma,
+ vma_start,
+ mloader_priv->pdata->areas[i].start >> PAGE_SHIFT,
+ mloader_priv->pdata->areas[i].size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ vma_start += mloader_priv->pdata->areas[i].size;
+ }
+ return 0;
+}
+
+static void mloader_fw_dumpinfo(struct dump_image *images)
+{
+ u32 offset = 0;
+ int i;
+
+ for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++) {
+ strncpy(images[i].name,
+ mloader_priv->pdata->areas[i].name, MAX_NAME);
+ images[i].name[MAX_NAME-1] = 0;
+ images[i].offset = offset;
+ images[i].size = mloader_priv->pdata->areas[i].size;
+ offset += mloader_priv->pdata->areas[i].size;
+ }
+}
+
+static long mloader_fw_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case ML_UPLOAD:
+ ret = mloader_fw_upload();
+ break;
+ case ML_GET_NBIMAGES:
+ ret = put_user(mloader_priv->pdata->nr_areas,
+ (unsigned long __user *)argp);
+ break;
+ case ML_GET_DUMPINFO: {
+ struct dump_image *dump_images;
+ dump_images = kzalloc(mloader_priv->pdata->nr_areas
+ * sizeof(struct dump_image), GFP_ATOMIC);
+ if (!dump_images)
+ return -ENOMEM;
+ mloader_fw_dumpinfo(dump_images);
+ ret = copy_to_user(argp, (void *) dump_images,
+ mloader_priv->pdata->nr_areas
+ * sizeof(struct dump_image)) ? -EFAULT : 0;
+ kfree(dump_images);
+ break;
+ }
+ default:
+ ret = -EPERM;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct file_operations modem_fw_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = mloader_fw_ioctl,
+ .mmap = mloader_fw_mmapdump,
+};
+
+static int __devinit mloader_fw_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i;
+
+ mloader_priv = kzalloc(sizeof(*mloader_priv), GFP_KERNEL);
+ if (!mloader_priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mloader_priv->pdev = pdev;
+ mloader_priv->pdata = pdev->dev.platform_data;
+
+ mloader_priv->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ mloader_priv->misc_dev.name = DEVICE_NAME;
+ mloader_priv->misc_dev.fops = &modem_fw_fops;
+ ret = misc_register(&mloader_priv->misc_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't misc_register\n");
+ goto err_free_priv;
+ }
+
+ dev_info(&mloader_priv->pdev->dev, "mloader device registered\n");
+
+ for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++) {
+ dev_dbg(&mloader_priv->pdev->dev,
+ "Area:%d (name:%s start:%x size:%x)\n",
+ i, mloader_priv->pdata->areas[i].name,
+ mloader_priv->pdata->areas[i].start,
+ mloader_priv->pdata->areas[i].size);
+ }
+
+ for (i = 0 ; i < mloader_priv->pdata->nr_fws ; i++) {
+ dev_dbg(&mloader_priv->pdev->dev,
+ "Firmware:%d (name:%s offset:%x "
+ "area_name:%s area_start:%x area_size:%x)\n",
+ i, mloader_priv->pdata->fws[i].name,
+ mloader_priv->pdata->fws[i].offset,
+ mloader_priv->pdata->fws[i].area->name,
+ mloader_priv->pdata->fws[i].area->start,
+ mloader_priv->pdata->fws[i].area->size);
+ }
+
+ return ret;
+
+err_free_priv:
+ kfree(mloader_priv);
+out:
+ return ret;
+}
+
+static int __devexit mloader_fw_remove(struct platform_device *pdev)
+{
+ int err;
+
+ err = misc_deregister(&mloader_priv->misc_dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "can't misc_deregister, %d\n", err);
+
+ kfree(mloader_priv);
+
+ return err;
+}
+
+static struct platform_driver mloader_fw_driver = {
+ .driver.name = DEVICE_NAME,
+ .driver.owner = THIS_MODULE,
+ .probe = mloader_fw_probe,
+ .remove = __devexit_p(mloader_fw_remove),
+};
+
+static int __init mloader_fw_init(void)
+{
+ return platform_driver_register(&mloader_fw_driver);
+}
+
+static void __exit mloader_fw_exit(void)
+{
+ platform_driver_unregister(&mloader_fw_driver);
+}
+
+module_init(mloader_fw_init);
+module_exit(mloader_fw_exit);
+MODULE_DESCRIPTION("ST-Ericsson modem loader firmware");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@stericsson.com>");
diff --git a/drivers/misc/dispdev/Makefile b/drivers/misc/dispdev/Makefile
new file mode 100644
index 00000000000..11dc7611d26
--- /dev/null
+++ b/drivers/misc/dispdev/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DISPDEV) += dispdev.o
diff --git a/drivers/misc/dispdev/dispdev.c b/drivers/misc/dispdev/dispdev.c
new file mode 100644
index 00000000000..c504b69e80c
--- /dev/null
+++ b/drivers/misc/dispdev/dispdev.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Display output device driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/ioctl.h>
+
+#include <linux/dispdev.h>
+#include <linux/hwmem.h>
+#include <video/mcde_dss.h>
+
+#define DENSITY_CHECK (16)
+#define MAX_BUFFERS 4
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_list_lock);
+
+enum buffer_state {
+ BUF_UNUSED = 0,
+ BUF_QUEUED,
+ BUF_ACTIVATED,
+/*TODO:waitfordone BUF_DEACTIVATED,*/
+ BUF_FREE,
+ BUF_DEQUEUED,
+};
+
+struct dispdev_buffer {
+ struct hwmem_alloc *alloc;
+ u32 size;
+ enum buffer_state state;
+ u32 paddr; /* if pinned */
+};
+
+struct dispdev {
+ bool open;
+ struct mutex lock;
+ struct miscdevice mdev;
+ struct list_head list;
+ struct mcde_display_device *ddev;
+ struct mcde_overlay *ovly;
+ struct mcde_overlay *parent_ovly;
+ struct dispdev_config config;
+ bool overlay;
+ struct dispdev_buffer buffers[MAX_BUFFERS];
+ wait_queue_head_t waitq_dq;
+ /*
+ * For the rotation use case
+ * buffers_need_update is used to ensure that a set_config that
+ * changes width or height is followed by a unregister_buffer.
+ */
+ bool buffers_need_update;
+ /*
+ * For the overlay startup use case.
+ * first_update is used to handle the first update after a set_config.
+ * In this case a queue_buffer will arrive after set_config and not a
+ * unregister_buffer as in the rotation use case.
+ */
+ bool first_update;
+};
+
+static int find_buf(struct dispdev *dd, enum buffer_state state)
+{
+ int i;
+ for (i = 0; i < MAX_BUFFERS; i++)
+ if (dd->buffers[i].state == state)
+ return i;
+ return -1;
+}
+
+int dispdev_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct dispdev *dd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(dd, &dev_list, list)
+ if (dd->mdev.minor == iminor(inode))
+ break;
+
+ if (&dd->list == &dev_list) {
+ mutex_unlock(&dev_list_lock);
+ return -ENODEV;
+ }
+
+ if (dd->open) {
+ mutex_unlock(&dev_list_lock);
+ return -EBUSY;
+ }
+
+ dd->open = true;
+
+ mutex_unlock(&dev_list_lock);
+
+ ret = mcde_dss_enable_overlay(dd->ovly);
+ if (ret)
+ return ret;
+
+ file->private_data = dd;
+
+ return 0;
+}
+
+int dispdev_release(struct inode *inode, struct file *file)
+{
+ int i;
+ struct dispdev *dd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(dd, &dev_list, list)
+ if (dd->mdev.minor == iminor(inode))
+ break;
+ mutex_unlock(&dev_list_lock);
+
+ if (&dd->list == &dev_list)
+ return -ENODEV;
+
+ /* TODO: Make sure it waits for completion */
+ mcde_dss_disable_overlay(dd->ovly);
+ for (i = 0; i < MAX_BUFFERS; i++) {
+ if (dd->buffers[i].paddr)
+ hwmem_unpin(dd->buffers[i].alloc);
+ if (dd->buffers[i].alloc)
+ hwmem_release(dd->buffers[i].alloc);
+ dd->buffers[i].alloc = NULL;
+ dd->buffers[i].state = BUF_UNUSED;
+ dd->buffers[i].size = 0;
+ dd->buffers[i].paddr = 0;
+ }
+ dd->open = false;
+ wake_up(&dd->waitq_dq);
+ return 0;
+}
+
+static enum mcde_ovly_pix_fmt get_ovly_fmt(enum dispdev_fmt fmt)
+{
+ switch (fmt) {
+ default:
+ case DISPDEV_FMT_RGB565:
+ return MCDE_OVLYPIXFMT_RGB565;
+ case DISPDEV_FMT_RGB888:
+ return MCDE_OVLYPIXFMT_RGB888;
+ case DISPDEV_FMT_RGBA8888:
+ return MCDE_OVLYPIXFMT_RGBA8888;
+ case DISPDEV_FMT_RGBX8888:
+ return MCDE_OVLYPIXFMT_RGBX8888;
+ case DISPDEV_FMT_YUV422:
+ return MCDE_OVLYPIXFMT_YCbCr422;
+ }
+}
+
+static void get_ovly_info(struct dispdev_config *cfg,
+ struct mcde_video_mode *vmode,
+ struct mcde_overlay_info *info, bool overlay)
+{
+ info->paddr = 0;
+ info->stride = cfg->stride;
+ info->fmt = get_ovly_fmt(cfg->format);
+ info->src_x = 0;
+ info->src_y = 0;
+ info->dst_x = cfg->x;
+ info->dst_y = cfg->y;
+ info->dst_z = cfg->z;
+ info->w = cfg->width;
+ info->h = cfg->height;
+ info->dirty.x = 0;
+ info->dirty.y = 0;
+ info->dirty.w = vmode->xres;
+ info->dirty.h = vmode->yres;
+}
+
+static int dispdev_set_config(struct dispdev *dd, struct dispdev_config *cfg)
+{
+ int ret = 0;
+ if (memcmp(&dd->config, cfg, sizeof(struct dispdev_config)) == 0)
+ return 0;
+
+ /*
+ * Only update MCDE if format, stride, width and height
+ * are the same. Otherwise just store the new config and update
+ * MCDE on the next queued buffer, because the currently active
+ * buffer can have the wrong format, width, etc.
+ */
+ if (cfg->format == dd->config.format &&
+ cfg->stride == dd->config.stride &&
+ cfg->width == dd->config.width &&
+ cfg->height == dd->config.height) {
+
+ int buf_index;
+ if (!dd->buffers_need_update) {
+ buf_index = find_buf(dd, BUF_ACTIVATED);
+ if (buf_index >= 0) {
+ struct mcde_overlay_info info;
+ struct dispdev_buffer *buf;
+ struct mcde_video_mode vmode;
+
+ buf = &dd->buffers[buf_index];
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(cfg, &vmode, &info, dd->overlay);
+ info.paddr = buf->paddr;
+ ret = mcde_dss_apply_overlay(dd->ovly, &info);
+ if (!ret)
+ mcde_dss_update_overlay(dd->ovly,
+ false);
+ }
+ }
+ } else {
+ dd->buffers_need_update = true;
+ }
+
+ dd->config = *cfg;
+
+ return ret;
+}
+
+static int dispdev_register_buffer(struct dispdev *dd, s32 hwmem_name)
+{
+ int ret;
+ struct dispdev_buffer *buf;
+ enum hwmem_mem_type memtype;
+ enum hwmem_access access;
+
+ ret = find_buf(dd, BUF_UNUSED);
+ if (ret < 0)
+ return -ENOMEM;
+ buf = &dd->buffers[ret];
+ buf->alloc = hwmem_resolve_by_name(hwmem_name);
+ if (IS_ERR(buf->alloc)) {
+ ret = PTR_ERR(buf->alloc);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(buf->alloc, &buf->size, &memtype, &access);
+
+ if (!(access & HWMEM_ACCESS_READ) ||
+ memtype != HWMEM_MEM_CONTIGUOUS_SYS) {
+ ret = -EACCES;
+ goto invalid_mem;
+ }
+
+ buf->state = BUF_FREE;
+ goto out;
+invalid_mem:
+ hwmem_release(buf->alloc);
+resolve_failed:
+out:
+ return ret;
+}
+
+static int dispdev_unregister_buffer(struct dispdev *dd, u32 buf_idx)
+{
+ struct dispdev_buffer *buf;
+
+ if (buf_idx >= ARRAY_SIZE(dd->buffers))
+ return -EINVAL;
+
+ buf = &dd->buffers[buf_idx];
+ if (buf->state == BUF_UNUSED)
+ return -EINVAL;
+
+ if (dd->buffers_need_update)
+ dd->buffers_need_update = false;
+
+ if (buf->state == BUF_ACTIVATED) {
+ /* Disable the overlay */
+ struct mcde_overlay_info info;
+ struct mcde_video_mode vmode;
+ /* TODO Wait for frame done */
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, dd->overlay);
+ mcde_dss_apply_overlay(dd->ovly, &info);
+ mcde_dss_update_overlay(dd->ovly, false);
+ hwmem_unpin(dd->buffers[buf_idx].alloc);
+ }
+
+ hwmem_release(buf->alloc);
+ buf->state = BUF_UNUSED;
+ buf->alloc = NULL;
+ buf->size = 0;
+ buf->paddr = 0;
+ dd->first_update = false;
+
+ return 0;
+}
+
+
+/**
+ * @brief Check if the buffer is transparent or black (ARGB = X000)
+ * Note: Only for ARGB32.
+ * Worst case: an almost fully transparent buffer
+ * Results: ~2200us @800MHz for a WVGA screen, with DENSITY_CHECK=8
+ *          ~520us @800MHz for a WVGA screen, with DENSITY_CHECK=16
+ *
+ * @param w width
+ * @param h height
+ * @param addr buffer addr
+ *
+ * @return 1 if the buffer is transparent, else 0
+ */
+static int is_transparent(int w, int h, u32 *addr)
+{
+ int i, j;
+ u32 *c, *next_line;
+ u32 sum;
+
+ next_line = addr;
+ sum = 0;
+
+ /* TODO Optimize me */
+ for (j = 0; j < h; j += DENSITY_CHECK) {
+ c = next_line;
+ for (i = 0; i < w; i += DENSITY_CHECK) {
+ sum += ((*c) & 0x00FFFFFF);
+ c += DENSITY_CHECK;
+ }
+ if (sum)
+ return 0; /* Not "transparent" */
+ next_line += (w * DENSITY_CHECK);
+ }
+
+ return 1; /* "Transparent" */
+}
+
+static int dispdev_queue_buffer(struct dispdev *dd,
+ struct dispdev_buffer_info *buffer)
+{
+ int ret, i;
+ struct mcde_overlay_info info;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 };
+ struct hwmem_alloc *alloc;
+ struct mcde_video_mode vmode;
+ u32 buf_idx = buffer->buf_idx;
+
+ if (buf_idx >= ARRAY_SIZE(dd->buffers) ||
+ dd->buffers[buf_idx].state != BUF_DEQUEUED)
+ return -EINVAL;
+
+ alloc = dd->buffers[buf_idx].alloc;
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, dd->overlay);
+ ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length);
+ if (ret) {
+ dev_warn(dd->mdev.this_device, "Pin failed, %d\n", ret);
+ return -EINVAL;
+ }
+
+ rgn.size = rgn.end = dd->buffers[buf_idx].size;
+ ret = hwmem_set_domain(alloc, HWMEM_ACCESS_READ,
+ HWMEM_DOMAIN_SYNC, &rgn);
+ if (ret)
+ dev_warn(dd->mdev.this_device, "Set domain failed, %d\n", ret);
+
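+ /* Free the previously active buffer so a pending dequeue can reuse it. */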
+ i = find_buf(dd, BUF_ACTIVATED);
+ if (i >= 0) {
+ dd->buffers[i].state = BUF_FREE;
+ wake_up(&dd->waitq_dq);
+ }
+
+ if (!dd->first_update) {
+ dd->first_update = true;
+ dd->buffers_need_update = false;
+ }
+
+ dd->buffers[buf_idx].paddr = mem_chunk.paddr;
+
+ if (buffer->display_update && !dd->buffers_need_update &&
+ dd->config.width == buffer->buf_cfg.width &&
+ dd->config.height == buffer->buf_cfg.height &&
+ dd->config.format == buffer->buf_cfg.format &&
+ dd->config.stride == buffer->buf_cfg.stride) {
+ info.paddr = mem_chunk.paddr;
+ mcde_dss_apply_overlay(dd->ovly, &info);
+ mcde_dss_update_overlay(dd->ovly, false);
+ } else if (buffer->display_update) {
+ dd->buffers_need_update = true;
+ }
+
+ /* Disable the MCDE FB overlay */
+ if ((dd->parent_ovly->state != NULL) &&
+ (dd->ddev->check_transparency)) {
+ dd->ddev->check_transparency--;
+ mcde_dss_get_overlay_info(dd->parent_ovly, &info);
+ if (dd->ddev->check_transparency == 0) {
+ if (is_transparent(info.w, info.h, info.vaddr)) {
+ mcde_dss_disable_overlay(dd->parent_ovly);
+ printk(KERN_INFO "%s Disable overlay\n",
+ __func__);
+ }
+ }
+ }
+
+ dd->buffers[buf_idx].state = BUF_ACTIVATED;
+
+ return 0;
+}
+
+static int dispdev_dequeue_buffer(struct dispdev *dd)
+{
+ int i;
+
+ i = find_buf(dd, BUF_FREE);
+ if (i < 0) {
+ if (find_buf(dd, BUF_ACTIVATED) < 0)
+ return -EINVAL;
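+ /* Drop the device lock while waiting for a buffer to become free. */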
+ mutex_unlock(&dd->lock);
+ wait_event(dd->waitq_dq, (i = find_buf(dd, BUF_FREE)) >= 0);
+ mutex_lock(&dd->lock);
+ }
+ hwmem_unpin(dd->buffers[i].alloc);
+ dd->buffers[i].state = BUF_DEQUEUED;
+ dd->buffers[i].paddr = 0;
+
+ return i;
+}
+
+long dispdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct dispdev *dd = (struct dispdev *)file->private_data;
+
+ mutex_lock(&dd->lock);
+
+ switch (cmd) {
+ case DISPDEV_SET_CONFIG_IOC:
+ {
+ struct dispdev_config cfg;
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(cfg)))
+ ret = -EFAULT;
+ else
+ ret = dispdev_set_config(dd, &cfg);
+ }
+ break;
+ case DISPDEV_GET_CONFIG_IOC:
+ ret = copy_to_user((void __user *)arg, &dd->config,
+ sizeof(dd->config));
+ if (ret)
+ ret = -EFAULT;
+ break;
+ case DISPDEV_REGISTER_BUFFER_IOC:
+ ret = dispdev_register_buffer(dd, (s32)arg);
+ break;
+ case DISPDEV_UNREGISTER_BUFFER_IOC:
+ ret = dispdev_unregister_buffer(dd, (u32)arg);
+ break;
+ case DISPDEV_QUEUE_BUFFER_IOC:
+ {
+ struct dispdev_buffer_info buffer;
+ if (copy_from_user(&buffer, (void __user *)arg,
+ sizeof(buffer)))
+ ret = -EFAULT;
+ else
+ ret = dispdev_queue_buffer(dd, &buffer);
+ break;
+ }
+ case DISPDEV_DEQUEUE_BUFFER_IOC:
+ ret = dispdev_dequeue_buffer(dd);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&dd->lock);
+
+ return ret;
+}
+
+static const struct file_operations dispdev_fops = {
+ .open = dispdev_open,
+ .release = dispdev_release,
+ .unlocked_ioctl = dispdev_ioctl,
+};
+
+static void init_dispdev(struct dispdev *dd, struct mcde_display_device *ddev,
+ const char *name, bool overlay)
+{
+ u16 w, h;
+ int rotation;
+
+ mutex_init(&dd->lock);
+ INIT_LIST_HEAD(&dd->list);
+ dd->ddev = ddev;
+ dd->overlay = overlay;
+ mcde_dss_get_native_resolution(ddev, &w, &h);
+ rotation = mcde_dss_get_rotation(ddev);
+
+ if ((rotation == MCDE_DISPLAY_ROT_90_CCW) ||
+ (rotation == MCDE_DISPLAY_ROT_90_CW)) {
+ dd->config.width = h;
+ dd->config.height = w;
+ } else {
+ dd->config.width = w;
+ dd->config.height = h;
+ }
+ dd->config.format = DISPDEV_FMT_RGB565;
+ dd->config.stride = sizeof(u16) * w;
+ dd->config.x = 0;
+ dd->config.y = 0;
+ dd->config.z = 0;
+ dd->buffers_need_update = false;
+ dd->first_update = false;
+ init_waitqueue_head(&dd->waitq_dq);
+ dd->mdev.minor = MISC_DYNAMIC_MINOR;
+ dd->mdev.name = name;
+ dd->mdev.fops = &dispdev_fops;
+ pr_info("%s: name=%s w=%d, h=%d, fmt=%d, stride=%d\n", __func__, name,
+ dd->config.width, dd->config.height, dd->config.format,
+ dd->config.stride);
+}
+
+int dispdev_create(struct mcde_display_device *ddev, bool overlay,
+ struct mcde_overlay *parent_ovly)
+{
+ int ret = 0;
+ struct dispdev *dd;
+ struct mcde_video_mode vmode;
+ struct mcde_overlay_info info = {0};
+
+ static int counter;
+ char *name;
+
+ dd = kzalloc(sizeof(struct dispdev), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ /* The misc device keeps a pointer to the name, so allocate it. */
+ name = kasprintf(GFP_KERNEL, "%s%d", DISPDEV_DEFAULT_DEVICE_PREFIX,
+ counter++);
+ if (!name) {
+ kfree(dd);
+ return -ENOMEM;
+ }
+ init_dispdev(dd, ddev, name, overlay);
+
+ if (!overlay) {
+ ret = mcde_dss_enable_display(ddev);
+ if (ret)
+ goto fail_enable_display;
+ mcde_dss_get_video_mode(ddev, &vmode);
+ mcde_dss_try_video_mode(ddev, &vmode);
+ ret = mcde_dss_set_video_mode(ddev, &vmode);
+ if (ret)
+ goto fail_set_video_mode;
+ mcde_dss_set_pixel_format(ddev, info.fmt);
+ mcde_dss_apply_channel(ddev);
+ } else
+ mcde_dss_get_video_mode(ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, overlay);
+
+ /* Save the MCDE FB overlay */
+ dd->parent_ovly = parent_ovly;
+
+ dd->ovly = mcde_dss_create_overlay(ddev, &info);
+ if (!dd->ovly) {
+ ret = -ENOMEM;
+ goto fail_create_ovly;
+ }
+
+ ret = misc_register(&dd->mdev);
+ if (ret)
+ goto fail_register_misc;
+ mutex_lock(&dev_list_lock);
+ list_add_tail(&dd->list, &dev_list);
+ mutex_unlock(&dev_list_lock);
+
+ goto out;
+
+fail_register_misc:
+ mcde_dss_destroy_overlay(dd->ovly);
+fail_create_ovly:
+ if (!overlay)
+ mcde_dss_disable_display(ddev);
+fail_set_video_mode:
+fail_enable_display:
+ kfree(name);
+ kfree(dd);
+out:
+ return ret;
+}
+
+void dispdev_destroy(struct mcde_display_device *ddev)
+{
+ struct dispdev *dd;
+ struct dispdev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(dd, tmp, &dev_list, list) {
+ if (dd->ddev == ddev) {
+ list_del(&dd->list);
+ misc_deregister(&dd->mdev);
+ mcde_dss_destroy_overlay(dd->ovly);
+ /*
+ * TODO: Uncomment when DSS has reference
+ * counting of enable/disable
+ */
+ /* mcde_dss_disable_display(dd->ddev); */
+ kfree(dd);
+ break;
+ }
+ }
+ mutex_unlock(&dev_list_lock);
+}
+
+static void dispdev_destroy_all(void)
+{
+ struct dispdev *dd;
+ struct dispdev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(dd, tmp, &dev_list, list) {
+ list_del(&dd->list);
+ misc_deregister(&dd->mdev);
+ mcde_dss_destroy_overlay(dd->ovly);
+ /*
+ * TODO: Uncomment when DSS has reference
+ * counting of enable/disable
+ */
+ /* mcde_dss_disable_display(dd->ddev); */
+ kfree(dd);
+ }
+ mutex_unlock(&dev_list_lock);
+
+ mutex_destroy(&dev_list_lock);
+}
+
+static int __init dispdev_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ mutex_init(&dev_list_lock);
+
+ return 0;
+}
+module_init(dispdev_init);
+
+static void __exit dispdev_exit(void)
+{
+ dispdev_destroy_all();
+ pr_info("%s\n", __func__);
+}
+module_exit(dispdev_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Display output device driver");
+
diff --git a/drivers/misc/hwmem/Makefile b/drivers/misc/hwmem/Makefile
new file mode 100644
index 00000000000..c307616a181
--- /dev/null
+++ b/drivers/misc/hwmem/Makefile
@@ -0,0 +1,3 @@
+hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o contig_alloc.o
+
+obj-$(CONFIG_HWMEM) += hwmem.o
diff --git a/drivers/misc/hwmem/cache_handler.c b/drivers/misc/hwmem/cache_handler.c
new file mode 100644
index 00000000000..e0ab4ee6cf8
--- /dev/null
+++ b/drivers/misc/hwmem/cache_handler.c
@@ -0,0 +1,510 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Cache handler
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/hwmem.h>
+
+#include <asm/pgtable.h>
+
+#include <mach/dcache.h>
+
+#include "cache_handler.h"
+
+#define U32_MAX (~(u32)0)
+
+enum hwmem_alloc_flags cachi_get_cache_settings(
+ enum hwmem_alloc_flags requested_cache_settings);
+void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings,
+ pgprot_t *pgprot);
+
+static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access,
+ struct hwmem_region *region);
+static void sync_buf_post_cpu(struct cach_buf *buf,
+ enum hwmem_access next_access, struct hwmem_region *next_region);
+
+static void invalidate_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+static void clean_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+static void flush_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+
+static void null_range(struct cach_range *range);
+static void expand_range(struct cach_range *range,
+ struct cach_range *range_2_add);
+/*
+ * Expands range to one of enclosing_range's two edges. The function will
+ * choose which of enclosing_range's edges to expand range to in such a
+ * way that the size of range is minimized. range must be located inside
+ * enclosing_range.
+ */
+static void expand_range_2_edge(struct cach_range *range,
+ struct cach_range *enclosing_range);
+static void shrink_range(struct cach_range *range,
+ struct cach_range *range_2_remove);
+static bool is_non_empty_range(struct cach_range *range);
+static void intersect_range(struct cach_range *range_1,
+ struct cach_range *range_2, struct cach_range *intersection);
+/* Align_up restrictions apply here too */
+static void align_range_up(struct cach_range *range, u32 alignment);
+static u32 range_length(struct cach_range *range);
+static void region_2_range(struct hwmem_region *region, u32 buffer_size,
+ struct cach_range *range);
+
+static void *offset_2_vaddr(struct cach_buf *buf, u32 offset);
+static u32 offset_2_paddr(struct cach_buf *buf, u32 offset);
+
+/* Saturates, might return unaligned values when that happens */
+static u32 align_up(u32 value, u32 alignment);
+static u32 align_down(u32 value, u32 alignment);
+
+/*
+ * Exported functions
+ */
+
+void cach_init_buf(struct cach_buf *buf, enum hwmem_alloc_flags cache_settings,
+ u32 size)
+{
+ buf->vstart = NULL;
+ buf->pstart = 0;
+ buf->size = size;
+
+ buf->cache_settings = cachi_get_cache_settings(cache_settings);
+}
+
+void cach_set_buf_addrs(struct cach_buf *buf, void *vaddr, u32 paddr)
+{
+ bool tmp;
+
+ buf->vstart = vaddr;
+ buf->pstart = paddr;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ /*
+ * Keep whatever is in the cache. This way we avoid an
+ * unnecessary synch if CPU is the first user.
+ */
+ buf->range_in_cpu_cache.start = 0;
+ buf->range_in_cpu_cache.end = buf->size;
+ align_range_up(&buf->range_in_cpu_cache,
+ get_dcache_granularity());
+ buf->range_dirty_in_cpu_cache.start = 0;
+ buf->range_dirty_in_cpu_cache.end = buf->size;
+ align_range_up(&buf->range_dirty_in_cpu_cache,
+ get_dcache_granularity());
+ } else {
+ flush_cpu_dcache(buf->vstart, buf->pstart, buf->size, false,
+ &tmp);
+ drain_cpu_write_buf();
+
+ null_range(&buf->range_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ }
+ null_range(&buf->range_invalid_in_cpu_cache);
+}
+
+void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot)
+{
+ cachi_set_pgprot_cache_options(buf->cache_settings, pgprot);
+}
+
+void cach_set_domain(struct cach_buf *buf, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region)
+{
+ struct hwmem_region *__region;
+ struct hwmem_region full_region;
+
+ if (region != NULL) {
+ __region = region;
+ } else {
+ full_region.offset = 0;
+ full_region.count = 1;
+ full_region.start = 0;
+ full_region.end = buf->size;
+ full_region.size = buf->size;
+
+ __region = &full_region;
+ }
+
+ switch (domain) {
+ case HWMEM_DOMAIN_SYNC:
+ sync_buf_post_cpu(buf, access, __region);
+
+ break;
+
+ case HWMEM_DOMAIN_CPU:
+ sync_buf_pre_cpu(buf, access, __region);
+
+ break;
+ }
+}
+
+/*
+ * Local functions
+ */
+
+enum hwmem_alloc_flags __attribute__((weak)) cachi_get_cache_settings(
+ enum hwmem_alloc_flags requested_cache_settings)
+{
+ static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT |
+ HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY;
+ /* We don't know the cache setting so we assume worst case. */
+ static const u32 CACHE_SETTING = HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_CACHED | HWMEM_ALLOC_HINT_CACHE_WB |
+ HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
+
+ if (requested_cache_settings & CACHE_ON_FLAGS_MASK)
+ return CACHE_SETTING;
+ else if (requested_cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE ||
+ (requested_cache_settings & HWMEM_ALLOC_HINT_UNCACHED &&
+ !(requested_cache_settings &
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE)))
+ return HWMEM_ALLOC_HINT_WRITE_COMBINE;
+ else if (requested_cache_settings &
+ (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED))
+ return 0;
+ else
+ /* Nothing specified, use cached */
+ return CACHE_SETTING;
+}
+
+void __attribute__((weak)) cachi_set_pgprot_cache_options(
+ enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot)
+{
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHED)
+ *pgprot = *pgprot; /* To silence compiler and checkpatch */
+ else if (cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE)
+ *pgprot = pgprot_writecombine(*pgprot);
+ else
+ *pgprot = pgprot_noncached(*pgprot);
+}
+
+bool __attribute__((weak)) speculative_data_prefetch(void)
+{
+ /* We don't know so we go with the safe alternative */
+ return true;
+}
+
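+/*
+ * Prepare the buffer for CPU access: perform any deferred invalidates and
+ * record which ranges will be present (and dirty) in the CPU cache afterwards.
+ */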
+static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access,
+ struct hwmem_region *region)
+{
+ bool write = access & HWMEM_ACCESS_WRITE;
+ bool read = access & HWMEM_ACCESS_READ;
+
+ if (!write && !read)
+ return;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ struct cach_range region_range;
+
+ region_2_range(region, buf->size, &region_range);
+
+ if (read || (write && buf->cache_settings &
+ HWMEM_ALLOC_HINT_CACHE_WB))
+ /* Perform deferred invalidates */
+ invalidate_cpu_cache(buf, &region_range);
+ if (read || (write && buf->cache_settings &
+ HWMEM_ALLOC_HINT_CACHE_AOW))
+ expand_range(&buf->range_in_cpu_cache, &region_range);
+ if (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB) {
+ struct cach_range dirty_range_addition;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
+ dirty_range_addition = region_range;
+ else
+ intersect_range(&buf->range_in_cpu_cache,
+ &region_range, &dirty_range_addition);
+
+ expand_range(&buf->range_dirty_in_cpu_cache,
+ &dirty_range_addition);
+ }
+ }
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) {
+ if (write)
+ buf->in_cpu_write_buf = true;
+ }
+}
+
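+/*
+ * Hand the buffer over to a non-CPU user: clean dirty lines for reads, clean
+ * or flush for writes (the invalidate is deferred when the CPU prefetches
+ * speculatively), and drain the CPU write buffer if it was used.
+ */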
+static void sync_buf_post_cpu(struct cach_buf *buf,
+ enum hwmem_access next_access, struct hwmem_region *next_region)
+{
+ bool write = next_access & HWMEM_ACCESS_WRITE;
+ bool read = next_access & HWMEM_ACCESS_READ;
+ struct cach_range region_range;
+
+ if (!write && !read)
+ return;
+
+ region_2_range(next_region, buf->size, &region_range);
+
+ if (write) {
+ if (speculative_data_prefetch()) {
+ /* Defer invalidate */
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_in_cpu_cache,
+ &region_range, &intersection);
+
+ expand_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+
+ clean_cpu_cache(buf, &region_range);
+ } else {
+ flush_cpu_cache(buf, &region_range);
+ }
+ }
+ if (read)
+ clean_cpu_cache(buf, &region_range);
+
+ if (buf->in_cpu_write_buf) {
+ drain_cpu_write_buf();
+
+ buf->in_cpu_write_buf = false;
+ }
+}
+
+static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_invalid_in_cpu_cache, range,
+ &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool flushed_everything;
+
+ expand_range_2_edge(&intersection,
+ &buf->range_invalid_in_cpu_cache);
+
+ /*
+ * Cache handler never uses invalidate to discard data in the
+ * cache so we can use flush instead which is considerably
+ * faster for large buffers.
+ */
+ flush_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &flushed_everything);
+
+ if (flushed_everything) {
+ null_range(&buf->range_invalid_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ } else {
+ /*
+ * No need to shrink range_in_cpu_cache as invalidate
+ * is only used when we can't keep track of what's in
+ * the CPU cache.
+ */
+ shrink_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+ }
+ }
+}
+
+static void clean_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_dirty_in_cpu_cache, range, &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool cleaned_everything;
+
+ expand_range_2_edge(&intersection,
+ &buf->range_dirty_in_cpu_cache);
+
+ clean_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &cleaned_everything);
+
+ if (cleaned_everything)
+ null_range(&buf->range_dirty_in_cpu_cache);
+ else
+ shrink_range(&buf->range_dirty_in_cpu_cache,
+ &intersection);
+ }
+}
+
+static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_in_cpu_cache, range, &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool flushed_everything;
+
+ expand_range_2_edge(&intersection, &buf->range_in_cpu_cache);
+
+ flush_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &flushed_everything);
+
+ if (flushed_everything) {
+ if (!speculative_data_prefetch())
+ null_range(&buf->range_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ null_range(&buf->range_invalid_in_cpu_cache);
+ } else {
+ if (!speculative_data_prefetch())
+ shrink_range(&buf->range_in_cpu_cache,
+ &intersection);
+ shrink_range(&buf->range_dirty_in_cpu_cache,
+ &intersection);
+ shrink_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+ }
+ }
+}
+
+static void null_range(struct cach_range *range)
+{
+ range->start = U32_MAX;
+ range->end = 0;
+}
+
+static void expand_range(struct cach_range *range,
+ struct cach_range *range_2_add)
+{
+ range->start = min(range->start, range_2_add->start);
+ range->end = max(range->end, range_2_add->end);
+}
+
+/*
+ * Expands range to one of enclosing_range's two edges. The function will
+ * choose which of enclosing_range's edges to expand range to in such a
+ * way that the size of range is minimized. range must be located inside
+ * enclosing_range.
+ */
+static void expand_range_2_edge(struct cach_range *range,
+ struct cach_range *enclosing_range)
+{
+ u32 space_on_low_side = range->start - enclosing_range->start;
+ u32 space_on_high_side = enclosing_range->end - range->end;
+
+ if (space_on_low_side < space_on_high_side)
+ range->start = enclosing_range->start;
+ else
+ range->end = enclosing_range->end;
+}
+
+static void shrink_range(struct cach_range *range,
+ struct cach_range *range_2_remove)
+{
+ if (range_2_remove->start > range->start)
+ range->end = min(range->end, range_2_remove->start);
+ else
+ range->start = max(range->start, range_2_remove->end);
+
+ if (range->start >= range->end)
+ null_range(range);
+}
+
+static bool is_non_empty_range(struct cach_range *range)
+{
+ return range->end > range->start;
+}
+
+static void intersect_range(struct cach_range *range_1,
+ struct cach_range *range_2, struct cach_range *intersection)
+{
+ intersection->start = max(range_1->start, range_2->start);
+ intersection->end = min(range_1->end, range_2->end);
+
+ if (intersection->start >= intersection->end)
+ null_range(intersection);
+}
+
+/* Align_up restrictions apply here too */
+static void align_range_up(struct cach_range *range, u32 alignment)
+{
+ if (!is_non_empty_range(range))
+ return;
+
+ range->start = align_down(range->start, alignment);
+ range->end = align_up(range->end, alignment);
+}
+
+static u32 range_length(struct cach_range *range)
+{
+ if (is_non_empty_range(range))
+ return range->end - range->start;
+ else
+ return 0;
+}
+
+static void region_2_range(struct hwmem_region *region, u32 buffer_size,
+ struct cach_range *range)
+{
+ /*
+ * We don't care about invalid regions, instead we limit the region's
+ * range to the buffer's range. This should work well enough; worst
+ * case we sync the entire buffer when we get an invalid region, which
+ * is acceptable.
+ */
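+ /*
+ * A region describes 'count' blocks of 'size' bytes; the resulting range
+ * spans from 'start' in the first block to 'end' in the last block.
+ */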
+ range->start = region->offset + region->start;
+ range->end = min(region->offset + (region->count * region->size) -
+ (region->size - region->end), buffer_size);
+ if (range->start >= range->end) {
+ null_range(range);
+ return;
+ }
+
+ align_range_up(range, get_dcache_granularity());
+}
+
+static void *offset_2_vaddr(struct cach_buf *buf, u32 offset)
+{
+ return (void *)((u32)buf->vstart + offset);
+}
+
+static u32 offset_2_paddr(struct cach_buf *buf, u32 offset)
+{
+ return buf->pstart + offset;
+}
+
+/* Saturates, might return unaligned values when that happens */
+static u32 align_up(u32 value, u32 alignment)
+{
+ u32 remainder = value % alignment;
+ u32 value_2_add;
+
+ if (remainder == 0)
+ return value;
+
+ value_2_add = alignment - remainder;
+
+ if (value_2_add > U32_MAX - value) /* Will overflow */
+ return U32_MAX;
+
+ return value + value_2_add;
+}
+
+static u32 align_down(u32 value, u32 alignment)
+{
+ u32 remainder = value % alignment;
+ if (remainder == 0)
+ return value;
+
+ return value - remainder;
+}
diff --git a/drivers/misc/hwmem/cache_handler.h b/drivers/misc/hwmem/cache_handler.h
new file mode 100644
index 00000000000..792105196fa
--- /dev/null
+++ b/drivers/misc/hwmem/cache_handler.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Cache handler
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/*
+ * Cache handler can not handle simultaneous execution! The caller has to
+ * ensure such a situation does not occur.
+ */
+
+#ifndef _CACHE_HANDLER_H_
+#define _CACHE_HANDLER_H_
+
+#include <linux/types.h>
+#include <linux/hwmem.h>
+
+/*
+ * To not have to duplicate all datatypes we've used hwmem datatypes. If
+ * someone wants to use the cache handler but not hwmem then we'll have to
+ * define our own datatypes.
+ */
+
+struct cach_range {
+ u32 start; /* Inclusive */
+ u32 end; /* Exclusive */
+};
+
+/*
+ * Internal, do not touch!
+ */
+struct cach_buf {
+ void *vstart;
+ u32 pstart;
+ u32 size;
+
+ /* Remaining hints are active */
+ enum hwmem_alloc_flags cache_settings;
+
+ bool in_cpu_write_buf;
+ struct cach_range range_in_cpu_cache;
+ struct cach_range range_dirty_in_cpu_cache;
+ struct cach_range range_invalid_in_cpu_cache;
+};
+
+void cach_init_buf(struct cach_buf *buf,
+ enum hwmem_alloc_flags cache_settings, u32 size);
+
+void cach_set_buf_addrs(struct cach_buf *buf, void *vaddr, u32 paddr);
+
+void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot);
+
+void cach_set_domain(struct cach_buf *buf, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region);
+
+#endif /* _CACHE_HANDLER_H_ */
diff --git a/drivers/misc/hwmem/contig_alloc.c b/drivers/misc/hwmem/contig_alloc.c
new file mode 100644
index 00000000000..31533ed5988
--- /dev/null
+++ b/drivers/misc/hwmem/contig_alloc.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Contiguous memory allocator
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>,
+ * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <asm/sizes.h>
+
+#define MAX_INSTANCE_NAME_LENGTH 31
+
+struct alloc {
+ struct list_head list;
+
+ bool in_use;
+ phys_addr_t paddr;
+ size_t size;
+};
+
+struct instance {
+ struct list_head list;
+
+ char name[MAX_INSTANCE_NAME_LENGTH + 1];
+
+ phys_addr_t region_paddr;
+ void *region_kaddr;
+ size_t region_size;
+
+ struct list_head alloc_list;
+
+#ifdef CONFIG_DEBUG_FS
+ struct inode *debugfs_inode;
+ int cona_status_free;
+ int cona_status_used;
+ int cona_status_max_cont;
+ int cona_status_max_check;
+ int cona_status_biggest_free;
+ int cona_status_printed;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+};
+
+static LIST_HEAD(instance_list);
+
+static DEFINE_MUTEX(lock);
+
+void *cona_create(const char *name, phys_addr_t region_paddr,
+ size_t region_size);
+void *cona_alloc(void *instance, size_t size);
+void cona_free(void *instance, void *alloc);
+phys_addr_t cona_get_alloc_paddr(void *alloc);
+void *cona_get_alloc_kaddr(void *instance, void *alloc);
+size_t cona_get_alloc_size(void *alloc);
+
+static int init_alloc_list(struct instance *instance);
+static void clean_alloc_list(struct instance *instance);
+static struct alloc *find_free_alloc_bestfit(struct instance *instance,
+ size_t size);
+static struct alloc *split_allocation(struct alloc *alloc,
+ size_t new_alloc_size);
+static phys_addr_t get_alloc_offset(struct instance *instance,
+ struct alloc *alloc);
+
+void *cona_create(const char *name, phys_addr_t region_paddr,
+ size_t region_size)
+{
+ int ret;
+ struct instance *instance;
+ struct vm_struct *vm_area;
+
+ if (region_size == 0)
+ return ERR_PTR(-EINVAL);
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (instance == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Copy at most MAX_INSTANCE_NAME_LENGTH chars; truncate if necessary. */
+ strncpy(instance->name, name, MAX_INSTANCE_NAME_LENGTH);
+ instance->name[MAX_INSTANCE_NAME_LENGTH] = '\0';
+ instance->region_paddr = region_paddr;
+ instance->region_size = region_size;
+
+ vm_area = get_vm_area(region_size, VM_IOREMAP);
+ if (vm_area == NULL) {
+ printk(KERN_WARNING "CONA: Failed to allocate %u bytes"
+ " kernel virtual memory", region_size);
+ ret = -ENOMSG;
+ goto vmem_alloc_failed;
+ }
+ instance->region_kaddr = vm_area->addr;
+
+ INIT_LIST_HEAD(&instance->alloc_list);
+ ret = init_alloc_list(instance);
+ if (ret < 0)
+ goto init_alloc_list_failed;
+
+ mutex_lock(&lock);
+ list_add_tail(&instance->list, &instance_list);
+ mutex_unlock(&lock);
+
+ return instance;
+
+init_alloc_list_failed:
+ vm_area = remove_vm_area(instance->region_kaddr);
+ if (vm_area == NULL)
+ printk(KERN_ERR "CONA: Failed to free kernel virtual memory,"
+ " resource leak!\n");
+
+ kfree(vm_area);
+vmem_alloc_failed:
+ kfree(instance);
+
+ return ERR_PTR(ret);
+}
+
+void *cona_alloc(void *instance, size_t size)
+{
+ struct instance *instance_l = (struct instance *)instance;
+ struct alloc *alloc;
+
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&lock);
+
+ alloc = find_free_alloc_bestfit(instance_l, size);
+ if (IS_ERR(alloc))
+ goto out;
+ if (size < alloc->size) {
+ alloc = split_allocation(alloc, size);
+ if (IS_ERR(alloc))
+ goto out;
+ } else {
+ alloc->in_use = true;
+ }
+#ifdef CONFIG_DEBUG_FS
+ instance_l->cona_status_max_cont += alloc->size;
+ instance_l->cona_status_max_check =
+ max(instance_l->cona_status_max_check,
+ instance_l->cona_status_max_cont);
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+
+void cona_free(void *instance, void *alloc)
+{
+ struct instance *instance_l = (struct instance *)instance;
+ struct alloc *alloc_l = (struct alloc *)alloc;
+ struct alloc *other;
+
+ mutex_lock(&lock);
+
+ alloc_l->in_use = false;
+
+#ifdef CONFIG_DEBUG_FS
+ instance_l->cona_status_max_cont -= alloc_l->size;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
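+ /* Coalesce with free neighbours to limit fragmentation. */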
+ other = list_entry(alloc_l->list.prev, struct alloc, list);
+ if ((alloc_l->list.prev != &instance_l->alloc_list) &&
+ !other->in_use) {
+ other->size += alloc_l->size;
+ list_del(&alloc_l->list);
+ kfree(alloc_l);
+ alloc_l = other;
+ }
+ other = list_entry(alloc_l->list.next, struct alloc, list);
+ if ((alloc_l->list.next != &instance_l->alloc_list) &&
+ !other->in_use) {
+ alloc_l->size += other->size;
+ list_del(&other->list);
+ kfree(other);
+ }
+
+ mutex_unlock(&lock);
+}
+
+phys_addr_t cona_get_alloc_paddr(void *alloc)
+{
+ return ((struct alloc *)alloc)->paddr;
+}
+
+void *cona_get_alloc_kaddr(void *instance, void *alloc)
+{
+ struct instance *instance_l = (struct instance *)instance;
+
+ return instance_l->region_kaddr + get_alloc_offset(instance_l,
+ (struct alloc *)alloc);
+}
+
+size_t cona_get_alloc_size(void *alloc)
+{
+ return ((struct alloc *)alloc)->size;
+}
+
+static int init_alloc_list(struct instance *instance)
+{
+ /*
+ * Hack to not get any allocs that cross a 64MiB boundary as B2R2 can't
+ * handle that.
+ */
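+ /*
+ * A permanently in_use page is placed just below each 64MiB boundary so
+ * that no free block ever crosses it.
+ */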
+ int ret;
+ u32 curr_pos = instance->region_paddr;
+ u32 region_end = instance->region_paddr + instance->region_size;
+ u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1);
+ struct alloc *alloc;
+
+ if (PAGE_SIZE >= SZ_64M) {
+ printk(KERN_WARNING "CONA: PAGE_SIZE >= 64MiB\n");
+ return -ENOMSG;
+ }
+
+ while (next_64mib_boundary < region_end) {
+ if (next_64mib_boundary - curr_pos > PAGE_SIZE) {
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = next_64mib_boundary - curr_pos -
+ PAGE_SIZE;
+ alloc->in_use = false;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+ curr_pos = alloc->paddr + alloc->size;
+ }
+
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = PAGE_SIZE;
+ alloc->in_use = true;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+ curr_pos = alloc->paddr + alloc->size;
+
+#ifdef CONFIG_DEBUG_FS
+ instance->cona_status_max_cont += alloc->size;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+ next_64mib_boundary += SZ_64M;
+ }
+
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = region_end - curr_pos;
+ alloc->in_use = false;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+
+ return 0;
+
+error:
+ clean_alloc_list(instance);
+
+ return ret;
+}
+
+static void clean_alloc_list(struct instance *instance)
+{
+ while (list_empty(&instance->alloc_list) == 0) {
+ struct alloc *i = list_first_entry(&instance->alloc_list,
+ struct alloc, list);
+
+ list_del(&i->list);
+
+ kfree(i);
+ }
+}
+
+static struct alloc *find_free_alloc_bestfit(struct instance *instance,
+ size_t size)
+{
+ size_t best_diff = ~(size_t)0;
+ struct alloc *alloc = NULL, *i;
+
+ list_for_each_entry(i, &instance->alloc_list, list) {
+ size_t diff;
+
+ if (i->in_use || i->size < size)
+ continue;
+ diff = i->size - size;
+ if (diff < best_diff) {
+ alloc = i;
+ best_diff = diff;
+ }
+ }
+
+ return alloc != NULL ? alloc : ERR_PTR(-ENOMEM);
+}
+
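+/* Carve new_alloc_size bytes off the front of alloc and return that part. */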
+static struct alloc *split_allocation(struct alloc *alloc,
+ size_t new_alloc_size)
+{
+ struct alloc *new_alloc;
+
+ new_alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (new_alloc == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ new_alloc->in_use = true;
+ new_alloc->paddr = alloc->paddr;
+ new_alloc->size = new_alloc_size;
+ alloc->size -= new_alloc_size;
+ alloc->paddr += new_alloc_size;
+
+ list_add_tail(&new_alloc->list, &alloc->list);
+
+ return new_alloc;
+}
+
+static phys_addr_t get_alloc_offset(struct instance *instance,
+ struct alloc *alloc)
+{
+ return alloc->paddr - instance->region_paddr;
+}
+
+/* Debug */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int print_alloc(struct instance *instance, struct alloc *alloc,
+ char **buf, size_t buf_size);
+static int print_alloc_status(struct instance *instance, char **buf,
+ size_t buf_size);
+static struct instance *get_instance_from_file(struct file *file);
+static int debugfs_allocs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_allocs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_allocs_read,
+};
+
+static int print_alloc(struct instance *instance, struct alloc *alloc,
+ char **buf, size_t buf_size)
+{
+ int ret;
+ int i;
+
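+ /* Pass 0 is a dry run (zero-sized buffer) to check that the text fits. */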
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ if (i == 1) {
+ if (alloc->in_use)
+ instance->cona_status_used += alloc->size;
+ else
+ instance->cona_status_free += alloc->size;
+ }
+
+ if (!alloc->in_use) {
+ instance->cona_status_biggest_free =
+ max((size_t)alloc->size,
+ (size_t)instance->cona_status_biggest_free);
+ }
+
+ ret = snprintf(*buf, buf_size_l, "paddr: %10x\tsize: %10u\t"
+ "in use: %1u\t used: %10u (%dMB)"
+ " \t free: %10u (%dMB)\n",
+ alloc->paddr,
+ alloc->size,
+ alloc->in_use,
+ instance->cona_status_used,
+ instance->cona_status_used/1024/1024,
+ instance->cona_status_free,
+ instance->cona_status_free/1024/1024);
+
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static int print_alloc_status(struct instance *instance, char **buf,
+ size_t buf_size)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ ret = snprintf(*buf, buf_size_l, "Overall peak usage:\t%10u "
+ "(%dMB)\nCurrent max usage:\t%10u (%dMB)\n"
+ "Current biggest free:\t%10d (%dMB)\n",
+ instance->cona_status_max_check,
+ instance->cona_status_max_check/1024/1024,
+ instance->cona_status_max_cont,
+ instance->cona_status_max_cont/1024/1024,
+ instance->cona_status_biggest_free,
+ instance->cona_status_biggest_free/1024/1024);
+
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static struct instance *get_instance_from_file(struct file *file)
+{
+ struct instance *curr_instance;
+
+ list_for_each_entry(curr_instance, &instance_list, list) {
+ if (file->f_dentry->d_inode == curr_instance->debugfs_inode)
+ return curr_instance;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static int debugfs_allocs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ /*
+ * We assume the supplied buffer and PAGE_SIZE is large enough to hold
+ * information about at least one alloc, if not no data will be
+ * returned.
+ */
+
+ int ret;
+ struct instance *instance;
+ struct alloc *curr_alloc;
+ char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *local_buf_pos = local_buf;
+ size_t available_space = min((size_t)PAGE_SIZE, count);
+ /* private_data is initialized to NULL in open, which I assume is 0. */
+ void **curr_pos = &file->private_data;
+ size_t bytes_read;
+ bool readout_aborted = false;
+
+ if (local_buf == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&lock);
+ instance = get_instance_from_file(file);
+ if (IS_ERR(instance)) {
+ ret = PTR_ERR(instance);
+ goto out;
+ }
+
+ list_for_each_entry(curr_alloc, &instance->alloc_list, list) {
+ phys_addr_t alloc_offset = get_alloc_offset(instance,
+ curr_alloc);
+ if (alloc_offset < (phys_addr_t)*curr_pos)
+ continue;
+
+ ret = print_alloc(instance, curr_alloc, &local_buf_pos,
+ available_space - (size_t)(local_buf_pos -
+ local_buf));
+
+ if (ret == -EINVAL) { /* No more room */
+ readout_aborted = true;
+ break;
+ } else if (ret < 0) {
+ goto out;
+ }
+ /*
+ * There could be an overflow issue here in the unlikely case
+ * where the region is placed at the end of the address range
+ * and the last alloc is 1 byte large. Since this is debug code
+ * and that case most likely never will happen I've chosen to
+ * defer fixing it till it happens.
+ */
+ *curr_pos = (void *)(alloc_offset + 1);
+
+ /* Make sure to also print status if there were any prints */
+ instance->cona_status_printed = false;
+ }
+
+ if (!readout_aborted && !instance->cona_status_printed) {
+ ret = print_alloc_status(instance, &local_buf_pos,
+ available_space -
+ (size_t)(local_buf_pos - local_buf));
+
+ if (ret == -EINVAL) /* No more room */
+ readout_aborted = true;
+ else if (ret < 0)
+ goto out;
+ else
+ instance->cona_status_printed = true;
+ }
+
+ if (!readout_aborted) {
+ instance->cona_status_free = 0;
+ instance->cona_status_used = 0;
+ instance->cona_status_biggest_free = 0;
+ }
+
+ bytes_read = (size_t)(local_buf_pos - local_buf);
+
+ /* copy_to_user() returns the number of bytes that could not be copied. */
+ if (copy_to_user(buf, local_buf, bytes_read)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = bytes_read;
+
+out:
+ kfree(local_buf);
+ mutex_unlock(&lock);
+
+ return ret;
+}
+
+static int __init init_debugfs(void)
+{
+ struct instance *curr_instance;
+ struct dentry *debugfs_root_dir = debugfs_create_dir("cona", NULL);
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(curr_instance, &instance_list, list) {
+ struct dentry *file_dentry;
+ char tmp_str[MAX_INSTANCE_NAME_LENGTH + 7 + 1];
+ tmp_str[0] = '\0';
+ strcat(tmp_str, curr_instance->name);
+ strcat(tmp_str, "_allocs");
+ file_dentry = debugfs_create_file(tmp_str, 0444,
+ debugfs_root_dir, NULL, &debugfs_allocs_fops);
+ if (file_dentry != NULL)
+ curr_instance->debugfs_inode = file_dentry->d_inode;
+ }
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+/*
+ * Must be executed after all instances have been created, hence the
+ * late_initcall.
+ */
+late_initcall(init_debugfs);
+
+#endif /* #ifdef CONFIG_DEBUG_FS */
diff --git a/drivers/misc/hwmem/hwmem-ioctl.c b/drivers/misc/hwmem/hwmem-ioctl.c
new file mode 100644
index 00000000000..e9e50de78bd
--- /dev/null
+++ b/drivers/misc/hwmem/hwmem-ioctl.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/mm_types.h>
+#include <linux/hwmem.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+
+static int hwmem_open(struct inode *inode, struct file *file);
+static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma);
+static int hwmem_release_fop(struct inode *inode, struct file *file);
+static long hwmem_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+static unsigned long hwmem_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+static const struct file_operations hwmem_fops = {
+ .open = hwmem_open,
+ .mmap = hwmem_ioctl_mmap,
+ .unlocked_ioctl = hwmem_ioctl,
+ .release = hwmem_release_fop,
+ .get_unmapped_area = hwmem_get_unmapped_area,
+};
+
+static struct miscdevice hwmem_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hwmem",
+ .fops = &hwmem_fops,
+};
+
+struct hwmem_file {
+ struct mutex lock;
+ struct idr idr; /* id -> struct hwmem_alloc*, ref counted */
+ struct hwmem_alloc *fd_alloc; /* Ref counted */
+};
+
+static s32 create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc)
+{
+ int id, ret;
+
+ while (true) {
+ if (idr_pre_get(&hwfile->idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ ret = idr_get_new_above(&hwfile->idr, alloc, 1, &id);
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ return -ENOMEM;
+ }
+
+ /*
+ * IDR always returns the lowest free id so there is no wrapping issue
+ * because of this.
+ */
+ if (id >= (s32)1 << (31 - PAGE_SHIFT)) {
+ dev_err(hwmem_device.this_device, "Out of IDs!\n");
+ idr_remove(&hwfile->idr, id);
+ return -ENOMSG;
+ }
+
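+ /*
+ * Ids are shifted by PAGE_SHIFT so they can double as mmap offsets;
+ * id 0 is reserved for the per-fd allocation.
+ */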
+ return (s32)id << PAGE_SHIFT;
+}
+
+static void remove_id(struct hwmem_file *hwfile, s32 id)
+{
+ idr_remove(&hwfile->idr, id >> PAGE_SHIFT);
+}
+
+static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = id ? idr_find(&hwfile->idr, id >> PAGE_SHIFT) :
+ hwfile->fd_alloc;
+ if (alloc == NULL)
+ alloc = ERR_PTR(-EINVAL);
+
+ return alloc;
+}
+
+static s32 alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+{
+ s32 ret = 0;
+ struct hwmem_alloc *alloc;
+
+ alloc = hwmem_alloc(req->size, req->flags, req->default_access,
+ req->mem_type);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ ret = create_id(hwfile, alloc);
+ if (ret < 0)
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int alloc_fd(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ if (hwfile->fd_alloc)
+ return -EINVAL;
+
+ alloc = hwmem_alloc(req->size, req->flags, req->default_access,
+ req->mem_type);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwfile->fd_alloc = alloc;
+
+ return 0;
+}
+
+static int release(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ if (id == 0)
+ return -EINVAL;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ remove_id(hwfile, id);
+ hwmem_release(alloc);
+
+ return 0;
+}
+
+static int set_cpu_domain(struct hwmem_file *hwfile,
+ struct hwmem_set_domain_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_CPU,
+ (struct hwmem_region *)&req->region);
+}
+
+static int set_sync_domain(struct hwmem_file *hwfile,
+ struct hwmem_set_domain_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_SYNC,
+ (struct hwmem_region *)&req->region);
+}
+
+static int pin(struct hwmem_file *hwfile, struct hwmem_pin_request *req)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_mem_type mem_type;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_get_info(alloc, NULL, &mem_type, NULL);
+ if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS)
+ return -EINVAL;
+
+ ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length);
+ if (ret < 0)
+ return ret;
+
+ req->phys_addr = mem_chunk.paddr;
+
+ return 0;
+}
+
+static int unpin(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_unpin(alloc);
+
+ return 0;
+}
+
+static int set_access(struct hwmem_file *hwfile,
+ struct hwmem_set_access_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_access(alloc, req->access, req->pid);
+}
+
+static int get_info(struct hwmem_file *hwfile,
+ struct hwmem_get_info_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_get_info(alloc, &req->size, &req->mem_type, &req->access);
+
+ return 0;
+}
+
+static s32 export(struct hwmem_file *hwfile, s32 id)
+{
+ s32 ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /*
+ * The user could be about to send the buffer to a driver, but
+ * there is a chance the current thread group doesn't have import rights
+ * if it gained access to the buffer via an inter-process fd transfer
+ * (fork, Android binder). In that case the driver would not be
+ * able to resolve the buffer name. To avoid this situation we give the
+ * current thread group import rights. This will not breach
+ * security as the process already has access to the buffer (otherwise
+ * it would not be able to get here).
+ */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+
+ ret = hwmem_set_access(alloc, (access | HWMEM_ACCESS_IMPORT),
+ task_tgid_nr(current));
+ if (ret < 0)
+ return ret;
+
+ return hwmem_get_name(alloc);
+}
+
+static s32 import(struct hwmem_file *hwfile, s32 name)
+{
+ s32 ret = 0;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ alloc = hwmem_resolve_by_name(name);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /* Check access permissions for process */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+ if (!(access & HWMEM_ACCESS_IMPORT)) {
+ ret = -EPERM;
+ goto error;
+ }
+
+ ret = create_id(hwfile, alloc);
+ if (ret < 0)
+ goto error;
+
+ return ret;
+
+error:
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int import_fd(struct hwmem_file *hwfile, s32 name)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ if (hwfile->fd_alloc)
+ return -EINVAL;
+
+ alloc = hwmem_resolve_by_name(name);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /* Check access permissions for process */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+ if (!(access & HWMEM_ACCESS_IMPORT)) {
+ ret = -EPERM;
+ goto error;
+ }
+
+ hwfile->fd_alloc = alloc;
+
+ return 0;
+
+error:
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int hwmem_open(struct inode *inode, struct file *file)
+{
+ struct hwmem_file *hwfile;
+
+ hwfile = kzalloc(sizeof(struct hwmem_file), GFP_KERNEL);
+ if (hwfile == NULL)
+ return -ENOMEM;
+
+ idr_init(&hwfile->idr);
+ mutex_init(&hwfile->lock);
+ file->private_data = hwfile;
+
+ return 0;
+}
+
+static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int ret;
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+ struct hwmem_alloc *alloc;
+
+ mutex_lock(&hwfile->lock);
+
+ alloc = resolve_id(hwfile, (s32)vma->vm_pgoff << PAGE_SHIFT);
+ if (IS_ERR(alloc)) {
+ ret = PTR_ERR(alloc);
+ goto out;
+ }
+
+ ret = hwmem_mmap(alloc, vma);
+
+out:
+ mutex_unlock(&hwfile->lock);
+
+ return ret;
+}
+
+static int hwmem_release_idr_for_each_wrapper(int id, void *ptr, void *data)
+{
+ hwmem_release((struct hwmem_alloc *)ptr);
+
+ return 0;
+}
+
+static int hwmem_release_fop(struct inode *inode, struct file *file)
+{
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+
+ idr_for_each(&hwfile->idr, hwmem_release_idr_for_each_wrapper, NULL);
+ idr_remove_all(&hwfile->idr);
+ idr_destroy(&hwfile->idr);
+
+ if (hwfile->fd_alloc)
+ hwmem_release(hwfile->fd_alloc);
+
+ mutex_destroy(&hwfile->lock);
+
+ kfree(hwfile);
+
+ return 0;
+}
+
+static long hwmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOSYS;
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+
+ mutex_lock(&hwfile->lock);
+
+ switch (cmd) {
+ case HWMEM_ALLOC_IOC:
+ {
+ struct hwmem_alloc_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_alloc_request)))
+ ret = -EFAULT;
+ else
+ ret = alloc(hwfile, &req);
+ }
+ break;
+ case HWMEM_ALLOC_FD_IOC:
+ {
+ struct hwmem_alloc_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_alloc_request)))
+ ret = -EFAULT;
+ else
+ ret = alloc_fd(hwfile, &req);
+ }
+ break;
+ case HWMEM_RELEASE_IOC:
+ ret = release(hwfile, (s32)arg);
+ break;
+ case HWMEM_SET_CPU_DOMAIN_IOC:
+ {
+ struct hwmem_set_domain_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_domain_request)))
+ ret = -EFAULT;
+ else
+ ret = set_cpu_domain(hwfile, &req);
+ }
+ break;
+ case HWMEM_SET_SYNC_DOMAIN_IOC:
+ {
+ struct hwmem_set_domain_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_domain_request)))
+ ret = -EFAULT;
+ else
+ ret = set_sync_domain(hwfile, &req);
+ }
+ break;
+ case HWMEM_PIN_IOC:
+ {
+ struct hwmem_pin_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_pin_request)))
+ ret = -EFAULT;
+ else
+ ret = pin(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_pin_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_UNPIN_IOC:
+ ret = unpin(hwfile, (s32)arg);
+ break;
+ case HWMEM_SET_ACCESS_IOC:
+ {
+ struct hwmem_set_access_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_access_request)))
+ ret = -EFAULT;
+ else
+ ret = set_access(hwfile, &req);
+ }
+ break;
+ case HWMEM_GET_INFO_IOC:
+ {
+ struct hwmem_get_info_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_get_info_request)))
+ ret = -EFAULT;
+ else
+ ret = get_info(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_get_info_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_EXPORT_IOC:
+ ret = export(hwfile, (s32)arg);
+ break;
+ case HWMEM_IMPORT_IOC:
+ ret = import(hwfile, (s32)arg);
+ break;
+ case HWMEM_IMPORT_FD_IOC:
+ ret = import_fd(hwfile, (s32)arg);
+ break;
+ }
+
+ mutex_unlock(&hwfile->lock);
+
+ return ret;
+}
+
+static unsigned long hwmem_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ /*
+ * pgoff will not be valid as it contains a buffer id (right shifted
+ * PAGE_SHIFT bits). To not confuse get_unmapped_area we'll not pass
+ * on file or pgoff.
+ */
+ return current->mm->get_unmapped_area(NULL, addr, len, 0, flags);
+}
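
The buffer id handed out by the alloc/import ioctls appears to be page aligned and doubles as the mmap offset, which hwmem_ioctl_mmap() above turns back into an id via vm_pgoff. A minimal userspace sketch, assuming a /dev/hwmem file descriptor and an id obtained from those ioctls:

#include <sys/mman.h>

static void *hwmem_map_buffer(int hwmem_fd, int id, size_t size)
{
	void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, hwmem_fd, (off_t)id);

	return addr == MAP_FAILED ? NULL : addr;
}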
+
+int __init hwmem_ioctl_init(void)
+{
+ if (PAGE_SHIFT < 1 || PAGE_SHIFT > 30 || sizeof(size_t) != 4 ||
+ sizeof(int) > 4 || sizeof(enum hwmem_alloc_flags) != 4 ||
+ sizeof(enum hwmem_access) != 4 ||
+ sizeof(enum hwmem_mem_type) != 4) {
+ dev_err(hwmem_device.this_device, "PAGE_SHIFT < 1 || PAGE_SHIFT"
+ " > 30 || sizeof(size_t) != 4 || sizeof(int) > 4 ||"
+ " sizeof(enum hwmem_alloc_flags) != 4 || sizeof(enum"
+ " hwmem_access) != 4 || sizeof(enum hwmem_mem_type)"
+ " != 4\n");
+ return -ENOMSG;
+ }
+ if (PAGE_SHIFT > 15)
+ dev_warn(hwmem_device.this_device, "Due to the page size only"
+ " %u id:s per file instance are available\n",
+ ((u32)1 << (31 - PAGE_SHIFT)) - 1);
+
+ return misc_register(&hwmem_device);
+}
+
+void __exit hwmem_ioctl_exit(void)
+{
+ misc_deregister(&hwmem_device);
+}
diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c
new file mode 100644
index 00000000000..b91d99bc2be
--- /dev/null
+++ b/drivers/misc/hwmem/hwmem-main.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>,
+ * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pid.h>
+#include <linux/list.h>
+#include <linux/hwmem.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/kallsyms.h>
+#include <linux/vmalloc.h>
+#include "cache_handler.h"
+
+#define S32_MAX 2147483647
+
+struct hwmem_alloc_threadg_info {
+ struct list_head list;
+
+ struct pid *threadg_pid; /* Ref counted */
+
+ enum hwmem_access access;
+};
+
+struct hwmem_alloc {
+ struct list_head list;
+
+ atomic_t ref_cnt;
+
+ enum hwmem_alloc_flags flags;
+ struct hwmem_mem_type_struct *mem_type;
+
+ void *allocator_hndl;
+ phys_addr_t paddr;
+ void *kaddr;
+ size_t size;
+ s32 name;
+
+ /* Access control */
+ enum hwmem_access default_access;
+ struct list_head threadg_info_list;
+
+ /* Cache handling */
+ struct cach_buf cach_buf;
+
+#ifdef CONFIG_DEBUG_FS
+ /* Debug */
+ void *creator;
+ pid_t creator_tgid;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+};
+
+static struct platform_device *hwdev;
+
+static LIST_HEAD(alloc_list);
+static DEFINE_IDR(global_idr);
+static DEFINE_MUTEX(lock);
+
+static void vm_open(struct vm_area_struct *vma);
+static void vm_close(struct vm_area_struct *vma);
+static struct vm_operations_struct vm_ops = {
+ .open = vm_open,
+ .close = vm_close,
+};
+
+static void kunmap_alloc(struct hwmem_alloc *alloc);
+
+/* Helpers */
+
+static void destroy_alloc_threadg_info(
+ struct hwmem_alloc_threadg_info *info)
+{
+ if (info->threadg_pid)
+ put_pid(info->threadg_pid);
+
+ kfree(info);
+}
+
+static void clean_alloc_threadg_info_list(struct hwmem_alloc *alloc)
+{
+ struct hwmem_alloc_threadg_info *info;
+ struct hwmem_alloc_threadg_info *tmp;
+
+ list_for_each_entry_safe(info, tmp, &(alloc->threadg_info_list),
+ list) {
+ list_del(&info->list);
+ destroy_alloc_threadg_info(info);
+ }
+}
+
+static enum hwmem_access get_access(struct hwmem_alloc *alloc)
+{
+ struct hwmem_alloc_threadg_info *info;
+ struct pid *my_pid;
+ bool found = false;
+
+ my_pid = find_get_pid(task_tgid_nr(current));
+ if (!my_pid)
+ return 0;
+
+ list_for_each_entry(info, &(alloc->threadg_info_list), list) {
+ if (info->threadg_pid == my_pid) {
+ found = true;
+ break;
+ }
+ }
+
+ put_pid(my_pid);
+
+ if (found)
+ return info->access;
+ else
+ return alloc->default_access;
+}
+
+static void clear_alloc_mem(struct hwmem_alloc *alloc)
+{
+ cach_set_domain(&alloc->cach_buf, HWMEM_ACCESS_WRITE,
+ HWMEM_DOMAIN_CPU, NULL);
+
+ memset(alloc->kaddr, 0, alloc->size);
+}
+
+static void destroy_alloc(struct hwmem_alloc *alloc)
+{
+ list_del(&alloc->list);
+
+ if (alloc->name != 0) {
+ idr_remove(&global_idr, alloc->name);
+ alloc->name = 0;
+ }
+
+ clean_alloc_threadg_info_list(alloc);
+
+ kunmap_alloc(alloc);
+
+ if (!IS_ERR_OR_NULL(alloc->allocator_hndl))
+ alloc->mem_type->allocator_api.free(
+ alloc->mem_type->allocator_instance,
+ alloc->allocator_hndl);
+
+ kfree(alloc);
+}
+
+static int kmap_alloc(struct hwmem_alloc *alloc)
+{
+ int ret;
+ pgprot_t pgprot;
+ void *alloc_kaddr;
+
+ alloc_kaddr = alloc->mem_type->allocator_api.get_alloc_kaddr(
+ alloc->mem_type->allocator_instance, alloc->allocator_hndl);
+ if (IS_ERR(alloc_kaddr))
+ return PTR_ERR(alloc_kaddr);
+
+ pgprot = PAGE_KERNEL;
+ cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot);
+
+ ret = ioremap_page_range((unsigned long)alloc_kaddr,
+ (unsigned long)alloc_kaddr + alloc->size, alloc->paddr, pgprot);
+ if (ret < 0) {
+ dev_warn(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr,
+ alloc->paddr + alloc->size);
+ return ret;
+ }
+
+ alloc->kaddr = alloc_kaddr;
+
+ return 0;
+}
+
+static void kunmap_alloc(struct hwmem_alloc *alloc)
+{
+ if (alloc->kaddr == NULL)
+ return;
+
+ unmap_kernel_range((unsigned long)alloc->kaddr, alloc->size);
+
+ alloc->kaddr = NULL;
+}
+
+static struct hwmem_mem_type_struct *resolve_mem_type(
+ enum hwmem_mem_type mem_type)
+{
+ unsigned int i;
+ for (i = 0; i < hwmem_num_mem_types; i++) {
+ if (hwmem_mem_types[i].id == mem_type)
+ return &hwmem_mem_types[i];
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+/* HWMEM API */
+
+struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags,
+ enum hwmem_access def_access, enum hwmem_mem_type mem_type)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+
+ if (hwdev == NULL) {
+ printk(KERN_ERR "HWMEM: Badly configured\n");
+ return ERR_PTR(-ENOMSG);
+ }
+
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&lock);
+
+ size = PAGE_ALIGN(size);
+
+ alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto alloc_alloc_failed;
+ }
+
+ INIT_LIST_HEAD(&alloc->list);
+ atomic_inc(&alloc->ref_cnt);
+ alloc->flags = flags;
+ alloc->default_access = def_access;
+ INIT_LIST_HEAD(&alloc->threadg_info_list);
+#ifdef CONFIG_DEBUG_FS
+ alloc->creator = __builtin_return_address(0);
+ alloc->creator_tgid = task_tgid_nr(current);
+#endif
+ alloc->mem_type = resolve_mem_type(mem_type);
+ if (IS_ERR(alloc->mem_type)) {
+ ret = PTR_ERR(alloc->mem_type);
+ goto resolve_mem_type_failed;
+ }
+
+ alloc->allocator_hndl = alloc->mem_type->allocator_api.alloc(
+ alloc->mem_type->allocator_instance, size);
+ if (IS_ERR(alloc->allocator_hndl)) {
+ ret = PTR_ERR(alloc->allocator_hndl);
+ goto allocator_failed;
+ }
+
+ alloc->paddr = alloc->mem_type->allocator_api.get_alloc_paddr(
+ alloc->allocator_hndl);
+ alloc->size = alloc->mem_type->allocator_api.get_alloc_size(
+ alloc->allocator_hndl);
+
+ cach_init_buf(&alloc->cach_buf, alloc->flags, alloc->size);
+ ret = kmap_alloc(alloc);
+ if (ret < 0)
+ goto kmap_alloc_failed;
+ cach_set_buf_addrs(&alloc->cach_buf, alloc->kaddr, alloc->paddr);
+
+ list_add_tail(&alloc->list, &alloc_list);
+
+ clear_alloc_mem(alloc);
+
+ goto out;
+
+kmap_alloc_failed:
+allocator_failed:
+resolve_mem_type_failed:
+ destroy_alloc(alloc);
+alloc_alloc_failed:
+ alloc = ERR_PTR(ret);
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+EXPORT_SYMBOL(hwmem_alloc);
+
+void hwmem_release(struct hwmem_alloc *alloc)
+{
+ mutex_lock(&lock);
+
+ if (atomic_dec_and_test(&alloc->ref_cnt))
+ destroy_alloc(alloc);
+
+ mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(hwmem_release);
+
+int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region)
+{
+ mutex_lock(&lock);
+
+ cach_set_domain(&alloc->cach_buf, access, domain, region);
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_set_domain);
+
+int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks,
+ u32 *mem_chunks_length)
+{
+ if (*mem_chunks_length < 1) {
+ *mem_chunks_length = 1;
+ return -ENOSPC;
+ }
+
+ mutex_lock(&lock);
+
+ mem_chunks[0].paddr = alloc->paddr;
+ mem_chunks[0].size = alloc->size;
+ *mem_chunks_length = 1;
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_pin);
+
+void hwmem_unpin(struct hwmem_alloc *alloc)
+{
+}
+EXPORT_SYMBOL(hwmem_unpin);
+
+static void vm_open(struct vm_area_struct *vma)
+{
+ atomic_inc(&((struct hwmem_alloc *)vma->vm_private_data)->ref_cnt);
+}
+
+static void vm_close(struct vm_area_struct *vma)
+{
+ hwmem_release((struct hwmem_alloc *)vma->vm_private_data);
+}
+
+int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma)
+{
+ int ret = 0;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
+ enum hwmem_access access;
+ mutex_lock(&lock);
+
+ access = get_access(alloc);
+
+ /* Check permissions */
+ if ((!(access & HWMEM_ACCESS_WRITE) &&
+ (vma->vm_flags & VM_WRITE)) ||
+ (!(access & HWMEM_ACCESS_READ) &&
+ (vma->vm_flags & VM_READ))) {
+ ret = -EPERM;
+ goto illegal_access;
+ }
+
+ if (vma_size > alloc->size) {
+ ret = -EINVAL;
+ goto illegal_size;
+ }
+
+ /*
+ * We don't want Linux to do anything (merging, etc.) with our VMAs, as
+ * the offset is not necessarily valid.
+ */
+ vma->vm_flags |= VM_SPECIAL;
+ cach_set_pgprot_cache_options(&alloc->cach_buf, &vma->vm_page_prot);
+ vma->vm_private_data = (void *)alloc;
+ atomic_inc(&alloc->ref_cnt);
+ vma->vm_ops = &vm_ops;
+
+ ret = remap_pfn_range(vma, vma->vm_start, alloc->paddr >> PAGE_SHIFT,
+ min(vma_size, (unsigned long)alloc->size), vma->vm_page_prot);
+ if (ret < 0)
+ goto map_failed;
+
+ goto out;
+
+map_failed:
+ atomic_dec(&alloc->ref_cnt);
+illegal_size:
+illegal_access:
+
+out:
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_mmap);
+
+void *hwmem_kmap(struct hwmem_alloc *alloc)
+{
+ void *ret;
+
+ mutex_lock(&lock);
+
+ ret = alloc->kaddr;
+
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_kmap);
+
+void hwmem_kunmap(struct hwmem_alloc *alloc)
+{
+}
+EXPORT_SYMBOL(hwmem_kunmap);
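
Taken together, the exported functions above form the in-kernel allocation API. A hedged usage sketch follows; the flags value 0 is a placeholder for a real enum hwmem_alloc_flags value from <linux/hwmem.h>.

static int hwmem_api_demo(void)
{
	struct hwmem_alloc *alloc;
	struct hwmem_mem_chunk chunk;
	u32 nr_chunks = 1;
	void *kaddr;
	int ret;

	alloc = hwmem_alloc(PAGE_SIZE, 0,
			    HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
			    HWMEM_MEM_CONTIGUOUS_SYS);
	if (IS_ERR(alloc))
		return PTR_ERR(alloc);

	/* Pin to learn the physical address of the single contiguous chunk */
	ret = hwmem_pin(alloc, &chunk, &nr_chunks);
	if (ret < 0)
		goto release;

	/* The kernel mapping was set up at allocation time */
	kaddr = hwmem_kmap(alloc);
	if (kaddr)
		memset(kaddr, 0, PAGE_SIZE);

	hwmem_kunmap(alloc);
	hwmem_unpin(alloc);
release:
	hwmem_release(alloc);
	return ret;
}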
+
+int hwmem_set_access(struct hwmem_alloc *alloc,
+ enum hwmem_access access, pid_t pid_nr)
+{
+ int ret;
+ struct hwmem_alloc_threadg_info *info;
+ struct pid *pid;
+ bool found = false;
+
+ pid = find_get_pid(pid_nr);
+ if (!pid) {
+ ret = -EINVAL;
+ goto error_get_pid;
+ }
+
+ list_for_each_entry(info, &(alloc->threadg_info_list), list) {
+ if (info->threadg_pid == pid) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto error_alloc_info;
+ }
+
+ info->threadg_pid = pid;
+ info->access = access;
+
+ list_add_tail(&(info->list), &(alloc->threadg_info_list));
+ } else {
+ info->access = access;
+ }
+
+ return 0;
+
+error_alloc_info:
+ put_pid(pid);
+error_get_pid:
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_set_access);
+
+void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access)
+{
+ mutex_lock(&lock);
+
+ if (size != NULL)
+ *size = alloc->size;
+ if (mem_type != NULL)
+ *mem_type = alloc->mem_type->id;
+ if (access != NULL)
+ *access = get_access(alloc);
+
+ mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(hwmem_get_info);
+
+s32 hwmem_get_name(struct hwmem_alloc *alloc)
+{
+ int ret = 0, name;
+
+ mutex_lock(&lock);
+
+ if (alloc->name != 0) {
+ ret = alloc->name;
+ goto out;
+ }
+
+ while (true) {
+ if (idr_pre_get(&global_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ goto pre_get_id_failed;
+ }
+
+ ret = idr_get_new_above(&global_idr, alloc, 1, &name);
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ goto get_id_failed;
+ }
+
+ if (name > S32_MAX) {
+ ret = -ENOMSG;
+ goto overflow;
+ }
+
+ alloc->name = name;
+
+ ret = name;
+ goto out;
+
+overflow:
+ idr_remove(&global_idr, name);
+get_id_failed:
+pre_get_id_failed:
+
+out:
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_get_name);
+
+struct hwmem_alloc *hwmem_resolve_by_name(s32 name)
+{
+ struct hwmem_alloc *alloc;
+
+ mutex_lock(&lock);
+
+ alloc = idr_find(&global_idr, name);
+ if (alloc == NULL) {
+ alloc = ERR_PTR(-EINVAL);
+ goto find_failed;
+ }
+ atomic_inc(&alloc->ref_cnt);
+
+ goto out;
+
+find_failed:
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+EXPORT_SYMBOL(hwmem_resolve_by_name);
+
+/* Debug */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debugfs_allocs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_allocs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_allocs_read,
+};
+
+static int print_alloc(struct hwmem_alloc *alloc, char **buf, size_t buf_size)
+{
+ int ret;
+ char creator[KSYM_SYMBOL_LEN];
+ int i;
+
+ if (sprint_symbol(creator, (unsigned long)alloc->creator) < 0)
+ creator[0] = '\0';
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ ret = snprintf(*buf, buf_size_l,
+ "%#x\n"
+ "\tSize: %u\n"
+ "\tMemory type: %u\n"
+ "\tName: %#x\n"
+ "\tReference count: %i\n"
+ "\tAllocation flags: %#x\n"
+ "\t$ settings: %#x\n"
+ "\tDefault access: %#x\n"
+ "\tPhysical address: %#x\n"
+ "\tKernel virtual address: %#x\n"
+ "\tCreator: %s\n"
+ "\tCreator thread group id: %u\n",
+ (unsigned int)alloc, alloc->size, alloc->mem_type->id,
+ alloc->name, atomic_read(&alloc->ref_cnt),
+ alloc->flags, alloc->cach_buf.cache_settings,
+ alloc->default_access, alloc->paddr,
+ (unsigned int)alloc->kaddr, creator,
+ alloc->creator_tgid);
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static int debugfs_allocs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ /*
+ * We assume the supplied buffer and PAGE_SIZE are large enough to hold
+ * information about at least one alloc; if not, no data will be
+ * returned.
+ */
+
+ int ret;
+ size_t i = 0;
+ struct hwmem_alloc *curr_alloc;
+ char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *local_buf_pos = local_buf;
+ size_t available_space = min((size_t)PAGE_SIZE, count);
+ /* private_data is initialized to NULL in open, which we rely on being 0. */
+ void **curr_pos = &file->private_data;
+ size_t bytes_read;
+
+ if (local_buf == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(curr_alloc, &alloc_list, list) {
+ if (i++ < (size_t)*curr_pos)
+ continue;
+
+ ret = print_alloc(curr_alloc, &local_buf_pos, available_space -
+ (size_t)(local_buf_pos - local_buf));
+ if (ret == -EINVAL) /* No more room */
+ break;
+ else if (ret < 0)
+ goto out;
+
+ *curr_pos = (void *)i;
+ }
+
+ bytes_read = (size_t)(local_buf_pos - local_buf);
+
+ if (copy_to_user(buf, local_buf, bytes_read)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = bytes_read;
+
+out:
+ kfree(local_buf);
+
+ mutex_unlock(&lock);
+
+ return ret;
+}
+
+static void init_debugfs(void)
+{
+ /* Hwmem is never unloaded so dropping the dentries is ok. */
+ struct dentry *debugfs_root_dir = debugfs_create_dir("hwmem", NULL);
+ (void)debugfs_create_file("allocs", 0444, debugfs_root_dir, 0,
+ &debugfs_allocs_fops);
+}
+
+#endif /* #ifdef CONFIG_DEBUG_FS */
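
The "allocs" file created above can be read like any other debugfs file. A small illustrative reader, assuming debugfs is mounted at the conventional /sys/kernel/debug location:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/hwmem/allocs", O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}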
+
+/* Module */
+
+extern int hwmem_ioctl_init(void);
+
+static int __devinit hwmem_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (hwdev) {
+ dev_err(&pdev->dev, "Probed multiple times\n");
+ return -EINVAL;
+ }
+
+ hwdev = pdev;
+
+ /*
+ * No need to flush the caches here. If we can keep track of the cache
+ * content then none of our memory will be in the caches; if we can't
+ * keep track of the cache content we always assume all our memory is
+ * in the caches.
+ */
+
+ ret = hwmem_ioctl_init();
+ if (ret < 0)
+ dev_warn(&pdev->dev, "Failed to start hwmem-ioctl, continuing"
+ " anyway\n");
+
+#ifdef CONFIG_DEBUG_FS
+ init_debugfs();
+#endif
+
+ dev_info(&pdev->dev, "Probed OK\n");
+
+ return 0;
+}
+
+static struct platform_driver hwmem_driver = {
+ .probe = hwmem_probe,
+ .driver = {
+ .name = "hwmem",
+ },
+};
+
+static int __init hwmem_init(void)
+{
+ return platform_driver_register(&hwmem_driver);
+}
+subsys_initcall(hwmem_init);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hardware memory driver");
+
diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/drivers/misc/mbox.c
index 2b2d51caf9d..d884496fa4c 100644
--- a/arch/arm/mach-ux500/mbox-db5500.c
+++ b/drivers/misc/mbox.c
@@ -32,13 +32,19 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/hrtimer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/mfd/db5500-prcmu.h>
#include <mach/mbox-db5500.h>
+#include <mach/reboot_reasons.h>
#define MBOX_NAME "mbox"
@@ -53,8 +59,57 @@
#define MBOX_ENABLE_IRQ 0x0
#define MBOX_LATCH 1
+struct mbox_device_info {
+ struct mbox *mbox;
+ struct workqueue_struct *mbox_modem_rel_wq;
+ struct work_struct mbox_modem_rel;
+ struct completion mod_req_ack_work;
+ atomic_t ape_state;
+ atomic_t mod_req;
+ atomic_t mod_reset;
+};
+
/* Global list of all mailboxes */
+struct hrtimer ape_timer;
+struct hrtimer modem_timer;
+static DEFINE_MUTEX(modem_state_mutex);
static struct list_head mboxs = LIST_HEAD_INIT(mboxs);
+static struct mbox_device_info *mb;
+
+static enum hrtimer_restart mbox_ape_callback(struct hrtimer *hrtimer)
+{
+ queue_work(mb->mbox_modem_rel_wq, &mb->mbox_modem_rel);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart mbox_mod_callback(struct hrtimer *hrtimer)
+{
+ atomic_set(&mb->ape_state, 0);
+ return HRTIMER_NORESTART;
+}
+
+static void mbox_modem_rel_work(struct work_struct *work)
+{
+ mutex_lock(&modem_state_mutex);
+ prcmu_modem_rel();
+ atomic_set(&mb->mod_req, 0);
+ mutex_unlock(&modem_state_mutex);
+}
+
+static void mbox_modem_req(void)
+{
+ mutex_lock(&modem_state_mutex);
+ if (!db5500_prcmu_is_modem_requested()) {
+ prcmu_modem_req();
+ /* TODO: optimize this timeout */
+ if (!wait_for_completion_timeout(&mb->mod_req_ack_work,
+ msecs_to_jiffies(2000)))
+ printk(KERN_ERR "mbox:modem_req_ack timedout(2sec)\n");
+ }
+ atomic_set(&mb->mod_req, 1);
+ mutex_unlock(&modem_state_mutex);
+}
static struct mbox *get_mbox_with_id(u8 id)
{
@@ -69,15 +124,24 @@ static struct mbox *get_mbox_with_id(u8 id)
int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block)
{
int res = 0;
+ unsigned long flag;
- spin_lock(&mbox->lock);
-
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev,
+ "mbox_send called after modem reset\n");
+ return -EINVAL;
+ }
dev_dbg(&(mbox->pdev->dev),
"About to buffer 0x%X to mailbox 0x%X."
" ri = %d, wi = %d\n",
mbox_msg, (u32)mbox, mbox->read_index,
mbox->write_index);
+ /* Request for modem */
+ if (!db5500_prcmu_is_modem_requested())
+ mbox_modem_req();
+
+ spin_lock_irqsave(&mbox->lock, flag);
/* Check if write buffer is full */
while (((mbox->write_index + 1) % MBOX_BUF_SIZE) == mbox->read_index) {
if (!block) {
@@ -87,14 +151,14 @@ int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block)
res = -ENOMEM;
goto exit;
}
- spin_unlock(&mbox->lock);
+ spin_unlock_irqrestore(&mbox->lock, flag);
dev_dbg(&(mbox->pdev->dev),
"Buffer full in blocking call! Sleeping...\n");
mbox->client_blocked = 1;
wait_for_completion(&mbox->buffer_available);
dev_dbg(&(mbox->pdev->dev),
"Blocking send was woken up! Trying again...\n");
- spin_lock(&mbox->lock);
+ spin_lock_irqsave(&mbox->lock, flag);
}
mbox->buffer[mbox->write_index] = mbox_msg;
@@ -104,10 +168,16 @@ int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block)
* Indicate that we want an IRQ as soon as there is a slot
* in the FIFO
*/
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev,
+ "modem is in reset state, cannot proceed\n");
+ res = -EINVAL;
+ goto exit;
+ }
writel(MBOX_ENABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
exit:
- spin_unlock(&mbox->lock);
+ spin_unlock_irqrestore(&mbox->lock, flag);
return res;
}
EXPORT_SYMBOL(mbox_send);
@@ -129,7 +199,8 @@ static ssize_t mbox_write_fifo(struct device *dev,
char *token;
char *val;
- struct mbox *mbox = (struct mbox *) dev->platform_data;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mbox *mbox = platform_get_drvdata(pdev);
strncpy((char *) &int_buf, buf, sizeof(int_buf));
token = (char *) &int_buf;
@@ -157,8 +228,13 @@ static ssize_t mbox_read_fifo(struct device *dev,
char *buf)
{
int mbox_value;
- struct mbox *mbox = (struct mbox *) dev->platform_data;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mbox *mbox = platform_get_drvdata(pdev);
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev, "modem crashed, returning\n");
+ return 0;
+ }
if ((readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7) <= 0)
return sprintf(buf, "Mailbox is empty\n");
@@ -193,6 +269,11 @@ static int mbox_show(struct seq_file *s, void *data)
continue;
}
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&m->pdev->dev, "modem crashed, returning\n");
+ spin_unlock(&m->lock);
+ return 0;
+ }
seq_printf(s,
"===========================\n"
" MAILBOX %d\n"
@@ -272,6 +353,10 @@ static irqreturn_t mbox_irq(int irq, void *arg)
int nbr_free;
struct mbox *mbox = (struct mbox *) arg;
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev, "modem in reset state\n");
+ return IRQ_HANDLED;
+ }
spin_lock(&mbox->lock);
dev_dbg(&(mbox->pdev->dev),
@@ -295,6 +380,11 @@ static irqreturn_t mbox_irq(int irq, void *arg)
while ((nbr_free > 0) &&
(mbox->read_index != mbox->write_index)) {
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev,
+ "modem in reset state\n");
+ goto exit;
+ }
/* Write the message and latch it into the FIFO */
writel(mbox->buffer[mbox->read_index],
(mbox->virtbase_peer + MBOX_FIFO_DATA));
@@ -310,6 +400,10 @@ static irqreturn_t mbox_irq(int irq, void *arg)
(mbox->read_index + 1) % MBOX_BUF_SIZE;
}
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev, "modem in reset state\n");
+ goto exit;
+ }
/*
* Check if we still want IRQ:s when there is free
* space to send
@@ -342,17 +436,31 @@ static irqreturn_t mbox_irq(int irq, void *arg)
}
}
+ /* Start timer and on timer expiry call modem_rel */
+ hrtimer_start(&ape_timer, ktime_set(0, 10*NSEC_PER_MSEC),
+ HRTIMER_MODE_REL);
+
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev, "modem in reset state\n");
+ goto exit;
+ }
/* Check if we have any incoming messages */
nbr_occup = readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7;
if (nbr_occup == 0)
goto exit;
+redo:
if (mbox->cb == NULL) {
dev_dbg(&(mbox->pdev->dev), "No receive callback registered, "
"leaving %d incoming messages in fifo!\n", nbr_occup);
goto exit;
}
+ atomic_set(&mb->ape_state, 1);
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev, "modem in reset state\n");
+ goto exit;
+ }
/* Read and acknowledge the message */
mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA);
writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE));
@@ -362,6 +470,14 @@ static irqreturn_t mbox_irq(int irq, void *arg)
mbox_value);
mbox->cb(mbox_value, mbox->client_data);
+ nbr_occup = readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7;
+
+ if (nbr_occup > 0)
+ goto redo;
+
+ /* Start a timer; its expiry is the criterion for allowing sleep */
+ hrtimer_start(&modem_timer, ktime_set(0, 100*MSEC_PER_SEC),
+ HRTIMER_MODE_REL);
exit:
dev_dbg(&(mbox->pdev->dev), "Exit mbox IRQ. ri = %d, wi = %d\n",
mbox->read_index, mbox->write_index);
@@ -370,14 +486,78 @@ exit:
return IRQ_HANDLED;
}
+static void mbox_shutdown(struct mbox *mbox)
+{
+ if (!mbox->allocated)
+ return;
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove(mbox->dentry);
+ device_remove_file(&mbox->pdev->dev, &dev_attr_fifo);
+#endif
+ /* TODO: Need to check if we can write after modem reset */
+ if (!atomic_read(&mb->mod_reset)) {
+ writel(MBOX_DISABLE_IRQ, mbox->virtbase_local +
+ MBOX_FIFO_THRES_OCCUP);
+ writel(MBOX_DISABLE_IRQ, mbox->virtbase_peer +
+ MBOX_FIFO_THRES_FREE);
+ }
+ free_irq(mbox->irq, (void *)mbox);
+ mbox->client_blocked = 0;
+ iounmap(mbox->virtbase_local);
+ iounmap(mbox->virtbase_peer);
+ mbox->cb = NULL;
+ mbox->client_data = NULL;
+ mbox->allocated = false;
+}
+
+/** mbox_state_reset - Reset the mailbox state machine
+ *
+ * This function is called on receiving the modem reset interrupt. It resets
+ * the whole mailbox state machine: it disables the IRQ, cancels the timers,
+ * shuts down the mailboxes and re-enables the IRQ.
+ */
+void mbox_state_reset(void)
+{
+ struct mbox *mbox = mb->mbox;
+
+ /* Common to all mailboxes */
+ atomic_set(&mb->mod_reset, 1);
+
+ /* Disable IRQ */
+ disable_irq_nosync(IRQ_DB5500_PRCMU_AC_WAKE_ACK);
+
+ /* Cancel sleep_req timers */
+ hrtimer_cancel(&modem_timer);
+ hrtimer_cancel(&ape_timer);
+
+ /* specific to each mailbox */
+ list_for_each_entry(mbox, &mboxs, list) {
+ mbox_shutdown(mbox);
+ }
+
+ /* Reset mailbox state machine */
+ atomic_set(&mb->mod_req, 0);
+ atomic_set(&mb->ape_state, 0);
+
+ /* Enable irq */
+ enable_irq(IRQ_DB5500_PRCMU_AC_WAKE_ACK);
+}
+
+
/* Setup is executed once for each mbox pair */
struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
{
struct resource *resource;
- int irq;
int res;
struct mbox *mbox;
+ /*
+ * Set the mod_reset flag to '0'. Clients calling this API should make
+ * sure that the modem is rebooted after MSR; the mailbox has no means
+ * of knowing the modem's boot status.
+ */
+ atomic_set(&mb->mod_reset, 0);
+
mbox = get_mbox_with_id(mbox_id);
if (mbox == NULL) {
dev_err(&(mbox->pdev->dev), "Incorrect mailbox id: %d!\n",
@@ -411,7 +591,7 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
dev_err(&(mbox->pdev->dev),
"Unable to retrieve mbox peer resource\n");
mbox = NULL;
- goto exit;
+ goto free_mbox;
}
dev_dbg(&(mbox->pdev->dev),
"Resource name: %s start: 0x%X, end: 0x%X\n",
@@ -420,7 +600,7 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
if (!mbox->virtbase_peer) {
dev_err(&(mbox->pdev->dev), "Unable to ioremap peer mbox\n");
mbox = NULL;
- goto exit;
+ goto free_mbox;
}
dev_dbg(&(mbox->pdev->dev),
"ioremapped peer physical: (0x%X-0x%X) to virtual: 0x%X\n",
@@ -434,7 +614,7 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
dev_err(&(mbox->pdev->dev),
"Unable to retrieve mbox local resource\n");
mbox = NULL;
- goto exit;
+ goto free_map;
}
dev_dbg(&(mbox->pdev->dev),
"Resource name: %s start: 0x%X, end: 0x%X\n",
@@ -443,7 +623,7 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
if (!mbox->virtbase_local) {
dev_err(&(mbox->pdev->dev), "Unable to ioremap local mbox\n");
mbox = NULL;
- goto exit;
+ goto free_map;
}
dev_dbg(&(mbox->pdev->dev),
"ioremapped local physical: (0x%X-0x%X) to virtual: 0x%X\n",
@@ -453,23 +633,32 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
mbox->client_blocked = 0;
/* Get IRQ for mailbox and allocate it */
- irq = platform_get_irq_byname(mbox->pdev, "mbox_irq");
- if (irq < 0) {
+ mbox->irq = platform_get_irq_byname(mbox->pdev, "mbox_irq");
+ if (mbox->irq < 0) {
dev_err(&(mbox->pdev->dev),
"Unable to retrieve mbox irq resource\n");
mbox = NULL;
- goto exit;
+ goto free_map1;
}
- dev_dbg(&(mbox->pdev->dev), "Allocating irq %d...\n", irq);
- res = request_irq(irq, mbox_irq, 0, mbox->name, (void *) mbox);
+ dev_dbg(&(mbox->pdev->dev), "Allocating irq %d...\n", mbox->irq);
+ res = request_threaded_irq(mbox->irq, NULL, mbox_irq,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ mbox->name, (void *) mbox);
if (res < 0) {
dev_err(&(mbox->pdev->dev),
- "Unable to allocate mbox irq %d\n", irq);
+ "Unable to allocate mbox irq %d\n", mbox->irq);
mbox = NULL;
goto exit;
}
+ /* check if modem has reset */
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev,
+ "modem is in reset state, cannot proceed\n");
+ mbox = NULL;
+ goto free_irq;
+ }
/* Set up mailbox to not launch IRQ on free space in mailbox */
writel(MBOX_DISABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
@@ -491,39 +680,46 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
dev_warn(&(mbox->pdev->dev),
"Unable to create mbox sysfs entry");
- (void) debugfs_create_file("mbox", S_IFREG | S_IRUGO, NULL,
+ mbox->dentry = debugfs_create_file("mbox", S_IFREG | S_IRUGO, NULL,
NULL, &mbox_operations);
#endif
-
dev_info(&(mbox->pdev->dev),
"Mailbox driver with index %d initiated!\n", mbox_id);
+ return mbox;
+free_irq:
+ free_irq(mbox->irq, (void *)mbox);
+free_map1:
+ iounmap(mbox->virtbase_local);
+free_map:
+ iounmap(mbox->virtbase_peer);
+free_mbox:
+ mbox->client_data = NULL;
+ mbox->cb = NULL;
exit:
return mbox;
}
EXPORT_SYMBOL(mbox_setup);
+static irqreturn_t mbox_prcmu_mod_req_ack_handler(int irq, void *data)
+{
+ complete(&mb->mod_req_ack_work);
+ return IRQ_HANDLED;
+}
int __init mbox_probe(struct platform_device *pdev)
{
- struct mbox local_mbox;
struct mbox *mbox;
int res = 0;
dev_dbg(&(pdev->dev), "Probing mailbox (pdev = 0x%X)...\n", (u32) pdev);
- memset(&local_mbox, 0x0, sizeof(struct mbox));
-
- /* Associate our mbox data with the platform device */
- res = platform_device_add_data(pdev,
- (void *) &local_mbox,
- sizeof(struct mbox));
- if (res != 0) {
- dev_err(&(pdev->dev),
- "Unable to allocate driver platform data!\n");
- goto exit;
+ mbox = kzalloc(sizeof(struct mbox), GFP_KERNEL);
+ if (mbox == NULL) {
+ dev_err(&pdev->dev,
+ "Could not allocate memory for struct mbox\n");
+ return -ENOMEM;
}
- mbox = (struct mbox *) pdev->dev.platform_data;
mbox->pdev = pdev;
mbox->write_index = 0;
mbox->read_index = 0;
@@ -534,29 +730,135 @@ int __init mbox_probe(struct platform_device *pdev)
sprintf(mbox->name, "%s", MBOX_NAME);
spin_lock_init(&mbox->lock);
+ platform_set_drvdata(pdev, mbox);
+ mb->mbox = mbox;
dev_info(&(pdev->dev), "Mailbox driver loaded\n");
-exit:
return res;
}
+static int __exit mbox_remove(struct platform_device *pdev)
+{
+ struct mbox *mbox = platform_get_drvdata(pdev);
+
+ hrtimer_cancel(&ape_timer);
+ hrtimer_cancel(&modem_timer);
+ mbox_shutdown(mbox);
+ list_del(&mbox->list);
+ kfree(mbox);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int mbox_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mbox *mbox = platform_get_drvdata(pdev);
+
+ /*
+ * Nothing to be done for now, once APE-Modem power management is
+ * in place communication will have to be stopped.
+ */
+
+ list_for_each_entry(mbox, &mboxs, list) {
+ if (mbox->client_blocked)
+ return -EBUSY;
+ }
+ dev_dbg(dev, "APE_STATE = %d\n", atomic_read(&mb->ape_state));
+ dev_dbg(dev, "MODEM_STATE = %d\n", db5500_prcmu_is_modem_requested());
+ if (atomic_read(&mb->ape_state) || db5500_prcmu_is_modem_requested() ||
+ atomic_read(&mb->mod_req))
+ return -EBUSY;
+ return 0;
+}
+
+int mbox_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mbox *mbox = platform_get_drvdata(pdev);
+
+ /*
+ * Nothing to be done for now; once APE-Modem power management is
+ * in place, communication will have to be resumed.
+ */
+
+ return 0;
+}
+
+static const struct dev_pm_ops mbox_dev_pm_ops = {
+ .suspend_noirq = mbox_suspend,
+ .resume_noirq = mbox_resume,
+};
+#endif
+
static struct platform_driver mbox_driver = {
+ .remove = __exit_p(mbox_remove),
.driver = {
.name = MBOX_NAME,
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &mbox_dev_pm_ops,
+#endif
},
};
static int __init mbox_init(void)
{
+ struct mbox_device_info *mb_di;
+ int err;
+
+ mb_di = kzalloc(sizeof(struct mbox_device_info), GFP_KERNEL);
+ if (mb_di == NULL) {
+ printk(KERN_ERR
+ "mbox:Could not allocate memory for struct mbox_device_info\n");
+ return -ENOMEM;
+ }
+
+ mb_di->mbox_modem_rel_wq = create_singlethread_workqueue(
+ "mbox_modem_rel");
+ if (!mb_di->mbox_modem_rel_wq) {
+ printk(KERN_ERR "mbox:failed to create work queue\n");
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ INIT_WORK(&mb_di->mbox_modem_rel, mbox_modem_rel_work);
+
+ hrtimer_init(&ape_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ape_timer.function = mbox_ape_callback;
+ hrtimer_init(&modem_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ modem_timer.function = mbox_mod_callback;
+
+ atomic_set(&mb_di->ape_state, 0);
+ atomic_set(&mb_di->mod_req, 0);
+ atomic_set(&mb_di->mod_reset, 0);
+
+ err = request_irq(IRQ_DB5500_PRCMU_AC_WAKE_ACK,
+ mbox_prcmu_mod_req_ack_handler,
+ IRQF_NO_SUSPEND, "mod_req_ack", NULL);
+ if (err < 0) {
+ printk(KERN_ERR "mbox:Failed alloc IRQ_PRCMU_CA_SLEEP.\n");
+ goto free_irq;
+ }
+
+ init_completion(&mb_di->mod_req_ack_work);
+ mb = mb_di;
return platform_driver_probe(&mbox_driver, mbox_probe);
+free_irq:
+ destroy_workqueue(mb_di->mbox_modem_rel_wq);
+free_mem:
+ kfree(mb_di);
+ return err;
}
module_init(mbox_init);
void __exit mbox_exit(void)
{
+ free_irq(IRQ_DB5500_PRCMU_AC_WAKE_ACK, NULL);
+ destroy_workqueue(mb->mbox_modem_rel_wq);
platform_driver_unregister(&mbox_driver);
+ kfree(mb);
}
module_exit(mbox_exit);
diff --git a/drivers/misc/mbox_channels-db5500.c b/drivers/misc/mbox_channels-db5500.c
new file mode 100644
index 00000000000..919be308ed4
--- /dev/null
+++ b/drivers/misc/mbox_channels-db5500.c
@@ -0,0 +1,1273 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Mailbox Logical Driver
+ *
+ * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com> for ST-Ericsson.
+ * Bibek Basu <bibek.basu@stericsson.com>
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <asm/mach-types.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <mach/mbox-db5500.h>
+#include <mach/mbox_channels-db5500.h>
+#include <linux/io.h>
+
+/* Defines start sequence number for given mailbox channel */
+#define CHANNEL_START_SEQUENCE_NUMBER 0x80
+
+/* Defines number of channels per mailbox unit */
+#define CHANNELS_PER_MBOX_UNIT 256
+
+/*
+ * This macro builds mbox channel PDU header with following format:
+ * ---------------------------------------------------------------------------
+ * | | | | |
+ * | Sequence nmbr | Type | Length | Destination logical channel number |
+ * | | | | |
+ * ---------------------------------------------------------------------------
+ * 31 24 20 16 0
+ *
+ */
+#define BUILD_HEADER(chan, len, type, seq_no) \
+ ((chan) | (((len) & 0xf) << 16) | \
+ (((type) & 0xf) << 20) | ((seq_no) << 24))
+
+/* Returns type from mbox message header */
+#define GET_TYPE(mbox_msg) (((mbox_msg) >> 20) & 0xf)
+
+/* Returns channel number from mbox message header */
+#define GET_CHANNEL(mbox_msg) ((mbox_msg) & 0xffff)
+
+/* Returns length of payload from mbox message header */
+#define GET_LENGTH(mbox_msg) (((mbox_msg) >> 16) & 0xf)
+
+/* Returns sequence number from mbox message header */
+#define GET_SEQ_NUMBER(mbox_msg) ((mbox_msg) >> 24)
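
A worked example of the header layout documented above, with MBOX_CAST == 3 taken from the enum below:

/*
 * BUILD_HEADER(0x900, 2, MBOX_CAST, 0x80)
 *   = 0x900 | (2 << 16) | (3 << 20) | (0x80 << 24)
 *   = 0x80320900
 *
 * and the accessors recover the pieces again:
 *   GET_CHANNEL(0x80320900)    == 0x900
 *   GET_LENGTH(0x80320900)     == 2
 *   GET_TYPE(0x80320900)       == 3 (MBOX_CAST)
 *   GET_SEQ_NUMBER(0x80320900) == 0x80
 */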
+
+enum mbox_msg{
+ MBOX_CLOSE,
+ MBOX_OPEN,
+ MBOX_SEND,
+ MBOX_CAST,
+ MBOX_ACK,
+ MBOX_NAK,
+};
+
+enum mbox_dir {
+ MBOX_TX,
+ MBOX_RX,
+};
+
+struct mbox_channel_mapping {
+ u16 chan_base;
+ u8 mbox_id;
+ enum mbox_dir direction;
+};
+
+/* This table maps mbox logical channel to mbox id and direction */
+static struct mbox_channel_mapping channel_mappings[] = {
+ {0x500, 2, MBOX_RX}, /* channel 5 maps to mbox 0.1, dsp->app (unsec) */
+ {0x900, 2, MBOX_TX}, /* channel 9 maps to mbox 0.0, app->dsp (unsec) */
+};
+
+/* This table specifies mailbox ids which mbox channels module will use */
+static u8 mbox_ids[] = {
+ 2, /* app <-> dsp (unsec) */
+};
+
+/**
+ * struct mbox_unit_status - current status of mbox unit
+ * @mbox_id : holds mbox unit identification number
+ * @mbox : holds mbox pointer after mbox_register() call
+ * @tx_chans : holds list of open tx mbox channels
+ * @tx_lock: lock for tx channel
+ * @rx_chans : holds list of open rx mbox channels
+ * @rx_lock: lock for rx channel
+ */
+struct mbox_unit_status {
+ u8 mbox_id;
+ struct mbox *mbox;
+ struct list_head tx_chans;
+ spinlock_t tx_lock;
+ struct list_head rx_chans;
+ spinlock_t rx_lock;
+};
+
+static struct {
+ struct platform_device *pdev;
+ struct mbox_unit_status mbox_unit[ARRAY_SIZE(mbox_ids)];
+} channels;
+
+/* This structure describes pending element for mbox tx channel */
+struct pending_elem {
+ struct list_head list;
+ u32 *data;
+ u8 length;
+};
+
+struct rx_pending_elem {
+ u32 buffer[MAILBOX_NR_OF_DATAWORDS];
+ u8 length;
+ void *priv;
+};
+
+struct rx_pending_elem rx_pending[NUM_DSP_BUFFER];
+
+/* This structure holds list of pending elements for mbox tx channel */
+struct tx_channel {
+ struct list_head pending;
+};
+
+/* Specific status for mbox rx channel */
+struct rx_channel {
+ struct list_head pending;
+ spinlock_t lock;
+ u32 buffer[MAILBOX_NR_OF_DATAWORDS];
+ u8 index;
+ u8 length;
+};
+
+/**
+ * struct channel_status - status of mbox channel - common for tx and rx
+ * @list : holds list of channels registered
+ * @channel : holds channel number
+ * @state : holds state of channel
+ * @cb: holds callback function for rx channel
+ * @with_ack : holds whether an ack is needed
+ * @rx: holds pointer to rx_channel
+ * @tx : holds pointer to tx_channel
+ * @receive_wq : holds pointer to receive workqueue_struct
+ * @cast_wq : holds pointer to cast workqueue_struct
+ * @open_msg: holds work_struct for open msg
+ * @receive_msg : holds work_struct for receive msg
+ * @cast_msg: holds work_struct for cast msg
+ * @lock: holds lock for channel
+ */
+struct channel_status {
+ atomic_t rcv_counter;
+ struct list_head list;
+ u16 channel;
+ int state;
+ mbox_channel_cb_t *cb;
+ void *priv;
+ u8 seq_number;
+ bool with_ack;
+ struct rx_channel rx;
+ struct tx_channel tx;
+ struct workqueue_struct *receive_wq;
+ struct workqueue_struct *cast_wq;
+ struct work_struct open_msg;
+ struct work_struct receive_msg;
+ struct work_struct cast_msg;
+ struct mutex lock;
+};
+
+/* Checks if provided channel number is valid */
+static bool check_channel(u16 channel, enum mbox_dir direction)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) {
+ if ((channel >= channel_mappings[i].chan_base) &&
+ (channel < channel_mappings[i].chan_base +
+ CHANNELS_PER_MBOX_UNIT)) {
+ /* Check if direction of given channel is correct*/
+ if (channel_mappings[i].direction == direction)
+ return true;
+ else
+ break;
+ }
+ }
+ return false;
+}
+
+/* get the tx channel corresponding to the given rx channel */
+static u16 get_tx_channel(u16 channel)
+{
+ int i;
+ int relative_chan = 0;
+ int mbox_id = 0xFF;
+ u16 tx_channel = 0xFF;
+
+ for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) {
+ if ((channel >= channel_mappings[i].chan_base) &&
+ (channel < channel_mappings[i].chan_base +
+ CHANNELS_PER_MBOX_UNIT)) {
+ /* Check if direction of given channel is correct*/
+ relative_chan = channel - channel_mappings[i].chan_base;
+ mbox_id = channel_mappings[i].mbox_id;
+
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) {
+ if ((mbox_id == channel_mappings[i].mbox_id) &&
+ (channel_mappings[i].direction == MBOX_TX))
+ tx_channel = channel_mappings[i].chan_base +
+ relative_chan;
+ }
+ return tx_channel;
+}
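
A worked example of the mapping, derived from the channel_mappings table above:

/*
 * get_tx_channel(0x500):
 *   - 0x500 falls in the RX range based at 0x500 on mbox 2,
 *     so relative_chan = 0 and mbox_id = 2
 *   - the TX range for mbox 2 is based at 0x900,
 *     so the returned TX channel is 0x900 + 0 = 0x900
 *
 * i.e. traffic received on RX channel 0x500 is answered on TX channel 0x900.
 */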
+
+/* Returns mbox unit id for given mbox channel */
+static int get_mbox_id(u16 channel)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) {
+ if ((channel >= channel_mappings[i].chan_base) &&
+ (channel < channel_mappings[i].chan_base +
+ CHANNELS_PER_MBOX_UNIT)) {
+ return channel_mappings[i].mbox_id;
+ }
+ }
+ /* There is no mbox unit registered for given channel */
+ return -EINVAL;
+}
+
+/* Returns mbox structure saved after mbox_register() call */
+static struct mbox *get_mbox(u16 channel)
+{
+ int i;
+ int mbox_id = get_mbox_id(channel);
+
+ if (mbox_id < 0) {
+ dev_err(&channels.pdev->dev, "couldn't get mbox id\n");
+ return NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(channels.mbox_unit); i++) {
+ if (channels.mbox_unit[i].mbox_id == mbox_id)
+ return channels.mbox_unit[i].mbox;
+ }
+ return NULL;
+}
+
+/* Returns pointer to rx mbox channels list for given mbox unit */
+static struct list_head *get_rx_list(u8 mbox_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mbox_ids); i++) {
+ if (channels.mbox_unit[i].mbox_id == mbox_id)
+ return &channels.mbox_unit[i].rx_chans;
+ }
+ return NULL;
+}
+
+/* Returns pointer to tx mbox channels list for given mbox unit */
+static struct list_head *get_tx_list(u8 mbox_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mbox_ids); i++) {
+ if (channels.mbox_unit[i].mbox_id == mbox_id)
+ return &channels.mbox_unit[i].tx_chans;
+ }
+ return NULL;
+}
+
+static int send_pdu(struct channel_status *chan_status, int command,
+ u16 channel)
+{
+ struct mbox *mbox;
+ u32 header = 0;
+ int ret = 0;
+ /* SEND PDU is not supported */
+ if (command == MBOX_SEND) {
+ dev_err(&channels.pdev->dev, "SEND command not implemented\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ mbox = get_mbox(chan_status->channel);
+ if (mbox == NULL) {
+ dev_err(&channels.pdev->dev, "couldn't get mailbox\n");
+ ret = -ENOSYS;
+ goto exit;
+ }
+ /* For CAST type send all pending messages */
+ if (command == MBOX_CAST) {
+ struct list_head *pos, *n;
+
+ /* Send all pending messages from TX channel */
+ list_for_each_safe(pos, n, &chan_status->tx.pending) {
+ struct pending_elem *pending =
+ list_entry(pos, struct pending_elem, list);
+ int i;
+
+ header = BUILD_HEADER(channel,
+ pending->length,
+ command,
+ chan_status->seq_number);
+
+ ret = mbox_send(mbox, header, true);
+ if (ret < 0) {
+ dev_err(&channels.pdev->dev,
+ "failed to send header, err=%d\n", ret);
+ goto exit;
+ }
+
+ for (i = 0; i < pending->length; i++) {
+ ret = mbox_send(mbox, pending->data[i], true);
+ if (ret < 0) {
+ dev_err(&channels.pdev->dev,
+ "failed to send header, err=%d\n", ret);
+ goto exit;
+ }
+ }
+
+ /* Call client's callback that data is already sent */
+ if (chan_status->cb)
+ chan_status->cb(pending->data, pending->length,
+ chan_status->priv);
+ else
+ dev_err(&channels.pdev->dev,
+ "%s no callback provided:header 0x%x\n",
+ __func__, header);
+
+ /* Increment sequence number */
+ chan_status->seq_number++;
+
+ /* Remove and free element from the list */
+ list_del(&pending->list);
+ kfree(pending);
+ }
+ } else {
+ header = BUILD_HEADER(channel, 0,
+ command, chan_status->seq_number);
+
+ ret = mbox_send(mbox, header, true);
+ if (ret < 0)
+ dev_err(&channels.pdev->dev, "failed to send header\n");
+ /* Increment sequence number */
+ chan_status->seq_number++;
+ }
+
+exit:
+ return ret;
+}
+
+void mbox_handle_receive_msg(struct work_struct *work)
+{
+ struct channel_status *rx_chan = container_of(work,
+ struct channel_status,
+ receive_msg);
+
+ if (!atomic_read(&rx_chan->rcv_counter))
+ return;
+rcv_msg:
+ /* Call client's callback and reset state */
+ if (rx_chan->cb) {
+ static int rx_pending_count;
+ rx_chan->cb(rx_pending[rx_pending_count].buffer,
+ rx_pending[rx_pending_count].length,
+ rx_pending[rx_pending_count].priv);
+ rx_pending_count++;
+ if (rx_pending_count == NUM_DSP_BUFFER)
+ rx_pending_count = 0;
+ } else {
+ dev_err(&channels.pdev->dev,
+ "%s no callback provided\n", __func__);
+ }
+ if (atomic_dec_return(&rx_chan->rcv_counter) > 0)
+ goto rcv_msg;
+
+}
+
+void mbox_handle_open_msg(struct work_struct *work)
+{
+ struct channel_status *tx_chan = container_of(work,
+ struct channel_status,
+ open_msg);
+ /* Change channel state to OPEN */
+ tx_chan->state = MBOX_OPEN;
+ /* If pending list not empty, start sending data */
+ mutex_lock(&tx_chan->lock);
+ if (!list_empty(&tx_chan->tx.pending))
+ send_pdu(tx_chan, MBOX_CAST, tx_chan->channel);
+ mutex_unlock(&tx_chan->lock);
+}
+
+void mbox_handle_cast_msg(struct work_struct *work)
+{
+ struct channel_status *rx_chan = container_of(work,
+ struct channel_status,
+ cast_msg);
+ /* Check if channel is opened */
+ if (rx_chan->state == MBOX_CLOSE) {
+ /* Peer sent message to closed channel */
+ dev_err(&channels.pdev->dev,
+ "channel in wrong state\n");
+ }
+}
+
+static bool handle_receive_msg(u32 mbox_msg, struct channel_status *rx_chan)
+{
+ int i;
+ static int rx_pending_count;
+
+ if (rx_chan) {
+ /* Store received data in RX channel buffer */
+ rx_chan->rx.buffer[rx_chan->rx.index++] = mbox_msg;
+
+ /* Check if it's last data of PDU */
+ if (rx_chan->rx.index == rx_chan->rx.length) {
+ for (i = 0; i < MAILBOX_NR_OF_DATAWORDS; i++) {
+ rx_pending[rx_pending_count].buffer[i] =
+ rx_chan->rx.buffer[i];
+ }
+
+ rx_pending[rx_pending_count].length =
+ rx_chan->rx.length;
+ rx_pending[rx_pending_count].priv = rx_chan->priv;
+ rx_chan->rx.index = 0;
+ rx_chan->rx.length = 0;
+ rx_chan->state = MBOX_OPEN;
+ rx_chan->seq_number++;
+ rx_pending_count++;
+ if (rx_pending_count == NUM_DSP_BUFFER)
+ rx_pending_count = 0;
+ atomic_inc(&rx_chan->rcv_counter);
+ queue_work(rx_chan->receive_wq,
+ &rx_chan->receive_msg);
+ }
+ dev_dbg(&channels.pdev->dev, "%s OK\n", __func__);
+
+ return true;
+ }
+ return false;
+}
+
+static void handle_open_msg(u16 channel, u8 mbox_id)
+{
+ struct list_head *tx_list, *pos;
+ struct channel_status *tmp;
+ struct channel_status *tx_chan = NULL;
+ struct mbox_unit_status *mbox_unit;
+ channel = get_tx_channel(channel);
+ dev_dbg(&channels.pdev->dev, "%s mbox_id %d\tchannel %x\n",
+ __func__, mbox_id, channel);
+ /* Get TX channel for given mbox unit */
+ tx_list = get_tx_list(mbox_id);
+ if (tx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid %d\n",
+ mbox_id);
+ return;
+ }
+ mbox_unit = container_of(tx_list, struct mbox_unit_status, tx_chans);
+ /* Search for channel in tx list */
+ spin_lock(&mbox_unit->tx_lock);
+ list_for_each(pos, tx_list) {
+ tmp = list_entry(pos, struct channel_status, list);
+ dev_dbg(&channels.pdev->dev, "tmp->channel=%d\n",
+ tmp->channel);
+ if (tmp->channel == channel)
+ tx_chan = tmp;
+ }
+ spin_unlock(&mbox_unit->tx_lock);
+ if (tx_chan) {
+ schedule_work(&tx_chan->open_msg);
+ } else {
+ /* No tx channel found on the list, allocate new element */
+ tx_chan = kzalloc(sizeof(*tx_chan), GFP_ATOMIC);
+ if (tx_chan == NULL) {
+ dev_err(&channels.pdev->dev,
+ "failed to allocate memory\n");
+ return;
+ }
+
+ /* Fill initial data and add this element to tx list */
+ tx_chan->channel = get_tx_channel(channel);
+ tx_chan->state = MBOX_OPEN;
+ tx_chan->seq_number = CHANNEL_START_SEQUENCE_NUMBER;
+ INIT_LIST_HEAD(&tx_chan->tx.pending);
+ INIT_WORK(&tx_chan->open_msg, mbox_handle_open_msg);
+ INIT_WORK(&tx_chan->cast_msg, mbox_handle_cast_msg);
+ INIT_WORK(&tx_chan->receive_msg, mbox_handle_receive_msg);
+ mutex_init(&tx_chan->lock);
+ spin_lock(&mbox_unit->tx_lock);
+ list_add_tail(&tx_chan->list, tx_list);
+ spin_unlock(&mbox_unit->tx_lock);
+ }
+}
+
+static void handle_cast_msg(u16 channel, struct channel_status *rx_chan,
+ u32 mbox_msg, bool send)
+{
+ dev_dbg(&channels.pdev->dev, " %s\n", __func__);
+ if (rx_chan) {
+ rx_chan->rx.buffer[0] = mbox_msg;
+ rx_chan->with_ack = send;
+ rx_chan->rx.length = GET_LENGTH(rx_chan->rx.buffer[0]);
+ if (rx_chan->rx.length <= MAILBOX_NR_OF_DATAWORDS &&
+ rx_chan->rx.length > 0) {
+ rx_chan->rx.index = 0;
+ rx_chan->state = MBOX_CAST;
+ }
+ queue_work(rx_chan->cast_wq,
+ &rx_chan->cast_msg);
+ } else {
+ /* Channel not found, peer sent wrong message */
+ dev_err(&channels.pdev->dev, "channel %d doesn't exist\n",
+ channel);
+ }
+}
+
+/*
+ * This callback is called whenever mbox unit receives data.
+ * priv parameter holds mbox unit id.
+ */
+static void mbox_cb(u32 mbox_msg, void *priv)
+{
+ u8 mbox_id = *(u8 *)priv;
+ struct list_head *rx_list;
+ u8 type = GET_TYPE(mbox_msg);
+ u16 channel = GET_CHANNEL(mbox_msg);
+ struct mbox_unit_status *mbox_unit;
+ struct list_head *pos;
+ struct channel_status *tmp;
+ struct channel_status *rx_chan = NULL;
+ bool is_Payload = 0;
+
+ dev_dbg(&channels.pdev->dev, "%s type %d\t, mbox_msg %x\n",
+ __func__, type, mbox_msg);
+
+ /* Get RX channels list for given mbox unit */
+ rx_list = get_rx_list(mbox_id);
+ if (rx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid %d\n",
+ mbox_id);
+ return;
+ }
+
+ mbox_unit = container_of(rx_list, struct mbox_unit_status, rx_chans);
+ /* Search for channel in rx list */
+ spin_lock(&mbox_unit->rx_lock);
+ list_for_each(pos, rx_list) {
+ tmp = list_entry(pos, struct channel_status, list);
+ if (tmp->state == MBOX_SEND ||
+ tmp->state == MBOX_CAST) {
+ /* Received message is payload */
+ is_Payload = 1;
+ rx_chan = tmp;
+ } else
+ if (tmp->channel == channel)
+ rx_chan = tmp;
+ }
+ spin_unlock(&mbox_unit->rx_lock);
+ /* if callback is present for that RX channel */
+ if (rx_chan && rx_chan->cb) {
+ /* If received message is payload this
+ * function will take care of it
+ */
+ if ((is_Payload) && (handle_receive_msg(mbox_msg, rx_chan)))
+ return;
+ } else
+ dev_err(&channels.pdev->dev, "callback not present:msg 0x%x "
+ "rx_chan 0x%x\n", mbox_msg, (u32)rx_chan);
+
+ /* Received message is header as no RX channel is in SEND/CAST state */
+ switch (type) {
+ case MBOX_CLOSE:
+ /* Not implemented */
+ break;
+ case MBOX_OPEN:
+ handle_open_msg(channel, mbox_id);
+ break;
+ case MBOX_SEND:
+ /* if callback is present for that RX channel */
+ if (rx_chan && rx_chan->cb)
+ handle_cast_msg(channel, rx_chan, mbox_msg, true);
+ break;
+ case MBOX_CAST:
+ /* if callback is present for that RX channel */
+ if (rx_chan && rx_chan->cb)
+ handle_cast_msg(channel, rx_chan, mbox_msg, false);
+ break;
+ case MBOX_ACK:
+ case MBOX_NAK:
+ /* Not implemented */
+ break;
+ }
+}
+
+/**
+ * mbox_channel_register() - Registers for a channel
+ * @channel: Channel Number.
+ * @cb: Pointer to the callback function (mbox_channel_cb_t)
+ * @priv: Pointer to private data
+ *
+ * This routine is used to register for a logical channel.
+ * It first does a sanity check on the requested channel availability
+ * and parameters, then prepares an internal entry for the channel
+ * and sends an OPEN request for that channel.
+ */
+int mbox_channel_register(u16 channel, mbox_channel_cb_t *cb, void *priv)
+{
+ struct channel_status *rx_chan;
+ struct list_head *pos, *rx_list;
+ int res = 0;
+ struct mbox_unit_status *mbox_unit;
+
+ dev_dbg(&channels.pdev->dev, " %s channel = %d\n", __func__, channel);
+ /* Check for callback fcn */
+ if (cb == NULL) {
+ dev_err(&channels.pdev->dev,
+ "channel callback missing:channel %d\n", channel);
+ res = -EINVAL;
+ goto exit;
+ }
+
+ /* Check if provided channel number is valid */
+ if (!check_channel(channel, MBOX_RX)) {
+ dev_err(&channels.pdev->dev, "wrong mbox channel number %d\n",
+ channel);
+ res = -EINVAL;
+ goto exit;
+ }
+
+ rx_list = get_rx_list(get_mbox_id(channel));
+ if (rx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid\n");
+ res = -EINVAL;
+ goto exit;
+ }
+
+ mbox_unit = container_of(rx_list, struct mbox_unit_status, rx_chans);
+
+ /* Check if channel is already registered */
+ spin_lock(&mbox_unit->rx_lock);
+ list_for_each(pos, rx_list) {
+ rx_chan = list_entry(pos, struct channel_status, list);
+
+ if (rx_chan->channel == channel) {
+ dev_dbg(&channels.pdev->dev,
+ "channel already registered\n");
+ rx_chan->cb = cb;
+ rx_chan->priv = priv;
+ spin_unlock(&mbox_unit->rx_lock);
+ goto exit;
+ }
+ }
+ spin_unlock(&mbox_unit->rx_lock);
+
+ rx_chan = kzalloc(sizeof(*rx_chan), GFP_KERNEL);
+ if (rx_chan == NULL) {
+ dev_err(&channels.pdev->dev,
+ "couldn't allocate channel status\n");
+ res = -ENOMEM;
+ goto exit;
+ }
+
+ atomic_set(&rx_chan->rcv_counter, 0);
+ /* Fill out newly allocated element and add it to rx list */
+ rx_chan->channel = channel;
+ rx_chan->cb = cb;
+ rx_chan->priv = priv;
+ rx_chan->seq_number = CHANNEL_START_SEQUENCE_NUMBER;
+ mutex_init(&rx_chan->lock);
+ INIT_LIST_HEAD(&rx_chan->rx.pending);
+ rx_chan->cast_wq = create_singlethread_workqueue("mbox_cast_msg");
+ if (!rx_chan->cast_wq) {
+ dev_err(&channels.pdev->dev, "failed to create work queue\n");
+ res = -ENOMEM;
+ goto error_cast_wq;
+ }
+ rx_chan->receive_wq = create_singlethread_workqueue("mbox_receive_msg");
+ if (!rx_chan->receive_wq) {
+ dev_err(&channels.pdev->dev, "failed to create work queue\n");
+ res = -ENOMEM;
+ goto error_recv_wq;
+ }
+ INIT_WORK(&rx_chan->open_msg, mbox_handle_open_msg);
+ INIT_WORK(&rx_chan->cast_msg, mbox_handle_cast_msg);
+ INIT_WORK(&rx_chan->receive_msg, mbox_handle_receive_msg);
+ spin_lock(&mbox_unit->rx_lock);
+ list_add_tail(&rx_chan->list, rx_list);
+ spin_unlock(&mbox_unit->rx_lock);
+
+ mutex_lock(&rx_chan->lock);
+ res = send_pdu(rx_chan, MBOX_OPEN, get_tx_channel(rx_chan->channel));
+ if (res) {
+ dev_err(&channels.pdev->dev, "failed to send OPEN command\n");
+ spin_lock(&mbox_unit->rx_lock);
+ list_del(&rx_chan->list);
+ spin_unlock(&mbox_unit->rx_lock);
+ mutex_unlock(&rx_chan->lock);
+ goto error_send_pdu;
+ } else {
+ rx_chan->seq_number++;
+ rx_chan->state = MBOX_OPEN;
+ mutex_unlock(&rx_chan->lock);
+ return res;
+ }
+error_send_pdu:
+ flush_workqueue(rx_chan->receive_wq);
+error_recv_wq:
+ flush_workqueue(rx_chan->cast_wq);
+error_cast_wq:
+ kfree(rx_chan);
+exit:
+ return res;
+}
+EXPORT_SYMBOL(mbox_channel_register);
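+
+/*
+ * Example usage (illustrative sketch only; the channel number, callback
+ * and private pointer below are hypothetical client-side values):
+ *
+ *   static void client_rx_cb(u32 *data, u32 len, void *priv)
+ *   {
+ *           ... consume up to "len" received words from "data" ...
+ *   }
+ *
+ *   err = mbox_channel_register(0x500, client_rx_cb, client_priv);
+ *   ...
+ *   mbox_channel_deregister(0x500);
+ */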
+
+/**
+ * mbox_channel_deregister() - DeRegisters for a channel
+ * @channel: Channel Number.
+ *
+ * This routine is used to deregister a logical channel.
+ * It first does a sanity check on the requested channel availability
+ * and parameters. Then it deletes the channel.
+ */
+int mbox_channel_deregister(u16 channel)
+{
+ struct channel_status *rx_chan = NULL;
+ struct list_head *pos, *rx_list;
+ int res = 0;
+ struct mbox_unit_status *mbox_unit;
+
+ dev_dbg(&channels.pdev->dev, " %s channel = %d\n", __func__, channel);
+ /* Check if provided channel number is valid */
+ if (!check_channel(channel, MBOX_RX)) {
+ dev_err(&channels.pdev->dev, "wrong mbox channel number %d\n",
+ channel);
+ res = -EINVAL;
+ goto exit;
+ }
+
+ rx_list = get_rx_list(get_mbox_id(channel));
+ if (rx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid\n");
+ res = -EINVAL;
+ goto exit;
+ }
+
+ mbox_unit = container_of(rx_list, struct mbox_unit_status, rx_chans);
+
+ /* Check if channel is already registered */
+ spin_lock(&mbox_unit->rx_lock);
+ list_for_each(pos, rx_list) {
+ rx_chan = list_entry(pos, struct channel_status, list);
+
+ if (rx_chan->channel == channel) {
+ dev_dbg(&channels.pdev->dev,
+ "channel found\n");
+ rx_chan->cb = NULL;
+ }
+ }
+ list_del(&rx_chan->list);
+ spin_unlock(&mbox_unit->rx_lock);
+ flush_workqueue(rx_chan->cast_wq);
+ flush_workqueue(rx_chan->receive_wq);
+ kfree(rx_chan);
+
+exit:
+ return res;
+}
+EXPORT_SYMBOL(mbox_channel_deregister);
+
+/**
+ * mbox_channel_send() - Send messages
+ * @msg: Pointer to mbox_channel_msg data structure.
+ *
+ * This routine is used to send messages over the registered logical
+ * TX channel. It first does a sanity check on the message parameters.
+ * If the registered channel is not found, it simply registers that
+ * channel. If the channel is found, the message is put on the pending
+ * list. If the channel is OPEN, messages are then pushed from the
+ * pending list to the mailbox in FIFO order.
+ */
+int mbox_channel_send(struct mbox_channel_msg *msg)
+{
+ struct list_head *pos, *tx_list;
+ struct channel_status *tmp = NULL;
+ struct channel_status *tx_chan = NULL;
+ struct pending_elem *pending;
+ struct mbox_unit_status *mbox_unit;
+ int res = 0;
+
+ if (msg->length > MAILBOX_NR_OF_DATAWORDS || msg->length == 0) {
+ dev_err(&channels.pdev->dev, "data length incorrect\n");
+ res = -EINVAL;
+ goto exit;
+ }
+
+ if (!check_channel(msg->channel, MBOX_TX)) {
+ dev_err(&channels.pdev->dev, "wrong channel number %d\n",
+ msg->channel);
+ res = -EINVAL;
+ goto exit;
+ }
+
+ tx_list = get_tx_list(get_mbox_id(msg->channel));
+ if (tx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid\n");
+ res = -EINVAL;
+ goto exit;
+ }
+
+ mbox_unit = container_of(tx_list, struct mbox_unit_status, tx_chans);
+
+ spin_lock(&mbox_unit->tx_lock);
+ dev_dbg(&channels.pdev->dev, "send:tx_list=%x\tmbox_unit=%x\n",
+ (u32)tx_list, (u32)mbox_unit);
+ list_for_each(pos, tx_list) {
+ tmp = list_entry(pos, struct channel_status, list);
+ if (tmp->channel == msg->channel)
+ tx_chan = tmp;
+ }
+ spin_unlock(&mbox_unit->tx_lock);
+ /* Allocate pending element and add it to the list */
+ pending = kzalloc(sizeof(*pending), GFP_KERNEL);
+ if (pending == NULL) {
+ dev_err(&channels.pdev->dev,
+ "couldn't allocate memory for pending\n");
+ res = -ENOMEM;
+ goto exit;
+ }
+ pending->data = msg->data;
+ pending->length = msg->length;
+
+ if (tx_chan) {
+ mutex_lock(&tx_chan->lock);
+ list_add_tail(&pending->list, &tx_chan->tx.pending);
+ tx_chan->cb = msg->cb;
+ tx_chan->priv = msg->priv;
+ /* If channel is already opened start sending data */
+ if (tx_chan->state == MBOX_OPEN)
+ send_pdu(tx_chan, MBOX_CAST, tx_chan->channel);
+ /* Stop processing here */
+ mutex_unlock(&tx_chan->lock);
+ } else {
+ /* No channel found on the list, allocate new element */
+ tx_chan = kzalloc(sizeof(*tx_chan), GFP_KERNEL);
+ if (tx_chan == NULL) {
+ dev_err(&channels.pdev->dev,
+ "couldn't allocate memory for \
+ tx_chan\n");
+ res = -ENOMEM;
+ goto exit;
+ }
+ tx_chan->channel = msg->channel;
+ tx_chan->cb = msg->cb;
+ tx_chan->priv = msg->priv;
+ tx_chan->state = MBOX_CLOSE;
+ tx_chan->seq_number = CHANNEL_START_SEQUENCE_NUMBER;
+ INIT_LIST_HEAD(&tx_chan->tx.pending);
+ INIT_WORK(&tx_chan->open_msg, mbox_handle_open_msg);
+ INIT_WORK(&tx_chan->cast_msg, mbox_handle_cast_msg);
+ INIT_WORK(&tx_chan->receive_msg, mbox_handle_receive_msg);
+ mutex_init(&tx_chan->lock);
+ spin_lock(&mbox_unit->tx_lock);
+ list_add_tail(&tx_chan->list, tx_list);
+ spin_unlock(&mbox_unit->tx_lock);
+ mutex_lock(&tx_chan->lock);
+ list_add_tail(&pending->list, &tx_chan->tx.pending);
+ mutex_unlock(&tx_chan->lock);
+ }
+ return 0;
+
+exit:
+ return res;
+}
+EXPORT_SYMBOL(mbox_channel_send);
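+
+/*
+ * Example usage (illustrative sketch; the TX channel number, payload and
+ * completion callback are hypothetical; length must not exceed
+ * MAILBOX_NR_OF_DATAWORDS):
+ *
+ *   u32 words[2] = { 0xCAFE, 0xBEEF };
+ *   struct mbox_channel_msg msg = {
+ *           .channel = 0x900,
+ *           .data    = words,
+ *           .length  = 2,
+ *           .cb      = client_tx_done_cb,
+ *           .priv    = client_priv,
+ *   };
+ *
+ *   err = mbox_channel_send(&msg);
+ */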
+
+static void revoke_pending_msgs(struct channel_status *tx_chan)
+{
+ struct list_head *pos, *n;
+ struct pending_elem *pending;
+
+ list_for_each_safe(pos, n, &tx_chan->tx.pending) {
+ pending = list_entry(pos, struct pending_elem, list);
+
+ if (tx_chan->cb)
+ tx_chan->cb(pending->data, pending->length,
+ tx_chan->priv);
+ else
+ dev_err(&channels.pdev->dev,
+ "%s no callback provided\n", __func__);
+ list_del(&pending->list);
+ kfree(pending);
+ }
+}
+
+/**
+ * mbox_channel_revoke_messages() - Revoke pending messages
+ * @channel: Channel on which action to be taken.
+ *
+ * This routine clears all pending messages from a TX channel.
+ * It searches for the channel, checks whether there are pending
+ * messages, calls the registered callback (if any) for each of them,
+ * and deletes the messages from the pending list.
+ */
+int mbox_channel_revoke_messages(u16 channel)
+{
+ struct list_head *pos, *tx_list;
+ struct channel_status *tmp;
+ struct channel_status *tx_chan = NULL;
+ struct mbox_unit_status *mbox_unit;
+ int res = 0;
+
+ if (!check_channel(channel, MBOX_TX)) {
+ dev_err(&channels.pdev->dev,
+ "wrong channel number %d\n", channel);
+ return -EINVAL;
+ }
+
+ tx_list = get_tx_list(get_mbox_id(channel));
+ if (tx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid\n");
+ return -EINVAL;
+ }
+
+ mbox_unit = container_of(tx_list, struct mbox_unit_status, tx_chans);
+
+ spin_lock(&mbox_unit->tx_lock);
+ list_for_each(pos, tx_list) {
+ tmp = list_entry(pos, struct channel_status, list);
+ if (tmp->channel == channel)
+ tx_chan = tmp;
+ }
+ spin_unlock(&mbox_unit->tx_lock);
+
+ if (tx_chan) {
+ mutex_lock(&tx_chan->lock);
+ revoke_pending_msgs(tx_chan);
+ mutex_unlock(&tx_chan->lock);
+ dev_dbg(&channels.pdev->dev, "channel %d cleared\n",
+ channel);
+ } else {
+ dev_err(&channels.pdev->dev, "no channel found\n");
+ res = -EINVAL;
+ }
+
+ dev_dbg(&channels.pdev->dev, "%s exiting %d\n", __func__, res);
+ return res;
+}
+EXPORT_SYMBOL(mbox_channel_revoke_messages);
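+
+/*
+ * Example usage (illustrative sketch; the TX channel number is
+ * hypothetical): drop whatever is still queued on a TX channel, for
+ * instance before shutting a client down:
+ *
+ *   if (mbox_channel_revoke_messages(0x900))
+ *           ... no such TX channel was found ...
+ */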
+
+#if defined(CONFIG_DEBUG_FS)
+#define MBOXTEST_DEBUG 1
+#ifdef MBOXTEST_DEBUG
+#define DBG_TEST(x) x
+#else
+#define DBG_TEST(x)
+#endif
+
+#define MBOX_TEST_MAX_WORDS 3
+#define MBOX_RX_CHAN 0x500
+#define MBOX_TX_RX_CHANNEL_DIFF 0x400
+#define MBOX_MAX_NUM_TRANSFER 30000
+static int registration_done;
+/**
+ * struct mboxtest_data - mbox test via debugfs information
+ * @rx_buff: Buffer for incoming data
+ * @rx_pointer: Pointer to the current position in the RX data buffer
+ * @tx_buff: Buffer for outgoing data
+ * @tx_pointer: Pointer to the current position in the TX data buffer
+ * @tx_done: TX transfer done indicator
+ * @rx_done: RX transfer done indicator
+ * @received: Number of words received
+ * @xfer_words: Number of words in the current transfer
+ * @xfers: Number of transfers
+ * @words: Total number of words
+ * @channel: Test channel number
+ */
+struct mboxtest_data {
+ unsigned int *rx_buff;
+ unsigned int *rx_pointer;
+ unsigned int *tx_buff;
+ unsigned int *tx_pointer;
+ struct completion tx_done;
+ struct completion rx_done;
+ int received;
+ int xfer_words;
+ int xfers;
+ int words;
+ int channel;
+};
+
+static void mboxtest_receive_cb(u32 *data, u32 len, void *arg)
+{
+ struct mboxtest_data *mboxtest = (struct mboxtest_data *) arg;
+ int i;
+
+ printk(KERN_INFO "receive_cb.. data.= 0x%X, len = %d\n",
+ *data, len);
+ for (i = 0; i < len; i++)
+ *(mboxtest->rx_pointer++) = *(data++);
+
+ mboxtest->received += len;
+
+ printk(KERN_INFO "received = %d, words = %d\n",
+ mboxtest->received, mboxtest->words);
+ if (mboxtest->received >= mboxtest->words)
+ complete(&mboxtest->rx_done);
+ dev_dbg(&channels.pdev->dev, "%s exiting\n", __func__);
+}
+
+static void mboxtest_send_cb(u32 *data, u32 len, void *arg)
+{
+ struct mboxtest_data *mboxtest = (struct mboxtest_data *) arg;
+
+ printk(KERN_INFO "send_cb.. data.= 0x%X, len = %d\n",
+ *data, len);
+
+ complete(&mboxtest->tx_done);
+ dev_dbg(&channels.pdev->dev, "kernel:mboxtest_send_cb exiting\n");
+}
+
+static int mboxtest_transmit(struct mboxtest_data *mboxtest)
+{
+ int status = 0;
+ struct mbox_channel_msg msg;
+
+ dev_dbg(&channels.pdev->dev, "%s entering\n", __func__);
+ init_completion(&mboxtest->tx_done);
+
+ msg.channel = mboxtest->channel;
+ msg.data = mboxtest->tx_pointer;
+ msg.length = mboxtest->words;
+ msg.cb = mboxtest_send_cb;
+ msg.priv = mboxtest;
+
+ status = mbox_channel_send(&msg);
+ if (!status) {
+ mboxtest->tx_pointer += mboxtest->xfer_words;
+ wait_for_completion(&mboxtest->tx_done);
+ }
+
+ dev_dbg(&channels.pdev->dev, "%s exiting %d\n",
+ __func__, status);
+ return status;
+}
+
+static int transfer_test(struct mboxtest_data *mboxtest)
+{
+ int status = 0;
+ int len = 0;
+ int i;
+
+ len = mboxtest->words;
+
+ dev_dbg(&channels.pdev->dev, "%s enterring\n", __func__);
+ /* Allocate buffers */
+ mboxtest->rx_buff = kzalloc(sizeof(unsigned int) * len, GFP_KERNEL);
+ if (!mboxtest->rx_buff) {
+ DBG_TEST(printk(KERN_INFO
+ "Cannot allocate mbox rx memory\n"));
+ status = -ENOMEM;
+ goto err1;
+ }
+ memset(mboxtest->rx_buff, '\0', sizeof(unsigned int) * len);
+
+ mboxtest->tx_buff = kzalloc(sizeof(unsigned int) * len, GFP_KERNEL);
+ if (!mboxtest->tx_buff) {
+ DBG_TEST(printk(KERN_INFO
+ "Cannot allocate mbox tx memory\n"));
+ status = -ENOMEM;
+ goto err2;
+ }
+ memset(mboxtest->tx_buff, '\0', sizeof(unsigned int) * len);
+
+ /* Generate data */
+ get_random_bytes((unsigned char *)mboxtest->tx_buff,
+ sizeof(unsigned int) * len);
+ /* Set pointers */
+ mboxtest->tx_pointer = mboxtest->tx_buff;
+ mboxtest->rx_pointer = mboxtest->rx_buff;
+ mboxtest->received = 0;
+ init_completion(&mboxtest->rx_done);
+
+ /* Start tx transfer test transfer */
+ status = mboxtest_transmit(mboxtest);
+ DBG_TEST(printk(KERN_INFO "xfer_words=%d\n",
+ mboxtest->xfer_words));
+ if (!status)
+ wait_for_completion(&mboxtest->rx_done);
+ for (i = 0; i < len; i++)
+ DBG_TEST(printk(KERN_INFO "%d -> TX:0x%X, RX:0x%X\n", i,
+ mboxtest->tx_buff[i], mboxtest->rx_buff[i]));
+
+ dev_dbg(&channels.pdev->dev, "%s exiting %d\n", __func__, status);
+ return status;
+err2:
+ kfree(mboxtest->rx_buff);
+err1:
+ return status;
+}
+
+static int mboxtest_prepare(struct mboxtest_data *mboxtest)
+{
+ int err = 0;
+
+ mboxtest->xfers = MBOX_MAX_NUM_TRANSFER;
+ /* Calculate number of words in each transfer */
+ mboxtest->xfer_words = mboxtest->words / mboxtest->xfers;
+
+ /* Trim to maximum data words per transfer */
+ if (mboxtest->xfer_words > MBOX_TEST_MAX_WORDS) {
+ DBG_TEST(printk(KERN_INFO "Recalculating xfers ...\n"));
+ mboxtest->xfer_words = MBOX_TEST_MAX_WORDS;
+ if (mboxtest->words % mboxtest->xfer_words)
+ mboxtest->xfers = (mboxtest->words /
+ mboxtest->xfer_words) + 1;
+ else
+ mboxtest->xfers = (mboxtest->words /
+ mboxtest->xfer_words);
+ }
+
+ DBG_TEST(printk(KERN_INFO "Params: chan=0x%X words=%d, xfers=%d\n",
+ mboxtest->channel, mboxtest->words,
+ mboxtest->xfers));
+
+ if (mbox_channel_register(mboxtest->channel,
+ mboxtest_receive_cb, mboxtest)) {
+ DBG_TEST(printk(KERN_INFO "Cannot register mbox channel\n"));
+ err = -ENOMEM;
+ goto err;
+ }
+
+ registration_done = true;
+ return 0;
+err:
+ return err;
+}
+
+struct mboxtest_data mboxtest;
+/*
+ * Expected input: <nbr_channel> <nbr_word>
+ * Example: "echo 500 2"
+ */
+static ssize_t mbox_write_channel(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long nbr_channel;
+ unsigned long nbr_word;
+ char int_buf[16];
+ char *token;
+ char *val;
+
+ strncpy((char *) &int_buf, buf, sizeof(int_buf));
+ token = (char *) &int_buf;
+
+ /* Parse message */
+ val = strsep(&token, " ");
+ if ((val == NULL) || (strict_strtoul(val, 16, &nbr_channel) != 0))
+ nbr_channel = MBOX_RX_CHAN;
+
+ val = strsep(&token, " ");
+ if ((val == NULL) || (strict_strtoul(val, 16, &nbr_word) != 0))
+ nbr_word = 2;
+
+ dev_dbg(dev, "Will setup logical channel %ld\n", nbr_channel);
+ mboxtest.channel = nbr_channel;
+ mboxtest.words = nbr_word;
+
+ if (!registration_done)
+ mboxtest_prepare(&mboxtest);
+ else
+ dev_dbg(&channels.pdev->dev, "already registration done\n");
+
+ return count;
+}
+
+static ssize_t mbox_read_channel(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+
+ unsigned long i;
+ static bool config_done;
+
+ if (!config_done) {
+ config_done = true;
+ mboxtest.channel += MBOX_TX_RX_CHANNEL_DIFF;
+ }
+ dev_dbg(dev, "Will transfer %d words %d times at channel 0x%x\n",
+ mboxtest.words, mboxtest.xfers, mboxtest.channel);
+ for (i = 0; i < mboxtest.xfers; i++)
+ transfer_test(&mboxtest);
+
+ return 1;
+}
+static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, mbox_read_channel,
+ mbox_write_channel);
+
+#endif
+
+static int __init mbox_channel_probe(struct platform_device *pdev)
+{
+ int i, ret = 0;
+ struct mbox *mbox;
+
+ dev_dbg(&(pdev->dev), "Probing mailbox (pdev = 0x%X)...\n", (u32)pdev);
+
+ /* Register to given mailbox units (ids) */
+ for (i = 0; i < ARRAY_SIZE(mbox_ids); i++) {
+ mbox = mbox_setup(mbox_ids[i], mbox_cb, &mbox_ids[i]);
+ if (mbox == NULL) {
+ dev_err(&(pdev->dev), "Unable to setup mailbox %d\n",
+ mbox_ids[i]);
+ ret = -EBUSY;
+ goto exit;
+ }
+ channels.mbox_unit[i].mbox_id = mbox_ids[i];
+ channels.mbox_unit[i].mbox = mbox;
+ INIT_LIST_HEAD(&channels.mbox_unit[i].rx_chans);
+ INIT_LIST_HEAD(&channels.mbox_unit[i].tx_chans);
+ spin_lock_init(&channels.mbox_unit[i].rx_lock);
+ spin_lock_init(&channels.mbox_unit[i].tx_lock);
+ }
+
+ channels.pdev = pdev;
+
+ dev_dbg(&(pdev->dev), "Mailbox channel driver loaded\n");
+#if defined(CONFIG_DEBUG_FS)
+ ret = device_create_file(&(pdev->dev), &dev_attr_channel);
+ if (ret != 0)
+ dev_warn(&(pdev->dev),
+ "Unable to create mbox_channel sysfs entry");
+
+
+#endif
+exit:
+ return ret;
+}
+
+static struct platform_driver mbox_channel_driver = {
+ .driver = {
+ .name = "mbox_channel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mbox_channel_init(void)
+{
+ if (!machine_is_u5500())
+ return 0;
+
+ platform_device_register_simple("mbox_channel", 0, NULL, 0);
+
+ return platform_driver_probe(&mbox_channel_driver, mbox_channel_probe);
+}
+module_init(mbox_channel_init);
+
+static void __exit mbox_channel_exit(void)
+{
+ platform_driver_unregister(&mbox_channel_driver);
+}
+module_exit(mbox_channel_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MBOX channels driver");
diff --git a/drivers/misc/modem_audio/Kconfig b/drivers/misc/modem_audio/Kconfig
new file mode 100644
index 00000000000..5396868a9de
--- /dev/null
+++ b/drivers/misc/modem_audio/Kconfig
@@ -0,0 +1,6 @@
+config MODEM_AUDIO_DRIVER
+ bool "Modem Audio Driver"
+ depends on (U5500_MBOX && UX500_SOC_DB5500)
+ help
+ This module is used to read and write data between the APE and
+ the Access side on the u5500 platform.
diff --git a/drivers/misc/modem_audio/Makefile b/drivers/misc/modem_audio/Makefile
new file mode 100644
index 00000000000..a5c1740ea48
--- /dev/null
+++ b/drivers/misc/modem_audio/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MODEM_AUDIO_DRIVER) += mad.o
+
diff --git a/drivers/misc/modem_audio/mad.c b/drivers/misc/modem_audio/mad.c
new file mode 100644
index 00000000000..d31d78ba3f2
--- /dev/null
+++ b/drivers/misc/modem_audio/mad.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2011
+ *
+ * Modem Audio Driver
+ *
+ * Author: Rahul Venkatram <rahul.venkatram@stericsson.com> for ST-Ericsson
+ *         Haridhar KALVALA <haridhar.kalvala@stericsson.com> for ST-Ericsson
+ *         Amaresh Mulage <amaresh.mulage@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/fcntl.h>
+#include <linux/spinlock.h>
+#include <mach/mbox_channels-db5500.h>
+
+MODULE_DESCRIPTION("Modem Audio Driver");
+MODULE_LICENSE("GPLv2");
+
+/**
+ * -----------------------------------------------------
+ * | | | |
+ * | Data[0] |Data[1] |Data[2] |===>Data word 32 bits
+ * -----------------------------------------------------
+ * | MESSAGE |Data | Index |
+ * | TYPE |length | number |===>READ/WRITE message
+ * -----------------------------------------------------
+ * -----------------------------------------------------
+ * | MESSAGE | DSP SHM addr | max_no_of_buffers |===> READ
+ * | TYPE | to write data | ||buffersize |WRITE SETUP message
+ * -----------------------------------------------------
+ */
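+
+/*
+ * For the READ/WRITE SETUP messages above, data[2] packs the number of
+ * buffers in the upper 16 bits and the buffer size in the lower 16 bits.
+ * The receive path below unpacks it roughly as follows (values are
+ * hypothetical):
+ *
+ *   size  = data[2] & MASK_UPPER_WORD;   e.g. 0x0800
+ *   count = data[2] >> 16;               e.g. 4 buffers
+ */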
+
+
+#define MAD_NAME "mad"
+/* Bit mask */
+#define MASK_UPPER_WORD 0xFFFF
+
+/* channel values for each direction */
+#define CHANNEL_NUM_RX 0x500
+#define CHANNEL_NUM_TX 0x900
+
+/*
+ * Maximum number of datawords which can be sent
+ * in the mailbox each word is 32 bits
+ */
+#define MAX_NR_OF_DATAWORDS MAILBOX_NR_OF_DATAWORDS
+#define MAX_NUM_RX_BUFF NUM_DSP_BUFFER
+#define NR_OF_DATAWORDS_REQD_FOR_ACK 1
+
+/**
+ * Message types, must be identical on the DSP side
+ * VCS_MBOX_MSG_WRITE_IF_SETUP : DSP -> ARM
+ * VCS_MBOX_MSG_WRITE_IF_SETUP_ACK : ARM -> DSP
+ * VCS_MBOX_MSG_READ_IF_SETUP : DSP -> ARM
+ * VCS_MBOX_MSG_READ_IF_SETUP_ACK : ARM -> DSP
+ * VCS_MBOX_MSG_IF_ENC_DATA : ARM -> DSP
+ * VCS_MBOX_MSG_IF_DEC_DATA : DSP -> ARM
+ */
+#define VCS_MBOX_MSG_WRITE_IF_SETUP 0x200
+#define VCS_MBOX_MSG_WRITE_IF_SETUP_ACK 0x201
+#define VCS_MBOX_MSG_READ_IF_SETUP 0x400
+#define VCS_MBOX_MSG_READ_IF_SETUP_ACK 0x401
+#define VCS_MBOX_MSG_IF_ENC_DATA 0x80
+#define VCS_MBOX_MSG_IF_DEC_DATA 0x100
+
+/**
+ * struct mad_data - This structure holds the state of the Modem Audio Driver.
+ *
+ * @dsp_shm_write_ptr : Ptr to the first TX buffer in DSP
+ * @dsp_shm_read_ptr : Ptr to the first RX buffer in DSP
+ * @max_tx_buffs : No. of DSP buffers available to write
+ * @max_rx_buffs : No. of DSP buffers available to read
+ * @write_offset : Size of each buffer in the DSP
+ * @read_offset : Size of each buffer in the DSP
+ * @rx_buff : Buffer for incoming data
+ * @tx_buff : Buffer for outgoing data
+ * @tx_buffer_num : Buffer counter for writing to DSP
+ * @rx_buffer_num : Buffer counter for reading from DSP
+ * @rx_buffer_read : Buffer counter for reads from user space
+ * @data_written : RX data message arrival indicator
+ * @read_setup_msg : Flag indicating the read setup message has arrived
+ * @open_check : Flag indicating the device is already open
+ * @readq : read queue of data message
+ * @lock : lock for r/w message queue
+ */
+struct mad_data {
+ void __iomem *dsp_shm_write_ptr;
+ void __iomem *dsp_shm_read_ptr;
+ int max_tx_buffs;
+ int max_rx_buffs;
+ int write_offset;
+ int read_offset;
+ u32 *rx_buff;
+ u32 *tx_buff;
+ int tx_buffer_num;
+ int rx_buffer_num;
+ int rx_buffer_read;
+ u32 data_written;
+ bool read_setup_msg;
+ bool open_check;
+ wait_queue_head_t readq;
+ spinlock_t lock;
+};
+
+static struct mad_data *mad;
+
+static void mad_receive_cb(u32 *data, u32 length, void *priv);
+static int mad_read(struct file *filp, char __user *buff, size_t count,
+ loff_t *offp);
+static int mad_write(struct file *filp, const char __user *buff, size_t count,
+ loff_t *offp);
+static unsigned int mad_select(struct file *filp, poll_table *wait);
+static void mad_send_cb(u32 *data, u32 len, void *arg);
+static int mad_open(struct inode *ino, struct file *filp);
+static int mad_close(struct inode *ino, struct file *filp);
+
+static const struct file_operations mad_fops = {
+ .release = mad_close,
+ .open = mad_open,
+ .read = mad_read,
+ .write = mad_write,
+ .poll = mad_select,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice mad_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = MAD_NAME,
+ .fops = &mad_fops
+};
+
+/**
+ * mad_send_cb - This function is default callback for send.
+ * @data -Pointer to the data buffer
+ * @len -Data buffer length
+ * @arg -Private data pointer associated with test
+ */
+static void mad_send_cb(u32 *data, u32 len, void *arg)
+{
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+}
+
+/**
+ * mad_receive_cb - This callback function is for receiving data from mailbox
+ * @data -Pointer to the data buffer
+ * @len -length of the Mailbox
+ * @arg -Private data pointer associated with test
+ */
+static void mad_receive_cb(u32 *data, u32 length, void *priv)
+{
+ struct mad_data *mad = priv;
+ struct mbox_channel_msg msg;
+ u32 ack_to_dsp;
+ unsigned long flags;
+
+ /* setup message for write address */
+ if (*data == VCS_MBOX_MSG_WRITE_IF_SETUP) {
+
+ ack_to_dsp = VCS_MBOX_MSG_WRITE_IF_SETUP_ACK;
+
+ /* if the setup message comes again, unmap the old mapping */
+ if (mad->dsp_shm_write_ptr != NULL) {
+ iounmap(mad->dsp_shm_write_ptr);
+ mad->dsp_shm_write_ptr = NULL;
+ mad->write_offset = 0;
+ mad->max_tx_buffs = 0;
+ }
+
+ /* convert offset to uint size */
+ mad->write_offset = (data[2] & MASK_UPPER_WORD);
+ mad->max_tx_buffs = (data[2] >> 16);
+
+ mad->dsp_shm_write_ptr = ioremap(data[1],
+ mad->max_tx_buffs * mad->write_offset);
+ if (mad->dsp_shm_write_ptr == NULL)
+ dev_err(mad_dev.this_device, "incrt write address");
+
+ /* Initialize all buffer numbers */
+ mad->tx_buffer_num = 0;
+
+ /* Send ACK to the DSP */
+ msg.channel = CHANNEL_NUM_TX;
+ msg.data = &ack_to_dsp;
+ msg.length = NR_OF_DATAWORDS_REQD_FOR_ACK;
+ msg.cb = mad_send_cb;
+ msg.priv = mad;
+
+ if (mbox_channel_send(&msg))
+ dev_err(mad_dev.this_device, "%s: can't send data\n",
+ __func__);
+
+ } /* setup message for reading SHM */
+ else if (*data == VCS_MBOX_MSG_READ_IF_SETUP) {
+
+ ack_to_dsp = VCS_MBOX_MSG_READ_IF_SETUP_ACK;
+
+ /* if the setup message comes again, unmap the old mapping */
+ if (mad->dsp_shm_read_ptr != NULL) {
+ iounmap(mad->dsp_shm_read_ptr);
+ mad->dsp_shm_read_ptr = NULL;
+ mad->read_offset = 0;
+ mad->max_rx_buffs = 0;
+ }
+
+ /* convert offset to uint size */
+ mad->read_offset = (data[2] & MASK_UPPER_WORD);
+ mad->max_rx_buffs = data[2] >> 16;
+
+ mad->dsp_shm_read_ptr = ioremap(data[1],
+ mad->max_rx_buffs * mad->read_offset);
+
+ /* Initialize all buffer numbers and flags */
+ mad->rx_buffer_num = 0;
+ mad->rx_buffer_read = 0;
+ mad->data_written = 0;
+
+ /* Send ACK to the DSP */
+ msg.channel = CHANNEL_NUM_TX;
+ msg.data = &ack_to_dsp;
+ msg.length = NR_OF_DATAWORDS_REQD_FOR_ACK;
+ msg.cb = mad_send_cb;
+ msg.priv = mad;
+
+ if (mbox_channel_send(&msg))
+ dev_err(mad_dev.this_device, "%s: can't send data\n",
+ __func__);
+
+ /* allow read */
+ spin_lock_irqsave(&mad->lock, flags);
+ mad->read_setup_msg = true;
+ spin_unlock_irqrestore(&mad->lock, flags);
+ /* blocked in select() */
+ wake_up_interruptible(&mad->readq);
+
+ } else if (*data == VCS_MBOX_MSG_IF_DEC_DATA) {
+ /*
+ * Check that the message is valid and has a proper length;
+ * otherwise ignore it.
+ */
+ if ((data[1] <= 0) || (mad->rx_buff == NULL)
+ || (mad->dsp_shm_read_ptr == NULL)) {
+ if (mad->rx_buff == NULL)
+ dev_warn(mad_dev.this_device, "%s :MAD closed",
+ __func__);
+ else
+ dev_warn(mad_dev.this_device, "%s :0-len msg",
+ __func__);
+ } else {
+ mad->rx_buff[mad->rx_buffer_num] = data[1];
+ mad->rx_buffer_num++;
+
+ /* store the offset */
+ mad->rx_buff[mad->rx_buffer_num] = data[2];
+
+ if (mad->rx_buffer_num < ((MAX_NUM_RX_BUFF * 2)-1))
+ mad->rx_buffer_num++;
+ else
+ mad->rx_buffer_num = 0;
+
+ spin_lock_irqsave(&mad->lock, flags);
+ mad->data_written++;
+
+ if (mad->data_written > MAX_NUM_RX_BUFF) {
+ dev_warn(mad_dev.this_device,
+ "%s :Read msg overflow = %u\n",
+ __func__ , mad->data_written);
+ /*
+ * Do not exceed MAX_NUM_RX_BUFF buffers.
+ * TODO: overflow control
+ */
+ mad->data_written = MAX_NUM_RX_BUFF ;
+ }
+ spin_unlock_irqrestore(&mad->lock, flags);
+ wake_up_interruptible(&mad->readq);
+ }
+ } else {
+ /* received Invalid message */
+ dev_err(mad_dev.this_device, "%s : Invalid Msg", __func__);
+ }
+}
+
+static int mad_read(struct file *filp, char __user *buff, size_t count,
+ loff_t *offp)
+{
+ unsigned long flags;
+ unsigned int size = 0;
+ void __iomem *shm_ptr = NULL;
+
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ if (!(mad->data_written > 0)) {
+ if (wait_event_interruptible(mad->readq,
+ ((mad->data_written > 0) &&
+ (mad->dsp_shm_read_ptr != NULL))))
+ return -ERESTARTSYS;
+ }
+
+ if (mad->dsp_shm_read_ptr == NULL) {
+ dev_err(mad_dev.this_device, "%s :pointer err", __func__);
+ return -EINVAL ;
+ }
+
+ if (mad->rx_buff[mad->rx_buffer_read] > count) {
+ /*
+ * Message size is greater than the buffer; this should not happen.
+ * We should never get here: the message size is guaranteed to be
+ * smaller than the buffer length.
+ */
+ dev_err(mad_dev.this_device, "%s : Incrct length", __func__);
+ return -EFAULT;
+ }
+ size = mad->rx_buff[mad->rx_buffer_read];
+ mad->rx_buff[mad->rx_buffer_read] = 0;
+ mad->rx_buffer_read++;
+ shm_ptr = (u8 *)(mad->dsp_shm_read_ptr +
+ (mad->rx_buff[mad->rx_buffer_read] * mad->read_offset));
+ if (copy_to_user(buff, shm_ptr, size) < 0) {
+ dev_err(mad_dev.this_device, "%s :copy to user", __func__);
+ return -EFAULT;
+ }
+
+ if (mad->rx_buffer_read < ((MAX_NUM_RX_BUFF*2)-1))
+ mad->rx_buffer_read++;
+ else
+ mad->rx_buffer_read = 0;
+
+ spin_lock_irqsave(&mad->lock, flags);
+ mad->data_written--;
+ if (mad->data_written < 0) {
+ /* Means wrong read*/
+ mad->data_written = 0;
+ dev_err(mad_dev.this_device, "%s :data Rcev err", __func__);
+ }
+ spin_unlock_irqrestore(&mad->lock, flags);
+ return size;
+}
+
+static int mad_write(struct file *filp, const char __user *buff, size_t count,
+ loff_t *offp)
+{
+ int retval = 0;
+ void __iomem *dsp_write_address;
+ struct mbox_channel_msg msg;
+
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ /* check for a valid write pointer, else skip writing */
+ if (mad->dsp_shm_write_ptr == NULL) {
+ dev_err(mad_dev.this_device, "%s :Illegal memory", __func__);
+ return -EFAULT;
+ }
+
+ dsp_write_address = (mad->dsp_shm_write_ptr +
+ (mad->tx_buffer_num * mad->write_offset));
+
+ if (copy_from_user(dsp_write_address, buff, count)) {
+ dev_err(mad_dev.this_device, "%s:copy_from_user\n", __func__);
+ return -EFAULT;
+ }
+
+ mad->tx_buff[0] = VCS_MBOX_MSG_IF_ENC_DATA;
+ mad->tx_buff[1] = count;
+ mad->tx_buff[2] = mad->tx_buffer_num;
+
+ if (mad->tx_buffer_num < (mad->max_tx_buffs-1))
+ mad->tx_buffer_num++;
+ else
+ mad->tx_buffer_num = 0;
+
+ msg.channel = CHANNEL_NUM_TX;
+ msg.data = mad->tx_buff;
+ msg.length = MAX_NR_OF_DATAWORDS;
+ msg.cb = mad_send_cb;
+ msg.priv = mad;
+
+ retval = mbox_channel_send(&msg);
+ if (retval) {
+ dev_err(mad_dev.this_device, "%s:can't send data", __func__);
+ return retval;
+ }
+ return count;
+}
+
+static unsigned int mad_select(struct file *filp, poll_table *wait)
+{
+ unsigned int mask = 0;
+ unsigned long flags;
+
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ poll_wait(filp, &mad->readq, wait);
+ spin_lock_irqsave(&mad->lock, flags);
+
+ if ((true == mad->read_setup_msg) && (mad->data_written > 0))
+ mask |= POLLIN | POLLRDNORM; /* allow readable */
+ spin_unlock_irqrestore(&mad->lock, flags);
+
+ return mask;
+}
+
+static int mad_open(struct inode *ino, struct file *filp)
+{
+ int err = 0;
+
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ if (mad->open_check == true) {
+ dev_err(mad_dev.this_device, "%s :Already opened", __func__);
+ return -EFAULT;
+ }
+
+ mad->rx_buff = kzalloc(MAX_NUM_RX_BUFF * 2 *
+ sizeof(*mad->rx_buff), GFP_KERNEL);
+
+ if (mad->rx_buff == NULL) {
+ dev_err(mad_dev.this_device, "%s:RX memory\n", __func__);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mad->tx_buff = kzalloc(MAX_NR_OF_DATAWORDS * sizeof(*mad->tx_buff),
+ GFP_KERNEL);
+ if (mad->tx_buff == NULL) {
+ dev_err(mad_dev.this_device, "%s:TX memory\n", __func__);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* Init spinlock for critical section access*/
+ spin_lock_init(&mad->lock);
+ init_waitqueue_head(&(mad->readq));
+
+ err = mbox_channel_register(CHANNEL_NUM_RX, mad_receive_cb, mad);
+ if (err) {
+ dev_err(mad_dev.this_device, "%s: register err", __func__);
+ err = -EFAULT;
+ goto error;
+ }
+ mad->open_check = true;
+
+ return 0;
+error:
+ kfree(mad->rx_buff);
+ kfree(mad->tx_buff);
+ return err;
+}
+
+static int mad_close(struct inode *ino, struct file *filp)
+{
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ if (mbox_channel_deregister(CHANNEL_NUM_RX)) {
+ dev_err(mad_dev.this_device, "%s:deregister err", __func__);
+ return -EFAULT;
+ }
+ kfree(mad->rx_buff);
+ kfree(mad->tx_buff);
+ mad->data_written = 0;
+ mad->rx_buffer_num = 0;
+ mad->rx_buffer_read = 0;
+ mad->open_check = false;
+
+ return 0;
+}
+
+static int __init mad_init(void)
+{
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ mad = kzalloc(sizeof(*mad), GFP_KERNEL);
+ if (mad == NULL) {
+ dev_err(mad_dev.this_device, "%s :MAD failed", __func__);
+ return -ENOMEM;
+ }
+
+ return misc_register(&mad_dev);
+}
+module_init(mad_init);
+
+static void __exit mad_exit(void)
+{
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ if (mad->dsp_shm_write_ptr != NULL) {
+ iounmap(mad->dsp_shm_write_ptr);
+ mad->dsp_shm_write_ptr = NULL;
+ }
+
+ if (mad->dsp_shm_read_ptr != NULL) {
+ iounmap(mad->dsp_shm_read_ptr);
+ mad->dsp_shm_read_ptr = NULL;
+ }
+
+ kfree(mad);
+ misc_deregister(&mad_dev);
+}
diff --git a/drivers/misc/sim_detect.c b/drivers/misc/sim_detect.c
new file mode 100644
index 00000000000..6e6446d0fc5
--- /dev/null
+++ b/drivers/misc/sim_detect.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: BIBEK BASU <bibek.basu@stericsson.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include <linux/modem/modem_client.h>
+#include <mach/sim_detect.h>
+#include <linux/regulator/consumer.h>
+
+/* time in millisec */
+#define TIMER_DELAY 10
+
+struct sim_detect{
+ struct work_struct timer_expired;
+ struct device *dev;
+ struct modem *modem;
+ struct hrtimer timer;
+ struct mutex lock;
+ int voltage;
+ struct regulator *vinvsim_regulator;
+ bool regulator_enabled;
+};
+
+static ssize_t show_voltage(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sim_detect *data = dev_get_drvdata(dev);
+ int ret, len;
+
+ ret = mutex_lock_interruptible(&data->lock);
+ if (ret < 0)
+ return ret;
+
+ len = sprintf(buf, "%i\n", data->voltage);
+
+ mutex_unlock(&data->lock);
+
+ return len;
+}
+
+static ssize_t write_voltage(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sim_detect *sim_detect = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ /* check input */
+ if (strict_strtol(buf, 0, &val) != 0) {
+ dev_err(dev, "Invalid voltage class configured.\n");
+ return count;
+ }
+
+ switch (val) {
+ case -1:
+ case 0:
+ case 1800000:
+ case 3000000:
+ break;
+ default:
+ dev_err(dev, "Invalid voltage class configured.\n");
+ return count;
+ }
+
+ /* lock */
+ ret = mutex_lock_interruptible(&sim_detect->lock);
+ if (ret < 0)
+ return ret;
+
+ /* update state */
+ sim_detect->voltage = val;
+
+ /* call regulator */
+ switch (sim_detect->voltage) {
+ case 0:
+ /* SIM voltage is unknown, turn on regulator for 3 V SIM */
+ case 3000000:
+ /* Vinvsim supply is used only for 3 V SIM */
+ if (!sim_detect->regulator_enabled) {
+ ret = regulator_enable(sim_detect->vinvsim_regulator);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulator.\n");
+ goto out_unlock;
+ }
+ sim_detect->regulator_enabled = true;
+ }
+ break;
+ case 1800000:
+ case -1:
+ /* Vbatvsim is used otherwise */
+ if (sim_detect->regulator_enabled) {
+ regulator_disable(sim_detect->vinvsim_regulator);
+ sim_detect->regulator_enabled = false;
+ }
+ }
+
+out_unlock:
+ /* unlock and return */
+ mutex_unlock(&sim_detect->lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(voltage, S_IWUSR | S_IRUGO, show_voltage, write_voltage);
+
+static struct attribute *sim_attributes[] = {
+ &dev_attr_voltage.attr,
+ NULL
+};
+
+static const struct attribute_group sim_attr_group = {
+ .attrs = sim_attributes,
+};
+
+static void inform_modem_release(struct work_struct *work)
+{
+ struct sim_detect *sim_detect =
+ container_of(work, struct sim_detect, timer_expired);
+
+ /* call Modem Access Framework api to release modem */
+ modem_release(sim_detect->modem);
+}
+
+static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+{
+ struct sim_detect *sim_detect =
+ container_of(timer, struct sim_detect, timer);
+
+ schedule_work(&sim_detect->timer_expired);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t sim_activity_irq(int irq, void *dev)
+{
+ struct sim_detect *sim_detect = dev;
+
+ /* call Modem Access Framework api to acquire modem */
+ modem_request(sim_detect->modem);
+ /* start the timer for 10ms */
+ hrtimer_start(&sim_detect->timer,
+ ktime_set(0, TIMER_DELAY*NSEC_PER_MSEC),
+ HRTIMER_MODE_REL);
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM
+/**
+ * sim_detect_suspend() - This routine puts the SIM detect into suspend state.
+ * @dev: pointer to device structure.
+ *
+ * This routine checks for ongoing communication with the modem by
+ * examining the modem_get_usage() and work_pending() state, and
+ * accordingly prevents suspend while modem communication is ongoing.
+ */
+int sim_detect_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sim_detect *sim_detect = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s called...\n", __func__);
+ /* if the modem is being accessed, prevent system suspend */
+ if (modem_get_usage(sim_detect->modem)
+ || work_pending(&sim_detect->timer_expired))
+ return -EBUSY;
+ else
+ return 0;
+}
+
+static const struct dev_pm_ops sim_detect_dev_pm_ops = {
+ .suspend = sim_detect_suspend,
+};
+#endif
+
+
+static int __devinit sim_detect_probe(struct platform_device *pdev)
+{
+ struct sim_detect_platform_data *plat = dev_get_platdata(&pdev->dev);
+ struct sim_detect *sim_detect;
+ int ret;
+
+ sim_detect = kzalloc(sizeof(struct sim_detect), GFP_KERNEL);
+ if (sim_detect == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* initialize data */
+ mutex_init(&sim_detect->lock);
+ sim_detect->voltage = 0;
+
+ sim_detect->dev = &pdev->dev;
+ INIT_WORK(&sim_detect->timer_expired, inform_modem_release);
+ hrtimer_init(&sim_detect->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ sim_detect->timer.function = timer_callback;
+
+ sim_detect->modem = modem_get(sim_detect->dev, "u8500-shrm-modem");
+ if (IS_ERR(sim_detect->modem)) {
+ ret = PTR_ERR(sim_detect->modem);
+ dev_err(sim_detect->dev, "Could not retrieve the modem\n");
+ goto out_free;
+ }
+
+ /* set drvdata */
+ platform_set_drvdata(pdev, sim_detect);
+
+ /* request irq */
+ ret = request_threaded_irq(plat->irq_num,
+ NULL, sim_activity_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "sim activity", sim_detect);
+ if (ret < 0)
+ goto out_put_modem;
+
+ /* get regulator */
+ sim_detect->regulator_enabled = false;
+ sim_detect->vinvsim_regulator = regulator_get(sim_detect->dev,
+ "vinvsim");
+ if (IS_ERR(sim_detect->vinvsim_regulator)) {
+ dev_err(&pdev->dev,
+ "Failed to get regulator. (dev_name %s).\n",
+ dev_name(sim_detect->dev));
+ ret = PTR_ERR(sim_detect->vinvsim_regulator);
+ goto out_free_irq;
+ }
+
+ /* register sysfs entry */
+ ret = sysfs_create_group(&pdev->dev.kobj, &sim_attr_group);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create attribute group: %d\n", ret);
+ goto out_free_regulator;
+ }
+
+ return 0;
+
+out_free_regulator:
+ regulator_put(sim_detect->vinvsim_regulator);
+out_free_irq:
+ free_irq(plat->irq_num, sim_detect);
+out_put_modem:
+ modem_put(sim_detect->modem);
+ platform_set_drvdata(pdev, NULL);
+out_free:
+ kfree(sim_detect);
+ return ret;
+}
+
+static int __devexit sim_detect_remove(struct platform_device *pdev)
+{
+ struct sim_detect *sim_detect = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &sim_attr_group);
+ regulator_put(sim_detect->vinvsim_regulator);
+ modem_put(sim_detect->modem);
+ platform_set_drvdata(pdev, NULL);
+ kfree(sim_detect);
+ return 0;
+}
+
+static struct platform_driver sim_detect_driver = {
+ .driver = {
+ .name = "sim-detect",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &sim_detect_dev_pm_ops,
+#endif
+ },
+ .probe = sim_detect_probe,
+ .remove = __devexit_p(sim_detect_remove),
+};
+
+static int __init sim_detect_init(void)
+{
+ return platform_driver_register(&sim_detect_driver);
+}
+module_init(sim_detect_init);
+
+static void __exit sim_detect_exit(void)
+{
+ platform_driver_unregister(&sim_detect_driver);
+}
+module_exit(sim_detect_exit);
+
+MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
+MODULE_DESCRIPTION("Detects SIM Hot Swap and wakes modem");
+MODULE_ALIAS("platform:sim-detect");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/stm.c b/drivers/misc/stm.c
new file mode 100644
index 00000000000..33bb26c27ca
--- /dev/null
+++ b/drivers/misc/stm.c
@@ -0,0 +1,850 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * Philippe Langlais <philippe.Langlais@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <trace/stm.h>
+
+/* STM Registers */
+#define STM_CR (stm.virtbase)
+#define STM_MMC (stm.virtbase + 0x008)
+#define STM_TER (stm.virtbase + 0x010)
+#define STMPERIPHID0 (stm.virtbase + 0xFC0)
+#define STMPERIPHID1 (stm.virtbase + 0xFC8)
+#define STMPERIPHID2 (stm.virtbase + 0xFD0)
+#define STMPERIPHID3 (stm.virtbase + 0xFD8)
+#define STMPCELLID0 (stm.virtbase + 0xFE0)
+#define STMPCELLID1 (stm.virtbase + 0xFE8)
+#define STMPCELLID2 (stm.virtbase + 0xFF0)
+#define STMPCELLID3 (stm.virtbase + 0xFF8)
+
+#define STM_CLOCK_SHIFT 6
+#define STM_CLOCK_MASK 0x1C0
+
+/* Hardware mode for all sources */
+#define STM_MMC_DEFAULT CONFIG_STM_DEFAULT_MASTERS_MODES
+
+/* Max number of channels (multiple of 256) */
+#define STM_NUMBER_OF_CHANNEL CONFIG_STM_NUMBER_OF_CHANNEL
+
+/* Number of dynamically allocated channels used by stm_trace_buffer */
+#define NB_KERNEL_DYNAMIC_CHANNEL 128
+
+static struct stm_device {
+ const struct stm_platform_data *pdata;
+ void __iomem *virtbase;
+ /* Used to register the allocated channels */
+ DECLARE_BITMAP(ch_bitmap, STM_NUMBER_OF_CHANNEL);
+} stm;
+
+volatile struct stm_channel __iomem *stm_channels;
+
+static struct cdev cdev;
+static struct class *stm_class;
+static int stm_major;
+
+static DEFINE_SPINLOCK(lock);
+
+/* Middle value for clock divisor */
+static enum clock_div stm_clockdiv = STM_CLOCK_DIV8;
+
+/* Default value for STM output connection */
+static enum stm_connection_type stm_connection = STM_DEFAULT_CONNECTION;
+
+#define STM_BUFSIZE 256
+struct channel_data {
+ DECLARE_BITMAP(bitmap, STM_NUMBER_OF_CHANNEL);
+ int numero;
+ spinlock_t lock;
+ u8 data_buffer[STM_BUFSIZE];
+};
+
+static u64 stm_printk_buf[1024/sizeof(u64)];
+static arch_spinlock_t stm_buf_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
+static char *mipi60 = "none";
+module_param(mipi60, charp, S_IRUGO);
+MODULE_PARM_DESC(mipi60, "STM Trace to output on probe2 of mipi60 "
+ "('none' or 'ape' or 'modem')");
+
+static char *mipi34 = "none";
+module_param(mipi34, charp, S_IRUGO);
+MODULE_PARM_DESC(mipi34, "STM Trace to output on mipi34 "
+ "('none' or 'ape' or 'modem')");
+
+static char *microsd = "none";
+module_param(microsd, charp, S_IRUGO);
+MODULE_PARM_DESC(microsd, "STM Trace to output on SD card connector "
+ "('none' or 'ape' or 'modem')");
+
+static unsigned int stm_ter;
+module_param(stm_ter, uint, 0);
+MODULE_PARM_DESC(stm_ter, "Value for STM_TER (trace control register). "
+ "Should be set by user as environment variable stm.stm_ter");
+
+#define IS_APE_ON_MIPI34 (mipi34 && !strcmp(mipi34, "ape"))
+#define IS_APE_ON_MIPI60 (mipi60 && !strcmp(mipi60, "ape"))
+#define IS_APE_ON_MICROSD (microsd && !strcmp(microsd, "ape"))
+#define IS_MODEM_ON_MICROSD (microsd && !strcmp(microsd, "modem"))
+
+static int stm_connection_set(void *data, u64 val);
+
+int stm_alloc_channel(int offset)
+{
+ int channel;
+
+ /* Look for a free channel from offset */
+ do {
+ channel = find_next_zero_bit(stm.ch_bitmap,
+ STM_NUMBER_OF_CHANNEL, offset);
+ } while ((channel < STM_NUMBER_OF_CHANNEL)
+ && test_and_set_bit(channel, stm.ch_bitmap));
+ return channel;
+}
+EXPORT_SYMBOL(stm_alloc_channel);
+
+void stm_free_channel(int channel)
+{
+ clear_bit(channel, stm.ch_bitmap);
+}
+EXPORT_SYMBOL(stm_free_channel);
+
+static int stm_get_channel(struct channel_data *ch_data, int __user *arg)
+{
+ int channel, err;
+
+ channel = stm_alloc_channel(0);
+ if (channel < STM_NUMBER_OF_CHANNEL) {
+ /* One free found ! */
+ err = put_user(channel, arg);
+ if (err)
+ stm_free_channel(channel);
+ else
+ /* Register it in the context of the file */
+ set_bit(channel, ch_data->bitmap);
+ } else
+ err = -ENOMEM;
+ return err;
+}
+
+static int stm_release_channel(struct channel_data *ch_data, int channel)
+{
+ if ((channel < 0) || (channel >= STM_NUMBER_OF_CHANNEL))
+ return -EINVAL;
+ stm_free_channel(channel);
+ clear_bit(channel, ch_data->bitmap);
+ return 0;
+}
+
+/*
+ * Trace a buffer on a given channel
+ * with auto time stamping on last byte(s) only
+ */
+int stm_trace_buffer_onchannel(int channel,
+ const void *data, size_t length)
+{
+ int i, mod64;
+ volatile struct stm_channel __iomem *pch;
+
+ if (channel >= STM_NUMBER_OF_CHANNEL || !stm_channels)
+ return 0;
+
+ pch = &stm_channels[channel];
+
+ /* Align data pointer to u64 & time stamp last byte(s) */
+ mod64 = (int)data & 7;
+ i = length - 8 + mod64;
+ switch (mod64) {
+ case 0:
+ if (i)
+ pch->no_stamp64 = *(u64 *)data;
+ else {
+ pch->stamp64 = *(u64 *)data;
+ return length;
+ }
+ data += 8;
+ break;
+ case 1:
+ pch->no_stamp8 = *(u8 *)data;
+ pch->no_stamp16 = *(u16 *)(data+1);
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+3);
+ else {
+ pch->stamp32 = *(u32 *)(data+3);
+ return length;
+ }
+ data += 7;
+ break;
+ case 2:
+ pch->no_stamp16 = *(u16 *)data;
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+2);
+ else {
+ pch->stamp32 = *(u32 *)(data+2);
+ return length;
+ }
+ data += 6;
+ break;
+ case 3:
+ pch->no_stamp8 = *(u8 *)data;
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+1);
+ else {
+ pch->stamp32 = *(u32 *)(data+1);
+ return length;
+ }
+ data += 5;
+ break;
+ case 4:
+ if (i)
+ pch->no_stamp32 = *(u32 *)data;
+ else {
+ pch->stamp32 = *(u32 *)data;
+ return length;
+ }
+ data += 4;
+ break;
+ case 5:
+ pch->no_stamp8 = *(u8 *)data;
+ if (i)
+ pch->no_stamp16 = *(u16 *)(data+1);
+ else {
+ pch->stamp16 = *(u16 *)(data+1);
+ return length;
+ }
+ data += 3;
+ break;
+ case 6:
+ if (i)
+ pch->no_stamp16 = *(u16 *)data;
+ else {
+ pch->stamp16 = *(u16 *)data;
+ return length;
+ }
+ data += 2;
+ break;
+ case 7:
+ if (i)
+ pch->no_stamp8 = *(u8 *)data;
+ else {
+ pch->stamp8 = *(u8 *)data;
+ return length;
+ }
+ data++;
+ break;
+ }
+ for (;;) {
+ if (i > 8) {
+ pch->no_stamp64 = *(u64 *)data;
+ data += 8;
+ i -= 8;
+ } else if (i == 8) {
+ pch->stamp64 = *(u64 *)data;
+ break;
+ } else if (i > 4) {
+ pch->no_stamp32 = *(u32 *)data;
+ data += 4;
+ i -= 4;
+ } else if (i == 4) {
+ pch->stamp32 = *(u32 *)data;
+ break;
+ } else if (i > 2) {
+ pch->no_stamp16 = *(u16 *)data;
+ data += 2;
+ i -= 2;
+ } else if (i == 2) {
+ pch->stamp16 = *(u16 *)data;
+ break;
+ } else {
+ pch->stamp8 = *(u8 *)data;
+ break;
+ }
+ }
+ return length;
+}
+EXPORT_SYMBOL(stm_trace_buffer_onchannel);
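+
+/*
+ * Example usage (illustrative sketch; the payload is hypothetical and the
+ * channel is assumed to come from stm_alloc_channel()):
+ *
+ *   int ch = stm_alloc_channel(0);
+ *
+ *   if (ch < STM_NUMBER_OF_CHANNEL) {
+ *           stm_trace_buffer_onchannel(ch, "boot marker", 11);
+ *           stm_free_channel(ch);
+ *   }
+ */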
+
+static int stm_open(struct inode *inode, struct file *file)
+{
+ struct channel_data *channel_data;
+ int retval = 0;
+
+ channel_data = kzalloc(sizeof(struct channel_data), GFP_KERNEL);
+ if (channel_data == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&channel_data->lock);
+ channel_data->numero = -1; /* Channel not yet allocated */
+ file->private_data = channel_data;
+
+ /*
+ * Check if microsd is selected as trace interface
+ * and enable corresponding pins muxing.
+ */
+ if (IS_MODEM_ON_MICROSD)
+ retval = stm_connection_set(NULL, STM_STE_MODEM_ON_MICROSD);
+ else if (IS_APE_ON_MICROSD)
+ retval = stm_connection_set(NULL, STM_STE_APE_ON_MICROSD);
+
+ if (retval)
+ pr_alert("stm_open: failed to connect STM output\n");
+
+ return retval;
+}
+
+static int stm_release(struct inode *inode, struct file *file)
+{
+ struct channel_data *channel;
+
+ channel = (struct channel_data *)file->private_data;
+
+ /* Free allocated channel if necessary */
+ if (channel->numero != -1)
+ stm_free_channel(channel->numero);
+
+ bitmap_andnot(stm.ch_bitmap, stm.ch_bitmap,
+ channel->bitmap, STM_NUMBER_OF_CHANNEL);
+
+ kfree(channel);
+ return 0;
+}
+
+static ssize_t stm_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct channel_data *channel = file->private_data;
+
+ /* Alloc channel at first write */
+ if (channel->numero == -1) {
+ channel->numero = stm_alloc_channel(0);
+ if (channel->numero >= STM_NUMBER_OF_CHANNEL)
+ return -ENOMEM;
+ }
+
+ if (size > STM_BUFSIZE)
+ size = STM_BUFSIZE;
+
+ spin_lock(&channel->lock);
+
+ if (copy_from_user
+ (channel->data_buffer, (void __user *) buf, size)) {
+ spin_unlock(&channel->lock);
+ return -EFAULT;
+ }
+ size = stm_trace_buffer_onchannel(channel->numero,
+ channel->data_buffer, size);
+
+ spin_unlock(&channel->lock);
+
+ return size;
+}
+
+static int stm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /*
+ * Don't allow a mapping that covers more than the STM channels
+ */
+ if ((vma->vm_end - vma->vm_start) >
+ STM_NUMBER_OF_CHANNEL*sizeof(struct stm_channel))
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ stm.pdata->channels_phys_base>>PAGE_SHIFT,
+ STM_NUMBER_OF_CHANNEL*sizeof(struct stm_channel),
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/* Enable the trace for given sources (bitfield) */
+static void stm_enable_src(unsigned int v)
+{
+ unsigned int cr_val;
+ spin_lock(&lock);
+ cr_val = readl(STM_CR);
+ cr_val &= ~STM_CLOCK_MASK;
+ writel(cr_val|(stm_clockdiv<<STM_CLOCK_SHIFT), STM_CR);
+ /*
+ * If the kernel argument stm_ter has been set by the boot loader
+ * all calls to stm_enable_src will be ignored
+ */
+ v = stm_ter ? stm_ter : v;
+ writel(v, STM_TER);
+ spin_unlock(&lock);
+}
+
+/* Disable all sources */
+static void stm_disable_src(void)
+{
+ writel(0x0, STM_CR); /* stop clock */
+ writel(0x0, STM_TER); /* Disable cores */
+}
+
+/* Set clock speed */
+static int stm_set_ckdiv(enum clock_div v)
+{
+ unsigned int val;
+
+ spin_lock(&lock);
+ val = readl(STM_CR);
+ val &= ~STM_CLOCK_MASK;
+ writel(val | ((v << STM_CLOCK_SHIFT) & STM_CLOCK_MASK), STM_CR);
+ spin_unlock(&lock);
+ stm_clockdiv = v;
+
+ return 0;
+}
+
+/* Return the control register */
+static inline unsigned int stm_get_cr(void)
+{
+ return readl(STM_CR);
+}
+
+/*
+ * Set Trace MODE lossless/lossy (Software/Hardware)
+ * each bit represents the mode of the corresponding source
+ */
+static inline void stm_set_modes(unsigned int modes)
+{
+ writel(modes, STM_MMC);
+}
+
+/* Get Trace MODE lossless/lossy (Software/Hardware)
+ * each bit represents the mode of the corresponding source */
+static inline unsigned int stm_get_modes(void)
+{
+ return readl(STM_MMC);
+}
+
+/* Count # of free channels */
+static int stm_nb_free_channels(void)
+{
+ int nb_channels, offset;
+
+ nb_channels = 0;
+ offset = 0;
+ for (;;) {
+ offset = find_next_zero_bit(stm.ch_bitmap,
+ STM_NUMBER_OF_CHANNEL, offset);
+ if (offset == STM_NUMBER_OF_CHANNEL)
+ break;
+ offset++;
+ nb_channels++;
+ }
+ return nb_channels;
+}
+
+static long stm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct channel_data *channel = file->private_data;
+
+ switch (cmd) {
+
+ case STM_CONNECTION:
+ if (stm.pdata->stm_connection)
+ stm.pdata->stm_connection(arg);
+ stm_connection = arg;
+ break;
+
+ case STM_DISABLE:
+ stm_disable_src();
+ break;
+
+ case STM_GET_NB_MAX_CHANNELS:
+ err = put_user(STM_NUMBER_OF_CHANNEL, (unsigned int *)arg);
+ break;
+
+ case STM_GET_NB_FREE_CHANNELS:
+ err = put_user(stm_nb_free_channels(), (unsigned int *)arg);
+ break;
+
+ case STM_GET_CHANNEL_NO:
+ err = put_user(channel->numero, (unsigned int *)arg);
+ break;
+
+ case STM_SET_CLOCK_DIV:
+ err = stm_set_ckdiv((enum clock_div) arg);
+ break;
+
+ case STM_SET_MODE:
+ stm_set_modes(arg);
+ break;
+
+ case STM_GET_MODE:
+ err = put_user(stm_get_modes(), (unsigned int *)arg);
+ break;
+
+ case STM_GET_CTRL_REG:
+ err = put_user(stm_get_cr(), (unsigned int *)arg);
+ break;
+
+ case STM_ENABLE_SRC:
+ stm_enable_src(arg);
+ break;
+
+ case STM_GET_FREE_CHANNEL:
+ err = stm_get_channel(channel, (int *)arg);
+ break;
+
+ case STM_RELEASE_CHANNEL:
+ err = stm_release_channel(channel, arg);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * Trace a buffer on a dynamically allocated channel
+ * with auto time stamping on the first byte(s) only
+ * Dynamic channel number >=
+ * STM_NUMBER_OF_CHANNEL - NB_KERNEL_DYNAMIC_CHANNEL
+ */
+int stm_trace_buffer(const void *data, size_t length)
+{
+ int channel;
+
+ channel = stm_alloc_channel(STM_NUMBER_OF_CHANNEL
+ - NB_KERNEL_DYNAMIC_CHANNEL);
+ if (channel < STM_NUMBER_OF_CHANNEL) {
+ length = stm_trace_buffer_onchannel(channel, data, length);
+ stm_free_channel(channel);
+ return length;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(stm_trace_buffer);
+
+static const struct file_operations stm_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = stm_ioctl,
+ .open = stm_open,
+ .llseek = no_llseek,
+ .write = stm_write,
+ .release = stm_release,
+ .mmap = stm_mmap,
+};
+
+/*
+ * Init and deinit driver
+ */
+
+static int __devinit stm_probe(struct platform_device *pdev)
+{
+ int retval = 0;
+
+ if (!pdev || !pdev->dev.platform_data) {
+ pr_alert("No device/platform_data found on STM driver\n");
+ return -ENODEV;
+ }
+
+ stm.pdata = pdev->dev.platform_data;
+
+ cdev_init(&cdev, &stm_fops);
+ cdev.owner = THIS_MODULE;
+
+ stm_channels =
+ ioremap_nocache(stm.pdata->channels_phys_base,
+ STM_NUMBER_OF_CHANNEL*sizeof(*stm_channels));
+ if (stm_channels == NULL) {
+ dev_err(&pdev->dev, "could not remap STM Msg register\n");
+ return -ENODEV;
+ }
+
+ stm.virtbase = ioremap_nocache(stm.pdata->regs_phys_base, SZ_4K);
+ if (stm.virtbase == NULL) {
+ retval = -EIO;
+ dev_err(&pdev->dev, "could not remap STM Register\n");
+ goto err_channels;
+ }
+
+ retval = cdev_add(&cdev, MKDEV(stm_major, 0), 1);
+ if (retval) {
+ dev_err(&pdev->dev, "chardev registration failed\n");
+ goto err_channels;
+ }
+
+ if (IS_ERR(device_create(stm_class, &pdev->dev,
+ MKDEV(stm_major, 0), NULL, STM_DEV_NAME)))
+ dev_err(&pdev->dev, "can't create device\n");
+
+ /* Check chip IDs if necessary */
+ if (stm.pdata->id_mask) {
+ u32 periph_id, cell_id;
+
+ periph_id = (readb(STMPERIPHID3)<<24) +
+ (readb(STMPERIPHID2)<<16) +
+ (readb(STMPERIPHID1)<<8) +
+ readb(STMPERIPHID0);
+ cell_id = (readb(STMPCELLID3)<<24) +
+ (readb(STMPCELLID2)<<16) +
+ (readb(STMPCELLID1)<<8) +
+ readb(STMPCELLID0);
+ /* Only warns if it isn't a ST-Ericsson supported one */
+ if ((periph_id & stm.pdata->id_mask) != 0x00080dec ||
+ cell_id != 0xb105f00d) {
+ dev_warn(&pdev->dev, "STM-Trace IC not compatible\n");
+ dev_warn(&pdev->dev, "periph_id=%x\n", periph_id);
+ dev_warn(&pdev->dev, "pcell_id=%x\n", cell_id);
+ }
+ }
+
+ /* Reserve channels if necessary */
+ if (stm.pdata->channels_reserved_sz) {
+ int i;
+
+ for (i = 0; i < stm.pdata->channels_reserved_sz; i++) {
+ set_bit(stm.pdata->channels_reserved[i],
+ stm.ch_bitmap);
+ }
+ }
+ /* Reserve kernel trace channels on demand */
+#ifdef CONFIG_STM_PRINTK
+ set_bit(CONFIG_STM_PRINTK_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_FTRACE
+ set_bit(CONFIG_STM_FTRACE_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_CTX_SWITCH
+ set_bit(CONFIG_STM_CTX_SWITCH_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_WAKEUP
+ set_bit(CONFIG_STM_WAKEUP_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_STACK_TRACE
+ set_bit(CONFIG_STM_STACK_TRACE_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_TRACE_PRINTK
+ set_bit(CONFIG_STM_TRACE_PRINTK_CHANNEL, stm.ch_bitmap);
+ set_bit(CONFIG_STM_TRACE_BPRINTK_CHANNEL, stm.ch_bitmap);
+#endif
+
+ /* Check kernel's environment parameters first */
+ if (IS_APE_ON_MIPI34)
+ stm_connection = STM_STE_APE_ON_MIPI34_NONE_ON_MIPI60;
+ else if (IS_APE_ON_MIPI60)
+ stm_connection = STM_STE_MODEM_ON_MIPI34_APE_ON_MIPI60;
+
+ /* Apply parameters to driver */
+ if (stm.pdata->stm_connection) {
+ retval = stm.pdata->stm_connection(stm_connection);
+ if (retval) {
+ dev_err(&pdev->dev, "failed to connect STM output\n");
+ goto err_channels;
+ }
+ }
+
+ /* Enable STM Masters given in pdata */
+ if (stm.pdata->masters_enabled)
+ stm_enable_src(stm.pdata->masters_enabled);
+ stm_set_modes(STM_MMC_DEFAULT); /* Set all sources in HW mode */
+
+ dev_info(&pdev->dev, "STM-Trace driver probed successfully\n");
+ stm_printk("STM-Trace driver initialized\n");
+ return 0;
+
+err_channels:
+ iounmap(stm_channels);
+ return retval;
+}
+
+static int __devexit stm_remove(struct platform_device *pdev)
+{
+ device_destroy(stm_class, MKDEV(stm_major, 0));
+ cdev_del(&cdev);
+
+ if (stm.pdata->stm_connection)
+ (void) stm.pdata->stm_connection(STM_DISCONNECT);
+
+ stm_disable_src();
+ iounmap(stm.virtbase);
+ iounmap(stm_channels);
+
+ return 0;
+}
+
+int stm_printk(const char *fmt, ...)
+{
+ int ret;
+ size_t size;
+ va_list args;
+
+ va_start(args, fmt);
+ arch_spin_lock(&stm_buf_lock);
+ size = vscnprintf((char *)stm_printk_buf,
+ sizeof(stm_printk_buf), fmt, args);
+ ret = stm_trace_buffer(stm_printk_buf, size);
+ arch_spin_unlock(&stm_buf_lock);
+ va_end(args);
+ return ret;
+}
+EXPORT_SYMBOL(stm_printk);
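+
+/*
+ * Example usage (illustrative sketch; the format string and values are
+ * hypothetical): stm_printk() behaves like printk() but emits the
+ * formatted text over a dynamically allocated STM trace channel:
+ *
+ *   stm_printk("irq %d handled in %u us\n", irq, delta_us);
+ */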
+
+/*
+ * Debugfs interface
+ */
+
+static int stm_connection_show(void *data, u64 *val)
+{
+ *val = stm_connection;
+ return 0;
+}
+
+static int stm_connection_set(void *data, u64 val)
+{
+ int retval = 0;
+
+ if (stm.pdata->stm_connection) {
+ stm_connection = val;
+ retval = stm.pdata->stm_connection(val);
+ }
+ return retval;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_connection_fops, stm_connection_show,
+ stm_connection_set, "%llu\n");
+
+static int stm_clockdiv_show(void *data, u64 *val)
+{
+ *val = stm_clockdiv;
+ return 0;
+}
+
+static int stm_clockdiv_set(void *data, u64 val)
+{
+ stm_set_ckdiv(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_clockdiv_fops, stm_clockdiv_show,
+ stm_clockdiv_set, "%llu\n");
+
+static int stm_masters_enable_show(void *data, u64 *val)
+{
+ *val = readl(STM_TER);
+ return 0;
+}
+
+static int stm_masters_enable_set(void *data, u64 val)
+{
+ stm_enable_src(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_masters_enable_fops, stm_masters_enable_show,
+ stm_masters_enable_set, "%08llx\n");
+
+static int stm_masters_modes_show(void *data, u64 *val)
+{
+ *val = stm_get_modes();
+ return 0;
+}
+
+static int stm_masters_modes_set(void *data, u64 val)
+{
+ stm_set_modes(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_masters_modes_fops, stm_masters_modes_show,
+ stm_masters_modes_set, "%08llx\n");
+
+/* Count # of free channels */
+static int stm_free_channels_show(void *data, u64 *val)
+{
+ *val = stm_nb_free_channels();
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_free_channels_fops, stm_free_channels_show,
+ NULL, "%lld\n");
+
+static __init int stm_init_debugfs(void)
+{
+ struct dentry *d_stm;
+
+ d_stm = debugfs_create_dir(STM_DEV_NAME, NULL);
+ if (!d_stm)
+ return -ENOMEM;
+
+ (void) debugfs_create_file("connection", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_connection_fops);
+ (void) debugfs_create_file("clockdiv", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_clockdiv_fops);
+ (void) debugfs_create_file("masters_enable", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_masters_enable_fops);
+ (void) debugfs_create_file("masters_modes", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_masters_modes_fops);
+ (void) debugfs_create_file("free_channels", S_IRUGO, d_stm,
+ NULL, &stm_free_channels_fops);
+ return 0;
+}
+fs_initcall(stm_init_debugfs);
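
The debugfs files created above take and return plain numbers: connection, clockdiv and free_channels in decimal, masters_enable and masters_modes in hexadecimal. A small userspace sketch that reads the free channel count and programs a clock divider; the directory name comes from STM_DEV_NAME, so the /sys/kernel/debug/stm path used here is only an assumption:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long long free_ch = 0;
    	FILE *f;

    	/* Assumed path: debugfs mounted at /sys/kernel/debug, dir named "stm". */
    	f = fopen("/sys/kernel/debug/stm/free_channels", "r");
    	if (!f)
    		return 1;
    	if (fscanf(f, "%llu", &free_ch) == 1)
    		printf("free STM channels: %llu\n", free_ch);
    	fclose(f);

    	/* Program a new clock divider (the value 4 is purely illustrative). */
    	f = fopen("/sys/kernel/debug/stm/clockdiv", "w");
    	if (!f)
    		return 1;
    	fprintf(f, "4\n");
    	fclose(f);
    	return 0;
    }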
+
+static struct platform_driver stm_driver = {
+ .probe = stm_probe,
+ .remove = __devexit_p(stm_remove),
+ .driver = {
+ .name = STM_DEV_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init stm_init(void)
+{
+ int retval;
+ dev_t dev;
+
+ stm_class = class_create(THIS_MODULE, STM_DEV_NAME);
+ if (IS_ERR(stm_class)) {
+ pr_err("stm: can't register stm class\n");
+ return PTR_ERR(stm_class);
+ }
+
+ retval = alloc_chrdev_region(&dev, 0, 1, STM_DEV_NAME);
+ if (retval) {
+ pr_err("stm: can't register character device\n");
+ class_destroy(stm_class);
+ return retval;
+ }
+ stm_major = MAJOR(dev);
+ return platform_driver_register(&stm_driver);
+}
+
+static void __exit stm_exit(void)
+{
+ platform_driver_unregister(&stm_driver);
+ unregister_chrdev_region(MKDEV(stm_major, 0), 1);
+ class_destroy(stm_class);
+}
+
+arch_initcall(stm_init); /* Initialize STM as early as possible, but it must wait for GPIO init */

+module_exit(stm_exit);
+
+MODULE_AUTHOR("Paul Ghaleb - ST Microelectronics");
+MODULE_AUTHOR("Pierre Peiffer - ST-Ericsson");
+MODULE_AUTHOR("Philippe Langlais - ST-Ericsson");
+MODULE_DESCRIPTION("System Trace Module driver");
+MODULE_ALIAS("stm");
+MODULE_ALIAS("stm-trace");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c6a383d0244..6b7ab103da2 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -108,6 +108,7 @@ struct mmc_blk_data {
unsigned int part_curr;
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
+ struct device_attribute power_ro_lock_legacy;
int area_type;
};
@@ -168,6 +169,87 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_unlock(&open_lock);
}
+#define EXT_CSD_BOOT_WP_PWR_WP_TEXT "pwr_ro"
+#define EXT_CSD_BOOT_WP_PERM_WP_TEXT "perm_ro"
+#define EXT_CSD_BOOT_WP_WP_DISABLED_TEXT "rw"
+static ssize_t boot_partition_ro_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card = md->queue.card;
+ const char *out_text;
+
+ if (card->ext_csd.boot_ro_lock
+ & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
+ out_text = EXT_CSD_BOOT_WP_PERM_WP_TEXT;
+ else if (card->ext_csd.boot_ro_lock
+ & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
+ out_text = EXT_CSD_BOOT_WP_PWR_WP_TEXT;
+ else
+ out_text = EXT_CSD_BOOT_WP_WP_DISABLED_TEXT;
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", out_text);
+
+ return ret;
+}
+
+static ssize_t boot_partition_ro_lock_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ struct mmc_blk_data *md, *part_md;
+ struct mmc_card *card;
+ u8 set = 0;
+
+ md = mmc_blk_get(dev_to_disk(dev));
+ card = md->queue.card;
+
+ if (!strncmp(buf, EXT_CSD_BOOT_WP_PWR_WP_TEXT,
+ strlen(EXT_CSD_BOOT_WP_PWR_WP_TEXT)))
+ set = EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+ else if (!strncmp(buf, EXT_CSD_BOOT_WP_PERM_WP_TEXT,
+ strlen(EXT_CSD_BOOT_WP_PERM_WP_TEXT)))
+ set = EXT_CSD_BOOT_WP_B_PERM_WP_EN;
+
+ if (set) {
+ mmc_claim_host(card->host);
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BOOT_WP,
+ set,
+ card->ext_csd.part_time);
+ if (ret)
+ pr_err("Boot Partition Lock failed: %d", ret);
+ else
+ card->ext_csd.boot_ro_lock = set;
+
+ mmc_release_host(card->host);
+
+ if (!ret) {
+ pr_info("%s: Locking boot partition "
+ "%s",
+ md->disk->disk_name,
+ buf);
+ set_disk_ro(md->disk, 1);
+
+ list_for_each_entry(part_md, &md->part, part)
+ if (part_md->area_type ==
+ MMC_BLK_DATA_AREA_BOOT) {
+ pr_info("%s: Locking boot partition "
+ "%s",
+ part_md->disk->disk_name,
+ buf);
+ set_disk_ro(part_md->disk, 1);
+ }
+ }
+ }
+ ret = count;
+
+ mmc_blk_put(md);
+ return ret;
+}
+
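
The legacy store hook accepts the literal strings defined above ("pwr_ro" or "perm_ro"); reading the attribute back returns "pwr_ro", "perm_ro" or "rw". A userspace sketch that power-locks the boot partitions through the "ro_lock" attribute registered later in this patch; the /sys/block/mmcblk0 path is only an assumption for an example device:

    #include <stdio.h>

    int main(void)
    {
    	/* Assumed device node; adjust for the target board. */
    	const char *attr = "/sys/block/mmcblk0/ro_lock";
    	char state[16] = "";
    	FILE *f = fopen(attr, "w");

    	if (!f)
    		return 1;
    	/* "pwr_ro" write-protects the boot partitions until the next power cycle. */
    	fputs("pwr_ro\n", f);
    	fclose(f);

    	/* Read back the current lock state. */
    	f = fopen(attr, "r");
    	if (f) {
    		if (fgets(state, sizeof(state), f))
    			printf("boot partition lock: %s", state);
    		fclose(f);
    	}
    	return 0;
    }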
static ssize_t power_ro_lock_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1379,6 +1461,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
+ /*
+ * We must make sure we have not claimed the host before
+ * doing a flush, to prevent deadlock; thus we check first
+ * whether the host needs a resume.
+ */
+ if (mmc_host_needs_resume(card->host))
+ mmc_resume_host_sync(card->host);
+
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
mmc_claim_host(card->host);
@@ -1613,24 +1703,6 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
return ret;
}
-static int
-mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
-{
- int err;
-
- mmc_claim_host(card->host);
- err = mmc_set_blocklen(card, 512);
- mmc_release_host(card->host);
-
- if (err) {
- pr_err("%s: unable to set block size to 512: %d\n",
- md->disk->disk_name, err);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
struct mmc_card *card;
@@ -1640,9 +1712,12 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
- card->ext_csd.boot_ro_lockable)
+ card->ext_csd.boot_ro_lockable) {
device_remove_file(disk_to_dev(md->disk),
&md->power_ro_lock);
+ device_remove_file(disk_to_dev(md->disk),
+ &md->power_ro_lock_legacy);
+ }
/* Stop new requests from getting into the queue */
del_gendisk(md->disk);
@@ -1702,9 +1777,24 @@ static int mmc_add_disk(struct mmc_blk_data *md)
&md->power_ro_lock);
if (ret)
goto power_ro_lock_fail;
+
+ /* Legacy mode */
+ mode = S_IRUGO | S_IWUSR;
+
+ md->power_ro_lock_legacy.show = boot_partition_ro_lock_show;
+ md->power_ro_lock_legacy.store = boot_partition_ro_lock_store;
+ sysfs_attr_init(&md->power_ro_lock_legacy.attr);
+ md->power_ro_lock_legacy.attr.mode = mode;
+ md->power_ro_lock_legacy.attr.name = "ro_lock";
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->power_ro_lock_legacy);
+ if (ret)
+ goto power_ro_lock_fail_legacy;
}
return ret;
+power_ro_lock_fail_legacy:
+ device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
power_ro_lock_fail:
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
@@ -1758,7 +1848,6 @@ static const struct mmc_fixup blk_fixups[] =
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md, *part_md;
- int err;
char cap_str[10];
/*
@@ -1771,10 +1860,6 @@ static int mmc_blk_probe(struct mmc_card *card)
if (IS_ERR(md))
return PTR_ERR(md);
- err = mmc_blk_set_blksize(md, card);
- if (err)
- goto out;
-
string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s %s %s\n",
@@ -1799,7 +1884,7 @@ static int mmc_blk_probe(struct mmc_card *card)
out:
mmc_blk_remove_parts(card, md);
mmc_blk_remove_req(md);
- return err;
+ return 0;
}
static void mmc_blk_remove(struct mmc_card *card)
@@ -1835,8 +1920,6 @@ static int mmc_blk_resume(struct mmc_card *card)
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
- mmc_blk_set_blksize(md, card);
-
/*
* Resume involves the card going into idle state,
* so current partition is always the main one.
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 759714ed6be..6622f2e6e05 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -1253,6 +1253,130 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test)
return 0;
}
+
+/* helper function for various address alignment and sg length alignment */
+static int mmc_test_align_multi(struct mmc_test_card *test, bool do_write,
+ struct scatterlist *sg,
+ u32 *sizes, int sg_len, int offset)
+{
+ int ret, i;
+ unsigned int size;
+ u32 buf_off;
+ u32 sg_size;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+ size -= offset;
+ size -= size % 512;
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ for (i = 0, sg_size = 0;
+ i < sg_len && sg_size + sizes[i] < size; i++)
+ sg_size += sizes[i];
+
+ if (sg_size < size)
+ sizes[i-1] += size - sg_size;
+ sg_len = i;
+
+ sg_init_table(sg, sg_len);
+ for (i = 0, buf_off = offset; i < sg_len; i++) {
+ sg_set_buf(&sg[i], test->buffer + buf_off, sizes[i]);
+ buf_off += sizes[i];
+ }
+
+ ret = mmc_test_transfer(test, sg, sg_len, 0, size/512, 512, do_write);
+ if (ret)
+ return ret;
+
+ return 0;
+}
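
The trimming loop above accepts sg elements while the running total stays below the transfer size, then pads the last accepted element so the total matches exactly and drops the rest. A standalone sketch of that arithmetic; the element lengths and the 2048-byte total are made-up values:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int sizes[] = { 512, 4, 8, 12, 2048, 4096 };
    	unsigned int size = 2048;	/* illustrative total transfer size */
    	unsigned int sg_size = 0;
    	int sg_len = sizeof(sizes) / sizeof(sizes[0]);
    	int i;

    	/* Accept elements while the running total stays below 'size'. */
    	for (i = 0; i < sg_len && sg_size + sizes[i] < size; i++)
    		sg_size += sizes[i];

    	/* Pad the last accepted element so the total is exactly 'size'. */
    	if (sg_size < size)
    		sizes[i - 1] += size - sg_size;
    	sg_len = i;

    	printf("using %d sg elements, last element %u bytes\n",
    	       sg_len, sizes[sg_len - 1]);
    	return 0;
    }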
+
+static int mmc_test_align_length_32(struct mmc_test_card *test, bool do_write)
+{
+ u32 sizes[] = {512, 32*1, 32*2, 32*3, 32*4, 32*5, 32*6, 32*7,
+ 32*8, 32*9, 32*10, 32*11, 32*12, 32*13, 2048};
+ struct scatterlist sg[ARRAY_SIZE(sizes)];
+
+ return mmc_test_align_multi(test, do_write, sg, sizes,
+ ARRAY_SIZE(sg), 0);
+}
+
+static int mmc_test_align_length_4(struct mmc_test_card *test, bool do_write)
+{
+ u32 sizes[] = {512, 4*1, 4*2, 4*3, 4*4, 4*5, 4*6, 4*7,
+ 4*8, 4*9, 520, 1040, 2080};
+ struct scatterlist sg[ARRAY_SIZE(sizes)];
+
+ return mmc_test_align_multi(test, do_write, sg, sizes,
+ ARRAY_SIZE(sg), 0);
+}
+
+static int mmc_test_align_length_4_write(struct mmc_test_card *test)
+{
+ bool do_write = true;
+ return mmc_test_align_length_4(test, do_write);
+}
+
+static int mmc_test_align_length_4_read(struct mmc_test_card *test)
+{
+ bool do_write = false;
+ return mmc_test_align_length_4(test, do_write);
+}
+
+static int mmc_test_align_length_32_write(struct mmc_test_card *test)
+{
+ bool do_write = true;
+ return mmc_test_align_length_32(test, do_write);
+}
+
+static int mmc_test_align_length_32_read(struct mmc_test_card *test)
+{
+ bool do_write = false;
+ return mmc_test_align_length_32(test, do_write);
+}
+
+/* helper function for testing address alignment */
+static int mmc_test_align_address(struct mmc_test_card *test, bool do_write,
+ u32 offset)
+{
+ u32 sizes[] = {512, 512, 1024, 1024, 2048};
+ struct scatterlist sg[ARRAY_SIZE(sizes)];
+
+ return mmc_test_align_multi(test, do_write, sg,
+ sizes, ARRAY_SIZE(sg), offset);
+}
+
+static int mmc_test_align_address_4_write(struct mmc_test_card *test)
+{
+ bool do_write = true;
+ return mmc_test_align_address(test, do_write, 4);
+}
+
+static int mmc_test_align_address_4_read(struct mmc_test_card *test)
+{
+ bool do_write = false;
+ return mmc_test_align_address(test, do_write, 4);
+}
+
+static int mmc_test_align_address_32_write(struct mmc_test_card *test)
+{
+ bool do_write = true;
+ return mmc_test_align_address(test, do_write, 32);
+}
+
+static int mmc_test_align_address_32_read(struct mmc_test_card *test)
+{
+ bool do_write = false;
+ return mmc_test_align_address(test, do_write, 32);
+}
+
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
int ret;
@@ -2451,6 +2575,62 @@ static const struct mmc_test_case mmc_test_cases[] = {
},
{
+ .name = "4 bytes aligned sg-element length write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_length_4_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "4 bytes aligned sg-element length read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_length_4_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "32 bytes aligned sg-element length write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_length_32_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "32 bytes aligned sg-element length read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_length_32_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "4 bytes aligned sg-element address write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_address_4_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "4 bytes aligned sg-element address read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_address_4_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "32 bytes aligned sg-element address write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_address_32_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "32 bytes aligned sg-element address read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_address_32_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
.name = "Correct xfer_size at write (start failure)",
.run = mmc_test_xfersize_write,
},
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 132378b89d7..976856af759 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -243,16 +243,17 @@ static void mmc_wait_done(struct mmc_request *mrq)
complete(&mrq->completion);
}
-static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
+static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
if (mmc_card_removed(host->card)) {
mrq->cmd->error = -ENOMEDIUM;
complete(&mrq->completion);
- return;
+ return -ENOMEDIUM;
}
mmc_start_request(host, mrq);
+ return 0;
}
static void mmc_wait_for_req_done(struct mmc_host *host,
@@ -336,6 +337,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
struct mmc_async_req *areq, int *error)
{
int err = 0;
+ int start_err = 0;
struct mmc_async_req *data = host->areq;
/* Prepare a new request */
@@ -345,30 +347,23 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
if (host->areq) {
mmc_wait_for_req_done(host, host->areq->mrq);
err = host->areq->err_check(host->card, host->areq);
- if (err) {
- /* post process the completed failed request */
- mmc_post_req(host, host->areq->mrq, 0);
- if (areq)
- /*
- * Cancel the new prepared request, because
- * it can't run until the failed
- * request has been properly handled.
- */
- mmc_post_req(host, areq->mrq, -EINVAL);
-
- host->areq = NULL;
- goto out;
- }
}
- if (areq)
- __mmc_start_req(host, areq->mrq);
+ if (!err && areq)
+ start_err = __mmc_start_req(host, areq->mrq);
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
- host->areq = areq;
- out:
+ /* Cancel a prepared request if it was not started. */
+ if ((err || start_err) && areq)
+ mmc_post_req(host, areq->mrq, -EINVAL);
+
+ if (err)
+ host->areq = NULL;
+ else
+ host->areq = areq;
+
if (error)
*error = err;
return data;
@@ -2137,7 +2132,7 @@ void mmc_rescan(struct work_struct *work)
container_of(work, struct mmc_host, detect.work);
int i;
- if (host->rescan_disable)
+ if (host->rescan_disable || mmc_host_needs_resume(host))
return;
mmc_bus_get(host);
@@ -2402,7 +2397,13 @@ int mmc_suspend_host(struct mmc_host *host)
if (host->caps & MMC_CAP_DISABLE)
cancel_delayed_work(&host->disable);
cancel_delayed_work(&host->detect);
+ cancel_delayed_work_sync(&host->resume);
mmc_flush_scheduled_work();
+
+ /* Skip suspend if a deferred resume was scheduled but not completed. */
+ if (mmc_host_needs_resume(host))
+ return 0;
+
if (mmc_try_claim_host(host)) {
err = mmc_cache_ctrl(host, 0);
mmc_do_release_host(host);
@@ -2443,6 +2444,10 @@ int mmc_suspend_host(struct mmc_host *host)
mmc_release_host(host);
host->pm_flags = 0;
err = 0;
+ } else if (mmc_card_mmc(host->card) ||
+ mmc_card_sd(host->card)) {
+ host->pm_state |= MMC_HOST_DEFERRED_RESUME |
+ MMC_HOST_NEEDS_RESUME;
}
} else {
err = -EBUSY;
@@ -2467,6 +2472,12 @@ int mmc_resume_host(struct mmc_host *host)
{
int err = 0;
+ if (mmc_host_deferred_resume(host)) {
+ mmc_schedule_delayed_work(&host->resume,
+ msecs_to_jiffies(3000));
+ return 0;
+ }
+
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
if (!mmc_card_keep_power(host)) {
@@ -2501,6 +2512,24 @@ int mmc_resume_host(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_resume_host);
+void mmc_resume_work(struct work_struct *work)
+{
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, resume.work);
+
+ host->pm_state &= ~MMC_HOST_DEFERRED_RESUME;
+ mmc_resume_host(host);
+ host->pm_state &= ~MMC_HOST_NEEDS_RESUME;
+
+ mmc_detect_change(host, 0);
+}
+
+void mmc_resume_host_sync(struct mmc_host *host)
+{
+ flush_delayed_work_sync(&host->resume);
+}
+EXPORT_SYMBOL(mmc_resume_host_sync);
+
/* Do the card removal on suspend if card is assumed removeable
* Do that in pm notifier while userspace isn't yet frozen, so we will be able
to sync the card.
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 3bdafbca354..5796d2d85f4 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -59,6 +59,7 @@ static inline void mmc_delay(unsigned int ms)
void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
+void mmc_resume_work(struct work_struct *work);
int _mmc_detect_card_removed(struct mmc_host *host);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index c3704e293a7..fe6e81529cb 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -331,6 +331,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
+ INIT_DELAYED_WORK(&host->resume, mmc_resume_work);
#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
#endif
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 11e589cd823..18a091cf39d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -19,6 +19,7 @@
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
+#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
@@ -45,6 +46,7 @@ static unsigned int fmax = 515633;
* struct variant_data - MMCI variant-specific quirks
* @clkreg: default value for MCICLOCK register
* @clkreg_enable: enable value for MMCICLOCK register
+ * @dma_sdio_req_ctrl: enable value for DMAREQCTL register for SDIO write
* @datalength_bits: number of bits in the MMCIDATALENGTH register
* @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
* is asserted (likewise for RX)
@@ -53,28 +55,41 @@ static unsigned int fmax = 515633;
* @sdio: variant supports SDIO
* @st_clkdiv: true if using a ST-specific clock divider algorithm
* @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
+ * @non_power_of_2_blksize: true if block sizes can be other than power of two
+ * @pwrreg_powerup: power up value for MMCIPOWER register
+ * @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_ctrl_power: bits in MMCIPOWER register controls ext. power supply
*/
struct variant_data {
unsigned int clkreg;
unsigned int clkreg_enable;
+ unsigned int dma_sdio_req_ctrl;
unsigned int datalength_bits;
unsigned int fifosize;
unsigned int fifohalfsize;
bool sdio;
bool st_clkdiv;
bool blksz_datactrl16;
+ bool non_power_of_2_blksize;
+ u32 pwrreg_powerup;
+ bool signal_direction;
+ bool pwrreg_ctrl_power;
};
static struct variant_data variant_arm = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
.datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
+ .pwrreg_ctrl_power = true,
};
static struct variant_data variant_arm_extended_fifo = {
.fifosize = 128 * 4,
.fifohalfsize = 64 * 4,
.datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
+ .pwrreg_ctrl_power = true,
};
static struct variant_data variant_u300 = {
@@ -83,6 +98,8 @@ static struct variant_data variant_u300 = {
.clkreg_enable = MCI_ST_U300_HWFCEN,
.datalength_bits = 16,
.sdio = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
};
static struct variant_data variant_ux500 = {
@@ -90,9 +107,12 @@ static struct variant_data variant_ux500 = {
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .dma_sdio_req_ctrl = MCI_ST_DPSM_DMAREQCTL,
.datalength_bits = 24,
.sdio = true,
.st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
};
static struct variant_data variant_ux500v2 = {
@@ -100,13 +120,64 @@ static struct variant_data variant_ux500v2 = {
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .dma_sdio_req_ctrl = MCI_ST_DPSM_DMAREQCTL,
.datalength_bits = 24,
.sdio = true,
.st_clkdiv = true,
.blksz_datactrl16 = true,
+ .non_power_of_2_blksize = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
};
/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ if (!data)
+ return 0;
+
+ if (!host->variant->non_power_of_2_blksize &&
+ !is_power_of_2(data->blksz)) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported block size (%d bytes)\n", data->blksz);
+ return -EINVAL;
+ }
+
+ if (data->sg->offset & 3) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported alginment (0x%x)\n", data->sg->offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
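
The validation relies on two standard bit tests: a block size is a power of two iff exactly one bit is set, and an sg buffer offset is 32-bit aligned iff its two low bits are clear. A standalone illustration of both checks:

    #include <stdio.h>
    #include <stdbool.h>

    /* Same test as the kernel's is_power_of_2(): exactly one bit set. */
    static bool power_of_2(unsigned int n)
    {
    	return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
    	unsigned int blksz[] = { 512, 520, 1024 };
    	unsigned int offset[] = { 0, 2, 4 };
    	int i;

    	for (i = 0; i < 3; i++)
    		printf("blksz %4u: %s\n", blksz[i],
    		       power_of_2(blksz[i]) ?
    		       "power of two" : "needs non_power_of_2_blksize");
    	for (i = 0; i < 3; i++)
    		printf("offset %u: %s\n", offset[i],
    		       (offset[i] & 3) ? "rejected (not 32-bit aligned)" : "ok");
    	return 0;
    }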
+
+/*
+ * This must be called with host->lock held
+ */
+static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
+{
+ if (host->clk_reg != clk) {
+ host->clk_reg = clk;
+ writel(clk, host->base + MMCICLOCK);
+ }
+}
+
+/*
+ * This must be called with host->lock held
+ */
+static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
+{
+ if (host->pwr_reg != pwr) {
+ host->pwr_reg = pwr;
+ writel(pwr, host->base + MMCIPOWER);
+ }
+}
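
mmci_write_clkreg() and mmci_write_pwrreg() cache the last value written so repeated ios changes do not hit the bus with redundant MMIO writes. A generic sketch of that write-if-changed pattern; mmio_write32() below is a stand-in for the real register accessor, not an existing API:

    #include <stdint.h>
    #include <stdio.h>

    struct cached_reg {
    	volatile uint32_t *addr;	/* register address */
    	uint32_t shadow;		/* last value written */
    };

    /* Stand-in for writel(); here it just logs the access. */
    static void mmio_write32(volatile uint32_t *addr, uint32_t val)
    {
    	printf("write 0x%08x to %p\n", val, (void *)addr);
    }

    /* Only touch the hardware when the value actually changes. */
    static void cached_reg_write(struct cached_reg *r, uint32_t val)
    {
    	if (r->shadow != val) {
    		r->shadow = val;
    		mmio_write32(r->addr, val);
    	}
    }

    int main(void)
    {
    	uint32_t fake_reg = 0;
    	struct cached_reg clk = { .addr = &fake_reg, .shadow = 0 };

    	cached_reg_write(&clk, 0x100);	/* written */
    	cached_reg_write(&clk, 0x100);	/* skipped: unchanged */
    	cached_reg_write(&clk, 0x180);	/* written */
    	return 0;
    }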
+
+/*
* This must be called with host->lock held
*/
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
@@ -153,7 +224,7 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
clk |= MCI_ST_8BIT_BUS;
- writel(clk, host->base + MMCICLOCK);
+ mmci_write_clkreg(host, clk);
}
static void
@@ -166,14 +237,10 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
host->mrq = NULL;
host->cmd = NULL;
- /*
- * Need to drop the host lock here; mmc_request_done may call
- * back into the driver...
- */
- spin_unlock(&host->lock);
- pm_runtime_put(mmc_dev(host->mmc));
mmc_request_done(host->mmc, mrq);
- spin_lock(&host->lock);
+
+ pm_runtime_mark_last_busy(mmc_dev(host->mmc));
+ pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
@@ -197,6 +264,7 @@ static void mmci_stop_data(struct mmci_host *host)
writel(0, host->base + MMCIDATACTRL);
mmci_set_mask1(host, 0);
host->data = NULL;
+ host->datactrl_reg = 0;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
@@ -307,10 +375,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
host->dma_rx_channel = host->dma_tx_channel = NULL;
}
+static void mmci_dma_data_error(struct mmci_host *host, struct mmc_data *data)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
+ data->host_cookie = 0;
+}
+
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
- struct dma_chan *chan = host->dma_current;
+ struct dma_chan *chan;
enum dma_data_direction dir;
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ dir = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
u32 status;
int i;
@@ -329,19 +420,12 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
* contiguous buffers. On TX, we'll get a FIFO underrun error.
*/
if (status & MCI_RXDATAAVLBLMASK) {
- dmaengine_terminate_all(chan);
- if (!data->error)
- data->error = -EIO;
- }
-
- if (data->flags & MMC_DATA_WRITE) {
- dir = DMA_TO_DEVICE;
- } else {
- dir = DMA_FROM_DEVICE;
+ data->error = -EIO;
+ mmci_dma_data_error(host, data);
}
if (!data->host_cookie)
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+ mmci_dma_unmap(host, data);
/*
* Use of DMA with scatter-gather is impossible.
@@ -351,16 +435,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
mmci_dma_release(host);
}
-}
-static void mmci_dma_data_error(struct mmci_host *host)
-{
- dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
- dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
}
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
- struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+ struct dma_chan **dma_chan,
+ struct dma_async_tx_descriptor **dma_desc)
{
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
@@ -377,16 +460,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
enum dma_data_direction buffer_dirn;
int nr_sg;
- /* Check if next job is already prepared */
- if (data->host_cookie && !next &&
- host->dma_current && host->dma_desc_current)
- return 0;
-
- if (!next) {
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
- }
-
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_DEV_TO_MEM;
buffer_dirn = DMA_FROM_DEVICE;
@@ -401,8 +474,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (!chan)
return -EINVAL;
- /* If less than or equal to the fifo size, don't bother with DMA */
- if (data->blksz * data->blocks <= variant->fifosize)
+ /*
+ * If less than or equal to the fifo size, don't bother with DMA.
+ * SDIO transfers may not be 4-byte aligned; fall back to PIO.
+ */
+ if (data->blksz * data->blocks <= variant->fifosize ||
+ (data->blksz * data->blocks) & 3)
return -EINVAL;
device = chan->device;
@@ -416,29 +493,42 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (!desc)
goto unmap_exit;
- if (next) {
- next->dma_chan = chan;
- next->dma_desc = desc;
- } else {
- host->dma_current = chan;
- host->dma_desc_current = desc;
- }
+ *dma_chan = chan;
+ *dma_desc = desc;
return 0;
unmap_exit:
- if (!next)
- dmaengine_terminate_all(chan);
dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
return -ENOMEM;
}
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ /* Check if next job is already prepared. */
+ if (host->dma_current && host->dma_desc_current)
+ return 0;
+
+ /* No job was prepared, so do it now. */
+ return __mmci_dma_prep_data(host, data, &host->dma_current,
+ &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct mmci_host_next *nd = &host->next_data;
+ return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host)
{
int ret;
struct mmc_data *data = host->data;
+ struct variant_data *variant = host->variant;
- ret = mmci_dma_prep_data(host, host->data, NULL);
+ ret = mmci_dma_prep_data(host, host->data);
if (ret)
return ret;
@@ -449,10 +539,15 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
dmaengine_submit(host->dma_desc_current);
dma_async_issue_pending(host->dma_current);
- datactrl |= MCI_DPSM_DMAENABLE;
+ host->datactrl_reg |= MCI_DPSM_DMAENABLE;
+
+ /* Some hardware versions need special flags for SDIO DMA write */
+ if (variant->sdio && host->mmc->card && mmc_card_sdio(host->mmc->card)
+ && (data->flags & MMC_DATA_WRITE))
+ host->datactrl_reg |= variant->dma_sdio_req_ctrl;
/* Trigger the DMA transfer */
- writel(datactrl, host->base + MMCIDATACTRL);
+ writel(host->datactrl_reg, host->base + MMCIDATACTRL);
/*
* Let the MMCI say when the data is ended and it's time
@@ -469,18 +564,14 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
struct mmci_host_next *next = &host->next_data;
if (data->host_cookie && data->host_cookie != next->cookie) {
- pr_warning("[%s] invalid cookie: data->host_cookie %d"
+ pr_err("[%s] invalid cookie: data->host_cookie %d"
" host->next_data.cookie %d\n",
__func__, data->host_cookie, host->next_data.cookie);
- data->host_cookie = 0;
+ BUG();
}
- if (!data->host_cookie)
- return;
-
host->dma_desc_current = next->dma_desc;
host->dma_current = next->dma_chan;
-
next->dma_desc = NULL;
next->dma_chan = NULL;
}
@@ -495,19 +586,18 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
if (!data)
return;
- if (data->host_cookie) {
- data->host_cookie = 0;
+ BUG_ON(data->host_cookie);
+
+ if (mmci_validate_data(host, data))
return;
- }
- /* if config for dma */
- if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
- ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
- if (mmci_dma_prep_data(host, data, nd))
- data->host_cookie = 0;
- else
- data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
- }
+ /*
+ * Don't prepare DMA if there is no previous request,
+ * i.e. is_first_req is set. Instead, prepare DMA while
+ * the start command is being issued.
+ */
+ if (!is_first_req && !mmci_dma_prep_next(host, data))
+ data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -515,29 +605,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
{
struct mmci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- struct dma_chan *chan;
- enum dma_data_direction dir;
- if (!data)
+ if (!data || !data->host_cookie)
return;
- if (data->flags & MMC_DATA_READ) {
- dir = DMA_FROM_DEVICE;
- chan = host->dma_rx_channel;
- } else {
- dir = DMA_TO_DEVICE;
- chan = host->dma_tx_channel;
- }
+ mmci_dma_unmap(host, data);
+ if (err) {
+ struct mmci_host_next *next = &host->next_data;
+ struct dma_chan *chan;
+ if (data->flags & MMC_DATA_READ)
+ chan = host->dma_rx_channel;
+ else
+ chan = host->dma_tx_channel;
+ dmaengine_terminate_all(chan);
- /* if config for dma */
- if (chan) {
- if (err)
- dmaengine_terminate_all(chan);
- if (data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, dir);
- mrq->data->host_cookie = 0;
+ next->dma_desc = NULL;
+ next->dma_chan = NULL;
}
}
@@ -558,11 +642,20 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}
-static inline void mmci_dma_data_error(struct mmci_host *host)
+static inline void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
}
-static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static inline void mmci_dma_data_error(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host)
+{
+ return -ENOSYS;
+}
+
+static inline int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data)
{
return -ENOSYS;
}
@@ -572,10 +665,10 @@ static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datac
#endif
-static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
+static void mmci_setup_datactrl(struct mmci_host *host, struct mmc_data *data)
{
struct variant_data *variant = host->variant;
- unsigned int datactrl, timeout, irqmask;
+ unsigned int datactrl, timeout;
unsigned long long clks;
void __iomem *base;
int blksz_bits;
@@ -597,7 +690,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
writel(host->size, base + MMCIDATALENGTH);
blksz_bits = ffs(data->blksz) - 1;
- BUG_ON(1 << blksz_bits != data->blksz);
if (variant->blksz_datactrl16)
datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
@@ -607,11 +699,44 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
+ /* The ST Micro variants have a special bit to enable SDIO */
+ if (variant->sdio && host->mmc->card)
+ if (mmc_card_sdio(host->mmc->card)) {
+
+ /*
+ * On the ST Micro variant, SDIO write transfers of
+ * less than 8 bytes need clock H/W flow control
+ * disabled.
+ */
+ u32 clk;
+ if ((host->size < 8) && (data->flags & MMC_DATA_WRITE))
+ clk = host->clk_reg & ~variant->clkreg_enable;
+ else
+ clk = host->clk_reg | variant->clkreg_enable;
+
+ mmci_write_clkreg(host, clk);
+
+ /*
+ * The ST Micro variants have a special bit
+ * to enable SDIO.
+ */
+ datactrl |= MCI_ST_DPSM_SDIOEN;
+ }
+ host->datactrl_reg = datactrl;
+ writel(datactrl, base + MMCIDATACTRL);
+}
+
+static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
+{
+ unsigned int irqmask;
+ struct variant_data *variant = host->variant;
+ void __iomem *base = host->base;
+
/*
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
*/
- if (!mmci_dma_start_data(host, datactrl))
+ if (!mmci_dma_start_data(host))
return;
/* IRQ mode, map the SG list for CPU reading/writing */
@@ -635,12 +760,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
irqmask = MCI_TXFIFOHALFEMPTYMASK;
}
- /* The ST Micro variants has a special bit to enable SDIO */
- if (variant->sdio && host->mmc->card)
- if (mmc_card_sdio(host->mmc->card))
- datactrl |= MCI_ST_DPSM_SDIOEN;
-
- writel(datactrl, base + MMCIDATACTRL);
writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
mmci_set_mask1(host, irqmask);
}
@@ -667,6 +786,14 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
if (/*interrupt*/0)
c |= MCI_CPSM_INTERRUPT;
+ /*
+ * For level shifters we must not use more than 25MHz when
+ * sending commands.
+ */
+ host->cclk_desired = host->cclk;
+ if (host->plat->ios_handler && (host->cclk_desired > 25000000))
+ mmci_set_clkreg(host, 25000000);
+
host->cmd = cmd;
writel(cmd->arg, base + MMCIARGUMENT);
@@ -683,8 +810,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
u32 remain, success;
/* Terminate the DMA transfer */
- if (dma_inprogress(host))
- mmci_dma_data_error(host);
+ if (dma_inprogress(host)) {
+ mmci_dma_data_error(host, data);
+ mmci_dma_unmap(host, data);
+ }
/*
* Calculate how far we are into the transfer. Note that
@@ -723,7 +852,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & MCI_DATAEND || data->error) {
if (dma_inprogress(host))
- mmci_dma_unmap(host, data);
+ mmci_dma_finalize(host, data);
mmci_stop_data(host);
if (!data->error)
@@ -757,15 +886,24 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
cmd->resp[3] = readl(base + MMCIRESPONSE3);
}
+ /*
+ * For level shifters we might have decreased cclk to 25MHz when
+ * sending commands; restore the frequency here.
+ */
+ if (host->plat->ios_handler && (host->cclk_desired > host->cclk))
+ mmci_set_clkreg(host, host->cclk_desired);
+
if (!cmd->data || cmd->error) {
- if (host->data) {
- /* Terminate the DMA transfer */
- if (dma_inprogress(host))
- mmci_dma_data_error(host);
- mmci_stop_data(host);
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host)) {
+ mmci_dma_data_error(host, host->mrq->data);
+ mmci_dma_unmap(host, host->mrq->data);
}
+ if (host->data)
+ mmci_stop_data(host);
mmci_request_end(host, cmd->mrq);
} else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ mmci_setup_datactrl(host, cmd->data);
mmci_start_data(host, cmd->data);
}
}
@@ -786,7 +924,24 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
if (count <= 0)
break;
- readsl(base + MMCIFIFO, ptr, count >> 2);
+ /*
+ * SDIO especially may want to send something that is
+ * not divisible by 4 (as opposed to card sectors
+ * etc). Therefore make sure to always read the last bytes
+ * while only doing full 32-bit reads towards the FIFO.
+ */
+ if (unlikely(count & 0x3)) {
+ if (count < 4) {
+ unsigned char buf[4];
+ readsl(base + MMCIFIFO, buf, 1);
+ memcpy(ptr, buf, count);
+ } else {
+ readsl(base + MMCIFIFO, ptr, count >> 2);
+ count &= ~0x3;
+ }
+ } else {
+ readsl(base + MMCIFIFO, ptr, count >> 2);
+ }
ptr += count;
remain -= count;
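
The tail handling added above deals with PIO reads that are not a multiple of 4 bytes: when fewer than 4 bytes remain, a whole 32-bit word is pulled from the FIFO into a bounce buffer and only the valid bytes are copied out. A standalone sketch of that bounce-copy step, with the FIFO word simulated by a constant:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t fifo_word = 0xddccbbaa;	/* pretend 32-bit FIFO read */
    	unsigned char buf[4];
    	unsigned char out[8] = { 0 };
    	unsigned int count = 3;			/* only 3 bytes are valid */

    	/* Read the full word, then copy just the valid tail bytes. */
    	memcpy(buf, &fifo_word, sizeof(buf));
    	memcpy(out, buf, count);

    	printf("copied %u bytes: %02x %02x %02x\n",
    	       count, out[0], out[1], out[2]);
    	return 0;
    }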
@@ -815,23 +970,6 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
count = min(remain, maxcnt);
/*
- * The ST Micro variant for SDIO transfer sizes
- * less then 8 bytes should have clock H/W flow
- * control disabled.
- */
- if (variant->sdio &&
- mmc_card_sdio(host->mmc->card)) {
- if (count < 8)
- writel(readl(host->base + MMCICLOCK) &
- ~variant->clkreg_enable,
- host->base + MMCICLOCK);
- else
- writel(readl(host->base + MMCICLOCK) |
- variant->clkreg_enable,
- host->base + MMCICLOCK);
- }
-
- /*
* SDIO especially may want to send something that is
* not divisible by 4 (as opposed to card sectors
* etc), and the FIFO only accept full 32-bit writes.
@@ -984,13 +1122,12 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmci_host *host = mmc_priv(mmc);
unsigned long flags;
+ bool dmaprep_after_cmd = false;
WARN_ON(host->mrq != NULL);
- if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
- dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
- mrq->data->blksz);
- mrq->cmd->error = -EINVAL;
+ mrq->cmd->error = mmci_validate_data(host, mrq->data);
+ if (mrq->cmd->error) {
mmc_request_done(mmc, mrq);
return;
}
@@ -1001,24 +1138,45 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
- if (mrq->data)
+ if (mrq->data) {
+ dmaprep_after_cmd =
+ (host->variant->clkreg_enable &&
+ (mrq->data->flags & MMC_DATA_READ)) ||
+ !(mrq->data->flags & MMC_DATA_READ);
mmci_get_next_data(host, mrq->data);
-
- if (mrq->data && mrq->data->flags & MMC_DATA_READ)
- mmci_start_data(host, mrq->data);
+ if (mrq->data->flags & MMC_DATA_READ) {
+ mmci_setup_datactrl(host, mrq->data);
+ if (!dmaprep_after_cmd)
+ mmci_start_data(host, mrq->data);
+ }
+ }
mmci_start_command(host, mrq->cmd, 0);
+ if (mrq->data && dmaprep_after_cmd) {
+ mmci_dma_prep_data(host, mrq->data);
+
+ if (mrq->data->flags & MMC_DATA_READ)
+ mmci_start_data(host, mrq->data);
+ }
+
spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
u32 pwr = 0;
unsigned long flags;
int ret;
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ if (host->plat->ios_handler &&
+ host->plat->ios_handler(mmc_dev(mmc), ios))
+ dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
+
switch (ios->power_mode) {
case MMC_POWER_OFF:
if (host->vcc)
@@ -1035,22 +1193,38 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
* power should be rare so we print an error
* and return here.
*/
- return;
+ goto out;
}
}
- if (host->plat->vdd_handler)
- pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
- ios->power_mode);
- /* The ST version does not have this, fall through to POWER_ON */
- if (host->hw_designer != AMBA_VENDOR_ST) {
- pwr |= MCI_PWR_UP;
- break;
- }
+ /*
+ * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
+ * and instead uses MCI_PWR_ON, so apply whatever value is
+ * configured in the variant data.
+ */
+ pwr |= variant->pwrreg_powerup;
+
+ break;
case MMC_POWER_ON:
pwr |= MCI_PWR_ON;
break;
}
+ if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
+ /*
+ * The ST Micro variant has some additional bits
+ * indicating signal direction for the signals in
+ * the SD/MMC bus and feedback-clock usage.
+ */
+ pwr |= host->plat->sigdir;
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ pwr &= ~MCI_ST_DATA74DIREN;
+ else if (ios->bus_width == MMC_BUS_WIDTH_1)
+ pwr &= (~MCI_ST_DATA74DIREN &
+ ~MCI_ST_DATA31DIREN &
+ ~MCI_ST_DATA2DIREN);
+ }
+
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
if (host->hw_designer != AMBA_VENDOR_ST)
pwr |= MCI_ROD;
@@ -1066,13 +1240,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_irqsave(&host->lock, flags);
mmci_set_clkreg(host, ios->clock);
-
- if (host->pwr != pwr) {
- host->pwr = pwr;
- writel(pwr, host->base + MMCIPOWER);
- }
+ mmci_write_pwrreg(host, pwr);
spin_unlock_irqrestore(&host->lock, flags);
+
+ out:
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int mmci_get_ro(struct mmc_host *mmc)
@@ -1107,6 +1281,21 @@ static int mmci_get_cd(struct mmc_host *mmc)
return status;
}
+static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ if (host->plat->ios_handler) {
+ pm_runtime_get_sync(mmc_dev(mmc));
+ ret = host->plat->ios_handler(mmc_dev(mmc), ios);
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+ }
+
+ return ret;
+}
+
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
@@ -1123,6 +1312,7 @@ static const struct mmc_host_ops mmci_ops = {
.set_ios = mmci_set_ios,
.get_ro = mmci_get_ro,
.get_cd = mmci_get_cd,
+ .start_signal_voltage_switch = mmci_sig_volt_switch,
};
static int __devinit mmci_probe(struct amba_device *dev,
@@ -1250,6 +1440,9 @@ static int __devinit mmci_probe(struct amba_device *dev,
mmc->caps = plat->capabilities;
mmc->caps2 = plat->capabilities2;
+ /* We support these PM capabilities. */
+ mmc->pm_caps = MMC_PM_KEEP_POWER;
+
/*
* We can do SGIO
*/
@@ -1319,7 +1512,8 @@ static int __devinit mmci_probe(struct amba_device *dev,
}
if ((host->plat->status || host->gpio_cd != -ENOSYS)
- && host->gpio_cd_irq < 0)
+ && host->gpio_cd_irq < 0
+ && !(mmc->caps & MMC_CAP_NONREMOVABLE))
mmc->caps |= MMC_CAP_NEEDS_POLL;
ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
@@ -1346,6 +1540,8 @@ static int __devinit mmci_probe(struct amba_device *dev,
mmci_dma_setup(host);
+ pm_runtime_set_autosuspend_delay(&dev->dev, 50);
+ pm_runtime_use_autosuspend(&dev->dev);
pm_runtime_put(&dev->dev);
mmc_add_host(mmc);
@@ -1430,43 +1626,153 @@ static int __devexit mmci_remove(struct amba_device *dev)
return 0;
}
-#ifdef CONFIG_PM
-static int mmci_suspend(struct amba_device *dev, pm_message_t state)
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_PM_RUNTIME)
+static int mmci_save(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
+ unsigned long flags;
+ struct mmc_ios ios;
int ret = 0;
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
- ret = mmc_suspend_host(mmc);
- if (ret == 0)
- writel(0, host->base + MMCIMASK0);
+ /* Let the ios_handler act on a POWER_OFF to save power. */
+ if (host->plat->ios_handler) {
+ memcpy(&ios, &mmc->ios, sizeof(struct mmc_ios));
+ ios.power_mode = MMC_POWER_OFF;
+ ret = host->plat->ios_handler(mmc_dev(mmc),
+ &ios);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * Make sure we do not get any interrupts once we have disabled
+ * the clock and the regulator, and also clear the clock and
+ * power registers.
+ */
+ writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIPOWER);
+ writel(0, host->base + MMCICLOCK);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ clk_disable(host->clk);
+ amba_vcore_disable(dev);
}
return ret;
}
-static int mmci_resume(struct amba_device *dev)
+static int mmci_restore(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
- int ret = 0;
+ unsigned long flags;
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ amba_vcore_enable(dev);
+ clk_enable(host->clk);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Restore registers and re-enable interrupts. */
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* Restore settings done by the ios_handler. */
+ if (host->plat->ios_handler)
+ host->plat->ios_handler(mmc_dev(mmc),
+ &mmc->ios);
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SUSPEND
+static int mmci_suspend(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+ int ret = 0;
+
+ if (mmc) {
+ ret = mmc_suspend_host(mmc);
+ if (ret == 0) {
+ pm_runtime_get_sync(dev);
+ mmci_save(adev);
+ amba_pclk_disable(adev);
+ }
+ }
+
+ return ret;
+}
+
+static int mmci_resume(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+ int ret = 0;
+
+ if (mmc) {
+ amba_pclk_enable(adev);
+ mmci_restore(adev);
+ pm_runtime_put(dev);
+
ret = mmc_resume_host(mmc);
}
return ret;
}
-#else
-#define mmci_suspend NULL
-#define mmci_resume NULL
#endif
+#ifdef CONFIG_PM_RUNTIME
+static int mmci_runtime_suspend(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+ int ret = 0;
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
+ if (!variant->pwrreg_ctrl_power)
+ ret = mmci_save(adev);
+ }
+
+ return ret;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+ int ret = 0;
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
+ if (!variant->pwrreg_ctrl_power)
+ ret = mmci_restore(adev);
+ }
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops mmci_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
+ SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
+};
+
static struct amba_id mmci_ids[] = {
{
.id = 0x00041180,
@@ -1512,11 +1818,10 @@ MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
.drv = {
.name = DRIVER_NAME,
+ .pm = &mmci_dev_pm_ops,
},
.probe = mmci_probe,
.remove = __devexit_p(mmci_remove),
- .suspend = mmci_suspend,
- .resume = mmci_resume,
.id_table = mmci_ids,
};
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 79e4143ab9d..5a17beafd05 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -13,16 +13,6 @@
#define MCI_PWR_ON 0x03
#define MCI_OD (1 << 6)
#define MCI_ROD (1 << 7)
-/*
- * The ST Micro version does not have ROD and reuse the voltage registers
- * for direction settings
- */
-#define MCI_ST_DATA2DIREN (1 << 2)
-#define MCI_ST_CMDDIREN (1 << 3)
-#define MCI_ST_DATA0DIREN (1 << 4)
-#define MCI_ST_DATA31DIREN (1 << 5)
-#define MCI_ST_FBCLKEN (1 << 7)
-#define MCI_ST_DATA74DIREN (1 << 8)
#define MMCICLOCK 0x004
#define MCI_CLK_ENABLE (1 << 8)
@@ -70,6 +60,13 @@
#define MCI_ST_DPSM_RWMOD (1 << 10)
#define MCI_ST_DPSM_SDIOEN (1 << 11)
/* Control register extensions in the ST Micro Ux500 versions */
+/*
+ * DMA request control is required for writes
+ * if the transfer size is not 32-byte aligned.
+ * It is also needed if the total transfer size
+ * is 32-byte aligned but any of the sg element
+ * lengths is not a multiple of 32 bytes.
+ */
#define MCI_ST_DPSM_DMAREQCTL (1 << 12)
#define MCI_ST_DPSM_DBOOTMODEEN (1 << 13)
#define MCI_ST_DPSM_BUSYMODE (1 << 14)
@@ -160,7 +157,7 @@
(MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
MCI_TXFIFOHALFEMPTYMASK)
-#define NR_SG 16
+#define NR_SG 128
struct clk;
struct variant_data;
@@ -189,7 +186,10 @@ struct mmci_host {
unsigned int mclk;
unsigned int cclk;
- u32 pwr;
+ unsigned int cclk_desired;
+ u32 pwr_reg;
+ u32 clk_reg;
+ u32 datactrl_reg;
struct mmci_platform_data *plat;
struct variant_data *variant;
diff --git a/drivers/modem/Kconfig b/drivers/modem/Kconfig
new file mode 100644
index 00000000000..be8476ed0f9
--- /dev/null
+++ b/drivers/modem/Kconfig
@@ -0,0 +1,44 @@
+config MODEM
+ bool "Modem Access Framework"
+ default y
+ help
+ Add support for the Modem Access Framework. It allows different
+ platform-specific drivers to register modem access mechanisms
+ and gives client drivers transparent access to the modem.
+
+ If unsure, say N.
+
+config MODEM_U5500_MCDD
+ tristate "Modem crash dump detection driver for STE U5500 platform"
+ depends on (UX500_SOC_DB5500 && U5500_MODEM_IRQ && MODEM)
+ default y
+ help
+ Add support for the modem crash dump detection driver
+ for the STE U5500 platform, which informs userspace
+ when a modem crash is detected.
+
+ If unsure, say N.
+
+config MODEM_U8500
+ bool "Modem Access driver for STE U8500 platform"
+ depends on MODEM
+ default n
+ help
+ Add support for the Modem Access driver on the STE U8500 platform,
+ which uses Shared Memory as the IPC mechanism between the modem
+ processor and the application processor.
+
+ If unsure, say N.
+
+source "drivers/modem/shrm/Kconfig"
+
+config MODEM_M6718
+ tristate "Modem Access driver for STE M6718 modem"
+ depends on MODEM
+ default n
+ help
+ Add support for the modem access driver for the M6718 modem.
+
+ If unsure, say N.
+
+source "drivers/modem/m6718_spi/Kconfig"
diff --git a/drivers/modem/Makefile b/drivers/modem/Makefile
new file mode 100644
index 00000000000..82921988f27
--- /dev/null
+++ b/drivers/modem/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MODEM) := modem_access.o
+obj-$(CONFIG_MODEM_U8500) += modem_u8500.o
+obj-$(CONFIG_U8500_SHRM) += shrm/
+obj-$(CONFIG_MODEM_M6718) += modem_m6718.o
+obj-$(CONFIG_MODEM_M6718_SPI) += m6718_spi/
+obj-$(CONFIG_MODEM_U5500_MCDD) += mcdd.o
diff --git a/drivers/modem/m6718_spi/Kconfig b/drivers/modem/m6718_spi/Kconfig
new file mode 100644
index 00000000000..f945d24a094
--- /dev/null
+++ b/drivers/modem/m6718_spi/Kconfig
@@ -0,0 +1,83 @@
+#
+# M6718 modem SPI IPC driver kernel configuration
+#
+config MODEM_M6718_SPI
+ tristate "M6718 modem IPC SPI driver"
+ depends on MODEM_M6718
+ default y
+ ---help---
+ If you say Y here, you will enable the M6718 modem IPC SPI driver.
+
+ If unsure, say Y.
+
+config MODEM_M6718_SPI_DEBUG
+ boolean "Modem driver debug"
+ depends on MODEM_M6718_SPI
+ default n
+ ---help---
+ If you say Y here, you will enable full debug trace from the M6718
+ modem driver. This should not be enabled by default.
+
+ If unsure, say N.
+
+config MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
+ boolean "M6718 modem state driver integration"
+ depends on MODEM_M6718_SPI
+ default y
+ ---help---
+ Enables integration of the IPC driver with the modem state driver.
+ This allows the IPC driver to be notified of changes in modem state
+ (on, off, reset) and allows the IPC driver to cause modem state
+ changes if needed.
+
+ By default this should be enabled.
+
+config MODEM_M6718_SPI_ENABLE_FEATURE_FRAME_DUMP
+ boolean "IPC SPI L1 frame dump"
+ depends on MODEM_M6718_SPI
+ default n
+ ---help---
+ If you say Y here, you will enable dumping of the raw TX and RX frames
+ by the IPC driver L1.
+
+ If unsure, say N.
+
+config MODEM_M6718_SPI_ENABLE_FEATURE_LOOPBACK
+ boolean "Modem IPC loopback support"
+ depends on MODEM_M6718_SPI
+ default y
+ ---help---
+ If you say Y here, you will enable the IPC loopback channels/devices.
+
+ If unsure, say Y.
+
+
+config MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
+ boolean "Verify loopback frames"
+ depends on MODEM_M6718_SPI
+ default n
+ ---help---
+ This will enable checking of loopback frames to verify that the data
+ received is identical to the data sent.
+
+ If unsure, say N.
+
+config MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ boolean "Modem IPC throughput measurement"
+ depends on MODEM_M6718_SPI
+ default n
+ ---help---
+ If you say Y here, you will enable the IPC link throughput
+ measurement and reporting.
+
+ If unsure, say N.
+
+config MODEM_M6718_SPI_SET_THROUGHPUT_FREQUENCY
+ int "Sample rate for throughput measurements (seconds)"
+ default "5"
+ depends on MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ help
+ The sample frequency for taking IPC SPI link throughput measurements.
+ Increasing the rate (reducing the time) will increase the accuracy of
+ the measurements, but will also increase the impact on link and system
+ performance.
diff --git a/drivers/modem/m6718_spi/Makefile b/drivers/modem/m6718_spi/Makefile
new file mode 100644
index 00000000000..a0a82c30b07
--- /dev/null
+++ b/drivers/modem/m6718_spi/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for M6718 SPI driver
+#
+ifeq ($(CONFIG_MODEM_M6718_SPI_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+m6718_modem_spi-objs := modem_driver.o protocol.o util.o queue.o debug.o \
+ netlink.o statemachine.o
+
+ifeq ($(CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE),y)
+m6718_modem_spi-objs += modem_state.o
+endif
+
+obj-$(CONFIG_MODEM_M6718_SPI) += m6718_modem_spi.o
diff --git a/drivers/modem/m6718_spi/debug.c b/drivers/modem/m6718_spi/debug.c
new file mode 100644
index 00000000000..522a37163c1
--- /dev/null
+++ b/drivers/modem/m6718_spi/debug.c
@@ -0,0 +1,490 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010,2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * U9500 <-> M6718 IPC protocol implementation using SPI:
+ * debug functionality.
+ */
+#include <linux/gpio.h>
+#include <linux/modem/m6718_spi/modem_driver.h>
+#include "modem_debug.h"
+#include "modem_private.h"
+#include "modem_util.h"
+#include "modem_queue.h"
+
+/* name of each state - must match enum ipc_sm_state_id */
+static const char * const sm_state_id_str[] = {
+ "IPC_INIT",
+ "IPC_HALT",
+ "IPC_RESET",
+ "IPC_WAIT_SLAVE_STABLE",
+ "IPC_WAIT_HANDSHAKE_INACTIVE",
+ "IPC_SLW_TX_BOOTREQ",
+ "IPC_ACT_TX_BOOTREQ",
+ "IPC_SLW_RX_BOOTRESP",
+ "IPC_ACT_RX_BOOTRESP",
+ "IPC_IDL",
+ "IPC_SLW_TX_WR_CMD",
+ "IPC_ACT_TX_WR_CMD",
+ "IPC_SLW_TX_WR_DAT",
+ "IPC_ACT_TX_WR_DAT",
+ "IPC_SLW_TX_RD_CMD",
+ "IPC_ACT_TX_RD_CMD",
+ "IPC_SLW_RX_WR_CMD",
+ "IPC_ACT_RX_WR_CMD",
+ "IPC_ACT_RX_WR_DAT",
+ "IPC_INIT_AUD",
+ "IPC_HALT_AUD",
+ "IPC_RESET_AUD",
+ "IPC_IDL_AUD",
+ "IPC_SLW_TX_WR_DAT_AUD",
+ "IPC_ACT_TX_WR_DAT_AUD",
+ "IPC_SLW_RX_WR_DAT_AUD",
+ "IPC_ACT_RX_WR_DAT_AUD",
+};
+
+/* name of each state machine run cause */
+static const char * const sm_run_cause_str[] = {
+ [IPC_SM_RUN_NONE] = "IPC_SM_RUN_NONE",
+ [IPC_SM_RUN_SLAVE_IRQ] = "IPC_SM_RUN_SLAVE_IRQ",
+ [IPC_SM_RUN_TFR_COMPLETE] = "IPC_SM_RUN_TFR_COMPLETE",
+ [IPC_SM_RUN_TX_REQ] = "IPC_SM_RUN_TX_REQ",
+ [IPC_SM_RUN_INIT] = "IPC_SM_RUN_INIT",
+ [IPC_SM_RUN_ABORT] = "IPC_SM_RUN_ABORT",
+ [IPC_SM_RUN_COMMS_TMO] = "IPC_SM_RUN_COMMS_TMO",
+ [IPC_SM_RUN_STABLE_TMO] = "IPC_SM_RUN_STABLE_TMO",
+ [IPC_SM_RUN_RESET] = "IPC_SM_RUN_RESET"
+};
+
+
+#if defined DUMP_SPI_TFRS || \
+ defined CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_FRAME_DUMP
+static const char *format_buf(const void *buffer, int len)
+{
+ static char dumpbuf[6000];
+ char *wr = dumpbuf;
+ const char *rd = buffer;
+ int maxlen = min(len, (int)(sizeof(dumpbuf) / 3));
+ int i;
+
+ for (i = 0 ; i < maxlen ; i++) {
+ sprintf(wr, "%02x ", rd[i]);
+ wr += 3;
+ }
+ return dumpbuf;
+}
+#endif
+
+void ipc_dbg_dump_frame(struct device *dev, int linkid,
+ struct ipc_tx_queue *frame, bool tx)
+{
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_FRAME_DUMP
+ if (frame->actual_len == 0)
+ return;
+
+ /*
+ * Use printk(KERN_DEBUG... directly to ensure these are printed even
+ * when DEBUG is not defined for this device - we want to be able to
+ * dump the frames independently of the debug logging.
+ */
+ printk(KERN_DEBUG "IPC link%d %s %3d %4d bytes:%s\n",
+ linkid, (tx ? "TX" : "RX"), frame->counter, frame->len,
+ format_buf(frame->data, frame->len));
+#endif
+}
+
+void ipc_dbg_dump_spi_tfr(struct ipc_link_context *context)
+{
+#ifdef DUMP_SPI_TFRS
+ struct spi_transfer *tfr = &context->spi_transfer;
+ struct spi_message *msg = &context->spi_message;
+
+ if (tfr->tx_buf != NULL)
+ dev_info(&context->sdev->dev, "link%d TX %4d bytes:%s\n",
+ context->link->id, msg->actual_length,
+ format_buf(tfr->tx_buf, msg->actual_length));
+
+ if (tfr->rx_buf != NULL)
+ dev_info(&context->sdev->dev, "link%d RX %4d bytes:%s\n",
+ context->link->id, msg->actual_length,
+ format_buf(tfr->rx_buf, msg->actual_length));
+#endif
+}
+
+const char *ipc_dbg_state_id(const struct ipc_sm_state *state)
+{
+ if (state == NULL)
+ return "(unknown)";
+ else
+ return sm_state_id_str[state->id];
+}
+
+const char *ipc_dbg_event(u8 event)
+{
+ return sm_run_cause_str[event];
+}
+
+char *ipc_dbg_link_state_str(struct ipc_link_context *context)
+{
+ char *statestr;
+ int ss_pin;
+ int int_pin;
+ int min_free_pc;
+
+ if (context == NULL)
+ return NULL;
+
+ statestr = kmalloc(500, GFP_ATOMIC);
+ if (statestr == NULL)
+ return NULL;
+
+ ss_pin = gpio_get_value(context->link->gpio.ss_pin);
+ int_pin = gpio_get_value(context->link->gpio.int_pin);
+ min_free_pc = context->tx_q_min > 0 ?
+ (context->tx_q_min * 100) / IPC_TX_QUEUE_MAX_SIZE :
+ 0;
+
+ sprintf(statestr,
+ "state=%s (for %lus)\n"
+ "ss=%s(%d)\n"
+ "int=%s(%d)\n"
+ "lastevent=%s\n"
+ "lastignored=%s in %s (ignoredinthis=%d)\n"
+ "tx_q_min=%d(%d%%)\n"
+ "tx_q_count=%d\n"
+ "lastcmd=0x%08x (type %d count %d len %d)\n",
+ sm_state_id_str[context->state->id],
+ (jiffies - context->statesince) / HZ,
+ ss_pin == ipc_util_ss_level_active(context) ?
+ "ACTIVE" : "INACTIVE",
+ ss_pin,
+ int_pin == ipc_util_int_level_active(context) ?
+ "ACTIVE" : "INACTIVE",
+ int_pin,
+ sm_run_cause_str[context->lastevent],
+ sm_run_cause_str[context->lastignored],
+ sm_state_id_str[context->lastignored_in],
+ context->lastignored_inthis,
+ context->tx_q_min,
+ min_free_pc,
+ atomic_read(&context->tx_q_count),
+ context->cmd,
+ ipc_util_get_l1_cmd(context->cmd),
+ ipc_util_get_l1_counter(context->cmd),
+ ipc_util_get_l1_length(context->cmd));
+ return statestr;
+}
+
+void ipc_dbg_verify_rx_frame(struct ipc_link_context *context)
+{
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
+ int i;
+ u8 *last;
+ u8 *curr;
+ bool good = true;
+
+ if (context->last_frame == NULL)
+ return;
+
+ if (context->last_frame->actual_len != context->frame->actual_len) {
+ dev_err(&context->sdev->dev,
+ "link %d error: loopback frame length error, "
+ "TX %d RX %d\n",
+ context->link->id,
+ context->last_frame->actual_len,
+ context->frame->actual_len);
+ good = false;
+ goto out;
+ }
+
+ last = (u8 *)context->last_frame->data;
+ curr = (u8 *)context->frame->data;
+
+ /* compare payload bytes only - any padding is not verified */
+ for (i = 0; i < context->last_frame->actual_len; i++) {
+ if (last[i] != curr[i]) {
+ dev_err(&context->sdev->dev,
+ "link %d bad byte %05d: "
+ "TX %02x RX %02x\n",
+ context->link->id,
+ i,
+ last[i],
+ curr[i]);
+ good = false;
+ }
+ }
+
+out:
+ if (!good)
+ dev_info(&context->sdev->dev,
+ "link %d error: loopback frame verification failed!\n",
+ context->link->id);
+
+ ipc_queue_delete_frame(context->last_frame);
+ context->last_frame = NULL;
+#endif
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int debugfs_linkstate_open(struct inode *inode, struct file *file);
+static int debugfs_linkstate_show(struct seq_file *s, void *data);
+
+static int debugfs_msr_open(struct inode *inode, struct file *file);
+static int debugfs_msr_show(struct seq_file *s, void *data);
+static ssize_t debugfs_msr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations debugfs_fops = {
+ .open = debugfs_linkstate_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static const struct file_operations debugfs_msr_fops = {
+ .open = debugfs_msr_open,
+ .read = seq_read,
+ .write = debugfs_msr_write,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static int debugfs_linkstate_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_linkstate_show, inode->i_private);
+}
+
+static int debugfs_linkstate_show(struct seq_file *s, void *data)
+{
+ struct ipc_link_context *context = s->private;
+ char *statestr;
+
+ if (context == NULL) {
+ seq_printf(s, "invalid context\n");
+ return 0;
+ }
+
+ statestr = ipc_dbg_link_state_str(context);
+ if (statestr == NULL) {
+ seq_printf(s, "unable to get link state string\n");
+ return 0;
+ }
+
+ seq_printf(s, "%s:\n%s", context->link->name, statestr);
+ kfree(statestr);
+ return 0;
+}
+
+static int debugfs_msr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_msr_show, inode->i_private);
+}
+
+static int debugfs_msr_show(struct seq_file *s, void *data)
+{
+ struct ipc_l1_context *context = s->private;
+
+ if (context == NULL) {
+ seq_printf(s, "invalid context\n");
+ return 0;
+ }
+
+ seq_printf(s, "msr %s\n",
+ context->msr_disable ? "disabled" : "enabled");
+ return 0;
+}
+
+static ssize_t debugfs_msr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[128];
+ int buf_size;
+
+ /* get user space string and assure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = 0;
+
+ if (buf[0] == '0' || buf[0] == 'd') {
+ pr_info("disabling msr\n");
+ l1_context.msr_disable = true;
+ } else if (buf[0] == '1' || buf[0] == 'e') {
+ pr_info("enabling msr\n");
+ l1_context.msr_disable = false;
+ } else {
+ pr_info("unknown request\n");
+ }
+
+ return buf_size;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+void ipc_dbg_debugfs_init(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ /* create debugfs directory entry for ipc in debugfs root */
+ l1_context.debugfsdir = debugfs_create_dir("modemipc", NULL);
+ l1_context.debugfs_silentreset =
+ debugfs_create_file("msrenable", S_IRUSR | S_IWUSR,
+ l1_context.debugfsdir, &l1_context, &debugfs_msr_fops);
+ if (l1_context.debugfs_silentreset == NULL)
+ pr_err("failed to create debugfs MSR control file\n");
+#endif
+}
+
+void ipc_dbg_debugfs_link_init(struct ipc_link_context *context)
+{
+#ifdef CONFIG_DEBUG_FS
+ context->debugfsfile = NULL;
+ context->lastevent = IPC_SM_RUN_NONE;
+ context->lastignored = IPC_SM_RUN_NONE;
+ context->lastignored_in = IPC_SM_IDL;
+ context->lastignored_inthis = false;
+ context->tx_q_min = IPC_TX_QUEUE_MAX_SIZE;
+ context->statesince = 0;
+
+ if (l1_context.debugfsdir != NULL) {
+ context->debugfsfile =
+ debugfs_create_file(context->link->name, S_IRUGO,
+ l1_context.debugfsdir, context, &debugfs_fops);
+ if (context->debugfsfile == NULL)
+ dev_err(&context->sdev->dev,
+ "link %d: failed to create debugfs file %s\n",
+ context->link->id,
+ context->link->name);
+ }
+#endif
+}
+
+void ipc_dbg_ignoring_event(struct ipc_link_context *context, u8 event)
+{
+#ifdef CONFIG_DEBUG_FS
+ context->lastignored = event;
+ context->lastignored_in = context->state->id;
+ context->lastignored_inthis = true;
+#endif
+}
+
+void ipc_dbg_handling_event(struct ipc_link_context *context, u8 event)
+{
+#ifdef CONFIG_DEBUG_FS
+ context->lastevent = event;
+ context->lastignored_inthis = false;
+#endif
+}
+
+void ipc_dbg_entering_state(struct ipc_link_context *context)
+{
+#ifdef CONFIG_DEBUG_FS
+ context->statesince = jiffies;
+#endif
+}
+
+void ipc_dbg_enter_idle(struct ipc_link_context *context)
+{
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ context->idl_idle_enter = jiffies;
+#endif
+}
+
+void ipc_dbg_exit_idle(struct ipc_link_context *context)
+{
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ context->idl_idle_total += jiffies - context->idl_idle_enter;
+#endif
+}
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+static int measure_usage(struct ipc_link_context *context)
+{
+ unsigned long now = jiffies;
+ unsigned long idle;
+ unsigned long total;
+
+ if (ipc_util_link_is_idle(context))
+ ipc_dbg_exit_idle(context);
+
+ idle = context->idl_idle_total;
+ total = now - context->idl_measured_at;
+
+ context->idl_measured_at = now;
+ context->idl_idle_total = 0;
+ if (ipc_util_link_is_idle(context))
+ context->idl_idle_enter = now;
+
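+ /* busy percentage over the measurement window: 100% minus idle share */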
+ return 100 - ((idle * 100) / total);
+}
+#endif
+
+void ipc_dbg_measure_throughput(unsigned long unused)
+{
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ u32 tx_bps_0, tx_bps_1;
+ u32 rx_bps_0, rx_bps_1;
+ int pc0 = 0, pc1 = 0; /* pc1 stays 0 if only one link is configured */
+
+ tx_bps_0 = tx_bps_1 = 0;
+ rx_bps_0 = rx_bps_1 = 0;
+
+ /* link0 */
+ tx_bps_0 = (l1_context.device_context[0].tx_bytes * 8) /
+ CONFIG_MODEM_M6718_SPI_SET_THROUGHPUT_FREQUENCY;
+ rx_bps_0 = (l1_context.device_context[0].rx_bytes * 8) /
+ CONFIG_MODEM_M6718_SPI_SET_THROUGHPUT_FREQUENCY;
+ l1_context.device_context[0].tx_bytes = 0;
+ l1_context.device_context[0].rx_bytes = 0;
+ pc0 = measure_usage(&l1_context.device_context[0]);
+#if IPC_NBR_SUPPORTED_SPI_LINKS > 1
+ /* link1 */
+ tx_bps_1 = (l1_context.device_context[1].tx_bytes * 8) /
+ CONFIG_MODEM_M6718_SPI_SET_THROUGHPUT_FREQUENCY;
+ rx_bps_1 = (l1_context.device_context[1].rx_bytes * 8) /
+ CONFIG_MODEM_M6718_SPI_SET_THROUGHPUT_FREQUENCY;
+ l1_context.device_context[1].tx_bytes = 0;
+ l1_context.device_context[1].rx_bytes = 0;
+ pc1 = measure_usage(&l1_context.device_context[1]);
+#endif
+
+ pr_info("IPC THROUGHPUT (bit/s): "
+ "link0 TX:%8d RX:%8d %3d%% "
+ "link1 TX:%8d RX:%8d %3d%%\n",
+ tx_bps_0, rx_bps_0, pc0,
+ tx_bps_1, rx_bps_1, pc1);
+
+ /* restart the measurement timer */
+ l1_context.tp_timer.expires = jiffies +
+ (CONFIG_MODEM_M6718_SPI_SET_THROUGHPUT_FREQUENCY * HZ);
+ add_timer(&l1_context.tp_timer);
+#endif
+}
+
+void ipc_dbg_throughput_init(void)
+{
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ pr_info("M6718 IPC throughput measurement interval: %d\n",
+ CONFIG_MODEM_M6718_SPI_SET_THROUGHPUT_FREQUENCY);
+ /* init the throughput measurement timer */
+ init_timer(&l1_context.tp_timer);
+ l1_context.tp_timer.function = ipc_dbg_measure_throughput;
+ l1_context.tp_timer.data = 0;
+#endif
+}
+
+void ipc_dbg_throughput_link_init(struct ipc_link_context *context)
+{
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ context->tx_bytes = 0;
+ context->rx_bytes = 0;
+ context->idl_measured_at = jiffies;
+ context->idl_idle_enter = 0;
+ context->idl_idle_total = 0;
+#endif
+}
+
diff --git a/drivers/modem/m6718_spi/modem_debug.h b/drivers/modem/m6718_spi/modem_debug.h
new file mode 100644
index 00000000000..9a2fa39acb4
--- /dev/null
+++ b/drivers/modem/m6718_spi/modem_debug.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Modem IPC driver protocol interface header:
+ * debug functionality.
+ */
+#ifndef _MODEM_DEBUG_H_
+#define _MODEM_DEBUG_H_
+
+#include "modem_private.h"
+
+void ipc_dbg_dump_frame(struct device *dev, int linkid,
+ struct ipc_tx_queue *frame, bool tx);
+void ipc_dbg_dump_spi_tfr(struct ipc_link_context *context);
+const char *ipc_dbg_state_id(const struct ipc_sm_state *state);
+const char *ipc_dbg_event(u8 event);
+char *ipc_dbg_link_state_str(struct ipc_link_context *context);
+void ipc_dbg_verify_rx_frame(struct ipc_link_context *context);
+
+void ipc_dbg_debugfs_init(void);
+void ipc_dbg_debugfs_link_init(struct ipc_link_context *context);
+
+void ipc_dbg_ignoring_event(struct ipc_link_context *context, u8 event);
+void ipc_dbg_handling_event(struct ipc_link_context *context, u8 event);
+void ipc_dbg_entering_state(struct ipc_link_context *context);
+void ipc_dbg_enter_idle(struct ipc_link_context *context);
+void ipc_dbg_exit_idle(struct ipc_link_context *context);
+void ipc_dbg_measure_throughput(unsigned long unused);
+void ipc_dbg_throughput_init(void);
+void ipc_dbg_throughput_link_init(struct ipc_link_context *context);
+
+#endif /* _MODEM_DEBUG_H_ */
diff --git a/drivers/modem/m6718_spi/modem_driver.c b/drivers/modem/m6718_spi/modem_driver.c
new file mode 100644
index 00000000000..8086e97aa7c
--- /dev/null
+++ b/drivers/modem/m6718_spi/modem_driver.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ * based on modem_shrm_driver.c
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * SPI driver implementing the M6718 inter-processor communication protocol.
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/modem/modem_client.h>
+#include <linux/modem/m6718_spi/modem_driver.h>
+#include <linux/modem/m6718_spi/modem_net.h>
+#include <linux/modem/m6718_spi/modem_char.h>
+#include "modem_protocol.h"
+
+#ifdef CONFIG_PHONET
+static void phonet_rcv_tasklet_func(unsigned long);
+static struct tasklet_struct phonet_rcv_tasklet;
+#endif
+
+static struct modem_spi_dev modem_driver_data = {
+ .dev = NULL,
+ .ndev = NULL,
+ .modem = NULL,
+ .isa_context = NULL,
+ .netdev_flag_up = 0
+};
+
+/**
+ * modem_m6718_spi_receive() - Receive a frame from L1 physical layer
+ * @sdev: pointer to spi device structure
+ * @channel: L2 mux channel id
+ * @len: frame data length
+ * @data: pointer to frame data
+ *
+ * This function is called from the driver L1 physical transport layer. It
+ * copies the frame data to the receive queue for the channel on which the data
+ * was received.
+ *
+ * Special handling is given to slave-loopback channels where the data is simply
+ * sent back to the modem on the same channel.
+ *
+ * Special handling is given to the ISI channel when PHONET is enabled - the
+ * phonet tasklet is scheduled in order to pump the received data through the
+ * net device interface.
+ */
+int modem_m6718_spi_receive(struct spi_device *sdev, u8 channel,
+ u32 len, void *data)
+{
+ u32 size = 0;
+ int ret = 0;
+ int idx;
+ u8 *psrc;
+ u32 writeptr;
+ struct message_queue *q;
+ struct isa_device_context *isadev;
+
+ dev_dbg(&sdev->dev, "L2 received frame from L1: channel %d len %d\n",
+ channel, len);
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_LOOPBACK
+ if (channel == MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK0 ||
+ channel == MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK1) {
+ /* data received on slave loopback channel - loop it back */
+ modem_m6718_spi_send(&modem_driver_data, channel, len, data);
+ return 0;
+ }
+#endif
+
+ /* find the isa device index for this L2 channel */
+ idx = modem_get_cdev_index(channel);
+ if (idx < 0) {
+ dev_err(&sdev->dev, "failed to get isa device index\n");
+ return idx;
+ }
+ isadev = &modem_driver_data.isa_context->isadev[idx];
+ q = &isadev->dl_queue;
+
+ spin_lock(&q->update_lock);
+
+ /* verify message can be contained in buffer */
+ writeptr = q->writeptr;
+ ret = modem_isa_queue_msg(q, len);
+ if (ret >= 0) {
+ /* memcopy RX data */
+ if ((writeptr + len) >= q->size) {
+ psrc = (u8 *)data;
+ size = q->size - writeptr;
+ /* copy first part of msg */
+ memcpy((q->fifo_base + writeptr), psrc, size);
+ psrc += size;
+ /* copy second part of msg at the top of fifo */
+ memcpy(q->fifo_base, psrc, (len - size));
+ } else {
+ memcpy((q->fifo_base + writeptr), data, len);
+ }
+ }
+ spin_unlock(&q->update_lock);
+
+ if (ret < 0) {
+ dev_err(&sdev->dev, "failed to queue frame!");
+ return ret;
+ }
+
+#ifdef CONFIG_PHONET
+ if (channel == MODEM_M6718_SPI_CHN_ISI &&
+ modem_driver_data.netdev_flag_up)
+ tasklet_schedule(&phonet_rcv_tasklet);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL_GPL(modem_m6718_spi_receive);
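+
+/*
+ * Illustrative example only (hypothetical caller, not part of this patch):
+ * the L1 transport layer is expected to call this once per de-muxed frame,
+ * where channel, length and data are assumed to come from the parsed L2
+ * header, e.g.
+ *
+ *	if (modem_m6718_spi_receive(sdev, channel, length, data) < 0)
+ *		dev_err(&sdev->dev, "failed to deliver frame to L2\n");
+ */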
+
+static void phonet_rcv_tasklet_func(unsigned long unused)
+{
+ ssize_t result;
+
+ dev_dbg(modem_driver_data.dev, "receiving frames for phonet\n");
+ /* continue receiving while there are frames in the queue */
+ for (;;) {
+ result = modem_net_receive(modem_driver_data.ndev);
+ if (result == 0) {
+ dev_dbg(modem_driver_data.dev,
+ "queue is empty, finished receiving\n");
+ break;
+ }
+ if (result < 0) {
+ dev_err(modem_driver_data.dev,
+ "failed to receive frame from queue!\n");
+ break;
+ }
+ }
+}
+
+static int spi_probe(struct spi_device *sdev)
+{
+ int result = 0;
+
+ spi_set_drvdata(sdev, &modem_driver_data);
+
+ if (modem_protocol_probe(sdev) != 0) {
+ dev_err(&sdev->dev,
+ "failed to initialise link protocol\n");
+ result = -ENODEV;
+ goto rollback;
+ }
+
+ /*
+ * Since we can have multiple spi links for the same modem, only
+ * initialise the modem data and char/net interfaces once.
+ */
+ if (modem_driver_data.dev == NULL) {
+ modem_driver_data.dev = &sdev->dev;
+ modem_driver_data.modem =
+ modem_get(modem_driver_data.dev, "m6718");
+ if (modem_driver_data.modem == NULL) {
+ dev_err(&sdev->dev,
+ "failed to retrieve modem description\n");
+ result = -ENODEV;
+ goto rollback_protocol_init;
+ }
+
+ result = modem_isa_init(&modem_driver_data);
+ if (result < 0) {
+ dev_err(&sdev->dev,
+ "failed to initialise char interface\n");
+ goto rollback_modem_get;
+ }
+
+ result = modem_net_init(&modem_driver_data);
+ if (result < 0) {
+ dev_err(&sdev->dev,
+ "failed to initialse net interface\n");
+ goto rollback_isa_init;
+ }
+
+#ifdef CONFIG_PHONET
+ tasklet_init(&phonet_rcv_tasklet, phonet_rcv_tasklet_func, 0);
+#endif
+ }
+ return result;
+
+rollback_isa_init:
+ modem_isa_exit(&modem_driver_data);
+rollback_modem_get:
+ modem_put(modem_driver_data.modem);
+rollback_protocol_init:
+ modem_protocol_exit();
+rollback:
+ return result;
+}
+
+static int __devexit spi_remove(struct spi_device *sdev)
+{
+ modem_protocol_exit();
+ modem_net_exit(&modem_driver_data);
+ modem_isa_exit(&modem_driver_data);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * spi_suspend() - put the IPC driver into the suspend state
+ * @sdev: pointer to the spi device structure
+ * @mesg: pm operation to perform
+ *
+ * Checks for ongoing communication with the modem and refuses to suspend
+ * while a transfer is in progress.
+ */
+static int spi_suspend(struct spi_device *sdev, pm_message_t mesg)
+{
+ bool busy;
+ int ret = -EBUSY;
+
+ dev_dbg(&sdev->dev, "suspend called\n");
+ busy = modem_protocol_is_busy(sdev);
+ if (busy) {
+ dev_warn(&sdev->dev, "suspend failed (protocol busy)\n");
+ return -EBUSY;
+ }
+ ret = modem_protocol_suspend(sdev);
+ if (ret) {
+ dev_warn(&sdev->dev, "suspend failed, (protocol suspend))\n");
+ return ret;
+ }
+ ret = modem_net_suspend(modem_driver_data.ndev);
+ if (ret) {
+ dev_warn(&sdev->dev, "suspend failed, (netdev suspend)\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * spi_resume() - resume the IPC driver from the suspend state
+ * @sdev: pointer to spi device structure
+ */
+static int spi_resume(struct spi_device *sdev)
+{
+ int ret;
+
+ dev_dbg(&sdev->dev, "resume called\n");
+ ret = modem_protocol_resume(sdev);
+ if (ret) {
+ dev_warn(&sdev->dev, "resume failed, (protocol resume))\n");
+ return ret;
+ }
+ ret = modem_net_resume(modem_driver_data.ndev);
+ if (ret) {
+ dev_warn(&sdev->dev, "resume failed, (netdev resume))\n");
+ return ret;
+ }
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct spi_driver spi_driver = {
+ .driver = {
+ .name = "spimodem",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE
+ },
+ .probe = spi_probe,
+ .remove = __devexit_p(spi_remove),
+#ifdef CONFIG_PM
+ .suspend = spi_suspend,
+ .resume = spi_resume,
+#endif
+};
+
+static int __init m6718_spi_driver_init(void)
+{
+ pr_info("M6718 modem driver initialising\n");
+ modem_protocol_init();
+ return spi_register_driver(&spi_driver);
+}
+module_init(m6718_spi_driver_init);
+
+static void __exit m6718_spi_driver_exit(void)
+{
+ pr_debug("M6718 modem SPI IPC driver exit\n");
+ spi_unregister_driver(&spi_driver);
+}
+module_exit(m6718_spi_driver_exit);
+
+MODULE_AUTHOR("Chris Blair <chris.blair@stericsson.com>");
+MODULE_DESCRIPTION("M6718 modem IPC SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/modem/m6718_spi/modem_netlink.h b/drivers/modem/m6718_spi/modem_netlink.h
new file mode 100644
index 00000000000..19e123d9b12
--- /dev/null
+++ b/drivers/modem/m6718_spi/modem_netlink.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Modem IPC driver protocol interface header:
+ * netlink related functionality.
+ */
+#ifndef _MODEM_NETLINK_H_
+#define _MODEM_NETLINK_H_
+
+#include "modem_protocol.h"
+
+bool ipc_create_netlink_socket(struct ipc_link_context *context);
+void ipc_broadcast_modem_online(struct ipc_link_context *context);
+void ipc_broadcast_modem_reset(struct ipc_link_context *context);
+
+#endif /* _MODEM_NETLINK_H_ */
diff --git a/drivers/modem/m6718_spi/modem_private.h b/drivers/modem/m6718_spi/modem_private.h
new file mode 100644
index 00000000000..10e651c01ea
--- /dev/null
+++ b/drivers/modem/m6718_spi/modem_private.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ * based on shrm_driver.h
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Modem IPC driver protocol interface header:
+ * private data
+ */
+#ifndef _MODEM_PRIVATE_H_
+#define _MODEM_PRIVATE_H_
+
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/atomic.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include "modem_protocol.h"
+#include "modem_statemachine.h"
+
+#define IPC_DRIVER_VERSION (0x03) /* APE protocol version */
+#define IPC_DRIVER_MODEM_MIN_VER (0x03) /* version required from modem */
+
+#define IPC_NBR_SUPPORTED_SPI_LINKS (2)
+#define IPC_LINK_COMMON (0)
+#define IPC_LINK_AUDIO (1)
+
+#define IPC_TX_QUEUE_MAX_SIZE (1024*1024)
+
+#define IPC_L1_HDR_SIZE (4)
+#define IPC_L2_HDR_SIZE (4)
+
+/* tx queue item (frame) */
+struct ipc_tx_queue {
+ struct list_head node;
+ int actual_len;
+ int len;
+ void *data;
+ int counter;
+};
+
+/* context structure for an spi link */
+struct ipc_link_context {
+ struct modem_m6718_spi_link_platform_data *link;
+ struct spi_device *sdev;
+ atomic_t suspended;
+ atomic_t gpio_configured;
+ atomic_t state_int;
+ spinlock_t sm_lock;
+ spinlock_t tx_q_update_lock;
+ atomic_t tx_q_count;
+ int tx_q_free;
+ struct list_head tx_q;
+ int tx_frame_counter;
+ const struct ipc_sm_state *state;
+ u32 cmd;
+ struct ipc_tx_queue *frame;
+ struct spi_message spi_message;
+ struct spi_transfer spi_transfer;
+ struct timer_list comms_timer;
+ struct timer_list slave_stable_timer;
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
+ struct ipc_tx_queue *last_frame;
+#endif
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ u32 tx_bytes;
+ u32 rx_bytes;
+ unsigned long idl_measured_at;
+ unsigned long idl_idle_enter;
+ unsigned long idl_idle_total;
+#endif
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfsfile;
+ u8 lastevent;
+ u8 lastignored;
+ enum ipc_sm_state_id lastignored_in;
+ bool lastignored_inthis;
+ int tx_q_min;
+ unsigned long statesince;
+#endif
+};
+
+/* context structure for the spi driver */
+struct ipc_l1_context {
+ bool init_done;
+ atomic_t boot_sync_done;
+ struct ipc_link_context device_context[IPC_NBR_SUPPORTED_SPI_LINKS];
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ struct timer_list tp_timer;
+#endif
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfsdir;
+ struct dentry *debugfs_silentreset;
+ bool msr_disable;
+#endif
+};
+
+extern struct ipc_l1_context l1_context;
+
+#endif /* _MODEM_PRIVATE_H_ */
diff --git a/drivers/modem/m6718_spi/modem_protocol.h b/drivers/modem/m6718_spi/modem_protocol.h
new file mode 100644
index 00000000000..751dcba1087
--- /dev/null
+++ b/drivers/modem/m6718_spi/modem_protocol.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ * based on shrm_driver.h
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Modem IPC driver protocol interface header.
+ */
+#ifndef _MODEM_PROTOCOL_H_
+#define _MODEM_PROTOCOL_H_
+
+#include <linux/spi/spi.h>
+
+void modem_protocol_init(void);
+int modem_protocol_probe(struct spi_device *sdev);
+void modem_protocol_exit(void);
+bool modem_protocol_is_busy(struct spi_device *sdev);
+bool modem_protocol_channel_is_open(u8 channel);
+int modem_protocol_suspend(struct spi_device *sdev);
+int modem_protocol_resume(struct spi_device *sdev);
+
+#endif /* _MODEM_PROTOCOL_H_ */
diff --git a/drivers/modem/m6718_spi/modem_queue.h b/drivers/modem/m6718_spi/modem_queue.h
new file mode 100644
index 00000000000..62604129945
--- /dev/null
+++ b/drivers/modem/m6718_spi/modem_queue.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Modem IPC driver protocol interface header:
+ * queue functionality.
+ */
+#ifndef _MODEM_QUEUE_H_
+#define _MODEM_QUEUE_H_
+
+void ipc_queue_init(struct ipc_link_context *context);
+void ipc_queue_delete_frame(struct ipc_tx_queue *frame);
+struct ipc_tx_queue *ipc_queue_new_frame(struct ipc_link_context *link_context,
+ u32 l2_length);
+bool ipc_queue_is_empty(struct ipc_link_context *context);
+int ipc_queue_push_frame(struct ipc_link_context *link_context, u8 l2_header,
+ u32 l2_length, void *l2_data);
+struct ipc_tx_queue *ipc_queue_get_frame(struct ipc_link_context *context);
+void ipc_queue_reset(struct ipc_link_context *context);
+
+#endif /* _MODEM_QUEUE_H_ */
diff --git a/drivers/modem/m6718_spi/modem_state.c b/drivers/modem/m6718_spi/modem_state.c
new file mode 100644
index 00000000000..47376934bcb
--- /dev/null
+++ b/drivers/modem/m6718_spi/modem_state.c
@@ -0,0 +1,1300 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Derek Morton <derek.morton@stericsson.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Power state driver for M6718 MODEM
+ */
+
+/* define DEBUG to enable debug logging */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/timer.h>
+#include <linux/gpio/nomadik.h>
+#include <plat/pincfg.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include "modem_state.h"
+
+/*
+ * To enable this driver add a struct platform_device in the board
+ * configuration file (e.g. board-*.c) with name="modemstate";
+ * optionally specify dev.init_name="m6718" to define the device
+ * name as it will appear in the file system.
+ * e.g.
+ * static struct platform_device modem_state_device =
+ * {
+ * .name = "modemstate",
+ * .dev =
+ * {
+ * .init_name = "m6718" // Name that will appear in FS
+ * },
+ * .num_resources = ARRAY_SIZE(modem_state_resources),
+ * .resource = modem_state_resources
+ * };
+ *
+ * This driver uses gpio pins which should be specified as resources,
+ * e.g.
+ * static struct resource modem_state_resources[] = .......
+ * Output pins are specified as IORESOURCE_IO
+ * Currently supported Output pins are:
+ * onkey_pin
+ * reset_pin
+ * vbat_pin
+ * Input pins are specified as IORESOURCE_IRQ
+ * Currently supported input pins are:
+ * rsthc_pin
+ * rstext_pin
+ * crash_pin
+ * Currently only the start value is used as the gpio pin number but
+ * end should also be specified as the gpio pin number in case gpio ranges
+ * are used in the future.
+ * e.g. if gpio 161 is used as the onkey pin
+ * {
+ * .start = 161,
+ * .end = 161,
+ * .name = "onkey_pin",
+ * .flags = IORESOURCE_IO,
+ * },
+ */
+
+struct modem_state_dev {
+ int onkey_pin;
+ int rsthc_pin;
+ int rstext_pin;
+ int crash_pin;
+ int reset_pin;
+ int vbat_pin;
+ int power_state;
+ int irq_state;
+ int busy;
+ struct timer_list onkey_timer;
+ struct timer_list reset_timer;
+ struct timer_list onkey_debounce_timer;
+ struct timer_list vbat_off_timer;
+ struct timer_list busy_timer;
+ spinlock_t lock;
+ struct device *dev;
+ struct workqueue_struct *workqueue;
+ struct work_struct wq_rsthc;
+ struct work_struct wq_rstext;
+ struct work_struct wq_crash;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfsdir;
+ struct dentry *debugfs_debug;
+#endif
+};
+
+struct callback_list {
+ struct list_head node;
+ int (*callback) (unsigned long);
+ unsigned long data;
+};
+static LIST_HEAD(callback_list);
+
+static char *modem_state_str[] = {
+ "off",
+ "reset",
+ "crash",
+ "on",
+ /*
+ * Add new states before error and update enum modem_states
+ * in modem_state.h
+ */
+ "error"
+};
+
+static struct modem_state_dev *modem_state;
+
+static void set_on_config(struct modem_state_dev *msdev)
+{
+ if (msdev->crash_pin)
+ nmk_config_pin(PIN_CFG(msdev->crash_pin, GPIO) |
+ PIN_INPUT_PULLDOWN, false);
+ if (msdev->rstext_pin)
+ nmk_config_pin(PIN_CFG(msdev->rstext_pin, GPIO) |
+ PIN_INPUT_PULLDOWN, false);
+ if (msdev->rsthc_pin)
+ nmk_config_pin(PIN_CFG(msdev->rsthc_pin, GPIO) |
+ PIN_INPUT_PULLDOWN, false);
+ if (msdev->reset_pin)
+ nmk_config_pin(PIN_CFG(msdev->reset_pin, GPIO) |
+ PIN_OUTPUT_HIGH, false);
+}
+
+static void set_off_config(struct modem_state_dev *msdev)
+{
+ if (msdev->crash_pin)
+ nmk_config_pin(PIN_CFG(msdev->crash_pin, GPIO) |
+ PIN_INPUT_PULLDOWN, false);
+ if (msdev->rstext_pin)
+ nmk_config_pin(PIN_CFG(msdev->rstext_pin, GPIO) |
+ PIN_OUTPUT_LOW, false);
+ if (msdev->rsthc_pin)
+ nmk_config_pin(PIN_CFG(msdev->rsthc_pin, GPIO) | PIN_OUTPUT_LOW,
+ false);
+ if (msdev->reset_pin)
+ nmk_config_pin(PIN_CFG(msdev->reset_pin, GPIO) |
+ PIN_OUTPUT_HIGH, false);
+}
+
+static void enable_irq_all(struct modem_state_dev *msdev)
+{
+ if (msdev->rsthc_pin) {
+ enable_irq(GPIO_TO_IRQ(msdev->rsthc_pin));
+ if ((0 > enable_irq_wake(GPIO_TO_IRQ(msdev->rsthc_pin))))
+ dev_err(msdev->dev,
+ "Request for wake on pin %d failed\n",
+ msdev->rsthc_pin);
+ }
+ if (msdev->rstext_pin) {
+ enable_irq(GPIO_TO_IRQ(msdev->rstext_pin));
+ if ((0 > enable_irq_wake(GPIO_TO_IRQ(msdev->rstext_pin))))
+ dev_err(msdev->dev,
+ "Request for wake on pin %d failed\n",
+ msdev->rstext_pin);
+ }
+ if (msdev->crash_pin) {
+ enable_irq(GPIO_TO_IRQ(msdev->crash_pin));
+ if ((0 > enable_irq_wake(GPIO_TO_IRQ(msdev->crash_pin))))
+ dev_err(msdev->dev,
+ "Request for wake on pin %d failed\n",
+ msdev->crash_pin);
+ }
+}
+
+static void disable_irq_all(struct modem_state_dev *msdev)
+{
+ if (msdev->rsthc_pin) {
+ disable_irq_wake(GPIO_TO_IRQ(msdev->rsthc_pin));
+ disable_irq(GPIO_TO_IRQ(msdev->rsthc_pin));
+ }
+ if (msdev->rstext_pin) {
+ disable_irq_wake(GPIO_TO_IRQ(msdev->rstext_pin));
+ disable_irq(GPIO_TO_IRQ(msdev->rstext_pin));
+ }
+ if (msdev->crash_pin) {
+ disable_irq_wake(GPIO_TO_IRQ(msdev->crash_pin));
+ disable_irq(GPIO_TO_IRQ(msdev->crash_pin));
+ }
+}
+
+/*
+ * These functions, which access GPIO, must only be called
+ * with the device spinlock held.
+ */
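+
+/*
+ * For example (illustrative pattern, as used by the exported
+ * modem_state_* helpers further down):
+ *
+ *	spin_lock_irqsave(&msdev->lock, flags);
+ *	toggle_modem_power(msdev);
+ *	spin_unlock_irqrestore(&msdev->lock, flags);
+ */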
+
+/*
+ * Toggle ONKEY pin high then low to turn modem on or off. Modem expects
+ * ONKEY line to be pulled low then high. GPIO needs to be driven high then
+ * low as logic is inverted through a transistor.
+ */
+static void toggle_modem_power(struct modem_state_dev *msdev)
+{
+ dev_info(msdev->dev, "Modem power toggle\n");
+ msdev->busy = 1;
+ gpio_set_value(msdev->onkey_pin, 1);
+ msdev->onkey_timer.data = (unsigned long)msdev;
+ /* Timeout of at least 1 second */
+ mod_timer(&msdev->onkey_timer, jiffies + (1 * HZ) + 1);
+}
+
+/* Modem is forced into reset when its reset line is pulled low */
+/* Drive GPIO low then high to reset modem */
+static void modem_reset(struct modem_state_dev *msdev)
+{
+ dev_info(msdev->dev, "Modem reset\n");
+ msdev->busy = 1;
+ gpio_set_value(msdev->reset_pin, 0);
+ msdev->reset_timer.data = (unsigned long)msdev;
+ /* Wait a couple of Jiffies */
+ mod_timer(&msdev->reset_timer, jiffies + 2);
+}
+
+static void modem_vbat_set_value(struct modem_state_dev *msdev, int vbat_val)
+{
+ switch (vbat_val) {
+ case 0:
+ msdev->power_state = 0;
+ dev_info(msdev->dev, "Modem vbat off\n");
+ gpio_set_value(msdev->vbat_pin, vbat_val);
+ if (1 == msdev->irq_state) {
+ msdev->irq_state = 0;
+ disable_irq_all(msdev);
+ set_off_config(msdev);
+ }
+ break;
+ case 1:
+ dev_info(msdev->dev, "Modem vbat on\n");
+ if (0 == msdev->irq_state) {
+ msdev->irq_state = 1;
+ set_on_config(msdev);
+ enable_irq_all(msdev);
+ }
+ gpio_set_value(msdev->vbat_pin, vbat_val);
+ break;
+ default:
+ return;
+ }
+}
+
+static void modem_power_on(struct modem_state_dev *msdev)
+{
+ int rsthc = gpio_get_value(msdev->rsthc_pin);
+ msdev->power_state = 1;
+ del_timer(&msdev->vbat_off_timer);
+ if (rsthc == 0) {
+ modem_vbat_set_value(msdev, 1);
+ toggle_modem_power(msdev);
+ }
+}
+
+static void modem_power_off(struct modem_state_dev *msdev)
+{
+ int rsthc = gpio_get_value(msdev->rsthc_pin);
+
+ msdev->power_state = 0;
+ if (rsthc == 1) {
+ toggle_modem_power(msdev);
+ /* Cut power to modem after 10 seconds */
+ msdev->vbat_off_timer.data = (unsigned long)msdev;
+ mod_timer(&msdev->vbat_off_timer, jiffies + (10 * HZ));
+ }
+}
+/* End of functions requiring spinlock */
+
+static void call_callbacks(void)
+{
+ struct callback_list *item;
+
+ list_for_each_entry(item, &callback_list, node)
+ item->callback(item->data);
+}
+
+static int get_modem_state(struct modem_state_dev *msdev)
+{
+ int state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ if (0 == gpio_get_value(msdev->rsthc_pin))
+ state = MODEM_STATE_OFF;
+ else if (0 == gpio_get_value(msdev->rstext_pin))
+ state = MODEM_STATE_RESET;
+ else if (1 == gpio_get_value(msdev->crash_pin))
+ state = MODEM_STATE_CRASH;
+ else
+ state = MODEM_STATE_ON;
+ spin_unlock_irqrestore(&msdev->lock, flags);
+
+ return state;
+}
+
+/* modempower read handler */
+static ssize_t modem_state_power_get(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rsthc;
+ int power_state;
+ unsigned long flags;
+ struct modem_state_dev *msdev =
+ platform_get_drvdata(to_platform_device(dev));
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ rsthc = gpio_get_value(msdev->rsthc_pin);
+ power_state = msdev->power_state;
+ spin_unlock_irqrestore(&msdev->lock, flags);
+
+ return sprintf(buf, "state=%d, expected=%d\n", rsthc, power_state);
+}
+
+/*
+ * modempower write handler
+ * Write '0' to /sys/devices/platform/modemstate/modempower to turn modem off
+ * Write '1' to /sys/devices/platform/modemstate/modempower to turn modem on
+ */
+static ssize_t modem_state_power_set(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long flags;
+ int ret = count;
+ struct modem_state_dev *msdev =
+ platform_get_drvdata(to_platform_device(dev));
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ if (msdev->busy) {
+ ret = -EAGAIN;
+ } else if (count > 0) {
+ switch (buf[0]) {
+ case '0':
+ modem_power_off(msdev);
+ break;
+ case '1':
+ modem_power_on(msdev);
+ break;
+ default:
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&msdev->lock, flags);
+ return ret;
+}
+
+/* reset read handler */
+static ssize_t modem_state_reset_get(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rstext;
+ struct modem_state_dev *msdev =
+ platform_get_drvdata(to_platform_device(dev));
+
+ /* No need for spinlocks here as there is only 1 value */
+ rstext = gpio_get_value(msdev->rstext_pin);
+
+ return sprintf(buf, "state=%d\n", rstext);
+}
+
+/* reset write handler */
+/* Write '1' to /sys/devices/platform/modemstate/reset to reset modem */
+static ssize_t modem_state_reset_set(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long flags;
+ int ret = count;
+ struct modem_state_dev *msdev =
+ platform_get_drvdata(to_platform_device(dev));
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ if (msdev->busy) {
+ ret = -EAGAIN;
+ } else if (count > 0) {
+ if (buf[0] == '1')
+ modem_reset(msdev);
+ }
+ spin_unlock_irqrestore(&msdev->lock, flags);
+
+ return ret;
+}
+
+/* crash read handler */
+static ssize_t modem_state_crash_get(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int crash;
+ struct modem_state_dev *msdev =
+ platform_get_drvdata(to_platform_device(dev));
+
+ /* No need for spinlocks here as there is only 1 value */
+ crash = gpio_get_value(msdev->crash_pin);
+
+ return sprintf(buf, "state=%d\n", crash);
+}
+
+/* state read handler */
+static ssize_t modem_state_state_get(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int state;
+ struct modem_state_dev *msdev =
+ platform_get_drvdata(to_platform_device(dev));
+
+ state = get_modem_state(msdev);
+ if (state > MODEM_STATE_END_MARKER)
+ state = MODEM_STATE_END_MARKER;
+
+ return sprintf(buf, "%s\n", modem_state_str[state]);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int modem_state_debug_get(struct seq_file *s, void *data)
+{
+ int onkey;
+ int rsthc;
+ int rstext;
+ int reset;
+ int crash;
+ int vbat;
+ unsigned long flags;
+ struct modem_state_dev *msdev = s->private;
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ onkey = gpio_get_value(msdev->onkey_pin);
+ rsthc = gpio_get_value(msdev->rsthc_pin);
+ rstext = gpio_get_value(msdev->rstext_pin);
+ reset = gpio_get_value(msdev->reset_pin);
+ crash = gpio_get_value(msdev->crash_pin);
+ vbat = gpio_get_value(msdev->vbat_pin);
+ spin_unlock_irqrestore(&msdev->lock, flags);
+
+ seq_printf(s, "onkey=%d, rsthc=%d, rstext=%d, "
+ "reset=%d, crash=%d, vbat=%d\n",
+ onkey, rsthc, rstext, reset, crash, vbat);
+ return 0;
+}
+
+/*
+ * debug write handler for the debugfs file <debugfs>/modemstate/debug
+ * Write o['0'|'1'] to set the onkey line low or high.
+ * Write r['0'|'1'] to set the reset line low or high.
+ * Write v['0'|'1'] to set the vbat line low or high.
+ */
+static ssize_t modem_state_debug_set(struct file *file,
+ const char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned long flags;
+ int bufsize;
+ char buf[128];
+
+ bufsize = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, bufsize))
+ return -EFAULT;
+ buf[bufsize] = 0;
+
+ spin_lock_irqsave(&modem_state->lock, flags);
+ if (modem_state->busy) {
+ spin_unlock_irqrestore(&modem_state->lock, flags);
+ return -EAGAIN;
+ } else if (count > 1) {
+ switch (buf[1]) {
+ case '0': /* fallthrough */
+ case '1':
+ switch (buf[0]) {
+ case 'o':
+ gpio_set_value(modem_state->onkey_pin,
+ buf[1] - '0');
+ break;
+ case 'r':
+ gpio_set_value(modem_state->reset_pin,
+ buf[1] - '0');
+ break;
+ case 'v':
+ gpio_set_value(modem_state->vbat_pin,
+ buf[1] - '0');
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&modem_state->lock, flags);
+
+ return bufsize;
+}
+
+static int modem_state_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, modem_state_debug_get, inode->i_private);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static DEVICE_ATTR(modempower, S_IRUSR | S_IWUSR,
+ modem_state_power_get, modem_state_power_set);
+static DEVICE_ATTR(reset, S_IRUSR | S_IWUSR,
+ modem_state_reset_get, modem_state_reset_set);
+static DEVICE_ATTR(crash, S_IRUSR, modem_state_crash_get, NULL);
+static DEVICE_ATTR(state, S_IRUSR, modem_state_state_get, NULL);
+
+static struct attribute *modemstate_attributes[] = {
+ &dev_attr_modempower.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_crash.attr,
+ &dev_attr_state.attr,
+ NULL
+};
+
+static struct attribute_group modemstate_attr_group = {
+ .attrs = modemstate_attributes,
+ .name = "modemstate"
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations debugfs_debug_fops = {
+ .open = modem_state_debug_open,
+ .read = seq_read,
+ .write = modem_state_debug_set,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+#endif
+
+static void sysfs_notify_rsthc(struct modem_state_dev *msdev)
+{
+ sysfs_notify(&msdev->dev->kobj, NULL, dev_attr_modempower.attr.name);
+ sysfs_notify(&msdev->dev->kobj, NULL, dev_attr_state.attr.name);
+}
+
+static void sysfs_notify_rstext(struct modem_state_dev *msdev)
+{
+ sysfs_notify(&msdev->dev->kobj, NULL, dev_attr_reset.attr.name);
+ sysfs_notify(&msdev->dev->kobj, NULL, dev_attr_state.attr.name);
+}
+
+static void sysfs_notify_crash(struct modem_state_dev *msdev)
+{
+ sysfs_notify(&msdev->dev->kobj, NULL, dev_attr_crash.attr.name);
+ sysfs_notify(&msdev->dev->kobj, NULL, dev_attr_state.attr.name);
+}
+
+static void wq_rsthc(struct work_struct *work)
+{
+ unsigned long flags;
+ int rsthc;
+ struct modem_state_dev *msdev =
+ container_of(work, struct modem_state_dev, wq_rsthc);
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ rsthc = gpio_get_value(msdev->rsthc_pin);
+ dev_dbg(msdev->dev, "RSTHC interrupt detected, rsthc=%d\n", rsthc);
+ if (msdev->power_state == rsthc) {
+ if (!rsthc) {
+ /*
+ * Modem has turned off and we were expecting it to;
+ * turn vbat to the modem off now.
+ */
+ del_timer(&msdev->vbat_off_timer);
+ modem_vbat_set_value(msdev, 0);
+ }
+ } else {
+ dev_dbg(msdev->dev,
+ "Modem power state is %d, expected %d\n", rsthc,
+ msdev->power_state);
+ dev_dbg(msdev->dev,
+ "Attempting to change modem power state "
+ "in 2 seconds\n");
+
+ msdev->onkey_debounce_timer.data = (unsigned long)msdev;
+ /* Wait > 2048ms due to debounce timer */
+ mod_timer(&msdev->onkey_debounce_timer,
+ jiffies + ((2050 * HZ) / 1000));
+ }
+ spin_unlock_irqrestore(&msdev->lock, flags);
+
+ call_callbacks();
+ sysfs_notify_rsthc(msdev);
+}
+
+static void wq_rstext(struct work_struct *work)
+{
+ struct modem_state_dev *msdev =
+ container_of(work, struct modem_state_dev, wq_rstext);
+
+ dev_dbg(msdev->dev, "RSTEXT interrupt detected, rstext=%d\n",
+ gpio_get_value(msdev->rstext_pin));
+
+ call_callbacks();
+ sysfs_notify_rstext(msdev);
+}
+
+static void wq_crash(struct work_struct *work)
+{
+ struct modem_state_dev *msdev =
+ container_of(work, struct modem_state_dev, wq_crash);
+
+ dev_dbg(msdev->dev, "modem crash interrupt detected. crash=%d\n",
+ gpio_get_value(msdev->crash_pin));
+
+ call_callbacks();
+ sysfs_notify_crash(msdev);
+}
+
+/* Populate device structure used by the driver */
+static int modem_state_dev_init(struct platform_device *pdev,
+ struct modem_state_dev *msdev)
+{
+ int err = 0;
+ struct resource *r;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IO, "onkey_pin");
+ if (r == NULL) {
+ err = -ENXIO;
+ dev_err(&pdev->dev,
+ "Could not get GPIO number for onkey pin\n");
+ goto err_resource;
+ }
+ msdev->onkey_pin = r->start;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IO, "reset_pin");
+ if (r == NULL) {
+ err = -ENXIO;
+ dev_err(&pdev->dev,
+ "Could not get GPIO number for reset pin\n");
+ goto err_resource;
+ }
+ msdev->reset_pin = r->start;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IO, "vbat_pin");
+ if (r == NULL) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Could not get GPIO number for vbat pin\n");
+ goto err_resource;
+ }
+ msdev->vbat_pin = r->start;
+
+ msdev->rsthc_pin = platform_get_irq_byname(pdev, "rsthc_pin");
+ if (msdev->rsthc_pin < 0) {
+ err = msdev->rsthc_pin;
+ dev_err(&pdev->dev,
+ "Could not get GPIO number for rsthc pin\n");
+ goto err_resource;
+ }
+
+ msdev->rstext_pin = platform_get_irq_byname(pdev, "rstext_pin");
+ if (msdev->rstext_pin < 0) {
+ err = msdev->rstext_pin;
+ dev_err(&pdev->dev,
+ "Could not get GPIO number for retext pin\n");
+ goto err_resource;
+ }
+
+ msdev->crash_pin = platform_get_irq_byname(pdev, "crash_pin");
+ if (msdev->crash_pin < 0) {
+ err = msdev->crash_pin;
+ dev_err(&pdev->dev,
+ "Could not get GPIO number for crash pin\n");
+ goto err_resource;
+ }
+err_resource:
+ return err;
+}
+
+/* IRQ handlers */
+
+/* Handlers for rsthc (modem power off indication) IRQ */
+static irqreturn_t rsthc_irq(int irq, void *dev)
+{
+ struct modem_state_dev *msdev = (struct modem_state_dev *)dev;
+
+ /* check it's our interrupt */
+ if (irq != GPIO_TO_IRQ(msdev->rsthc_pin)) {
+ dev_err(msdev->dev, "Spurious RSTHC irq\n");
+ return IRQ_NONE;
+ }
+
+ queue_work(msdev->workqueue, &msdev->wq_rsthc);
+ return IRQ_HANDLED;
+}
+
+/* Handlers for rstext (modem reset indication) IRQ */
+static irqreturn_t rstext_irq(int irq, void *dev)
+{
+ struct modem_state_dev *msdev = (struct modem_state_dev *)dev;
+
+ /* check it's our interrupt */
+ if (irq != GPIO_TO_IRQ(msdev->rstext_pin)) {
+ dev_err(msdev->dev, "Spurious RSTEXT irq\n");
+ return IRQ_NONE;
+ }
+
+ queue_work(msdev->workqueue, &msdev->wq_rstext);
+ return IRQ_HANDLED;
+}
+
+/* Handlers for modem crash indication IRQ */
+static irqreturn_t crash_irq(int irq, void *dev)
+{
+ struct modem_state_dev *msdev = (struct modem_state_dev *)dev;
+
+ /* check it's our interrupt */
+ if (irq != GPIO_TO_IRQ(msdev->crash_pin)) {
+ dev_err(msdev->dev, "Spurious modem crash irq\n");
+ return IRQ_NONE;
+ }
+
+ queue_work(msdev->workqueue, &msdev->wq_crash);
+ return IRQ_HANDLED;
+}
+
+static int request_irq_pin(int pin, irq_handler_t handler, unsigned long flags,
+ struct modem_state_dev *msdev)
+{
+ int err = 0;
+ if (pin) {
+ err = request_irq(GPIO_TO_IRQ(pin), handler, flags,
+ dev_name(msdev->dev), msdev);
+ if (err == 0) {
+ err = enable_irq_wake(GPIO_TO_IRQ(pin));
+ if (err < 0) {
+ dev_err(msdev->dev,
+ "Request for wake on pin %d failed\n",
+ pin);
+ free_irq(GPIO_TO_IRQ(pin), msdev);
+ }
+ } else {
+ dev_err(msdev->dev,
+ "Request for irq on pin %d failed\n", pin);
+ }
+ }
+ return err;
+}
+
+static void free_irq_pin(int pin, struct modem_state_dev *msdev)
+{
+ disable_irq_wake(GPIO_TO_IRQ(pin));
+ /* must pass the same dev_id that was used in request_irq() */
+ free_irq(GPIO_TO_IRQ(pin), msdev);
+}
+
+static int request_irq_all(struct modem_state_dev *msdev)
+{
+ int err;
+
+ err = request_irq_pin(msdev->rsthc_pin, rsthc_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_NO_SUSPEND, msdev);
+ if (err < 0)
+ goto err_rsthc_irq_req;
+
+ err = request_irq_pin(msdev->rstext_pin, rstext_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_NO_SUSPEND, msdev);
+ if (err < 0)
+ goto err_rstext_irq_req;
+
+ err = request_irq_pin(msdev->crash_pin, crash_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_NO_SUSPEND, msdev);
+ if (err < 0)
+ goto err_crash_irq_req;
+
+ return 0;
+
+err_crash_irq_req:
+ free_irq_pin(msdev->rstext_pin, msdev);
+err_rstext_irq_req:
+ free_irq_pin(msdev->rsthc_pin, msdev);
+err_rsthc_irq_req:
+ return err;
+}
+
+/* Configure GPIO used by the driver */
+static int modem_state_gpio_init(struct platform_device *pdev,
+ struct modem_state_dev *msdev)
+{
+ int err = 0;
+
+ /* Reserve gpio pins */
+ if (msdev->onkey_pin != 0) {
+ err = gpio_request(msdev->onkey_pin, dev_name(msdev->dev));
+ if (err < 0) {
+ dev_err(&pdev->dev, "Request for onkey pin failed\n");
+ goto err_onkey_req;
+ }
+ }
+ if (msdev->reset_pin != 0) {
+ err = gpio_request(msdev->reset_pin, dev_name(msdev->dev));
+ if (err < 0) {
+ dev_err(&pdev->dev, "Request for reset pin failed\n");
+ goto err_reset_req;
+ }
+ }
+ if (msdev->rsthc_pin != 0) {
+ err = gpio_request(msdev->rsthc_pin, dev_name(msdev->dev));
+ if (err < 0) {
+ dev_err(&pdev->dev, "Request for rsthc pin failed\n");
+ goto err_rsthc_req;
+ }
+ }
+ if (msdev->rstext_pin != 0) {
+ err = gpio_request(msdev->rstext_pin, dev_name(msdev->dev));
+ if (err < 0) {
+ dev_err(&pdev->dev, "Request for rstext pin failed\n");
+ goto err_rstext_req;
+ }
+ }
+ if (msdev->crash_pin != 0) {
+ err = gpio_request(msdev->crash_pin, dev_name(msdev->dev));
+ if (err < 0) {
+ dev_err(&pdev->dev, "Request for crash pin failed\n");
+ goto err_crash_req;
+ }
+ }
+ if (msdev->vbat_pin != 0) {
+ err = gpio_request(msdev->vbat_pin, dev_name(msdev->dev));
+ if (err < 0) {
+ dev_err(&pdev->dev, "Request for vbat pin failed\n");
+ goto err_vbat_req;
+ }
+ }
+
+ /* Set initial pin config */
+ set_on_config(msdev);
+ if (msdev->onkey_pin)
+ nmk_config_pin(PIN_CFG(msdev->onkey_pin, GPIO) |
+ PIN_OUTPUT_LOW, false);
+ if (msdev->vbat_pin)
+ nmk_config_pin(PIN_CFG(msdev->vbat_pin, GPIO) | PIN_OUTPUT_HIGH,
+ false);
+
+ /* Configure IRQs for GPIO pins */
+ err = request_irq_all(msdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Request for irqs failed, err = %d\n", err);
+ goto err_irq_req;
+ }
+ msdev->irq_state = 1;
+
+ /* Save current modem state */
+ msdev->power_state = gpio_get_value(msdev->rsthc_pin);
+
+ return 0;
+
+err_irq_req:
+ gpio_free(msdev->vbat_pin);
+err_vbat_req:
+ gpio_free(msdev->crash_pin);
+err_crash_req:
+ gpio_free(msdev->rstext_pin);
+err_rstext_req:
+ gpio_free(msdev->rsthc_pin);
+err_rsthc_req:
+ gpio_free(msdev->reset_pin);
+err_reset_req:
+ gpio_free(msdev->onkey_pin);
+err_onkey_req:
+ return err;
+}
+
+/* Timer handlers */
+
+static void modem_power_timeout(unsigned long data)
+{
+ unsigned long flags;
+ struct modem_state_dev *msdev = (struct modem_state_dev *)data;
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ if (msdev->busy)
+ msdev->busy = 0;
+ else
+ dev_err(msdev->dev,
+ "onkey timer expired and busy flag not set\n");
+
+ gpio_set_value(msdev->onkey_pin, 0);
+ spin_unlock_irqrestore(&msdev->lock, flags);
+}
+
+static void modem_reset_timeout(unsigned long data)
+{
+ unsigned long flags;
+ struct modem_state_dev *msdev = (struct modem_state_dev *)data;
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ if (msdev->busy)
+ msdev->busy = 0;
+ else
+ dev_err(msdev->dev,
+ "reset timer expired and busy flag not set\n");
+
+ gpio_set_value(msdev->reset_pin, 1);
+ spin_unlock_irqrestore(&msdev->lock, flags);
+}
+
+static void modem_onkey_debounce_timeout(unsigned long data)
+{
+ unsigned long flags;
+ struct modem_state_dev *msdev = (struct modem_state_dev *)data;
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ if (msdev->busy) {
+ dev_info(msdev->dev,
+ "Delayed onkey change aborted. "
+ "Another action in progress\n");
+ } else {
+ if (gpio_get_value(msdev->rsthc_pin) != msdev->power_state) {
+ if (0 == msdev->power_state)
+ modem_power_off(msdev);
+ else
+ modem_power_on(msdev);
+ }
+ }
+ spin_unlock_irqrestore(&msdev->lock, flags);
+}
+
+static void modem_vbat_off_timeout(unsigned long data)
+{
+ struct modem_state_dev *msdev = (struct modem_state_dev *)data;
+ unsigned long flags;
+ spin_lock_irqsave(&msdev->lock, flags);
+ if (0 == msdev->power_state)
+ modem_vbat_set_value(msdev, 0);
+ spin_unlock_irqrestore(&msdev->lock, flags);
+}
+
+static void modem_busy_on_timeout(unsigned long data)
+{
+ unsigned long flags;
+ struct modem_state_dev *msdev = (struct modem_state_dev *)data;
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ if (msdev->busy) {
+ mod_timer(&msdev->busy_timer, jiffies + 1);
+ } else {
+ msdev->busy_timer.function = NULL;
+ modem_power_on(msdev);
+ }
+ spin_unlock_irqrestore(&msdev->lock, flags);
+}
+
+static void modem_busy_off_timeout(unsigned long data)
+{
+ unsigned long flags;
+ struct modem_state_dev *msdev = (struct modem_state_dev *)data;
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ if (msdev->busy) {
+ mod_timer(&msdev->busy_timer, jiffies + 1);
+ } else {
+ msdev->busy_timer.function = NULL;
+ modem_power_off(msdev);
+ }
+ spin_unlock_irqrestore(&msdev->lock, flags);
+}
+
+static void modem_busy_reset_timeout(unsigned long data)
+{
+ unsigned long flags;
+ struct modem_state_dev *msdev = (struct modem_state_dev *)data;
+
+ spin_lock_irqsave(&msdev->lock, flags);
+ if (msdev->busy) {
+ mod_timer(&msdev->busy_timer, jiffies + 1);
+ } else {
+ msdev->busy_timer.function = NULL;
+ modem_reset(msdev);
+ }
+ spin_unlock_irqrestore(&msdev->lock, flags);
+}
+
+#ifdef DEBUG
+static int callback_test(unsigned long data)
+{
+ struct modem_state_dev *msdev = (struct modem_state_dev *)data;
+ dev_info(msdev->dev, "Test callback. Modem state is %s\n",
+ modem_state_to_str(modem_state_get_state()));
+ return 0;
+}
+#endif
+
+/* Exported functions */
+
+void modem_state_power_on(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&modem_state->lock, flags);
+ if (modem_state->busy) {
+ /*
+ * Ignore the on request if an off request is already queued;
+ * cancel any queued reset request.
+ */
+ if (modem_busy_reset_timeout ==
+ modem_state->busy_timer.function) {
+ del_timer_sync(&modem_state->busy_timer);
+ modem_state->busy_timer.function = NULL;
+ }
+ if (NULL == modem_state->busy_timer.function) {
+ modem_state->busy_timer.function =
+ modem_busy_on_timeout;
+ modem_state->busy_timer.data =
+ (unsigned long)modem_state;
+ mod_timer(&modem_state->busy_timer, jiffies + 1);
+ }
+ } else {
+ modem_power_on(modem_state);
+ }
+ spin_unlock_irqrestore(&modem_state->lock, flags);
+}
+
+void modem_state_power_off(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&modem_state->lock, flags);
+ if (modem_state->busy) {
+ /*
+ * Prioritize off request if others are queued.
+ * Must turn the modem off if the system is shutting down.
+ */
+ if (NULL != modem_state->busy_timer.function)
+ del_timer_sync(&modem_state->busy_timer);
+
+ modem_state->busy_timer.function = modem_busy_off_timeout;
+ modem_state->busy_timer.data = (unsigned long)modem_state;
+ mod_timer(&modem_state->busy_timer, jiffies + 1);
+ } else {
+ modem_power_off(modem_state);
+ }
+ spin_unlock_irqrestore(&modem_state->lock, flags);
+}
+
+void modem_state_force_reset(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&modem_state->lock, flags);
+ if (modem_state->busy) {
+ /* Ignore reset request if turning on or off is queued */
+ if (NULL == modem_state->busy_timer.function) {
+ modem_state->busy_timer.function =
+ modem_busy_reset_timeout;
+ modem_state->busy_timer.data =
+ (unsigned long)modem_state;
+ mod_timer(&modem_state->busy_timer, jiffies + 1);
+ }
+ } else {
+ modem_reset(modem_state);
+ }
+ spin_unlock_irqrestore(&modem_state->lock, flags);
+}
+
+int modem_state_get_state(void)
+{
+ return get_modem_state(modem_state);
+}
+
+char *modem_state_to_str(int state)
+{
+ if (state > MODEM_STATE_END_MARKER)
+ state = MODEM_STATE_END_MARKER;
+
+ return modem_state_str[state];
+}
+
+int modem_state_register_callback(int (*callback) (unsigned long),
+ unsigned long data)
+{
+ struct callback_list *item;
+ unsigned long flags;
+
+ if (NULL == modem_state)
+ return -EAGAIN;
+
+ if (NULL == callback)
+ return -EINVAL;
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (NULL == item) {
+ dev_err(modem_state->dev,
+ "Could not allocate memory for struct callback_list\n");
+ return -ENOMEM;
+ }
+ item->callback = callback;
+ item->data = data;
+
+ spin_lock_irqsave(&modem_state->lock, flags);
+ list_add_tail(&item->node, &callback_list);
+ spin_unlock_irqrestore(&modem_state->lock, flags);
+
+ return 0;
+}
+
+int modem_state_remove_callback(int (*callback) (unsigned long))
+{
+ struct callback_list *iterator;
+ struct callback_list *item;
+ unsigned long flags;
+ int ret = -ENXIO;
+
+ if (NULL == callback)
+ return -EINVAL;
+
+ spin_lock_irqsave(&modem_state->lock, flags);
+ list_for_each_entry_safe(iterator, item, &callback_list, node) {
+ if (callback == item->callback) {
+ list_del(&item->node);
+ kfree(item);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&modem_state->lock, flags);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+int modem_state_suspend(struct device *dev)
+{
+ struct modem_state_dev *msdev =
+ platform_get_drvdata(to_platform_device(dev));
+
+ if (msdev->busy) {
+ dev_info(dev, "Driver is busy\n");
+ return -EBUSY;
+ } else {
+ return 0;
+ }
+}
+
+int modem_state_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static int __devinit modem_state_probe(struct platform_device *pdev)
+{
+ int err = 0;
+
+ dev_info(&pdev->dev, "Starting probe\n");
+
+ modem_state = kzalloc(sizeof(struct modem_state_dev), GFP_KERNEL);
+ if (NULL == modem_state) {
+ dev_err(&pdev->dev,
+ "Could not allocate memory for modem_state_dev\n");
+ return -ENOMEM;
+ }
+ modem_state->dev = &pdev->dev;
+
+ spin_lock_init(&modem_state->lock);
+
+ INIT_WORK(&modem_state->wq_rsthc, wq_rsthc);
+ INIT_WORK(&modem_state->wq_rstext, wq_rstext);
+ INIT_WORK(&modem_state->wq_crash, wq_crash);
+ modem_state->workqueue =
+ create_singlethread_workqueue(dev_name(&pdev->dev));
+ if (modem_state->workqueue == NULL) {
+ dev_err(&pdev->dev, "Failed to create workqueue\n");
+ err = -ENOMEM;
+ goto err_queue;
+ }
+
+ err = modem_state_dev_init(pdev, modem_state);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Could not initialize device structure\n");
+ goto err_dev;
+ }
+
+ init_timer(&modem_state->onkey_timer);
+ init_timer(&modem_state->reset_timer);
+ init_timer(&modem_state->onkey_debounce_timer);
+ init_timer(&modem_state->vbat_off_timer);
+ init_timer(&modem_state->busy_timer);
+ modem_state->onkey_timer.function = modem_power_timeout;
+ modem_state->reset_timer.function = modem_reset_timeout;
+ modem_state->onkey_debounce_timer.function =
+ modem_onkey_debounce_timeout;
+ modem_state->vbat_off_timer.function = modem_vbat_off_timeout;
+ modem_state->busy_timer.function = NULL;
+
+ platform_set_drvdata(pdev, modem_state);
+
+ err = modem_state_gpio_init(pdev, modem_state);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Could not initialize GPIO\n");
+ goto err_gpio;
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &modemstate_attr_group);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to create sysfs nodes\n");
+ goto err_sysfs;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ modem_state->debugfsdir = debugfs_create_dir("modemstate", NULL);
+ modem_state->debugfs_debug = debugfs_create_file("debug",
+ S_IRUGO | S_IWUSR,
+ modem_state->debugfsdir,
+ modem_state,
+ &debugfs_debug_fops);
+#endif
+
+#ifdef DEBUG
+ modem_state_register_callback(callback_test,
+ (unsigned long)modem_state);
+#endif
+ return 0;
+
+err_sysfs:
+err_gpio:
+err_dev:
+ destroy_workqueue(modem_state->workqueue);
+err_queue:
+ kfree(modem_state);
+ return err;
+}
+
+static int __devexit modem_state_remove(struct platform_device *pdev)
+{
+ struct modem_state_dev *msdev = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &modemstate_attr_group);
+ destroy_workqueue(msdev->workqueue);
+ kfree(msdev);
+ return 0;
+}
+
+static void modem_state_shutdown(struct platform_device *pdev)
+{
+ /*
+ * Trigger software shutdown of the modem and then wait until
+ * modem-off state is detected. If the modem does not power off
+ * when requested, power will be removed and we will detect the
+ * modem-off state that way.
+ */
+ modem_state_power_off();
+ if (MODEM_STATE_OFF != modem_state_get_state())
+ dev_alert(&pdev->dev, "Waiting for modem to power down\n");
+ while (MODEM_STATE_OFF != modem_state_get_state())
+ cond_resched();
+}
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops modem_state_dev_pm_ops = {
+ .suspend_noirq = modem_state_suspend,
+ .resume_noirq = modem_state_resume,
+};
+#endif
+
+static struct platform_driver modem_state_driver = {
+ .probe = modem_state_probe,
+ .remove = __devexit_p(modem_state_remove),
+ .shutdown = modem_state_shutdown,
+ .driver = {
+ .name = "modemstate",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &modem_state_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init modem_state_init(void)
+{
+#ifdef DEBUG
+ printk(KERN_ALERT "Modem state driver init\n");
+#endif
+ return platform_driver_probe(&modem_state_driver, modem_state_probe);
+}
+
+static void __exit modem_state_exit(void)
+{
+ platform_driver_unregister(&modem_state_driver);
+}
+
+module_init(modem_state_init);
+module_exit(modem_state_exit);
+
+MODULE_AUTHOR("Derek Morton");
+MODULE_DESCRIPTION("M6718 modem power state driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/modem/m6718_spi/modem_state.h b/drivers/modem/m6718_spi/modem_state.h
new file mode 100644
index 00000000000..a2f1d9fbe3e
--- /dev/null
+++ b/drivers/modem/m6718_spi/modem_state.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Derek Morton <derek.morton@stericsson.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Power state driver for M6718 MODEM
+ */
+#ifndef MODEM_STATE_H
+#define MODEM_STATE_H
+
+enum modem_states {
+ MODEM_STATE_OFF,
+ MODEM_STATE_RESET,
+ MODEM_STATE_CRASH,
+ MODEM_STATE_ON,
+ /*
+ * Add new states before end marker and update modem_state_str[]
+ * in modem_state.c
+ */
+ MODEM_STATE_END_MARKER
+};
+
+void modem_state_power_on(void);
+void modem_state_power_off(void);
+void modem_state_force_reset(void);
+int modem_state_get_state(void);
+char *modem_state_to_str(int state);
+
+/* Callbacks will be running in tasklet context */
+int modem_state_register_callback(int (*callback) (unsigned long),
+ unsigned long data);
+int modem_state_remove_callback(int (*callback) (unsigned long));
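+
+/*
+ * Illustrative usage sketch (not part of this interface): the callback name
+ * and deferred work item below are hypothetical. Callbacks run in tasklet
+ * context so they must not sleep; blocking work should be deferred.
+ *
+ * static int my_modem_notify(unsigned long data)
+ * {
+ * if (modem_state_get_state() == MODEM_STATE_CRASH)
+ * schedule_work(&my_recovery_work);
+ * return 0;
+ * }
+ *
+ * modem_state_register_callback(my_modem_notify, 0);
+ * ...
+ * modem_state_remove_callback(my_modem_notify);
+ */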
+
+#endif
diff --git a/drivers/modem/m6718_spi/modem_statemachine.h b/drivers/modem/m6718_spi/modem_statemachine.h
new file mode 100644
index 00000000000..55e4a520d3d
--- /dev/null
+++ b/drivers/modem/m6718_spi/modem_statemachine.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Modem IPC driver protocol interface header:
+ * statemachine functionality.
+ */
+#ifndef _MODEM_STATEMACHINE_H_
+#define _MODEM_STATEMACHINE_H_
+
+#include <linux/kernel.h>
+
+/* valid states for the driver state machine */
+enum ipc_sm_state_id {
+ /* common link and shared states below */
+ IPC_SM_INIT,
+ IPC_SM_HALT,
+ IPC_SM_RESET,
+ IPC_SM_WAIT_SLAVE_STABLE,
+ IPC_SM_WAIT_HANDSHAKE_INACTIVE,
+ IPC_SM_SLW_TX_BOOTREQ,
+ IPC_SM_ACT_TX_BOOTREQ,
+ IPC_SM_SLW_RX_BOOTRESP,
+ IPC_SM_ACT_RX_BOOTRESP,
+ IPC_SM_IDL,
+ IPC_SM_SLW_TX_WR_CMD,
+ IPC_SM_ACT_TX_WR_CMD,
+ IPC_SM_SLW_TX_WR_DAT,
+ IPC_SM_ACT_TX_WR_DAT,
+ IPC_SM_SLW_TX_RD_CMD,
+ IPC_SM_ACT_TX_RD_CMD,
+ IPC_SM_SLW_RX_WR_CMD,
+ IPC_SM_ACT_RX_WR_CMD,
+ IPC_SM_ACT_RX_WR_DAT,
+ /* audio link states below */
+ IPC_SM_INIT_AUD,
+ IPC_SM_HALT_AUD,
+ IPC_SM_RESET_AUD,
+ IPC_SM_IDL_AUD,
+ IPC_SM_SLW_TX_WR_DAT_AUD,
+ IPC_SM_ACT_TX_WR_DAT_AUD,
+ IPC_SM_SLW_RX_WR_DAT_AUD,
+ IPC_SM_ACT_RX_WR_DAT_AUD,
+ IPC_SM_STATE_ID_NBR
+};
+
+/* events which can cause the state machine to run */
+#define IPC_SM_RUN_NONE (0x00)
+#define IPC_SM_RUN_SLAVE_IRQ (0x01)
+#define IPC_SM_RUN_TFR_COMPLETE (0x02)
+#define IPC_SM_RUN_TX_REQ (0x04)
+#define IPC_SM_RUN_INIT (0x08)
+#define IPC_SM_RUN_ABORT (0x10)
+#define IPC_SM_RUN_COMMS_TMO (0x20)
+#define IPC_SM_RUN_STABLE_TMO (0x40)
+#define IPC_SM_RUN_RESET (0x80)
+
+struct ipc_link_context; /* forward declaration */
+
+typedef u8 (*ipc_sm_enter_func)(u8 event, struct ipc_link_context *context);
+typedef const struct ipc_sm_state *(*ipc_sm_exit_func)(u8 event,
+ struct ipc_link_context *context);
+
+struct ipc_sm_state {
+ enum ipc_sm_state_id id;
+ ipc_sm_enter_func enter;
+ ipc_sm_exit_func exit;
+ u8 events;
+};
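+
+/*
+ * The IPC_SM_RUN_* values are bit flags, so .events is presumably the OR of
+ * all events a state accepts (see ipc_sm_valid_for_state()). A hypothetical
+ * state table entry could look like:
+ *
+ * {
+ * .id = IPC_SM_IDL,
+ * .enter = sm_idl_enter,
+ * .exit = sm_idl_exit,
+ * .events = IPC_SM_RUN_TX_REQ | IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_RESET
+ * },
+ */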
+
+const struct ipc_sm_state *ipc_sm_idle_state(struct ipc_link_context *context);
+const struct ipc_sm_state *ipc_sm_init_state(struct ipc_link_context *context);
+const struct ipc_sm_state *ipc_sm_state(u8 id);
+bool ipc_sm_valid_for_state(u8 event, const struct ipc_sm_state *state);
+
+void ipc_sm_kick(u8 event, struct ipc_link_context *context);
+
+#endif /* _MODEM_STATEMACHINE_H_ */
diff --git a/drivers/modem/m6718_spi/modem_util.h b/drivers/modem/m6718_spi/modem_util.h
new file mode 100644
index 00000000000..2d9e2e39abc
--- /dev/null
+++ b/drivers/modem/m6718_spi/modem_util.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Modem IPC driver protocol interface header:
+ * utility functionality.
+ */
+#ifndef _MODEM_UTIL_H_
+#define _MODEM_UTIL_H_
+
+#include <linux/kernel.h>
+#include "modem_private.h"
+
+bool ipc_util_channel_is_loopback(u8 channel);
+
+u32 ipc_util_make_l2_header(u8 channel, u32 len);
+u8 ipc_util_get_l2_channel(u32 hdr);
+u32 ipc_util_get_l2_length(u32 hdr);
+u32 ipc_util_make_l1_header(u8 cmd, u8 counter, u32 len);
+u8 ipc_util_get_l1_cmd(u32 hdr);
+u8 ipc_util_get_l1_counter(u32 hdr);
+u32 ipc_util_get_l1_length(u32 hdr);
+u8 ipc_util_get_l1_bootresp_ver(u32 bootresp);
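+
+/*
+ * Illustrative only: the exact bit layout of the L1/L2 headers is defined by
+ * the implementation, but the make/get accessors above are expected to
+ * round-trip, e.g.:
+ *
+ * u32 hdr = ipc_util_make_l2_header(channel, len);
+ * ipc_util_get_l2_channel(hdr) == channel;
+ * ipc_util_get_l2_length(hdr) == len;
+ */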
+
+int ipc_util_ss_level_active(struct ipc_link_context *context);
+int ipc_util_ss_level_inactive(struct ipc_link_context *context);
+int ipc_util_int_level_active(struct ipc_link_context *context);
+int ipc_util_int_level_inactive(struct ipc_link_context *context);
+
+void ipc_util_deactivate_ss(struct ipc_link_context *context);
+void ipc_util_activate_ss(struct ipc_link_context *context);
+void ipc_util_activate_ss_with_tmo(struct ipc_link_context *context);
+
+bool ipc_util_int_is_active(struct ipc_link_context *context);
+
+bool ipc_util_link_is_idle(struct ipc_link_context *context);
+
+void ipc_util_start_slave_stable_timer(struct ipc_link_context *context);
+
+void ipc_util_spi_message_prepare(struct ipc_link_context *link_context,
+ void *tx_buf, void *rx_buf, int len);
+void ipc_util_spi_message_init(struct ipc_link_context *link_context,
+ void (*complete)(void *));
+
+bool ipc_util_link_gpio_request(struct ipc_link_context *context,
+ irqreturn_t (*irqhnd)(int, void *));
+bool ipc_util_link_gpio_config(struct ipc_link_context *context);
+bool ipc_util_link_gpio_unconfig(struct ipc_link_context *context);
+
+bool ipc_util_link_is_suspended(struct ipc_link_context *context);
+void ipc_util_suspend_link(struct ipc_link_context *context);
+void ipc_util_resume_link(struct ipc_link_context *context);
+
+#endif /* _MODEM_UTIL_H_ */
diff --git a/drivers/modem/m6718_spi/netlink.c b/drivers/modem/m6718_spi/netlink.c
new file mode 100644
index 00000000000..253b19162b1
--- /dev/null
+++ b/drivers/modem/m6718_spi/netlink.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010,2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ * based on shrm_protocol.c
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * U9500 <-> M6718 IPC protocol implementation using SPI:
+ * netlink related functionality
+ */
+#include <linux/netlink.h>
+#include <linux/spi/spi.h>
+#include <linux/modem/m6718_spi/modem_net.h>
+#include <linux/modem/m6718_spi/modem_char.h>
+#include <net/net_namespace.h>
+#include "modem_protocol.h"
+#include "modem_private.h"
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
+#include "modem_state.h"
+#endif
+
+static struct sock *netlink_sk;
+struct modem_spi_dev *modem_dev;
+
+#define MAX_PAYLOAD 1024
+
+/*
+ * Netlink broadcast message values: these must match the values expected by
+ * userspace for the corresponding message.
+ */
+enum netlink_msg_id {
+ NETLINK_MODEM_RESET = 1,
+ NETLINK_MODEM_QUERY_STATE,
+ NETLINK_USER_REQUEST_MODEM_RESET,
+ NETLINK_MODEM_STATUS_ONLINE,
+ NETLINK_MODEM_STATUS_OFFLINE
+};
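+
+/*
+ * Illustrative userspace sketch (not part of the driver): a listener binds an
+ * AF_NETLINK socket to multicast group 1 and reads the message id from the
+ * payload; NETLINK_MODEM is whatever protocol number the platform assigns to
+ * this driver.
+ *
+ * char buf[NLMSG_SPACE(1024)];
+ * int msg;
+ * int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_MODEM);
+ * struct sockaddr_nl addr = { .nl_family = AF_NETLINK, .nl_groups = 1 };
+ *
+ * bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+ * recv(fd, buf, sizeof(buf), 0);
+ * msg = *(int *)NLMSG_DATA((struct nlmsghdr *)buf);
+ */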
+
+static void netlink_multicast_tasklet(unsigned long data)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ enum netlink_msg_id nlmsg = (enum netlink_msg_id)data;
+
+ if (netlink_sk == NULL) {
+ pr_err("could not send multicast, no socket\n");
+ return;
+ }
+
+ /* prepare netlink message */
+ skb = alloc_skb(NLMSG_SPACE(MAX_PAYLOAD), GFP_ATOMIC);
+ if (!skb) {
+ pr_err("failed to allocate socket buffer\n");
+ return;
+ }
+
+ if (nlmsg == NETLINK_MODEM_RESET)
+ modem_isa_reset(modem_dev);
+
+ nlh = (struct nlmsghdr *)skb->data;
+ nlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD);
+ nlh->nlmsg_pid = 0; /* from kernel */
+ nlh->nlmsg_flags = 0;
+ *(int *)NLMSG_DATA(nlh) = nlmsg;
+ skb_put(skb, MAX_PAYLOAD);
+ /* sender is in group 1<<0 */
+ NETLINK_CB(skb).pid = 0; /* from kernel */
+ /* to mcast group 1<<0 */
+ NETLINK_CB(skb).dst_group = 1;
+
+ /* multicast the message to all listening processes */
+ pr_debug("sending netlink multicast message %d\n", nlmsg);
+ netlink_broadcast(netlink_sk, skb, 0, 1, GFP_ATOMIC);
+}
+
+static void send_unicast(int dst_pid)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+
+ if (netlink_sk == NULL) {
+ pr_err("could not send unicast, no socket\n");
+ return;
+ }
+
+ /* prepare the message for unicast */
+ skb = alloc_skb(NLMSG_SPACE(MAX_PAYLOAD), GFP_KERNEL);
+ if (!skb) {
+ pr_err("failed to allocate socket buffer\n");
+ return;
+ }
+
+ nlh = (struct nlmsghdr *)skb->data;
+ nlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD);
+ nlh->nlmsg_pid = 0; /* from kernel */
+ nlh->nlmsg_flags = 0;
+
+ if (modem_m6718_spi_is_boot_done()) {
+ pr_debug("sending netlink unicast message %d\n",
+ NETLINK_MODEM_STATUS_ONLINE);
+ *(int *)NLMSG_DATA(nlh) = NETLINK_MODEM_STATUS_ONLINE;
+ } else {
+ pr_debug("sending netlink unicast message %d\n",
+ NETLINK_MODEM_STATUS_OFFLINE);
+ *(int *)NLMSG_DATA(nlh) = NETLINK_MODEM_STATUS_OFFLINE;
+ }
+
+ skb_put(skb, MAX_PAYLOAD);
+ /* sender is in group 1<<0 */
+ NETLINK_CB(skb).pid = 0; /* from kernel */
+ NETLINK_CB(skb).dst_group = 0;
+
+ /* unicast the message to the querying process */
+ netlink_unicast(netlink_sk, skb, dst_pid, MSG_DONTWAIT);
+}
+
+static void netlink_receive(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh = NULL;
+ int msg;
+
+ nlh = (struct nlmsghdr *)skb->data;
+ msg = *((int *)(NLMSG_DATA(nlh)));
+ switch (msg) {
+ case NETLINK_MODEM_QUERY_STATE:
+ send_unicast(nlh->nlmsg_pid);
+ break;
+ case NETLINK_USER_REQUEST_MODEM_RESET:
+ pr_info("user requested modem reset!\n");
+#ifdef CONFIG_DEBUG_FS
+ if (l1_context.msr_disable) {
+ pr_info("MSR is disabled, ignoring reset request\n");
+ break;
+ }
+#endif
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
+ modem_state_force_reset();
+#else
+ pr_err("modestate integration is not enabled in IPC, "
+ "unable to reset modem\n");
+#endif
+ break;
+ default:
+ pr_debug("ignoring invalid netlink message\n");
+ break;
+ }
+}
+
+bool ipc_create_netlink_socket(struct ipc_link_context *context)
+{
+ if (netlink_sk != NULL)
+ return true;
+
+ netlink_sk = netlink_kernel_create(&init_net, NETLINK_MODEM, 1,
+ netlink_receive, NULL, THIS_MODULE);
+ if (netlink_sk == NULL) {
+ dev_err(&context->sdev->dev,
+ "failed to create netlink socket\n");
+ return false;
+ }
+ modem_dev = spi_get_drvdata(context->sdev);
+ return true;
+}
+
+DECLARE_TASKLET(modem_online_tasklet, netlink_multicast_tasklet,
+ NETLINK_MODEM_STATUS_ONLINE);
+DECLARE_TASKLET(modem_reset_tasklet, netlink_multicast_tasklet,
+ NETLINK_MODEM_RESET);
+
+void ipc_broadcast_modem_online(struct ipc_link_context *context)
+{
+ dev_info(&context->sdev->dev, "broadcast modem online event!\n");
+ tasklet_schedule(&modem_online_tasklet);
+}
+
+void ipc_broadcast_modem_reset(struct ipc_link_context *context)
+{
+ dev_info(&context->sdev->dev, "broadcast modem reset event!\n");
+ tasklet_schedule(&modem_reset_tasklet);
+}
diff --git a/drivers/modem/m6718_spi/protocol.c b/drivers/modem/m6718_spi/protocol.c
new file mode 100644
index 00000000000..fa6b2528dd4
--- /dev/null
+++ b/drivers/modem/m6718_spi/protocol.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010,2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * U9500 <-> M6718 IPC protocol implementation using SPI.
+ */
+#include <linux/modem/m6718_spi/modem_driver.h>
+#include "modem_protocol.h"
+#include "modem_private.h"
+#include "modem_util.h"
+#include "modem_queue.h"
+#include "modem_debug.h"
+#include "modem_netlink.h"
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
+#include <linux/workqueue.h>
+#include "modem_state.h"
+
+#define MODEM_STATE_REGISTER_TMO_MS (500)
+#endif
+
+#ifdef WORKAROUND_DUPLICATED_IRQ
+#include <linux/amba/pl022.h>
+#endif
+
+struct l2mux_channel {
+ u8 open:1;
+ u8 link:7;
+};
+
+/* valid open L2 mux channels */
+static const struct l2mux_channel channels[256] = {
+ [MODEM_M6718_SPI_CHN_ISI] = {
+ .open = true,
+ .link = IPC_LINK_COMMON
+ },
+ [MODEM_M6718_SPI_CHN_AUDIO] = {
+ .open = true,
+ .link = IPC_LINK_AUDIO
+ },
+ [MODEM_M6718_SPI_CHN_MASTER_LOOPBACK0] = {
+ .open = true,
+ .link = IPC_LINK_COMMON
+ },
+ [MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK0] = {
+ .open = true,
+ .link = IPC_LINK_COMMON
+ },
+ [MODEM_M6718_SPI_CHN_MASTER_LOOPBACK1] = {
+ .open = true,
+ .link = IPC_LINK_AUDIO
+ },
+ [MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK1] = {
+ .open = true,
+ .link = IPC_LINK_AUDIO
+ }
+};
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
+static void modem_state_reg_wq(struct work_struct *work);
+static DECLARE_DELAYED_WORK(modem_state_reg_work, modem_state_reg_wq);
+#endif
+
+/* the spi driver context */
+struct ipc_l1_context l1_context = {
+#ifdef CONFIG_DEBUG_FS
+ .msr_disable = false,
+#endif
+ .init_done = false
+};
+
+bool modem_protocol_channel_is_open(u8 channel)
+{
+ return channels[channel].open;
+}
+
+void modem_comms_timeout(unsigned long data)
+{
+ ipc_sm_kick(IPC_SM_RUN_COMMS_TMO, (struct ipc_link_context *)data);
+}
+
+void slave_stable_timeout(unsigned long data)
+{
+ ipc_sm_kick(IPC_SM_RUN_STABLE_TMO, (struct ipc_link_context *)data);
+}
+
+/**
+ * modem_protocol_init() - initialise the IPC protocol
+ *
+ * Initialises the IPC protocol in preparation for use. After this is called
+ * the protocol is ready to be probed for each link to be supported.
+ */
+void modem_protocol_init(void)
+{
+ pr_info("M6718 IPC protocol initialising version %02x\n",
+ IPC_DRIVER_VERSION);
+
+ atomic_set(&l1_context.boot_sync_done, 0);
+ ipc_dbg_debugfs_init();
+ ipc_dbg_throughput_init();
+ l1_context.init_done = true;
+ ipc_dbg_measure_throughput(0);
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
+ schedule_delayed_work(&modem_state_reg_work, 0);
+#endif
+}
+
+/**
+ * modem_m6718_spi_send() - send a frame using the IPC protocol
+ * @modem_spi_dev: pointer to modem driver information structure
+ * @channel: L2 channel to send on
+ * @len: length of data to send
+ * @data: pointer to buffer containing data
+ *
+ * Check that the requested channel is supported and open, queue a frame
+ * containing the data on the appropriate link and ensure the state machine
+ * is running to start the transfer.
+ */
+int modem_m6718_spi_send(struct modem_spi_dev *modem_spi_dev, u8 channel,
+ u32 len, void *data)
+{
+ int err;
+ struct ipc_link_context *context;
+
+ if (!channels[channel].open) {
+ dev_err(modem_spi_dev->dev,
+ "error: invalid channel (%d), discarding frame\n",
+ channel);
+ return -EINVAL;
+ }
+
+ context = &l1_context.device_context[channels[channel].link];
+ if (context->state == NULL || context->state->id == IPC_SM_HALT) {
+ static unsigned long linkfail_warn_time;
+ if (printk_timed_ratelimit(&linkfail_warn_time, 60 * 1000))
+ dev_err(modem_spi_dev->dev,
+ "error: link %d for ch %d is not available, "
+ "discarding frames\n",
+ channels[channel].link, channel);
+ return -ENODEV;
+ }
+
+ err = ipc_queue_push_frame(context, channel, len, data);
+ if (err < 0)
+ return err;
+
+ if (ipc_util_link_is_idle(context)) {
+ dev_dbg(modem_spi_dev->dev,
+ "link %d is idle, kicking\n", channels[channel].link);
+ ipc_sm_kick(IPC_SM_RUN_TX_REQ, context);
+ } else {
+ dev_dbg(modem_spi_dev->dev,
+ "link %d is already running\n", channels[channel].link);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(modem_m6718_spi_send);
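+
+/*
+ * Illustrative caller sketch (hypothetical names): an L2mux client queues an
+ * ISI payload for transmission; -EAGAIN indicates the tx queue for the link
+ * is currently full and the caller should retry later.
+ *
+ * err = modem_m6718_spi_send(modem_spi_dev, MODEM_M6718_SPI_CHN_ISI,
+ * skb->len, skb->data);
+ * if (err == -EAGAIN)
+ * requeue_and_retry_later(skb);
+ */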
+
+/**
+ * modem_m6718_spi_is_boot_done() - check if boot handshake with modem is done
+ */
+bool modem_m6718_spi_is_boot_done(void)
+{
+ return atomic_read(&l1_context.boot_sync_done);
+}
+EXPORT_SYMBOL_GPL(modem_m6718_spi_is_boot_done);
+
+/**
+ * modem_protocol_is_busy() - check if the protocol is currently active
+ * @sdev: pointer to spi_device for link to check
+ *
+ * Checks each of the IPC links to see if it is inactive: a link counts as
+ * inactive in the IDLE, INIT and WAIT_SLAVE_STABLE states. If any link is in
+ * another state then true is returned to indicate that the protocol is busy.
+ */
+bool modem_protocol_is_busy(struct spi_device *sdev)
+{
+ int i;
+
+ for (i = 0; i < IPC_NBR_SUPPORTED_SPI_LINKS; i++)
+ switch (l1_context.device_context[i].state->id) {
+ case IPC_SM_IDL:
+ case IPC_SM_IDL_AUD:
+ case IPC_SM_INIT:
+ case IPC_SM_INIT_AUD:
+ case IPC_SM_WAIT_SLAVE_STABLE:
+ /* not busy; continue checking */
+ break;
+ default:
+ dev_info(&sdev->dev, "link %d is busy\n", i);
+ return true;
+ }
+ return false;
+}
+
+int modem_protocol_suspend(struct spi_device *sdev)
+{
+ struct modem_m6718_spi_link_platform_data *link =
+ sdev->dev.platform_data;
+ struct ipc_link_context *context;
+ int link_id;
+
+ if (link == NULL) {
+ /* platform data missing in board config? */
+ dev_err(&sdev->dev, "error: no platform data for link!\n");
+ return -ENODEV;
+ }
+
+ link_id = link->id;
+ if (link_id >= IPC_NBR_SUPPORTED_SPI_LINKS) {
+ dev_err(&sdev->dev,
+ "link %d error: too many links! (max %d)\n",
+ link->id, IPC_NBR_SUPPORTED_SPI_LINKS);
+ return -ENODEV;
+ }
+ context = &l1_context.device_context[link_id];
+
+ ipc_util_suspend_link(context);
+ return 0;
+}
+
+int modem_protocol_resume(struct spi_device *sdev)
+{
+ struct modem_m6718_spi_link_platform_data *link =
+ sdev->dev.platform_data;
+ struct ipc_link_context *context;
+ int link_id;
+
+ if (link == NULL) {
+ /* platform data missing in board config? */
+ dev_err(&sdev->dev, "error: no platform data for link!\n");
+ return -ENODEV;
+ }
+
+ link_id = link->id;
+ if (link_id >= IPC_NBR_SUPPORTED_SPI_LINKS) {
+ dev_err(&sdev->dev,
+ "link %d error: too many links! (max %d)\n",
+ link->id, IPC_NBR_SUPPORTED_SPI_LINKS);
+ return -ENODEV;
+ }
+ context = &l1_context.device_context[link_id];
+
+ ipc_util_resume_link(context);
+
+ /*
+ * If the resume event was an interrupt from the slave then the event
+ * is pending and we need to service it now.
+ */
+ if (ipc_util_int_is_active(context)) {
+ dev_dbg(&sdev->dev,
+ "link %d: slave-ready is pending after resume\n",
+ link_id);
+ ipc_sm_kick(IPC_SM_RUN_SLAVE_IRQ, context);
+ }
+ return 0;
+}
+
+static void spi_tfr_complete(void *context)
+{
+ ipc_sm_kick(IPC_SM_RUN_TFR_COMPLETE,
+ (struct ipc_link_context *)context);
+}
+
+static irqreturn_t slave_ready_irq(int irq, void *dev)
+{
+ struct ipc_link_context *context = (struct ipc_link_context *)dev;
+ struct modem_m6718_spi_link_platform_data *link = context->link;
+ struct spi_device *sdev = context->sdev;
+
+ if (irq != GPIO_TO_IRQ(link->gpio.int_pin)) {
+ dev_err(&sdev->dev,
+ "link %d error: spurious slave irq!", link->id);
+ return IRQ_NONE;
+ }
+
+#ifdef WORKAROUND_DUPLICATED_IRQ
+ if (link->id != IPC_LINK_AUDIO && pl022_tfr_in_progress(sdev)) {
+ dev_warn(&sdev->dev,
+ "link %d warning: slave irq while transfer "
+ "is active! discarding event\n", link->id);
+ return IRQ_HANDLED;
+ }
+#endif
+ ipc_sm_kick(IPC_SM_RUN_SLAVE_IRQ, context);
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
+static int modem_state_callback(unsigned long unused)
+{
+ int modem_state = modem_state_get_state();
+ struct ipc_link_context *contexts = l1_context.device_context;
+ u8 i;
+
+ pr_info("M6718 IPC protocol modemstate reports modem is %s\n",
+ modem_state_to_str(modem_state));
+
+ switch (modem_state) {
+ case MODEM_STATE_ON:
+ /*
+ * Modem is on, ensure each link is configured and trigger
+ * a state change on link0 to begin handshake.
+ */
+ for (i = 0; i < IPC_NBR_SUPPORTED_SPI_LINKS; i++)
+ ipc_util_link_gpio_config(&contexts[i]);
+ ipc_sm_kick(IPC_SM_RUN_INIT, &contexts[0]);
+ break;
+ case MODEM_STATE_OFF:
+ case MODEM_STATE_RESET:
+ case MODEM_STATE_CRASH:
+ /* force all links to reset */
+ for (i = 0; i < IPC_NBR_SUPPORTED_SPI_LINKS; i++)
+ ipc_sm_kick(IPC_SM_RUN_RESET, &contexts[i]);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void modem_state_reg_wq(struct work_struct *work)
+{
+ if (modem_state_register_callback(modem_state_callback, 0) == -EAGAIN) {
+ pr_info("M6718 IPC protocol failed to register with "
+ "modemstate, will retry\n");
+ schedule_delayed_work(&modem_state_reg_work,
+ (MODEM_STATE_REGISTER_TMO_MS * HZ) / 1000);
+ } else {
+ pr_info("M6718 IPC protocol registered with modemstate\n");
+ }
+}
+#endif
+
+int modem_protocol_probe(struct spi_device *sdev)
+{
+ struct modem_m6718_spi_link_platform_data *link =
+ sdev->dev.platform_data;
+ struct ipc_link_context *context;
+ int link_id;
+
+ if (link == NULL) {
+ /* platform data missing in board config? */
+ dev_err(&sdev->dev, "error: no platform data for link!\n");
+ return -ENODEV;
+ }
+
+ link_id = link->id;
+ if (link_id >= IPC_NBR_SUPPORTED_SPI_LINKS) {
+ dev_err(&sdev->dev,
+ "link %d error: too many links! (max %d)\n",
+ link->id, IPC_NBR_SUPPORTED_SPI_LINKS);
+ return -ENODEV;
+ }
+ context = &l1_context.device_context[link_id];
+
+ dev_info(&sdev->dev,
+ "link %d: registering SPI link bus:%d cs:%d\n",
+ link->id, sdev->master->bus_num, sdev->chip_select);
+
+ /* update spi device with correct word size for our device */
+ sdev->bits_per_word = 16;
+ spi_setup(sdev);
+
+ /* init link context */
+ context->link = link;
+ context->sdev = sdev;
+ ipc_util_resume_link(context);
+ atomic_set(&context->gpio_configured, 0);
+ atomic_set(&context->state_int,
+ ipc_util_int_level_inactive(context));
+ spin_lock_init(&context->sm_lock);
+ context->state = ipc_sm_init_state(context);
+ ipc_util_spi_message_init(context, spi_tfr_complete);
+ init_timer(&context->comms_timer);
+ context->comms_timer.function = modem_comms_timeout;
+ context->comms_timer.data = (unsigned long)context;
+ init_timer(&context->slave_stable_timer);
+ context->slave_stable_timer.function = slave_stable_timeout;
+ context->slave_stable_timer.data = (unsigned long)context;
+
+ if (!ipc_util_link_gpio_request(context, slave_ready_irq))
+ return -ENODEV;
+ if (!ipc_util_link_gpio_config(context))
+ return -ENODEV;
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
+ context->last_frame = NULL;
+#endif
+
+ ipc_queue_init(context);
+ ipc_dbg_debugfs_link_init(context);
+ ipc_dbg_throughput_link_init(context);
+ ipc_create_netlink_socket(context);
+
+ /*
+ * For link0 (the handshake link) we force a state transition now so
+ * that it prepares for boot sync.
+ */
+ if (link->id == 0)
+ ipc_sm_kick(IPC_SM_RUN_INIT, context);
+
+ /*
+ * unlikely but possible: for links other than 0, check if handshake is
+ * already complete by the time this link is probed - if so we force a
+ * state transition since the one issued by the handshake exit actions
+ * will have been ignored.
+ */
+ if (link->id > 0 && atomic_read(&l1_context.boot_sync_done)) {
+ dev_dbg(&sdev->dev,
+ "link %d: boot sync is done, kicking state machine\n",
+ link->id);
+ ipc_sm_kick(IPC_SM_RUN_INIT, context);
+ }
+ return 0;
+}
+
+void modem_protocol_exit(void)
+{
+ int i;
+
+ pr_info("M6718 IPC protocol exit\n");
+ for (i = 0; i < IPC_NBR_SUPPORTED_SPI_LINKS; i++)
+ ipc_util_link_gpio_unconfig(&l1_context.device_context[i]);
+}
diff --git a/drivers/modem/m6718_spi/queue.c b/drivers/modem/m6718_spi/queue.c
new file mode 100644
index 00000000000..911d538ee82
--- /dev/null
+++ b/drivers/modem/m6718_spi/queue.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010,2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * U9500 <-> M6718 IPC protocol implementation using SPI:
+ * TX queue functionality.
+ */
+#include <linux/modem/m6718_spi/modem_driver.h>
+#include "modem_util.h"
+
+#define FRAME_LENGTH_ALIGN (4)
+#define MAX_FRAME_COUNTER (256)
+
+/* fixed L1 frame size for audio link: 4 byte L2 header + 664 byte L2 payload */
+#define FRAME_SIZE_AUDIO (668)
+
+void ipc_queue_init(struct ipc_link_context *context)
+{
+ spin_lock_init(&context->tx_q_update_lock);
+ atomic_set(&context->tx_q_count, 0);
+ context->tx_q_free = IPC_TX_QUEUE_MAX_SIZE;
+ INIT_LIST_HEAD(&context->tx_q);
+ context->tx_frame_counter = 0;
+}
+
+void ipc_queue_delete_frame(struct ipc_tx_queue *frame)
+{
+ kfree(frame);
+}
+
+struct ipc_tx_queue *ipc_queue_new_frame(struct ipc_link_context *link_context,
+ u32 l2_length)
+{
+ struct ipc_tx_queue *frame;
+ u32 padded_len = l2_length;
+
+ /* audio link frames are always a fixed size */
+ if (link_context->link->id == IPC_LINK_AUDIO) {
+ if (l2_length > FRAME_SIZE_AUDIO) {
+ dev_err(&link_context->sdev->dev,
+ "link %d error: invalid frame size %d "
+ "requested, max is %d\n",
+ link_context->link->id,
+ l2_length,
+ FRAME_SIZE_AUDIO);
+ return NULL;
+ }
+ padded_len = FRAME_SIZE_AUDIO;
+ } else {
+ /* frame length padded to alignment boundary */
+ if (padded_len % FRAME_LENGTH_ALIGN)
+ padded_len += (FRAME_LENGTH_ALIGN -
+ (padded_len % FRAME_LENGTH_ALIGN));
+ }
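+ /*
+ * Example: with FRAME_LENGTH_ALIGN of 4, an L2 length of 13 bytes is
+ * padded to 16 (13 % 4 = 1, so 4 - 1 = 3 padding bytes are added).
+ */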
+
+ dev_dbg(&link_context->sdev->dev,
+ "link %d: new frame: length %d, padded to %d\n",
+ link_context->link->id, l2_length, padded_len);
+
+ frame = kzalloc(sizeof(*frame) + padded_len, GFP_ATOMIC);
+ if (frame == NULL) {
+ dev_err(&link_context->sdev->dev,
+ "link %d error: failed to allocate frame\n",
+ link_context->link->id);
+ return NULL;
+ }
+
+ frame->actual_len = l2_length;
+ frame->len = padded_len;
+ frame->data = frame + 1;
+ return frame;
+}
+
+bool ipc_queue_is_empty(struct ipc_link_context *context)
+{
+ unsigned long flags;
+ bool empty;
+
+ spin_lock_irqsave(&context->tx_q_update_lock, flags);
+ empty = list_empty(&context->tx_q);
+ spin_unlock_irqrestore(&context->tx_q_update_lock, flags);
+
+ return empty;
+}
+
+int ipc_queue_push_frame(struct ipc_link_context *context, u8 channel,
+ u32 length, void *data)
+{
+ u32 l2_hdr;
+ unsigned long flags;
+ struct ipc_tx_queue *frame;
+ int *tx_frame_counter = &context->tx_frame_counter;
+ int qcount;
+
+ /*
+ * The max queue size is only approximate: the check uses the raw L2
+ * payload length while the queued frame also carries the L2 header and
+ * alignment padding, so the queue may go a few bytes over the limit.
+ */
+ if (context->tx_q_free < length) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: tx queue full, wanted %d free %d\n",
+ context->link->id,
+ length,
+ context->tx_q_free);
+ return -EAGAIN;
+ }
+
+ frame = ipc_queue_new_frame(context, length + IPC_L2_HDR_SIZE);
+ if (frame == NULL)
+ return -ENOMEM;
+
+ /* create l2 header and copy to pdu buffer */
+ l2_hdr = ipc_util_make_l2_header(channel, length);
+ *(u32 *)frame->data = l2_hdr;
+
+ /* copy the l2 sdu into the pdu buffer after the header */
+ memcpy(frame->data + IPC_L2_HDR_SIZE, data, length);
+
+ spin_lock_irqsave(&context->tx_q_update_lock, flags);
+ frame->counter = *tx_frame_counter;
+ *tx_frame_counter = (*tx_frame_counter + 1) % MAX_FRAME_COUNTER;
+ list_add_tail(&frame->node, &context->tx_q);
+ qcount = atomic_add_return(1, &context->tx_q_count);
+ /* tx_q_free could go negative here */
+ context->tx_q_free -= frame->len;
+#ifdef CONFIG_DEBUG_FS
+ context->tx_q_min = min(context->tx_q_free, context->tx_q_min);
+#endif
+ spin_unlock_irqrestore(&context->tx_q_update_lock, flags);
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: push tx frame %d: %08x (ch %d len %d), "
+ "new count %d, new free %d\n",
+ context->link->id,
+ frame->counter,
+ l2_hdr,
+ ipc_util_get_l2_channel(l2_hdr),
+ ipc_util_get_l2_length(l2_hdr),
+ qcount,
+ context->tx_q_free);
+ return 0;
+}
+
+struct ipc_tx_queue *ipc_queue_get_frame(struct ipc_link_context *context)
+{
+ unsigned long flags;
+ struct ipc_tx_queue *frame;
+ int qcount;
+
+ spin_lock_irqsave(&context->tx_q_update_lock, flags);
+ frame = list_first_entry(&context->tx_q, struct ipc_tx_queue, node);
+ list_del(&frame->node);
+ qcount = atomic_sub_return(1, &context->tx_q_count);
+ context->tx_q_free += frame->len;
+ spin_unlock_irqrestore(&context->tx_q_update_lock, flags);
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: get tx frame %d, new count %d, "
+ "new free %d\n",
+ context->link->id, frame->counter, qcount, context->tx_q_free);
+ return frame;
+}
+
+void ipc_queue_reset(struct ipc_link_context *context)
+{
+ unsigned long flags;
+ struct ipc_tx_queue *frame;
+ int qcount;
+
+ spin_lock_irqsave(&context->tx_q_update_lock, flags);
+ qcount = atomic_read(&context->tx_q_count);
+ while (qcount != 0) {
+ frame = list_first_entry(&context->tx_q,
+ struct ipc_tx_queue, node);
+ list_del(&frame->node);
+ ipc_queue_delete_frame(frame);
+ qcount = atomic_sub_return(1, &context->tx_q_count);
+ }
+ spin_unlock_irqrestore(&context->tx_q_update_lock, flags);
+}
diff --git a/drivers/modem/m6718_spi/statemachine.c b/drivers/modem/m6718_spi/statemachine.c
new file mode 100644
index 00000000000..a956661c3bf
--- /dev/null
+++ b/drivers/modem/m6718_spi/statemachine.c
@@ -0,0 +1,1406 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010,2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * U9500 <-> M6718 IPC protocol implementation using SPI.
+ * state machine definition and functionality.
+ */
+#include <linux/modem/m6718_spi/modem_driver.h>
+#include "modem_statemachine.h"
+#include "modem_util.h"
+#include "modem_netlink.h"
+#include "modem_debug.h"
+#include "modem_queue.h"
+#include "modem_protocol.h"
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
+#include "modem_state.h"
+#endif
+
+#define CMD_BOOTREQ (1)
+#define CMD_BOOTRESP (2)
+#define CMD_WRITE (3)
+#define CMD_READ (4)
+
+static u8 sm_init_enter(u8 event, struct ipc_link_context *context)
+{
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
+ /* if modem is off un-configure the IPC GPIO pins for low-power */
+ if (modem_state_get_state() == MODEM_STATE_OFF) {
+ dev_info(&context->sdev->dev,
+ "link %d: modem is off, un-configuring GPIO\n",
+ context->link->id);
+ ipc_util_link_gpio_unconfig(context);
+ }
+#endif
+ /* nothing more to do until an event happens */
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_init_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ bool int_active = false;
+
+ /*
+ * For reset event just re-enter init in case the modem has
+ * powered off - we need to reconfigure our GPIO pins
+ */
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_INIT);
+
+ /* re-sample link INT pin */
+ int_active = ipc_util_int_is_active(context);
+ atomic_set(&context->state_int, int_active);
+
+ dev_info(&context->sdev->dev,
+ "link %d: link initialised; SS:INACTIVE(%d) INT:%s(%d)\n",
+ context->link->id,
+ ipc_util_ss_level_inactive(context),
+ int_active ? "ACTIVE" : "INACTIVE",
+ int_active ? ipc_util_int_level_active(context) :
+ ipc_util_int_level_inactive(context));
+
+ /* handshake is only on link 0 */
+ if (context->link->id == 0) {
+ if (!int_active) {
+ dev_info(&context->sdev->dev,
+ "link %d: slave INT signal is inactive\n",
+ context->link->id);
+ /* start boot handshake */
+ return ipc_sm_state(IPC_SM_SLW_TX_BOOTREQ);
+ } else {
+ /* wait for slave INT signal to stabilise inactive */
+ return ipc_sm_state(IPC_SM_WAIT_SLAVE_STABLE);
+ }
+ } else {
+ dev_info(&context->sdev->dev,
+ "link %d: boot sync not needed, going idle\n",
+ context->link->id);
+ return ipc_sm_state(IPC_SM_IDL);
+ }
+}
+
+static const struct ipc_sm_state *sm_init_aud_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ bool int_active = false;
+
+ /*
+ * For reset event just re-enter init in case the modem has
+ * powered off - we need to reconfigure our GPIO pins
+ */
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_INIT_AUD);
+
+ /* re-sample link INT pin */
+ int_active = ipc_util_int_is_active(context);
+ atomic_set(&context->state_int, int_active);
+
+ dev_info(&context->sdev->dev,
+ "link %d: link initialised; SS:INACTIVE(%d) INT:%s(%d)\n",
+ context->link->id,
+ ipc_util_ss_level_inactive(context),
+ int_active ? "ACTIVE" : "INACTIVE",
+ int_active ? ipc_util_int_level_active(context) :
+ ipc_util_int_level_inactive(context));
+ dev_info(&context->sdev->dev,
+ "link %d: boot sync not needed, going idle\n",
+ context->link->id);
+ return ipc_sm_state(IPC_SM_IDL_AUD);
+}
+
+static u8 sm_wait_slave_stable_enter(u8 event, struct ipc_link_context *context)
+{
+ static unsigned long printk_warn_time;
+ if (printk_timed_ratelimit(&printk_warn_time, 60 * 1000))
+ dev_info(&context->sdev->dev,
+ "link %d: waiting for stable inactive slave INT\n",
+ context->link->id);
+ ipc_util_start_slave_stable_timer(context);
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_wait_slave_stable_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (!ipc_util_int_is_active(context)) {
+ dev_info(&context->sdev->dev,
+ "link %d: slave INT signal is stable inactive\n",
+ context->link->id);
+ return ipc_sm_state(IPC_SM_SLW_TX_BOOTREQ);
+ } else {
+ return ipc_sm_state(IPC_SM_WAIT_SLAVE_STABLE);
+ }
+}
+
+static u8 sm_wait_handshake_inactive_enter(u8 event,
+ struct ipc_link_context *context)
+{
+ dev_info(&context->sdev->dev,
+ "link %d: waiting for stable inactive slave INT\n",
+ context->link->id);
+ ipc_util_start_slave_stable_timer(context);
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_wait_handshake_inactive_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ int i;
+
+ if (!ipc_util_int_is_active(context)) {
+ dev_info(&context->sdev->dev,
+ "link %d: slave INT signal is inactive, going idle\n",
+ context->link->id);
+
+ /* modem sync is done */
+ atomic_inc(&l1_context.boot_sync_done);
+ ipc_broadcast_modem_online(context);
+
+ /*
+ * Kick the state machine for any initialised links - skip link0
+ * since this link has just completed handshake
+ */
+ for (i = 1; i < IPC_NBR_SUPPORTED_SPI_LINKS; i++)
+ if (l1_context.device_context[i].state != NULL) {
+ dev_dbg(&context->sdev->dev,
+ "link %d has already been probed, "
+ "kicking state machine\n", i);
+ ipc_sm_kick(IPC_SM_RUN_INIT,
+ &l1_context.device_context[i]);
+ }
+ return ipc_sm_state(IPC_SM_IDL);
+ } else {
+ return ipc_sm_state(IPC_SM_WAIT_HANDSHAKE_INACTIVE);
+ }
+}
+
+static u8 sm_idl_enter(u8 event, struct ipc_link_context *context)
+{
+ ipc_util_deactivate_ss(context);
+ ipc_dbg_enter_idle(context);
+
+ /* check if tx queue contains items */
+ if (atomic_read(&context->tx_q_count) > 0) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: tx queue contains items\n",
+ context->link->id);
+ return IPC_SM_RUN_TX_REQ;
+ }
+
+ /* check if modem has already requested transaction start */
+ if (atomic_read(&context->state_int)) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: slave has already signalled ready\n",
+ context->link->id);
+ return IPC_SM_RUN_SLAVE_IRQ;
+ }
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: going idle\n", context->link->id);
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_idl_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ ipc_dbg_exit_idle(context);
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET);
+ else if (event == IPC_SM_RUN_TX_REQ)
+ return ipc_sm_state(IPC_SM_SLW_TX_WR_CMD);
+ else if (event == IPC_SM_RUN_SLAVE_IRQ)
+ return ipc_sm_state(IPC_SM_SLW_TX_RD_CMD);
+ else
+ return ipc_sm_state(IPC_SM_HALT);
+}
+
+static const struct ipc_sm_state *sm_idl_aud_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ ipc_dbg_exit_idle(context);
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET_AUD);
+
+ /* always transmit data first */
+ return ipc_sm_state(IPC_SM_SLW_TX_WR_DAT_AUD);
+}
+
+static u8 sm_slw_tx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
+{
+ struct ipc_tx_queue *frame;
+
+ /* get the frame from the head of the tx queue */
+ if (ipc_queue_is_empty(context)) {
+ dev_err(&context->sdev->dev,
+ "link %d error: tx queue is empty!\n",
+ context->link->id);
+ return IPC_SM_RUN_ABORT;
+ }
+ frame = ipc_queue_get_frame(context);
+ ipc_dbg_dump_frame(&context->sdev->dev, context->link->id, frame, true);
+
+ context->cmd = ipc_util_make_l1_header(CMD_WRITE, frame->counter,
+ frame->len);
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: TX FRAME cmd %08x (type %d counter %d len %d)\n",
+ context->link->id,
+ context->cmd,
+ ipc_util_get_l1_cmd(context->cmd),
+ ipc_util_get_l1_counter(context->cmd),
+ ipc_util_get_l1_length(context->cmd));
+
+ ipc_util_spi_message_prepare(context, &context->cmd,
+ NULL, IPC_L1_HDR_SIZE);
+ context->frame = frame;
+
+ /* slave might already have signalled ready to transmit */
+ if (atomic_read(&context->state_int)) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: slave has already signalled ready\n",
+ context->link->id);
+ ipc_util_activate_ss(context);
+ return IPC_SM_RUN_SLAVE_IRQ;
+ } else {
+ ipc_util_activate_ss_with_tmo(context);
+ return IPC_SM_RUN_NONE;
+ }
+}
+
+static const struct ipc_sm_state *sm_slw_tx_wr_cmd_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET);
+ else if (event == IPC_SM_RUN_COMMS_TMO)
+ return ipc_sm_state(IPC_SM_HALT);
+ else
+ return ipc_sm_state(IPC_SM_ACT_TX_WR_CMD);
+}
+
+static u8 sm_act_tx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
+{
+ int err;
+
+ /* slave is ready - start the spi transfer */
+ dev_dbg(&context->sdev->dev,
+ "link %d: starting spi tfr\n", context->link->id);
+ err = spi_async(context->sdev, &context->spi_message);
+ if (err < 0) {
+ dev_err(&context->sdev->dev,
+ "link %d error: spi tfr start failed, error %d\n",
+ context->link->id, err);
+ return IPC_SM_RUN_ABORT;
+ }
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_act_tx_wr_cmd_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET);
+ else
+ return ipc_sm_state(IPC_SM_SLW_TX_WR_DAT);
+}
+
+static u8 sm_slw_tx_wr_dat_enter(u8 event, struct ipc_link_context *context)
+{
+ /* prepare to transfer the frame tx data */
+ ipc_util_spi_message_prepare(context, context->frame->data,
+ NULL, context->frame->len);
+
+ /* slave might already have signalled ready to transmit */
+ if (atomic_read(&context->state_int)) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: slave has already signalled ready\n",
+ context->link->id);
+ ipc_util_activate_ss(context);
+ return IPC_SM_RUN_SLAVE_IRQ;
+ } else {
+ ipc_util_activate_ss_with_tmo(context);
+ return IPC_SM_RUN_NONE;
+ }
+}
+
+static u8 sm_slw_tx_wr_dat_aud_enter(u8 event, struct ipc_link_context *context)
+{
+ struct ipc_tx_queue *frame = NULL;
+
+ /* check if there is a frame to be sent */
+ if (!ipc_queue_is_empty(context)) {
+ frame = ipc_queue_get_frame(context);
+ } else {
+ /* no frame to send, create an empty one */
+ dev_dbg(&context->sdev->dev,
+ "link %d: no frame to send, allocating dummy\n",
+ context->link->id);
+ frame = ipc_queue_new_frame(context, 0);
+ if (frame == NULL)
+ return IPC_SM_RUN_ABORT;
+ }
+
+ ipc_dbg_dump_frame(&context->sdev->dev, context->link->id, frame, true);
+
+ /* prepare to transfer the frame tx data */
+ context->frame = frame;
+ ipc_util_spi_message_prepare(context, context->frame->data,
+ NULL, context->frame->len);
+
+ /* slave might already have signalled ready to transmit */
+ if (event == IPC_SM_RUN_SLAVE_IRQ || atomic_read(&context->state_int)) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: slave has already signalled ready\n",
+ context->link->id);
+ ipc_util_activate_ss(context);
+ return IPC_SM_RUN_SLAVE_IRQ;
+ } else {
+ ipc_util_activate_ss_with_tmo(context);
+ return IPC_SM_RUN_NONE;
+ }
+}
+
+static const struct ipc_sm_state *sm_slw_tx_wr_dat_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET);
+ else if (event == IPC_SM_RUN_COMMS_TMO)
+ return ipc_sm_state(IPC_SM_HALT);
+ else
+ return ipc_sm_state(IPC_SM_ACT_TX_WR_DAT);
+}
+
+static const struct ipc_sm_state *sm_slw_tx_wr_dat_aud_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET_AUD);
+ else if (event == IPC_SM_RUN_COMMS_TMO)
+ return ipc_sm_state(IPC_SM_HALT_AUD);
+ else
+ return ipc_sm_state(IPC_SM_ACT_TX_WR_DAT_AUD);
+}
+
+static u8 sm_act_tx_wr_dat_enter(u8 event, struct ipc_link_context *context)
+{
+ int err;
+
+ /* slave is ready - start the spi transfer */
+ dev_dbg(&context->sdev->dev,
+ "link %d: starting spi tfr\n", context->link->id);
+ err = spi_async(context->sdev, &context->spi_message);
+ if (err < 0) {
+ dev_err(&context->sdev->dev,
+ "link %d error: spi tfr start failed, error %d\n",
+ context->link->id, err);
+ return IPC_SM_RUN_ABORT;
+ }
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_act_tx_wr_dat_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET);
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ /* frame is sent, increment link tx counter */
+ context->tx_bytes += context->frame->actual_len;
+#endif
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
+ {
+ u8 channel;
+
+ channel = ipc_util_get_l2_channel(*(u32 *)context->frame->data);
+ if (ipc_util_channel_is_loopback(channel)) {
+ context->last_frame = context->frame;
+ } else {
+ ipc_queue_delete_frame(context->frame);
+ context->frame = NULL;
+ }
+ }
+#else
+ /* free the sent frame */
+ ipc_queue_delete_frame(context->frame);
+ context->frame = NULL;
+#endif
+ return ipc_sm_state(IPC_SM_SLW_TX_RD_CMD);
+}
+
+static const struct ipc_sm_state *sm_act_tx_wr_dat_aud_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET_AUD);
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ /* frame is sent, increment link tx counter */
+ context->tx_bytes += context->frame->actual_len;
+#endif
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
+ {
+ u8 channel;
+
+ channel = ipc_util_get_l2_channel(*(u32 *)context->frame->data);
+ if (ipc_util_channel_is_loopback(channel)) {
+ /* create a copy of the frame */
+ context->last_frame = ipc_queue_new_frame(context,
+ context->frame->actual_len);
+ if (context->last_frame != NULL)
+ memcpy(context->last_frame->data,
+ context->frame->data,
+ context->frame->actual_len);
+ }
+ }
+#endif
+ return ipc_sm_state(IPC_SM_SLW_RX_WR_DAT_AUD);
+}
+
+static u8 sm_slw_tx_rd_cmd_enter(u8 event, struct ipc_link_context *context)
+{
+ context->cmd = ipc_util_make_l1_header(CMD_READ, 0, 0);
+ dev_dbg(&context->sdev->dev,
+ "link %d: cmd %08x (type %d)\n",
+ context->link->id,
+ context->cmd,
+ ipc_util_get_l1_cmd(context->cmd));
+
+ /* prepare the spi message to transfer */
+ ipc_util_spi_message_prepare(context, &context->cmd,
+ NULL, IPC_L1_HDR_SIZE);
+
+ /* check if the slave requested this transaction */
+ if (event == IPC_SM_RUN_SLAVE_IRQ) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: slave initiated transaction, continue\n",
+ context->link->id);
+ ipc_util_activate_ss(context);
+ return IPC_SM_RUN_SLAVE_IRQ;
+ } else {
+ /* slave might already have signalled ready to transmit */
+ if (atomic_read(&context->state_int)) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: slave has already signalled ready\n",
+ context->link->id);
+ ipc_util_activate_ss(context);
+ return IPC_SM_RUN_SLAVE_IRQ;
+ } else {
+ ipc_util_activate_ss_with_tmo(context);
+ return IPC_SM_RUN_NONE;
+ }
+ }
+}
+
+static const struct ipc_sm_state *sm_slw_tx_rd_cmd_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET);
+ else if (event == IPC_SM_RUN_COMMS_TMO)
+ return ipc_sm_state(IPC_SM_HALT);
+ else
+ return ipc_sm_state(IPC_SM_ACT_TX_RD_CMD);
+}
+
+static u8 sm_act_tx_rd_cmd_enter(u8 event, struct ipc_link_context *context)
+{
+ int err;
+
+ /* slave is ready - start the spi transfer */
+ dev_dbg(&context->sdev->dev,
+ "link %d: starting spi tfr\n", context->link->id);
+ err = spi_async(context->sdev, &context->spi_message);
+ if (err < 0) {
+ dev_err(&context->sdev->dev,
+ "link %d error: spi tfr start failed, error %d\n",
+ context->link->id, err);
+ return IPC_SM_RUN_ABORT;
+ }
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_act_tx_rd_cmd_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET);
+ else
+ return ipc_sm_state(IPC_SM_SLW_RX_WR_CMD);
+}
+
+static u8 sm_slw_rx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
+{
+ /* prepare to receive MESSAGE WRITE frame header */
+ ipc_util_spi_message_prepare(context, NULL,
+ &context->cmd, IPC_L1_HDR_SIZE);
+
+ /* slave might already have signalled ready to transmit */
+ if (atomic_read(&context->state_int)) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: slave has already signalled ready\n",
+ context->link->id);
+ ipc_util_activate_ss(context);
+ return IPC_SM_RUN_SLAVE_IRQ;
+ } else {
+ ipc_util_activate_ss_with_tmo(context);
+ return IPC_SM_RUN_NONE;
+ }
+}
+
+static const struct ipc_sm_state *sm_slw_rx_wr_cmd_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET);
+ else if (event == IPC_SM_RUN_COMMS_TMO)
+ return ipc_sm_state(IPC_SM_HALT);
+ else
+ return ipc_sm_state(IPC_SM_ACT_RX_WR_CMD);
+}
+
+static u8 sm_act_rx_wr_cmd_enter(u8 event, struct ipc_link_context *context)
+{
+ int err;
+
+ /* slave is ready - start the spi transfer */
+ dev_dbg(&context->sdev->dev,
+ "link %d: starting spi tfr\n", context->link->id);
+ err = spi_async(context->sdev, &context->spi_message);
+ if (err < 0) {
+ dev_err(&context->sdev->dev,
+ "link %d error: spi tfr start failed, error %d\n",
+ context->link->id, err);
+ return IPC_SM_RUN_ABORT;
+ }
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_act_rx_wr_cmd_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ u8 cmd_type = ipc_util_get_l1_cmd(context->cmd);
+ int counter = ipc_util_get_l1_counter(context->cmd);
+ int length = ipc_util_get_l1_length(context->cmd);
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: RX HEADER %08x (type %d counter %d length %d)\n",
+ context->link->id,
+ context->cmd,
+ cmd_type,
+ counter,
+ length);
+
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET);
+
+ if (cmd_type == CMD_WRITE) {
+ /* slave has data to send - allocate a frame to hold it */
+ context->frame = ipc_queue_new_frame(context, length);
+ if (context->frame == NULL)
+ return ipc_sm_state(IPC_SM_IDL);
+
+ context->frame->counter = counter;
+ ipc_util_spi_message_prepare(context, NULL,
+ context->frame->data, context->frame->len);
+ return ipc_sm_state(IPC_SM_ACT_RX_WR_DAT);
+ } else {
+ if (cmd_type != 0)
+ dev_err(&context->sdev->dev,
+ "link %d error: received invalid frame type %x "
+ "(%08x)! assuming TRANSACTION_END...\n",
+ context->link->id,
+ cmd_type,
+ context->cmd);
+
+ /* slave has no data to send */
+ dev_dbg(&context->sdev->dev,
+ "link %d: slave has no data to send\n",
+ context->link->id);
+ return ipc_sm_state(IPC_SM_IDL);
+ }
+}
+
+static u8 sm_slw_rx_wr_dat_aud_enter(u8 event, struct ipc_link_context *context)
+{
+ /*
+ * We're using the same frame buffer we just sent, so no need for a
+ * new allocation here, just prepare the spi message
+ */
+ ipc_util_spi_message_prepare(context, NULL,
+ context->frame->data, context->frame->len);
+
+ /* slave might already have signalled ready to transmit */
+ if (atomic_read(&context->state_int)) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: slave has already signalled ready\n",
+ context->link->id);
+ ipc_util_activate_ss(context);
+ return IPC_SM_RUN_SLAVE_IRQ;
+ } else {
+ ipc_util_activate_ss_with_tmo(context);
+ return IPC_SM_RUN_NONE;
+ }
+}
+
+static const struct ipc_sm_state *sm_slw_rx_wr_dat_aud_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET_AUD);
+ else if (event == IPC_SM_RUN_COMMS_TMO)
+ return ipc_sm_state(IPC_SM_HALT_AUD);
+ else
+ return ipc_sm_state(IPC_SM_ACT_RX_WR_DAT_AUD);
+}
+
+static u8 sm_act_rx_wr_dat_enter(u8 event, struct ipc_link_context *context)
+{
+ int err;
+
+ /* assume slave is still ready - prepare and start the spi transfer */
+ ipc_util_spi_message_prepare(context, NULL,
+ context->frame->data, context->frame->len);
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: starting spi tfr\n", context->link->id);
+ err = spi_async(context->sdev, &context->spi_message);
+ if (err < 0) {
+ dev_err(&context->sdev->dev,
+ "link %d error: spi tfr start failed, error %d\n",
+ context->link->id, err);
+ return IPC_SM_RUN_ABORT;
+ }
+ return IPC_SM_RUN_NONE;
+}
+
+static u8 sm_act_rx_wr_dat_aud_enter(u8 event, struct ipc_link_context *context)
+{
+ int err;
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: starting spi tfr\n", context->link->id);
+ err = spi_async(context->sdev, &context->spi_message);
+ if (err < 0) {
+ dev_err(&context->sdev->dev,
+ "link %d error: spi tfr start failed, error %d\n",
+ context->link->id, err);
+ return IPC_SM_RUN_ABORT;
+ }
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_act_rx_wr_dat_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ u32 frame_hdr;
+ unsigned char l2_header;
+ unsigned int l2_length;
+ u8 *l2_data;
+
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET);
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: RX PAYLOAD %d bytes\n",
+ context->link->id, context->frame->len);
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ /* frame is received, increment link rx counter */
+ context->rx_bytes += context->frame->len;
+#endif
+ /* decode L2 header */
+ frame_hdr = *(u32 *)context->frame->data;
+ l2_header = ipc_util_get_l2_channel(frame_hdr);
+ l2_length = ipc_util_get_l2_length(frame_hdr);
+ l2_data = (u8 *)context->frame->data + IPC_L2_HDR_SIZE;
+
+ context->frame->actual_len = l2_length + IPC_L2_HDR_SIZE;
+ ipc_dbg_dump_frame(&context->sdev->dev, context->link->id,
+ context->frame, false);
+
+ if (l2_length > (context->frame->len - 4)) {
+ dev_err(&context->sdev->dev,
+ "link %d: suspicious frame: L1 len %d L2 len %d\n",
+ context->link->id, context->frame->len, l2_length);
+ }
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: L2 PDU decode: header 0x%08x channel %d length %d "
+ "data[%02x%02x%02x...]\n",
+ context->link->id, frame_hdr, l2_header, l2_length,
+ l2_data[0], l2_data[1], l2_data[2]);
+
+ if (ipc_util_channel_is_loopback(l2_header))
+ ipc_dbg_verify_rx_frame(context);
+
+ /* pass received frame up to L2mux layer */
+ if (!modem_protocol_channel_is_open(l2_header)) {
+ dev_err(&context->sdev->dev,
+ "link %d error: received frame on invalid channel %d, "
+ "frame discarded\n",
+ context->link->id, l2_header);
+ } else {
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ /*
+ * Discard loopback frames if we are taking throughput
+ * measurements - we'll be loading the links and so will likely
+ * overload the buffers.
+ */
+ if (!ipc_util_channel_is_loopback(l2_header))
+#endif
+ modem_m6718_spi_receive(context->sdev,
+ l2_header, l2_length, l2_data);
+ }
+
+ /* data is copied by L2mux so free the frame here */
+ ipc_queue_delete_frame(context->frame);
+ context->frame = NULL;
+
+ /* check tx queue for content */
+ if (!ipc_queue_is_empty(context)) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: tx queue not empty\n", context->link->id);
+ return ipc_sm_state(IPC_SM_SLW_TX_WR_CMD);
+ } else {
+ dev_dbg(&context->sdev->dev,
+ "link %d: tx queue empty\n", context->link->id);
+ return ipc_sm_state(IPC_SM_SLW_TX_RD_CMD);
+ }
+}
+
+static const struct ipc_sm_state *sm_act_rx_wr_dat_aud_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ u32 frame_hdr;
+ unsigned char l2_header;
+ unsigned int l2_length;
+ u8 *l2_data;
+
+ if (event == IPC_SM_RUN_RESET)
+ return ipc_sm_state(IPC_SM_RESET_AUD);
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: RX PAYLOAD %d bytes\n",
+ context->link->id, context->frame->len);
+
+ /* decode L2 header */
+ frame_hdr = *(u32 *)context->frame->data;
+ l2_header = ipc_util_get_l2_channel(frame_hdr);
+ l2_length = ipc_util_get_l2_length(frame_hdr);
+ l2_data = (u8 *)context->frame->data + IPC_L2_HDR_SIZE;
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ /* frame is received, increment link rx counter */
+ context->rx_bytes += l2_length;
+#endif
+ if (frame_hdr != 0)
+ context->frame->actual_len = l2_length + IPC_L2_HDR_SIZE;
+ else
+ context->frame->actual_len = 0;
+ ipc_dbg_dump_frame(&context->sdev->dev, context->link->id,
+ context->frame, false);
+
+ if (l2_length > (context->frame->len - 4))
+ dev_err(&context->sdev->dev,
+ "link %d: suspicious frame: L1 len %d L2 len %d\n",
+ context->link->id, context->frame->len, l2_length);
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: L2 PDU decode: header 0x%08x channel %d length %d "
+ "data[%02x%02x%02x...]\n",
+ context->link->id, frame_hdr, l2_header, l2_length,
+ l2_data[0], l2_data[1], l2_data[2]);
+
+ if (ipc_util_channel_is_loopback(l2_header))
+ ipc_dbg_verify_rx_frame(context);
+
+ /* did the slave actually have anything to send? */
+ if (frame_hdr != 0) {
+ /* pass received frame up to L2mux layer */
+ if (!modem_protocol_channel_is_open(l2_header)) {
+ dev_err(&context->sdev->dev,
+ "link %d error: received frame on invalid "
+ "channel %d, frame discarded\n",
+ context->link->id, l2_header);
+ } else {
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_THROUGHPUT_MEASUREMENT
+ /*
+ * Discard loopback frames if we are taking throughput
+ * measurements - we'll be loading the links and so will
+ * likely overload the buffers.
+ */
+ if (!ipc_util_channel_is_loopback(l2_header))
+#endif
+ modem_m6718_spi_receive(context->sdev,
+ l2_header, l2_length, l2_data);
+ }
+ } else {
+ dev_dbg(&context->sdev->dev,
+ "link %d: received dummy frame, discarding\n",
+ context->link->id);
+ }
+
+ /* data is copied by L2mux so free the frame here */
+ ipc_queue_delete_frame(context->frame);
+ context->frame = NULL;
+
+ /* audio link goes idle ready for next transaction */
+ return ipc_sm_state(IPC_SM_IDL_AUD);
+}
+
+static u8 sm_halt_enter(u8 event, struct ipc_link_context *context)
+{
+ dev_err(&context->sdev->dev,
+ "link %d error: HALTED\n", context->link->id);
+
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_MODEM_STATE
+ /*
+ * Force modem reset, this will cause a reset event from the modemstate
+ * driver which will reset the links. If debugfs is enabled then there
+ * is a userspace file which controls whether MSR is enabled or not.
+ */
+#ifdef CONFIG_DEBUG_FS
+ if (l1_context.msr_disable) {
+ dev_info(&context->sdev->dev,
+ "link %d: MSR is disabled by user, "
+ "not requesting modem reset\n", context->link->id);
+ return IPC_SM_RUN_RESET;
+ }
+#endif
+ modem_state_force_reset();
+#endif
+ return IPC_SM_RUN_RESET;
+}
+
+static const struct ipc_sm_state *sm_halt_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ return ipc_sm_state(IPC_SM_RESET);
+}
+
+static const struct ipc_sm_state *sm_halt_aud_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ return ipc_sm_state(IPC_SM_RESET_AUD);
+}
+
+static u8 sm_reset_enter(u8 event, struct ipc_link_context *context)
+{
+ dev_err(&context->sdev->dev,
+ "link %d resetting\n", context->link->id);
+
+ if (context->link->id == 0)
+ ipc_broadcast_modem_reset(context);
+
+ ipc_util_deactivate_ss(context);
+ ipc_queue_reset(context);
+ if (context->frame != NULL) {
+ ipc_queue_delete_frame(context->frame);
+ context->frame = NULL;
+ }
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_VERIFY_FRAMES
+ if (context->last_frame != NULL) {
+ ipc_queue_delete_frame(context->last_frame);
+ context->last_frame = NULL;
+ }
+#endif
+ dev_dbg(&context->sdev->dev,
+ "link %d reset completed\n", context->link->id);
+
+ return IPC_SM_RUN_RESET;
+}
+
+static const struct ipc_sm_state *sm_reset_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ return ipc_sm_state(IPC_SM_INIT);
+}
+
+static const struct ipc_sm_state *sm_reset_aud_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ return ipc_sm_state(IPC_SM_INIT_AUD);
+}
+
+static u8 sm_slw_tx_bootreq_enter(u8 event, struct ipc_link_context *context)
+{
+ dev_info(&context->sdev->dev,
+ "link %d: waiting for boot sync\n", context->link->id);
+
+ ipc_util_activate_ss(context);
+ context->cmd = ipc_util_make_l1_header(CMD_BOOTREQ, 0,
+ IPC_DRIVER_VERSION);
+ dev_dbg(&context->sdev->dev,
+ "link %d: TX HEADER cmd %08x (type %x)\n",
+ context->link->id,
+ context->cmd,
+ ipc_util_get_l1_cmd(context->cmd));
+ ipc_util_spi_message_prepare(context, &context->cmd,
+ NULL, IPC_L1_HDR_SIZE);
+
+ /* wait now for the slave to indicate ready... */
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_slw_tx_bootreq_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ return ipc_sm_state(IPC_SM_ACT_TX_BOOTREQ);
+}
+
+static u8 sm_act_tx_bootreq_enter(u8 event, struct ipc_link_context *context)
+{
+ int err;
+
+ /* slave is ready - start the spi transfer */
+ dev_dbg(&context->sdev->dev,
+ "link %d: starting spi tfr\n", context->link->id);
+ err = spi_async(context->sdev, &context->spi_message);
+ if (err < 0) {
+ dev_err(&context->sdev->dev,
+ "link %d error: spi tfr start failed, error %d\n",
+ context->link->id, err);
+ return IPC_SM_RUN_ABORT;
+ }
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_act_tx_bootreq_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ return ipc_sm_state(IPC_SM_SLW_RX_BOOTRESP);
+}
+
+static u8 sm_slw_rx_bootresp_enter(u8 event, struct ipc_link_context *context)
+{
+ /* prepare to receive BOOTRESP frame header */
+ ipc_util_spi_message_prepare(context, NULL,
+ &context->cmd, IPC_L1_HDR_SIZE);
+
+ /* slave might already have signalled ready to transmit */
+ if (atomic_read(&context->state_int)) {
+ dev_dbg(&context->sdev->dev,
+ "link %d: slave has already signalled ready\n",
+ context->link->id);
+ ipc_util_activate_ss(context);
+ return IPC_SM_RUN_SLAVE_IRQ;
+ } else {
+ ipc_util_activate_ss_with_tmo(context);
+ return IPC_SM_RUN_NONE;
+ }
+}
+
+static const struct ipc_sm_state *sm_slw_rx_bootresp_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ if (event == IPC_SM_RUN_COMMS_TMO) {
+ /*
+ * Modem timeout: was it really ready or just noise?
+ * Revert to waiting for handshake to start.
+ */
+ ipc_util_deactivate_ss(context);
+ return ipc_sm_state(IPC_SM_SLW_TX_BOOTREQ);
+ } else {
+ return ipc_sm_state(IPC_SM_ACT_RX_BOOTRESP);
+ }
+}
+
+static u8 sm_act_rx_bootresp_enter(u8 event, struct ipc_link_context *context)
+{
+ int err;
+
+ /* slave is ready - start the spi transfer */
+ dev_dbg(&context->sdev->dev,
+ "link %d: starting spi tfr\n", context->link->id);
+ err = spi_async(context->sdev, &context->spi_message);
+ if (err < 0) {
+ dev_err(&context->sdev->dev,
+ "link %d error: spi tfr start failed, error %d\n",
+ context->link->id, err);
+ return IPC_SM_RUN_ABORT;
+ }
+ return IPC_SM_RUN_NONE;
+}
+
+static const struct ipc_sm_state *sm_act_rx_bootresp_exit(u8 event,
+ struct ipc_link_context *context)
+{
+ u8 cmd_type = ipc_util_get_l1_cmd(context->cmd);
+ u8 modem_ver;
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: RX HEADER %08x (type %d)\n",
+ context->link->id, context->cmd, cmd_type);
+
+ if (cmd_type == CMD_BOOTRESP) {
+ modem_ver = ipc_util_get_l1_bootresp_ver(context->cmd);
+
+ dev_info(&context->sdev->dev,
+ "link %d: boot sync done; "
+ "APE version %02x, MODEM version %02x\n",
+ context->link->id, IPC_DRIVER_VERSION, modem_ver);
+
+ /* check for minimum required modem version */
+ if (modem_ver < IPC_DRIVER_MODEM_MIN_VER) {
+ dev_warn(&context->sdev->dev,
+ "link %d warning: modem version mismatch! "
+ "minimum required version is %02x\n",
+ context->link->id,
+ IPC_DRIVER_MODEM_MIN_VER);
+ }
+
+ return ipc_sm_state(IPC_SM_WAIT_HANDSHAKE_INACTIVE);
+ } else {
+ /* invalid response... this is not our slave */
+ dev_err(&context->sdev->dev,
+ "link %d error: expected %x (BOOTRESP), received %x.\n",
+ context->link->id,
+ CMD_BOOTRESP,
+ cmd_type);
+ return ipc_sm_state(IPC_SM_HALT);
+ }
+}
+
+/* the driver protocol state machine */
+static const struct ipc_sm_state state_machine[IPC_SM_STATE_ID_NBR] = {
+ [IPC_SM_INIT] = {
+ .id = IPC_SM_INIT,
+ .enter = sm_init_enter,
+ .exit = sm_init_exit,
+ .events = IPC_SM_RUN_INIT | IPC_SM_RUN_RESET
+ },
+ [IPC_SM_HALT] = {
+ .id = IPC_SM_HALT,
+ .enter = sm_halt_enter,
+ .exit = sm_halt_exit,
+ .events = IPC_SM_RUN_RESET
+ },
+ [IPC_SM_RESET] = {
+ .id = IPC_SM_RESET,
+ .enter = sm_reset_enter,
+ .exit = sm_reset_exit,
+ .events = IPC_SM_RUN_RESET
+ },
+ [IPC_SM_WAIT_SLAVE_STABLE] = {
+ .id = IPC_SM_WAIT_SLAVE_STABLE,
+ .enter = sm_wait_slave_stable_enter,
+ .exit = sm_wait_slave_stable_exit,
+ .events = IPC_SM_RUN_STABLE_TMO
+ },
+ [IPC_SM_WAIT_HANDSHAKE_INACTIVE] = {
+ .id = IPC_SM_WAIT_HANDSHAKE_INACTIVE,
+ .enter = sm_wait_handshake_inactive_enter,
+ .exit = sm_wait_handshake_inactive_exit,
+ .events = IPC_SM_RUN_STABLE_TMO
+ },
+ [IPC_SM_SLW_TX_BOOTREQ] = {
+ .id = IPC_SM_SLW_TX_BOOTREQ,
+ .enter = sm_slw_tx_bootreq_enter,
+ .exit = sm_slw_tx_bootreq_exit,
+ .events = IPC_SM_RUN_SLAVE_IRQ
+ },
+ [IPC_SM_ACT_TX_BOOTREQ] = {
+ .id = IPC_SM_ACT_TX_BOOTREQ,
+ .enter = sm_act_tx_bootreq_enter,
+ .exit = sm_act_tx_bootreq_exit,
+ .events = IPC_SM_RUN_TFR_COMPLETE
+ },
+ [IPC_SM_SLW_RX_BOOTRESP] = {
+ .id = IPC_SM_SLW_RX_BOOTRESP,
+ .enter = sm_slw_rx_bootresp_enter,
+ .exit = sm_slw_rx_bootresp_exit,
+ .events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO
+ },
+ [IPC_SM_ACT_RX_BOOTRESP] = {
+ .id = IPC_SM_ACT_RX_BOOTRESP,
+ .enter = sm_act_rx_bootresp_enter,
+ .exit = sm_act_rx_bootresp_exit,
+ .events = IPC_SM_RUN_TFR_COMPLETE
+ },
+ [IPC_SM_IDL] = {
+ .id = IPC_SM_IDL,
+ .enter = sm_idl_enter,
+ .exit = sm_idl_exit,
+ .events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_TX_REQ |
+ IPC_SM_RUN_RESET
+ },
+ [IPC_SM_SLW_TX_WR_CMD] = {
+ .id = IPC_SM_SLW_TX_WR_CMD,
+ .enter = sm_slw_tx_wr_cmd_enter,
+ .exit = sm_slw_tx_wr_cmd_exit,
+ .events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
+ IPC_SM_RUN_RESET
+ },
+ [IPC_SM_ACT_TX_WR_CMD] = {
+ .id = IPC_SM_ACT_TX_WR_CMD,
+ .enter = sm_act_tx_wr_cmd_enter,
+ .exit = sm_act_tx_wr_cmd_exit,
+ .events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
+ },
+ [IPC_SM_SLW_TX_WR_DAT] = {
+ .id = IPC_SM_SLW_TX_WR_DAT,
+ .enter = sm_slw_tx_wr_dat_enter,
+ .exit = sm_slw_tx_wr_dat_exit,
+ .events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
+ IPC_SM_RUN_RESET
+ },
+ [IPC_SM_ACT_TX_WR_DAT] = {
+ .id = IPC_SM_ACT_TX_WR_DAT,
+ .enter = sm_act_tx_wr_dat_enter,
+ .exit = sm_act_tx_wr_dat_exit,
+ .events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
+ },
+ [IPC_SM_SLW_TX_RD_CMD] = {
+ .id = IPC_SM_SLW_TX_RD_CMD,
+ .enter = sm_slw_tx_rd_cmd_enter,
+ .exit = sm_slw_tx_rd_cmd_exit,
+ .events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
+ IPC_SM_RUN_RESET
+ },
+ [IPC_SM_ACT_TX_RD_CMD] = {
+ .id = IPC_SM_ACT_TX_RD_CMD,
+ .enter = sm_act_tx_rd_cmd_enter,
+ .exit = sm_act_tx_rd_cmd_exit,
+ .events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
+ },
+ [IPC_SM_SLW_RX_WR_CMD] = {
+ .id = IPC_SM_SLW_RX_WR_CMD,
+ .enter = sm_slw_rx_wr_cmd_enter,
+ .exit = sm_slw_rx_wr_cmd_exit,
+ .events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
+ IPC_SM_RUN_RESET
+ },
+ [IPC_SM_ACT_RX_WR_CMD] = {
+ .id = IPC_SM_ACT_RX_WR_CMD,
+ .enter = sm_act_rx_wr_cmd_enter,
+ .exit = sm_act_rx_wr_cmd_exit,
+ .events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
+ },
+ [IPC_SM_ACT_RX_WR_DAT] = {
+ .id = IPC_SM_ACT_RX_WR_DAT,
+ .enter = sm_act_rx_wr_dat_enter,
+ .exit = sm_act_rx_wr_dat_exit,
+ .events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
+ },
+ /* audio link states below */
+ [IPC_SM_INIT_AUD] = {
+ .id = IPC_SM_INIT_AUD,
+ .enter = sm_init_enter,
+ .exit = sm_init_aud_exit,
+ .events = IPC_SM_RUN_INIT | IPC_SM_RUN_RESET
+ },
+ [IPC_SM_HALT_AUD] = {
+ .id = IPC_SM_HALT_AUD,
+ .enter = sm_halt_enter,
+ .exit = sm_halt_aud_exit,
+ .events = IPC_SM_RUN_RESET
+ },
+ [IPC_SM_RESET_AUD] = {
+ .id = IPC_SM_RESET_AUD,
+ .enter = sm_reset_enter,
+ .exit = sm_reset_aud_exit,
+ .events = IPC_SM_RUN_RESET
+ },
+ [IPC_SM_IDL_AUD] = {
+ .id = IPC_SM_IDL_AUD,
+ .enter = sm_idl_enter,
+ .exit = sm_idl_aud_exit,
+ .events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_TX_REQ |
+ IPC_SM_RUN_RESET
+ },
+ [IPC_SM_SLW_TX_WR_DAT_AUD] = {
+ .id = IPC_SM_SLW_TX_WR_DAT_AUD,
+ .enter = sm_slw_tx_wr_dat_aud_enter,
+ .exit = sm_slw_tx_wr_dat_aud_exit,
+ .events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
+ IPC_SM_RUN_RESET
+ },
+ [IPC_SM_ACT_TX_WR_DAT_AUD] = {
+ .id = IPC_SM_ACT_TX_WR_DAT_AUD,
+ .enter = sm_act_tx_wr_dat_enter,
+ .exit = sm_act_tx_wr_dat_aud_exit,
+ .events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
+ },
+ [IPC_SM_SLW_RX_WR_DAT_AUD] = {
+ .id = IPC_SM_SLW_RX_WR_DAT_AUD,
+ .enter = sm_slw_rx_wr_dat_aud_enter,
+ .exit = sm_slw_rx_wr_dat_aud_exit,
+ .events = IPC_SM_RUN_SLAVE_IRQ | IPC_SM_RUN_COMMS_TMO |
+ IPC_SM_RUN_RESET
+ },
+ [IPC_SM_ACT_RX_WR_DAT_AUD] = {
+ .id = IPC_SM_ACT_RX_WR_DAT_AUD,
+ .enter = sm_act_rx_wr_dat_aud_enter,
+ .exit = sm_act_rx_wr_dat_aud_exit,
+ .events = IPC_SM_RUN_TFR_COMPLETE | IPC_SM_RUN_RESET
+ }
+};
+
+
+const struct ipc_sm_state *ipc_sm_idle_state(struct ipc_link_context *context)
+{
+ if (context->link->id == IPC_LINK_AUDIO)
+ return ipc_sm_state(IPC_SM_IDL_AUD);
+ else
+ return ipc_sm_state(IPC_SM_IDL);
+}
+
+const struct ipc_sm_state *ipc_sm_init_state(struct ipc_link_context *context)
+{
+ if (context->link->id == IPC_LINK_AUDIO)
+ return ipc_sm_state(IPC_SM_INIT_AUD);
+ else
+ return ipc_sm_state(IPC_SM_INIT);
+}
+
+const struct ipc_sm_state *ipc_sm_state(u8 id)
+{
+ BUG_ON(id >= IPC_SM_STATE_ID_NBR);
+ return &state_machine[id];
+}
+
+bool ipc_sm_valid_for_state(u8 event, const struct ipc_sm_state *state)
+{
+ return (state->events & event) == event;
+}
+
+static void state_machine_run(struct ipc_link_context *context, u8 event)
+{
+	struct modem_m6718_spi_link_platform_data *link;
+	struct spi_device *sdev;
+	const struct ipc_sm_state *cur_state;
+
+	/* sanity check before dereferencing the context */
+	if (context == NULL || context->link == NULL ||
+		context->state == NULL) {
+		pr_err("M6718 IPC protocol error: "
+			"inconsistent driver state, ignoring event\n");
+		return;
+	}
+	link = context->link;
+	sdev = context->sdev;
+	cur_state = context->state;
+
+ dev_dbg(&sdev->dev, "link %d: RUNNING in %s (%s)\n", link->id,
+ ipc_dbg_state_id(cur_state), ipc_dbg_event(event));
+
+ /* valid trigger event for current state? */
+ if (!ipc_sm_valid_for_state(event, cur_state)) {
+ dev_dbg(&sdev->dev,
+ "link %d: ignoring invalid event\n", link->id);
+ ipc_dbg_ignoring_event(context, event);
+ return;
+ }
+ ipc_dbg_handling_event(context, event);
+
+ /* run machine while state entry functions trigger new changes */
+ do {
+ if (event == IPC_SM_RUN_SLAVE_IRQ &&
+ !ipc_util_int_is_active(context)) {
+ dev_err(&sdev->dev,
+				"link %d error: slave is not ready! (%s)\n",
+ link->id,
+ ipc_dbg_state_id(cur_state));
+ }
+
+ if (event == IPC_SM_RUN_ABORT) {
+ dev_err(&sdev->dev,
+ "link %d error: abort event\n", link->id);
+ /* reset state to idle */
+ context->state = ipc_sm_idle_state(context);
+ break;
+ } else {
+ /* exit current state */
+ dev_dbg(&sdev->dev, "link %d: exit %s (%s)\n",
+ link->id, ipc_dbg_state_id(cur_state),
+ ipc_dbg_event(event));
+ cur_state = cur_state->exit(event, context);
+ context->state = cur_state;
+ }
+
+ /* reset state of slave irq to prepare for next event */
+ if (event == IPC_SM_RUN_SLAVE_IRQ)
+ atomic_set(&context->state_int, 0);
+
+ /* enter new state */
+ dev_dbg(&sdev->dev, "link %d: enter %s (%s)\n", link->id,
+ ipc_dbg_state_id(cur_state), ipc_dbg_event(event));
+ event = context->state->enter(event, context);
+ ipc_dbg_entering_state(context);
+ } while (event != IPC_SM_RUN_NONE);
+
+ dev_dbg(&sdev->dev, "link %d: STOPPED in %s\n", link->id,
+ ipc_dbg_state_id(cur_state));
+}
+
+void ipc_sm_kick(u8 event, struct ipc_link_context *context)
+{
+ unsigned long flags;
+ struct modem_m6718_spi_link_platform_data *link = context->link;
+ struct spi_device *sdev = context->sdev;
+ struct spi_message *msg = &context->spi_message;
+ u8 i;
+
+ spin_lock_irqsave(&context->sm_lock, flags);
+ switch (event) {
+ case IPC_SM_RUN_SLAVE_IRQ:
+ dev_dbg(&sdev->dev,
+ "link %d EVENT: slave-ready irq\n", link->id);
+ del_timer(&context->comms_timer);
+ atomic_set(&context->state_int,
+ ipc_util_int_is_active(context));
+ break;
+
+ case IPC_SM_RUN_TFR_COMPLETE:
+ dev_dbg(&sdev->dev,
+ "link %d EVENT: spi tfr complete (status %d len %d)\n",
+ link->id, msg->status, msg->actual_length);
+ ipc_dbg_dump_spi_tfr(context);
+ break;
+
+ case IPC_SM_RUN_COMMS_TMO:
+ {
+ char *statestr;
+ struct ipc_link_context *contexts = l1_context.device_context;
+
+ statestr = ipc_dbg_link_state_str(context);
+ dev_err(&sdev->dev,
+ "link %d EVENT: modem comms timeout (%s)!\n",
+ link->id, ipc_dbg_state_id(context->state));
+ if (statestr != NULL) {
+ dev_err(&sdev->dev, "%s", statestr);
+ kfree(statestr);
+ }
+
+ /* cancel all link timeout timers except this one */
+ for (i = 0; i < IPC_NBR_SUPPORTED_SPI_LINKS; i++)
+ if (contexts[i].link->id != link->id)
+ del_timer(&contexts[i].comms_timer);
+ break;
+ }
+
+ case IPC_SM_RUN_STABLE_TMO:
+ dev_dbg(&sdev->dev,
+ "link %d EVENT: slave-stable timeout\n", link->id);
+ break;
+
+ case IPC_SM_RUN_RESET:
+ dev_dbg(&sdev->dev,
+ "link %d EVENT: reset\n", link->id);
+ del_timer(&context->comms_timer);
+ break;
+
+ default:
+ break;
+ }
+
+ if (!ipc_util_link_is_suspended(context))
+ state_machine_run(context, event);
+ else
+ dev_dbg(&sdev->dev,
+ "link %d is suspended, waiting for resume\n", link->id);
+ spin_unlock_irqrestore(&context->sm_lock, flags);
+}
+
diff --git a/drivers/modem/m6718_spi/util.c b/drivers/modem/m6718_spi/util.c
new file mode 100644
index 00000000000..9c89eb9b34a
--- /dev/null
+++ b/drivers/modem/m6718_spi/util.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010,2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * U9500 <-> M6718 IPC protocol implementation using SPI:
+ * utility functions.
+ */
+#include <linux/gpio.h>
+#include <linux/modem/m6718_spi/modem_driver.h>
+#include "modem_util.h"
+
+#define MODEM_COMMS_TMO_MS (5000) /* 0 == no timeout */
+#define SLAVE_STABLE_TMO_MS (1000)
+
+#define DRIVER_NAME "ipcspi" /* name used when reserving gpio pins */
+
+
+bool ipc_util_channel_is_loopback(u8 channel)
+{
+ return channel == MODEM_M6718_SPI_CHN_MASTER_LOOPBACK0 ||
+ channel == MODEM_M6718_SPI_CHN_MASTER_LOOPBACK1;
+}
+
+u32 ipc_util_make_l2_header(u8 channel, u32 len)
+{
+ return ((channel & 0xf) << 28) | (len & 0x000fffff);
+}
+
+u8 ipc_util_get_l2_channel(u32 hdr)
+{
+ return hdr >> 28;
+}
+
+u32 ipc_util_get_l2_length(u32 hdr)
+{
+ return hdr & 0x000fffff;
+}
+
+u32 ipc_util_make_l1_header(u8 cmd, u8 counter, u32 len)
+{
+ return (cmd << 28) |
+ ((counter & 0x000000ff) << 20) |
+ (len & 0x000fffff);
+}
+
+u8 ipc_util_get_l1_cmd(u32 hdr)
+{
+ return hdr >> 28;
+}
+
+u8 ipc_util_get_l1_counter(u32 hdr)
+{
+ return (hdr >> 20) & 0x000000ff;
+}
+
+u32 ipc_util_get_l1_length(u32 hdr)
+{
+ return hdr & 0x000fffff;
+}
+
+u8 ipc_util_get_l1_bootresp_ver(u32 bootresp)
+{
+ return bootresp & 0x000000ff;
+}
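+
+/*
+ * Worked example (illustrative only, added for clarity): with the bit
+ * layout implemented above, an L2 header packs the channel into bits
+ * 31:28 and the length into bits 19:0, so
+ *	ipc_util_make_l2_header(2, 0x40)     == 0x20000040
+ *	ipc_util_get_l2_channel(0x20000040)  == 2
+ *	ipc_util_get_l2_length(0x20000040)   == 0x40
+ * Similarly an L1 header carries cmd in bits 31:28, counter in bits
+ * 27:20 and length in bits 19:0, so
+ *	ipc_util_make_l1_header(0x3, 0x12, 0x400) == 0x31200400
+ */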
+
+int ipc_util_ss_level_active(struct ipc_link_context *context)
+{
+ return context->link->gpio.ss_active == 0 ? 0 : 1;
+}
+
+int ipc_util_ss_level_inactive(struct ipc_link_context *context)
+{
+ return !ipc_util_ss_level_active(context);
+}
+
+int ipc_util_int_level_active(struct ipc_link_context *context)
+{
+ return context->link->gpio.int_active == 0 ? 0 : 1;
+}
+
+int ipc_util_int_level_inactive(struct ipc_link_context *context)
+{
+ return !ipc_util_int_level_active(context);
+}
+
+void ipc_util_deactivate_ss(struct ipc_link_context *context)
+{
+ gpio_set_value(context->link->gpio.ss_pin,
+ ipc_util_ss_level_inactive(context));
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: deactivated SS\n", context->link->id);
+}
+
+void ipc_util_activate_ss(struct ipc_link_context *context)
+{
+ gpio_set_value(context->link->gpio.ss_pin,
+ ipc_util_ss_level_active(context));
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: activated SS\n", context->link->id);
+}
+
+void ipc_util_activate_ss_with_tmo(struct ipc_link_context *context)
+{
+ gpio_set_value(context->link->gpio.ss_pin,
+ ipc_util_ss_level_active(context));
+
+#if MODEM_COMMS_TMO_MS == 0
+ dev_dbg(&context->sdev->dev,
+ "link %d: activated SS (timeout is disabled)\n",
+ context->link->id);
+#else
+ context->comms_timer.expires = jiffies +
+ ((MODEM_COMMS_TMO_MS * HZ) / 1000);
+ add_timer(&context->comms_timer);
+
+ dev_dbg(&context->sdev->dev,
+ "link %d: activated SS with timeout\n", context->link->id);
+#endif
+}
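+
+/*
+ * Note on the timeout arithmetic above (illustrative): the expression
+ * (MODEM_COMMS_TMO_MS * HZ) / 1000 converts milliseconds to jiffies, so
+ * with HZ = 100 the 5000 ms timeout becomes 500 ticks. The same result
+ * could be obtained with msecs_to_jiffies(MODEM_COMMS_TMO_MS).
+ */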
+
+bool ipc_util_int_is_active(struct ipc_link_context *context)
+{
+ return gpio_get_value(context->link->gpio.int_pin) ==
+ ipc_util_int_level_active(context);
+}
+
+bool ipc_util_link_is_idle(struct ipc_link_context *context)
+{
+ if (context->state == NULL)
+ return false;
+
+ switch (context->state->id) {
+ case IPC_SM_IDL:
+ case IPC_SM_IDL_AUD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void ipc_util_start_slave_stable_timer(struct ipc_link_context *context)
+{
+ context->slave_stable_timer.expires =
+ jiffies + ((SLAVE_STABLE_TMO_MS * HZ) / 1000);
+ add_timer(&context->slave_stable_timer);
+}
+
+void ipc_util_spi_message_prepare(struct ipc_link_context *link_context,
+ void *tx_buf, void *rx_buf, int len)
+{
+ struct spi_transfer *tfr = &link_context->spi_transfer;
+ struct spi_message *msg = &link_context->spi_message;
+
+ tfr->tx_buf = tx_buf;
+ tfr->rx_buf = rx_buf;
+ tfr->len = len;
+ msg->context = link_context;
+}
+
+void ipc_util_spi_message_init(struct ipc_link_context *link_context,
+ void (*complete)(void *))
+{
+ struct spi_message *msg = &link_context->spi_message;
+ struct spi_transfer *tfr = &link_context->spi_transfer;
+
+ tfr->bits_per_word = 16;
+
+ /* common init of transfer - use default from board device */
+ tfr->cs_change = 0;
+ tfr->speed_hz = 0;
+ tfr->delay_usecs = 0;
+
+ /* common init of message */
+ spi_message_init(msg);
+ msg->spi = link_context->sdev;
+ msg->complete = complete;
+ spi_message_add_tail(tfr, msg);
+}
+
+bool ipc_util_link_gpio_request(struct ipc_link_context *context,
+ irqreturn_t (*irqhnd)(int, void*))
+{
+ struct spi_device *sdev = context->sdev;
+ struct modem_m6718_spi_link_platform_data *link = context->link;
+ unsigned long irqflags;
+
+ if (gpio_request(link->gpio.ss_pin, DRIVER_NAME) < 0) {
+ dev_err(&sdev->dev,
+ "link %d error: failed to get gpio %d for SS pin\n",
+ link->id,
+ link->gpio.ss_pin);
+ return false;
+ }
+	if (gpio_request(link->gpio.int_pin, DRIVER_NAME) < 0) {
+		dev_err(&sdev->dev,
+			"link %d error: failed to get gpio %d for INT pin\n",
+			link->id,
+			link->gpio.int_pin);
+		gpio_free(link->gpio.ss_pin);
+		return false;
+	}
+
+ if (ipc_util_int_level_active(context) == 1)
+ irqflags = IRQF_TRIGGER_RISING;
+ else
+ irqflags = IRQF_TRIGGER_FALLING;
+
+	if (request_irq(GPIO_TO_IRQ(link->gpio.int_pin),
+			irqhnd,
+			irqflags,
+			DRIVER_NAME,
+			context) < 0) {
+		dev_err(&sdev->dev,
+			"link %d error: could not get irq %d\n",
+			link->id, GPIO_TO_IRQ(link->gpio.int_pin));
+		gpio_free(link->gpio.int_pin);
+		gpio_free(link->gpio.ss_pin);
+		return false;
+	}
+ return true;
+}
+
+bool ipc_util_link_gpio_config(struct ipc_link_context *context)
+{
+ struct spi_device *sdev = context->sdev;
+ struct modem_m6718_spi_link_platform_data *link = context->link;
+
+ if (atomic_read(&context->gpio_configured) == 1)
+ return true;
+
+ dev_dbg(&sdev->dev, "link %d: configuring GPIO\n", link->id);
+
+ ipc_util_deactivate_ss(context);
+ gpio_direction_input(link->gpio.int_pin);
+ if (enable_irq_wake(GPIO_TO_IRQ(link->gpio.int_pin)) < 0) {
+ dev_err(&sdev->dev,
+ "link %d error: failed to enable wake on INT\n",
+ link->id);
+ return false;
+ }
+
+ atomic_set(&context->state_int, gpio_get_value(link->gpio.int_pin));
+ atomic_set(&context->gpio_configured, 1);
+ return true;
+}
+
+bool ipc_util_link_gpio_unconfig(struct ipc_link_context *context)
+{
+ struct spi_device *sdev = context->sdev;
+ struct modem_m6718_spi_link_platform_data *link = context->link;
+
+ if (atomic_read(&context->gpio_configured) == 0)
+ return true;
+
+ dev_dbg(&sdev->dev, "link %d: un-configuring GPIO\n", link->id);
+
+ /* SS: output anyway, just make sure it is low */
+ gpio_set_value(link->gpio.ss_pin, 0);
+
+ /* INT: disable system-wake, reconfigure as output-low */
+ disable_irq_wake(GPIO_TO_IRQ(link->gpio.int_pin));
+ gpio_direction_output(link->gpio.int_pin, 0);
+ atomic_set(&context->gpio_configured, 0);
+ return true;
+}
+
+bool ipc_util_link_is_suspended(struct ipc_link_context *context)
+{
+ return atomic_read(&context->suspended) == 1;
+}
+
+void ipc_util_suspend_link(struct ipc_link_context *context)
+{
+ atomic_set(&context->suspended, 1);
+}
+
+void ipc_util_resume_link(struct ipc_link_context *context)
+{
+ atomic_set(&context->suspended, 0);
+}
diff --git a/drivers/modem/mcdd.c b/drivers/modem/mcdd.c
new file mode 100644
index 00000000000..d291944e810
--- /dev/null
+++ b/drivers/modem/mcdd.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Modem Crash Detection Driver
+ *
+ * Author: Bibek Basu <bibek.basu@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#define MCDD_INTERRUPT_CLEAR (1 << 13)
+#define MODEM_CRASH_EVT 1
+
+struct mcdd_data {
+ bool modem_event;
+ u32 event_type;
+ wait_queue_head_t readq;
+ spinlock_t lock;
+ void __iomem *remap_intcon;
+ struct device *dev;
+ struct miscdevice misc_dev;
+};
+
+static struct mcdd_data *mcdd;
+
+static irqreturn_t mcdd_interrupt_cb(int irq, void *dev)
+{
+	writel(MCDD_INTERRUPT_CLEAR, mcdd->remap_intcon);
+ spin_lock(&mcdd->lock);
+ mcdd->modem_event = true;
+ mcdd->event_type = MODEM_CRASH_EVT;
+ spin_unlock(&mcdd->lock);
+ wake_up_interruptible(&mcdd->readq);
+ return IRQ_HANDLED;
+}
+
+static unsigned int mcdd_select(struct file *filp, poll_table *wait)
+{
+ unsigned int mask = 0;
+ unsigned long flags;
+
+ poll_wait(filp, &mcdd->readq, wait);
+ spin_lock_irqsave(&mcdd->lock, flags);
+
+ if (mcdd->modem_event == true) {
+ mask |= POLLPRI;
+ mcdd->modem_event = false;
+ }
+ spin_unlock_irqrestore(&mcdd->lock, flags);
+
+ return mask;
+}
+
+static int mcdd_open(struct inode *ino, struct file *filp)
+{
+ /* Do nothing */
+ return 0;
+}
+
+static ssize_t mcdd_read(struct file *filp, char __user *buff, size_t size,
+			 loff_t *t)
+{
+	/* clamp to the size of the event word to avoid copying beyond it */
+	if (size > sizeof(mcdd->event_type))
+		size = sizeof(mcdd->event_type);
+	if (copy_to_user(buff, &mcdd->event_type, size))
+		return -EFAULT;
+	return size;
+}
+
+static const struct file_operations mcdd_fops = {
+ .open = mcdd_open,
+ .poll = mcdd_select,
+ .read = mcdd_read,
+ .owner = THIS_MODULE,
+};
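+
+/*
+ * Illustrative userspace sketch (an assumption, not part of this patch):
+ * the misc device registers under the name "mcdd", so with default udev
+ * rules it is expected to appear as /dev/mcdd (the path is an assumption).
+ * A monitor process could wait for a crash notification roughly like this:
+ *
+ *	int fd = open("/dev/mcdd", O_RDONLY);
+ *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
+ *	uint32_t evt;
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
+ *		if (read(fd, &evt, sizeof(evt)) >= 0 && evt == 1)
+ *			;  // MODEM_CRASH_EVT reported
+ */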
+
+static int __devinit u5500_mcdd_probe(struct platform_device *pdev)
+{
+ struct resource *resource;
+ int ret = 0;
+ int irq;
+
+ mcdd = kzalloc(sizeof(*mcdd), GFP_KERNEL);
+ if (!mcdd) {
+		dev_err(&pdev->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ mcdd->dev = &pdev->dev;
+ mcdd->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ mcdd->misc_dev.name = "mcdd";
+ mcdd->misc_dev.fops = &mcdd_fops;
+ spin_lock_init(&mcdd->lock);
+ init_waitqueue_head(&(mcdd->readq));
+
+ /* Get addr for mcdd crash interrupt reset register and ioremap it */
+ resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "mcdd_intreset_addr");
+	if (resource == NULL) {
+		dev_err(&pdev->dev,
+			"Unable to retrieve mcdd_intreset_addr resource\n");
+		ret = -ENODEV;
+		goto exit_free;
+	}
+ mcdd->remap_intcon = ioremap(resource->start, resource_size(resource));
+ if (!mcdd->remap_intcon) {
+ dev_err(&pdev->dev, "Unable to ioremap intcon mbox1\n");
+ ret = -EINVAL;
+ goto exit_free;
+ }
+
+ /* Get IRQ for mcdd mbox interrupt and allocate it */
+ irq = platform_get_irq_byname(pdev, "mcdd_mbox_irq");
+	if (irq < 0) {
+		dev_err(&pdev->dev,
+			"Unable to retrieve mcdd mbox irq resource\n");
+		ret = irq;
+		goto exit_unmap;
+	}
+
+ ret = request_threaded_irq(irq, NULL,
+ mcdd_interrupt_cb, IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ "mcdd", &mcdd);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Could not allocate irq %d,error %d\n",
+ irq, ret);
+ goto exit_unmap;
+ }
+
+ ret = misc_register(&mcdd->misc_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "can't misc-register\n");
+ goto exit_unmap;
+ }
+ dev_info(&pdev->dev, "mcdd driver registration done\n");
+ return 0;
+
+exit_unmap:
+ iounmap(mcdd->remap_intcon);
+exit_free:
+ kfree(mcdd);
+ return ret;
+}
+
+static int u5500_mcdd_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ if (mcdd) {
+ iounmap(mcdd->remap_intcon);
+ ret = misc_deregister(&mcdd->misc_dev);
+ kfree(mcdd);
+ }
+ return ret;
+}
+
+static struct platform_driver u5500_mcdd_driver = {
+ .driver = {
+ .name = "u5500-mcdd-modem",
+ .owner = THIS_MODULE,
+ },
+ .probe = u5500_mcdd_probe,
+ .remove = __devexit_p(u5500_mcdd_remove),
+};
+
+static int __init mcdd_init(void)
+{
+ return platform_driver_register(&u5500_mcdd_driver);
+}
+module_init(mcdd_init);
+
+static void __exit mcdd_exit(void)
+{
+ platform_driver_unregister(&u5500_mcdd_driver);
+}
+module_exit(mcdd_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
+MODULE_DESCRIPTION("Modem Dump Detection Driver");
+MODULE_ALIAS("mcdd driver");
diff --git a/drivers/modem/modem_access.c b/drivers/modem/modem_access.c
new file mode 100644
index 00000000000..2bd32957ae2
--- /dev/null
+++ b/drivers/modem/modem_access.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
+ *
+ * Heavily adapted from Regulator framework.
+ * Provides mechanisms for registering platform specific access
+ * mechanisms for modem.
+ * Also exposes APIs for getting/releasing the access and for
+ * querying the access status and the modem usage status.
+ */
+#include <linux/module.h>
+#include <linux/modem/modem.h>
+#include <linux/modem/modem_client.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+static DEFINE_MUTEX(modem_list_mutex);
+static LIST_HEAD(modem_list);
+
+struct modem {
+ struct device *dev;
+ struct list_head list;
+ char *modem_name;
+ struct device_attribute dev_attr;
+ struct modem_dev *mdev;
+ atomic_t use;
+};
+
+static const char *mdev_get_name(struct modem_dev *mdev)
+{
+ if (mdev->desc->name)
+ return mdev->desc->name;
+ else
+ return "";
+}
+
+static int _modem_is_requested(struct modem_dev *mdev)
+{
+ /* If we don't know then assume that the modem is always on */
+	/* if the platform cannot tell us, report the modem as not requested */
+ return 0;
+
+ return mdev->desc->ops->is_requested(mdev);
+}
+
+/**
+ * modem_is_requested - check if modem access is requested
+ * @modem: modem device
+ *
+ * Checks whether modem is accessed or not by querying
+ * the underlying platform specific modem access
+ * implementation.
+ */
+int modem_is_requested(struct modem *modem)
+{
+ int ret;
+
+ mutex_lock(&modem->mdev->mutex);
+ ret = _modem_is_requested(modem->mdev);
+ mutex_unlock(&modem->mdev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(modem_is_requested);
+
+static int _modem_request(struct modem_dev *mdev)
+{
+ int ret;
+
+ if (++mdev->use_count == 1) {
+ ret = _modem_is_requested(mdev);
+ if (ret == 0)
+ mdev->desc->ops->request(mdev);
+ }
+
+ return 0;
+}
+
+/**
+ * modem_request - request access to the modem
+ * @modem: modem device
+ *
+ * Requests access to the modem. A per-client flag ensures that each
+ * client's request is counted only once towards the modem use count.
+ */
+void modem_request(struct modem *modem)
+{
+ struct modem_dev *mdev = modem->mdev;
+ int ret = 0;
+
+ mutex_lock(&mdev->mutex);
+ if (atomic_read(&modem->use) == 1) {
+ mutex_unlock(&mdev->mutex);
+ return;
+ }
+ ret = _modem_request(mdev);
+ if (ret == 0)
+ atomic_set(&modem->use, 1);
+ mutex_unlock(&mdev->mutex);
+}
+EXPORT_SYMBOL(modem_request);
+
+static int _modem_release(struct modem_dev *mdev)
+{
+ if (WARN(mdev->use_count <= 0,
+ "unbalanced releases for %s\n",
+ mdev_get_name(mdev)))
+ return -EIO;
+
+ if (--mdev->use_count == 0)
+ mdev->desc->ops->release(mdev);
+
+ return 0;
+}
+
+/**
+ * modem_release - Release access to modem
+ * @modem: modem device
+ *
+ * Releases access to the modem. A per-client flag ensures that each
+ * client's release is counted only once against the modem use count.
+ */
+void modem_release(struct modem *modem)
+{
+ struct modem_dev *mdev = modem->mdev;
+ int ret = 0;
+
+ mutex_lock(&mdev->mutex);
+ if (atomic_read(&modem->use) == 0) {
+ mutex_unlock(&mdev->mutex);
+ return;
+ }
+ ret = _modem_release(mdev);
+ if (ret == 0)
+ atomic_set(&modem->use, 0);
+ mutex_unlock(&mdev->mutex);
+}
+EXPORT_SYMBOL(modem_release);
+
+/**
+ * modem_get_usage - Check if particular client is using modem
+ * @modem: modem device
+ *
+ * Checks whether the particular client is using access to modem.
+ * This API could be used by client drivers in making their
+ * suspend decisions.
+ */
+int modem_get_usage(struct modem *modem)
+{
+ return atomic_read(&modem->use);
+}
+EXPORT_SYMBOL(modem_get_usage);
+
+static struct modem *create_modem(struct modem_dev *mdev,
+ struct device *dev,
+ const char *id)
+{
+ struct modem *modem;
+
+ modem = kzalloc(sizeof(*modem), GFP_KERNEL);
+ if (modem == NULL)
+ return NULL;
+
+ mutex_lock(&mdev->mutex);
+ modem->mdev = mdev;
+ modem->dev = dev;
+ list_add(&modem->list, &mdev->client_list);
+
+ mutex_unlock(&mdev->mutex);
+ return modem;
+
+}
+
+static struct modem *_modem_get(struct device *dev, const char *id,
+ int exclusive)
+{
+ struct modem_dev *mdev_ptr;
+ struct modem *modem = ERR_PTR(-ENODEV);
+ int ret;
+
+ if (id == NULL) {
+ pr_err("modem_get with no identifier\n");
+ return modem;
+ }
+
+ mutex_lock(&modem_list_mutex);
+ list_for_each_entry(mdev_ptr, &modem_list, modem_list) {
+ if (strcmp(mdev_get_name(mdev_ptr), id) == 0)
+ goto found;
+ }
+
+ goto out;
+
+found:
+ if (!try_module_get(mdev_ptr->owner))
+ goto out;
+
+ modem = create_modem(mdev_ptr, dev, id);
+ if (modem == NULL) {
+ modem = ERR_PTR(-ENOMEM);
+ module_put(mdev_ptr->owner);
+ }
+
+ mdev_ptr->open_count++;
+ ret = _modem_is_requested(mdev_ptr);
+ if (ret)
+ mdev_ptr->use_count = 1;
+ else
+ mdev_ptr->use_count = 0;
+
+out:
+ mutex_unlock(&modem_list_mutex);
+ return modem;
+
+}
+
+/**
+ * modem_get - Get reference to a particular platform specific modem
+ * @dev: device
+ * @id: modem device name
+ *
+ * Get reference to a particular modem device.
+ */
+struct modem *modem_get(struct device *dev, const char *id)
+{
+ return _modem_get(dev, id, 0);
+}
+EXPORT_SYMBOL(modem_get);
+
+/**
+ * modem_put - Release reference to a modem device
+ * @modem: modem device
+ *
+ * Release reference to a modem device.
+ */
+void modem_put(struct modem *modem)
+{
+ struct modem_dev *mdev;
+
+ if (modem == NULL || IS_ERR(modem))
+ return;
+
+ mutex_lock(&modem_list_mutex);
+ mdev = modem->mdev;
+
+ list_del(&modem->list);
+ kfree(modem);
+
+ mdev->open_count--;
+
+ module_put(mdev->owner);
+ mutex_unlock(&modem_list_mutex);
+}
+EXPORT_SYMBOL(modem_put);
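+
+/*
+ * Minimal usage sketch (illustrative only, added for documentation): a
+ * client driver is expected to combine the calls above roughly as below.
+ * The name string must match the modem_desc->name used at registration;
+ * "u8500-shrm-modem" is used here only as an example.
+ *
+ *	struct modem *modem = modem_get(dev, "u8500-shrm-modem");
+ *
+ *	if (IS_ERR(modem))
+ *		return PTR_ERR(modem);
+ *	modem_request(modem);	// vote for modem access before a transfer
+ *	// ... perform the transfer ...
+ *	modem_release(modem);	// drop this client's vote
+ *	modem_put(modem);	// release the reference
+ */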
+
+static ssize_t modem_print_state(char *buf, int state)
+{
+ if (state > 0)
+ return sprintf(buf, "accessed\n");
+ else if (state == 0)
+ return sprintf(buf, "released\n");
+ else
+ return sprintf(buf, "unknown\n");
+}
+
+static ssize_t modem_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct modem_dev *mdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ mutex_lock(&mdev->mutex);
+ ret = modem_print_state(buf, _modem_is_requested(mdev));
+ mutex_unlock(&mdev->mutex);
+
+ return ret;
+}
+static DEVICE_ATTR(state, 0444, modem_state_show, NULL);
+
+static ssize_t modem_use_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct modem_dev *mdev = dev_get_drvdata(dev);
+ struct modem *mod;
+ size_t size = 0;
+
+ list_for_each_entry(mod, &mdev->client_list, list) {
+ if (mod->dev != NULL)
+ size += sprintf((buf + size), "%s (%d)\n",
+ dev_name(mod->dev), atomic_read(&mod->use));
+ else
+ size += sprintf((buf + size), "unknown (%d)\n",
+ atomic_read(&mod->use));
+ }
+ size += sprintf((buf + size), "\n");
+
+ return size;
+}
+static DEVICE_ATTR(use, 0444, modem_use_show, NULL);
+
+static ssize_t modem_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct modem_dev *mdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", mdev_get_name(mdev));
+}
+static DEVICE_ATTR(name, 0444, modem_name_show, NULL);
+
+static ssize_t modem_num_active_users_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct modem_dev *mdev = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", mdev->use_count);
+}
+static DEVICE_ATTR(num_active_users, 0444, modem_num_active_users_show, NULL);
+
+static int add_modem_attributes(struct modem_dev *mdev)
+{
+ struct device *dev = &mdev->dev;
+ struct modem_ops *ops = mdev->desc->ops;
+ int status = 0;
+
+ status = device_create_file(dev, &dev_attr_use);
+ if (status < 0)
+ return status;
+
+ status = device_create_file(dev, &dev_attr_name);
+ if (status < 0)
+ return status;
+
+ status = device_create_file(dev, &dev_attr_num_active_users);
+ if (status < 0)
+ return status;
+
+ if (ops->is_requested) {
+ status = device_create_file(dev, &dev_attr_state);
+ if (status < 0)
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * modem_register - register a modem
+ * @modem_desc: description of the modem
+ * @dev: parent device
+ * @driver_data: driver specific data
+ *
+ * Registers a modem with the modem access framework so that it can
+ * be used by client drivers to access the modem.
+ */
+struct modem_dev *modem_register(struct modem_desc *modem_desc,
+ struct device *dev,
+ void *driver_data)
+{
+ static atomic_t modem_no = ATOMIC_INIT(0);
+ struct modem_dev *mdev;
+ int ret;
+
+ if (modem_desc == NULL)
+ return ERR_PTR(-EINVAL);
+
+ if (modem_desc->name == NULL || modem_desc->ops == NULL)
+ return ERR_PTR(-EINVAL);
+
+ mdev = kzalloc(sizeof(struct modem_dev), GFP_KERNEL);
+ if (mdev == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&modem_list_mutex);
+
+ mutex_init(&mdev->mutex);
+ mdev->modem_data = driver_data;
+ mdev->owner = modem_desc->owner;
+ mdev->desc = modem_desc;
+ INIT_LIST_HEAD(&mdev->client_list);
+ INIT_LIST_HEAD(&mdev->modem_list);
+ BLOCKING_INIT_NOTIFIER_HEAD(&mdev->notifier);
+
+ /* mdev->dev.class = &modem_class;*/
+ mdev->dev.parent = dev;
+ dev_set_name(&mdev->dev, "modem.%d", atomic_inc_return(&modem_no) - 1);
+ ret = device_register(&mdev->dev);
+ if (ret != 0)
+ goto clean;
+
+ dev_set_drvdata(&mdev->dev, mdev);
+
+ ret = add_modem_attributes(mdev);
+ if (ret < 0)
+ goto backoff;
+
+ list_add(&mdev->modem_list, &modem_list);
+
+out:
+ mutex_unlock(&modem_list_mutex);
+ return mdev;
+
+backoff:
+ device_unregister(&mdev->dev);
+ mdev = ERR_PTR(ret);
+ goto out;
+
+clean:
+ kfree(mdev);
+ mdev = ERR_PTR(ret);
+ goto out;
+}
+EXPORT_SYMBOL(modem_register);
diff --git a/drivers/modem/modem_m6718.c b/drivers/modem/modem_m6718.c
new file mode 100644
index 00000000000..5e457c16003
--- /dev/null
+++ b/drivers/modem/modem_m6718.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Chris Blair <chris.blair@stericsson.com>
+ * based on modem_u8500.c
+ *
+ * Platform driver implementing access mechanisms to the M6718 modem.
+ */
+#include <linux/modem/modem.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+
+static void modem_m6718_request(struct modem_dev *mdev)
+{
+ /* nothing to do - modem will wake when data is sent */
+}
+
+static void modem_m6718_release(struct modem_dev *mdev)
+{
+ /* nothing to do - modem does not need to be requested/released */
+}
+
+static int modem_m6718_is_requested(struct modem_dev *mdev)
+{
+ return 0;
+}
+
+static struct modem_ops modem_m6718_ops = {
+ .request = modem_m6718_request,
+ .release = modem_m6718_release,
+ .is_requested = modem_m6718_is_requested,
+};
+
+static struct modem_desc modem_m6718_desc = {
+ .name = "m6718",
+ .id = 0,
+ .ops = &modem_m6718_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __devinit modem_m6718_probe(struct platform_device *pdev)
+{
+ struct modem_dev *mdev;
+ int err;
+
+ mdev = modem_register(&modem_m6718_desc, &pdev->dev,
+ NULL);
+	if (IS_ERR(mdev)) {
+		err = PTR_ERR(mdev);
+		dev_err(&pdev->dev, "failed to register %s: err %i\n",
+			modem_m6718_desc.name, err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int __devexit modem_m6718_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver modem_m6718_driver = {
+ .driver = {
+ .name = "modem-m6718",
+ .owner = THIS_MODULE,
+ },
+ .probe = modem_m6718_probe,
+ .remove = __devexit_p(modem_m6718_remove),
+};
+
+static int __init modem_m6718_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&modem_m6718_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "modem_m6718: platform driver reg failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit modem_m6718_exit(void)
+{
+ platform_driver_unregister(&modem_m6718_driver);
+}
+
+module_init(modem_m6718_init);
+module_exit(modem_m6718_exit);
+
+MODULE_AUTHOR("Chris Blair <chris.blair@stericsson.com>");
+MODULE_DESCRIPTION("M6718 modem access driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/modem/modem_u8500.c b/drivers/modem/modem_u8500.c
new file mode 100644
index 00000000000..39951995e8e
--- /dev/null
+++ b/drivers/modem/modem_u8500.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
+ *
+ * Platform driver implementing access mechanisms to the modem
+ * on U8500, which uses Shared Memory as the IPC between the Application
+ * Processor and the Modem processor.
+ */
+#include <linux/module.h>
+#include <linux/modem/modem.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+static void u8500_modem_request(struct modem_dev *mdev)
+{
+ prcmu_ac_wake_req();
+}
+
+static void u8500_modem_release(struct modem_dev *mdev)
+{
+ prcmu_ac_sleep_req();
+}
+
+static int u8500_modem_is_requested(struct modem_dev *mdev)
+{
+ return prcmu_is_ac_wake_requested();
+}
+
+static struct modem_ops u8500_modem_ops = {
+ .request = u8500_modem_request,
+ .release = u8500_modem_release,
+ .is_requested = u8500_modem_is_requested,
+};
+
+static struct modem_desc u8500_modem_desc = {
+ .name = "u8500-shrm-modem",
+ .id = 0,
+ .ops = &u8500_modem_ops,
+ .owner = THIS_MODULE,
+};
+
+
+static int __devinit u8500_modem_probe(struct platform_device *pdev)
+{
+ struct modem_dev *mdev;
+ int err;
+
+ mdev = modem_register(&u8500_modem_desc, &pdev->dev,
+ NULL);
+	if (IS_ERR(mdev)) {
+		err = PTR_ERR(mdev);
+		pr_err("failed to register %s: err %i\n",
+			u8500_modem_desc.name, err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int __devexit u8500_modem_remove(struct platform_device *pdev)
+{
+
+ return 0;
+}
+
+static struct platform_driver u8500_modem_driver = {
+ .driver = {
+ .name = "u8500-modem",
+ .owner = THIS_MODULE,
+ },
+ .probe = u8500_modem_probe,
+ .remove = __devexit_p(u8500_modem_remove),
+};
+
+static int __init u8500_modem_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&u8500_modem_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "u8500_modem: platform driver reg failed\n");
+		return ret;
+ }
+
+ return 0;
+}
+
+static void __exit u8500_modem_exit(void)
+{
+ platform_driver_unregister(&u8500_modem_driver);
+}
+
+arch_initcall(u8500_modem_init);
diff --git a/drivers/modem/shrm/Kconfig b/drivers/modem/shrm/Kconfig
new file mode 100644
index 00000000000..465c8bb10a1
--- /dev/null
+++ b/drivers/modem/shrm/Kconfig
@@ -0,0 +1,43 @@
+#
+# SHM HW kernel configuration
+#
+config U8500_SHRM
+ bool "U8500 SHRM hardware driver"
+ depends on ARCH_U8500 && PHONET && MODEM_U8500
+	default y
+ ---help---
+ If you say Y here, you will enable the STN8500 SHM hardware driver.
+
+ If unsure, say N.
+choice
+ prompt "Modem Image Version"
+ depends on U8500_SHRM
+ default SHRM_V1_UPDATES_VERSION
+
+ config SHRM_V1_UPDATES_VERSION
+ depends on U8500_SHRM
+ bool "SHRM V1 UPDATES"
+ help
+ Modem Images with V1 Updates
+
+endchoice
+
+config U8500_SHRM_LOOP_BACK
+ bool "U8500 SHRM loopback"
+ depends on U8500_SHRM
+ default n
+ ---help---
+ If you say Y here, you will enable the shm loopback
+
+ If unsure, say N.
+
+config U8500_SHRM_MODEM_SILENT_RESET
+ bool "U8500 SHRM Modem Silent Reset"
+ depends on U8500_SHRM
+ default n
+ ---help---
+ If you say Y here, you will enable the modem silent reset feature
+
+ If unsure, say N.
+
+
diff --git a/drivers/modem/shrm/Makefile b/drivers/modem/shrm/Makefile
new file mode 100644
index 00000000000..8115c24920b
--- /dev/null
+++ b/drivers/modem/shrm/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for SHRM drivers
+#
+
+ifdef CONFIG_PHONET
+u8500_shrm-objs := modem_shrm_driver.o shrm_fifo.o shrm_protocol.o
+else
+u8500_shrm-objs := shrm_driver.o shrm_fifo.o shrm_protocol.o
+endif
+
+obj-$(CONFIG_U8500_SHRM) += u8500_shrm.o
diff --git a/drivers/modem/shrm/modem_shrm_driver.c b/drivers/modem/shrm/modem_shrm_driver.c
new file mode 100644
index 00000000000..f46b86bd22e
--- /dev/null
+++ b/drivers/modem/shrm/modem_shrm_driver.c
@@ -0,0 +1,670 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson
+ * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <asm/atomic.h>
+#include <linux/io.h>
+#include <linux/skbuff.h>
+#ifdef CONFIG_HIGH_RES_TIMERS
+#include <linux/hrtimer.h>
+static struct hrtimer timer;
+#endif
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/phonet.h>
+#include <linux/modem/shrm/shrm_driver.h>
+#include <linux/modem/shrm/shrm_private.h>
+#include <linux/modem/shrm/shrm_config.h>
+#include <linux/modem/shrm/shrm_net.h>
+#include <linux/modem/shrm/shrm.h>
+
+#include <mach/isa_ioctl.h>
+/* debug functionality */
+#define ISA_DEBUG 0
+
+#define PHONET_TASKLET
+#define MAX_RCV_LEN 2048
+
+static void do_phonet_rcv_tasklet(unsigned long unused);
+struct tasklet_struct phonet_rcv_tasklet;
+
+/**
+ * audio_receive() - Receive audio channel completion callback
+ * @shrm: pointer to shrm device information structure
+ * @data: message pointer
+ * @n_bytes: message size
+ * @l2_header: L2 header/device ID 2->audio, 5->audio_loopback
+ *
+ * This function is called from the audio receive handler. Copies the audio
+ * message from the FIFO to the AUDIO queue. The message is later copied from
+ * this queue to the user buffer through the char or net interface read
+ * operation.
+ */
+static int audio_receive(struct shrm_dev *shrm, void *data,
+ u32 n_bytes, u8 l2_header)
+{
+ u32 size = 0;
+ int ret = 0;
+ int idx;
+ u8 *psrc;
+ struct message_queue *q;
+ struct isadev_context *audiodev;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ idx = shrm_get_cdev_index(l2_header);
+ if (idx < 0) {
+ dev_err(shrm->dev, "failed to get index\n");
+ return idx;
+ }
+ audiodev = &shrm->isa_context->isadev[idx];
+ q = &audiodev->dl_queue;
+ spin_lock(&q->update_lock);
+ /* Memcopy RX data first */
+ if ((q->writeptr+n_bytes) >= q->size) {
+ psrc = (u8 *)data;
+ size = (q->size-q->writeptr);
+ /* Copy First Part of msg */
+ memcpy((q->fifo_base+q->writeptr), psrc, size);
+ psrc += size;
+ /* Copy Second Part of msg at the top of fifo */
+ memcpy(q->fifo_base, psrc, (n_bytes-size));
+ } else {
+ memcpy((q->fifo_base+q->writeptr), data, n_bytes);
+ }
+ ret = add_msg_to_queue(q, n_bytes);
+ spin_unlock(&q->update_lock);
+ if (ret < 0)
+ dev_err(shrm->dev, "Adding a msg to message queue failed");
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return ret;
+}
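+
+/*
+ * Wrap-around example (illustrative, added for clarity): with a queue of
+ * q->size = 1024 bytes, q->writeptr = 1000 and n_bytes = 100, the copy in
+ * audio_receive()/common_receive() splits the message: the first memcpy
+ * places 24 bytes (1024 - 1000) at offsets 1000..1023 and the second
+ * memcpy places the remaining 76 bytes at the start of the FIFO.
+ */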
+
+/**
+ * common_receive() - Receive common channel completion callback
+ * @shrm: pointer to the shrm device information structure
+ * @data: message pointer
+ * @n_bytes: message size
+ * @l2_header: L2 header / device ID
+ *
+ * This function is called from the receive handler to copy the respective
+ * ISI, RPC, SECURITY message to its respective queue. The message is then
+ * copied from queue to the user buffer on char net interface read operation.
+ */
+static int common_receive(struct shrm_dev *shrm, void *data,
+ u32 n_bytes, u8 l2_header)
+{
+ u32 size = 0;
+ int ret = 0;
+ int idx;
+ u8 *psrc;
+ struct message_queue *q;
+ struct isadev_context *isa_dev;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ idx = shrm_get_cdev_index(l2_header);
+ if (idx < 0) {
+ dev_err(shrm->dev, "failed to get index\n");
+ return idx;
+ }
+ isa_dev = &shrm->isa_context->isadev[idx];
+ q = &isa_dev->dl_queue;
+ spin_lock(&q->update_lock);
+ /* Memcopy RX data first */
+ if ((q->writeptr+n_bytes) >= q->size) {
+		dev_dbg(shrm->dev, "FIFO wrap-around during copy\n");
+ psrc = (u8 *)data;
+ size = (q->size-q->writeptr);
+ /* Copy First Part of msg */
+ memcpy((q->fifo_base+q->writeptr), psrc, size);
+ psrc += size;
+ /* Copy Second Part of msg at the top of fifo */
+ memcpy(q->fifo_base, psrc, (n_bytes-size));
+ } else {
+ memcpy((q->fifo_base+q->writeptr), data, n_bytes);
+ }
+ ret = add_msg_to_queue(q, n_bytes);
+ spin_unlock(&q->update_lock);
+ if (ret < 0) {
+ dev_err(shrm->dev, "Adding a msg to message queue failed");
+ return ret;
+ }
+
+
+ if (l2_header == ISI_MESSAGING) {
+ if (shrm->netdev_flag_up) {
+ dev_dbg(shrm->dev,
+ "scheduling the phonet tasklet from %s!\n",
+ __func__);
+ tasklet_schedule(&phonet_rcv_tasklet);
+ }
+ dev_dbg(shrm->dev,
+ "Out of phonet tasklet %s!!!\n", __func__);
+ }
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return ret;
+}
+
+/**
+ * rx_common_l2msg_handler() - common channel receive handler
+ * @l2_header: L2 header
+ * @msg: pointer to the receive buffer
+ * @length: length of the msg to read
+ * @shrm: pointer to shrm device information structure
+ *
+ * This function is called to receive the message from CaMsgPendingNotification
+ * interrupt handler.
+ */
+static void rx_common_l2msg_handler(u8 l2_header,
+ void *msg, u32 length,
+ struct shrm_dev *shrm)
+{
+ int ret = 0;
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ ret = common_receive(shrm, msg, length, l2_header);
+ if (ret < 0)
+ dev_err(shrm->dev,
+ "common receive with l2 header %d failed\n", l2_header);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+/**
+ * rx_audio_l2msg_handler() - audio channel receive handler
+ * @l2_header: L2 header
+ * @msg: pointer to the receive buffer
+ * @length: length of the msg to read
+ * @shrm: pointer to shrm device information structure
+ *
+ * This function is called to receive the message from CaMsgPendingNotification
+ * interrupt handler.
+ */
+static void rx_audio_l2msg_handler(u8 l2_header,
+ void *msg, u32 length,
+ struct shrm_dev *shrm)
+{
+ int ret = 0;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ ret = audio_receive(shrm, msg, length, l2_header);
+ if (ret < 0)
+ dev_err(shrm->dev, "audio receive failed\n");
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+static int __init shm_initialise_irq(struct shrm_dev *shrm)
+{
+ int err = 0;
+
+ err = shrm_protocol_init(shrm,
+ rx_common_l2msg_handler, rx_audio_l2msg_handler);
+ if (err < 0) {
+ dev_err(shrm->dev, "SHM Protocol Init Failure\n");
+ return err;
+ }
+
+ err = request_irq(shrm->ca_wake_irq,
+ ca_wake_irq_handler, IRQF_TRIGGER_RISING,
+ "ca_wake-up", shrm);
+	if (err < 0) {
+		dev_err(shrm->dev,
+			"Unable to allocate shm tx interrupt line\n");
+		return err;
+	}
+
+ err = request_irq(shrm->ac_read_notif_0_irq,
+ ac_read_notif_0_irq_handler, 0,
+ "ac_read_notif_0", shrm);
+
+ if (err < 0) {
+ dev_err(shrm->dev,
+ "error ac_read_notif_0_irq interrupt line\n");
+ goto irq_err1;
+ }
+
+ err = request_irq(shrm->ac_read_notif_1_irq,
+ ac_read_notif_1_irq_handler, 0,
+ "ac_read_notif_1", shrm);
+
+ if (err < 0) {
+ dev_err(shrm->dev,
+ "error ac_read_notif_1_irq interrupt line\n");
+ goto irq_err2;
+ }
+
+ err = request_irq(shrm->ca_msg_pending_notif_0_irq,
+ ca_msg_pending_notif_0_irq_handler, 0,
+ "ca_msg_pending_notif_0", shrm);
+
+ if (err < 0) {
+ dev_err(shrm->dev,
+ "error ca_msg_pending_notif_0_irq line\n");
+ goto irq_err3;
+ }
+
+ err = request_irq(shrm->ca_msg_pending_notif_1_irq,
+ ca_msg_pending_notif_1_irq_handler, 0,
+ "ca_msg_pending_notif_1", shrm);
+
+ if (err < 0) {
+ dev_err(shrm->dev,
+ "error ca_msg_pending_notif_1_irq interrupt line\n");
+ goto irq_err4;
+ }
+ return err;
+irq_err4:
+ free_irq(shrm->ca_msg_pending_notif_0_irq, shrm);
+irq_err3:
+ free_irq(shrm->ac_read_notif_1_irq, shrm);
+irq_err2:
+ free_irq(shrm->ac_read_notif_0_irq, shrm);
+irq_err1:
+ free_irq(shrm->ca_wake_irq, shrm);
+ return err;
+}
+
+static void free_shm_irq(struct shrm_dev *shrm)
+{
+ free_irq(shrm->ca_wake_irq, shrm);
+ free_irq(shrm->ac_read_notif_0_irq, shrm);
+ free_irq(shrm->ac_read_notif_1_irq, shrm);
+ free_irq(shrm->ca_msg_pending_notif_0_irq, shrm);
+ free_irq(shrm->ca_msg_pending_notif_1_irq, shrm);
+}
+
+
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+static enum hrtimer_restart callback(struct hrtimer *timer)
+{
+ return HRTIMER_NORESTART;
+}
+#endif
+
+static void do_phonet_rcv_tasklet(unsigned long data)
+{
+	ssize_t ret;
+	struct shrm_dev *shrm = (struct shrm_dev *)data;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ for (;;) {
+ ret = shrm_net_receive(shrm->ndev);
+ if (ret == 0) {
+ dev_dbg(shrm->dev, "len is zero, queue empty\n");
+ break;
+ }
+ if (ret < 0) {
+ dev_err(shrm->dev, "len < 0 !!! error!!!\n");
+ break;
+ }
+ }
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+static int shrm_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct resource *res;
+ struct shrm_dev *shrm = NULL;
+
+ shrm = kzalloc(sizeof(struct shrm_dev), GFP_KERNEL);
+ if (shrm == NULL) {
+ dev_err(&pdev->dev,
+			"Could not allocate memory for struct shrm_dev\n");
+ return -ENOMEM;
+ }
+
+ shrm->dev = &pdev->dev;
+ shrm->modem = modem_get(shrm->dev, "u8500-shrm-modem");
+	if (IS_ERR_OR_NULL(shrm->modem)) {
+		dev_err(shrm->dev, "Could not retrieve the modem\n");
+ err = -ENODEV;
+ goto rollback_intr;
+ }
+
+ /* initialise the SHM */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(shrm->dev,
+ "Unable to map Ca Wake up interrupt\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ca_wake_irq = res->start;
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+ if (!res) {
+ dev_err(shrm->dev,
+ "Unable to map APE_Read_notif_common IRQ base\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ac_read_notif_0_irq = res->start;
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
+
+ if (!res) {
+ dev_err(shrm->dev,
+ "Unable to map APE_Read_notif_audio IRQ base\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ac_read_notif_1_irq = res->start;
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 3);
+
+ if (!res) {
+ dev_err(shrm->dev,
+ "Unable to map Cmt_msg_pending_notif_common IRQbase\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ca_msg_pending_notif_0_irq = res->start;
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 4);
+
+ if (!res) {
+ dev_err(shrm->dev,
+ "Unable to map Cmt_msg_pending_notif_audio IRQ base\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ca_msg_pending_notif_1_irq = res->start;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(shrm->dev,
+ "Could not get SHM IO memory information\n");
+ err = -ENODEV;
+ goto rollback_intr;
+ }
+ shrm->intr_base = (void __iomem *)ioremap_nocache(res->start,
+ res->end - res->start + 1);
+ if (!(shrm->intr_base)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ape_common_fifo_base_phy =
+ (u32 *)U8500_SHM_FIFO_APE_COMMON_BASE;
+ shrm->ape_common_fifo_base =
+ (void __iomem *)ioremap_nocache(
+ U8500_SHM_FIFO_APE_COMMON_BASE,
+ SHM_FIFO_0_SIZE);
+ shrm->ape_common_fifo_size = (SHM_FIFO_0_SIZE)/4;
+
+ if (!(shrm->ape_common_fifo_base)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_ape_common_fifo_base;
+ }
+ shrm->cmt_common_fifo_base_phy =
+ (u32 *)U8500_SHM_FIFO_CMT_COMMON_BASE;
+ shrm->cmt_common_fifo_base =
+ (void __iomem *)ioremap_nocache(
+ U8500_SHM_FIFO_CMT_COMMON_BASE, SHM_FIFO_0_SIZE);
+ shrm->cmt_common_fifo_size = (SHM_FIFO_0_SIZE)/4;
+
+ if (!(shrm->cmt_common_fifo_base)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_cmt_common_fifo_base;
+ }
+ shrm->ape_audio_fifo_base_phy =
+ (u32 *)U8500_SHM_FIFO_APE_AUDIO_BASE;
+ shrm->ape_audio_fifo_base =
+ (void __iomem *)ioremap_nocache(U8500_SHM_FIFO_APE_AUDIO_BASE,
+ SHM_FIFO_1_SIZE);
+ shrm->ape_audio_fifo_size = (SHM_FIFO_1_SIZE)/4;
+
+ if (!(shrm->ape_audio_fifo_base)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_ape_audio_fifo_base;
+ }
+ shrm->cmt_audio_fifo_base_phy =
+ (u32 *)U8500_SHM_FIFO_CMT_AUDIO_BASE;
+ shrm->cmt_audio_fifo_base =
+ (void __iomem *)ioremap_nocache(U8500_SHM_FIFO_CMT_AUDIO_BASE,
+ SHM_FIFO_1_SIZE);
+ shrm->cmt_audio_fifo_size = (SHM_FIFO_1_SIZE)/4;
+
+ if (!(shrm->cmt_audio_fifo_base)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_cmt_audio_fifo_base;
+ }
+ shrm->ac_common_shared_wptr =
+ (void __iomem *)ioremap(SHM_ACFIFO_0_WRITE_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ac_common_shared_wptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_ac_common_shared_wptr;
+ }
+ shrm->ac_common_shared_rptr =
+ (void __iomem *)ioremap(SHM_ACFIFO_0_READ_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ac_common_shared_rptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+ shrm->ca_common_shared_wptr =
+ (void __iomem *)ioremap(SHM_CAFIFO_0_WRITE_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ca_common_shared_wptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+ shrm->ca_common_shared_rptr =
+ (void __iomem *)ioremap(SHM_CAFIFO_0_READ_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ca_common_shared_rptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+ shrm->ac_audio_shared_wptr =
+ (void __iomem *)ioremap(SHM_ACFIFO_1_WRITE_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ac_audio_shared_wptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+ shrm->ac_audio_shared_rptr =
+ (void __iomem *)ioremap(SHM_ACFIFO_1_READ_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ac_audio_shared_rptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+ shrm->ca_audio_shared_wptr =
+ (void __iomem *)ioremap(SHM_CAFIFO_1_WRITE_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ca_audio_shared_wptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+ shrm->ca_audio_shared_rptr =
+ (void __iomem *)ioremap(SHM_CAFIFO_1_READ_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ca_audio_shared_rptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+
+	if (isa_init(shrm) != 0) {
+		dev_err(shrm->dev, "Driver Initialization Error\n");
+		err = -EBUSY;
+		goto rollback_map;
+	}
+	/* install handlers and tasklets */
+	err = shm_initialise_irq(shrm);
+	if (err) {
+		dev_err(shrm->dev,
+			"shm error in interrupt registration\n");
+		goto rollback_map;
+	}
+#ifdef CONFIG_HIGH_RES_TIMERS
+ hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ timer.function = callback;
+ hrtimer_start(&timer, ktime_set(0, 2*NSEC_PER_MSEC), HRTIMER_MODE_REL);
+#endif
+ err = shrm_register_netdev(shrm);
+ if (err < 0)
+ goto rollback_irq;
+
+ tasklet_init(&phonet_rcv_tasklet, do_phonet_rcv_tasklet, 0);
+ phonet_rcv_tasklet.data = (unsigned long)shrm;
+
+ platform_set_drvdata(pdev, shrm);
+
+ return err;
+rollback_irq:
+ free_shm_irq(shrm);
+rollback_map:
+ iounmap(shrm->ac_common_shared_wptr);
+ iounmap(shrm->ac_common_shared_rptr);
+ iounmap(shrm->ca_common_shared_wptr);
+ iounmap(shrm->ca_common_shared_rptr);
+ iounmap(shrm->ac_audio_shared_wptr);
+ iounmap(shrm->ac_audio_shared_rptr);
+ iounmap(shrm->ca_audio_shared_wptr);
+ iounmap(shrm->ca_audio_shared_rptr);
+rollback_ac_common_shared_wptr:
+ iounmap(shrm->cmt_audio_fifo_base);
+rollback_cmt_audio_fifo_base:
+ iounmap(shrm->ape_audio_fifo_base);
+rollback_ape_audio_fifo_base:
+ iounmap(shrm->cmt_common_fifo_base);
+rollback_cmt_common_fifo_base:
+ iounmap(shrm->ape_common_fifo_base);
+rollback_ape_common_fifo_base:
+ iounmap(shrm->intr_base);
+rollback_intr:
+ kfree(shrm);
+ return err;
+}
+
+static int __exit shrm_remove(struct platform_device *pdev)
+{
+ struct shrm_dev *shrm = platform_get_drvdata(pdev);
+
+ free_shm_irq(shrm);
+ iounmap(shrm->intr_base);
+ iounmap(shrm->ape_common_fifo_base);
+ iounmap(shrm->cmt_common_fifo_base);
+ iounmap(shrm->ape_audio_fifo_base);
+ iounmap(shrm->cmt_audio_fifo_base);
+ iounmap(shrm->ac_common_shared_wptr);
+ iounmap(shrm->ac_common_shared_rptr);
+ iounmap(shrm->ca_common_shared_wptr);
+ iounmap(shrm->ca_common_shared_rptr);
+ iounmap(shrm->ac_audio_shared_wptr);
+ iounmap(shrm->ac_audio_shared_rptr);
+ iounmap(shrm->ca_audio_shared_wptr);
+ iounmap(shrm->ca_audio_shared_rptr);
+ shrm_unregister_netdev(shrm);
+ isa_exit(shrm);
+ kfree(shrm);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * u8500_shrm_suspend() - This routine puts the SHRM into suspend state.
+ * @dev: pointer to device structure.
+ *
+ * This routine checks for ongoing communication with the modem by
+ * examining the ca_wake state and prevents suspend if modem communication
+ * is ongoing.
+ * If ca_wake = 1 (high), modem communication is ongoing; don't suspend.
+ * If ca_wake = 0 (low), no communication with the modem is ongoing; allow suspend.
+ */
+int u8500_shrm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct shrm_dev *shrm = platform_get_drvdata(pdev);
+ int err;
+
+ dev_dbg(&pdev->dev, "%s called...\n", __func__);
+ dev_dbg(&pdev->dev, "ca_wake_req_state = %x\n",
+ get_ca_wake_req_state());
+
+ /* if ca_wake_req is high, prevent system suspend */
+ if (!get_ca_wake_req_state()) {
+ err = shrm_suspend_netdev(shrm->ndev);
+ return err;
+ } else
+ return -EBUSY;
+}
+
+/**
+ * u8500_shrm_resume() - This routine resumes the SHRM from suspend state.
+ * @dev: pointer to device structure
+ *
+ * This routine restores the state of the SHRM on resume from suspend.
+ */
+int u8500_shrm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct shrm_dev *shrm = platform_get_drvdata(pdev);
+ int err;
+
+ dev_dbg(&pdev->dev, "%s called...\n", __func__);
+ err = shrm_resume_netdev(shrm->ndev);
+
+ return err;
+}
+
+static const struct dev_pm_ops shrm_dev_pm_ops = {
+ .suspend_noirq = u8500_shrm_suspend,
+ .resume_noirq = u8500_shrm_resume,
+};
+#endif
+
+static struct platform_driver shrm_driver = {
+ .remove = __exit_p(shrm_remove),
+ .driver = {
+ .name = "u8500_shrm",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &shrm_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init shrm_driver_init(void)
+{
+ return platform_driver_probe(&shrm_driver, shrm_probe);
+}
+
+static void __exit shrm_driver_exit(void)
+{
+ platform_driver_unregister(&shrm_driver);
+}
+
+module_init(shrm_driver_init);
+module_exit(shrm_driver_exit);
+
+MODULE_AUTHOR("Biju Das, Kumar Sanghvi, Arun Murthy");
+MODULE_DESCRIPTION("Shared Memory Modem Driver Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/modem/shrm/shrm_driver.c b/drivers/modem/shrm/shrm_driver.c
new file mode 100644
index 00000000000..11540831f95
--- /dev/null
+++ b/drivers/modem/shrm/shrm_driver.c
@@ -0,0 +1,1439 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson
+ * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define DEBUG
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/smp_lock.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <asm/atomic.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/modem/shrm/shrm_driver.h>
+#include <linux/modem/shrm/shrm_private.h>
+#include <linux/modem/shrm/shrm_config.h>
+#include <linux/modem/shrm/shrm.h>
+
+#include <mach/isa_ioctl.h>
+
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+#include <linux/hrtimer.h>
+static struct hrtimer timer;
+#endif
+
+
+#define NAME "IPC_ISA"
+#define ISA_DEVICES 4
+/* debug functionality */
+#define ISA_DEBUG 0
+
+#define ISI_MESSAGING (0)
+#define RPC_MESSAGING (1)
+#define AUDIO_MESSAGING (2)
+#define SECURITY_MESSAGING (3)
+
+#define SIZE_OF_FIFO (512*1024)
+
+static u8 message_fifo[4][SIZE_OF_FIFO];
+
+static u8 wr_isi_msg[10*1024];
+static u8 wr_rpc_msg[10*1024];
+static u8 wr_sec_msg[10*1024];
+static u8 wr_audio_msg[10*1024];
+
+/* global data */
+/*
+ * major: this variable is exported to user space as a module_param to
+ * specify the major device number at load time
+ */
+static int major;
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+/* global fops mutex */
+static DEFINE_MUTEX(isa_lock);
+rx_cb common_rx;
+rx_cb audio_rx;
+
+
+static int isi_receive(struct shrm_dev *shrm, void *data, u32 n_bytes);
+static int rpc_receive(struct shrm_dev *shrm, void *data, u32 n_bytes);
+static int audio_receive(struct shrm_dev *shrm, void *data, u32 n_bytes);
+static int security_receive(struct shrm_dev *shrm,
+ void *data, u32 n_bytes);
+
+static void rx_common_l2msg_handler(u8 l2_header,
+ void *msg, u32 length,
+ struct shrm_dev *shrm)
+{
+ int ret = 0;
+#ifdef CONFIG_U8500_SHRM_LOOP_BACK
+ u8 *pdata;
+#endif
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ switch (l2_header) {
+ case ISI_MESSAGING:
+ ret = isi_receive(shrm, msg, length);
+ if (ret < 0)
+ dev_err(shrm->dev, "isi receive failed\n");
+ break;
+ case RPC_MESSAGING:
+ ret = rpc_receive(shrm, msg, length);
+ if (ret < 0)
+ dev_err(shrm->dev, "rpc receive failed\n");
+ break;
+ case SECURITY_MESSAGING:
+ ret = security_receive(shrm, msg, length);
+ if (ret < 0)
+ dev_err(shrm->dev,
+ "security receive failed\n");
+ break;
+#ifdef CONFIG_U8500_SHRM_LOOP_BACK
+	case COMMON_LOOPBACK_MESSAGING:
+ pdata = (u8 *)msg;
+ if ((*pdata == 0x50) || (*pdata == 0xAF)) {
+ ret = isi_receive(shrm, msg, length);
+ if (ret < 0)
+ dev_err(shrm->dev, "isi receive failed\n");
+ } else if ((*pdata == 0x0A) || (*pdata == 0xF5)) {
+ ret = rpc_receive(shrm, msg, length);
+ if (ret < 0)
+ dev_err(shrm->dev, "rpc receive failed\n");
+ } else if ((*pdata == 0xFF) || (*pdata == 0x00)) {
+ ret = security_receive(shrm, msg, length);
+ if (ret < 0)
+ dev_err(shrm->dev,
+ "security receive failed\n");
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+static void rx_audio_l2msg_handler(u8 l2_header,
+ void *msg, u32 length,
+ struct shrm_dev *shrm)
+{
+ int ret = 0;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+	ret = audio_receive(shrm, msg, length);
+ if (ret < 0)
+ dev_err(shrm->dev, "audio receive failed\n");
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+static int __init shm_initialise_irq(struct shrm_dev *shrm)
+{
+ int err = 0;
+
+ shrm_protocol_init(shrm,
+ rx_common_l2msg_handler, rx_audio_l2msg_handler);
+
+ err = request_irq(shrm->ca_wake_irq,
+ ca_wake_irq_handler, IRQF_TRIGGER_RISING,
+ "ca_wake-up", shrm);
+ if (err < 0) {
+ dev_err(shrm->dev,
+ "Unable to allocate shm tx interrupt line\n");
+ return err;
+ }
+
+ err = request_irq(shrm->ac_read_notif_0_irq,
+ ac_read_notif_0_irq_handler, 0,
+ "ac_read_notif_0", shrm);
+ if (err < 0) {
+ dev_err(shrm->dev,
+ "error ac_read_notif_0_irq interrupt line\n");
+ goto irq_err1;
+ }
+
+ err = request_irq(shrm->ac_read_notif_1_irq,
+ ac_read_notif_1_irq_handler, 0,
+ "ac_read_notif_1", shrm);
+ if (err < 0) {
+ dev_err(shrm->dev,
+ "error ac_read_notif_1_irq interrupt line\n");
+ goto irq_err2;
+ }
+
+ err = request_irq(shrm->ca_msg_pending_notif_0_irq,
+ ca_msg_pending_notif_0_irq_handler, 0,
+ "ca_msg_pending_notif_0", shrm);
+ if (err < 0) {
+ dev_err(shrm->dev,
+ "error ca_msg_pending_notif_0_irq line\n");
+ goto irq_err3;
+ }
+
+ err = request_irq(shrm->ca_msg_pending_notif_1_irq,
+ ca_msg_pending_notif_1_irq_handler, 0,
+ "ca_msg_pending_notif_1", shrm);
+ if (err < 0) {
+ dev_err(shrm->dev,
+ "error ca_msg_pending_notif_1_irq interrupt line\n");
+ goto irq_err4;
+ }
+
+ return err;
+
+irq_err4:
+ free_irq(shrm->ca_msg_pending_notif_0_irq, shrm);
+irq_err3:
+ free_irq(shrm->ac_read_notif_1_irq, shrm);
+irq_err2:
+ free_irq(shrm->ac_read_notif_0_irq, shrm);
+irq_err1:
+ free_irq(shrm->ca_wake_irq, shrm);
+ return err;
+}
+
+static void free_shm_irq(struct shrm_dev *shrm)
+{
+ free_irq(shrm->ca_wake_irq, shrm);
+ free_irq(shrm->ac_read_notif_0_irq, shrm);
+ free_irq(shrm->ac_read_notif_1_irq, shrm);
+ free_irq(shrm->ca_msg_pending_notif_0_irq, shrm);
+ free_irq(shrm->ca_msg_pending_notif_1_irq, shrm);
+}
+
+/**
+ * create_queue() - To create FIFO for Tx and Rx message buffering.
+ * @q: message queue.
+ * @devicetype: device type 0-isi,1-rpc,2-audio,3-security.
+ *
+ * This function sets up a FIFO buffer of SIZE_OF_FIFO bytes backed by
+ * the statically allocated message_fifo array. It also initializes the
+ * queue handling lock, the queue management pointers and the message
+ * list associated with this queue.
+ *
+ * It returns 0 on success.
+ */
+static int create_queue(struct message_queue *q, u32 devicetype,
+ struct shrm_dev *shrm)
+{
+ q->fifo_base = (u8 *)&message_fifo[devicetype];
+ q->size = SIZE_OF_FIFO;
+ q->readptr = 0;
+ q->writeptr = 0;
+ q->no = 0;
+ q->shrm = shrm;
+ spin_lock_init(&q->update_lock);
+ INIT_LIST_HEAD(&q->msg_list);
+ init_waitqueue_head(&q->wq_readable);
+ atomic_set(&q->q_rp, 0);
+
+ return 0;
+}
+/**
+ * delete_queue() - To delete FIFO and associated memory.
+ * @q: message queue
+ *
+ * This function deletes the FIFO created using create_queue() and
+ * resets the queue management pointers.
+ */
+static void delete_queue(struct message_queue *q)
+{
+ q->size = 0;
+ q->readptr = 0;
+ q->writeptr = 0;
+}
+
+/**
+ * add_msg_to_queue() - Add a message to the queue
+ *
+ * @q: message queue
+ * @size: size in bytes
+ *
+ * This function reserves @size bytes in FIFO q and records the new
+ * message in the queue's message list. It returns a negative number
+ * when no memory can be allocated.
+ */
+int add_msg_to_queue(struct message_queue *q, u32 size)
+{
+ struct queue_element *new_msg = NULL;
+ struct shrm_dev *shrm = q->shrm;
+
+ dev_dbg(shrm->dev, "%s IN q->writeptr=%d\n",
+ __func__, q->writeptr);
+	new_msg = kmalloc(sizeof(struct queue_element), GFP_ATOMIC);
+
+ if (new_msg == NULL) {
+		dev_err(shrm->dev, "failed to allocate queue element\n");
+ return -ENOMEM;
+ }
+ new_msg->offset = q->writeptr;
+ new_msg->size = size;
+ new_msg->no = q->no++;
+
+ /* check for overflow condition */
+ if (q->readptr <= q->writeptr) {
+ if (((q->writeptr-q->readptr) + size) >= q->size) {
+ dev_err(shrm->dev, "Buffer overflow !!\n");
+ BUG_ON(((q->writeptr-q->readptr) + size) >= q->size);
+ }
+ } else {
+ if ((q->writeptr + size) >= q->readptr) {
+ dev_err(shrm->dev, "Buffer overflow !!\n");
+ BUG_ON((q->writeptr + size) >= q->readptr);
+ }
+ }
+ q->writeptr = (q->writeptr + size) % q->size;
+ if (list_empty(&q->msg_list)) {
+ list_add_tail(&new_msg->entry, &q->msg_list);
+ /* There can be 2 blocking calls read and another select */
+
+ atomic_set(&q->q_rp, 1);
+ wake_up_interruptible(&q->wq_readable);
+ } else
+ list_add_tail(&new_msg->entry, &q->msg_list);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return 0;
+}
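+
+/*
+ * Illustrative sketch, not driver logic: how the circular write pointer
+ * in add_msg_to_queue() wraps. With q->size = SIZE_OF_FIFO (512 KiB),
+ * q->writeptr = SIZE_OF_FIFO - 100 and size = 300, the update
+ *
+ *	q->writeptr = (q->writeptr + size) % q->size;
+ *
+ * leaves writeptr at 200, i.e. the payload wraps around to the start of
+ * the FIFO; the receive helpers (isi_receive() and friends) perform the
+ * matching split memcpy when they fill the buffer.
+ */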
+
+/**
+ * remove_msg_from_queue() - To remove a message from the msg queue.
+ *
+ * @q: message queue
+ *
+ * This function deletes a message from the message list associated with
+ * message queue q and also updates the read pointer.
+ * If the message list becomes empty, the event is cleared so that select
+ * and read calls on this particular queue will block.
+ *
+ * The message list is FIFO style and message is always added to tail and
+ * removed from head.
+ */
+
+int remove_msg_from_queue(struct message_queue *q)
+{
+ struct queue_element *old_msg = NULL;
+ struct shrm_dev *shrm = q->shrm;
+ struct list_head *msg;
+
+ dev_dbg(shrm->dev, "%s IN q->readptr %d\n",
+ __func__, q->readptr);
+
+ list_for_each(msg, &q->msg_list) {
+ old_msg = list_entry(msg, struct queue_element, entry);
+ if (old_msg == NULL) {
+ dev_err(shrm->dev, ":no message found\n");
+ return -EFAULT;
+ }
+ break;
+ }
+ list_del(msg);
+ q->readptr = (q->readptr + old_msg->size) % q->size;
+ if (list_empty(&q->msg_list)) {
+ dev_dbg(shrm->dev, "List is empty setting RP= 0\n");
+ atomic_set(&q->q_rp, 0);
+ }
+ kfree(old_msg);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return 0;
+}
+
+/**
+ * get_size_of_new_msg() - retrieve the size of the next message
+ *
+ * @q: message queue
+ *
+ * This function looks at the message at the head of the corresponding
+ * queue list; messages are always retrieved from the head side.
+ * It returns the size of that message.
+ */
+int get_size_of_new_msg(struct message_queue *q)
+{
+ struct queue_element *new_msg = NULL;
+ struct list_head *msg_list;
+ struct shrm_dev *shrm = q->shrm;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ spin_lock_bh(&q->update_lock);
+ list_for_each(msg_list, &q->msg_list) {
+ new_msg = list_entry(msg_list, struct queue_element, entry);
+ if (new_msg == NULL) {
+ spin_unlock_bh(&q->update_lock);
+ dev_err(shrm->dev, "no message found\n");
+ return -1;
+ }
+ break;
+ }
+ spin_unlock_bh(&q->update_lock);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return new_msg->size;
+}
+
+/**
+ * isi_receive() - Rx Completion callback
+ *
+ * @shrm: pointer to shrm device information structure
+ * @data: message pointer
+ * @n_bytes: message size
+ *
+ * This function is a callback to indicate that ISI message reception is
+ * complete. It updates the write pointer of the FIFO.
+ */
+static int isi_receive(struct shrm_dev *shrm,
+ void *data, u32 n_bytes)
+{
+ u32 size = 0;
+ int ret = 0;
+ u8 *psrc;
+ struct message_queue *q;
+ struct isadev_context *isidev = &shrm->isa_context->isadev[0];
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ q = &isidev->dl_queue;
+ spin_lock(&q->update_lock);
+ /* Memcopy RX data first */
+ if ((q->writeptr+n_bytes) >= q->size) {
+ dev_dbg(shrm->dev, "Inside Loop Back\n");
+ psrc = (u8 *)data;
+ size = (q->size-q->writeptr);
+ /* Copy First Part of msg */
+ memcpy((q->fifo_base+q->writeptr), psrc, size);
+ psrc += size;
+ /* Copy Second Part of msg at the top of fifo */
+ memcpy(q->fifo_base, psrc, (n_bytes-size));
+ } else {
+ memcpy((q->fifo_base+q->writeptr), data, n_bytes);
+ }
+ ret = add_msg_to_queue(q, n_bytes);
+ if (ret < 0)
+ dev_err(shrm->dev, "Adding msg to message queue failed\n");
+ spin_unlock(&q->update_lock);
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return ret;
+}
+
+/**
+ * rpc_receive() - Rx Completion callback
+ *
+ * @shrm: pointer to shrm device information structure
+ * @data: message pointer
+ * @n_bytes: message size
+ *
+ * This function is a callback to indicate that RPC message reception is
+ * complete. It updates the write pointer of the FIFO.
+ */
+static int rpc_receive(struct shrm_dev *shrm,
+ void *data, u32 n_bytes)
+{
+ u32 size = 0;
+ int ret = 0;
+ u8 *psrc;
+ struct message_queue *q;
+ struct isadev_context *rpcdev = &shrm->isa_context->isadev[1];
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ q = &rpcdev->dl_queue;
+ spin_lock(&q->update_lock);
+ /* Memcopy RX data first */
+ if ((q->writeptr+n_bytes) >= q->size) {
+ psrc = (u8 *)data;
+ size = (q->size-q->writeptr);
+ /* Copy First Part of msg */
+ memcpy((q->fifo_base+q->writeptr), psrc, size);
+ psrc += size;
+ /* Copy Second Part of msg at the top of fifo */
+ memcpy(q->fifo_base, psrc, (n_bytes-size));
+ } else {
+ memcpy((q->fifo_base+q->writeptr), data, n_bytes);
+ }
+
+ ret = add_msg_to_queue(q, n_bytes);
+ if (ret < 0)
+ dev_err(shrm->dev, "Adding msg to message queue failed\n");
+ spin_unlock(&q->update_lock);
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return ret;
+}
+
+/**
+ * audio_receive() - Rx Completion callback
+ *
+ * @shrm: pointer to shrm device information structure
+ * @data: message pointer
+ * @n_bytes: message size
+ *
+ * This function is a callback to indicate that audio message reception is
+ * complete. It updates the write pointer of the FIFO.
+ */
+static int audio_receive(struct shrm_dev *shrm,
+ void *data, u32 n_bytes)
+{
+ u32 size = 0;
+ int ret = 0;
+ u8 *psrc;
+ struct message_queue *q;
+ struct isadev_context *audiodev = &shrm->isa_context->isadev[2];
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ q = &audiodev->dl_queue;
+ spin_lock(&q->update_lock);
+ /* Memcopy RX data first */
+ if ((q->writeptr+n_bytes) >= q->size) {
+ psrc = (u8 *)data;
+ size = (q->size-q->writeptr);
+ /* Copy First Part of msg */
+ memcpy((q->fifo_base+q->writeptr), psrc, size);
+ psrc += size;
+ /* Copy Second Part of msg at the top of fifo */
+ memcpy(q->fifo_base, psrc, (n_bytes-size));
+ } else {
+ memcpy((q->fifo_base+q->writeptr), data, n_bytes);
+ }
+ ret = add_msg_to_queue(q, n_bytes);
+ if (ret < 0)
+ dev_err(shrm->dev, "Adding msg to message queue failed\n");
+ spin_unlock(&q->update_lock);
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return ret;
+}
+
+/**
+ * security_receive() - Rx Completion callback
+ *
+ * @shrm: pointer to shrm device information structure
+ * @data: message pointer
+ * @n_bytes: message size
+ *
+ * This function is a callback to indicate that security message reception
+ * is complete. It updates the write pointer of the FIFO.
+ */
+static int security_receive(struct shrm_dev *shrm,
+ void *data, u32 n_bytes)
+{
+ u32 size = 0;
+ int ret = 0;
+ u8 *psrc;
+ struct message_queue *q;
+ struct isadev_context *secdev = &shrm->isa_context->isadev[3];
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ q = &secdev->dl_queue;
+ spin_lock(&q->update_lock);
+ /* Memcopy RX data first */
+ if ((q->writeptr+n_bytes) >= q->size) {
+ psrc = (u8 *)data;
+ size = (q->size-q->writeptr);
+ /* Copy First Part of msg */
+ memcpy((q->fifo_base+q->writeptr), psrc, size);
+ psrc += size;
+ /* Copy Second Part of msg at the top of fifo */
+ memcpy(q->fifo_base, psrc, (n_bytes-size));
+ } else {
+ memcpy((q->fifo_base+q->writeptr), data, n_bytes);
+ }
+ ret = add_msg_to_queue(q, n_bytes);
+ if (ret < 0)
+ dev_err(shrm->dev, "Adding msg to message queue failed\n");
+ spin_unlock(&q->update_lock);
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return ret;
+}
+
+
+/**
+ * isa_select() - Select Interface
+ *
+ * @filp:file descriptor pointer
+ * @wait:poll_table_struct pointer
+ *
+ * This function is used to perform non-blocking read operations. It allows
+ * a process to determine whether it can read from one or more open files
+ * without blocking. These calls can also block a process until any of a
+ * given set of file descriptors becomes available for reading.
+ * If a file is ready to read, POLLIN | POLLRDNORM bitmask is returned.
+ * The driver method is called whenever the user-space program performs a select
+ * system call involving a file descriptor associated with the driver.
+ */
+static u32 isa_select(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct isadev_context *isadev = filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+ struct message_queue *q;
+ u32 mask = 0;
+ u32 m = iminor(filp->f_path.dentry->d_inode);
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (isadev->device_id != m)
+ return -1;
+ q = &isadev->dl_queue;
+ poll_wait(filp, &q->wq_readable, wait);
+ if (atomic_read(&q->q_rp) == 1)
+ mask = POLLIN | POLLRDNORM;
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return mask;
+}
+
+/**
+ * isa_read() - Read from device
+ *
+ * @filp:file descriptor
+ * @buf:user buffer pointer
+ * @len:size of requested data transfer
+ * @ppos:not used
+ *
+ * This function is called whenever user calls read() system call.
+ * It reads the oldest message from the queue, copies it into the user
+ * buffer and returns its size.
+ * If there is no message present in queue, then it blocks until new data is
+ * available.
+ */
+ssize_t isa_read(struct file *filp, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct isadev_context *isadev = (struct isadev_context *)
+ filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+ struct message_queue *q;
+ char *psrc;
+ u32 msgsize;
+ u32 size = 0;
+ int ret = 0;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (len <= 0)
+ return -EFAULT;
+ q = &isadev->dl_queue;
+
+ spin_lock_bh(&q->update_lock);
+ if (list_empty(&q->msg_list)) {
+ spin_unlock_bh(&q->update_lock);
+ if (wait_event_interruptible(q->wq_readable,
+ atomic_read(&q->q_rp) == 1)) {
+ return -ERESTARTSYS;
+ }
+ } else
+ spin_unlock_bh(&q->update_lock);
+
+ msgsize = get_size_of_new_msg(q);
+ if ((q->readptr+msgsize) >= q->size) {
+ dev_dbg(shrm->dev, "Inside Loop Back\n");
+ psrc = (char *)buf;
+ size = (q->size-q->readptr);
+ /* Copy First Part of msg */
+ if (copy_to_user(psrc,
+ (u8 *)(q->fifo_base+q->readptr),
+ size)) {
+ dev_err(shrm->dev, "copy_to_user failed\n");
+ return -EFAULT;
+ }
+ psrc += size;
+ /* Copy Second Part of msg at the top of fifo */
+ if (copy_to_user(psrc,
+ (u8 *)(q->fifo_base),
+ (msgsize-size))) {
+ dev_err(shrm->dev, "copy_to_user failed\n");
+ return -EFAULT;
+ }
+ } else {
+ if (copy_to_user(buf,
+ (u8 *)(q->fifo_base+q->readptr),
+ msgsize)) {
+ dev_err(shrm->dev, "copy_to_user failed\n");
+ return -EFAULT;
+ }
+ }
+
+ spin_lock_bh(&q->update_lock);
+ ret = remove_msg_from_queue(q);
+ if (ret < 0) {
+ dev_err(shrm->dev,
+ "Removing msg from message queue failed\n");
+ msgsize = ret;
+ }
+ spin_unlock_bh(&q->update_lock);
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return msgsize;
+}
+/**
+ * isa_write() - Write to device
+ *
+ * @filp:file descriptor
+ * @buf:user buffer pointer
+ * @len:size of requested data transfer
+ * @ppos:not used
+ *
+ * This function is called whenever user calls write() system call.
+ * It checks if there is space available in queue, and copies the message
+ * inside queue. If there is no space, it blocks until space becomes available.
+ * It also schedules transfer thread to transmit the newly added message.
+ */
+static ssize_t isa_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct isadev_context *isadev = filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+ struct message_queue *q;
+ int err, ret;
+ void *addr = 0;
+ u8 l2_header = 0;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ if (len <= 0)
+ return -EFAULT;
+ q = &isadev->dl_queue;
+
+ switch (isadev->device_id) {
+ case ISI_MESSAGING:
+ dev_dbg(shrm->dev, "ISI\n");
+ addr = (void *)wr_isi_msg;
+#ifdef CONFIG_U8500_SHRM_LOOP_BACK
+ dev_dbg(shrm->dev, "Loopback\n");
+ l2_header = COMMON_LOOPBACK_MESSAGING;
+#else
+ l2_header = isadev->device_id;
+#endif
+ break;
+ case RPC_MESSAGING:
+ dev_dbg(shrm->dev, "RPC\n");
+ addr = (void *)wr_rpc_msg;
+#ifdef CONFIG_U8500_SHRM_LOOP_BACK
+ l2_header = COMMON_LOOPBACK_MESSAGING;
+#else
+ l2_header = isadev->device_id;
+#endif
+ break;
+ case AUDIO_MESSAGING:
+ dev_dbg(shrm->dev, "Audio\n");
+ addr = (void *)wr_audio_msg;
+#ifdef CONFIG_U8500_SHRM_LOOP_BACK
+ l2_header = AUDIO_LOOPBACK_MESSAGING;
+#else
+ l2_header = isadev->device_id;
+#endif
+
+ break;
+ case SECURITY_MESSAGING:
+ dev_dbg(shrm->dev, "Security\n");
+ addr = (void *)wr_sec_msg;
+#ifdef CONFIG_U8500_SHRM_LOOP_BACK
+ l2_header = COMMON_LOOPBACK_MESSAGING;
+#else
+ l2_header = isadev->device_id;
+#endif
+ break;
+ default:
+ dev_dbg(shrm->dev, "Wrong device\n");
+ return -EFAULT;
+ }
+
+ if (copy_from_user(addr, buf, len)) {
+ dev_err(shrm->dev, "copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ /* Write msg to Fifo */
+	if (isadev->device_id == AUDIO_MESSAGING) {
+ mutex_lock(&shrm->isa_context->tx_audio_mutex);
+ err = shm_write_msg(shrm, l2_header, addr, len);
+ if (!err)
+ ret = len;
+ else
+ ret = err;
+ mutex_unlock(&shrm->isa_context->tx_audio_mutex);
+ } else {
+ spin_lock_bh(&shrm->isa_context->common_tx);
+ err = shm_write_msg(shrm, l2_header, addr, len);
+ if (!err)
+ ret = len;
+ else
+ ret = err;
+ spin_unlock_bh(&shrm->isa_context->common_tx);
+ }
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return ret;
+}
+
+/**
+ * isa_ioctl() - To handle different ioctl commands supported by driver.
+ *
+ * @inode: structure is used by the kernel internally to represent files
+ * @filp:file descriptor pointer
+ * @cmd:ioctl command
+ * @arg:input param
+ *
+ * The following ioctls are supported by this driver:
+ * DLP_IOC_ALLOCATE_BUFFER - To allocate a buffer for a new uplink message.
+ * This ioctl is called with the required message size and returns the
+ * offset of the allocated space in the queue.
+ * DLP_IOC_PUT_MESSAGE - To indicate that a new uplink message is available
+ * in the queue for transmission. The message is copied to the offset
+ * returned by the previous ioctl before calling this ioctl.
+ * DLP_IOC_GET_MESSAGE - To check if any downlink message is available in
+ * the queue. It returns the offset of the new message inside the queue.
+ * DLP_IOC_DEALLOCATE_BUFFER - To deallocate the buffer allocated for a
+ * downlink message once the message has been copied out from the offset
+ * returned by the previous ioctl.
+ */
+static int isa_ioctl(struct inode *inode, struct file *filp,
+ unsigned cmd, unsigned long arg)
+{
+ int err = 0;
+ struct isadev_context *isadev = filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+ u32 m = iminor(inode);
+
+ if (isadev->device_id != m)
+ return -1;
+
+ switch (cmd) {
+ case DLP_IOC_ALLOCATE_BUFFER:
+ dev_dbg(shrm->dev, "DLP_IOC_ALLOCATE_BUFFER\n");
+ break;
+ case DLP_IOC_PUT_MESSAGE:
+ dev_dbg(shrm->dev, "DLP_IOC_PUT_MESSAGE\n");
+ break;
+ case DLP_IOC_GET_MESSAGE:
+ dev_dbg(shrm->dev, "DLP_IOC_GET_MESSAGE\n");
+ break;
+ case DLP_IOC_DEALLOCATE_BUFFER:
+ dev_dbg(shrm->dev, "DLP_IOC_DEALLOCATE_BUFFER\n");
+ break;
+ default:
+ dev_dbg(shrm->dev, "Unknown IOCTL\n");
+ err = -1;
+ break;
+ }
+ return err;
+}
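+
+/*
+ * Hedged user-space sketch (illustration only, not part of this driver):
+ * issuing one of the DLP_IOC_* commands from <mach/isa_ioctl.h> against
+ * an open SHRM node. The node name and the unused third argument are
+ * assumptions; the handler above currently only logs the command.
+ *
+ *	int fd = open("/dev/isi", O_RDWR);
+ *
+ *	if (fd >= 0) {
+ *		if (ioctl(fd, DLP_IOC_GET_MESSAGE, 0) < 0)
+ *			perror("DLP_IOC_GET_MESSAGE");
+ *		close(fd);
+ *	}
+ */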
+/**
+ * isa_mmap() - Maps kernel queue memory to user space.
+ *
+ * @filp:file descriptor pointer
+ * @vma:virtual area memory structure.
+ *
+ * This function maps kernel FIFO into user space. This function
+ * shall be called twice to map both uplink and downlink buffers.
+ */
+static int isa_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct isadev_context *isadev = filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+
+ u32 m = iminor(filp->f_path.dentry->d_inode);
+	dev_dbg(shrm->dev, "%s %d IN\n", __func__, m);
+
+ isadev = (struct isadev_context *)filp->private_data;
+ return 0;
+}
+
+/**
+ * isa_close() - Close device file
+ *
+ * @inode:structure is used by the kernel internally to represent files
+ * @filp:device file descriptor
+ *
+ * This function deletes the structures associated with this file, deletes
+ * the queues, flushes and destroys the workqueues and closes this file.
+ * It also unregisters itself from the l2mux driver.
+ */
+static int isa_close(struct inode *inode, struct file *filp)
+{
+ struct isadev_context *isadev = filp->private_data;
+ struct shrm_dev *shrm = isadev->dl_queue.shrm;
+ struct isa_driver_context *isa_context = shrm->isa_context;
+ u8 m;
+
+ mutex_lock(&isa_lock);
+ m = iminor(filp->f_path.dentry->d_inode);
+ dev_dbg(shrm->dev, "%s IN %d", __func__, m);
+
+ if (atomic_dec_and_test(&isa_context->is_open[m])) {
+ atomic_inc(&isa_context->is_open[m]);
+ dev_err(shrm->dev, "Device not opened yet\n");
+ mutex_unlock(&isa_lock);
+ return -ENODEV;
+ }
+ atomic_set(&isa_context->is_open[m], 1);
+
+ dev_dbg(shrm->dev, "isadev->device_id %d", isadev->device_id);
+ dev_dbg(shrm->dev, "Closed %d device\n", m);
+
+ if (m == ISI_MESSAGING)
+ dev_dbg(shrm->dev, "Closed ISI_MESSAGING Device\n");
+ else if (m == RPC_MESSAGING)
+ dev_dbg(shrm->dev, "Closed RPC_MESSAGING Device\n");
+ else if (m == AUDIO_MESSAGING)
+ dev_dbg(shrm->dev, "Closed AUDIO_MESSAGING Device\n");
+ else if (m == SECURITY_MESSAGING)
+ dev_dbg(shrm->dev, "Closed SECURITY_MESSAGING Device\n");
+ else
+ dev_dbg(shrm->dev, NAME ":No such device present\n");
+
+ mutex_unlock(&isa_lock);
+ return 0;
+}
+/**
+ * isa_open() - Open device file
+ *
+ * @inode: structure is used by the kernel internally to represent files
+ * @filp: device file descriptor
+ *
+ * This function performs initialization tasks needed to open SHM channel.
+ * Following tasks are performed.
+ * -return if device is already opened
+ * -create uplink FIFO
+ * -create downlink FIFO
+ * -init delayed workqueue thread
+ * -register to l2mux driver
+ */
+static int isa_open(struct inode *inode, struct file *filp)
+{
+ int err = 0;
+ u8 m;
+ struct isadev_context *isadev;
+ struct isa_driver_context *isa_context = container_of(
+ inode->i_cdev,
+ struct isa_driver_context,
+ cdev);
+ struct shrm_dev *shrm = isa_context->isadev->dl_queue.shrm;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (get_boot_state() != BOOT_DONE) {
+ dev_err(shrm->dev, "Boot is not done\n");
+ return -EBUSY;
+ }
+ mutex_lock(&isa_lock);
+ m = iminor(inode);
+
+ if ((m != ISI_MESSAGING) && (m != RPC_MESSAGING) &&
+ (m != AUDIO_MESSAGING) && (m != SECURITY_MESSAGING)) {
+ dev_err(shrm->dev, "No such device present\n");
+ mutex_unlock(&isa_lock);
+ return -ENODEV;
+ }
+ if (!atomic_dec_and_test(&isa_context->is_open[m])) {
+ atomic_inc(&isa_context->is_open[m]);
+ dev_err(shrm->dev, "Device already opened\n");
+ mutex_unlock(&isa_lock);
+ return -EBUSY;
+ }
+
+ if (m == ISI_MESSAGING)
+ dev_dbg(shrm->dev, "Open ISI_MESSAGING Device\n");
+ else if (m == RPC_MESSAGING)
+ dev_dbg(shrm->dev, "Open RPC_MESSAGING Device\n");
+ else if (m == AUDIO_MESSAGING)
+ dev_dbg(shrm->dev, "Open AUDIO_MESSAGING Device\n");
+ else if (m == SECURITY_MESSAGING)
+ dev_dbg(shrm->dev, "Open SECURITY_MESSAGING Device\n");
+ else
+ dev_dbg(shrm->dev, ":No such device present\n");
+
+ isadev = &isa_context->isadev[m];
+ if (filp != NULL)
+ filp->private_data = isadev;
+
+ mutex_unlock(&isa_lock);
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return err;
+}
+
+const struct file_operations isa_fops = {
+ .owner = THIS_MODULE,
+ .open = isa_open,
+ .release = isa_close,
+ .ioctl = isa_ioctl,
+ .mmap = isa_mmap,
+ .read = isa_read,
+ .write = isa_write,
+ .poll = isa_select,
+};
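+
+/*
+ * Minimal user-space usage sketch for the char interface above. This is
+ * an illustration only, not shipped code; the /dev/isi node name follows
+ * the mknod example in isa_init() below, and handle_msg() is a
+ * hypothetical consumer of the downlink data.
+ *
+ *	#include <fcntl.h>
+ *	#include <poll.h>
+ *	#include <unistd.h>
+ *
+ *	static char buf[10 * 1024];
+ *
+ *	int fd = open("/dev/isi", O_RDWR);
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
+ *		ssize_t n = read(fd, buf, sizeof(buf));
+ *		if (n > 0)
+ *			handle_msg(buf, n);
+ *	}
+ *	close(fd);
+ */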
+
+/**
+ * isa_init() - module insertion function
+ *
+ * This function registers the module as a character driver using
+ * register_chrdev_region() or alloc_chrdev_region() and adds it to the
+ * system with a cdev_add() call. The major number is dynamically
+ * allocated using alloc_chrdev_region() by default, or the user may
+ * specify it at load time through the "major" module parameter.
+ * Nodes are to be created using
+ * mknod /dev/isi c $major 0
+ * mknod /dev/rpc c $major 1
+ * mknod /dev/audio c $major 2
+ * mknod /dev/sec c $major 3
+ */
+int isa_init(struct shrm_dev *shrm)
+{
+ dev_t dev_id;
+ int retval, no_dev;
+ struct isadev_context *isadev;
+ struct isa_driver_context *isa_context;
+
+ isa_context = kzalloc(sizeof(struct isa_driver_context),
+ GFP_KERNEL);
+ shrm->isa_context = isa_context;
+ if (isa_context == NULL) {
+ dev_err(shrm->dev, "Failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ if (major) {
+ dev_id = MKDEV(major, 0);
+ retval = register_chrdev_region(dev_id, ISA_DEVICES, NAME);
+ } else {
+ retval = alloc_chrdev_region(&dev_id, 0, ISA_DEVICES, NAME);
+ major = MAJOR(dev_id);
+ }
+
+ dev_dbg(shrm->dev, "major %d\n", major);
+
+ cdev_init(&isa_context->cdev, &isa_fops);
+ isa_context->cdev.owner = THIS_MODULE;
+ retval = cdev_add(&isa_context->cdev, dev_id, ISA_DEVICES);
+ if (retval) {
+ dev_err(shrm->dev, "Failed to add char device\n");
+ return retval;
+ }
+
+ for (no_dev = 0; no_dev < ISA_DEVICES; no_dev++)
+ atomic_set(&isa_context->is_open[no_dev], 1);
+
+ isa_context->isadev = kzalloc(sizeof
+ (struct isadev_context)*ISA_DEVICES,
+ GFP_KERNEL);
+ if (isa_context->isadev == NULL) {
+ dev_err(shrm->dev, "Failed to alloc memory\n");
+ return -ENOMEM;
+ }
+ for (no_dev = 0; no_dev < ISA_DEVICES; no_dev++) {
+ isadev = &isa_context->isadev[no_dev];
+ isadev->device_id = no_dev;
+ retval = create_queue(&isadev->dl_queue,
+ isadev->device_id, shrm);
+ if (retval < 0) {
+ dev_err(shrm->dev, "create dl_queue failed\n");
+ delete_queue(&isadev->dl_queue);
+ kfree(isadev);
+ return retval;
+ }
+ }
+ mutex_init(&isa_context->tx_audio_mutex);
+ spin_lock_init(&isa_context->common_tx);
+
+	dev_info(shrm->dev, "SHRM char driver added\n");
+
+ return retval;
+}
+
+void isa_exit(struct shrm_dev *shrm)
+{
+ int no_dev;
+ struct isadev_context *isadev;
+ struct isa_driver_context *isa_context = shrm->isa_context;
+ dev_t dev_id = MKDEV(major, 0);
+
+ for (no_dev = 0; no_dev < ISA_DEVICES; no_dev++) {
+ isadev = &isa_context->isadev[no_dev];
+ delete_queue(&isadev->dl_queue);
+ kfree(isadev);
+ }
+
+ cdev_del(&isa_context->cdev);
+ unregister_chrdev_region(dev_id, ISA_DEVICES);
+ kfree(isa_context);
+
+	dev_info(shrm->dev, "SHRM char driver removed\n");
+}
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+static enum hrtimer_restart callback(struct hrtimer *timer)
+{
+ return HRTIMER_NORESTART;
+}
+#endif
+
+
+static int __init shrm_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct resource *res;
+ struct shrm_dev *shrm = NULL;
+
+ if (pdev == NULL) {
+		pr_err("No device/platform_data found on shm device\n");
+ return -ENODEV;
+ }
+
+
+ shrm = kzalloc(sizeof(struct shrm_dev), GFP_KERNEL);
+ if (shrm == NULL) {
+		dev_err(&pdev->dev,
+			"Could not allocate memory for struct shrm_dev\n");
+ return -ENOMEM;
+ }
+ shrm->dev = &pdev->dev;
+
+ /* initialise the SHM */
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(shrm->dev, "Unable to map Ca Wake up interrupt\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ca_wake_irq = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!res) {
+ dev_err(shrm->dev,
+ "Unable to map APE_Read_notif_common IRQ base\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ac_read_notif_0_irq = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
+ if (!res) {
+ dev_err(shrm->dev,
+ "Unable to map APE_Read_notif_audio IRQ base\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ac_read_notif_1_irq = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 3);
+ if (!res) {
+ dev_err(shrm->dev,
+ "Unable to map Cmt_msg_pending_notif_common IRQbase\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ca_msg_pending_notif_0_irq = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 4);
+ if (!res) {
+ dev_err(shrm->dev,
+ "Unable to map Cmt_msg_pending_notif_audio IRQ base\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+ shrm->ca_msg_pending_notif_1_irq = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(shrm->dev,
+ "Could not get SHM IO memory information\n");
+ err = -ENODEV;
+ goto rollback_intr;
+ }
+
+ shrm->intr_base = (void __iomem *)ioremap_nocache(res->start,
+ res->end - res->start + 1);
+
+ if (!(shrm->intr_base)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_intr;
+ }
+
+ shrm->ape_common_fifo_base_phy =
+ (u32 *)U8500_SHM_FIFO_APE_COMMON_BASE;
+ shrm->ape_common_fifo_base =
+ (void __iomem *)ioremap_nocache(
+ U8500_SHM_FIFO_APE_COMMON_BASE,
+ SHM_FIFO_0_SIZE);
+ shrm->ape_common_fifo_size = (SHM_FIFO_0_SIZE)/4;
+
+ if (!(shrm->ape_common_fifo_base)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_ape_common_fifo_base;
+ }
+
+ shrm->cmt_common_fifo_base_phy =
+ (u32 *)U8500_SHM_FIFO_CMT_COMMON_BASE;
+
+ shrm->cmt_common_fifo_base =
+ (void __iomem *)ioremap_nocache(
+ U8500_SHM_FIFO_CMT_COMMON_BASE, SHM_FIFO_0_SIZE);
+ shrm->cmt_common_fifo_size = (SHM_FIFO_0_SIZE)/4;
+
+ if (!(shrm->cmt_common_fifo_base)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_cmt_common_fifo_base;
+ }
+
+ shrm->ape_audio_fifo_base_phy =
+ (u32 *)U8500_SHM_FIFO_APE_AUDIO_BASE;
+ shrm->ape_audio_fifo_base =
+ (void __iomem *)ioremap_nocache(U8500_SHM_FIFO_APE_AUDIO_BASE,
+ SHM_FIFO_1_SIZE);
+ shrm->ape_audio_fifo_size = (SHM_FIFO_1_SIZE)/4;
+
+ if (!(shrm->ape_audio_fifo_base)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_ape_audio_fifo_base;
+ }
+
+ shrm->cmt_audio_fifo_base_phy =
+ (u32 *)U8500_SHM_FIFO_CMT_AUDIO_BASE;
+ shrm->cmt_audio_fifo_base =
+ (void __iomem *)ioremap_nocache(U8500_SHM_FIFO_CMT_AUDIO_BASE,
+ SHM_FIFO_1_SIZE);
+ shrm->cmt_audio_fifo_size = (SHM_FIFO_1_SIZE)/4;
+
+ if (!(shrm->cmt_audio_fifo_base)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_cmt_audio_fifo_base;
+ }
+
+ shrm->ac_common_shared_wptr =
+ (void __iomem *)ioremap(SHM_ACFIFO_0_WRITE_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ac_common_shared_wptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_ac_common_shared_wptr;
+ }
+
+ shrm->ac_common_shared_rptr =
+ (void __iomem *)ioremap(SHM_ACFIFO_0_READ_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ac_common_shared_rptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+
+
+ shrm->ca_common_shared_wptr =
+ (void __iomem *)ioremap(SHM_CAFIFO_0_WRITE_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ca_common_shared_wptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+
+ shrm->ca_common_shared_rptr =
+ (void __iomem *)ioremap(SHM_CAFIFO_0_READ_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ca_common_shared_rptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+
+
+ shrm->ac_audio_shared_wptr =
+ (void __iomem *)ioremap(SHM_ACFIFO_1_WRITE_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ac_audio_shared_wptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+
+
+ shrm->ac_audio_shared_rptr =
+ (void __iomem *)ioremap(SHM_ACFIFO_1_READ_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ac_audio_shared_rptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+
+
+ shrm->ca_audio_shared_wptr =
+ (void __iomem *)ioremap(SHM_CAFIFO_1_WRITE_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ca_audio_shared_wptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+
+
+ shrm->ca_audio_shared_rptr =
+ (void __iomem *)ioremap(SHM_CAFIFO_1_READ_AMCU, SHM_PTR_SIZE);
+
+ if (!(shrm->ca_audio_shared_rptr)) {
+ dev_err(shrm->dev, "Unable to map register base\n");
+ err = -EBUSY;
+ goto rollback_map;
+ }
+
+
+	if (isa_init(shrm) != 0) {
+		dev_err(shrm->dev, "Driver Initialization Error\n");
+		err = -EBUSY;
+		goto rollback_map;
+	}
+ /* install handlers and tasklets */
+ if (shm_initialise_irq(shrm)) {
+ dev_err(shrm->dev, "shm error in interrupt registration\n");
+ goto rollback_irq;
+ }
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+ hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ timer.function = callback;
+
+ hrtimer_start(&timer, ktime_set(0, 2*NSEC_PER_MSEC), HRTIMER_MODE_REL);
+#endif
+
+ return err;
+
+rollback_irq:
+ free_shm_irq(shrm);
+rollback_map:
+ iounmap(shrm->ac_common_shared_wptr);
+ iounmap(shrm->ac_common_shared_rptr);
+ iounmap(shrm->ca_common_shared_wptr);
+ iounmap(shrm->ca_common_shared_rptr);
+ iounmap(shrm->ac_audio_shared_wptr);
+ iounmap(shrm->ac_audio_shared_rptr);
+ iounmap(shrm->ca_audio_shared_wptr);
+ iounmap(shrm->ca_audio_shared_rptr);
+rollback_ac_common_shared_wptr:
+ iounmap(shrm->cmt_audio_fifo_base);
+rollback_cmt_audio_fifo_base:
+ iounmap(shrm->ape_audio_fifo_base);
+rollback_ape_audio_fifo_base:
+ iounmap(shrm->cmt_common_fifo_base);
+rollback_cmt_common_fifo_base:
+ iounmap(shrm->ape_common_fifo_base);
+rollback_ape_common_fifo_base:
+ iounmap(shrm->intr_base);
+rollback_intr:
+ kfree(shrm);
+ return err;
+}
+
+static int __exit shrm_remove(struct platform_device *pdev)
+{
+ struct shrm_dev *shrm = platform_get_drvdata(pdev);
+
+ free_shm_irq(shrm);
+ iounmap(shrm->intr_base);
+ iounmap(shrm->ape_common_fifo_base);
+ iounmap(shrm->cmt_common_fifo_base);
+ iounmap(shrm->ape_audio_fifo_base);
+ iounmap(shrm->cmt_audio_fifo_base);
+ iounmap(shrm->ac_common_shared_wptr);
+ iounmap(shrm->ac_common_shared_rptr);
+ iounmap(shrm->ca_common_shared_wptr);
+ iounmap(shrm->ca_common_shared_rptr);
+ iounmap(shrm->ac_audio_shared_wptr);
+ iounmap(shrm->ac_audio_shared_rptr);
+ iounmap(shrm->ca_audio_shared_wptr);
+ iounmap(shrm->ca_audio_shared_rptr);
+	isa_exit(shrm);
+	kfree(shrm);
+
+ return 0;
+}
+#ifdef CONFIG_PM
+
+/**
+ * u8500_shrm_suspend() - This routine puts the SHRM into suspend state.
+ * @dev: pointer to device structure.
+ *
+ * This routine checks for ongoing communication with the modem by
+ * examining the ca_wake state and prevents suspend if modem communication
+ * is ongoing.
+ * If ca_wake = 1 (high), modem communication is ongoing; don't suspend.
+ * If ca_wake = 0 (low), no communication with the modem is ongoing; allow suspend.
+ */
+int u8500_shrm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct shrm_dev *shrm = platform_get_drvdata(pdev);
+
+ dev_dbg(shrm->dev, "%s called...\n", __func__);
+	dev_dbg(shrm->dev, "ca_wake_req_state = %x\n",
+ get_ca_wake_req_state());
+ /* if ca_wake_req is high, prevent system suspend */
+ if (get_ca_wake_req_state())
+ return -EBUSY;
+ else
+ return 0;
+}
+
+/**
+ * u8500_shrm_resume() - This routine resumes the SHRM from suspend state.
+ * @dev: pointer to device structure.
+ *
+ * This routine restores the state of the SHRM on resume.
+ */
+int u8500_shrm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct shrm_dev *shrm = platform_get_drvdata(pdev);
+
+ dev_dbg(shrm->dev, "%s called...\n", __func__);
+ /* TODO:
+ * As of now, no state save takes place in suspend.
+ * So, nothing to restore in resume.
+ * Simply return as of now.
+ * State saved in suspend should be restored here.
+ */
+
+ return 0;
+}
+
+static const struct dev_pm_ops shrm_dev_pm_ops = {
+ .suspend = u8500_shrm_suspend,
+ .resume = u8500_shrm_resume,
+};
+#endif
+
+static struct platform_driver shrm_driver = {
+ .remove = __exit_p(shrm_remove),
+ .driver = {
+ .name = "u8500_shrm",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &shrm_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init shrm_driver_init(void)
+{
+ return platform_driver_probe(&shrm_driver, shrm_probe);
+}
+
+static void __exit shrm_driver_exit(void)
+{
+ platform_driver_unregister(&shrm_driver);
+}
+
+module_init(shrm_driver_init);
+module_exit(shrm_driver_exit);
+
+MODULE_AUTHOR("Biju Das, Kumar Sanghvi, Arun Murthy");
+MODULE_DESCRIPTION("Shared Memory Modem Driver Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/modem/shrm/shrm_fifo.c b/drivers/modem/shrm/shrm_fifo.c
new file mode 100644
index 00000000000..1804c1be69e
--- /dev/null
+++ b/drivers/modem/shrm/shrm_fifo.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson
+ * Author: Kumar Sanghavi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/modem/shrm/shrm.h>
+#include <linux/modem/shrm/shrm_driver.h>
+#include <linux/modem/shrm/shrm_private.h>
+#include <linux/modem/shrm/shrm_net.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#define L1_BOOT_INFO_REQ 1
+#define L1_BOOT_INFO_RESP 2
+#define L1_NORMAL_MSG 3
+#define L1_HEADER_MASK 28
+#define L1_MAPID_MASK 0xF0000000
+#define CONFIG_OFFSET 8
+#define COUNTER_OFFSET 20
+#define L2_HEADER_SIZE 4
+#define L2_HEADER_OFFSET 24
+#define MASK_0_15_BIT 0xFF
+#define MASK_16_31_BIT 0xFF00
+#define MASK_16_27_BIT 0xFFF0000
+#define MASK_0_39_BIT 0xFFFFF
+#define MASK_40_55_BIT 0xFF00000
+#define MASK_8_16_BIT 0x0000FF00
+#define MSG_LEN_OFFSET 16
+#define SHRM_VER 2
+#define ca_ist_inactivity_timer 100 /*100ms */
+#define ca_csc_inactivity_timer 100 /*100ms */
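+
+/*
+ * Inferred L1 header layout for L1_NORMAL_MSG, as built by
+ * shm_write_msg_to_fifo() below. This is a reading of the masks above,
+ * not an authoritative protocol description:
+ *
+ *	bits 31..28	L1 map id	(L1_MAPID_MASK, L1_MSG_MAPID_OFFSET)
+ *	bits 27..20	msg counter	(COUNTER_OFFSET, MASK_40_55_BIT)
+ *	bits 19..0	L2 length + L2 header size	(MASK_0_39_BIT)
+ */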
+
+static u8 msg_audio_counter;
+static u8 msg_common_counter;
+
+struct fifo_write_params ape_shm_fifo_0;
+struct fifo_write_params ape_shm_fifo_1;
+struct fifo_read_params cmt_shm_fifo_0;
+struct fifo_read_params cmt_shm_fifo_1;
+
+
+static u8 cmt_read_notif_0_send;
+static u8 cmt_read_notif_1_send;
+
+void shm_fifo_init(struct shrm_dev *shrm)
+{
+ ape_shm_fifo_0.writer_local_wptr = 0;
+ ape_shm_fifo_0.writer_local_rptr = 0;
+ *((u32 *)shrm->ac_common_shared_wptr) = 0;
+ *((u32 *)shrm->ac_common_shared_rptr) = 0;
+ ape_shm_fifo_0.shared_wptr = 0;
+ ape_shm_fifo_0.shared_rptr = 0;
+ ape_shm_fifo_0.availablesize = shrm->ape_common_fifo_size;
+ ape_shm_fifo_0.end_addr_fifo = shrm->ape_common_fifo_size;
+ ape_shm_fifo_0.fifo_virtual_addr = shrm->ape_common_fifo_base;
+ spin_lock_init(&ape_shm_fifo_0.fifo_update_lock);
+
+
+ cmt_shm_fifo_0.reader_local_rptr = 0;
+ cmt_shm_fifo_0.reader_local_wptr = 0;
+ cmt_shm_fifo_0.shared_wptr =
+ *((u32 *)shrm->ca_common_shared_wptr);
+ cmt_shm_fifo_0.shared_rptr =
+ *((u32 *)shrm->ca_common_shared_rptr);
+ cmt_shm_fifo_0.availablesize = shrm->cmt_common_fifo_size;
+ cmt_shm_fifo_0.end_addr_fifo = shrm->cmt_common_fifo_size;
+ cmt_shm_fifo_0.fifo_virtual_addr = shrm->cmt_common_fifo_base;
+
+ ape_shm_fifo_1.writer_local_wptr = 0;
+ ape_shm_fifo_1.writer_local_rptr = 0;
+ ape_shm_fifo_1.shared_wptr = 0;
+ ape_shm_fifo_1.shared_rptr = 0;
+ *((u32 *)shrm->ac_audio_shared_wptr) = 0;
+ *((u32 *)shrm->ac_audio_shared_rptr) = 0;
+ ape_shm_fifo_1.availablesize = shrm->ape_audio_fifo_size;
+ ape_shm_fifo_1.end_addr_fifo = shrm->ape_audio_fifo_size;
+ ape_shm_fifo_1.fifo_virtual_addr = shrm->ape_audio_fifo_base;
+ spin_lock_init(&ape_shm_fifo_1.fifo_update_lock);
+
+ cmt_shm_fifo_1.reader_local_rptr = 0;
+ cmt_shm_fifo_1.reader_local_wptr = 0;
+ cmt_shm_fifo_1.shared_wptr =
+ *((u32 *)shrm->ca_audio_shared_wptr);
+ cmt_shm_fifo_1.shared_rptr =
+ *((u32 *)shrm->ca_audio_shared_rptr);
+ cmt_shm_fifo_1.availablesize = shrm->cmt_audio_fifo_size;
+ cmt_shm_fifo_1.end_addr_fifo = shrm->cmt_audio_fifo_size;
+ cmt_shm_fifo_1.fifo_virtual_addr = shrm->cmt_audio_fifo_base;
+ msg_audio_counter = 0;
+ msg_common_counter = 0;
+}
+
+u8 read_boot_info_req(struct shrm_dev *shrm,
+ u32 *config,
+ u32 *version)
+{
+ struct fifo_read_params *fifo = &cmt_shm_fifo_0;
+ u32 *msg;
+ u32 header = 0;
+ u8 msgtype;
+
+ /* Read L1 header read content of reader_local_rptr */
+ msg = (u32 *)
+ (fifo->reader_local_rptr + fifo->fifo_virtual_addr);
+ header = *msg;
+ msgtype = (header & L1_MAPID_MASK) >> L1_MSG_MAPID_OFFSET;
+ if (msgtype != L1_BOOT_INFO_REQ) {
+ dev_err(shrm->dev, "Read_Boot_Info_Req Fatal ERROR\n");
+ dev_err(shrm->dev, "Received msgtype is %d\n", msgtype);
+ dev_info(shrm->dev, "Initiating a modem reset\n");
+ queue_kthread_work(&shrm->shm_ac_wake_kw,
+ &shrm->shm_mod_reset_req);
+ return 0;
+ }
+ *config = (header >> CONFIG_OFFSET) & MASK_0_15_BIT;
+ *version = header & MASK_0_15_BIT;
+ fifo->reader_local_rptr += 1;
+
+ return 1;
+}
+
+void write_boot_info_resp(struct shrm_dev *shrm, u32 config,
+ u32 version)
+{
+ struct fifo_write_params *fifo = &ape_shm_fifo_0;
+ u32 *msg;
+ u8 msg_length;
+ version = SHRM_VER;
+
+ spin_lock_bh(&fifo->fifo_update_lock);
+ /* Read L1 header read content of reader_local_rptr */
+ msg = (u32 *)
+ (fifo->writer_local_wptr+fifo->fifo_virtual_addr);
+ if (version < 1) {
+ *msg = ((L1_BOOT_INFO_RESP << L1_MSG_MAPID_OFFSET) |
+ ((config << CONFIG_OFFSET) & MASK_16_31_BIT)
+ | (version & MASK_0_15_BIT));
+ msg_length = 1;
+ } else {
+ *msg = ((L1_BOOT_INFO_RESP << L1_MSG_MAPID_OFFSET) |
+ ((0x8 << MSG_LEN_OFFSET) & MASK_16_27_BIT) |
+ ((config << CONFIG_OFFSET) & MASK_8_16_BIT)|
+ version);
+ msg++;
+ *msg = ca_ist_inactivity_timer;
+ msg++;
+ *msg = ca_csc_inactivity_timer;
+ msg_length = L1_NORMAL_MSG;
+ }
+ fifo->writer_local_wptr += msg_length;
+ fifo->availablesize -= msg_length;
+ spin_unlock_bh(&fifo->fifo_update_lock);
+}
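+
+/*
+ * Boot handshake summary (reference only, restating the two helpers
+ * above): the modem writes an L1_BOOT_INFO_REQ word which
+ * read_boot_info_req() decodes into config and version, and the APE
+ * answers through write_boot_info_resp(). For SHRM_VER >= 1 the first
+ * response word is packed as
+ *
+ *	(L1_BOOT_INFO_RESP << L1_MSG_MAPID_OFFSET) |
+ *	((0x8 << MSG_LEN_OFFSET) & MASK_16_27_BIT) |
+ *	((config << CONFIG_OFFSET) & MASK_8_16_BIT) | version
+ *
+ * followed by the ca_ist and ca_csc inactivity timer words.
+ */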
+
+/**
+ * shm_write_msg_to_fifo() - write message to FIFO
+ * @shrm: pointer to shrm device information structure
+ * @channel: audio or common channel
+ * @l2header: L2 header or device ID
+ * @addr: pointer to write buffer address
+ * @length: length of the msg to write
+ *
+ * Function which writes the data into the FIFO in the IPC zone.
+ * It is called from shm_write_msg. This function copies the msg from
+ * a kernel buffer to the FIFO. There are 4 kernel buffers from which
+ * the data is copied to the FIFO, one for each of the ISI, RPC, AUDIO
+ * and SECURITY messages. ISI, RPC and SECURITY messages are pushed to
+ * the common channel FIFO and AUDIO messages are pushed onto the audio
+ * channel FIFO.
+ */
+int shm_write_msg_to_fifo(struct shrm_dev *shrm, u8 channel,
+ u8 l2header, void *addr, u32 length)
+{
+ struct fifo_write_params *fifo = NULL;
+ u32 l1_header = 0, l2_header = 0;
+ u32 requiredsize;
+ u32 size = 0;
+ u32 *msg;
+ u8 *src;
+
+ if (channel == COMMON_CHANNEL)
+ fifo = &ape_shm_fifo_0;
+ else if (channel == AUDIO_CHANNEL)
+ fifo = &ape_shm_fifo_1;
+ else {
+ dev_err(shrm->dev, "invalid channel\n");
+ return -EINVAL;
+ }
+
+ /* L2 size in 32b */
+ requiredsize = ((length + 3) / 4);
+ /* Add size of L1 & L2 header */
+ requiredsize += 2;
+
+	/* if availablesize <= requiredsize then error */
+	if (fifo->availablesize <= requiredsize) {
+		/* Fatal ERROR - should never happen */
+ dev_dbg(shrm->dev, "wr_wptr= %x\n",
+ fifo->writer_local_wptr);
+ dev_dbg(shrm->dev, "wr_rptr= %x\n",
+ fifo->writer_local_rptr);
+ dev_dbg(shrm->dev, "shared_wptr= %x\n",
+ fifo->shared_wptr);
+ dev_dbg(shrm->dev, "shared_rptr= %x\n",
+ fifo->shared_rptr);
+ dev_dbg(shrm->dev, "availsize= %x\n",
+ fifo->availablesize);
+ dev_dbg(shrm->dev, "end__fifo= %x\n",
+ fifo->end_addr_fifo);
+ dev_warn(shrm->dev, "Modem is busy, please wait."
+ " c_cnt = %d; a_cnt = %d\n", msg_common_counter,
+ msg_audio_counter);
+ if (channel == COMMON_CHANNEL) {
+ dev_warn(shrm->dev,
+				"Modem is lagging behind in reading. "
+ "Stopping n/w dev queue\n");
+ shrm_stop_netdev(shrm->ndev);
+ }
+
+ return -EAGAIN;
+ }
+
+ if (channel == COMMON_CHANNEL) {
+ /* build L1 header */
+ l1_header = ((L1_NORMAL_MSG << L1_MSG_MAPID_OFFSET) |
+ (((msg_common_counter++) << COUNTER_OFFSET)
+ & MASK_40_55_BIT) |
+ ((length + L2_HEADER_SIZE) & MASK_0_39_BIT));
+ } else if (channel == AUDIO_CHANNEL) {
+ /* build L1 header */
+ l1_header = ((L1_NORMAL_MSG << L1_MSG_MAPID_OFFSET) |
+ (((msg_audio_counter++) << COUNTER_OFFSET)
+ & MASK_40_55_BIT) |
+ ((length + L2_HEADER_SIZE) & MASK_0_39_BIT));
+ }
+
+ /*
+ * Need to take care race condition for fifo->availablesize
+ * & fifo->writer_local_rptr with Ac_Read_notification interrupt.
+ * One option could be use stack variable for LocalRptr and recompute
+ * fifo->availablesize,based on flag enabled in the
+ * Ac_read_notification
+ */
+ l2_header = ((l2header << L2_HEADER_OFFSET) |
+ ((length) & MASK_0_39_BIT));
+ spin_lock_bh(&fifo->fifo_update_lock);
+ /* Check Local Rptr is less than or equal to Local WPtr */
+ if (fifo->writer_local_rptr <= fifo->writer_local_wptr) {
+ msg = (u32 *)
+ (fifo->fifo_virtual_addr+fifo->writer_local_wptr);
+
+		/* check enough space between writer_local_wptr & end of FIFO */
+ if ((fifo->end_addr_fifo-fifo->writer_local_wptr) >=
+ requiredsize) {
+ /* Add L1 header and L2 header */
+ *msg = l1_header;
+ msg++;
+ *msg = l2_header;
+ msg++;
+
+ /* copy the l2 message in 1 memcpy */
+ memcpy((void *)msg, addr, length);
+ /* UpdateWptr */
+ fifo->writer_local_wptr += requiredsize;
+ fifo->availablesize -= requiredsize;
+ fifo->writer_local_wptr %= fifo->end_addr_fifo;
+ } else {
+ /*
+			 * message is split between end of FIFO and beginning of FIFO:
+ * copy first part from writer_local_wptr to end of FIFO
+ */
+ size = fifo->end_addr_fifo-fifo->writer_local_wptr;
+
+ if (size == 1) {
+ /* Add L1 header */
+ *msg = l1_header;
+ msg++;
+ /* UpdateWptr */
+ fifo->writer_local_wptr = 0;
+ fifo->availablesize -= size;
+ /*
+ * copy second part from beg of FIFO
+ * with remaining part of msg
+ */
+ msg = (u32 *)
+ fifo->fifo_virtual_addr;
+ *msg = l2_header;
+ msg++;
+
+ /* copy the l3 message in 1 memcpy */
+ memcpy((void *)msg, addr, length);
+ /* UpdateWptr */
+ fifo->writer_local_wptr +=
+ requiredsize-size;
+ fifo->availablesize -=
+ (requiredsize-size);
+ } else if (size == 2) {
+ /* Add L1 header and L2 header */
+ *msg = l1_header;
+ msg++;
+ *msg = l2_header;
+ msg++;
+
+ /* UpdateWptr */
+ fifo->writer_local_wptr = 0;
+ fifo->availablesize -= size;
+
+ /*
+ * copy second part from beg of FIFO
+ * with remaining part of msg
+ */
+ msg = (u32 *)
+ fifo->fifo_virtual_addr;
+ /* copy the l3 message in 1 memcpy */
+ memcpy((void *)msg, addr, length);
+
+ /* UpdateWptr */
+ fifo->writer_local_wptr +=
+ requiredsize-size;
+ fifo->availablesize -=
+ (requiredsize-size);
+ } else {
+ /* Add L1 header and L2 header */
+ *msg = l1_header;
+ msg++;
+ *msg = l2_header;
+ msg++;
+
+ /* copy the l2 message in 1 memcpy */
+ memcpy((void *)msg, addr, (size-2)*4);
+
+
+ /* UpdateWptr */
+ fifo->writer_local_wptr = 0;
+ fifo->availablesize -= size;
+
+ /*
+ * copy second part from beg of FIFO
+ * with remaining part of msg
+ */
+ msg = (u32 *)fifo->fifo_virtual_addr;
+ src = (u8 *)addr+((size - 2) * 4);
+ memcpy((void *)msg, src,
+ (length-((size - 2) * 4)));
+
+ /* UpdateWptr */
+ fifo->writer_local_wptr +=
+ requiredsize-size;
+ fifo->availablesize -=
+ (requiredsize-size);
+ }
+
+ }
+ } else {
+ /* writer_local_rptr > writer_local_wptr */
+ msg = (u32 *)
+ (fifo->fifo_virtual_addr+fifo->writer_local_wptr);
+ /* Add L1 header and L2 header */
+ *msg = l1_header;
+ msg++;
+ *msg = l2_header;
+ msg++;
+ /*
+		 * copy message, possible between writer_local_wptr up
+		 * to writer_local_rptr; copy the l2 message in 1 memcpy
+ */
+ memcpy((void *)msg, addr, length);
+
+ /* UpdateWptr */
+ fifo->writer_local_wptr += requiredsize;
+ fifo->availablesize -= requiredsize;
+
+ }
+ spin_unlock_bh(&fifo->fifo_update_lock);
+ return length;
+}
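+
+/*
+ * Worked example for the size accounting above (illustration only): for
+ * an L2 payload of length = 10 bytes, requiredsize is
+ * ((10 + 3) / 4) + 2 = 5 32-bit words: one L1 header word, one L2 header
+ * word and three payload words, the last only partially used.
+ */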
+
+/**
+ * read_one_l2msg_common() - read message from common channel
+ * @shrm: pointer to shrm device information structure
+ * @l2_msg: pointer to the read L2 message buffer
+ * @len: message length
+ *
+ * This function read one message from the FIFO and returns l2 header type
+ */
+u8 read_one_l2msg_common(struct shrm_dev *shrm,
+ u8 *l2_msg, u32 *len)
+{
+ struct fifo_read_params *fifo = &cmt_shm_fifo_0;
+
+ u32 *msg;
+ u32 l1_header = 0;
+ u32 l2_header = 0;
+ u32 length;
+ u8 msgtype;
+ u32 msg_size;
+ u32 size = 0;
+
+ /* Read L1 header read content of reader_local_rptr */
+ msg = (u32 *)
+ (fifo->reader_local_rptr+fifo->fifo_virtual_addr);
+ l1_header = *msg++;
+ msgtype = (l1_header & 0xF0000000) >> L1_HEADER_MASK;
+
+ if (msgtype != L1_NORMAL_MSG) {
+		/* Fatal ERROR - should never happen */
+ dev_info(shrm->dev, "wr_wptr= %x\n",
+ fifo->reader_local_wptr);
+ dev_info(shrm->dev, "wr_rptr= %x\n",
+ fifo->reader_local_rptr);
+ dev_info(shrm->dev, "shared_wptr= %x\n",
+ fifo->shared_wptr);
+ dev_info(shrm->dev, "shared_rptr= %x\n",
+ fifo->shared_rptr);
+ dev_info(shrm->dev, "availsize= %x\n",
+ fifo->availablesize);
+ dev_info(shrm->dev, "end_fifo= %x\n",
+ fifo->end_addr_fifo);
+		/* Fatal ERROR - should never happen */
+ dev_crit(shrm->dev, "Fatal ERROR - should never happen\n");
+ dev_info(shrm->dev, "Initiating a modem reset\n");
+ queue_kthread_work(&shrm->shm_ac_wake_kw,
+ &shrm->shm_mod_reset_req);
+ }
+ if (fifo->reader_local_rptr == (fifo->end_addr_fifo-1)) {
+ l2_header = (*((u32 *)fifo->fifo_virtual_addr));
+ length = l2_header & MASK_0_39_BIT;
+ } else {
+ /* Read L2 header,Msg size & content of reader_local_rptr */
+ l2_header = *msg;
+ length = l2_header & MASK_0_39_BIT;
+ }
+
+ *len = length;
+ msg_size = ((length + 3) / 4);
+ msg_size += 2;
+
+ if (fifo->reader_local_rptr + msg_size <=
+ fifo->end_addr_fifo) {
+ /* Skip L2 header */
+ msg++;
+
+ /* read msg between reader_local_rptr and end of FIFO */
+ memcpy((void *)l2_msg, (void *)msg, length);
+ /* UpdateLocalRptr */
+ fifo->reader_local_rptr += msg_size;
+ fifo->reader_local_rptr %= fifo->end_addr_fifo;
+ } else {
+ /*
+		 * msg split between end of FIFO and beginning: copy first
+		 * part of msg, i.e. read msg between reader_local_rptr
+		 * and end of FIFO
+ */
+ size = fifo->end_addr_fifo-fifo->reader_local_rptr;
+ if (size == 1) {
+ msg = (u32 *)(fifo->fifo_virtual_addr);
+ /* Skip L2 header */
+ msg++;
+ memcpy((void *)l2_msg, (void *)(msg), length);
+ } else if (size == 2) {
+ /* Skip L2 header */
+ msg++;
+ msg = (u32 *)(fifo->fifo_virtual_addr);
+ memcpy((void *)l2_msg,
+ (void *)(msg), length);
+ } else {
+ /* Skip L2 header */
+ msg++;
+ memcpy((void *)l2_msg, (void *)msg, ((size - 2) * 4));
+ /* copy second part of msg */
+ l2_msg += ((size - 2) * 4);
+ msg = (u32 *)(fifo->fifo_virtual_addr);
+ memcpy((void *)l2_msg, (void *)(msg),
+ (length-((size - 2) * 4)));
+ }
+ fifo->reader_local_rptr =
+ (fifo->reader_local_rptr+msg_size) %
+ fifo->end_addr_fifo;
+ }
+ return (l2_header>>L2_HEADER_OFFSET) & MASK_0_15_BIT;
+}
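A minimal sketch of the slot-size arithmetic used by the reader above (illustration only, not part of the patch; the helper name is hypothetical, the driver computes this inline): the payload length is rounded up to whole 32-bit words and two words are added for the L1 and L2 headers.

static inline u32 shrm_slot_words(u32 length)
{
	/* payload rounded up to whole 32-bit words, plus L1 + L2 header words */
	return ((length + 3) / 4) + 2;
}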
+
+u8 read_remaining_messages_common(void)
+{
+ struct fifo_read_params *fifo = &cmt_shm_fifo_0;
+ /*
+ * No race condition between reader_local_rptr and
+ * fifo->reader_local_wptr with the CaMsgPending notification interrupt
+ */
+ return ((fifo->reader_local_rptr != fifo->reader_local_wptr) ? 1 : 0);
+}
+
+u8 read_one_l2msg_audio(struct shrm_dev *shrm,
+ u8 *l2_msg, u32 *len)
+{
+ struct fifo_read_params *fifo = &cmt_shm_fifo_1;
+
+ u32 *msg;
+ u32 l1_header = 0;
+ u32 l2_header = 0;
+ u32 length;
+ u8 msgtype;
+ u32 msg_size;
+ u32 size = 0;
+
+ /* Read L1 header: read content at reader_local_rptr */
+ msg = (u32 *)
+ (fifo->reader_local_rptr+fifo->fifo_virtual_addr);
+ l1_header = *msg++;
+ msgtype = (l1_header & 0xF0000000) >> L1_HEADER_MASK;
+
+ if (msgtype != L1_NORMAL_MSG) {
+ /* Fatal ERROR - should never happen */
+ dev_info(shrm->dev, "wr_local_wptr= %x\n",
+ fifo->reader_local_wptr);
+ dev_info(shrm->dev, "wr_local_rptr= %x\n",
+ fifo->reader_local_rptr);
+ dev_info(shrm->dev, "shared_wptr= %x\n",
+ fifo->shared_wptr);
+ dev_info(shrm->dev, "shared_rptr= %x\n",
+ fifo->shared_rptr);
+ dev_info(shrm->dev, "availsize=%x\n",
+ fifo->availablesize);
+ dev_info(shrm->dev, "end_fifo= %x\n",
+ fifo->end_addr_fifo);
+ dev_info(shrm->dev, "Received msgtype is %d\n", msgtype);
+ /* Fatal ERROR - should never happen */
+ dev_crit(shrm->dev, "Fatal ERROR - should never happen\n");
+ dev_info(shrm->dev, "Initiating a modem reset\n");
+ queue_kthread_work(&shrm->shm_ac_wake_kw,
+ &shrm->shm_mod_reset_req);
+ }
+ if (fifo->reader_local_rptr == (fifo->end_addr_fifo-1)) {
+ l2_header = (*((u32 *)fifo->fifo_virtual_addr));
+ length = l2_header & MASK_0_39_BIT;
+ } else {
+ /* Read L2 header, msg size & content at reader_local_rptr */
+ l2_header = *msg;
+ length = l2_header & MASK_0_39_BIT;
+ }
+
+ *len = length;
+ msg_size = ((length + 3) / 4);
+ msg_size += 2;
+
+ if (fifo->reader_local_rptr + msg_size <=
+ fifo->end_addr_fifo) {
+ /* Skip L2 header */
+ msg++;
+ /* read msg between reader_local_rptr and end of FIFO */
+ memcpy((void *)l2_msg, (void *)msg, length);
+ /* UpdateLocalRptr */
+ fifo->reader_local_rptr += msg_size;
+ fifo->reader_local_rptr %= fifo->end_addr_fifo;
+ } else {
+
+ /*
+ * msg is split between the end of the FIFO and its beginning:
+ * copy the first part of the msg, between reader_local_rptr
+ * and the end of the FIFO
+ */
+ size = fifo->end_addr_fifo-fifo->reader_local_rptr;
+ if (size == 1) {
+ msg = (u32 *)(fifo->fifo_virtual_addr);
+ /* Skip L2 header */
+ msg++;
+ memcpy((void *)l2_msg, (void *)(msg), length);
+ } else if (size == 2) {
+ /* Skip L2 header */
+ msg++;
+ msg = (u32 *)(fifo->fifo_virtual_addr);
+ memcpy((void *)l2_msg, (void *)(msg), length);
+ } else {
+ /* Skip L2 header */
+ msg++;
+ memcpy((void *)l2_msg, (void *)msg, ((size - 2) * 4));
+ /* copy second part of msg */
+ l2_msg += ((size - 2) * 4);
+ msg = (u32 *)(fifo->fifo_virtual_addr);
+ memcpy((void *)l2_msg, (void *)(msg),
+ (length-((size - 2) * 4)));
+ }
+ fifo->reader_local_rptr =
+ (fifo->reader_local_rptr+msg_size) %
+ fifo->end_addr_fifo;
+
+ }
+ return (l2_header>>L2_HEADER_OFFSET) & MASK_0_15_BIT;
+}
+
+u8 read_remaining_messages_audio(void)
+{
+ struct fifo_read_params *fifo = &cmt_shm_fifo_1;
+
+ return ((fifo->reader_local_rptr != fifo->reader_local_wptr) ?
+ 1 : 0);
+}
+
+u8 is_the_only_one_unread_message(struct shrm_dev *shrm,
+ u8 channel, u32 length)
+{
+ struct fifo_write_params *fifo = NULL;
+ u32 messagesize = 0;
+ u8 is_only_one_unread_msg = 0;
+
+ if (channel == COMMON_CHANNEL)
+ fifo = &ape_shm_fifo_0;
+ else /* channel = AUDIO_CHANNEL */
+ fifo = &ape_shm_fifo_1;
+
+ /* L3 size in 32-bit words */
+ messagesize = ((length + 3) / 4);
+ /* Add size of L1 & L2 header */
+ messagesize += 2;
+ /*
+ * Possible race condition with the AC read notification interrupt;
+ * does this need to be checked?
+ */
+ if (fifo->writer_local_wptr > fifo->writer_local_rptr)
+ is_only_one_unread_msg =
+ ((fifo->writer_local_rptr + messagesize) ==
+ fifo->writer_local_wptr) ? 1 : 0;
+ else
+ /* Msg split between end of FIFO and start of FIFO */
+ is_only_one_unread_msg =
+ (((fifo->writer_local_rptr + messagesize) %
+ fifo->end_addr_fifo) == fifo->writer_local_wptr) ?
+ 1 : 0;
+
+ return is_only_one_unread_msg;
+}
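Worked example with illustrative numbers: for a 20-byte message, messagesize = (20 + 3) / 4 + 2 = 7 words. With end_addr_fifo = 1024 words and writer_local_rptr = 1020, the wrapped branch applies and (1020 + 7) % 1024 = 3, so the message just written is the only unread one exactly when writer_local_wptr == 3.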
+
+void update_ca_common_local_wptr(struct shrm_dev *shrm)
+{
+ /*
+ * update CA common reader local write pointer with the
+ * shared write pointer
+ */
+ struct fifo_read_params *fifo = &cmt_shm_fifo_0;
+
+ fifo->shared_wptr =
+ (*((u32 *)shrm->ca_common_shared_wptr));
+ fifo->reader_local_wptr = fifo->shared_wptr;
+}
+
+void update_ca_audio_local_wptr(struct shrm_dev *shrm)
+{
+ /*
+ * update CA audio reader local write pointer with the
+ * shared write pointer
+ */
+ struct fifo_read_params *fifo = &cmt_shm_fifo_1;
+
+ fifo->shared_wptr =
+ (*((u32 *)shrm->ca_audio_shared_wptr));
+ fifo->reader_local_wptr = fifo->shared_wptr;
+}
+
+void update_ac_common_local_rptr(struct shrm_dev *shrm)
+{
+ /*
+ * update AC common writer local read pointer with the
+ * shared read pointer
+ */
+ struct fifo_write_params *fifo;
+ u32 free_space = 0;
+
+ fifo = &ape_shm_fifo_0;
+
+ spin_lock_bh(&fifo->fifo_update_lock);
+ fifo->shared_rptr =
+ (*((u32 *)shrm->ac_common_shared_rptr));
+
+ if (fifo->shared_rptr >= fifo->writer_local_rptr)
+ free_space =
+ (fifo->shared_rptr-fifo->writer_local_rptr);
+ else {
+ free_space =
+ (fifo->end_addr_fifo-fifo->writer_local_rptr);
+ free_space += fifo->shared_rptr;
+ }
+
+ /* Possible race condition on the variables below with write_msg */
+ fifo->availablesize += free_space;
+ fifo->writer_local_rptr = fifo->shared_rptr;
+ spin_unlock_bh(&fifo->fifo_update_lock);
+}
+
+void update_ac_audio_local_rptr(struct shrm_dev *shrm)
+{
+ /*
+ * update AC audio writer local read pointer with the
+ * shared read pointer
+ */
+ struct fifo_write_params *fifo;
+ u32 free_space = 0;
+
+ fifo = &ape_shm_fifo_1;
+ spin_lock_bh(&fifo->fifo_update_lock);
+ fifo->shared_rptr =
+ (*((u32 *)shrm->ac_audio_shared_rptr));
+
+ if (fifo->shared_rptr >= fifo->writer_local_rptr)
+ free_space =
+ (fifo->shared_rptr-fifo->writer_local_rptr);
+ else {
+ free_space =
+ (fifo->end_addr_fifo-fifo->writer_local_rptr);
+ free_space += fifo->shared_rptr;
+ }
+
+ /* Possible race condition on the variables below with write_msg */
+ fifo->availablesize += free_space;
+ fifo->writer_local_rptr = fifo->shared_rptr;
+ spin_unlock_bh(&fifo->fifo_update_lock);
+}
+
+void update_ac_common_shared_wptr(struct shrm_dev *shrm)
+{
+ /*
+ * update AC common shared write pointer with the
+ * local write pointer
+ */
+ struct fifo_write_params *fifo;
+
+ fifo = &ape_shm_fifo_0;
+ spin_lock_bh(&fifo->fifo_update_lock);
+ /* Update shared pointer fifo offset of the IPC zone */
+ (*((u32 *)shrm->ac_common_shared_wptr)) =
+ fifo->writer_local_wptr;
+
+ fifo->shared_wptr = fifo->writer_local_wptr;
+ spin_unlock_bh(&fifo->fifo_update_lock);
+}
+
+void update_ac_audio_shared_wptr(struct shrm_dev *shrm)
+{
+ /*
+ * update AC audio shared write pointer with the
+ * local write pointer
+ */
+ struct fifo_write_params *fifo;
+
+ fifo = &ape_shm_fifo_1;
+ spin_lock_bh(&fifo->fifo_update_lock);
+ /* Update shared pointer fifo offset of the IPC zone */
+ (*((u32 *)shrm->ac_audio_shared_wptr)) =
+ fifo->writer_local_wptr;
+ fifo->shared_wptr = fifo->writer_local_wptr;
+ spin_unlock_bh(&fifo->fifo_update_lock);
+}
+
+void update_ca_common_shared_rptr(struct shrm_dev *shrm)
+{
+ /*
+ * update CA common shared read pointer with the
+ * local read pointer
+ */
+ struct fifo_read_params *fifo;
+
+ fifo = &cmt_shm_fifo_0;
+
+ /* Update shared pointer fifo offset of the IPC zone */
+ (*((u32 *)shrm->ca_common_shared_rptr)) =
+ fifo->reader_local_rptr;
+ fifo->shared_rptr = fifo->reader_local_rptr;
+}
+
+void update_ca_audio_shared_rptr(struct shrm_dev *shrm)
+{
+ /*
+ * update CA audio shared read pointer with the
+ * local read pointer
+ */
+ struct fifo_read_params *fifo;
+
+ fifo = &cmt_shm_fifo_1;
+
+ /* Update shared pointer fifo offset of the IPC zone */
+ (*((u32 *)shrm->ca_audio_shared_rptr)) =
+ fifo->reader_local_rptr;
+ fifo->shared_rptr = fifo->reader_local_rptr;
+}
+
+void get_reader_pointers(u8 channel_type, u32 *reader_local_rptr,
+ u32 *reader_local_wptr, u32 *shared_rptr)
+{
+ struct fifo_read_params *fifo = NULL;
+
+ if (channel_type == COMMON_CHANNEL)
+ fifo = &cmt_shm_fifo_0;
+ else /* channel_type = AUDIO_CHANNEL */
+ fifo = &cmt_shm_fifo_1;
+
+ *reader_local_rptr = fifo->reader_local_rptr;
+ *reader_local_wptr = fifo->reader_local_wptr;
+ *shared_rptr = fifo->shared_rptr;
+}
+
+void get_writer_pointers(u8 channel_type, u32 *writer_local_rptr,
+ u32 *writer_local_wptr, u32 *shared_wptr)
+{
+ struct fifo_write_params *fifo = NULL;
+
+ if (channel_type == COMMON_CHANNEL)
+ fifo = &ape_shm_fifo_0;
+ else /* channel_type = AUDIO_CHANNEL */
+ fifo = &ape_shm_fifo_1;
+
+ spin_lock_bh(&fifo->fifo_update_lock);
+ *writer_local_rptr = fifo->writer_local_rptr;
+ *writer_local_wptr = fifo->writer_local_wptr;
+ *shared_wptr = fifo->shared_wptr;
+ spin_unlock_bh(&fifo->fifo_update_lock);
+}
+
+void set_ca_msg_0_read_notif_send(u8 val)
+{
+ cmt_read_notif_0_send = val;
+}
+
+u8 get_ca_msg_0_read_notif_send(void)
+{
+ return cmt_read_notif_0_send;
+}
+
+void set_ca_msg_1_read_notif_send(u8 val)
+{
+ cmt_read_notif_1_send = val;
+}
+
+u8 get_ca_msg_1_read_notif_send(void)
+{
+ return cmt_read_notif_1_send;
+}
diff --git a/drivers/modem/shrm/shrm_protocol.c b/drivers/modem/shrm/shrm_protocol.c
new file mode 100644
index 00000000000..cbb3a820317
--- /dev/null
+++ b/drivers/modem/shrm/shrm_protocol.c
@@ -0,0 +1,1262 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson
+ * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/netlink.h>
+#include <linux/kthread.h>
+#include <linux/modem/shrm/shrm.h>
+#include <linux/modem/shrm/shrm_driver.h>
+#include <linux/modem/shrm/shrm_private.h>
+#include <linux/modem/shrm/shrm_net.h>
+#include <linux/modem/modem_client.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/reboot_reasons.h>
+#include <mach/suspend.h>
+
+#define L2_HEADER_ISI 0x0
+#define L2_HEADER_RPC 0x1
+#define L2_HEADER_AUDIO 0x2
+#define L2_HEADER_SECURITY 0x3
+#define L2_HEADER_COMMON_SIMPLE_LOOPBACK 0xC0
+#define L2_HEADER_COMMON_ADVANCED_LOOPBACK 0xC1
+#define L2_HEADER_AUDIO_SIMPLE_LOOPBACK 0x80
+#define L2_HEADER_AUDIO_ADVANCED_LOOPBACK 0x81
+#define L2_HEADER_CIQ 0xC3
+#define L2_HEADER_RTC_CALIBRATION 0xC8
+#define MAX_PAYLOAD 1024
+
+#define PRCM_HOSTACCESS_REQ 0x334
+
+static u8 boot_state = BOOT_INIT;
+static u8 recieve_common_msg[8*1024];
+static u8 recieve_audio_msg[8*1024];
+static received_msg_handler rx_common_handler;
+static received_msg_handler rx_audio_handler;
+static struct hrtimer timer;
+struct sock *shrm_nl_sk;
+
+static char shrm_common_tx_state = SHRM_SLEEP_STATE;
+static char shrm_common_rx_state = SHRM_SLEEP_STATE;
+static char shrm_audio_tx_state = SHRM_SLEEP_STATE;
+static char shrm_audio_rx_state = SHRM_SLEEP_STATE;
+
+static atomic_t ac_sleep_disable_count = ATOMIC_INIT(0);
+static atomic_t ac_msg_pend_1 = ATOMIC_INIT(0);
+static struct shrm_dev *shm_dev;
+
+/* Spin lock and tasklet declaration */
+DECLARE_TASKLET(shm_ca_0_tasklet, shm_ca_msgpending_0_tasklet, 0);
+DECLARE_TASKLET(shm_ca_1_tasklet, shm_ca_msgpending_1_tasklet, 0);
+DECLARE_TASKLET(shm_ac_read_0_tasklet, shm_ac_read_notif_0_tasklet, 0);
+DECLARE_TASKLET(shm_ac_read_1_tasklet, shm_ac_read_notif_1_tasklet, 0);
+
+static DEFINE_MUTEX(ac_state_mutex);
+
+static DEFINE_SPINLOCK(ca_common_lock);
+static DEFINE_SPINLOCK(ca_audio_lock);
+static DEFINE_SPINLOCK(ca_wake_req_lock);
+static DEFINE_SPINLOCK(boot_lock);
+
+enum shrm_nl {
+ SHRM_NL_MOD_RESET = 1,
+ SHRM_NL_MOD_QUERY_STATE,
+ SHRM_NL_USER_MOD_RESET,
+ SHRM_NL_STATUS_MOD_ONLINE,
+ SHRM_NL_STATUS_MOD_OFFLINE,
+};
+
+void shm_mod_reset_req_work(struct kthread_work *work)
+{
+ prcmu_modem_reset();
+}
+
+static void shm_ac_sleep_req_work(struct kthread_work *work)
+{
+ mutex_lock(&ac_state_mutex);
+ if (atomic_read(&ac_sleep_disable_count) == 0)
+ modem_release(shm_dev->modem);
+ mutex_unlock(&ac_state_mutex);
+}
+
+static void shm_ac_wake_req_work(struct kthread_work *work)
+{
+ mutex_lock(&ac_state_mutex);
+ modem_request(shm_dev->modem);
+ mutex_unlock(&ac_state_mutex);
+}
+
+static u32 get_host_accessport_val(void)
+{
+ u32 prcm_hostaccess;
+
+ prcm_hostaccess = prcmu_read(PRCM_HOSTACCESS_REQ);
+ wmb();
+ prcm_hostaccess = prcm_hostaccess & 0x01;
+
+ return prcm_hostaccess;
+}
+static enum hrtimer_restart callback(struct hrtimer *timer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ca_wake_req_lock, flags);
+ if (((shrm_common_rx_state == SHRM_IDLE) ||
+ (shrm_common_rx_state == SHRM_SLEEP_STATE))
+ && ((shrm_common_tx_state == SHRM_IDLE) ||
+ (shrm_common_tx_state == SHRM_SLEEP_STATE))
+ && ((shrm_audio_rx_state == SHRM_IDLE) ||
+ (shrm_audio_rx_state == SHRM_SLEEP_STATE))
+ && ((shrm_audio_tx_state == SHRM_IDLE) ||
+ (shrm_audio_tx_state == SHRM_SLEEP_STATE))) {
+
+ shrm_common_rx_state = SHRM_SLEEP_STATE;
+ shrm_audio_rx_state = SHRM_SLEEP_STATE;
+ shrm_common_tx_state = SHRM_SLEEP_STATE;
+ shrm_audio_tx_state = SHRM_SLEEP_STATE;
+
+ queue_kthread_work(&shm_dev->shm_ac_sleep_kw,
+ &shm_dev->shm_ac_sleep_req);
+
+ }
+ spin_unlock_irqrestore(&ca_wake_req_lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+int nl_send_multicast_message(int msg, gfp_t gfp_mask)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh = NULL;
+ int err;
+
+ /* prepare netlink message */
+ skb = alloc_skb(NLMSG_SPACE(MAX_PAYLOAD), gfp_mask);
+ if (!skb) {
+ dev_err(shm_dev->dev, "%s:alloc_skb failed\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ nlh = (struct nlmsghdr *)skb->data;
+ nlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD);
+ dev_dbg(shm_dev->dev, "nlh->nlmsg_len = %d\n", nlh->nlmsg_len);
+
+ nlh->nlmsg_pid = 0; /* from kernel */
+ nlh->nlmsg_flags = 0;
+ *(int *)NLMSG_DATA(nlh) = msg;
+ skb_put(skb, MAX_PAYLOAD);
+ /* sender is in group 1<<0 */
+ NETLINK_CB(skb).pid = 0; /* from kernel */
+ /* to mcast group 1<<0 */
+ NETLINK_CB(skb).dst_group = 1;
+
+ /*multicast the message to all listening processes*/
+ err = netlink_broadcast(shrm_nl_sk, skb, 0, 1, gfp_mask);
+ dev_dbg(shm_dev->dev, "ret val from nl-multicast = %d\n", err);
+
+out:
+ return err;
+}
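For context, a hedged user-space sketch of how a daemon might listen for these multicast notifications (the NETLINK_SHRM protocol id and the SHRM_NL_* values come from the driver headers; everything else here is illustrative and not part of the patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr;
	char buf[NLMSG_SPACE(1024)];	/* MAX_PAYLOAD in the driver is 1024 */
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SHRM);

	if (fd < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_groups = 1;	/* group 1<<0, matching netlink_broadcast() above */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		if (len > 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
			int msg = *(int *)NLMSG_DATA(nlh);

			printf("shrm event %d\n", msg); /* e.g. SHRM_NL_MOD_RESET */
		}
	}
}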
+
+static void nl_send_unicast_message(int dst_pid)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh = NULL;
+ int err;
+ int bt_state;
+ unsigned long flags;
+
+ dev_info(shm_dev->dev, "Sending unicast message\n");
+
+ /* prepare the NL message for unicast */
+ skb = alloc_skb(NLMSG_SPACE(MAX_PAYLOAD), GFP_KERNEL);
+ if (!skb) {
+ dev_err(shm_dev->dev, "%s:alloc_skb failed\n", __func__);
+ return;
+ }
+
+ nlh = (struct nlmsghdr *)skb->data;
+ nlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD);
+ dev_dbg(shm_dev->dev, "nlh->nlmsg_len = %d\n", nlh->nlmsg_len);
+
+ nlh->nlmsg_pid = 0; /* from kernel */
+ nlh->nlmsg_flags = 0;
+
+ spin_lock_irqsave(&boot_lock, flags);
+ bt_state = boot_state;
+ spin_unlock_irqrestore(&boot_lock, flags);
+
+ if (bt_state == BOOT_DONE)
+ *(int *)NLMSG_DATA(nlh) = SHRM_NL_STATUS_MOD_ONLINE;
+ else
+ *(int *)NLMSG_DATA(nlh) = SHRM_NL_STATUS_MOD_OFFLINE;
+
+ skb_put(skb, MAX_PAYLOAD);
+ /* sender is in group 1<<0 */
+ NETLINK_CB(skb).pid = 0; /* from kernel */
+ NETLINK_CB(skb).dst_group = 0;
+
+ /*unicast the message to the querying processes*/
+ err = netlink_unicast(shrm_nl_sk, skb, dst_pid, MSG_DONTWAIT);
+ dev_dbg(shm_dev->dev, "ret val from nl-unicast = %d\n", err);
+}
+
+
+static int check_modem_in_reset(void)
+{
+ u8 bt_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&boot_lock, flags);
+ bt_state = boot_state;
+ spin_unlock_irqrestore(&boot_lock, flags);
+
+#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET
+ if (bt_state != BOOT_UNKNOWN)
+ return 0;
+ else
+ return -ENODEV;
+#else
+ /*
+ * this check won't be applicable and won't work correctly
+ * if modem-silent-feature is not enabled
+ * so, simply return 0
+ */
+ return 0;
+#endif
+}
+
+void shm_ca_msgpending_0_tasklet(unsigned long tasklet_data)
+{
+ struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data;
+ u32 reader_local_rptr;
+ u32 reader_local_wptr;
+ u32 shared_rptr;
+ u32 config = 0, version = 0;
+ unsigned long flags;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ /* Interprocess locking */
+ spin_lock(&ca_common_lock);
+
+ /* Update_reader_local_wptr with shared_wptr */
+ update_ca_common_local_wptr(shrm);
+ get_reader_pointers(COMMON_CHANNEL, &reader_local_rptr,
+ &reader_local_wptr, &shared_rptr);
+
+ set_ca_msg_0_read_notif_send(0);
+
+ if (boot_state == BOOT_DONE) {
+ shrm_common_rx_state = SHRM_PTR_FREE;
+
+ if (reader_local_rptr != shared_rptr)
+ ca_msg_read_notification_0(shrm);
+ if (reader_local_rptr != reader_local_wptr)
+ receive_messages_common(shrm);
+ get_reader_pointers(COMMON_CHANNEL, &reader_local_rptr,
+ &reader_local_wptr, &shared_rptr);
+ if (reader_local_rptr == reader_local_wptr)
+ shrm_common_rx_state = SHRM_IDLE;
+ } else {
+ /* BOOT phase: only a BOOT_RESP should be in the FIFO */
+ if (boot_state != BOOT_INFO_SYNC) {
+ if (!read_boot_info_req(shrm, &config, &version)) {
+ dev_err(shrm->dev,
+ "Unable to read boot state\n");
+ return;
+ }
+ /* SendReadNotification */
+ ca_msg_read_notification_0(shrm);
+ /*
+ * Check the version number before
+ * sending Boot info response
+ */
+
+ /* send MsgPending notification */
+ write_boot_info_resp(shrm, config, version);
+ spin_lock_irqsave(&boot_lock, flags);
+ boot_state = BOOT_INFO_SYNC;
+ spin_unlock_irqrestore(&boot_lock, flags);
+ dev_info(shrm->dev, "BOOT_INFO_SYNC\n");
+ queue_kthread_work(&shrm->shm_common_ch_wr_kw,
+ &shrm->send_ac_msg_pend_notify_0);
+ } else {
+ ca_msg_read_notification_0(shrm);
+ dev_info(shrm->dev,
+ "BOOT_INFO_SYNC\n");
+ }
+ }
+ /* Interprocess locking */
+ spin_unlock(&ca_common_lock);
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+void shm_ca_msgpending_1_tasklet(unsigned long tasklet_data)
+{
+ struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data;
+ u32 reader_local_rptr;
+ u32 reader_local_wptr;
+ u32 shared_rptr;
+
+ /*
+ * This function is called when a CaMsgPendingNotification is triggered
+ * by the CMU. It means that the CMU has written a message into the
+ * CA audio FIFO.
+ */
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown\n",
+ __func__);
+ return;
+ }
+
+ /* Interprocess locking */
+ spin_lock(&ca_audio_lock);
+
+ /* Update_reader_local_wptr(with shared_wptr) */
+ update_ca_audio_local_wptr(shrm);
+ get_reader_pointers(AUDIO_CHANNEL, &reader_local_rptr,
+ &reader_local_wptr, &shared_rptr);
+
+ set_ca_msg_1_read_notif_send(0);
+
+ if (boot_state != BOOT_DONE) {
+ dev_err(shrm->dev, "Boot Error\n");
+ return;
+ }
+ shrm_audio_rx_state = SHRM_PTR_FREE;
+ /* Check we already read the message */
+ if (reader_local_rptr != shared_rptr)
+ ca_msg_read_notification_1(shrm);
+ if (reader_local_rptr != reader_local_wptr)
+ receive_messages_audio(shrm);
+
+ get_reader_pointers(AUDIO_CHANNEL, &reader_local_rptr,
+ &reader_local_wptr, &shared_rptr);
+ if (reader_local_rptr == reader_local_wptr)
+ shrm_audio_rx_state = SHRM_IDLE;
+
+ /* Interprocess locking */
+ spin_unlock(&ca_audio_lock);
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+void shm_ac_read_notif_0_tasklet(unsigned long tasklet_data)
+{
+ struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data;
+ u32 writer_local_rptr;
+ u32 writer_local_wptr;
+ u32 shared_wptr;
+ unsigned long flags;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ /* Update writer_local_rptr with shared_rptr */
+ update_ac_common_local_rptr(shrm);
+ get_writer_pointers(COMMON_CHANNEL, &writer_local_rptr,
+ &writer_local_wptr, &shared_wptr);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown\n",
+ __func__);
+ return;
+ }
+
+ if (boot_state == BOOT_INFO_SYNC) {
+ /* BOOT_RESP sent by APE has been received by CMT */
+ spin_lock_irqsave(&boot_lock, flags);
+ boot_state = BOOT_DONE;
+ spin_unlock_irqrestore(&boot_lock, flags);
+ dev_info(shrm->dev, "IPC_ISA BOOT_DONE\n");
+
+ if (shrm->msr_flag) {
+ shrm_start_netdev(shrm->ndev);
+ shrm->msr_flag = 0;
+
+ /* multicast that modem is online */
+ nl_send_multicast_message(SHRM_NL_STATUS_MOD_ONLINE,
+ GFP_ATOMIC);
+ }
+
+ } else if (boot_state == BOOT_DONE) {
+ if (writer_local_rptr != writer_local_wptr) {
+ shrm_common_tx_state = SHRM_PTR_FREE;
+ queue_kthread_work(&shrm->shm_common_ch_wr_kw,
+ &shrm->send_ac_msg_pend_notify_0);
+ } else {
+ shrm_common_tx_state = SHRM_IDLE;
+ shrm_restart_netdev(shrm->ndev);
+ }
+ } else {
+ dev_err(shrm->dev, "Invalid boot state\n");
+ }
+ /* start timer here */
+ hrtimer_start(&timer, ktime_set(0, 10*NSEC_PER_MSEC),
+ HRTIMER_MODE_REL);
+ atomic_dec(&ac_sleep_disable_count);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+void shm_ac_read_notif_1_tasklet(unsigned long tasklet_data)
+{
+ struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data;
+ u32 writer_local_rptr;
+ u32 writer_local_wptr;
+ u32 shared_wptr;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown\n",
+ __func__);
+ return;
+ }
+
+ /* Update writer_local_rptr(with shared_rptr) */
+ update_ac_audio_local_rptr(shrm);
+ get_writer_pointers(AUDIO_CHANNEL, &writer_local_rptr,
+ &writer_local_wptr, &shared_wptr);
+ if (boot_state != BOOT_DONE) {
+ dev_err(shrm->dev, "Error Case in boot state\n");
+ return;
+ }
+ if (writer_local_rptr != writer_local_wptr) {
+ shrm_audio_tx_state = SHRM_PTR_FREE;
+ queue_kthread_work(&shrm->shm_audio_ch_wr_kw,
+ &shrm->send_ac_msg_pend_notify_1);
+ } else {
+ shrm_audio_tx_state = SHRM_IDLE;
+ }
+ /* start timer here */
+ hrtimer_start(&timer, ktime_set(0, 10*NSEC_PER_MSEC),
+ HRTIMER_MODE_REL);
+ atomic_dec(&ac_sleep_disable_count);
+ atomic_dec(&ac_msg_pend_1);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+void shm_ca_sleep_req_work(struct kthread_work *work)
+{
+ dev_dbg(shm_dev->dev, "%s:IRQ_PRCMU_CA_SLEEP\n", __func__);
+
+ shrm_common_rx_state = SHRM_IDLE;
+ shrm_audio_rx_state = SHRM_IDLE;
+
+ if (check_modem_in_reset()) {
+ dev_err(shm_dev->dev, "%s:Modem state reset or unknown\n",
+ __func__);
+ return;
+ }
+
+ writel((1<<GOP_CA_WAKE_ACK_BIT),
+ shm_dev->intr_base + GOP_SET_REGISTER_BASE);
+
+ hrtimer_start(&timer, ktime_set(0, 10*NSEC_PER_MSEC),
+ HRTIMER_MODE_REL);
+ suspend_unblock_sleep();
+ atomic_dec(&ac_sleep_disable_count);
+}
+
+void shm_ca_wake_req_work(struct kthread_work *work)
+{
+ struct shrm_dev *shrm = container_of(work,
+ struct shrm_dev, shm_ca_wake_req);
+
+ /* initialize the FIFO Variables */
+ if (boot_state == BOOT_INIT)
+ shm_fifo_init(shrm);
+
+ mutex_lock(&ac_state_mutex);
+ modem_request(shrm->modem);
+ mutex_unlock(&ac_state_mutex);
+
+ /* send ca_wake_ack_interrupt to CMU */
+ if (!get_host_accessport_val()) {
+ dev_crit(shrm->dev, "get_host_accessport failed\n");
+ dev_info(shrm->dev, "Initiating a modem reset\n");
+ prcmu_modem_reset();
+ }
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown\n",
+ __func__);
+ return;
+ }
+
+ writel((1<<GOP_CA_WAKE_ACK_BIT),
+ shm_dev->intr_base + GOP_SET_REGISTER_BASE);
+}
+#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET
+static int shrm_modem_reset_sequence(void)
+{
+ int err;
+ unsigned long flags;
+
+ hrtimer_cancel(&timer);
+
+ /*
+ * keep the count to 0 so that we can bring down the line
+ * for normal ac-wake and ac-sleep logic
+ */
+ atomic_set(&ac_sleep_disable_count, 0);
+ atomic_set(&ac_msg_pend_1, 0);
+
+ /* workaround for MSR */
+ queue_kthread_work(&shm_dev->shm_ac_wake_kw,
+ &shm_dev->shm_ac_wake_req);
+
+ /* reset char device queues */
+ shrm_char_reset_queues(shm_dev);
+
+ /* reset protocol states */
+ shrm_common_tx_state = SHRM_SLEEP_STATE;
+ shrm_common_rx_state = SHRM_SLEEP_STATE;
+ shrm_audio_tx_state = SHRM_SLEEP_STATE;
+ shrm_audio_rx_state = SHRM_SLEEP_STATE;
+
+ /* set the msr flag */
+ shm_dev->msr_flag = 1;
+
+ /* multicast that modem is going to reset */
+ err = nl_send_multicast_message(SHRM_NL_MOD_RESET, GFP_ATOMIC);
+
+ /* reset the boot state */
+ spin_lock_irqsave(&boot_lock, flags);
+ boot_state = BOOT_INIT;
+ spin_unlock_irqrestore(&boot_lock, flags);
+
+ /* re-enable irqs */
+ enable_irq(shm_dev->ac_read_notif_0_irq);
+ enable_irq(shm_dev->ac_read_notif_1_irq);
+ enable_irq(shm_dev->ca_msg_pending_notif_0_irq);
+ enable_irq(shm_dev->ca_msg_pending_notif_1_irq);
+ enable_irq(IRQ_PRCMU_CA_WAKE);
+ enable_irq(IRQ_PRCMU_CA_SLEEP);
+
+ return err;
+}
+#endif
+
+static void shrm_modem_reset_callback(unsigned long irq)
+{
+ dev_err(shm_dev->dev, "Received mod_reset_req interrupt\n");
+
+#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET
+ {
+ int err;
+ dev_info(shm_dev->dev, "Initiating Modem silent reset\n");
+
+ err = shrm_modem_reset_sequence();
+ if (err)
+ dev_err(shm_dev->dev,
+ "Failed multicast of modem reset\n");
+ }
+#else
+ dev_info(shm_dev->dev, "Modem in reset loop, doing System reset\n");
+
+ /* Call the PRCMU reset API */
+ prcmu_system_reset(SW_RESET_NO_ARGUMENT);
+#endif
+}
+
+DECLARE_TASKLET(shrm_sw_reset_callback, shrm_modem_reset_callback,
+ IRQ_PRCMU_MODEM_SW_RESET_REQ);
+
+static irqreturn_t shrm_prcmu_irq_handler(int irq, void *data)
+{
+ struct shrm_dev *shrm = data;
+ unsigned long flags;
+
+ switch (irq) {
+ case IRQ_PRCMU_CA_WAKE:
+ suspend_block_sleep();
+ if (shrm->msr_flag)
+ atomic_set(&ac_sleep_disable_count, 0);
+ atomic_inc(&ac_sleep_disable_count);
+ queue_kthread_work(&shrm->shm_ca_wake_kw, &shrm->shm_ca_wake_req);
+ break;
+ case IRQ_PRCMU_CA_SLEEP:
+ queue_kthread_work(&shrm->shm_ca_wake_kw, &shrm->shm_ca_sleep_req);
+ break;
+ case IRQ_PRCMU_MODEM_SW_RESET_REQ:
+ /* update the boot_state */
+ spin_lock_irqsave(&boot_lock, flags);
+ boot_state = BOOT_UNKNOWN;
+
+ /*
+ * Add a barrier here to make sure boot_state is updated;
+ * otherwise it is seen that some already-executing modem
+ * irqs or tasklets fail the protocol checks and ultimately
+ * try to access the modem, causing the system to hang.
+ * This is particularly seen with user-space initiated modem reset.
+ */
+ wmb();
+ spin_unlock_irqrestore(&boot_lock, flags);
+
+ disable_irq_nosync(shrm->ac_read_notif_0_irq);
+ disable_irq_nosync(shrm->ac_read_notif_1_irq);
+ disable_irq_nosync(shrm->ca_msg_pending_notif_0_irq);
+ disable_irq_nosync(shrm->ca_msg_pending_notif_1_irq);
+ disable_irq_nosync(IRQ_PRCMU_CA_WAKE);
+ disable_irq_nosync(IRQ_PRCMU_CA_SLEEP);
+
+ /* stop network queue */
+ shrm_stop_netdev(shm_dev->ndev);
+
+ tasklet_schedule(&shrm_sw_reset_callback);
+ break;
+ default:
+ dev_err(shrm->dev, "%s: => IRQ %d\n", __func__, irq);
+ return IRQ_NONE;
+ }
+ return IRQ_HANDLED;
+}
+
+static void send_ac_msg_pend_notify_0_work(struct kthread_work *work)
+{
+ struct shrm_dev *shrm = container_of(work, struct shrm_dev,
+ send_ac_msg_pend_notify_0);
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ update_ac_common_shared_wptr(shrm);
+
+ mutex_lock(&ac_state_mutex);
+ atomic_inc(&ac_sleep_disable_count);
+ modem_request(shrm->modem);
+ mutex_unlock(&ac_state_mutex);
+
+ if (!get_host_accessport_val()) {
+ dev_crit(shrm->dev, "get_host_accessport failed\n");
+ dev_info(shrm->dev, "Initiating a modem reset\n");
+ prcmu_modem_reset();
+ }
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return;
+ }
+
+ /* Trigger AcMsgPendingNotification to CMU */
+ writel((1<<GOP_COMMON_AC_MSG_PENDING_NOTIFICATION_BIT),
+ shrm->intr_base + GOP_SET_REGISTER_BASE);
+
+ if (shrm_common_tx_state == SHRM_PTR_FREE)
+ shrm_common_tx_state = SHRM_PTR_BUSY;
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+static void send_ac_msg_pend_notify_1_work(struct kthread_work *work)
+{
+ struct shrm_dev *shrm = container_of(work, struct shrm_dev,
+ send_ac_msg_pend_notify_1);
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ /* Update shared_wptr with writer_local_wptr */
+ update_ac_audio_shared_wptr(shrm);
+
+ mutex_lock(&ac_state_mutex);
+ if (!atomic_read(&ac_msg_pend_1)) {
+ atomic_inc(&ac_sleep_disable_count);
+ atomic_inc(&ac_msg_pend_1);
+ }
+ modem_request(shrm->modem);
+ mutex_unlock(&ac_state_mutex);
+
+ if (!get_host_accessport_val()) {
+ dev_crit(shrm->dev, "get_host_accessport failed\n");
+ dev_info(shrm->dev, "Initiating a modem reset\n");
+ prcmu_modem_reset();
+ }
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return;
+ }
+
+ /* Trigger AcMsgPendingNotification to CMU */
+ writel((1<<GOP_AUDIO_AC_MSG_PENDING_NOTIFICATION_BIT),
+ shrm->intr_base + GOP_SET_REGISTER_BASE);
+
+ if (shrm_audio_tx_state == SHRM_PTR_FREE)
+ shrm_audio_tx_state = SHRM_PTR_BUSY;
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+void shm_nl_receive(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh = NULL;
+ int msg;
+
+ dev_dbg(shm_dev->dev, "Received NL msg from user-space\n");
+
+ nlh = (struct nlmsghdr *)skb->data;
+ msg = *((int *)(NLMSG_DATA(nlh)));
+ switch (msg) {
+ case SHRM_NL_MOD_QUERY_STATE:
+ dev_info(shm_dev->dev, "mod-query-state from user-space\n");
+ nl_send_unicast_message(nlh->nlmsg_pid);
+ break;
+
+ case SHRM_NL_USER_MOD_RESET:
+ dev_info(shm_dev->dev, "user-space inited mod-reset-req\n");
+ dev_info(shm_dev->dev, "PCRMU resets modem\n");
+ prcmu_modem_reset();
+ break;
+
+ default:
+ dev_err(shm_dev->dev, "Invalid NL msg from user-space\n");
+ break;
+ }
+}
+
+int shrm_protocol_init(struct shrm_dev *shrm,
+ received_msg_handler common_rx_handler,
+ received_msg_handler audio_rx_handler)
+{
+ int err;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+ shm_dev = shrm;
+ boot_state = BOOT_INIT;
+ dev_info(shrm->dev, "IPC_ISA BOOT_INIT\n");
+ rx_common_handler = common_rx_handler;
+ rx_audio_handler = audio_rx_handler;
+ atomic_set(&ac_sleep_disable_count, 0);
+
+ hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ timer.function = callback;
+
+ init_kthread_worker(&shrm->shm_common_ch_wr_kw);
+ shrm->shm_common_ch_wr_kw_task = kthread_run(kthread_worker_fn,
+ &shrm->shm_common_ch_wr_kw,
+ "shm_common_channel_irq");
+ if (IS_ERR(shrm->shm_common_ch_wr_kw_task)) {
+ dev_err(shrm->dev, "failed to create work task\n");
+ return -ENOMEM;
+ }
+
+ init_kthread_worker(&shrm->shm_audio_ch_wr_kw);
+ shrm->shm_audio_ch_wr_kw_task = kthread_run(kthread_worker_fn,
+ &shrm->shm_audio_ch_wr_kw,
+ "shm_audio_channel_irq");
+ if (IS_ERR(shrm->shm_audio_ch_wr_kw_task)) {
+ dev_err(shrm->dev, "failed to create work task\n");
+ err = -ENOMEM;
+ goto free_kw1;
+ }
+ /* must use the FIFO scheduler as it is realtime sensitive */
+ sched_setscheduler(shrm->shm_audio_ch_wr_kw_task, SCHED_FIFO, &param);
+
+ init_kthread_worker(&shrm->shm_ac_wake_kw);
+ shrm->shm_ac_wake_kw_task = kthread_run(kthread_worker_fn,
+ &shrm->shm_ac_wake_kw,
+ "shm_ac_wake_req");
+ if (IS_ERR(shrm->shm_ac_wake_kw_task)) {
+ dev_err(shrm->dev, "failed to create work task\n");
+ err = -ENOMEM;
+ goto free_kw2;
+ }
+ /* must use the FIFO scheduler as it is realtime sensitive */
+ sched_setscheduler(shrm->shm_ac_wake_kw_task, SCHED_FIFO, &param);
+
+ init_kthread_worker(&shrm->shm_ca_wake_kw);
+ shrm->shm_ca_wake_kw_task = kthread_run(kthread_worker_fn,
+ &shrm->shm_ca_wake_kw,
+ "shm_ca_wake_req");
+ if (IS_ERR(shrm->shm_ca_wake_kw_task)) {
+ dev_err(shrm->dev, "failed to create work task\n");
+ err = -ENOMEM;
+ goto free_kw3;
+ }
+ /* must use the FIFO scheduler as it is realtime sensitive */
+ sched_setscheduler(shrm->shm_ca_wake_kw_task, SCHED_FIFO, &param);
+
+ init_kthread_worker(&shrm->shm_ac_sleep_kw);
+ shrm->shm_ac_sleep_kw_task = kthread_run(kthread_worker_fn,
+ &shrm->shm_ac_sleep_kw,
+ "shm_ac_sleep_req");
+ if (IS_ERR(shrm->shm_ac_sleep_kw_task)) {
+ dev_err(shrm->dev, "failed to create work task\n");
+ err = -ENOMEM;
+ goto free_kw4;
+ }
+
+ init_kthread_work(&shrm->send_ac_msg_pend_notify_0,
+ send_ac_msg_pend_notify_0_work);
+ init_kthread_work(&shrm->send_ac_msg_pend_notify_1,
+ send_ac_msg_pend_notify_1_work);
+ init_kthread_work(&shrm->shm_ca_wake_req, shm_ca_wake_req_work);
+ init_kthread_work(&shrm->shm_ca_sleep_req, shm_ca_sleep_req_work);
+ init_kthread_work(&shrm->shm_ac_sleep_req, shm_ac_sleep_req_work);
+ init_kthread_work(&shrm->shm_ac_wake_req, shm_ac_wake_req_work);
+ init_kthread_work(&shrm->shm_mod_reset_req, shm_mod_reset_req_work);
+
+ /* set tasklet data */
+ shm_ca_0_tasklet.data = (unsigned long)shrm;
+ shm_ca_1_tasklet.data = (unsigned long)shrm;
+
+ err = request_irq(IRQ_PRCMU_CA_SLEEP, shrm_prcmu_irq_handler,
+ IRQF_NO_SUSPEND, "ca-sleep", shrm);
+ if (err < 0) {
+ dev_err(shm_dev->dev, "Failed alloc IRQ_PRCMU_CA_SLEEP.\n");
+ goto free_kw5;
+ }
+
+ err = request_irq(IRQ_PRCMU_CA_WAKE, shrm_prcmu_irq_handler,
+ IRQF_NO_SUSPEND, "ca-wake", shrm);
+ if (err < 0) {
+ dev_err(shm_dev->dev, "Failed alloc IRQ_PRCMU_CA_WAKE.\n");
+ goto drop2;
+ }
+
+ err = request_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, shrm_prcmu_irq_handler,
+ IRQF_NO_SUSPEND, "modem-sw-reset-req", shrm);
+ if (err < 0) {
+ dev_err(shm_dev->dev,
+ "Failed alloc IRQ_PRCMU_MODEM_SW_RESET_REQ.\n");
+ goto drop1;
+ }
+
+#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET
+ /* init netlink socket for user-space communication */
+ shrm_nl_sk = netlink_kernel_create(NULL, NETLINK_SHRM, 1,
+ shm_nl_receive, NULL, THIS_MODULE);
+
+ if (!shrm_nl_sk) {
+ dev_err(shm_dev->dev, "netlink socket creation failed\n");
+ goto drop;
+ }
+#endif
+ return 0;
+
+#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET
+drop:
+ free_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, NULL);
+#endif
+drop1:
+ free_irq(IRQ_PRCMU_CA_WAKE, NULL);
+drop2:
+ free_irq(IRQ_PRCMU_CA_SLEEP, NULL);
+free_kw5:
+ kthread_stop(shrm->shm_ac_sleep_kw_task);
+free_kw4:
+ kthread_stop(shrm->shm_ca_wake_kw_task);
+free_kw3:
+ kthread_stop(shrm->shm_ac_wake_kw_task);
+free_kw2:
+ kthread_stop(shrm->shm_audio_ch_wr_kw_task);
+free_kw1:
+ kthread_stop(shrm->shm_common_ch_wr_kw_task);
+ return err;
+}
+
+void shrm_protocol_deinit(struct shrm_dev *shrm)
+{
+ free_irq(IRQ_PRCMU_CA_SLEEP, NULL);
+ free_irq(IRQ_PRCMU_CA_WAKE, NULL);
+ free_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, NULL);
+ flush_kthread_worker(&shrm->shm_common_ch_wr_kw);
+ flush_kthread_worker(&shrm->shm_audio_ch_wr_kw);
+ flush_kthread_worker(&shrm->shm_ac_wake_kw);
+ flush_kthread_worker(&shrm->shm_ca_wake_kw);
+ flush_kthread_worker(&shrm->shm_ac_sleep_kw);
+ kthread_stop(shrm->shm_common_ch_wr_kw_task);
+ kthread_stop(shrm->shm_audio_ch_wr_kw_task);
+ kthread_stop(shrm->shm_ac_wake_kw_task);
+ kthread_stop(shrm->shm_ca_wake_kw_task);
+ kthread_stop(shrm->shm_ac_sleep_kw_task);
+ modem_put(shrm->modem);
+}
+
+int get_ca_wake_req_state(void)
+{
+ return ((atomic_read(&ac_sleep_disable_count) > 0) ||
+ modem_get_usage(shm_dev->modem));
+}
+
+irqreturn_t ca_wake_irq_handler(int irq, void *ctrlr)
+{
+ struct shrm_dev *shrm = ctrlr;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+ /* initialize the FIFO Variables */
+ if (boot_state == BOOT_INIT)
+ shm_fifo_init(shrm);
+
+ dev_dbg(shrm->dev, "Inside ca_wake_irq_handler\n");
+
+ /* Clear the interrupt */
+ writel((1 << GOP_CA_WAKE_REQ_BIT),
+ shrm->intr_base + GOP_CLEAR_REGISTER_BASE);
+
+ /* send ca_wake_ack_interrupt to CMU */
+ writel((1 << GOP_CA_WAKE_ACK_BIT),
+ shrm->intr_base + GOP_SET_REGISTER_BASE);
+
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return IRQ_HANDLED;
+}
+
+
+irqreturn_t ac_read_notif_0_irq_handler(int irq, void *ctrlr)
+{
+ struct shrm_dev *shrm = ctrlr;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ shm_ac_read_0_tasklet.data = (unsigned long)shrm;
+ tasklet_schedule(&shm_ac_read_0_tasklet);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ /* Clear the interrupt */
+ writel((1 << GOP_COMMON_AC_READ_NOTIFICATION_BIT),
+ shrm->intr_base + GOP_CLEAR_REGISTER_BASE);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t ac_read_notif_1_irq_handler(int irq, void *ctrlr)
+{
+ struct shrm_dev *shrm = ctrlr;
+
+ dev_dbg(shrm->dev, "%s IN+\n", __func__);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ shm_ac_read_1_tasklet.data = (unsigned long)shrm;
+ tasklet_schedule(&shm_ac_read_1_tasklet);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ /* Clear the interrupt */
+ writel((1 << GOP_AUDIO_AC_READ_NOTIFICATION_BIT),
+ shrm->intr_base + GOP_CLEAR_REGISTER_BASE);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t ca_msg_pending_notif_0_irq_handler(int irq, void *ctrlr)
+{
+ struct shrm_dev *shrm = ctrlr;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ tasklet_schedule(&shm_ca_0_tasklet);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ /* Clear the interrupt */
+ writel((1 << GOP_COMMON_CA_MSG_PENDING_NOTIFICATION_BIT),
+ shrm->intr_base + GOP_CLEAR_REGISTER_BASE);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t ca_msg_pending_notif_1_irq_handler(int irq, void *ctrlr)
+{
+ struct shrm_dev *shrm = ctrlr;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ tasklet_schedule(&shm_ca_1_tasklet);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ /* Clear the interrupt */
+ writel((1<<GOP_AUDIO_CA_MSG_PENDING_NOTIFICATION_BIT),
+ shrm->intr_base+GOP_CLEAR_REGISTER_BASE);
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return IRQ_HANDLED;
+
+}
+
+/**
+ * shm_write_msg() - write message to shared memory
+ * @shrm: pointer to the shrm device information structure
+ * @l2_header: L2 header
+ * @addr: pointer to the message
+ * @length: length of the message to be written
+ *
+ * This function is called from net or char interface driver write operation.
+ * Prior to calling this function the message is copied from the user space
+ * buffer to the kernel buffer. Based on the L2 header, this function routes
+ * the message to the respective channel and FIFO, then calls the FIFO write
+ * function, which writes the message to the physical device.
+ */
+int shm_write_msg(struct shrm_dev *shrm, u8 l2_header,
+ void *addr, u32 length)
+{
+ u8 channel = 0;
+ int ret;
+
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (boot_state != BOOT_DONE) {
+ dev_err(shrm->dev,
+ "error: this fn must be called only after boot done, L2Header = %d\n",
+ l2_header);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if ((l2_header == L2_HEADER_ISI) ||
+ (l2_header == L2_HEADER_RPC) ||
+ (l2_header == L2_HEADER_SECURITY) ||
+ (l2_header == L2_HEADER_COMMON_SIMPLE_LOOPBACK) ||
+ (l2_header == L2_HEADER_COMMON_ADVANCED_LOOPBACK) ||
+ (l2_header == L2_HEADER_CIQ) ||
+ (l2_header == L2_HEADER_RTC_CALIBRATION)) {
+ channel = 0;
+ if (shrm_common_tx_state == SHRM_SLEEP_STATE)
+ shrm_common_tx_state = SHRM_PTR_FREE;
+ else if (shrm_common_tx_state == SHRM_IDLE)
+ shrm_common_tx_state = SHRM_PTR_FREE;
+
+ } else if ((l2_header == L2_HEADER_AUDIO) ||
+ (l2_header == L2_HEADER_AUDIO_SIMPLE_LOOPBACK) ||
+ (l2_header == L2_HEADER_AUDIO_ADVANCED_LOOPBACK)) {
+ if (shrm_audio_tx_state == SHRM_SLEEP_STATE)
+ shrm_audio_tx_state = SHRM_PTR_FREE;
+ else if (shrm_audio_tx_state == SHRM_IDLE)
+ shrm_audio_tx_state = SHRM_PTR_FREE;
+
+ channel = 1;
+ } else {
+ ret = -ENODEV;
+ goto out;
+ }
+ ret = shm_write_msg_to_fifo(shrm, channel, l2_header, addr, length);
+ if (ret < 0) {
+ dev_err(shrm->dev, "write message to fifo failed\n");
+ return ret;
+ }
+ /*
+ * notify only if new msg copied is the only unread one
+ * otherwise it means that reading process is ongoing
+ */
+ if (is_the_only_one_unread_message(shrm, channel, length)) {
+
+ /* Send Message Pending Notification to CMT */
+ if (channel == 0)
+ queue_kthread_work(&shrm->shm_common_ch_wr_kw,
+ &shrm->send_ac_msg_pend_notify_0);
+ else
+ queue_kthread_work(&shrm->shm_audio_ch_wr_kw,
+ &shrm->send_ac_msg_pend_notify_1);
+
+ }
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+ return 0;
+
+out:
+ return ret;
+}
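A hedged usage sketch of the write path documented above (the caller and buffer are illustrative; in the real driver the char and net interfaces call shm_write_msg() after copying the message into a kernel buffer):

/* Illustration only: push one ISI message that has already been copied
 * into a kernel buffer, as a char/net write operation would do. */
static int example_send_isi(struct shrm_dev *shrm, void *kbuf, u32 len)
{
	int ret;

	ret = shm_write_msg(shrm, L2_HEADER_ISI, kbuf, len);
	if (ret < 0)
		dev_err(shrm->dev, "ISI write failed: %d\n", ret);

	return ret;
}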
+
+void ca_msg_read_notification_0(struct shrm_dev *shrm)
+{
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (get_ca_msg_0_read_notif_send() == 0) {
+ update_ca_common_shared_rptr(shrm);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return;
+ }
+
+ /* Trigger CaMsgReadNotification to CMU */
+ writel((1 << GOP_COMMON_CA_READ_NOTIFICATION_BIT),
+ shrm->intr_base + GOP_SET_REGISTER_BASE);
+ set_ca_msg_0_read_notif_send(1);
+ shrm_common_rx_state = SHRM_PTR_BUSY;
+ }
+
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+void ca_msg_read_notification_1(struct shrm_dev *shrm)
+{
+ dev_dbg(shrm->dev, "%s IN\n", __func__);
+
+ if (get_ca_msg_1_read_notif_send() == 0) {
+ update_ca_audio_shared_rptr(shrm);
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return;
+ }
+
+ /* Trigger CaMsgReadNotification to CMU */
+ writel((1<<GOP_AUDIO_CA_READ_NOTIFICATION_BIT),
+ shrm->intr_base+GOP_SET_REGISTER_BASE);
+ set_ca_msg_1_read_notif_send(1);
+ shrm_audio_rx_state = SHRM_PTR_BUSY;
+ }
+ dev_dbg(shrm->dev, "%s OUT\n", __func__);
+}
+
+/**
+ * receive_messages_common - receive common channel msg from
+ * CMT (Cellular Mobile Terminal)
+ * @shrm: pointer to shrm device information structure
+ *
+ * The messages sent from CMT to APE are written to the respective FIFO
+ * and an interrupt is triggered by the CMT. This ca message pending
+ * interrupt calls this function. This function sends a read notification
+ * acknowledgement to the CMT and calls the common channel receive handler
+ * where the message is copied to the respective (ISI, RPC, SECURITY) queue
+ * based on the message l2 header.
+ */
+void receive_messages_common(struct shrm_dev *shrm)
+{
+ u8 l2_header;
+ u32 len;
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return;
+ }
+
+ l2_header = read_one_l2msg_common(shrm, recieve_common_msg, &len);
+ /* Send Receive_Call_back to Upper Layer */
+ if (!rx_common_handler) {
+ dev_err(shrm->dev, "common_rx_handler is Null\n");
+ BUG();
+ }
+ (*rx_common_handler)(l2_header, &recieve_common_msg, len,
+ shrm);
+ /* SendReadNotification */
+ ca_msg_read_notification_0(shrm);
+
+ while (read_remaining_messages_common()) {
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return;
+ }
+
+ l2_header = read_one_l2msg_common(shrm, recieve_common_msg,
+ &len);
+ /* Send Receive_Call_back to Upper Layer */
+ (*rx_common_handler)(l2_header,
+ &recieve_common_msg, len,
+ shrm);
+ }
+}
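For illustration, a hedged sketch of a common-channel receive handler with the parameter order used by the call sites above (the real prototype is the received_msg_handler typedef in shrm_private.h; the dispatch comment is an assumption about what the upper layer does):

/* Illustration only: mirrors the (*rx_common_handler)(...) calls above. */
static void example_common_rx(u8 l2_header, void *msg, u32 len,
			      struct shrm_dev *shrm)
{
	dev_dbg(shrm->dev, "common rx: l2=0x%x len=%u\n", l2_header, len);
	/* dispatch msg to the ISI/RPC/SECURITY queue based on l2_header */
}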
+
+/**
+ * receive_messages_audio() - receive audio message from CMT
+ * @shrm: pointer to shrm device information structure
+ *
+ * The messages sent from CMT to APE are written to the respective FIFO
+ * and an interrupt is triggered by the CMT. This ca message pending
+ * interrupt calls this function. This function sends a read notification
+ * acknowledgement to the CMT and calls the audio channel receive handler
+ * where the message is copied to the audio queue.
+ */
+void receive_messages_audio(struct shrm_dev *shrm)
+{
+ u8 l2_header;
+ u32 len;
+
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return;
+ }
+
+ l2_header = read_one_l2msg_audio(shrm, recieve_audio_msg, &len);
+ /* Send Receive_Call_back to Upper Layer */
+
+ if (!rx_audio_handler) {
+ dev_crit(shrm->dev, "audio_rx_handler is Null\n");
+ BUG();
+ }
+ (*rx_audio_handler)(l2_header, &recieve_audio_msg,
+ len, shrm);
+
+ /* SendReadNotification */
+ ca_msg_read_notification_1(shrm);
+ while (read_remaining_messages_audio()) {
+ if (check_modem_in_reset()) {
+ dev_err(shrm->dev, "%s:Modem state reset or unknown.\n",
+ __func__);
+ return;
+ }
+
+ l2_header = read_one_l2msg_audio(shrm,
+ recieve_audio_msg, &len);
+ /* Send Receive_Call_back to Upper Layer */
+ (*rx_audio_handler)(l2_header,
+ &recieve_audio_msg, len,
+ shrm);
+ }
+}
+
+u8 get_boot_state(void)
+{
+ return boot_state;
+}
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a6b8ce11a22..a0f2484368b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -70,3 +70,8 @@ obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
obj-$(CONFIG_HYPERV_NET) += hyperv/
+
+ifdef CONFIG_PHONET
+obj-$(CONFIG_U8500_SHRM) += u8500_shrm.o
+obj-$(CONFIG_MODEM_M6718_SPI) += m6718_modem_net.o
+endif
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 8a3054b8481..957363ceae4 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -182,6 +182,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
* This is not yet handled.
*/
+ BUG_ON(ser->dev == NULL);
/*
* Workaround for garbage at start of transmission,
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 5b2041319a3..fc55bf65f1c 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -13,6 +13,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
+#include <linux/kthread.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>
@@ -107,8 +108,13 @@ struct shmdrv_layer {
struct workqueue_struct *pshm_tx_workqueue;
struct workqueue_struct *pshm_rx_workqueue;
+ struct kthread_worker pshm_flow_ctrl_kw;
+ struct task_struct *pshm_flow_ctrl_kw_task;
+
struct work_struct shm_tx_work;
struct work_struct shm_rx_work;
+ struct kthread_work shm_flow_on_work;
+ struct kthread_work shm_flow_off_work;
struct sk_buff_head sk_qhead;
struct shmdev_layer *pshm_dev;
@@ -126,6 +132,24 @@ static int shm_netdev_close(struct net_device *shm_netdev)
return 0;
}
+static void shm_flow_on_work_func(struct kthread_work *work)
+{
+ struct shmdrv_layer *pshm_drv = container_of(work, struct shmdrv_layer, shm_flow_on_work);
+
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_ON);
+}
+
+static void shm_flow_off_work_func(struct kthread_work *work)
+{
+ struct shmdrv_layer *pshm_drv = container_of(work, struct shmdrv_layer, shm_flow_off_work);
+
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_OFF);
+}
+
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
struct buf_list *pbuf;
@@ -238,11 +262,9 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
if ((avail_emptybuff > HIGH_WATERMARK) &&
(!pshm_drv->tx_empty_available)) {
pshm_drv->tx_empty_available = 1;
+ queue_kthread_work(&pshm_drv->pshm_flow_ctrl_kw,
+ &pshm_drv->shm_flow_on_work);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
- pshm_drv->cfdev.flowctrl
- (pshm_drv->pshm_dev->pshm_netdev,
- CAIF_FLOW_ON);
-
/* Schedule the work queue. if required */
if (!work_pending(&pshm_drv->shm_tx_work))
@@ -426,11 +448,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pshm_drv->tx_empty_available) {
/* Update blocking condition. */
pshm_drv->tx_empty_available = 0;
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- pshm_drv->cfdev.flowctrl
- (pshm_drv->pshm_dev->pshm_netdev,
- CAIF_FLOW_OFF);
- spin_lock_irqsave(&pshm_drv->lock, flags);
+ queue_kthread_work(&pshm_drv->pshm_flow_ctrl_kw,
+ &pshm_drv->shm_flow_off_work);
}
/*
* We simply return back to the caller if we do not have space
@@ -503,7 +522,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pbuf->frames++;
pbuf->frm_ofs += frmlen + (frmlen % 32);
- } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
+ } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF &&
+ pbuf->frm_ofs < pbuf->len);
/* Assign buffer as full. */
list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
@@ -562,6 +582,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
int result, j;
struct shmdrv_layer *pshm_drv = NULL;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
"cfshm%d", shm_netdev_setup);
@@ -622,11 +643,20 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
+ init_kthread_work(&pshm_drv->shm_flow_on_work, shm_flow_on_work_func);
+ init_kthread_work(&pshm_drv->shm_flow_off_work, shm_flow_off_work_func);
+
pshm_drv->pshm_tx_workqueue =
create_singlethread_workqueue("shm_tx_work");
pshm_drv->pshm_rx_workqueue =
create_singlethread_workqueue("shm_rx_work");
+ init_kthread_worker(&pshm_drv->pshm_flow_ctrl_kw);
+ pshm_drv->pshm_flow_ctrl_kw_task = kthread_run(kthread_worker_fn,
+ &pshm_drv->pshm_flow_ctrl_kw, "pshm_caif_flow_ctrl");
+ /* must use the FIFO scheduler as it is realtime sensitive */
+ sched_setscheduler(pshm_drv->pshm_flow_ctrl_kw_task, SCHED_FIFO, &param);
+
for (j = 0; j < NR_TX_BUF; j++) {
struct buf_list *tx_buf =
kmalloc(sizeof(struct buf_list), GFP_KERNEL);
@@ -744,6 +774,8 @@ void caif_shmcore_remove(struct net_device *pshm_netdev)
/* Destroy work queues. */
destroy_workqueue(pshm_drv->pshm_tx_workqueue);
destroy_workqueue(pshm_drv->pshm_rx_workqueue);
+ flush_kthread_worker(&pshm_drv->pshm_flow_ctrl_kw);
+ kthread_stop(pshm_drv->pshm_flow_ctrl_kw_task);
unregister_netdev(pshm_netdev);
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 24d2df068d7..3b2a64ab1d6 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -33,6 +33,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/crc32.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
@@ -144,6 +145,9 @@ struct smsc911x_data {
/* regulators */
struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
+
+ /* clock */
+ struct clk *fsmc_clk;
};
/* Easy access to information */
@@ -369,7 +373,7 @@ out:
}
/*
- * enable resources, currently just regulators.
+ * enable resources, regulators & clocks.
*/
static int smsc911x_enable_resources(struct platform_device *pdev)
{
@@ -379,9 +383,17 @@ static int smsc911x_enable_resources(struct platform_device *pdev)
ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies),
pdata->supplies);
- if (ret)
+ if (ret) {
netdev_err(ndev, "failed to enable regulators %d\n",
ret);
+ return ret;
+ }
+
+ if (pdata->fsmc_clk) {
+ ret = clk_enable(pdata->fsmc_clk);
+ if (ret < 0)
+ netdev_err(ndev, "failed to enable clock %d\n", ret);
+ }
return ret;
}
@@ -396,6 +408,8 @@ static int smsc911x_disable_resources(struct platform_device *pdev)
ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies),
pdata->supplies);
+ if (pdata->fsmc_clk)
+ clk_disable(pdata->fsmc_clk);
return ret;
}
@@ -418,9 +432,17 @@ static int smsc911x_request_resources(struct platform_device *pdev)
ret = regulator_bulk_get(&pdev->dev,
ARRAY_SIZE(pdata->supplies),
pdata->supplies);
- if (ret)
- netdev_err(ndev, "couldn't get regulators %d\n",
- ret);
+ if (ret) {
+ netdev_err(ndev, "couldn't get regulators %d\n", ret);
+ return ret;
+ }
+
+ /* Request clock, ignore if not here */
+ pdata->fsmc_clk = clk_get(NULL, "fsmc");
+ if (IS_ERR(pdata->fsmc_clk)) {
+ netdev_warn(ndev, "couldn't get clock %d\n", ret);
+ pdata->fsmc_clk = NULL;
+ }
return ret;
}
@@ -436,6 +458,12 @@ static void smsc911x_free_resources(struct platform_device *pdev)
/* Free regulators */
regulator_bulk_free(ARRAY_SIZE(pdata->supplies),
pdata->supplies);
+
+ /* Free clock */
+ if (pdata->fsmc_clk) {
+ clk_put(pdata->fsmc_clk);
+ pdata->fsmc_clk = NULL;
+ }
}
/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
@@ -2346,6 +2374,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
unsigned int intcfg = 0;
int res_size, irq_flags;
int retval;
+ int to = 100;
pr_info("Driver version %s\n", SMSC_DRV_VERSION);
@@ -2424,6 +2453,18 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (pdata->config.shift)
pdata->ops = &shifted_smsc911x_ops;
+ /* poll the READY bit in PMT_CTRL. Any other access to the device is
+ * forbidden while this bit isn't set. Try for 100ms
+ */
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+ udelay(1000);
+
+ if (to == 0) {
+ pr_err("Device not READY in 100ms aborting\n");
+ goto out_0;
+ }
+
retval = smsc911x_init(dev);
if (retval < 0)
goto out_disable_resources;
diff --git a/drivers/net/m6718_modem_net.c b/drivers/net/m6718_modem_net.c
new file mode 100644
index 00000000000..f64a775560b
--- /dev/null
+++ b/drivers/net/m6718_modem_net.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ * based on u8500_shrm.c
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * M6718 modem net device interface.
+ */
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/phonet.h>
+#include <linux/if_phonet.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
+#include <net/phonet/phonet.h>
+#include <net/phonet/pep.h>
+#include <linux/modem/m6718_spi/modem_net.h>
+#include <linux/modem/m6718_spi/modem_driver.h>
+#include <linux/modem/m6718_spi/modem_char.h>
+#include <linux/ratelimit.h>
+
+
+/**
+ * modem_net_receive() - receive data and copy to user space buffer
+ * @dev: pointer to the network device structure
+ *
+ * Copy data from ISI queue to the user space buffer.
+ */
+int modem_net_receive(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct isa_device_context *isadev;
+ struct message_queue *q;
+ u32 msgsize;
+ u32 size = 0;
+ struct modem_spi_net_dev *net_iface_priv =
+ (struct modem_spi_net_dev *)netdev_priv(dev);
+ struct modem_spi_dev *modem_spi_dev = net_iface_priv->modem_spi_dev;
+
+ isadev = &modem_spi_dev->isa_context->isadev[MODEM_M6718_SPI_CHN_ISI];
+ q = &isadev->dl_queue;
+
+ spin_lock_bh(&q->update_lock);
+ if (list_empty(&q->msg_list)) {
+ spin_unlock_bh(&q->update_lock);
+ dev_dbg(modem_spi_dev->dev, "empty queue!\n");
+ return 0;
+ }
+ spin_unlock_bh(&q->update_lock);
+
+ msgsize = modem_isa_msg_size(q);
+ if (msgsize <= 0)
+ return msgsize;
+
+ /*
+ * The packet has been retrieved from the transmission
+ * medium. Build an skb around it, so upper layers can handle it
+ */
+ skb = dev_alloc_skb(msgsize);
+ if (!skb) {
+ pr_notice_ratelimited("isa rx: low on mem - packet dropped\n");
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ if ((q->readptr + msgsize) >= q->size) {
+ size = (q->size - q->readptr);
+ /* copy first part of msg */
+ skb_copy_to_linear_data(skb,
+ (u8 *)(q->fifo_base + q->readptr), size);
+ skb_put(skb, size);
+
+ /* copy second part of msg at the top of fifo */
+ skb_copy_to_linear_data_offset(skb, size,
+ (u8 *)(q->fifo_base), (msgsize - size));
+ skb_put(skb, msgsize - size);
+
+ } else {
+ skb_copy_to_linear_data(skb,
+ (u8 *)(q->fifo_base + q->readptr), msgsize);
+ skb_put(skb, msgsize);
+ }
+
+ spin_lock_bh(&q->update_lock);
+ modem_isa_unqueue_msg(q);
+ spin_unlock_bh(&q->update_lock);
+
+ skb_reset_mac_header(skb);
+ __skb_pull(skb, dev->hard_header_len);
+ /* write metadata and then pass to the receive level */
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_PHONET);
+ skb->priority = 0;
+ skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
+ if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += msgsize;
+ } else {
+ dev->stats.rx_dropped++;
+ }
+
+ return msgsize;
+}
+EXPORT_SYMBOL_GPL(modem_net_receive);
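A worked example of the wrap-around split above, with illustrative numbers: if q->size = 4096, q->readptr = 4000 and msgsize = 200, then readptr + msgsize exceeds the FIFO size, so size = 4096 - 4000 = 96 bytes are copied from the tail of the FIFO and the remaining 200 - 96 = 104 bytes are copied from q->fifo_base.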
+
+static int netdev_isa_open(struct net_device *dev)
+{
+ struct modem_spi_net_dev *net_iface_priv =
+ (struct modem_spi_net_dev *)netdev_priv(dev);
+ struct modem_spi_dev *modem_spi_dev = net_iface_priv->modem_spi_dev;
+
+ modem_spi_dev->netdev_flag_up = 1;
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ return 0;
+}
+
+static int netdev_isa_close(struct net_device *dev)
+{
+ struct modem_spi_net_dev *net_iface_priv =
+ (struct modem_spi_net_dev *)netdev_priv(dev);
+ struct modem_spi_dev *modem_spi_dev = net_iface_priv->modem_spi_dev;
+
+ modem_spi_dev->netdev_flag_up = 0;
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ return 0;
+}
+
+static int netdev_isa_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct if_phonet_req *req = (struct if_phonet_req *)ifr;
+
+ switch (cmd) {
+ case SIOCPNGAUTOCONF:
+ req->ifr_phonet_autoconf.device = PN_DEV_HOST;
+ return 0;
+ }
+ return -ENOIOCTLCMD;
+}
+
+static struct net_device_stats *netdev_isa_stats(struct net_device *dev)
+{
+ return &dev->stats;
+}
+
+/**
+ * netdev_isa_write() - write through the net interface
+ * @skb: pointer to the socket buffer
+ * @dev: pointer to the network device structure
+ *
+ * Copies the data (ISI message) from the socket buffer to the kernel buffer
+ * and schedules the transfer thread to transmit the message to the modem
+ * via the FIFO.
+ */
+static netdev_tx_t netdev_isa_write(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+ int retval = 0;
+ struct modem_spi_net_dev *net_iface_priv =
+ (struct modem_spi_net_dev *)netdev_priv(dev);
+ struct modem_spi_dev *modem_spi_dev = net_iface_priv->modem_spi_dev;
+
+ /*
+ * FIXME:
+ * U8500 modem requires that Pipe created/enabled Indication should
+ * be sent from the port corresponding to GPRS socket.
+ * Also, the U8500 modem does not implement Pipe controller
+ * which takes care of port manipulations for GPRS traffic.
+ *
+ * Now, APE has GPRS socket and the socket for sending
+ * Indication msgs bound to different ports.
+ * Phonet stack does not allow an indication msg to be sent
+ * from GPRS socket, since Phonet stack assumes the presence
+ * of Pipe controller in modem.
+ *
+ * So, due to lack of Pipe controller implementation in the
+ * U8500 modem, carry out the port manipulation related to
+ * GPRS traffic here.
+ * Ideally, it should be done either by Pipe controller in
+ * modem OR some implementation of Pipe controller on APE side
+ */
+ if (skb->data[RESOURCE_ID_INDEX] == PN_PIPE) {
+ if ((skb->data[MSG_ID_INDEX] == PNS_PIPE_CREATED_IND) ||
+ (skb->data[MSG_ID_INDEX] == PNS_PIPE_ENABLED_IND) ||
+ (skb->data[MSG_ID_INDEX] == PNS_PIPE_DISABLED_IND))
+ skb->data[SRC_OBJ_INDEX] = skb->data[PIPE_HDL_INDEX];
+ }
+
+ spin_lock_bh(&modem_spi_dev->isa_context->common_tx_lock);
+ err = modem_m6718_spi_send(modem_spi_dev, MODEM_M6718_SPI_CHN_ISI,
+ skb->len, skb->data);
+ if (!err) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ retval = NETDEV_TX_OK;
+ dev_kfree_skb(skb);
+ } else {
+ dev->stats.tx_dropped++;
+ retval = NETDEV_TX_BUSY;
+ }
+ spin_unlock_bh(&modem_spi_dev->isa_context->common_tx_lock);
+
+ return retval;
+}
+
+static const struct net_device_ops modem_netdev_ops = {
+ .ndo_open = netdev_isa_open,
+ .ndo_stop = netdev_isa_close,
+ .ndo_do_ioctl = netdev_isa_ioctl,
+ .ndo_start_xmit = netdev_isa_write,
+ .ndo_get_stats = netdev_isa_stats,
+};
+
+static void net_device_init(struct net_device *dev)
+{
+ struct modem_spi_net_dev *net_iface_priv;
+
+ dev->netdev_ops = &modem_netdev_ops;
+ dev->header_ops = &phonet_header_ops;
+ dev->type = ARPHRD_PHONET;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->mtu = PHONET_MAX_MTU;
+ dev->hard_header_len = MODEM_HLEN;
+ dev->addr_len = PHONET_ALEN;
+ dev->tx_queue_len = PN_TX_QUEUE_LEN;
+ dev->destructor = free_netdev;
+ dev->dev_addr[0] = PN_LINK_ADDR;
+ net_iface_priv = netdev_priv(dev);
+ memset(net_iface_priv, 0, sizeof(struct modem_spi_net_dev));
+}
+
+int modem_net_init(struct modem_spi_dev *modem_spi_dev)
+{
+ struct net_device *nw_device;
+ struct modem_spi_net_dev *net_iface_priv;
+ int err;
+ /*
+ * keep the same net device name as U8500 to allow userspace clients
+ * to remain unchanged and use the same interfaces
+ */
+ char *devname = "shrm%d";
+
+ /* allocate the net device */
+ nw_device = modem_spi_dev->ndev =
+ alloc_netdev(sizeof(struct modem_spi_net_dev),
+ devname, net_device_init);
+ if (nw_device == NULL) {
+ dev_err(modem_spi_dev->dev,
+ "failed to allocate modem net device\n");
+ return -ENOMEM;
+ }
+ err = register_netdev(modem_spi_dev->ndev);
+ if (err) {
+ dev_err(modem_spi_dev->dev,
+ "failed to register modem net device: error %d\n", err);
+ free_netdev(modem_spi_dev->ndev);
+ return -ENODEV;
+ }
+ dev_dbg(modem_spi_dev->dev, "registered modem net device\n");
+
+ net_iface_priv = (struct modem_spi_net_dev *)netdev_priv(nw_device);
+ net_iface_priv->modem_spi_dev = modem_spi_dev;
+ net_iface_priv->iface_num = 0;
+ return err;
+}
+EXPORT_SYMBOL_GPL(modem_net_init);
+
+int modem_net_stop(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+int modem_net_restart(struct net_device *dev)
+{
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(modem_net_restart);
+
+int modem_net_start(struct net_device *dev)
+{
+ struct modem_spi_net_dev *net_iface_priv =
+ (struct modem_spi_net_dev *)netdev_priv(dev);
+ struct modem_spi_dev *modem_spi_dev = net_iface_priv->modem_spi_dev;
+
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ netif_start_queue(dev);
+ modem_spi_dev->netdev_flag_up = 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(modem_net_start);
+
+int modem_net_suspend(struct net_device *dev)
+{
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ }
+ netif_device_detach(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(modem_net_suspend);
+
+int modem_net_resume(struct net_device *dev)
+{
+ netif_device_attach(dev);
+ if (netif_running(dev)) {
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(modem_net_resume);
+
+void modem_net_exit(struct modem_spi_dev *modem_spi_dev)
+{
+ if (modem_spi_dev && modem_spi_dev->ndev) {
+ unregister_netdev(modem_spi_dev->ndev);
+ modem_spi_dev->ndev = NULL;
+ dev_dbg(modem_spi_dev->dev, "removed modem net device\n");
+ }
+}
+EXPORT_SYMBOL_GPL(modem_net_exit);
+
+MODULE_AUTHOR("Chris Blair <chris.blair@stericsson.com>");
+MODULE_DESCRIPTION("M6718 modem IPC net device interface");
+MODULE_LICENSE("GPL v2");
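A minimal usage sketch (not part of the patch) of how the owning M6718 SPI link driver might consume the helpers exported above. Only modem_net_init(), modem_net_receive(), modem_net_suspend(), modem_net_resume(), modem_net_exit() and the struct modem_spi_dev members come from the file above; the surrounding callback names are hypothetical.

/* Illustrative consumer of the exported modem_net_*() helpers. */
#include <linux/modem/m6718_spi/modem_net.h>
#include <linux/modem/m6718_spi/modem_driver.h>

static int example_link_probe(struct modem_spi_dev *modem_spi_dev)
{
	/* create and register the "shrm%d" Phonet net device */
	return modem_net_init(modem_spi_dev);
}

static void example_link_rx_isi(struct modem_spi_dev *modem_spi_dev)
{
	/* called once an ISI frame has been queued on the downlink FIFO */
	if (modem_spi_dev->netdev_flag_up)
		modem_net_receive(modem_spi_dev->ndev);
}

static void example_link_suspend(struct modem_spi_dev *modem_spi_dev)
{
	modem_net_suspend(modem_spi_dev->ndev);
}

static void example_link_resume(struct modem_spi_dev *modem_spi_dev)
{
	modem_net_resume(modem_spi_dev->ndev);
}

static void example_link_remove(struct modem_spi_dev *modem_spi_dev)
{
	modem_net_exit(modem_spi_dev);
}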
diff --git a/drivers/net/u8500_shrm.c b/drivers/net/u8500_shrm.c
new file mode 100644
index 00000000000..0e813bbb3cc
--- /dev/null
+++ b/drivers/net/u8500_shrm.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2009
+ *
+ * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson
+ * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/phonet.h>
+#include <linux/if_phonet.h>
+#include <linux/if_arp.h>
+#include <linux/modem/shrm/shrm_driver.h>
+#include <linux/modem/shrm/shrm_private.h>
+#include <linux/modem/shrm/shrm_config.h>
+#include <linux/modem/shrm/shrm_net.h>
+#include <linux/modem/shrm/shrm.h>
+#include <net/sock.h>
+#include <net/phonet/phonet.h>
+#include <net/phonet/pep.h>
+
+
+/**
+ * shrm_net_receive() - receive data and pass it up the network stack
+ * @dev: pointer to the network device structure
+ *
+ * Copy data from the ISI downlink queue into an skb and hand it to the
+ * Phonet stack.
+ */
+int shrm_net_receive(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct isadev_context *isadev;
+ struct message_queue *q;
+ int msgsize;
+ u32 size = 0;
+ struct shrm_net_iface_priv *net_iface_priv =
+ (struct shrm_net_iface_priv *)netdev_priv(dev);
+ struct shrm_dev *shrm = net_iface_priv->shrm_device;
+
+ isadev = &shrm->isa_context->isadev[ISI_MESSAGING];
+ q = &isadev->dl_queue;
+
+ spin_lock_bh(&q->update_lock);
+ if (list_empty(&q->msg_list)) {
+ spin_unlock_bh(&q->update_lock);
+ dev_dbg(shrm->dev, "Empty Shrm queue\n");
+ return 0;
+ }
+ spin_unlock_bh(&q->update_lock);
+
+ msgsize = get_size_of_new_msg(q);
+ if (msgsize <= 0)
+ return msgsize;
+
+ /*
+ * The packet has been retrieved from the transmission
+ * medium. Build an skb around it, so upper layers can handle it
+ */
+ skb = dev_alloc_skb(msgsize);
+ if (!skb) {
+ if (printk_ratelimit())
+ dev_notice(shrm->dev,
+ "isa rx: low on mem - packet dropped\n");
+ dev->stats.rx_dropped++;
+ goto out;
+ }
+
+ if ((q->readptr + msgsize) >= q->size) {
+ size = (q->size - q->readptr);
+ /* copy first part of msg */
+ skb_copy_to_linear_data(skb,
+ (u8 *)(q->fifo_base + q->readptr), size);
+ skb_put(skb, size);
+
+ /* copy second part of msg at the top of fifo */
+ skb_copy_to_linear_data_offset(skb, size,
+ (u8 *)(q->fifo_base), (msgsize - size));
+ skb_put(skb, msgsize - size);
+
+ } else {
+ skb_copy_to_linear_data(skb,
+ (u8 *)(q->fifo_base + q->readptr), msgsize);
+ skb_put(skb, msgsize);
+ }
+
+ spin_lock_bh(&q->update_lock);
+ remove_msg_from_queue(q);
+ spin_unlock_bh(&q->update_lock);
+
+ skb_reset_mac_header(skb);
+ __skb_pull(skb, dev->hard_header_len);
+ /* write metadata and then pass to the receive level */
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_PHONET);
+ skb->priority = 0;
+ skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
+ if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += msgsize;
+ } else
+ dev->stats.rx_dropped++;
+
+ return msgsize;
+out:
+ return -ENOMEM;
+}
+
+static int netdev_isa_open(struct net_device *dev)
+{
+ struct shrm_net_iface_priv *net_iface_priv =
+ (struct shrm_net_iface_priv *)netdev_priv(dev);
+ struct shrm_dev *shrm = net_iface_priv->shrm_device;
+
+ shrm->netdev_flag_up = 1;
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ return 0;
+}
+
+static int netdev_isa_close(struct net_device *dev)
+{
+ struct shrm_net_iface_priv *net_iface_priv =
+ (struct shrm_net_iface_priv *)netdev_priv(dev);
+ struct shrm_dev *shrm = net_iface_priv->shrm_device;
+
+ shrm->netdev_flag_up = 0;
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ return 0;
+}
+
+static int netdev_isa_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct if_phonet_req *req = (struct if_phonet_req *)ifr;
+
+ switch (cmd) {
+ case SIOCPNGAUTOCONF:
+ req->ifr_phonet_autoconf.device = PN_DEV_HOST;
+ return 0;
+ }
+ return -ENOIOCTLCMD;
+}
+
+static struct net_device_stats *netdev_isa_stats(struct net_device *dev)
+{
+ return &dev->stats;
+}
+
+/**
+ * netdev_isa_write() - write through the net interface
+ * @skb: pointer to the socket buffer
+ * @dev: pointer to the network device structure
+ *
+ * Copies the data (ISI message) from the socket buffer to the kernel buffer
+ * and schedules the transfer thread to transmit the message to the modem
+ * via the FIFO.
+ */
+static netdev_tx_t netdev_isa_write(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+ int retval = 0;
+ struct shrm_net_iface_priv *net_iface_priv =
+ (struct shrm_net_iface_priv *)netdev_priv(dev);
+ struct shrm_dev *shrm = net_iface_priv->shrm_device;
+
+ /*
+ * FIXME:
+ * U8500 modem requires that Pipe created/enabled Indication should
+ * be sent from the port corresponding to GPRS socket.
+ * Also, the U8500 modem does not implement Pipe controller
+ * which takes care of port manipulations for GPRS traffic.
+ *
+ * Now, APE has GPRS socket and the socket for sending
+ * Indication msgs bound to different ports.
+ * Phonet stack does not allow an indication msg to be sent
+ * from GPRS socket, since Phonet stack assumes the presence
+ * of Pipe controller in modem.
+ *
+ * So, due to lack of Pipe controller implementation in the
+ * U8500 modem, carry out the port manipulation related to
+ * GPRS traffic here.
+ * Ideally, it should be done either by Pipe controller in
+ * modem OR some implementation of Pipe controller on APE side
+ */
+ if (skb->data[RESOURCE_ID_INDEX] == PN_PIPE) {
+ if ((skb->data[MSG_ID_INDEX] == PNS_PIPE_CREATED_IND) ||
+ (skb->data[MSG_ID_INDEX] == PNS_PIPE_ENABLED_IND) ||
+ (skb->data[MSG_ID_INDEX] == PNS_PIPE_DISABLED_IND))
+ skb->data[SRC_OBJ_INDEX] = skb->data[PIPE_HDL_INDEX];
+ }
+
+ spin_lock_bh(&shrm->isa_context->common_tx);
+ err = shm_write_msg(shrm, ISI_MESSAGING, skb->data,
+ skb->len);
+ if (!err) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ retval = NETDEV_TX_OK;
+ dev_kfree_skb(skb);
+ } else {
+ dev->stats.tx_dropped++;
+ retval = NETDEV_TX_BUSY;
+ }
+ spin_unlock_bh(&shrm->isa_context->common_tx);
+
+ return retval;
+}
+
+static const struct net_device_ops shrm_netdev_ops = {
+ .ndo_open = netdev_isa_open,
+ .ndo_stop = netdev_isa_close,
+ .ndo_do_ioctl = netdev_isa_ioctl,
+ .ndo_start_xmit = netdev_isa_write,
+ .ndo_get_stats = netdev_isa_stats,
+};
+
+static void shm_net_init(struct net_device *dev)
+{
+ struct shrm_net_iface_priv *net_iface_priv;
+
+ dev->netdev_ops = &shrm_netdev_ops;
+ dev->header_ops = &phonet_header_ops;
+ dev->type = ARPHRD_PHONET;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->mtu = PHONET_MAX_MTU;
+ dev->hard_header_len = SHRM_HLEN;
+ dev->addr_len = PHONET_ALEN;
+ dev->tx_queue_len = PN_TX_QUEUE_LEN;
+ dev->destructor = free_netdev;
+ dev->dev_addr[0] = PN_LINK_ADDR;
+ net_iface_priv = netdev_priv(dev);
+ memset(net_iface_priv, 0, sizeof(struct shrm_net_iface_priv));
+}
+
+int shrm_register_netdev(struct shrm_dev *shrm)
+{
+ struct net_device *nw_device;
+ struct shrm_net_iface_priv *net_iface_priv;
+ char *devname = "shrm%d";
+ int err;
+
+ /* allocate the net device */
+ nw_device = shrm->ndev = alloc_netdev(
+ sizeof(struct shrm_net_iface_priv),
+ devname, shm_net_init);
+ if (nw_device == NULL) {
+ dev_err(shrm->dev, "Failed to allocate SHRM Netdev\n");
+ return -ENOMEM;
+ }
+ err = register_netdev(shrm->ndev);
+ if (err) {
+ dev_err(shrm->dev, "Err %i in reg shrm-netdev\n", err);
+ free_netdev(shrm->ndev);
+ return -ENODEV;
+ }
+ dev_info(shrm->dev, "Registered shrm netdev\n");
+
+ net_iface_priv = (struct shrm_net_iface_priv *)netdev_priv(nw_device);
+ net_iface_priv->shrm_device = shrm;
+ net_iface_priv->iface_num = 0;
+
+ return err;
+}
+
+int shrm_stop_netdev(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+int shrm_restart_netdev(struct net_device *dev)
+{
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ return 0;
+}
+
+int shrm_start_netdev(struct net_device *dev)
+{
+ struct shrm_net_iface_priv *net_iface_priv =
+ (struct shrm_net_iface_priv *)netdev_priv(dev);
+ struct shrm_dev *shrm = net_iface_priv->shrm_device;
+
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ netif_start_queue(dev);
+ shrm->netdev_flag_up = 1;
+ return 0;
+}
+
+int shrm_suspend_netdev(struct net_device *dev)
+{
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ }
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+int shrm_resume_netdev(struct net_device *dev)
+{
+ netif_device_attach(dev);
+ if (netif_running(dev)) {
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ }
+
+ return 0;
+}
+
+void shrm_unregister_netdev(struct shrm_dev *shrm)
+{
+ unregister_netdev(shrm->ndev);
+}
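An illustrative standalone sketch (not part of the patch) of the circular-FIFO copy used by both receive functions above: when a message wraps past the end of the FIFO, it is copied into the skb in two pieces. The parameter names are generic; only the readptr/fifo_base/size layout mirrors struct message_queue.

#include <linux/skbuff.h>

static void example_copy_from_fifo(struct sk_buff *skb, const u8 *fifo_base,
				   u32 fifo_size, u32 readptr, u32 msgsize)
{
	if (readptr + msgsize >= fifo_size) {
		u32 first = fifo_size - readptr;

		/* tail of the FIFO first ... */
		skb_copy_to_linear_data(skb, fifo_base + readptr, first);
		skb_put(skb, first);
		/* ... then the remainder from the start of the FIFO */
		skb_copy_to_linear_data_offset(skb, first, fifo_base,
					       msgsize - first);
		skb_put(skb, msgsize - first);
	} else {
		skb_copy_to_linear_data(skb, fifo_base + readptr, msgsize);
		skb_put(skb, msgsize);
	}
}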
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 3a8daf85874..e288d2f35e2 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -260,6 +260,38 @@ config CHARGER_MANAGER
runtime and in suspend-to-RAM by waking up the system periodically
with help of suspend_again support.
+config AB8500_BM
+ bool "AB8500 Battery Management Driver"
+ depends on AB8500_CORE && AB8500_GPADC && ARCH_U8500
+ help
+ Say Y to include support for AB8500 battery management.
+
+config AB8500_BATTERY_THERM_ON_BATCTRL
+ bool "Thermistor connected on BATCTRL ADC"
+ depends on AB8500_BM
+ help
+ Say Y to enable battery temperature measurements using a
+ thermistor connected to the BATCTRL ADC.
+
+config AB8500_9100_LI_ION_BATTERY
+ bool "Enable support of the 9100 Li-ion battery charging"
+ depends on AB8500_BM
+ help
+ Say Y to enable support of the 9100 Li-ion battery charging.
+
+config AB5500_BM
+ bool "AB5500 Battery Management Driver"
+ depends on AB5500_CORE && AB5500_GPADC && MACH_U5500
+ help
+ Say Y to include support for AB5500 battery management.
+
+config AB5500_BATTERY_THERM_ON_BATCTRL
+ bool "Thermistor connected on BATCTRL ADC"
+ depends on AB5500_BM
+ help
+ Say Y to enable battery temperature measurements using a
+ thermistor connected to the BATCTRL ADC.
+
config CHARGER_MAX8997
tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
depends on MFD_MAX8997 && REGULATOR_MAX8997
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index e429008eaf1..c07d46ce141 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -39,5 +39,7 @@ obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
+obj-$(CONFIG_AB8500_BM) += ab8500_charger.o ab8500_fg.o ab8500_btemp.o ab8500_chargalg.o
+obj-$(CONFIG_AB5500_BM) += ab5500_charger.o ab5500_btemp.o ab5500_fg.o abx500_chargalg.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
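A possible .config fragment (illustrative only) that pulls in the AB8500 battery-management objects added to the Makefile above. The option names and the AB8500_CORE/AB8500_GPADC/ARCH_U8500 prerequisites come from the Kconfig hunk; the y/n choices shown here are just an example:

CONFIG_AB8500_BM=y
CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL=y
# CONFIG_AB8500_9100_LI_ION_BATTERY is not set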
diff --git a/drivers/power/ab5500_btemp.c b/drivers/power/ab5500_btemp.c
new file mode 100644
index 00000000000..08d5ae89dbe
--- /dev/null
+++ b/drivers/power/ab5500_btemp.c
@@ -0,0 +1,923 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Battery temperature driver for ab5500
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+
+#define BTEMP_THERMAL_LOW_LIMIT -10
+#define BTEMP_THERMAL_MED_LIMIT 0
+#define BTEMP_THERMAL_HIGH_LIMIT_62 62
+
+#define BTEMP_BATCTRL_CURR_SRC_7UA 7
+#define BTEMP_BATCTRL_CURR_SRC_15UA 15
+#define BTEMP_BATCTRL_CURR_SRC_20UA 20
+
+#define UART_MODE 0x0F
+#define BAT_CUR_SRC 0x1F
+#define RESIS_ID_MODE 0x03
+#define RESET 0x00
+#define ADOUT_10K_PULL_UP 0x07
+
+/* Enable battery temp monitoring manual mode */
+#define BTEMP_MANUAL_MONITORING
+
+#define to_ab5500_btemp_device_info(x) container_of((x), \
+ struct ab5500_btemp, btemp_psy)
+
+/**
+ * struct ab5500_btemp_interrupts - ab5500 interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_btemp_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab5500_btemp_events {
+ bool batt_rem;
+ bool usb_conn;
+};
+
+/**
+ * struct ab5500_btemp - ab5500 BTEMP device information
+ * @dev: Pointer to the structure device
+ * @chip_id: Chip-Id of the AB5500
+ * @curr_source: What current source we use, in uA
+ * @bat_temp: Battery temperature in degrees Celsius
+ * @prev_bat_temp: Last dispatched battery temperature
+ * @node: struct of type list_head
+ * @parent: Pointer to the struct ab5500
+ * @gpadc: Pointer to the struct gpadc
+ * @gpadc_auto: Pointer to the struct adc_auto_input
+ * @pdata: Pointer to the ab5500_btemp platform data
+ * @bat: Pointer to the ab5500_bm platform data
+ * @btemp_psy: Structure for BTEMP specific battery properties
+ * @events: Structure for information about events triggered
+ * @btemp_wq: Work queue for measuring the temperature periodically
+ * @btemp_periodic_work: Work for measuring the temperature periodically
+ */
+struct ab5500_btemp {
+ struct device *dev;
+ u8 chip_id;
+ int curr_source;
+ int bat_temp;
+ int prev_bat_temp;
+ struct list_head node;
+ struct ab5500 *parent;
+ struct ab5500_gpadc *gpadc;
+ struct adc_auto_input *gpadc_auto;
+ struct abx500_btemp_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply btemp_psy;
+ struct ab5500_btemp_events events;
+ struct workqueue_struct *btemp_wq;
+ struct delayed_work btemp_periodic_work;
+};
+
+/* BTEMP power supply properties */
+static enum power_supply_property ab5500_btemp_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static LIST_HEAD(ab5500_btemp_list);
+
+static int ab5500_btemp_bat_temp_trig(int mux);
+
+struct ab5500_btemp *ab5500_btemp_get(void)
+{
+ struct ab5500_btemp *di;
+ di = list_first_entry(&ab5500_btemp_list, struct ab5500_btemp, node);
+
+ return di;
+}
+
+/**
+ * ab5500_btemp_get_batctrl_temp() - get the temperature
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * Returns the batctrl temperature in millidegrees
+ */
+int ab5500_btemp_get_batctrl_temp(struct ab5500_btemp *di)
+{
+ return di->bat_temp * 1000;
+}
+
+/**
+ * ab5500_btemp_batctrl_volt_to_res() - convert batctrl voltage to resistance
+ * @di: pointer to the ab5500_btemp structure
+ * @v_batctrl: measured batctrl voltage
+ *
+ * This function returns the battery resistance that is
+ * derived from the BATCTRL voltage.
+ * Returns value in Ohms.
+ */
+static int ab5500_btemp_batctrl_volt_to_res(struct ab5500_btemp *di,
+ int v_batctrl)
+{
+ int rbs;
+
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL) {
+ /*
+ * If the battery has internal NTC, we use the current
+ * source to calculate the resistance, 7uA or 20uA
+ */
+ rbs = v_batctrl * 1000 / di->curr_source;
+ } else {
+ /*
+ * BAT_CTRL is internally
+ * connected to 1.8V through a 10k resistor
+ */
+ rbs = (10000 * (v_batctrl)) / (1800 - v_batctrl);
+ }
+ return rbs;
+}
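+
+/*
+ * Worked example (illustrative, not part of the driver logic): with the
+ * 7uA BATCTRL current source and v_batctrl = 180 mV, the internal-NTC
+ * branch gives rbs = 180 * 1000 / 7 = 25714 Ohm; with the 10k pull-up
+ * branch and the same 180 mV, rbs = (10000 * 180) / (1800 - 180) = 1111 Ohm.
+ */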
+
+/**
+ * ab5500_btemp_read_batctrl_voltage() - measure batctrl voltage
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * This function returns the voltage on BATCTRL. Returns value in mV.
+ */
+static int ab5500_btemp_read_batctrl_voltage(struct ab5500_btemp *di)
+{
+ int vbtemp;
+ static int prev;
+
+ vbtemp = ab5500_gpadc_convert(di->gpadc, BAT_CTRL);
+ if (vbtemp < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value",
+ __func__);
+ return prev;
+ }
+ prev = vbtemp;
+ return vbtemp;
+}
+
+/**
+ * ab5500_btemp_curr_source_enable() - enable/disable batctrl current source
+ * @di: pointer to the ab5500_btemp structure
+ * @enable: enable or disable the current source
+ *
+ * Enable or disable the current sources for the BatCtrl AD channel
+ */
+static int ab5500_btemp_curr_source_enable(struct ab5500_btemp *di,
+ bool enable)
+{
+ int ret = 0;
+
+ /* Only do this for batteries with internal NTC */
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
+
+ dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART,
+ UART_MODE, RESIS_ID_MODE);
+ if (ret) {
+ dev_err(di->dev,
+ "%s failed setting resistance identification mode\n",
+ __func__);
+ return ret;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI,
+ BAT_CUR_SRC, BAT_CTRL_15U_ENA);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+ } else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
+ dev_dbg(di->dev, "Disable BATCTRL curr source\n");
+
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI,
+ BAT_CUR_SRC, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART,
+ UART_MODE, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling force comp\n",
+ __func__);
+ }
+ }
+ return ret;
+disable_curr_source:
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI,
+ BAT_CUR_SRC, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ }
+ return ret;
+}
+
+/**
+ * ab5500_btemp_get_batctrl_res() - get battery resistance
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * This function returns the battery pack identification resistance.
+ * Returns value in Ohms.
+ */
+static int ab5500_btemp_get_batctrl_res(struct ab5500_btemp *di)
+{
+ int ret;
+ int batctrl;
+ int res;
+
+ ret = ab5500_btemp_curr_source_enable(di, true);
+ if (ret) {
+ dev_err(di->dev, "%s curr source enable failed\n", __func__);
+ return ret;
+ }
+ /* TODO: This delay has to be optimised */
+ msleep(100);
+
+ batctrl = ab5500_btemp_read_batctrl_voltage(di);
+ res = ab5500_btemp_batctrl_volt_to_res(di, batctrl);
+
+ ret = ab5500_btemp_curr_source_enable(di, false);
+ if (ret) {
+ dev_err(di->dev, "%s curr source disable failed\n", __func__);
+ return ret;
+ }
+
+ dev_dbg(di->dev, "%s batctrl: %d res: %d ",
+ __func__, batctrl, res);
+
+ return res;
+}
+
+/**
+ * ab5500_btemp_res_to_temp() - resistance to temperature
+ * @di: pointer to the ab5500_btemp structure
+ * @tbl: pointer to the resistance to temperature table
+ * @tbl_size: size of the resistance to temperature table
+ * @res: resistance to calculate the temperature from
+ *
+ * This function returns the battery temperature in degrees Celsius
+ * based on the NTC resistance.
+ */
+static int ab5500_btemp_res_to_temp(struct ab5500_btemp *di,
+ const struct abx500_res_to_temp *tbl, int tbl_size, int res)
+{
+ int i, temp;
+ /*
+ * Calculate the formula for the straight line
+ * Simple interpolation if we are within
+ * the resistance table limits, extrapolate
+ * if resistance is outside the limits.
+ */
+ if (res > tbl[0].resist)
+ i = 0;
+ else if (res <= tbl[tbl_size - 1].resist)
+ i = tbl_size - 2;
+ else {
+ i = 0;
+ while (!(res <= tbl[i].resist &&
+ res > tbl[i + 1].resist))
+ i++;
+ }
+
+ temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
+ (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+ return temp;
+}
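+
+/*
+ * Worked example (illustrative): for a table containing the entries
+ * {temp = 0, resist = 27000} and {temp = 10, resist = 18000}, a measured
+ * res of 22500 Ohm selects that pair and yields
+ * temp = 0 + (10 - 0) * (22500 - 27000) / (18000 - 27000) = 5 deg C.
+ */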
+
+/**
+ * ab5500_btemp_measure_temp() - measure battery temperature
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * Returns battery temperature (on success) else the previous temperature
+ */
+static int ab5500_btemp_measure_temp(struct ab5500_btemp *di)
+{
+ int temp, ret;
+ static int prev;
+ int rbat, vntc;
+ int rntc = 0;
+ u8 id;
+
+ id = di->bat->batt_id;
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+ id != BATTERY_UNKNOWN) {
+ rbat = ab5500_btemp_get_batctrl_res(di);
+ if (rbat < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n",
+ __func__);
+ /*
+ * Return out-of-range temperature so that
+ * charging is stopped
+ */
+ return BTEMP_THERMAL_LOW_LIMIT;
+ }
+
+ temp = ab5500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rbat);
+ } else {
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART,
+ UART_MODE, ADOUT_10K_PULL_UP);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to enable 10k pull up to Vadout\n");
+ }
+ vntc = ab5500_gpadc_convert(di->gpadc, BTEMP_BALL);
+ if (vntc < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed,"
+ " using previous value\n", __func__);
+ return prev;
+ }
+ /*
+ * The PCB NTC is sourced from 2.75v via a 10kOhm
+ * resistor.
+ */
+ rntc = 10000 * vntc / (27500 - vntc);
+
+ temp = ab5500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rntc);
+ prev = temp;
+ }
+ dev_dbg(di->dev, "Battery temperature is %d\n", temp);
+ return temp;
+}
+
+/**
+ * ab5500_btemp_id() - Identify the connected battery
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * This function will try to identify the battery by reading the ID
+ * resistor. Some brands use a combined ID resistor with a NTC resistor to
+ * both be able to identify and to read the temperature of it.
+ */
+static int ab5500_btemp_id(struct ab5500_btemp *di)
+{
+ int res;
+ u8 i;
+
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
+ di->bat->batt_id = BATTERY_UNKNOWN;
+
+ res = ab5500_btemp_get_batctrl_res(di);
+ if (res < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n", __func__);
+ return -ENXIO;
+ }
+
+ /* BATTERY_UNKNOWN is defined on position 0, skip it! */
+ for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
+ if ((res <= di->bat->bat_type[i].resis_high) &&
+ (res >= di->bat->bat_type[i].resis_low)) {
+ dev_dbg(di->dev, "Battery detected on %s"
+ " low %d < res %d < high: %d"
+ " index: %d\n",
+ di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ?
+ "BATCTRL" : "BATTEMP",
+ di->bat->bat_type[i].resis_low, res,
+ di->bat->bat_type[i].resis_high, i);
+
+ di->bat->batt_id = i;
+ break;
+ }
+ }
+
+ if (di->bat->batt_id == BATTERY_UNKNOWN) {
+ dev_warn(di->dev, "Battery identified as unknown"
+ ", resistance %d Ohm\n", res);
+ return -ENXIO;
+ }
+
+ /*
+ * We only have to change current source if the
+ * detected type is Type 1, else we use the 7uA source
+ */
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+ di->bat->batt_id == 1) {
+ dev_dbg(di->dev, "Set BATCTRL current source to 15uA\n");
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_15UA;
+ }
+
+ return di->bat->batt_id;
+}
+
+/**
+ * ab5500_btemp_periodic_work() - Measuring the temperature periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work function for measuring the temperature periodically
+ */
+static void ab5500_btemp_periodic_work(struct work_struct *work)
+{
+ struct ab5500_btemp *di = container_of(work,
+ struct ab5500_btemp, btemp_periodic_work.work);
+
+ di->bat_temp = ab5500_btemp_measure_temp(di);
+
+ if (di->bat_temp != di->prev_bat_temp) {
+ di->prev_bat_temp = di->bat_temp;
+ power_supply_changed(&di->btemp_psy);
+ }
+ di->bat->temp_now = di->bat_temp;
+#if defined(BTEMP_MANUAL_MONITORING)
+ /* Check for temperature limits */
+ ab5500_btemp_bat_temp_trig(0);
+
+ /* Schedule a new measurement */
+ if (di->events.usb_conn)
+ queue_delayed_work(di->btemp_wq,
+ &di->btemp_periodic_work,
+ round_jiffies_relative(di->bat->interval_charging * HZ));
+ else
+ queue_delayed_work(di->btemp_wq,
+ &di->btemp_periodic_work,
+ round_jiffies_relative(di->bat->interval_not_charging * HZ));
+#else
+ /* Schedule a new measurement */
+ queue_delayed_work(di->btemp_wq,
+ &di->btemp_periodic_work,
+ round_jiffies_relative(di->bat->interval_charging * HZ));
+#endif
+}
+
+/**
+ * ab5500_btemp_batt_removal_handler() - battery removal detected
+ * @irq: interrupt number
+ * @_di: void pointer that has to address of ab5500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_btemp_batt_removal_handler(int irq, void *_di)
+{
+ struct ab5500_btemp *di = _di;
+ dev_err(di->dev, "Battery removal detected!\n");
+
+ di->events.batt_rem = true;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_btemp_batt_attach_handler() - battery insertion detected
+ * @irq: interrupt number
+ * @_di: void pointer that has to address of ab5500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_btemp_batt_attach_handler(int irq, void *_di)
+{
+ struct ab5500_btemp *di = _di;
+ dev_err(di->dev, "Battery attached!\n");
+
+ di->events.batt_rem = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_btemp_periodic() - Periodic temperature measurements
+ * @di: pointer to the ab5500_btemp structure
+ * @enable: enable or disable periodic temperature measurements
+ *
+ * Starts or stops periodic temperature measurements. Periodic measurements
+ * should only be done when a charger is connected.
+ */
+static void ab5500_btemp_periodic(struct ab5500_btemp *di,
+ bool enable)
+{
+ dev_dbg(di->dev, "Enable periodic temperature measurements: %d\n",
+ enable);
+
+ if (enable)
+ queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
+ else
+ cancel_delayed_work_sync(&di->btemp_periodic_work);
+}
+
+/**
+ * ab5500_btemp_get_property() - get the btemp properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the btemp
+ * properties by reading the sysfs files.
+ * online: presence of the battery
+ * present: presence of the battery
+ * technology: battery technology
+ * temp: battery temperature
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_btemp_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab5500_btemp *di;
+
+ di = to_ab5500_btemp_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (di->events.batt_rem)
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = di->bat->bat_type[di->bat->batt_id].name;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ if (di->bat->batt_id == BATTERY_UNKNOWN)
+ /*
+ * If the battery is not identified, assume a bench power
+ * supply is used; since no temperature monitoring is done
+ * in that case, a nominal temperature is hardcoded.
+ */
+ val->intval = 250;
+ else
+ val->intval = di->bat_temp * 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ab5500_btemp_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab5500_btemp *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab5500_btemp_device_info(psy);
+
+ /*
+ * For all psy where the name of your driver
+ * appears in any supplied_to
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval && di->events.usb_conn) {
+ di->events.usb_conn = false;
+#if !defined(BTEMP_MANUAL_MONITORING)
+ ab5500_btemp_periodic(di,
+ false);
+#endif
+ }
+ /* USB connected */
+ else if (ret.intval && !di->events.usb_conn) {
+ di->events.usb_conn = true;
+
+#if !defined(BTEMP_MANUAL_MONITORING)
+ ab5500_btemp_periodic(di, true);
+#endif
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab5500_btemp_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is pointing to the function pointer external_power_changed
+ * of the structure power_supply.
+ * This function gets executed when there is a change in the external power
+ * supply to the btemp.
+ */
+static void ab5500_btemp_external_power_changed(struct power_supply *psy)
+{
+ struct ab5500_btemp *di = to_ab5500_btemp_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->btemp_psy, ab5500_btemp_get_ext_psy_data);
+}
+
+/* ab5500 btemp driver interrupts and their respective isr */
+static struct ab5500_btemp_interrupts ab5500_btemp_irq[] = {
+ {"BATT_REMOVAL", ab5500_btemp_batt_removal_handler},
+ {"BATT_ATTACH", ab5500_btemp_batt_attach_handler},
+};
+
+static int ab5500_btemp_bat_temp_trig(int mux)
+{
+ struct ab5500_btemp *di = ab5500_btemp_get();
+
+ if (di->bat_temp < BTEMP_THERMAL_LOW_LIMIT) {
+ dev_err(di->dev,
+ "battery temp less than lower threshold (-10 deg cel)\n");
+ power_supply_changed(&di->btemp_psy);
+ } else if (di->bat_temp > BTEMP_THERMAL_HIGH_LIMIT_62) {
+ dev_err(di->dev,
+ "battery temp greater than max threshold (62 deg cel)\n");
+ power_supply_changed(&di->btemp_psy);
+ }
+ return 0;
+}
+
+#if !defined(BTEMP_MANUAL_MONITORING)
+static int ab5500_btemp_auto_temp(struct ab5500_btemp *di)
+{
+ struct adc_auto_input *auto_ip;
+ int ret = 0;
+
+ auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL);
+ if (!auto_ip) {
+ dev_err(di->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ auto_ip->mux = BTEMP_BALL;
+ auto_ip->freq = MS500;
+ auto_ip->min = BTEMP_THERMAL_LOW_LIMIT;
+ auto_ip->max = BTEMP_THERMAL_HIGH_LIMIT_62;
+ auto_ip->auto_adc_callback = ab5500_btemp_bat_temp_trig;
+ di->gpadc_auto = auto_ip;
+ ret = ab5500_gpadc_convert_auto(di->gpadc, di->gpadc_auto);
+ if (ret)
+ dev_err(di->dev,
+ "failed to set auto trigger for battery temp\n");
+ return ret;
+}
+#endif
+
+#if defined(CONFIG_PM)
+static int ab5500_btemp_resume(struct platform_device *pdev)
+{
+ struct ab5500_btemp *di = platform_get_drvdata(pdev);
+
+ if (di->events.usb_conn)
+ ab5500_btemp_periodic(di, true);
+
+ return 0;
+}
+
+static int ab5500_btemp_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab5500_btemp *di = platform_get_drvdata(pdev);
+
+ if (di->events.usb_conn)
+ ab5500_btemp_periodic(di, false);
+
+ return 0;
+}
+#else
+#define ab5500_btemp_suspend NULL
+#define ab5500_btemp_resume NULL
+#endif
+
+static int __devexit ab5500_btemp_remove(struct platform_device *pdev)
+{
+ struct ab5500_btemp *di = platform_get_drvdata(pdev);
+ int i, irq;
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ /* Delete the work queue */
+ destroy_workqueue(di->btemp_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->btemp_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di->gpadc_auto);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab5500_btemp_probe(struct platform_device *pdev)
+{
+ int irq, i, ret = 0;
+ struct abx500_bm_plat_data *plat_data;
+
+ struct ab5500_btemp *di =
+ kzalloc(sizeof(struct ab5500_btemp), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab5500_gpadc_get("ab5500-adc.0");
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->btemp;
+ di->bat = plat_data->battery;
+
+ /* get btemp specific platform data */
+ if (!di->pdata) {
+ dev_err(di->dev, "no btemp platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* BTEMP supply */
+ di->btemp_psy.name = "ab5500_btemp";
+ di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->btemp_psy.properties = ab5500_btemp_props;
+ di->btemp_psy.num_properties = ARRAY_SIZE(ab5500_btemp_props);
+ di->btemp_psy.get_property = ab5500_btemp_get_property;
+ di->btemp_psy.supplied_to = di->pdata->supplied_to;
+ di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
+ di->btemp_psy.external_power_changed =
+ ab5500_btemp_external_power_changed;
+
+ /* Create a work queue for the btemp */
+ di->btemp_wq =
+ create_singlethread_workqueue("ab5500_btemp_wq");
+ if (di->btemp_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ goto free_device_info;
+ }
+
+ /* Init work for measuring temperature periodically */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+ ab5500_btemp_periodic_work);
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(di->dev);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to get chip ID\n");
+ goto free_btemp_wq;
+ }
+ di->chip_id = ret;
+ dev_dbg(di->dev, "ab5500 CID is: 0x%02x\n",
+ di->chip_id);
+
+ /* Identify the battery */
+ if (ab5500_btemp_id(di) < 0)
+ dev_warn(di->dev, "failed to identify the battery\n");
+
+ /* Measure temperature once initially */
+ di->bat_temp = ab5500_btemp_measure_temp(di);
+ di->bat->temp_now = di->bat_temp;
+
+ /* Register BTEMP power supply class */
+ ret = power_supply_register(di->dev, &di->btemp_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register BTEMP psy\n");
+ goto free_btemp_wq;
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab5500_btemp_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab5500_btemp_irq[i].name, di);
+
+ if (ret) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab5500_btemp_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab5500_btemp_irq[i].name, irq, ret);
+ }
+#if defined(BTEMP_MANUAL_MONITORING)
+ /* Schedule monitoring work only if battery type is known */
+ if (di->bat->batt_id != BATTERY_UNKNOWN)
+ queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
+#else
+ ret = ab5500_btemp_auto_temp(di);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to register auto trigger for battery temp\n");
+ goto free_irq;
+ }
+#endif
+
+ platform_set_drvdata(pdev, di);
+ list_add_tail(&di->node, &ab5500_btemp_list);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+free_irq:
+ power_supply_unregister(&di->btemp_psy);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+free_btemp_wq:
+ destroy_workqueue(di->btemp_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab5500_btemp_driver = {
+ .probe = ab5500_btemp_probe,
+ .remove = __devexit_p(ab5500_btemp_remove),
+ .suspend = ab5500_btemp_suspend,
+ .resume = ab5500_btemp_resume,
+ .driver = {
+ .name = "ab5500-btemp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_btemp_init(void)
+{
+ return platform_driver_register(&ab5500_btemp_driver);
+}
+
+static void __exit ab5500_btemp_exit(void)
+{
+ platform_driver_unregister(&ab5500_btemp_driver);
+}
+
+subsys_initcall_sync(ab5500_btemp_init);
+module_exit(ab5500_btemp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab5500-btemp");
+MODULE_DESCRIPTION("AB5500 battery temperature driver");
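A minimal consumer sketch (illustrative): how a sibling driver such as the fuel gauge might use the non-static helpers defined above. It assumes their declarations live in <linux/mfd/abx500/ab5500-bm.h>; the calling function and the point at which it reads the temperature are hypothetical.

#include <linux/mfd/abx500/ab5500-bm.h>

static int example_read_batctrl_temp(void)
{
	/* valid only after the ab5500-btemp probe has completed */
	struct ab5500_btemp *btemp = ab5500_btemp_get();

	/* batctrl temperature in millidegrees Celsius */
	return ab5500_btemp_get_batctrl_temp(btemp);
}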
diff --git a/drivers/power/ab5500_charger.c b/drivers/power/ab5500_charger.c
new file mode 100644
index 00000000000..b90c51a4f31
--- /dev/null
+++ b/drivers/power/ab5500_charger.c
@@ -0,0 +1,1820 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Charger driver for AB5500
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/usb/otg.h>
+
+/* Charger constants */
+#define NO_PW_CONN 0
+#define USB_PW_CONN 2
+
+/* HW failure constants */
+#define VBUS_CH_NOK 0x0A
+#define VBUS_OVV_TH 0x06
+
+/* AB5500 Charger constants */
+#define AB5500_USB_LINK_STATUS 0x78
+#define CHARGER_REV_SUP 0x10
+#define SW_EOC 0x40
+#define USB_CHAR_DET 0x02
+#define VBUS_RISING 0x20
+#define VBUS_FALLING 0x40
+#define USB_LINK_UPDATE 0x02
+#define USB_CH_TH_PROT_LOW 0x02
+#define USB_CH_TH_PROT_HIGH 0x01
+#define USB_ID_HOST_DET_ENA_MASK 0x02
+#define USB_ID_HOST_DET_ENA 0x02
+#define USB_ID_DEVICE_DET_ENA_MASK 0x01
+#define USB_ID_DEVICE_DET_ENA 0x01
+#define CHARGER_ISET_IN_1_1A 0x0C
+#define LED_ENABLE 0x01
+#define RESET 0x00
+#define SSW_ENABLE_REBOOT 0x80
+#define SSW_REBOOT_EN 0x40
+#define SSW_CONTROL_AUTOC 0x04
+#define SSW_PSEL_480S 0x00
+
+/* UsbLineStatus register - usb types */
+enum ab5500_charger_link_status {
+ USB_STAT_NOT_CONFIGURED,
+ USB_STAT_STD_HOST_NC,
+ USB_STAT_STD_HOST_C_NS,
+ USB_STAT_STD_HOST_C_S,
+ USB_STAT_HOST_CHG_NM,
+ USB_STAT_HOST_CHG_HS,
+ USB_STAT_HOST_CHG_HS_CHIRP,
+ USB_STAT_DEDICATED_CHG,
+ USB_STAT_ACA_RID_A,
+ USB_STAT_ACA_RID_B,
+ USB_STAT_ACA_RID_C_NM,
+ USB_STAT_ACA_RID_C_HS,
+ USB_STAT_ACA_RID_C_HS_CHIRP,
+ USB_STAT_HM_IDGND,
+ USB_STAT_RESERVED,
+ USB_STAT_NOT_VALID_LINK,
+};
+
+enum ab5500_usb_state {
+ AB5500_BM_USB_STATE_RESET_HS, /* HighSpeed Reset */
+ AB5500_BM_USB_STATE_RESET_FS, /* FullSpeed/LowSpeed Reset */
+ AB5500_BM_USB_STATE_CONFIGURED,
+ AB5500_BM_USB_STATE_SUSPEND,
+ AB5500_BM_USB_STATE_RESUME,
+ AB5500_BM_USB_STATE_MAX,
+};
+
+/* VBUS input current limits supported in AB5500 in mA */
+#define USB_CH_IP_CUR_LVL_0P05 50
+#define USB_CH_IP_CUR_LVL_0P09 98
+#define USB_CH_IP_CUR_LVL_0P19 193
+#define USB_CH_IP_CUR_LVL_0P29 290
+#define USB_CH_IP_CUR_LVL_0P38 380
+#define USB_CH_IP_CUR_LVL_0P45 450
+#define USB_CH_IP_CUR_LVL_0P5 500
+#define USB_CH_IP_CUR_LVL_0P6 600
+#define USB_CH_IP_CUR_LVL_0P7 700
+#define USB_CH_IP_CUR_LVL_0P8 800
+#define USB_CH_IP_CUR_LVL_0P9 900
+#define USB_CH_IP_CUR_LVL_1P0 1000
+#define USB_CH_IP_CUR_LVL_1P1 1100
+#define USB_CH_IP_CUR_LVL_1P3 1300
+#define USB_CH_IP_CUR_LVL_1P4 1400
+#define USB_CH_IP_CUR_LVL_1P5 1500
+
+#define to_ab5500_charger_usb_device_info(x) container_of((x), \
+ struct ab5500_charger, usb_chg)
+
+/**
+ * struct ab5500_charger_interrupts - ab5500 interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_charger_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab5500_charger_info {
+ int charger_connected;
+ int charger_online;
+ int charger_voltage;
+ int cv_active;
+ bool wd_expired;
+};
+
+struct ab5500_charger_event_flags {
+ bool usb_thermal_prot;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool vbus_collapse;
+};
+
+struct ab5500_charger_usb_state {
+ bool usb_changed;
+ int usb_current;
+ enum ab5500_usb_state state;
+ spinlock_t usb_lock;
+};
+
+/**
+ * struct ab5500_charger - ab5500 Charger device information
+ * @dev: Pointer to the structure device
+ * @chip_id: Chip-Id of the ab5500
+ * @max_usb_in_curr: Max USB charger input current
+ * @vbus_detected: VBUS detected
+ * @vbus_detected_start:
+ * VBUS detected during startup
+ * @parent: Pointer to the struct ab5500
+ * @gpadc: Pointer to the struct gpadc
+ * @pdata: Pointer to the ab5500_charger platform data
+ * @bat: Pointer to the ab5500_bm platform data
+ * @flags: Structure for information about events triggered
+ * @usb_state: Structure for usb stack information
+ * @usb_chg: USB charger power supply
+ * @ac: Structure that holds the AC charger properties
+ * @usb: Structure that holds the USB charger properties
+ * @charger_wq: Work queue for the IRQs and checking HW state
+ * @check_hw_failure_work: Work for checking HW state
+ * @check_usbchgnotok_work: Work for checking USB charger not ok status
+ * @ac_work: Work for checking AC charger connection
+ * @detect_usb_type_work: Work for detecting the USB type connected
+ * @usb_link_status_work: Work for checking the new USB link status
+ * @usb_state_changed_work: Work for checking USB state
+ * @check_main_thermal_prot_work:
+ * Work for checking Main thermal status
+ * @check_usb_thermal_prot_work:
+ * Work for checking USB thermal status
+ * @otg: pointer to struct otg_transceiver, used to get
+ * notified of the current allowed during standard
+ * host charging.
+ * @nb: structure of type notifier_block, which holds
+ * a function pointer referenced by the usb driver.
+ */
+struct ab5500_charger {
+ struct device *dev;
+ u8 chip_id;
+ int max_usb_in_curr;
+ bool vbus_detected;
+ bool vbus_detected_start;
+ struct ab5500 *parent;
+ struct ab5500_gpadc *gpadc;
+ struct abx500_charger_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct ab5500_charger_event_flags flags;
+ struct ab5500_charger_usb_state usb_state;
+ struct ux500_charger usb_chg;
+ struct ab5500_charger_info usb;
+ struct workqueue_struct *charger_wq;
+ struct delayed_work check_hw_failure_work;
+ struct delayed_work check_usbchgnotok_work;
+ struct work_struct detect_usb_type_work;
+ struct work_struct usb_link_status_work;
+ struct work_struct usb_state_changed_work;
+ struct work_struct check_usb_thermal_prot_work;
+ struct otg_transceiver *otg;
+ struct notifier_block nb;
+};
+
+/* USB properties */
+static enum power_supply_property ab5500_charger_usb_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/**
+ * ab5500_charger_get_vbus_voltage() - get vbus voltage
+ * @di: pointer to the ab5500_charger structure
+ *
+ * This function returns the vbus voltage.
+ * Returns vbus voltage (on success)
+ */
+static int ab5500_charger_get_vbus_voltage(struct ab5500_charger *di)
+{
+ int vch;
+
+ /* Only measure voltage if the charger is connected */
+ if (di->usb.charger_connected) {
+ vch = ab5500_gpadc_convert(di->gpadc, VBUS_V);
+ if (vch < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ vch = 0;
+ }
+ return vch;
+}
+
+/**
+ * ab5500_charger_get_usb_current() - get usb charger current
+ * @di: pointer to the ab5500_charger structure
+ *
+ * This function returns the usb charger current.
+ * Returns usb current (on success) and error code on failure
+ */
+static int ab5500_charger_get_usb_current(struct ab5500_charger *di)
+{
+ int ich;
+
+ /* Only measure current if the charger is online */
+ if (di->usb.charger_online) {
+ ich = ab5500_gpadc_convert(di->gpadc, USB_CHARGER_C);
+ if (ich < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ ich = 0;
+ }
+ return ich;
+}
+
+/**
+ * ab5500_charger_detect_chargers() - Detect the connected chargers
+ * @di: pointer to the ab5500_charger structure
+ *
+ * Returns the type of charger connected.
+ * For USB it will not mean we can actually charge from it
+ * but that there is a USB cable connected that we have to
+ * identify. This is used during startup when we don't get
+ * interrupts of the charger detection
+ *
+ * Returns an integer value, that means,
+ * NO_PW_CONN no power supply is connected
+ * USB_PW_CONN if the USB power supply is connected
+ */
+static int ab5500_charger_detect_chargers(struct ab5500_charger *di)
+{
+ int result = NO_PW_CONN;
+ int ret;
+ u8 val;
+ /*
+ * Check for a USB charger.
+ * TODO: since there is no status register, validate by
+ * reading the IT source registers.
+ */
+ ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_IT,
+ AB5500_IT_SOURCE8, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return ret;
+ }
+
+ if (val & VBUS_RISING)
+ result |= USB_PW_CONN;
+ else if (val & VBUS_FALLING)
+ result = NO_PW_CONN;
+
+ return result;
+}
+
+/**
+ * ab5500_charger_max_usb_curr() - get the max curr for the USB type
+ * @di: pointer to the ab5500_charger structure
+ * @link_status: the identified USB type
+ *
+ * Get the maximum current that is allowed to be drawn from the host
+ * based on the USB type.
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab5500_charger_max_usb_curr(struct ab5500_charger *di,
+ enum ab5500_charger_link_status link_status)
+{
+ int ret = 0;
+
+ switch (link_status) {
+ case USB_STAT_STD_HOST_NC:
+ case USB_STAT_STD_HOST_C_NS:
+ case USB_STAT_STD_HOST_C_S:
+ dev_dbg(di->dev, "USB Type - Standard host is "
+ "detected through USB driver\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ break;
+ case USB_STAT_HOST_CHG_HS_CHIRP:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ case USB_STAT_HOST_CHG_HS:
+ case USB_STAT_ACA_RID_C_HS:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
+ break;
+ case USB_STAT_ACA_RID_A:
+ /*
+ * Dedicated charger level minus maximum current accessory
+ * can consume (300mA). Closest level is 1100mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
+ break;
+ case USB_STAT_ACA_RID_B:
+ /*
+ * Dedicated charger level minus 120mA (20mA for ACA and
+ * 100mA for potential accessory). Closest level is 1300mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
+ break;
+ case USB_STAT_DEDICATED_CHG:
+ case USB_STAT_HOST_CHG_NM:
+ case USB_STAT_ACA_RID_C_HS_CHIRP:
+ case USB_STAT_ACA_RID_C_NM:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+ break;
+ case USB_STAT_RESERVED:
+ /*
+ * This state is used to indicate that VBUS has dropped below
+ * the detection level 4 times in a row. This is due to the
+ * charger output current being set too high, making the
+ * charger voltage collapse. This has to be propagated
+ * through to chargalg. This is done using the property
+ * POWER_SUPPLY_PROP_CURRENT_AVG = 1
+ */
+ di->flags.vbus_collapse = true;
+ dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
+ "VBUS has collapsed\n");
+ ret = -1;
+ break;
+ case USB_STAT_HM_IDGND:
+ case USB_STAT_NOT_CONFIGURED:
+ case USB_STAT_NOT_VALID_LINK:
+ dev_err(di->dev, "USB Type - Charging not allowed\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ default:
+ dev_err(di->dev, "USB Type - Unknown\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ }
+
+ dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
+ link_status, di->max_usb_in_curr);
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_read_usb_type() - read the type of usb connected
+ * @di: pointer to the ab5500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab5500_charger_read_usb_type(struct ab5500_charger *di)
+{
+ int ret;
+ u8 val;
+
+ ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_USB,
+ AB5500_USB_LINE_STATUS, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return ret;
+ }
+
+ /* get the USB type */
+ val = (val & AB5500_USB_LINK_STATUS) >> 3;
+ ret = ab5500_charger_max_usb_curr(di,
+ (enum ab5500_charger_link_status) val);
+
+ return ret;
+}
+
+static int ab5500_charger_voltage_map[] = {
+ 3500, 3525, 3550, 3575, 3600, 3625, 3650, 3675,
+ 3700, 3725, 3750, 3775, 3800, 3825, 3850, 3875,
+ 3900, 3925, 3950, 3975, 4000, 4025, 4050, 4060,
+ 4070, 4080, 4090, 4100, 4110, 4120, 4130, 4140,
+ 4150, 4160, 4170, 4180, 4190, 4200, 4210, 4220,
+ 4230, 4240, 4250, 4260, 4270, 4280, 4290, 4300,
+ 4310, 4320, 4330, 4340, 4350, 4360, 4370, 4380,
+ 4390, 4400, 4410, 4420, 4430, 4440, 4450, 4460,
+ 4470, 4480, 4490, 4500, 4510, 4520, 4530, 4540,
+ 4550, 4560, 4570, 4580, 4590, 4600,
+};
+
+/*
+ * This array maps the raw hex value to charger current used by the ab5500
+ * Values taken from the AB5500 product specification manual
+ */
+static int ab5500_charger_current_map[] = {
+ 100, 200, 300, 400, 500, 600, 700, 800,
+ 900, 1000, 1100, 1200, 1300, 1400, 1500, 1500,
+};
+
+static int ab5500_icsr_current_map[] = {
+ 50, 93, 193, 290, 380, 450, 500, 600,
+ 700, 800, 900, 1000, 1100, 1300, 1400, 1500,
+};
+
+static int ab5500_cvrec_voltage_map[] = {
+ 3300,
+ 3325,
+ 3350,
+ 3375,
+ 3400,
+ 3425,
+ 3450,
+ 3475,
+ 3500,
+ 3525,
+ 3550,
+ 3575,
+ 3600,
+ 3625,
+ 3650,
+ 3675,
+ 3700,
+ 3725,
+ 3750,
+ 3775,
+ 3800,
+ 3825,
+ 3850,
+ 3875,
+ 3900,
+ 3925,
+ 4000,
+ 4025,
+ 4050,
+ 4075,
+ 4100,
+ 4125,
+ 4150,
+ 4175,
+ 4200,
+ 4225,
+ 4250,
+ 4275,
+ 4300,
+ 4325,
+ 4350,
+ 4375,
+ 4400,
+ 4425,
+ 4450,
+ 4475,
+ 4500,
+ 4525,
+ 4550,
+ 4575,
+ 4600,
+};
+
+static int ab5500_cvrec_voltage_to_regval(int voltage)
+{
+ int i;
+
+ /* Special case for voltage below 3.3V */
+ if (voltage < ab5500_cvrec_voltage_map[0])
+ return 0;
+
+ for (i = 1; i < ARRAY_SIZE(ab5500_cvrec_voltage_map); i++) {
+ if (voltage < ab5500_cvrec_voltage_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_cvrec_voltage_map) - 1;
+ if (voltage == ab5500_cvrec_voltage_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab5500_voltage_to_regval(int voltage)
+{
+ int i;
+
+ /* Special case for voltage below 3.3V */
+ if (voltage < ab5500_charger_voltage_map[0])
+ return 0;
+
+ for (i = 1; i < ARRAY_SIZE(ab5500_charger_voltage_map); i++) {
+ if (voltage < ab5500_charger_voltage_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_charger_voltage_map) - 1;
+ if (voltage == ab5500_charger_voltage_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab5500_icsr_curr_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab5500_icsr_current_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab5500_icsr_current_map); i++) {
+ if (curr < ab5500_icsr_current_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_icsr_current_map) - 1;
+ if (curr == ab5500_icsr_current_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab5500_current_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab5500_charger_current_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab5500_charger_current_map); i++) {
+ if (curr < ab5500_charger_current_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_charger_current_map) - 1;
+ if (curr == ab5500_charger_current_map[i])
+ return i;
+ else
+ return -1;
+}
+
+/**
+ * ab5500_charger_get_usb_cur() - get usb current
+ * @di: pointer to the ab5500_charger structure
+ *
+ * The usb stack provides the maximum current that can be drawn from
+ * the standard usb host. This will be in mA.
+ * This function converts current in mA to a value that can be written
+ * to the register. Returns -1 if charging is not allowed
+ */
+static int ab5500_charger_get_usb_cur(struct ab5500_charger *di)
+{
+ switch (di->usb_state.usb_current) {
+ case 50:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ break;
+ case 100:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ break;
+ case 200:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P19;
+ break;
+ case 300:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P29;
+ break;
+ case 400:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P38;
+ break;
+ case 500:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ default:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * ab5500_charger_set_vbus_in_curr() - set VBUS input current limit
+ * @di: pointer to the ab5500_charger structure
+ * @ich_in: charger input current limit
+ *
+ * Sets the current that can be drawn from the USB host
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_set_vbus_in_curr(struct ab5500_charger *di,
+ int ich_in)
+{
+ int ret;
+ int input_curr_index;
+ int min_value;
+
+ /* We should always use the lowest current limit */
+ min_value = min(di->bat->chg_params->usb_curr_max, ich_in);
+
+ input_curr_index = ab5500_icsr_curr_to_regval(min_value);
+ if (input_curr_index < 0) {
+ dev_err(di->dev, "VBUS input current limit too high\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_CHG,
+ AB5500_ICSR, input_curr_index);
+ if (ret)
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_usb_en() - enable usb charging
+ * @charger: pointer to the ux500_charger structure
+ * @enable: enable/disable flag
+ * @vset: charging voltage
+ * @ich_out: charger output current
+ *
+ * Enables/disables USB charging and turns the charging LED on/off accordingly.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_usb_en(struct ux500_charger *charger,
+ int enable, int vset, int ich_out)
+{
+ int ret;
+ int volt_index;
+ int curr_index;
+
+ struct ab5500_charger *di = to_ab5500_charger_usb_device_info(charger);
+
+ if (enable) {
+ /* Check if USB is connected */
+ if (!di->usb.charger_connected) {
+ dev_err(di->dev, "USB charger not connected\n");
+ return -ENXIO;
+ }
+
+ /* Enable USB charging */
+ dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out);
+
+ volt_index = ab5500_voltage_to_regval(vset);
+ curr_index = ab5500_current_to_regval(ich_out);
+ /* Reject out-of-range requests instead of writing a bogus index */
+ if (volt_index < 0 || curr_index < 0) {
+ dev_err(di->dev,
+ "Charger voltage or current out of range, charging not started\n");
+ return -ENXIO;
+ }
+
+ /* ChVoltLevel: max voltage up to which battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_VSRC, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* current that can be drawn from the usb */
+ ret = ab5500_charger_set_vbus_in_curr(di, ich_out);
+ if (ret) {
+ dev_err(di->dev, "%s setting icsr failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* ChOutputCurrentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_OCSRV, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /*
+ * Battery voltage at which charging should be resumed
+ * after charging has completed
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CVREC,
+ ab5500_cvrec_voltage_to_regval(
+ di->bat->bat_type[di->bat->batt_id].recharge_vol));
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ /*
+ * Battery temperature:
+ * The TBDATA register expects the battery temperature in
+ * steps of 2 degrees, so the temperature obtained from the
+ * gpadc is divided by 2 before being written.
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_TBDATA,
+ di->bat->temp_now / 2);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* If success power on charging LED indication */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_LEDT, LED_ENABLE);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /*
+ * Register DCIOCURRENT is part of the charging watchdog
+ * re-kick sequence, so it must be written even though only
+ * USB charging is used.
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_DCIOCURRENT,
+ RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ di->usb.charger_online = 1;
+ } else {
+ /* ChVoltLevel: max voltage up to which battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_VSRC, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ /* USBChInputCurr: current that can be drawn from the usb */
+ ret = ab5500_charger_set_vbus_in_curr(di, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s resetting icsr failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ /* If success power off charging LED indication */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_LEDT, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ di->usb.charger_online = 0;
+ di->usb.wd_expired = false;
+ dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
+ }
+ power_supply_changed(&di->usb_chg.psy);
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_watchdog_kick() - kick charger watchdog
+ * @charger: pointer to the ux500_charger structure
+ *
+ * Kick charger watchdog
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_watchdog_kick(struct ux500_charger *charger)
+{
+ int ret;
+ struct ab5500_charger *di;
+ int volt_index, curr_index;
+ u8 value = 0;
+
+ /* TODO: update */
+ if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab5500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_STARTUP,
+ AB5500_MCB, &value);
+ if (ret)
+ dev_err(di->dev, "Failed to read!\n");
+
+ value = value | (SSW_ENABLE_REBOOT | SSW_REBOOT_EN |
+ SSW_CONTROL_AUTOC | SSW_PSEL_480S);
+ ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_STARTUP,
+ AB5500_MCB, value);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ volt_index = ab5500_voltage_to_regval(
+ di->bat->bat_type[di->bat->batt_id].normal_vol_lvl);
+ curr_index = ab5500_current_to_regval(di->max_usb_in_curr);
+
+ /* ChVoltLevel: max voltage up to which battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_VSRC, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ /* current that can be drawn from the usb */
+ ret = ab5500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ if (ret) {
+ dev_err(di->dev, "%s setting icsr failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* ChOutputCurrentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_OCSRV, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ /*
+ * Battery voltage at which charging should be resumed
+ * after charging has completed (Charger_Vrechar[5:0])
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CVREC,
+ ab5500_cvrec_voltage_to_regval(
+ di->bat->bat_type[di->bat->batt_id].recharge_vol));
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+ /*
+ * Battery temperature:
+ * The TBDATA register expects the battery temperature in
+ * steps of 2 degrees, so the temperature obtained from the
+ * gpadc is divided by 2 before being written.
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_TBDATA,
+ di->bat->temp_now / 2);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+ /*
+ * Register DCIOCURRENT is part of the charging watchdog
+ * re-kick sequence, so it must be written even though only
+ * USB charging is used.
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_DCIOCURRENT,
+ RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_update_charger_current() - update charger current
+ * @charger: pointer to the ux500_charger structure
+ * @ich_out: the new charger output current
+ *
+ * Update the charger output current for the specified charger
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_update_charger_current(struct ux500_charger *charger,
+ int ich_out)
+{
+ int ret = 0;
+ int curr_index;
+ struct ab5500_charger *di;
+
+ if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab5500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ curr_index = ab5500_current_to_regval(ich_out);
+ if (curr_index < 0) {
+ dev_err(di->dev,
+ "Charger current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_CHG,
+ AB5500_OCSRV, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_check_hw_failure_work() - check main charger failure
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab5500_charger_check_hw_failure_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, check_hw_failure_work.work);
+
+ /* Check if the status bit for HW failure is still active */
+ if (di->flags.vbus_ovv) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_USB_PHY_STATUS,
+ &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return;
+ }
+ if (!(reg_value & VBUS_OVV_TH)) {
+ di->flags.vbus_ovv = false;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ }
+ /* If we still have a failure, schedule a new check */
+ if (di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, round_jiffies(HZ));
+ }
+}
+
+/**
+ * ab5500_charger_detect_usb_type_work() - work to detect USB type
+ * @work: Pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab5500_charger_detect_usb_type_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, detect_usb_type_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check whether a charger is
+ * connected by reading the status register
+ */
+ ret = ab5500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+ }
+}
+
+/**
+ * ab5500_charger_usb_link_status_work() - handle USB link status changes
+ * @work: pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged and enable or disable charging accordingly
+ */
+static void ab5500_charger_usb_link_status_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, usb_link_status_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check whether a charger is
+ * connected by reading the status register
+ */
+ ret = ab5500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+ ret = ab5500_charger_read_usb_type(di);
+ if (!ret) {
+ /* Update maximum input current */
+ ret = ab5500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ di->usb.charger_connected = 1;
+ power_supply_changed(&di->usb_chg.psy);
+ } else if (ret == -ENXIO) {
+ /* No valid charger type detected */
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ }
+}
+
+static void ab5500_charger_usb_state_changed_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, usb_state_changed_work);
+
+ if (!di->vbus_detected)
+ return;
+
+ spin_lock_irqsave(&di->usb_state.usb_lock, flags);
+ di->usb_state.usb_changed = false;
+ spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
+
+ /*
+ * wait a while for updates from the usb stack and for the
+ * negotiations to complete
+ */
+ msleep(250);
+
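+ /*
+ * If the state changed again while we slept, the notifier has queued
+ * a new instance of this work; let that one handle the latest state.
+ */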
+ if (di->usb_state.usb_changed)
+ return;
+
+ dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
+ __func__, di->usb_state.state, di->usb_state.usb_current);
+
+ switch (di->usb_state.state) {
+ case AB5500_BM_USB_STATE_RESET_HS:
+ case AB5500_BM_USB_STATE_RESET_FS:
+ case AB5500_BM_USB_STATE_SUSPEND:
+ case AB5500_BM_USB_STATE_MAX:
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ break;
+
+ case AB5500_BM_USB_STATE_RESUME:
+ /*
+ * when going from suspend to resume there should be a
+ * 1 s delay before charging is enabled
+ */
+ msleep(1000);
+ /* Intentional fall through */
+ case AB5500_BM_USB_STATE_CONFIGURED:
+ /*
+ * USB is configured, enable charging with the charging
+ * input current obtained from USB driver
+ */
+ if (!ab5500_charger_get_usb_cur(di)) {
+ /* Update maximum input current */
+ ret = ab5500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ di->usb.charger_connected = 1;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ab5500_charger_check_usbchargernotok_work() - check USB chg not ok status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB charger Not OK status
+ */
+static void ab5500_charger_check_usbchargernotok_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+ bool prev_status;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, check_usbchgnotok_work.work);
+
+ /* Check if the status bit for usbchargernotok is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_CHGFSM_CHARGER_DETECT, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return;
+ }
+ prev_status = di->flags.usbchargernotok;
+
+ if (reg_value & VBUS_CH_NOK) {
+ di->flags.usbchargernotok = true;
+ /* Check again in 1sec */
+ queue_delayed_work(di->charger_wq,
+ &di->check_usbchgnotok_work, HZ);
+ } else {
+ di->flags.usbchargernotok = false;
+ di->flags.vbus_collapse = false;
+ }
+
+ if (prev_status != di->flags.usbchargernotok)
+ power_supply_changed(&di->usb_chg.psy);
+}
+
+/**
+ * ab5500_charger_check_usb_thermal_prot_work() - check usb thermal status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB thermal prot status
+ */
+static void ab5500_charger_check_usb_thermal_prot_work(
+ struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, check_usb_thermal_prot_work);
+
+ /* Check if the status bit for usb_thermal_prot is still active */
+ /* TODO: Interrupt source reg 15 bit 4 */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_CHGFSM_USB_BTEMP_CURR_LIM, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return;
+ }
+ if (reg_value & USB_CH_TH_PROT_LOW || reg_value & USB_CH_TH_PROT_HIGH)
+ di->flags.usb_thermal_prot = true;
+ else
+ di->flags.usb_thermal_prot = false;
+
+ power_supply_changed(&di->usb_chg.psy);
+}
+
+/**
+ * ab5500_charger_vbusdetf_handler() - VBUS falling detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_vbusdetf_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS falling detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_vbusdetr_handler() - VBUS rising detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_vbusdetr_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ di->vbus_detected = true;
+ dev_dbg(di->dev, "VBUS rising detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usblinkstatus_handler() - USB link status has changed
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_usblinkstatus_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "USB link status changed\n");
+
+ if (!di->usb.charger_online)
+ queue_work(di->charger_wq, &di->usb_link_status_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usbchthprotr_handler() - Die temp is above usb charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_usbchthprotr_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp above USB charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usbchargernotokr_handler() - USB charger not ok detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_usbchargernotokr_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "Not allowed USB charger detected\n");
+ queue_delayed_work(di->charger_wq, &di->check_usbchgnotok_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_chwdexp_handler() - Charger watchdog expired
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_chwdexp_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "Charger watchdog expired\n");
+
+ /*
+ * The charger that was online when the watchdog expired
+ * needs to be restarted for charging to start again
+ */
+ if (di->usb.charger_online) {
+ di->usb.wd_expired = true;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_vbusovv_handler() - VBUS overvoltage detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_vbusovv_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS overvoltage detected\n");
+ di->flags.vbus_ovv = true;
+ power_supply_changed(&di->usb_chg.psy);
+
+ /* Schedule a new HW failure check */
+ queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usb_get_property() - get the usb properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the usb
+ * properties by reading the sysfs files.
+ * USB properties are online, present and voltage.
+ * online: usb charging is in progress or not
+ * present: presence of the usb
+ * voltage: vbus voltage
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab5500_charger *di;
+
+ di = to_ab5500_charger_usb_device_info(psy_to_ux500_charger(psy));
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->flags.usbchargernotok)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (di->usb.wd_expired)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (di->flags.usb_thermal_prot)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (di->flags.vbus_ovv)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->usb.charger_online;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = di->usb.charger_connected;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ di->usb.charger_voltage = ab5500_charger_get_vbus_voltage(di);
+ val->intval = di->usb.charger_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ab5500_charger_get_usb_current(di) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ /*
+ * This property is used to indicate when VBUS has collapsed
+ * due to too high output current from the USB charger
+ */
+ if (di->flags.vbus_collapse)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ab5500_charger_init_hw_registers() - Set up charger related registers
+ * @di: pointer to the ab5500_charger structure
+ *
+ * Enable USB charger detection, over current protection for reverse supply
+ * and software end of charge detection
+ */
+static int ab5500_charger_init_hw_registers(struct ab5500_charger *di)
+{
+ int ret = 0;
+
+ /* Enable ID Host and Device detection */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_USB_OTG_CTRL,
+ USB_ID_HOST_DET_ENA_MASK, USB_ID_HOST_DET_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to enable usb charger detection\n");
+ goto out;
+ }
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_USB_OTG_CTRL,
+ USB_ID_DEVICE_DET_ENA_MASK, USB_ID_DEVICE_DET_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to enable usb charger detection\n");
+ goto out;
+ }
+
+ /* Over current protection for reverse supply */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CREVS, CHARGER_REV_SUP,
+ CHARGER_REV_SUP);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to enable over current protection for reverse supply\n");
+ goto out;
+ }
+
+ /* Enable SW EOC at flatcurrent detection */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CCTRL, SW_EOC, SW_EOC);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to enable end of charge at flatcurrent detection\n");
+ goto out;
+ }
+out:
+ return ret;
+}
+
+/*
+ * ab5500 charger driver interrupts and their respective isr
+ */
+static struct ab5500_charger_interrupts ab5500_charger_irq[] = {
+ {"VBUS_FALLING", ab5500_charger_vbusdetf_handler},
+ {"VBUS_RISING", ab5500_charger_vbusdetr_handler},
+ {"USB_LINK_UPDATE", ab5500_charger_usblinkstatus_handler},
+ {"USB_CH_TH_PROTECTION", ab5500_charger_usbchthprotr_handler},
+ {"USB_CH_NOT_OK", ab5500_charger_usbchargernotokr_handler},
+ {"OVV", ab5500_charger_vbusovv_handler},
+ /* TODO: Interrupt missing, will be available in cut 2 */
+ /*{"CHG_SW_TIMER_OUT", ab5500_charger_chwdexp_handler},*/
+};
+
+static int ab5500_charger_usb_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *power)
+{
+ struct ab5500_charger *di =
+ container_of(nb, struct ab5500_charger, nb);
+ enum ab5500_usb_state bm_usb_state;
+ unsigned mA = *((unsigned *)power);
+
+ if (event != USB_EVENT_VBUS) {
+ dev_dbg(di->dev, "not a standard host, returning\n");
+ return NOTIFY_DONE;
+ }
+
+ /* TODO: State is fabricated here. See if charger really needs USB
+ * state or if mA is enough
+ */
+ if ((di->usb_state.usb_current == 2) && (mA > 2))
+ bm_usb_state = AB5500_BM_USB_STATE_RESUME;
+ else if (mA == 0)
+ bm_usb_state = AB5500_BM_USB_STATE_RESET_HS;
+ else if (mA == 2)
+ bm_usb_state = AB5500_BM_USB_STATE_SUSPEND;
+ else if (mA >= 8) /* 8, 100, 500 */
+ bm_usb_state = AB5500_BM_USB_STATE_CONFIGURED;
+ else /* Should never occur */
+ bm_usb_state = AB5500_BM_USB_STATE_RESET_FS;
+
+ dev_dbg(di->dev, "%s usb_state: 0x%02x mA: %d\n",
+ __func__, bm_usb_state, mA);
+
+ spin_lock(&di->usb_state.usb_lock);
+ di->usb_state.usb_changed = true;
+ di->usb_state.state = bm_usb_state;
+ di->usb_state.usb_current = mA;
+ spin_unlock(&di->usb_state.usb_lock);
+
+ queue_work(di->charger_wq, &di->usb_state_changed_work);
+
+ return NOTIFY_OK;
+}
+
+#if defined(CONFIG_PM)
+static int ab5500_charger_resume(struct platform_device *pdev)
+{
+ struct ab5500_charger *di = platform_get_drvdata(pdev);
+
+ /* If we still have a HW failure, schedule a new check */
+ if (di->flags.usbchargernotok || di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, 0);
+ }
+
+ return 0;
+}
+
+static int ab5500_charger_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab5500_charger *di = platform_get_drvdata(pdev);
+
+ /* Cancel any pending HW failure check */
+ if (delayed_work_pending(&di->check_hw_failure_work))
+ cancel_delayed_work(&di->check_hw_failure_work);
+
+ return 0;
+}
+#else
+#define ab5500_charger_suspend NULL
+#define ab5500_charger_resume NULL
+#endif
+
+static int __devexit ab5500_charger_remove(struct platform_device *pdev)
+{
+ struct ab5500_charger *di = platform_get_drvdata(pdev);
+ int i, irq;
+
+ /* Disable USB charging */
+ ab5500_charger_usb_en(&di->usb_chg, false, 0, 0);
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ otg_unregister_notifier(di->otg, &di->nb);
+ otg_put_transceiver(di->otg);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->charger_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->usb_chg.psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab5500_charger_probe(struct platform_device *pdev)
+{
+ int irq, i, charger_status, ret = 0;
+ struct abx500_bm_plat_data *plat_data;
+
+ struct ab5500_charger *di =
+ kzalloc(sizeof(struct ab5500_charger), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab5500_gpadc_get("ab5500-adc.0");
+
+ /* initialize lock */
+ spin_lock_init(&di->usb_state.usb_lock);
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->charger;
+ di->bat = plat_data->battery;
+
+ /* get charger specific platform data */
+ if (!di->pdata) {
+ dev_err(di->dev, "no charger platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ /* USB supply */
+ /* power_supply base class */
+ di->usb_chg.psy.name = "ab5500_usb";
+ di->usb_chg.psy.type = POWER_SUPPLY_TYPE_USB;
+ di->usb_chg.psy.properties = ab5500_charger_usb_props;
+ di->usb_chg.psy.num_properties = ARRAY_SIZE(ab5500_charger_usb_props);
+ di->usb_chg.psy.get_property = ab5500_charger_usb_get_property;
+ di->usb_chg.psy.supplied_to = di->pdata->supplied_to;
+ di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ /* ux500_charger sub-class */
+ di->usb_chg.ops.enable = &ab5500_charger_usb_en;
+ di->usb_chg.ops.kick_wd = &ab5500_charger_watchdog_kick;
+ di->usb_chg.ops.update_curr = &ab5500_charger_update_charger_current;
+ di->usb_chg.max_out_volt = ab5500_charger_voltage_map[
+ ARRAY_SIZE(ab5500_charger_voltage_map) - 1];
+ di->usb_chg.max_out_curr = ab5500_charger_current_map[
+ ARRAY_SIZE(ab5500_charger_current_map) - 1];
+
+
+ /* Create a work queue for the charger */
+ di->charger_wq =
+ create_singlethread_workqueue("ab5500_charger_wq");
+ if (di->charger_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for HW failure check */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+ ab5500_charger_check_hw_failure_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+ ab5500_charger_check_usbchargernotok_work);
+
+ /* Init work for charger detection */
+ INIT_WORK(&di->usb_link_status_work,
+ ab5500_charger_usb_link_status_work);
+ INIT_WORK(&di->detect_usb_type_work,
+ ab5500_charger_detect_usb_type_work);
+
+ INIT_WORK(&di->usb_state_changed_work,
+ ab5500_charger_usb_state_changed_work);
+
+ /* Init work for checking HW status */
+ INIT_WORK(&di->check_usb_thermal_prot_work,
+ ab5500_charger_check_usb_thermal_prot_work);
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(di->dev);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to get chip ID\n");
+ goto free_charger_wq;
+ }
+ di->chip_id = ret;
+ dev_dbg(di->dev, "AB5500 CID is: 0x%02x\n", di->chip_id);
+
+ /* Initialize OVV, and other registers */
+ ret = ab5500_charger_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize ABB registers\n");
+ goto free_charger_wq;
+ }
+
+ /* Register USB charger class */
+ ret = power_supply_register(di->dev, &di->usb_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register USB charger\n");
+ goto free_charger_wq;
+ }
+
+ di->otg = otg_get_transceiver();
+ if (!di->otg) {
+ dev_err(di->dev, "failed to get otg transceiver\n");
+ ret = -ENODEV;
+ goto free_usb;
+ }
+ di->nb.notifier_call = ab5500_charger_usb_notifier_call;
+ ret = otg_register_notifier(di->otg, &di->nb);
+ if (ret) {
+ dev_err(di->dev, "failed to register otg notifier\n");
+ goto put_otg_transceiver;
+ }
+
+ /* Identify the connected charger types during startup */
+ charger_status = ab5500_charger_detect_chargers(di);
+ if (charger_status & USB_PW_CONN) {
+ dev_dbg(di->dev, "VBUS Detect during startup\n");
+ di->vbus_detected = true;
+ di->vbus_detected_start = true;
+ queue_work(di->charger_wq,
+ &di->usb_link_status_work);
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab5500_charger_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab5500_charger_irq[i].name, di);
+
+ if (ret != 0) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab5500_charger_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab5500_charger_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+free_irq:
+ otg_unregister_notifier(di->otg, &di->nb);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+put_otg_transceiver:
+ otg_put_transceiver(di->otg);
+free_usb:
+ power_supply_unregister(&di->usb_chg.psy);
+free_charger_wq:
+ destroy_workqueue(di->charger_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab5500_charger_driver = {
+ .probe = ab5500_charger_probe,
+ .remove = __devexit_p(ab5500_charger_remove),
+ .suspend = ab5500_charger_suspend,
+ .resume = ab5500_charger_resume,
+ .driver = {
+ .name = "ab5500-charger",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_charger_init(void)
+{
+ return platform_driver_register(&ab5500_charger_driver);
+}
+
+static void __exit ab5500_charger_exit(void)
+{
+ platform_driver_unregister(&ab5500_charger_driver);
+}
+
+subsys_initcall_sync(ab5500_charger_init);
+module_exit(ab5500_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab5500-charger");
+MODULE_DESCRIPTION("AB5500 charger management driver");
diff --git a/drivers/power/ab5500_fg.c b/drivers/power/ab5500_fg.c
new file mode 100644
index 00000000000..c74d351bd8b
--- /dev/null
+++ b/drivers/power/ab5500_fg.c
@@ -0,0 +1,1954 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2011
+ *
+ * Main and Back-up battery management driver.
+ *
+ * Note: Backup battery management is required for a Li-Ion backup battery but
+ * not for a capacitive one. HREF boards have a capacitive backup battery, so
+ * backup battery management is not used, although the supporting code is
+ * available in this driver.
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+static LIST_HEAD(ab5500_fg_list);
+
+/* U5500 Constants */
+#define FG_ON_MASK 0x04
+#define FG_ON 0x04
+#define FG_ACC_RESET_ON_READ_MASK 0x08
+#define FG_ACC_RESET_ON_READ 0x08
+#define EN_READOUT_MASK 0x01
+#define EN_READOUT 0x01
+#define EN_ACC_RESET_ON_READ 0x08
+#define ACC_RESET_ON_READ 0x08
+#define RESET 0x00
+#define EOC_52_mA 0x04
+#define MILLI_TO_MICRO 1000
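+/*
+ * One coulomb counter LSB corresponds to 192.7 uC sampled at 4 Hz, i.e.
+ * roughly 0.77 mA per sample and 192.7/3600 uAh = 53.53 nAh of charge;
+ * the two constants below encode these scalings (x1000 and x100).
+ */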
+#define FG_LSB_IN_MA 770
+#define QLSB_NANO_AMP_HOURS_X100 5353
+#define SEC_TO_SAMPLE(S) ((S) * 4)
+#define NBR_AVG_SAMPLES 20
+#define LOW_BAT_CHECK_INTERVAL (2 * HZ)
+#define FG_PERIODIC_START_INTERVAL ((250 * HZ) / 1000) /* 250 msec */
+
+#define VALID_CAPACITY_SEC (45 * 60) /* 45 minutes */
+
+#define interpolate(x, x1, y1, x2, y2) \
+ ((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))))
+
+#define to_ab5500_fg_device_info(x) container_of((x), \
+ struct ab5500_fg, fg_psy)
+
+/**
+ * struct ab5500_fg_interrupts - ab5500 fg interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_fg_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+enum ab5500_fg_discharge_state {
+ AB5500_FG_DISCHARGE_INIT,
+ AB5500_FG_DISCHARGE_INITMEASURING,
+ AB5500_FG_DISCHARGE_INIT_RECOVERY,
+ AB5500_FG_DISCHARGE_RECOVERY,
+ AB5500_FG_DISCHARGE_READOUT,
+ AB5500_FG_DISCHARGE_WAKEUP,
+};
+
+static char *discharge_state[] = {
+ "DISCHARGE_INIT",
+ "DISCHARGE_INITMEASURING",
+ "DISCHARGE_INIT_RECOVERY",
+ "DISCHARGE_RECOVERY",
+ "DISCHARGE_READOUT",
+ "DISCHARGE_WAKEUP",
+};
+
+enum ab5500_fg_charge_state {
+ AB5500_FG_CHARGE_INIT,
+ AB5500_FG_CHARGE_READOUT,
+};
+
+static char *charge_state[] = {
+ "CHARGE_INIT",
+ "CHARGE_READOUT",
+};
+
+enum ab5500_fg_calibration_state {
+ AB5500_FG_CALIB_INIT,
+ AB5500_FG_CALIB_WAIT,
+ AB5500_FG_CALIB_END,
+};
+
+struct ab5500_fg_avg_cap {
+ int avg;
+ int samples[NBR_AVG_SAMPLES];
+ __kernel_time_t time_stamps[NBR_AVG_SAMPLES];
+ int pos;
+ int nbr_samples;
+ int sum;
+};
+
+struct ab5500_fg_battery_capacity {
+ int max_mah_design;
+ int max_mah;
+ int mah;
+ int permille;
+ int level;
+ int prev_mah;
+ int prev_percent;
+ int prev_level;
+};
+
+struct ab5500_fg_flags {
+ bool fg_enabled;
+ bool conv_done;
+ bool charging;
+ bool fully_charged;
+ bool low_bat_delay;
+ bool low_bat;
+ bool bat_ovv;
+ bool batt_unknown;
+ bool calibrate;
+};
+
+/**
+ * struct ab5500_fg - ab5500 FG device information
+ * @dev: Pointer to the structure device
+ * @vbat: Battery voltage in mV
+ * @vbat_nom: Nominal battery voltage in mV
+ * @inst_curr: Instantaneous battery current in mA
+ * @avg_curr: Average battery current in mA
+ * @fg_samples: Number of samples used in the FG accumulation
+ * @accu_charge: Accumulated charge from the last conversion
+ * @recovery_cnt: Counter for recovery mode
+ * @high_curr_cnt: Counter for high current mode
+ * @init_cnt: Counter for init mode
+ * @v_to_cap: capacity based on battery voltage
+ * @recovery_needed: Indicate if recovery is needed
+ * @high_curr_mode: Indicate if we're in high current mode
+ * @init_capacity: Indicate if initial capacity measuring should be done
+ * @calib_state: State during offset calibration
+ * @discharge_state: Current discharge state
+ * @charge_state: Current charge state
+ * @flags: Structure for information about events triggered
+ * @bat_cap: Structure for battery capacity specific parameters
+ * @avg_cap: Average capacity filter
+ * @parent: Pointer to the struct ab5500
+ * @gpadc: Pointer to the struct gpadc
+ * @gpadc_auto: Pointer to the struct adc_auto_input
+ * @pdata: Pointer to the ab5500_fg platform data
+ * @bat: Pointer to the ab5500_bm platform data
+ * @fg_psy: Structure that holds the FG specific battery properties
+ * @fg_wq: Work queue for running the FG algorithm
+ * @fg_periodic_work: Work to run the FG algorithm periodically
+ * @fg_low_bat_work: Work to check low bat condition
+ * @fg_reinit_work: Work to reset and re-initialize fuel gauge
+ * @fg_work: Work to run the FG algorithm instantly
+ * @fg_acc_cur_work: Work to read the FG accumulator
+ * @cc_lock: Mutex for locking the CC
+ * @node: struct of type list_head
+ */
+struct ab5500_fg {
+ struct device *dev;
+ int vbat;
+ int vbat_nom;
+ int inst_curr;
+ int avg_curr;
+ int fg_samples;
+ int accu_charge;
+ int recovery_cnt;
+ int high_curr_cnt;
+ int init_cnt;
+ int v_to_cap;
+ bool recovery_needed;
+ bool high_curr_mode;
+ bool init_capacity;
+ enum ab5500_fg_calibration_state calib_state;
+ enum ab5500_fg_discharge_state discharge_state;
+ enum ab5500_fg_charge_state charge_state;
+ struct ab5500_fg_flags flags;
+ struct ab5500_fg_battery_capacity bat_cap;
+ struct ab5500_fg_avg_cap avg_cap;
+ struct ab5500 *parent;
+ struct ab5500_gpadc *gpadc;
+ struct adc_auto_input *gpadc_auto;
+ struct abx500_fg_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply fg_psy;
+ struct workqueue_struct *fg_wq;
+ struct delayed_work fg_periodic_work;
+ struct delayed_work fg_low_bat_work;
+ struct delayed_work fg_reinit_work;
+ struct work_struct fg_work;
+ struct delayed_work fg_acc_cur_work;
+ struct mutex cc_lock;
+ struct list_head node;
+ struct timer_list avg_current_timer;
+};
+
+/* Main battery properties */
+static enum power_supply_property ab5500_fg_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+};
+
+/* Function Prototype */
+static int ab5500_fg_bat_v_trig(int mux);
+
+static int prev_samples, prev_val;
+
+struct ab5500_fg *ab5500_fg_get(void)
+{
+ struct ab5500_fg *di;
+ di = list_first_entry(&ab5500_fg_list, struct ab5500_fg, node);
+
+ return di;
+}
+
+/**
+ * ab5500_fg_is_low_curr() - Low or high current mode
+ * @di: pointer to the ab5500_fg structure
+ * @curr: the current to base our decision on
+ *
+ * Low current mode if the current consumption is below a certain threshold
+ */
+static int ab5500_fg_is_low_curr(struct ab5500_fg *di, int curr)
+{
+ /*
+ * We want to know if we're in low current mode
+ */
+ if (curr > -di->bat->fg_params->high_curr_threshold)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ab5500_fg_add_cap_sample() - Add capacity to average filter
+ * @di: pointer to the ab5500_fg structure
+ * @sample: the capacity in mAh to add to the filter
+ *
+ * A capacity is added to the filter and a new mean capacity is calculated and
+ * returned
+ */
+static int ab5500_fg_add_cap_sample(struct ab5500_fg *di, int sample)
+{
+ struct timespec ts;
+ struct ab5500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ do {
+ avg->sum += sample - avg->samples[avg->pos];
+ avg->samples[avg->pos] = sample;
+ avg->time_stamps[avg->pos] = ts.tv_sec;
+ avg->pos++;
+
+ if (avg->pos == NBR_AVG_SAMPLES)
+ avg->pos = 0;
+
+ if (avg->nbr_samples < NBR_AVG_SAMPLES)
+ avg->nbr_samples++;
+
+ /*
+ * Check the time stamp for each sample. If too old,
+ * replace with latest sample
+ */
+ } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
+
+ avg->avg = avg->sum / avg->nbr_samples;
+
+ return avg->avg;
+}
+
+/**
+ * ab5500_fg_clear_cap_samples() - Clear average filter
+ * @di: pointer to the ab5500_fg structure
+ *
+ * The capacity filter is reset to zero.
+ */
+static void ab5500_fg_clear_cap_samples(struct ab5500_fg *di)
+{
+ int i;
+ struct ab5500_fg_avg_cap *avg = &di->avg_cap;
+
+ avg->pos = 0;
+ avg->nbr_samples = 0;
+ avg->sum = 0;
+ avg->avg = 0;
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = 0;
+ avg->time_stamps[i] = 0;
+ }
+}
+
+
+/**
+ * ab5500_fg_fill_cap_sample() - Fill average filter
+ * @di: pointer to the ab5500_fg structure
+ * @sample: the capacity in mAh to fill the filter with
+ *
+ * The capacity filter is filled with a capacity in mAh
+ */
+static void ab5500_fg_fill_cap_sample(struct ab5500_fg *di, int sample)
+{
+ int i;
+ struct timespec ts;
+ struct ab5500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = sample;
+ avg->time_stamps[i] = ts.tv_sec;
+ }
+
+ avg->pos = 0;
+ avg->nbr_samples = NBR_AVG_SAMPLES;
+ avg->sum = sample * NBR_AVG_SAMPLES;
+ avg->avg = sample;
+}
+
+/**
+ * ab5500_fg_coulomb_counter() - enable coulomb counter
+ * @di: pointer to the ab5500_fg structure
+ * @enable: enable/disable
+ *
+ * Enable/Disable coulomb counter.
+ * On failure returns negative value.
+ */
+static int ab5500_fg_coulomb_counter(struct ab5500_fg *di, bool enable)
+{
+ int ret = 0;
+ mutex_lock(&di->cc_lock);
+ if (enable) {
+ /* Power-up the CC */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ (FG_ON | FG_ACC_RESET_ON_READ));
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = true;
+ } else {
+ /* Stop the CC */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ FG_ON_MASK, RESET);
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = false;
+
+ }
+ dev_dbg(di->dev, " CC enabled: %d Samples: %d\n",
+ enable, di->fg_samples);
+
+ mutex_unlock(&di->cc_lock);
+
+ return ret;
+cc_err:
+ dev_err(di->dev, "%s Enabling coulomb counter failed\n", __func__);
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+/**
+ * ab5500_fg_inst_curr() - battery instantaneous current
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns the battery instantaneous current (on success) else an error code
+ */
+static int ab5500_fg_inst_curr(struct ab5500_fg *di)
+{
+ u8 low, high;
+ static int val;
+ int ret = 0;
+ bool fg_off = false;
+
+ if (!di->flags.fg_enabled) {
+ fg_off = true;
+ /* Power-up the CC */
+ ab5500_fg_coulomb_counter(di, true);
+ msleep(250);
+ }
+
+ mutex_lock(&di->cc_lock);
+
+ /* Enable read request */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_B,
+ EN_READOUT_MASK, EN_READOUT);
+ if (ret)
+ goto inst_curr_err;
+
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FGDIR_READ0, &low);
+ if (ret < 0)
+ goto inst_curr_err;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FGDIR_READ1, &high);
+ if (ret < 0)
+ goto inst_curr_err;
+
+ /*
+ * The sample is a signed 13-bit value, negative while discharging;
+ * sign-extend the two's complement value before converting it.
+ */
+ if (high & 0x10)
+ val = (low | (high << 8) | 0xFFFFE000);
+ else
+ val = (low | (high << 8));
+
+ /*
+ * Convert to unit value in mA
+ * R(FGSENSE) = 20 mOhm
+ * Scaling of LSB: this corresponds for R(FGSENSE) to a current of
+ * I = Q/t = 192.7 uC * 4 Hz = 0.77mA
+ */
+ val = (val * FG_LSB_IN_MA) / 1000;
+
+ mutex_unlock(&di->cc_lock);
+
+ if (fg_off) {
+ dev_dbg(di->dev, "%s Disable FG\n", __func__);
+ /* Power-off the CC */
+ ab5500_fg_coulomb_counter(di, false);
+ }
+
+ return val;
+
+inst_curr_err:
+ dev_err(di->dev, "%s Get instanst current failed\n", __func__);
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+static void ab5500_fg_acc_cur_timer_expired(unsigned long data)
+{
+ struct ab5500_fg *di = (struct ab5500_fg *) data;
+ dev_dbg(di->dev, "Avg current timer expired\n");
+
+ /* Trigger execution of the algorithm instantly */
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work, 0);
+}
+
+/**
+ * ab5500_fg_acc_cur_work() - average battery current
+ * @work: pointer to the work_struct structure
+ *
+ * Updates the average battery current obtained from the
+ * coulomb counter.
+ */
+static void ab5500_fg_acc_cur_work(struct work_struct *work)
+{
+ int val, raw_val, sample;
+ int ret;
+ u8 low, med, high, cnt_low, cnt_high;
+
+ struct ab5500_fg *di = container_of(work,
+ struct ab5500_fg, fg_acc_cur_work.work);
+
+ if (!di->flags.fg_enabled) {
+ /* Power-up the CC */
+ ab5500_fg_coulomb_counter(di, true);
+ msleep(250);
+ }
+ mutex_lock(&di->cc_lock);
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_C,
+ EN_READOUT_MASK, EN_READOUT);
+ if (ret < 0)
+ goto exit;
+ /* If charging read charging registers for accumulated values */
+ if (di->flags.charging) {
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ ACC_RESET_ON_READ, EN_ACC_RESET_ON_READ);
+ if (ret < 0)
+ goto exit;
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_CH0, &low);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_CH1, &med);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_CH2, &high);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT0, &cnt_low);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT1, &cnt_high);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ ACC_RESET_ON_READ, RESET);
+ if (ret < 0)
+ goto exit;
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work,
+ di->bat->interval_charging * HZ);
+ } else { /* discharging */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ ACC_RESET_ON_READ, EN_ACC_RESET_ON_READ);
+ if (ret < 0)
+ goto exit;
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_DIS_CH0, &low);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_DIS_CH1, &med);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_DIS_CH2, &high);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT0, &cnt_low);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT1, &cnt_high);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ ACC_RESET_ON_READ, RESET);
+ if (ret < 0)
+ goto exit;
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work,
+ di->bat->interval_not_charging * HZ);
+ }
+ di->fg_samples = (cnt_low | (cnt_high << 8));
+ /*
+ * TODO: Workaround due to the hardware issue that accumulator is not
+ * reset after setting reset_on_read bit and reading the accumulator
+ * Registers.
+ */
+ if (prev_samples > di->fg_samples) {
+ /* overflow has occurred */
+ sample = (0xFFFF - prev_samples) + di->fg_samples;
+ } else
+ sample = di->fg_samples - prev_samples;
+ prev_samples = di->fg_samples;
+ di->fg_samples = sample;
+ val = (low | (med << 8) | (high << 16));
+ /*
+ * TODO: Workaround due to the hardware issue that accumulator is not
+ * reset after setting reset_on_read bit and reading the accumulator
+ * Registers.
+ */
+ if (prev_val > val)
+ raw_val = (0xFFFFFF - prev_val) + val;
+ else
+ raw_val = val - prev_val;
+ prev_val = val;
+ val = raw_val;
+
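+ /*
+ * Each accumulated LSB corresponds to 0.77 mA over one sample period,
+ * so the average current is the accumulated count scaled by
+ * FG_LSB_IN_MA and divided by the number of samples; the accumulated
+ * charge is the count scaled by QLSB_NANO_AMP_HOURS_X100.
+ */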
+ if (di->fg_samples) {
+ di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X100)/100000;
+ di->avg_curr = (val * FG_LSB_IN_MA) / (di->fg_samples * 1000);
+ } else
+ dev_err(di->dev,
+ "samples is zero, using previous calculated average current\n");
+ di->flags.conv_done = true;
+ di->calib_state = AB5500_FG_CALIB_END;
+
+ mutex_unlock(&di->cc_lock);
+
+ queue_work(di->fg_wq, &di->fg_work);
+
+ return;
+exit:
+ dev_err(di->dev,
+ "Failed to read or write gas gauge registers\n");
+ mutex_unlock(&di->cc_lock);
+ queue_work(di->fg_wq, &di->fg_work);
+}
+
+/**
+ * ab5500_fg_bat_voltage() - get battery voltage
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns battery voltage(on success) else error code
+ */
+static int ab5500_fg_bat_voltage(struct ab5500_fg *di)
+{
+ int vbat;
+ static int prev;
+
+ vbat = ab5500_gpadc_convert(di->gpadc, MAIN_BAT_V);
+ if (vbat < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value\n",
+ __func__);
+ return prev;
+ }
+
+ prev = vbat;
+ return vbat;
+}
+
+/**
+ * ab5500_fg_volt_to_capacity() - Voltage based capacity
+ * @di: pointer to the ab5500_fg structure
+ * @voltage: The voltage to convert to a capacity
+ *
+ * Returns battery capacity in per mille based on voltage
+ */
+static int ab5500_fg_volt_to_capacity(struct ab5500_fg *di, int voltage)
+{
+ int i, tbl_size;
+ struct abx500_v_to_cap *tbl;
+ int cap = 0;
+
+ tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl;
+ tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements;
+
+ /* Stop before the last entry since tbl[i + 1] is dereferenced */
+ for (i = 0; i < tbl_size - 1; ++i) {
+ if (di->vbat < tbl[i].voltage && di->vbat > tbl[i+1].voltage)
+ di->v_to_cap = tbl[i].capacity;
+ }
+
+ for (i = 0; i < tbl_size; ++i) {
+ if (voltage > tbl[i].voltage)
+ break;
+ }
+
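+ /*
+ * Linearly interpolate between the two surrounding table entries;
+ * capacities are stored in percent, so scale by 10 to get per mille.
+ */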
+ if ((i > 0) && (i < tbl_size)) {
+ cap = interpolate(voltage,
+ tbl[i].voltage,
+ tbl[i].capacity * 10,
+ tbl[i-1].voltage,
+ tbl[i-1].capacity * 10);
+ } else if (i == 0) {
+ cap = 1000;
+ } else {
+ cap = 0;
+ }
+
+ dev_dbg(di->dev, "%s Vbat: %d, Cap: %d per mille",
+ __func__, voltage, cap);
+
+ return cap;
+}
+
+/**
+ * ab5500_fg_uncomp_volt_to_capacity() - Uncompensated voltage based capacity
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is not compensated
+ * for the voltage drop due to the load
+ */
+static int ab5500_fg_uncomp_volt_to_capacity(struct ab5500_fg *di)
+{
+ di->vbat = ab5500_fg_bat_voltage(di);
+ return ab5500_fg_volt_to_capacity(di, di->vbat);
+}
+
+/**
+ * ab5500_fg_load_comp_volt_to_capacity() - Load compensated voltage based capacity
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is load compensated
+ * for the voltage drop
+ */
+static int ab5500_fg_load_comp_volt_to_capacity(struct ab5500_fg *di)
+{
+ int vbat_comp;
+
+ di->inst_curr = ab5500_fg_inst_curr(di);
+ di->vbat = ab5500_fg_bat_voltage(di);
+
+ /* Use Ohms law to get the load compensated voltage */
+ vbat_comp = di->vbat - (di->inst_curr *
+ di->bat->bat_type[di->bat->batt_id].battery_resistance) / 1000;
+
+ dev_dbg(di->dev, "%s Measured Vbat: %dmV,Compensated Vbat %dmV, "
+ "R: %dmOhm, Current: %dmA\n",
+ __func__,
+ di->vbat,
+ vbat_comp,
+ di->bat->bat_type[di->bat->batt_id].battery_resistance,
+ di->inst_curr);
+
+ return ab5500_fg_volt_to_capacity(di, vbat_comp);
+}
+
+/**
+ * ab5500_fg_convert_mah_to_permille() - Capacity in mAh to permille
+ * @di: pointer to the ab5500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in permille
+ */
+static int ab5500_fg_convert_mah_to_permille(struct ab5500_fg *di, int cap_mah)
+{
+ return (cap_mah * 1000) / di->bat_cap.max_mah_design;
+}
+
+/**
+ * ab5500_fg_convert_permille_to_mah() - Capacity in permille to mAh
+ * @di: pointer to the ab5500_fg structure
+ * @cap_pm: capacity in permille
+ *
+ * Converts capacity in permille to capacity in mAh
+ */
+static int ab5500_fg_convert_permille_to_mah(struct ab5500_fg *di, int cap_pm)
+{
+ return cap_pm * di->bat_cap.max_mah_design / 1000;
+}
+
+/**
+ * ab5500_fg_convert_mah_to_uwh() - Capacity in mAh to uWh
+ * @di: pointer to the ab5500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in uWh
+ */
+static int ab5500_fg_convert_mah_to_uwh(struct ab5500_fg *di, int cap_mah)
+{
+ u64 div_res;
+ u32 div_rem;
+
+ div_res = ((u64) cap_mah) * ((u64) di->vbat_nom);
+ div_rem = do_div(div_res, 1000);
+
+ /* Make sure to round upwards if necessary */
+ if (div_rem >= 1000 / 2)
+ div_res++;
+
+ return (int) div_res;
+}
+
+/**
+ * ab5500_fg_calc_cap_charging() - Calculate remaining capacity while charging
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and the FG
+ * accumulator register value. The filter is filled with this capacity
+ */
+static int ab5500_fg_calc_cap_charging(struct ab5500_fg *di)
+{
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+
+ /*
+ * We force capacity to 100% as long as the algorithm
+ * reports that it's full.
+ */
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design ||
+ di->flags.fully_charged)
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+ ab5500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ /* We need to update battery voltage and inst current when charging */
+ di->vbat = ab5500_fg_bat_voltage(di);
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab5500_fg_calc_cap_discharge_voltage() - Capacity in discharge with voltage
+ * @di: pointer to the ab5500_fg structure
+ * @comp: if voltage should be load compensated before capacity calc
+ *
+ * Return the capacity in mAh based on the battery voltage. The voltage can
+ * either be load compensated or not. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab5500_fg_calc_cap_discharge_voltage(struct ab5500_fg *di, bool comp)
+{
+ int permille, mah;
+
+ if (comp)
+ permille = ab5500_fg_load_comp_volt_to_capacity(di);
+ else
+ permille = ab5500_fg_uncomp_volt_to_capacity(di);
+
+ mah = ab5500_fg_convert_permille_to_mah(di, permille);
+
+ di->bat_cap.mah = ab5500_fg_add_cap_sample(di, mah);
+ di->bat_cap.permille =
+ ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab5500_fg_calc_cap_discharge_fg() - Capacity in discharge with FG
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and the FG
+ * accumulator register value. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab5500_fg_calc_cap_discharge_fg(struct ab5500_fg *di)
+{
+ int permille_volt, permille;
+
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design)
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+ /*
+ * Check against the voltage based capacity. It cannot be lower
+ * than what the uncompensated voltage says.
+ */
+ permille = ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ permille_volt = ab5500_fg_uncomp_volt_to_capacity(di);
+
+ if (permille < permille_volt) {
+ di->bat_cap.permille = permille_volt;
+ di->bat_cap.mah = ab5500_fg_convert_permille_to_mah(di,
+ di->bat_cap.permille);
+
+ dev_dbg(di->dev, "%s voltage based: perm %d perm_volt %d\n",
+ __func__,
+ permille,
+ permille_volt);
+
+ ab5500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ } else {
+ ab5500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ }
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab5500_fg_capacity_level() - Get the battery capacity level
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Get the battery capacity level based on the capacity in percent
+ */
+static int ab5500_fg_capacity_level(struct ab5500_fg *di)
+{
+ int ret, percent;
+
+ percent = di->bat_cap.permille / 10;
+
+ if (percent <= di->bat->cap_levels->critical ||
+ di->flags.low_bat)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else if (percent <= di->bat->cap_levels->low)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (percent <= di->bat->cap_levels->normal)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ else if (percent <= di->bat->cap_levels->high)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
+ else
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+
+ return ret;
+}
+
+/**
+ * ab5500_fg_check_capacity_limits() - Check if capacity has changed
+ * @di: pointer to the ab5500_fg structure
+ * @init: capacity is allowed to go up in init mode
+ *
+ * Check if capacity or capacity limit has changed and notify the system
+ * about it using the power_supply framework
+ */
+static void ab5500_fg_check_capacity_limits(struct ab5500_fg *di, bool init)
+{
+ bool changed = false;
+
+ di->bat_cap.level = ab5500_fg_capacity_level(di);
+
+ if (di->bat_cap.level != di->bat_cap.prev_level) {
+ /*
+ * We do not allow reported capacity level to go up
+ * unless we're charging or if we're in init
+ */
+ if (!(!di->flags.charging && di->bat_cap.level >
+ di->bat_cap.prev_level) || init) {
+ dev_dbg(di->dev, "level changed from %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ di->bat_cap.prev_level = di->bat_cap.level;
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "level not allowed to go up "
+ "since no charger is connected: %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ }
+ }
+
+ /*
+ * If we have received the LOW_BAT IRQ, set capacity to 0 to initiate
+ * shutdown
+ */
+ if (di->flags.low_bat) {
+ dev_dbg(di->dev, "Battery low, set capacity to 0\n");
+ di->bat_cap.prev_percent = 0;
+ di->bat_cap.permille = 0;
+ di->bat_cap.prev_mah = 0;
+ di->bat_cap.mah = 0;
+ changed = true;
+ } else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) {
+ if (di->bat_cap.permille / 10 == 0) {
+ /*
+ * We will not report 0% unless we've got
+ * the LOW_BAT IRQ, no matter what the FG
+ * algorithm says.
+ */
+ di->bat_cap.prev_percent = 1;
+ di->bat_cap.permille = 1;
+ di->bat_cap.prev_mah = 1;
+ di->bat_cap.mah = 1;
+
+ changed = true;
+ } else if (!(!di->flags.charging &&
+ (di->bat_cap.permille / 10) >
+ di->bat_cap.prev_percent) || init) {
+ /*
+ * We do not allow reported capacity to go up
+ * unless we're charging or if we're in init
+ */
+ dev_dbg(di->dev,
+ "capacity changed from %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_mah = di->bat_cap.mah;
+
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "capacity not allowed to go up since "
+ "no charger is connected: %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ }
+ }
+
+ if (changed)
+ power_supply_changed(&di->fg_psy);
+
+}
+
+static void ab5500_fg_charge_state_to(struct ab5500_fg *di,
+ enum ab5500_fg_charge_state new_state)
+{
+ dev_dbg(di->dev, "Charge state from %d [%s] to %d [%s]\n",
+ di->charge_state,
+ charge_state[di->charge_state],
+ new_state,
+ charge_state[new_state]);
+
+ di->charge_state = new_state;
+}
+
+static void ab5500_fg_discharge_state_to(struct ab5500_fg *di,
+ enum ab5500_fg_charge_state new_state)
+{
+ dev_dbg(di->dev, "Disharge state from %d [%s] to %d [%s]\n",
+ di->discharge_state,
+ discharge_state[di->discharge_state],
+ new_state,
+ discharge_state[new_state]);
+
+ di->discharge_state = new_state;
+}
+
+/**
+ * ab5500_fg_algorithm_charging() - FG algorithm for when charging
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're charging
+ */
+static void ab5500_fg_algorithm_charging(struct ab5500_fg *di)
+{
+ /*
+ * If we change to discharge mode
+ * we should start with recovery
+ */
+ if (di->discharge_state != AB5500_FG_DISCHARGE_INIT_RECOVERY)
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_INIT_RECOVERY);
+
+ switch (di->charge_state) {
+ case AB5500_FG_CHARGE_INIT:
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_charging);
+
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_READOUT);
+
+ break;
+
+ case AB5500_FG_CHARGE_READOUT:
+ /*
+ * Read the FG and calculate the new capacity
+ */
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ ab5500_fg_calc_cap_charging(di);
+
+ break;
+
+ default:
+ break;
+ }
+
+ /* Check capacity limits */
+ ab5500_fg_check_capacity_limits(di, false);
+}
+
+/**
+ * ab5500_fg_algorithm_discharging() - FG algorithm for when discharging
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're discharging
+ */
+static void ab5500_fg_algorithm_discharging(struct ab5500_fg *di)
+{
+ int sleep_time;
+
+ /* If we change to charge mode we should start with init */
+ if (di->charge_state != AB5500_FG_CHARGE_INIT)
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT);
+
+ switch (di->discharge_state) {
+ case AB5500_FG_DISCHARGE_INIT:
+ /* We use the FG IRQ to work on */
+ di->init_cnt = 0;
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_INITMEASURING);
+
+ /* Intentional fallthrough */
+ case AB5500_FG_DISCHARGE_INITMEASURING:
+ /*
+ * Discard a number of samples during startup.
+ * After that, use compensated voltage for a few
+ * samples to get an initial capacity.
+ * Then go to READOUT
+ */
+ sleep_time = di->bat->fg_params->init_timer;
+
+ /* Discard the first [x] seconds */
+ if (di->init_cnt >
+ di->bat->fg_params->init_discard_time) {
+
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+
+ ab5500_fg_check_capacity_limits(di, true);
+ }
+
+ di->init_cnt += sleep_time;
+ if (di->init_cnt >
+ di->bat->fg_params->init_total_time) {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_READOUT);
+ }
+
+ break;
+
+ case AB5500_FG_DISCHARGE_INIT_RECOVERY:
+ di->recovery_cnt = 0;
+ di->recovery_needed = true;
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_RECOVERY);
+
+ /* Intentional fallthrough */
+
+ case AB5500_FG_DISCHARGE_RECOVERY:
+ sleep_time = di->bat->fg_params->recovery_sleep_timer;
+
+ /*
+ * We should check the power consumption
+ * If low, go to READOUT (after x min) or
+ * RECOVERY_SLEEP if time left.
+ * If high, go to READOUT
+ */
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ if (ab5500_fg_is_low_curr(di, di->inst_curr)) {
+ if (di->recovery_cnt >
+ di->bat->fg_params->recovery_total_time) {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_READOUT);
+ di->recovery_needed = false;
+ } else {
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work,
+ sleep_time * HZ);
+ }
+ di->recovery_cnt += sleep_time;
+ } else {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_READOUT);
+ }
+
+ break;
+
+ case AB5500_FG_DISCHARGE_READOUT:
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ if (ab5500_fg_is_low_curr(di, di->inst_curr)) {
+ /* Detect mode change */
+ if (di->high_curr_mode) {
+ di->high_curr_mode = false;
+ di->high_curr_cnt = 0;
+ }
+
+ if (di->recovery_needed) {
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_RECOVERY);
+
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work,
+ 0);
+
+ break;
+ }
+
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+ } else {
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ /* Detect mode change */
+ if (!di->high_curr_mode) {
+ di->high_curr_mode = true;
+ di->high_curr_cnt = 0;
+ }
+
+ di->high_curr_cnt +=
+ di->bat->fg_params->accu_high_curr;
+ if (di->high_curr_cnt >
+ di->bat->fg_params->high_curr_time)
+ di->recovery_needed = true;
+
+ ab5500_fg_calc_cap_discharge_fg(di);
+ }
+
+ ab5500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ case AB5500_FG_DISCHARGE_WAKEUP:
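+ /*
+ * Entered from resume: restart the CC, take a voltage based
+ * capacity reading and go back to normal READOUT.
+ */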
+ ab5500_fg_coulomb_counter(di, true);
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ /* Re-program number of samples set above */
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_READOUT);
+
+ ab5500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ab5500_fg_algorithm_calibrate() - Internal coulomb counter offset calibration
+ * @di: pointer to the ab5500_fg structure
+ *
+ */
+static void ab5500_fg_algorithm_calibrate(struct ab5500_fg *di)
+{
+ int ret;
+
+ switch (di->calib_state) {
+ case AB5500_FG_CALIB_INIT:
+ dev_dbg(di->dev, "Calibration ongoing...\n");
+ /* TODO: For Cut 1.1 no calibration */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ FG_ACC_RESET_ON_READ_MASK, FG_ACC_RESET_ON_READ);
+ if (ret)
+ goto err;
+ di->calib_state = AB5500_FG_CALIB_WAIT;
+ break;
+ case AB5500_FG_CALIB_END:
+ di->flags.calibrate = false;
+ dev_dbg(di->dev, "Calibration done...\n");
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ break;
+ case AB5500_FG_CALIB_WAIT:
+ dev_dbg(di->dev, "Calibration WFI\n");
+ default:
+ break;
+ }
+ return;
+err:
+ /* Something went wrong, don't calibrate then */
+ dev_err(di->dev, "failed to calibrate the CC\n");
+ di->flags.calibrate = false;
+ di->calib_state = AB5500_FG_CALIB_INIT;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+}
+
+/**
+ * ab5500_fg_algorithm() - Entry point for the FG algorithm
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Entry point for the battery capacity calculation state machine
+ */
+static void ab5500_fg_algorithm(struct ab5500_fg *di)
+{
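+ /*
+ * Run the coulomb counter offset calibration first; once it is
+ * done the charging/discharging state machines take over.
+ */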
+ if (di->flags.calibrate)
+ ab5500_fg_algorithm_calibrate(di);
+ else {
+ if (di->flags.charging)
+ ab5500_fg_algorithm_charging(di);
+ else
+ ab5500_fg_algorithm_discharging(di);
+ }
+
+ dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d "
+ "%d %d %d %d %d %d %d\n",
+ di->bat_cap.max_mah_design,
+ di->bat_cap.mah,
+ di->bat_cap.permille,
+ di->bat_cap.level,
+ di->bat_cap.prev_mah,
+ di->bat_cap.prev_percent,
+ di->bat_cap.prev_level,
+ di->vbat,
+ di->inst_curr,
+ di->avg_curr,
+ di->accu_charge,
+ di->flags.charging,
+ di->charge_state,
+ di->discharge_state,
+ di->high_curr_mode,
+ di->recovery_needed);
+}
+
+/**
+ * ab5500_fg_periodic_work() - Run the FG state machine periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for periodic work
+ */
+static void ab5500_fg_periodic_work(struct work_struct *work)
+{
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg,
+ fg_periodic_work.work);
+
+ if (di->init_capacity) {
+ /* A dummy read that will return 0 */
+ di->inst_curr = ab5500_fg_inst_curr(di);
+ /* Get an initial capacity calculation */
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+ ab5500_fg_check_capacity_limits(di, true);
+ di->init_capacity = false;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ } else
+ ab5500_fg_algorithm(di);
+}
+
+/**
+ * ab5500_fg_low_bat_work() - Check LOW_BAT condition
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the LOW_BAT condition
+ */
+static void ab5500_fg_low_bat_work(struct work_struct *work)
+{
+ int vbat;
+
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg,
+ fg_low_bat_work.work);
+
+ vbat = ab5500_fg_bat_voltage(di);
+
+ /* Check if LOW_BAT still fulfilled */
+ if (vbat < di->bat->fg_params->lowbat_threshold) {
+ di->flags.low_bat = true;
+ dev_warn(di->dev, "Battery voltage still LOW\n");
+
+ /*
+ * We need to re-schedule this check to be able to detect
+ * if the voltage increases again during charging
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ power_supply_changed(&di->fg_psy);
+ } else {
+ di->flags.low_bat = false;
+ dev_warn(di->dev, "Battery voltage OK again\n");
+ power_supply_changed(&di->fg_psy);
+ }
+
+ /* This is needed to dispatch LOW_BAT */
+ ab5500_fg_check_capacity_limits(di, false);
+
+ /* Set this flag to check if LOW_BAT IRQ still occurs */
+ di->flags.low_bat_delay = false;
+}
+
+/**
+ * ab5500_fg_instant_work() - Run the FG state machine instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for instant work
+ */
+static void ab5500_fg_instant_work(struct work_struct *work)
+{
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg, fg_work);
+
+ ab5500_fg_algorithm(di);
+}
+
+/**
+ * ab5500_fg_get_property() - get the fg properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * fg properties by reading the sysfs files.
+ * voltage_now: battery voltage
+ * current_now: battery instant current
+ * current_avg: battery average current
+ * charge_full_design: capacity where battery is considered full
+ * charge_now: battery capacity in nAh
+ * capacity: capacity in percent
+ * capacity_level: capacity level
+ *
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab5500_fg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab5500_fg *di;
+
+ di = to_ab5500_fg_device_info(psy);
+
+ /*
+ * If battery is identified as unknown and charging of unknown
+ * batteries is disabled, we always report 100% capacity and
+ * capacity level UNKNOWN, since we can't calculate
+ * remaining capacity
+ */
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (di->flags.bat_ovv)
+ val->intval = 47500000;
+ else {
+ di->vbat = ab5500_gpadc_convert
+ (di->gpadc, MAIN_BAT_V);
+ val->intval = di->vbat * 1000;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ di->inst_curr = ab5500_fg_inst_curr(di);
+ val->intval = di->inst_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = di->avg_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah_design);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ else
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.prev_mah);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = di->bat_cap.max_mah_design;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = di->bat_cap.max_mah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = di->bat_cap.max_mah;
+ else
+ val->intval = di->bat_cap.prev_mah;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = 100;
+ else
+ val->intval = di->bat_cap.prev_percent;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ else
+ val->intval = di->bat_cap.prev_level;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ab5500_fg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab5500_fg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab5500_fg_device_info(psy);
+
+ /*
+ * For every psy that lists this supply's name
+ * in its supplied_to array
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
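+ /*
+ * Charger status changes update the charging/fully_charged
+ * flags and trigger an immediate run of the FG algorithm.
+ */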
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ switch (ret.intval) {
+ case POWER_SUPPLY_STATUS_UNKNOWN:
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ if (!di->flags.charging)
+ break;
+ di->flags.charging = false;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_FULL:
+ if (di->flags.fully_charged)
+ break;
+ di->flags.fully_charged = true;
+ /* Save current capacity as maximum */
+ di->bat_cap.max_mah = di->bat_cap.mah;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ if (di->flags.charging)
+ break;
+ di->flags.charging = true;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ };
+ default:
+ break;
+ };
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (ret.intval)
+ di->flags.batt_unknown = false;
+ else
+ di->flags.batt_unknown = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab5500_fg_init_hw_registers() - Set up FG related registers
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Set up battery OVV, low battery voltage registers
+ */
+static int ab5500_fg_init_hw_registers(struct ab5500_fg *di)
+{
+ int ret;
+ struct adc_auto_input *auto_ip;
+
+ auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL);
+ if (!auto_ip) {
+ dev_err(di->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
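+ /*
+ * Request automatic MAIN_BAT_V conversions every 500 ms, using the
+ * low and over-voltage battery thresholds as trigger limits.
+ */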
+ auto_ip->mux = MAIN_BAT_V;
+ auto_ip->freq = MS500;
+ auto_ip->min = di->bat->fg_params->lowbat_threshold;
+ auto_ip->max = di->bat->fg_params->overbat_threshold;
+ auto_ip->auto_adc_callback = ab5500_fg_bat_v_trig;
+ di->gpadc_auto = auto_ip;
+ ret = ab5500_gpadc_convert_auto(di->gpadc, di->gpadc_auto);
+ if (ret)
+ dev_err(di->dev,
+ "failed to set auto trigger for battery votlage\n");
+ /* Set the End Of Charge current */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_EOC, EOC_52_mA);
+ return ret;
+}
+
+static int ab5500_fg_bat_v_trig(int mux)
+{
+ struct ab5500_fg *di = ab5500_fg_get();
+
+ di->vbat = ab5500_gpadc_convert(di->gpadc, MAIN_BAT_V);
+
+ /* check if the battery voltage is below low threshold */
+ if (di->vbat < di->bat->fg_params->lowbat_threshold) {
+ dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
+ di->flags.low_bat_delay = true;
+ /*
+ * Start a timer to check LOW_BAT again after some time
+ * This is done to avoid shutdown on single voltage dips
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ power_supply_changed(&di->fg_psy);
+ }
+ /* check if the battery voltage is above OVV */
+ else if (di->vbat > di->bat->fg_params->overbat_threshold) {
+ dev_warn(di->dev, "Battery OVV\n");
+ di->flags.bat_ovv = true;
+
+ power_supply_changed(&di->fg_psy);
+ } else
+ dev_err(di->dev,
+ "Invalid gpadc auto trigger for battery voltage\n");
+
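+ /* Free the used auto trigger and re-arm battery voltage monitoring */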
+ kfree(di->gpadc_auto);
+ ab5500_fg_init_hw_registers(di);
+ return 0;
+}
+
+/**
+ * ab5500_fg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is the entry point of the pointer external_power_changed
+ * of the structure power_supply.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void ab5500_fg_external_power_changed(struct power_supply *psy)
+{
+ struct ab5500_fg *di = to_ab5500_fg_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->fg_psy, ab5500_fg_get_ext_psy_data);
+}
+
+/**
+ * ab5500_fg_reinit_work() - work to reset the FG algorithm
+ * @work: pointer to the work_struct structure
+ *
+ * Used to reset the current battery capacity to be able to
+ * retrigger a new voltage based capacity calculation. For
+ * test and verification purposes.
+ */
+static void ab5500_fg_reinit_work(struct work_struct *work)
+{
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg,
+ fg_reinit_work.work);
+
+ if (di->flags.calibrate == false) {
+ dev_dbg(di->dev, "Resetting FG state machine to init.\n");
+ ab5500_fg_clear_cap_samples(di);
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT);
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_INIT);
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+ } else {
+ dev_err(di->dev,
+ "Residual offset calibration ongoing retrying..\n");
+ /* Wait one second until next try*/
+ queue_delayed_work(di->fg_wq, &di->fg_reinit_work,
+ round_jiffies(1));
+ }
+}
+
+/**
+ * ab5500_fg_reinit() - forces FG algorithm to reinitialize with current values
+ *
+ * This function can be used to force the FG algorithm to recalculate a new
+ * voltage based battery capacity.
+ */
+void ab5500_fg_reinit(void)
+{
+ struct ab5500_fg *di = ab5500_fg_get();
+ /* The user won't be notified if a null pointer is returned. */
+ if (di != NULL)
+ queue_delayed_work(di->fg_wq, &di->fg_reinit_work, 0);
+}
+
+#if defined(CONFIG_PM)
+static int ab5500_fg_resume(struct platform_device *pdev)
+{
+ struct ab5500_fg *di = platform_get_drvdata(pdev);
+
+ /*
+ * Change state if we're not charging. If we're charging we will wake
+ * up on the FG IRQ
+ */
+ if (!di->flags.charging) {
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_WAKEUP);
+ queue_work(di->fg_wq, &di->fg_work);
+ }
+
+ return 0;
+}
+
+static int ab5500_fg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab5500_fg *di = platform_get_drvdata(pdev);
+
+ flush_delayed_work(&di->fg_periodic_work);
+
+ /*
+ * If the FG is enabled we will disable it before going to suspend
+ * only if we're not charging
+ */
+ if (di->flags.fg_enabled && !di->flags.charging)
+ ab5500_fg_coulomb_counter(di, false);
+
+ return 0;
+}
+#else
+#define ab5500_fg_suspend NULL
+#define ab5500_fg_resume NULL
+#endif
+
+static int __devexit ab5500_fg_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab5500_fg *di = platform_get_drvdata(pdev);
+
+ /* Disable coulomb counter */
+ ret = ab5500_fg_coulomb_counter(di, false);
+ if (ret)
+ dev_err(di->dev, "failed to disable coulomb counter\n");
+
+ destroy_workqueue(di->fg_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->fg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di->gpadc_auto);
+ kfree(di);
+ return ret;
+}
+
+static int __devinit ab5500_fg_probe(struct platform_device *pdev)
+{
+ struct abx500_bm_plat_data *plat_data;
+ int ret = 0;
+
+ struct ab5500_fg *di =
+ kzalloc(sizeof(struct ab5500_fg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ mutex_init(&di->cc_lock);
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab5500_gpadc_get("ab5500-adc.0");
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->fg;
+ di->bat = plat_data->battery;
+
+ /* get fg specific platform data */
+ if (!di->pdata) {
+ dev_err(di->dev, "no fg platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ /* powerup fg to start sampling */
+ ab5500_fg_coulomb_counter(di, true);
+
+ di->fg_psy.name = "ab5500_fg";
+ di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->fg_psy.properties = ab5500_fg_props;
+ di->fg_psy.num_properties = ARRAY_SIZE(ab5500_fg_props);
+ di->fg_psy.get_property = ab5500_fg_get_property;
+ di->fg_psy.supplied_to = di->pdata->supplied_to;
+ di->fg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->fg_psy.external_power_changed = ab5500_fg_external_power_changed;
+
+ di->bat_cap.max_mah_design = MILLI_TO_MICRO *
+ di->bat->bat_type[di->bat->batt_id].charge_full_design;
+
+ di->bat_cap.max_mah = di->bat_cap.max_mah_design;
+
+ di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage;
+
+ di->init_capacity = true;
+
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT);
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_INIT);
+
+ /* Create a work queue for running the FG algorithm */
+ di->fg_wq = create_singlethread_workqueue("ab5500_fg_wq");
+ if (di->fg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for running the fg algorithm instantly */
+ INIT_WORK(&di->fg_work, ab5500_fg_instant_work);
+
+ /* Init work for getting the battery accumulated current */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_acc_cur_work,
+ ab5500_fg_acc_cur_work);
+
+ /* Init work for reinitialising the fg algorithm */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+ ab5500_fg_reinit_work);
+
+ /* Work delayed Queue to run the state machine */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+ ab5500_fg_periodic_work);
+
+ /* Work to check low battery condition */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+ ab5500_fg_low_bat_work);
+
+ list_add_tail(&di->node, &ab5500_fg_list);
+
+ /* Consider battery unknown until we're informed otherwise */
+ di->flags.batt_unknown = true;
+
+ /* Register FG power supply class */
+ ret = power_supply_register(di->dev, &di->fg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register FG psy\n");
+ goto free_fg_wq;
+ }
+
+ /* Initialize OVV, and other registers */
+ ret = ab5500_fg_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize registers\n");
+ goto pow_unreg;
+ }
+
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+
+ /* Initialize the avg current timer */
+ init_timer(&di->avg_current_timer);
+ di->avg_current_timer.function = ab5500_fg_acc_cur_timer_expired;
+ di->avg_current_timer.data = (unsigned long) di;
+ di->avg_current_timer.expires = 60 * HZ;
+ if (!timer_pending(&di->avg_current_timer))
+ add_timer(&di->avg_current_timer);
+ else
+ mod_timer(&di->avg_current_timer, 60 * HZ);
+
+ platform_set_drvdata(pdev, di);
+
+ /* Calibrate the fg first time */
+ di->flags.calibrate = true;
+ di->calib_state = AB5500_FG_CALIB_INIT;
+ /* Run the FG algorithm */
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work,
+ FG_PERIODIC_START_INTERVAL);
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work,
+ FG_PERIODIC_START_INTERVAL);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+pow_unreg:
+ power_supply_unregister(&di->fg_psy);
+free_fg_wq:
+ destroy_workqueue(di->fg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab5500_fg_driver = {
+ .probe = ab5500_fg_probe,
+ .remove = __devexit_p(ab5500_fg_remove),
+ .suspend = ab5500_fg_suspend,
+ .resume = ab5500_fg_resume,
+ .driver = {
+ .name = "ab5500-fg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_fg_init(void)
+{
+ return platform_driver_register(&ab5500_fg_driver);
+}
+
+static void __exit ab5500_fg_exit(void)
+{
+ platform_driver_unregister(&ab5500_fg_driver);
+}
+
+subsys_initcall_sync(ab5500_fg_init);
+module_exit(ab5500_fg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab5500-fg");
+MODULE_DESCRIPTION("AB5500 Fuel Gauge driver");
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
new file mode 100644
index 00000000000..271263050b2
--- /dev/null
+++ b/drivers/power/ab8500_btemp.c
@@ -0,0 +1,1152 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Battery temperature driver for AB8500
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: Karl Komierowski <karl.komierowski@stericsson.com>
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/jiffies.h>
+
+#define VTVOUT_V 1800
+
+#define BTEMP_THERMAL_LOW_LIMIT -10
+#define BTEMP_THERMAL_MED_LIMIT 0
+#define BTEMP_THERMAL_HIGH_LIMIT_52 52
+#define BTEMP_THERMAL_HIGH_LIMIT_57 57
+#define BTEMP_THERMAL_HIGH_LIMIT_62 62
+
+#define BTEMP_BATCTRL_CURR_SRC_7UA 7
+#define BTEMP_BATCTRL_CURR_SRC_20UA 20
+
+#define to_ab8500_btemp_device_info(x) container_of((x), \
+ struct ab8500_btemp, btemp_psy)
+
+/**
+ * struct ab8500_btemp_interrupts - ab8500 interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the ISR
+ */
+struct ab8500_btemp_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab8500_btemp_events {
+ bool batt_rem;
+ bool btemp_high;
+ bool btemp_medhigh;
+ bool btemp_lowmed;
+ bool btemp_low;
+ bool ac_conn;
+ bool usb_conn;
+};
+
+struct ab8500_btemp_ranges {
+ int btemp_high_limit;
+ int btemp_med_limit;
+ int btemp_low_limit;
+};
+
+/**
+ * struct ab8500_btemp - ab8500 BTEMP device information
+ * @dev: Pointer to the structure device
+ * @node: List of AB8500 BTEMPs, hence prepared for reentrance
+ * @chip_id: Chip-Id of the AB8500
+ * @curr_source: What current source we use, in uA
+ * @bat_temp: Battery temperature in degrees Celsius
+ * @prev_bat_temp: Last dispatched battery temperature
+ * @parent: Pointer to the struct ab8500
+ * @gpadc: Pointer to the struct gpadc
+ * @fg: Pointer to the struct fg
+ * @pdata: Pointer to the ab8500_btemp platform data
+ * @bat: Pointer to the ab8500_bm platform data
+ * @btemp_psy: Structure for BTEMP specific battery properties
+ * @events: Structure for information about events triggered
+ * @btemp_ranges: Battery temperature range structure
+ * @btemp_wq: Work queue for measuring the temperature periodically
+ * @btemp_periodic_work: Work for measuring the temperature periodically
+ */
+struct ab8500_btemp {
+ struct device *dev;
+ struct list_head node;
+ u8 chip_id;
+ int curr_source;
+ int bat_temp;
+ int prev_bat_temp;
+ struct ab8500 *parent;
+ struct ab8500_gpadc *gpadc;
+ struct ab8500_fg *fg;
+ struct ab8500_btemp_platform_data *pdata;
+ struct ab8500_bm_data *bat;
+ struct power_supply btemp_psy;
+ struct ab8500_btemp_events events;
+ struct ab8500_btemp_ranges btemp_ranges;
+ struct workqueue_struct *btemp_wq;
+ struct delayed_work btemp_periodic_work;
+};
+
+/* BTEMP power supply properties */
+static enum power_supply_property ab8500_btemp_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static LIST_HEAD(ab8500_btemp_list);
+
+/**
+ * ab8500_btemp_get() - returns a reference to the primary AB8500 BTEMP
+ * (i.e. the first BTEMP in the instance list)
+ */
+struct ab8500_btemp *ab8500_btemp_get(void)
+{
+ struct ab8500_btemp *btemp;
+ btemp = list_first_entry(&ab8500_btemp_list, struct ab8500_btemp, node);
+
+ return btemp;
+}
+
+/**
+ * ab8500_btemp_batctrl_volt_to_res() - convert batctrl voltage to resistance
+ * @di: pointer to the ab8500_btemp structure
+ * @v_batctrl: measured batctrl voltage
+ * @inst_curr: measured instant current
+ *
+ * This function returns the battery resistance that is
+ * derived from the BATCTRL voltage.
+ * Returns value in Ohms.
+ */
+static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
+ int v_batctrl, int inst_curr)
+{
+ int rbs;
+
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ /*
+ * For ABB cut1.0 and 1.1 BAT_CTRL is internally
+ * connected to 1.8V through a 450k resistor
+ */
+ rbs = (450000 * (v_batctrl)) / (1800 - v_batctrl);
+ break;
+ default:
+ if (di->bat->adc_therm == ADC_THERM_BATCTRL) {
+ /*
+ * If the battery has internal NTC, we use the current
+ * source to calculate the resistance, 7uA or 20uA
+ */
+ rbs = (v_batctrl * 1000
+ - di->bat->gnd_lift_resistance * inst_curr)
+ / di->curr_source;
+ } else {
+ /*
+ * BAT_CTRL is internally
+ * connected to 1.8V through a 80k resistor
+ */
+ rbs = (80000 * (v_batctrl)) / (1800 - v_batctrl);
+ }
+ break;
+ }
+
+ return rbs;
+}
+
+/**
+ * ab8500_btemp_read_batctrl_voltage() - measure batctrl voltage
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * This function returns the voltage on BATCTRL. Returns value in mV.
+ */
+static int ab8500_btemp_read_batctrl_voltage(struct ab8500_btemp *di)
+{
+ int vbtemp;
+ static int prev;
+
+ vbtemp = ab8500_gpadc_convert(di->gpadc, BAT_CTRL);
+ if (vbtemp < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value",
+ __func__);
+ return prev;
+ }
+ prev = vbtemp;
+ return vbtemp;
+}
+
+/**
+ * ab8500_btemp_curr_source_enable() - enable/disable batctrl current source
+ * @di: pointer to the ab8500_btemp structure
+ * @enable: enable or disable the current source
+ *
+ * Enable or disable the current sources for the BatCtrl AD channel
+ */
+static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
+ bool enable)
+{
+ int curr;
+ int ret = 0;
+
+ /*
+ * BATCTRL current sources are included on AB8500 cut2.0
+ * and future versions
+ */
+ if (di->chip_id == AB8500_CUT1P0 || di->chip_id == AB8500_CUT1P1)
+ return 0;
+
+ /* Only do this for batteries with internal NTC */
+ if (di->bat->adc_therm == ADC_THERM_BATCTRL && enable) {
+ if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
+ curr = BAT_CTRL_7U_ENA;
+ else
+ curr = BAT_CTRL_20U_ENA;
+
+ dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH, FORCE_BAT_CTRL_CMP_HIGH);
+ if (ret) {
+ dev_err(di->dev, "%s failed setting cmp_force\n",
+ __func__);
+ return ret;
+ }
+
+ /*
+ * We have to wait one 32kHz cycle before enabling
+ * the current source, since ForceBatCtrlCmpHigh needs
+ * to be written in a separate cycle
+ */
+ udelay(32);
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH | curr);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+ } else if (di->bat->adc_therm == ADC_THERM_BATCTRL && !enable) {
+ dev_dbg(di->dev, "Disable BATCTRL curr source\n");
+
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+ ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+
+ /* Enable Pull-Up and comparator */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling PU and comp\n",
+ __func__);
+ goto enable_pu_comp;
+ }
+
+ /*
+ * We have to wait one 32kHz cycle before disabling
+ * ForceBatCtrlCmpHigh since this needs to be written
+ * in a separate cycle
+ */
+ udelay(32);
+
+ /* Disable 'force comparator' */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling force comp\n",
+ __func__);
+ goto disable_force_comp;
+ }
+ }
+ return ret;
+
+ /*
+ * We have to try unsetting FORCE_BAT_CTRL_CMP_HIGH one more time
+ * if we got an error above
+ */
+disable_curr_source:
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+ ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ return ret;
+ }
+enable_pu_comp:
+ /* Enable Pull-Up and comparator */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
+ BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling PU and comp\n",
+ __func__);
+ return ret;
+ }
+
+disable_force_comp:
+ /*
+ * We have to wait one 32kHz cycle before disabling
+ * ForceBatCtrlCmpHigh since this needs to be written
+ * in a separate cycle
+ */
+ udelay(32);
+
+ /* Disable 'force comparator' */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+ FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling force comp\n",
+ __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_btemp_get_batctrl_res() - get battery resistance
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * This function returns the battery pack identification resistance.
+ * Returns value in Ohms.
+ */
+static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
+{
+ int ret;
+ int batctrl = 0;
+ int res;
+ int inst_curr;
+ int i;
+ unsigned long stop_time;
+
+ /*
+ * BATCTRL current sources are included on AB8500 cut2.0
+ * and future versions
+ */
+ ret = ab8500_btemp_curr_source_enable(di, true);
+ if (ret) {
+ dev_err(di->dev, "%s curr source enabled failed\n", __func__);
+ return ret;
+ }
+
+ if (!di->fg)
+ di->fg = ab8500_fg_get();
+ if (!di->fg) {
+ dev_err(di->dev, "No fg found\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_fg_inst_curr_start(di->fg);
+
+ if (ret) {
+ dev_err(di->dev, "Failed to start current measurement\n");
+ return ret;
+ }
+
+ /*
+ * Since there is no interrupt when current measurement is done,
+ * loop for over 250 ms (250 ms is one sample conversion time
+ * with a 32.768 kHz RTC clock). Note that a stop time must be set
+ * since the ab8500_btemp_read_batctrl_voltage call can block and
+ * take an unknown amount of time to complete.
+ */
+ stop_time = jiffies + msecs_to_jiffies(250);
+ i = 0;
+ do {
+ batctrl += ab8500_btemp_read_batctrl_voltage(di);
+ i++;
+ msleep(25);
+ } while (time_after(stop_time, jiffies));
+ batctrl /= i;
+
+ ret = ab8500_fg_inst_curr_finalize(di->fg, &inst_curr);
+ if (ret) {
+ dev_err(di->dev, "Failed to finalize current measurement\n");
+ return ret;
+ }
+
+ res = ab8500_btemp_batctrl_volt_to_res(di, batctrl, inst_curr);
+
+ ret = ab8500_btemp_curr_source_enable(di, false);
+ if (ret) {
+ dev_err(di->dev, "%s curr source disable failed\n", __func__);
+ return ret;
+ }
+
+ dev_dbg(di->dev, "%s batctrl: %d res: %d inst_curr: %d\n",
+ __func__, batctrl, res, inst_curr);
+
+ return res;
+}
+
+/**
+ * ab8500_btemp_res_to_temp() - resistance to temperature
+ * @di: pointer to the ab8500_btemp structure
+ * @tbl: pointer to the resistance to temperature table
+ * @tbl_size: size of the resistance to temperature table
+ * @res: resistance to calculate the temperature from
+ *
+ * This function returns the battery temperature in degrees Celsius
+ * based on the NTC resistance.
+ */
+static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
+ const struct res_to_temp *tbl, int tbl_size, int res)
+{
+ int i, temp;
+ /*
+ * Calculate the formula for the straight line
+ * Simple interpolation if we are within
+ * the resistance table limits, extrapolate
+ * if resistance is outside the limits.
+ */
+ if (res > tbl[0].resist)
+ i = 0;
+ else if (res <= tbl[tbl_size - 1].resist)
+ i = tbl_size - 2;
+ else {
+ i = 0;
+ while (!(res <= tbl[i].resist &&
+ res > tbl[i + 1].resist))
+ i++;
+ }
+
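+ /* Linear interpolation (or extrapolation at the table ends) */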
+ temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
+ (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+ return temp;
+}
+
+/**
+ * ab8500_btemp_measure_temp() - measure battery temperature
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * Returns battery temperature (on success) else the previous temperature
+ */
+static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
+{
+ int temp;
+ static int prev;
+ int rbat, rntc, vntc;
+ u8 id;
+
+ id = di->bat->batt_id;
+
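+ /*
+ * Known batteries with an internal NTC are measured via BATCTRL,
+ * otherwise the PCB NTC on the BTEMP ball is used.
+ */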
+ if (di->bat->adc_therm == ADC_THERM_BATCTRL &&
+ id != BATTERY_UNKNOWN) {
+
+ rbat = ab8500_btemp_get_batctrl_res(di);
+ if (rbat < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n",
+ __func__);
+ /*
+ * Return out-of-range temperature so that
+ * charging is stopped
+ */
+ return BTEMP_THERMAL_LOW_LIMIT;
+ }
+
+ temp = ab8500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rbat);
+ } else {
+ vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
+ if (vntc < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed,"
+ " using previous value\n", __func__);
+ return prev;
+ }
+ /*
+ * The PCB NTC is sourced from VTVOUT via a 230kOhm
+ * resistor.
+ */
+ rntc = 230000 * vntc / (VTVOUT_V - vntc);
+
+ temp = ab8500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rntc);
+ prev = temp;
+ }
+ dev_dbg(di->dev, "Battery temperature is %d\n", temp);
+ return temp;
+}
+
+/**
+ * ab8500_btemp_id() - Identify the connected battery
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * This function tries to identify the battery by reading the ID
+ * resistor. Some brands combine the ID resistor with an NTC resistor,
+ * so the same pin is used both to identify the battery and to read
+ * its temperature.
+ */
+static int ab8500_btemp_id(struct ab8500_btemp *di)
+{
+ int res;
+ u8 i;
+
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
+ di->bat->batt_id = BATTERY_UNKNOWN;
+
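+ /* Measure the battery ID resistance via BATCTRL */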
+ res = ab8500_btemp_get_batctrl_res(di);
+ if (res < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n", __func__);
+ return -ENXIO;
+ }
+
+ /* BATTERY_UNKNOWN is defined on position 0, skip it! */
+ for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
+ if ((res <= di->bat->bat_type[i].resis_high) &&
+ (res >= di->bat->bat_type[i].resis_low)) {
+ dev_dbg(di->dev, "Battery detected on %s"
+ " low %d < res %d < high: %d"
+ " index: %d\n",
+ di->bat->adc_therm == ADC_THERM_BATCTRL ?
+ "BATCTRL" : "BATTEMP",
+ di->bat->bat_type[i].resis_low, res,
+ di->bat->bat_type[i].resis_high, i);
+
+ di->bat->batt_id = i;
+ break;
+ }
+ }
+
+ if (di->bat->batt_id == BATTERY_UNKNOWN) {
+ dev_warn(di->dev, "Battery identified as unknown"
+ ", resistance %d Ohm\n", res);
+ return -ENXIO;
+ }
+
+ /*
+ * We only have to change current source if the
+ * detected type is Type 1, else we use the 7uA source
+ */
+ if (di->bat->adc_therm == ADC_THERM_BATCTRL && di->bat->batt_id == 1) {
+ dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
+ }
+
+ return di->bat->batt_id;
+}
+
+/**
+ * ab8500_btemp_periodic_work() - Measuring the temperature periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work function for measuring the temperature periodically
+ */
+static void ab8500_btemp_periodic_work(struct work_struct *work)
+{
+ int interval;
+ struct ab8500_btemp *di = container_of(work,
+ struct ab8500_btemp, btemp_periodic_work.work);
+
+ di->bat_temp = ab8500_btemp_measure_temp(di);
+
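+ /* Only notify the framework when the temperature has changed */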
+ if (di->bat_temp != di->prev_bat_temp) {
+ di->prev_bat_temp = di->bat_temp;
+ power_supply_changed(&di->btemp_psy);
+ }
+
+ if (di->events.ac_conn || di->events.usb_conn)
+ interval = di->bat->temp_interval_chg;
+ else
+ interval = di->bat->temp_interval_nochg;
+
+ /* Schedule a new measurement */
+ queue_delayed_work(di->btemp_wq,
+ &di->btemp_periodic_work,
+ round_jiffies(interval * HZ));
+}
+
+/**
+ * ab8500_btemp_batctrlindb_handler() - battery removal detected
+ * @irq: interrupt number
+ * @_di: void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_batctrlindb_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+ dev_err(di->dev, "Battery removal detected!\n");
+
+ di->events.batt_rem = true;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_templow_handler() - battery temp lower than -10 degrees
+ * @irq: interrupt number
+ * @_di: void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_templow_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ case AB8500_CUT2P0:
+ dev_dbg(di->dev, "Ignore false btemp low irq"
+ " for ABB cut 1.0, 1.1 and 2.0\n");
+
+ break;
+ default:
+ dev_crit(di->dev, "Battery temperature lower than -10deg c\n");
+
+ di->events.btemp_low = true;
+ di->events.btemp_high = false;
+ di->events.btemp_medhigh = false;
+ di->events.btemp_lowmed = false;
+ power_supply_changed(&di->btemp_psy);
+
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_temphigh_handler() - battery temp higher than max temp
+ * @irq: interrupt number
+ * @_di: void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_temphigh_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+
+ dev_crit(di->dev, "Battery temperature is higher than MAX temp\n");
+
+ di->events.btemp_high = true;
+ di->events.btemp_medhigh = false;
+ di->events.btemp_lowmed = false;
+ di->events.btemp_low = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_lowmed_handler() - battery temp between low and medium
+ * @irq: interrupt number
+ * @_di: void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_lowmed_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+
+ dev_dbg(di->dev, "Battery temperature is between low and medium\n");
+
+ di->events.btemp_lowmed = true;
+ di->events.btemp_medhigh = false;
+ di->events.btemp_high = false;
+ di->events.btemp_low = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_medhigh_handler() - battery temp between medium and high
+ * @irq: interrupt number
+ * @_di: void pointer that has the address of ab8500_btemp
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_btemp_medhigh_handler(int irq, void *_di)
+{
+ struct ab8500_btemp *di = _di;
+
+ dev_dbg(di->dev, "Battery temperature is between medium and high\n");
+
+ di->events.btemp_medhigh = true;
+ di->events.btemp_lowmed = false;
+ di->events.btemp_high = false;
+ di->events.btemp_low = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_btemp_periodic() - Periodic temperature measurements
+ * @di: pointer to the ab8500_btemp structure
+ * @enable: enable or disable periodic temperature measurements
+ *
+ * Starts or stops periodic temperature measurements. Periodic measurements
+ * should only be done when a charger is connected.
+ */
+static void ab8500_btemp_periodic(struct ab8500_btemp *di,
+ bool enable)
+{
+ dev_dbg(di->dev, "Enable periodic temperature measurements: %d\n",
+ enable);
+ /*
+ * Make sure a new measurement is done directly by cancelling
+ * any pending work
+ */
+ cancel_delayed_work_sync(&di->btemp_periodic_work);
+
+ if (enable)
+ queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
+}
+
+/**
+ * ab8500_btemp_get_temp() - get battery temperature
+ * @di: pointer to the ab8500_btemp structure
+ *
+ * Returns battery temperature
+ */
+static int ab8500_btemp_get_temp(struct ab8500_btemp *di)
+{
+ int temp = 0;
+
+ /*
+ * The BTEMP events are not reliable on AB8500 cut2.0
+ * and prior versions
+ */
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ case AB8500_CUT2P0:
+ temp = di->bat_temp * 10;
+
+ break;
+ default:
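+ /*
+ * On later cuts the BTEMP events are trusted: clamp the
+ * reported temperature to the range indicated by the
+ * latest event.
+ */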
+ if (di->events.btemp_low) {
+ if (temp > di->btemp_ranges.btemp_low_limit)
+ temp = di->btemp_ranges.btemp_low_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else if (di->events.btemp_high) {
+ if (temp < di->btemp_ranges.btemp_high_limit)
+ temp = di->btemp_ranges.btemp_high_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else if (di->events.btemp_lowmed) {
+ if (temp > di->btemp_ranges.btemp_med_limit)
+ temp = di->btemp_ranges.btemp_med_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else if (di->events.btemp_medhigh) {
+ if (temp < di->btemp_ranges.btemp_med_limit)
+ temp = di->btemp_ranges.btemp_med_limit;
+ else
+ temp = di->bat_temp * 10;
+ } else
+ temp = di->bat_temp * 10;
+
+ break;
+ }
+ return temp;
+}
+
+/**
+ * ab8500_btemp_get_batctrl_temp() - get the temperature
+ * @btemp: pointer to the btemp structure
+ *
+ * Returns the batctrl temperature in millidegrees
+ */
+int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
+{
+ return btemp->bat_temp * 1000;
+}
+
+/**
+ * ab8500_btemp_get_property() - get the btemp properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the btemp
+ * properties by reading the sysfs files.
+ * online: presence of the battery
+ * present: presence of the battery
+ * technology: battery technology
+ * temp: battery temperature
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_btemp_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_btemp *di;
+
+ di = to_ab8500_btemp_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (di->events.batt_rem)
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = di->bat->bat_type[di->bat->batt_id].name;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = ab8500_btemp_get_temp(di);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_btemp *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab8500_btemp_device_info(psy);
+
+ /*
+ * For every psy that lists this supply's name
+ * in its supplied_to array
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC disconnected */
+ if (!ret.intval && di->events.ac_conn) {
+ di->events.ac_conn = false;
+ }
+ /* AC connected */
+ else if (ret.intval && !di->events.ac_conn) {
+ di->events.ac_conn = true;
+ if (!di->events.usb_conn)
+ ab8500_btemp_periodic(di, true);
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval && di->events.usb_conn) {
+ di->events.usb_conn = false;
+ }
+ /* USB connected */
+ else if (ret.intval && !di->events.usb_conn) {
+ di->events.usb_conn = true;
+ if (!di->events.ac_conn)
+ ab8500_btemp_periodic(di, true);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_btemp_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is pointing to the function pointer external_power_changed
+ * of the structure power_supply.
+ * This function gets executed when there is a change in the external power
+ * supply to the btemp.
+ */
+static void ab8500_btemp_external_power_changed(struct power_supply *psy)
+{
+ struct ab8500_btemp *di = to_ab8500_btemp_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->btemp_psy, ab8500_btemp_get_ext_psy_data);
+}
+
+/* ab8500 btemp driver interrupts and their respective isr */
+static struct ab8500_btemp_interrupts ab8500_btemp_irq[] = {
+ {"BAT_CTRL_INDB", ab8500_btemp_batctrlindb_handler},
+ {"BTEMP_LOW", ab8500_btemp_templow_handler},
+ {"BTEMP_HIGH", ab8500_btemp_temphigh_handler},
+ {"BTEMP_LOW_MEDIUM", ab8500_btemp_lowmed_handler},
+ {"BTEMP_MEDIUM_HIGH", ab8500_btemp_medhigh_handler},
+};
+
+#if defined(CONFIG_PM)
+static int ab8500_btemp_resume(struct platform_device *pdev)
+{
+ struct ab8500_btemp *di = platform_get_drvdata(pdev);
+
+ ab8500_btemp_periodic(di, true);
+
+ return 0;
+}
+
+static int ab8500_btemp_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_btemp *di = platform_get_drvdata(pdev);
+
+ ab8500_btemp_periodic(di, false);
+
+ return 0;
+}
+#else
+#define ab8500_btemp_suspend NULL
+#define ab8500_btemp_resume NULL
+#endif
+
+static int __devexit ab8500_btemp_remove(struct platform_device *pdev)
+{
+ struct ab8500_btemp *di = platform_get_drvdata(pdev);
+ int i, irq;
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ /* Delete the work queue */
+ destroy_workqueue(di->btemp_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->btemp_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
+{
+ int irq, i, ret = 0;
+ u8 val;
+ struct ab8500_platform_data *plat;
+
+ struct ab8500_btemp *di =
+ kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab8500_gpadc_get();
+
+ plat = dev_get_platdata(di->parent->dev);
+
+ /* get btemp specific platform data */
+ if (!plat->btemp) {
+ dev_err(di->dev, "no btemp platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->pdata = plat->btemp;
+
+ /* get battery specific platform data */
+ if (!plat->battery) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->bat = plat->battery;
+
+ /* BTEMP supply */
+ di->btemp_psy.name = "ab8500_btemp";
+ di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->btemp_psy.properties = ab8500_btemp_props;
+ di->btemp_psy.num_properties = ARRAY_SIZE(ab8500_btemp_props);
+ di->btemp_psy.get_property = ab8500_btemp_get_property;
+ di->btemp_psy.supplied_to = di->pdata->supplied_to;
+ di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
+ di->btemp_psy.external_power_changed =
+ ab8500_btemp_external_power_changed;
+
+ /* Create a work queue for the btemp */
+ di->btemp_wq =
+ create_singlethread_workqueue("ab8500_btemp_wq");
+ if (di->btemp_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for measuring temperature periodically */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+ ab8500_btemp_periodic_work);
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(di->dev);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to get chip ID\n");
+ goto free_btemp_wq;
+ }
+ di->chip_id = ret;
+ dev_dbg(di->dev, "AB8500 CID is: 0x%02x\n",
+ di->chip_id);
+
+ /* Identify the battery */
+ if (ab8500_btemp_id(di) < 0)
+ dev_warn(di->dev, "failed to identify the battery\n");
+
+ /* Set BTEMP thermal limits. Low and Med are fixed */
+ di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
+ di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_BTEMP_HIGH_TH, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ goto free_btemp_wq;
+ }
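+ /* The high limit is taken from the BTEMP_HIGH_TH register setting */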
+ switch (val) {
+ case BTEMP_HIGH_TH_57_0:
+ case BTEMP_HIGH_TH_57_1:
+ di->btemp_ranges.btemp_high_limit =
+ BTEMP_THERMAL_HIGH_LIMIT_57;
+ break;
+ case BTEMP_HIGH_TH_52:
+ di->btemp_ranges.btemp_high_limit =
+ BTEMP_THERMAL_HIGH_LIMIT_52;
+ break;
+ case BTEMP_HIGH_TH_62:
+ di->btemp_ranges.btemp_high_limit =
+ BTEMP_THERMAL_HIGH_LIMIT_62;
+ break;
+ }
+
+ /* Register BTEMP power supply class */
+ ret = power_supply_register(di->dev, &di->btemp_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register BTEMP psy\n");
+ goto free_btemp_wq;
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab8500_btemp_irq[i].name, di);
+
+ if (ret) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab8500_btemp_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab8500_btemp_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ /* Kick off periodic temperature measurements */
+ ab8500_btemp_periodic(di, true);
+ list_add_tail(&di->node, &ab8500_btemp_list);
+
+ return ret;
+
+free_irq:
+ power_supply_unregister(&di->btemp_psy);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+free_btemp_wq:
+ destroy_workqueue(di->btemp_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab8500_btemp_driver = {
+ .probe = ab8500_btemp_probe,
+ .remove = __devexit_p(ab8500_btemp_remove),
+ .suspend = ab8500_btemp_suspend,
+ .resume = ab8500_btemp_resume,
+ .driver = {
+ .name = "ab8500-btemp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_btemp_init(void)
+{
+ return platform_driver_register(&ab8500_btemp_driver);
+}
+
+static void __exit ab8500_btemp_exit(void)
+{
+ platform_driver_unregister(&ab8500_btemp_driver);
+}
+
+subsys_initcall_sync(ab8500_btemp_init);
+module_exit(ab8500_btemp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
+MODULE_ALIAS("platform:ab8500-btemp");
+MODULE_DESCRIPTION("AB8500 battery temperature driver");
diff --git a/drivers/power/ab8500_chargalg.c b/drivers/power/ab8500_chargalg.c
new file mode 100644
index 00000000000..dba7b638b4f
--- /dev/null
+++ b/drivers/power/ab8500_chargalg.c
@@ -0,0 +1,1989 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Charging algorithm driver for AB8500
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/ab8500/ux500_chargalg.h>
+#include <linux/mfd/ab8500/bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+
+/* Watchdog kick interval */
+#define CHG_WD_INTERVAL (60 * HZ)
+
+/* End-of-charge criteria counter */
+#define EOC_COND_CNT 10
+
+/* Recharge criteria counter */
+#define RCH_COND_CNT 3
+
+#define to_ab8500_chargalg_device_info(x) container_of((x), \
+ struct ab8500_chargalg, chargalg_psy)
+
+enum ab8500_chargers {
+ NO_CHG,
+ AC_CHG,
+ USB_CHG,
+};
+
+struct ab8500_chargalg_charger_info {
+ enum ab8500_chargers conn_chg;
+ enum ab8500_chargers prev_conn_chg;
+ enum ab8500_chargers online_chg;
+ enum ab8500_chargers prev_online_chg;
+ enum ab8500_chargers charger_type;
+ bool usb_chg_ok;
+ bool ac_chg_ok;
+ int usb_volt;
+ int usb_curr;
+ int ac_volt;
+ int ac_curr;
+ int usb_vset;
+ int usb_iset;
+ int ac_vset;
+ int ac_iset;
+};
+
+struct ab8500_chargalg_suspension_status {
+ bool suspended_change;
+ bool ac_suspended;
+ bool usb_suspended;
+};
+
+struct ab8500_chargalg_battery_data {
+ int temp;
+ int volt;
+ int avg_curr;
+ int inst_curr;
+ int percent;
+};
+
+enum ab8500_chargalg_states {
+ STATE_HANDHELD_INIT,
+ STATE_HANDHELD,
+ STATE_CHG_NOT_OK_INIT,
+ STATE_CHG_NOT_OK,
+ STATE_HW_TEMP_PROTECT_INIT,
+ STATE_HW_TEMP_PROTECT,
+ STATE_NORMAL_INIT,
+ STATE_NORMAL,
+ STATE_WAIT_FOR_RECHARGE_INIT,
+ STATE_WAIT_FOR_RECHARGE,
+ STATE_MAINTENANCE_A_INIT,
+ STATE_MAINTENANCE_A,
+ STATE_MAINTENANCE_B_INIT,
+ STATE_MAINTENANCE_B,
+ STATE_TEMP_UNDEROVER_INIT,
+ STATE_TEMP_UNDEROVER,
+ STATE_TEMP_LOWHIGH_INIT,
+ STATE_TEMP_LOWHIGH,
+ STATE_SUSPENDED_INIT,
+ STATE_SUSPENDED,
+ STATE_OVV_PROTECT_INIT,
+ STATE_OVV_PROTECT,
+ STATE_SAFETY_TIMER_EXPIRED_INIT,
+ STATE_SAFETY_TIMER_EXPIRED,
+ STATE_BATT_REMOVED_INIT,
+ STATE_BATT_REMOVED,
+ STATE_WD_EXPIRED_INIT,
+ STATE_WD_EXPIRED,
+};
+
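+/* Printable state names; must be kept in sync with enum ab8500_chargalg_states */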
+static const char *states[] = {
+ "HANDHELD_INIT",
+ "HANDHELD",
+ "CHG_NOT_OK_INIT",
+ "CHG_NOT_OK",
+ "HW_TEMP_PROTECT_INIT",
+ "HW_TEMP_PROTECT",
+ "NORMAL_INIT",
+ "NORMAL",
+ "WAIT_FOR_RECHARGE_INIT",
+ "WAIT_FOR_RECHARGE",
+ "MAINTENANCE_A_INIT",
+ "MAINTENANCE_A",
+ "MAINTENANCE_B_INIT",
+ "MAINTENANCE_B",
+ "TEMP_UNDEROVER_INIT",
+ "TEMP_UNDEROVER",
+ "TEMP_LOWHIGH_INIT",
+ "TEMP_LOWHIGH",
+ "SUSPENDED_INIT",
+ "SUSPENDED",
+ "OVV_PROTECT_INIT",
+ "OVV_PROTECT",
+ "SAFETY_TIMER_EXPIRED_INIT",
+ "SAFETY_TIMER_EXPIRED",
+ "BATT_REMOVED_INIT",
+ "BATT_REMOVED",
+ "WD_EXPIRED_INIT",
+ "WD_EXPIRED",
+};
+
+struct ab8500_chargalg_events {
+ bool batt_unknown;
+ bool mainextchnotok;
+ bool batt_ovv;
+ bool batt_rem;
+ bool btemp_underover;
+ bool btemp_lowhigh;
+ bool main_thermal_prot;
+ bool usb_thermal_prot;
+ bool main_ovv;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool safety_timer_expired;
+ bool maintenance_timer_expired;
+ bool ac_wd_expired;
+ bool usb_wd_expired;
+ bool ac_cv_active;
+ bool usb_cv_active;
+ bool vbus_collapsed;
+};
+
+/**
+ * struct ab8500_charge_curr_maximization - Charger maximization parameters
+ * @original_iset: the non-optimized/maximized charger current
+ * @current_iset: the charging current used at this moment
+ * @test_delta_i: the delta between the current we want to charge and the
+ * current that is really going into the battery
+ * @condition_cnt: number of iterations needed before a new charger current
+ * is set
+ * @max_current: maximum charger current
+ * @wait_cnt: to avoid too fast a current step down in case of charger
+ * voltage collapse, we insert this delay between step downs
+ * @level: tells in how many steps the charging current has been
+ * increased
+ */
+struct ab8500_charge_curr_maximization {
+ int original_iset;
+ int current_iset;
+ int test_delta_i;
+ int condition_cnt;
+ int max_current;
+ int wait_cnt;
+ u8 level;
+};
+
+enum maxim_ret {
+ MAXIM_RET_NOACTION,
+ MAXIM_RET_CHANGE,
+ MAXIM_RET_IBAT_TOO_HIGH,
+};
+
+enum maintenance_state {
+ MAINT_A,
+ MAINT_B,
+};
+
+/**
+ * struct ab8500_chargalg - ab8500 Charging algorithm device information
+ * @dev: pointer to the structure device
+ * @charge_status: battery operating status
+ * @eoc_cnt: counter used to determine end-of-charge
+ * @rch_cnt: counter used to determine start of recharge
+ * @maintenance_chg: indicate if maintenance charge is active
+ * @maint_state: indicate what maintenance state we should go to next
+ * @t_hyst_norm: temperature hysteresis when the temperature has been
+ * over or under normal limits
+ * @t_hyst_lowhigh: temperature hysteresis when the temperature has been
+ * over or under the high or low limits
+ * @charge_state: current state of the charging algorithm
+ * @ccm: charging current maximization parameters
+ * @chg_info: information about connected charger types
+ * @batt_data: data of the battery
+ * @susp_status: current charger suspension status
+ * @parent: pointer to the struct ab8500
+ * @pdata: pointer to the ab8500_chargalg platform data
+ * @bat: pointer to the ab8500_bm platform data
+ * @chargalg_psy: structure that holds the battery properties exposed by
+ * the charging algorithm
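+ * @ac_chg: pointer to the ux500_charger representing the AC charger
+ * @usb_chg: pointer to the ux500_charger representing the USB charger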
+ * @events: structure for information about events triggered
+ * @chargalg_wq: work queue for running the charging algorithm
+ * @chargalg_periodic_work: work to run the charging algorithm periodically
+ * @chargalg_wd_work: work to kick the charger watchdog periodically
+ * @chargalg_work: work to run the charging algorithm instantly
+ * @safety_timer: charging safety timer
+ * @maintenance_timer: maintenance charging timer
+ * @chargalg_kobject: structure of type kobject
+ */
+struct ab8500_chargalg {
+ struct device *dev;
+ int charge_status;
+ int eoc_cnt;
+ int rch_cnt;
+ bool maintenance_chg;
+ enum maintenance_state maint_state;
+ int t_hyst_norm;
+ int t_hyst_lowhigh;
+ enum ab8500_chargalg_states charge_state;
+ struct ab8500_charge_curr_maximization ccm;
+ struct ab8500_chargalg_charger_info chg_info;
+ struct ab8500_chargalg_battery_data batt_data;
+ struct ab8500_chargalg_suspension_status susp_status;
+ struct ab8500 *parent;
+ struct ab8500_chargalg_platform_data *pdata;
+ struct ab8500_bm_data *bat;
+ struct power_supply chargalg_psy;
+ struct ux500_charger *ac_chg;
+ struct ux500_charger *usb_chg;
+ struct ab8500_chargalg_events events;
+ struct workqueue_struct *chargalg_wq;
+ struct delayed_work chargalg_periodic_work;
+ struct delayed_work chargalg_wd_work;
+ struct work_struct chargalg_work;
+ struct timer_list safety_timer;
+ struct timer_list maintenance_timer;
+ struct kobject chargalg_kobject;
+};
+
+/* Main battery properties */
+static enum power_supply_property ab8500_chargalg_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+/**
+ * ab8500_chargalg_safety_timer_expired() - Expiration of the safety timer
+ * @data: pointer to the ab8500_chargalg structure
+ *
+ * This function gets called when the safety timer for the charger
+ * expires
+ */
+static void ab8500_chargalg_safety_timer_expired(unsigned long data)
+{
+ struct ab8500_chargalg *di = (struct ab8500_chargalg *) data;
+ dev_err(di->dev, "Safety timer expired\n");
+ di->events.safety_timer_expired = true;
+
+ /* Trigger execution of the algorithm instantly */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_maintenance_timer_expired() - Expiration of
+ * the maintenance timer
+ * @data: pointer to the ab8500_chargalg structure
+ *
+ * This function gets called when the maintenance timer
+ * expires
+ */
+static void ab8500_chargalg_maintenance_timer_expired(unsigned long data)
+{
+
+ struct ab8500_chargalg *di = (struct ab8500_chargalg *) data;
+ dev_dbg(di->dev, "Maintenance timer expired\n");
+ di->events.maintenance_timer_expired = true;
+
+ /* Trigger execution of the algorithm instantly */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_state_to() - Change charge state
+ * @di: pointer to the ab8500_chargalg structure
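+ * @state: the new ab8500_chargalg_states state to enter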
+ *
+ * This function gets called when a charge state change should occur
+ */
+static void ab8500_chargalg_state_to(struct ab8500_chargalg *di,
+ enum ab8500_chargalg_states state)
+{
+ dev_dbg(di->dev,
+ "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
+ di->charge_state == state ? "NO" : "YES",
+ di->charge_state,
+ states[di->charge_state],
+ state,
+ states[state]);
+
+ di->charge_state = state;
+}
+
+/**
+ * ab8500_chargalg_check_charger_connection() - Check charger connection change
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * This function will check if there is a change in the charger connection
+ * and change charge state accordingly. AC has precedence over USB.
+ */
+static int ab8500_chargalg_check_charger_connection(struct ab8500_chargalg *di)
+{
+ if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
+ di->susp_status.suspended_change) {
+ /*
+ * Charger state changed or suspension
+ * has changed since last update
+ */
+ if ((di->chg_info.conn_chg & AC_CHG) &&
+ !di->susp_status.ac_suspended) {
+ dev_dbg(di->dev, "Charging source is AC\n");
+ if (di->chg_info.charger_type != AC_CHG) {
+ di->chg_info.charger_type = AC_CHG;
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ }
+ } else if ((di->chg_info.conn_chg & USB_CHG) &&
+ !di->susp_status.usb_suspended) {
+ dev_dbg(di->dev, "Charging source is USB\n");
+ di->chg_info.charger_type = USB_CHG;
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ } else if (di->chg_info.conn_chg &&
+ (di->susp_status.ac_suspended ||
+ di->susp_status.usb_suspended)) {
+ dev_dbg(di->dev, "Charging is suspended\n");
+ di->chg_info.charger_type = NO_CHG;
+ ab8500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
+ } else {
+ dev_dbg(di->dev, "Charging source is OFF\n");
+ di->chg_info.charger_type = NO_CHG;
+ ab8500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+ }
+ di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
+ di->susp_status.suspended_change = false;
+ }
+ return di->chg_info.conn_chg;
+}
+
+/**
+ * ab8500_chargalg_start_safety_timer() - Start charging safety timer
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The safety timer is used to avoid overcharging of old or bad batteries.
+ * There are different timers for AC and USB
+ */
+static void ab8500_chargalg_start_safety_timer(struct ab8500_chargalg *di)
+{
+ unsigned long timer_expiration = 0;
+
+ switch (di->chg_info.charger_type) {
+ case AC_CHG:
+ timer_expiration =
+ round_jiffies(jiffies +
+ (di->bat->main_safety_tmr_h * 3600 * HZ));
+ break;
+
+ case USB_CHG:
+ timer_expiration =
+ round_jiffies(jiffies +
+ (di->bat->usb_safety_tmr_h * 3600 * HZ));
+ break;
+
+ default:
+ dev_err(di->dev, "Unknown charger to charge from\n");
+ break;
+ }
+
+ di->events.safety_timer_expired = false;
+ di->safety_timer.expires = timer_expiration;
+ if (!timer_pending(&di->safety_timer))
+ add_timer(&di->safety_timer);
+ else
+ mod_timer(&di->safety_timer, timer_expiration);
+}
+
+/**
+ * ab8500_chargalg_stop_safety_timer() - Stop charging safety timer
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The safety timer is stopped whenever the NORMAL state is exited
+ */
+static void ab8500_chargalg_stop_safety_timer(struct ab8500_chargalg *di)
+{
+ di->events.safety_timer_expired = false;
+ if (timer_pending(&di->safety_timer))
+ del_timer(&di->safety_timer);
+}
+
+/**
+ * ab8500_chargalg_start_maintenance_timer() - Start charging maintenance timer
+ * @di: pointer to the ab8500_chargalg structure
+ * @duration: duration of the maintenance timer in hours
+ *
+ * The maintenance timer is used to maintain the charge in the battery once
+ * the battery is considered full. These timers are chosen to match the
+ * discharge curve of the battery
+ */
+static void ab8500_chargalg_start_maintenance_timer(struct ab8500_chargalg *di,
+ int duration)
+{
+ unsigned long timer_expiration;
+
+ /* Convert from hours to jiffies */
+ timer_expiration = round_jiffies(jiffies + (duration * 3600 * HZ));
+
+ di->events.maintenance_timer_expired = false;
+ di->maintenance_timer.expires = timer_expiration;
+ if (!timer_pending(&di->maintenance_timer))
+ add_timer(&di->maintenance_timer);
+ else
+ mod_timer(&di->maintenance_timer, timer_expiration);
+}
+
+/**
+ * ab8500_chargalg_stop_maintenance_timer() - Stop maintenance timer
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The maintenance timer is stopped whenever maintenance ends or when another
+ * state is entered
+ */
+static void ab8500_chargalg_stop_maintenance_timer(struct ab8500_chargalg *di)
+{
+ di->events.maintenance_timer_expired = false;
+ del_timer(&di->maintenance_timer);
+}
+
+/**
+ * ab8500_chargalg_kick_watchdog() - Kick charger watchdog
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The charger watchdog has to be kicked periodically whenever the charger is
+ * on, else the ABB will reset the system
+ */
+static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
+{
+ /* Check if charger exists and kick watchdog if charging */
+ if (di->ac_chg && di->ac_chg->ops.kick_wd &&
+ di->chg_info.online_chg & AC_CHG)
+ return di->ac_chg->ops.kick_wd(di->ac_chg);
+ else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
+ di->chg_info.online_chg & USB_CHG)
+ return di->usb_chg->ops.kick_wd(di->usb_chg);
+
+ return -ENXIO;
+}
+
+/**
+ * ab8500_chargalg_ac_en() - Turn on/off the AC charger
+ * @di: pointer to the ab8500_chargalg structure
+ * @enable: charger on/off
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * The AC charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
+ int vset, int iset)
+{
+ if (!di->ac_chg || !di->ac_chg->ops.enable)
+ return -ENXIO;
+
+ /* Limit the request to what both the charger and the battery support */
+ if (di->ac_chg->max_out_volt)
+ vset = min(vset, di->ac_chg->max_out_volt);
+ if (di->ac_chg->max_out_curr)
+ iset = min(iset, di->ac_chg->max_out_curr);
+
+ di->chg_info.ac_iset = iset;
+ di->chg_info.ac_vset = vset;
+
+ return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
+}
+
+/**
+ * ab8500_chargalg_usb_en() - Turn on/off the USB charger
+ * @di: pointer to the ab8500_chargalg structure
+ * @enable: charger on/off
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * The USB charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int ab8500_chargalg_usb_en(struct ab8500_chargalg *di, int enable,
+ int vset, int iset)
+{
+ if (!di->usb_chg || !di->usb_chg->ops.enable)
+ return -ENXIO;
+
+ /* Limit the request to what both the charger and the battery support */
+ if (di->usb_chg->max_out_volt)
+ vset = min(vset, di->usb_chg->max_out_volt);
+ if (di->usb_chg->max_out_curr)
+ iset = min(iset, di->usb_chg->max_out_curr);
+
+ di->chg_info.usb_iset = iset;
+ di->chg_info.usb_vset = vset;
+
+ return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
+}
+
+/**
+ * ab8500_chargalg_update_chg_curr() - Update charger current
+ * @di: pointer to the ab8500_chargalg structure
+ * @iset: requested charger output current
+ *
+ * The charger output current will be updated for the charger
+ * that is currently in use
+ */
+static int ab8500_chargalg_update_chg_curr(struct ab8500_chargalg *di,
+ int iset)
+{
+ /* Check if charger exists and update current if charging */
+ if (di->ac_chg && di->ac_chg->ops.update_curr &&
+ di->chg_info.charger_type & AC_CHG) {
+ /*
+ * Limit the request to what both the charger
+ * and the battery support
+ */
+ if (di->ac_chg->max_out_curr)
+ iset = min(iset, di->ac_chg->max_out_curr);
+
+ di->chg_info.ac_iset = iset;
+
+ return di->ac_chg->ops.update_curr(di->ac_chg, iset);
+ } else if (di->usb_chg && di->usb_chg->ops.update_curr &&
+ di->chg_info.charger_type & USB_CHG) {
+ /*
+ * Limit the request to what both the charger
+ * and the battery support
+ */
+ if (di->usb_chg->max_out_curr)
+ iset = min(iset, di->usb_chg->max_out_curr);
+
+ di->chg_info.usb_iset = iset;
+
+ return di->usb_chg->ops.update_curr(di->usb_chg, iset);
+ }
+
+ return -ENXIO;
+}
+
+/**
+ * ab8500_chargalg_stop_charging() - Stop charging
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * This function is called from any state where charging should be stopped.
+ * All charging is disabled and all status parameters and timers are changed
+ * accordingly
+ */
+static void ab8500_chargalg_stop_charging(struct ab8500_chargalg *di)
+{
+ ab8500_chargalg_ac_en(di, false, 0, 0);
+ ab8500_chargalg_usb_en(di, false, 0, 0);
+ ab8500_chargalg_stop_safety_timer(di);
+ ab8500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ di->maintenance_chg = false;
+ cancel_delayed_work(&di->chargalg_wd_work);
+ power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * ab8500_chargalg_hold_charging() - Pauses charging
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * This function is called in the case where maintenance charging has been
+ * disabled and instead a battery voltage mode is entered to check when the
+ * battery voltage has reached a certain recharge voltage
+ */
+static void ab8500_chargalg_hold_charging(struct ab8500_chargalg *di)
+{
+ ab8500_chargalg_ac_en(di, false, 0, 0);
+ ab8500_chargalg_usb_en(di, false, 0, 0);
+ ab8500_chargalg_stop_safety_timer(di);
+ ab8500_chargalg_stop_maintenance_timer(di);
+ di->maintenance_chg = false;
+ cancel_delayed_work(&di->chargalg_wd_work);
+}
+
+/**
+ * ab8500_chargalg_start_charging() - Start the charger
+ * @di: pointer to the ab8500_chargalg structure
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * A charger will be enabled depending on the requested charger type that was
+ * detected previously.
+ */
+static void ab8500_chargalg_start_charging(struct ab8500_chargalg *di,
+ int vset, int iset)
+{
+ switch (di->chg_info.charger_type) {
+ case AC_CHG:
+ dev_dbg(di->dev,
+ "AC parameters: Vset %d, Ich %d\n", vset, iset);
+ ab8500_chargalg_usb_en(di, false, 0, 0);
+ ab8500_chargalg_ac_en(di, true, vset, iset);
+ break;
+
+ case USB_CHG:
+ dev_dbg(di->dev,
+ "USB parameters: Vset %d, Ich %d\n", vset, iset);
+ ab8500_chargalg_ac_en(di, false, 0, 0);
+ ab8500_chargalg_usb_en(di, true, vset, iset);
+ break;
+
+ default:
+ dev_err(di->dev, "Unknown charger to charge from\n");
+ break;
+ }
+}
+
+/**
+ * ab8500_chargalg_check_temp() - Check battery temperature ranges
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * The battery temperature is checked against the predefined limits and the
+ * charge state is changed accordingly
+ */
+static void ab8500_chargalg_check_temp(struct ab8500_chargalg *di)
+{
+ if (di->batt_data.temp > (di->bat->temp_low + di->t_hyst_norm) &&
+ di->batt_data.temp < (di->bat->temp_high - di->t_hyst_norm)) {
+ /* Temp OK! */
+ di->events.btemp_underover = false;
+ di->events.btemp_lowhigh = false;
+ di->t_hyst_norm = 0;
+ di->t_hyst_lowhigh = 0;
+ } else {
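+ /*
+ * Outside the normal range: apply hysteresis so that small
+ * temperature fluctuations around a limit do not toggle the
+ * charge state back and forth.
+ */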
+ if (((di->batt_data.temp >= di->bat->temp_high) &&
+ (di->batt_data.temp <
+ (di->bat->temp_over - di->t_hyst_lowhigh))) ||
+ ((di->batt_data.temp >
+ (di->bat->temp_under + di->t_hyst_lowhigh)) &&
+ (di->batt_data.temp <= di->bat->temp_low))) {
+ /* Battery temp is low or high: minor limit exceeded */
+ di->events.btemp_underover = false;
+ di->events.btemp_lowhigh = true;
+ di->t_hyst_norm = di->bat->temp_hysteresis;
+ di->t_hyst_lowhigh = 0;
+ } else if (di->batt_data.temp <= di->bat->temp_under ||
+ di->batt_data.temp >= di->bat->temp_over) {
+ /* Battery temp is under or over: major limit exceeded */
+ di->events.btemp_underover = true;
+ di->events.btemp_lowhigh = false;
+ di->t_hyst_norm = 0;
+ di->t_hyst_lowhigh = di->bat->temp_hysteresis;
+ } else {
+ /* Within hysteresis */
+ dev_dbg(di->dev, "Within hysteresis limit temp: %d "
+ "hyst_lowhigh %d, hyst normal %d\n",
+ di->batt_data.temp, di->t_hyst_lowhigh,
+ di->t_hyst_norm);
+ }
+ }
+}
+
+/**
+ * ab8500_chargalg_check_charger_voltage() - Check charger voltage
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * Charger voltage is checked against maximum limit
+ */
+static void ab8500_chargalg_check_charger_voltage(struct ab8500_chargalg *di)
+{
+ if (di->chg_info.usb_volt > di->bat->chg_params->usb_volt_max)
+ di->chg_info.usb_chg_ok = false;
+ else
+ di->chg_info.usb_chg_ok = true;
+
+ if (di->chg_info.ac_volt > di->bat->chg_params->ac_volt_max)
+ di->chg_info.ac_chg_ok = false;
+ else
+ di->chg_info.ac_chg_ok = true;
+
+}
+
+/**
+ * ab8500_chargalg_end_of_charge() - Check if end-of-charge criteria is fulfilled
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * End-of-charge criteria is fulfilled when the battery voltage is above a
+ * certain limit and the battery current is below a certain limit for a
+ * predefined number of consecutive seconds. If true, the battery is full
+ */
+static void ab8500_chargalg_end_of_charge(struct ab8500_chargalg *di)
+{
+ if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+ di->charge_state == STATE_NORMAL &&
+ !di->maintenance_chg && (di->batt_data.volt >=
+ di->bat->bat_type[di->bat->batt_id].termination_vol ||
+ di->events.usb_cv_active || di->events.ac_cv_active) &&
+ di->batt_data.avg_curr <
+ di->bat->bat_type[di->bat->batt_id].termination_curr &&
+ di->batt_data.avg_curr > 0) {
+ if (++di->eoc_cnt >= EOC_COND_CNT) {
+ di->eoc_cnt = 0;
+ di->charge_status = POWER_SUPPLY_STATUS_FULL;
+ di->maintenance_chg = true;
+ dev_dbg(di->dev, "EOC reached!\n");
+ power_supply_changed(&di->chargalg_psy);
+ } else {
+ dev_dbg(di->dev,
+ " EOC limit reached for the %d"
+ " time, out of %d before EOC\n",
+ di->eoc_cnt,
+ EOC_COND_CNT);
+ }
+ } else {
+ di->eoc_cnt = 0;
+ }
+}
+
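+/* Reset the charging current maximization parameters to the battery defaults */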
+static void init_maxim_chg_curr(struct ab8500_chargalg *di)
+{
+ di->ccm.original_iset =
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->ccm.current_iset =
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->ccm.test_delta_i = di->bat->maxi->charger_curr_step;
+ di->ccm.max_current = di->bat->maxi->chg_curr;
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.level = 0;
+}
+
+/**
+ * ab8500_chargalg_chg_curr_maxim - increases the charger current to
+ * compensate for the system load
+ * @di pointer to the ab8500_chargalg structure
+ *
+ * This maximization function is used to raise the charger current to get the
+ * battery current as close to the optimal value as possible. The battery
+ * current during charging is affected by the system load
+ */
+static enum maxim_ret ab8500_chargalg_chg_curr_maxim(struct ab8500_chargalg *di)
+{
+ int delta_i;
+
+ if (!di->bat->maxi->ena_maxi)
+ return MAXIM_RET_NOACTION;
+
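+ /* Delta between requested charger current and measured battery current */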
+ delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
+
+ if (di->events.vbus_collapsed) {
+ dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
+ di->ccm.wait_cnt);
+ if (di->ccm.wait_cnt == 0) {
+ dev_dbg(di->dev, "lowering current\n");
+ di->ccm.wait_cnt++;
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.max_current =
+ di->ccm.current_iset - di->ccm.test_delta_i;
+ di->ccm.current_iset = di->ccm.max_current;
+ di->ccm.level--;
+ return MAXIM_RET_CHANGE;
+ } else {
+ dev_dbg(di->dev, "waiting\n");
+ /* Let's go in here twice before lowering curr again */
+ di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
+ return MAXIM_RET_NOACTION;
+ }
+ }
+
+ di->ccm.wait_cnt = 0;
+
+ if ((di->batt_data.inst_curr > di->ccm.original_iset)) {
+ dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
+ " (limit %dmA) (current iset: %dmA)!\n",
+ di->batt_data.inst_curr, di->ccm.original_iset,
+ di->ccm.current_iset);
+
+ if (di->ccm.current_iset == di->ccm.original_iset)
+ return MAXIM_RET_NOACTION;
+
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.current_iset = di->ccm.original_iset;
+ di->ccm.level = 0;
+
+ return MAXIM_RET_IBAT_TOO_HIGH;
+ }
+
+ if (delta_i > di->ccm.test_delta_i &&
+ (di->ccm.current_iset + di->ccm.test_delta_i) <
+ di->ccm.max_current) {
+ if (di->ccm.condition_cnt-- == 0) {
+ /* Increase the iset with ccm.test_delta_i */
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.current_iset += di->ccm.test_delta_i;
+ di->ccm.level++;
+ dev_dbg(di->dev, " Maximization needed, increase"
+ " with %d mA to %dmA (Optimal ibat: %d)"
+ " Level %d\n",
+ di->ccm.test_delta_i,
+ di->ccm.current_iset,
+ di->ccm.original_iset,
+ di->ccm.level);
+ return MAXIM_RET_CHANGE;
+ } else {
+ return MAXIM_RET_NOACTION;
+ }
+ } else {
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ return MAXIM_RET_NOACTION;
+ }
+}
+
+static void handle_maxim_chg_curr(struct ab8500_chargalg *di)
+{
+ enum maxim_ret ret;
+ int result;
+
+ ret = ab8500_chargalg_chg_curr_maxim(di);
+ switch (ret) {
+ case MAXIM_RET_CHANGE:
+ result = ab8500_chargalg_update_chg_curr(di,
+ di->ccm.current_iset);
+ if (result)
+ dev_err(di->dev, "failed to set chg curr\n");
+ break;
+ case MAXIM_RET_IBAT_TOO_HIGH:
+ result = ab8500_chargalg_update_chg_curr(di,
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ if (result)
+ dev_err(di->dev, "failed to set chg curr\n");
+ break;
+
+ case MAXIM_RET_NOACTION:
+ default:
+ /* Do nothing..*/
+ break;
+ }
+}
+
+static void ab8500_chargalg_check_safety_timer(struct ab8500_chargalg *di)
+{
+ /*
+ * The safety timer will not be started until the capacity reported
+ * from the FG algorithm is 100%. Then we know that the amount of
+ * charge that's gone into the battery is enough for the battery
+ * to be full. If it has not reached end-of-charge before the safety
+ * timer has expired then we know that the battery is overcharged
+ * and charging will be stopped to protect the battery.
+ */
+ if (di->batt_data.percent == 100 &&
+ !timer_pending(&di->safety_timer)) {
+ ab8500_chargalg_start_safety_timer(di);
+ dev_dbg(di->dev, "start safety timer\n");
+ } else if (di->batt_data.percent != 100 &&
+ timer_pending(&di->safety_timer)) {
+ ab8500_chargalg_stop_safety_timer(di);
+ dev_dbg(di->dev, "stop safety timer\n");
+ }
+}
+
+static int ab8500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_chargalg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab8500_chargalg_device_info(psy);
+
+ /* For all psy where the driver name appears in any supplied_to */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ /* Initialize chargers if not already done */
+ if (!di->ac_chg &&
+ ext->type == POWER_SUPPLY_TYPE_MAINS)
+ di->ac_chg = psy_to_ux500_charger(ext);
+ else if (!di->usb_chg &&
+ ext->type == POWER_SUPPLY_TYPE_USB)
+ di->usb_chg = psy_to_ux500_charger(ext);
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ /* Battery present */
+ if (ret.intval)
+ di->events.batt_rem = false;
+ /* Battery removed */
+ else
+ di->events.batt_rem = true;
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC disconnected */
+ if (!ret.intval &&
+ (di->chg_info.conn_chg & AC_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg &= ~AC_CHG;
+ }
+ /* AC connected */
+ else if (ret.intval &&
+ !(di->chg_info.conn_chg & AC_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg |= AC_CHG;
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval &&
+ (di->chg_info.conn_chg & USB_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg &= ~USB_CHG;
+ }
+ /* USB connected */
+ else if (ret.intval &&
+ !(di->chg_info.conn_chg & USB_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg |= USB_CHG;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_ONLINE:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC offline */
+ if (!ret.intval &&
+ (di->chg_info.online_chg & AC_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg &= ~AC_CHG;
+ }
+ /* AC online */
+ else if (ret.intval &&
+ !(di->chg_info.online_chg & AC_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg |= AC_CHG;
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, 0);
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB offline */
+ if (!ret.intval &&
+ (di->chg_info.online_chg & USB_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg &= ~USB_CHG;
+ }
+ /* USB online */
+ else if (ret.intval &&
+ !(di->chg_info.online_chg & USB_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg |= USB_CHG;
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ switch (ret.intval) {
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ di->events.mainextchnotok = true;
+ di->events.main_thermal_prot = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_DEAD:
+ di->events.ac_wd_expired = true;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.main_thermal_prot = false;
+ break;
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ di->events.main_thermal_prot = true;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ di->events.main_ovv = true;
+ di->events.mainextchnotok = false;
+ di->events.main_thermal_prot = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_GOOD:
+ di->events.main_thermal_prot = false;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_TYPE_USB:
+ switch (ret.intval) {
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ di->events.usbchargernotok = true;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_DEAD:
+ di->events.usb_wd_expired = true;
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ break;
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ di->events.usb_thermal_prot = true;
+ di->events.usbchargernotok = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ di->events.vbus_ovv = true;
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_GOOD:
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.volt = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ di->chg_info.ac_volt = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ di->chg_info.usb_volt = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AVG is used to indicate when we are
+ * in CV mode */
+ if (ret.intval)
+ di->events.ac_cv_active = true;
+ else
+ di->events.ac_cv_active = false;
+
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* AVG is used to indicate when we are
+ * in CV mode */
+ if (ret.intval)
+ di->events.usb_cv_active = true;
+ else
+ di->events.usb_cv_active = false;
+
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (ret.intval)
+ di->events.batt_unknown = false;
+ else
+ di->events.batt_unknown = true;
+
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ di->batt_data.temp = ret.intval / 10;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ di->chg_info.ac_curr =
+ ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ di->chg_info.usb_curr =
+ ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.inst_curr = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.avg_curr = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ if (ret.intval)
+ di->events.vbus_collapsed = true;
+ else
+ di->events.vbus_collapsed = false;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ di->batt_data.percent = ret.intval;
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_chargalg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function implements the external_power_changed callback of
+ * struct power_supply.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void ab8500_chargalg_external_power_changed(struct power_supply *psy)
+{
+ struct ab8500_chargalg *di = to_ab8500_chargalg_device_info(psy);
+
+ /*
+ * Trigger execution of the algorithm instantly and read
+ * all power_supply properties there instead
+ */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_algorithm() - Main function for the algorithm
+ * @di: pointer to the ab8500_chargalg structure
+ *
+ * This is the main control function for the charging algorithm.
+ * It is called periodically or when something happens that will
+ * trigger a state change
+ */
+static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
+{
+ int charger_status;
+
+ /* Collect data from all power_supply class devices */
+ class_for_each_device(power_supply_class, NULL,
+ &di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
+
+ ab8500_chargalg_end_of_charge(di);
+ ab8500_chargalg_check_temp(di);
+ ab8500_chargalg_check_charger_voltage(di);
+ charger_status = ab8500_chargalg_check_charger_connection(di);
+
+ /*
+ * First check if we have a charger connected.
+ * Also we don't allow charging of unknown batteries if configured
+ * this way
+ */
+ if (!charger_status ||
+ (di->events.batt_unknown && !di->bat->chg_unknown_bat)) {
+ if (di->charge_state != STATE_HANDHELD) {
+ di->events.safety_timer_expired = false;
+ ab8500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+ }
+ }
+
+ /* If suspended, we should not continue checking the flags */
+ else if (di->charge_state == STATE_SUSPENDED_INIT ||
+ di->charge_state == STATE_SUSPENDED) {
+ /* We don't do anything here, just don't continue */
+ }
+
+ /* Safety timer expiration */
+ else if (di->events.safety_timer_expired) {
+ if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
+ ab8500_chargalg_state_to(di,
+ STATE_SAFETY_TIMER_EXPIRED_INIT);
+ }
+ /*
+ * Check if any interrupts have occurred
+ * that will prevent us from charging
+ */
+
+ /* Battery removed */
+ else if (di->events.batt_rem) {
+ if (di->charge_state != STATE_BATT_REMOVED)
+ ab8500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
+ }
+ /* Main or USB charger not ok. */
+ else if (di->events.mainextchnotok || di->events.usbchargernotok) {
+ /*
+ * If vbus_collapsed is set, we have to lower the charger
+ * current, which is done in the normal state below
+ */
+ if (di->charge_state != STATE_CHG_NOT_OK &&
+ !di->events.vbus_collapsed)
+ ab8500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
+ }
+ /* VBUS, Main or VBAT OVV. */
+ else if (di->events.vbus_ovv ||
+ di->events.main_ovv ||
+ di->events.batt_ovv ||
+ !di->chg_info.usb_chg_ok ||
+ !di->chg_info.ac_chg_ok) {
+ if (di->charge_state != STATE_OVV_PROTECT)
+ ab8500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
+ }
+ /* USB Thermal, stop charging */
+ else if (di->events.main_thermal_prot ||
+ di->events.usb_thermal_prot) {
+ if (di->charge_state != STATE_HW_TEMP_PROTECT)
+ ab8500_chargalg_state_to(di,
+ STATE_HW_TEMP_PROTECT_INIT);
+ }
+ /* Battery temp over/under */
+ else if (di->events.btemp_underover) {
+ if (di->charge_state != STATE_TEMP_UNDEROVER)
+ ab8500_chargalg_state_to(di,
+ STATE_TEMP_UNDEROVER_INIT);
+ }
+ /* Watchdog expired */
+ else if (di->events.ac_wd_expired ||
+ di->events.usb_wd_expired) {
+ if (di->charge_state != STATE_WD_EXPIRED)
+ ab8500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
+ }
+ /* Battery temp high/low */
+ else if (di->events.btemp_lowhigh) {
+ if (di->charge_state != STATE_TEMP_LOWHIGH)
+ ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
+ }
+
+ dev_dbg(di->dev,
+ "[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
+ "State %s Active_chg %d Chg_status %d AC %d USB %d "
+ "AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
+ "USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
+ di->batt_data.volt,
+ di->batt_data.avg_curr,
+ di->batt_data.inst_curr,
+ di->batt_data.temp,
+ di->batt_data.percent,
+ di->maintenance_chg,
+ states[di->charge_state],
+ di->chg_info.charger_type,
+ di->charge_status,
+ di->chg_info.conn_chg & AC_CHG,
+ di->chg_info.conn_chg & USB_CHG,
+ di->chg_info.online_chg & AC_CHG,
+ di->chg_info.online_chg & USB_CHG,
+ di->events.ac_cv_active,
+ di->events.usb_cv_active,
+ di->chg_info.ac_curr,
+ di->chg_info.usb_curr,
+ di->chg_info.ac_vset,
+ di->chg_info.ac_iset,
+ di->chg_info.usb_vset,
+ di->chg_info.usb_iset);
+
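+ /*
+ * Run the state machine. Each *_INIT state performs the one-time
+ * actions for its state and then falls through to the state itself.
+ */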
+ switch (di->charge_state) {
+ case STATE_HANDHELD_INIT:
+ ab8500_chargalg_stop_charging(di);
+ di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ ab8500_chargalg_state_to(di, STATE_HANDHELD);
+ /* Intentional fallthrough */
+
+ case STATE_HANDHELD:
+ break;
+
+ case STATE_SUSPENDED_INIT:
+ if (di->susp_status.ac_suspended)
+ ab8500_chargalg_ac_en(di, false, 0, 0);
+ if (di->susp_status.usb_suspended)
+ ab8500_chargalg_usb_en(di, false, 0, 0);
+ ab8500_chargalg_stop_safety_timer(di);
+ ab8500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ di->maintenance_chg = false;
+ ab8500_chargalg_state_to(di, STATE_SUSPENDED);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_SUSPENDED:
+ /* CHARGING is suspended */
+ break;
+
+ case STATE_BATT_REMOVED_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_BATT_REMOVED);
+ /* Intentional fallthrough */
+
+ case STATE_BATT_REMOVED:
+ if (!di->events.batt_rem)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_HW_TEMP_PROTECT_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
+ /* Intentional fallthrough */
+
+ case STATE_HW_TEMP_PROTECT:
+ if (!di->events.main_thermal_prot &&
+ !di->events.usb_thermal_prot)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_OVV_PROTECT_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_OVV_PROTECT);
+ /* Intentional fallthrough */
+
+ case STATE_OVV_PROTECT:
+ if (!di->events.vbus_ovv &&
+ !di->events.main_ovv &&
+ !di->events.batt_ovv &&
+ di->chg_info.usb_chg_ok &&
+ di->chg_info.ac_chg_ok)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_CHG_NOT_OK_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_CHG_NOT_OK);
+ /* Intentional fallthrough */
+
+ case STATE_CHG_NOT_OK:
+ if (!di->events.mainextchnotok &&
+ !di->events.usbchargernotok)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_SAFETY_TIMER_EXPIRED_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
+ /* Intentional fallthrough */
+
+ case STATE_SAFETY_TIMER_EXPIRED:
+ /* We exit this state when charger is removed */
+ break;
+
+ case STATE_NORMAL_INIT:
+ ab8500_chargalg_start_charging(di,
+ di->bat->bat_type[di->bat->batt_id].normal_vol_lvl,
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ ab8500_chargalg_state_to(di, STATE_NORMAL);
+ ab8500_chargalg_stop_maintenance_timer(di);
+ init_maxim_chg_curr(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->eoc_cnt = 0;
+ di->maintenance_chg = false;
+ di->maint_state = MAINT_A;
+ power_supply_changed(&di->chargalg_psy);
+
+ break;
+
+ case STATE_NORMAL:
+ handle_maxim_chg_curr(di);
+ if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
+ di->maintenance_chg)
+ ab8500_chargalg_state_to(di,
+ STATE_WAIT_FOR_RECHARGE_INIT);
+
+ /* Check whether we should start the safety timer or not */
+ ab8500_chargalg_check_safety_timer(di);
+ break;
+
+ case STATE_WAIT_FOR_RECHARGE_INIT:
+ ab8500_chargalg_hold_charging(di);
+ ab8500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
+ di->rch_cnt = RCH_COND_CNT;
+ /* Intentional fallthrough */
+
+ case STATE_WAIT_FOR_RECHARGE:
+ if (di->bat->no_maintenance) {
+ if (di->batt_data.volt <= di->bat->bat_type[
+ di->bat->batt_id].recharge_vol) {
+ if (di->rch_cnt-- == 0)
+ ab8500_chargalg_state_to(di,
+ STATE_NORMAL_INIT);
+ } else {
+ di->rch_cnt = RCH_COND_CNT;
+ }
+ } else {
+ /* Maintenance A */
+ if (di->maint_state == MAINT_A &&
+ di->batt_data.volt <
+ di->bat->bat_type[di->bat->batt_id].
+ maint_a_vol_lvl) {
+ if (di->rch_cnt-- == 0)
+ ab8500_chargalg_state_to(di,
+ STATE_MAINTENANCE_A_INIT);
+ }
+ /* Maintenance B */
+ else if (di->maint_state == MAINT_B &&
+ di->batt_data.volt <
+ di->bat->bat_type[di->bat->batt_id].
+ maint_b_vol_lvl) {
+ if (di->rch_cnt-- == 0)
+ ab8500_chargalg_state_to(di,
+ STATE_MAINTENANCE_B_INIT);
+ } else {
+ di->rch_cnt = RCH_COND_CNT;
+ }
+ }
+ break;
+
+ case STATE_MAINTENANCE_A_INIT:
+ ab8500_chargalg_stop_safety_timer(di);
+ ab8500_chargalg_start_maintenance_timer(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_chg_timer_h);
+ ab8500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_cur_lvl);
+ ab8500_chargalg_state_to(di, STATE_MAINTENANCE_A);
+ di->maint_state = MAINT_B;
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough*/
+
+ case STATE_MAINTENANCE_A:
+ if (di->events.maintenance_timer_expired) {
+ ab8500_chargalg_stop_maintenance_timer(di);
+ ab8500_chargalg_state_to(di,
+ STATE_WAIT_FOR_RECHARGE_INIT);
+ }
+ break;
+
+ case STATE_MAINTENANCE_B_INIT:
+ ab8500_chargalg_start_maintenance_timer(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_chg_timer_h);
+ ab8500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_cur_lvl);
+ ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough*/
+
+ case STATE_MAINTENANCE_B:
+ if (di->events.maintenance_timer_expired) {
+ ab8500_chargalg_stop_maintenance_timer(di);
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ }
+ break;
+
+ case STATE_TEMP_LOWHIGH_INIT:
+ ab8500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].low_high_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].low_high_cur_lvl);
+ ab8500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_TEMP_LOWHIGH:
+ if (!di->events.btemp_lowhigh)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_WD_EXPIRED_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_WD_EXPIRED);
+ /* Intentional fallthrough */
+
+ case STATE_WD_EXPIRED:
+ if (!di->events.ac_wd_expired &&
+ !di->events.usb_wd_expired)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_TEMP_UNDEROVER_INIT:
+ ab8500_chargalg_stop_charging(di);
+ ab8500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
+ /* Intentional fallthrough */
+
+ case STATE_TEMP_UNDEROVER:
+ if (!di->events.btemp_underover)
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+ }
+
+ /* Start charging directly if the new state is a charge state */
+ if (di->charge_state == STATE_NORMAL_INIT ||
+ di->charge_state == STATE_MAINTENANCE_A_INIT ||
+ di->charge_state == STATE_MAINTENANCE_B_INIT ||
+ di->charge_state == STATE_WAIT_FOR_RECHARGE_INIT)
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * ab8500_chargalg_periodic_work() - Periodic work for the algorithm
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for the charging algorithm
+ */
+static void ab8500_chargalg_periodic_work(struct work_struct *work)
+{
+ struct ab8500_chargalg *di = container_of(work,
+ struct ab8500_chargalg, chargalg_periodic_work.work);
+
+ ab8500_chargalg_algorithm(di);
+
+ /*
+ * If a charger is connected then the battery has to be monitored
+ * frequently, else the work can be delayed.
+ */
+ if (di->chg_info.conn_chg)
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_periodic_work,
+ di->bat->interval_charging * HZ);
+ else
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_periodic_work,
+ di->bat->interval_not_charging * HZ);
+}
+
+/**
+ * ab8500_chargalg_wd_work() - periodic work to kick the charger watchdog
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog
+ */
+static void ab8500_chargalg_wd_work(struct work_struct *work)
+{
+ int ret;
+ struct ab8500_chargalg *di = container_of(work,
+ struct ab8500_chargalg, chargalg_wd_work.work);
+
+ dev_dbg(di->dev, "ab8500_chargalg_wd_work\n");
+
+ ret = ab8500_chargalg_kick_watchdog(di);
+ if (ret < 0)
+ dev_err(di->dev, "failed to kick watchdog\n");
+
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, CHG_WD_INTERVAL);
+}
+
+/**
+ * ab8500_chargalg_work() - Work to run the charging algorithm instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for calling the charging algorithm
+ */
+static void ab8500_chargalg_work(struct work_struct *work)
+{
+ struct ab8500_chargalg *di = container_of(work,
+ struct ab8500_chargalg, chargalg_work);
+
+ ab8500_chargalg_algorithm(di);
+}
+
+/**
+ * ab8500_chargalg_get_property() - get the chargalg properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * chargalg properties by reading the sysfs files.
+ * status: charging/discharging/full/unknown
+ * health: health of the battery
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_chargalg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_chargalg *di;
+
+ di = to_ab8500_chargalg_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = di->charge_status;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->events.batt_ovv) {
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else if (di->events.btemp_underover) {
+ if (di->batt_data.temp <= di->bat->temp_under)
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ } else {
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Exposure to the sysfs interface */
+
+/**
+ * ab8500_chargalg_sysfs_charger() - sysfs store operations
+ * @kobj: pointer to the struct kobject
+ * @attr: pointer to the struct attribute
+ * @buf: buffer that holds the parameter passed from userspace
+ * @length: length of the parameter passed
+ *
+ * The operation to be performed when parameters are passed from user space.
+ * Returns the length of the buffer (input taken from user space) on success,
+ * else an error code on failure.
+ */
+static ssize_t ab8500_chargalg_sysfs_charger(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t length)
+{
+ struct ab8500_chargalg *di = container_of(kobj,
+ struct ab8500_chargalg, chargalg_kobject);
+ long int param;
+ int ac_usb;
+ int ret;
+ char entry = *attr->name;
+
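+ /* 'c' matches the only attribute exposed here, "chargalg" */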
+ switch (entry) {
+ case 'c':
+ ret = strict_strtol(buf, 10, &param);
+ if (ret < 0)
+ return ret;
+
+ ac_usb = param;
+ switch (ac_usb) {
+ case 0:
+ /* Disable charging */
+ di->susp_status.ac_suspended = true;
+ di->susp_status.usb_suspended = true;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ case 1:
+ /* Enable AC Charging */
+ di->susp_status.ac_suspended = false;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ case 2:
+ /* Enable USB charging */
+ di->susp_status.usb_suspended = false;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ default:
+ dev_info(di->dev, "Wrong input\n"
+ "Enter 0. Disable AC/USB Charging\n"
+ "1. Enable AC charging\n"
+ "2. Enable USB Charging\n");
+ }
+ break;
+ }
+ return strlen(buf);
+}
+
+static struct attribute ab8500_chargalg_en_charger = {
+ .name = "chargalg",
+ .mode = S_IWUGO,
+};
+
+static struct attribute *ab8500_chargalg_chg[] = {
+ &ab8500_chargalg_en_charger,
+ NULL
+};
+
+static const struct sysfs_ops ab8500_chargalg_sysfs_ops = {
+ .store = ab8500_chargalg_sysfs_charger,
+};
+
+static struct kobj_type ab8500_chargalg_ktype = {
+ .sysfs_ops = &ab8500_chargalg_sysfs_ops,
+ .default_attrs = ab8500_chargalg_chg,
+};
+
+/**
+ * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * @di: pointer to the struct ab8500_chargalg
+ *
+ * This function removes the entry in sysfs.
+ */
+static void ab8500_chargalg_sysfs_exit(struct ab8500_chargalg *di)
+{
+ kobject_del(&di->chargalg_kobject);
+}
+
+/**
+ * ab8500_chargalg_sysfs_init() - init of sysfs entry
+ * @di: pointer to the struct ab8500_chargalg
+ *
+ * This function adds an entry in sysfs.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_chargalg_sysfs_init(struct ab8500_chargalg *di)
+{
+ int ret = 0;
+
+ ret = kobject_init_and_add(&di->chargalg_kobject,
+ &ab8500_chargalg_ktype,
+ NULL, "ab8500_chargalg");
+ if (ret < 0)
+ dev_err(di->dev, "failed to create sysfs entry\n");
+
+ return ret;
+}
+/* Exposure to the sysfs interface <<END>> */
+
+#if defined(CONFIG_PM)
+static int ab8500_chargalg_resume(struct platform_device *pdev)
+{
+ struct ab8500_chargalg *di = platform_get_drvdata(pdev);
+
+ /* Kick charger watchdog if charging (any charger online) */
+ if (di->chg_info.online_chg)
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
+
+ /*
+ * Run the charging algorithm directly to be sure we don't
+ * do it too seldom
+ */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+ return 0;
+}
+
+static int ab8500_chargalg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_chargalg *di = platform_get_drvdata(pdev);
+
+ if (di->chg_info.online_chg)
+ cancel_delayed_work_sync(&di->chargalg_wd_work);
+
+ cancel_delayed_work_sync(&di->chargalg_periodic_work);
+
+ return 0;
+}
+#else
+#define ab8500_chargalg_suspend NULL
+#define ab8500_chargalg_resume NULL
+#endif
+
+static int __devexit ab8500_chargalg_remove(struct platform_device *pdev)
+{
+ struct ab8500_chargalg *di = platform_get_drvdata(pdev);
+
+ /* sysfs interface to enable/disable charging from user space */
+ ab8500_chargalg_sysfs_exit(di);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->chargalg_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->chargalg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab8500_chargalg_probe(struct platform_device *pdev)
+{
+ struct ab8500_platform_data *plat;
+ int ret = 0;
+
+ struct ab8500_chargalg *di =
+ kzalloc(sizeof(struct ab8500_chargalg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+
+ plat = dev_get_platdata(di->parent->dev);
+
+ /* get chargalg specific platform data */
+ if (!plat->chargalg) {
+ dev_err(di->dev, "no chargalg platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->pdata = plat->chargalg;
+
+ /* get battery specific platform data */
+ if (!plat->battery) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->bat = plat->battery;
+
+ /* chargalg supply */
+ di->chargalg_psy.name = "ab8500_chargalg";
+ di->chargalg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->chargalg_psy.properties = ab8500_chargalg_props;
+ di->chargalg_psy.num_properties = ARRAY_SIZE(ab8500_chargalg_props);
+ di->chargalg_psy.get_property = ab8500_chargalg_get_property;
+ di->chargalg_psy.supplied_to = di->pdata->supplied_to;
+ di->chargalg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->chargalg_psy.external_power_changed =
+ ab8500_chargalg_external_power_changed;
+
+ /* Initialize safety timer */
+ init_timer(&di->safety_timer);
+ di->safety_timer.function = ab8500_chargalg_safety_timer_expired;
+ di->safety_timer.data = (unsigned long) di;
+
+ /* Initialize maintenance timer */
+ init_timer(&di->maintenance_timer);
+ di->maintenance_timer.function =
+ ab8500_chargalg_maintenance_timer_expired;
+ di->maintenance_timer.data = (unsigned long) di;
+
+ /* Create a work queue for the chargalg */
+ di->chargalg_wq =
+ create_singlethread_workqueue("ab8500_chargalg_wq");
+ if (di->chargalg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ goto free_device_info;
+ }
+
+ /* Init work for chargalg */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
+ ab8500_chargalg_periodic_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
+ ab8500_chargalg_wd_work);
+
+ /* Init work for chargalg */
+ INIT_WORK(&di->chargalg_work, ab8500_chargalg_work);
+
+ /* To detect charger at startup */
+ di->chg_info.prev_conn_chg = -1;
+
+ /* Register chargalg power supply class */
+ ret = power_supply_register(di->dev, &di->chargalg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register chargalg psy\n");
+ goto free_chargalg_wq;
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ /* sysfs interface to enable/disable charging from user space */
+ ret = ab8500_chargalg_sysfs_init(di);
+ if (ret) {
+ dev_err(di->dev, "failed to create sysfs entry\n");
+ goto free_psy;
+ }
+
+ /* Run the charging algorithm */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+ return ret;
+
+free_psy:
+ power_supply_unregister(&di->chargalg_psy);
+free_chargalg_wq:
+ destroy_workqueue(di->chargalg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab8500_chargalg_driver = {
+ .probe = ab8500_chargalg_probe,
+ .remove = __devexit_p(ab8500_chargalg_remove),
+ .suspend = ab8500_chargalg_suspend,
+ .resume = ab8500_chargalg_resume,
+ .driver = {
+ .name = "ab8500-chargalg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_chargalg_init(void)
+{
+ return platform_driver_register(&ab8500_chargalg_driver);
+}
+
+static void __exit ab8500_chargalg_exit(void)
+{
+ platform_driver_unregister(&ab8500_chargalg_driver);
+}
+
+module_init(ab8500_chargalg_init);
+module_exit(ab8500_chargalg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab8500-chargalg");
+MODULE_DESCRIPTION("AB8500 charging algorithm driver");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
new file mode 100644
index 00000000000..c927f32f9e4
--- /dev/null
+++ b/drivers/power/ab8500_charger.c
@@ -0,0 +1,2818 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Charger driver for AB8500
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: Karl Komierowski <karl.komierowski@stericsson.com>
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500/bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/ab8500/ux500_chargalg.h>
+#include <linux/usb/otg.h>
+#include <asm/mach-types.h>
+
+/* Charger constants */
+#define NO_PW_CONN 0
+#define AC_PW_CONN 1
+#define USB_PW_CONN 2
+
+#define MAIN_WDOG_ENA 0x01
+#define MAIN_WDOG_KICK 0x02
+#define MAIN_WDOG_DIS 0x00
+#define CHARG_WD_KICK 0x01
+#define MAIN_CH_ENA 0x01
+#define MAIN_CH_NO_OVERSHOOT_ENA_N 0x02
+#define USB_CH_ENA 0x01
+#define USB_CHG_NO_OVERSHOOT_ENA_N 0x02
+#define MAIN_CH_DET 0x01
+#define MAIN_CH_CV_ON 0x04
+#define USB_CH_CV_ON 0x08
+#define VBUS_DET_DBNC100 0x02
+#define VBUS_DET_DBNC1 0x01
+#define OTP_ENABLE_WD 0x01
+
+#define MAIN_CH_INPUT_CURR_SHIFT 4
+#define VBUS_IN_CURR_LIM_SHIFT 4
+
+#define LED_INDICATOR_PWM_ENA 0x01
+#define LED_INDICATOR_PWM_DIS 0x00
+#define LED_IND_CUR_5MA 0x04
+#define LED_INDICATOR_PWM_DUTY_252_256 0xBF
+
+/* HW failure constants */
+#define MAIN_CH_TH_PROT 0x02
+#define VBUS_CH_NOK 0x08
+#define USB_CH_TH_PROT 0x02
+#define VBUS_OVV_TH 0x01
+#define MAIN_CH_NOK 0x01
+#define VBUS_DET 0x80
+
+/* UsbLineStatus register bit masks */
+#define AB8500_USB_LINK_STATUS 0x78
+#define AB8500_STD_HOST_SUSP 0x18
+
+/* Watchdog timeout constant */
+#define WD_TIMER 0x30 /* 4min */
+#define WD_KICK_INTERVAL (60 * HZ)
+
+/* Lowest charger voltage is 3.39V -> 0x4E */
+#define LOW_VOLT_REG 0x4E
+
+/* UsbLineStatus register - usb types */
+enum ab8500_charger_link_status {
+ USB_STAT_NOT_CONFIGURED,
+ USB_STAT_STD_HOST_NC,
+ USB_STAT_STD_HOST_C_NS,
+ USB_STAT_STD_HOST_C_S,
+ USB_STAT_HOST_CHG_NM,
+ USB_STAT_HOST_CHG_HS,
+ USB_STAT_HOST_CHG_HS_CHIRP,
+ USB_STAT_DEDICATED_CHG,
+ USB_STAT_ACA_RID_A,
+ USB_STAT_ACA_RID_B,
+ USB_STAT_ACA_RID_C_NM,
+ USB_STAT_ACA_RID_C_HS,
+ USB_STAT_ACA_RID_C_HS_CHIRP,
+ USB_STAT_HM_IDGND,
+ USB_STAT_RESERVED,
+ USB_STAT_NOT_VALID_LINK,
+};
+
+enum ab8500_usb_state {
+ AB8500_BM_USB_STATE_RESET_HS, /* HighSpeed Reset */
+ AB8500_BM_USB_STATE_RESET_FS, /* FullSpeed/LowSpeed Reset */
+ AB8500_BM_USB_STATE_CONFIGURED,
+ AB8500_BM_USB_STATE_SUSPEND,
+ AB8500_BM_USB_STATE_RESUME,
+ AB8500_BM_USB_STATE_MAX,
+};
+
+/* VBUS input current limits supported in AB8500 in mA */
+#define USB_CH_IP_CUR_LVL_0P05 50
+#define USB_CH_IP_CUR_LVL_0P09 98
+#define USB_CH_IP_CUR_LVL_0P19 193
+#define USB_CH_IP_CUR_LVL_0P29 290
+#define USB_CH_IP_CUR_LVL_0P38 380
+#define USB_CH_IP_CUR_LVL_0P45 450
+#define USB_CH_IP_CUR_LVL_0P5 500
+#define USB_CH_IP_CUR_LVL_0P6 600
+#define USB_CH_IP_CUR_LVL_0P7 700
+#define USB_CH_IP_CUR_LVL_0P8 800
+#define USB_CH_IP_CUR_LVL_0P9 900
+#define USB_CH_IP_CUR_LVL_1P0 1000
+#define USB_CH_IP_CUR_LVL_1P1 1100
+#define USB_CH_IP_CUR_LVL_1P3 1300
+#define USB_CH_IP_CUR_LVL_1P4 1400
+#define USB_CH_IP_CUR_LVL_1P5 1500
+
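+/* Battery voltage threshold (in mV) below which the VBUS input current is reduced */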
+#define VBAT_TRESH_IP_CUR_RED 3800
+
+#define to_ab8500_charger_usb_device_info(x) container_of((x), \
+ struct ab8500_charger, usb_chg)
+#define to_ab8500_charger_ac_device_info(x) container_of((x), \
+ struct ab8500_charger, ac_chg)
+
+/**
+ * struct ab8500_charger_interrupts - ab8500 charger interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab8500_charger_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab8500_charger_info {
+ int charger_connected;
+ int charger_online;
+ int charger_voltage;
+ int cv_active;
+ bool wd_expired;
+};
+
+struct ab8500_charger_event_flags {
+ bool mainextchnotok;
+ bool main_thermal_prot;
+ bool usb_thermal_prot;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool chgwdexp;
+ bool vbus_collapse;
+};
+
+struct ab8500_charger_usb_state {
+ bool usb_changed;
+ int usb_current;
+ enum ab8500_usb_state state;
+ spinlock_t usb_lock;
+};
+
+/**
+ * struct ab8500_charger - ab8500 Charger device information
+ * @dev: Pointer to the structure device
+ * @chip_id: Chip-Id of the AB8500
+ * @max_usb_in_curr: Max USB charger input current
+ * @vbus_detected: VBUS detected
+ * @vbus_detected_start:
+ * VBUS detected during startup
+ * @ac_conn: This will be true when the AC charger has been plugged
+ * @vddadc_en_ac: Indicate if VDD ADC supply is enabled because AC
+ * charger is enabled
+ * @vddadc_en_usb: Indicate if VDD ADC supply is enabled because USB
+ * charger is enabled
+ * @vbat: Battery voltage
+ * @old_vbat: Previously measured battery voltage
+ * @autopower: Indicate if we should have automatic power-on after power loss
+ * @parent: Pointer to the struct ab8500
+ * @gpadc: Pointer to the struct gpadc
+ * @pdata: Pointer to the ab8500_charger platform data
+ * @bat: Pointer to the ab8500_bm platform data
+ * @flags: Structure for information about events triggered
+ * @usb_state: Structure for usb stack information
+ * @ac_chg: AC charger power supply
+ * @usb_chg: USB charger power supply
+ * @ac: Structure that holds the AC charger properties
+ * @usb: Structure that holds the USB charger properties
+ * @regu: Pointer to the struct regulator
+ * @charger_wq: Work queue for the IRQs and checking HW state
+ * @check_vbat_work: Work for checking vbat threshold to adjust vbus current
+ * @check_hw_failure_work: Work for checking HW state
+ * @check_usbchgnotok_work: Work for checking USB charger not ok status
+ * @kick_wd_work: Work for kicking the charger watchdog in case
+ * of ABB rev 1.* due to the watchdog logic bug
+ * @ac_work: Work for checking AC charger connection
+ * @detect_usb_type_work: Work for detecting the USB type connected
+ * @usb_link_status_work: Work for checking the new USB link status
+ * @usb_state_changed_work: Work for checking USB state
+ * @check_main_thermal_prot_work:
+ * Work for checking Main thermal status
+ * @check_usb_thermal_prot_work:
+ * Work for checking USB thermal status
+ */
+struct ab8500_charger {
+ struct device *dev;
+ u8 chip_id;
+ int max_usb_in_curr;
+ bool vbus_detected;
+ bool vbus_detected_start;
+ bool ac_conn;
+ bool vddadc_en_ac;
+ bool vddadc_en_usb;
+ int vbat;
+ int old_vbat;
+ bool autopower;
+ struct ab8500 *parent;
+ struct ab8500_gpadc *gpadc;
+ struct ab8500_charger_platform_data *pdata;
+ struct ab8500_bm_data *bat;
+ struct ab8500_charger_event_flags flags;
+ struct ab8500_charger_usb_state usb_state;
+ struct ux500_charger ac_chg;
+ struct ux500_charger usb_chg;
+ struct ab8500_charger_info ac;
+ struct ab8500_charger_info usb;
+ struct regulator *regu;
+ struct workqueue_struct *charger_wq;
+ struct delayed_work check_vbat_work;
+ struct delayed_work check_hw_failure_work;
+ struct delayed_work check_usbchgnotok_work;
+ struct delayed_work kick_wd_work;
+ struct work_struct ac_work;
+ struct work_struct detect_usb_type_work;
+ struct work_struct usb_link_status_work;
+ struct work_struct usb_state_changed_work;
+ struct work_struct check_main_thermal_prot_work;
+ struct work_struct check_usb_thermal_prot_work;
+ struct otg_transceiver *otg;
+ struct notifier_block nb;
+};
+
+/* AC properties */
+static enum power_supply_property ab8500_charger_ac_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/* USB properties */
+static enum power_supply_property ab8500_charger_usb_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/**
+ * ab8500_power_loss_handling - set how we handle powerloss.
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Magic numbers are from STE HW department.
+ */
+static void ab8500_power_loss_handling(struct ab8500_charger *di)
+{
+ int loop;
+ u8 banksize = 0xF;
+ u8 regs[banksize];
+
+ for (loop = 0 ; loop < banksize; loop++)
+ (void) abx500_get_register_interruptible(di->dev,
+ 0x15, loop,
+ &regs[loop]);
+
+
+ (void) abx500_set_register_interruptible(di->dev, 0x11, 0, 0x2);
+ (void) abx500_set_register_interruptible(di->dev, 0x14, 0xB1, 0x2);
+ regs[0xC] &= ~0x4;
+ dev_dbg(di->dev, "Autopower %s\n", di->autopower ? "on" : "off");
+
+ if (di->autopower)
+ regs[0x0] |= 0x8;
+ else
+ regs[0x0] &= ~0x8;
+
+ for (loop = 0 ; loop < banksize; loop++)
+ (void) abx500_set_register_interruptible(di->dev, 0x15,
+ loop,
+ regs[loop]);
+
+ (void) abx500_set_register_interruptible(di->dev, 0x14, 0xB1, 0x03);
+}
+
+/**
+ * ab8500_power_supply_changed - a wrapper with local extensions for
+ * power_supply_changed
+ * @di: pointer to the ab8500_charger structure
+ * @psy: pointer to the power_supply that has changed
+ *
+ */
+static void ab8500_power_supply_changed(struct ab8500_charger *di,
+ struct power_supply *psy)
+{
+ if (di->pdata->autopower_cfg) {
+ if (!di->usb.charger_connected &&
+ !di->ac.charger_connected &&
+ di->autopower) {
+ di->autopower = false;
+ ab8500_power_loss_handling(di);
+ } else if (!di->autopower &&
+ (di->ac.charger_connected ||
+ di->usb.charger_connected)) {
+ di->autopower = true;
+ ab8500_power_loss_handling(di);
+ }
+ }
+ power_supply_changed(psy);
+}
+
+static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
+ bool connected)
+{
+ if (connected != di->usb.charger_connected) {
+ dev_dbg(di->dev, "USB connected:%i\n", connected);
+ di->usb.charger_connected = connected;
+ sysfs_notify(&di->usb_chg.psy.dev->kobj, NULL, "present");
+ }
+}
+
+/**
+ * ab8500_charger_get_ac_voltage() - get ac charger voltage
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns ac charger voltage (on success)
+ */
+static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
+{
+ int vch;
+
+ /* Only measure voltage if the charger is connected */
+ if (di->ac.charger_connected) {
+ vch = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_V);
+ if (vch < 0)
+ dev_err(di->dev, "%s gpadc conv failed,\n", __func__);
+ } else {
+ vch = 0;
+ }
+ return vch;
+}
+
+/**
+ * ab8500_charger_ac_cv() - check if the main charger is in CV mode
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns ac charger CV mode (on success) else error code
+ */
+static int ab8500_charger_ac_cv(struct ab8500_charger *di)
+{
+ u8 val;
+ int ret = 0;
+
+ /* Only check CV mode if the charger is online */
+ if (di->ac.charger_online) {
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_STATUS1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return 0;
+ }
+
+ if (val & MAIN_CH_CV_ON)
+ ret = 1;
+ else
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_get_vbus_voltage() - get vbus voltage
+ * @di: pointer to the ab8500_charger structure
+ *
+ * This function returns the vbus voltage.
+ * Returns vbus voltage (on success)
+ */
+static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
+{
+ int vch;
+
+ /* Only measure voltage if the charger is connected */
+ if (di->usb.charger_connected) {
+ vch = ab8500_gpadc_convert(di->gpadc, VBUS_V);
+ if (vch < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ vch = 0;
+ }
+ return vch;
+}
+
+/**
+ * ab8500_charger_get_usb_current() - get usb charger current
+ * @di: pointer to the ab8500_charger structure
+ *
+ * This function returns the usb charger current.
+ * Returns usb current (on success) and error code on failure
+ */
+static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
+{
+ int ich;
+
+ /* Only measure current if the charger is online */
+ if (di->usb.charger_online) {
+ ich = ab8500_gpadc_convert(di->gpadc, USB_CHARGER_C);
+ if (ich < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ ich = 0;
+ }
+ return ich;
+}
+
+/**
+ * ab8500_charger_get_ac_current() - get ac charger current
+ * @di: pointer to the ab8500_charger structure
+ *
+ * This function returns the ac charger current.
+ * Returns ac current (on success) and error code on failure.
+ */
+static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
+{
+ int ich;
+
+ /* Only measure current if the charger is online */
+ if (di->ac.charger_online) {
+ ich = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_C);
+ if (ich < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ ich = 0;
+ }
+ return ich;
+}
+
+/**
+ * ab8500_charger_usb_cv() - check if the usb charger is in CV mode
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns ac charger CV mode (on success) else error code
+ */
+static int ab8500_charger_usb_cv(struct ab8500_charger *di)
+{
+ int ret;
+ u8 val;
+
+ /* Only check CV mode if the charger is online */
+ if (di->usb.charger_online) {
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_USBCH_STAT1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return 0;
+ }
+
+ if (val & USB_CH_CV_ON)
+ ret = 1;
+ else
+ ret = 0;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_detect_chargers() - Detect the connected chargers
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Returns the type of charger connected.
+ * For USB this does not mean that we can actually charge from it,
+ * only that there is a USB cable connected that we have to
+ * identify. This is used during startup when we don't get the
+ * charger detection interrupts.
+ *
+ * Returns an integer value, that means,
+ * NO_PW_CONN no power supply is connected
+ * AC_PW_CONN if the AC power supply is connected
+ * USB_PW_CONN if the USB power supply is connected
+ * AC_PW_CONN + USB_PW_CONN if USB and AC power supplies are both connected
+ */
+static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
+{
+ int result = NO_PW_CONN;
+ int ret;
+ u8 val;
+
+ /* Check for AC charger */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_STATUS1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+
+ if (val & MAIN_CH_DET)
+ result = AC_PW_CONN;
+
+ /* Check for USB charger */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_USBCH_STAT1_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+
+ if ((val & VBUS_DET_DBNC1) && (val & VBUS_DET_DBNC100))
+ result |= USB_PW_CONN;
+
+ return result;
+}
+
+/**
+ * ab8500_charger_max_usb_curr() - get the max curr for the USB type
+ * @di: pointer to the ab8500_charger structure
+ * @link_status: the identified USB type
+ *
+ * Get the maximum current that is allowed to be drawn from the host
+ * based on the USB type.
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
+ enum ab8500_charger_link_status link_status)
+{
+ int ret = 0;
+
+ switch (link_status) {
+ case USB_STAT_STD_HOST_NC:
+ case USB_STAT_STD_HOST_C_NS:
+ case USB_STAT_STD_HOST_C_S:
+ dev_dbg(di->dev, "USB Type - Standard host is "
+ "detected through USB driver\n");
+ ret = -1;
+ break;
+ case USB_STAT_HOST_CHG_HS_CHIRP:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ case USB_STAT_HOST_CHG_HS:
+ case USB_STAT_ACA_RID_C_HS:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
+ break;
+ case USB_STAT_ACA_RID_A:
+ /*
+ * Dedicated charger level minus maximum current accessory
+ * can consume (300mA). Closest level is 1100mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
+ break;
+ case USB_STAT_ACA_RID_B:
+ /*
+ * Dedicated charger level minus 120mA (20mA for ACA and
+ * 100mA for potential accessory). Closest level is 1300mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
+ break;
+ case USB_STAT_DEDICATED_CHG:
+ case USB_STAT_HOST_CHG_NM:
+ case USB_STAT_ACA_RID_C_HS_CHIRP:
+ case USB_STAT_ACA_RID_C_NM:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+ break;
+ case USB_STAT_RESERVED:
+ /*
+ * This state is used to indicate that VBUS has dropped below
+ * the detection level 4 times in a row. This is because the
+ * charger output current is set too high, making the charger
+ * voltage collapse. This has to be propagated through to
+ * chargalg. This is done using the property
+ * POWER_SUPPLY_PROP_CURRENT_AVG = 1
+ */
+ di->flags.vbus_collapse = true;
+ dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
+ "VBUS has collapsed\n");
+ ret = -1;
+ break;
+ case USB_STAT_HM_IDGND:
+ case USB_STAT_NOT_CONFIGURED:
+ case USB_STAT_NOT_VALID_LINK:
+ dev_err(di->dev, "USB Type - Charging not allowed\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ default:
+ dev_err(di->dev, "USB Type - Unknown\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ };
+
+ dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
+ link_status, di->max_usb_in_curr);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_read_usb_type() - read the type of usb connected
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_read_usb_type(struct ab8500_charger *di)
+{
+ int ret;
+ u8 val;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+ ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+ AB8500_USB_LINE_STAT_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+
+ /* get the USB type */
+ val = (val & AB8500_USB_LINK_STATUS) >> 3;
+ ret = ab8500_charger_max_usb_curr(di,
+ (enum ab8500_charger_link_status) val);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_detect_usb_type() - get the type of usb connected
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_charger_detect_usb_type(struct ab8500_charger *di)
+{
+ int i, ret;
+ u8 val;
+
+ /*
+ * On getting the VBUS rising edge detect interrupt there
+ * is a 250ms delay after which the register UsbLineStatus
+ * is filled with valid data.
+ */
+ for (i = 0; i < 10; i++) {
+ msleep(250);
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG,
+ &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+ ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+ AB8500_USB_LINE_STAT_REG, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return ret;
+ }
+ /*
+ * The UsbLineStatus register is not updated until the IT
+ * source register has been read, hence the read above.
+ * Revisit this.
+ */
+
+ /* get the USB type */
+ val = (val & AB8500_USB_LINK_STATUS) >> 3;
+ if (val)
+ break;
+ }
+ ret = ab8500_charger_max_usb_curr(di,
+ (enum ab8500_charger_link_status) val);
+
+ return ret;
+}
+
+/*
+ * This array maps the raw hex value to charger voltage used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_voltage_map[] = {
+ 3500 ,
+ 3525 ,
+ 3550 ,
+ 3575 ,
+ 3600 ,
+ 3625 ,
+ 3650 ,
+ 3675 ,
+ 3700 ,
+ 3725 ,
+ 3750 ,
+ 3775 ,
+ 3800 ,
+ 3825 ,
+ 3850 ,
+ 3875 ,
+ 3900 ,
+ 3925 ,
+ 3950 ,
+ 3975 ,
+ 4000 ,
+ 4025 ,
+ 4050 ,
+ 4060 ,
+ 4070 ,
+ 4080 ,
+ 4090 ,
+ 4100 ,
+ 4110 ,
+ 4120 ,
+ 4130 ,
+ 4140 ,
+ 4150 ,
+ 4160 ,
+ 4170 ,
+ 4180 ,
+ 4190 ,
+ 4200 ,
+ 4210 ,
+ 4220 ,
+ 4230 ,
+ 4240 ,
+ 4250 ,
+ 4260 ,
+ 4270 ,
+ 4280 ,
+ 4290 ,
+ 4300 ,
+ 4310 ,
+ 4320 ,
+ 4330 ,
+ 4340 ,
+ 4350 ,
+ 4360 ,
+ 4370 ,
+ 4380 ,
+ 4390 ,
+ 4400 ,
+ 4410 ,
+ 4420 ,
+ 4430 ,
+ 4440 ,
+ 4450 ,
+ 4460 ,
+ 4470 ,
+ 4480 ,
+ 4490 ,
+ 4500 ,
+ 4510 ,
+ 4520 ,
+ 4530 ,
+ 4540 ,
+ 4550 ,
+ 4560 ,
+ 4570 ,
+ 4580 ,
+ 4590 ,
+ 4600 ,
+};
+
+/*
+ * This array maps the raw hex value to charger current used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_current_map[] = {
+ 100 ,
+ 200 ,
+ 300 ,
+ 400 ,
+ 500 ,
+ 600 ,
+ 700 ,
+ 800 ,
+ 900 ,
+ 1000 ,
+ 1100 ,
+ 1200 ,
+ 1300 ,
+ 1400 ,
+ 1500 ,
+};
+
+/*
+ * This array maps the raw hex value to VBUS input current used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_charger_vbus_in_curr_map[] = {
+ USB_CH_IP_CUR_LVL_0P05,
+ USB_CH_IP_CUR_LVL_0P09,
+ USB_CH_IP_CUR_LVL_0P19,
+ USB_CH_IP_CUR_LVL_0P29,
+ USB_CH_IP_CUR_LVL_0P38,
+ USB_CH_IP_CUR_LVL_0P45,
+ USB_CH_IP_CUR_LVL_0P5,
+ USB_CH_IP_CUR_LVL_0P6,
+ USB_CH_IP_CUR_LVL_0P7,
+ USB_CH_IP_CUR_LVL_0P8,
+ USB_CH_IP_CUR_LVL_0P9,
+ USB_CH_IP_CUR_LVL_1P0,
+ USB_CH_IP_CUR_LVL_1P1,
+ USB_CH_IP_CUR_LVL_1P3,
+ USB_CH_IP_CUR_LVL_1P4,
+ USB_CH_IP_CUR_LVL_1P5,
+};
+
+static int ab8500_voltage_to_regval(int voltage)
+{
+ int i;
+
+ /* Special case for voltage below 3.5V */
+ if (voltage < ab8500_charger_voltage_map[0])
+ return LOW_VOLT_REG;
+
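+ /* Return the highest index whose mapped voltage does not exceed the request */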
+ for (i = 1; i < ARRAY_SIZE(ab8500_charger_voltage_map); i++) {
+ if (voltage < ab8500_charger_voltage_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab8500_charger_voltage_map) - 1;
+ if (voltage == ab8500_charger_voltage_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab8500_current_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab8500_charger_current_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_current_map); i++) {
+ if (curr < ab8500_charger_current_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab8500_charger_current_map) - 1;
+ if (curr == ab8500_charger_current_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab8500_vbus_in_curr_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab8500_charger_vbus_in_curr_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_vbus_in_curr_map); i++) {
+ if (curr < ab8500_charger_vbus_in_curr_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab8500_charger_vbus_in_curr_map) - 1;
+ if (curr == ab8500_charger_vbus_in_curr_map[i])
+ return i;
+ else
+ return -1;
+}
+
+/**
+ * ab8500_charger_get_usb_cur() - get usb current
+ * @di: pointer to the ab8500_charger structure
+ *
+ * The usb stack provides the maximum current that can be drawn from
+ * the standard usb host. This will be in mA.
+ * This function converts current in mA to a value that can be written
+ * to the register. Returns -1 if charging is not allowed
+ */
+static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
+{
+ switch (di->usb_state.usb_current) {
+ case 100:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ break;
+ case 200:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P19;
+ break;
+ case 300:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P29;
+ break;
+ case 400:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P38;
+ break;
+ case 500:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ default:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ return -1;
+ break;
+ };
+ return 0;
+}
+
+/**
+ * ab8500_charger_set_vbus_in_curr() - set VBUS input current limit
+ * @di: pointer to the ab8500_charger structure
+ * @ich_in: charger input current limit
+ *
+ * Sets the current that can be drawn from the USB host
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
+ int ich_in)
+{
+ int ret;
+ int input_curr_index;
+ int min_value;
+
+ /* We should always use the lowest current limit */
+ min_value = min(di->bat->chg_params->usb_curr_max, ich_in);
+
+ switch (min_value) {
+ case 100:
+ if (di->vbat < VBAT_TRESH_IP_CUR_RED)
+ min_value = USB_CH_IP_CUR_LVL_0P05;
+ break;
+ case 500:
+ if (di->vbat < VBAT_TRESH_IP_CUR_RED)
+ min_value = USB_CH_IP_CUR_LVL_0P45;
+ break;
+ default:
+ break;
+ }
+
+ input_curr_index = ab8500_vbus_in_curr_to_regval(min_value);
+ if (input_curr_index < 0) {
+ dev_err(di->dev, "VBUS input current limit too high\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_USBCH_IPT_CRNTLVL_REG,
+ input_curr_index << VBUS_IN_CURR_LIM_SHIFT);
+ if (ret)
+ dev_err(di->dev, "%s write failed\n", __func__);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_led_en() - turn on/off charging led
+ * @di: pointer to the ab8500_charger structure
+ * @on: flag to turn on/off the charging led
+ *
+ * Power ON/OFF charging LED indication
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_charger_led_en(struct ab8500_charger *di, int on)
+{
+ int ret;
+
+ if (on) {
+ /* Power ON charging LED indicator, set LED current to 5mA */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_LED_INDICATOR_PWM_CTRL,
+ (LED_IND_CUR_5MA | LED_INDICATOR_PWM_ENA));
+ if (ret) {
+ dev_err(di->dev, "Power ON LED failed\n");
+ return ret;
+ }
+ /* LED indicator PWM duty cycle 252/256 */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_LED_INDICATOR_PWM_DUTY,
+ LED_INDICATOR_PWM_DUTY_252_256);
+ if (ret) {
+ dev_err(di->dev, "Set LED PWM duty cycle failed\n");
+ return ret;
+ }
+ } else {
+ /* Power off charging LED indicator */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_LED_INDICATOR_PWM_CTRL,
+ LED_INDICATOR_PWM_DIS);
+ if (ret) {
+ dev_err(di->dev, "Power-off LED failed\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_ac_en() - enable or disable ac charging
+ * @charger: pointer to the ux500_charger structure
+ * @enable: enable/disable flag
+ * @vset: charging voltage
+ * @iset: charging current
+ *
+ * Enable/Disable AC/Mains charging and turn the charging led
+ * on/off accordingly.
+ */
+static int ab8500_charger_ac_en(struct ux500_charger *charger,
+ int enable, int vset, int iset)
+{
+ int ret;
+ int volt_index;
+ int curr_index;
+ int input_curr_index;
+ u8 overshoot = 0;
+
+ struct ab8500_charger *di = to_ab8500_charger_ac_device_info(charger);
+
+ if (enable) {
+ /* Check if AC is connected */
+ if (!di->ac.charger_connected) {
+ dev_err(di->dev, "AC charger not connected\n");
+ return -ENXIO;
+ }
+
+ /* Enable AC charging */
+ dev_dbg(di->dev, "Enable AC: %dmV %dmA\n", vset, iset);
+
+ /*
+ * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
+ * will be triggered every time we enable the VDD ADC supply.
+ * This will turn off charging for a short while.
+ * It can be avoided by having the supply on when
+ * there is a charger enabled. Normally the VDD ADC supply
+ * is enabled every time a GPADC conversion is triggered. We will
+ * force it to be enabled from this driver to have
+ * the GPADC module independent of the AB8500 chargers
+ */
+ if (!di->vddadc_en_ac) {
+ regulator_enable(di->regu);
+ di->vddadc_en_ac = true;
+ }
+
+ /* Check if the requested voltage or current is valid */
+ volt_index = ab8500_voltage_to_regval(vset);
+ curr_index = ab8500_current_to_regval(iset);
+ input_curr_index = ab8500_current_to_regval(
+ di->bat->chg_params->ac_curr_max);
+ if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) {
+ dev_err(di->dev,
+ "Charger voltage or current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ /* ChVoltLevel: maximum battery charging voltage */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* MainChInputCurr: current that can be drawn from the charger*/
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_MCH_IPT_CURLVL_REG,
+ input_curr_index << MAIN_CH_INPUT_CURR_SHIFT);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* ChOutputCurrentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* Check if VBAT overshoot control should be enabled */
+ if (!di->bat->enable_overshoot)
+ overshoot = MAIN_CH_NO_OVERSHOOT_ENA_N;
+
+ /* Enable Main Charger */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_MCH_CTRL1, MAIN_CH_ENA | overshoot);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* Power on charging LED indication */
+ ret = ab8500_charger_led_en(di, true);
+ if (ret < 0)
+ dev_err(di->dev, "failed to enable LED\n");
+
+ di->ac.charger_online = 1;
+ } else {
+ /* Disable AC charging */
+
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ /*
+ * For ABB revision 1.0 and 1.1 there is a bug in the
+ * watchdog logic. That means we have to continuously
+ * kick the charger watchdog even when no charger is
+ * connected. This is only valid once the AC charger
+ * has been enabled. This is a bug that is not handled
+ * by the algorithm and the watchdog has to be kicked
+ * by the charger driver when the AC charger
+ * is disabled
+ */
+ if (di->ac_conn) {
+ queue_delayed_work(di->charger_wq,
+ &di->kick_wd_work,
+ round_jiffies(WD_KICK_INTERVAL));
+ }
+
+ /*
+ * We can't turn off charging completely
+ * due to a bug in AB8500 cut1.
+ * If we do, charging will not start again.
+ * That is why we set the lowest voltage
+ * and current possible
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_REG, CH_VOL_LVL_3P5);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P1);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+ break;
+
+ case AB8500_CUT2P0:
+ default:
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_MCH_CTRL1, 0);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+ break;
+ }
+
+ ret = ab8500_charger_led_en(di, false);
+ if (ret < 0)
+ dev_err(di->dev, "failed to disable LED\n");
+
+ di->ac.charger_online = 0;
+ di->ac.wd_expired = false;
+
+ /* Disable regulator if enabled */
+ if (di->vddadc_en_ac) {
+ regulator_disable(di->regu);
+ di->vddadc_en_ac = false;
+ }
+
+ dev_dbg(di->dev, "%s Disabled AC charging\n", __func__);
+ }
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_usb_en() - enable usb charging
+ * @charger: pointer to the ux500_charger structure
+ * @enable: enable/disable flag
+ * @vset: charging voltage
+ * @ich_out: charger output current
+ *
+ * Enable/Disable USB charging and turn the charging led on/off accordingly.
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_charger_usb_en(struct ux500_charger *charger,
+ int enable, int vset, int ich_out)
+{
+ int ret;
+ int volt_index;
+ int curr_index;
+ u8 overshoot = 0;
+
+ struct ab8500_charger *di = to_ab8500_charger_usb_device_info(charger);
+
+ if (enable) {
+ /* Check if USB is connected */
+ if (!di->usb.charger_connected) {
+ dev_err(di->dev, "USB charger not connected\n");
+ return -ENXIO;
+ }
+
+ /*
+ * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
+ * will be triggered every time we enable the VDD ADC supply.
+ * This will turn off charging for a short while.
+ * It can be avoided by having the supply on when
+ * there is a charger enabled. Normally the VDD ADC supply
+ * is enabled every time a GPADC conversion is triggered. We will
+ * force it to be enabled from this driver to have
+ * the GPADC module independent of the AB8500 chargers
+ */
+ if (!di->vddadc_en_usb) {
+ regulator_enable(di->regu);
+ di->vddadc_en_usb = true;
+ }
+
+ /* Enable USB charging */
+ dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out);
+
+ /* Check if the requested voltage or current is valid */
+ volt_index = ab8500_voltage_to_regval(vset);
+ curr_index = ab8500_current_to_regval(ich_out);
+ if (volt_index < 0 || curr_index < 0) {
+ dev_err(di->dev,
+ "Charger voltage or current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ /* ChVoltLevel: max voltage up to which the battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* USBChInputCurr: current that can be drawn from the usb */
+ ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ if (ret) {
+ dev_err(di->dev, "setting USBChInputCurr failed\n");
+ return ret;
+ }
+ /* ChOutputCurrentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ /* Check if VBAT overshoot control should be enabled */
+ if (!di->bat->enable_overshoot)
+ overshoot = USB_CHG_NO_OVERSHOOT_ENA_N;
+
+ /* Enable USB Charger */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_USBCH_CTRL1_REG, USB_CH_ENA | overshoot);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* If success power on charging LED indication */
+ ret = ab8500_charger_led_en(di, true);
+ if (ret < 0)
+ dev_err(di->dev, "failed to enable LED\n");
+
+ queue_delayed_work(di->charger_wq, &di->check_vbat_work, HZ);
+
+ di->usb.charger_online = 1;
+ } else {
+ /* Disable USB charging */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_USBCH_CTRL1_REG, 0);
+ if (ret) {
+ dev_err(di->dev,
+ "%s write failed\n", __func__);
+ return ret;
+ }
+
+ ret = ab8500_charger_led_en(di, false);
+ if (ret < 0)
+ dev_err(di->dev, "failed to disable LED\n");
+
+ di->usb.charger_online = 0;
+ di->usb.wd_expired = false;
+
+ /* Disable regulator if enabled */
+ if (di->vddadc_en_usb) {
+ regulator_disable(di->regu);
+ di->vddadc_en_usb = false;
+ }
+
+ dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
+
+ /* Cancel any pending Vbat check work */
+ if (delayed_work_pending(&di->check_vbat_work))
+ cancel_delayed_work(&di->check_vbat_work);
+
+ }
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_watchdog_kick() - kick charger watchdog
+ * @charger: pointer to the ux500_charger structure
+ *
+ * Kick charger watchdog
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_charger_watchdog_kick(struct ux500_charger *charger)
+{
+ int ret;
+ struct ab8500_charger *di;
+
+ if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+ di = to_ab8500_charger_ac_device_info(charger);
+ else if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab8500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ return ret;
+}
+
+/**
+ * ab8500_charger_update_charger_current() - update charger current
+ * @charger: pointer to the ux500_charger structure
+ * @ich_out: charger output current
+ *
+ * Update the charger output current for the specified charger
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
+ int ich_out)
+{
+ int ret;
+ int curr_index;
+ struct ab8500_charger *di;
+
+ if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+ di = to_ab8500_charger_ac_device_info(charger);
+ else if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab8500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ curr_index = ab8500_current_to_regval(ich_out);
+ if (curr_index < 0) {
+ dev_err(di->dev,
+ "Charger current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ /* Reset the main and usb drop input current measurement counter */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARGER_CTRL,
+ 0x1);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
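+/*
+ * class_for_each_device() callback used by the vbat check work: reads
+ * POWER_SUPPLY_PROP_VOLTAGE_NOW from a battery type power supply that
+ * names this charger in its supplied_to list and stores it in di->vbat.
+ */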
+static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_charger *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+ struct ux500_charger *usb_chg;
+
+ usb_chg = (struct ux500_charger *)data;
+ psy = &usb_chg->psy;
+
+ di = to_ab8500_charger_usb_device_info(usb_chg);
+
+ ext = dev_get_drvdata(dev);
+
+ /* For all psy where the driver name appears in any supplied_to */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->vbat = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_check_vbat_work() - keep vbus current within spec
+ * @work: pointer to the work_struct structure
+ *
+ * Due to an ASIC bug it is necessary to lower the input current to the vbus
+ * charger when charging at some specific current levels. This issue only
+ * applies below a certain battery voltage. This function makes sure that
+ * the allowed current limit isn't exceeded.
+ */
+static void ab8500_charger_check_vbat_work(struct work_struct *work)
+{
+ int t = 10;
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_vbat_work.work);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->usb_chg.psy, ab8500_charger_get_ext_psy_data);
+
+ /* First run old_vbat is 0. */
+ if (di->old_vbat == 0)
+ di->old_vbat = di->vbat;
+
+ if (!((di->old_vbat <= VBAT_TRESH_IP_CUR_RED &&
+ di->vbat <= VBAT_TRESH_IP_CUR_RED) ||
+ (di->old_vbat > VBAT_TRESH_IP_CUR_RED &&
+ di->vbat > VBAT_TRESH_IP_CUR_RED))) {
+
+ dev_dbg(di->dev, "Vbat did cross threshold, curr: %d, new: %d,"
+ " old: %d\n", di->max_usb_in_curr, di->vbat,
+ di->old_vbat);
+ ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ power_supply_changed(&di->usb_chg.psy);
+ }
+
+ di->old_vbat = di->vbat;
+
+ /*
+ * No need to check the battery voltage every second when not close to
+ * the threshold.
+ */
+ if (di->vbat < (VBAT_TRESH_IP_CUR_RED + 100) &&
+ (di->vbat > (VBAT_TRESH_IP_CUR_RED - 100)))
+ t = 1;
+
+ queue_delayed_work(di->charger_wq, &di->check_vbat_work, t * HZ);
+}
+
+/**
+ * ab8500_charger_check_hw_failure_work() - check main charger failure
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab8500_charger_check_hw_failure_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_hw_failure_work.work);
+
+ /* Check if the status bits for HW failure is still active */
+ if (di->flags.mainextchnotok) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_STATUS2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (!(reg_value & MAIN_CH_NOK)) {
+ di->flags.mainextchnotok = false;
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+ }
+ }
+ if (di->flags.vbus_ovv) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG,
+ &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (!(reg_value & VBUS_OVV_TH)) {
+ di->flags.vbus_ovv = false;
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ }
+ }
+ /* If we still have a failure, schedule a new check */
+ if (di->flags.mainextchnotok || di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, round_jiffies(HZ));
+ }
+}
+
+/**
+ * ab8500_charger_kick_watchdog_work() - kick the watchdog
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog.
+ *
+ * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+ * logic. That means we have to continuously kick the charger
+ * watchdog even when no charger is connected. This is only
+ * valid once the AC charger has been enabled. This is
+ * a bug that is not handled by the algorithm and the
+ * watchdog has to be kicked by the charger driver
+ * when the AC charger is disabled
+ */
+static void ab8500_charger_kick_watchdog_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, kick_wd_work.work);
+
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ /* Schedule a new watchdog kick */
+ queue_delayed_work(di->charger_wq,
+ &di->kick_wd_work, round_jiffies(WD_KICK_INTERVAL));
+}
+
+/**
+ * ab8500_charger_ac_work() - work to get and set main charger status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab8500_charger_ac_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, ac_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if the main charger is
+ * connected by reading the status register
+ */
+ ret = ab8500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (ret & AC_PW_CONN) {
+ di->ac.charger_connected = 1;
+ di->ac_conn = true;
+ } else {
+ di->ac.charger_connected = 0;
+ }
+
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+ sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
+}
+
+/**
+ * ab8500_charger_detect_usb_type_work() - work to detect USB type
+ * @work: Pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+void ab8500_charger_detect_usb_type_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, detect_usb_type_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if a charger is
+ * connected by reading the status register
+ */
+ ret = ab8500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ ab8500_charger_set_usb_connected(di, false);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ ret = ab8500_charger_detect_usb_type(di);
+ if (!ret) {
+ ab8500_charger_set_usb_connected(di, true);
+ ab8500_power_supply_changed(di,
+ &di->usb_chg.psy);
+ }
+ break;
+
+ case AB8500_CUT2P0:
+ default:
+ /* For ABB cut2.0 and onwards we have an IRQ,
+ * USB_LINK_STATUS that will be triggered when the USB
+ * link status changes. The exception is USB connected
+ * during startup. Then we don't get a
+ * USB_LINK_STATUS IRQ
+ */
+ if (di->vbus_detected_start) {
+ di->vbus_detected_start = false;
+ ret = ab8500_charger_detect_usb_type(di);
+ if (!ret) {
+ ab8500_charger_set_usb_connected(di,
+ true);
+ ab8500_power_supply_changed(di,
+ &di->usb_chg.psy);
+ }
+ }
+ break;
+ }
+ }
+}
+
+/**
+ * ab8500_charger_usb_link_status_work() - work to detect USB type
+ * @work: pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab8500_charger_usb_link_status_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, usb_link_status_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if a charger is
+ * connected by reading the status register
+ */
+ ret = ab8500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ ab8500_charger_set_usb_connected(di, false);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+ ret = ab8500_charger_read_usb_type(di);
+ if (!ret) {
+ /* Update maximum input current */
+ ret = ab8500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ ab8500_charger_set_usb_connected(di, true);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ } else if (ret == -ENXIO) {
+ /* No valid charger type detected */
+ ab8500_charger_set_usb_connected(di, false);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ }
+ }
+}
+
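+/**
+ * ab8500_charger_usb_state_changed_work() - handle USB state changes
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function that enables or disables USB charging based on
+ * the USB state reported by the USB stack.
+ */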
+static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, usb_state_changed_work);
+
+ if (!di->vbus_detected)
+ return;
+
+ spin_lock_irqsave(&di->usb_state.usb_lock, flags);
+ di->usb_state.usb_changed = false;
+ spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
+
+ /*
+ * Wait for some time until we get updates from the usb stack
+ * and negotiations are completed
+ */
+ msleep(250);
+
+ if (di->usb_state.usb_changed)
+ return;
+
+ dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
+ __func__, di->usb_state.state, di->usb_state.usb_current);
+
+ switch (di->usb_state.state) {
+ case AB8500_BM_USB_STATE_RESET_HS:
+ case AB8500_BM_USB_STATE_RESET_FS:
+ case AB8500_BM_USB_STATE_SUSPEND:
+ case AB8500_BM_USB_STATE_MAX:
+ ab8500_charger_set_usb_connected(di, false);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ break;
+
+ case AB8500_BM_USB_STATE_RESUME:
+ /*
+ * when suspend->resume there should be delay
+ * of 1sec for enabling charging
+ */
+ msleep(1000);
+ /* Intentional fall through */
+ case AB8500_BM_USB_STATE_CONFIGURED:
+ /*
+ * USB is configured, enable charging with the charging
+ * input current obtained from USB driver
+ */
+ if (!ab8500_charger_get_usb_cur(di)) {
+ /* Update maximum input current */
+ ret = ab8500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ ab8500_charger_set_usb_connected(di, true);
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ }
+ break;
+
+ default:
+ break;
+ };
+}
+
+/**
+ * ab8500_charger_check_usbchargernotok_work() - check USB chg not ok status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB charger Not OK status
+ */
+static void ab8500_charger_check_usbchargernotok_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+ bool prev_status;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_usbchgnotok_work.work);
+
+ /* Check if the status bit for usbchargernotok is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ prev_status = di->flags.usbchargernotok;
+
+ if (reg_value & VBUS_CH_NOK) {
+ di->flags.usbchargernotok = true;
+ /* Check again in 1sec */
+ queue_delayed_work(di->charger_wq,
+ &di->check_usbchgnotok_work, HZ);
+ } else {
+ di->flags.usbchargernotok = false;
+ di->flags.vbus_collapse = false;
+ }
+
+ if (prev_status != di->flags.usbchargernotok)
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+}
+
+/**
+ * ab8500_charger_check_main_thermal_prot_work() - check main thermal status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the Main thermal prot status
+ */
+static void ab8500_charger_check_main_thermal_prot_work(
+ struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_main_thermal_prot_work);
+
+ /* Check if the status bit for main_thermal_prot is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_STATUS2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (reg_value & MAIN_CH_TH_PROT)
+ di->flags.main_thermal_prot = true;
+ else
+ di->flags.main_thermal_prot = false;
+
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+}
+
+/**
+ * ab8500_charger_check_usb_thermal_prot_work() - check usb thermal status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB thermal prot status
+ */
+static void ab8500_charger_check_usb_thermal_prot_work(
+ struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab8500_charger *di = container_of(work,
+ struct ab8500_charger, check_usb_thermal_prot_work);
+
+ /* Check if the status bit for usb_thermal_prot is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ return;
+ }
+ if (reg_value & USB_CH_TH_PROT)
+ di->flags.usb_thermal_prot = true;
+ else
+ di->flags.usb_thermal_prot = false;
+
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+}
+
+/**
+ * ab8500_charger_mainchunplugdet_handler() - main charger unplugged
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchunplugdet_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Main charger unplugged\n");
+ queue_work(di->charger_wq, &di->ac_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchplugdet_handler() - main charger plugged
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchplugdet_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Main charger plugged\n");
+ queue_work(di->charger_wq, &di->ac_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainextchnotok_handler() - main charger not ok
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainextchnotok_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Main charger not ok\n");
+ di->flags.mainextchnotok = true;
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+
+ /* Schedule a new HW failure check */
+ queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchthprotr_handler() - Die temp is above main charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchthprotr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp above Main charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_main_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_mainchthprotf_handler() - Die temp is below main charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_mainchthprotf_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp ok for Main charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_main_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusdetf_handler() - VBUS falling detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusdetf_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS falling detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusdetr_handler() - VBUS rising detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusdetr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ di->vbus_detected = true;
+ dev_dbg(di->dev, "VBUS rising detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usblinkstatus_handler() - USB link status has changed
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usblinkstatus_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "USB link status changed\n");
+
+ queue_work(di->charger_wq, &di->usb_link_status_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchthprotr_handler() - Die temp is above usb charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchthprotr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp above USB charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchthprotf_handler() - Die temp is below usb charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchthprotf_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp ok for USB charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_usbchargernotokr_handler() - USB charger not ok detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_usbchargernotokr_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Not allowed USB charger detected\n");
+ queue_delayed_work(di->charger_wq, &di->check_usbchgnotok_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_chwdexp_handler() - Charger watchdog expired
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_chwdexp_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "Charger watchdog expired\n");
+
+ /*
+ * The charger that was online when the watchdog expired
+ * needs to be restarted for charging to start again
+ */
+ if (di->ac.charger_online) {
+ di->ac.wd_expired = true;
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+ }
+ if (di->usb.charger_online) {
+ di->usb.wd_expired = true;
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_vbusovv_handler() - VBUS overvoltage detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbusovv_handler(int irq, void *_di)
+{
+ struct ab8500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS overvoltage detected\n");
+ di->flags.vbus_ovv = true;
+ ab8500_power_supply_changed(di, &di->usb_chg.psy);
+
+ /* Schedule a new HW failure check */
+ queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_charger_ac_get_property() - get the ac/mains properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the ac/mains
+ * properties by reading the sysfs files.
+ * AC/Mains properties are online, present and voltage.
+ * online: ac/mains charging is in progress or not
+ * present: presence of the ac/mains
+ * voltage: AC/Mains voltage
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_charger *di;
+
+ di = to_ab8500_charger_ac_device_info(psy_to_ux500_charger(psy));
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->flags.mainextchnotok)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (di->ac.wd_expired || di->usb.wd_expired)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (di->flags.main_thermal_prot)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->ac.charger_online;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = di->ac.charger_connected;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ di->ac.charger_voltage = ab8500_charger_get_ac_voltage(di);
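+ /* power_supply properties are in uV; the charger voltage is in mV */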
+ val->intval = di->ac.charger_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ /*
+ * This property is used to indicate when CV mode is entered
+ * for the AC charger
+ */
+ di->ac.cv_active = ab8500_charger_ac_cv(di);
+ val->intval = di->ac.cv_active;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ab8500_charger_get_ac_current(di) * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_usb_get_property() - get the usb properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the usb
+ * properties by reading the sysfs files.
+ * USB properties are online, present and voltage.
+ * online: usb charging is in progress or not
+ * present: presence of the usb
+ * voltage: vbus voltage
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_charger *di;
+
+ di = to_ab8500_charger_usb_device_info(psy_to_ux500_charger(psy));
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->flags.usbchargernotok)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (di->ac.wd_expired || di->usb.wd_expired)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (di->flags.usb_thermal_prot)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (di->flags.vbus_ovv)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->usb.charger_online;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = di->usb.charger_connected;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ di->usb.charger_voltage = ab8500_charger_get_vbus_voltage(di);
+ val->intval = di->usb.charger_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ /*
+ * This property is used to indicate when CV mode is entered
+ * for the USB charger
+ */
+ di->usb.cv_active = ab8500_charger_usb_cv(di);
+ val->intval = di->usb.cv_active;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ab8500_charger_get_usb_current(di) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ /*
+ * This property is used to indicate when VBUS has collapsed
+ * due to too high output current from the USB charger
+ */
+ if (di->flags.vbus_collapse)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ab8500_charger_init_hw_registers() - Set up charger related registers
+ * @di: pointer to the ab8500_charger structure
+ *
+ * Set up charger OVV, watchdog and maximum voltage registers as well as
+ * charging of the backup battery
+ */
+static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
+{
+ int ret = 0;
+
+ /*
+ * Setup maximum charger current (1.6 A) and voltage (4.6 V)
+ * for ABB cut 2.0 and later
+ */
+ switch (di->chip_id) {
+ case AB8500_CUT1P0:
+ case AB8500_CUT1P1:
+ break;
+ case AB8500_CUT2P0:
+ default:
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_VOLT_LVL_MAX_REG, CH_VOL_LVL_4P6);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to set CH_VOLT_LVL_MAX_REG\n");
+ goto out;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_CH_OPT_CRNTLVL_MAX_REG, CH_OP_CUR_LVL_1P6);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to set CH_OPT_CRNTLVL_MAX_REG\n");
+ goto out;
+ }
+
+ break;
+ }
+
+ /* VBUS OVV set to 6.3V and enable automatic current limitation */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_USBCH_CTRL2_REG,
+ VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to set VBUS OVV\n");
+ goto out;
+ }
+
+ /* Enable main watchdog in OTP */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_OTP_EMUL, AB8500_OTP_CONF_15, OTP_ENABLE_WD);
+ if (ret) {
+ dev_err(di->dev, "failed to enable main WD in OTP\n");
+ goto out;
+ }
+
+ /* Enable main watchdog */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to enable main watchdog\n");
+ goto out;
+ }
+
+ /*
+ * Due to internal synchronisation, Enable and Kick watchdog bits
+ * cannot be enabled in a single write.
+ * A minimum delay of two 32 kHz clock periods (62.5 µs) must be
+ * inserted between writing the Enable and the Kick bits.
+ */
+ udelay(63);
+
+ /* Kick main watchdog */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_MAIN_WDOG_CTRL_REG,
+ (MAIN_WDOG_ENA | MAIN_WDOG_KICK));
+ if (ret) {
+ dev_err(di->dev, "failed to kick main watchdog\n");
+ goto out;
+ }
+
+ /* Disable main watchdog */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_DIS);
+ if (ret) {
+ dev_err(di->dev, "failed to disable main watchdog\n");
+ goto out;
+ }
+
+ /* Set watchdog timeout */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CH_WD_TIMER_REG, WD_TIMER);
+ if (ret) {
+ dev_err(di->dev, "failed to set charger watchdog timeout\n");
+ goto out;
+ }
+
+ /* Backup battery voltage and current */
+ if (machine_is_snowball())
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_RTC,
+ AB8500_RTC_BACKUP_CHG_REG,
+ BUP_VCH_SEL_3P1V |
+ BUP_ICH_SEL_150UA);
+ else
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_RTC,
+ AB8500_RTC_BACKUP_CHG_REG,
+ di->bat->bkup_bat_v |
+ di->bat->bkup_bat_i);
+ if (ret) {
+ dev_err(di->dev, "failed to setup backup battery charging\n");
+ goto out;
+ }
+
+ /* Enable backup battery charging */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_RTC, AB8500_RTC_CTRL_REG,
+ RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
+ if (ret < 0)
+ dev_err(di->dev, "%s mask and set failed\n", __func__);
+
+out:
+ return ret;
+}
+
+/*
+ * ab8500 charger driver interrupts and their respective isr
+ */
+static struct ab8500_charger_interrupts ab8500_charger_irq[] = {
+ {"MAIN_CH_UNPLUG_DET", ab8500_charger_mainchunplugdet_handler},
+ {"MAIN_CHARGE_PLUG_DET", ab8500_charger_mainchplugdet_handler},
+ {"MAIN_EXT_CH_NOT_OK", ab8500_charger_mainextchnotok_handler},
+ {"MAIN_CH_TH_PROT_R", ab8500_charger_mainchthprotr_handler},
+ {"MAIN_CH_TH_PROT_F", ab8500_charger_mainchthprotf_handler},
+ {"VBUS_DET_F", ab8500_charger_vbusdetf_handler},
+ {"VBUS_DET_R", ab8500_charger_vbusdetr_handler},
+ {"USB_LINK_STATUS", ab8500_charger_usblinkstatus_handler},
+ {"USB_CH_TH_PROT_R", ab8500_charger_usbchthprotr_handler},
+ {"USB_CH_TH_PROT_F", ab8500_charger_usbchthprotf_handler},
+ {"USB_CHARGER_NOT_OKR", ab8500_charger_usbchargernotokr_handler},
+ {"VBUS_OVV", ab8500_charger_vbusovv_handler},
+ {"CH_WD_EXP", ab8500_charger_chwdexp_handler},
+};
+
+static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *power)
+{
+ struct ab8500_charger *di =
+ container_of(nb, struct ab8500_charger, nb);
+ enum ab8500_usb_state bm_usb_state;
+ unsigned mA = *((unsigned *)power);
+
+ if (event != USB_EVENT_VBUS) {
+ dev_dbg(di->dev, "not a standard host, returning\n");
+ return NOTIFY_DONE;
+ }
+
+ /* TODO: The state is fabricated here. See if the charger really
+ * needs the USB state or if mA is enough
+ */
+ if ((di->usb_state.usb_current == 2) && (mA > 2))
+ bm_usb_state = AB8500_BM_USB_STATE_RESUME;
+ else if (mA == 0)
+ bm_usb_state = AB8500_BM_USB_STATE_RESET_HS;
+ else if (mA == 2)
+ bm_usb_state = AB8500_BM_USB_STATE_SUSPEND;
+ else if (mA >= 8) /* 8, 100, 500 */
+ bm_usb_state = AB8500_BM_USB_STATE_CONFIGURED;
+ else /* Should never occur */
+ bm_usb_state = AB8500_BM_USB_STATE_RESET_FS;
+
+ dev_dbg(di->dev, "%s usb_state: 0x%02x mA: %d\n",
+ __func__, bm_usb_state, mA);
+
+ spin_lock(&di->usb_state.usb_lock);
+ di->usb_state.usb_changed = true;
+ spin_unlock(&di->usb_state.usb_lock);
+
+ di->usb_state.state = bm_usb_state;
+ di->usb_state.usb_current = mA;
+
+ queue_work(di->charger_wq, &di->usb_state_changed_work);
+
+ return NOTIFY_OK;
+}
+
+#if defined(CONFIG_PM)
+static int ab8500_charger_resume(struct platform_device *pdev)
+{
+ int ret;
+ struct ab8500_charger *di = platform_get_drvdata(pdev);
+
+ /*
+ * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+ * logic. That means we have to continuously kick the charger
+ * watchdog even when no charger is connected. This is only
+ * needed once the AC charger has been enabled. This is
+ * a bug that is not handled by the algorithm and the
+ * watchdog has to be kicked by the charger driver
+ * when the AC charger is disabled
+ */
+ if (di->ac_conn && (di->chip_id == AB8500_CUT1P0 ||
+ di->chip_id == AB8500_CUT1P1)) {
+ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+ AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ /* If not already pending start a new timer */
+ if (!delayed_work_pending(&di->kick_wd_work)) {
+ queue_delayed_work(di->charger_wq, &di->kick_wd_work,
+ round_jiffies(WD_KICK_INTERVAL));
+ }
+ }
+
+ /* If we still have a HW failure, schedule a new check */
+ if (di->flags.mainextchnotok || di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, 0);
+ }
+
+ return 0;
+}
+
+static int ab8500_charger_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_charger *di = platform_get_drvdata(pdev);
+
+ /* Cancel any pending HW failure check */
+ if (delayed_work_pending(&di->check_hw_failure_work))
+ cancel_delayed_work(&di->check_hw_failure_work);
+
+ return 0;
+}
+#else
+#define ab8500_charger_suspend NULL
+#define ab8500_charger_resume NULL
+#endif
+
+static int __devexit ab8500_charger_remove(struct platform_device *pdev)
+{
+ struct ab8500_charger *di = platform_get_drvdata(pdev);
+ int i, irq, ret;
+
+ /* Disable AC charging */
+ ab8500_charger_ac_en(&di->ac_chg, false, 0, 0);
+
+ /* Disable USB charging */
+ ab8500_charger_usb_en(&di->usb_chg, false, 0, 0);
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ /* disable the regulator */
+ regulator_put(di->regu);
+
+ /* Backup battery voltage and current disable */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, 0);
+ if (ret < 0)
+ dev_err(di->dev, "%s mask and set failed\n", __func__);
+
+ otg_unregister_notifier(di->otg, &di->nb);
+ otg_put_transceiver(di->otg);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->charger_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->usb_chg.psy);
+ power_supply_unregister(&di->ac_chg.psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab8500_charger_probe(struct platform_device *pdev)
+{
+ int irq, i, charger_status, ret = 0;
+ struct ab8500_platform_data *plat;
+
+ struct ab8500_charger *di =
+ kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab8500_gpadc_get();
+
+ /* initialize lock */
+ spin_lock_init(&di->usb_state.usb_lock);
+
+ plat = dev_get_platdata(di->parent->dev);
+
+ /* get charger specific platform data */
+ if (!plat->charger) {
+ dev_err(di->dev, "no charger platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->pdata = plat->charger;
+
+ /* get battery specific platform data */
+ if (!plat->battery) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->bat = plat->battery;
+ di->autopower = false;
+
+ /* AC supply */
+ /* power_supply base class */
+ di->ac_chg.psy.name = "ab8500_ac";
+ di->ac_chg.psy.type = POWER_SUPPLY_TYPE_MAINS;
+ di->ac_chg.psy.properties = ab8500_charger_ac_props;
+ di->ac_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_ac_props);
+ di->ac_chg.psy.get_property = ab8500_charger_ac_get_property;
+ di->ac_chg.psy.supplied_to = di->pdata->supplied_to;
+ di->ac_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ /* ux500_charger sub-class */
+ di->ac_chg.ops.enable = &ab8500_charger_ac_en;
+ di->ac_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
+ di->ac_chg.ops.update_curr = &ab8500_charger_update_charger_current;
+ di->ac_chg.max_out_volt = ab8500_charger_voltage_map[
+ ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
+ di->ac_chg.max_out_curr = ab8500_charger_current_map[
+ ARRAY_SIZE(ab8500_charger_current_map) - 1];
+
+ /* USB supply */
+ /* power_supply base class */
+ di->usb_chg.psy.name = "ab8500_usb";
+ di->usb_chg.psy.type = POWER_SUPPLY_TYPE_USB;
+ di->usb_chg.psy.properties = ab8500_charger_usb_props;
+ di->usb_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_usb_props);
+ di->usb_chg.psy.get_property = ab8500_charger_usb_get_property;
+ di->usb_chg.psy.supplied_to = di->pdata->supplied_to;
+ di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ /* ux500_charger sub-class */
+ di->usb_chg.ops.enable = &ab8500_charger_usb_en;
+ di->usb_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
+ di->usb_chg.ops.update_curr = &ab8500_charger_update_charger_current;
+ di->usb_chg.max_out_volt = ab8500_charger_voltage_map[
+ ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
+ di->usb_chg.max_out_curr = ab8500_charger_current_map[
+ ARRAY_SIZE(ab8500_charger_current_map) - 1];
+
+
+ /* Create a work queue for the charger */
+ di->charger_wq =
+ create_singlethread_workqueue("ab8500_charger_wq");
+ if (di->charger_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for HW failure check */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+ ab8500_charger_check_hw_failure_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+ ab8500_charger_check_usbchargernotok_work);
+
+ /*
+ * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
+ * logic. That means we have to continuously kick the charger
+ * watchdog even when no charger is connected. This is only
+ * needed once the AC charger has been enabled. This is
+ * a bug that is not handled by the algorithm and the
+ * watchdog has to be kicked by the charger driver
+ * when the AC charger is disabled
+ */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work,
+ ab8500_charger_kick_watchdog_work);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work,
+ ab8500_charger_check_vbat_work);
+
+ /* Init work for charger detection */
+ INIT_WORK(&di->usb_link_status_work,
+ ab8500_charger_usb_link_status_work);
+ INIT_WORK(&di->ac_work, ab8500_charger_ac_work);
+ INIT_WORK(&di->detect_usb_type_work,
+ ab8500_charger_detect_usb_type_work);
+
+ INIT_WORK(&di->usb_state_changed_work,
+ ab8500_charger_usb_state_changed_work);
+
+ /* Init work for checking HW status */
+ INIT_WORK(&di->check_main_thermal_prot_work,
+ ab8500_charger_check_main_thermal_prot_work);
+ INIT_WORK(&di->check_usb_thermal_prot_work,
+ ab8500_charger_check_usb_thermal_prot_work);
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(di->dev);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to get chip ID\n");
+ goto free_charger_wq;
+ }
+ di->chip_id = ret;
+ dev_dbg(di->dev, "AB8500 CID is: 0x%02x\n", di->chip_id);
+
+ /*
+ * VDD ADC supply needs to be enabled from this driver when there
+ * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
+ * interrupts during charging
+ */
+ di->regu = regulator_get(di->dev, "vddadc");
+ if (IS_ERR(di->regu)) {
+ ret = PTR_ERR(di->regu);
+ dev_err(di->dev, "failed to get vddadc regulator\n");
+ goto free_charger_wq;
+ }
+
+
+ /* Initialize OVV, and other registers */
+ ret = ab8500_charger_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize ABB registers\n");
+ goto free_regulator;
+ }
+
+ /* Register AC charger class */
+ ret = power_supply_register(di->dev, &di->ac_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register AC charger\n");
+ goto free_regulator;
+ }
+
+ /* Register USB charger class */
+ ret = power_supply_register(di->dev, &di->usb_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register USB charger\n");
+ goto free_ac;
+ }
+
+ di->otg = otg_get_transceiver();
+ if (!di->otg) {
+ dev_err(di->dev, "failed to get otg transceiver\n");
+ ret = -EINVAL;
+ goto free_usb;
+ }
+ di->nb.notifier_call = ab8500_charger_usb_notifier_call;
+ ret = otg_register_notifier(di->otg, &di->nb);
+ if (ret) {
+ dev_err(di->dev, "failed to register otg notifier\n");
+ goto put_otg_transceiver;
+ }
+
+ /* Identify the connected charger types during startup */
+ charger_status = ab8500_charger_detect_chargers(di);
+ if (charger_status & AC_PW_CONN) {
+ di->ac.charger_connected = 1;
+ di->ac_conn = true;
+ ab8500_power_supply_changed(di, &di->ac_chg.psy);
+ sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
+ }
+
+ if (charger_status & USB_PW_CONN) {
+ dev_dbg(di->dev, "VBUS Detect during startup\n");
+ di->vbus_detected = true;
+ di->vbus_detected_start = true;
+ queue_work(di->charger_wq,
+ &di->detect_usb_type_work);
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab8500_charger_irq[i].name, di);
+
+ if (ret != 0) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
+ ab8500_charger_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab8500_charger_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ return ret;
+
+free_irq:
+ otg_unregister_notifier(di->otg, &di->nb);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+put_otg_transceiver:
+ otg_put_transceiver(di->otg);
+free_usb:
+ power_supply_unregister(&di->usb_chg.psy);
+free_ac:
+ power_supply_unregister(&di->ac_chg.psy);
+free_regulator:
+ regulator_put(di->regu);
+free_charger_wq:
+ destroy_workqueue(di->charger_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab8500_charger_driver = {
+ .probe = ab8500_charger_probe,
+ .remove = __devexit_p(ab8500_charger_remove),
+ .suspend = ab8500_charger_suspend,
+ .resume = ab8500_charger_resume,
+ .driver = {
+ .name = "ab8500-charger",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_charger_init(void)
+{
+ return platform_driver_register(&ab8500_charger_driver);
+}
+
+static void __exit ab8500_charger_exit(void)
+{
+ platform_driver_unregister(&ab8500_charger_driver);
+}
+
+subsys_initcall_sync(ab8500_charger_init);
+module_exit(ab8500_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
+MODULE_ALIAS("platform:ab8500-charger");
+MODULE_DESCRIPTION("AB8500 charger management driver");
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
new file mode 100644
index 00000000000..32dc5dbd95e
--- /dev/null
+++ b/drivers/power/ab8500_fg.c
@@ -0,0 +1,2498 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Main and Back-up battery management driver.
+ *
+ * Note: Backup battery management is required for a Li-Ion backup battery but
+ * not for a capacitive one. HREF boards have a capacitive backup battery, so
+ * backup battery management is not used there, but the supporting code is
+ * available in this driver.
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/slab.h>
+#include <linux/mfd/ab8500/bm.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/abx500.h>
+#include <linux/time.h>
+
+#define MILLI_TO_MICRO 1000
+#define FG_LSB_IN_MA 1627
+#define QLSB_NANO_AMP_HOURS_X10 1129
+
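+/* The CC delivers one sample every 250 ms, i.e. four samples per second */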
+#define SEC_TO_SAMPLE(S) ((S) * 4)
+
+#define NBR_AVG_SAMPLES 20
+
+#define LOW_BAT_CHECK_INTERVAL (2 * HZ)
+
+#define VALID_CAPACITY_SEC (45 * 60) /* 45 minutes */
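+/*
+ * The BattOk threshold is presumably programmed as BATT_OK_MIN plus a
+ * number of BATT_OK_INCREMENT steps, i.e. selectable from 2360 mV to 3060 mV
+ */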
+#define BATT_OK_MIN 2360 /* mV */
+#define BATT_OK_INCREMENT 50 /* mV */
+#define BATT_OK_MAX_NR_INCREMENTS 0xE
+
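+/*
+ * Linear interpolation between the points (x1, y1) and (x2, y2), e.g.
+ * interpolate(3700, 3600, 500, 3800, 700) evaluates to 600
+ */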
+#define interpolate(x, x1, y1, x2, y2) \
+ ((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))))
+
+#define to_ab8500_fg_device_info(x) container_of((x), \
+ struct ab8500_fg, fg_psy)
+
+/**
+ * struct ab8500_fg_interrupts - ab8500 fg interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab8500_fg_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+enum ab8500_fg_discharge_state {
+ AB8500_FG_DISCHARGE_INIT,
+ AB8500_FG_DISCHARGE_INITMEASURING,
+ AB8500_FG_DISCHARGE_INIT_RECOVERY,
+ AB8500_FG_DISCHARGE_RECOVERY,
+ AB8500_FG_DISCHARGE_READOUT_INIT,
+ AB8500_FG_DISCHARGE_READOUT,
+ AB8500_FG_DISCHARGE_WAKEUP,
+};
+
+static char *discharge_state[] = {
+ "DISCHARGE_INIT",
+ "DISCHARGE_INITMEASURING",
+ "DISCHARGE_INIT_RECOVERY",
+ "DISCHARGE_RECOVERY",
+ "DISCHARGE_READOUT_INIT",
+ "DISCHARGE_READOUT",
+ "DISCHARGE_WAKEUP",
+};
+
+enum ab8500_fg_charge_state {
+ AB8500_FG_CHARGE_INIT,
+ AB8500_FG_CHARGE_READOUT,
+};
+
+static char *charge_state[] = {
+ "CHARGE_INIT",
+ "CHARGE_READOUT",
+};
+
+enum ab8500_fg_calibration_state {
+ AB8500_FG_CALIB_INIT,
+ AB8500_FG_CALIB_WAIT,
+ AB8500_FG_CALIB_END,
+};
+
+struct ab8500_fg_avg_cap {
+ int avg;
+ int samples[NBR_AVG_SAMPLES];
+ __kernel_time_t time_stamps[NBR_AVG_SAMPLES];
+ int pos;
+ int nbr_samples;
+ int sum;
+};
+
+struct ab8500_fg_battery_capacity {
+ int max_mah_design;
+ int max_mah;
+ int mah;
+ int permille;
+ int level;
+ int prev_mah;
+ int prev_percent;
+ int prev_level;
+ int user_mah;
+};
+
+struct ab8500_fg_flags {
+ bool fg_enabled;
+ bool conv_done;
+ bool charging;
+ bool fully_charged;
+ bool force_full;
+ bool low_bat_delay;
+ bool low_bat;
+ bool bat_ovv;
+ bool batt_unknown;
+ bool calibrate;
+ bool user_cap;
+ bool batt_id_received;
+};
+
+struct inst_curr_result_list {
+ struct list_head list;
+ int *result;
+};
+
+/**
+ * struct ab8500_fg - ab8500 FG device information
+ * @dev: Pointer to the structure device
+ * @node: a list of AB8500 FGs, hence prepared for reentrance
+ * @vbat: Battery voltage in mV
+ * @vbat_nom: Nominal battery voltage in mV
+ * @inst_curr: Instantaneous battery current in mA
+ * @avg_curr: Average battery current in mA
+ * @bat_temp: battery temperature
+ * @fg_samples: Number of samples used in the FG accumulation
+ * @accu_charge: Accumulated charge from the last conversion
+ * @recovery_cnt: Counter for recovery mode
+ * @high_curr_cnt: Counter for high current mode
+ * @init_cnt: Counter for init mode
+ * @recovery_needed: Indicate if recovery is needed
+ * @high_curr_mode: Indicate if we're in high current mode
+ * @init_capacity: Indicate if initial capacity measuring should be done
+ * @turn_off_fg: True if fg was off before current measurement
+ * @calib_state: State during offset calibration
+ * @discharge_state: Current discharge state
+ * @charge_state: Current charge state
+ * @flags: Structure for information about events triggered
+ * @bat_cap: Structure for battery capacity specific parameters
+ * @avg_cap: Average capacity filter
+ * @parent: Pointer to the struct ab8500
+ * @gpadc: Pointer to the struct gpadc
+ * @pdata: Pointer to the ab8500_fg platform data
+ * @bat: Pointer to the ab8500_bm platform data
+ * @fg_psy: Structure that holds the FG specific battery properties
+ * @fg_wq: Work queue for running the FG algorithm
+ * @fg_periodic_work: Work to run the FG algorithm periodically
+ * @fg_low_bat_work: Work to check low bat condition
+ * @fg_reinit_work: Work used to reset and reinitialise the FG algorithm
+ * @fg_work: Work to run the FG algorithm instantly
+ * @fg_acc_cur_work: Work to read the FG accumulator
+ * @cc_lock: Mutex for locking the CC
+ * @fg_kobject: Structure of type kobject
+ */
+struct ab8500_fg {
+ struct device *dev;
+ struct list_head node;
+ int vbat;
+ int vbat_nom;
+ int inst_curr;
+ int avg_curr;
+ int bat_temp;
+ int fg_samples;
+ int accu_charge;
+ int recovery_cnt;
+ int high_curr_cnt;
+ int init_cnt;
+ bool recovery_needed;
+ bool high_curr_mode;
+ bool init_capacity;
+ bool turn_off_fg;
+ enum ab8500_fg_calibration_state calib_state;
+ enum ab8500_fg_discharge_state discharge_state;
+ enum ab8500_fg_charge_state charge_state;
+ struct ab8500_fg_flags flags;
+ struct ab8500_fg_battery_capacity bat_cap;
+ struct ab8500_fg_avg_cap avg_cap;
+ struct ab8500 *parent;
+ struct ab8500_gpadc *gpadc;
+ struct ab8500_fg_platform_data *pdata;
+ struct ab8500_bm_data *bat;
+ struct power_supply fg_psy;
+ struct workqueue_struct *fg_wq;
+ struct delayed_work fg_periodic_work;
+ struct delayed_work fg_low_bat_work;
+ struct delayed_work fg_reinit_work;
+ struct work_struct fg_work;
+ struct work_struct fg_acc_cur_work;
+ struct mutex cc_lock;
+ struct kobject fg_kobject;
+};
+static LIST_HEAD(ab8500_fg_list);
+
+/**
+ * ab8500_fg_get() - returns a reference to the primary AB8500 fuel gauge
+ * (i.e. the first fuel gauge in the instance list)
+ */
+struct ab8500_fg *ab8500_fg_get(void)
+{
+ struct ab8500_fg *fg;
+
+ if (list_empty(&ab8500_fg_list))
+ return NULL;
+
+ fg = list_first_entry(&ab8500_fg_list, struct ab8500_fg, node);
+ return fg;
+}
+
+/* Main battery properties */
+static enum power_supply_property ab8500_fg_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+};
+
+/*
+ * This array maps the raw hex value to lowbat voltage used by the AB8500
+ * Values taken from the UM0836
+ */
+static int ab8500_fg_lowbat_voltage_map[] = {
+ 2300, 2325, 2350, 2375, 2400, 2425, 2450, 2475,
+ 2500, 2525, 2550, 2575, 2600, 2625, 2650, 2675,
+ 2700, 2725, 2750, 2775, 2800, 2825, 2850, 2875,
+ 2900, 2925, 2950, 2975, 3000, 3025, 3050, 3075,
+ 3100, 3125, 3150, 3175, 3200, 3225, 3250, 3275,
+ 3300, 3325, 3350, 3375, 3400, 3425, 3450, 3475,
+ 3500, 3525, 3550, 3575, 3600, 3625, 3650, 3675,
+ 3700, 3725, 3750, 3775, 3800, 3825, 3850, 3850,
+};
+
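+/*
+ * Returns the register value for the highest lowbat level that does not
+ * exceed the requested voltage, clamped to the table limits,
+ * e.g. 2575 mV gives register value 11
+ */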
+static u8 ab8500_volt_to_regval(int voltage)
+{
+ int i;
+
+ if (voltage < ab8500_fg_lowbat_voltage_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_fg_lowbat_voltage_map); i++) {
+ if (voltage < ab8500_fg_lowbat_voltage_map[i])
+ return (u8) i - 1;
+ }
+
+ /* If not captured above, return index of last element */
+ return (u8) ARRAY_SIZE(ab8500_fg_lowbat_voltage_map) - 1;
+}
+
+/**
+ * ab8500_fg_is_low_curr() - Low or high current mode
+ * @di: pointer to the ab8500_fg structure
+ * @curr: the current to base our decision on
+ *
+ * Low current mode if the current consumption is below a certain threshold
+ */
+static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
+{
+ /*
+ * Discharge current is reported as a negative value, so we stay in
+ * low current mode as long as the discharge current does not exceed
+ * the high current threshold
+ */
+ if (curr > -di->bat->fg_params->high_curr_threshold)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ab8500_fg_add_cap_sample() - Add capacity to average filter
+ * @di: pointer to the ab8500_fg structure
+ * @sample: the capacity in mAh to add to the filter
+ *
+ * A capacity is added to the filter and a new mean capacity is calculated and
+ * returned
+ */
+static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
+{
+ struct timespec ts;
+ struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ do {
+ avg->sum += sample - avg->samples[avg->pos];
+ avg->samples[avg->pos] = sample;
+ avg->time_stamps[avg->pos] = ts.tv_sec;
+ avg->pos++;
+
+ if (avg->pos == NBR_AVG_SAMPLES)
+ avg->pos = 0;
+
+ if (avg->nbr_samples < NBR_AVG_SAMPLES)
+ avg->nbr_samples++;
+
+ /*
+ * Check the time stamp for each sample. If too old,
+ * replace with latest sample
+ */
+ } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
+
+ avg->avg = avg->sum / avg->nbr_samples;
+
+ return avg->avg;
+}
+
+/**
+ * ab8500_fg_clear_cap_samples() - Clear average filter
+ * @di: pointer to the ab8500_fg structure
+ *
+ * The capacity filter is reset to zero.
+ */
+static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
+{
+ int i;
+ struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+ avg->pos = 0;
+ avg->nbr_samples = 0;
+ avg->sum = 0;
+ avg->avg = 0;
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = 0;
+ avg->time_stamps[i] = 0;
+ }
+}
+
+/**
+ * ab8500_fg_fill_cap_sample() - Fill average filter
+ * @di: pointer to the ab8500_fg structure
+ * @sample: the capacity in mAh to fill the filter with
+ *
+ * The capacity filter is filled with a capacity in mAh
+ */
+static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample)
+{
+ int i;
+ struct timespec ts;
+ struct ab8500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = sample;
+ avg->time_stamps[i] = ts.tv_sec;
+ }
+
+ avg->pos = 0;
+ avg->nbr_samples = NBR_AVG_SAMPLES;
+ avg->sum = sample * NBR_AVG_SAMPLES;
+ avg->avg = sample;
+}
+
+/**
+ * ab8500_fg_coulomb_counter() - enable coulomb counter
+ * @di: pointer to the ab8500_fg structure
+ * @enable: enable/disable
+ *
+ * Enable/Disable coulomb counter.
+ * On failure returns negative value.
+ */
+static int ab8500_fg_coulomb_counter(struct ab8500_fg *di, bool enable)
+{
+ int ret = 0;
+ mutex_lock(&di->cc_lock);
+ if (enable) {
+ /* To be able to reprogram the number of samples, we have to
+ * first stop the CC and then enable it again */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, 0x00);
+ if (ret)
+ goto cc_err;
+
+ /* Program the samples */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU,
+ di->fg_samples);
+ if (ret)
+ goto cc_err;
+
+ /* Start the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG,
+ (CC_DEEP_SLEEP_ENA | CC_PWR_UP_ENA));
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = true;
+ } else {
+ /* Clear any pending read requests */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+ if (ret)
+ goto cc_err;
+
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU_CTRL, 0);
+ if (ret)
+ goto cc_err;
+
+ /* Stop the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, 0);
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = false;
+
+ }
+ dev_dbg(di->dev, " CC enabled: %d Samples: %d\n",
+ enable, di->fg_samples);
+
+ mutex_unlock(&di->cc_lock);
+
+ return ret;
+cc_err:
+ dev_err(di->dev, "%s Enabling coulomb counter failed\n", __func__);
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+/**
+ * ab8500_fg_inst_curr_start() - start battery instantaneous current
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns 0 or error code
+ * Note: This is part "one" and has to be called before
+ * ab8500_fg_inst_curr_finalize()
+ */
+int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
+{
+ u8 reg_val;
+ int ret;
+
+ mutex_lock(&di->cc_lock);
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, &reg_val);
+ if (ret < 0)
+ goto fail;
+
+ if (!(reg_val & CC_PWR_UP_ENA)) {
+ dev_dbg(di->dev, "%s Enable FG\n", __func__);
+ di->turn_off_fg = true;
+
+ /* Program the samples */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU,
+ SEC_TO_SAMPLE(10));
+ if (ret)
+ goto fail;
+
+ /* Start the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG,
+ (CC_DEEP_SLEEP_ENA | CC_PWR_UP_ENA));
+ if (ret)
+ goto fail;
+ } else {
+ di->turn_off_fg = false;
+ }
+
+ /* Reset counter and Read request */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_CTRL_REG, (RESET_ACCU | READ_REQ));
+ if (ret)
+ goto fail;
+
+ /* Note: cc_lock is still locked */
+ return 0;
+fail:
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+/**
+ * ab8500_fg_inst_curr_finalize() - battery instantaneous current
+ * @di: pointer to the ab8500_fg structure
+ * @res: battery instantaneous current (on success)
+ *
+ * Returns 0 or an error code
+ * Note: This is part "two" and has to be called at the earliest 250 ms
+ * after ab8500_fg_inst_curr_start()
+ */
+int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
+{
+ u8 low, high;
+ int val;
+ int ret;
+
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_SMPL_CNVL_REG, &low);
+ if (ret < 0)
+ goto fail;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_SMPL_CNVH_REG, &high);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * negative value for Discharging
+ * convert 2's complement into decimal
+ */
+ if (high & 0x10)
+ val = (low | (high << 8) | 0xFFFFE000);
+ else
+ val = (low | (high << 8));
+
+ /*
+ * Convert to unit value in mA
+ * Full scale input voltage is
+ * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
+ * Given a 250ms conversion cycle time the LSB corresponds
+ * to 112.9 nAh. Convert to current by dividing by the conversion
+ * time in hours (250ms = 1 / (3600 * 4)h)
+ * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+ */
+ val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) /
+ (1000 * di->bat->fg_res);
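+ /*
+ * E.g. with fg_res = 100 (10 mOhm) one LSB corresponds to
+ * 1129 * 36 * 4 / (1000 * 100) ~= 1.63 mA
+ */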
+
+ if (di->turn_off_fg) {
+ dev_dbg(di->dev, "%s Disable FG\n", __func__);
+
+ /* Clear any pending read requests */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+ if (ret)
+ goto fail;
+
+ /* Stop the CC */
+ ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+ AB8500_RTC_CC_CONF_REG, 0);
+ if (ret)
+ goto fail;
+ }
+ mutex_unlock(&di->cc_lock);
+ (*res) = val;
+
+ return 0;
+fail:
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+/**
+ * ab8500_fg_inst_curr_blocking() - battery instantaneous current
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns the battery instantaneous current in mA, or 0 on failure
+ */
+int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
+{
+ int ret;
+ int res = 0;
+
+ ret = ab8500_fg_inst_curr_start(di);
+ if (ret) {
+ dev_err(di->dev, "Failed to initialize fg_inst\n");
+ return 0;
+ }
+
+ /*
+ * Since there is no interrupt for this wait for 253ms to be
+ * on the safe side.
+ *
+ * one sample conversion takes 250 ms at 32.768 kHz RTC clock
+ */
+ msleep(253);
+
+ ret = ab8500_fg_inst_curr_finalize(di, &res);
+ if (ret) {
+ dev_err(di->dev, "Failed to finalize fg_inst\n");
+ return 0;
+ }
+
+ return res;
+}
+
+/**
+ * ab8500_fg_acc_cur_work() - average battery current
+ * @work: pointer to the work_struct structure
+ *
+ * Updates the average battery current obtained from the
+ * coulomb counter.
+ */
+static void ab8500_fg_acc_cur_work(struct work_struct *work)
+{
+ int val;
+ int ret;
+ u8 low, med, high;
+
+ struct ab8500_fg *di = container_of(work,
+ struct ab8500_fg, fg_acc_cur_work);
+
+ mutex_lock(&di->cc_lock);
+ ret = abx500_set_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_CTRL, RD_NCONV_ACCU_REQ);
+ if (ret)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_LOW, &low);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_MED, &med);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
+ AB8500_GASG_CC_NCOV_ACCU_HIGH, &high);
+ if (ret < 0)
+ goto exit;
+
+ /* Check for sign bit in case of negative value, 2's complement */
+ if (high & 0x10)
+ val = (low | (med << 8) | (high << 16) | 0xFFE00000);
+ else
+ val = (low | (med << 8) | (high << 16));
+
+ /*
+ * Convert to uAh
+ * Given a 250ms conversion cycle time the LSB corresponds
+ * to 112.9 nAh.
+ * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+ */
+ di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X10) /
+ (100 * di->bat->fg_res);
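+ /*
+ * E.g. with fg_res = 100 (10 mOhm) one accumulator LSB corresponds
+ * to 1129 / (100 * 100) ~= 0.11 uAh, i.e. the 112.9 nAh above
+ */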
+
+ /*
+ * Convert to unit value in mA
+ * Full scale input voltage is
+ * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
+ * Given a 250ms conversion cycle time the LSB corresponds
+ * to 112.9 nAh. Convert to current by dividing by the conversion
+ * time in hours (= samples / (3600 * 4)h)
+ * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+ */
+ di->avg_curr = (val * QLSB_NANO_AMP_HOURS_X10 * 36) /
+ (1000 * di->bat->fg_res * (di->fg_samples / 4));
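+ /*
+ * E.g. with fg_res = 100 and fg_samples = 4 (one second of
+ * accumulation) one LSB corresponds to
+ * 1129 * 36 / (1000 * 100 * 1) ~= 0.41 mA
+ */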
+
+ di->flags.conv_done = true;
+
+ mutex_unlock(&di->cc_lock);
+
+ queue_work(di->fg_wq, &di->fg_work);
+
+ return;
+exit:
+ dev_err(di->dev,
+ "Failed to read or write gas gauge registers\n");
+ mutex_unlock(&di->cc_lock);
+ queue_work(di->fg_wq, &di->fg_work);
+}
+
+/**
+ * ab8500_fg_bat_voltage() - get battery voltage
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery voltage(on success) else error code
+ */
+static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
+{
+ int vbat;
+ static int prev;
+
+ vbat = ab8500_gpadc_convert(di->gpadc, MAIN_BAT_V);
+ if (vbat < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value\n",
+ __func__);
+ return prev;
+ }
+
+ prev = vbat;
+ return vbat;
+}
+
+/**
+ * ab8500_fg_volt_to_capacity() - Voltage based capacity
+ * @di: pointer to the ab8500_fg structure
+ * @voltage: The voltage to convert to a capacity
+ *
+ * Returns battery capacity in per mille based on voltage
+ */
+static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
+{
+ int i, tbl_size;
+ struct v_to_cap *tbl;
+ int cap = 0;
+
+ tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl;
+ tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements;
+
+ for (i = 0; i < tbl_size; ++i) {
+ if (voltage > tbl[i].voltage)
+ break;
+ }
+
+ if ((i > 0) && (i < tbl_size)) {
+ cap = interpolate(voltage,
+ tbl[i].voltage,
+ tbl[i].capacity * 10,
+ tbl[i-1].voltage,
+ tbl[i-1].capacity * 10);
+ } else if (i == 0) {
+ cap = 1000;
+ } else {
+ cap = 0;
+ }
+
+ dev_dbg(di->dev, "%s Vbat: %d, Cap: %d per mille\n",
+ __func__, voltage, cap);
+
+ return cap;
+}
+
+/**
+ * ab8500_fg_uncomp_volt_to_capacity() - Uncompensated voltage based capacity
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is not compensated
+ * for the voltage drop due to the load
+ */
+static int ab8500_fg_uncomp_volt_to_capacity(struct ab8500_fg *di)
+{
+ di->vbat = ab8500_fg_bat_voltage(di);
+ return ab8500_fg_volt_to_capacity(di, di->vbat);
+}
+
+/**
+ * ab8500_fg_battery_resistance() - Returns the battery inner resistance
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery inner resistance added with the fuel gauge resistor value
+ * to get the total resistance in the whole link from gnd to bat+ node.
+ */
+static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
+{
+ int i, tbl_size;
+ struct batres_vs_temp *tbl;
+ int resist = 0;
+
+ tbl = di->bat->bat_type[di->bat->batt_id].batres_tbl;
+ tbl_size = di->bat->bat_type[di->bat->batt_id].n_batres_tbl_elements;
+
+ for (i = 0; i < tbl_size; ++i) {
+ if (di->bat_temp / 10 > tbl[i].temp)
+ break;
+ }
+
+ if ((i > 0) && (i < tbl_size)) {
+ resist = interpolate(di->bat_temp / 10,
+ tbl[i].temp,
+ tbl[i].resist,
+ tbl[i-1].temp,
+ tbl[i-1].resist);
+ } else if (i == 0) {
+ resist = tbl[0].resist;
+ } else {
+ resist = tbl[tbl_size - 1].resist;
+ }
+
+ dev_dbg(di->dev, "%s Temp: %d battery internal resistance: %d"
+ " fg resistance %d, total: %d (mOhm)\n",
+ __func__, di->bat_temp, resist, di->bat->fg_res / 10,
+ (di->bat->fg_res / 10) + resist);
+
+ /* fg_res variable is in 0.1mOhm */
+ resist += di->bat->fg_res / 10;
+
+ return resist;
+}
+
+/**
+ * ab8500_fg_load_comp_volt_to_capacity() - Load compensated voltage based capacity
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is load compensated
+ * for the voltage drop
+ */
+static int ab8500_fg_load_comp_volt_to_capacity(struct ab8500_fg *di)
+{
+ int vbat_comp, res;
+
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+ di->vbat = ab8500_fg_bat_voltage(di);
+
+ res = ab8500_fg_battery_resistance(di);
+
+ /* Use Ohm's law to get the load compensated voltage */
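+ /*
+ * E.g. a 300 mA discharge over a total of 200 mOhm lowers the
+ * measured voltage by about 60 mV, which is compensated for here
+ */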
+ vbat_comp = di->vbat - (di->inst_curr * res) / 1000;
+
+ dev_dbg(di->dev, "%s Measured Vbat: %dmV,Compensated Vbat %dmV, "
+ "R: %dmOhm, Current: %dmA\n",
+ __func__, di->vbat, vbat_comp, res, di->inst_curr);
+
+ return ab8500_fg_volt_to_capacity(di, vbat_comp);
+}
+
+/**
+ * ab8500_fg_convert_mah_to_permille() - Capacity in mAh to permille
+ * @di: pointer to the ab8500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in permille
+ */
+static int ab8500_fg_convert_mah_to_permille(struct ab8500_fg *di, int cap_mah)
+{
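+ /* E.g. a capacity of half the design capacity yields 500 permille */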
+ return (cap_mah * 1000) / di->bat_cap.max_mah_design;
+}
+
+/**
+ * ab8500_fg_convert_permille_to_mah() - Capacity in permille to mAh
+ * @di: pointer to the ab8500_fg structure
+ * @cap_pm: capacity in permille
+ *
+ * Converts capacity in permille to capacity in mAh
+ */
+static int ab8500_fg_convert_permille_to_mah(struct ab8500_fg *di, int cap_pm)
+{
+ return cap_pm * di->bat_cap.max_mah_design / 1000;
+}
+
+/**
+ * ab8500_fg_convert_mah_to_uwh() - Capacity in mAh to uWh
+ * @di: pointer to the ab8500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in uWh
+ */
+static int ab8500_fg_convert_mah_to_uwh(struct ab8500_fg *di, int cap_mah)
+{
+ u64 div_res;
+ u32 div_rem;
+
+ div_res = ((u64) cap_mah) * ((u64) di->vbat_nom);
+ div_rem = do_div(div_res, 1000);
+
+ /* Make sure to round upwards if necessary */
+ if (div_rem >= 1000 / 2)
+ div_res++;
+
+ return (int) div_res;
+}
+
+/**
+ * ab8500_fg_calc_cap_charging() - Calculate remaining capacity while charging
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and the FG
+ * accumulator register value. The filter is filled with this capacity
+ */
+static int ab8500_fg_calc_cap_charging(struct ab8500_fg *di)
+{
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+ /*
+ * We force capacity to 100% once when the algorithm
+ * reports that it's full.
+ */
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design ||
+ di->flags.force_full) {
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+ }
+
+ ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ /* We need to update battery voltage and inst current when charging */
+ di->vbat = ab8500_fg_bat_voltage(di);
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_calc_cap_discharge_voltage() - Capacity in discharge with voltage
+ * @di: pointer to the ab8500_fg structure
+ * @comp: if voltage should be load compensated before capacity calc
+ *
+ * Return the capacity in mAh based on the battery voltage. The voltage can
+ * either be load compensated or not. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab8500_fg_calc_cap_discharge_voltage(struct ab8500_fg *di, bool comp)
+{
+ int permille, mah;
+
+ if (comp)
+ permille = ab8500_fg_load_comp_volt_to_capacity(di);
+ else
+ permille = ab8500_fg_uncomp_volt_to_capacity(di);
+
+ mah = ab8500_fg_convert_permille_to_mah(di, permille);
+
+ di->bat_cap.mah = ab8500_fg_add_cap_sample(di, mah);
+ di->bat_cap.permille =
+ ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_calc_cap_discharge_fg() - Capacity in discharge with FG
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and the FG
+ * accumulator register value. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab8500_fg_calc_cap_discharge_fg(struct ab8500_fg *di)
+{
+ int permille_volt, permille;
+
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design)
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+ /*
+ * Check against voltage based capacity. It can not be lower
+ * than what the uncompensated voltage says
+ */
+ permille = ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ permille_volt = ab8500_fg_uncomp_volt_to_capacity(di);
+
+ if (permille < permille_volt) {
+ di->bat_cap.permille = permille_volt;
+ di->bat_cap.mah = ab8500_fg_convert_permille_to_mah(di,
+ di->bat_cap.permille);
+
+ dev_dbg(di->dev, "%s voltage based: perm %d perm_volt %d\n",
+ __func__,
+ permille,
+ permille_volt);
+
+ ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ } else {
+ ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ }
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab8500_fg_capacity_level() - Get the battery capacity level
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Get the battery capacity level based on the capacity in percent
+ */
+static int ab8500_fg_capacity_level(struct ab8500_fg *di)
+{
+ int ret, percent;
+
+ percent = di->bat_cap.permille / 10;
+
+ if (percent <= di->bat->cap_levels->critical ||
+ di->flags.low_bat)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else if (percent <= di->bat->cap_levels->low)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (percent <= di->bat->cap_levels->normal)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ else if (percent <= di->bat->cap_levels->high)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
+ else
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+
+ return ret;
+}
+
+/**
+ * ab8500_fg_check_capacity_limits() - Check if capacity has changed
+ * @di: pointer to the ab8500_fg structure
+ * @init: capacity is allowed to go up in init mode
+ *
+ * Check if capacity or capacity limit has changed and notify the system
+ * about it using the power_supply framework
+ */
+static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
+{
+ bool changed = false;
+
+ di->bat_cap.level = ab8500_fg_capacity_level(di);
+
+ if (di->bat_cap.level != di->bat_cap.prev_level) {
+ /*
+ * We do not allow reported capacity level to go up
+ * unless we're charging or if we're in init
+ */
+ if (!(!di->flags.charging && di->bat_cap.level >
+ di->bat_cap.prev_level) || init) {
+ dev_dbg(di->dev, "level changed from %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ di->bat_cap.prev_level = di->bat_cap.level;
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "level not allowed to go up "
+ "since no charger is connected: %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ }
+ }
+
+ /*
+ * If we have received the LOW_BAT IRQ, set capacity to 0 to initiate
+ * shutdown
+ */
+ if (di->flags.low_bat) {
+ dev_dbg(di->dev, "Battery low, set capacity to 0\n");
+ di->bat_cap.prev_percent = 0;
+ di->bat_cap.permille = 0;
+ di->bat_cap.prev_mah = 0;
+ di->bat_cap.mah = 0;
+ changed = true;
+ } else if (di->flags.fully_charged) {
+ /*
+ * We report 100% if algorithm reported fully charged
+ * unless capacity drops too much
+ */
+ if (di->flags.force_full) {
+ di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_mah = di->bat_cap.mah;
+ } else if (!di->flags.force_full &&
+ di->bat_cap.prev_percent !=
+ (di->bat_cap.permille) / 10 &&
+ (di->bat_cap.permille / 10) <
+ di->bat->fg_params->maint_thres) {
+ dev_dbg(di->dev,
+ "battery reported full "
+ "but capacity dropping: %d\n",
+ di->bat_cap.permille / 10);
+ di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_mah = di->bat_cap.mah;
+
+ changed = true;
+ }
+ } else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) {
+ if (di->bat_cap.permille / 10 == 0) {
+ /*
+ * We will not report 0% unless we've got
+ * the LOW_BAT IRQ, no matter what the FG
+ * algorithm says.
+ */
+ di->bat_cap.prev_percent = 1;
+ di->bat_cap.permille = 1;
+ di->bat_cap.prev_mah = 1;
+ di->bat_cap.mah = 1;
+
+ changed = true;
+ } else if (!(!di->flags.charging &&
+ (di->bat_cap.permille / 10) >
+ di->bat_cap.prev_percent) || init) {
+ /*
+ * We do not allow reported capacity to go up
+ * unless we're charging or if we're in init
+ */
+ dev_dbg(di->dev,
+ "capacity changed from %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_mah = di->bat_cap.mah;
+
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "capacity not allowed to go up since "
+ "no charger is connected: %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ }
+ }
+
+ if (changed) {
+ power_supply_changed(&di->fg_psy);
+ if (di->flags.fully_charged && di->flags.force_full) {
+ dev_dbg(di->dev, "Battery full, notifying.\n");
+ di->flags.force_full = false;
+ sysfs_notify(&di->fg_kobject, NULL, "charge_full");
+ }
+ sysfs_notify(&di->fg_kobject, NULL, "charge_now");
+ }
+}
+
+static void ab8500_fg_charge_state_to(struct ab8500_fg *di,
+ enum ab8500_fg_charge_state new_state)
+{
+ dev_dbg(di->dev, "Charge state from %d [%s] to %d [%s]\n",
+ di->charge_state,
+ charge_state[di->charge_state],
+ new_state,
+ charge_state[new_state]);
+
+ di->charge_state = new_state;
+}
+
+static void ab8500_fg_discharge_state_to(struct ab8500_fg *di,
+ enum ab8500_fg_discharge_state new_state)
+{
+ dev_dbg(di->dev, "Discharge state from %d [%s] to %d [%s]\n",
+ di->discharge_state,
+ discharge_state[di->discharge_state],
+ new_state,
+ discharge_state[new_state]);
+
+ di->discharge_state = new_state;
+}
+
+/**
+ * ab8500_fg_algorithm_charging() - FG algorithm for when charging
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're charging
+ */
+static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
+{
+ /*
+ * If we change to discharge mode
+ * we should start with recovery
+ */
+ if (di->discharge_state != AB8500_FG_DISCHARGE_INIT_RECOVERY)
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_INIT_RECOVERY);
+
+ switch (di->charge_state) {
+ case AB8500_FG_CHARGE_INIT:
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_charging);
+
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_READOUT);
+
+ break;
+
+ case AB8500_FG_CHARGE_READOUT:
+ /*
+ * Read the FG and calculate the new capacity
+ */
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ ab8500_fg_calc_cap_charging(di);
+
+ break;
+
+ default:
+ break;
+ }
+
+ /* Check capacity limits */
+ ab8500_fg_check_capacity_limits(di, false);
+}
+
+static void force_capacity(struct ab8500_fg *di)
+{
+ int cap;
+
+ ab8500_fg_clear_cap_samples(di);
+ cap = di->bat_cap.user_mah;
+ if (cap > di->bat_cap.max_mah_design) {
+ dev_dbg(di->dev, "Remaining cap %d can't be bigger than total"
+ " %d\n", cap, di->bat_cap.max_mah_design);
+ cap = di->bat_cap.max_mah_design;
+ }
+ ab8500_fg_fill_cap_sample(di, di->bat_cap.user_mah);
+ di->bat_cap.permille = ab8500_fg_convert_mah_to_permille(di, cap);
+ di->bat_cap.mah = cap;
+ ab8500_fg_check_capacity_limits(di, true);
+}
+
+static bool check_sysfs_capacity(struct ab8500_fg *di)
+{
+ int cap, lower, upper;
+ int cap_permille;
+
+ cap = di->bat_cap.user_mah;
+
+ cap_permille = ab8500_fg_convert_mah_to_permille(di,
+ di->bat_cap.user_mah);
+
+ lower = di->bat_cap.permille - di->bat->fg_params->user_cap_limit * 10;
+ upper = di->bat_cap.permille + di->bat->fg_params->user_cap_limit * 10;
+
+ if (lower < 0)
+ lower = 0;
+ /* 1000 is permille, -> 100 percent */
+ if (upper > 1000)
+ upper = 1000;
+
+ dev_dbg(di->dev, "Capacity limits:"
+ " (Lower: %d User: %d Upper: %d) [user: %d, was: %d]\n",
+ lower, cap_permille, upper, cap, di->bat_cap.mah);
+
+ /* If within limits, use the user-supplied capacity and skip estimation */
+ if (cap_permille > lower && cap_permille < upper) {
+ dev_dbg(di->dev, "OK! Using user's cap %d uAh now\n", cap);
+ force_capacity(di);
+ return true;
+ }
+ dev_dbg(di->dev, "Capacity from user out of limits, ignoring");
+ return false;
+}
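+
+/*
+ * Illustrative example for check_sysfs_capacity() above (numbers assumed,
+ * not taken from real platform data): with user_cap_limit = 15 percent the
+ * user-supplied value is accepted only when it converts to within +/-150
+ * permille of the current estimate, so an estimate of 500 permille accepts
+ * user values landing in 351..649 permille and ignores anything outside
+ * that window.
+ */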
+
+/**
+ * ab8500_fg_algorithm_discharging() - FG algorithm for when discharging
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're discharging
+ */
+static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
+{
+ int sleep_time;
+
+ /* If we change to charge mode we should start with init */
+ if (di->charge_state != AB8500_FG_CHARGE_INIT)
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+
+ switch (di->discharge_state) {
+ case AB8500_FG_DISCHARGE_INIT:
+ /* We use the FG IRQ to work on */
+ di->init_cnt = 0;
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_INITMEASURING);
+
+ /* Intentional fallthrough */
+ case AB8500_FG_DISCHARGE_INITMEASURING:
+ /*
+ * Discard a number of samples during startup.
+ * After that, use compensated voltage for a few
+ * samples to get an initial capacity.
+ * Then go to READOUT
+ */
+ sleep_time = di->bat->fg_params->init_timer;
+
+ /* Discard the first [x] seconds */
+ if (di->init_cnt >
+ di->bat->fg_params->init_discard_time) {
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+
+ ab8500_fg_check_capacity_limits(di, true);
+ }
+
+ di->init_cnt += sleep_time;
+ if (di->init_cnt >
+ di->bat->fg_params->init_total_time)
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+
+ break;
+
+ case AB8500_FG_DISCHARGE_INIT_RECOVERY:
+ di->recovery_cnt = 0;
+ di->recovery_needed = true;
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_RECOVERY);
+
+ /* Intentional fallthrough */
+
+ case AB8500_FG_DISCHARGE_RECOVERY:
+ sleep_time = di->bat->fg_params->recovery_sleep_timer;
+
+ /*
+ * We should check the power consumption
+ * If low, go to READOUT (after x min) or
+ * RECOVERY_SLEEP if time left.
+ * If high, go to READOUT
+ */
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
+ if (di->recovery_cnt >
+ di->bat->fg_params->recovery_total_time) {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+ di->recovery_needed = false;
+ } else {
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work,
+ sleep_time * HZ);
+ }
+ di->recovery_cnt += sleep_time;
+ } else {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+ }
+ break;
+
+ case AB8500_FG_DISCHARGE_READOUT_INIT:
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+ break;
+
+ case AB8500_FG_DISCHARGE_READOUT:
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
+ /* Detect mode change */
+ if (di->high_curr_mode) {
+ di->high_curr_mode = false;
+ di->high_curr_cnt = 0;
+ }
+
+ if (di->recovery_needed) {
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_RECOVERY);
+
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work, 0);
+
+ break;
+ }
+
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+ } else {
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ /* Detect mode change */
+ if (!di->high_curr_mode) {
+ di->high_curr_mode = true;
+ di->high_curr_cnt = 0;
+ }
+
+ di->high_curr_cnt +=
+ di->bat->fg_params->accu_high_curr;
+ if (di->high_curr_cnt >
+ di->bat->fg_params->high_curr_time)
+ di->recovery_needed = true;
+
+ ab8500_fg_calc_cap_discharge_fg(di);
+ }
+
+ ab8500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ case AB8500_FG_DISCHARGE_WAKEUP:
+ ab8500_fg_coulomb_counter(di, true);
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab8500_fg_coulomb_counter(di, true);
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT);
+
+ ab8500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ab8500_fg_algorithm_calibrate() - Internal coulomb counter offset calibration
+ * @di: pointer to the ab8500_fg structure
+ *
+ */
+static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di)
+{
+ int ret;
+
+ switch (di->calib_state) {
+ case AB8500_FG_CALIB_INIT:
+ dev_dbg(di->dev, "Calibration ongoing...\n");
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ CC_INT_CAL_N_AVG_MASK, CC_INT_CAL_SAMPLES_8);
+ if (ret < 0)
+ goto err;
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ CC_INTAVGOFFSET_ENA, CC_INTAVGOFFSET_ENA);
+ if (ret < 0)
+ goto err;
+ di->calib_state = AB8500_FG_CALIB_WAIT;
+ break;
+ case AB8500_FG_CALIB_END:
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ CC_MUXOFFSET, CC_MUXOFFSET);
+ if (ret < 0)
+ goto err;
+ di->flags.calibrate = false;
+ dev_dbg(di->dev, "Calibration done...\n");
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ break;
+ case AB8500_FG_CALIB_WAIT:
+ dev_dbg(di->dev, "Calibration WFI\n");
+ default:
+ break;
+ }
+ return;
+err:
+ /* Something went wrong, don't calibrate then */
+ dev_err(di->dev, "failed to calibrate the CC\n");
+ di->flags.calibrate = false;
+ di->calib_state = AB8500_FG_CALIB_INIT;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+}
+
+/**
+ * ab8500_fg_algorithm() - Entry point for the FG algorithm
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Entry point for the battery capacity calculation state machine
+ */
+static void ab8500_fg_algorithm(struct ab8500_fg *di)
+{
+ if (di->flags.calibrate)
+ ab8500_fg_algorithm_calibrate(di);
+ else {
+ if (di->flags.charging)
+ ab8500_fg_algorithm_charging(di);
+ else
+ ab8500_fg_algorithm_discharging(di);
+ }
+
+ dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d "
+ "%d %d %d %d %d %d %d\n",
+ di->bat_cap.max_mah_design,
+ di->bat_cap.mah,
+ di->bat_cap.permille,
+ di->bat_cap.level,
+ di->bat_cap.prev_mah,
+ di->bat_cap.prev_percent,
+ di->bat_cap.prev_level,
+ di->vbat,
+ di->inst_curr,
+ di->avg_curr,
+ di->accu_charge,
+ di->flags.charging,
+ di->charge_state,
+ di->discharge_state,
+ di->high_curr_mode,
+ di->recovery_needed);
+}
+
+/**
+ * ab8500_fg_periodic_work() - Run the FG state machine periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for periodic work
+ */
+static void ab8500_fg_periodic_work(struct work_struct *work)
+{
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+ fg_periodic_work.work);
+
+ if (di->init_capacity) {
+ /* A dummy read that will return 0 */
+ di->inst_curr = ab8500_fg_inst_curr_blocking(di);
+ /* Get an initial capacity calculation */
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+ ab8500_fg_check_capacity_limits(di, true);
+ di->init_capacity = false;
+
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ } else if (di->flags.user_cap) {
+ if (check_sysfs_capacity(di)) {
+ ab8500_fg_check_capacity_limits(di, true);
+ if (di->flags.charging)
+ ab8500_fg_charge_state_to(di,
+ AB8500_FG_CHARGE_INIT);
+ else
+ ab8500_fg_discharge_state_to(di,
+ AB8500_FG_DISCHARGE_READOUT_INIT);
+ }
+ di->flags.user_cap = false;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ } else
+ ab8500_fg_algorithm(di);
+}
+
+/**
+ * ab8500_fg_low_bat_work() - Check LOW_BAT condition
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the LOW_BAT condition
+ */
+static void ab8500_fg_low_bat_work(struct work_struct *work)
+{
+ int vbat;
+
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+ fg_low_bat_work.work);
+
+ vbat = ab8500_fg_bat_voltage(di);
+
+ /* Check if LOW_BAT still fulfilled */
+ if (vbat < di->bat->fg_params->lowbat_threshold) {
+ di->flags.low_bat = true;
+ dev_warn(di->dev, "Battery voltage still LOW\n");
+
+ /*
+ * We need to re-schedule this check to be able to detect
+ * if the voltage increases again during charging
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ } else {
+ di->flags.low_bat = false;
+ dev_warn(di->dev, "Battery voltage OK again\n");
+ }
+
+ /* This is needed to dispatch LOW_BAT */
+ ab8500_fg_check_capacity_limits(di, false);
+
+ /* Set this flag to check if LOW_BAT IRQ still occurs */
+ di->flags.low_bat_delay = false;
+}
+
+/**
+ * ab8500_fg_battok_calc() - calculate the bit pattern corresponding
+ * to the target voltage.
+ * @di: pointer to the ab8500_fg structure
+ * @target: target voltage
+ *
+ * Returns the bit pattern closest to the target voltage.
+ * valid return values are 0-14. (0-BATT_OK_MAX_NR_INCREMENTS)
+ */
+
+static int ab8500_fg_battok_calc(struct ab8500_fg *di, int target)
+{
+ if (target > BATT_OK_MIN +
+ (BATT_OK_INCREMENT * BATT_OK_MAX_NR_INCREMENTS))
+ return BATT_OK_MAX_NR_INCREMENTS;
+ if (target < BATT_OK_MIN)
+ return 0;
+ return (target - BATT_OK_MIN) / BATT_OK_INCREMENT;
+}
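+
+/*
+ * Worked example for ab8500_fg_battok_calc() above, assuming the constants
+ * defined earlier in this file are BATT_OK_MIN = 2360 mV,
+ * BATT_OK_INCREMENT = 50 mV and BATT_OK_MAX_NR_INCREMENTS = 14 (values not
+ * visible in this hunk): a target of 2600 mV maps to
+ * (2600 - 2360) / 50 = 4, so the threshold actually programmed is
+ * 2360 + 4 * 50 = 2560 mV, the closest step at or below the request.
+ */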
+
+/**
+ * ab8500_fg_battok_init_hw_register() - init battok levels
+ * @di: pointer to the ab8500_fg structure
+ *
+ */
+
+static int ab8500_fg_battok_init_hw_register(struct ab8500_fg *di)
+{
+ int selected;
+ int sel0;
+ int sel1;
+ int cbp_sel0;
+ int cbp_sel1;
+ int ret;
+ int new_val;
+
+ sel0 = di->bat->fg_params->battok_falling_th_sel0;
+ sel1 = di->bat->fg_params->battok_raising_th_sel1;
+
+ cbp_sel0 = ab8500_fg_battok_calc(di, sel0);
+ cbp_sel1 = ab8500_fg_battok_calc(di, sel1);
+
+ selected = BATT_OK_MIN + cbp_sel0 * BATT_OK_INCREMENT;
+
+ if (selected != sel0)
+ dev_warn(di->dev, "Invalid voltage step:%d, using %d %d\n",
+ sel0, selected, cbp_sel0);
+
+ selected = BATT_OK_MIN + cbp_sel1 * BATT_OK_INCREMENT;
+
+ if (selected != sel1)
+ dev_warn(di->dev, "Invalid voltage step:%d, using %d %d\n",
+ sel1, selected, cbp_sel1);
+
+ new_val = cbp_sel0 | (cbp_sel1 << 4);
+
+ dev_dbg(di->dev, "using: %x %d %d\n", new_val, cbp_sel0, cbp_sel1);
+ ret = abx500_set_register_interruptible(di->dev, AB8500_SYS_CTRL2_BLOCK,
+ AB8500_BATT_OK_REG, new_val);
+ return ret;
+}
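+
+/*
+ * Register layout implied by ab8500_fg_battok_init_hw_register() above
+ * (derived from the code, not quoted from the AB8500 data sheet): the
+ * falling threshold selection cbp_sel0 occupies bits [3:0] of
+ * AB8500_BATT_OK_REG and the rising threshold selection cbp_sel1 bits
+ * [7:4], so cbp_sel0 = 4 and cbp_sel1 = 6 give new_val = 0x64.
+ */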
+
+/**
+ * ab8500_fg_instant_work() - Run the FG state machine instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for instant work
+ */
+static void ab8500_fg_instant_work(struct work_struct *work)
+{
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg, fg_work);
+
+ ab8500_fg_algorithm(di);
+}
+
+/**
+ * ab8500_fg_cc_int_calib_handler() - isr signalling that the internal CC offset calibration is done.
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_cc_int_calib_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+ di->calib_state = AB8500_FG_CALIB_END;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_cc_convend_handler() - isr to get battery avg current.
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_cc_convend_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+
+ queue_work(di->fg_wq, &di->fg_acc_cur_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_batt_ovv_handler() - Battery OVV occurred
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_batt_ovv_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+
+ dev_dbg(di->dev, "Battery OVV\n");
+ di->flags.bat_ovv = true;
+
+ power_supply_changed(&di->fg_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_lowbatf_handler() - Battery voltage is below LOW threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab8500_fg structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_fg_lowbatf_handler(int irq, void *_di)
+{
+ struct ab8500_fg *di = _di;
+
+ if (!di->flags.low_bat_delay) {
+ dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
+ di->flags.low_bat_delay = true;
+ /*
+ * Start a timer to check LOW_BAT again after some time
+ * This is done to avoid shutdown on single voltage dips
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab8500_fg_get_property() - get the fg properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * fg properties by reading the sysfs files.
+ * voltage_now: battery voltage
+ * current_now: battery instant current
+ * current_avg: battery average current
+ * charge_full_design: design capacity of the battery
+ * charge_now: battery capacity in uAh
+ * capacity: capacity in percent
+ * capacity_level: capacity level
+ *
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_fg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab8500_fg *di;
+
+ di = to_ab8500_fg_device_info(psy);
+
+ /*
+ * If battery is identified as unknown and charging of unknown
+ * batteries is disabled, we always report 100% capacity and
+ * capacity level UNKNOWN, since we can't calculate
+ * remaining capacity
+ */
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (di->flags.bat_ovv)
+ val->intval = 4750000; /* BATT_OVV threshold, 4.75 V in microvolts */
+ else
+ val->intval = di->vbat * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = di->inst_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = di->avg_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah_design);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ di->flags.batt_id_received)
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ else
+ val->intval = ab8500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.prev_mah);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = di->bat_cap.max_mah_design;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = di->bat_cap.max_mah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ di->flags.batt_id_received)
+ val->intval = di->bat_cap.max_mah;
+ else
+ val->intval = di->bat_cap.prev_mah;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ di->flags.batt_id_received)
+ val->intval = 100;
+ else
+ val->intval = di->bat_cap.prev_percent;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+ di->flags.batt_id_received)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ else
+ val->intval = di->bat_cap.prev_level;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab8500_fg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab8500_fg_device_info(psy);
+
+ /*
+ * For all psy where the name of this driver
+ * appears in any supplied_to
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ switch (ret.intval) {
+ case POWER_SUPPLY_STATUS_UNKNOWN:
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ if (!di->flags.charging)
+ break;
+ di->flags.charging = false;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_FULL:
+ if (di->flags.fully_charged)
+ break;
+ di->flags.fully_charged = true;
+ di->flags.force_full = true;
+ /* Save current capacity as maximum */
+ di->bat_cap.max_mah = di->bat_cap.mah;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ if (di->flags.charging)
+ break;
+ di->flags.charging = true;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (!di->flags.batt_id_received)
+ di->flags.batt_id_received = true;
+ if (ret.intval)
+ di->flags.batt_unknown = false;
+ else
+ di->flags.batt_unknown = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (di->flags.batt_id_received)
+ di->bat_temp = ret.intval;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab8500_fg_init_hw_registers() - Set up FG related registers
+ * @di: pointer to the ab8500_fg structure
+ *
+ * Set up battery OVV, low battery voltage registers
+ */
+static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
+{
+ int ret;
+
+ /* Set up VBAT OVV register */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER,
+ AB8500_BATT_OVV,
+ (BATT_OVV_ENA | BATT_OVV_TH_4P75));
+ if (ret) {
+ dev_err(di->dev, "failed to set BATT_OVV\n");
+ goto out;
+ }
+
+ /* Low Battery Voltage */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_SYS_CTRL2_BLOCK,
+ AB8500_LOW_BAT_REG,
+ ab8500_volt_to_regval(
+ di->bat->fg_params->lowbat_threshold) << 1 |
+ LOW_BAT_ENABLE);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ goto out;
+ }
+
+ /* Battery OK threshold */
+ ret = ab8500_fg_battok_init_hw_register(di);
+ if (ret) {
+ dev_err(di->dev, "BattOk init write failed.\n");
+ goto out;
+ }
+out:
+ return ret;
+}
+
+/**
+ * ab8500_fg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is the entry point of the pointer external_power_changed
+ * of the structure power_supply.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void ab8500_fg_external_power_changed(struct power_supply *psy)
+{
+ struct ab8500_fg *di = to_ab8500_fg_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->fg_psy, ab8500_fg_get_ext_psy_data);
+}
+
+/**
+ * ab8500_fg_reinit_work() - work to reset the FG algorithm
+ * @work: pointer to the work_struct structure
+ *
+ * Used to reset the current battery capacity to be able to
+ * retrigger a new voltage based capacity calculation. For
+ * test and verification purposes.
+ */
+static void ab8500_fg_reinit_work(struct work_struct *work)
+{
+ struct ab8500_fg *di = container_of(work, struct ab8500_fg,
+ fg_reinit_work.work);
+
+ if (!di->flags.calibrate) {
+ dev_dbg(di->dev, "Resetting FG state machine to init.\n");
+ ab8500_fg_clear_cap_samples(di);
+ ab8500_fg_calc_cap_discharge_voltage(di, true);
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+ ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+ } else {
+ dev_err(di->dev, "Residual offset calibration ongoing "
+ "retrying..\n");
+ /* Wait one second until next try*/
+ queue_delayed_work(di->fg_wq, &di->fg_reinit_work,
+ round_jiffies(1));
+ }
+}
+
+/**
+ * ab8500_fg_reinit() - forces FG algorithm to reinitialize with current values
+ *
+ * This function can be used to force the FG algorithm to recalculate a new
+ * voltage based battery capacity.
+ */
+void ab8500_fg_reinit(void)
+{
+ struct ab8500_fg *di = ab8500_fg_get();
+ /* The user won't be notified if a null pointer is returned */
+ if (di != NULL)
+ queue_delayed_work(di->fg_wq, &di->fg_reinit_work, 0);
+}
+
+/* Exposure to the sysfs interface */
+
+struct ab8500_fg_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct ab8500_fg *, char *);
+ ssize_t (*store)(struct ab8500_fg *, const char *, size_t);
+};
+
+static ssize_t charge_full_show(struct ab8500_fg *di, char *buf)
+{
+ return sprintf(buf, "%d\n", di->bat_cap.max_mah);
+}
+
+static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
+ size_t count)
+{
+ unsigned long charge_full;
+ ssize_t ret;
+
+ ret = strict_strtoul(buf, 10, &charge_full);
+
+ dev_dbg(di->dev, "Ret %zd charge_full %lu\n", ret, charge_full);
+
+ if (!ret) {
+ di->bat_cap.max_mah = (int) charge_full;
+ ret = count;
+ }
+ return ret;
+}
+
+static ssize_t charge_now_show(struct ab8500_fg *di, char *buf)
+{
+ return sprintf(buf, "%d\n", di->bat_cap.prev_mah);
+}
+
+static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf,
+ size_t count)
+{
+ unsigned long charge_now;
+ ssize_t ret;
+
+ ret = strict_strtoul(buf, 10, &charge_now);
+
+ dev_dbg(di->dev, "Ret %d charge_now %lu was %d",
+ ret, charge_now, di->bat_cap.prev_mah);
+
+ if (!ret) {
+ di->bat_cap.user_mah = (int) charge_now;
+ di->flags.user_cap = true;
+ ret = count;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ }
+ return ret;
+}
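+
+/*
+ * Usage note for the two store handlers above (derived from the code):
+ * writing a capacity in uAh to the "charge_now" attribute stores it in
+ * bat_cap.user_mah, sets flags.user_cap and schedules fg_periodic_work,
+ * which runs check_sysfs_capacity() to decide whether to adopt the value;
+ * writing to "charge_full" simply overrides bat_cap.max_mah.
+ */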
+
+static struct ab8500_fg_sysfs_entry charge_full_attr =
+ __ATTR(charge_full, 0644, charge_full_show, charge_full_store);
+
+static struct ab8500_fg_sysfs_entry charge_now_attr =
+ __ATTR(charge_now, 0644, charge_now_show, charge_now_store);
+
+static ssize_t
+ab8500_fg_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct ab8500_fg_sysfs_entry *entry;
+ struct ab8500_fg *di;
+
+ entry = container_of(attr, struct ab8500_fg_sysfs_entry, attr);
+ di = container_of(kobj, struct ab8500_fg, fg_kobject);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(di, buf);
+}
+static ssize_t
+ab8500_fg_store(struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ struct ab8500_fg_sysfs_entry *entry;
+ struct ab8500_fg *di;
+
+ entry = container_of(attr, struct ab8500_fg_sysfs_entry, attr);
+ di = container_of(kobj, struct ab8500_fg, fg_kobject);
+
+ if (!entry->store)
+ return -EIO;
+
+ return entry->store(di, buf, count);
+}
+
+const struct sysfs_ops ab8500_fg_sysfs_ops = {
+ .show = ab8500_fg_show,
+ .store = ab8500_fg_store,
+};
+
+static struct attribute *ab8500_fg_attrs[] = {
+ &charge_full_attr.attr,
+ &charge_now_attr.attr,
+ NULL,
+};
+
+static struct kobj_type ab8500_fg_ktype = {
+ .sysfs_ops = &ab8500_fg_sysfs_ops,
+ .default_attrs = ab8500_fg_attrs,
+};
+
+/**
+ * ab8500_fg_sysfs_exit() - de-init of sysfs entry
+ * @di: pointer to the struct ab8500_fg
+ *
+ * This function removes the entry in sysfs.
+ */
+static void ab8500_fg_sysfs_exit(struct ab8500_fg *di)
+{
+ kobject_del(&di->fg_kobject);
+}
+
+/**
+ * ab8500_fg_sysfs_init() - init of sysfs entry
+ * @di: pointer to the struct ab8500_fg
+ *
+ * This function adds an entry in sysfs.
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab8500_fg_sysfs_init(struct ab8500_fg *di)
+{
+ int ret = 0;
+
+ ret = kobject_init_and_add(&di->fg_kobject,
+ &ab8500_fg_ktype,
+ NULL, "battery");
+ if (ret < 0)
+ dev_err(di->dev, "failed to create sysfs entry\n");
+
+ return ret;
+}
+/* Exposure to the sysfs interface <<END>> */
+
+#if defined(CONFIG_PM)
+static int ab8500_fg_resume(struct platform_device *pdev)
+{
+ struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+ /*
+ * Change state if we're not charging. If we're charging we will wake
+ * up on the FG IRQ
+ */
+ if (!di->flags.charging) {
+ ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_WAKEUP);
+ queue_work(di->fg_wq, &di->fg_work);
+ }
+
+ return 0;
+}
+
+static int ab8500_fg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+ flush_delayed_work(&di->fg_periodic_work);
+
+ /*
+ * If the FG is enabled we will disable it before going to suspend
+ * only if we're not charging
+ */
+ if (di->flags.fg_enabled && !di->flags.charging)
+ ab8500_fg_coulomb_counter(di, false);
+
+ return 0;
+}
+#else
+#define ab8500_fg_suspend NULL
+#define ab8500_fg_resume NULL
+#endif
+
+static int __devexit ab8500_fg_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab8500_fg *di = platform_get_drvdata(pdev);
+
+ list_del(&di->node);
+
+ /* Disable coulomb counter */
+ ret = ab8500_fg_coulomb_counter(di, false);
+ if (ret)
+ dev_err(di->dev, "failed to disable coulomb counter\n");
+
+ destroy_workqueue(di->fg_wq);
+ ab8500_fg_sysfs_exit(di);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->fg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+ return ret;
+}
+
+/* ab8500 fg driver interrupts and their respective isr */
+static struct ab8500_fg_interrupts ab8500_fg_irq[] = {
+ {"NCONV_ACCU", ab8500_fg_cc_convend_handler},
+ {"BATT_OVV", ab8500_fg_batt_ovv_handler},
+ {"LOW_BAT_F", ab8500_fg_lowbatf_handler},
+ {"CC_INT_CALIB", ab8500_fg_cc_int_calib_handler},
+};
+
+static int __devinit ab8500_fg_probe(struct platform_device *pdev)
+{
+ int i, irq;
+ struct ab8500_platform_data *plat;
+ int ret = 0;
+
+ struct ab8500_fg *di =
+ kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ mutex_init(&di->cc_lock);
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab8500_gpadc_get();
+
+ plat = dev_get_platdata(di->parent->dev);
+
+ /* get fg specific platform data */
+ if (!plat->fg) {
+ dev_err(di->dev, "no fg platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->pdata = plat->fg;
+
+ /* get battery specific platform data */
+ if (!plat->battery) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ di->bat = plat->battery;
+
+ di->fg_psy.name = "ab8500_fg";
+ di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->fg_psy.properties = ab8500_fg_props;
+ di->fg_psy.num_properties = ARRAY_SIZE(ab8500_fg_props);
+ di->fg_psy.get_property = ab8500_fg_get_property;
+ di->fg_psy.supplied_to = di->pdata->supplied_to;
+ di->fg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->fg_psy.external_power_changed = ab8500_fg_external_power_changed;
+
+ di->bat_cap.max_mah_design = MILLI_TO_MICRO *
+ di->bat->bat_type[di->bat->batt_id].charge_full_design;
+
+ di->bat_cap.max_mah = di->bat_cap.max_mah_design;
+
+ di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage;
+
+ di->init_capacity = true;
+
+ ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
+ ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
+
+ /* Create a work queue for running the FG algorithm */
+ di->fg_wq = create_singlethread_workqueue("ab8500_fg_wq");
+ if (di->fg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for running the fg algorithm instantly */
+ INIT_WORK(&di->fg_work, ab8500_fg_instant_work);
+
+ /* Init work for getting the battery accumulated current */
+ INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work);
+
+ /* Init work for reinitialising the fg algorithm */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+ ab8500_fg_reinit_work);
+
+ /* Work delayed Queue to run the state machine */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+ ab8500_fg_periodic_work);
+
+ /* Work to check low battery condition */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+ ab8500_fg_low_bat_work);
+
+ /* Initialize OVV, and other registers */
+ ret = ab8500_fg_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize registers\n");
+ goto free_inst_curr_wq;
+ }
+
+ /* Consider battery unknown until we're informed otherwise */
+ di->flags.batt_unknown = true;
+ di->flags.batt_id_received = false;
+
+ /* Register FG power supply class */
+ ret = power_supply_register(di->dev, &di->fg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register FG psy\n");
+ goto free_inst_curr_wq;
+ }
+
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ ab8500_fg_coulomb_counter(di, true);
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab8500_fg_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab8500_fg_irq[i].name, di);
+
+ if (ret != 0) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab8500_fg_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab8500_fg_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ ret = ab8500_fg_sysfs_init(di);
+ if (ret) {
+ dev_err(di->dev, "failed to create sysfs entry\n");
+ goto free_irq;
+ }
+
+ /* Calibrate the fg first time */
+ di->flags.calibrate = true;
+ di->calib_state = AB8500_FG_CALIB_INIT;
+
+ /* Use room temperature as the default value until we get an update from the driver */
+ di->bat_temp = 210;
+
+ /* Run the FG algorithm */
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+ list_add_tail(&di->node, &ab8500_fg_list);
+
+ return ret;
+
+free_irq:
+ power_supply_unregister(&di->fg_psy);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
+ free_irq(irq, di);
+ }
+free_inst_curr_wq:
+ destroy_workqueue(di->fg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab8500_fg_driver = {
+ .probe = ab8500_fg_probe,
+ .remove = __devexit_p(ab8500_fg_remove),
+ .suspend = ab8500_fg_suspend,
+ .resume = ab8500_fg_resume,
+ .driver = {
+ .name = "ab8500-fg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab8500_fg_init(void)
+{
+ return platform_driver_register(&ab8500_fg_driver);
+}
+
+static void __exit ab8500_fg_exit(void)
+{
+ platform_driver_unregister(&ab8500_fg_driver);
+}
+
+subsys_initcall_sync(ab8500_fg_init);
+module_exit(ab8500_fg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab8500-fg");
+MODULE_DESCRIPTION("AB8500 Fuel Gauge driver");
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
new file mode 100644
index 00000000000..bb0fa42b109
--- /dev/null
+++ b/drivers/power/abx500_chargalg.c
@@ -0,0 +1,1920 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Charging algorithm driver for abx500 variants
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+
+/* Watchdog kick interval */
+#define CHG_WD_INTERVAL (6 * HZ)
+
+/* End-of-charge criteria counter */
+#define EOC_COND_CNT 10
+
+/* Recharge criteria counter */
+#define RCH_COND_CNT 3
+
+#define to_abx500_chargalg_device_info(x) container_of((x), \
+ struct abx500_chargalg, chargalg_psy)
+
+enum abx500_chargers {
+ NO_CHG,
+ AC_CHG,
+ USB_CHG,
+};
+
+struct abx500_chargalg_charger_info {
+ enum abx500_chargers conn_chg;
+ enum abx500_chargers prev_conn_chg;
+ enum abx500_chargers online_chg;
+ enum abx500_chargers prev_online_chg;
+ enum abx500_chargers charger_type;
+ bool usb_chg_ok;
+ bool ac_chg_ok;
+ int usb_volt;
+ int usb_curr;
+ int ac_volt;
+ int ac_curr;
+ int usb_vset;
+ int usb_iset;
+ int ac_vset;
+ int ac_iset;
+};
+
+struct abx500_chargalg_suspension_status {
+ bool suspended_change;
+ bool ac_suspended;
+ bool usb_suspended;
+};
+
+struct abx500_chargalg_battery_data {
+ int temp;
+ int volt;
+ int avg_curr;
+ int inst_curr;
+ int percent;
+};
+
+enum abx500_chargalg_states {
+ STATE_HANDHELD_INIT,
+ STATE_HANDHELD,
+ STATE_CHG_NOT_OK_INIT,
+ STATE_CHG_NOT_OK,
+ STATE_HW_TEMP_PROTECT_INIT,
+ STATE_HW_TEMP_PROTECT,
+ STATE_NORMAL_INIT,
+ STATE_NORMAL,
+ STATE_WAIT_FOR_RECHARGE_INIT,
+ STATE_WAIT_FOR_RECHARGE,
+ STATE_MAINTENANCE_A_INIT,
+ STATE_MAINTENANCE_A,
+ STATE_MAINTENANCE_B_INIT,
+ STATE_MAINTENANCE_B,
+ STATE_TEMP_UNDEROVER_INIT,
+ STATE_TEMP_UNDEROVER,
+ STATE_TEMP_LOWHIGH_INIT,
+ STATE_TEMP_LOWHIGH,
+ STATE_SUSPENDED_INIT,
+ STATE_SUSPENDED,
+ STATE_OVV_PROTECT_INIT,
+ STATE_OVV_PROTECT,
+ STATE_SAFETY_TIMER_EXPIRED_INIT,
+ STATE_SAFETY_TIMER_EXPIRED,
+ STATE_BATT_REMOVED_INIT,
+ STATE_BATT_REMOVED,
+ STATE_WD_EXPIRED_INIT,
+ STATE_WD_EXPIRED,
+};
+
+static const char *states[] = {
+ "HANDHELD_INIT",
+ "HANDHELD",
+ "CHG_NOT_OK_INIT",
+ "CHG_NOT_OK",
+ "HW_TEMP_PROTECT_INIT",
+ "HW_TEMP_PROTECT",
+ "NORMAL_INIT",
+ "NORMAL",
+ "WAIT_FOR_RECHARGE_INIT",
+ "WAIT_FOR_RECHARGE",
+ "MAINTENANCE_A_INIT",
+ "MAINTENANCE_A",
+ "MAINTENANCE_B_INIT",
+ "MAINTENANCE_B",
+ "TEMP_UNDEROVER_INIT",
+ "TEMP_UNDEROVER",
+ "TEMP_LOWHIGH_INIT",
+ "TEMP_LOWHIGH",
+ "SUSPENDED_INIT",
+ "SUSPENDED",
+ "OVV_PROTECT_INIT",
+ "OVV_PROTECT",
+ "SAFETY_TIMER_EXPIRED_INIT",
+ "SAFETY_TIMER_EXPIRED",
+ "BATT_REMOVED_INIT",
+ "BATT_REMOVED",
+ "WD_EXPIRED_INIT",
+ "WD_EXPIRED",
+};
+
+struct abx500_chargalg_events {
+ bool batt_unknown;
+ bool mainextchnotok;
+ bool batt_ovv;
+ bool batt_rem;
+ bool btemp_underover;
+ bool btemp_lowhigh;
+ bool main_thermal_prot;
+ bool usb_thermal_prot;
+ bool main_ovv;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool safety_timer_expired;
+ bool maintenance_timer_expired;
+ bool ac_wd_expired;
+ bool usb_wd_expired;
+ bool ac_cv_active;
+ bool usb_cv_active;
+ bool vbus_collapsed;
+};
+
+/**
+ * struct abx500_charge_curr_maximization - Charger maximization parameters
+ * @original_iset: the non-optimized/maximized charger current
+ * @current_iset: the charging current used at this moment
+ * @test_delta_i: the delta between the current we want to charge and the
+ current that is really going into the battery
+ * @condition_cnt: number of iterations needed before a new charger current
+ is set
+ * @max_current: maximum charger current
+ * @wait_cnt: to avoid too fast current step down in case of charger
+ * voltage collapse, we insert this delay between step
+ * down
+ * @level: tells in how many steps the charging current has been
+ increased
+ */
+struct abx500_charge_curr_maximization {
+ int original_iset;
+ int current_iset;
+ int test_delta_i;
+ int condition_cnt;
+ int max_current;
+ int wait_cnt;
+ u8 level;
+};
+
+enum maxim_ret {
+ MAXIM_RET_NOACTION,
+ MAXIM_RET_CHANGE,
+ MAXIM_RET_IBAT_TOO_HIGH,
+};
+
+/**
+ * struct abx500_chargalg - abx500 Charging algorithm device information
+ * @dev: pointer to the structure device
+ * @charge_status: battery operating status
+ * @eoc_cnt: counter used to determine end-of_charge
+ * @rch_cnt: counter used to determine start of recharge
+ * @maintenance_chg: indicate if maintenance charge is active
+ * @t_hyst_norm: temperature hysteresis when the temperature has been
+ * over or under normal limits
+ * @t_hyst_lowhigh: temperature hysteresis when the temperature has been
+ * over or under the high or low limits
+ * @charge_state: current state of the charging algorithm
+ * @ccm: charging current maximization parameters
+ * @chg_info: information about connected charger types
+ * @batt_data: data of the battery
+ * @susp_status: current charger suspension status
+ * @pdata: pointer to the abx500_chargalg platform data
+ * @bat: pointer to the abx500_bm platform data
+ * @chargalg_psy: structure that holds the battery properties exposed by
+ * the charging algorithm
+ * @events: structure for information about events triggered
+ * @chargalg_wq: work queue for running the charging algorithm
+ * @chargalg_periodic_work: work to run the charging algorithm periodically
+ * @chargalg_wd_work: work to kick the charger watchdog periodically
+ * @chargalg_work: work to run the charging algorithm instantly
+ * @safety_timer: charging safety timer
+ * @maintenance_timer: maintenance charging timer
+ * @chargalg_kobject: structure of type kobject
+ */
+struct abx500_chargalg {
+ struct device *dev;
+ int charge_status;
+ int eoc_cnt;
+ int rch_cnt;
+ bool maintenance_chg;
+ int t_hyst_norm;
+ int t_hyst_lowhigh;
+ enum abx500_chargalg_states charge_state;
+ struct abx500_charge_curr_maximization ccm;
+ struct abx500_chargalg_charger_info chg_info;
+ struct abx500_chargalg_battery_data batt_data;
+ struct abx500_chargalg_suspension_status susp_status;
+ struct abx500_chargalg_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply chargalg_psy;
+ struct ux500_charger *ac_chg;
+ struct ux500_charger *usb_chg;
+ struct abx500_chargalg_events events;
+ struct workqueue_struct *chargalg_wq;
+ struct delayed_work chargalg_periodic_work;
+ struct delayed_work chargalg_wd_work;
+ struct work_struct chargalg_work;
+ struct timer_list safety_timer;
+ struct timer_list maintenance_timer;
+ struct kobject chargalg_kobject;
+};
+
+/* Main battery properties */
+static enum power_supply_property abx500_chargalg_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+/**
+ * abx500_chargalg_safety_timer_expired() - Expiration of the safety timer
+ * @data: pointer to the abx500_chargalg structure
+ *
+ * This function gets called when the safety timer for the charger
+ * expires
+ */
+static void abx500_chargalg_safety_timer_expired(unsigned long data)
+{
+ struct abx500_chargalg *di = (struct abx500_chargalg *) data;
+ dev_err(di->dev, "Safety timer expired\n");
+ di->events.safety_timer_expired = true;
+
+ /* Trigger execution of the algorithm instantly */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_maintenance_timer_expired() - Expiration of
+ * the maintenance timer
+ * @data: pointer to the abx500_chargalg structure
+ *
+ * This function gets called when the maintenance timer
+ * expires
+ */
+static void abx500_chargalg_maintenance_timer_expired(unsigned long data)
+{
+
+ struct abx500_chargalg *di = (struct abx500_chargalg *) data;
+ dev_dbg(di->dev, "Maintenance timer expired\n");
+ di->events.maintenance_timer_expired = true;
+
+ /* Trigger execution of the algorithm instantly */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_state_to() - Change charge state
+ * @di: pointer to the abx500_chargalg structure
+ * @state: the new state to enter
+ *
+ * This function gets called when a charge state change should occur
+ */
+static void abx500_chargalg_state_to(struct abx500_chargalg *di,
+ enum abx500_chargalg_states state)
+{
+ dev_dbg(di->dev,
+ "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
+ di->charge_state == state ? "NO" : "YES",
+ di->charge_state,
+ states[di->charge_state],
+ state,
+ states[state]);
+
+ di->charge_state = state;
+}
+
+/**
+ * abx500_chargalg_check_charger_connection() - Check charger connection change
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This function will check if there is a change in the charger connection
+ * and change charge state accordingly. AC has precedence over USB.
+ */
+static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
+{
+ if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
+ di->susp_status.suspended_change) {
+ /*
+ * Charger state changed or suspension
+ * has changed since last update
+ */
+ if ((di->chg_info.conn_chg & AC_CHG) &&
+ !di->susp_status.ac_suspended) {
+ dev_dbg(di->dev, "Charging source is AC\n");
+ if (di->chg_info.charger_type != AC_CHG) {
+ di->chg_info.charger_type = AC_CHG;
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ }
+ } else if ((di->chg_info.conn_chg & USB_CHG) &&
+ !di->susp_status.usb_suspended) {
+ dev_dbg(di->dev, "Charging source is USB\n");
+ di->chg_info.charger_type = USB_CHG;
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ } else if (di->chg_info.conn_chg &&
+ (di->susp_status.ac_suspended ||
+ di->susp_status.usb_suspended)) {
+ dev_dbg(di->dev, "Charging is suspended\n");
+ di->chg_info.charger_type = NO_CHG;
+ abx500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
+ } else {
+ dev_dbg(di->dev, "Charging source is OFF\n");
+ di->chg_info.charger_type = NO_CHG;
+ abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+ }
+ di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
+ di->susp_status.suspended_change = false;
+ }
+ return di->chg_info.conn_chg;
+}
+
+/**
+ * abx500_chargalg_start_safety_timer() - Start charging safety timer
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The safety timer is used to avoid overcharging of old or bad batteries.
+ * There are different timers for AC and USB
+ */
+static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
+{
+ unsigned long timer_expiration = 0;
+
+ switch (di->chg_info.charger_type) {
+ case AC_CHG:
+ timer_expiration =
+ round_jiffies(jiffies +
+ (di->bat->main_safety_tmr_h * 3600 * HZ));
+ break;
+
+ case USB_CHG:
+ timer_expiration =
+ round_jiffies(jiffies +
+ (di->bat->usb_safety_tmr_h * 3600 * HZ));
+ break;
+
+ default:
+ dev_err(di->dev, "Unknown charger to charge from\n");
+ break;
+ }
+
+ di->events.safety_timer_expired = false;
+ di->safety_timer.expires = timer_expiration;
+ if (!timer_pending(&di->safety_timer))
+ add_timer(&di->safety_timer);
+ else
+ mod_timer(&di->safety_timer, timer_expiration);
+}
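+
+/*
+ * Expiry arithmetic in abx500_chargalg_start_safety_timer() above, with an
+ * assumed main_safety_tmr_h = 4: the AC safety timer is armed for
+ * 4 * 3600 * HZ jiffies, i.e. roughly four hours from now, rounded to a
+ * whole second by round_jiffies().
+ */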
+
+/**
+ * abx500_chargalg_stop_safety_timer() - Stop charging safety timer
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The safety timer is stopped whenever the NORMAL state is exited
+ */
+static void abx500_chargalg_stop_safety_timer(struct abx500_chargalg *di)
+{
+ di->events.safety_timer_expired = false;
+ del_timer(&di->safety_timer);
+}
+
+/**
+ * abx500_chargalg_start_maintenance_timer() - Start charging maintenance timer
+ * @di: pointer to the abx500_chargalg structure
+ * @duration: duration of the maintenance timer in hours
+ *
+ * The maintenance timer is used to maintain the charge in the battery once
+ * the battery is considered full. These timers are chosen to match the
+ * discharge curve of the battery
+ */
+static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
+ int duration)
+{
+ unsigned long timer_expiration;
+
+ /* Convert from hours to jiffies */
+ timer_expiration = round_jiffies(jiffies + (duration * 3600 * HZ));
+
+ di->events.maintenance_timer_expired = false;
+ di->maintenance_timer.expires = timer_expiration;
+ if (!timer_pending(&di->maintenance_timer))
+ add_timer(&di->maintenance_timer);
+ else
+ mod_timer(&di->maintenance_timer, timer_expiration);
+}
+
+/**
+ * abx500_chargalg_stop_maintenance_timer() - Stop maintenance timer
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The maintenance timer is stopped whenever maintenance ends or when another
+ * state is entered
+ */
+static void abx500_chargalg_stop_maintenance_timer(struct abx500_chargalg *di)
+{
+ di->events.maintenance_timer_expired = false;
+ del_timer(&di->maintenance_timer);
+}
+
+/**
+ * abx500_chargalg_kick_watchdog() - Kick charger watchdog
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The charger watchdog has to be kicked periodically whenever the charger is
+ * on, else the ABB will reset the system
+ */
+static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
+{
+ /* Check if charger exists and kick watchdog if charging */
+ if (di->ac_chg && di->ac_chg->ops.kick_wd &&
+ di->chg_info.online_chg & AC_CHG)
+ return di->ac_chg->ops.kick_wd(di->ac_chg);
+ else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
+ di->chg_info.online_chg & USB_CHG)
+ return di->usb_chg->ops.kick_wd(di->usb_chg);
+
+ return -ENXIO;
+}
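+
+/*
+ * The kick above is presumably driven by the chargalg_wd_work delayed work
+ * declared in struct abx500_chargalg, rescheduled every CHG_WD_INTERVAL
+ * (6 * HZ, i.e. 6 seconds); that work is defined later in this file and is
+ * not part of this hunk.
+ */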
+
+/**
+ * abx500_chargalg_ac_en() - Turn on/off the AC charger
+ * @di: pointer to the abx500_chargalg structure
+ * @enable: charger on/off
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * The AC charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
+ int vset, int iset)
+{
+ if (!di->ac_chg || !di->ac_chg->ops.enable)
+ return -ENXIO;
+
+ /* Select maximum of what both the charger and the battery supports */
+ if (di->ac_chg->max_out_volt)
+ vset = min(vset, di->ac_chg->max_out_volt);
+ if (di->ac_chg->max_out_curr)
+ iset = min(iset, di->ac_chg->max_out_curr);
+
+ di->chg_info.ac_iset = iset;
+ di->chg_info.ac_vset = vset;
+
+ return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
+}
+
+/**
+ * abx500_chargalg_usb_en() - Turn on/off the USB charger
+ * @di: pointer to the abx500_chargalg structure
+ * @enable: charger on/off
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * The USB charger will be turned on/off with the requested charge voltage and
+ * current
+ */
+static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
+ int vset, int iset)
+{
+ if (!di->usb_chg || !di->usb_chg->ops.enable)
+ return -ENXIO;
+
+ /* Select maximum of what both the charger and the battery supports */
+ if (di->usb_chg->max_out_volt)
+ vset = min(vset, di->usb_chg->max_out_volt);
+ if (di->usb_chg->max_out_curr)
+ iset = min(iset, di->usb_chg->max_out_curr);
+
+ di->chg_info.usb_iset = iset;
+ di->chg_info.usb_vset = vset;
+
+ return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
+}
+
+/**
+ * abx500_chargalg_update_chg_curr() - Update charger current
+ * @di: pointer to the abx500_chargalg structure
+ * @iset: requested charger output current
+ *
+ * The charger output current will be updated for the charger
+ * that is currently in use
+ */
+static int abx500_chargalg_update_chg_curr(struct abx500_chargalg *di,
+ int iset)
+{
+ /* Check if charger exists and update current if charging */
+ if (di->ac_chg && di->ac_chg->ops.update_curr &&
+ di->chg_info.charger_type & AC_CHG) {
+ /*
+ * Select maximum of what both the charger
+ * and the battery supports
+ */
+ if (di->ac_chg->max_out_curr)
+ iset = min(iset, di->ac_chg->max_out_curr);
+
+ di->chg_info.ac_iset = iset;
+
+ return di->ac_chg->ops.update_curr(di->ac_chg, iset);
+ } else if (di->usb_chg && di->usb_chg->ops.update_curr &&
+ di->chg_info.charger_type & USB_CHG) {
+ /*
+ * Select maximum of what both the charger
+ * and the battery supports
+ */
+ if (di->usb_chg->max_out_curr)
+ iset = min(iset, di->usb_chg->max_out_curr);
+
+ di->chg_info.usb_iset = iset;
+
+ return di->usb_chg->ops.update_curr(di->usb_chg, iset);
+ }
+
+ return -ENXIO;
+}
+
+/**
+ * abx500_chargalg_stop_charging() - Stop charging
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This function is called from any state where charging should be stopped.
+ * All charging is disabled and all status parameters and timers are changed
+ * accordingly
+ */
+static void abx500_chargalg_stop_charging(struct abx500_chargalg *di)
+{
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ di->maintenance_chg = false;
+ cancel_delayed_work(&di->chargalg_wd_work);
+ power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * abx500_chargalg_hold_charging() - Pauses charging
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This function is called in the case where maintenance charging has been
+ * disabled and instead a battery voltage mode is entered to check when the
+ * battery voltage has reached a certain recharge voltage
+ */
+static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
+{
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->maintenance_chg = false;
+ cancel_delayed_work(&di->chargalg_wd_work);
+ power_supply_changed(&di->chargalg_psy);
+}
+
+/**
+ * abx500_chargalg_start_charging() - Start the charger
+ * @di: pointer to the abx500_chargalg structure
+ * @vset: requested charger output voltage
+ * @iset: requested charger output current
+ *
+ * A charger will be enabled depending on the requested charger type that was
+ * detected previously.
+ */
+static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
+ int vset, int iset)
+{
+ switch (di->chg_info.charger_type) {
+ case AC_CHG:
+ dev_dbg(di->dev,
+ "AC parameters: Vset %d, Ich %d\n", vset, iset);
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_ac_en(di, true, vset, iset);
+ break;
+
+ case USB_CHG:
+ dev_dbg(di->dev,
+ "USB parameters: Vset %d, Ich %d\n", vset, iset);
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ abx500_chargalg_usb_en(di, true, vset, iset);
+ break;
+
+ default:
+ dev_err(di->dev, "Unknown charger to charge from\n");
+ break;
+ }
+}
+
+/**
+ * abx500_chargalg_check_temp() - Check battery temperature ranges
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * The battery temperature is checked against the predefined limits and the
+ * charge state is changed accordingly
+ */
+static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
+{
+ if (di->batt_data.temp > (di->bat->temp_low + di->t_hyst_norm) &&
+ di->batt_data.temp < (di->bat->temp_high - di->t_hyst_norm)) {
+ /* Temp OK! */
+ di->events.btemp_underover = false;
+ di->events.btemp_lowhigh = false;
+ di->t_hyst_norm = 0;
+ di->t_hyst_lowhigh = 0;
+ } else {
+ if (((di->batt_data.temp >= di->bat->temp_high) &&
+ (di->batt_data.temp <
+ (di->bat->temp_over - di->t_hyst_lowhigh))) ||
+ ((di->batt_data.temp >
+ (di->bat->temp_under + di->t_hyst_lowhigh)) &&
+ (di->batt_data.temp <= di->bat->temp_low))) {
+ /* Temperature slightly out of range */
+ di->events.btemp_underover = false;
+ di->events.btemp_lowhigh = true;
+ di->t_hyst_norm = di->bat->temp_hysteresis;
+ di->t_hyst_lowhigh = 0;
+ } else if (di->batt_data.temp <= di->bat->temp_under ||
+ di->batt_data.temp >= di->bat->temp_over) {
+ /* Temperature severely out of range */
+ di->events.btemp_underover = true;
+ di->events.btemp_lowhigh = false;
+ di->t_hyst_norm = 0;
+ di->t_hyst_lowhigh = di->bat->temp_hysteresis;
+ } else {
+ /* Within hysteresis */
+ dev_dbg(di->dev, "Within hysteresis limit temp: %d "
+ "hyst_lowhigh %d, hyst normal %d\n",
+ di->batt_data.temp, di->t_hyst_lowhigh,
+ di->t_hyst_norm);
+ }
+ }
+}
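+
+/*
+ * Illustrative walk-through of abx500_chargalg_check_temp() above with
+ * assumed limits (same unit as batt_data.temp): temp_low = 5,
+ * temp_high = 45, temp_under = 0, temp_over = 60, temp_hysteresis = 2.
+ * A reading of 46 is >= temp_high but still below temp_over, so
+ * btemp_lowhigh is set and the "normal" window shrinks to (7, 43) until
+ * the temperature re-enters it; a reading of 60 or more (or 0 or less)
+ * sets btemp_underover instead.
+ */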
+
+/**
+ * abx500_chargalg_check_charger_voltage() - Check charger voltage
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * Charger voltage is checked against maximum limit
+ */
+static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
+{
+ if (di->chg_info.usb_volt > di->bat->chg_params->usb_volt_max)
+ di->chg_info.usb_chg_ok = false;
+ else
+ di->chg_info.usb_chg_ok = true;
+
+ if (di->chg_info.ac_volt > di->bat->chg_params->ac_volt_max)
+ di->chg_info.ac_chg_ok = false;
+ else
+ di->chg_info.ac_chg_ok = true;
+
+}
+
+/**
+ * abx500_chargalg_end_of_charge() - Check if end-of-charge criteria are fulfilled
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * End-of-charge criteria are fulfilled when the battery voltage is above a
+ * certain limit and the battery current is below a certain limit for a
+ * predefined number of consecutive seconds. If true, the battery is full
+ */
+static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
+{
+ if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+ di->charge_state == STATE_NORMAL &&
+ !di->maintenance_chg && (di->batt_data.volt >=
+ di->bat->bat_type[di->bat->batt_id].termination_vol ||
+ di->events.usb_cv_active || di->events.ac_cv_active) &&
+ di->batt_data.avg_curr <
+ di->bat->bat_type[di->bat->batt_id].termination_curr &&
+ di->batt_data.avg_curr > 0) {
+ if (++di->eoc_cnt >= EOC_COND_CNT) {
+ di->eoc_cnt = 0;
+ di->charge_status = POWER_SUPPLY_STATUS_FULL;
+ di->maintenance_chg = true;
+ dev_dbg(di->dev, "EOC reached!\n");
+ power_supply_changed(&di->chargalg_psy);
+ } else {
+ dev_dbg(di->dev,
+ " EOC limit reached for the %d"
+ " time, out of %d before EOC\n",
+ di->eoc_cnt,
+ EOC_COND_CNT);
+ }
+ } else {
+ di->eoc_cnt = 0;
+ }
+}
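+
+/*
+ * In other words: while charging normally, the full condition (voltage
+ * above termination_vol or CV mode active, average current positive but
+ * below termination_curr) has to hold for EOC_COND_CNT (ten) consecutive
+ * evaluations of the periodic algorithm before the status flips to
+ * POWER_SUPPLY_STATUS_FULL; any miss resets the counter.
+ */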
+
+static void init_maxim_chg_curr(struct abx500_chargalg *di)
+{
+ di->ccm.original_iset =
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->ccm.current_iset =
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+ di->ccm.test_delta_i = di->bat->maxi->charger_curr_step;
+ di->ccm.max_current = di->bat->maxi->chg_curr;
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.level = 0;
+}
+
+/**
+ * abx500_chargalg_chg_curr_maxim() - increases the charger current to
+ * compensate for the system load
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This maximization function is used to raise the charger current to get the
+ * battery current as close to the optimal value as possible. The battery
+ * current during charging is affected by the system load
+ */
+static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
+{
+ int delta_i;
+
+ if (!di->bat->maxi->ena_maxi)
+ return MAXIM_RET_NOACTION;
+
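+ /* How far the battery current is below the optimal (original) setting */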
+ delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
+
+ if (di->events.vbus_collapsed) {
+ dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
+ di->ccm.wait_cnt);
+ if (di->ccm.wait_cnt == 0) {
+ dev_dbg(di->dev, "lowering current\n");
+ di->ccm.wait_cnt++;
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.max_current =
+ di->ccm.current_iset - di->ccm.test_delta_i;
+ di->ccm.current_iset = di->ccm.max_current;
+ di->ccm.level--;
+ return MAXIM_RET_CHANGE;
+ } else {
+ dev_dbg(di->dev, "waiting\n");
+ /* Let's go in here twice before lowering curr again */
+ di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
+ return MAXIM_RET_NOACTION;
+ }
+ }
+
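+ /* VBUS is no longer collapsed, reset the wait counter */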
+ di->ccm.wait_cnt = 0;
+
+ if (di->batt_data.inst_curr > di->ccm.original_iset) {
+ dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
+ " (limit %dmA) (current iset: %dmA)!\n",
+ di->batt_data.inst_curr, di->ccm.original_iset,
+ di->ccm.current_iset);
+
+ if (di->ccm.current_iset == di->ccm.original_iset)
+ return MAXIM_RET_NOACTION;
+
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.current_iset = di->ccm.original_iset;
+ di->ccm.level = 0;
+
+ return MAXIM_RET_IBAT_TOO_HIGH;
+ }
+
+ if (delta_i > di->ccm.test_delta_i &&
+ (di->ccm.current_iset + di->ccm.test_delta_i) <
+ di->ccm.max_current) {
+ if (di->ccm.condition_cnt-- == 0) {
+ /* Increase the iset with ccm.test_delta_i */
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ di->ccm.current_iset += di->ccm.test_delta_i;
+ di->ccm.level++;
+ dev_dbg(di->dev, " Maximization needed, increase"
+ " with %d mA to %dmA (Optimal ibat: %d)"
+ " Level %d\n",
+ di->ccm.test_delta_i,
+ di->ccm.current_iset,
+ di->ccm.original_iset,
+ di->ccm.level);
+ return MAXIM_RET_CHANGE;
+ } else {
+ return MAXIM_RET_NOACTION;
+ }
+ } else {
+ di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+ return MAXIM_RET_NOACTION;
+ }
+}
+
+static void handle_maxim_chg_curr(struct abx500_chargalg *di)
+{
+ enum maxim_ret ret;
+ int result;
+
+ ret = abx500_chargalg_chg_curr_maxim(di);
+ switch (ret) {
+ case MAXIM_RET_CHANGE:
+ result = abx500_chargalg_update_chg_curr(di,
+ di->ccm.current_iset);
+ if (result)
+ dev_err(di->dev, "failed to set chg curr\n");
+ break;
+ case MAXIM_RET_IBAT_TOO_HIGH:
+ result = abx500_chargalg_update_chg_curr(di,
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ if (result)
+ dev_err(di->dev, "failed to set chg curr\n");
+ break;
+
+ case MAXIM_RET_NOACTION:
+ default:
+ /* Do nothing */
+ break;
+ }
+}
+
+static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct abx500_chargalg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_abx500_chargalg_device_info(psy);
+ /* For all psy where the driver name appears in any supplied_to */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ /* Initialize chargers if not already done */
+ if (!di->ac_chg &&
+ ext->type == POWER_SUPPLY_TYPE_MAINS)
+ di->ac_chg = psy_to_ux500_charger(ext);
+ else if (!di->usb_chg &&
+ ext->type == POWER_SUPPLY_TYPE_USB)
+ di->usb_chg = psy_to_ux500_charger(ext);
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ /* Battery present */
+ if (ret.intval)
+ di->events.batt_rem = false;
+ /* Battery removed */
+ else
+ di->events.batt_rem = true;
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC disconnected */
+ if (!ret.intval &&
+ (di->chg_info.conn_chg & AC_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg &= ~AC_CHG;
+ }
+ /* AC connected */
+ else if (ret.intval &&
+ !(di->chg_info.conn_chg & AC_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg |= AC_CHG;
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval &&
+ (di->chg_info.conn_chg & USB_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg &= ~USB_CHG;
+ }
+ /* USB connected */
+ else if (ret.intval &&
+ !(di->chg_info.conn_chg & USB_CHG)) {
+ di->chg_info.prev_conn_chg =
+ di->chg_info.conn_chg;
+ di->chg_info.conn_chg |= USB_CHG;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_ONLINE:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AC offline */
+ if (!ret.intval &&
+ (di->chg_info.online_chg & AC_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg &= ~AC_CHG;
+ }
+ /* AC online */
+ else if (ret.intval &&
+ !(di->chg_info.online_chg & AC_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg |= AC_CHG;
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, 0);
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB offline */
+ if (!ret.intval &&
+ (di->chg_info.online_chg & USB_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg &= ~USB_CHG;
+ }
+ /* USB online */
+ else if (ret.intval &&
+ !(di->chg_info.online_chg & USB_CHG)) {
+ di->chg_info.prev_online_chg =
+ di->chg_info.online_chg;
+ di->chg_info.online_chg |= USB_CHG;
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ switch (ret.intval) {
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ di->events.mainextchnotok = true;
+ di->events.main_thermal_prot = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_DEAD:
+ di->events.ac_wd_expired = true;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.main_thermal_prot = false;
+ break;
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ di->events.main_thermal_prot = true;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ di->events.main_ovv = true;
+ di->events.mainextchnotok = false;
+ di->events.main_thermal_prot = false;
+ di->events.ac_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_GOOD:
+ di->events.main_thermal_prot = false;
+ di->events.mainextchnotok = false;
+ di->events.main_ovv = false;
+ di->events.ac_wd_expired = false;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_TYPE_USB:
+ switch (ret.intval) {
+ case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
+ di->events.usbchargernotok = true;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_DEAD:
+ di->events.usb_wd_expired = true;
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ break;
+ case POWER_SUPPLY_HEALTH_COLD:
+ case POWER_SUPPLY_HEALTH_OVERHEAT:
+ di->events.usb_thermal_prot = true;
+ di->events.usbchargernotok = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
+ di->events.vbus_ovv = true;
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.usb_wd_expired = false;
+ break;
+ case POWER_SUPPLY_HEALTH_GOOD:
+ di->events.usbchargernotok = false;
+ di->events.usb_thermal_prot = false;
+ di->events.vbus_ovv = false;
+ di->events.usb_wd_expired = false;
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.volt = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_MAINS:
+ di->chg_info.ac_volt = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ di->chg_info.usb_volt = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ /* AVG is used to indicate when we are
+ * in CV mode */
+ if (ret.intval)
+ di->events.ac_cv_active = true;
+ else
+ di->events.ac_cv_active = false;
+
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ /* AVG is used to indicate when we are
+ * in CV mode */
+ if (ret.intval)
+ di->events.usb_cv_active = true;
+ else
+ di->events.usb_cv_active = false;
+
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (ret.intval)
+ di->events.batt_unknown = false;
+ else
+ di->events.batt_unknown = true;
+
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ di->batt_data.temp = ret.intval / 10;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_MAINS:
+ di->chg_info.ac_curr =
+ ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ di->chg_info.usb_curr =
+ ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.inst_curr = ret.intval / 1000;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ di->batt_data.avg_curr = ret.intval / 1000;
+ break;
+ case POWER_SUPPLY_TYPE_USB:
+ if (ret.intval)
+ di->events.vbus_collapsed = true;
+ else
+ di->events.vbus_collapsed = false;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ di->batt_data.percent = ret.intval;
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * abx500_chargalg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is the entry point of the pointer external_power_changed
+ * of the structure power_supply.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void abx500_chargalg_external_power_changed(struct power_supply *psy)
+{
+ struct abx500_chargalg *di = to_abx500_chargalg_device_info(psy);
+
+ /*
+ * Trigger execution of the algorithm instantly and read
+ * all power_supply properties there instead
+ */
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_algorithm() - Main function for the algorithm
+ * @di: pointer to the abx500_chargalg structure
+ *
+ * This is the main control function for the charging algorithm.
+ * It is called periodically or when something happens that will
+ * trigger a state change
+ */
+static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
+{
+ int charger_status;
+
+ /* Collect data from all power_supply class devices */
+ class_for_each_device(power_supply_class, NULL,
+ &di->chargalg_psy, abx500_chargalg_get_ext_psy_data);
+
+ abx500_chargalg_end_of_charge(di);
+ abx500_chargalg_check_temp(di);
+ abx500_chargalg_check_charger_voltage(di);
+
+ charger_status = abx500_chargalg_check_charger_connection(di);
+ /*
+ * First check if we have a charger connected.
+ * Also we don't allow charging of unknown batteries if configured
+ * this way
+ */
+ if (!charger_status ||
+ (di->events.batt_unknown && !di->bat->chg_unknown_bat)) {
+ if (di->charge_state != STATE_HANDHELD) {
+ di->events.safety_timer_expired = false;
+ abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+ }
+ }
+
+ /* If suspended, we should not continue checking the flags */
+ else if (di->charge_state == STATE_SUSPENDED_INIT ||
+ di->charge_state == STATE_SUSPENDED) {
+ /* We don't do anything here, just don't continue */
+ }
+
+ /* Safety timer expiration */
+ else if (di->events.safety_timer_expired) {
+ if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
+ abx500_chargalg_state_to(di,
+ STATE_SAFETY_TIMER_EXPIRED_INIT);
+ }
+ /*
+ * Check if any interrupts have occurred
+ * that will prevent us from charging
+ */
+
+ /* Battery removed */
+ else if (di->events.batt_rem) {
+ if (di->charge_state != STATE_BATT_REMOVED)
+ abx500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
+ }
+ /* Main or USB charger not ok. */
+ else if (di->events.mainextchnotok || di->events.usbchargernotok) {
+ /*
+ * If vbus_collapsed is set, we have to lower the charger
+ * current, which is done in the normal state below
+ */
+ if (di->charge_state != STATE_CHG_NOT_OK &&
+ !di->events.vbus_collapsed)
+ abx500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
+ }
+ /* VBUS, Main or VBAT OVV. */
+ else if (di->events.vbus_ovv ||
+ di->events.main_ovv ||
+ di->events.batt_ovv ||
+ !di->chg_info.usb_chg_ok ||
+ !di->chg_info.ac_chg_ok) {
+ if (di->charge_state != STATE_OVV_PROTECT)
+ abx500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
+ }
+ /* USB Thermal, stop charging */
+ else if (di->events.main_thermal_prot ||
+ di->events.usb_thermal_prot) {
+ if (di->charge_state != STATE_HW_TEMP_PROTECT)
+ abx500_chargalg_state_to(di,
+ STATE_HW_TEMP_PROTECT_INIT);
+ }
+ /* Battery temp over/under */
+ else if (di->events.btemp_underover) {
+ if (di->charge_state != STATE_TEMP_UNDEROVER)
+ abx500_chargalg_state_to(di,
+ STATE_TEMP_UNDEROVER_INIT);
+ }
+ /* Watchdog expired */
+ else if (di->events.ac_wd_expired ||
+ di->events.usb_wd_expired) {
+ if (di->charge_state != STATE_WD_EXPIRED)
+ abx500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
+ }
+ /* Battery temp high/low */
+ else if (di->events.btemp_lowhigh) {
+ if (di->charge_state != STATE_TEMP_LOWHIGH)
+ abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
+ }
+
+ dev_dbg(di->dev,
+ "[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
+ "State %s Active_chg %d Chg_status %d AC %d USB %d "
+ "AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
+ "USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
+ di->batt_data.volt,
+ di->batt_data.avg_curr,
+ di->batt_data.inst_curr,
+ di->batt_data.temp,
+ di->batt_data.percent,
+ di->maintenance_chg,
+ states[di->charge_state],
+ di->chg_info.charger_type,
+ di->charge_status,
+ di->chg_info.conn_chg & AC_CHG,
+ di->chg_info.conn_chg & USB_CHG,
+ di->chg_info.online_chg & AC_CHG,
+ di->chg_info.online_chg & USB_CHG,
+ di->events.ac_cv_active,
+ di->events.usb_cv_active,
+ di->chg_info.ac_curr,
+ di->chg_info.usb_curr,
+ di->chg_info.ac_vset,
+ di->chg_info.ac_iset,
+ di->chg_info.usb_vset,
+ di->chg_info.usb_iset);
+
+ switch (di->charge_state) {
+ case STATE_HANDHELD_INIT:
+ abx500_chargalg_stop_charging(di);
+ di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ abx500_chargalg_state_to(di, STATE_HANDHELD);
+ /* Intentional fallthrough */
+
+ case STATE_HANDHELD:
+ break;
+
+ case STATE_SUSPENDED_INIT:
+ if (di->susp_status.ac_suspended)
+ abx500_chargalg_ac_en(di, false, 0, 0);
+ if (di->susp_status.usb_suspended)
+ abx500_chargalg_usb_en(di, false, 0, 0);
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ di->maintenance_chg = false;
+ abx500_chargalg_state_to(di, STATE_SUSPENDED);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_SUSPENDED:
+ /* CHARGING is suspended */
+ break;
+
+ case STATE_BATT_REMOVED_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
+ /* Intentional fallthrough */
+
+ case STATE_BATT_REMOVED:
+ if (!di->events.batt_rem)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_HW_TEMP_PROTECT_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
+ /* Intentional fallthrough */
+
+ case STATE_HW_TEMP_PROTECT:
+ if (!di->events.main_thermal_prot &&
+ !di->events.usb_thermal_prot)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_OVV_PROTECT_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
+ /* Intentional fallthrough */
+
+ case STATE_OVV_PROTECT:
+ if (!di->events.vbus_ovv &&
+ !di->events.main_ovv &&
+ !di->events.batt_ovv &&
+ di->chg_info.usb_chg_ok &&
+ di->chg_info.ac_chg_ok)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_CHG_NOT_OK_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
+ /* Intentional fallthrough */
+
+ case STATE_CHG_NOT_OK:
+ if (!di->events.mainextchnotok &&
+ !di->events.usbchargernotok)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_SAFETY_TIMER_EXPIRED_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
+ /* Intentional fallthrough */
+
+ case STATE_SAFETY_TIMER_EXPIRED:
+ /* We exit this state when charger is removed */
+ break;
+
+ case STATE_NORMAL_INIT:
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[di->bat->batt_id].normal_vol_lvl,
+ di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+ abx500_chargalg_state_to(di, STATE_NORMAL);
+ abx500_chargalg_start_safety_timer(di);
+ abx500_chargalg_stop_maintenance_timer(di);
+ init_maxim_chg_curr(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ di->eoc_cnt = 0;
+ di->maintenance_chg = false;
+ power_supply_changed(&di->chargalg_psy);
+
+ break;
+
+ case STATE_NORMAL:
+ handle_maxim_chg_curr(di);
+ if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
+ di->maintenance_chg) {
+ if (di->bat->no_maintenance)
+ abx500_chargalg_state_to(di,
+ STATE_WAIT_FOR_RECHARGE_INIT);
+ else
+ abx500_chargalg_state_to(di,
+ STATE_MAINTENANCE_A_INIT);
+ }
+ break;
+
+ /* This state will be used when the maintenance state is disabled */
+ case STATE_WAIT_FOR_RECHARGE_INIT:
+ abx500_chargalg_hold_charging(di);
+ abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
+ di->rch_cnt = RCH_COND_CNT;
+ /* Intentional fallthrough */
+
+ case STATE_WAIT_FOR_RECHARGE:
+ if (di->batt_data.volt <=
+ di->bat->bat_type[di->bat->batt_id].recharge_vol) {
+ if (di->rch_cnt-- == 0)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ } else
+ di->rch_cnt = RCH_COND_CNT;
+ break;
+
+ case STATE_MAINTENANCE_A_INIT:
+ abx500_chargalg_stop_safety_timer(di);
+ abx500_chargalg_start_maintenance_timer(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_chg_timer_h);
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_a_cur_lvl);
+ abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_MAINTENANCE_A:
+ if (di->events.maintenance_timer_expired) {
+ abx500_chargalg_stop_maintenance_timer(di);
+ abx500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
+ }
+ break;
+
+ case STATE_MAINTENANCE_B_INIT:
+ abx500_chargalg_start_maintenance_timer(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_chg_timer_h);
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].maint_b_cur_lvl);
+ abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_MAINTENANCE_B:
+ if (di->events.maintenance_timer_expired) {
+ abx500_chargalg_stop_maintenance_timer(di);
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ }
+ break;
+
+ case STATE_TEMP_LOWHIGH_INIT:
+ abx500_chargalg_start_charging(di,
+ di->bat->bat_type[
+ di->bat->batt_id].low_high_vol_lvl,
+ di->bat->bat_type[
+ di->bat->batt_id].low_high_cur_lvl);
+ abx500_chargalg_stop_maintenance_timer(di);
+ di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
+ power_supply_changed(&di->chargalg_psy);
+ /* Intentional fallthrough */
+
+ case STATE_TEMP_LOWHIGH:
+ if (!di->events.btemp_lowhigh)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_WD_EXPIRED_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
+ /* Intentional fallthrough */
+
+ case STATE_WD_EXPIRED:
+ if (!di->events.ac_wd_expired &&
+ !di->events.usb_wd_expired)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+
+ case STATE_TEMP_UNDEROVER_INIT:
+ abx500_chargalg_stop_charging(di);
+ abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
+ /* Intentional fallthrough */
+
+ case STATE_TEMP_UNDEROVER:
+ if (!di->events.btemp_underover)
+ abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ break;
+ }
+
+ /* Start charging directly if the new state is a charge state */
+ if (di->charge_state == STATE_NORMAL_INIT ||
+ di->charge_state == STATE_MAINTENANCE_A_INIT ||
+ di->charge_state == STATE_MAINTENANCE_B_INIT)
+ queue_work(di->chargalg_wq, &di->chargalg_work);
+}
+
+/**
+ * abx500_chargalg_periodic_work() - Periodic work for the algorithm
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for the charging algorithm
+ */
+static void abx500_chargalg_periodic_work(struct work_struct *work)
+{
+ struct abx500_chargalg *di = container_of(work,
+ struct abx500_chargalg, chargalg_periodic_work.work);
+
+ abx500_chargalg_algorithm(di);
+
+ /*
+ * If a charger is connected then the battery has to be monitored
+ * frequently, else the work can be delayed.
+ */
+ if (di->chg_info.conn_chg)
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_periodic_work,
+ di->bat->interval_charging * HZ);
+ else
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_periodic_work,
+ di->bat->interval_not_charging * HZ);
+}
+
+/**
+ * abx500_chargalg_wd_work() - periodic work to kick the charger watchdog
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for kicking the charger watchdog
+ */
+static void abx500_chargalg_wd_work(struct work_struct *work)
+{
+ int ret;
+ struct abx500_chargalg *di = container_of(work,
+ struct abx500_chargalg, chargalg_wd_work.work);
+
+ dev_dbg(di->dev, "abx500_chargalg_wd_work\n");
+
+ ret = abx500_chargalg_kick_watchdog(di);
+ if (ret < 0)
+ dev_err(di->dev, "failed to kick watchdog\n");
+
+ queue_delayed_work(di->chargalg_wq,
+ &di->chargalg_wd_work, CHG_WD_INTERVAL);
+}
+
+/**
+ * abx500_chargalg_work() - Work to run the charging algorithm instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for calling the charging algorithm
+ */
+static void abx500_chargalg_work(struct work_struct *work)
+{
+ struct abx500_chargalg *di = container_of(work,
+ struct abx500_chargalg, chargalg_work);
+
+ abx500_chargalg_algorithm(di);
+}
+
+/**
+ * abx500_chargalg_get_property() - get the chargalg properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * chargalg properties by reading the sysfs files.
+ * status: charging/discharging/full/unknown
+ * health: health of the battery
+ * Returns an error code in case of failure, else 0 on success
+ */
+static int abx500_chargalg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct abx500_chargalg *di;
+
+ di = to_abx500_chargalg_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = di->charge_status;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->events.batt_ovv) {
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else if (di->events.btemp_underover) {
+ if (di->batt_data.temp <= di->bat->temp_under)
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ } else {
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Exposure to the sysfs interface */
+
+/**
+ * abx500_chargalg_sysfs_charger() - sysfs store operations
+ * @kobj: pointer to the struct kobject
+ * @attr: pointer to the struct attribute
+ * @buf: buffer that holds the parameter passed from userspace
+ * @length: length of the parameter passed
+ *
+ * The operation to be performed on the parameters passed from user space.
+ * Returns the length of the buffer (input taken from user space) on success,
+ * else an error code on failure.
+ */
+static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t length)
+{
+ struct abx500_chargalg *di = container_of(kobj,
+ struct abx500_chargalg, chargalg_kobject);
+ long int param;
+ int ac_usb;
+ int ret;
+ char entry = *attr->name;
+
+ switch (entry) {
+ case 'c':
+ ret = strict_strtol(buf, 10, &param);
+ if (ret < 0)
+ return ret;
+
+ ac_usb = param;
+ switch (ac_usb) {
+ case 0:
+ /* Disable charging */
+ di->susp_status.ac_suspended = true;
+ di->susp_status.usb_suspended = true;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ case 1:
+ /* Enable AC Charging */
+ di->susp_status.ac_suspended = false;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ case 2:
+ /* Enable USB charging */
+ di->susp_status.usb_suspended = false;
+ di->susp_status.suspended_change = true;
+ /* Trigger a state change */
+ queue_work(di->chargalg_wq,
+ &di->chargalg_work);
+ break;
+ default:
+ dev_info(di->dev, "Wrong input\n"
+ "Enter 0. Disable AC/USB Charging\n"
+ "1. Enable AC charging\n"
+ "2. Enable USB Charging\n");
+ }
+ break;
+ }
+ return strlen(buf);
+}
+
+static struct attribute abx500_chargalg_en_charger = {
+ .name = "chargalg",
+ .mode = S_IWUGO,
+};
+
+static struct attribute *abx500_chargalg_chg[] = {
+ &abx500_chargalg_en_charger,
+ NULL
+};
+
+const struct sysfs_ops abx500_chargalg_sysfs_ops = {
+ .store = abx500_chargalg_sysfs_charger,
+};
+
+static struct kobj_type abx500_chargalg_ktype = {
+ .sysfs_ops = &abx500_chargalg_sysfs_ops,
+ .default_attrs = abx500_chargalg_chg,
+};
+
+/**
+ * abx500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * @di: pointer to the struct abx500_chargalg
+ *
+ * This function removes the entry in sysfs.
+ */
+static void abx500_chargalg_sysfs_exit(struct abx500_chargalg *di)
+{
+ kobject_del(&di->chargalg_kobject);
+}
+
+/**
+ * abx500_chargalg_sysfs_init() - init of sysfs entry
+ * @di: pointer to the struct abx500_chargalg
+ *
+ * This function adds an entry in sysfs.
+ * Returns an error code in case of failure, else 0 on success
+ */
+static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
+{
+ int ret = 0;
+
+ ret = kobject_init_and_add(&di->chargalg_kobject,
+ &abx500_chargalg_ktype,
+ NULL, "abx500_chargalg");
+ if (ret < 0)
+ dev_err(di->dev, "failed to create sysfs entry\n");
+
+ return ret;
+}
+/* Exposure to the sysfs interface <<END>> */
+
+#if defined(CONFIG_PM)
+static int abx500_chargalg_resume(struct platform_device *pdev)
+{
+ struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+ /* Kick charger watchdog if charging (any charger online) */
+ if (di->chg_info.online_chg)
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
+
+ /*
+ * Run the charging algorithm directly to make sure it is not
+ * run too seldom
+ */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+ return 0;
+}
+
+static int abx500_chargalg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+ if (di->chg_info.online_chg)
+ cancel_delayed_work_sync(&di->chargalg_wd_work);
+
+ cancel_delayed_work_sync(&di->chargalg_periodic_work);
+
+ return 0;
+}
+#else
+#define abx500_chargalg_suspend NULL
+#define abx500_chargalg_resume NULL
+#endif
+
+static int __devexit abx500_chargalg_remove(struct platform_device *pdev)
+{
+ struct abx500_chargalg *di = platform_get_drvdata(pdev);
+
+ /* sysfs interface to enable/disable charging from user space */
+ abx500_chargalg_sysfs_exit(di);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->chargalg_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->chargalg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
+{
+ struct abx500_bm_plat_data *plat_data;
+ int ret = 0;
+
+ struct abx500_chargalg *di =
+ kzalloc(sizeof(struct abx500_chargalg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get device struct */
+ di->dev = &pdev->dev;
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->chargalg;
+ di->bat = plat_data->battery;
+
+ /* chargalg supply */
+ di->chargalg_psy.name = "abx500_chargalg";
+ di->chargalg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->chargalg_psy.properties = abx500_chargalg_props;
+ di->chargalg_psy.num_properties = ARRAY_SIZE(abx500_chargalg_props);
+ di->chargalg_psy.get_property = abx500_chargalg_get_property;
+ di->chargalg_psy.supplied_to = di->pdata->supplied_to;
+ di->chargalg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->chargalg_psy.external_power_changed =
+ abx500_chargalg_external_power_changed;
+
+ /* Initialize safety timer */
+ init_timer(&di->safety_timer);
+ di->safety_timer.function = abx500_chargalg_safety_timer_expired;
+ di->safety_timer.data = (unsigned long) di;
+
+ /* Initialize maintenance timer */
+ init_timer(&di->maintenance_timer);
+ di->maintenance_timer.function =
+ abx500_chargalg_maintenance_timer_expired;
+ di->maintenance_timer.data = (unsigned long) di;
+
+ /* Create a work queue for the chargalg */
+ di->chargalg_wq =
+ create_singlethread_workqueue("abx500_chargalg_wq");
+ if (di->chargalg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ goto free_device_info;
+ }
+
+ /* Init work for chargalg */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
+ abx500_chargalg_periodic_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
+ abx500_chargalg_wd_work);
+
+ /* Init work for running the chargalg instantly */
+ INIT_WORK(&di->chargalg_work, abx500_chargalg_work);
+
+ /* To detect charger at startup */
+ di->chg_info.prev_conn_chg = -1;
+
+ /* Register chargalg power supply class */
+ ret = power_supply_register(di->dev, &di->chargalg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register chargalg psy\n");
+ goto free_chargalg_wq;
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ /* sysfs interface to enable/disable charging from user space */
+ ret = abx500_chargalg_sysfs_init(di);
+ if (ret) {
+ dev_err(di->dev, "failed to create sysfs entry\n");
+ goto free_psy;
+ }
+
+ /* Run the charging algorithm */
+ queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+free_psy:
+ power_supply_unregister(&di->chargalg_psy);
+free_chargalg_wq:
+ destroy_workqueue(di->chargalg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver abx500_chargalg_driver = {
+ .probe = abx500_chargalg_probe,
+ .remove = __devexit_p(abx500_chargalg_remove),
+ .suspend = abx500_chargalg_suspend,
+ .resume = abx500_chargalg_resume,
+ .driver = {
+ .name = "abx500-chargalg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init abx500_chargalg_init(void)
+{
+ return platform_driver_register(&abx500_chargalg_driver);
+}
+
+static void __exit abx500_chargalg_exit(void)
+{
+ platform_driver_unregister(&abx500_chargalg_driver);
+}
+
+module_init(abx500_chargalg_init);
+module_exit(abx500_chargalg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:abx500-chargalg");
+MODULE_DESCRIPTION("abx500 battery charging algorithm");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 7a61b17ddd0..d042634a9ce 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -292,6 +292,13 @@ config REGULATOR_AD5398
This driver supports AD5398 and AD5821 current regulator chips.
If building into module, its name is ad5398.ko.
+config REGULATOR_AB5500
+ bool "ST-Ericsson AB5500 Power Regulators"
+ depends on AB5500_CORE
+ help
+ This driver supports the regulators found on the ST-Ericsson mixed
+ signal AB5500 PMIC
+
config REGULATOR_AB8500
bool "ST-Ericsson AB8500 Power Regulators"
depends on AB8500_CORE
@@ -299,13 +306,41 @@ config REGULATOR_AB8500
This driver supports the regulators found on the ST-Ericsson mixed
signal AB8500 PMIC
+config REGULATOR_AB8500_EXT
+ bool "ST-Ericsson AB8500 External Regulators"
+ depends on REGULATOR_AB8500
+ default y if REGULATOR_AB8500
+ help
+ This driver supports the external regulator controls found on the
+ ST-Ericsson mixed signal AB8500 PMIC
+
+config REGULATOR_DBX500_PRCMU
+ bool
+
+config REGULATOR_DB5500_PRCMU
+ bool "ST-Ericsson DB5500 Voltage Domain Regulators"
+ depends on MFD_DB5500_PRCMU
+ select REGULATOR_DBX500_PRCMU
+ help
+ This driver supports the voltage domain regulators controlled by the
+ DB5500 PRCMU
+
config REGULATOR_DB8500_PRCMU
bool "ST-Ericsson DB8500 Voltage Domain Regulators"
depends on MFD_DB8500_PRCMU
+ select REGULATOR_DBX500_PRCMU
help
This driver supports the voltage domain regulators controlled by the
DB8500 PRCMU
+config REGULATOR_AB8500_DEBUG
+ bool "AB8500 regulator debug"
+ depends on REGULATOR_AB8500
+ help
+ Say Y here to add debug functionality for ST-Ericsson
+ ab8500 regulators. This is a module that exposes a
+ number of settings and debug output in debugfs.
+
config REGULATOR_TPS6586X
tristate "TI TPS6586X Power regulators"
depends on MFD_TPS6586X
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 503bac87715..17e52873705 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -44,9 +44,14 @@ obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
+obj-$(CONFIG_REGULATOR_AB5500) += ab5500.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
+obj-$(CONFIG_REGULATOR_AB8500_EXT) += ab8500-ext.o
+obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
+obj-$(CONFIG_REGULATOR_DB5500_PRCMU) += db5500-prcmu.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
+obj-$(CONFIG_REGULATOR_AB8500_DEBUG) += ab8500-debug.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/ab5500.c b/drivers/regulator/ab5500.c
new file mode 100644
index 00000000000..a3de7701923
--- /dev/null
+++ b/drivers/regulator/ab5500.c
@@ -0,0 +1,625 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Based on ab3100.c.
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/regulator/ab5500.h>
+
+#define AB5500_LDO_VDIGMIC_ST 0x50
+
+#define AB5500_LDO_G_ST 0x78
+#define AB5500_LDO_G_PWR1 0x79
+#define AB5500_LDO_G_PWR0 0x7a
+
+#define AB5500_LDO_H_ST 0x7b
+#define AB5500_LDO_H_PWR1 0x7c
+#define AB5500_LDO_H_PWR0 0x7d
+
+#define AB5500_LDO_K_ST 0x7e
+#define AB5500_LDO_K_PWR1 0x7f
+#define AB5500_LDO_K_PWR0 0x80
+
+#define AB5500_LDO_L_ST 0x81
+#define AB5500_LDO_L_PWR1 0x82
+#define AB5500_LDO_L_PWR0 0x83
+
+/* In SIM bank */
+#define AB5500_SIM_SUP 0x14
+
+#define AB5500_MBIAS2 0x01
+
+#define AB5500_LDO_MODE_MASK (0x3 << 4)
+#define AB5500_LDO_MODE_FULLPOWER (0x3 << 4)
+#define AB5500_LDO_MODE_PWRCTRL (0x2 << 4)
+#define AB5500_LDO_MODE_LOWPOWER (0x1 << 4)
+#define AB5500_LDO_MODE_OFF (0x0 << 4)
+#define AB5500_LDO_VOLT_MASK 0x07
+
+#define AB5500_MBIAS2_ENABLE (0x1 << 1)
+#define AB5500_MBIAS2_VOLT_MASK (0x1 << 2)
+#define AB5500_MBIAS2_MODE_MASK (0x1 << 1)
+
+struct ab5500_regulator {
+ struct regulator_desc desc;
+ const int *voltages;
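+ /* number of unused (zero) entries in the voltages table */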
+ int num_holes;
+ bool off_is_lowpower;
+ bool enabled;
+ int enable_time;
+ int load_lp_uA;
+ u8 bank;
+ u8 reg;
+ u8 mode;
+ u8 update_mask;
+ u8 update_val_idle;
+ u8 update_val_normal;
+ u8 voltage_mask;
+};
+
+struct ab5500_regulators {
+ struct device *dev;
+ struct ab5500_regulator *regulator[AB5500_NUM_REGULATORS];
+ struct regulator_dev *rdev[AB5500_NUM_REGULATORS];
+};
+
+static int ab5500_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ return r->enable_time; /* microseconds */
+}
+
+static int ab5500_regulator_enable(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ int ret;
+
+ ret = abx500_mask_and_set(ab5500->dev, r->bank, r->reg,
+ r->update_mask, r->mode);
+ if (ret < 0)
+ return ret;
+
+ r->enabled = true;
+
+ return 0;
+}
+
+static int ab5500_regulator_disable(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ u8 regval;
+ int ret;
+
+ if (r->off_is_lowpower)
+ regval = AB5500_LDO_MODE_LOWPOWER;
+ else
+ regval = AB5500_LDO_MODE_OFF;
+
+ ret = abx500_mask_and_set(ab5500->dev, r->bank, r->reg,
+ r->update_mask, regval);
+ if (ret < 0)
+ return ret;
+
+ r->enabled = false;
+
+ return 0;
+}
+
+static unsigned int ab5500_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ if (r->mode == r->update_val_idle)
+ return REGULATOR_MODE_IDLE;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static unsigned int
+ab5500_regulator_get_optimum_mode(struct regulator_dev *rdev,
+ int input_uV, int output_uV, int load_uA)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ unsigned int mode;
+
+ if (load_uA <= r->load_lp_uA)
+ mode = REGULATOR_MODE_IDLE;
+ else
+ mode = REGULATOR_MODE_NORMAL;
+
+ return mode;
+}
+
+static int ab5500_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ r->mode = r->update_val_normal;
+ break;
+ case REGULATOR_MODE_IDLE:
+ r->mode = r->update_val_idle;
+ break;
+ default:
+ return -EINVAL;
+ }
+
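+ /*
+ * If the regulator is currently enabled, write the new mode to
+ * hardware immediately; otherwise it takes effect on the next enable.
+ */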
+ if (r->enabled)
+ return ab5500_regulator_enable(rdev);
+
+ return 0;
+}
+
+static int ab5500_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ u8 regval;
+ int err;
+
+ err = abx500_get_register_interruptible(ab5500->dev,
+ r->bank, r->reg, &regval);
+ if (err) {
+ dev_err(rdev_get_dev(rdev), "unable to get register 0x%x\n",
+ r->reg);
+ return err;
+ }
+
+ switch (regval & r->update_mask) {
+ case AB5500_LDO_MODE_PWRCTRL:
+ case AB5500_LDO_MODE_OFF:
+ r->enabled = false;
+ break;
+ case AB5500_LDO_MODE_LOWPOWER:
+ if (r->off_is_lowpower) {
+ r->enabled = false;
+ break;
+ }
+ /* fall through */
+ default:
+ r->enabled = true;
+ break;
+ }
+
+ return r->enabled;
+}
+
+static int
+ab5500_regulator_list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ unsigned n_voltages = r->desc.n_voltages;
+ int selindex;
+ int i;
+
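+ /*
+ * Selectors count only valid voltages, so skip the unused (zero)
+ * entries while walking the table.
+ */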
+ for (i = 0, selindex = 0; selindex < n_voltages; i++) {
+ int voltage = r->voltages[i];
+
+ if (!voltage)
+ continue;
+
+ if (selindex == selector)
+ return voltage;
+
+ selindex++;
+ }
+
+ return -EINVAL;
+}
+
+static int ab5500_regulator_fixed_get_voltage(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ return r->voltages[0];
+}
+
+static int ab5500_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ u8 regval;
+ int ret;
+
+ ret = abx500_get_register_interruptible(ab5500->dev,
+ r->bank, r->reg, &regval);
+ if (ret) {
+ dev_warn(rdev_get_dev(rdev),
+ "failed to get regulator value in register "
+ "%02x\n", r->reg);
+ return ret;
+ }
+
+ regval &= r->voltage_mask;
+ if (regval >= r->desc.n_voltages + r->num_holes)
+ return -EINVAL;
+
+ if (!r->voltages[regval])
+ return -EINVAL;
+
+ return r->voltages[regval];
+}
+
+static int ab5500_get_best_voltage_index(struct ab5500_regulator *r,
+ int min_uV, int max_uV)
+{
+ unsigned n_voltages = r->desc.n_voltages;
+ int bestmatch = INT_MAX;
+ int bestindex = -EINVAL;
+ int selindex;
+ int i;
+
+ /*
+ * Locate the minimum voltage fitting the criteria on
+ * this regulator. The switchable voltages are not
+ * in strict falling order so we need to check them
+ * all for the best match.
+ */
+ for (i = 0, selindex = 0; selindex < n_voltages; i++) {
+ int voltage = r->voltages[i];
+
+ if (!voltage)
+ continue;
+
+ if (voltage <= max_uV &&
+ voltage >= min_uV &&
+ voltage < bestmatch) {
+ bestmatch = voltage;
+ bestindex = i;
+ }
+
+ selindex++;
+ }
+
+ return bestindex;
+}
+
+static int ab5500_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ int bestindex;
+
+ bestindex = ab5500_get_best_voltage_index(r, min_uV, max_uV);
+ if (bestindex < 0) {
+ dev_warn(rdev_get_dev(rdev),
+ "requested %d<=x<=%d uV, out of range!\n",
+ min_uV, max_uV);
+ return bestindex;
+ }
+
+ *selector = bestindex;
+
+ return abx500_mask_and_set_register_interruptible(ab5500->dev,
+ r->bank, r->reg, r->voltage_mask, bestindex);
+
+}
+
+static struct regulator_ops ab5500_regulator_variable_ops = {
+ .enable = ab5500_regulator_enable,
+ .disable = ab5500_regulator_disable,
+ .is_enabled = ab5500_regulator_is_enabled,
+ .enable_time = ab5500_regulator_enable_time,
+ .get_voltage = ab5500_regulator_get_voltage,
+ .set_voltage = ab5500_regulator_set_voltage,
+ .list_voltage = ab5500_regulator_list_voltage,
+ .set_mode = ab5500_regulator_set_mode,
+ .get_mode = ab5500_regulator_get_mode,
+ .get_optimum_mode = ab5500_regulator_get_optimum_mode,
+};
+
+static struct regulator_ops ab5500_regulator_fixed_ops = {
+ .enable = ab5500_regulator_enable,
+ .disable = ab5500_regulator_disable,
+ .is_enabled = ab5500_regulator_is_enabled,
+ .enable_time = ab5500_regulator_enable_time,
+ .get_voltage = ab5500_regulator_fixed_get_voltage,
+ .list_voltage = ab5500_regulator_list_voltage,
+ .set_mode = ab5500_regulator_set_mode,
+ .get_mode = ab5500_regulator_get_mode,
+ .get_optimum_mode = ab5500_regulator_get_optimum_mode,
+};
+
+static const int ab5500_ldo_lg_voltages[] = {
+ [0x00] = 1200000,
+ [0x01] = 0, /* not used */
+ [0x02] = 1500000,
+ [0x03] = 1800000,
+ [0x04] = 0, /* not used */
+ [0x05] = 2500000,
+ [0x06] = 2730000,
+ [0x07] = 2910000,
+};
+
+static const int ab5500_ldo_kh_voltages[] = {
+ [0x00] = 1200000,
+ [0x01] = 1500000,
+ [0x02] = 1800000,
+ [0x03] = 2100000,
+ [0x04] = 2500000,
+ [0x05] = 2750000,
+ [0x06] = 2790000,
+ [0x07] = 2910000,
+};
+
+static const int ab5500_ldo_vdigmic_voltages[] = {
+ [0x00] = 2100000,
+};
+
+static const int ab5500_ldo_sim_voltages[] = {
+ [0x00] = 1875000,
+ [0x01] = 2800000,
+ [0x02] = 2900000,
+};
+
+static const int ab5500_bias2_voltages[] = {
+ [0x00] = 2000000,
+ [0x01] = 2200000,
+};
+
+static struct ab5500_regulator ab5500_regulators[] = {
+ [AB5500_LDO_L] = {
+ .desc = {
+ .name = "LDO_L",
+ .id = AB5500_LDO_L,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_lg_voltages) -
+ 2,
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_L_ST,
+ .voltages = ab5500_ldo_lg_voltages,
+ .num_holes = 2, /* 2 register values unused */
+ .enable_time = 400,
+ .load_lp_uA = 20000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_G] = {
+ .desc = {
+ .name = "LDO_G",
+ .id = AB5500_LDO_G,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_lg_voltages) -
+ 2,
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_G_ST,
+ .voltages = ab5500_ldo_lg_voltages,
+ .num_holes = 2, /* 2 register values unused */
+ .enable_time = 400,
+ .load_lp_uA = 20000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_K] = {
+ .desc = {
+ .name = "LDO_K",
+ .id = AB5500_LDO_K,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_kh_voltages),
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_K_ST,
+ .voltages = ab5500_ldo_kh_voltages,
+ .enable_time = 400,
+ .load_lp_uA = 20000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_H] = {
+ .desc = {
+ .name = "LDO_H",
+ .id = AB5500_LDO_H,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_kh_voltages),
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_H_ST,
+ .voltages = ab5500_ldo_kh_voltages,
+ .enable_time = 400,
+ .load_lp_uA = 20000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_VDIGMIC] = {
+ .desc = {
+ .name = "LDO_VDIGMIC",
+ .id = AB5500_LDO_VDIGMIC,
+ .ops = &ab5500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages =
+ ARRAY_SIZE(ab5500_ldo_vdigmic_voltages),
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_VDIGMIC_ST,
+ .voltages = ab5500_ldo_vdigmic_voltages,
+ .enable_time = 450,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_SIM] = {
+ .desc = {
+ .name = "LDO_SIM",
+ .id = AB5500_LDO_SIM,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_sim_voltages),
+ },
+ .bank = AB5500_BANK_SIM_USBSIM,
+ .reg = AB5500_SIM_SUP,
+ .voltages = ab5500_ldo_sim_voltages,
+ .enable_time = 1000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_BIAS2] = {
+ .desc = {
+ .name = "BIAS2",
+ .id = AB5500_BIAS2,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_bias2_voltages),
+ },
+ .bank = AB5500_BANK_AUDIO_HEADSETUSB,
+ .reg = AB5500_MBIAS2,
+ .voltages = ab5500_bias2_voltages,
+ .enable_time = 1000,
+ .mode = AB5500_MBIAS2_ENABLE,
+ .update_mask = AB5500_MBIAS2_MODE_MASK,
+ .update_val_normal = AB5500_MBIAS2_ENABLE,
+ .update_val_idle = AB5500_MBIAS2_ENABLE,
+ .voltage_mask = AB5500_MBIAS2_VOLT_MASK,
+ },
+};
+
+static int __devinit ab5500_regulator_probe(struct platform_device *pdev)
+{
+ struct ab5500_platform_data *ppdata = pdev->dev.parent->platform_data;
+ struct ab5500_regulator_platform_data *pdata = ppdata->regulator;
+ struct ab5500_regulator_data *regdata;
+ struct ab5500_regulators *ab5500;
+ int err = 0;
+ int i;
+
+ if (!pdata || !pdata->regulator)
+ return -EINVAL;
+
+ ab5500 = kzalloc(sizeof(*ab5500), GFP_KERNEL);
+ if (!ab5500)
+ return -ENOMEM;
+
+ ab5500->dev = &pdev->dev;
+ regdata = pdata->data;
+
+ platform_set_drvdata(pdev, ab5500);
+
+ for (i = 0; i < AB5500_NUM_REGULATORS; i++) {
+ struct ab5500_regulator *regulator = &ab5500_regulators[i];
+ struct regulator_dev *rdev;
+
+ if (regdata)
+ regulator->off_is_lowpower = regdata[i].off_is_lowpower;
+
+ ab5500->regulator[i] = regulator;
+
+ rdev = regulator_register(&regulator->desc, &pdev->dev,
+ &pdata->regulator[i], ab5500);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
+ dev_err(&pdev->dev, "failed to register regulator %s err %d\n",
+ regulator->desc.name, err);
+ goto err_unregister;
+ }
+
+ ab5500->rdev[i] = rdev;
+ }
+
+ return 0;
+
+err_unregister:
+ /* remove the already registered regulators */
+ while (--i >= 0)
+ regulator_unregister(ab5500->rdev[i]);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(ab5500);
+
+ return err;
+}
+
+static int __devexit ab5500_regulators_remove(struct platform_device *pdev)
+{
+ struct ab5500_regulators *ab5500 = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < AB5500_NUM_REGULATORS; i++)
+ regulator_unregister(ab5500->rdev[i]);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(ab5500);
+
+ return 0;
+}
+
+static struct platform_driver ab5500_regulator_driver = {
+ .driver = {
+ .name = "ab5500-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_regulator_probe,
+ .remove = __devexit_p(ab5500_regulators_remove),
+};
+
+static __init int ab5500_regulator_init(void)
+{
+ return platform_driver_register(&ab5500_regulator_driver);
+}
+
+static __exit void ab5500_regulator_exit(void)
+{
+ platform_driver_unregister(&ab5500_regulator_driver);
+}
+
+subsys_initcall(ab5500_regulator_init);
+module_exit(ab5500_regulator_exit);
+
+MODULE_DESCRIPTION("AB5500 Regulator Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ab5500-regulator");
diff --git a/drivers/regulator/ab8500-debug.c b/drivers/regulator/ab8500-debug.c
new file mode 100644
index 00000000000..16dad8f06dd
--- /dev/null
+++ b/drivers/regulator/ab8500-debug.c
@@ -0,0 +1,1777 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson.
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/mfd/abx500.h>
+#include <linux/regulator/ab8500-debug.h>
+#include <linux/io.h>
+#include <mach/db8500-regs.h> /* U8500_BACKUPRAM1_BASE */
+#include <mach/hardware.h>
+
+/* board profile address - to determine if suspend-force is default */
+#define BOOT_INFO_BACKUPRAM1 (U8500_BACKUPRAM1_BASE + 0xffc)
+#define BOARD_PROFILE_BACKUPRAM1 (0x3)
+
+/* board profile option */
+#define OPTION_BOARD_VERSION_V5X 50
+
+/* for error prints */
+struct device *dev;
+struct platform_device *pdev;
+
+/* setting for suspend force (disabled by default) */
+static bool setting_suspend_force;
+
+/*
+ * regulator states
+ */
+enum ab8500_regulator_state_id {
+ AB8500_REGULATOR_STATE_INIT,
+ AB8500_REGULATOR_STATE_SUSPEND,
+ AB8500_REGULATOR_STATE_SUSPEND_CORE,
+ AB8500_REGULATOR_STATE_RESUME_CORE,
+ AB8500_REGULATOR_STATE_RESUME,
+ AB8500_REGULATOR_STATE_CURRENT,
+ NUM_REGULATOR_STATE
+};
+
+static const char *regulator_state_name[NUM_REGULATOR_STATE] = {
+ [AB8500_REGULATOR_STATE_INIT] = "init",
+ [AB8500_REGULATOR_STATE_SUSPEND] = "suspend",
+ [AB8500_REGULATOR_STATE_SUSPEND_CORE] = "suspend-core",
+ [AB8500_REGULATOR_STATE_RESUME_CORE] = "resume-core",
+ [AB8500_REGULATOR_STATE_RESUME] = "resume",
+ [AB8500_REGULATOR_STATE_CURRENT] = "current",
+};
+
+/*
+ * regulator register definitions
+ */
+enum ab8500_register_id {
+ AB8500_REGU_NOUSE, /* if not defined */
+ AB8500_REGU_REQUEST_CTRL1,
+ AB8500_REGU_REQUEST_CTRL2,
+ AB8500_REGU_REQUEST_CTRL3,
+ AB8500_REGU_REQUEST_CTRL4,
+ AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ AB8500_REGU_HW_HP_REQ1_VALID1,
+ AB8500_REGU_HW_HP_REQ1_VALID2,
+ AB8500_REGU_HW_HP_REQ2_VALID1,
+ AB8500_REGU_HW_HP_REQ2_VALID2,
+ AB8500_REGU_SW_HP_REQ_VALID1,
+ AB8500_REGU_SW_HP_REQ_VALID2,
+ AB8500_REGU_SYSCLK_REQ1_VALID,
+ AB8500_REGU_SYSCLK_REQ2_VALID,
+ AB8500_REGU_MISC1,
+ AB8500_REGU_OTG_SUPPLY_CTRL,
+ AB8500_REGU_VUSB_CTRL,
+ AB8500_REGU_VAUDIO_SUPPLY,
+ AB8500_REGU_CTRL1_VAMIC,
+ AB8500_REGU_ARM_REGU1,
+ AB8500_REGU_ARM_REGU2,
+ AB8500_REGU_VAPE_REGU,
+ AB8500_REGU_VSMPS1_REGU,
+ AB8500_REGU_VSMPS2_REGU,
+ AB8500_REGU_VSMPS3_REGU,
+ AB8500_REGU_VPLL_VANA_REGU,
+ AB8500_REGU_VREF_DDR,
+ AB8500_REGU_EXT_SUPPLY_REGU,
+ AB8500_REGU_VAUX12_REGU,
+ AB8500_REGU_VRF1_VAUX3_REGU,
+ AB8500_REGU_VARM_SEL1,
+ AB8500_REGU_VARM_SEL2,
+ AB8500_REGU_VARM_SEL3,
+ AB8500_REGU_VAPE_SEL1,
+ AB8500_REGU_VAPE_SEL2,
+ AB8500_REGU_VAPE_SEL3,
+ AB8500_REGU_VBB_SEL1,
+ AB8500_REGU_VBB_SEL2,
+ AB8500_REGU_VSMPS1_SEL1,
+ AB8500_REGU_VSMPS1_SEL2,
+ AB8500_REGU_VSMPS1_SEL3,
+ AB8500_REGU_VSMPS2_SEL1,
+ AB8500_REGU_VSMPS2_SEL2,
+ AB8500_REGU_VSMPS2_SEL3,
+ AB8500_REGU_VSMPS3_SEL1,
+ AB8500_REGU_VSMPS3_SEL2,
+ AB8500_REGU_VSMPS3_SEL3,
+ AB8500_REGU_VAUX1_SEL,
+ AB8500_REGU_VAUX2_SEL,
+ AB8500_REGU_VRF1_VAUX3_SEL,
+ AB8500_REGU_CTRL_EXT_SUP,
+ AB8500_REGU_VMOD_REGU,
+ AB8500_REGU_VMOD_SEL1,
+ AB8500_REGU_VMOD_SEL2,
+ AB8500_REGU_CTRL_DISCH,
+ AB8500_REGU_CTRL_DISCH2,
+ AB8500_OTHER_SYSCLK_CTRL, /* Other */
+ AB8500_OTHER_VSIM_SYSCLK_CTRL, /* Other */
+ AB8500_OTHER_SYSULPCLK_CTRL1, /* Other */
+ AB8500_OTHER_TVOUT_CTRL, /* Other */
+ NUM_AB8500_REGISTER
+};
+
+struct ab8500_register {
+ const char *name;
+ u8 bank;
+ u8 addr;
+};
+
+static struct ab8500_register
+ ab8500_register[NUM_AB8500_REGISTER] = {
+ [AB8500_REGU_REQUEST_CTRL1] = {
+ .name = "ReguRequestCtrl1",
+ .bank = 0x03,
+ .addr = 0x03,
+ },
+ [AB8500_REGU_REQUEST_CTRL2] = {
+ .name = "ReguRequestCtrl2",
+ .bank = 0x03,
+ .addr = 0x04,
+ },
+ [AB8500_REGU_REQUEST_CTRL3] = {
+ .name = "ReguRequestCtrl3",
+ .bank = 0x03,
+ .addr = 0x05,
+ },
+ [AB8500_REGU_REQUEST_CTRL4] = {
+ .name = "ReguRequestCtrl4",
+ .bank = 0x03,
+ .addr = 0x06,
+ },
+ [AB8500_REGU_SYSCLK_REQ1_HP_VALID1] = {
+ .name = "ReguSysClkReq1HPValid",
+ .bank = 0x03,
+ .addr = 0x07,
+ },
+ [AB8500_REGU_SYSCLK_REQ1_HP_VALID2] = {
+ .name = "ReguSysClkReq1HPValid2",
+ .bank = 0x03,
+ .addr = 0x08,
+ },
+ [AB8500_REGU_HW_HP_REQ1_VALID1] = {
+ .name = "ReguHwHPReq1Valid1",
+ .bank = 0x03,
+ .addr = 0x09,
+ },
+ [AB8500_REGU_HW_HP_REQ1_VALID2] = {
+ .name = "ReguHwHPReq1Valid2",
+ .bank = 0x03,
+ .addr = 0x0a,
+ },
+ [AB8500_REGU_HW_HP_REQ2_VALID1] = {
+ .name = "ReguHwHPReq2Valid1",
+ .bank = 0x03,
+ .addr = 0x0b,
+ },
+ [AB8500_REGU_HW_HP_REQ2_VALID2] = {
+ .name = "ReguHwHPReq2Valid2",
+ .bank = 0x03,
+ .addr = 0x0c,
+ },
+ [AB8500_REGU_SW_HP_REQ_VALID1] = {
+ .name = "ReguSwHPReqValid1",
+ .bank = 0x03,
+ .addr = 0x0d,
+ },
+ [AB8500_REGU_SW_HP_REQ_VALID2] = {
+ .name = "ReguSwHPReqValid2",
+ .bank = 0x03,
+ .addr = 0x0e,
+ },
+ [AB8500_REGU_SYSCLK_REQ1_VALID] = {
+ .name = "ReguSysClkReqValid1",
+ .bank = 0x03,
+ .addr = 0x0f,
+ },
+ [AB8500_REGU_SYSCLK_REQ2_VALID] = {
+ .name = "ReguSysClkReqValid2",
+ .bank = 0x03,
+ .addr = 0x10,
+ },
+ [AB8500_REGU_MISC1] = {
+ .name = "ReguMisc1",
+ .bank = 0x03,
+ .addr = 0x80,
+ },
+ [AB8500_REGU_OTG_SUPPLY_CTRL] = {
+ .name = "OTGSupplyCtrl",
+ .bank = 0x03,
+ .addr = 0x81,
+ },
+ [AB8500_REGU_VUSB_CTRL] = {
+ .name = "VusbCtrl",
+ .bank = 0x03,
+ .addr = 0x82,
+ },
+ [AB8500_REGU_VAUDIO_SUPPLY] = {
+ .name = "VaudioSupply",
+ .bank = 0x03,
+ .addr = 0x83,
+ },
+ [AB8500_REGU_CTRL1_VAMIC] = {
+ .name = "ReguCtrl1VAmic",
+ .bank = 0x03,
+ .addr = 0x84,
+ },
+ [AB8500_REGU_ARM_REGU1] = {
+ .name = "ArmRegu1",
+ .bank = 0x04,
+ .addr = 0x00,
+ },
+ [AB8500_REGU_ARM_REGU2] = {
+ .name = "ArmRegu2",
+ .bank = 0x04,
+ .addr = 0x01,
+ },
+ [AB8500_REGU_VAPE_REGU] = {
+ .name = "VapeRegu",
+ .bank = 0x04,
+ .addr = 0x02,
+ },
+ [AB8500_REGU_VSMPS1_REGU] = {
+ .name = "Vsmps1Regu",
+ .bank = 0x04,
+ .addr = 0x03,
+ },
+ [AB8500_REGU_VSMPS2_REGU] = {
+ .name = "Vsmps2Regu",
+ .bank = 0x04,
+ .addr = 0x04,
+ },
+ [AB8500_REGU_VSMPS3_REGU] = {
+ .name = "Vsmps3Regu",
+ .bank = 0x04,
+ .addr = 0x05,
+ },
+ [AB8500_REGU_VPLL_VANA_REGU] = {
+ .name = "VpllVanaRegu",
+ .bank = 0x04,
+ .addr = 0x06,
+ },
+ [AB8500_REGU_VREF_DDR] = {
+ .name = "VrefDDR",
+ .bank = 0x04,
+ .addr = 0x07,
+ },
+ [AB8500_REGU_EXT_SUPPLY_REGU] = {
+ .name = "ExtSupplyRegu",
+ .bank = 0x04,
+ .addr = 0x08,
+ },
+ [AB8500_REGU_VAUX12_REGU] = {
+ .name = "Vaux12Regu",
+ .bank = 0x04,
+ .addr = 0x09,
+ },
+ [AB8500_REGU_VRF1_VAUX3_REGU] = {
+ .name = "VRF1Vaux3Regu",
+ .bank = 0x04,
+ .addr = 0x0a,
+ },
+ [AB8500_REGU_VARM_SEL1] = {
+ .name = "VarmSel1",
+ .bank = 0x04,
+ .addr = 0x0b,
+ },
+ [AB8500_REGU_VARM_SEL2] = {
+ .name = "VarmSel2",
+ .bank = 0x04,
+ .addr = 0x0c,
+ },
+ [AB8500_REGU_VARM_SEL3] = {
+ .name = "VarmSel3",
+ .bank = 0x04,
+ .addr = 0x0d,
+ },
+ [AB8500_REGU_VAPE_SEL1] = {
+ .name = "VapeSel1",
+ .bank = 0x04,
+ .addr = 0x0e,
+ },
+ [AB8500_REGU_VAPE_SEL2] = {
+ .name = "VapeSel2",
+ .bank = 0x04,
+ .addr = 0x0f,
+ },
+ [AB8500_REGU_VAPE_SEL3] = {
+ .name = "VapeSel3",
+ .bank = 0x04,
+ .addr = 0x10,
+ },
+ [AB8500_REGU_VBB_SEL1] = {
+ .name = "VBBSel1",
+ .bank = 0x04,
+ .addr = 0x11,
+ },
+ [AB8500_REGU_VBB_SEL2] = {
+ .name = "VBBSel2",
+ .bank = 0x04,
+ .addr = 0x12,
+ },
+ [AB8500_REGU_VSMPS1_SEL1] = {
+ .name = "Vsmps1Sel1",
+ .bank = 0x04,
+ .addr = 0x13,
+ },
+ [AB8500_REGU_VSMPS1_SEL2] = {
+ .name = "Vsmps1Sel2",
+ .bank = 0x04,
+ .addr = 0x14,
+ },
+ [AB8500_REGU_VSMPS1_SEL3] = {
+ .name = "Vsmps1Sel3",
+ .bank = 0x04,
+ .addr = 0x15,
+ },
+ [AB8500_REGU_VSMPS2_SEL1] = {
+ .name = "Vsmps2Sel1",
+ .bank = 0x04,
+ .addr = 0x17,
+ },
+ [AB8500_REGU_VSMPS2_SEL2] = {
+ .name = "Vsmps2Sel2",
+ .bank = 0x04,
+ .addr = 0x18,
+ },
+ [AB8500_REGU_VSMPS2_SEL3] = {
+ .name = "Vsmps2Sel3",
+ .bank = 0x04,
+ .addr = 0x19,
+ },
+ [AB8500_REGU_VSMPS3_SEL1] = {
+ .name = "Vsmps3Sel1",
+ .bank = 0x04,
+ .addr = 0x1b,
+ },
+ [AB8500_REGU_VSMPS3_SEL2] = {
+ .name = "Vsmps3Sel2",
+ .bank = 0x04,
+ .addr = 0x1c,
+ },
+ [AB8500_REGU_VSMPS3_SEL3] = {
+ .name = "Vsmps3Sel3",
+ .bank = 0x04,
+ .addr = 0x1d,
+ },
+ [AB8500_REGU_VAUX1_SEL] = {
+ .name = "Vaux1Sel",
+ .bank = 0x04,
+ .addr = 0x1f,
+ },
+ [AB8500_REGU_VAUX2_SEL] = {
+ .name = "Vaux2Sel",
+ .bank = 0x04,
+ .addr = 0x20,
+ },
+ [AB8500_REGU_VRF1_VAUX3_SEL] = {
+ .name = "VRF1Vaux3Sel",
+ .bank = 0x04,
+ .addr = 0x21,
+ },
+ [AB8500_REGU_CTRL_EXT_SUP] = {
+ .name = "ReguCtrlExtSup",
+ .bank = 0x04,
+ .addr = 0x22,
+ },
+ [AB8500_REGU_VMOD_REGU] = {
+ .name = "VmodRegu",
+ .bank = 0x04,
+ .addr = 0x40,
+ },
+ [AB8500_REGU_VMOD_SEL1] = {
+ .name = "VmodSel1",
+ .bank = 0x04,
+ .addr = 0x41,
+ },
+ [AB8500_REGU_VMOD_SEL2] = {
+ .name = "VmodSel2",
+ .bank = 0x04,
+ .addr = 0x42,
+ },
+ [AB8500_REGU_CTRL_DISCH] = {
+ .name = "ReguCtrlDisch",
+ .bank = 0x04,
+ .addr = 0x43,
+ },
+ [AB8500_REGU_CTRL_DISCH2] = {
+ .name = "ReguCtrlDisch2",
+ .bank = 0x04,
+ .addr = 0x44,
+ },
+ /* Outside regulator banks */
+ [AB8500_OTHER_SYSCLK_CTRL] = {
+ .name = "SysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x0c,
+ },
+ [AB8500_OTHER_VSIM_SYSCLK_CTRL] = {
+ .name = "VsimSysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x33,
+ },
+ [AB8500_OTHER_SYSULPCLK_CTRL1] = {
+ .name = "SysUlpClkCtrl1",
+ .bank = 0x02,
+ .addr = 0x0b,
+ },
+ [AB8500_OTHER_TVOUT_CTRL] = {
+ .name = "TVoutCtrl",
+ .bank = 0x06,
+ .addr = 0x80,
+ },
+};
+
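+/*
+ * Snapshots of every AB8500 regulator register, one row per recorded state.
+ * ab8500_register_state_saved flags which states have been captured and
+ * ab8500_register_state_save can be cleared to disable recording.
+ */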
+static u8 ab8500_register_state[NUM_REGULATOR_STATE][NUM_AB8500_REGISTER];
+static bool ab8500_register_state_saved[NUM_REGULATOR_STATE];
+static bool ab8500_register_state_save = true;
+
+static int ab8500_regulator_record_state(int state)
+{
+ u8 val;
+ int i;
+ int ret;
+
+ /* check arguments */
+ if ((state >= NUM_REGULATOR_STATE) || (state < 0)) {
+ dev_err(dev, "Wrong state specified\n");
+ return -EINVAL;
+ }
+
+ /* record */
+ if (!ab8500_register_state_save)
+ goto exit;
+
+ ab8500_register_state_saved[state] = true;
+
+ for (i = 1; i < NUM_AB8500_REGISTER; i++) {
+ ret = abx500_get_register_interruptible(dev,
+ ab8500_register[i].bank,
+ ab8500_register[i].addr,
+ &val);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ ab8500_register_state[state][i] = val;
+ }
+exit:
+ return 0;
+}
+
+/*
+ * regulator register dump
+ */
+static int ab8500_regulator_dump_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ int state, reg_id, i;
+ int err;
+
+ /* record current state */
+ ab8500_regulator_record_state(AB8500_REGULATOR_STATE_CURRENT);
+
+ /* print dump header */
+ err = seq_printf(s, "ab8500-regulator dump:\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ /* print states */
+ for (state = NUM_REGULATOR_STATE - 1; state >= 0; state--) {
+ if (ab8500_register_state_saved[state])
+ err = seq_printf(s, "%16s saved -------",
+ regulator_state_name[state]);
+ else
+ err = seq_printf(s, "%12s not saved -------",
+ regulator_state_name[state]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+ for (i = 0; i < NUM_REGULATOR_STATE; i++) {
+ if (i < state)
+ err = seq_printf(s, "-----");
+ else if (i == state)
+ err = seq_printf(s, "----+");
+ else
+ err = seq_printf(s, " |");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n",
+ __LINE__);
+ }
+ err = seq_printf(s, "\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ }
+
+ /* print labels */
+ err = seq_printf(s, "\n addr\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+ /* dump registers */
+ for (reg_id = 1; reg_id < NUM_AB8500_REGISTER; reg_id++) {
+ err = seq_printf(s, "%22s 0x%02x%02x:",
+ ab8500_register[reg_id].name,
+ ab8500_register[reg_id].bank,
+ ab8500_register[reg_id].addr);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ reg_id, __LINE__);
+
+ for (state = 0; state < NUM_REGULATOR_STATE; state++) {
+ err = seq_printf(s, " 0x%02x",
+ ab8500_register_state[state][reg_id]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ reg_id, __LINE__);
+ }
+
+ err = seq_printf(s, "\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ reg_id, __LINE__);
+ }
+
+ return 0;
+}
+
+static int ab8500_regulator_dump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_regulator_dump_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_regulator_dump_fops = {
+ .open = ab8500_regulator_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * regulator status print
+ */
+enum ab8500_regulator_id {
+ AB8500_VARM,
+ AB8500_VBBP,
+ AB8500_VBBN,
+ AB8500_VAPE,
+ AB8500_VSMPS1,
+ AB8500_VSMPS2,
+ AB8500_VSMPS3,
+ AB8500_VPLL,
+ AB8500_VREFDDR,
+ AB8500_VMOD,
+ AB8500_VEXTSUPPLY1,
+ AB8500_VEXTSUPPLY2,
+ AB8500_VEXTSUPPLY3,
+ AB8500_VRF1,
+ AB8500_VANA,
+ AB8500_VAUX1,
+ AB8500_VAUX2,
+ AB8500_VAUX3,
+ AB8500_VINTCORE,
+ AB8500_VTVOUT,
+ AB8500_VAUDIO,
+ AB8500_VANAMIC1,
+ AB8500_VANAMIC2,
+ AB8500_VDMIC,
+ AB8500_VUSB,
+ AB8500_VOTG,
+ AB8500_VBUSBIS,
+ AB8500_NUM_REGULATORS,
+};
+
+/*
+ * regulator_voltage
+ */
+struct regulator_volt {
+ u8 value;
+ int volt;
+};
+
+struct regulator_volt_range {
+ struct regulator_volt start;
+ struct regulator_volt step;
+ struct regulator_volt end;
+};
+
+/*
+ * ab8500_regulator
+ * @name
+ * @update_regid
+ * @update_mask
+ * @update_val[4] {off, on, hw, lp}
+ * @hw_mode_regid
+ * @hw_mode_mask
+ * @hw_mode_val[4] {hp/lp, hp/off, hp, hp}
+ * @hw_valid_regid[4] {sysclkreq1, hw1, hw2, sw}
+ * @hw_valid_mask[4] {sysclkreq1, hw1, hw2, sw}
+ * @vsel_sel_regid
+ * @vsel_sel_mask
+ * @vsel_sel_val[4] {sel1, sel2, sel3, sel3}
+ * @vsel_regid
+ * @vsel_mask
+ * @vsel_range
+ * @vsel_range_len
+ */
+struct ab8500_regulator {
+ const char *name;
+ int update_regid;
+ u8 update_mask;
+ u8 update_val[4];
+ int hw_mode_regid;
+ u8 hw_mode_mask;
+ u8 hw_mode_val[4];
+ int hw_valid_regid[4];
+ u8 hw_valid_mask[4];
+ int vsel_sel_regid;
+ u8 vsel_sel_mask;
+ u8 vsel_sel_val[4];
+ int vsel_regid[3];
+ u8 vsel_mask[3];
+ struct regulator_volt_range const *vsel_range[3];
+ int vsel_range_len[3];
+};
+
+static const char *update_val_name[] = {
+ "off",
+ "on ",
+ "hw ",
+ "lp ",
+ " - " /* undefined value */
+};
+
+static const char *hw_mode_val_name[] = {
+ "hp/lp ",
+ "hp/off",
+ "hp ",
+ "hp ",
+ "-/- ", /* undefined value */
+};
+
+/* voltage selection */
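+/*
+ * Each row of a voltage table is {start, step, end}, pairing register values
+ * with voltages in uV. A value decodes to
+ * start.volt + step.volt * ((value - start.value) / step.value), so a zero
+ * step voltage makes the whole row decode to the start voltage. Example from
+ * varm_vape_vmod_vsel: 0x20 -> 700000 + 0x20 * 12500 = 1100000 uV.
+ */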
+static const struct regulator_volt_range varm_vape_vmod_vsel[] = {
+ { {0x00, 700000}, {0x01, 12500}, {0x35, 1362500} },
+ { {0x36, 1362500}, {0x01, 0}, {0x3f, 1362500} },
+};
+
+static const struct regulator_volt_range vbbp_vsel[] = {
+ { {0x00, 0}, {0x10, 100000}, {0x40, 400000} },
+ { {0x50, 400000}, {0x10, 0}, {0x70, 400000} },
+ { {0x80, -400000}, {0x10, 0}, {0xb0, -400000} },
+ { {0xc0, -400000}, {0x10, 100000}, {0xf0, -100000} },
+};
+
+static const struct regulator_volt_range vbbn_vsel[] = {
+ { {0x00, 0}, {0x01, -100000}, {0x04, -400000} },
+ { {0x05, -400000}, {0x01, 0}, {0x07, -400000} },
+ { {0x08, 0}, {0x01, 100000}, {0x0c, 400000} },
+ { {0x0d, 400000}, {0x01, 0}, {0x0f, 400000} },
+};
+
+static const struct regulator_volt_range vsmps1_vsel[] = {
+ { {0x00, 1100000}, {0x01, 0}, {0x1f, 1100000} },
+ { {0x20, 1100000}, {0x01, 12500}, {0x30, 1300000} },
+ { {0x31, 1300000}, {0x01, 0}, {0x3f, 1300000} },
+};
+
+static const struct regulator_volt_range vsmps2_vsel[] = {
+ { {0x00, 1800000}, {0x01, 0}, {0x38, 1800000} },
+ { {0x39, 1800000}, {0x01, 12500}, {0x7f, 1875000} },
+};
+
+static const struct regulator_volt_range vsmps3_vsel[] = {
+ { {0x00, 700000}, {0x01, 12500}, {0x35, 1363500} },
+ { {0x36, 1363500}, {0x01, 0}, {0x7f, 1363500} },
+};
+
+static const struct regulator_volt_range vaux1_vaux2_vsel[] = {
+ { {0x00, 1100000}, {0x01, 100000}, {0x04, 1500000} },
+ { {0x05, 1800000}, {0x01, 50000}, {0x07, 1900000} },
+ { {0x08, 2500000}, {0x01, 0}, {0x08, 2500000} },
+ { {0x09, 2650000}, {0x01, 50000}, {0x0c, 2800000} },
+ { {0x0d, 2900000}, {0x01, 100000}, {0x0e, 3000000} },
+ { {0x0f, 3300000}, {0x01, 0}, {0x0f, 3300000} },
+};
+
+static const struct regulator_volt_range vaux3_vsel[] = {
+ { {0x00, 1200000}, {0x01, 300000}, {0x03, 2100000} },
+ { {0x04, 2500000}, {0x01, 250000}, {0x05, 2750000} },
+ { {0x06, 2790000}, {0x01, 0}, {0x06, 2790000} },
+ { {0x07, 2910000}, {0x01, 0}, {0x07, 2910000} },
+};
+
+static const struct regulator_volt_range vrf1_vsel[] = {
+ { {0x00, 1800000}, {0x10, 200000}, {0x10, 2000000} },
+ { {0x20, 2150000}, {0x10, 0}, {0x20, 2150000} },
+ { {0x30, 2500000}, {0x10, 0}, {0x30, 2500000} },
+};
+
+static const struct regulator_volt_range vintcore12_vsel[] = {
+ { {0x00, 1200000}, {0x08, 25000}, {0x30, 1350000} },
+ { {0x38, 1350000}, {0x01, 0}, {0x38, 1350000} },
+};
+
+/* regulators */
+static struct ab8500_regulator ab8500_regulator[AB8500_NUM_REGULATORS] = {
+ [AB8500_VARM] = {
+ .name = "Varm",
+ .update_regid = AB8500_REGU_ARM_REGU1,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x02,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x02,
+ .vsel_sel_regid = AB8500_REGU_ARM_REGU1,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VARM_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = varm_vape_vmod_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ .vsel_regid[1] = AB8500_REGU_VARM_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = varm_vape_vmod_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ .vsel_regid[2] = AB8500_REGU_VARM_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = varm_vape_vmod_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ },
+ [AB8500_VBBP] = {
+ .name = "Vbbp",
+ .update_regid = AB8500_REGU_ARM_REGU2,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x00},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x04,
+ .vsel_sel_regid = AB8500_REGU_ARM_REGU1,
+ .vsel_sel_mask = 0x10,
+ .vsel_sel_val = {0x00, 0x10, 0x00, 0x00},
+ .vsel_regid[0] = AB8500_REGU_VBB_SEL1,
+ .vsel_mask[0] = 0xf0,
+ .vsel_range[0] = vbbp_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vbbp_vsel),
+ .vsel_regid[1] = AB8500_REGU_VBB_SEL2,
+ .vsel_mask[1] = 0xf0,
+ .vsel_range[1] = vbbp_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vbbp_vsel),
+ },
+ [AB8500_VBBN] = {
+ .name = "Vbbn",
+ .update_regid = AB8500_REGU_ARM_REGU2,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x00},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x04,
+ .vsel_sel_regid = AB8500_REGU_ARM_REGU1,
+ .vsel_sel_mask = 0x20,
+ .vsel_sel_val = {0x00, 0x20, 0x00, 0x00},
+ .vsel_regid[0] = AB8500_REGU_VBB_SEL1,
+ .vsel_mask[0] = 0x0f,
+ .vsel_range[0] = vbbn_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vbbn_vsel),
+ .vsel_regid[1] = AB8500_REGU_VBB_SEL2,
+ .vsel_mask[1] = 0x0f,
+ .vsel_range[1] = vbbn_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vbbn_vsel),
+ },
+ [AB8500_VAPE] = {
+ .name = "Vape",
+ .update_regid = AB8500_REGU_VAPE_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0x0c,
+ .hw_mode_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x01,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x01,
+ .vsel_sel_regid = AB8500_REGU_VAPE_REGU,
+ .vsel_sel_mask = 0x24,
+ .vsel_sel_val = {0x00, 0x04, 0x20, 0x24},
+ .vsel_regid[0] = AB8500_REGU_VAPE_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = varm_vape_vmod_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ .vsel_regid[1] = AB8500_REGU_VAPE_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = varm_vape_vmod_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ .vsel_regid[2] = AB8500_REGU_VAPE_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = varm_vape_vmod_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ },
+ [AB8500_VSMPS1] = {
+ .name = "Vsmps1",
+ .update_regid = AB8500_REGU_VSMPS1_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0x30,
+ .hw_mode_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x01,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x01,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x01,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x04,
+ .vsel_sel_regid = AB8500_REGU_VSMPS1_REGU,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VSMPS1_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = vsmps1_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vsmps1_vsel),
+ .vsel_regid[1] = AB8500_REGU_VSMPS1_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = vsmps1_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vsmps1_vsel),
+ .vsel_regid[2] = AB8500_REGU_VSMPS1_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = vsmps1_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(vsmps1_vsel),
+ },
+ [AB8500_VSMPS2] = {
+ .name = "Vsmps2",
+ .update_regid = AB8500_REGU_VSMPS2_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x02,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x02,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x02,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x08,
+ .vsel_sel_regid = AB8500_REGU_VSMPS2_REGU,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VSMPS2_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = vsmps2_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vsmps2_vsel),
+ .vsel_regid[1] = AB8500_REGU_VSMPS2_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = vsmps2_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vsmps2_vsel),
+ .vsel_regid[2] = AB8500_REGU_VSMPS2_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = vsmps2_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(vsmps2_vsel),
+ },
+ [AB8500_VSMPS3] = {
+ .name = "Vsmps3",
+ .update_regid = AB8500_REGU_VSMPS3_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x04,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x04,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x04,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x10,
+ .vsel_sel_regid = AB8500_REGU_VSMPS3_REGU,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VSMPS3_SEL1,
+ .vsel_mask[0] = 0x7f,
+ .vsel_range[0] = vsmps3_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vsmps3_vsel),
+ .vsel_regid[1] = AB8500_REGU_VSMPS3_SEL2,
+ .vsel_mask[1] = 0x7f,
+ .vsel_range[1] = vsmps3_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vsmps3_vsel),
+ .vsel_regid[2] = AB8500_REGU_VSMPS3_SEL3,
+ .vsel_mask[2] = 0x7f,
+ .vsel_range[2] = vsmps3_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(vsmps3_vsel),
+ },
+ [AB8500_VPLL] = {
+ .name = "Vpll",
+ .update_regid = AB8500_REGU_VPLL_VANA_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0x0c,
+ .hw_mode_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x10,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x10,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x10,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x40,
+ },
+ [AB8500_VREFDDR] = {
+ .name = "VrefDDR",
+ .update_regid = AB8500_REGU_VREF_DDR,
+ .update_mask = 0x01,
+ .update_val = {0x00, 0x01, 0x00, 0x00},
+ },
+ [AB8500_VMOD] = {
+ .name = "Vmod",
+ .update_regid = AB8500_REGU_VMOD_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_VMOD_REGU,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x08,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x08,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x08,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x20,
+ .vsel_sel_regid = AB8500_REGU_VMOD_REGU,
+ .vsel_sel_mask = 0x04,
+ .vsel_sel_val = {0x00, 0x04, 0x00, 0x00},
+ .vsel_regid[0] = AB8500_REGU_VMOD_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = varm_vape_vmod_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ .vsel_regid[1] = AB8500_REGU_VMOD_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = varm_vape_vmod_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(varm_vape_vmod_vsel),
+ },
+ [AB8500_VEXTSUPPLY1] = {
+ .name = "Vextsupply1",
+ .update_regid = AB8500_REGU_EXT_SUPPLY_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x10,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x01,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x01,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x04,
+ },
+ [AB8500_VEXTSUPPLY2] = {
+ .name = "VextSupply2",
+ .update_regid = AB8500_REGU_EXT_SUPPLY_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x20,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x02,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x02,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x08,
+ },
+ [AB8500_VEXTSUPPLY3] = {
+ .name = "VextSupply3",
+ .update_regid = AB8500_REGU_EXT_SUPPLY_REGU,
+ .update_mask = 0x30,
+ .update_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0x0c,
+ .hw_mode_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x40,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x04,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x04,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x10,
+ },
+ [AB8500_VRF1] = {
+ .name = "Vrf1",
+ .update_regid = AB8500_REGU_VRF1_VAUX3_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VRF1_VAUX3_SEL,
+ .vsel_mask[0] = 0x30,
+ .vsel_range[0] = vrf1_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vrf1_vsel),
+ },
+ [AB8500_VANA] = {
+ .name = "Vana",
+ .update_regid = AB8500_REGU_VPLL_VANA_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0x30,
+ .hw_mode_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x08,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x08,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x08,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x20,
+ },
+ [AB8500_VAUX1] = {
+ .name = "Vaux1",
+ .update_regid = AB8500_REGU_VAUX12_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0x30,
+ .hw_mode_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x20,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x20,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x20,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x80,
+ .vsel_regid[0] = AB8500_REGU_VAUX1_SEL,
+ .vsel_mask[0] = 0x0f,
+ .vsel_range[0] = vaux1_vaux2_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vaux1_vaux2_vsel),
+ },
+ [AB8500_VAUX2] = {
+ .name = "Vaux2",
+ .update_regid = AB8500_REGU_VAUX12_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x40,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x40,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x40,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x01,
+ .vsel_regid[0] = AB8500_REGU_VAUX2_SEL,
+ .vsel_mask[0] = 0x0f,
+ .vsel_range[0] = vaux1_vaux2_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vaux1_vaux2_vsel),
+ },
+ [AB8500_VAUX3] = {
+ .name = "Vaux3",
+ .update_regid = AB8500_REGU_VRF1_VAUX3_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL4,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x80,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x80,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x80,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x02,
+ .vsel_regid[0] = AB8500_REGU_VRF1_VAUX3_SEL,
+ .vsel_mask[0] = 0x07,
+ .vsel_range[0] = vaux3_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vaux3_vsel),
+ },
+ [AB8500_VINTCORE] = {
+ .name = "VintCore12",
+ .update_regid = AB8500_REGU_MISC1,
+ .update_mask = 0x44,
+ .update_val = {0x00, 0x04, 0x00, 0x44},
+ .vsel_regid[0] = AB8500_REGU_MISC1,
+ .vsel_mask[0] = 0x38,
+ .vsel_range[0] = vintcore12_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vintcore12_vsel),
+ },
+ [AB8500_VTVOUT] = {
+ .name = "VTVout",
+ .update_regid = AB8500_REGU_MISC1,
+ .update_mask = 0x82,
+ .update_val = {0x00, 0x02, 0x00, 0x82},
+ },
+ [AB8500_VAUDIO] = {
+ .name = "Vaudio",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x02,
+ .update_val = {0x00, 0x02, 0x00, 0x00},
+ },
+ [AB8500_VANAMIC1] = {
+ .name = "Vanamic1",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x08,
+ .update_val = {0x00, 0x08, 0x00, 0x00},
+ },
+ [AB8500_VANAMIC2] = {
+ .name = "Vanamic2",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x10,
+ .update_val = {0x00, 0x10, 0x00, 0x00},
+ },
+ [AB8500_VDMIC] = {
+ .name = "Vdmic",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x04,
+ .update_val = {0x00, 0x04, 0x00, 0x00},
+ },
+ [AB8500_VUSB] = {
+ .name = "Vusb",
+ .update_regid = AB8500_REGU_VUSB_CTRL,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x00, 0x03},
+ },
+ [AB8500_VOTG] = {
+ .name = "VOTG",
+ .update_regid = AB8500_REGU_OTG_SUPPLY_CTRL,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x00, 0x03},
+ },
+ [AB8500_VBUSBIS] = {
+ .name = "Vbusbis",
+ .update_regid = AB8500_REGU_OTG_SUPPLY_CTRL,
+ .update_mask = 0x08,
+ .update_val = {0x00, 0x08, 0x00, 0x00},
+ },
+};
+
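+/* state shown by the "status" debugfs file; selected by writing to it */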
+static int status_state = AB8500_REGULATOR_STATE_CURRENT;
+
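+/*
+ * Decode a register value against a single voltage range. Returns 1 and
+ * writes the voltage to *volt on a match, 0 if the value lies outside the
+ * range, and -EINVAL if it is inside the range but not on a step boundary.
+ * A zero step register value means the range holds a single value.
+ */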
+static int _get_voltage(struct regulator_volt_range const *volt_range,
+ u8 value, int *volt)
+{
+ u8 start = volt_range->start.value;
+ u8 end = volt_range->end.value;
+ u8 step = volt_range->step.value;
+
+ /* Check if within range */
+ if (step == 0) {
+ if (value == start) {
+ *volt = volt_range->start.volt;
+ return 1;
+ }
+ } else {
+ if ((start <= value) && (value <= end)) {
+ if ((value - start)%step != 0)
+ return -EINVAL; /* invalid setting */
+ *volt = volt_range->start.volt
+ + volt_range->step.volt
+ *((value - start)/step);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
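+/* Decode a register value against a list of ranges; -EINVAL if none match. */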
+static int get_voltage(struct regulator_volt_range const *volt_range,
+ int volt_range_len,
+ u8 value)
+{
+ int volt;
+ int i, ret;
+
+ for (i = 0; i < volt_range_len; i++) {
+ ret = _get_voltage(&volt_range[i], value, &volt);
+ if (ret < 0)
+ break; /* invalid setting */
+ if (ret == 1)
+ return volt; /* successful */
+ }
+
+ return -EINVAL;
+}
+
+static int ab8500_regulator_status_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ int id, regid;
+ int i;
+ u8 val;
+ int err;
+
+ /* record current state */
+ ab8500_regulator_record_state(AB8500_REGULATOR_STATE_CURRENT);
+
+ /* check if chosen state is recorded */
+ if (!ab8500_register_state_saved[status_state]) {
+ seq_printf(s, "ab8500-regulator status is not recorded.\n");
+ goto exit;
+ }
+
+ /* print dump header */
+ err = seq_printf(s, "ab8500-regulator status:\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ /* print state */
+ for (i = 0; i < NUM_REGULATOR_STATE; i++) {
+ if (i == status_state)
+ err = seq_printf(s, "-> %i. %12s\n",
+ i, regulator_state_name[i]);
+ else
+ err = seq_printf(s, " %i. %12s\n",
+ i, regulator_state_name[i]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+ }
+
+ /* print labels */
+ err = seq_printf(s,
+ "+-----------+----+--------------+-------------------------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "| name|man |auto |voltage |\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "+-----------+----+--------------+ +-----------------------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "| |mode|mode |0|1|2|3| | 1 | 2 | 3 |\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "+-----------+----+------+-+-+-+-+-+-------+-------+-------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+ /* dump registers */
+ for (id = 0; id < AB8500_NUM_REGULATORS; id++) {
+ /* print name */
+ err = seq_printf(s, "|%11s|",
+ ab8500_regulator[id].name);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ id, __LINE__);
+
+ /* print manual mode */
+ regid = ab8500_regulator[id].update_regid;
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].update_mask;
+ for (i = 0; i < 4; i++) {
+ if (val == ab8500_regulator[id].update_val[i])
+ break;
+ }
+ err = seq_printf(s, "%4s|",
+ update_val_name[i]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ id, __LINE__);
+
+ /* print auto mode */
+ regid = ab8500_regulator[id].hw_mode_regid;
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].hw_mode_mask;
+ for (i = 0; i < 4; i++) {
+ if (val == ab8500_regulator[id].hw_mode_val[i])
+ break;
+ }
+ err = seq_printf(s, "%6s|",
+ hw_mode_val_name[i]);
+ } else {
+ err = seq_printf(s, " |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ id, __LINE__);
+
+ /* print valid bits */
+ for (i = 0; i < 4; i++) {
+ regid = ab8500_regulator[id].hw_valid_regid[i];
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].hw_valid_mask[i];
+ if (val)
+ err = seq_printf(s, "1|");
+ else
+ err = seq_printf(s, "0|");
+ } else {
+ err = seq_printf(s, " |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+ }
+
+ /* print voltage selection */
+ regid = ab8500_regulator[id].vsel_sel_regid;
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].vsel_sel_mask;
+ for (i = 0; i < 3; i++) {
+ if (val == ab8500_regulator[id].vsel_sel_val[i])
+ break;
+ }
+ if (i < 3)
+ err = seq_printf(s, "%i|", i + 1);
+ else
+ err = seq_printf(s, "-|");
+ } else {
+ err = seq_printf(s, " |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+
+ for (i = 0; i < 3; i++) {
+ int volt;
+
+ regid = ab8500_regulator[id].vsel_regid[i];
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].vsel_mask[i];
+ volt = get_voltage(
+ ab8500_regulator[id].vsel_range[i],
+ ab8500_regulator[id].vsel_range_len[i],
+ val);
+ seq_printf(s, "%7i|", volt);
+ } else {
+ seq_printf(s, " |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+ }
+
+ err = seq_printf(s, "\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+
+ }
+ err = seq_printf(s,
+ "+-----------+----+------+-+-+-+-+-+-------+-------+-------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "Note! In HW mode, voltage selection is controlled by HW.\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+
+exit:
+ return 0;
+}
+
+static ssize_t ab8500_regulator_status_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+
+ /* copy user data */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* convert */
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+
+ /* set state shown by the status file */
+ if (user_val >= NUM_REGULATOR_STATE) {
+ dev_err(dev, "debugfs error, input >= number of states\n");
+ return -EINVAL;
+ }
+
+ status_state = user_val;
+
+ return buf_size;
+}
+
+
+static int ab8500_regulator_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_regulator_status_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_regulator_status_fops = {
+ .open = ab8500_regulator_status_open,
+ .write = ab8500_regulator_status_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_PM
+
+struct ab8500_force_reg {
+ char *name;
+ u8 bank;
+ u8 addr;
+ u8 mask;
+ u8 val;
+ bool restore;
+ u8 restore_val;
+};
+
+static struct ab8500_force_reg ab8500_force_reg[] = {
+ {
+ /*
+ * SysClkCtrl
+ * OTP: 0x00, HSI: 0x06, suspend: 0x00/0x07 (value/mask)
+ * [ 2] USBClkEna = disable SysClk path to USB block
+ * [ 1] TVoutClkEna = disable 27Mhz clock to TVout block
+ * [ 0] TVoutPllEna = disable TVout pll
+ * (generate 27Mhz from SysClk)
+ */
+ .name = "SysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x0c,
+ .mask = 0x07,
+ .val = 0x00,
+ },
+ {
+ /*
+ * VsimSysClkCtrl
+ * OTP: 0x01, HSI: 0x21, suspend: 0x01/0xff (value/mask)
+ * [ 7] VsimSysClkReq8Valid = no connection
+ * [ 6] VsimSysClkReq7Valid = no connection
+ * [ 5] VsimSysClkReq6Valid = no connection
+ * [ 4] VsimSysClkReq5Valid = no connection
+ * [ 3] VsimSysClkReq4Valid = no connection
+ * [ 2] VsimSysClkReq3Valid = no connection
+ * [ 1] VsimSysClkReq2Valid = no connection
+ * [ 0] VsimSysClkReq1Valid = Vsim set by SysClkReq1
+ */
+ .name = "VsimSysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x33,
+ .mask = 0xff,
+ .val = 0x01,
+ },
+ {
+ /*
+ * SysUlpClkCtrl1
+ * OTP: 0x00, HSI: 0x00, suspend: 0x00/0x0f (value/mask)
+ * [ 3] 4500SysClkReq = inactive
+ * [ 2] UlpClkReq = inactive
+ * [1:0] SysUlpClkIntSel[1:0] = no internal clock switching.
+ * Internal clock is SysClk.
+ */
+ .name = "SysUlpClkCtrl1",
+ .bank = 0x02,
+ .addr = 0x0b,
+ .mask = 0x0f,
+ .val = 0x00,
+ },
+ {
+ /*
+ * TVoutCtrl
+ * OTP: N/A, HSI: N/A, suspend: 0x00/0x03 (value/mask)
+ * [ 2] PlugTvOn = plug/unplug detection disabled
+ * [1:0] TvoutDacCtrl[1:0] = "0" forced on DAC input (test)
+ */
+ .name = "TVoutCtrl",
+ .bank = 0x06,
+ .addr = 0x80,
+ .mask = 0x03,
+ .val = 0x00,
+ },
+};
+
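+/*
+ * Record the suspend-time register state and, if setting_suspend_force is
+ * set, force the registers listed in ab8500_force_reg[], saving the current
+ * values so ab8500_regulator_debug_restore() can put them back.
+ */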
+void ab8500_regulator_debug_force(void)
+{
+ int ret, i;
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_SUSPEND);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record suspend state.\n");
+
+ /* check if registers should be forced */
+ if (!setting_suspend_force)
+ goto exit;
+
+ /*
+ * Optimize href v2_v50_pwr board for ApSleep/ApDeepSleep
+ * power consumption measurements
+ */
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_force_reg); i++) {
+ dev_vdbg(&pdev->dev, "Save and set %s: "
+ "0x%02x, 0x%02x, 0x%02x, 0x%02x.\n",
+ ab8500_force_reg[i].name,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].val);
+
+ /* assume that register should be restored */
+ ab8500_force_reg[i].restore = true;
+
+ /* get register value before forcing it */
+ ret = abx500_get_register_interruptible(&pdev->dev,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ &ab8500_force_reg[i].restore_val);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read %s.\n",
+ ab8500_force_reg[i].name);
+ ab8500_force_reg[i].restore = false;
+ break;
+ }
+
+ /* force register value */
+ ret = abx500_mask_and_set_register_interruptible(&pdev->dev,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].val);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to write %s.\n",
+ ab8500_force_reg[i].name);
+ ab8500_force_reg[i].restore = false;
+ }
+ }
+
+exit:
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(
+ AB8500_REGULATOR_STATE_SUSPEND_CORE);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record suspend state.\n");
+
+ return;
+}
+
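+/*
+ * Record the resume-time register state and restore any registers that
+ * ab8500_regulator_debug_force() overrode.
+ */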
+void ab8500_regulator_debug_restore(void)
+{
+ int ret, i;
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_RESUME_CORE);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record resume state.\n");
+ for (i = ARRAY_SIZE(ab8500_force_reg) - 1; i >= 0; i--) {
+ /* restore register value */
+ if (ab8500_force_reg[i].restore) {
+ ret = abx500_mask_and_set_register_interruptible(
+ &pdev->dev,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].restore_val);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to restore %s.\n",
+ ab8500_force_reg[i].name);
+ dev_vdbg(&pdev->dev, "Restore %s: "
+ "0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ ab8500_force_reg[i].name,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].restore_val);
+ }
+ }
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_RESUME);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record resume state.\n");
+
+ return;
+}
+
+#endif
+
+static int ab8500_regulator_suspend_force_show(struct seq_file *s, void *p)
+{
+ /* print suspend standby status */
+ if (setting_suspend_force)
+ return seq_printf(s, "suspend force enabled\n");
+ else
+ return seq_printf(s, "no suspend force\n");
+}
+
+static ssize_t ab8500_regulator_suspend_force_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+
+ /* copy user data */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* convert */
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+
+ /* set suspend force setting */
+ if (user_val > 1) {
+ dev_err(dev, "debugfs error input > 1\n");
+ return -EINVAL;
+ }
+
+ if (user_val)
+ setting_suspend_force = true;
+ else
+ setting_suspend_force = false;
+
+ return buf_size;
+}
+
+static int ab8500_regulator_suspend_force_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_regulator_suspend_force_show,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_regulator_suspend_force_fops = {
+ .open = ab8500_regulator_suspend_force_open,
+ .write = ab8500_regulator_suspend_force_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct dentry *ab8500_regulator_dir;
+static struct dentry *ab8500_regulator_dump_file;
+static struct dentry *ab8500_regulator_status_file;
+static struct dentry *ab8500_regulator_suspend_force_file;
+
+int __devinit ab8500_regulator_debug_init(struct platform_device *plf)
+{
+ void __iomem *boot_info_backupram;
+ int ret;
+
+ /* setup dev pointers */
+ dev = &plf->dev;
+ pdev = plf;
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_INIT);
+ if (ret < 0)
+ dev_err(&plf->dev, "Failed to record init state.\n");
+
+ /* make suspend-force default if board profile is v5x-power */
+ boot_info_backupram = ioremap(BOOT_INFO_BACKUPRAM1, 0x4);
+
+ if (boot_info_backupram) {
+ u8 board_profile;
+ board_profile = readb(
+ boot_info_backupram + BOARD_PROFILE_BACKUPRAM1);
+ dev_dbg(dev, "Board profile is 0x%02x\n", board_profile);
+
+ if (board_profile >= OPTION_BOARD_VERSION_V5X)
+ setting_suspend_force = true;
+
+ iounmap(boot_info_backupram);
+ } else {
+ dev_err(dev, "Failed to read backupram.\n");
+ }
+
+ /* create directory */
+ ab8500_regulator_dir = debugfs_create_dir("ab8500-regulator", NULL);
+ if (!ab8500_regulator_dir)
+ goto exit_no_debugfs;
+
+ /* create "dump" file */
+ ab8500_regulator_dump_file = debugfs_create_file("dump",
+ S_IRUGO, ab8500_regulator_dir, &plf->dev,
+ &ab8500_regulator_dump_fops);
+ if (!ab8500_regulator_dump_file)
+ goto exit_destroy_dir;
+
+ /* create "status" file */
+ ab8500_regulator_status_file = debugfs_create_file("status",
+ S_IRUGO, ab8500_regulator_dir, &plf->dev,
+ &ab8500_regulator_status_fops);
+ if (!ab8500_regulator_status_file)
+ goto exit_destroy_dump_file;
+
+ /*
+ * create "suspend-force-v5x" file. As indicated by the name, this is
+ * only applicable for v2_v5x hardware versions.
+ */
+ ab8500_regulator_suspend_force_file = debugfs_create_file(
+ "suspend-force-v5x",
+ S_IRUGO, ab8500_regulator_dir, &plf->dev,
+ &ab8500_regulator_suspend_force_fops);
+ if (!ab8500_regulator_suspend_force_file)
+ goto exit_destroy_status_file;
+
+ return 0;
+
+exit_destroy_status_file:
+ debugfs_remove(ab8500_regulator_status_file);
+exit_destroy_dump_file:
+ debugfs_remove(ab8500_regulator_dump_file);
+exit_destroy_dir:
+ debugfs_remove(ab8500_regulator_dir);
+exit_no_debugfs:
+ dev_err(&plf->dev, "failed to create debugfs entries.\n");
+ return -ENOMEM;
+}
+
+int __devexit ab8500_regulator_debug_exit(struct platform_device *plf)
+{
+ debugfs_remove(ab8500_regulator_suspend_force_file);
+ debugfs_remove(ab8500_regulator_status_file);
+ debugfs_remove(ab8500_regulator_dump_file);
+ debugfs_remove(ab8500_regulator_dir);
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com");
+MODULE_DESCRIPTION("AB8500 Regulator Debug");
+MODULE_ALIAS("platform:ab8500-regulator-debug");
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
new file mode 100644
index 00000000000..f5971de1dc5
--- /dev/null
+++ b/drivers/regulator/ab8500-ext.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ *
+ * This file is based on drivers/regulator/ab8500.c
+ *
+ * AB8500 external regulators
+ *
+ * ab8500-ext supports the following regulators:
+ * - VextSupply1, VextSupply2 and VextSupply3
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/regulator/ab8500.h>
+
+/**
+ * struct ab8500_ext_regulator_info - ab8500 regulator information
+ * @dev: device pointer
+ * @desc: regulator description
+ * @rdev: regulator device
+ * @cfg: regulator configuration (extension of regulator FW configuration)
+ * @is_enabled: status of regulator (on/off)
+ * @fixed_uV: typical voltage (for fixed voltage supplies)
+ * @update_bank: bank to control on/off
+ * @update_reg: register to control on/off
+ * @update_mask: mask to enable/disable and set mode of regulator
+ * @update_val: bits holding the regulator current mode
+ * @update_val_hp: bits to set EN pin active (LPn pin deactive)
+ * normally this means high power mode
+ * @update_val_lp: bits to set EN pin active and LPn pin active
+ * normally this means low power mode
+ * @update_val_hw: bits to set regulator pins in HW control
+ * SysClkReq pins and logic will choose mode
+ */
+struct ab8500_ext_regulator_info {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_dev *rdev;
+ struct ab8500_ext_regulator_cfg *cfg;
+ bool is_enabled;
+ int fixed_uV;
+ u8 update_bank;
+ u8 update_reg;
+ u8 update_mask;
+ u8 update_val;
+ u8 update_val_hp;
+ u8 update_val_lp;
+ u8 update_val_hw;
+};
+
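+/*
+ * Apply the enable bits for an external supply; the value written is passed
+ * back through *regval so callers can log it. When the board configuration
+ * requests HW control, high power mode is forced so that both the HW and SW
+ * requests are satisfied.
+ */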
+static int enable(struct ab8500_ext_regulator_info *info, u8 *regval)
+{
+ int ret;
+
+ *regval = info->update_val;
+
+ /*
+ * To satisfy both HW high power request and SW request, the regulator
+ * must be on in high power.
+ */
+ if (info->cfg && info->cfg->hwreq)
+ *regval = info->update_val_hp;
+
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, *regval);
+ if (ret < 0)
+ dev_err(rdev_get_dev(info->rdev),
+ "couldn't set enable bits for regulator\n");
+
+ info->is_enabled = true;
+
+ return ret;
+}
+
+static int ab8500_ext_regulator_enable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = enable(info, &regval);
+
+ dev_dbg(rdev_get_dev(rdev), "%s-enable (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ return ret;
+}
+
+static int ab8500_ext_regulator_set_suspend_enable(struct regulator_dev *rdev)
+{
+ dev_dbg(rdev_get_dev(rdev), "suspend: ");
+
+ return ab8500_ext_regulator_enable(rdev);
+}
+
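+/*
+ * Clear the enable bits, or fall back to HW-request mode when the board
+ * configuration asks for it; the value written is passed back through
+ * *regval for logging.
+ */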
+static int disable(struct ab8500_ext_regulator_info *info, u8 *regval)
+{
+ int ret;
+
+ *regval = 0x0;
+
+ /*
+ * Set the regulator in HW request mode if configured
+ */
+ if (info->cfg && info->cfg->hwreq)
+ *regval = info->update_val_hw;
+
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, *regval);
+ if (ret < 0)
+ dev_err(rdev_get_dev(info->rdev),
+ "couldn't set disable bits for regulator\n");
+
+ info->is_enabled = false;
+
+ return ret;
+}
+
+static int ab8500_ext_regulator_disable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = disable(info, &regval);
+
+ dev_dbg(rdev_get_dev(rdev), "%s-disable (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ return ret;
+}
+
+static int ab8500_ext_regulator_set_suspend_disable(struct regulator_dev *rdev)
+{
+ dev_dbg(rdev_get_dev(rdev), "suspend: ");
+
+ return ab8500_ext_regulator_disable(rdev);
+}
+
+static int ab8500_ext_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = abx500_get_register_interruptible(info->dev,
+ info->update_bank, info->update_reg, &regval);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read 0x%x register\n", info->update_reg);
+ return ret;
+ }
+
+ dev_dbg(rdev_get_dev(rdev), "%s-is_enabled (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ if (((regval & info->update_mask) == info->update_val_lp) ||
+ ((regval & info->update_mask) == info->update_val_hp))
+ info->is_enabled = true;
+ else
+ info->is_enabled = false;
+
+ return info->is_enabled;
+}
+
+static int ab8500_ext_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ int ret = 0;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ info->update_val = info->update_val_hp;
+ break;
+ case REGULATOR_MODE_IDLE:
+ info->update_val = info->update_val_lp;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (info->is_enabled) {
+ u8 regval;
+
+ ret = enable(info, &regval);
+ if (ret < 0)
+ dev_err(rdev_get_dev(rdev),
+ "Could not set regulator mode.\n");
+
+ dev_dbg(rdev_get_dev(rdev),
+ "%s-set_mode (bank, reg, mask, value): "
+ "0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+ }
+
+ return ret;
+}
+
+static unsigned int ab8500_ext_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (info->update_val == info->update_val_hp)
+ ret = REGULATOR_MODE_NORMAL;
+ else if (info->update_val == info->update_val_lp)
+ ret = REGULATOR_MODE_IDLE;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int ab8500_ext_fixed_get_voltage(struct regulator_dev *rdev)
+{
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ return info->fixed_uV;
+}
+
+static int ab8500_ext_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ /* return the uV for the fixed regulators */
+ if (info->fixed_uV)
+ return info->fixed_uV;
+
+ return -EINVAL;
+}
+
+static struct regulator_ops ab8500_ext_regulator_ops = {
+ .enable = ab8500_ext_regulator_enable,
+ .set_suspend_enable = ab8500_ext_regulator_set_suspend_enable,
+ .disable = ab8500_ext_regulator_disable,
+ .set_suspend_disable = ab8500_ext_regulator_set_suspend_disable,
+ .is_enabled = ab8500_ext_regulator_is_enabled,
+ .set_mode = ab8500_ext_regulator_set_mode,
+ .get_mode = ab8500_ext_regulator_get_mode,
+ .get_voltage = ab8500_ext_fixed_get_voltage,
+ .list_voltage = ab8500_ext_list_voltage,
+};
+
+static struct ab8500_ext_regulator_info
+ ab8500_ext_regulator_info[AB8500_NUM_EXT_REGULATORS] = {
+ [AB8500_EXT_SUPPLY1] = {
+ .desc = {
+ .name = "VEXTSUPPLY1",
+ .ops = &ab8500_ext_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_EXT_SUPPLY1,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1800000,
+ .update_bank = 0x04,
+ .update_reg = 0x08,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_hp = 0x01,
+ .update_val_lp = 0x03,
+ .update_val_hw = 0x02,
+ },
+ [AB8500_EXT_SUPPLY2] = {
+ .desc = {
+ .name = "VEXTSUPPLY2",
+ .ops = &ab8500_ext_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_EXT_SUPPLY2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1360000,
+ .update_bank = 0x04,
+ .update_reg = 0x08,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_hp = 0x04,
+ .update_val_lp = 0x0c,
+ .update_val_hw = 0x08,
+ },
+ [AB8500_EXT_SUPPLY3] = {
+ .desc = {
+ .name = "VEXTSUPPLY3",
+ .ops = &ab8500_ext_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_EXT_SUPPLY3,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 3400000,
+ .update_bank = 0x04,
+ .update_reg = 0x08,
+ .update_mask = 0x30,
+ .update_val = 0x10,
+ .update_val_hp = 0x10,
+ .update_val_lp = 0x30,
+ .update_val_hw = 0x20,
+ },
+};
+
+__devinit int ab8500_ext_regulator_init(struct platform_device *pdev)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
+ struct ab8500_platform_data *ppdata;
+ struct ab8500_regulator_platform_data *pdata;
+ int i, err;
+
+ if (!ab8500) {
+ dev_err(&pdev->dev, "null mfd parent\n");
+ return -EINVAL;
+ }
+ ppdata = dev_get_platdata(ab8500->dev);
+ if (!ppdata) {
+ dev_err(&pdev->dev, "null parent pdata\n");
+ return -EINVAL;
+ }
+
+ pdata = ppdata->regulator;
+ if (!pdata) {
+ dev_err(&pdev->dev, "null pdata\n");
+ return -EINVAL;
+ }
+
+ /* make sure the platform data has the correct size */
+ if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) {
+ dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
+ return -EINVAL;
+ }
+
+ /* check for AB8500 2.x */
+ if (abx500_get_chip_id(&pdev->dev) < 0x30) {
+ struct ab8500_ext_regulator_info *info;
+
+ /* VextSupply3LPn is inverted on AB8500 2.x */
+ info = &ab8500_ext_regulator_info[AB8500_EXT_SUPPLY3];
+ info->update_val = 0x30;
+ info->update_val_hp = 0x30;
+ info->update_val_lp = 0x10;
+ }
+
+ /* register all regulators */
+ for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
+ struct ab8500_ext_regulator_info *info = NULL;
+
+ /* assign per-regulator data */
+ info = &ab8500_ext_regulator_info[i];
+ info->dev = &pdev->dev;
+ info->cfg = (struct ab8500_ext_regulator_cfg *)
+ pdata->ext_regulator[i].driver_data;
+
+ /* register regulator with framework */
+ info->rdev = regulator_register(&info->desc, &pdev->dev,
+ &pdata->ext_regulator[i], info, NULL);
+ if (IS_ERR(info->rdev)) {
+ err = PTR_ERR(info->rdev);
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ info->desc.name);
+ /* when we fail, un-register all earlier regulators */
+ while (--i >= 0) {
+ info = &ab8500_ext_regulator_info[i];
+ regulator_unregister(info->rdev);
+ }
+ return err;
+ }
+
+ dev_dbg(rdev_get_dev(info->rdev),
+ "%s-probed\n", info->desc.name);
+ }
+
+ return 0;
+}
+
+__devexit int ab8500_ext_regulator_exit(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
+ struct ab8500_ext_regulator_info *info = NULL;
+ info = &ab8500_ext_regulator_info[i];
+
+ dev_vdbg(rdev_get_dev(info->rdev),
+ "%s-remove\n", info->desc.name);
+
+ regulator_unregister(info->rdev);
+ }
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>");
+MODULE_DESCRIPTION("AB8500 external regulator driver");
+MODULE_ALIAS("platform:ab8500-ext-regulator");
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index c9b92531ae6..ffef433f167 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -21,43 +21,54 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
+#include <linux/mfd/abx500/ab8500-gpio.h> /* for sysclkreq pins */
/**
* struct ab8500_regulator_info - ab8500 regulator information
* @dev: device pointer
* @desc: regulator description
* @regulator_dev: regulator device
+ * @is_enabled: status of regulator (on/off)
* @max_uV: maximum voltage (for variable voltage supplies)
* @min_uV: minimum voltage (for variable voltage supplies)
* @fixed_uV: typical voltage (for fixed voltage supplies)
+ * @load_lp_uA: maximum load in idle (low power) mode
* @update_bank: bank to control on/off
* @update_reg: register to control on/off
- * @update_mask: mask to enable/disable regulator
- * @update_val_enable: bits to enable the regulator in normal (high power) mode
+ * @update_mask: mask to enable/disable and set mode of regulator
+ * @update_val: bits holding the regulator current mode
+ * @update_val_idle: bits to enable the regulator in idle (low power) mode
+ * @update_val_normal: bits to enable the regulator in normal (high power) mode
* @voltage_bank: bank to control regulator voltage
* @voltage_reg: register to control regulator voltage
* @voltage_mask: mask to control regulator voltage
* @voltages: supported voltage table
* @voltages_len: number of supported voltages for the regulator
* @delay: startup/set voltage delay in us
+ * @gpio_pin: ab8500 gpio pin offset number (for sysclkreq regulator only)
*/
struct ab8500_regulator_info {
struct device *dev;
struct regulator_desc desc;
struct regulator_dev *regulator;
+ bool is_enabled;
int max_uV;
int min_uV;
int fixed_uV;
+ int load_lp_uA;
u8 update_bank;
u8 update_reg;
u8 update_mask;
- u8 update_val_enable;
+ u8 update_val;
+ u8 update_val_idle;
+ u8 update_val_normal;
u8 voltage_bank;
u8 voltage_reg;
u8 voltage_mask;
int const *voltages;
int voltages_len;
unsigned int delay;
+ enum ab8500_pin gpio_pin;
};
/* voltage tables for the vauxn/vintcore supplies */
@@ -113,15 +124,17 @@ static int ab8500_regulator_enable(struct regulator_dev *rdev)
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->update_bank, info->update_reg,
- info->update_mask, info->update_val_enable);
+ info->update_mask, info->update_val);
if (ret < 0)
dev_err(rdev_get_dev(rdev),
"couldn't set enable bits for regulator\n");
+ info->is_enabled = true;
+
dev_vdbg(rdev_get_dev(rdev),
"%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->update_bank, info->update_reg,
- info->update_mask, info->update_val_enable);
+ info->update_mask, info->update_val);
return ret;
}
@@ -143,6 +156,8 @@ static int ab8500_regulator_disable(struct regulator_dev *rdev)
dev_err(rdev_get_dev(rdev),
"couldn't set disable bits for regulator\n");
+ info->is_enabled = false;
+
dev_vdbg(rdev_get_dev(rdev),
"%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->update_bank, info->update_reg,
@@ -151,6 +166,88 @@ static int ab8500_regulator_disable(struct regulator_dev *rdev)
return ret;
}
+static unsigned int ab8500_regulator_get_optimum_mode(
+ struct regulator_dev *rdev, int input_uV,
+ int output_uV, int load_uA)
+{
+ unsigned int mode;
+
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (load_uA <= info->load_lp_uA)
+ mode = REGULATOR_MODE_IDLE;
+ else
+ mode = REGULATOR_MODE_NORMAL;
+
+ return mode;
+}
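
ab8500_regulator_get_optimum_mode() above compares the requested load against the regulator's load_lp_uA threshold: at or below it the idle (low power) mode is suggested, otherwise normal mode. A standalone sketch of that policy follows, using the 5000 uA threshold the Vaux supplies get later in this patch; pick_mode() and the enum are illustrative, not part of the driver.

#include <stdio.h>

enum mode { MODE_NORMAL, MODE_IDLE };

/* Illustrative threshold check mirroring the driver's policy. */
static enum mode pick_mode(int load_ua, int load_lp_ua)
{
	return (load_ua <= load_lp_ua) ? MODE_IDLE : MODE_NORMAL;
}

int main(void)
{
	printf("%d uA -> %s\n", 800,
	       pick_mode(800, 5000) == MODE_IDLE ? "idle" : "normal");
	printf("%d uA -> %s\n", 20000,
	       pick_mode(20000, 5000) == MODE_IDLE ? "idle" : "normal");
	return 0;
}
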
+
+static int ab8500_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ int ret = 0;
+
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ info->update_val = info->update_val_normal;
+ break;
+ case REGULATOR_MODE_IDLE:
+ info->update_val = info->update_val_idle;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (info->is_enabled) {
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, info->update_val);
+ if (ret < 0)
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set regulator mode\n");
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-set_mode (bank, reg, mask, value): "
+ "0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, info->update_val);
+ }
+
+ return ret;
+}
+
+static unsigned int ab8500_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (info->update_val == info->update_val_normal)
+ ret = REGULATOR_MODE_NORMAL;
+ else if (info->update_val == info->update_val_idle)
+ ret = REGULATOR_MODE_IDLE;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
{
int ret;
@@ -177,9 +274,11 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
info->update_mask, regval);
if (regval & info->update_mask)
- return true;
+ info->is_enabled = true;
else
- return false;
+ info->is_enabled = false;
+
+ return info->is_enabled;
}
static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
@@ -275,8 +374,13 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
*selector = ret;
+ /* vintcore register has a different layout */
+ if (info->desc.id == AB8500_LDO_INTCORE)
+ regval = ((u8)ret) << 3;
+ else
+ regval = (u8)ret;
+
/* set the registers for the request */
- regval = (u8)ret;
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->voltage_bank, info->voltage_reg,
info->voltage_mask, regval);
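
The branch added above exists because LDO-INTCORE keeps its voltage selector in bits 3..5 of its register (voltage_mask 0x38 in the table below), while the other supplies use the low bits directly. An illustrative sketch of the selector-to-register-value mapping; selector_to_regval() is a made-up helper, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative: place a voltage selector into the register field. */
static uint8_t selector_to_regval(int selector, int is_intcore)
{
	return is_intcore ? (uint8_t)(selector << 3) : (uint8_t)selector;
}

int main(void)
{
	/* selector 2 lands in bits 3..5 (mask 0x38) for LDO-INTCORE */
	printf("intcore: 0x%02x\n", selector_to_regval(2, 1));	/* 0x10 */
	printf("vauxN:   0x%02x\n", selector_to_regval(2, 0));	/* 0x02 */
	return 0;
}
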
@@ -316,9 +420,12 @@ static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
return info->delay;
}
-static struct regulator_ops ab8500_regulator_ops = {
+static struct regulator_ops ab8500_regulator_volt_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
+ .get_optimum_mode = ab8500_regulator_get_optimum_mode,
+ .set_mode = ab8500_regulator_set_mode,
+ .get_mode = ab8500_regulator_get_mode,
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage = ab8500_regulator_get_voltage,
.set_voltage = ab8500_regulator_set_voltage,
@@ -339,16 +446,115 @@ static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
return info->fixed_uV;
}
-static struct regulator_ops ab8500_regulator_fixed_ops = {
+static struct regulator_ops ab8500_regulator_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
+ .get_optimum_mode = ab8500_regulator_get_optimum_mode,
+ .set_mode = ab8500_regulator_set_mode,
+ .get_mode = ab8500_regulator_get_mode,
.get_voltage = ab8500_fixed_get_voltage,
.list_voltage = ab8500_list_voltage,
.enable_time = ab8500_regulator_enable_time,
.set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
+static struct regulator_ops ab8500_regulator_ops = {
+ .enable = ab8500_regulator_enable,
+ .disable = ab8500_regulator_disable,
+ .is_enabled = ab8500_regulator_is_enabled,
+ .get_voltage = ab8500_fixed_get_voltage,
+ .list_voltage = ab8500_list_voltage,
+};
+
+static int ab8500_sysclkreq_enable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_gpio_config_select(info->dev, info->gpio_pin, false);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set sysclkreq pin selection\n");
+ return ret;
+ }
+
+ info->is_enabled = true;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-enable (gpio_pin, gpio_select): %i, false\n",
+ info->desc.name, info->gpio_pin);
+
+ return ret;
+}
+
+static int ab8500_sysclkreq_disable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_gpio_config_select(info->dev, info->gpio_pin, true);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set gpio pin selection\n");
+ return ret;
+ }
+
+ info->is_enabled = false;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-disable (gpio_pin, gpio_select): %i, true\n",
+ info->desc.name, info->gpio_pin);
+
+ return ret;
+}
+
+static int ab8500_sysclkreq_is_enabled(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ bool gpio_select;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_gpio_config_get_select(info->dev, info->gpio_pin,
+ &gpio_select);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read gpio pin selection\n");
+ return ret;
+ }
+
+ info->is_enabled = !gpio_select;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-is_enabled (gpio_pin, is_enabled): %i, %i\n",
+ info->desc.name, info->gpio_pin, !gpio_select);
+
+ return info->is_enabled;
+}
+
+static struct regulator_ops ab8500_sysclkreq_ops = {
+ .enable = ab8500_sysclkreq_enable,
+ .disable = ab8500_sysclkreq_disable,
+ .is_enabled = ab8500_sysclkreq_is_enabled,
+ .get_voltage = ab8500_fixed_get_voltage,
+ .list_voltage = ab8500_list_voltage,
+};
+
static struct ab8500_regulator_info
ab8500_regulator_info[AB8500_NUM_REGULATORS] = {
/*
@@ -360,7 +566,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX1] = {
.desc = {
.name = "LDO-AUX1",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX1,
.owner = THIS_MODULE,
@@ -368,10 +574,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x1f,
.voltage_mask = 0x0f,
@@ -381,7 +590,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX2] = {
.desc = {
.name = "LDO-AUX2",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX2,
.owner = THIS_MODULE,
@@ -389,10 +598,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x0c,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
.voltage_bank = 0x04,
.voltage_reg = 0x20,
.voltage_mask = 0x0f,
@@ -402,7 +614,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX3] = {
.desc = {
.name = "LDO-AUX3",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX3,
.owner = THIS_MODULE,
@@ -410,10 +622,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x0a,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x21,
.voltage_mask = 0x07,
@@ -423,7 +638,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_INTCORE] = {
.desc = {
.name = "LDO-INTCORE",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_INTCORE,
.owner = THIS_MODULE,
@@ -431,10 +646,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x44,
- .update_val_enable = 0x04,
+ .update_val = 0x44,
+ .update_val_idle = 0x44,
+ .update_val_normal = 0x04,
.voltage_bank = 0x03,
.voltage_reg = 0x80,
.voltage_mask = 0x38,
@@ -450,7 +668,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_TVOUT] = {
.desc = {
.name = "LDO-TVOUT",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_TVOUT,
.owner = THIS_MODULE,
@@ -458,30 +676,18 @@ static struct ab8500_regulator_info
},
.delay = 10000,
.fixed_uV = 2000000,
+ .load_lp_uA = 1000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x82,
- .update_val_enable = 0x02,
- },
- [AB8500_LDO_USB] = {
- .desc = {
- .name = "LDO-USB",
- .ops = &ab8500_regulator_fixed_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8500_LDO_USB,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- },
- .fixed_uV = 3300000,
- .update_bank = 0x03,
- .update_reg = 0x82,
- .update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x02,
+ .update_val_idle = 0x82,
+ .update_val_normal = 0x02,
},
[AB8500_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUDIO,
.owner = THIS_MODULE,
@@ -491,12 +697,12 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x02,
- .update_val_enable = 0x02,
+ .update_val = 0x02,
},
[AB8500_LDO_ANAMIC1] = {
.desc = {
.name = "LDO-ANAMIC1",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANAMIC1,
.owner = THIS_MODULE,
@@ -506,12 +712,12 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x08,
- .update_val_enable = 0x08,
+ .update_val = 0x08,
},
[AB8500_LDO_ANAMIC2] = {
.desc = {
.name = "LDO-ANAMIC2",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANAMIC2,
.owner = THIS_MODULE,
@@ -521,12 +727,12 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x10,
- .update_val_enable = 0x10,
+ .update_val = 0x10,
},
[AB8500_LDO_DMIC] = {
.desc = {
.name = "LDO-DMIC",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_DMIC,
.owner = THIS_MODULE,
@@ -536,25 +742,58 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x04,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
},
+
+ /*
+ * Regulators with fixed voltage and normal/idle modes
+ */
[AB8500_LDO_ANA] = {
.desc = {
.name = "LDO-ANA",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANA,
.owner = THIS_MODULE,
.n_voltages = 1,
},
.fixed_uV = 1200000,
+ .load_lp_uA = 1000,
.update_bank = 0x04,
.update_reg = 0x06,
.update_mask = 0x0c,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
},
-
+ /*
+ * SysClkReq regulators
+ */
+ [AB8500_SYSCLKREQ_2] = {
+ .desc = {
+ .name = "SYSCLKREQ-2",
+ .ops = &ab8500_sysclkreq_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_SYSCLKREQ_2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1, /* bogus value */
+ .gpio_pin = AB8500_PIN_GPIO1,
+ },
+ [AB8500_SYSCLKREQ_4] = {
+ .desc = {
+ .name = "SYSCLKREQ-4",
+ .ops = &ab8500_sysclkreq_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_SYSCLKREQ_4,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1, /* bogus value */
+ .gpio_pin = AB8500_PIN_GPIO3,
+ },
};
struct ab8500_reg_init {
@@ -573,10 +812,9 @@ struct ab8500_reg_init {
static struct ab8500_reg_init ab8500_reg_init[] = {
/*
* 0x30, VanaRequestCtrl
- * 0x0C, VpllRequestCtrl
* 0xc0, VextSupply1RequestCtrl
*/
- REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xfc),
+ REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xf0),
/*
* 0x03, VextSupply2RequestCtrl
* 0x0c, VextSupply3RequestCtrl
@@ -643,13 +881,21 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f),
/*
* 0x02, SysClkReq2Valid1
- * ...
+ * 0x04, SysClkReq3Valid1
+ * 0x08, SysClkReq4Valid1
+ * 0x10, SysClkReq5Valid1
+ * 0x20, SysClkReq6Valid1
+ * 0x40, SysClkReq7Valid1
* 0x80, SysClkReq8Valid1
*/
REG_INIT(AB8500_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe),
/*
* 0x02, SysClkReq2Valid2
- * ...
+ * 0x04, SysClkReq3Valid2
+ * 0x08, SysClkReq4Valid2
+ * 0x10, SysClkReq5Valid2
+ * 0x20, SysClkReq6Valid2
+ * 0x40, SysClkReq7Valid2
* 0x80, SysClkReq8Valid2
*/
REG_INIT(AB8500_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe),
@@ -674,8 +920,8 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
*/
REG_INIT(AB8500_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
/*
+ * 0x03, VpllRegu (NOTE! PRCMU register bits)
* 0x0c, VanaRegu
- * 0x03, VpllRegu
*/
REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f),
/*
@@ -701,10 +947,6 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
*/
REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x03),
/*
- * 0x3f, Vsmps1Sel1
- */
- REG_INIT(AB8500_VSMPS1SEL1, 0x04, 0x13, 0x3f),
- /*
* 0x0f, Vaux1Sel
*/
REG_INIT(AB8500_VAUX1SEL, 0x04, 0x1f, 0x0f),
@@ -737,17 +979,38 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16),
};
+/* Weak stubs; a separate debug implementation may override these at link time */
+int __attribute__((weak)) ab8500_regulator_debug_init(
+ struct platform_device *pdev)
+{
+ return 0;
+}
+
+int __attribute__((weak)) ab8500_regulator_debug_exit(
+ struct platform_device *pdev)
+{
+ return 0;
+}
+
static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_platform_data *pdata;
+ struct ab8500_platform_data *ppdata;
+ struct ab8500_regulator_platform_data *pdata;
int i, err;
if (!ab8500) {
dev_err(&pdev->dev, "null mfd parent\n");
return -EINVAL;
}
- pdata = dev_get_platdata(ab8500->dev);
+
+ ppdata = dev_get_platdata(ab8500->dev);
+ if (!ppdata) {
+ dev_err(&pdev->dev, "null parent pdata\n");
+ return -EINVAL;
+ }
+
+ pdata = ppdata->regulator;
if (!pdata) {
dev_err(&pdev->dev, "null pdata\n");
return -EINVAL;
@@ -759,32 +1022,30 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
+ /* initialize debug (initial state is recorded with this call) */
+ err = ab8500_regulator_debug_init(pdev);
+ if (err)
+ return err;
+
/* initialize registers */
- for (i = 0; i < pdata->num_regulator_reg_init; i++) {
+ for (i = 0; i < pdata->num_reg_init; i++) {
int id;
- u8 value;
+ u8 mask, value;
- id = pdata->regulator_reg_init[i].id;
- value = pdata->regulator_reg_init[i].value;
+ id = pdata->reg_init[i].id;
+ mask = pdata->reg_init[i].mask;
+ value = pdata->reg_init[i].value;
/* check for configuration errors */
- if (id >= AB8500_NUM_REGULATOR_REGISTERS) {
- dev_err(&pdev->dev,
- "Configuration error: id outside range.\n");
- return -EINVAL;
- }
- if (value & ~ab8500_reg_init[id].mask) {
- dev_err(&pdev->dev,
- "Configuration error: value outside mask.\n");
- return -EINVAL;
- }
+ BUG_ON(id >= AB8500_NUM_REGULATOR_REGISTERS);
+ BUG_ON(value & ~mask);
+ BUG_ON(mask & ~ab8500_reg_init[id].mask);
/* initialize register */
err = abx500_mask_and_set_register_interruptible(&pdev->dev,
ab8500_reg_init[id].bank,
ab8500_reg_init[id].addr,
- ab8500_reg_init[id].mask,
- value);
+ mask, value);
if (err < 0) {
dev_err(&pdev->dev,
"Failed to initialize 0x%02x, 0x%02x.\n",
@@ -796,10 +1057,32 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
" init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
ab8500_reg_init[id].bank,
ab8500_reg_init[id].addr,
- ab8500_reg_init[id].mask,
- value);
+ mask, value);
+ }
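
The loop above applies board-specific register initialization but still sanity-checks each entry: the id must name a known register, the value must lie within the caller's mask, and the caller's mask must stay within the bits the driver itself is allowed to touch. A user-space sketch of the same three checks, returning error codes instead of BUG_ON; NUM_REGS and allowed_mask[] are invented stand-ins.

#include <stdint.h>
#include <stdio.h>

#define NUM_REGS 4	/* stand-in for AB8500_NUM_REGULATOR_REGISTERS */

/* Bits this (illustrative) driver may modify, per register id. */
static const uint8_t allowed_mask[NUM_REGS] = { 0xfc, 0x0f, 0xfe, 0x03 };

static int check_reg_init(int id, uint8_t mask, uint8_t value)
{
	if (id < 0 || id >= NUM_REGS)
		return -1;		/* id outside range */
	if (value & ~mask)
		return -2;		/* value outside caller's mask */
	if (mask & ~allowed_mask[id])
		return -3;		/* mask touches forbidden bits */
	return 0;
}

int main(void)
{
	printf("%d\n", check_reg_init(0, 0xf0, 0x30));	/* 0: ok */
	printf("%d\n", check_reg_init(1, 0x0f, 0x30));	/* -2: value outside mask */
	printf("%d\n", check_reg_init(3, 0x0c, 0x04));	/* -3: mask not allowed */
	return 0;
}
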
+
+ /*
+ * This changes the default setting for VextSupply3Regu to low power.
+ * Whether the line is active high or low depends on the OTP, which
+ * changed from AB8500 v3.0. Remove this when AB8500 v2.0 no longer
+ * needs to be supported. It only affects power consumption and
+ * depends on the HREF OTP configuration.
+ */
+ if (abx500_get_chip_id(&pdev->dev) < 0x30) {
+ err = abx500_mask_and_set_register_interruptible(&pdev->dev,
+ AB8500_REGU_CTRL2, 0x08, 0x30, 0x30);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Failed to override 0x%02x, 0x%02x.\n",
+ AB8500_REGU_CTRL2, 0x08);
+ return err;
+ }
}
+ /* register external regulators (before Vaux1, 2 and 3) */
+ err = ab8500_ext_regulator_init(pdev);
+ if (err)
+ return err;
+
/* register all regulators */
for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
struct ab8500_regulator_info *info = NULL;
@@ -844,7 +1127,7 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
{
- int i;
+ int i, err;
for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
struct ab8500_regulator_info *info = NULL;
@@ -856,6 +1139,16 @@ static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
regulator_unregister(info->regulator);
}
+ /* remove external regulators (after Vaux1, 2 and 3) */
+ err = ab8500_ext_regulator_exit(pdev);
+ if (err)
+ return err;
+
+ /* remove regulator debug */
+ err = ab8500_regulator_debug_exit(pdev);
+ if (err)
+ return err;
+
return 0;
}
@@ -888,5 +1181,6 @@ module_exit(ab8500_regulator_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>");
MODULE_DESCRIPTION("Regulator Driver for ST-Ericsson AB8500 Mixed-Sig PMIC");
MODULE_ALIAS("platform:ab8500-regulator");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e9a83f84ada..83f28611bf9 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -87,6 +87,7 @@ struct regulator {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
+ int use;
};
static int _regulator_is_enabled(struct regulator_dev *rdev);
@@ -205,11 +206,13 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
*/
if (!regulator->min_uV && !regulator->max_uV)
continue;
-
- if (*max_uV > regulator->max_uV)
- *max_uV = regulator->max_uV;
- if (*min_uV < regulator->min_uV)
- *min_uV = regulator->min_uV;
+
+ if (regulator->use) {
+ if (*max_uV > regulator->max_uV)
+ *max_uV = regulator->max_uV;
+ if (*min_uV < regulator->min_uV)
+ *min_uV = regulator->min_uV;
+ }
}
if (*min_uV > *max_uV)
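
With the change above, a consumer's voltage request only narrows the allowed range while that consumer actually has the regulator enabled (use > 0); constraints from idle consumers are skipped. A small user-space sketch of the aggregation with hypothetical consumer data follows.

#include <stdio.h>

struct consumer {
	int min_uv, max_uv;
	int use;		/* enable count, as added in this patch */
};

/* Illustrative: shrink [*min_uv, *max_uv] using only active consumers. */
static int aggregate(struct consumer *c, int n, int *min_uv, int *max_uv)
{
	for (int i = 0; i < n; i++) {
		if (!c[i].min_uv && !c[i].max_uv)
			continue;
		if (!c[i].use)
			continue;
		if (*max_uv > c[i].max_uv)
			*max_uv = c[i].max_uv;
		if (*min_uv < c[i].min_uv)
			*min_uv = c[i].min_uv;
	}
	return (*min_uv > *max_uv) ? -1 : 0;
}

int main(void)
{
	struct consumer c[] = {
		{ 1800000, 1800000, 1 },	/* active, pins the rail to 1.8 V */
		{ 2500000, 2900000, 0 },	/* idle, ignored after this patch */
	};
	int min_uv = 1100000, max_uv = 3300000;

	printf("%d -> [%d, %d]\n", aggregate(c, 2, &min_uv, &max_uv),
	       min_uv, max_uv);
	return 0;
}
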
@@ -608,6 +611,32 @@ static ssize_t regulator_suspend_standby_state_show(struct device *dev,
static DEVICE_ATTR(suspend_standby_state, 0444,
regulator_suspend_standby_state_show, NULL);
+static ssize_t regulator_use_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ struct regulator *reg;
+ size_t size = 0;
+
+ if (rdev->use_count == 0)
+ return sprintf(buf, "no users\n");
+
+ list_for_each_entry(reg, &rdev->consumer_list, list) {
+ if (!reg->use)
+ continue;
+
+ if (reg->dev != NULL)
+ size += sprintf((buf + size), "%s (%d) ",
+ dev_name(reg->dev), reg->use);
+ else
+ size += sprintf((buf + size), "unknown (%d) ",
+ reg->use);
+ }
+ size += sprintf((buf + size), "\n");
+
+ return size;
+}
+static DEVICE_ATTR(use, 0444, regulator_use_show, NULL);
/*
* These are the only attributes are present for all regulators.
@@ -1489,6 +1518,8 @@ int regulator_enable(struct regulator *regulator)
if (ret != 0 && rdev->supply)
regulator_disable(rdev->supply);
+ else
+ regulator->use++;
return ret;
}
@@ -1562,6 +1593,9 @@ int regulator_disable(struct regulator *regulator)
if (ret == 0 && rdev->supply)
regulator_disable(rdev->supply);
+ if (ret == 0)
+ regulator->use--;
+
return ret;
}
EXPORT_SYMBOL_GPL(regulator_disable);
@@ -2601,6 +2635,10 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
struct regulator_ops *ops = rdev->desc->ops;
int status = 0;
+ status = device_create_file(dev, &dev_attr_use);
+ if (status < 0)
+ dev_warn(dev, "Creating sysfs file \"use\" failed\n");
+
/* some attributes need specific methods to be displayed */
if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
(ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0)) {
diff --git a/drivers/regulator/db5500-prcmu.c b/drivers/regulator/db5500-prcmu.c
new file mode 100644
index 00000000000..189362ab8e0
--- /dev/null
+++ b/drivers/regulator/db5500-prcmu.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * Power domain regulators on DB5500
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/db5500-prcmu.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include "dbx500-prcmu.h"
+static int db5500_regulator_enable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-%s-enable\n",
+ info->desc.name);
+
+ if (!info->is_enabled) {
+ info->is_enabled = true;
+ if (!info->exclude_from_power_state)
+ power_state_active_enable();
+ }
+
+ return 0;
+}
+
+static int db5500_regulator_disable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret = 0;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-%s-disable\n",
+ info->desc.name);
+
+ if (info->is_enabled) {
+ info->is_enabled = false;
+ if (!info->exclude_from_power_state)
+ ret = power_state_active_disable();
+ }
+
+ return ret;
+}
+
+static int db5500_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-%s-is_enabled (is_enabled):"
+ " %i\n", info->desc.name, info->is_enabled);
+
+ return info->is_enabled;
+}
+
+/* db5500 regulator operations */
+static struct regulator_ops db5500_regulator_ops = {
+ .enable = db5500_regulator_enable,
+ .disable = db5500_regulator_disable,
+ .is_enabled = db5500_regulator_is_enabled,
+};
+
+/*
+ * EPOD control
+ */
+static bool epod_on[NUM_EPOD_ID];
+static bool epod_ramret[NUM_EPOD_ID];
+
+static inline int epod_id_to_index(u16 epod_id)
+{
+ return epod_id - DB5500_EPOD_ID_BASE;
+}
+
+static int enable_epod(u16 epod_id, bool ramret)
+{
+ int idx = epod_id_to_index(epod_id);
+ int ret;
+
+ if (ramret) {
+ if (!epod_on[idx]) {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
+ if (ret < 0)
+ return ret;
+ }
+ epod_ramret[idx] = true;
+ } else {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_ON);
+ if (ret < 0)
+ return ret;
+ epod_on[idx] = true;
+ }
+
+ return 0;
+}
+
+static int disable_epod(u16 epod_id, bool ramret)
+{
+ int idx = epod_id_to_index(epod_id);
+ int ret;
+
+ if (ramret) {
+ if (!epod_on[idx]) {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
+ if (ret < 0)
+ return ret;
+ }
+ epod_ramret[idx] = false;
+ } else {
+ if (epod_ramret[idx]) {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
+ if (ret < 0)
+ return ret;
+ }
+ epod_on[idx] = false;
+ }
+
+ return 0;
+}
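
enable_epod()/disable_epod() above track two flags per power domain, full-power "on" and RAM-retention "ramret", and only request RAM retention when the domain is not fully powered. The state the driver converges to can be summarized by the small sketch below; the names are illustrative and not the PRCMU API.

#include <stdio.h>
#include <stdbool.h>

enum epod_state { EPOD_OFF, EPOD_RAMRET, EPOD_ON };

/* Illustrative: the state the driver ends up requesting from the PRCMU. */
static enum epod_state target_state(bool on, bool ramret)
{
	if (on)
		return EPOD_ON;		/* full power wins over retention */
	if (ramret)
		return EPOD_RAMRET;	/* keep RAM contents only */
	return EPOD_OFF;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       target_state(false, false),	/* OFF */
	       target_state(false, true),	/* RAMRET */
	       target_state(true,  false),	/* ON */
	       target_state(true,  true));	/* ON */
	return 0;
}
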
+
+/*
+ * Regulator switch
+ */
+static int db5500_regulator_switch_enable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-enable\n",
+ info->desc.name);
+
+ ret = enable_epod(info->epod_id, info->is_ramret);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "regulator-switch-%s-enable: prcmu call failed\n",
+ info->desc.name);
+ goto out;
+ }
+
+ info->is_enabled = true;
+out:
+ return ret;
+}
+
+static int db5500_regulator_switch_disable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-disable\n",
+ info->desc.name);
+
+ ret = disable_epod(info->epod_id, info->is_ramret);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "regulator-switch-%s-disable: prcmu call failed\n",
+ info->desc.name);
+ goto out;
+ }
+
+ info->is_enabled = false;
+out:
+ return ret;
+}
+
+static int db5500_regulator_switch_is_enabled(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "regulator-switch-%s-is_enabled (is_enabled): %i\n",
+ info->desc.name, info->is_enabled);
+
+ return info->is_enabled;
+}
+
+static struct regulator_ops db5500_regulator_switch_ops = {
+ .enable = db5500_regulator_switch_enable,
+ .disable = db5500_regulator_switch_disable,
+ .is_enabled = db5500_regulator_switch_is_enabled,
+};
+
+/*
+ * Regulator information
+ */
+#define DB5500_REGULATOR_SWITCH(_name, reg) \
+ [DB5500_REGULATOR_SWITCH_##reg] = { \
+ .desc = { \
+ .name = _name, \
+ .id = DB5500_REGULATOR_SWITCH_##reg, \
+ .ops = &db5500_regulator_switch_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ .epod_id = DB5500_EPOD_ID_##reg, \
+}
+
+static struct dbx500_regulator_info
+ dbx500_regulator_info[DB5500_NUM_REGULATORS] = {
+ [DB5500_REGULATOR_VAPE] = {
+ .desc = {
+ .name = "db5500-vape",
+ .id = DB5500_REGULATOR_VAPE,
+ .ops = &db5500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ },
+ DB5500_REGULATOR_SWITCH("db5500-sga", SGA),
+ DB5500_REGULATOR_SWITCH("db5500-hva", HVA),
+ DB5500_REGULATOR_SWITCH("db5500-sia", SIA),
+ DB5500_REGULATOR_SWITCH("db5500-disp", DISP),
+ DB5500_REGULATOR_SWITCH("db5500-esram12", ESRAM12),
+};
+
+static int __devinit db5500_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_init_data *db5500_init_data =
+ dev_get_platdata(&pdev->dev);
+ int i, err;
+
+ /* register all regulators */
+ for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+ struct dbx500_regulator_info *info;
+ struct regulator_init_data *init_data = &db5500_init_data[i];
+
+ /* assign per-regulator data */
+ info = &dbx500_regulator_info[i];
+ info->dev = &pdev->dev;
+
+ /* register with the regulator framework */
+ info->rdev = regulator_register(&info->desc, &pdev->dev,
+ init_data, info);
+ if (IS_ERR(info->rdev)) {
+ err = PTR_ERR(info->rdev);
+ dev_err(&pdev->dev, "failed to register %s: err %i\n",
+ info->desc.name, err);
+
+ /* if failing, unregister all earlier regulators */
+ i--;
+ while (i >= 0) {
+ info = &dbx500_regulator_info[i];
+ regulator_unregister(info->rdev);
+ i--;
+ }
+ return err;
+ }
+
+ dev_dbg(rdev_get_dev(info->rdev),
+ "regulator-%s-probed\n", info->desc.name);
+ }
+
+ return 0;
+}
+
+static int __exit db5500_regulator_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+ struct dbx500_regulator_info *info;
+ info = &dbx500_regulator_info[i];
+
+ dev_vdbg(rdev_get_dev(info->rdev),
+ "regulator-%s-remove\n", info->desc.name);
+
+ regulator_unregister(info->rdev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver db5500_regulator_driver = {
+ .driver = {
+ .name = "db5500-prcmu-regulators",
+ .owner = THIS_MODULE,
+ },
+ .probe = db5500_regulator_probe,
+ .remove = __exit_p(db5500_regulator_remove),
+};
+
+static int __init db5500_regulator_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&db5500_regulator_driver);
+ if (ret < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit db5500_regulator_exit(void)
+{
+ platform_driver_unregister(&db5500_regulator_driver);
+}
+
+arch_initcall(db5500_regulator_init);
+module_exit(db5500_regulator_exit);
+
+MODULE_AUTHOR("STMicroelectronics/ST-Ericsson");
+MODULE_DESCRIPTION("DB5500 regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 515443fcd26..4bd25e75efa 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -18,74 +18,11 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/module.h>
-
-/*
- * power state reference count
- */
-static int power_state_active_cnt; /* will initialize to zero */
-static DEFINE_SPINLOCK(power_state_active_lock);
-
-static void power_state_active_enable(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&power_state_active_lock, flags);
- power_state_active_cnt++;
- spin_unlock_irqrestore(&power_state_active_lock, flags);
-}
-
-static int power_state_active_disable(void)
-{
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&power_state_active_lock, flags);
- if (power_state_active_cnt <= 0) {
- pr_err("power state: unbalanced enable/disable calls\n");
- ret = -EINVAL;
- goto out;
- }
-
- power_state_active_cnt--;
-out:
- spin_unlock_irqrestore(&power_state_active_lock, flags);
- return ret;
-}
-
-/*
- * Exported interface for CPUIdle only. This function is called when interrupts
- * are turned off. Hence, no locking.
- */
-int power_state_active_is_enabled(void)
-{
- return (power_state_active_cnt > 0);
-}
-
-/**
- * struct db8500_regulator_info - db8500 regulator information
- * @dev: device pointer
- * @desc: regulator description
- * @rdev: regulator device pointer
- * @is_enabled: status of the regulator
- * @epod_id: id for EPOD (power domain)
- * @is_ramret: RAM retention switch for EPOD (power domain)
- * @operating_point: operating point (only for vape, to be removed)
- *
- */
-struct db8500_regulator_info {
- struct device *dev;
- struct regulator_desc desc;
- struct regulator_dev *rdev;
- bool is_enabled;
- u16 epod_id;
- bool is_ramret;
- bool exclude_from_power_state;
- unsigned int operating_point;
-};
+#include "dbx500-prcmu.h"
static int db8500_regulator_enable(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
if (info == NULL)
return -EINVAL;
@@ -93,16 +30,18 @@ static int db8500_regulator_enable(struct regulator_dev *rdev)
dev_vdbg(rdev_get_dev(rdev), "regulator-%s-enable\n",
info->desc.name);
- info->is_enabled = true;
- if (!info->exclude_from_power_state)
- power_state_active_enable();
+ if (!info->is_enabled) {
+ info->is_enabled = true;
+ if (!info->exclude_from_power_state)
+ power_state_active_enable();
+ }
return 0;
}
static int db8500_regulator_disable(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
int ret = 0;
if (info == NULL)
@@ -111,16 +50,18 @@ static int db8500_regulator_disable(struct regulator_dev *rdev)
dev_vdbg(rdev_get_dev(rdev), "regulator-%s-disable\n",
info->desc.name);
- info->is_enabled = false;
- if (!info->exclude_from_power_state)
- ret = power_state_active_disable();
+ if (info->is_enabled) {
+ info->is_enabled = false;
+ if (!info->exclude_from_power_state)
+ ret = power_state_active_disable();
+ }
return ret;
}
static int db8500_regulator_is_enabled(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
if (info == NULL)
return -EINVAL;
@@ -197,7 +138,7 @@ static int disable_epod(u16 epod_id, bool ramret)
*/
static int db8500_regulator_switch_enable(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
int ret;
if (info == NULL)
@@ -221,7 +162,7 @@ out:
static int db8500_regulator_switch_disable(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
int ret;
if (info == NULL)
@@ -245,7 +186,7 @@ out:
static int db8500_regulator_switch_is_enabled(struct regulator_dev *rdev)
{
- struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
if (info == NULL)
return -EINVAL;
@@ -266,8 +207,8 @@ static struct regulator_ops db8500_regulator_switch_ops = {
/*
* Regulator information
*/
-static struct db8500_regulator_info
-db8500_regulator_info[DB8500_NUM_REGULATORS] = {
+static struct dbx500_regulator_info
+dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VAPE] = {
.desc = {
.name = "db8500-vape",
@@ -476,12 +417,12 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
int i, err;
/* register all regulators */
- for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) {
- struct db8500_regulator_info *info;
+ for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+ struct dbx500_regulator_info *info;
struct regulator_init_data *init_data = &db8500_init_data[i];
/* assign per-regulator data */
- info = &db8500_regulator_info[i];
+ info = &dbx500_regulator_info[i];
info->dev = &pdev->dev;
/* register with the regulator framework */
@@ -494,7 +435,7 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
/* if failing, unregister all earlier regulators */
while (--i >= 0) {
- info = &db8500_regulator_info[i];
+ info = &dbx500_regulator_info[i];
regulator_unregister(info->rdev);
}
return err;
@@ -503,17 +444,22 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
dev_dbg(rdev_get_dev(info->rdev),
"regulator-%s-probed\n", info->desc.name);
}
+ err = ux500_regulator_debug_init(pdev,
+ dbx500_regulator_info,
+ ARRAY_SIZE(dbx500_regulator_info));
- return 0;
+ return err;
}
static int __exit db8500_regulator_remove(struct platform_device *pdev)
{
int i;
- for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) {
- struct db8500_regulator_info *info;
- info = &db8500_regulator_info[i];
+ ux500_regulator_debug_exit();
+
+ for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+ struct dbx500_regulator_info *info;
+ info = &dbx500_regulator_info[i];
dev_vdbg(rdev_get_dev(info->rdev),
"regulator-%s-remove\n", info->desc.name);
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
new file mode 100644
index 00000000000..33544db7d63
--- /dev/null
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * UX500 common part of Power domain regulators
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/regulator/driver.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include "dbx500-prcmu.h"
+
+/*
+ * power state reference count
+ */
+static int power_state_active_cnt; /* will initialize to zero */
+static DEFINE_SPINLOCK(power_state_active_lock);
+
+int power_state_active_get(void)
+{
+ unsigned long flags;
+ int cnt;
+
+ spin_lock_irqsave(&power_state_active_lock, flags);
+ cnt = power_state_active_cnt;
+ spin_unlock_irqrestore(&power_state_active_lock, flags);
+
+ return cnt;
+}
+
+void power_state_active_enable(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&power_state_active_lock, flags);
+ power_state_active_cnt++;
+ spin_unlock_irqrestore(&power_state_active_lock, flags);
+}
+
+int power_state_active_disable(void)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&power_state_active_lock, flags);
+ if (power_state_active_cnt <= 0) {
+ pr_err("power state: unbalanced enable/disable calls\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ power_state_active_cnt--;
+out:
+ spin_unlock_irqrestore(&power_state_active_lock, flags);
+ return ret;
+}
+
+/*
+ * Exported interface for CPUIdle only. This function is called when interrupts
+ * are turned off. Hence, no locking.
+ */
+int power_state_active_is_enabled(void)
+{
+ return (power_state_active_cnt > 0);
+}
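
The helpers above keep a spinlock-protected reference count of regulators that require the ACTIVE power state, and an unbalanced disable is reported rather than letting the count go negative. A stripped-down user-space sketch of the same discipline (plain ints, no locking) follows.

#include <stdio.h>

static int active_cnt;

static void active_enable(void)
{
	active_cnt++;
}

static int active_disable(void)
{
	if (active_cnt <= 0) {
		fprintf(stderr, "unbalanced enable/disable\n");
		return -1;
	}
	active_cnt--;
	return 0;
}

int main(void)
{
	active_enable();
	printf("%d\n", active_disable());	/* 0 */
	printf("%d\n", active_disable());	/* -1, unbalanced */
	return 0;
}
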
+
+struct ux500_regulator {
+ char *name;
+ void (*enable)(void);
+ int (*disable)(void);
+};
+static struct ux500_regulator ux500_atomic_regulators[] = {
+ {
+ .name = "dma40.0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "ssp0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "ssp1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi2",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi3",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "cryp1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "hash1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+};
+
+struct ux500_regulator *__must_check ux500_regulator_get(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ux500_atomic_regulators); i++) {
+ if (!strcmp(dev_name(dev), ux500_atomic_regulators[i].name))
+ return &ux500_atomic_regulators[i];
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_get);
+
+int ux500_regulator_atomic_enable(struct ux500_regulator *regulator)
+{
+ if (regulator) {
+ regulator->enable();
+ return 0;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_atomic_enable);
+
+int ux500_regulator_atomic_disable(struct ux500_regulator *regulator)
+{
+ if (regulator)
+ return regulator->disable();
+ else
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_atomic_disable);
+
+void ux500_regulator_put(struct ux500_regulator *regulator)
+{
+ /* Kept for API symmetry and possible future use */
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_put);
+
+#ifdef CONFIG_REGULATOR_DEBUG
+
+static struct ux500_regulator_debug {
+ struct dentry *dir;
+ struct dentry *status_file;
+ struct dentry *power_state_cnt_file;
+ struct dbx500_regulator_info *regulator_array;
+ int num_regulators;
+ u8 *state_before_suspend;
+ u8 *state_after_suspend;
+} rdebug;
+
+void ux500_regulator_suspend_debug(void)
+{
+ int i;
+ for (i = 0; i < rdebug.num_regulators; i++)
+ rdebug.state_before_suspend[i] =
+ rdebug.regulator_array[i].is_enabled;
+}
+
+void ux500_regulator_resume_debug(void)
+{
+ int i;
+ for (i = 0; i < rdebug.num_regulators; i++)
+ rdebug.state_after_suspend[i] =
+ rdebug.regulator_array[i].is_enabled;
+}
+
+static int ux500_regulator_power_state_cnt_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ int err;
+
+ /* print power state count */
+ err = seq_printf(s, "ux500-regulator power state count: %i\n",
+ power_state_active_get());
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ return 0;
+}
+
+static int ux500_regulator_power_state_cnt_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ux500_regulator_power_state_cnt_print,
+ inode->i_private);
+}
+
+static const struct file_operations ux500_regulator_power_state_cnt_fops = {
+ .open = ux500_regulator_power_state_cnt_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ux500_regulator_status_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ int err;
+ int i;
+
+ /* print dump header */
+ err = seq_printf(s, "ux500-regulator status:\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ err = seq_printf(s, "%31s : %8s : %8s\n", "current",
+ "before", "after");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ for (i = 0; i < rdebug.num_regulators; i++) {
+ struct dbx500_regulator_info *info;
+ /* Access per-regulator data */
+ info = &rdebug.regulator_array[i];
+
+ /* print status */
+ err = seq_printf(s, "%20s : %8s : %8s : %8s\n", info->desc.name,
+ info->is_enabled ? "enabled" : "disabled",
+ rdebug.state_before_suspend[i] ? "enabled" : "disabled",
+ rdebug.state_after_suspend[i] ? "enabled" : "disabled");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+ }
+
+ return 0;
+}
+
+static int ux500_regulator_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ux500_regulator_status_print,
+ inode->i_private);
+}
+
+static const struct file_operations ux500_regulator_status_fops = {
+ .open = ux500_regulator_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+int __attribute__((weak)) dbx500_regulator_testcase(
+ struct dbx500_regulator_info *regulator_info,
+ int num_regulators)
+{
+ return 0;
+}
+
+int __devinit
+ux500_regulator_debug_init(struct platform_device *pdev,
+ struct dbx500_regulator_info *regulator_info,
+ int num_regulators)
+{
+ /* create directory */
+ rdebug.dir = debugfs_create_dir("ux500-regulator", NULL);
+ if (!rdebug.dir)
+ goto exit_no_debugfs;
+
+ /* create "status" file */
+ rdebug.status_file = debugfs_create_file("status",
+ S_IRUGO, rdebug.dir, &pdev->dev,
+ &ux500_regulator_status_fops);
+ if (!rdebug.status_file)
+ goto exit_destroy_dir;
+
+ /* create "power-state-count" file */
+ rdebug.power_state_cnt_file = debugfs_create_file("power-state-count",
+ S_IRUGO, rdebug.dir, &pdev->dev,
+ &ux500_regulator_power_state_cnt_fops);
+ if (!rdebug.power_state_cnt_file)
+ goto exit_destroy_status;
+
+ rdebug.regulator_array = regulator_info;
+ rdebug.num_regulators = num_regulators;
+
+ rdebug.state_before_suspend = kzalloc(num_regulators, GFP_KERNEL);
+ if (!rdebug.state_before_suspend) {
+ dev_err(&pdev->dev,
+ "could not allocate memory for saving state\n");
+ goto exit_destroy_power_state;
+ }
+
+ rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL);
+ if (!rdebug.state_after_suspend) {
+ dev_err(&pdev->dev,
+ "could not allocate memory for saving state\n");
+ goto exit_free;
+ }
+
+ dbx500_regulator_testcase(regulator_info, num_regulators);
+ return 0;
+
+exit_free:
+ kfree(rdebug.state_before_suspend);
+exit_destroy_power_state:
+ debugfs_remove(rdebug.power_state_cnt_file);
+exit_destroy_status:
+ debugfs_remove(rdebug.status_file);
+exit_destroy_dir:
+ debugfs_remove(rdebug.dir);
+exit_no_debugfs:
+ dev_err(&pdev->dev, "failed to create debugfs entries.\n");
+ return -ENOMEM;
+}
+
+int __devexit ux500_regulator_debug_exit(void)
+{
+ debugfs_remove_recursive(rdebug.dir);
+ kfree(rdebug.state_after_suspend);
+ kfree(rdebug.state_before_suspend);
+
+ return 0;
+}
+#endif
diff --git a/drivers/regulator/dbx500-prcmu.h b/drivers/regulator/dbx500-prcmu.h
new file mode 100644
index 00000000000..f7e20fe075a
--- /dev/null
+++ b/drivers/regulator/dbx500-prcmu.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Bengt Jonsson <bengt.jonsson@stericsson.com> for ST-Ericsson,
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License Terms: GNU General Public License v2
+ *
+ */
+
+#ifndef DBX500_REGULATOR_H
+#define DBX500_REGULATOR_H
+
+#include <linux/platform_device.h>
+
+/**
+ * struct dbx500_regulator_info - dbx500 regulator information
+ * @dev: device pointer
+ * @desc: regulator description
+ * @rdev: regulator device pointer
+ * @is_enabled: status of the regulator
+ * @epod_id: id for EPOD (power domain)
+ * @is_ramret: RAM retention switch for EPOD (power domain)
+ * @operating_point: operating point (only for vape, to be removed)
+ *
+ */
+struct dbx500_regulator_info {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_dev *rdev;
+ bool is_enabled;
+ u16 epod_id;
+ bool is_ramret;
+ bool exclude_from_power_state;
+ unsigned int operating_point;
+};
+
+void power_state_active_enable(void);
+int power_state_active_disable(void);
+
+
+#ifdef CONFIG_REGULATOR_DEBUG
+int ux500_regulator_debug_init(struct platform_device *pdev,
+ struct dbx500_regulator_info *regulator_info,
+ int num_regulators);
+
+int ux500_regulator_debug_exit(void);
+#else
+
+static inline int ux500_regulator_debug_init(struct platform_device *pdev,
+ struct dbx500_regulator_info *regulator_info,
+ int num_regulators) { return 0; }
+
+static inline int ux500_regulator_debug_exit(void) { return 0; }
+#endif
+#endif
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 3a125b83554..10393974127 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -697,6 +697,13 @@ config RTC_DRV_PCF50633
If you say yes here you get support for the RTC subsystem of the
NXP PCF50633 used in embedded systems.
+config RTC_DRV_AB
+ tristate "ST-Ericsson AB5500 RTC"
+ depends on AB5500_CORE
+ help
+ Select this to enable the ST-Ericsson AB5500 Mixed Signal IC RTC
+ support. This chip contains a battery- and capacitor-backed RTC.
+
config RTC_DRV_AB3100
tristate "ST-Ericsson AB3100 RTC"
depends on AB3100_CORE
@@ -708,6 +715,7 @@ config RTC_DRV_AB3100
config RTC_DRV_AB8500
tristate "ST-Ericsson AB8500 RTC"
depends on AB8500_CORE
+ select RTC_INTF_DEV_UIE_EMUL
help
Select this to enable the ST-Ericsson AB8500 power management IC RTC
support. This chip contains a battery- and capacitor-backed RTC.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 6e6982335c1..a69992dd1cb 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -16,6 +16,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
# Keep the list ordered.
obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o
+obj-$(CONFIG_RTC_DRV_AB) += rtc-ab.o
obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8a1c031391d..319e419d4a4 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -319,6 +319,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
+static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+ int err;
+
+ if (!rtc->ops)
+ err = -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ err = -EINVAL;
+ else
+ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+ return err;
+}
+
static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
struct rtc_time tm;
@@ -342,14 +356,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
* over right here, before we set the alarm.
*/
- if (!rtc->ops)
- err = -ENODEV;
- else if (!rtc->ops->set_alarm)
- err = -EINVAL;
- else
- err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
- return err;
+ return ___rtc_set_alarm(rtc, alarm);
}
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -763,6 +770,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
return 0;
}
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+ struct rtc_wkalrm alarm;
+ struct rtc_time tm;
+
+ __rtc_read_time(rtc, &tm);
+
+ alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
+ ktime_set(300, 0)));
+ alarm.enabled = 0;
+
+ ___rtc_set_alarm(rtc, &alarm);
+}
+
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
* @rtc rtc device
@@ -784,8 +805,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
struct rtc_wkalrm alarm;
int err;
next = timerqueue_getnext(&rtc->timerqueue);
- if (!next)
+ if (!next) {
+ rtc_alarm_disable(rtc);
return;
+ }
alarm.time = rtc_ktime_to_tm(next->expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
@@ -847,7 +870,8 @@ again:
err = __rtc_set_alarm(rtc, &alarm);
if (err == -ETIME)
goto again;
- }
+ } else
+ rtc_alarm_disable(rtc);
mutex_unlock(&rtc->ops_lock);
}
diff --git a/drivers/rtc/rtc-ab.c b/drivers/rtc/rtc-ab.c
new file mode 100644
index 00000000000..393dd5a0e09
--- /dev/null
+++ b/drivers/rtc/rtc-ab.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+#define AB5500_RTC_CLOCK_RATE 32768
+#define AB5500_RTC 0x00
+#define AB5500_RTC_ALARM (1 << 1)
+#define AB5500_READREQ 0x01
+#define AB5500_READREQ_REQ 0x01
+#define AB5500_AL0 0x02
+#define AB5500_TI0 0x06
+
+/**
+ * struct ab_rtc - variant specific data
+ * @irqname: optional name for the alarm interrupt resource
+ * @epoch: epoch to adjust year to
+ * @bank: AB bank where this block is present
+ * @rtc: address of the "RTC" (control) register
+ * @rtc_alarmon: mask of the alarm enable bit in the above register
+ * @ti0: address of the TI0 register. The rest of the TI
+ * registers are assumed to contiguously follow this one.
+ * @nr_ti: number of TI* registers
+ * @al0: address of the AL0 register. The rest of the
+ * AL registers are assumed to contiguously follow this one.
+ * @nr_al: number of AL* registers
+ * @startup: optional function to initialize the RTC
+ * @alarm_to_regs: function to convert alarm time in seconds
+ * to a list of AL register values
+ * @time_to_regs: function to convert alarm time in seconds
+ * to a list of TI register values
+ * @regs_to_alarm: function to convert a list of AL register
+ * values to the alarm time in seconds
+ * @regs_to_time: function to convert a list of TI register
+ * values to the alarm time in seconds
+ * @request_read: optional function to request a read from the TI* registers
+ * @request_write: optional function to request a write to the TI* registers
+ */
+struct ab_rtc {
+ const char *irqname;
+ unsigned int epoch;
+
+ u8 bank;
+ u8 rtc;
+ u8 rtc_alarmon;
+ u8 ti0;
+ int nr_ti;
+ u8 al0;
+ int nr_al;
+
+ int (*startup)(struct device *dev);
+ void (*alarm_to_regs)(struct device *dev, unsigned long secs, u8 *regs);
+ void (*time_to_regs)(struct device *dev, unsigned long secs, u8 *regs);
+ unsigned long (*regs_to_alarm)(struct device *dev, u8 *regs);
+ unsigned long (*regs_to_time)(struct device *dev, u8 *regs);
+ int (*request_read)(struct device *dev);
+ int (*request_write)(struct device *dev);
+};
+
+static const struct ab_rtc *to_ab_rtc(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ return (struct ab_rtc *)pdev->id_entry->driver_data;
+}
+
+/* Seconds elapsed from 1970 up to the start of the given year, for epoch adjustment */
+static unsigned long ab_rtc_get_elapsed_seconds(unsigned int year)
+{
+ unsigned long secs;
+ struct rtc_time tm = {
+ .tm_year = year - 1900,
+ .tm_mday = 1,
+ };
+
+ rtc_tm_to_time(&tm, &secs);
+
+ return secs;
+}
+
+static int ab5500_rtc_request_read(struct device *dev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned long timeout;
+ int err;
+
+ err = abx500_set_register_interruptible(dev, variant->bank,
+ AB5500_READREQ,
+ AB5500_READREQ_REQ);
+ if (err < 0)
+ return err;
+
+ timeout = jiffies + HZ;
+ while (time_before(jiffies, timeout)) {
+ u8 value;
+
+ err = abx500_get_register_interruptible(dev, variant->bank,
+ AB5500_READREQ, &value);
+ if (err < 0)
+ return err;
+
+ if (!(value & AB5500_READREQ_REQ))
+ return 0;
+
+ msleep(1);
+ }
+
+ return -EIO;
+}
+
+static void
+ab5500_rtc_time_to_regs(struct device *dev, unsigned long secs, u8 *regs)
+{
+ unsigned long mins = secs / 60;
+ u64 fat_time;
+
+ secs %= 60;
+
+ fat_time = secs * AB5500_RTC_CLOCK_RATE;
+ fat_time |= (u64)mins << 21;
+
+ regs[0] = (fat_time) & 0xFF;
+ regs[1] = (fat_time >> 8) & 0xFF;
+ regs[2] = (fat_time >> 16) & 0xFF;
+ regs[3] = (fat_time >> 24) & 0xFF;
+ regs[4] = (fat_time >> 32) & 0xFF;
+ regs[5] = (fat_time >> 40) & 0xFF;
+}
+
+static unsigned long
+ab5500_rtc_regs_to_time(struct device *dev, u8 *regs)
+{
+ u64 fat_time = ((u64)regs[5] << 40) | ((u64)regs[4] << 32) |
+ ((u64)regs[3] << 24) | ((u64)regs[2] << 16) |
+ ((u64)regs[1] << 8) | regs[0];
+ unsigned long secs = (fat_time & 0x1fffff) / AB5500_RTC_CLOCK_RATE;
+ unsigned long mins = fat_time >> 21;
+
+ return mins * 60 + secs;
+}
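
The two helpers above pack the time into the six TI registers as one 48-bit value: whole minutes in bits 21 and up, and the leftover seconds scaled by the 32768 Hz RTC clock in the low 21 bits. Below is a standalone round-trip of that encoding, as a user-space sketch reusing the same constants.

#include <stdint.h>
#include <stdio.h>

#define CLOCK_RATE 32768UL

static uint64_t secs_to_fat(unsigned long secs)
{
	unsigned long mins = secs / 60;

	/* seconds remainder scaled by 32768 always fits in 21 bits */
	return ((uint64_t)mins << 21) | ((secs % 60) * CLOCK_RATE);
}

static unsigned long fat_to_secs(uint64_t fat)
{
	return (unsigned long)(fat >> 21) * 60 +
	       (unsigned long)((fat & 0x1fffff) / CLOCK_RATE);
}

int main(void)
{
	unsigned long secs = 123456789;	/* arbitrary test value */
	uint64_t fat = secs_to_fat(secs);

	/* only the low 48 bits are stored in TI0..TI5 */
	printf("fat=0x%012llx round-trip=%lu\n",
	       (unsigned long long)(fat & 0xffffffffffffULL), fat_to_secs(fat));
	return 0;
}
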
+
+static void
+ab5500_rtc_alarm_to_regs(struct device *dev, unsigned long secs, u8 *regs)
+{
+ unsigned long mins = secs / 60;
+
+#ifdef CONFIG_ANDROID
+ /*
+ * Round the alarm up one minute because Android assumes all hardware
+ * has a wake-up resolution of seconds.
+ */
+ mins++;
+#endif
+
+ regs[0] = mins & 0xFF;
+ regs[1] = (mins >> 8) & 0xFF;
+ regs[2] = (mins >> 16) & 0xFF;
+}
+
+static unsigned long
+ab5500_rtc_regs_to_alarm(struct device *dev, u8 *regs)
+{
+ unsigned long mins = ((unsigned long)regs[2] << 16) |
+ ((unsigned long)regs[1] << 8) |
+ regs[0];
+ unsigned long secs = mins * 60;
+
+ return secs;
+}
+
+static const struct ab_rtc ab5500_rtc = {
+ .irqname = "RTC_Alarm",
+ .bank = AB5500_BANK_RTC,
+ .rtc = AB5500_RTC,
+ .rtc_alarmon = AB5500_RTC_ALARM,
+ .ti0 = AB5500_TI0,
+ .nr_ti = 6,
+ .al0 = AB5500_AL0,
+ .nr_al = 3,
+ .epoch = 2000,
+ .time_to_regs = ab5500_rtc_time_to_regs,
+ .regs_to_time = ab5500_rtc_regs_to_time,
+ .alarm_to_regs = ab5500_rtc_alarm_to_regs,
+ .regs_to_alarm = ab5500_rtc_regs_to_alarm,
+ .request_read = ab5500_rtc_request_read,
+};
+
+static int ab_rtc_request_read(struct device *dev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (!variant->request_read)
+ return 0;
+
+ return variant->request_read(dev);
+}
+
+static int ab_rtc_request_write(struct device *dev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (!variant->request_write)
+ return 0;
+
+ return variant->request_write(dev);
+}
+
+static bool ab_rtc_valid_time(struct device *dev, struct rtc_time *time)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (!variant->epoch)
+ return true;
+
+ return time->tm_year >= variant->epoch - 1900;
+}
+
+static int
+ab_rtc_tm_to_time(struct device *dev, struct rtc_time *tm, unsigned long *secs)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ rtc_tm_to_time(tm, secs);
+
+ if (variant->epoch)
+ *secs -= ab_rtc_get_elapsed_seconds(variant->epoch);
+
+ return 0;
+}
+
+static int
+ab_rtc_time_to_tm(struct device *dev, unsigned long secs, struct rtc_time *tm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (variant->epoch)
+ secs += ab_rtc_get_elapsed_seconds(variant->epoch);
+
+ rtc_time_to_tm(secs, tm);
+
+ return 0;
+}
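
ab_rtc_tm_to_time()/ab_rtc_time_to_tm() above shift between seconds since 1970 and seconds since the variant's epoch (2000 for AB5500) by subtracting or adding the offset computed in ab_rtc_get_elapsed_seconds(). A user-space sketch of that offset and of the conversion in both directions; elapsed_secs() is an illustrative reimplementation, not the kernel helper.

#include <stdio.h>

/* Illustrative: seconds from 1970-01-01 to <year>-01-01 (UTC). */
static unsigned long elapsed_secs(unsigned int year)
{
	unsigned long days = 0;

	for (unsigned int y = 1970; y < year; y++) {
		int leap = (y % 4 == 0 && y % 100 != 0) || (y % 400 == 0);
		days += leap ? 366 : 365;
	}
	return days * 86400UL;
}

int main(void)
{
	unsigned long epoch_offset = elapsed_secs(2000);	/* AB5500 epoch */
	unsigned long since_1970 = 1300000000UL;		/* a time in 2011 */

	/* what the driver writes to / reads back from the chip */
	unsigned long chip_secs = since_1970 - epoch_offset;

	printf("offset=%lu chip=%lu back=%lu\n",
	       epoch_offset, chip_secs, chip_secs + epoch_offset);
	return 0;
}
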
+
+static int ab_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned char buf[variant->nr_ti];
+ unsigned long secs;
+ int err;
+
+ err = ab_rtc_request_read(dev);
+ if (err)
+ return err;
+
+ err = abx500_get_register_page_interruptible(dev, variant->bank,
+ variant->ti0,
+ buf, variant->nr_ti);
+ if (err)
+ return err;
+
+ secs = variant->regs_to_time(dev, buf);
+ ab_rtc_time_to_tm(dev, secs, tm);
+
+ return rtc_valid_tm(tm);
+}
+
+static int ab_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned char buf[variant->nr_ti];
+ unsigned long secs;
+ u8 reg = variant->ti0;
+ int err;
+ int i;
+
+ if (!ab_rtc_valid_time(dev, tm))
+ return -EINVAL;
+
+ ab_rtc_tm_to_time(dev, tm, &secs);
+ variant->time_to_regs(dev, secs, buf);
+
+ for (i = 0; i < variant->nr_ti; i++, reg++) {
+ err = abx500_set_register_interruptible(dev, variant->bank,
+ reg, buf[i]);
+ if (err)
+ return err;
+ }
+
+ return ab_rtc_request_write(dev);
+}
+
+static int ab_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned long secs;
+ u8 buf[variant->nr_al];
+ u8 rtcval;
+ int err;
+
+ err = abx500_get_register_interruptible(dev, variant->bank,
+ variant->rtc, &rtcval);
+ if (err)
+ return err;
+
+ alarm->enabled = !!(rtcval & variant->rtc_alarmon);
+ alarm->pending = 0;
+
+ err = abx500_get_register_page_interruptible(dev, variant->bank,
+ variant->al0, buf,
+ variant->nr_al);
+ if (err)
+ return err;
+
+ secs = variant->regs_to_alarm(dev, buf);
+ ab_rtc_time_to_tm(dev, secs, &alarm->time);
+
+ return rtc_valid_tm(&alarm->time);
+}
+
+static int ab_rtc_alarm_enable(struct device *dev, unsigned int enabled)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ u8 mask = variant->rtc_alarmon;
+ u8 value = enabled ? mask : 0;
+
+ return abx500_mask_and_set_register_interruptible(dev, variant->bank,
+ variant->rtc, mask,
+ value);
+}
+
+static int ab_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned char buf[variant->nr_al];
+ unsigned long secs;
+ u8 reg = variant->al0;
+ int err;
+ int i;
+
+ if (!ab_rtc_valid_time(dev, &alarm->time))
+ return -EINVAL;
+
+ ab_rtc_tm_to_time(dev, &alarm->time, &secs);
+ variant->alarm_to_regs(dev, secs, buf);
+
+ /*
+ * Disable alarm first. Otherwise the RTC may not detect an alarm
+ * reprogrammed for the same time without disabling the alarm in
+ * between the programmings.
+ */
+ err = ab_rtc_alarm_enable(dev, false);
+ if (err)
+ return err;
+
+ for (i = 0; i < variant->nr_al; i++, reg++) {
+ err = abx500_set_register_interruptible(dev, variant->bank,
+ reg, buf[i]);
+ if (err)
+ return err;
+ }
+
+ return alarm->enabled ? ab_rtc_alarm_enable(dev, true) : 0;
+}
+
+static const struct rtc_class_ops ab_rtc_ops = {
+ .read_time = ab_rtc_read_time,
+ .set_time = ab_rtc_set_time,
+ .read_alarm = ab_rtc_read_alarm,
+ .set_alarm = ab_rtc_set_alarm,
+ .alarm_irq_enable = ab_rtc_alarm_enable,
+};
+
+static irqreturn_t ab_rtc_irq(int irq, void *dev_id)
+{
+ unsigned long events = RTC_IRQF | RTC_AF;
+ struct rtc_device *rtc = dev_id;
+
+ rtc_update_irq(rtc, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit ab_rtc_probe(struct platform_device *pdev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(&pdev->dev);
+ int err;
+ struct rtc_device *rtc;
+ int irq = -ENXIO;
+
+ if (variant->irqname) {
+ irq = platform_get_irq_byname(pdev, variant->irqname);
+ if (irq < 0)
+ return irq;
+ }
+
+ if (variant->startup) {
+ err = variant->startup(&pdev->dev);
+ if (err)
+ return err;
+ }
+
+ rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ dev_err(&pdev->dev, "Registration failed\n");
+ err = PTR_ERR(rtc);
+ return err;
+ }
+
+ if (irq >= 0) {
+ err = request_any_context_irq(irq, ab_rtc_irq,
+ IRQF_NO_SUSPEND,
+ pdev->id_entry->name,
+ rtc);
+ if (err < 0) {
+ dev_err(&pdev->dev, "could not get irq: %d\n", err);
+ goto out_unregister;
+ }
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ return 0;
+
+out_unregister:
+ rtc_device_unregister(rtc);
+ return err;
+}
+
+static int __devexit ab_rtc_remove(struct platform_device *pdev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(&pdev->dev);
+ struct rtc_device *rtc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq_byname(pdev, variant->irqname);
+
+ if (irq >= 0)
+ free_irq(irq, rtc);
+ rtc_device_unregister(rtc);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_device_id ab_rtc_id_table[] = {
+ { "ab5500-rtc", (kernel_ulong_t)&ab5500_rtc, },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, ab_rtc_id_table);
+
+static struct platform_driver ab_rtc_driver = {
+ .driver.name = "ab-rtc",
+ .driver.owner = THIS_MODULE,
+ .id_table = ab_rtc_id_table,
+ .probe = ab_rtc_probe,
+ .remove = __devexit_p(ab_rtc_remove),
+};
+
+static int __init ab_rtc_init(void)
+{
+ return platform_driver_register(&ab_rtc_driver);
+}
+module_init(ab_rtc_init);
+
+static void __exit ab_rtc_exit(void)
+{
+ platform_driver_unregister(&ab_rtc_driver);
+}
+module_exit(ab_rtc_exit);
+
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+MODULE_DESCRIPTION("AB5500 RTC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 4bcf9ca2818..156430c1d57 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -224,8 +224,8 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
int retval, i;
unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
- unsigned long mins, secs = 0;
-
+ unsigned long mins, secs = 0, cursec = 0;
+ struct rtc_time curtm;
if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) {
dev_dbg(dev, "year should be equal to or greater than %d\n",
AB8500_RTC_EPOCH);
@@ -235,14 +235,36 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
/* Get the number of seconds since 1970 */
rtc_tm_to_time(&alarm->time, &secs);
+ /*
+ * Check whether the alarm is set less than one minute from now.
+ * The RTC does not support an alarm resolution below one minute,
+ * so return -EINVAL and let UIE emulation take over in the UIE_ON
+ * case.
+ */
+ ab8500_rtc_read_time(dev, &curtm); /* Read current time */
+ rtc_tm_to_time(&curtm, &cursec);
+ if ((secs - cursec) < 59) {
+ dev_dbg(dev, "Alarm less than 1 minute not supported\n");
+ return -EINVAL;
+ }
+
/*
* Convert it to the number of seconds since 01-01-2000 00:00:00, since
* we only have a small counter in the RTC.
*/
secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
+#ifndef CONFIG_ANDROID
+ secs += 30; /* Round to nearest minute */
+#endif
+
mins = secs / 60;
+#ifdef CONFIG_ANDROID
+ /*
+ * Needed because Android assumes all hardware has a wake-up
+ * resolution of one second.
+ */
+ mins++;
+#endif
buf[2] = mins & 0xFF;
buf[1] = (mins >> 8) & 0xFF;
buf[0] = (mins >> 16) & 0xFF;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8293658e7cf..9c467a272f1 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -107,6 +107,20 @@ config SPI_BITBANG
need it. You only need to select this explicitly to support driver
modules that aren't part of this kernel tree.
+config STM_MSP_SPI
+ tristate "STM MSP CONTROLLER (SPI master)"
+ default y
+ help
+ This enables support for the STMicroelectronics MSP controller
+ used as an SPI master.
+
+config SPI_WORKQUEUE
+ bool "SPI_WORKQUEUE"
+ depends on STM_MSP_SPI
+ default n
+ help
+ This makes the MSP driver defer SPI work to its own dedicated
+ workqueue instead of the shared system workqueue.
+
config SPI_BUTTERFLY
tristate "Parallel port adapter for AVR Butterfly (DEVELOPMENT)"
depends on PARPORT
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 61c3261c388..56f8e794cfb 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -58,4 +58,5 @@ obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
+obj-$(CONFIG_STM_MSP_SPI) += stm_msp.o
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f37ad2271ad..d7f6a29dad3 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -510,6 +510,13 @@ static void giveback(struct pl022 *pl022)
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
+
+ /* disable the SPI/SSP operation */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+ /* This message is completed, so let's turn off the clocks & power */
+ pm_runtime_put(&pl022->adev->dev);
}
/**
@@ -914,6 +921,12 @@ static int configure_dma(struct pl022 *pl022)
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
+ /* The DMA burst size should match the FIFO trigger level */
+ rx_conf.src_maxburst = pl022->rx_lev_trig ? 1 <<
+ (pl022->rx_lev_trig + 1) : pl022->rx_lev_trig;
+ tx_conf.dst_maxburst = pl022->tx_lev_trig ? 1 <<
+ (pl022->tx_lev_trig + 1) : pl022->tx_lev_trig;
+
/* Check that the channels are available */
if (!rxchan || !txchan)
return -ENODEV;
@@ -2195,6 +2208,9 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
adev->res.start, pl022->virtbase);
+ pm_runtime_enable(dev);
+ pm_runtime_resume(dev);
+
pl022->clk = clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
status = PTR_ERR(pl022->clk);
@@ -2318,6 +2334,7 @@ pl022_remove(struct amba_device *adev)
clk_disable(pl022->clk);
clk_unprepare(pl022->clk);
clk_put(pl022->clk);
+ pm_runtime_disable(&adev->dev);
iounmap(pl022->virtbase);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
diff --git a/drivers/spi/stm_msp.c b/drivers/spi/stm_msp.c
new file mode 100644
index 00000000000..65dc316b37b
--- /dev/null
+++ b/drivers/spi/stm_msp.c
@@ -0,0 +1,1929 @@
+/*
+ * drivers/spi/stm_msp.c
+ *
+ * Copyright (C) 2010 STMicroelectronics Pvt. Ltd.
+ *
+ * Author: Sachin Verma <sachin.verma@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/amba/bus.h>
+#include <linux/spi/stm_msp.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+/**
+ * MSP Controller Register Offsets
+ */
+#define MSP_DR(r) (r + 0x000)
+#define MSP_GCR(r) (r + 0x004)
+#define MSP_TCF(r) (r + 0x008)
+#define MSP_RCF(r) (r + 0x00C)
+#define MSP_SRG(r) (r + 0x010)
+#define MSP_FLR(r) (r + 0x014)
+#define MSP_DMACR(r) (r + 0x018)
+#define MSP_IMSC(r) (r + 0x020)
+#define MSP_RIS(r) (r + 0x024)
+#define MSP_MIS(r) (r + 0x028)
+#define MSP_ICR(r) (r + 0x02C)
+#define MSP_MCR(r) (r + 0x030)
+#define MSP_RCV(r) (r + 0x034)
+#define MSP_RCM(r) (r + 0x038)
+#define MSP_TCE0(r) (r + 0x040)
+#define MSP_TCE1(r) (r + 0x044)
+#define MSP_TCE2(r) (r + 0x048)
+#define MSP_TCE3(r) (r + 0x04C)
+#define MSP_RCE0(r) (r + 0x060)
+#define MSP_RCE1(r) (r + 0x064)
+#define MSP_RCE2(r) (r + 0x068)
+#define MSP_RCE3(r) (r + 0x06C)
+#define MSP_PID0(r) (r + 0xFE0)
+#define MSP_PID1(r) (r + 0xFE4)
+#define MSP_PID2(r) (r + 0xFE8)
+#define MSP_PID3(r) (r + 0xFEC)
+
+/**
+ * MSP Global Configuration Register - MSP_GCR
+ */
+#define MSP_GCR_MASK_RXEN ((u32)(0x1UL << 0))
+#define MSP_GCR_MASK_RFFEN ((u32)(0x1UL << 1))
+#define MSP_GCR_MASK_RFSPOL ((u32)(0x1UL << 2))
+#define MSP_GCR_MASK_DCM ((u32)(0x1UL << 3))
+#define MSP_GCR_MASK_RFSSEL ((u32)(0x1UL << 4))
+#define MSP_GCR_MASK_RCKPOL ((u32)(0x1UL << 5))
+#define MSP_GCR_MASK_RCKSEL ((u32)(0x1UL << 6))
+#define MSP_GCR_MASK_LBM ((u32)(0x1UL << 7))
+#define MSP_GCR_MASK_TXEN ((u32)(0x1UL << 8))
+#define MSP_GCR_MASK_TFFEN ((u32)(0x1UL << 9))
+#define MSP_GCR_MASK_TFSPOL ((u32)(0x1UL << 10))
+#define MSP_GCR_MASK_TFSSEL ((u32)(0x3UL << 11))
+#define MSP_GCR_MASK_TCKPOL ((u32)(0x1UL << 13))
+#define MSP_GCR_MASK_TCKSEL ((u32)(0x1UL << 14))
+#define MSP_GCR_MASK_TXDDL ((u32)(0x1UL << 15))
+#define MSP_GCR_MASK_SGEN ((u32)(0x1UL << 16))
+#define MSP_GCR_MASK_SCKPOL ((u32)(0x1UL << 17))
+#define MSP_GCR_MASK_SCKSEL ((u32)(0x3UL << 18))
+#define MSP_GCR_MASK_FGEN ((u32)(0x1UL << 20))
+#define MSP_GCR_MASK_SPICKM ((u32)(0x3UL << 21))
+#define MSP_GCR_MASK_SPIBME ((u32)(0x1UL << 23))
+
+/**
+ * MSP Transmit Configuration Register - MSP_TCF
+ */
+#define MSP_TCF_MASK_TP1ELEN ((u32)(0x7UL << 0))
+#define MSP_TCF_MASK_TP1FLEN ((u32)(0x7FUL << 3))
+#define MSP_TCF_MASK_TDTYP ((u32)(0x3UL << 10))
+#define MSP_TCF_MASK_TENDN ((u32)(0x1UL << 12))
+#define MSP_TCF_MASK_TDDLY ((u32)(0x3UL << 13))
+#define MSP_TCF_MASK_TFSIG ((u32)(0x1UL << 15))
+#define MSP_TCF_MASK_TP2ELEN ((u32)(0x7UL << 16))
+#define MSP_TCF_MASK_TP2FLEN ((u32)(0x7FUL << 19))
+#define MSP_TCF_MASK_TP2SM ((u32)(0x1UL << 26))
+#define MSP_TCF_MASK_TP2EN ((u32)(0x1UL << 27))
+#define MSP_TCF_MASK_TBSWAP ((u32)(0x3UL << 28))
+
+/**
+ * MSP Receive Configuration Register - MSP_RCF
+ */
+#define MSP_RCF_MASK_RP1ELEN ((u32)(0x7UL << 0))
+#define MSP_RCF_MASK_RP1FLEN ((u32)(0x7FUL << 3))
+#define MSP_RCF_MASK_RDTYP ((u32)(0x3UL << 10))
+#define MSP_RCF_MASK_RENDN ((u32)(0x1UL << 12))
+#define MSP_RCF_MASK_RDDLY ((u32)(0x3UL << 13))
+#define MSP_RCF_MASK_RFSIG ((u32)(0x1UL << 15))
+#define MSP_RCF_MASK_RP2ELEN ((u32)(0x7UL << 16))
+#define MSP_RCF_MASK_RP2FLEN ((u32)(0x7FUL << 19))
+#define MSP_RCF_MASK_RP2SM ((u32)(0x1UL << 26))
+#define MSP_RCF_MASK_RP2EN ((u32)(0x1UL << 27))
+#define MSP_RCF_MASK_RBSWAP ((u32)(0x3UL << 28))
+
+/**
+ * MSP Sample Rate Generator Register - MSP_SRG
+ */
+#define MSP_SRG_MASK_SCKDIV ((u32)(0x3FFUL << 0))
+#define MSP_SRG_MASK_FRWID ((u32)(0x3FUL << 10))
+#define MSP_SRG_MASK_FRPER ((u32)(0x1FFFUL << 16))
+
+/**
+ * MSP Flag Register - MSP_FLR
+ */
+#define MSP_FLR_MASK_RBUSY ((u32)(0x1UL << 0))
+#define MSP_FLR_MASK_RFE ((u32)(0x1UL << 1))
+#define MSP_FLR_MASK_RFU ((u32)(0x1UL << 2))
+#define MSP_FLR_MASK_TBUSY ((u32)(0x1UL << 3))
+#define MSP_FLR_MASK_TFE ((u32)(0x1UL << 4))
+#define MSP_FLR_MASK_TFU ((u32)(0x1UL << 5))
+
+/**
+ * MSP DMA Control Register - MSP_DMACR
+ */
+#define MSP_DMACR_MASK_RDMAE ((u32)(0x1UL << 0))
+#define MSP_DMACR_MASK_TDMAE ((u32)(0x1UL << 1))
+
+/**
+ * MSP Interrupt Mask Set/Clear Register - MSP_IMSC
+ */
+#define MSP_IMSC_MASK_RXIM ((u32)(0x1UL << 0))
+#define MSP_IMSC_MASK_ROEIM ((u32)(0x1UL << 1))
+#define MSP_IMSC_MASK_RSEIM ((u32)(0x1UL << 2))
+#define MSP_IMSC_MASK_RFSIM ((u32)(0x1UL << 3))
+#define MSP_IMSC_MASK_TXIM ((u32)(0x1UL << 4))
+#define MSP_IMSC_MASK_TUEIM ((u32)(0x1UL << 5))
+#define MSP_IMSC_MASK_TSEIM ((u32)(0x1UL << 6))
+#define MSP_IMSC_MASK_TFSIM ((u32)(0x1UL << 7))
+#define MSP_IMSC_MASK_RFOIM ((u32)(0x1UL << 8))
+#define MSP_IMSC_MASK_TFOIM ((u32)(0x1UL << 9))
+
+/**
+ * MSP Raw Interrupt status Register - MSP_RIS
+ */
+#define MSP_RIS_MASK_RXRIS ((u32)(0x1UL << 0))
+#define MSP_RIS_MASK_ROERIS ((u32)(0x1UL << 1))
+#define MSP_RIS_MASK_RSERIS ((u32)(0x1UL << 2))
+#define MSP_RIS_MASK_RFSRIS ((u32)(0x1UL << 3))
+#define MSP_RIS_MASK_TXRIS ((u32)(0x1UL << 4))
+#define MSP_RIS_MASK_TUERIS ((u32)(0x1UL << 5))
+#define MSP_RIS_MASK_TSERIS ((u32)(0x1UL << 6))
+#define MSP_RIS_MASK_TFSRIS ((u32)(0x1UL << 7))
+#define MSP_RIS_MASK_RFORIS ((u32)(0x1UL << 8))
+#define MSP_RIS_MASK_TFORIS ((u32)(0x1UL << 9))
+
+/**
+ * MSP Masked Interrupt status Register - MSP_MIS
+ */
+#define MSP_MIS_MASK_RXMIS ((u32)(0x1UL << 0))
+#define MSP_MIS_MASK_ROEMIS ((u32)(0x1UL << 1))
+#define MSP_MIS_MASK_RSEMIS ((u32)(0x1UL << 2))
+#define MSP_MIS_MASK_RFSMIS ((u32)(0x1UL << 3))
+#define MSP_MIS_MASK_TXMIS ((u32)(0x1UL << 4))
+#define MSP_MIS_MASK_TUEMIS ((u32)(0x1UL << 5))
+#define MSP_MIS_MASK_TSEMIS ((u32)(0x1UL << 6))
+#define MSP_MIS_MASK_TFSMIS ((u32)(0x1UL << 7))
+#define MSP_MIS_MASK_RFOMIS ((u32)(0x1UL << 8))
+#define MSP_MIS_MASK_TFOMIS ((u32)(0x1UL << 9))
+
+/**
+ * MSP Interrupt Clear Register - MSP_ICR
+ */
+#define MSP_ICR_MASK_ROEIC ((u32)(0x1UL << 1))
+#define MSP_ICR_MASK_RSEIC ((u32)(0x1UL << 2))
+#define MSP_ICR_MASK_RFSIC ((u32)(0x1UL << 3))
+#define MSP_ICR_MASK_TUEIC ((u32)(0x1UL << 5))
+#define MSP_ICR_MASK_TSEIC ((u32)(0x1UL << 6))
+#define MSP_ICR_MASK_TFSIC ((u32)(0x1UL << 7))
+
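+/*
+ * GEN_MASK_BITS() shifts a field value to bit position sb and keeps only the
+ * bits covered by mask; MSP_WBITS() does a read-modify-write of that field
+ * inside a cached register value.
+ */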
+#define GEN_MASK_BITS(val, mask, sb) ((u32)((((u32)val) << (sb)) & (mask)))
+#define MSP_WBITS(reg, val, mask, sb) ((reg) = (((reg) & ~(mask)) |\
+ (((val) << (sb)) & (mask))))
+#define DEFAULT_MSP_REG_DMACR 0x00000000
+#define DEFAULT_MSP_REG_SRG 0x1FFF0000
+
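+/*
+ * Default GCR value: SPI master with internal clock, sample-rate and frame
+ * generation enabled, both FIFOs enabled, and the transmitter and receiver
+ * left disabled until a transfer is started.
+ */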
+#define DEFAULT_MSP_REG_GCR ( \
+ GEN_MASK_BITS(MSP_RECEIVER_DISABLED, MSP_GCR_MASK_RXEN, 0) |\
+ GEN_MASK_BITS(MSP_RX_FIFO_ENABLED, MSP_GCR_MASK_RFFEN, 1) |\
+ GEN_MASK_BITS(MSP_LOOPBACK_DISABLED, MSP_GCR_MASK_LBM, 7) |\
+ GEN_MASK_BITS(MSP_TRANSMITTER_DISABLED, MSP_GCR_MASK_TXEN, 8) |\
+ GEN_MASK_BITS(MSP_TX_FIFO_ENABLED, MSP_GCR_MASK_TFFEN, 9) |\
+ GEN_MASK_BITS(MSP_TX_FRAME_SYNC_POL_LOW, MSP_GCR_MASK_TFSPOL, 10)|\
+ GEN_MASK_BITS(MSP_TX_FRAME_SYNC_INT, MSP_GCR_MASK_TFSSEL, 11) |\
+ GEN_MASK_BITS(MSP_TX_CLOCK_POL_HIGH, MSP_GCR_MASK_TCKPOL, 13) |\
+ GEN_MASK_BITS(MSP_IS_SPI_MASTER, MSP_GCR_MASK_TCKSEL, 14) |\
+ GEN_MASK_BITS(MSP_TRANSMIT_DATA_WITHOUT_DELAY, MSP_GCR_MASK_TXDDL, 15)|\
+ GEN_MASK_BITS(MSP_SAMPLE_RATE_GEN_ENABLE, MSP_GCR_MASK_SGEN, 16)|\
+ GEN_MASK_BITS(MSP_CLOCK_INTERNAL, MSP_GCR_MASK_SCKSEL, 18) |\
+ GEN_MASK_BITS(MSP_FRAME_GEN_ENABLE, MSP_GCR_MASK_FGEN, 20) |\
+ GEN_MASK_BITS(MSP_SPI_PHASE_ZERO_CYCLE_DELAY, MSP_GCR_MASK_SPICKM, 21)|\
+ GEN_MASK_BITS(SPI_BURST_MODE_DISABLE, MSP_GCR_MASK_SPIBME, 23)\
+ )
+#define DEFAULT_MSP_REG_RCF ( \
+ GEN_MASK_BITS(MSP_DATA_BITS_32, MSP_RCF_MASK_RP1ELEN, 0) | \
+ GEN_MASK_BITS(MSP_IGNORE_RX_FRAME_SYNC_PULSE, MSP_RCF_MASK_RFSIG, 15) |\
+ GEN_MASK_BITS(MSP_RX_1BIT_DATA_DELAY, MSP_RCF_MASK_RDDLY, 13) | \
+ GEN_MASK_BITS(MSP_RX_ENDIANESS_LSB, MSP_RCF_MASK_RENDN, 12) \
+ )
+
+#define DEFAULT_MSP_REG_TCF ( \
+ GEN_MASK_BITS(MSP_DATA_BITS_32, MSP_TCF_MASK_TP1ELEN, 0) | \
+ GEN_MASK_BITS(MSP_IGNORE_TX_FRAME_SYNC_PULSE, MSP_TCF_MASK_TFSIG, 15) |\
+ GEN_MASK_BITS(MSP_TX_1BIT_DATA_DELAY, MSP_TCF_MASK_TDDLY, 13) | \
+ GEN_MASK_BITS(MSP_TX_ENDIANESS_LSB, MSP_TCF_MASK_TENDN, 12) \
+ )
+
+/**
+ * MSP Receiver/Transmitter states (enabled or disabled)
+ */
+#define MSP_RECEIVER_DISABLED 0
+#define MSP_RECEIVER_ENABLED 1
+#define MSP_TRANSMITTER_DISABLED 0
+#define MSP_TRANSMITTER_ENABLED 1
+
+/**
+ * MSP configuration constants: loopback, FIFOs, endianness, frame sync,
+ * data delay and clocking
+ */
+#define MSP_LOOPBACK_DISABLED 0
+#define MSP_LOOPBACK_ENABLED 1
+
+#define MSP_TX_FIFO_DISABLED 0
+#define MSP_TX_FIFO_ENABLED 1
+#define MSP_TX_ENDIANESS_MSB 0
+#define MSP_TX_ENDIANESS_LSB 1
+
+#define MSP_RX_FIFO_DISABLED 0
+#define MSP_RX_FIFO_ENABLED 1
+#define MSP_RX_ENDIANESS_MSB 0
+#define MSP_RX_ENDIANESS_LSB 1
+
+#define MSP_TX_FRAME_SYNC_EXT 0x0
+#define MSP_TX_FRAME_SYNC_INT 0x2
+#define MSP_TX_FRAME_SYNC_INT_CFG 0x3
+
+#define MSP_TX_FRAME_SYNC_POL_HIGH 0
+#define MSP_TX_FRAME_SYNC_POL_LOW 1
+
+#define MSP_HANDLE_RX_FRAME_SYNC_PULSE 0
+#define MSP_IGNORE_RX_FRAME_SYNC_PULSE 1
+
+#define MSP_RX_NO_DATA_DELAY 0x0
+#define MSP_RX_1BIT_DATA_DELAY 0x1
+#define MSP_RX_2BIT_DATA_DELAY 0x2
+#define MSP_RX_3BIT_DATA_DELAY 0x3
+
+#define MSP_HANDLE_TX_FRAME_SYNC_PULSE 0
+#define MSP_IGNORE_TX_FRAME_SYNC_PULSE 1
+
+#define MSP_TX_NO_DATA_DELAY 0x0
+#define MSP_TX_1BIT_DATA_DELAY 0x1
+#define MSP_TX_2BIT_DATA_DELAY 0x2
+#define MSP_TX_3BIT_DATA_DELAY 0x3
+
+#define MSP_TX_CLOCK_POL_LOW 0
+#define MSP_TX_CLOCK_POL_HIGH 1
+
+#define MSP_SPI_PHASE_ZERO_CYCLE_DELAY 0x2
+#define MSP_SPI_PHASE_HALF_CYCLE_DELAY 0x3
+
+#define MSP_IS_SPI_SLAVE 0
+#define MSP_IS_SPI_MASTER 1
+
+#define MSP_FRAME_GEN_DISABLE 0
+#define MSP_FRAME_GEN_ENABLE 1
+
+#define MSP_SAMPLE_RATE_GEN_DISABLE 0
+#define MSP_SAMPLE_RATE_GEN_ENABLE 1
+
+#define SPI_BURST_MODE_DISABLE 0
+#define SPI_BURST_MODE_ENABLE 1
+
+#define MSP_TRANSMIT_DATA_WITHOUT_DELAY 0
+#define MSP_TRANSMIT_DATA_WITH_DELAY 1
+
+#define MSP_CLOCK_INTERNAL 0x0 /* 48 MHz */
+
+/*
+ * SRG is derived from the MSPSCK pin but is resynchronized on MSPRFS
+ * (the receive frame sync signal).
+ */
+#define MSP_CLOCK_EXTERNAL 0x2
+#define MSP_CLOCK_EXTERNAL_RESYNC 0x3
+
+#define DISABLE_ALL_MSP_INTERRUPTS (0x0)
+#define ENABLE_ALL_MSP_INTERRUPTS (0x333)
+#define CLEAR_ALL_MSP_INTERRUPTS (0xEE)
+#define DEFAULT_MSP_CLK (48000000)
+#define MAX_SCKDIV (1023)
+
+#define MSP_FIFO_DEPTH 8
+
+/**
+ * Queue State
+ */
+#define QUEUE_RUNNING (0)
+#define QUEUE_STOPPED (1)
+
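+/*
+ * Per-message transfer states, stored in spi_message->state as opaque
+ * pointer-sized tokens.
+ */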
+#define START_STATE ((void *)0)
+#define RUNNING_STATE ((void *)1)
+#define DONE_STATE ((void *)2)
+#define ERROR_STATE ((void *)-1)
+
+/* Default values */
+#define SPI_DEFAULT_MAX_SPEED_HZ 48000
+#define SPI_TRANSFER_TIMEOUT_MS 5000
+
+/* CONTROLLER COMMANDS */
+enum cntlr_commands {
+ DISABLE_CONTROLLER = 0,
+ ENABLE_CONTROLLER,
+ DISABLE_ALL_INTERRUPT,
+ ENABLE_ALL_INTERRUPT,
+ FLUSH_FIFO,
+ RESTORE_STATE,
+ LOAD_DEFAULT_CONFIG,
+ CLEAR_ALL_INTERRUPT,
+};
+
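+/*
+ * struct stm_msp - driver-private state for one MSP instance used as an SPI
+ * master: AMBA device and clock handles, the message queue and work item that
+ * pump it, and the buffers and per-chip read/write handlers of the transfer
+ * currently in flight.
+ */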
+struct stm_msp {
+ struct amba_device *adev;
+ struct spi_master *master;
+ struct stm_msp_controller *master_info;
+ void __iomem *regs;
+ struct clk *clk;
+#ifdef CONFIG_SPI_WORKQUEUE
+ struct workqueue_struct *workqueue;
+#endif
+ struct work_struct spi_work;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ int run;
+ struct tasklet_struct pump_transfers;
+ struct timer_list spi_notify_timer;
+ int spi_io_error;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ void (*write)(struct stm_msp *stm_msp);
+ void (*read)(struct stm_msp *stm_msp);
+ void (*delay)(struct stm_msp *stm_msp);
+};
+
+/**
+ * struct chip_data - runtime state of the SPI controller for each client chip
+ * @ctr_regs: pointer to a struct holding the controller register values
+ * @chip_id: chip id (chip select) assigned to this client
+ * @n_bytes: number of bytes (a power of two) needed for the client's data width
+ * @write: function used to write data during a transfer for this chip
+ * @null_write: function used for dummy writes when only receiving data
+ * @read: function used to read data during a transfer for this chip
+ * @null_read: function used for dummy reads when only writing data
+ * @delay: function used to wait for the controller to go idle
+ * @cs_control: chip select callback provided by the client
+ * @xfer_type: transfer mode (polling or interrupt)
+ *
+ * Runtime state of the SPI controller, maintained per chip and set up
+ * according to the message currently being served.
+ */
+struct chip_data {
+ void *ctr_regs;
+ u32 chip_id;
+ u8 n_bytes;
+ void (*write) (struct stm_msp *stm_msp);
+ void (*null_write) (struct stm_msp *stm_msp);
+ void (*read) (struct stm_msp *stm_msp);
+ void (*null_read) (struct stm_msp *stm_msp);
+ void (*delay) (struct stm_msp *stm_msp);
+ void (*cs_control) (u32 command);
+ int xfer_type;
+};
+
+/**
+ * struct msp_regs - MSP controller register values saved per client chip
+ * and used by the driver to (re)configure the hardware.
+ * @gcr: global configuration register
+ * @tcf: transmit configuration register
+ * @rcf: receive configuration register
+ * @srg: sample rate generator register
+ * @dmacr: DMA configuration register
+ */
+struct msp_regs {
+ u32 gcr;
+ u32 tcf;
+ u32 rcf;
+ u32 srg;
+ u32 dmacr;
+};
+
+/**
+ * stm_msp_controller_cmd - To execute controller commands for MSP
+ * @stm_msp: SPI driver private data structure
+ * @cmd: Command which is to be executed on the controller
+ */
+static int stm_msp_controller_cmd(struct stm_msp *stm_msp, int cmd)
+{
+ int retval = 0;
+ struct msp_regs *msp_regs = NULL;
+
+ switch (cmd) {
+ case DISABLE_CONTROLLER: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Disabling MSP controller...\n");
+ writel((readl(MSP_GCR(stm_msp->regs)) &
+ (~(MSP_GCR_MASK_TXEN | MSP_GCR_MASK_RXEN))),
+ MSP_GCR(stm_msp->regs));
+ break;
+ }
+ case ENABLE_CONTROLLER: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Enabling MSP controller...\n");
+ writel((readl(MSP_GCR(stm_msp->regs)) |
+ (MSP_GCR_MASK_TXEN | MSP_GCR_MASK_RXEN)),
+ MSP_GCR(stm_msp->regs));
+ break;
+ }
+ case DISABLE_ALL_INTERRUPT: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Disabling all MSP interrupts...\n");
+ writel(DISABLE_ALL_MSP_INTERRUPTS,
+ MSP_IMSC(stm_msp->regs));
+ break;
+ }
+ case ENABLE_ALL_INTERRUPT: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Enabling all MSP interrupts...\n");
+ writel(ENABLE_ALL_MSP_INTERRUPTS,
+ MSP_IMSC(stm_msp->regs));
+ break;
+ }
+ case CLEAR_ALL_INTERRUPT: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Clearing all MSP interrupts...\n");
+ writel(CLEAR_ALL_MSP_INTERRUPTS,
+ MSP_ICR(stm_msp->regs));
+ break;
+ }
+ case FLUSH_FIFO: {
+ unsigned long limit = loops_per_jiffy << 1;
+
+ dev_dbg(&stm_msp->adev->dev, "MSP FIFO flushed\n");
+
+ do {
+ while (!(readl(MSP_FLR(stm_msp->regs)) &
+ MSP_FLR_MASK_RFE)) {
+ readl(MSP_DR(stm_msp->regs));
+ }
+ } while ((readl(MSP_FLR(stm_msp->regs)) &
+ (MSP_FLR_MASK_TBUSY | MSP_FLR_MASK_RBUSY)) &&
+ limit--);
+
+ retval = limit;
+ break;
+ }
+ case RESTORE_STATE: {
+ msp_regs =
+ (struct msp_regs *)stm_msp->cur_chip->ctr_regs;
+
+ dev_dbg(&stm_msp->adev->dev,
+ "Restoring MSP state...\n");
+
+ writel(msp_regs->gcr, MSP_GCR(stm_msp->regs));
+ writel(msp_regs->tcf, MSP_TCF(stm_msp->regs));
+ writel(msp_regs->rcf, MSP_RCF(stm_msp->regs));
+ writel(msp_regs->srg, MSP_SRG(stm_msp->regs));
+ writel(msp_regs->dmacr, MSP_DMACR(stm_msp->regs));
+ writel(DISABLE_ALL_MSP_INTERRUPTS,
+ MSP_IMSC(stm_msp->regs));
+ writel(CLEAR_ALL_MSP_INTERRUPTS,
+ MSP_ICR(stm_msp->regs));
+ break;
+ }
+ case LOAD_DEFAULT_CONFIG: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Loading default MSP config...\n");
+
+ writel(DEFAULT_MSP_REG_GCR, MSP_GCR(stm_msp->regs));
+ writel(DEFAULT_MSP_REG_TCF, MSP_TCF(stm_msp->regs));
+ writel(DEFAULT_MSP_REG_RCF, MSP_RCF(stm_msp->regs));
+ writel(DEFAULT_MSP_REG_SRG, MSP_SRG(stm_msp->regs));
+ writel(DEFAULT_MSP_REG_DMACR, MSP_DMACR(stm_msp->regs));
+ writel(DISABLE_ALL_MSP_INTERRUPTS,
+ MSP_IMSC(stm_msp->regs));
+ writel(CLEAR_ALL_MSP_INTERRUPTS,
+ MSP_ICR(stm_msp->regs));
+ break;
+ }
+ default:
+ dev_dbg(&stm_msp->adev->dev, "Unknown command\n");
+ retval = -1;
+ break;
+ }
+
+ return retval;
+}
+
+/**
+ * giveback - finish the current spi_message and schedule the next one
+ * @message: current SPI message
+ * @stm_msp: spi driver private data structure
+ *
+ * The current spi_message is complete: schedule the next message and
+ * invoke this message's completion callback.
+ */
+static void giveback(struct spi_message *message, struct stm_msp *stm_msp)
+{
+ struct spi_transfer *last_transfer;
+ unsigned long flags;
+ struct spi_message *msg;
+ void (*curr_cs_control)(u32 command);
+
+ spin_lock_irqsave(&stm_msp->lock, flags);
+ msg = stm_msp->cur_msg;
+
+ curr_cs_control = stm_msp->cur_chip->cs_control;
+
+ stm_msp->cur_msg = NULL;
+ stm_msp->cur_transfer = NULL;
+ stm_msp->cur_chip = NULL;
+#ifdef CONFIG_SPI_WORKQUEUE
+ queue_work(stm_msp->workqueue, &stm_msp->spi_work);
+#else
+ schedule_work(&stm_msp->spi_work);
+#endif
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+
+ last_transfer = list_entry(msg->transfers.prev,
+ struct spi_transfer, transfer_list);
+
+ if (!last_transfer->cs_change)
+ curr_cs_control(SPI_CHIP_DESELECT);
+
+ msg->state = NULL;
+
+ if (msg->complete)
+ msg->complete(msg->context);
+
+ stm_msp_controller_cmd(stm_msp, DISABLE_CONTROLLER);
+ clk_disable(stm_msp->clk);
+}
+
+/**
+ * spi_notify - watchdog for polling transfers that hang on the SPI bus
+ * @data: main driver data
+ * Context: softirq (timer callback)
+ *
+ * Handles the error case where a polling-mode transfer hangs, for example
+ * because a wrong protocol descriptor was passed. A timer armed before each
+ * polling transfer fires this callback, which flags an I/O error so that the
+ * polling loop can bail out.
+ */
+static void spi_notify(unsigned long data)
+{
+ struct stm_msp *stm_msp = (struct stm_msp *)data;
+ stm_msp->spi_io_error = 1;
+
+ dev_err(&stm_msp->adev->dev,
+ "Polling is taking time, maybe device not responding\n");
+
+ del_timer(&stm_msp->spi_notify_timer);
+}
+
+/**
+ * stm_msp_transfer - transfer function registered to SPI master framework
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message to be handled; it is queued on the driver's queue
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. It will queue the spi_message in the queue of driver if
+ * the queue is not stopped and return.
+ */
+static int stm_msp_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct stm_msp *stm_msp = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&stm_msp->lock, flags);
+
+ if (stm_msp->run == QUEUE_STOPPED) {
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return -ESHUTDOWN;
+ }
+ dev_err(&spi->dev, "Regular request (No infinite DMA ongoing)\n");
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+ list_add_tail(&msg->queue, &stm_msp->queue);
+
+ if ((stm_msp->run == QUEUE_RUNNING) && (!stm_msp->busy))
+#ifdef CONFIG_SPI_WORKQUEUE
+ queue_work(stm_msp->workqueue, &stm_msp->spi_work);
+#else
+ schedule_work(&stm_msp->spi_work);
+#endif
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return 0;
+}
+
+/**
+ * next_transfer - Move to the Next transfer in the current spi message
+ * @stm_msp: spi driver private data structure
+ *
+ * This function moves through the linked list of spi transfers in the
+ * current spi message and returns the state of the current spi message,
+ * i.e. whether its last transfer is done (DONE_STATE) or the next
+ * transfer is ready (RUNNING_STATE).
+ */
+static void *next_transfer(struct stm_msp *stm_msp)
+{
+ struct spi_message *msg = stm_msp->cur_msg;
+ struct spi_transfer *trans = stm_msp->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ stm_msp->cur_transfer = list_entry(trans->transfer_list.next,
+ struct spi_transfer,
+ transfer_list);
+ return RUNNING_STATE;
+ }
+ return DONE_STATE;
+}
+
+static void do_interrupt_transfer(void *data)
+{
+ struct stm_msp *stm_msp = (struct stm_msp *)data;
+
+ stm_msp->tx = (void *)stm_msp->cur_transfer->tx_buf;
+ stm_msp->tx_end = stm_msp->tx + stm_msp->cur_transfer->len;
+
+ stm_msp->rx = (void *)stm_msp->cur_transfer->rx_buf;
+ stm_msp->rx_end = stm_msp->rx + stm_msp->cur_transfer->len;
+
+ stm_msp->write = stm_msp->tx ?
+ stm_msp->cur_chip->write : stm_msp->cur_chip->null_write;
+ stm_msp->read = stm_msp->rx ?
+ stm_msp->cur_chip->read : stm_msp->cur_chip->null_read;
+
+ stm_msp->cur_chip->cs_control(SPI_CHIP_SELECT);
+
+ stm_msp_controller_cmd(stm_msp, ENABLE_ALL_INTERRUPT);
+ stm_msp_controller_cmd(stm_msp, ENABLE_CONTROLLER);
+}
+
+static void do_polling_transfer(void *data)
+{
+ struct stm_msp *stm_msp = (struct stm_msp *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct chip_data *chip;
+ unsigned long limit = 0;
+ u32 timer_expire = 0;
+
+ chip = stm_msp->cur_chip;
+ message = stm_msp->cur_msg;
+
+ while (message->state != DONE_STATE) {
+ /* Handle for abort */
+ if (message->state == ERROR_STATE)
+ break;
+
+ transfer = stm_msp->cur_transfer;
+
+ /* Delay if requested at end of transfer */
+ if (message->state == RUNNING_STATE) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer,
+ transfer_list);
+
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+
+ if (previous->cs_change)
+ stm_msp->cur_chip->cs_control(SPI_CHIP_SELECT);
+ } else {
+ /* START_STATE */
+ message->state = RUNNING_STATE;
+ stm_msp->cur_chip->cs_control(SPI_CHIP_SELECT);
+ }
+
+ /* Configuration Changing Per Transfer */
+ stm_msp->tx = (void *)transfer->tx_buf;
+ stm_msp->tx_end = stm_msp->tx + stm_msp->cur_transfer->len;
+ stm_msp->rx = (void *)transfer->rx_buf;
+ stm_msp->rx_end = stm_msp->rx + stm_msp->cur_transfer->len;
+
+ stm_msp->write = stm_msp->tx ?
+ stm_msp->cur_chip->write :
+ stm_msp->cur_chip->null_write;
+ stm_msp->read = stm_msp->rx ?
+ stm_msp->cur_chip->read :
+ stm_msp->cur_chip->null_read;
+ stm_msp->delay = stm_msp->cur_chip->delay;
+
+ stm_msp_controller_cmd(stm_msp, FLUSH_FIFO);
+ stm_msp_controller_cmd(stm_msp, ENABLE_CONTROLLER);
+
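+ /*
+ * Arm the polling watchdog: allow SPI_TRANSFER_TIMEOUT_MS per KiB
+ * of transfer length, with the same value as a minimum for short
+ * transfers.
+ */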
+ timer_expire = stm_msp->cur_transfer->len / 1024;
+
+ if (!timer_expire)
+ timer_expire = SPI_TRANSFER_TIMEOUT_MS;
+ else
+ timer_expire =
+ (stm_msp->cur_transfer->len / 1024) *
+ SPI_TRANSFER_TIMEOUT_MS;
+
+ stm_msp->spi_notify_timer.expires =
+ jiffies + msecs_to_jiffies(timer_expire);
+
+ add_timer(&stm_msp->spi_notify_timer);
+
+ dev_dbg(&stm_msp->adev->dev, "Polling transfer ongoing...\n");
+
+ while (stm_msp->tx < stm_msp->tx_end) {
+
+ stm_msp_controller_cmd(stm_msp, DISABLE_CONTROLLER);
+ stm_msp->read(stm_msp);
+ stm_msp->write(stm_msp);
+
+ stm_msp_controller_cmd(stm_msp, ENABLE_CONTROLLER);
+
+ if (stm_msp->delay)
+ stm_msp->delay(stm_msp);
+
+ if (stm_msp->spi_io_error == 1)
+ break;
+ }
+
+ del_timer(&stm_msp->spi_notify_timer);
+
+ if (stm_msp->spi_io_error == 1)
+ goto out;
+
+ limit = loops_per_jiffy << 1;
+
+ while ((stm_msp->rx < stm_msp->rx_end) && (limit--))
+ stm_msp->read(stm_msp);
+
+ /* Update total bytes transferred */
+ message->actual_length += stm_msp->cur_transfer->len;
+
+ if (stm_msp->cur_transfer->cs_change)
+ stm_msp->cur_chip->cs_control(SPI_CHIP_DESELECT);
+
+ stm_msp_controller_cmd(stm_msp, DISABLE_CONTROLLER);
+
+ /* Move to next transfer */
+ message->state = next_transfer(stm_msp);
+ }
+out:
+ /* Handle end of message */
+ if (message->state == DONE_STATE)
+ message->status = 0;
+ else
+ message->status = -EIO;
+
+ giveback(message, stm_msp);
+
+ stm_msp->spi_io_error = 0; /* Reset state for further transfers */
+
+ return;
+}
+
+/**
+ * pump_messages - Workqueue function which processes spi message queue
+ * @work: pointer to work
+ *
+ * This function checks if there is any spi message in the queue that
+ * needs processing and delegates control to the appropriate function,
+ * do_polling_transfer() or do_interrupt_transfer(), based on the
+ * transfer mode requested for the chip.
+ *
+ */
+static void pump_messages(struct work_struct *work)
+{
+ struct stm_msp *stm_msp = container_of(work, struct stm_msp, spi_work);
+ unsigned long flags;
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&stm_msp->lock, flags);
+
+ if (list_empty(&stm_msp->queue) || stm_msp->run == QUEUE_STOPPED) {
+ dev_dbg(&stm_msp->adev->dev, "work_queue: Queue Empty\n");
+ stm_msp->busy = 0;
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return;
+ }
+ /* Make sure we are not already running a message */
+ if (stm_msp->cur_msg) {
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return;
+ }
+
+ clk_enable(stm_msp->clk);
+
+ /* Extract head of queue */
+ stm_msp->cur_msg = list_entry(stm_msp->queue.next,
+ struct spi_message,
+ queue);
+
+ list_del_init(&stm_msp->cur_msg->queue);
+ stm_msp->busy = 1;
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+
+ /* Initial message state */
+ stm_msp->cur_msg->state = START_STATE;
+ stm_msp->cur_transfer = list_entry(stm_msp->cur_msg->transfers.next,
+ struct spi_transfer,
+ transfer_list);
+
+ /* Setup the SPI using the per chip configuration */
+ stm_msp->cur_chip = spi_get_ctldata(stm_msp->cur_msg->spi);
+ stm_msp_controller_cmd(stm_msp, RESTORE_STATE);
+ stm_msp_controller_cmd(stm_msp, FLUSH_FIFO);
+
+ if (stm_msp->cur_chip->xfer_type == SPI_POLLING_TRANSFER)
+ do_polling_transfer(stm_msp);
+ else if (stm_msp->cur_chip->xfer_type == SPI_INTERRUPT_TRANSFER)
+ do_interrupt_transfer(stm_msp);
+}
+
+/**
+ * pump_transfers - Tasklet function which schedules next interrupt xfer
+ * @data: spi driver private data structure
+ */
+static void pump_transfers(unsigned long data)
+{
+ struct stm_msp *stm_msp = (struct stm_msp *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+
+ message = stm_msp->cur_msg;
+
+ /* Handle for abort */
+ if (message->state == ERROR_STATE) {
+ message->status = -EIO;
+ giveback(message, stm_msp);
+ return;
+ }
+
+ /* Handle end of message */
+ if (message->state == DONE_STATE) {
+ message->status = 0;
+ giveback(message, stm_msp);
+ return;
+ }
+ transfer = stm_msp->cur_transfer;
+
+ /* Delay if requested at end of transfer */
+ if (message->state == RUNNING_STATE) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ if (previous->cs_change)
+ stm_msp->cur_chip->cs_control(SPI_CHIP_SELECT);
+ } else {
+ /* START_STATE */
+ message->state = RUNNING_STATE;
+ }
+ stm_msp->tx = (void *)transfer->tx_buf;
+ stm_msp->tx_end = stm_msp->tx + stm_msp->cur_transfer->len;
+ stm_msp->rx = (void *)transfer->rx_buf;
+ stm_msp->rx_end = stm_msp->rx + stm_msp->cur_transfer->len;
+
+ stm_msp->write = stm_msp->tx ?
+ stm_msp->cur_chip->write : stm_msp->cur_chip->null_write;
+ stm_msp->read = stm_msp->rx ?
+ stm_msp->cur_chip->read : stm_msp->cur_chip->null_read;
+
+ stm_msp_controller_cmd(stm_msp, FLUSH_FIFO);
+ stm_msp_controller_cmd(stm_msp, ENABLE_ALL_INTERRUPT);
+}
+
+static int init_queue(struct stm_msp *stm_msp)
+{
+ INIT_LIST_HEAD(&stm_msp->queue);
+ spin_lock_init(&stm_msp->lock);
+
+ stm_msp->run = QUEUE_STOPPED;
+ stm_msp->busy = 0;
+
+ tasklet_init(&stm_msp->pump_transfers, pump_transfers,
+ (unsigned long)stm_msp);
+ INIT_WORK(&stm_msp->spi_work, pump_messages);
+
+#ifdef CONFIG_SPI_WORKQUEUE
+ stm_msp->workqueue = create_singlethread_workqueue(
+ dev_name(&stm_msp->master->dev));
+
+ if (stm_msp->workqueue == NULL)
+ return -EBUSY;
+#endif /* CONFIG_SPI_WORKQUEUE */
+
+ init_timer(&stm_msp->spi_notify_timer);
+
+ stm_msp->spi_notify_timer.expires = jiffies + msecs_to_jiffies(1000);
+ stm_msp->spi_notify_timer.function = spi_notify;
+ stm_msp->spi_notify_timer.data = (unsigned long)stm_msp;
+
+ return 0;
+}
+
+static int start_queue(struct stm_msp *stm_msp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&stm_msp->lock, flags);
+
+ if (stm_msp->run == QUEUE_RUNNING || stm_msp->busy) {
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return -EBUSY;
+ }
+
+ stm_msp->run = QUEUE_RUNNING;
+ stm_msp->cur_msg = NULL;
+ stm_msp->cur_transfer = NULL;
+ stm_msp->cur_chip = NULL;
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return 0;
+}
+
+static int stop_queue(struct stm_msp *stm_msp)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int status = 0;
+
+ spin_lock_irqsave(&stm_msp->lock, flags);
+
+ /* This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the stm_msp->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead */
+
+ stm_msp->run = QUEUE_STOPPED;
+
+ while (!list_empty(&stm_msp->queue) && stm_msp->busy && limit--) {
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&stm_msp->lock, flags);
+ }
+
+ if (!list_empty(&stm_msp->queue) || stm_msp->busy)
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+
+ return status;
+}
+
+static int destroy_queue(struct stm_msp *stm_msp)
+{
+ int status;
+
+ status = stop_queue(stm_msp);
+
+ if (status != 0)
+ return status;
+#ifdef CONFIG_SPI_WORKQUEUE
+ destroy_workqueue(stm_msp->workqueue);
+#endif
+ del_timer_sync(&stm_msp->spi_notify_timer);
+
+ return 0;
+}
+
+/**
+ * stm_msp_null_writer - To Write Dummy Data in Data register
+ * @stm_msp: spi driver private data structure
+ *
+ * This function is used as the write function for transfers whose Tx
+ * buffer is NULL. It simply writes '0' to the Data register so that
+ * receive-only transfers still clock data out.
+ */
+static void stm_msp_null_writer(struct stm_msp *stm_msp)
+{
+ u32 cur_write = 0;
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_TFU) ||
+ (stm_msp->tx >= stm_msp->tx_end))
+ return;
+
+ writel(0x0, MSP_DR(stm_msp->regs));
+ stm_msp->tx += (stm_msp->cur_chip->n_bytes);
+ cur_write++;
+
+ if (cur_write == MSP_FIFO_DEPTH)
+ return;
+ }
+}
+
+/**
+ * stm_msp_null_reader - To read data from Data register and discard it
+ * @stm_msp: spi driver private data structure
+ *
+ * This function is used as the read function for transfers whose Rx
+ * buffer is NULL. The data read is discarded.
+ */
+static void stm_msp_null_reader(struct stm_msp *stm_msp)
+{
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_RFE) ||
+ (stm_msp->rx >= stm_msp->rx_end))
+ return;
+
+ readl(MSP_DR(stm_msp->regs));
+ stm_msp->rx += (stm_msp->cur_chip->n_bytes);
+ }
+}
+
+/**
+ * stm_msp_u8_writer - write Tx FIFO data to the Data register as 8-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function writes data into the Tx FIFO until the FIFO is full (as
+ * indicated by the status register) or the transfer is complete. It also
+ * advances the temporary write pointer tx in stm_msp, which tracks the
+ * current write position in the transfer buffer. No more than one FIFO
+ * depth of data is written per call.
+ */
+void stm_msp_u8_writer(struct stm_msp *stm_msp)
+{
+ u32 cur_write = 0;
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_TFU) ||
+ (stm_msp->tx >= stm_msp->tx_end))
+ return;
+
+ writel((u32)(*(u8 *)(stm_msp->tx)), MSP_DR(stm_msp->regs));
+ stm_msp->tx += (stm_msp->cur_chip->n_bytes);
+ cur_write++;
+
+ if (cur_write == MSP_FIFO_DEPTH)
+ return;
+ }
+}
+
+/**
+ * stm_msp_u8_reader - read Rx FIFO data from the Data register as 8-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function reads data from the Rx FIFO until the FIFO is empty (as
+ * indicated by the status register) or the transfer is complete. It also
+ * advances the temporary read pointer rx in stm_msp, which tracks the
+ * current read position in the transfer buffer.
+ */
+void stm_msp_u8_reader(struct stm_msp *stm_msp)
+{
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_RFE) ||
+ (stm_msp->rx >= stm_msp->rx_end))
+ return;
+
+ *(u8 *)(stm_msp->rx) = (u8)readl(MSP_DR(stm_msp->regs));
+ stm_msp->rx += (stm_msp->cur_chip->n_bytes);
+ }
+}
+
+/**
+ * stm_msp_u16_writer - write Tx FIFO data to the Data register as 16-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function writes data into the Tx FIFO until the FIFO is full (as
+ * indicated by the status register) or the transfer is complete. It also
+ * advances the temporary write pointer tx in stm_msp, which tracks the
+ * current write position in the transfer buffer. No more than one FIFO
+ * depth of data is written per call.
+ */
+void stm_msp_u16_writer(struct stm_msp *stm_msp)
+{
+ u32 cur_write = 0;
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_TFU) ||
+ (stm_msp->tx >= stm_msp->tx_end))
+ return;
+
+ writel((u32)(*(u16 *)(stm_msp->tx)), MSP_DR(stm_msp->regs));
+ stm_msp->tx += (stm_msp->cur_chip->n_bytes);
+ cur_write++;
+
+ if (cur_write == MSP_FIFO_DEPTH)
+ return;
+ }
+}
+
+/**
+ * stm_msp_u16_reader - read Rx FIFO data from the Data register as 16-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function reads data from the Rx FIFO until the FIFO is empty (as
+ * indicated by the status register) or the transfer is complete. It also
+ * advances the temporary read pointer rx in stm_msp, which tracks the
+ * current read position in the transfer buffer.
+ */
+void stm_msp_u16_reader(struct stm_msp *stm_msp)
+{
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_RFE) ||
+ (stm_msp->rx >= stm_msp->rx_end))
+ return;
+
+ *(u16 *)(stm_msp->rx) = (u16)readl(MSP_DR(stm_msp->regs));
+ stm_msp->rx += (stm_msp->cur_chip->n_bytes);
+ }
+}
+
+/**
+ * stm_msp_u32_writer - write Tx FIFO data to the Data register as 32-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function writes data into the Tx FIFO until the FIFO is full (as
+ * indicated by the status register) or the transfer is complete. It also
+ * advances the temporary write pointer tx in stm_msp, which tracks the
+ * current write position in the transfer buffer. No more than one FIFO
+ * depth of data is written per call.
+ */
+void stm_msp_u32_writer(struct stm_msp *stm_msp)
+{
+ u32 cur_write = 0;
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_TFU) ||
+ (stm_msp->tx >= stm_msp->tx_end))
+ return;
+
+ /* Write Data to Data Register */
+ writel(*(u32 *)(stm_msp->tx), MSP_DR(stm_msp->regs));
+ stm_msp->tx += (stm_msp->cur_chip->n_bytes);
+ cur_write++;
+
+ if (cur_write == MSP_FIFO_DEPTH)
+ return;
+ }
+}
+
+/**
+ * stm_msp_u32_reader - read Rx FIFO data from the Data register as 32-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function reads data from the Rx FIFO until the FIFO is empty (as
+ * indicated by the status register) or the transfer is complete. It also
+ * advances the temporary read pointer rx in stm_msp, which tracks the
+ * current read position in the transfer buffer.
+ */
+void stm_msp_u32_reader(struct stm_msp *stm_msp)
+{
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_RFE) ||
+ (stm_msp->rx >= stm_msp->rx_end))
+ return;
+
+ *(u32 *)(stm_msp->rx) = readl(MSP_DR(stm_msp->regs));
+ stm_msp->rx += (stm_msp->cur_chip->n_bytes);
+ }
+}
+
+/**
+ * stm_msp_interrupt_handler - interrupt handler for the MSP controller
+ */
+static irqreturn_t stm_msp_interrupt_handler(int irq, void *dev_id)
+{
+ struct stm_msp *stm_msp = (struct stm_msp *)dev_id;
+ struct spi_message *msg = stm_msp->cur_msg;
+ u32 irq_status = 0;
+ u32 flag = 0;
+
+ if (!msg) {
+ dev_err(&stm_msp->adev->dev,
+ "Bad message state in interrupt handler");
+ /* Never fail */
+ return IRQ_HANDLED;
+ }
+
+ /* Read the Interrupt Status Register */
+ irq_status = readl(MSP_MIS(stm_msp->regs));
+
+ if (irq_status) {
+ if (irq_status & MSP_MIS_MASK_ROEMIS) { /* Overrun interrupt */
+ /* Bail out: our data has been corrupted */
+ dev_dbg(&stm_msp->adev->dev,
+ "Received ROR interrupt\n");
+
+ stm_msp_controller_cmd(stm_msp, DISABLE_ALL_INTERRUPT);
+ stm_msp_controller_cmd(stm_msp, CLEAR_ALL_INTERRUPT);
+ stm_msp_controller_cmd(stm_msp, DISABLE_CONTROLLER);
+ msg->state = ERROR_STATE;
+ tasklet_schedule(&stm_msp->pump_transfers);
+ return IRQ_HANDLED;
+ }
+
+ stm_msp->read(stm_msp);
+ stm_msp->write(stm_msp);
+
+ if ((stm_msp->tx == stm_msp->tx_end) && (flag == 0)) {
+ flag = 1;
+ /* Disable Transmit interrupt */
+ writel(readl(MSP_IMSC(stm_msp->regs)) &
+ (~MSP_IMSC_MASK_TXIM) & (~MSP_IMSC_MASK_TFOIM),
+ MSP_IMSC(stm_msp->regs));
+ }
+
+ /* Clearing any Xmit underrun error. Overrun already handled */
+ stm_msp_controller_cmd(stm_msp, CLEAR_ALL_INTERRUPT);
+
+ if (stm_msp->rx == stm_msp->rx_end) {
+ stm_msp_controller_cmd(stm_msp, DISABLE_ALL_INTERRUPT);
+ stm_msp_controller_cmd(stm_msp, CLEAR_ALL_INTERRUPT);
+
+ dev_dbg(&stm_msp->adev->dev,
+ "Interrupt transfer completed.\n");
+
+ /* Update total bytes transferred */
+ msg->actual_length += stm_msp->cur_transfer->len;
+
+ if (stm_msp->cur_transfer->cs_change)
+ stm_msp->cur_chip->cs_control(
+ SPI_CHIP_DESELECT);
+
+ /* Move to next transfer */
+ msg->state = next_transfer(stm_msp);
+ tasklet_schedule(&stm_msp->pump_transfers);
+ return IRQ_HANDLED;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * stm_msp_cleanup - cleanup function registered to SPI master framework
+ * @spi: spi device which is requesting cleanup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. It will free the runtime state of chip.
+ */
+static void stm_msp_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ if (chip) {
+ kfree(chip->ctr_regs);
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+ }
+}
+
+/**
+ * null_cs_control - Dummy chip select function
+ * @command: select/deselect the chip
+ *
+ * If no chip select function is provided by client this is used as dummy
+ * chip select
+ */
+static void null_cs_control(u32 command)
+{
+ /* Nothing to do */
+ (void)command;
+}
+
+static int verify_msp_controller_parameters(struct stm_msp_config_chip
+ *chip_info)
+{
+
+ /* FIXME: check clock params */
+ if ((chip_info->lbm != SPI_LOOPBACK_ENABLED) &&
+ (chip_info->lbm != SPI_LOOPBACK_DISABLED)) {
+ dev_dbg(chip_info->dev,
+ "Loopback Mode is configured incorrectly\n");
+ return -1;
+ }
+ if ((chip_info->hierarchy != SPI_MASTER) &&
+ (chip_info->hierarchy != SPI_SLAVE)) {
+ dev_dbg(chip_info->dev,
+ "hierarchy is configured incorrectly\n");
+ return -1;
+ }
+ if ((chip_info->endian_rx != SPI_FIFO_MSB) &&
+ (chip_info->endian_rx != SPI_FIFO_LSB)) {
+ dev_dbg(chip_info->dev,
+ "Rx FIFO endianess is configured incorrectly\n");
+ return -1;
+ }
+ if ((chip_info->endian_tx != SPI_FIFO_MSB) &&
+ (chip_info->endian_tx != SPI_FIFO_LSB)) {
+ dev_dbg(chip_info->dev,
+ "Tx FIFO endianess is configured incorrectly\n");
+ return -1;
+ }
+ if ((chip_info->data_size < MSP_DATA_BITS_8) ||
+ (chip_info->data_size > MSP_DATA_BITS_32)) {
+ dev_dbg(chip_info->dev,
+ "MSP DATA Size is configured incorrectly\n");
+ return -1;
+ }
+ if ((chip_info->com_mode != SPI_INTERRUPT_TRANSFER) &&
+ (chip_info->com_mode != SPI_POLLING_TRANSFER)) {
+ dev_dbg(chip_info->dev,
+ "Communication mode is configured incorrectly\n");
+ return -1;
+ }
+ if (((chip_info->proto_params).clk_phase !=
+ SPI_CLK_ZERO_CYCLE_DELAY) &&
+ ((chip_info->proto_params).clk_phase !=
+ SPI_CLK_HALF_CYCLE_DELAY)) {
+ dev_dbg(chip_info->dev,
+ "Clock Phase is configured incorrectly\n");
+ return -1;
+ }
+ if (((chip_info->proto_params).clk_pol !=
+ SPI_CLK_POL_IDLE_LOW) &&
+ ((chip_info->proto_params).clk_pol !=
+ SPI_CLK_POL_IDLE_HIGH)) {
+ dev_dbg(chip_info->dev,
+ "Clk Polarity configured incorrectly\n");
+ return -1;
+ }
+ if (chip_info->cs_control == NULL) {
+ dev_dbg(chip_info->dev,
+ "Chip Select Function is NULL for this chip\n");
+ chip_info->cs_control = null_cs_control;
+ }
+ return 0;
+}
+
+static struct stm_msp_config_chip *allocate_default_msp_chip_cfg(
+ struct spi_device *spi)
+{
+ struct stm_msp_config_chip *chip_info;
+
+ chip_info = kzalloc(sizeof(struct stm_msp_config_chip), GFP_KERNEL);
+
+ if (!chip_info) {
+ dev_err(&spi->dev, "setup - cannot allocate controller data");
+ return NULL;
+ }
+ dev_dbg(&spi->dev, "Allocated Memory for controller data\n");
+
+ chip_info->lbm = SPI_LOOPBACK_DISABLED;
+ chip_info->com_mode = SPI_POLLING_TRANSFER;
+ chip_info->hierarchy = SPI_MASTER;
+ chip_info->endian_tx = SPI_FIFO_LSB;
+ chip_info->endian_rx = SPI_FIFO_LSB;
+ chip_info->data_size = MSP_DATA_BITS_32;
+
+ if (spi->max_speed_hz != 0)
+ chip_info->freq = spi->max_speed_hz;
+ else
+ chip_info->freq = SPI_DEFAULT_MAX_SPEED_HZ;
+
+ chip_info->proto_params.clk_phase = SPI_CLK_HALF_CYCLE_DELAY;
+ chip_info->proto_params.clk_pol = SPI_CLK_POL_IDLE_LOW;
+ chip_info->cs_control = null_cs_control;
+
+ return chip_info;
+}
+
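+/**
+ * stm_msp_delay - wait for the MSP controller to go idle
+ * @stm_msp: spi driver private data structure
+ *
+ * Gives the controller a short settling time and then busy-waits until both
+ * the receive and transmit state machines report that they are no longer busy.
+ */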
+static void stm_msp_delay(struct stm_msp *stm_msp)
+{
+ udelay(15);
+
+ while (readl(MSP_FLR(stm_msp->regs)) &
+ (MSP_FLR_MASK_RBUSY | MSP_FLR_MASK_TBUSY))
+ udelay(1);
+}
+
+/**
+ * stm_msp_setup - setup function registered to SPI master framework
+ * @spi: spi device which is requesting setup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. The first time setup is called for a device, this function
+ * initializes the runtime state for that chip and saves it in the device
+ * structure. On subsequent calls it updates the runtime state with the new
+ * chip configuration.
+ */
+static int stm_msp_setup(struct spi_device *spi)
+{
+ struct stm_msp_config_chip *chip_info;
+ struct chip_data *curr_cfg;
+ struct spi_master *master;
+ int status = 0;
+ u16 sckdiv = 0;
+ s16 bus_num = 0;
+ struct stm_msp *stm_msp = spi_master_get_devdata(spi->master);
+ struct msp_regs *msp_regs;
+ master = stm_msp->master;
+ bus_num = master->bus_num - 1;
+
+ /* Get controller data */
+ chip_info = spi->controller_data;
+ /* Get controller_state */
+ curr_cfg = spi_get_ctldata(spi);
+
+ if (curr_cfg == NULL) {
+ curr_cfg = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+
+ if (!curr_cfg) {
+ dev_err(&stm_msp->adev->dev,
+ "setup - cannot allocate controller state");
+ return -ENOMEM;
+ }
+
+ curr_cfg->chip_id = spi->chip_select;
+ curr_cfg->ctr_regs = kzalloc(sizeof(struct msp_regs),
+ GFP_KERNEL);
+
+ if (curr_cfg->ctr_regs == NULL) {
+ dev_err(&stm_msp->adev->dev,
+ "setup - cannot allocate mem for regs\n");
+ status = -ENOMEM;
+ goto err_first_setup;
+ }
+
+ dev_dbg(&stm_msp->adev->dev,
+ "chip Id = %d\n", curr_cfg->chip_id);
+
+ if (chip_info == NULL) {
+ chip_info = allocate_default_msp_chip_cfg(spi);
+
+ if (!chip_info) {
+ dev_err(&stm_msp->adev->dev,
+ "setup - cannot allocate cntlr data");
+ status = -ENOMEM;
+ goto err_first_setup;
+ }
+
+ spi->controller_data = chip_info;
+ }
+ }
+
+ /* Pointer back to the SPI device */
+ chip_info->dev = &spi->dev;
+
+ if (chip_info->freq == 0) {
+ /* Calculate Specific Freq. */
+ if ((MSP_INTERNAL_CLK == chip_info->clk_freq.clk_src) ||
+ (MSP_EXTERNAL_CLK == chip_info->clk_freq.clk_src)) {
+ sckdiv = chip_info->clk_freq.sckdiv;
+ } else {
+ status = -EINVAL;
+ dev_err(&stm_msp->adev->dev,
+ "setup - controller clock data is incorrect\n");
+ goto err_config_params;
+ }
+ } else {
+ /* Calculate Effective Freq. */
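+ /*
+ * sckdiv is chosen so that DEFAULT_MSP_CLK / (sckdiv + 1)
+ * approximates the requested rate: e.g. a requested 1 MHz gives
+ * sckdiv = 48000000 / 1000000 - 1 = 47. Requests below roughly
+ * 47 kHz would need sckdiv above MAX_SCKDIV (1023) and are
+ * clamped below.
+ */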
+ sckdiv = (DEFAULT_MSP_CLK / (chip_info->freq)) - 1;
+
+ if (sckdiv > MAX_SCKDIV) {
+ dev_dbg(&stm_msp->adev->dev,
+ "SPI: cannot set frequency below ~47 kHz, "
+ "clamping to the lowest supported rate\n");
+ sckdiv = MAX_SCKDIV;
+ }
+ }
+
+ status = verify_msp_controller_parameters(chip_info);
+
+ if (status) {
+ dev_err(&stm_msp->adev->dev,
+ "setup - controller data is incorrect");
+ goto err_config_params;
+ }
+
+ /* Now set controller state based on controller data */
+ curr_cfg->xfer_type = chip_info->com_mode;
+ curr_cfg->cs_control = chip_info->cs_control;
+ curr_cfg->delay = stm_msp_delay;
+
+ curr_cfg->null_write = stm_msp_null_writer;
+ curr_cfg->null_read = stm_msp_null_reader;
+
+ if (chip_info->data_size <= MSP_DATA_BITS_8) {
+ dev_dbg(&stm_msp->adev->dev, "Less than 8 bits per word...\n");
+
+ curr_cfg->n_bytes = 1;
+ curr_cfg->read = stm_msp_u8_reader;
+ curr_cfg->write = stm_msp_u8_writer;
+ } else if (chip_info->data_size <= MSP_DATA_BITS_16) {
+ dev_dbg(&stm_msp->adev->dev, "Less than 16 bits per word...\n");
+
+ curr_cfg->n_bytes = 2;
+ curr_cfg->read = stm_msp_u16_reader;
+ curr_cfg->write = stm_msp_u16_writer;
+ } else {
+ dev_dbg(&stm_msp->adev->dev, "Less than 32 bits per word...\n");
+
+ curr_cfg->n_bytes = 4;
+ curr_cfg->read = stm_msp_u32_reader;
+ curr_cfg->write = stm_msp_u32_writer;
+ }
+
+ /* Now initialize all register settings reqd. for this chip */
+
+ msp_regs = (struct msp_regs *)(curr_cfg->ctr_regs);
+ msp_regs->gcr = 0x0;
+ msp_regs->tcf = 0x0;
+ msp_regs->rcf = 0x0;
+ msp_regs->srg = 0x0;
+ msp_regs->dmacr = 0x0;
+
+ MSP_WBITS(msp_regs->dmacr, 0x0, MSP_DMACR_MASK_RDMAE, 0);
+ MSP_WBITS(msp_regs->dmacr, 0x0, MSP_DMACR_MASK_TDMAE, 1);
+
+ /* GCR Reg Config */
+
+ MSP_WBITS(msp_regs->gcr,
+ MSP_RECEIVER_DISABLED, MSP_GCR_MASK_RXEN, 0);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_RX_FIFO_ENABLED, MSP_GCR_MASK_RFFEN, 1);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TRANSMITTER_DISABLED, MSP_GCR_MASK_TXEN, 8);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TX_FIFO_ENABLED, MSP_GCR_MASK_TFFEN, 9);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TX_FRAME_SYNC_POL_LOW, MSP_GCR_MASK_TFSPOL, 10);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TX_FRAME_SYNC_INT, MSP_GCR_MASK_TFSSEL, 11);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TRANSMIT_DATA_WITH_DELAY, MSP_GCR_MASK_TXDDL, 15);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_SAMPLE_RATE_GEN_ENABLE, MSP_GCR_MASK_SGEN, 16);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_CLOCK_INTERNAL, MSP_GCR_MASK_SCKSEL, 18);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_FRAME_GEN_ENABLE, MSP_GCR_MASK_FGEN, 20);
+ MSP_WBITS(msp_regs->gcr,
+ SPI_BURST_MODE_DISABLE, MSP_GCR_MASK_SPIBME, 23);
+
+ if (chip_info->lbm == SPI_LOOPBACK_ENABLED)
+ MSP_WBITS(msp_regs->gcr,
+ MSP_LOOPBACK_ENABLED, MSP_GCR_MASK_LBM, 7);
+ else
+ MSP_WBITS(msp_regs->gcr,
+ MSP_LOOPBACK_DISABLED, MSP_GCR_MASK_LBM, 7);
+
+ if (chip_info->hierarchy == SPI_MASTER)
+ MSP_WBITS(msp_regs->gcr,
+ MSP_IS_SPI_MASTER, MSP_GCR_MASK_TCKSEL, 14);
+ else
+ MSP_WBITS(msp_regs->gcr,
+ MSP_IS_SPI_SLAVE, MSP_GCR_MASK_TCKSEL, 14);
+
+ if (chip_info->proto_params.clk_phase == SPI_CLK_ZERO_CYCLE_DELAY)
+ MSP_WBITS(msp_regs->gcr,
+ MSP_SPI_PHASE_ZERO_CYCLE_DELAY,
+ MSP_GCR_MASK_SPICKM, 21);
+ else
+ MSP_WBITS(msp_regs->gcr,
+ MSP_SPI_PHASE_HALF_CYCLE_DELAY,
+ MSP_GCR_MASK_SPICKM, 21);
+
+ if (chip_info->proto_params.clk_pol == SPI_CLK_POL_IDLE_HIGH)
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TX_CLOCK_POL_HIGH, MSP_GCR_MASK_TCKPOL, 13);
+ else
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TX_CLOCK_POL_LOW, MSP_GCR_MASK_TCKPOL, 13);
+
+ /* RCF Reg Config */
+ MSP_WBITS(msp_regs->rcf,
+ MSP_IGNORE_RX_FRAME_SYNC_PULSE, MSP_RCF_MASK_RFSIG, 15);
+ MSP_WBITS(msp_regs->rcf,
+ MSP_RX_1BIT_DATA_DELAY, MSP_RCF_MASK_RDDLY, 13);
+
+ if (chip_info->endian_rx == SPI_FIFO_LSB)
+ MSP_WBITS(msp_regs->rcf,
+ MSP_RX_ENDIANESS_LSB, MSP_RCF_MASK_RENDN, 12);
+ else
+ MSP_WBITS(msp_regs->rcf,
+ MSP_RX_ENDIANESS_MSB, MSP_RCF_MASK_RENDN, 12);
+
+ MSP_WBITS(msp_regs->rcf, chip_info->data_size, MSP_RCF_MASK_RP1ELEN, 0);
+
+ /* TCF Reg Config */
+
+ MSP_WBITS(msp_regs->tcf,
+ MSP_IGNORE_TX_FRAME_SYNC_PULSE, MSP_TCF_MASK_TFSIG, 15);
+ MSP_WBITS(msp_regs->tcf,
+ MSP_TX_1BIT_DATA_DELAY, MSP_TCF_MASK_TDDLY, 13);
+
+ if (chip_info->endian_tx == SPI_FIFO_LSB)
+ MSP_WBITS(msp_regs->tcf,
+ MSP_TX_ENDIANESS_LSB, MSP_TCF_MASK_TENDN, 12);
+ else
+ MSP_WBITS(msp_regs->tcf,
+ MSP_TX_ENDIANESS_MSB, MSP_TCF_MASK_TENDN, 12);
+ MSP_WBITS(msp_regs->tcf, chip_info->data_size, MSP_TCF_MASK_TP1ELEN, 0);
+
+ /* SRG Reg Config */
+
+ MSP_WBITS(msp_regs->srg, sckdiv, MSP_SRG_MASK_SCKDIV, 0);
+
+ /* Save controller_state */
+ spi_set_ctldata(spi, curr_cfg);
+
+ return status;
+
+err_config_params:
+err_first_setup:
+
+ kfree(curr_cfg);
+ return status;
+}
+
+static int __init stm_msp_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct stm_msp_controller *platform_info = adev->dev.platform_data;
+ struct spi_master *master;
+ struct stm_msp *stm_msp = NULL; /* Data for this driver */
+ int irq, status = 0;
+
+ dev_info(dev, "STM MSP driver, device ID: 0x%08x\n", adev->periphid);
+
+ if (platform_info == NULL) {
+ dev_err(dev, "probe - no platform data supplied\n");
+ status = -ENODEV;
+ goto err_no_pdata;
+ }
+
+ /* Allocate master with space for data */
+ master = spi_alloc_master(dev, sizeof(struct stm_msp));
+
+ if (master == NULL) {
+ dev_err(dev, "probe - cannot alloc spi_master\n");
+ status = -ENOMEM;
+ goto err_no_mem;
+ }
+
+ stm_msp = spi_master_get_devdata(master);
+ stm_msp->master = master;
+ stm_msp->master_info = platform_info;
+ stm_msp->adev = adev;
+
+ stm_msp->clk = clk_get(&adev->dev, NULL);
+
+ if (IS_ERR(stm_msp->clk)) {
+ dev_err(dev, "probe - cannot find clock\n");
+ status = PTR_ERR(stm_msp->clk);
+ goto free_master;
+ }
+
+ /* Fetch the Resources, using platform data */
+ status = amba_request_regions(adev, NULL);
+
+ if (status) {
+ status = -ENODEV;
+ goto disable_clk;
+ }
+
+ /* Get Hold of Device Register Area... */
+ stm_msp->regs = ioremap(adev->res.start, resource_size(&adev->res));
+
+ if (stm_msp->regs == NULL) {
+ status = -ENODEV;
+ goto disable_clk;
+ }
+
+ irq = adev->irq[0];
+
+ if (irq <= 0) {
+ status = -ENODEV;
+ goto err_no_iores;
+ }
+
+ stm_msp_controller_cmd(stm_msp, LOAD_DEFAULT_CONFIG);
+
+ /* Required Info for an SPI controller */
+ /* Bus Number Which Assigned to this SPI controller on this board */
+ master->bus_num = (u16) platform_info->id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->setup = stm_msp_setup;
+ master->cleanup = (void *)stm_msp_cleanup;
+ master->transfer = stm_msp_transfer;
+
+ dev_dbg(dev, "BUSNO: %d\n", master->bus_num);
+
+ /* Initialize and start queue */
+ status = init_queue(stm_msp);
+
+ if (status != 0) {
+ dev_err(dev, "probe - problem initializing queue\n");
+ goto err_init_queue;
+ }
+
+ status = start_queue(stm_msp);
+
+ if (status != 0) {
+ dev_err(dev, "probe - problem starting queue\n");
+ goto err_start_queue;
+ }
+
+ amba_set_drvdata(adev, stm_msp);
+
+ dev_dbg(dev, "probe succeded\n");
+ dev_dbg(dev, "Bus No = %d, IRQ Line = %d, Virtual Addr: %x\n",
+ master->bus_num, irq, (u32)(stm_msp->regs));
+
+ status = request_irq(stm_msp->adev->irq[0],
+ stm_msp_interrupt_handler,
+ 0, stm_msp->master_info->device_name,
+ stm_msp);
+
+ if (status < 0) {
+ dev_err(dev, "probe - cannot get IRQ (%d)\n", status);
+ goto err_irq;
+ }
+
+ /* Register with the SPI framework */
+ status = spi_register_master(master);
+
+ if (status != 0) {
+ dev_err(dev, "probe - problem registering spi master\n");
+ goto err_spi_register;
+ }
+
+ return 0;
+
+err_spi_register:
+ free_irq(stm_msp->adev->irq[0], stm_msp);
+err_irq:
+err_init_queue:
+err_start_queue:
+ destroy_queue(stm_msp);
+err_no_iores:
+ iounmap(stm_msp->regs);
+disable_clk:
+ clk_put(stm_msp->clk);
+free_master:
+ spi_master_put(master);
+err_no_mem:
+err_no_pdata:
+ return status;
+}
+
+static int __exit stm_msp_remove(struct amba_device *adev)
+{
+ struct stm_msp *stm_msp = amba_get_drvdata(adev);
+ int status = 0;
+
+ if (!stm_msp)
+ return 0;
+
+ /* Remove the queue */
+ status = destroy_queue(stm_msp);
+
+ if (status != 0) {
+ dev_err(&adev->dev, "queue remove failed (%d)\n", status);
+ return status;
+ }
+
+ stm_msp_controller_cmd(stm_msp, LOAD_DEFAULT_CONFIG);
+
+ /* Release map resources */
+ iounmap(stm_msp->regs);
+ amba_release_regions(adev);
+ tasklet_disable(&stm_msp->pump_transfers);
+ free_irq(stm_msp->adev->irq[0], stm_msp);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(stm_msp->master);
+
+ clk_put(stm_msp->clk);
+
+ /* Prevent double remove */
+ amba_set_drvdata(adev, NULL);
+ dev_dbg(&adev->dev, "remove succeded\n");
+ return status;
+}
+
+#ifdef CONFIG_PM
+
+/**
+ * stm_msp_suspend - MSP suspend function registered with PM framework.
+ * @dev: Reference to amba device structure of the device
+ * @state: power mgmt state.
+ *
+ * This function is invoked when the system is going into sleep, called
+ * by the power management framework of the linux kernel.
+ */
+static int stm_msp_suspend(struct amba_device *adev, pm_message_t state)
+{
+ struct stm_msp *stm_msp = amba_get_drvdata(adev);
+ int status = 0;
+
+ status = stop_queue(stm_msp);
+
+ if (status != 0) {
+ dev_warn(&adev->dev, "suspend cannot stop queue\n");
+ return status;
+ }
+
+ dev_dbg(&adev->dev, "suspended\n");
+ return 0;
+}
+
+/**
+ * stm_msp_resume - MSP Resume function registered with PM framework.
+ * @dev: Reference to amba device structure of the device
+ *
+ * This function is invoked when the system is coming out of sleep, called
+ * by the power management framework of the linux kernel.
+ */
+static int stm_msp_resume(struct amba_device *adev)
+{
+ struct stm_msp *stm_msp = amba_get_drvdata(adev);
+ int status = 0;
+
+ /* Start the queue running */
+ status = start_queue(stm_msp);
+
+ if (status)
+ dev_err(&adev->dev, "problem starting queue (%d)\n", status);
+ else
+ dev_dbg(&adev->dev, "resumed\n");
+
+ return status;
+}
+
+#else
+#define stm_msp_suspend NULL
+#define stm_msp_resume NULL
+#endif /* CONFIG_PM */
+
+static struct amba_id stm_msp_ids[] = {
+ {
+ .id = 0x00280021,
+ .mask = 0x00ffffff,
+ },
+ {
+ 0,
+ 0,
+ },
+};
+
+static struct amba_driver __refdata stm_msp_driver = {
+ .drv = {
+ .name = "MSP",
+ },
+ .id_table = stm_msp_ids,
+ .probe = stm_msp_probe,
+ .remove = __exit_p(stm_msp_remove),
+ .resume = stm_msp_resume,
+ .suspend = stm_msp_suspend,
+};
+
+static int __init stm_msp_init(void)
+{
+ return amba_driver_register(&stm_msp_driver);
+}
+
+static void __exit stm_msp_exit(void)
+{
+ amba_driver_unregister(&stm_msp_driver);
+}
+
+module_init(stm_msp_init);
+module_exit(stm_msp_exit);
+
+MODULE_AUTHOR("Sachin Verma <sachin.verma@st.com>");
+MODULE_DESCRIPTION("STM MSP (SPI protocol) Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 9e634724978..350f6df2bf4 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,6 +24,16 @@ menuconfig STAGING
if STAGING
+config AB5500_SIM
+ bool "ST-Ericsson AB5500 SIM Interface driver"
+ depends on AB5500_CORE
+ help
+ The SIM Interface driver provides an interface for configuring
+ various parameters of the AB5500 SIM level shifter. Supported
+ features are: configuring the pull-up on the SIM lines,
+ configuring the operation mode, and notification of SIM
+ insert/extract interrupts.
+
source "drivers/staging/serial/Kconfig"
source "drivers/staging/et131x/Kconfig"
@@ -38,6 +48,8 @@ source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/echo/Kconfig"
+source "drivers/staging/cg2900/Kconfig"
+
source "drivers/staging/comedi/Kconfig"
source "drivers/staging/olpc_dcon/Kconfig"
@@ -128,4 +140,12 @@ source "drivers/staging/omapdrm/Kconfig"
source "drivers/staging/android/Kconfig"
+source "drivers/staging/cw1200/Kconfig"
+
+source "drivers/staging/mmio/Kconfig"
+
+source "drivers/staging/nmf-cm/Kconfig"
+
+source "drivers/staging/camera_flash/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 943e1483075..f1f5eaad334 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_USBIP_CORE) += usbip/
obj-$(CONFIG_W35UND) += winbond/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_ECHO) += echo/
+obj-$(CONFIG_CG2900) += cg2900/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
obj-$(CONFIG_ASUS_OLED) += asus_oled/
@@ -55,3 +56,8 @@ obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_ANDROID) += android/
+obj-$(CONFIG_CW1200) += cw1200/
+obj-$(CONFIG_U8500_MMIO) += mmio/
+obj-$(CONFIG_U8500_FLASH) += camera_flash/
+obj-$(CONFIG_U8500_CM) += nmf-cm/
+obj-$(CONFIG_AB5500_SIM) += ab5500_sim/
diff --git a/drivers/staging/ab5500_sim/Makefile b/drivers/staging/ab5500_sim/Makefile
new file mode 100644
index 00000000000..520717e4dd7
--- /dev/null
+++ b/drivers/staging/ab5500_sim/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AB5500_SIM) += ab5500-sim.o
diff --git a/drivers/staging/ab5500_sim/ab5500-sim.c b/drivers/staging/ab5500_sim/ab5500-sim.c
new file mode 100644
index 00000000000..d222a22ed24
--- /dev/null
+++ b/drivers/staging/ab5500_sim/ab5500-sim.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) ST Ericsson SA 2010
+ *
+ * Sim Interface driver for AB5500
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Bibek Basu <bibek.basu@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#define USIM_SUP2_REG 0x13
+#define USIM_SUP_REG 0x14
+#define USIM_SIMCTRL_REG 0x17
+#define USIM_SIMCTRL2_REG 0x18
+#define USIM_USBUICC_REG 0x19
+#define USIM_USBUICC2_REG 0x20
+#define SIM_DAT_PULLUP_10K 0x0F
+#define SIM_LDO_1_8V 1875000
+#define SIM_LDO_2_8V 2800000
+#define SIM_LDO_2_9V 2900000
+
+enum shift {
+ SHIFT0,
+ SHIFT1,
+ SHIFT2,
+ SHIFT3,
+ SHIFT4,
+ SHIFT5,
+ SHIFT6,
+ SHIFT7,
+};
+
+enum mask {
+ MASK1 = 1,
+ MASK3 = 3,
+ MASK7 = 7,
+};
+
+enum sim_mode {
+ OFF_MODE,
+ LOW_PWR,
+ PWRCTRL,
+ FULL_PWR,
+};
+/**
+ * struct ab5500_sim - ab5500 Sim Interface device information
+ * @dev: pointer to the structure device
+ * @lock: mutex lock
+ * @sim_int_status: Sim presence status
+ * @irq_base: Base of the two irqs
+ */
+struct ab5500_sim {
+ struct device *dev;
+ struct mutex lock;
+ bool sim_int_status;
+ u8 irq_base;
+};
+
+/* Exposure to the sysfs interface */
+static ssize_t ab5500_sim_weak_pulldforce(struct device *dev,
+ struct device_attribute *attr,
+ const char *user_buf, size_t count)
+{
+ unsigned long user_val;
+ int err;
+ bool enable;
+
+ err = strict_strtoul(user_buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ enable = user_val ? true : false;
+ err = abx500_mask_and_set(dev, AB5500_BANK_SIM_USBSIM,
+ USIM_USBUICC2_REG, MASK1 << SHIFT5, user_val << SHIFT5);
+ if (err)
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t ab5500_sim_load_sel(struct device *dev,
+ struct device_attribute *attr,
+ const char *user_buf, size_t count)
+{
+ unsigned long user_val;
+ int err;
+ bool enable;
+
+ err = strict_strtoul(user_buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ enable = user_val ? true : false;
+ err = abx500_mask_and_set(dev, AB5500_BANK_SIM_USBSIM,
+ USIM_USBUICC_REG, MASK1 << SHIFT1, user_val << SHIFT1);
+ if (err)
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t ab5500_sim_mode_sel(struct device *dev,
+ struct device_attribute *attr,
+ const char *user_buf, size_t count)
+{
+ unsigned long user_val;
+ int err;
+
+ err = strict_strtoul(user_buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ err = abx500_mask_and_set(dev, AB5500_BANK_SIM_USBSIM,
+ USIM_SIMCTRL2_REG, MASK3 << SHIFT4, user_val << SHIFT4);
+ if (err)
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t ab5500_sim_dat_pullup(struct device *dev,
+ struct device_attribute *attr,
+ const char *user_buf, size_t count)
+{
+ unsigned long user_val;
+ int err;
+
+ err = strict_strtoul(user_buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ err = abx500_mask_and_set(dev, AB5500_BANK_SIM_USBSIM,
+ USIM_SIMCTRL_REG, MASK7, user_val);
+ if (err)
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t ab5500_sim_enable_pullup(struct device *dev,
+ struct device_attribute *attr,
+ const char *user_buf, size_t count)
+{
+ unsigned long user_val;
+ int err;
+ bool enable;
+
+ err = strict_strtoul(user_buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ enable = user_val ? true : false;
+ err = abx500_mask_and_set(dev, AB5500_BANK_SIM_USBSIM,
+ USIM_SIMCTRL_REG, MASK1 << SHIFT3, enable << SHIFT3);
+ if (err)
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t ab5500_simoff_int(struct device *dev,
+ struct device_attribute *devattr, char *user_buf)
+{
+ struct ab5500_sim *di = dev_get_drvdata(dev);
+ int len;
+
+ mutex_lock(&di->lock);
+ len = sprintf(user_buf, "%d\n", di->sim_int_status);
+ mutex_unlock(&di->lock);
+ return len;
+}
+
+static DEVICE_ATTR(enable_pullup, S_IWUSR, NULL, ab5500_sim_enable_pullup);
+static DEVICE_ATTR(dat_pullup, S_IWUSR, NULL, ab5500_sim_dat_pullup);
+static DEVICE_ATTR(mode_sel, S_IWUSR, NULL, ab5500_sim_mode_sel);
+static DEVICE_ATTR(load_sel, S_IWUSR, NULL, ab5500_sim_load_sel);
+static DEVICE_ATTR(weak_pulldforce, S_IWUSR, NULL, ab5500_sim_weak_pulldforce);
+static DEVICE_ATTR(simoff_int, S_IRUGO, ab5500_simoff_int, NULL);
+
+static struct attribute *ab5500_sim_attributes[] = {
+ &dev_attr_enable_pullup.attr,
+ &dev_attr_dat_pullup.attr,
+ &dev_attr_mode_sel.attr,
+ &dev_attr_load_sel.attr,
+ &dev_attr_weak_pulldforce.attr,
+ &dev_attr_simoff_int.attr,
+ NULL
+};
+
+static const struct attribute_group ab5500sim_attr_grp = {
+ .attrs = ab5500_sim_attributes,
+};
+
+static irqreturn_t ab5500_sim_irq_handler(int irq, void *irq_data)
+{
+ struct platform_device *pdev = irq_data;
+ struct ab5500_sim *data = platform_get_drvdata(pdev);
+
+ if (irq == data->irq_base)
+ data->sim_int_status = true;
+ else
+ data->sim_int_status = false;
+ sysfs_notify(&pdev->dev.kobj, NULL, "simoff_int");
+
+ return IRQ_HANDLED;
+}
+
+static int __devexit ab5500_sim_remove(struct platform_device *pdev)
+{
+ struct ab5500_sim *di = platform_get_drvdata(pdev);
+ int irq = platform_get_irq_byname(pdev, "SIMOFF");
+
+ if (irq >= 0) {
+ free_irq(irq, pdev);
+ irq++;
+ free_irq(irq, pdev);
+ }
+ sysfs_remove_group(&pdev->dev.kobj, &ab5500sim_attr_grp);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab5500_sim_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int irq;
+ struct ab5500_sim *di =
+ kzalloc(sizeof(struct ab5500_sim), GFP_KERNEL);
+ if (!di) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ dev_info(&pdev->dev, "ab5500_sim_driver PROBE\n");
+ irq = platform_get_irq_byname(pdev, "SIMOFF");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Get irq by name failed\n");
+ ret = irq;
+ goto exit;
+ }
+ di->irq_base = irq;
+ di->dev = &pdev->dev;
+ mutex_init(&di->lock);
+ platform_set_drvdata(pdev, di);
+ /* sysfs interface to configure sim reg from user space */
+ if (sysfs_create_group(&pdev->dev.kobj, &ab5500sim_attr_grp) < 0) {
+ dev_err(&pdev->dev, " Failed creating sysfs group\n");
+ ret = -ENOMEM;
+ goto error_sysfs;
+ }
+ ret = request_threaded_irq(irq, NULL, ab5500_sim_irq_handler,
+ IRQF_NO_SUSPEND , "ab5500-sim", pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret);
+ goto error_irq;
+ }
+ /* the next consecutive irq signals sim removal, falling edge */
+ irq = irq + 1;
+ ret = request_threaded_irq(irq, NULL, ab5500_sim_irq_handler,
+ IRQF_NO_SUSPEND , "ab5500-sim", pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret);
+ free_irq(--irq, pdev);
+ goto error_irq;
+ }
+ return ret;
+error_irq:
+ sysfs_remove_group(&pdev->dev.kobj, &ab5500sim_attr_grp);
+error_sysfs:
+ platform_set_drvdata(pdev, NULL);
+exit:
+ kfree(di);
+error_alloc:
+ return ret;
+}
+
+static struct platform_driver ab5500_sim_driver = {
+ .probe = ab5500_sim_probe,
+ .remove = __devexit_p(ab5500_sim_remove),
+ .driver = {
+ .name = "ab5500-sim",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_sim_init(void)
+{
+ return platform_driver_register(&ab5500_sim_driver);
+}
+
+static void __exit ab5500_sim_exit(void)
+{
+ platform_driver_unregister(&ab5500_sim_driver);
+}
+
+module_init(ab5500_sim_init);
+module_exit(ab5500_sim_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bibek Basu");
+MODULE_ALIAS("platform:ab5500-sim");
+MODULE_DESCRIPTION("AB5500 sim interface driver");
diff --git a/drivers/staging/ab5500_sim/sysfs-sim b/drivers/staging/ab5500_sim/sysfs-sim
new file mode 100644
index 00000000000..b809b21e39e
--- /dev/null
+++ b/drivers/staging/ab5500_sim/sysfs-sim
@@ -0,0 +1,83 @@
+What: /sys/devices/platform/ab5500-core.0/ab5500-sim.4/
+Date: June 2011
+KernelVersion: 2.6.35
+Contact: Bibek Basu <bibek.basu@stericsson.com>
+Description:
+ The /sys/devices/.../ab5500-sim.4 directory contains attributes
+ allowing user space to check and configure the ab5500 SIM level
+ shifter interface characteristics for communication with the SIM card.
+
+What: /sys/devices/.../enable_pullup
+Date: June 2011
+KernelVersion: 2.6.35
+Contact: Bibek Basu <bibek.basu@stericsson.com>
+Description:
+ The /sys/devices/.../ab5500-sim.4/enable_pullup attribute allows
+ user space to configure whether the internal pull-up on the SIMIO
+ lines is enabled or disabled. Write 1 to the file to enable it
+ and 0 to disable it.
+
+
+What: /sys/devices/.../dat_pullup
+Date: June 2011
+KernelVersion: 2.6.35
+Contact: Bibek Basu <bibek.basu@stericsson.com>
+Description:
+ The /sys/devices/.../ab5500-sim.4/dat_pullup attribute allows
+ user space to configure the resistance value of the internal
+ pull-up on the SIMIO lines. The following values can be written to the file:
+ 0 SIM_DAT pull-up disabled
+ 1 SIM_DAT pull-up 4kOhm
+ 2 SIM_DAT pull-up 5kOhm
+ 3 SIM_DAT pull-up 6kOhm
+ 4 SIM_DAT pull-up 7kOhm
+ 5 SIM_DAT pull-up 8kOhm
+ 6 SIM_DAT pull-up 9kOhm
+ 7 SIM_DAT pull-up 10kOhm
+
+What: /sys/devices/.../mode_sel
+Date: June 2011
+KernelVersion: 2.6.35
+Contact: Bibek Basu <bibek.basu@stericsson.com>
+Description:
+ The /sys/devices/.../ab5500-sim.4/mode_sel attribute allows
+ user space to configure the mode in which the level shifter
+ will operate. The following values can be written to the file:
+ 0 TG mode and LI mode off
+ 1 TG mode on
+ 2 LI mode on
+ 3 TG mode and LI mode off
+
+What: /sys/devices/.../load_sel
+Date: June 2011
+KernelVersion: 2.6.35
+Contact: Bibek Basu <bibek.basu@stericsson.com>
+Description:
+ The /sys/devices/.../ab5500-sim.4/load_sel attribute allows
+ user space to configure the load on the USBUICC lines.
+ The following values can be written to the file:
+ 0 Data line load < 21pF
+ 1 Data line load 21-30pF
+
+What: /sys/devices/.../weak_pulldforce
+Date: June 2011
+KernelVersion: 2.6.35
+Contact: Bibek Basu <bibek.basu@stericsson.com>
+Description:
+ The /sys/devices/.../ab5500-sim.4/weak_pulldforce attribute allows
+ user space to configure the weak pull-down on the USBUICC lines.
+ The following values can be written to the file:
+ 0 USB-UICC data lines weak pull down active
+ 1 USB-UICC data lines weak pull down not active
+
+What: /sys/devices/.../simoff_int
+Date: June 2011
+KernelVersion: 2.6.35
+Contact: Bibek Basu <bibek.basu@stericsson.com>
+Description:
+ The /sys/devices/.../ab5500-sim.4/simoff_int attribute allows
+ user space to poll this file and get notified when a SIM
+ hot swap has happened. A zero means the SIM was extracted and a
+ one means it was inserted.
+
+
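A minimal user-space sketch of driving the attributes documented above. The sysfs path is taken from the What: entries and may differ between boards; the helper names here are purely illustrative and not part of the patch.

#include <stdio.h>

#define SIM_SYSFS "/sys/devices/platform/ab5500-core.0/ab5500-sim.4"

/* write a single value string to one of the attributes above */
static int write_attr(const char *name, const char *val)
{
	char path[256];
	FILE *f;
	int ret = 0;

	snprintf(path, sizeof(path), "%s/%s", SIM_SYSFS, name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	if (fputs(val, f) < 0)
		ret = -1;
	fclose(f);
	return ret;
}

int main(void)
{
	/* 7 selects the 10 kOhm SIM_DAT pull-up (see the dat_pullup table) */
	if (write_attr("dat_pullup", "7"))
		return 1;
	/* enable the internal pull-up on the SIMIO lines */
	return write_attr("enable_pullup", "1") ? 1 : 0;
}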
diff --git a/drivers/staging/camera_flash/Kconfig b/drivers/staging/camera_flash/Kconfig
new file mode 100644
index 00000000000..187217d763f
--- /dev/null
+++ b/drivers/staging/camera_flash/Kconfig
@@ -0,0 +1,7 @@
+
+config U8500_FLASH
+ bool "ST-Ericsson Flash (Camera) Driver"
+ depends on ARCH_U8500
+ help
+ Adds ST-Ericsson Flash (Camera) Driver
+
diff --git a/drivers/staging/camera_flash/Makefile b/drivers/staging/camera_flash/Makefile
new file mode 100644
index 00000000000..bf2f5aa2dd3
--- /dev/null
+++ b/drivers/staging/camera_flash/Makefile
@@ -0,0 +1,5 @@
+export ADP1653_SUPPORT
+EXTRA_CFLAGS += -DADP1653_SUPPORT
+obj-$(CONFIG_U8500_FLASH) := camera_flash.o
+camera_flash-y := flash_common.o
+camera_flash-y += adp1653.o
diff --git a/drivers/staging/camera_flash/adp1653.c b/drivers/staging/camera_flash/adp1653.c
new file mode 100644
index 00000000000..f7483eac11f
--- /dev/null
+++ b/drivers/staging/camera_flash/adp1653.c
@@ -0,0 +1,537 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * adp1653: Driver for the ADP1653 HPLED flash driver chip. This driver
+ * currently supports only the I2C interface; the 2-bit interface is not supported.
+ * Author: Pankaj Chauhan/pankaj.chauhan@stericsson.com for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <asm/mach-types.h>
+#include "flash_common.h"
+#include "adp1653.h"
+#include "camera_flash.h"
+#include "adp1653_plat.h"
+
+/* This data is platform specific to the 8500 href-v1 platform;
+ * ideally it should be supplied by platform code.
+ */
+
+static int adapter_i2c2 = 2;
+static int flash_position = 0;
+module_param(adapter_i2c2, int, S_IRUGO);
+MODULE_PARM_DESC(adapter_i2c2, "use the given I2C adapter to communicate with the chip");
+module_param(flash_position, int, S_IRUGO);
+MODULE_PARM_DESC(flash_position, "the position of the flash chip (0=PRIMARY, 1=SECONDARY)");
+
+
+int __flash_gpio_to_irq(int gpio)
+{
+
+ return NOMADIK_GPIO_TO_IRQ(gpio);
+}
+
+#define DEBUG_LOG(...) printk(KERN_DEBUG "Adp1653 flash driver: " __VA_ARGS__)
+
+#define ADP1653_SUPPORTED_MODES (FLASH_MODE_VIDEO_LED | FLASH_MODE_STILL_LED | \
+ FLASH_MODE_STILL_LED_EXTERNAL_STROBE | \
+ FLASH_MODE_AF_ASSISTANT | FLASH_MODE_INDICATOR)
+
+#define ADP1653_SELFTEST_SUPPORTED_MODES (FLASH_SELFTEST_CONNECTION | FLASH_SELFTEST_FLASH_WITH_STROBE | \
+ FLASH_SELFTEST_VIDEO_LIGHT | FLASH_SELFTEST_AF_LIGHT | FLASH_SELFTEST_INDICATOR | FLASH_SELFTEST_TORCH_LIGHT)
+
+static int adp1653_trigger_strobe(void *priv_data, int enable);
+
+static int adp1653_get_modes(void *priv_data,unsigned long *modes)
+{
+ int err;
+ struct adp1653_priv_data *priv_p = (struct adp1653_priv_data *)priv_data;
+ err = i2c_smbus_read_byte_data(priv_p->i2c_client, FAULT_STATUS_REG);
+ if (err)
+ *modes = 0x0;
+ else
+ *modes = ADP1653_SUPPORTED_MODES;
+ return 0;
+}
+
+static int adp1653_get_mode_details(void *priv_data, unsigned long mode,
+struct flash_mode_details *details_p)
+{
+ int err = 0;
+ memset(details_p,0,sizeof(struct flash_mode_details));
+
+ details_p->led_type = 2;
+
+ /* Still LED settings*/
+ details_p->nbFaultRegisters = 1;
+ if(mode & (FLASH_MODE_STILL_LED | FLASH_MODE_STILL_LED_EXTERNAL_STROBE)){
+ details_p->max_intensity_uAmp = FLASH_MAX_INTENSITY;
+ details_p->min_intensity_uAmp = FLASH_MIN_INTENSITY;
+ details_p->max_strobe_duration_uSecs = FLASH_MAX_STROBE_DURATION;
+ details_p->feature_bitmap = INTENSITY_PROGRAMMABLE | DURATION_PROGRAMMABLE;
+ goto out;
+ }
+ /*Video LED settings*/
+ if(mode & FLASH_MODE_VIDEO_LED){
+ details_p->max_intensity_uAmp = TORCH_MAX_INTENSITY;
+ details_p->min_intensity_uAmp = TORCH_MIN_INTENSITY;
+ details_p->max_strobe_duration_uSecs = 0;
+ details_p->feature_bitmap = INTENSITY_PROGRAMMABLE;
+ goto out;
+ }
+ /*Privacy Indicator settings */
+ if(mode & FLASH_MODE_INDICATOR){
+ details_p->max_intensity_uAmp = ILED_MAX_INTENSITY;
+ details_p->min_intensity_uAmp = ILED_MIN_INTENSITY;
+ details_p->max_strobe_duration_uSecs = 0;
+ details_p->feature_bitmap = INTENSITY_PROGRAMMABLE;
+ goto out;
+ }
+ DEBUG_LOG("Mode %lx, not supported\n",mode);
+ err = -EINVAL;
+out:
+ return err;
+}
+
+static int adp1653_enable_flash_mode(void *priv_data,
+ unsigned long mode, int enable)
+{
+ int err = 0;
+ struct adp1653_priv_data *priv_p = (struct adp1653_priv_data *)priv_data;
+
+ if(enable){
+
+ if((!(mode & ADP1653_SUPPORTED_MODES)) &&
+ (mode != FLASH_MODE_NONE)) {
+ DEBUG_LOG("Unsupported mode %lx\n",mode);
+ err = -EINVAL;
+ goto out;
+ }
+ /* Nothing to be done when enabling; just record the current mode
+ * and return. Enable/disable could be handled here, but the chip
+ * is enabled in probe and kept on.
+ */
+ adp1653_trigger_strobe(priv_p,0);
+ priv_p->curr_mode = mode;
+ }else{
+ adp1653_trigger_strobe(priv_p,0);
+ priv_p->curr_mode =0;
+ }
+out:
+ return err;
+}
+
+static int adp1653_configure_flash_mode(void *priv,unsigned long mode,
+struct flash_mode_params *params_p)
+{
+ int err = 0;
+ unsigned char intensity_code;
+ struct adp1653_priv_data *priv_p = (struct adp1653_priv_data *)priv;
+
+ if(!(mode & ADP1653_SUPPORTED_MODES)){
+ DEBUG_LOG("Mode %lx not supported\n",mode);
+ err = -EINVAL;
+ goto out;
+ }
+ switch(mode){
+ case FLASH_MODE_STILL_LED:
+ case FLASH_MODE_STILL_LED_EXTERNAL_STROBE:
+ {
+ FLASH_UAMP_TO_CODE(intensity_code,params_p->intensity_uAmp);
+ if(params_p->duration_uSecs){
+ DURATION_USEC_TO_CODE(priv_p->flash_duration,
+ params_p->duration_uSecs);
+ DEBUG_LOG("Duration %lu, code 0x%x\n",params_p->duration_uSecs,
+ priv_p->flash_duration);
+ priv_p->flash_duration |= TIMER_ENABLE;
+ }else{
+ priv_p->flash_duration = 0;
+ }
+ priv_p->flash_intensity = intensity_code << 3;
+ }
+ break;
+ case FLASH_MODE_VIDEO_LED:
+ {
+ TORCH_UAMP_TO_CODE(intensity_code,params_p->intensity_uAmp);
+ DEBUG_LOG("Torch mode setting intensity 0x%x, current(uA) %lu\n",
+ intensity_code,params_p->intensity_uAmp);
+ priv_p->torch_intensity = intensity_code << 3;
+ }
+ break;
+ case FLASH_MODE_INDICATOR:
+ {
+ ILED_UAMP_TO_CODE(intensity_code,params_p->intensity_uAmp);
+ DEBUG_LOG("ILED setting intensity 0x%x, current(uA) %lu\n",
+ intensity_code,params_p->intensity_uAmp);
+ priv_p->indicator_intensity = intensity_code;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ DEBUG_LOG("Unsupported mode %lx\n",mode);
+ break;
+ }
+
+ if((mode == FLASH_MODE_STILL_LED_EXTERNAL_STROBE) || (mode == FLASH_MODE_STILL_LED))
+ {
+ adp1653_trigger_strobe(priv_p,0);
+ DEBUG_LOG("CONFIG_TIMER_REG : 0x%x\n",priv_p->flash_duration);
+ DEBUG_LOG("OUTPUT_SEL_REG : 0x%x\n",priv_p->flash_intensity);
+
+ /*TimeOut Must be programmed before Intensity*/
+ err = i2c_smbus_write_byte_data(priv_p->i2c_client,CONFIG_TIMER_REG,
+ priv_p->flash_duration);
+ if(err){
+ DEBUG_LOG("I2C: Unsable to write timer config, err %d\n",err);
+ goto out;
+ }
+ err = i2c_smbus_write_byte_data(priv_p->i2c_client,OUTPUT_SEL_REG,
+ priv_p->flash_intensity);
+ if(err){
+ DEBUG_LOG("I2C: Unable to write OUTPUT_SEL_REG , err %d\n",err);
+ goto out;
+ }
+ }
+out:
+ return err;
+}
+
+static int adp1653_set_intensity(struct adp1653_priv_data *priv_p, uint8_t intensity)
+{
+ return i2c_smbus_write_byte_data(priv_p->i2c_client,OUTPUT_SEL_REG,intensity);
+}
+
+static int adp1653_strobe_still_led(struct adp1653_priv_data *priv_p,int enable)
+{
+ int err=0,gpio_val;
+ uint8_t intensity,duration;
+
+ if(enable){
+ intensity = priv_p->flash_intensity;
+ duration = priv_p->flash_duration;
+ gpio_val = 1;
+ }else{
+ intensity = 0;
+ duration = 0;
+ gpio_val = 0;
+ }
+
+ err = adp1653_set_intensity(priv_p,intensity);
+ if(err){
+ DEBUG_LOG("I2C: Unable to write OUTPUT_SEL_REG reg, err %d\n",err);
+ goto out;
+ }
+
+ /*TimeOut Must be programmed before Intensity*/
+ err = i2c_smbus_write_byte_data(priv_p->i2c_client,CONFIG_TIMER_REG,
+ duration);
+ if(err){
+ DEBUG_LOG("I2C: Unsable to write timer config, err %d\n",err);
+ goto out;
+ }
+ err = i2c_smbus_write_byte_data(priv_p->i2c_client,OUTPUT_SEL_REG,intensity);
+ if(err){
+ DEBUG_LOG("I2C: Unable to write OUTPUT_SEL_REG, err %d\n",err);
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static int adp1653_trigger_strobe(void *priv, int enable)
+{
+ int err = 0;
+ uint8_t intensity;
+ struct adp1653_priv_data *priv_p = (struct adp1653_priv_data *)priv;
+
+ switch(priv_p->curr_mode){
+ case FLASH_MODE_STILL_LED:
+ case FLASH_MODE_STILL_LED_EXTERNAL_STROBE:
+ err = adp1653_strobe_still_led(priv_p,enable);
+ break;
+ case FLASH_MODE_VIDEO_LED:
+ {
+ if(enable)
+ intensity = priv_p->torch_intensity;
+ else
+ intensity = 0;
+ err = adp1653_set_intensity(priv_p,intensity);
+ }
+ break;
+ case FLASH_MODE_INDICATOR:
+ {
+ if(enable)
+ intensity = priv_p->indicator_intensity;
+ else
+ intensity =0;
+ err = adp1653_set_intensity(priv_p,intensity);
+ }
+ break;
+ default:
+ DEBUG_LOG("Unsupported mode %lx\n",priv_p->curr_mode);
+ goto out;
+ }
+ if(err){
+ DEBUG_LOG("Unable to enable/disable %d, strobe. Mode %lx, err %d\n",enable,
+ priv_p->curr_mode,err);
+ goto out;
+ }
+ disable_irq(priv_p->i2c_client->irq);
+ if(enable)
+ SET_FLASH_STATUS(priv_p->status,FLASH_STATUS_LIT);
+ else
+ CLR_FLASH_STATUS(priv_p->status,FLASH_STATUS_LIT);
+
+ enable_irq(priv_p->i2c_client->irq);
+
+out:
+ return err;
+}
+#define FLASH_ERR_ALL (FLASH_ERR_OVER_CHARGE |FLASH_ERR_OVER_HEAT | \
+ FLASH_ERR_SHORT_CIRCUIT | FLASH_ERR_TIMEOUT | \
+ FLASH_ERR_OVER_VOLTAGE)
+int adp1653_get_status(void *priv_data,unsigned long *status)
+{
+ struct adp1653_priv_data *priv_p= (struct adp1653_priv_data *)priv_data;
+ disable_irq(priv_p->i2c_client->irq);
+ if(priv_p->fault){
+ if(priv_p->fault & OVER_VOLTAGE_FAULT)
+ SET_FLASH_ERROR(priv_p->status,FLASH_ERR_OVER_VOLTAGE);
+ if(priv_p->fault & TIMEOUT_FAULT)
+ SET_FLASH_ERROR(priv_p->status,FLASH_ERR_TIMEOUT);
+ if(priv_p->fault & OVER_TEMPERATURE_FAULT)
+ SET_FLASH_ERROR(priv_p->status,FLASH_ERR_OVER_HEAT);
+ if(priv_p->fault & SHORT_CIRCUIT_FAULT){
+ CLR_FLASH_STATUS(priv_p->status,FLASH_STATUS_READY);
+ SET_FLASH_STATUS(priv_p->status,FLASH_STATUS_BROKEN);
+ SET_FLASH_ERROR(priv_p->status,FLASH_ERR_SHORT_CIRCUIT);
+ }
+ priv_p->fault =0;
+ }else{
+ CLR_FLASH_ERROR(priv_p->status,FLASH_ERR_ALL);
+ }
+ enable_irq(priv_p->i2c_client->irq);
+ *status = priv_p->status;
+ return 0;
+}
+
+int adp1653_get_selftest_modes(void *priv_data, unsigned long *modes)
+{
+ int err;
+ struct adp1653_priv_data *priv_p = (struct adp1653_priv_data *)priv_data;
+ err = i2c_smbus_read_byte_data(priv_p->i2c_client, FAULT_STATUS_REG);
+ if (err) *modes = 0x0;
+ else *modes = ADP1653_SELFTEST_SUPPORTED_MODES;
+ return 0;
+}
+
+int adp1653_get_fault_registers(void *priv_data, unsigned long mode, unsigned long *status)
+{
+ int err = 0;
+ struct adp1653_priv_data *priv_p = (struct adp1653_priv_data *)priv_data;
+
+ *status = i2c_smbus_read_byte_data(priv_p->i2c_client, FAULT_STATUS_REG);
+
+ /* clear fault register */
+ err = i2c_smbus_write_byte_data(priv_p->i2c_client,OUTPUT_SEL_REG,0);
+ if(0 != err)
+ {
+ DEBUG_LOG("Unable to write OUTPUT_SEL_REG, err %d\n",err);
+ }
+ return err;
+}
+
+struct flash_chip_ops adp1653_ops = {
+ .get_modes = adp1653_get_modes,
+ .get_mode_details = adp1653_get_mode_details,
+ .get_status = adp1653_get_status,
+ .enable_flash_mode = adp1653_enable_flash_mode,
+ .configure_flash_mode = adp1653_configure_flash_mode,
+ .trigger_strobe = adp1653_trigger_strobe,
+ .get_selftest_modes = adp1653_get_selftest_modes,
+ .get_fault_registers = adp1653_get_fault_registers
+};
+
+static irqreturn_t adp1653_irq_hdlr(int irq_no,void *data)
+{
+ int err;
+ struct adp1653_priv_data *priv_p= (struct adp1653_priv_data *)data;
+
+ priv_p->fault = i2c_smbus_read_byte_data(priv_p->i2c_client,
+ FAULT_STATUS_REG);
+ DEBUG_LOG("Got Fault, status 0x%x\n",priv_p->fault);
+ /* Writing 0 to OUTPUT_SEL_REG clears the interrupt
+ * and the FAULT_STATUS_REG register
+ */
+ err = i2c_smbus_write_byte_data(priv_p->i2c_client,OUTPUT_SEL_REG,0);
+ if(err)
+ DEBUG_LOG("Unable to write OUTPUT_SEL_REG to clr intr, err %d\n",err);
+ /*TBD: send event to user process*/
+ return IRQ_HANDLED;
+}
+static int __devinit adp1653_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err = 0;
+ struct flash_chip *flash_chip_p=NULL;
+ struct adp1653_priv_data *priv_p=NULL;
+ struct adp1653_platform_data *pdata = client->dev.platform_data;
+
+ DEBUG_LOG("> adp1653_probe\n");
+
+ priv_p = kzalloc(sizeof(struct adp1653_priv_data),GFP_KERNEL);
+ if(!priv_p){
+ DEBUG_LOG("Kmalloc failed for priv data\n");
+ err = -ENOMEM;
+ goto err_priv;
+ }
+ priv_p->i2c_client = client;
+ flash_chip_p = kzalloc(sizeof(struct flash_chip),GFP_KERNEL);
+ if(!flash_chip_p){
+ DEBUG_LOG("Kmalloc failed for flash_chip_p");
+ err = -ENOMEM;
+ goto err_flash_chip_alloc;
+ }
+
+ if (!pdata) {
+ dev_err(&client->dev,
+ "%s: No platform data supplied.\n", __func__);
+ err = -EINVAL;
+ goto err_pdata;
+ }
+
+ flash_chip_p->priv_data = priv_p;
+ flash_chip_p->ops = &adp1653_ops;
+ SET_FLASHCHIP_TYPE(flash_chip_p,FLASH_TYPE_HPLED);
+ SET_FLASHCHIP_ID(flash_chip_p,ADP1653_ID);
+
+ strncpy(flash_chip_p->name,"Adp1653",FLASH_NAME_SIZE);
+
+ i2c_set_clientdata(client,priv_p);
+ /*Request GPIO and Register IRQ if supported by platform and flash chip*/
+
+ err = gpio_request(pdata->enable_gpio,"Camera LED flash Enable");
+ if(err){
+ DEBUG_LOG("Unable to get GPIO %d, for enable\n",pdata->enable_gpio);
+ goto err_pdata;
+ }
+
+ err = gpio_direction_output(pdata->enable_gpio, 1);
+ if(err){
+ DEBUG_LOG("Unable to set GPIO %u in output mode, err %d\n",pdata->enable_gpio,err);
+ gpio_free(pdata->enable_gpio);
+ goto err_gpio_set;
+ }
+ gpio_set_value_cansleep(pdata->enable_gpio, 1);
+
+ err = request_threaded_irq(gpio_to_irq(pdata->irq_no),NULL,adp1653_irq_hdlr,
+ IRQF_ONESHOT|IRQF_TRIGGER_FALLING,
+ "Adp1653 flash",priv_p);
+ if(err){
+ DEBUG_LOG("Unable to register flash IRQ handler, irq %d, err %d\n",
+ pdata->irq_no,err);
+ goto err_irq;
+ }
+
+ err = register_flash_chip(flash_position,flash_chip_p);
+ if(err){
+ DEBUG_LOG("Failed to register Adp1653 as flash for %s camera\n",
+ (flash_position?"Primary":"Secondary"));
+ goto err_register;
+ }
+ SET_FLASH_STATUS(priv_p->status,FLASH_STATUS_READY);
+ DEBUG_LOG("< adp1653_probe ok\n");
+ return err;
+err_register:
+ if(pdata->irq_no)
+ free_irq(gpio_to_irq(pdata->irq_no),priv_p);
+err_irq:
+ gpio_set_value_cansleep(pdata->enable_gpio, 0);
+err_gpio_set:
+ if(pdata->enable_gpio)
+ gpio_free(pdata->enable_gpio);
+err_pdata:
+ if(flash_chip_p)
+ kfree(flash_chip_p);
+err_flash_chip_alloc:
+ if(priv_p)
+ kfree(priv_p);
+err_priv:
+ DEBUG_LOG("< adp1653_probe (%d)\n", err);
+ return err;
+}
+
+static int __devexit adp1653_remove(struct i2c_client *client)
+{
+ int err=0;
+ /*Nothing here yet, implement it later.*/
+ return err;
+}
+static const struct i2c_device_id adp1653_id[] = {
+ { "adp1653", 0},
+ {}
+};
+static struct i2c_driver adp1653_i2c_driver = {
+ .driver = {
+ .name = "adp1653",
+ .owner = THIS_MODULE,
+ },
+ .probe = adp1653_probe,
+ .remove = __devexit_p(adp1653_remove),
+ .id_table = adp1653_id,
+};
+
+int adp1653_init(void){
+ int err = 0;
+ struct i2c_adapter *adap_p;
+ struct i2c_board_info info;
+
+ /* Registration of I2C flash device is platform specific code
+ * Ideally it should be done from kernel (arch/arm/mach-XXX).
+ * Do it locally till the time it gets into platform code
+ * OR This portion (registration of device) and flash chip init
+ * Routine can be moved to Flash chip module init. */
+ DEBUG_LOG("getting I2C adaptor %d\n",adapter_i2c2);
+ adap_p = i2c_get_adapter(adapter_i2c2);
+ if(!adap_p){
+ DEBUG_LOG("Unable to get I2C adaptor\n");
+ err = -ENODEV;
+ goto out;
+ }
+ memset(&info,0,sizeof( struct i2c_board_info));
+
+ strcpy(&info.type[0],"adp1653");
+ DEBUG_LOG("trying to register %s at position %d\n",
+ info.type,
+ flash_position);
+
+ /* I2C framework expects least significant 7 bits as address, not complete
+ * 8 bits with bit 0 (read/write bit)
+ */
+ info.addr = 0x60 >> 1;
+
+ err = i2c_add_driver(&adp1653_i2c_driver);
+ if(err)
+ {
+ DEBUG_LOG("Failed to register i2c driver\n");
+ goto out;
+ }
+
+ DEBUG_LOG("Initialized adp1653\n");
+ if(!i2c_new_device(adap_p,&info)){
+ err = -ENODEV;
+ DEBUG_LOG("Unable to add i2c dev: %s (err=%d)\n",info.type, err);
+ goto out;
+ }
+out:
+ return err;
+}
+
+/*
+MODULE_DEPEND
+*/
diff --git a/drivers/staging/camera_flash/adp1653.h b/drivers/staging/camera_flash/adp1653.h
new file mode 100755
index 00000000000..3035ab56d99
--- /dev/null
+++ b/drivers/staging/camera_flash/adp1653.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __ADP1653_H__
+#define __ADP1653_H__
+
+#include <linux/types.h>
+#define ADP1653_ID (0) /*chip does not give any id :) so be it zero!*/
+
+#define OUTPUT_SEL_REG (0x00)
+#define CONFIG_TIMER_REG (0x01)
+#define SW_STROBE_REG (0x02)
+#define FAULT_STATUS_REG (0x03)
+
+/* Fault codes, FAULT_STATUS_REG bits */
+#define OVER_VOLTAGE_FAULT (0x01)
+#define TIMEOUT_FAULT (0x02)
+#define OVER_TEMPERATURE_FAULT (0x04)
+#define SHORT_CIRCUIT_FAULT (0x08)
+
+/*CONFIG_TIMER_REG bits*/
+#define TIMER_ENABLE (0x10)
+
+struct adp1653_priv_data{
+ struct i2c_client *i2c_client;
+ unsigned long curr_mode;
+ unsigned long enable_gpio;
+ unsigned long strobe_gpio;
+ unsigned long irq_no;
+ unsigned long status;
+ uint8_t fault;
+ uint8_t flash_intensity;
+ uint8_t flash_duration;
+ uint8_t torch_intensity;
+ uint8_t indicator_intensity;
+};
+
+/*Intensity current limits in Micro Amps*/
+/* above 250 mA the flash current gets reduced (reason unknown), */
+/* so the maximum intensity is capped at 250 mA */
+//#define FLASH_MAX_INTENSITY (500000) /*code - 31*/
+#define FLASH_MAX_INTENSITY (250000)
+#define FLASH_MIN_INTENSITY (215000) /*code - 12*/
+#define TORCH_MAX_INTENSITY (200000) /*code - 11*/
+#define TORCH_MIN_INTENSITY (50000) /*code - 1*/
+#define ILED_MAX_INTENSITY (17500) /*Code - 7*/
+#define ILED_MIN_INTENSITY (2500) /*code - 1*/
+
+#define FLASH_MAX_STROBE_DURATION (820000) /*820 uSec*/
+
+#define DURATION_USEC_TO_CODE(_code,_duration) do{ \
+ if(_duration > FLASH_MAX_STROBE_DURATION) \
+ _duration = FLASH_MAX_STROBE_DURATION; \
+ _code = (FLASH_MAX_STROBE_DURATION - _duration) / 54600;\
+}while(0)
+
+#define HPLED_UAMP_TO_CODE(_current) ((_current - 35000) / 15000)
+
+#define FLASH_UAMP_TO_CODE(_code,_current) do { \
+ if(_current > FLASH_MAX_INTENSITY) \
+ _current = FLASH_MAX_INTENSITY; \
+ if(_current < FLASH_MIN_INTENSITY) \
+ _current = FLASH_MIN_INTENSITY; \
+ _code = HPLED_UAMP_TO_CODE(_current); \
+}while(0)
+
+#define TORCH_UAMP_TO_CODE(_code,_current) do { \
+ if(_current > TORCH_MAX_INTENSITY) \
+ _current = TORCH_MAX_INTENSITY; \
+ if(_current < TORCH_MIN_INTENSITY) \
+ _current = TORCH_MIN_INTENSITY; \
+ _code = HPLED_UAMP_TO_CODE(_current); \
+}while(0)
+
+#define ILED_UAMP_TO_CODE(_code,_current) do { \
+ if(_current > ILED_MAX_INTENSITY) \
+ _current = ILED_MAX_INTENSITY; \
+ _code = _current / ILED_MIN_INTENSITY; /* Min current: 2.5mA/2500uA*/ \
+}while(0)
+
+#endif
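The code numbers noted in the intensity comments above follow directly from the HPLED_UAMP_TO_CODE arithmetic. A small host-side sketch (a reimplementation of that formula for checking only, not part of the patch) reproduces them:

#include <stdio.h>

/* same arithmetic as HPLED_UAMP_TO_CODE in adp1653.h */
static unsigned int uamp_to_code(unsigned long uamp)
{
	return (uamp - 35000) / 15000;
}

int main(void)
{
	printf("FLASH_MIN_INTENSITY 215000 uA -> code %u\n", uamp_to_code(215000)); /* 12 */
	printf("TORCH_MAX_INTENSITY 200000 uA -> code %u\n", uamp_to_code(200000)); /* 11 */
	printf("TORCH_MIN_INTENSITY  50000 uA -> code %u\n", uamp_to_code(50000));  /* 1 */
	return 0;
}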
diff --git a/drivers/staging/camera_flash/adp1653_plat.h b/drivers/staging/camera_flash/adp1653_plat.h
new file mode 100755
index 00000000000..325097aa2a8
--- /dev/null
+++ b/drivers/staging/camera_flash/adp1653_plat.h
@@ -0,0 +1,24 @@
+/*
+ * adp1653_plat.h
+ * ADP1653 Led Flash Driver platform specific structures
+ *
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Rajat Verma <rajat.verma@stericsson.com>
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef __LINUX_I2C_ADP1653_PLAT_H__
+#define __LINUX_I2C_ADP1653_PLAT_H__
+
+/**
+ * struct adp1653_platform_data - platform data structure for adp1653
+ * @enable_gpio: gpio for chip enable/disable
+ * @irq_no: interrupt line for flash ic
+ */
+struct adp1653_platform_data {
+ u32 enable_gpio;
+ u32 irq_no;
+};
+
+#endif //__LINUX_I2C_ADP1653_PLAT_H__
diff --git a/drivers/staging/camera_flash/camera_flash.h b/drivers/staging/camera_flash/camera_flash.h
new file mode 100644
index 00000000000..15faf706dc9
--- /dev/null
+++ b/drivers/staging/camera_flash/camera_flash.h
@@ -0,0 +1,74 @@
+#ifndef __CAMERA_FLASH_H__
+#define __CAMERA_FLASH_H__
+
+#define FLASH_NAME_SIZE (20)
+
+struct flash_mode_details {
+ unsigned long led_type;
+ unsigned long max_intensity_uAmp;
+ unsigned long min_intensity_uAmp;
+ unsigned long max_strobe_duration_uSecs;
+ unsigned long feature_bitmap;
+ unsigned char nbFaultRegisters;
+};
+
+/*feature_bitmap (in struct flash_mode_details) bit values*/
+#define INTENSITY_PROGRAMMABLE (0x01)
+#define DURATION_PROGRAMMABLE (0x02)
+#define TIMEOUT_PROGRAMMABLE (0x04)
+
+/* The status word returned by the driver has the status in the lower 16 bits
+ * and the error in the upper 16 bits. The status and error bit definitions
+ * are in camera_flash_bitfields.h.
+ */
+#define SET_FLASH_STATUS(_bitmap, _status) (_bitmap |= (_status & 0xffff))
+#define CLR_FLASH_STATUS(_bitmap, _status) (_bitmap &= ~(_status & 0xffff))
+#define SET_FLASH_ERROR(_bitmap, _status) (_bitmap |= (_status << 16))
+#define CLR_FLASH_ERROR(_bitmap, _status) (_bitmap &= ~(_status << 16))
+#define GET_FLASH_STATUS(_bitmap) (_bitmap & 0xffff)
+#define GET_FLASH_ERROR(_bitmap) (_bitmap >> 16)
+
+struct flash_mode_params {
+ unsigned long duration_uSecs;
+ unsigned long intensity_uAmp;
+ unsigned long timeout_uSecs;
+};
+
+struct flash_ioctl_args_t {
+ unsigned long flash_mode;
+ unsigned long cam;
+ unsigned long status;
+ union mode_arg{
+ struct flash_mode_details details;
+ struct flash_mode_params params;
+ unsigned long strobe_enable;
+ } mode_arg;
+};
+
+#define FLASH_MAGIC_NUMBER 0x17
+#define FLASH_GET_MODES _IOR(FLASH_MAGIC_NUMBER, 1,\
+struct flash_ioctl_args_t *)
+#define FLASH_GET_MODE_DETAILS _IOWR(FLASH_MAGIC_NUMBER, 2,\
+struct flash_ioctl_args_t *)
+#define FLASH_ENABLE_MODE _IOW(FLASH_MAGIC_NUMBER, 3,\
+struct flash_ioctl_args_t *)
+#define FLASH_DISABLE_MODE _IOW(FLASH_MAGIC_NUMBER, 4,\
+struct flash_ioctl_args_t *)
+#define FLASH_CONFIGURE_MODE _IOW(FLASH_MAGIC_NUMBER, 5,\
+struct flash_ioctl_args_t *)
+#define FLASH_TRIGGER_STROBE _IOW(FLASH_MAGIC_NUMBER, 6,\
+struct flash_ioctl_args_t *)
+#define FLASH_GET_STATUS _IOW(FLASH_MAGIC_NUMBER, 7,\
+struct flash_ioctl_args_t *)
+#define FLASH_GET_LIFE_COUNTER _IOW(FLASH_MAGIC_NUMBER, 8,\
+struct flash_ioctl_args_t *)
+#define FLASH_GET_SELF_TEST_MODES _IOR(FLASH_MAGIC_NUMBER, 9,\
+struct flash_ioctl_args_t *)
+#define FLASH_SELF_TEST _IOW(FLASH_MAGIC_NUMBER, 10,\
+struct flash_ioctl_args_t *)
+#define FLASH_GET_FAULT_REGISTERS _IOR(FLASH_MAGIC_NUMBER, 11,\
+struct flash_ioctl_args_t *)
+#define FLASH_GET_SELF_TEST_RESULT _IOR(FLASH_MAGIC_NUMBER, 12,\
+struct flash_ioctl_args_t *)
+
+#endif
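A minimal user-space sketch of the ioctl interface defined above. It assumes the misc device shows up as /dev/camera_flash (the name registered in flash_common.c; the actual node depends on udev) and that this header is on the application's include path:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "camera_flash.h"

int main(void)
{
	struct flash_ioctl_args_t args;
	int fd = open("/dev/camera_flash", O_RDWR);

	if (fd < 0) {
		perror("open /dev/camera_flash");
		return 1;
	}

	memset(&args, 0, sizeof(args));
	args.cam = 0; /* primary camera */

	/* query which flash modes the registered chip supports */
	if (ioctl(fd, FLASH_GET_MODES, &args) == 0)
		printf("supported flash modes: 0x%lx\n", args.flash_mode);

	/* decode the packed status word with the helper macros above */
	if (ioctl(fd, FLASH_GET_STATUS, &args) == 0)
		printf("status 0x%lx, error 0x%lx\n",
		       GET_FLASH_STATUS(args.status),
		       GET_FLASH_ERROR(args.status));

	close(fd);
	return 0;
}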
diff --git a/drivers/staging/camera_flash/camera_flash_bitfields.h b/drivers/staging/camera_flash/camera_flash_bitfields.h
new file mode 100644
index 00000000000..05da9c5ef58
--- /dev/null
+++ b/drivers/staging/camera_flash/camera_flash_bitfields.h
@@ -0,0 +1,83 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+/**
+* \file camera_flash_bitfields.h
+* \brief Define some constants for the flash drivers API.
+* \author ST-Ericsson
+*/
+#ifndef __CAMERA_FLASH_BITFIELDS_H__
+#define __CAMERA_FLASH_BITFIELDS_H__
+
+/* Flash Mode definitions */
+/* All Operating Modes are off (shutdown low power state)*/
+#define FLASH_MODE_NONE (0x000)
+/* Enables the xenon driver. Strobe is managed by the flash driver itself.
+Charges the xenon. Automatic periodic recharge is abstracted by the driver */
+#define FLASH_MODE_XENON (0x001)
+/* Enables the xenon driver. Strobe is managed externally to the driver */
+#define FLASH_MODE_XENON_EXTERNAL_STROBE (0x002)
+/* Enables the video led driver. Strobing is managed by the driver */
+#define FLASH_MODE_VIDEO_LED (0x004)
+/* Enables the video led driver. Strobing is managed externally to driver */
+#define FLASH_MODE_VIDEO_LED_EXTERNAL_STROBE (0x008)
+/* Enables the still LED driver. Strobing is managed by the driver itself */
+#define FLASH_MODE_STILL_LED (0x010)
+/* Enables the still LED driver. Strobe is managed externally to the driver */
+#define FLASH_MODE_STILL_LED_EXTERNAL_STROBE (0x020)
+/* Enables the AF assistant driver. Strobe is managed by the driver */
+#define FLASH_MODE_AF_ASSISTANT (0x040)
+/* Enable the driver. Strobe is managed by the driver */
+#define FLASH_MODE_INDICATOR (0x080)
+/* Enables the still HP LED driver. Strobing is managed by the driver itself */
+#define FLASH_MODE_STILL_HPLED (0x100)
+/* Enables the still HP LED driver. Strobe is managed externally to the
+driver */
+#define FLASH_MODE_STILL_HPLED_EXTERNAL_STROBE (0x200)
+
+
+/* The flash is not usable anymore */
+#define FLASH_STATUS_BROKEN (0x00)
+/* The flash is ready to be fired and unlit */
+#define FLASH_STATUS_READY (0x01)
+/* The flash is discharged and by construction, charging; usually an
+application shall not try to fire it in that state (although possible
+typically in sport mode flash) */
+#define FLASH_STATUS_NOT_READY (0x02)
+/* The flash is in shutdown state */
+#define FLASH_STATUS_SHUTDOWN (0x04)
+/* Intermediate state that may exist where I2C registers can be programmed */
+#define FLASH_STATUS_STANDBY (0x08)
+/* The flash is already strobing */
+#define FLASH_STATUS_LIT (0x10)
+
+#define FLASH_SELFTEST_NONE 0x000
+/* tests connections to flash driver ICs */
+#define FLASH_SELFTEST_CONNECTION 0x001
+/* tests capture flash without using strobe signal from camera */
+#define FLASH_SELFTEST_FLASH 0x002
+/* tests capture flash using strobe signal from camera: ONLY this one needs to
+be done in idle state from flash tests cases */
+#define FLASH_SELFTEST_FLASH_WITH_STROBE 0x004
+/* tests video light */
+#define FLASH_SELFTEST_VIDEO_LIGHT 0x008
+/* tests AF assistance light */
+#define FLASH_SELFTEST_AF_LIGHT 0x010
+/* tests capture indicator light */
+#define FLASH_SELFTEST_INDICATOR 0x020
+/* tests flash in torch mode */
+#define FLASH_SELFTEST_TORCH_LIGHT 0x040
+
+/** \brief Flash Error */
+enum TFlashError {
+ FLASH_ERR_NONE , /* None */
+ FLASH_ERR_OVER_CHARGE , /* Error happened during the charge */
+ FLASH_ERR_OVER_HEAT , /* Over temperature */
+ FLASH_ERR_SHORT_CIRCUIT , /* Short circuit */
+ FLASH_ERR_TIMEOUT , /* Timeout */
+ FLASH_ERR_OVER_VOLTAGE /* Over voltage */
+} ;
+
+#endif
diff --git a/drivers/staging/camera_flash/flash_common.c b/drivers/staging/camera_flash/flash_common.c
new file mode 100644
index 00000000000..fc59879a170
--- /dev/null
+++ b/drivers/staging/camera_flash/flash_common.c
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * camera flash: Flash driver to export camera flash to user space application.
+ * It supports two flashes, one for primary and one for secondary camera
+ * Author: Pankaj Chauhan/pankaj.chauhan@stericsson.com for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/kthread.h>
+#include <linux/jiffies.h>
+#include <linux/miscdevice.h>
+#include "camera_flash.h"
+#include "flash_common.h"
+
+#define DEBUG_LOG(...) printk(KERN_DEBUG "Camera Flash driver: " __VA_ARGS__)
+
+#define PRIMARY_CAMERA (0)
+#define SECONDARY_CAMERA (1)
+static struct miscdevice misc_dev;
+struct flash_chip *flash_chips[2];
+struct fasync_struct * async_queue;
+struct task_struct* ptaskStruct;
+wait_queue_head_t waitQueue;
+int waitCondition = 0;
+struct flash_ioctl_args_t flashArg;
+
+#define COPY_ARG_FROM_USER(_to,_from_usr) do{ \
+ memset((_to),0,sizeof(struct flash_ioctl_args_t)); \
+ if (copy_from_user((_to), (struct flash_ioctl_args_t*) (_from_usr), sizeof(struct flash_ioctl_args_t))) { \
+ DEBUG_LOG("Could not copy data from userspace successfully\n"); \
+ break; \
+ } \
+}while(0)
+
+#define COPY_ARG_TO_USER(_to_usr,_from) do{ \
+ if (copy_to_user((struct flash_ioctl_args_t *)(_to_usr), (_from), sizeof(struct flash_ioctl_args_t))) { \
+ DEBUG_LOG("Could not copy data from userspace successfully\n"); \
+ break; \
+ } \
+}while(0)
+
+
+static long flash_ioctl(struct file *file_p, unsigned int cmd, unsigned long arg)
+{
+ int err=0;
+ struct flash_chip *flash_p = NULL;
+ struct flash_chip_ops *ops = NULL;
+ char *my_name=NULL;
+ struct flash_ioctl_args_t flash_arg;
+
+ if (_IOC_TYPE(cmd) != FLASH_MAGIC_NUMBER) {
+ printk(KERN_ALERT "Flash driver: Not an ioctl for this module\n");
+ return -EINVAL;
+ }
+
+ COPY_ARG_FROM_USER(&flash_arg,arg);
+
+ if(flash_arg.cam == SECONDARY_CAMERA || flash_arg.cam == PRIMARY_CAMERA)
+ flash_p = flash_chips[flash_arg.cam];
+ else{
+ DEBUG_LOG("unsupported cam %lu\n",flash_arg.cam);
+ err = -ENODEV;
+ goto out;
+ }
+ my_name = flash_arg.cam ?"Secondary":"Primary";
+
+ if (flash_arg.cam == PRIMARY_CAMERA)
+ {
+ ops = flash_p->ops;
+ }
+
+ switch(cmd){
+ case FLASH_GET_MODES:
+ {
+ if (flash_arg.cam == PRIMARY_CAMERA)
+ {
+ err = ops->get_modes(flash_p->priv_data,&flash_arg.flash_mode);
+ if(!err){
+ DEBUG_LOG("Supported flash modes for %s camera: %lx\n",
+ my_name,flash_arg.flash_mode);
+ COPY_ARG_TO_USER(arg,&flash_arg);
+ }else{
+ DEBUG_LOG("unable to get supported modes for %s camera\n",my_name);
+ }
+ }
+ else
+ {
+ flash_arg.flash_mode = FLASH_MODE_NONE;
+ COPY_ARG_TO_USER(arg,&flash_arg);
+ }
+ }
+ break;
+ case FLASH_GET_MODE_DETAILS:
+ {
+ err = ops->get_mode_details(flash_p->priv_data,flash_arg.flash_mode,
+ &flash_arg.mode_arg.details);
+ if(!err){
+ COPY_ARG_TO_USER(arg,&flash_arg);
+ }else{
+ DEBUG_LOG("Unable to get mode details for %s camera, flash mode %lx\n",
+ my_name,flash_arg.flash_mode);
+ }
+ }
+ break;
+ case FLASH_ENABLE_MODE:
+ case FLASH_DISABLE_MODE:
+ {
+ int enable=0;
+ if(cmd == FLASH_ENABLE_MODE){
+ enable = 1;
+ }
+ err = ops->enable_flash_mode(flash_p->priv_data,flash_arg.flash_mode,enable);
+ if(err){
+ DEBUG_LOG("Unable to %s: %s camera, flash mode %lx\n",
+ (enable ?"Enable":"Disable"), my_name,flash_arg.flash_mode);
+ }
+ }
+ break;
+ case FLASH_CONFIGURE_MODE:
+ err = ops->configure_flash_mode(flash_p->priv_data,flash_arg.flash_mode,
+ &flash_arg.mode_arg.params);
+ if(err){
+ DEBUG_LOG("Unable to configure %s camera, flash mode %lx\n",
+ my_name,flash_arg.flash_mode);
+ }
+ break;
+ case FLASH_TRIGGER_STROBE:
+ err = ops->trigger_strobe(flash_p->priv_data,flash_arg.mode_arg.strobe_enable);
+ if(err){
+ DEBUG_LOG("Unable to %s: %s camera strobe trigger, mode %lx\n",
+ (arg ?"Enable":"Disable"), my_name,flash_arg.flash_mode);
+ }
+ break;
+ case FLASH_GET_STATUS:
+ err = ops->get_status(flash_p->priv_data,&flash_arg.status);
+ if(!err){
+ COPY_ARG_TO_USER(arg,&flash_arg);
+ }else{
+ DEBUG_LOG("Unable to get status details for %s camera, flash mode %lx\n",
+ my_name,flash_arg.flash_mode);
+ }
+ break;
+ case FLASH_GET_LIFE_COUNTER:
+ DEBUG_LOG("Not Implemented\n");
+ break;
+ case FLASH_SELF_TEST:
+ flashArg = flash_arg;
+ if (0 != (flashArg.flash_mode & (FLASH_SELFTEST_FLASH | FLASH_SELFTEST_FLASH_WITH_STROBE)))
+ {
+ err = -ENODEV;
+ }
+ else
+ {
+ /* wake up worker thread */
+ waitCondition = 1;
+ wake_up_interruptible(&waitQueue);
+ }
+ break;
+ case FLASH_GET_SELF_TEST_MODES:
+ {
+ if (flash_arg.cam == PRIMARY_CAMERA)
+ {
+ err = ops->get_selftest_modes(flash_p->priv_data,&flash_arg.flash_mode);
+ if(!err){
+ COPY_ARG_TO_USER(arg,&flash_arg);
+ }else{
+ DEBUG_LOG("unable to get supported modes for %s camera\n",my_name);
+ }
+ }
+ else
+ {
+ flash_arg.flash_mode = FLASH_SELFTEST_NONE;
+ COPY_ARG_TO_USER(arg,&flash_arg);
+ }
+ break;
+ }
+ case FLASH_GET_FAULT_REGISTERS:
+ {
+ err = ops->get_fault_registers(flash_p->priv_data,flash_arg.flash_mode,&flash_arg.status);
+ if(!err){
+ COPY_ARG_TO_USER(arg,&flash_arg);
+ }else{
+ DEBUG_LOG("unable to get supported modes for %s camera\n",my_name);
+ }
+
+ break;
+ }
+ case FLASH_GET_SELF_TEST_RESULT:
+ {
+ COPY_ARG_TO_USER(arg,&flashArg);
+ DEBUG_LOG("FLASH_GET_SELF_TEST_RESULT arg : 0x%lx\n", flashArg.status);
+ break;
+ }
+ default:
+ DEBUG_LOG("Unknown command %x\n",cmd);
+
+ }
+out:
+ return err;
+}
+
+int worker_thread (void* data)
+{
+ int err = 0;
+ struct flash_chip *flash_p=NULL;
+ struct flash_chip_ops *ops=NULL;
+ struct flash_mode_params params;
+ struct flash_mode_details details;
+
+ while (1)
+ {
+ /* waiting for some job to do */
+ wait_event_interruptible(waitQueue, (waitCondition != 0));
+ waitCondition = 0;
+
+ DEBUG_LOG("worker_thread wakes up\n");
+ /* do we need to stop ? */
+ err = kthread_should_stop();
+ if (0 != err)
+ {
+ DEBUG_LOG("worker_thread stops\n");
+ break;
+ }
+
+ /* do the job */
+ flash_p = flash_chips[flashArg.cam];
+ ops = flash_p->ops;
+
+ /* clear fault registers */
+ err = ops->get_fault_registers(flash_p->priv_data, FLASH_MODE_INDICATOR, &flashArg.status);
+ if (0 != err)
+ {
+ flashArg.status = flashArg.flash_mode;
+ flashArg.flash_mode = 0;
+ }
+ flashArg.status = 0;
+
+ /* do all selftests */
+ while (flashArg.flash_mode != FLASH_SELFTEST_NONE)
+ {
+ if (0 != (flashArg.flash_mode & FLASH_SELFTEST_CONNECTION))
+ {
+ err = ops->get_mode_details(flash_p->priv_data, FLASH_MODE_INDICATOR, &details);
+ if (0 != err)
+ {
+ DEBUG_LOG("not able to get mode FLASH_MODE_INDICATOR details\n");
+ flashArg.status |= FLASH_SELFTEST_CONNECTION;
+ }
+ flashArg.flash_mode &= ~FLASH_SELFTEST_CONNECTION;
+ }
+ else if (0 != (flashArg.flash_mode & (FLASH_SELFTEST_FLASH | FLASH_SELFTEST_FLASH_WITH_STROBE)))
+ {
+ if (0 != (flashArg.flash_mode & FLASH_SELFTEST_FLASH))
+ {
+ flashArg.status |= FLASH_SELFTEST_FLASH;
+ flashArg.flash_mode &= ~FLASH_SELFTEST_FLASH;
+ }
+ else
+ {
+ flashArg.status |= FLASH_SELFTEST_FLASH_WITH_STROBE;
+ flashArg.flash_mode &= ~FLASH_SELFTEST_FLASH_WITH_STROBE;
+ }
+ }
+ /* FLASH_SELFTEST_VIDEO_LIGHT | FLASH_SELFTEST_AF_LIGHT | FLASH_SELFTEST_INDICATOR | FLASH_SELFTEST_TORCH_LIGHT */
+ else
+ {
+ unsigned long currentSelftest = FLASH_SELFTEST_NONE;
+ unsigned long currentFlashMode = FLASH_MODE_NONE;
+
+ if (0 != (flashArg.flash_mode & FLASH_SELFTEST_VIDEO_LIGHT))
+ {
+ currentSelftest = FLASH_SELFTEST_VIDEO_LIGHT;
+ currentFlashMode = FLASH_MODE_VIDEO_LED;
+ }
+ else if (0 != (flashArg.flash_mode & FLASH_SELFTEST_AF_LIGHT))
+ {
+ currentSelftest = FLASH_SELFTEST_AF_LIGHT;
+ currentFlashMode = FLASH_MODE_AF_ASSISTANT;
+ }
+ else if (0 != (flashArg.flash_mode & FLASH_SELFTEST_INDICATOR))
+ {
+ currentSelftest = FLASH_SELFTEST_INDICATOR;
+ currentFlashMode = FLASH_MODE_INDICATOR;
+ }
+ else
+ {
+ currentSelftest = FLASH_SELFTEST_TORCH_LIGHT;
+ currentFlashMode = FLASH_MODE_VIDEO_LED;
+ }
+
+ err = ops->get_mode_details(flash_p->priv_data, currentFlashMode, &details);
+ if (0 != err)
+ {
+ DEBUG_LOG("not able to get mode 0x%lx details\n",currentFlashMode);
+ flashArg.status |= currentSelftest;
+ flashArg.flash_mode &= ~currentSelftest;
+ continue;
+ }
+
+ err = ops->enable_flash_mode(flash_p->priv_data, currentFlashMode, 1);
+ if (0 != err)
+ {
+ DEBUG_LOG("not able to enable flash mode 0x%lx\n",currentFlashMode);
+ flashArg.status |= currentSelftest;
+ flashArg.flash_mode &= ~currentSelftest;
+ continue;
+ }
+
+ params.duration_uSecs = 0;
+ params.intensity_uAmp = details.max_intensity_uAmp;
+ params.timeout_uSecs = 0;
+ err = ops->configure_flash_mode(flash_p->priv_data, currentFlashMode, &params);
+ if (0 != err)
+ {
+ DEBUG_LOG("not able to configure flash mode 0x%lx\n",currentFlashMode);
+ flashArg.status |= currentSelftest;
+ flashArg.flash_mode &= ~currentSelftest;
+ continue;
+ }
+
+ err = ops->trigger_strobe(flash_p->priv_data,1);
+ if (0 != err)
+ {
+ DEBUG_LOG("not able to strobe, mode : 0x%lx\n",currentFlashMode);
+ flashArg.status |= currentSelftest;
+ flashArg.flash_mode &= ~currentSelftest;
+ continue;
+ }
+
+ wait_event_timeout(waitQueue, 0, msecs_to_jiffies(1000));
+
+ err = ops->trigger_strobe(flash_p->priv_data,0);
+ if (0 != err)
+ {
+ DEBUG_LOG("not able to strobe, mode : 0x%lx\n",currentFlashMode);
+ flashArg.status |= currentSelftest;
+ flashArg.flash_mode &= ~currentSelftest;
+ continue;
+ }
+ flashArg.flash_mode &= ~currentSelftest;
+ }
+ }
+
+ /* job's done ! */
+ flash_async_notify();
+ }
+ return 0;
+}
+
+int flash_open(struct inode *node, struct file *file_p)
+{
+ // init sleep queue
+ init_waitqueue_head(&waitQueue);
+
+ // start worker thread
+ ptaskStruct = kthread_run (&worker_thread, NULL, "flashDriverWorker");
+
+ return 0;
+}
+
+int register_flash_chip(unsigned int cam, struct flash_chip *flash_p)
+{
+ int err =0;
+ DEBUG_LOG("Registering cam %d\n", cam);
+ DEBUG_LOG("flash_p: name=%s\n", flash_p->name);
+ if(cam > 1 || !flash_p){
+ DEBUG_LOG("Registration: something is wrong! cam %d, flash_p %x \n",cam,(int)flash_p);
+ err = -EINVAL;
+ goto out;
+ }
+ if(!flash_chips[cam]){
+ flash_chips[cam] = flash_p;
+ DEBUG_LOG("Registered flash: id %lx, %s for camera %d\n",
+ flash_p->id,flash_p->name,cam);
+ }else{
+ DEBUG_LOG("%s flash already registered for camera %d, ignore flash %s\n",
+ flash_chips[cam]->name,cam, flash_p->name);
+ }
+out:
+ return err;
+}
+
+int flash_async_notify ()
+{
+ kill_fasync(&async_queue, SIGIO, POLL_IN);
+ return 0;
+}
+
+static int flash_fasync(int fd, struct file *filp, int mode)
+{
+ DEBUG_LOG("registered async notification on %d fd\n",fd);
+ return fasync_helper(fd, filp, mode, &async_queue);
+}
+
+static int flash_release(struct inode *node, struct file *file_p)
+{
+ int err = 0;
+
+ fasync_helper(-1, file_p, 0, &async_queue);
+
+ /* Stop worker thread */
+ waitCondition = 1;
+ err = kthread_stop(ptaskStruct);
+ return err;
+}
+
+static const struct file_operations flash_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = flash_ioctl,
+ .open = flash_open,
+ .release = flash_release,
+ .fasync = flash_fasync,
+};
+
+int major_device_number;
+
+/* Temporary here (adp1653_init) */
+extern int adp1653_init(void);
+static int __init flash_init(void)
+{
+ int err;
+
+ err = adp1653_init();
+ if (err) {
+ DEBUG_LOG("Unable to initialize adp1653, err %d\n", err);
+ goto out;
+ }
+ /* Register misc device */
+ misc_dev.minor = MISC_DYNAMIC_MINOR;
+ misc_dev.name = "camera_flash";
+ misc_dev.fops = &flash_fops;
+ /* misc_register() returns 0 on success, not a device number */
+ err = misc_register(&misc_dev);
+ if (err < 0) {
+ printk(KERN_ERR "camera_flash driver misc_register failed (%d)\n", err);
+ goto out;
+ }
+ printk(KERN_INFO "camera_flash driver initialized with minor=%d\n", misc_dev.minor);
+out:
+ return err;
+}
+
+static void __exit flash_exit(void)
+{
+ misc_deregister(&misc_dev);
+ printk(KERN_INFO"Camera flash driver unregistered\n");
+}
+
+module_init(flash_init);
+module_exit(flash_exit);
+MODULE_LICENSE("GPL");
+EXPORT_SYMBOL(register_flash_chip);
+EXPORT_SYMBOL(flash_async_notify);
diff --git a/drivers/staging/camera_flash/flash_common.h b/drivers/staging/camera_flash/flash_common.h
new file mode 100755
index 00000000000..d1f63631e82
--- /dev/null
+++ b/drivers/staging/camera_flash/flash_common.h
@@ -0,0 +1,57 @@
+#ifndef __FLASH_COMMON_H__
+#define __FLASH_COMMON_H__
+
+#include "camera_flash_bitfields.h"
+#include "camera_flash.h"
+
+struct flash_chip_ops {
+ int (*get_modes)(void *priv_data, unsigned long *modes);
+ int (*get_mode_details)(void *priv_data, unsigned long mode,
+ struct flash_mode_details *details_p);
+ int (*enable_flash_mode)(void *priv_data, unsigned long mode,
+ int enable);
+ int (*configure_flash_mode)(void *priv_data, unsigned long mode,
+ struct flash_mode_params *params_p);
+ int (*trigger_strobe)(void *priv_data, int enable);
+ int (*get_life_counter)(void *priv_data);
+ int (*get_status)(void *priv_data, unsigned long *status);
+ int (*get_selftest_modes)(void *priv_data,
+ unsigned long *modes);
+ int (*get_fault_registers)(void *priv_data, unsigned long mode,
+ unsigned long *status);
+};
+
+#define FLASH_TYPE_XENON (0x1)
+#define FLASH_TYPE_HPLED (0x2)
+
+#define SET_FLASHCHIP_TYPE(flash_chip_p, _TYPE) ((flash_chip_p)->id |= (_TYPE))
+#define GET_FLASHHIP_TYPE(flash_chip_p) ((flash_chip_p)->id & 0xffff)
+#define GET_FLASHCHIP_ID(flash_chip_p) ((flash_chip_p)->id >> 16)
+#define SET_FLASHCHIP_ID(flash_chip_p, _ID) ((flash_chip_p)->id |= ((_ID) << 16))
+
+struct flash_chip {
+ unsigned long id;
+ struct flash_chip_ops *ops;
+ void *priv_data;
+ unsigned char name[FLASH_NAME_SIZE];
+};
+
+/**
+ * struct flash_platform_data - platform-specific data for the flash chip driver
+ * @cam: 0 - primary camera, 1 - secondary camera
+ * @strobe_gpio: GPIO used as strobe
+ * @strobe_gpio_alt_func: alternate function setting for the strobe GPIO
+ * @enable_gpio: GPIO used for enable/reset input
+ * @enable_gpio_alt_func: alternate function setting for the enable GPIO
+ */
+struct flash_platform_data {
+ unsigned long cam;
+ unsigned long strobe_gpio;
+ unsigned long strobe_gpio_alt_func;
+ unsigned long enable_gpio;
+ unsigned long enable_gpio_alt_func;
+};
+
+extern int register_flash_chip(unsigned int cam, struct flash_chip *flash_p);
+extern int flash_async_notify(void);
+
+#endif
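
(Editorial illustration, not part of the patch.) For orientation, a sketch of how a chip driver is expected to plug into this header: fill in a struct flash_chip, tag its id with the type/ID macros, and hand it to register_flash_chip(). The mychip_* names, the chosen modes, and the 0x1653 id are invented; only the types, macros, and FLASH_MODE_* bits come from the code above.

#include <linux/string.h>
#include "flash_common.h"

static int mychip_get_modes(void *priv_data, unsigned long *modes)
{
	*modes = FLASH_MODE_VIDEO_LED | FLASH_MODE_AF_ASSISTANT;
	return 0;
}

static struct flash_chip_ops mychip_ops = {
	.get_modes = mychip_get_modes,
	/* the remaining callbacks would be filled in the same way */
};

static struct flash_chip mychip_flash = {
	.ops = &mychip_ops,
};

static int mychip_probe_register(void *priv_data)
{
	strlcpy((char *)mychip_flash.name, "mychip", FLASH_NAME_SIZE);
	mychip_flash.priv_data = priv_data;
	SET_FLASHCHIP_TYPE(&mychip_flash, FLASH_TYPE_HPLED);
	SET_FLASHCHIP_ID(&mychip_flash, 0x1653);

	/* 0 = primary camera; the core keeps only the first registration */
	return register_flash_chip(0, &mychip_flash);
}
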
diff --git a/drivers/staging/cg2900/Kconfig b/drivers/staging/cg2900/Kconfig
new file mode 100644
index 00000000000..92046b9bf76
--- /dev/null
+++ b/drivers/staging/cg2900/Kconfig
@@ -0,0 +1,73 @@
+#
+# CG2900
+#
+
+config CG2900
+ tristate "Support ST-Ericsson CG2900 main structure"
+ depends on NET && HAS_IOMEM
+ select MFD_CORE
+ help
+ ST-Ericsson CG2900 Connectivity Combo controller main
+ structure.
+ Supports multiple functionalities muxed over a Bluetooth HCI H:4
+ interface.
+ The CG2900 supports Bluetooth, FM radio, and GPS.
+
+config CG2900_CHIP
+ tristate "Support CG2900 Connectivity controller"
+ depends on CG2900
+ help
+ ST-Ericsson CG2900 Connectivity Controller chip handler.
+ Contains the chip handler that performs driver initialization,
+ such as patch download, and instantiates the supported
+ MFD devices.
+
+config STLC2690_CHIP
+ tristate "Support STLC2690 Connectivity controller"
+ depends on CG2900
+ help
+ ST-Ericsson STLC2690 Connectivity Controller chip handler.
+ Contains the chip handler that performs driver initialization,
+ such as patch download, and instantiates the supported
+ MFD devices.
+
+config CG2900_UART
+ tristate "Support CG2900 UART transport"
+ depends on CG2900
+ select BT
+ select BT_HCIUART
+ help
+ UART driver for ST-Ericsson CG2900 Connectivity Controller.
+ Contains functions for setting the baud rate and for transporting
+ data to and from the CG2900 controller over UART.
+ Also handles low power operation for the CG2900 when UART is used
+ as transport.
+
+config CG2900_AUDIO
+ tristate "Support CG2900 audio interface"
+ depends on CG2900
+ help
+ ST-Ericsson CG2900 Connectivity audio interface driver.
+ Gives a module the ability to set up audio paths
+ within the CG2900 controller.
+ Supports both a normal in-kernel function API and a character
+ device usable from user space.
+
+config CG2900_TEST
+ tristate "Support CG2900 Test Char Device"
+ depends on CG2900
+ help
+ ST-Ericsson CG2900 Test Character Device driver.
+ Creates a character device which can be used by
+ a test framework in user space to emulate a connected chip.
+ Note that this is used to test the chip handler driver,
+ not to test the connected chip.
+
+config BT_CG2900
+ tristate "ST-Ericsson CG2900 Bluetooth driver"
+ depends on CG2900 && BT
+ help
+ Select this if the ST-Ericsson CG2900 Connectivity controller shall be
+ used as the Bluetooth controller for BlueZ.
+ This driver registers to the Bluetooth stack and when opened,
+ enables the CG2900 controller in a proper way.
diff --git a/drivers/staging/cg2900/Makefile b/drivers/staging/cg2900/Makefile
new file mode 100644
index 00000000000..14e2847bacf
--- /dev/null
+++ b/drivers/staging/cg2900/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for ST-Ericsson CG2900 connectivity combo controller
+#
+
+ccflags-y := \
+ -Idrivers/staging/cg2900/include \
+ -Iarch/arm/mach-ux500
+
+obj-$(CONFIG_CG2900) += devices-cg2900.o \
+ devices-cg2900-ux500.o \
+ board-ux500-cg2900.o \
+ clock-cg2900.o
+
+obj-y += mfd/
+obj-y += bluetooth/
diff --git a/drivers/staging/cg2900/TODO b/drivers/staging/cg2900/TODO
new file mode 100644
index 00000000000..e122eba6d53
--- /dev/null
+++ b/drivers/staging/cg2900/TODO
@@ -0,0 +1,23 @@
+TODO
+----
+
+ - Decide upon main driver architecture.
+
+ - Decide if the CG2900 driver should be a separate driver as today or if it
+ should be a sub-driver using the TI-ST (Shared Transport) driver that is also
+ written for a combo connectivity controller.
+
+ - Decide if cg2900_uart should register on top of hci_ldisc.c (as now) or if it
+ should instead register on top of hci_h4.c thereby reusing hci_h4
+ implementation.
+
+ - Update hci_ldisc.c so that it allows drivers to be registered without
+ registering them directly to the Bluetooth stack. Also extend hci_ldisc.c
+ with more functions to abstract the tty API in a uniform way (currently
+ sometimes the tty API is used, sometimes the hci_ldisc interface).
+
+ - Decide if the CG2900 driver should use imported structs and defines to create
+ Bluetooth packets as it does today, or if the Bluetooth stack in the kernel
+ should be extended so it is possible to use generic functions to send and
+ receive commands and events both from the Bluetooth stack itself and from
+ external drivers such as the CG2900 driver.
diff --git a/drivers/staging/cg2900/bluetooth/Makefile b/drivers/staging/cg2900/bluetooth/Makefile
new file mode 100644
index 00000000000..936a4a257da
--- /dev/null
+++ b/drivers/staging/cg2900/bluetooth/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for ST-Ericsson CG2900 connectivity combo controller
+#
+
+ccflags-y := \
+ -Idrivers/staging/cg2900/include
+
+obj-$(CONFIG_BT_CG2900) += btcg2900.o
+obj-$(CONFIG_CG2900_UART) += cg2900_uart.o hci_ldisc.o
diff --git a/drivers/staging/cg2900/bluetooth/btcg2900.c b/drivers/staging/cg2900/bluetooth/btcg2900.c
new file mode 100644
index 00000000000..07aae9a32ca
--- /dev/null
+++ b/drivers/staging/cg2900/bluetooth/btcg2900.c
@@ -0,0 +1,1198 @@
+/*
+ * Bluetooth driver for ST-Ericsson CG2900 connectivity controller.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com)
+ * Henrik Possung (henrik.possung@stericsson.com)
+ * Josef Kindberg (josef.kindberg@stericsson.com)
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com)
+ * Kjell Andersson (kjell.k.andersson@stericsson.com)
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <asm/byteorder.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/core.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "cg2900.h"
+
+#define BT_HEADER_LENGTH 0x03
+
+#define STLC2690_HCI_REV 0x0600
+#define CG2900_PG1_HCI_REV 0x0101
+#define CG2900_PG2_HCI_REV 0x0200
+#define CG2900_PG1_SPECIAL_HCI_REV 0x0700
+
+#define NAME "BTCG2900 "
+
+/* Wait for 5 seconds for a response to our requests */
+#define RESP_TIMEOUT 5000
+
+/* Bluetooth error codes */
+#define HCI_ERR_NO_ERROR 0x00
+#define HCI_ERR_CMD_DISALLOWED 0x0C
+
+/**
+ * enum reset_state - RESET-states of the HCI driver.
+ *
+ * @RESET_IDLE: No reset in progress.
+ * @RESET_ACTIVATED: Reset in progress.
+ * @RESET_UNREGISTERED: hdev is unregistered.
+ */
+enum reset_state {
+ RESET_IDLE,
+ RESET_ACTIVATED,
+ RESET_UNREGISTERED
+};
+
+/**
+ * enum enable_state - ENABLE-states of the HCI driver.
+ *
+ * @ENABLE_IDLE: The HCI driver is loaded but not opened.
+ * @ENABLE_WAITING_BT_ENABLED_CC: The HCI driver is waiting for a command
+ * complete event from the BT chip as a
+ * response to a BT Enable (true) command.
+ * @ENABLE_BT_ENABLED: The BT chip is enabled.
+ * @ENABLE_WAITING_BT_DISABLED_CC: The HCI driver is waiting for a command
+ * complete event from the BT chip as a
+ * response to a BT Enable (false) command.
+ * @ENABLE_BT_DISABLED: The BT chip is disabled.
+ * @ENABLE_BT_ERROR: The HCI driver is in a bad state; something
+ * has failed and it is not expected to
+ * work properly.
+ */
+enum enable_state {
+ ENABLE_IDLE,
+ ENABLE_WAITING_BT_ENABLED_CC,
+ ENABLE_BT_ENABLED,
+ ENABLE_WAITING_BT_DISABLED_CC,
+ ENABLE_BT_DISABLED,
+ ENABLE_BT_ERROR
+};
+
+/* Defines which state the driver has when BT is active */
+#define BTCG2900_ACTIVE_STATE ENABLE_BT_ENABLED
+
+/**
+ * struct btcg2900_info - Specifies HCI driver private data.
+ *
+ * This type specifies CG2900 HCI driver private data.
+ *
+ * @list: list_head struct.
+ * @parent: Parent of this BT device. All BT channels have a
+ * common parent.
+ * @cmd: Device structure for BT command channel.
+ * @evt: Device structure for BT event channel.
+ * @acl: Device structure for BT ACL channel.
+ * @hdev: Device structure for HCI device.
+ * @reset_state: Device enum for HCI driver reset state.
+ * @enable_state: Device enum for HCI driver BT enable state.
+ */
+struct btcg2900_info {
+ struct list_head list;
+ struct device *parent;
+ struct device *cmd;
+ struct device *evt;
+ struct device *acl;
+ struct hci_dev *hdev;
+ enum reset_state reset_state;
+ enum enable_state enable_state;
+};
+
+/**
+ * struct enable_info - Specifies data for sending enable commands.
+ *
+ * @enable: True if command should enable the functionality.
+ * @name: Name of the command, only informative.
+ * @get_cmd: Function for retrieving command.
+ * @success: State to set upon success.
+ * @awaiting_cc: State to set while waiting for response.
+ * @failed: State to set upon failure.
+ */
+struct enable_info {
+ bool enable;
+ char *name;
+ struct sk_buff* (*get_cmd)(struct btcg2900_info *info, bool enable);
+ enum enable_state success;
+ enum enable_state awaiting_cc;
+ enum enable_state failed;
+};
+
+/**
+ * struct dev_info - Specifies private data used when receiving callbacks from CG2900 driver.
+ *
+ * @hci_data_type: Type of data according to BlueZ.
+ */
+struct dev_info {
+ u8 hci_data_type;
+};
+
+/* Defines for vs_bt_enable_cmd */
+#define BT_VS_BT_ENABLE 0xFF10
+#define VS_BT_DISABLE 0x00
+#define VS_BT_ENABLE 0x01
+
+/**
+ * struct vs_bt_enable_cmd - Specifies HCI VS Bluetooth_Enable command.
+ *
+ * @op_code: HCI command op code.
+ * @len: Parameter length of command.
+ * @enable: 0 for disable BT, 1 for enable BT.
+ */
+struct vs_bt_enable_cmd {
+ __le16 op_code;
+ u8 len;
+ u8 enable;
+} __packed;
+
+/*
+ * hci_wait_queue - Main Wait Queue in HCI driver.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(hci_wait_queue);
+
+/*
+ * btcg2900_devices - List of active CG2900 BT devices.
+ */
+static LIST_HEAD(btcg2900_devices);
+
+/* Internal function declarations */
+static int register_bluetooth(struct btcg2900_info *info);
+
+/* Internal functions */
+
+/**
+ * get_bt_enable_cmd() - Get HCI BT enable command.
+ * @info: Device info structure.
+ * @bt_enable: true if Bluetooth IP shall be enabled, false otherwise.
+ *
+ * Returns:
+ * NULL if no command shall be sent,
+ * sk_buffer with command otherwise.
+ */
+static struct sk_buff *get_bt_enable_cmd(struct btcg2900_info *info,
+ bool bt_enable)
+{
+ struct sk_buff *skb;
+ struct vs_bt_enable_cmd *cmd;
+ struct cg2900_rev_data rev_data;
+ struct cg2900_user_data *pf_data;
+
+ pf_data = dev_get_platdata(info->cmd);
+
+ if (!pf_data->get_local_revision(pf_data, &rev_data)) {
+ BT_ERR(NAME "Couldn't get revision");
+ return NULL;
+ }
+
+ /* If connected chip does not support the command return NULL */
+ if (CG2900_PG1_SPECIAL_HCI_REV != rev_data.revision &&
+ CG2900_PG1_HCI_REV != rev_data.revision &&
+ CG2900_PG2_HCI_REV != rev_data.revision)
+ return NULL;
+
+ /* CG2900 used */
+ skb = pf_data->alloc_skb(sizeof(*cmd), GFP_KERNEL);
+ if (!skb) {
+ BT_ERR(NAME "Could not allocate skb");
+ return NULL;
+ }
+
+ cmd = (struct vs_bt_enable_cmd *)skb_put(skb, sizeof(*cmd));
+ cmd->op_code = cpu_to_le16(BT_VS_BT_ENABLE);
+ cmd->len = sizeof(*cmd) - BT_HEADER_LENGTH;
+ if (bt_enable)
+ cmd->enable = VS_BT_ENABLE;
+ else
+ cmd->enable = VS_BT_DISABLE;
+
+ return skb;
+}
+
+/**
+ * close_bt_users() - Close all BT channels.
+ * @info: HCI driver info structure.
+ */
+static void close_bt_users(struct btcg2900_info *info)
+{
+ struct cg2900_user_data *pf_data;
+
+ pf_data = dev_get_platdata(info->cmd);
+ if (pf_data->opened)
+ pf_data->close(pf_data);
+
+ pf_data = dev_get_platdata(info->acl);
+ if (pf_data->opened)
+ pf_data->close(pf_data);
+
+ pf_data = dev_get_platdata(info->evt);
+ if (pf_data->opened)
+ pf_data->close(pf_data);
+}
+
+/**
+ * handle_bt_enable_comp() - Handle received BtEnable Complete event.
+ * @info: Info structure.
+ * @status: Status from the command complete event.
+ *
+ * Returns:
+ * true if data has been handled internally,
+ * false otherwise.
+ */
+static bool handle_bt_enable_comp(struct btcg2900_info *info, u8 status)
+{
+ if (info->enable_state != ENABLE_WAITING_BT_ENABLED_CC &&
+ info->enable_state != ENABLE_WAITING_BT_DISABLED_CC)
+ return false;
+ /*
+ * This is the command complete event for
+ * the HCI_Cmd_VS_Bluetooth_Enable.
+ * Check result and update state.
+ *
+ * The BT chip is now enabled/disabled. Either the state changed
+ * just now (status NO_ERROR) or the chip was already in the
+ * requested state (status CMD_DISALLOWED is assumed to mean
+ * "already enabled/disabled").
+ */
+ if (status != HCI_ERR_NO_ERROR && status != HCI_ERR_CMD_DISALLOWED) {
+ BT_ERR(NAME "Could not enable/disable BT core (0x%X)",
+ status);
+ BT_DBG("New enable_state: ENABLE_BT_ERROR");
+ info->enable_state = ENABLE_BT_ERROR;
+ goto finished;
+ }
+
+ if (info->enable_state == ENABLE_WAITING_BT_ENABLED_CC) {
+ BT_DBG("New enable_state: ENABLE_BT_ENABLED");
+ info->enable_state = ENABLE_BT_ENABLED;
+ BT_INFO("CG2900 BT core is enabled");
+ } else {
+ BT_DBG("New enable_state: ENABLE_BT_DISABLED");
+ info->enable_state = ENABLE_BT_DISABLED;
+ BT_INFO("CG2900 BT core is disabled");
+ }
+
+finished:
+ /* Wake up whoever is waiting for this result. */
+ wake_up_all(&hci_wait_queue);
+ return true;
+}
+
+/**
+ * handle_bt_enable_stat() - Handle received BtEnable Status event.
+ * @info: Info structure.
+ * @status: Status from the command status event.
+ *
+ * Returns:
+ * true if data has been handled internally,
+ * false otherwise.
+ */
+static bool handle_bt_enable_stat(struct btcg2900_info *info, u8 status)
+{
+ if (info->enable_state != ENABLE_WAITING_BT_DISABLED_CC &&
+ info->enable_state != ENABLE_WAITING_BT_ENABLED_CC)
+ return false;
+
+ BT_DBG("HCI Driver received Command Status (BT enable): 0x%X", status);
+ /*
+ * This is the command status event for the HCI_Cmd_VS_Bluetooth_Enable.
+ * Just free the packet.
+ */
+ return true;
+}
+
+/**
+ * handle_rx_evt() - Check if received data is response to internal command.
+ * @info: Info structure.
+ * @skb: Buffer with data coming from device.
+ *
+ * Returns:
+ * true if data has been handled internally,
+ * false otherwise.
+ */
+static bool handle_rx_evt(struct btcg2900_info *info, struct sk_buff *skb)
+{
+ struct hci_event_hdr *evt = (struct hci_event_hdr *)skb->data;
+ struct hci_ev_cmd_complete *cmd_complete;
+ struct hci_ev_cmd_status *cmd_status;
+ u16 op_code;
+ u8 status;
+ bool pkt_handled = false;
+
+ /* If BT is active no internal packets shall be generated */
+ if (info->enable_state == BTCG2900_ACTIVE_STATE)
+ return false;
+
+ if (evt->evt == HCI_EV_CMD_COMPLETE) {
+ cmd_complete = (struct hci_ev_cmd_complete *)(evt + 1);
+ status = *((u8 *)(cmd_complete + 1));
+ op_code = le16_to_cpu(cmd_complete->opcode);
+
+ if (op_code == BT_VS_BT_ENABLE)
+ pkt_handled = handle_bt_enable_comp(info, status);
+ } else if (evt->evt == HCI_EV_CMD_STATUS) {
+ cmd_status = (struct hci_ev_cmd_status *)(evt + 1);
+ op_code = le16_to_cpu(cmd_status->opcode);
+ status = cmd_status->status;
+
+ if (op_code == BT_VS_BT_ENABLE)
+ pkt_handled = handle_bt_enable_stat(info, status);
+ }
+
+ if (pkt_handled)
+ kfree_skb(skb);
+
+ return pkt_handled;
+}
+
+/**
+ * hci_read_cb() - Callback for handling data received from CG2900 driver.
+ * @user: CG2900 user data for the device receiving data.
+ * @skb: Buffer with data coming from device.
+ */
+static void hci_read_cb(struct cg2900_user_data *user, struct sk_buff *skb)
+{
+ int err = 0;
+ struct dev_info *dev_info;
+ struct btcg2900_info *info;
+
+ dev_info = cg2900_get_usr(user);
+ info = dev_get_drvdata(user->dev);
+
+ if (user->dev != info->evt || !handle_rx_evt(info, skb)) {
+ bt_cb(skb)->pkt_type = dev_info->hci_data_type;
+ skb->dev = (struct net_device *)info->hdev;
+ /* Update BlueZ stats */
+ info->hdev->stat.byte_rx += skb->len;
+ if (bt_cb(skb)->pkt_type == HCI_ACLDATA_PKT)
+ info->hdev->stat.acl_rx++;
+ else
+ info->hdev->stat.evt_rx++;
+
+ BT_DBG("Data receive %d bytes", skb->len);
+
+ /* Provide BlueZ with received frame*/
+ err = hci_recv_frame(skb);
+ /* If err, skb has been freed in hci_recv_frame() */
+ if (err)
+ BT_ERR(NAME "Failed in supplying packet to Bluetooth"
+ " stack (%d)", err);
+ }
+}
+
+/**
+ * hci_reset_cb() - Callback for handling reset from CG2900 driver.
+ * @dev: CG2900 user data for the device being reset.
+ */
+static void hci_reset_cb(struct cg2900_user_data *dev)
+{
+ int err;
+ struct btcg2900_info *info;
+ struct cg2900_user_data *pf_data;
+
+ BT_INFO(NAME "hci_reset_cb");
+
+ info = dev_get_drvdata(dev->dev);
+
+ BT_DBG("New reset_state: RESET_ACTIVATED");
+ info->reset_state = RESET_ACTIVATED;
+
+ /*
+ * Continue to deregister hdev only if all channels have been reset,
+ * otherwise return.
+ */
+ pf_data = dev_get_platdata(info->acl);
+ if (pf_data->opened)
+ return;
+ pf_data = dev_get_platdata(info->cmd);
+ if (pf_data->opened)
+ return;
+ pf_data = dev_get_platdata(info->evt);
+ if (pf_data->opened)
+ return;
+
+ /*
+ * Deregister HCI device. Close and Destruct functions should
+ * in turn be called by BlueZ.
+ */
+ BT_DBG("Deregister HCI device");
+ hci_unregister_dev(info->hdev);
+
+ wait_event_timeout(hci_wait_queue,
+ (RESET_UNREGISTERED == info->reset_state),
+ msecs_to_jiffies(RESP_TIMEOUT));
+ if (RESET_UNREGISTERED != info->reset_state)
+ /*
+ * Now we are in trouble. Try to register a new hdev
+ * anyway even though this will cost some memory.
+ */
+ BT_ERR(NAME "Timeout expired. Could not deregister HCI device");
+
+ /* Init and register hdev */
+ BT_DBG("Register HCI device");
+ err = register_bluetooth(info);
+ if (err)
+ BT_ERR(NAME "HCI Device registration error (%d)", err);
+}
+
+/**
+ * send_enable_cmd() - Send a command with only enable/disable functionality.
+ * @info: Info structure.
+ * @en_info: Enable info structure.
+ *
+ * Returns:
+ * 0 if successful,
+ * -EACCES if correct response to command is not received,
+ * Error codes from CG2900 write.
+ */
+static int send_enable_cmd(struct btcg2900_info *info,
+ struct enable_info *en_info)
+{
+ struct sk_buff *enable_cmd;
+ int err;
+ struct cg2900_user_data *pf_data;
+
+ /*
+ * Call function that returns the chip dependent enable HCI command.
+ * If NULL is returned, then no bt_enable command should be sent to the
+ * chip.
+ */
+ enable_cmd = en_info->get_cmd(info, en_info->enable);
+ if (!enable_cmd) {
+ BT_DBG("%s New enable_state: %d", en_info->name,
+ en_info->success);
+ info->enable_state = en_info->success;
+ return 0;
+ }
+
+ /* Set the HCI state before sending command to chip. */
+ BT_DBG("%s New enable_state: %d", en_info->name, en_info->awaiting_cc);
+ info->enable_state = en_info->awaiting_cc;
+
+ /* Send command to chip */
+ pf_data = dev_get_platdata(info->cmd);
+ err = pf_data->write(pf_data, enable_cmd);
+ if (err) {
+ BT_ERR("Couldn't send %s command (%d)", en_info->name, err);
+ kfree_skb(enable_cmd);
+ info->enable_state = en_info->failed;
+ return err;
+ }
+
+ /*
+ * Wait for callback to receive command complete and then wake us up
+ * again.
+ */
+ wait_event_timeout(hci_wait_queue,
+ info->enable_state == en_info->success,
+ msecs_to_jiffies(RESP_TIMEOUT));
+ /* Check the current state to see if it worked */
+ if (info->enable_state != en_info->success) {
+ BT_ERR("Could not change %s state (%d)",
+ en_info->name, info->enable_state);
+ BT_DBG("%s New enable_state: %d", en_info->name,
+ en_info->failed);
+ info->enable_state = en_info->failed;
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+/**
+ * btcg2900_open() - Open HCI interface.
+ * @hdev: HCI device being opened.
+ *
+ * BlueZ callback function for opening HCI interface to device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL if NULL pointer is supplied.
+ * -EBUSY if device is already opened.
+ * -EACCES if opening of channels failed.
+ */
+static int btcg2900_open(struct hci_dev *hdev)
+{
+ struct btcg2900_info *info;
+ struct cg2900_user_data *pf_data;
+ int err;
+ struct enable_info en_info;
+
+ BT_INFO("Open ST-Ericsson CG2900 driver");
+
+ if (!hdev) {
+ BT_ERR(NAME "NULL supplied for hdev");
+ return -EINVAL;
+ }
+
+ info = (struct btcg2900_info *)hdev->driver_data;
+ if (!info) {
+ BT_ERR(NAME "NULL supplied for driver_data");
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(HCI_RUNNING, &(hdev->flags))) {
+ BT_ERR(NAME "Device already opened!");
+ return -EBUSY;
+ }
+
+ pf_data = dev_get_platdata(info->acl);
+ err = pf_data->open(pf_data);
+ if (err) {
+ BT_ERR("Couldn't open BT ACL channel (%d)", err);
+ goto handle_error;
+ }
+
+ pf_data = dev_get_platdata(info->cmd);
+ err = pf_data->open(pf_data);
+ if (err) {
+ BT_ERR("Couldn't open BT CMD channel (%d)", err);
+ goto handle_error;
+ }
+
+ pf_data = dev_get_platdata(info->evt);
+ err = pf_data->open(pf_data);
+ if (err) {
+ BT_ERR("Couldn't open BT EVT channel (%d)", err);
+ goto handle_error;
+ }
+
+ if (info->reset_state == RESET_ACTIVATED) {
+ BT_DBG("New reset_state: RESET_IDLE");
+ info->reset_state = RESET_IDLE;
+ }
+
+ /* First enable the BT core */
+ en_info.enable = true;
+ en_info.get_cmd = get_bt_enable_cmd;
+ en_info.name = "VS BT Enable (true)";
+ en_info.success = ENABLE_BT_ENABLED;
+ en_info.awaiting_cc = ENABLE_WAITING_BT_ENABLED_CC;
+ en_info.failed = ENABLE_BT_DISABLED;
+
+ err = send_enable_cmd(info, &en_info);
+ if (err) {
+ BT_ERR("Couldn't enable BT core (%d)", err);
+ goto handle_error;
+ }
+
+ return 0;
+
+handle_error:
+ close_bt_users(info);
+ clear_bit(HCI_RUNNING, &(hdev->flags));
+ return err;
+
+}
+
+/**
+ * btcg2900_close() - Close HCI interface.
+ * @hdev: HCI device being closed.
+ *
+ * BlueZ callback function for closing HCI interface.
+ * It flushes the interface first.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL if NULL pointer is supplied.
+ * -EBUSY if device is not open.
+ */
+static int btcg2900_close(struct hci_dev *hdev)
+{
+ struct btcg2900_info *info = NULL;
+ int err;
+ struct enable_info en_info;
+
+ BT_DBG("btcg2900_close");
+
+ if (!hdev) {
+ BT_ERR(NAME "NULL supplied for hdev");
+ return -EINVAL;
+ }
+
+ info = (struct btcg2900_info *)hdev->driver_data;
+ if (!info) {
+ BT_ERR(NAME "NULL supplied for driver_data");
+ return -EINVAL;
+ }
+
+ if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) {
+ BT_ERR(NAME "Device already closed!");
+ return -EBUSY;
+ }
+
+ /* Do not do this if there is a reset ongoing */
+ if (info->reset_state == RESET_ACTIVATED)
+ goto remove_users;
+
+ /* Now disable the BT core */
+ en_info.enable = false;
+ en_info.get_cmd = get_bt_enable_cmd;
+ en_info.name = "VS BT Enable (false)";
+ en_info.success = ENABLE_BT_DISABLED;
+ en_info.awaiting_cc = ENABLE_WAITING_BT_DISABLED_CC;
+ en_info.failed = ENABLE_BT_ENABLED;
+
+ err = send_enable_cmd(info, &en_info);
+ if (err)
+ BT_ERR("Couldn't disable BT core (%d)", err);
+
+remove_users:
+ /* Finally deregister all users and free allocated data */
+ close_bt_users(info);
+ return 0;
+}
+
+/**
+ * btcg2900_send() - Send packet to device.
+ * @skb: sk buffer to be sent.
+ *
+ * BlueZ callback function for sending sk buffer.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL if NULL pointer is supplied.
+ * -EOPNOTSUPP if supplied packet type is not supported.
+ * Error codes from cg2900_write.
+ */
+static int btcg2900_send(struct sk_buff *skb)
+{
+ struct hci_dev *hdev;
+ struct btcg2900_info *info;
+ struct cg2900_user_data *pf_data;
+ int err = 0;
+
+ if (!skb) {
+ BT_ERR(NAME "NULL supplied for skb");
+ return -EINVAL;
+ }
+
+ hdev = (struct hci_dev *)(skb->dev);
+ if (!hdev) {
+ BT_ERR(NAME "NULL supplied for hdev");
+ return -EINVAL;
+ }
+
+ info = (struct btcg2900_info *)hdev->driver_data;
+ if (!info) {
+ BT_ERR(NAME "NULL supplied for info");
+ return -EINVAL;
+ }
+
+ /* Update BlueZ stats */
+ hdev->stat.byte_tx += skb->len;
+
+ BT_DBG("Data transmit %d bytes", skb->len);
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ BT_DBG("Sending HCI_COMMAND_PKT");
+ pf_data = dev_get_platdata(info->cmd);
+ err = pf_data->write(pf_data, skb);
+ hdev->stat.cmd_tx++;
+ break;
+ case HCI_ACLDATA_PKT:
+ BT_DBG("Sending HCI_ACLDATA_PKT");
+ pf_data = dev_get_platdata(info->acl);
+ err = pf_data->write(pf_data, skb);
+ hdev->stat.acl_tx++;
+ break;
+ default:
+ BT_ERR(NAME "Trying to transmit unsupported packet type"
+ " (0x%.2X)", bt_cb(skb)->pkt_type);
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * btcg2900_destruct() - Destruct HCI interface.
+ * @hdev: HCI device being destructed.
+ */
+static void btcg2900_destruct(struct hci_dev *hdev)
+{
+ struct btcg2900_info *info;
+
+ BT_DBG("btcg2900_destruct");
+
+ info = hdev->driver_data;
+ if (!info) {
+ BT_ERR(NAME "NULL supplied for info");
+ return;
+ }
+
+ /*
+ * When destruct is called it means that the Bluetooth stack is done
+ * with the HCI device and we can now free it.
+ * Normally we do this only when removing the whole module through
+ * btcg2900_remove(), but when being reset we free the device here and
+ * we then set the reset state so that the reset handler can allocate a
+ * new HCI device and then register it to the Bluetooth stack.
+ */
+ if (info->reset_state == RESET_ACTIVATED) {
+ if (info->hdev) {
+ hci_free_dev(info->hdev);
+ info->hdev = NULL;
+ }
+ BT_DBG("New reset_state: RESET_UNREGISTERED");
+ info->reset_state = RESET_UNREGISTERED;
+ wake_up_all(&hci_wait_queue);
+ }
+}
+
+/**
+ * get_info() - Return info structure for this device.
+ * @dev: Current device.
+ *
+ * Returns:
+ * Pointer to info struct if there is no error.
+ * ERR_PTR(-ENOMEM) if allocation fails.
+ */
+static struct btcg2900_info *get_info(struct device *dev)
+{
+ struct list_head *cursor;
+ struct btcg2900_info *tmp;
+ struct btcg2900_info *info = NULL;
+
+ /* Find the info structure */
+ list_for_each(cursor, &btcg2900_devices) {
+ tmp = list_entry(cursor, struct btcg2900_info, list);
+ if (tmp->parent == dev->parent) {
+ info = tmp;
+ break;
+ }
+ }
+
+ if (info)
+ return info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ BT_ERR("Could not allocate info struct");
+ return ERR_PTR(-ENOMEM);
+ }
+ info->parent = dev->parent;
+ list_add_tail(&info->list, &btcg2900_devices);
+ BT_DBG("CG2900 device added");
+ return info;
+}
+
+/**
+ * device_removed() - Remove device from list if there are no channels left.
+ * @info: BTCG2900 info structure.
+ */
+static void device_removed(struct btcg2900_info *info)
+{
+ struct list_head *cursor;
+ struct btcg2900_info *tmp;
+
+ if (info->acl || info->cmd || info->evt)
+ /* There are still devices active */
+ return;
+
+ /* Find the info structure and delete it */
+ list_for_each(cursor, &btcg2900_devices) {
+ tmp = list_entry(cursor, struct btcg2900_info, list);
+ if (tmp == info) {
+ list_del(cursor);
+ break;
+ }
+ }
+ kfree(info);
+ BT_DBG("CG2900 device removed");
+}
+
+/**
+ * register_bluetooth() - Allocate, initialize, and register HCI device.
+ * @info: BTCG2900 info structure.
+ *
+ * Allocates, initializes, and registers the HCI device to BlueZ.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if allocation fails.
+ * Error codes from hci_register_dev.
+ */
+static int register_bluetooth(struct btcg2900_info *info)
+{
+ int err;
+ struct cg2900_user_data *pf_data;
+
+ /* Check if all channels have been probed */
+ if (!info->acl || !info->cmd || !info->evt)
+ return 0;
+
+ pf_data = dev_get_platdata(info->cmd);
+
+ info->hdev = hci_alloc_dev();
+ if (!info->hdev) {
+ BT_ERR("Could not allocate mem for CG2900 BT driver");
+ return -ENOMEM;
+ }
+
+ SET_HCIDEV_DEV(info->hdev, info->parent);
+ info->hdev->bus = pf_data->channel_data.bt_bus;
+ info->hdev->driver_data = info;
+ info->hdev->owner = THIS_MODULE;
+ info->hdev->open = btcg2900_open;
+ info->hdev->close = btcg2900_close;
+ info->hdev->send = btcg2900_send;
+ info->hdev->destruct = btcg2900_destruct;
+
+ err = hci_register_dev(info->hdev);
+ if (err) {
+ BT_ERR("Can not register BTCG2900 HCI device (%d)", err);
+ hci_free_dev(info->hdev);
+ info->hdev = NULL;
+ return err;
+ }
+
+ BT_INFO("CG2900 registered");
+
+ BT_DBG("New enable_state: ENABLE_IDLE");
+ info->enable_state = ENABLE_IDLE;
+ BT_DBG("New reset_state: RESET_IDLE");
+ info->reset_state = RESET_IDLE;
+
+ return err;
+}
+
+/**
+ * probe_common() - Initialize channel and register to BT stack.
+ * @pdev: Platform device.
+ * @info: BTCG2900 info structure.
+ * @hci_data_type: Data type of this channel, e.g. ACL.
+ *
+ * Allocate and initialize private data. Register to Bluetooth stack.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if allocation fails.
+ * Error codes from register_bluetooth.
+ */
+static int probe_common(struct platform_device *pdev,
+ struct btcg2900_info *info,
+ u8 hci_data_type)
+{
+ int err;
+ struct cg2900_user_data *pf_data;
+ struct dev_info *dev_info;
+ struct device *dev = &pdev->dev;
+
+ dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
+ if (!dev_info) {
+ BT_ERR("Could not allocate dev_info");
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(dev, info);
+
+ pf_data = dev_get_platdata(dev);
+ pf_data->dev = dev;
+ pf_data->read_cb = hci_read_cb;
+ pf_data->reset_cb = hci_reset_cb;
+
+ /* Init and register hdev */
+ err = register_bluetooth(info);
+ if (err) {
+ BT_ERR("HCI Device registration error (%d)", err);
+ kfree(dev_info);
+ return err;
+ }
+ dev_info->hci_data_type = hci_data_type;
+ cg2900_set_usr(pf_data, dev_info);
+
+ return 0;
+}
+
+/**
+ * btcg2900_cmd_probe() - Initialize command channel.
+ * @pdev: Platform device.
+ *
+ * Allocate and initialize private data.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Error codes from get_info and probe_common.
+ */
+static int __devinit btcg2900_cmd_probe(struct platform_device *pdev)
+{
+ int err;
+ struct btcg2900_info *info;
+
+ BT_DBG("Starting CG2900 Command channel");
+
+ info = get_info(&pdev->dev);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->cmd = &pdev->dev;
+
+ err = probe_common(pdev, info, HCI_COMMAND_PKT);
+ if (err) {
+ BT_ERR("Failed to initialize channel");
+ info->cmd = NULL;
+ device_removed(info);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * btcg2900_acl_probe() - Initialize ACL channel.
+ * @pdev: Platform device.
+ *
+ * Allocate and initialize private data.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Error codes from get_info and probe_common.
+ */
+static int __devinit btcg2900_acl_probe(struct platform_device *pdev)
+{
+ int err;
+ struct btcg2900_info *info;
+
+ BT_DBG("Starting CG2900 ACL channel");
+
+ info = get_info(&pdev->dev);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->acl = &pdev->dev;
+
+ err = probe_common(pdev, info, HCI_ACLDATA_PKT);
+ if (err) {
+ BT_ERR("Failed to initialize channel");
+ info->acl = NULL;
+ device_removed(info);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * btcg2900_evt_probe() - Initialize event channel.
+ * @pdev: Platform device.
+ *
+ * Allocate and initialize private data.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Error codes from get_info and probe_common.
+ */
+static int __devinit btcg2900_evt_probe(struct platform_device *pdev)
+{
+ int err;
+ struct btcg2900_info *info;
+
+ BT_DBG("Starting CG2900 Event channel");
+
+ info = get_info(&pdev->dev);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->evt = &pdev->dev;
+
+ err = probe_common(pdev, info, HCI_EVENT_PKT);
+ if (err) {
+ BT_ERR("Failed to initialize channel");
+ info->evt = NULL;
+ device_removed(info);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * remove_common() - Remove channel.
+ * @pdev: Platform device.
+ * @info: BTCG2900 info structure.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Error codes from hci_unregister_dev.
+ */
+static int remove_common(struct platform_device *pdev,
+ struct btcg2900_info *info)
+{
+ int err = 0;
+ struct cg2900_user_data *pf_data;
+ struct dev_info *dev_info;
+
+ pf_data = dev_get_platdata(&pdev->dev);
+ dev_info = cg2900_get_usr(pf_data);
+
+ kfree(dev_info);
+ cg2900_set_usr(pf_data, NULL);
+
+ if (!info->hdev)
+ goto finished;
+
+ BT_INFO("Unregistering CG2900");
+ info->hdev->driver_data = NULL;
+ hci_unregister_dev(info->hdev);
+ hci_free_dev(info->hdev);
+ info->hdev = NULL;
+
+finished:
+ device_removed(info);
+ return err;
+}
+
+/**
+ * btcg2900_cmd_remove() - Remove command channel.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Error codes from remove_common.
+ */
+static int __devexit btcg2900_cmd_remove(struct platform_device *pdev)
+{
+ struct btcg2900_info *info;
+
+ BT_DBG("Removing CG2900 Command channel");
+
+ info = dev_get_drvdata(&pdev->dev);
+ info->cmd = NULL;
+ return remove_common(pdev, info);
+}
+
+/**
+ * btcg2900_acl_remove() - Remove ACL channel.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Error codes from remove_common.
+ */
+static int __devexit btcg2900_acl_remove(struct platform_device *pdev)
+{
+ struct btcg2900_info *info;
+
+ BT_DBG("Removing CG2900 ACL channel");
+
+ info = dev_get_drvdata(&pdev->dev);
+ info->acl = NULL;
+ return remove_common(pdev, info);
+}
+
+/**
+ * btcg2900_evt_remove() - Remove event channel.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Error codes from remove_common.
+ */
+static int __devexit btcg2900_evt_remove(struct platform_device *pdev)
+{
+ struct btcg2900_info *info;
+
+ BT_DBG("Removing CG2900 Event channel");
+
+ info = dev_get_drvdata(&pdev->dev);
+ info->evt = NULL;
+ return remove_common(pdev, info);
+}
+
+static struct platform_driver btcg2900_cmd_driver = {
+ .driver = {
+ .name = "cg2900-btcmd",
+ .owner = THIS_MODULE,
+ },
+ .probe = btcg2900_cmd_probe,
+ .remove = __devexit_p(btcg2900_cmd_remove),
+};
+
+static struct platform_driver btcg2900_acl_driver = {
+ .driver = {
+ .name = "cg2900-btacl",
+ .owner = THIS_MODULE,
+ },
+ .probe = btcg2900_acl_probe,
+ .remove = __devexit_p(btcg2900_acl_remove),
+};
+
+static struct platform_driver btcg2900_evt_driver = {
+ .driver = {
+ .name = "cg2900-btevt",
+ .owner = THIS_MODULE,
+ },
+ .probe = btcg2900_evt_probe,
+ .remove = __devexit_p(btcg2900_evt_remove),
+};
+
+/**
+ * btcg2900_init() - Initialize module.
+ *
+ * Registers the platform drivers for the CMD, ACL, and EVT channels.
+ */
+static int __init btcg2900_init(void)
+{
+ int err;
+
+ BT_DBG("btcg2900_init");
+
+ err = platform_driver_register(&btcg2900_cmd_driver);
+ if (err) {
+ BT_ERR("Failed to register cmd (%d)", err);
+ return err;
+ }
+ err = platform_driver_register(&btcg2900_acl_driver);
+ if (err) {
+ BT_ERR("Failed to register acl (%d)", err);
+ platform_driver_unregister(&btcg2900_cmd_driver);
+ return err;
+ }
+ err = platform_driver_register(&btcg2900_evt_driver);
+ if (err) {
+ BT_ERR("Failed to register evt (%d)", err);
+ platform_driver_unregister(&btcg2900_acl_driver);
+ platform_driver_unregister(&btcg2900_cmd_driver);
+ return err;
+ }
+ return 0;
+}
+
+/**
+ * btcg2900_exit() - Remove module.
+ *
+ * Unregisters the platform drivers.
+ */
+static void __exit btcg2900_exit(void)
+{
+ BT_DBG("btcg2900_exit");
+ platform_driver_unregister(&btcg2900_cmd_driver);
+ platform_driver_unregister(&btcg2900_acl_driver);
+ platform_driver_unregister(&btcg2900_evt_driver);
+}
+
+module_init(btcg2900_init);
+module_exit(btcg2900_exit);
+
+MODULE_AUTHOR("Par-Gunnar Hjalmdahl ST-Ericsson");
+MODULE_AUTHOR("Henrik Possung ST-Ericsson");
+MODULE_AUTHOR("Josef Kindberg ST-Ericsson");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Linux Bluetooth HCI H:4 Driver for ST-Ericsson controller");
diff --git a/drivers/staging/cg2900/bluetooth/cg2900_uart.c b/drivers/staging/cg2900/bluetooth/cg2900_uart.c
new file mode 100644
index 00000000000..6cde9d75c21
--- /dev/null
+++ b/drivers/staging/cg2900/bluetooth/cg2900_uart.c
@@ -0,0 +1,2169 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * Lukasz Rymanowski (lukasz.rymanowski@tieto.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth UART Driver for ST-Ericsson CG2900 connectivity controller.
+ */
+#define NAME "cg2900_uart"
+#define pr_fmt(fmt) NAME ": " fmt "\n"
+
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_qos.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/tty.h>
+#include <linux/tty_ldisc.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+
+#include "cg2900.h"
+
+#include "hci_uart.h"
+
+#define MAIN_DEV (uart_info->dev)
+
+/* Workqueues' names */
+#define UART_WQ_NAME "cg2900_uart_wq"
+#define UART_NAME "cg2900_uart"
+
+/*
+ * A BT command complete event without any parameters is the defined size plus
+ * 1 byte extra for the status field which is always present in a
+ * command complete event.
+ */
+#define HCI_BT_CMD_COMPLETE_LEN (sizeof(struct hci_ev_cmd_complete) + 1)
+
+/* Timers used in milliseconds */
+#define UART_TX_TIMEOUT 100
+#define UART_RX_TIMEOUT 20
+#define UART_RESP_TIMEOUT 1000
+#define UART_RESUME_TIMEOUT 20
+
+/* Max latency in microseconds for PM QoS to achieve max throughput */
+#define CG2900_PM_QOS_LATENCY 30
+
+/* Number of bytes to reserve at start of sk_buffer when receiving packet */
+#define RX_SKB_RESERVE 8
+/* Max size of received packet (not including reserved bytes) */
+#define RX_SKB_MAX_SIZE 1024
+
+/* Size of the header in the different packets */
+#define HCI_BT_EVT_HDR_SIZE 2
+#define HCI_BT_ACL_HDR_SIZE 4
+#define HCI_FM_RADIO_HDR_SIZE 1
+#define HCI_GNSS_HDR_SIZE 3
+
+/* Position of length field in the different packets */
+#define HCI_EVT_LEN_POS 2
+#define HCI_ACL_LEN_POS 3
+#define FM_RADIO_LEN_POS 1
+#define GNSS_LEN_POS 2
+
+/* Baud rate defines */
+#define ZERO_BAUD_RATE 0
+#define DEFAULT_BAUD_RATE 115200
+#define HIGH_BAUD_RATE 3000000
+
+#define BT_SIZE_OF_HDR (sizeof(__le16) + sizeof(__u8))
+#define BT_PARAM_LEN(__pkt_len) (__pkt_len - BT_SIZE_OF_HDR)
+
+/* Standardized Bluetooth H:4 channels */
+#define HCI_BT_CMD_H4_CHANNEL 0x01
+#define HCI_BT_ACL_H4_CHANNEL 0x02
+#define HCI_BT_SCO_H4_CHANNEL 0x03
+#define HCI_BT_EVT_H4_CHANNEL 0x04
+
+#define BT_BDADDR_SIZE 6
+
+/* Reserve 1 byte for the HCI H:4 header */
+#define HCI_H4_SIZE 1
+#define CG2900_SKB_RESERVE HCI_H4_SIZE
+
+/* Default H4 channels which may change depending on connected controller */
+#define HCI_FM_RADIO_H4_CHANNEL 0x08
+#define HCI_GNSS_H4_CHANNEL 0x09
+
+/* Bluetooth error codes */
+#define HCI_BT_ERROR_NO_ERROR 0x00
+
+/* Bytes in the command Hci_Cmd_ST_Set_Uart_Baud_Rate */
+#define CG2900_BAUD_RATE_57600 0x03
+#define CG2900_BAUD_RATE_115200 0x02
+#define CG2900_BAUD_RATE_230400 0x01
+#define CG2900_BAUD_RATE_460800 0x00
+#define CG2900_BAUD_RATE_921600 0x20
+#define CG2900_BAUD_RATE_2000000 0x25
+#define CG2900_BAUD_RATE_3000000 0x27
+#define CG2900_BAUD_RATE_3250000 0x28
+#define CG2900_BAUD_RATE_4000000 0x2B
+
+/* GNSS */
+struct gnss_hci_hdr {
+ __u8 op_code;
+ __le16 plen;
+} __packed;
+
+/* FM legacy command packet */
+struct fm_leg_cmd {
+ __u8 length;
+ __u8 opcode;
+ __u8 read_write;
+ __u8 fm_function;
+ union { /* Payload varies with function */
+ __le16 irqmask;
+ struct fm_leg_fm_cmd {
+ __le16 head;
+ __le16 data[];
+ } fm_cmd;
+ };
+} __packed;
+
+/* FM legacy command complete packet */
+struct fm_leg_cmd_cmpl {
+ __u8 param_length;
+ __u8 status;
+ __u8 opcode;
+ __u8 read_write;
+ __u8 cmd_status;
+ __u8 fm_function;
+ __le16 response_head;
+ __le16 data[];
+} __packed;
+
+/* FM legacy interrupt packet, PG2 style */
+struct fm_leg_irq_v2 {
+ __u8 param_length;
+ __u8 status;
+ __u8 opcode;
+ __u8 event_type;
+ __u8 event_id;
+ __le16 irq;
+} __packed;
+
+/* FM legacy interrupt packet, PG1 style */
+struct fm_leg_irq_v1 {
+ __u8 param_length;
+ __u8 opcode;
+ __u8 event_id;
+ __le16 irq;
+} __packed;
+
+union fm_leg_evt_or_irq {
+ __u8 param_length;
+ struct fm_leg_cmd_cmpl evt;
+ struct fm_leg_irq_v2 irq_v2;
+ struct fm_leg_irq_v1 irq_v1;
+} __packed;
+
+/* BT VS SetBaudRate command */
+#define CG2900_BT_OP_VS_SET_BAUD_RATE 0xFC09
+struct bt_vs_set_baud_rate_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 baud_rate;
+} __packed;
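
(Editorial illustration, not part of the patch.) To see how the pieces above fit together: the vendor-specific set-baud-rate request is the 0xFC09 opcode packet just defined, carried on the BT command H:4 channel, with one of the CG2900_BAUD_RATE_* bytes as its single parameter. A hypothetical helper building the 3 Mbaud variant; the helper name and the skb sizing are assumptions for the sketch, not taken from the driver code:

static struct sk_buff *build_set_baud_rate_3m(void)
{
	struct bt_vs_set_baud_rate_cmd *cmd;
	struct sk_buff *skb;

	skb = alloc_skb(HCI_H4_SIZE + sizeof(*cmd), GFP_KERNEL);
	if (!skb)
		return NULL;

	/* H:4 channel byte first, then the HCI command itself */
	*skb_put(skb, 1) = HCI_BT_CMD_H4_CHANNEL;
	cmd = (struct bt_vs_set_baud_rate_cmd *)skb_put(skb, sizeof(*cmd));
	cmd->opcode = cpu_to_le16(CG2900_BT_OP_VS_SET_BAUD_RATE);
	cmd->plen = sizeof(cmd->baud_rate);
	cmd->baud_rate = CG2900_BAUD_RATE_3000000;	/* 3 Mbaud */

	return skb;
}
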
+
+/**
+ * enum uart_rx_state - UART RX-state for UART.
+ * @W4_PACKET_TYPE: Waiting for packet type.
+ * @W4_EVENT_HDR: Waiting for BT event header.
+ * @W4_ACL_HDR: Waiting for BT ACL header.
+ * @W4_FM_RADIO_HDR: Waiting for FM header.
+ * @W4_GNSS_HDR: Waiting for GNSS header.
+ * @W4_DATA: Waiting for data in rest of the packet (after header).
+ */
+enum uart_rx_state {
+ W4_PACKET_TYPE,
+ W4_EVENT_HDR,
+ W4_ACL_HDR,
+ W4_FM_RADIO_HDR,
+ W4_GNSS_HDR,
+ W4_DATA
+};
+
+/**
+ * enum sleep_state - Sleep-state for UART.
+ * @CHIP_AWAKE: Chip is awake.
+ * @CHIP_FALLING_ASLEEP: Chip is falling asleep.
+ * @CHIP_ASLEEP: Chip is asleep.
+ * @CHIP_SUSPENDED: Chip in suspend state.
+ * @CHIP_RESUMING: Chip is going back from suspend state.
+ * @CHIP_POWERED_DOWN: Chip is off.
+ */
+enum sleep_state {
+ CHIP_AWAKE,
+ CHIP_FALLING_ASLEEP,
+ CHIP_ASLEEP,
+ CHIP_SUSPENDED,
+ CHIP_RESUMING,
+ CHIP_POWERED_DOWN
+};
+
+/**
+ * enum baud_rate_change_state - Baud rate-state for UART.
+ * @BAUD_IDLE: No baud rate change is ongoing.
+ * @BAUD_SENDING_RESET: HCI reset has been sent. Waiting for command complete
+ * event.
+ * @BAUD_START: Set baud rate cmd scheduled for sending.
+ * @BAUD_SENDING: Set baud rate cmd sending in progress.
+ * @BAUD_WAITING: Set baud rate cmd sent, waiting for command complete
+ * event.
+ * @BAUD_SUCCESS: Baud rate change has succeeded.
+ * @BAUD_FAIL: Baud rate change has failed.
+ */
+enum baud_rate_change_state {
+ BAUD_IDLE,
+ BAUD_SENDING_RESET,
+ BAUD_START,
+ BAUD_SENDING,
+ BAUD_WAITING,
+ BAUD_SUCCESS,
+ BAUD_FAIL
+};
+
+/**
+ * struct uart_work_struct - Work structure for UART module.
+ * @work: Work structure.
+ * @data: Pointer to private data.
+ *
+ * This structure is used to pack work for work queue.
+ */
+struct uart_work_struct {
+ struct work_struct work;
+ void *data;
+};
+
+/**
+ * struct uart_delayed_work_struct - Work structure for UART module.
+ * @delayed_work: Work structure.
+ * @data: Pointer to private data.
+ *
+ * This structure is used to pack work for work queue.
+ */
+struct uart_delayed_work_struct {
+ struct delayed_work work;
+ void *data;
+};
+
+/**
+ * struct uart_info - Main UART info structure.
+ * @rx_state: Current RX state.
+ * @rx_count: Number of bytes left to receive.
+ * @rx_skb: SK_buffer to store the received data into.
+ * @tx_queue: TX queue for sending data to chip.
+ * @rx_skb_lock: Spin lock to protect rx_skb.
+ * @hu: Hci uart structure.
+ * @wq: UART work queue.
+ * @baud_rate_state: UART baud rate change state.
+ * @baud_rate: Current baud rate setting.
+ * @sleep_state: UART sleep state.
+ * @sleep_work: Delayed sleep work struct.
+ * @wakeup_work: Wake-up work struct.
+ * @restart_sleep_work: Reschedule sleep_work and wake-up work struct.
+ * @sleep_state_lock: Used to protect chip state.
+ * @sleep_allowed: Indicates if tty has functions needed for sleep mode.
+ * @tx_in_progress: Indicates data sending in progress.
+ * @rx_in_progress: Indicates data receiving in progress.
+ * @transmission_lock: Spin_lock to protect tx/rx_in_progress.
+ * @regulator: Regulator.
+ * @regulator_enabled: True if regulator is enabled.
+ * @dev: Pointer to CG2900 uart device.
+ * @chip_dev: Chip device for current UART transport.
+ * @cts_irq: CTS interrupt for this UART.
+ * @cts_gpio: CTS GPIO for this UART.
+ * @suspend_blocked: True if suspend operation is blocked in the framework.
+ * @pm_qos_latency: PM QoS structure.
+ */
+struct uart_info {
+ enum uart_rx_state rx_state;
+ unsigned long rx_count;
+ struct sk_buff *rx_skb;
+ struct sk_buff_head tx_queue;
+ spinlock_t rx_skb_lock;
+
+ struct hci_uart *hu;
+
+ struct workqueue_struct *wq;
+ enum baud_rate_change_state baud_rate_state;
+ int baud_rate;
+ enum sleep_state sleep_state;
+ struct uart_delayed_work_struct sleep_work;
+ struct uart_work_struct wakeup_work;
+ struct uart_work_struct restart_sleep_work;
+ struct mutex sleep_state_lock;
+ bool sleep_allowed;
+ bool tx_in_progress;
+ bool rx_in_progress;
+ spinlock_t transmission_lock;
+ struct regulator *regulator;
+ bool regulator_enabled;
+ struct device *dev;
+ struct cg2900_chip_dev chip_dev;
+ int cts_irq;
+ int cts_gpio;
+ bool suspend_blocked;
+ struct pm_qos_request pm_qos_latency;
+};
+
+/* Module parameters */
+static int uart_default_baud = DEFAULT_BAUD_RATE;
+static int uart_high_baud = HIGH_BAUD_RATE;
+static int uart_debug;
+
+static DECLARE_WAIT_QUEUE_HEAD(uart_wait_queue);
+
+static void wake_up_chip(struct uart_info *uart_info);
+
+/**
+ * is_chip_flow_off() - Check if chip has set flow off.
+ * @uart_info: Main Uart structure.
+ *
+ * Returns:
+ * true - chip flows off.
+ * false - chip flows on.
+ */
+static bool is_chip_flow_off(struct uart_info *uart_info)
+{
+ int lines = 0;
+
+ if (uart_info->hu)
+ lines = hci_uart_tiocmget(uart_info->hu);
+
+ if (lines & TIOCM_CTS)
+ return false;
+ else
+ return true;
+}
+
+/**
+ * create_work_item() - Create work item and add it to the work queue.
+ * @uart_info: Main Uart structure.
+ * @work_func: Work function.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EBUSY if not possible to queue work.
+ * -ENOMEM if allocation fails.
+ */
+static int create_work_item(struct uart_info *uart_info,
+ work_func_t work_func)
+{
+ struct uart_work_struct *new_work;
+ int res;
+
+ new_work = kmalloc(sizeof(*new_work), GFP_ATOMIC);
+ if (!new_work) {
+ dev_err(MAIN_DEV,
+ "Failed to alloc memory for uart_work_struct\n");
+ return -ENOMEM;
+ }
+
+ new_work->data = uart_info;
+ INIT_WORK(&new_work->work, work_func);
+
+ res = queue_work(uart_info->wq, &new_work->work);
+ if (!res) {
+ dev_err(MAIN_DEV,
+ "Failed to queue work_struct because it's already "
+ "in the queue\n");
+ kfree(new_work);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * handle_cts_irq() - Called to handle CTS interrupt in work context.
+ * @work: work which needs to be done.
+ *
+ * The handle_cts_irq() function is a work handler called if interrupt on CTS
+ * occurred. It wakes up the transport.
+ */
+static void handle_cts_irq(struct work_struct *work)
+{
+ struct uart_work_struct *current_work =
+ container_of(work, struct uart_work_struct, work);
+ struct uart_info *uart_info = (struct uart_info *)current_work->data;
+
+ pm_qos_update_request(&uart_info->pm_qos_latency,
+ CG2900_PM_QOS_LATENCY);
+
+ spin_lock_bh(&(uart_info->transmission_lock));
+ /* Mark that there is an ongoing transfer. */
+ uart_info->rx_in_progress = true;
+ spin_unlock_bh(&(uart_info->transmission_lock));
+
+ /* Cancel pending sleep work if there is any. */
+ cancel_delayed_work_sync(&uart_info->sleep_work.work);
+
+ mutex_lock(&(uart_info->sleep_state_lock));
+
+ if (uart_info->sleep_state == CHIP_SUSPENDED) {
+ dev_dbg(MAIN_DEV, "New sleep_state: CHIP_RESUMING\n");
+ uart_info->sleep_state = CHIP_RESUMING;
+ mutex_unlock(&(uart_info->sleep_state_lock));
+ } else {
+ mutex_unlock(&(uart_info->sleep_state_lock));
+ wake_up_chip(uart_info);
+ }
+
+ kfree(current_work);
+}
+
+/**
+ * cts_interrupt() - Called to handle CTS interrupt.
+ * @irq: Interrupt that occurred.
+ * @dev_id: Device ID where interrupt occurred.
+ *
+ * The cts_interrupt() function is called if interrupt on CTS occurred.
+ * It disables the interrupt and starts a new work thread to handle
+ * the interrupt.
+ */
+static irqreturn_t cts_interrupt(int irq, void *dev_id)
+{
+ struct uart_info *uart_info = dev_get_drvdata(dev_id);
+#ifdef CONFIG_PM
+ disable_irq_wake(irq);
+#endif
+ disable_irq_nosync(irq);
+
+ /* Create work and leave IRQ context. */
+ (void)create_work_item(uart_info, handle_cts_irq);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * set_cts_irq() - Enable interrupt on CTS.
+ * @uart_info: Main Uart structure.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Error codes from request_irq and disable_uart.
+ */
+static int set_cts_irq(struct uart_info *uart_info)
+{
+ int err;
+ int cts_val = 0;
+
+ /* Set IRQ on CTS. */
+ err = request_irq(uart_info->cts_irq,
+ cts_interrupt,
+ IRQF_TRIGGER_FALLING,
+ UART_NAME,
+ uart_info->dev);
+ if (err) {
+ dev_err(MAIN_DEV, "Could not request CTS IRQ (%d)\n", err);
+ return err;
+ }
+
+ /*
+ * It may happen that there was already an interrupt on CTS just before
+ * the request_irq() call above. If the CTS line is low now it means that
+ * this has happened, so free the CTS interrupt and return -ECANCELED.
+ */
+ cts_val = gpio_get_value(uart_info->cts_gpio);
+ if (!cts_val) {
+ dev_dbg(MAIN_DEV, "Missed interrupt, going back to "
+ "awake state\n");
+ free_irq(uart_info->cts_irq, uart_info->dev);
+ return -ECANCELED;
+ }
+
+#ifdef CONFIG_PM
+ enable_irq_wake(uart_info->cts_irq);
+#endif
+ return 0;
+}
+
+/**
+ * disable_uart_pins() - Disable the UART pins.
+ * @uart_info: Main Uart structure.
+ */
+static void disable_uart_pins(struct uart_info *uart_info)
+{
+ struct cg2900_platform_data *pf_data;
+
+ pf_data = dev_get_platdata(uart_info->dev);
+
+ if (pf_data->uart.disable_uart) {
+ int err = pf_data->uart.disable_uart(&uart_info->chip_dev);
+ if (err)
+ dev_err(MAIN_DEV,
+ "Unable to disable UART Hardware (%d)\n", err);
+ }
+}
+
+/**
+ * enable_uart_pins() - Enable the UART pins.
+ * @uart_info: Main Uart structure.
+ */
+static void enable_uart_pins(struct uart_info *uart_info)
+{
+ struct cg2900_platform_data *pf_data;
+
+ pf_data = dev_get_platdata(uart_info->dev);
+
+ if (pf_data->uart.enable_uart) {
+ int err = pf_data->uart.enable_uart(&uart_info->chip_dev);
+ if (err)
+ dev_err(MAIN_DEV,
+ "Unable to enable UART Hardware (%d)\n", err);
+ }
+}
+
+/**
+ * unset_cts_irq() - Disable interrupt on CTS.
+ * @uart_info: Main Uart structure.
+ */
+static void unset_cts_irq(struct uart_info *uart_info)
+{
+ /* Free CTS interrupt */
+ free_irq(uart_info->cts_irq, uart_info->dev);
+}
+
+/**
+ * get_sleep_timeout() - Get sleep timeout.
+ * @uart_info: Main Uart structure.
+ *
+ * Check all conditions for sleep and return sleep timeout.
+ * Return:
+ * 0: sleep not allowed.
+ * other: Timeout value in ms.
+ */
+static unsigned long get_sleep_timeout(struct uart_info *uart_info)
+{
+ unsigned long timeout_jiffies = cg2900_get_sleep_timeout();
+
+ if (timeout_jiffies &&
+ uart_info->hu &&
+ uart_info->hu->fd &&
+ uart_info->sleep_allowed)
+ return timeout_jiffies;
+
+ return 0;
+}
+
+/**
+ * work_wake_up_chip() - Called to wake up of the transport in work context.
+ * @work: work which needs to be done.
+ */
+static void work_wake_up_chip(struct work_struct *work)
+{
+ struct uart_work_struct *current_work =
+ container_of(work, struct uart_work_struct, work);
+ struct uart_info *uart_info = (struct uart_info *)current_work->data;
+
+ wake_up_chip(uart_info);
+}
+
+/**
+ * wake_up_chip() - Wakes up the chip and transport.
+ * @uart_info: Main Uart structure.
+ *
+ * Depending on the current sleep state it may wake up the transport.
+ */
+static void wake_up_chip(struct uart_info *uart_info)
+{
+ unsigned long timeout_jiffies = get_sleep_timeout(uart_info);
+
+ /* Resuming state is special. Need to get back chip to awake state. */
+ if (!timeout_jiffies && uart_info->sleep_state != CHIP_RESUMING)
+ return;
+
+ if (!uart_info->hu) {
+ dev_err(MAIN_DEV, "wake_up_chip: UART not open\n");
+ return;
+ }
+
+ mutex_lock(&(uart_info->sleep_state_lock));
+
+ /*
+ * If chip is powered down we cannot wake it up here. It has to be woken
+ * up through a call to uart_set_chip_power()
+ */
+ if (CHIP_POWERED_DOWN == uart_info->sleep_state)
+ goto finished;
+
+ if (!uart_info->suspend_blocked) {
+ uart_info->suspend_blocked = true;
+ pm_qos_update_request(&uart_info->pm_qos_latency,
+ CG2900_PM_QOS_LATENCY);
+ }
+
+ /*
+ * This function indicates data is transmitted.
+ * Therefore see to that the chip is awake.
+ */
+ if (CHIP_AWAKE == uart_info->sleep_state)
+ goto finished;
+
+ if (CHIP_ASLEEP == uart_info->sleep_state ||
+ CHIP_RESUMING == uart_info->sleep_state) {
+ /* Wait before disabling IRQ */
+ schedule_timeout_killable(
+ msecs_to_jiffies(UART_RESUME_TIMEOUT));
+
+ /* Disable IRQ only when it was enabled. */
+ unset_cts_irq(uart_info);
+ (void)hci_uart_set_baudrate(uart_info->hu,
+ uart_info->baud_rate);
+
+ enable_uart_pins(uart_info);
+
+ /*
+ * Wait before flowing on. Otherwise UART might not be ready in
+ * time
+ */
+ schedule_timeout_killable(
+ msecs_to_jiffies(UART_RESUME_TIMEOUT));
+
+ /* Set FLOW on. */
+ hci_uart_flow_ctrl(uart_info->hu, FLOW_ON);
+ }
+
+ /* Unset BREAK. */
+ dev_dbg(MAIN_DEV, "wake_up_chip: Clear break\n");
+ hci_uart_set_break(uart_info->hu, BREAK_OFF);
+
+ dev_dbg(MAIN_DEV, "New sleep_state: CHIP_AWAKE\n");
+ uart_info->sleep_state = CHIP_AWAKE;
+
+finished:
+ mutex_unlock(&(uart_info->sleep_state_lock));
+}
+
+/**
+ * set_chip_sleep_mode() - Put the chip and transport to sleep mode.
+ * @work: pointer to work_struct.
+ *
+ * The set_chip_sleep_mode() function is called if there are no ongoing data
+ * transmissions. It tries to put the chip in sleep mode.
+ *
+ */
+static void set_chip_sleep_mode(struct work_struct *work)
+{
+ int err = 0;
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct uart_delayed_work_struct *current_work = container_of(
+ delayed_work, struct uart_delayed_work_struct, work);
+ struct uart_info *uart_info = (struct uart_info *)current_work->data;
+ unsigned long timeout_jiffies = get_sleep_timeout(uart_info);
+ int chars_in_buffer;
+
+ if (!timeout_jiffies)
+ return;
+
+ if (!uart_info->hu) {
+ dev_err(MAIN_DEV, "set_chip_sleep_mode: UART not open\n");
+ return;
+ }
+
+ if (uart_info->tx_in_progress || uart_info->rx_in_progress) {
+ dev_dbg(MAIN_DEV, "Not going to sleep, TX/RX in progress\n");
+ return;
+ }
+
+ mutex_lock(&(uart_info->sleep_state_lock));
+
+ switch (uart_info->sleep_state) {
+ case CHIP_FALLING_ASLEEP:
+ if (!is_chip_flow_off(uart_info)) {
+ dev_dbg(MAIN_DEV, "Chip flow is on, it's not ready to"
+ "sleep yet\n");
+ goto schedule_sleep_work;
+ }
+
+ /* Flow OFF. */
+ hci_uart_flow_ctrl(uart_info->hu, FLOW_OFF);
+
+ disable_uart_pins(uart_info);
+
+ /*
+ * Set the baud rate to zero.
+ * This also shuts off the UART clock.
+ */
+ (void)hci_uart_set_baudrate(uart_info->hu, ZERO_BAUD_RATE);
+ err = set_cts_irq(uart_info);
+ if (err < 0) {
+ enable_uart_pins(uart_info);
+ (void)hci_uart_set_baudrate(uart_info->hu,
+ uart_info->baud_rate);
+ hci_uart_flow_ctrl(uart_info->hu, FLOW_ON);
+ hci_uart_set_break(uart_info->hu, BREAK_OFF);
+
+ dev_dbg(MAIN_DEV, "New sleep_state: CHIP_AWAKE\n");
+ uart_info->sleep_state = CHIP_AWAKE;
+
+ if (err == -ECANCELED)
+ goto finished;
+ else {
+ dev_err(MAIN_DEV, "Can not set interrupt on "
+ "CTS, err:%d\n", err);
+ goto error;
+ }
+ }
+
+ dev_dbg(MAIN_DEV, "New sleep_state: CHIP_ASLEEP\n");
+ uart_info->sleep_state = CHIP_ASLEEP;
+ if (uart_info->suspend_blocked) {
+ uart_info->suspend_blocked = false;
+ pm_qos_update_request(&uart_info->pm_qos_latency,
+ PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE);
+ }
+ break;
+ case CHIP_AWAKE:
+ chars_in_buffer = hci_uart_chars_in_buffer(uart_info->hu);
+ if (chars_in_buffer) {
+ dev_dbg(MAIN_DEV, "sleep_timer_expired: "
+ "tx not finished, stay awake and "
+ "restart the sleep timer\n");
+ goto schedule_sleep_work;
+ }
+
+ dev_dbg(MAIN_DEV, "sleep_timer_expired: Set break\n");
+ hci_uart_set_break(uart_info->hu, BREAK_ON);
+
+ dev_dbg(MAIN_DEV, "New sleep_state: CHIP_FALLING_ASLEEP\n");
+ uart_info->sleep_state = CHIP_FALLING_ASLEEP;
+ goto schedule_sleep_work;
+
+ case CHIP_POWERED_DOWN:
+ case CHIP_SUSPENDED:
+ case CHIP_ASLEEP: /* Fallthrough. */
+ default:
+ dev_dbg(MAIN_DEV,
+ "Chip sleeps, is suspended or powered down\n");
+ break;
+ }
+
+ mutex_unlock(&(uart_info->sleep_state_lock));
+
+ return;
+
+finished:
+ mutex_unlock(&(uart_info->sleep_state_lock));
+ return;
+schedule_sleep_work:
+ mutex_unlock(&(uart_info->sleep_state_lock));
+ if (timeout_jiffies)
+ queue_delayed_work(uart_info->wq, &uart_info->sleep_work.work,
+ timeout_jiffies);
+ return;
+error:
+ /* Disable sleep mode.*/
+ dev_err(MAIN_DEV, "Disable sleep mode\n");
+ uart_info->sleep_allowed = false;
+ mutex_unlock(&(uart_info->sleep_state_lock));
+}
+
+#ifdef CONFIG_PM
+/**
+ * cg2900_uart_suspend() - Called by Linux PM to put the device in a low power mode.
+ * @pdev: Pointer to platform device.
+ * @state: New state.
+ *
+ * Suspend is allowed only when the chip is powered down or already asleep;
+ * when asleep the sleep state is changed to CHIP_SUSPENDED.
+ *
+ * Returns:
+ * 0 - Success.
+ * -EBUSY - Chip is neither powered down nor asleep.
+ */
+static int cg2900_uart_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int err = 0;
+ struct uart_info *uart_info = dev_get_drvdata(&pdev->dev);
+
+ mutex_lock(&(uart_info->sleep_state_lock));
+
+ if (uart_info->sleep_state == CHIP_POWERED_DOWN)
+ goto finished;
+
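+ /* Refuse suspend unless the chip is already asleep. */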
+ if (uart_info->sleep_state != CHIP_ASLEEP) {
+ err = -EBUSY;
+ goto finished;
+ }
+
+ dev_dbg(MAIN_DEV, "New sleep_state: CHIP_SUSPENDED\n");
+ uart_info->sleep_state = CHIP_SUSPENDED;
+
+finished:
+ mutex_unlock(&(uart_info->sleep_state_lock));
+ return err;
+}
+
+/**
+ * cg2900_uart_resume() - Called to bring a device back from a low power state.
+ * @pdev: Pointer to platform device.
+ *
+ * If the system was resumed due to traffic on the UART the wake-up work is
+ * scheduled; otherwise the chip is put back in the asleep state.
+ *
+ * Returns:
+ * 0 - Success.
+ */
+static int cg2900_uart_resume(struct platform_device *pdev)
+{
+ struct uart_info *uart_info = dev_get_drvdata(&pdev->dev);
+
+ mutex_lock(&(uart_info->sleep_state_lock));
+
+ if (uart_info->sleep_state == CHIP_RESUMING)
+ /* System resumed because of traffic on the UART. Wake up the chip. */
+ (void)queue_work(uart_info->wq, &uart_info->wakeup_work.work);
+ else if (uart_info->sleep_state != CHIP_POWERED_DOWN) {
+ /* No need to wakeup chip. Go back to Asleep state.*/
+ dev_dbg(MAIN_DEV, "New sleep_state: CHIP_ASLEEP\n");
+ uart_info->sleep_state = CHIP_ASLEEP;
+ }
+
+ mutex_unlock(&(uart_info->sleep_state_lock));
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * cg2900_enable_regulator() - Enable regulator.
+ * @uart_info: Main Uart structure.
+ *
+ * Returns:
+ * 0 - Success.
+ * Error codes from regulator_get() or regulator_enable().
+ */
+static int cg2900_enable_regulator(struct uart_info *uart_info)
+{
+#ifdef CONFIG_REGULATOR
+ int err;
+
+ /* Get and enable regulator. */
+ uart_info->regulator = regulator_get(uart_info->dev, "gbf_1v8");
+ if (IS_ERR(uart_info->regulator)) {
+ dev_err(MAIN_DEV, "Not able to find regulator\n");
+ err = PTR_ERR(uart_info->regulator);
+ } else {
+ err = regulator_enable(uart_info->regulator);
+ if (err)
+ dev_err(MAIN_DEV, "Not able to enable regulator\n");
+ else
+ uart_info->regulator_enabled = true;
+ }
+ return err;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * cg2900_disable_regulator() - Disable regulator.
+ * @uart_info: Main Uart structure.
+ *
+ */
+static void cg2900_disable_regulator(struct uart_info *uart_info)
+{
+#ifdef CONFIG_REGULATOR
+ /* Disable and put regulator. */
+ if (uart_info->regulator && uart_info->regulator_enabled) {
+ regulator_disable(uart_info->regulator);
+ uart_info->regulator_enabled = false;
+ }
+ regulator_put(uart_info->regulator);
+ uart_info->regulator = NULL;
+#endif
+}
+
+/**
+ * is_set_baud_rate_cmd() - Checks if data contains the set baud rate HCI command.
+ * @data: Pointer to data array to check.
+ *
+ * Returns:
+ * true - if cmd found;
+ * false - otherwise.
+ */
+static bool is_set_baud_rate_cmd(const char *data)
+{
+ struct hci_command_hdr *cmd;
+
+ if (data[0] != HCI_BT_CMD_H4_CHANNEL)
+ return false;
+
+ cmd = (struct hci_command_hdr *)&data[1];
+ if (le16_to_cpu(cmd->opcode) == CG2900_BT_OP_VS_SET_BAUD_RATE &&
+ cmd->plen == BT_PARAM_LEN(sizeof(struct bt_vs_set_baud_rate_cmd)))
+ return true;
+
+ return false;
+}
+
+/**
+ * is_bt_cmd_complete_no_param() - Checks if data contains command complete event for a certain command.
+ * @skb: sk_buffer containing the data including H:4 header.
+ * @opcode: Command op code.
+ * @status: Command status.
+ *
+ * Returns:
+ * true - If this is the command complete we were looking for;
+ * false - otherwise.
+ */
+static bool is_bt_cmd_complete_no_param(struct sk_buff *skb, u16 opcode,
+ u8 *status)
+{
+ struct hci_event_hdr *event;
+ struct hci_ev_cmd_complete *complete;
+ u8 *data = &(skb->data[0]);
+
+ if (HCI_BT_EVT_H4_CHANNEL != *data)
+ return false;
+
+ data += HCI_H4_SIZE;
+ event = (struct hci_event_hdr *)data;
+ if (HCI_EV_CMD_COMPLETE != event->evt ||
+ HCI_BT_CMD_COMPLETE_LEN != event->plen)
+ return false;
+
+ data += sizeof(*event);
+ complete = (struct hci_ev_cmd_complete *)data;
+ if (opcode != le16_to_cpu(complete->opcode))
+ return false;
+
+ if (status) {
+ /*
+ * All command complete events have the status field as the
+ * first byte of the packet data.
+ */
+ data += sizeof(*complete);
+ *status = *data;
+ }
+ return true;
+}
+
+/**
+ * alloc_rx_skb() - Alloc an sk_buff structure for receiving data from controller.
+ * @size: Size in number of octets.
+ * @priority: Allocation priority, e.g. GFP_KERNEL.
+ *
+ * Returns:
+ * Pointer to sk_buff structure.
+ */
+static struct sk_buff *alloc_rx_skb(unsigned int size, gfp_t priority)
+{
+ struct sk_buff *skb;
+
+ /* Allocate the SKB and reserve space for the header */
+ skb = alloc_skb(size + RX_SKB_RESERVE, priority);
+ if (skb)
+ skb_reserve(skb, RX_SKB_RESERVE);
+
+ return skb;
+}
+
+/**
+ * finish_setting_baud_rate() - Finalizes the set baud rate HCI command.
+ * @hu: Pointer to associated Hci uart structure.
+ *
+ * finish_setting_baud_rate() waits until the set baud rate command has
+ * actually been sent out on the wire and then switches the tty driver to the
+ * new baud rate.
+ */
+static void finish_setting_baud_rate(struct hci_uart *hu)
+{
+ struct uart_info *uart_info =
+ (struct uart_info *)dev_get_drvdata(hu->proto->dev);
+ /*
+ * Give the tty driver time to send data and proceed. If it hasn't
+ * been sent we can't do much about it anyway.
+ */
+ schedule_timeout_killable(msecs_to_jiffies(UART_TX_TIMEOUT));
+
+ /*
+ * Now switch the tty to the new baud rate; the termios update is
+ * handled inside hci_uart_set_baudrate().
+ */
+ if (hci_uart_set_baudrate(hu, uart_info->baud_rate) < 0) {
+ /* Something went wrong.*/
+ dev_dbg(MAIN_DEV, "New baud_rate_state: BAUD_IDLE\n");
+ uart_info->baud_rate_state = BAUD_IDLE;
+ } else {
+ dev_dbg(MAIN_DEV, "Setting termios to new baud rate\n");
+ dev_dbg(MAIN_DEV, "New baud_rate_state: BAUD_WAITING\n");
+ uart_info->baud_rate_state = BAUD_WAITING;
+ }
+
+ hci_uart_flow_ctrl(hu, FLOW_ON);
+}
+
+/**
+ * alloc_set_baud_rate_cmd() - Allocates new sk_buff and fills in the change baud rate hci cmd.
+ * @uart_info: Main Uart structure.
+ * @baud: (in/out) Requested new baud rate. Updated to default baud rate
+ * upon invalid value.
+ *
+ * Returns:
+ * Pointer to allocated sk_buff if successful;
+ * NULL otherwise.
+ */
+static struct sk_buff *alloc_set_baud_rate_cmd(struct uart_info *uart_info,
+ int *baud)
+{
+ struct sk_buff *skb;
+ u8 *h4;
+ struct bt_vs_set_baud_rate_cmd *cmd;
+
+ skb = alloc_skb(sizeof(*cmd) + CG2900_SKB_RESERVE, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(MAIN_DEV,
+ "alloc_set_baud_rate_cmd: Failed to alloc skb\n");
+ return NULL;
+ }
+ skb_reserve(skb, CG2900_SKB_RESERVE);
+
+ cmd = (struct bt_vs_set_baud_rate_cmd *)skb_put(skb, sizeof(*cmd));
+
+ /* Create the Hci_Cmd_ST_Set_Uart_Baud_Rate packet */
+ cmd->opcode = cpu_to_le16(CG2900_BT_OP_VS_SET_BAUD_RATE);
+ cmd->plen = BT_PARAM_LEN(sizeof(*cmd));
+
+ switch (*baud) {
+ case 57600:
+ cmd->baud_rate = CG2900_BAUD_RATE_57600;
+ break;
+ case 115200:
+ cmd->baud_rate = CG2900_BAUD_RATE_115200;
+ break;
+ case 230400:
+ cmd->baud_rate = CG2900_BAUD_RATE_230400;
+ break;
+ case 460800:
+ cmd->baud_rate = CG2900_BAUD_RATE_460800;
+ break;
+ case 921600:
+ cmd->baud_rate = CG2900_BAUD_RATE_921600;
+ break;
+ case 2000000:
+ cmd->baud_rate = CG2900_BAUD_RATE_2000000;
+ break;
+ case 3000000:
+ cmd->baud_rate = CG2900_BAUD_RATE_3000000;
+ break;
+ case 3250000:
+ cmd->baud_rate = CG2900_BAUD_RATE_3250000;
+ break;
+ case 4000000:
+ cmd->baud_rate = CG2900_BAUD_RATE_4000000;
+ break;
+ default:
+ dev_err(MAIN_DEV,
+ "Invalid speed requested (%d), using 115200 bps "
+ "instead\n", *baud);
+ cmd->baud_rate = CG2900_BAUD_RATE_115200;
+ *baud = 115200;
+ break;
+ }
+
+ h4 = skb_push(skb, HCI_H4_SIZE);
+ *h4 = HCI_BT_CMD_H4_CHANNEL;
+
+ return skb;
+}
+
+/**
+ * work_do_transmit() - Transmit data packet to connectivity controller over UART.
+ * @work: Pointer to work info structure. Contains uart_info structure
+ * pointer.
+ */
+static void work_do_transmit(struct work_struct *work)
+{
+ struct uart_work_struct *current_work =
+ container_of(work, struct uart_work_struct, work);
+ struct uart_info *uart_info = (struct uart_info *)current_work->data;
+
+ kfree(current_work);
+
+ if (!uart_info->hu) {
+ dev_err(MAIN_DEV, "work_do_transmit: UART not open\n");
+ return;
+ }
+
+ spin_lock_bh(&(uart_info->transmission_lock));
+ /* Mark that there is an ongoing transfer. */
+ uart_info->tx_in_progress = true;
+ spin_unlock_bh(&(uart_info->transmission_lock));
+
+ /* Cancel pending sleep work if there is any. */
+ cancel_delayed_work_sync(&uart_info->sleep_work.work);
+
+ /* Wake up the chip and transport. */
+ wake_up_chip(uart_info);
+
+ (void)hci_uart_tx_wakeup(uart_info->hu);
+}
+
+/**
+ * work_hw_deregistered() - Handle HW deregistered.
+ * @work: Reference to work data.
+ */
+static void work_hw_deregistered(struct work_struct *work)
+{
+ struct uart_work_struct *current_work;
+ struct uart_info *uart_info;
+ int err;
+ current_work = container_of(work, struct uart_work_struct, work);
+ uart_info = (struct uart_info *)current_work->data;
+
+ err = cg2900_deregister_trans_driver(&uart_info->chip_dev);
+ if (err)
+ dev_err(MAIN_DEV, "Could not deregister UART from Core (%d)\n",
+ err);
+
+ kfree(current_work);
+}
+
+/**
+ * set_baud_rate() - Sets new baud rate for the UART.
+ * @hu: Pointer to hci_uart structure.
+ * @baud: New baud rate.
+ *
+ * This function first sends the HCI command
+ * Hci_Cmd_ST_Set_Uart_Baud_Rate. It then changes the baud rate in HW, and
+ * finally it waits for the Command Complete event for the
+ * Hci_Cmd_ST_Set_Uart_Baud_Rate command.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EALREADY if baud rate change is already in progress.
+ * -EFAULT if one or more of the UART related structs is not allocated.
+ * -ENOMEM if skb allocation has failed.
+ * -EPERM if setting the new baud rate has failed.
+ * Errors from create_work_item.
+ */
+static int set_baud_rate(struct hci_uart *hu, int baud)
+{
+ int err = 0;
+ struct sk_buff *skb;
+ int old_baud_rate;
+ struct uart_info *uart_info =
+ (struct uart_info *)dev_get_drvdata(hu->proto->dev);
+
+ dev_dbg(MAIN_DEV, "set_baud_rate (%d baud)\n", baud);
+
+ if (uart_info->baud_rate_state != BAUD_IDLE) {
+ dev_err(MAIN_DEV,
+ "Trying to set new baud rate before old setting "
+ "is finished\n");
+ return -EALREADY;
+ }
+
+ if (!uart_info->hu) {
+ dev_err(MAIN_DEV, "set_baud_rate: UART not open\n");
+ return -EFAULT;
+ }
+
+ /*
+ * Wait some time to be sure that any RX process has finished (which
+ * flows on RTS in the end) before flowing off the RTS.
+ */
+ schedule_timeout_killable(msecs_to_jiffies(UART_RX_TIMEOUT));
+ hci_uart_flow_ctrl(uart_info->hu, FLOW_OFF);
+
+ /*
+ * Store old baud rate so that we can restore it if something goes
+ * wrong.
+ */
+ old_baud_rate = uart_info->baud_rate;
+
+ skb = alloc_set_baud_rate_cmd(uart_info, &baud);
+ if (!skb) {
+ dev_err(MAIN_DEV, "alloc_set_baud_rate_cmd failed\n");
+ return -ENOMEM;
+ }
+
+ dev_dbg(MAIN_DEV, "New baud_rate_state: BAUD_START\n");
+ uart_info->baud_rate_state = BAUD_START;
+ uart_info->baud_rate = baud;
+
+ /* Queue the sk_buffer... */
+ skb_queue_tail(&uart_info->tx_queue, skb);
+
+ /* ... and call the common UART TX function */
+ err = create_work_item(uart_info, work_do_transmit);
+ if (err) {
+ dev_err(MAIN_DEV,
+ "Failed to send change baud rate cmd, freeing skb\n");
+ skb = skb_dequeue_tail(&uart_info->tx_queue);
+ dev_dbg(MAIN_DEV, "New baud_rate_state: BAUD_IDLE\n");
+ uart_info->baud_rate_state = BAUD_IDLE;
+ uart_info->baud_rate = old_baud_rate;
+ kfree_skb(skb);
+ return err;
+ }
+
+ dev_dbg(MAIN_DEV, "Set baud rate cmd scheduled for sending\n");
+
+ /*
+ * Now wait for the command complete.
+ * It will come at the new baudrate.
+ */
+ wait_event_timeout(uart_wait_queue,
+ ((BAUD_SUCCESS == uart_info->baud_rate_state) ||
+ (BAUD_FAIL == uart_info->baud_rate_state)),
+ msecs_to_jiffies(UART_RESP_TIMEOUT));
+ if (BAUD_SUCCESS == uart_info->baud_rate_state)
+ dev_info(MAIN_DEV, "Baud rate changed to %d baud\n", baud);
+ else {
+ dev_err(MAIN_DEV, "Failed to set new baud rate (%d)\n",
+ uart_info->baud_rate_state);
+ err = -EPERM;
+ }
+
+ /* Finally flush the TTY so we are sure there is no bad data left */
+ hci_uart_flush_buffer(hu);
+ dev_dbg(MAIN_DEV, "Flushing TTY after baud rate change\n");
+ /* Finished. Set state to IDLE */
+ dev_dbg(MAIN_DEV, "New baud_rate_state: BAUD_IDLE\n");
+ uart_info->baud_rate_state = BAUD_IDLE;
+
+ return err;
+}
+
+/**
+ * uart_write() - Transmit data to CG2900 over UART.
+ * @dev: Transport device information.
+ * @skb: SK buffer to transmit.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Errors from create_work_item.
+ */
+static int uart_write(struct cg2900_chip_dev *dev, struct sk_buff *skb)
+{
+ int err;
+ struct uart_info *uart_info = dev_get_drvdata(dev->dev);
+
+ if (uart_debug)
+ dev_dbg(MAIN_DEV, "uart_write: data len = %d\n", skb->len);
+
+ /* Queue the sk_buffer... */
+ skb_queue_tail(&uart_info->tx_queue, skb);
+
+ /* ...and start TX operation */
+
+ err = create_work_item(uart_info, work_do_transmit);
+ if (err)
+ dev_err(MAIN_DEV,
+ "Failed to create work item (%d) uart_tty_wakeup\n",
+ err);
+
+ return err;
+}
+
+/**
+ * uart_open() - Open the CG2900 UART for data transfers.
+ * @dev: Transport device information.
+ *
+ * Returns:
+ * 0 if there is no error,
+ * -EACCES if write to transport failed,
+ * -EIO if chip did not answer to commands.
+ * Errors from set_baud_rate.
+ */
+static int uart_open(struct cg2900_chip_dev *dev)
+{
+ u8 *h4;
+ struct sk_buff *skb;
+ struct hci_command_hdr *cmd;
+ struct uart_info *uart_info = dev_get_drvdata(dev->dev);
+
+ if (!uart_info->hu) {
+ dev_err(MAIN_DEV, "uart_open: UART not open\n");
+ return -EACCES;
+ }
+
+ /*
+ * Chip has just been started up. It has a system to autodetect
+ * exact baud rate and transport to use. There are only a few commands
+ * it will recognize and HCI Reset is one of them.
+ * We therefore start with sending that before actually changing
+ * baud rate.
+ *
+ * Create the Hci_Reset packet
+ */
+
+ skb = alloc_skb(sizeof(*cmd) + HCI_H4_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(MAIN_DEV, "Couldn't allocate sk_buff with length %d\n",
+ sizeof(*cmd));
+ return -EACCES;
+ }
+ skb_reserve(skb, HCI_H4_SIZE);
+ cmd = (struct hci_command_hdr *)skb_put(skb, sizeof(*cmd));
+ cmd->opcode = cpu_to_le16(HCI_OP_RESET);
+ cmd->plen = 0; /* No parameters for HCI reset */
+
+ h4 = skb_push(skb, HCI_H4_SIZE);
+ *h4 = HCI_BT_CMD_H4_CHANNEL;
+
+ dev_dbg(MAIN_DEV, "New baud_rate_state: BAUD_SENDING_RESET\n");
+ uart_info->baud_rate_state = BAUD_SENDING_RESET;
+ dev_dbg(MAIN_DEV, "Sending HCI reset before baud rate change\n");
+
+
+ /* Queue the sk_buffer... */
+ skb_queue_tail(&uart_info->tx_queue, skb);
+
+ (void)hci_uart_tx_wakeup(uart_info->hu);
+
+ /*
+ * Wait for command complete. If error, exit without changing
+ * baud rate.
+ */
+ wait_event_timeout(uart_wait_queue,
+ BAUD_IDLE == uart_info->baud_rate_state,
+ msecs_to_jiffies(UART_RESP_TIMEOUT));
+ if (BAUD_IDLE != uart_info->baud_rate_state) {
+ dev_err(MAIN_DEV, "Failed to send HCI Reset\n");
+ dev_dbg(MAIN_DEV, "New baud_rate_state: BAUD_IDLE\n");
+ uart_info->baud_rate_state = BAUD_IDLE;
+ return -EIO;
+ }
+
+ /* Change the baud rate only if the high speed setting differs from the default */
+ if (uart_default_baud != uart_high_baud)
+ return set_baud_rate(uart_info->hu, uart_high_baud);
+ else
+ return 0;
+}
+
+/**
+ * uart_set_chip_power() - Enable or disable the CG2900.
+ * @chip_on: true if chip shall be enabled, false otherwise.
+ */
+static void uart_set_chip_power(struct cg2900_chip_dev *dev, bool chip_on)
+{
+ int uart_baudrate = uart_default_baud;
+ struct cg2900_platform_data *pf_data;
+ struct uart_info *uart_info;
+
+ pf_data = dev_get_platdata(dev->dev);
+ uart_info = dev_get_drvdata(dev->dev);
+
+ dev_info(MAIN_DEV, "Set chip power: %s\n",
+ (chip_on ? "ENABLE" : "DISABLE"));
+
+ /* Cancel any ongoing works.*/
+ cancel_work_sync(&uart_info->wakeup_work.work);
+ cancel_delayed_work_sync(&uart_info->sleep_work.work);
+
+ mutex_lock(&uart_info->sleep_state_lock);
+
+ if (!uart_info->hu) {
+ dev_err(MAIN_DEV, "Hci uart struct is not allocated\n");
+ goto unlock;
+ }
+
+ if (chip_on) {
+ if (!uart_info->suspend_blocked) {
+ uart_info->suspend_blocked = true;
+ pm_qos_update_request(&uart_info->pm_qos_latency,
+ CG2900_PM_QOS_LATENCY);
+ }
+ if (uart_info->sleep_state != CHIP_POWERED_DOWN) {
+ dev_err(MAIN_DEV, "Chip is already powered up (%d)\n",
+ uart_info->sleep_state);
+ goto unlock;
+ }
+
+ if (cg2900_enable_regulator(uart_info))
+ goto unlock;
+
+ if (pf_data->enable_chip) {
+ pf_data->enable_chip(dev);
+ dev_dbg(MAIN_DEV, "New sleep_state: CHIP_AWAKE\n");
+ uart_info->sleep_state = CHIP_AWAKE;
+ }
+
+ (void)hci_uart_set_baudrate(uart_info->hu, uart_baudrate);
+
+ hci_uart_flow_ctrl(uart_info->hu, FLOW_ON);
+ hci_uart_set_break(uart_info->hu, BREAK_OFF);
+ } else {
+ /* Turn off the chip.*/
+ switch (uart_info->sleep_state) {
+ case CHIP_AWAKE:
+ break;
+ case CHIP_FALLING_ASLEEP:
+ hci_uart_set_break(uart_info->hu, BREAK_OFF);
+ break;
+ case CHIP_SUSPENDED:
+ case CHIP_ASLEEP:
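+ /*
+ * The chip was sleeping: release the CTS wake-up
+ * interrupt and re-enable the UART pins before power off.
+ */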
+ unset_cts_irq(uart_info);
+ enable_uart_pins(uart_info);
+ break;
+ default:
+ break;
+ }
+
+ if (uart_info->suspend_blocked) {
+ uart_info->suspend_blocked = false;
+ pm_qos_update_request(&uart_info->pm_qos_latency,
+ PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE);
+ }
+
+ if (pf_data->disable_chip) {
+ pf_data->disable_chip(dev);
+ dev_dbg(MAIN_DEV,
+ "New sleep_state: CHIP_POWERED_DOWN\n");
+ uart_info->sleep_state = CHIP_POWERED_DOWN;
+ }
+
+ cg2900_disable_regulator(uart_info);
+ /*
+ * Setting baud rate to 0 will tell UART driver to shut off its
+ * clocks.
+ */
+ (void)hci_uart_set_baudrate(uart_info->hu, ZERO_BAUD_RATE);
+
+ spin_lock_bh(&uart_info->rx_skb_lock);
+ if (uart_info->rx_skb) {
+ /*
+ * Reset the uart_info state so that
+ * next packet can be handled
+ * correctly by driver.
+ */
+ dev_dbg(MAIN_DEV, "Power off in the middle of data receiving?"
+ "Reseting state machine.\n");
+ kfree_skb(uart_info->rx_skb);
+ uart_info->rx_skb = NULL;
+ uart_info->rx_state = W4_PACKET_TYPE;
+ uart_info->rx_count = 0;
+ }
+ spin_unlock_bh(&uart_info->rx_skb_lock);
+ }
+
+unlock:
+ mutex_unlock(&(uart_info->sleep_state_lock));
+}
+
+/**
+ * uart_chip_startup_finished() - CG2900 startup finished.
+ * @dev: Transport device information.
+ */
+static void uart_chip_startup_finished(struct cg2900_chip_dev *dev)
+{
+ struct uart_info *uart_info = dev_get_drvdata(dev->dev);
+ unsigned long timeout_jiffies = get_sleep_timeout(uart_info);
+
+ /* Schedule work to put the chip and transport to sleep. */
+ if (timeout_jiffies)
+ queue_delayed_work(uart_info->wq, &uart_info->sleep_work.work,
+ timeout_jiffies);
+}
+
+/**
+ * uart_close() - Close the CG2900 UART for data transfers.
+ * @dev: Transport device information.
+ *
+ * Returns:
+ * 0 if there is no error.
+ */
+static int uart_close(struct cg2900_chip_dev *dev)
+{
+ /* The chip is already shut down. Power off the chip. */
+ uart_set_chip_power(dev, false);
+ return 0;
+}
+
+/**
+ * send_skb_to_core() - Sends packet received from UART to CG2900 Core.
+ * @uart_info: Main Uart structure.
+ * @skb: Received data packet.
+ *
+ * This function checks if the UART is waiting for a Command Complete event,
+ * see set_baud_rate().
+ * If it is waiting, it checks whether this is the expected packet and its
+ * status. If not, it passes the packet to CG2900 Core.
+ */
+static void send_skb_to_core(struct uart_info *uart_info, struct sk_buff *skb)
+{
+ u8 status;
+
+ if (!skb) {
+ dev_err(MAIN_DEV, "send_skb_to_core: Received NULL as skb\n");
+ return;
+ }
+
+ if (BAUD_WAITING == uart_info->baud_rate_state) {
+ /*
+ * Should only really be one packet received now:
+ * the CmdComplete for the SetBaudrate command
+ * Let's see if this is the packet we are waiting for.
+ */
+ if (!is_bt_cmd_complete_no_param(skb,
+ CG2900_BT_OP_VS_SET_BAUD_RATE, &status)) {
+ /*
+ * Received other event. Should not really happen,
+ * but pass the data to CG2900 Core anyway.
+ */
+ dev_dbg(MAIN_DEV, "Sending packet to CG2900 Core while "
+ "waiting for BaudRate CmdComplete\n");
+ uart_info->chip_dev.c_cb.data_from_chip
+ (&uart_info->chip_dev, skb);
+ return;
+ }
+
+ /*
+ * We have received complete event for our baud rate
+ * change command
+ */
+ if (HCI_BT_ERROR_NO_ERROR == status) {
+ dev_dbg(MAIN_DEV, "Received baud rate change complete "
+ "event OK\n");
+ dev_dbg(MAIN_DEV,
+ "New baud_rate_state: BAUD_SUCCESS\n");
+ uart_info->baud_rate_state = BAUD_SUCCESS;
+ } else {
+ dev_err(MAIN_DEV,
+ "Received baud rate change complete event "
+ "with status 0x%X\n", status);
+ dev_dbg(MAIN_DEV, "New baud_rate_state: BAUD_FAIL\n");
+ uart_info->baud_rate_state = BAUD_FAIL;
+ }
+ wake_up_all(&uart_wait_queue);
+ kfree_skb(skb);
+ } else if (BAUD_SENDING_RESET == uart_info->baud_rate_state) {
+ /*
+ * Should only really be one packet received now:
+ * the CmdComplete for the Reset command
+ * Let's see if this is the packet we are waiting for.
+ */
+ if (!is_bt_cmd_complete_no_param(skb, HCI_OP_RESET, &status)) {
+ /*
+ * Received other event. Should not really happen,
+ * but pass the data to CG2900 Core anyway.
+ */
+ dev_dbg(MAIN_DEV, "Sending packet to CG2900 Core while "
+ "waiting for Reset CmdComplete\n");
+ uart_info->chip_dev.c_cb.data_from_chip
+ (&uart_info->chip_dev, skb);
+ return;
+ }
+
+ /*
+ * We have received complete event for our baud rate
+ * change command
+ */
+ if (HCI_BT_ERROR_NO_ERROR == status) {
+ dev_dbg(MAIN_DEV,
+ "Received HCI reset complete event OK\n");
+ /*
+ * Go back to BAUD_IDLE since this was not really
+ * baud rate change but just a preparation of the chip
+ * to be ready to receive commands.
+ */
+ dev_dbg(MAIN_DEV, "New baud_rate_state: BAUD_IDLE\n");
+ uart_info->baud_rate_state = BAUD_IDLE;
+ } else {
+ dev_err(MAIN_DEV,
+ "Received HCI reset complete event with "
+ "status 0x%X", status);
+ dev_dbg(MAIN_DEV, "New baud_rate_state: BAUD_FAIL\n");
+ uart_info->baud_rate_state = BAUD_FAIL;
+ }
+ wake_up_all(&uart_wait_queue);
+ kfree_skb(skb);
+ } else {
+ /* Just pass data to CG2900 Core */
+ uart_info->chip_dev.c_cb.data_from_chip
+ (&uart_info->chip_dev, skb);
+ }
+}
+
+/**
+ * check_data_len() - Check number of bytes to receive.
+ * @uart_info: Main Uart structure.
+ * @len: Number of bytes left to receive.
+ */
+static void check_data_len(struct uart_info *uart_info, int len)
+{
+ /* First get number of bytes left in the sk_buffer */
+ register int room = skb_tailroom(uart_info->rx_skb);
+
+ if (!len) {
+ /* No data left to receive. Transmit to CG2900 Core */
+ send_skb_to_core(uart_info, uart_info->rx_skb);
+ } else if (len > room) {
+ dev_err(MAIN_DEV, "Data length is too large (%d > %d)\n",
+ len, room);
+ kfree_skb(uart_info->rx_skb);
+ } else {
+ /*
+ * "Normal" case. Switch to data receiving state and store
+ * data length.
+ */
+ uart_info->rx_state = W4_DATA;
+ uart_info->rx_count = len;
+ return;
+ }
+
+ uart_info->rx_state = W4_PACKET_TYPE;
+ uart_info->rx_skb = NULL;
+ uart_info->rx_count = 0;
+}
+
+/**
+ * work_restart_sleep() - Cancel pending sleep work, wake up the chip and
+ * schedule new sleep work, all in work context.
+ * @work: work which needs to be done.
+ */
+static void work_restart_sleep(struct work_struct *work)
+{
+ struct uart_work_struct *current_work =
+ container_of(work, struct uart_work_struct, work);
+ struct uart_info *uart_info = (struct uart_info *)current_work->data;
+ unsigned long timeout_jiffies = get_sleep_timeout(uart_info);
+
+ spin_lock_bh(&(uart_info->transmission_lock));
+ uart_info->rx_in_progress = false;
+ spin_unlock_bh(&(uart_info->transmission_lock));
+
+ /* Cancel pending sleep work if there is any. */
+ cancel_delayed_work_sync(&uart_info->sleep_work.work);
+
+ wake_up_chip(uart_info);
+
+ spin_lock_bh(&(uart_info->transmission_lock));
+ /*
+ * If there are no ongoing transfers schedule the sleep work.
+ */
+ if (!(uart_info->tx_in_progress) && timeout_jiffies)
+ queue_delayed_work(uart_info->wq,
+ &uart_info->sleep_work.work,
+ timeout_jiffies);
+ spin_unlock_bh(&(uart_info->transmission_lock));
+}
+
+/**
+ * cg2900_hu_receive() - Handles received UART data.
+ * @hu: Pointer to associated hci_uart structure.
+ * @data: Data received.
+ * @count: Number of bytes received.
+ *
+ * The cg2900_hu_receive() function handles received UART data and puts it
+ * together to one complete packet.
+ *
+ * Returns:
+ * Number of bytes not handled, i.e. 0 = no error.
+ */
+static int cg2900_hu_receive(struct hci_uart *hu,
+ void *data, int count)
+{
+ const u8 *r_ptr;
+ u8 *w_ptr;
+ int len;
+ struct hci_event_hdr *evt;
+ struct hci_acl_hdr *acl;
+ union fm_leg_evt_or_irq *fm;
+ struct gnss_hci_hdr *gnss;
+ struct uart_info *uart_info = dev_get_drvdata(hu->proto->dev);
+ u8 *tmp;
+
+ r_ptr = (const u8 *)data;
+
+ spin_lock_bh(&(uart_info->transmission_lock));
+ /* Mark that there is an ongoing transfer. */
+ uart_info->rx_in_progress = true;
+ spin_unlock_bh(&(uart_info->transmission_lock));
+
+ /* Cancel pending sleep work if there is any. */
+ cancel_delayed_work(&uart_info->sleep_work.work);
+
+ if (uart_debug)
+ print_hex_dump_bytes(NAME " RX:\t", DUMP_PREFIX_NONE,
+ data, count);
+
+ spin_lock_bh(&uart_info->rx_skb_lock);
+
+ /* Continue while there is data left to handle */
+ while (count) {
+ /*
+ * If we have already received a packet we know how many bytes
+ * there are left.
+ */
+ if (!uart_info->rx_count)
+ goto check_h4_header;
+
+ /* First copy received data into the skb_rx */
+ len = min_t(unsigned int, uart_info->rx_count, count);
+ memcpy(skb_put(uart_info->rx_skb, len), r_ptr, len);
+ /* Update counters from the length and step the data pointer */
+ uart_info->rx_count -= len;
+ count -= len;
+ r_ptr += len;
+
+ if (uart_info->rx_count)
+ /*
+ * More data to receive to current packet. Break and
+ * wait for next data on the UART.
+ */
+ break;
+
+ /* Handle the different states */
+ tmp = uart_info->rx_skb->data + CG2900_SKB_RESERVE;
+ switch (uart_info->rx_state) {
+ case W4_DATA:
+ /*
+ * Whole data packet has been received.
+ * Transmit it to CG2900 Core.
+ */
+ send_skb_to_core(uart_info, uart_info->rx_skb);
+
+ uart_info->rx_state = W4_PACKET_TYPE;
+ uart_info->rx_skb = NULL;
+ continue;
+
+ case W4_EVENT_HDR:
+ evt = (struct hci_event_hdr *)tmp;
+ check_data_len(uart_info, evt->plen);
+ /* Header read. Continue with next bytes */
+ continue;
+
+ case W4_ACL_HDR:
+ acl = (struct hci_acl_hdr *)tmp;
+ check_data_len(uart_info, le16_to_cpu(acl->dlen));
+ /* Header read. Continue with next bytes */
+ continue;
+
+ case W4_FM_RADIO_HDR:
+ fm = (union fm_leg_evt_or_irq *)tmp;
+ check_data_len(uart_info, fm->param_length);
+ /* Header read. Continue with next bytes */
+ continue;
+
+ case W4_GNSS_HDR:
+ gnss = (struct gnss_hci_hdr *)tmp;
+ check_data_len(uart_info, le16_to_cpu(gnss->plen));
+ /* Header read. Continue with next bytes */
+ continue;
+
+ default:
+ dev_err(MAIN_DEV,
+ "Bad state indicating memory overwrite "
+ "(0x%X)\n", (u8)(uart_info->rx_state));
+ break;
+ }
+
+check_h4_header:
+ /* Check which H:4 packet this is and update RX states */
+ if (*r_ptr == HCI_BT_EVT_H4_CHANNEL) {
+ uart_info->rx_state = W4_EVENT_HDR;
+ uart_info->rx_count = HCI_BT_EVT_HDR_SIZE;
+ } else if (*r_ptr == HCI_BT_ACL_H4_CHANNEL) {
+ uart_info->rx_state = W4_ACL_HDR;
+ uart_info->rx_count = HCI_BT_ACL_HDR_SIZE;
+ } else if (*r_ptr == HCI_FM_RADIO_H4_CHANNEL) {
+ uart_info->rx_state = W4_FM_RADIO_HDR;
+ uart_info->rx_count = HCI_FM_RADIO_HDR_SIZE;
+ } else if (*r_ptr == HCI_GNSS_H4_CHANNEL) {
+ uart_info->rx_state = W4_GNSS_HDR;
+ uart_info->rx_count = HCI_GNSS_HDR_SIZE;
+ } else {
+ dev_err(MAIN_DEV, "Unknown HCI packet type 0x%X\n",
+ (u8)*r_ptr);
+ r_ptr++;
+ count--;
+ continue;
+ }
+
+ /*
+ * Allocate packet. We do not yet know the size and therefore
+ * allocate max size.
+ */
+ uart_info->rx_skb = alloc_rx_skb(RX_SKB_MAX_SIZE, GFP_ATOMIC);
+ if (!uart_info->rx_skb) {
+ dev_err(MAIN_DEV,
+ "Can't allocate memory for new packet\n");
+ uart_info->rx_state = W4_PACKET_TYPE;
+ uart_info->rx_count = 0;
+
+ spin_lock_bh(&(uart_info->transmission_lock));
+ uart_info->rx_in_progress = false;
+ spin_unlock_bh(&(uart_info->transmission_lock));
+
+ spin_unlock_bh(&uart_info->rx_skb_lock);
+ return 0;
+ }
+
+ /* Write the H:4 header first in the sk_buffer */
+ w_ptr = skb_put(uart_info->rx_skb, 1);
+ *w_ptr = *r_ptr;
+
+ /* First byte (H4 header) read. Goto next byte */
+ r_ptr++;
+ count--;
+ }
+
+ (void)queue_work(uart_info->wq, &uart_info->restart_sleep_work.work);
+
+ spin_unlock_bh(&uart_info->rx_skb_lock);
+ return count;
+}
+
+/**
+ * cg2900_hu_open() - Called when UART line discipline changed to N_HCI.
+ * @hu: Pointer to associated Hci uart structure.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Errors from cg2900_register_trans_driver.
+ */
+static int cg2900_hu_open(struct hci_uart *hu)
+{
+ int err;
+ struct uart_info *uart_info = dev_get_drvdata(hu->proto->dev);
+
+ if (!uart_info)
+ return -EACCES;
+
+ dev_info(MAIN_DEV, "UART opened\n");
+
+ skb_queue_head_init(&uart_info->tx_queue);
+
+ uart_info->hu = hu;
+
+ /* Tell CG2900 Core that UART is connected */
+ err = cg2900_register_trans_driver(&uart_info->chip_dev);
+ if (err)
+ dev_err(MAIN_DEV, "Could not register transport driver (%d)\n",
+ err);
+
+ if (hu->tty->ops->tiocmget && hu->tty->ops->break_ctl)
+ uart_info->sleep_allowed = true;
+ else {
+ dev_err(MAIN_DEV, "Sleep mode not available\n");
+ uart_info->sleep_allowed = false;
+ }
+
+ return err;
+
+}
+
+/**
+ * cg2900_hu_close() - Close UART tty.
+ * @hu: Pointer to associated hci_uart structure.
+ *
+ * Called when the line discipline is changed to something else, the TTY is
+ * closed, or the TTY detects a hangup.
+ *
+ * Returns:
+ * 0 if there is no error.
+ */
+static int cg2900_hu_close(struct hci_uart *hu)
+{
+ int err;
+ struct uart_info *uart_info = dev_get_drvdata(hu->proto->dev);
+
+
+ BUG_ON(!uart_info);
+ BUG_ON(!uart_info->wq);
+
+ /* Purge any stored sk_buffers */
+ skb_queue_purge(&uart_info->tx_queue);
+
+ spin_lock_bh(&uart_info->rx_skb_lock);
+ if (uart_info->rx_skb) {
+ kfree_skb(uart_info->rx_skb);
+ uart_info->rx_skb = NULL;
+ }
+ spin_unlock_bh(&uart_info->rx_skb_lock);
+
+ dev_info(MAIN_DEV, "UART closed\n");
+ err = create_work_item(uart_info, work_hw_deregistered);
+ if (err)
+ dev_err(MAIN_DEV, "Failed to create work item (%d) "
+ "work_hw_deregistered\n", err);
+
+ uart_info->hu = NULL;
+
+ return 0;
+}
+
+/**
+ * cg2900_hu_dequeue() - Get new skbuff.
+ * @hu: Pointer to associated hci_uart structure.
+ *
+ * Called by the HCI UART framework to fetch the next packet to transmit. Also
+ * handles the set baud rate state machine and schedules the sleep work when
+ * the TX queue runs empty.
+ *
+ * Returns:
+ * Pointer to the next sk_buff to transmit, or NULL if the TX queue is empty.
+ */
+static struct sk_buff *cg2900_hu_dequeue(struct hci_uart *hu)
+{
+ struct sk_buff *skb;
+ struct uart_info *uart_info = dev_get_drvdata(hu->proto->dev);
+ unsigned long timeout_jiffies = get_sleep_timeout(uart_info);
+
+ spin_lock_bh(&(uart_info->transmission_lock));
+
+ skb = skb_dequeue(&uart_info->tx_queue);
+
+ if (!skb)
+ uart_info->tx_in_progress = false;
+
+ /*
+ * If there are no ongoing transfers schedule the sleep work.
+ */
+ if (!(uart_info->rx_in_progress) && timeout_jiffies && !skb)
+ queue_delayed_work(uart_info->wq,
+ &uart_info->sleep_work.work,
+ timeout_jiffies);
+
+ spin_unlock_bh(&(uart_info->transmission_lock));
+
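+ /*
+ * The TX queue is empty and a set baud rate command was being sent:
+ * switch the tty to the new baud rate now.
+ */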
+ if (BAUD_SENDING == uart_info->baud_rate_state && !skb)
+ finish_setting_baud_rate(hu);
+ /*
+ * If it's set baud rate cmd set correct baud state and after
+ * sending is finished inform the tty driver about the new
+ * baud rate.
+ */
+ if ((BAUD_START == uart_info->baud_rate_state) &&
+ skb && (is_set_baud_rate_cmd(skb->data))) {
+ dev_dbg(MAIN_DEV, "UART set baud rate cmd found\n");
+ uart_info->baud_rate_state = BAUD_SENDING;
+ }
+
+ if (uart_debug && skb)
+ print_hex_dump_bytes(NAME " TX:\t", DUMP_PREFIX_NONE,
+ skb->data, skb->len);
+
+ return skb;
+}
+
+/**
+ * cg2900_hu_flush() - Flush buffers.
+ * @hu: Pointer to associated hci_uart structure.
+ *
+ * Returns:
+ * 0 if there is no error.
+ */
+static int cg2900_hu_flush(struct hci_uart *hu)
+{
+ struct uart_info *uart_info = dev_get_drvdata(hu->proto->dev);
+
+ dev_dbg(MAIN_DEV, "ui %p", uart_info);
+ skb_queue_purge(&uart_info->tx_queue);
+ return 0;
+}
+
+/**
+ * cg2900_uart_probe() - Initialize CG2900 UART resources.
+ * @pdev: Platform device.
+ *
+ * This function initializes the module and registers to the UART framework.
+ *
+ * Returns:
+ * 0 if success.
+ * -ENOMEM for failed alloc or structure creation.
+ * -EINVAL if a required platform resource is missing.
+ * -ECHILD for failed work queue creation.
+ * Error codes generated by hci_uart_register_proto.
+ */
+static int __devinit cg2900_uart_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct uart_info *uart_info;
+ struct hci_uart_proto *p;
+ struct resource *resource;
+
+ pr_debug("cg2900_uart_probe");
+
+ uart_info = kzalloc(sizeof(*uart_info), GFP_KERNEL);
+ if (!uart_info) {
+ pr_err("Couldn't allocate uart_info");
+ return -ENOMEM;
+ }
+
+ uart_info->sleep_state = CHIP_POWERED_DOWN;
+ mutex_init(&(uart_info->sleep_state_lock));
+
+ spin_lock_init(&(uart_info->transmission_lock));
+ spin_lock_init(&(uart_info->rx_skb_lock));
+
+ uart_info->chip_dev.t_cb.open = uart_open;
+ uart_info->chip_dev.t_cb.close = uart_close;
+ uart_info->chip_dev.t_cb.write = uart_write;
+ uart_info->chip_dev.t_cb.set_chip_power = uart_set_chip_power;
+ uart_info->chip_dev.t_cb.chip_startup_finished =
+ uart_chip_startup_finished;
+ uart_info->chip_dev.pdev = pdev;
+ uart_info->chip_dev.dev = &pdev->dev;
+ uart_info->chip_dev.t_data = uart_info;
+
+ pm_qos_add_request(&uart_info->pm_qos_latency, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE);
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "cts_irq");
+ if (!resource) {
+ dev_err(&pdev->dev, "CTS IRQ does not exist\n");
+ err = -EINVAL;
+ goto error_handling_free;
+ }
+ uart_info->cts_irq = resource->start;
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "cts_gpio");
+ if (!resource) {
+ dev_err(&pdev->dev, "CTS GPIO does not exist\n");
+ err = -EINVAL;
+ goto error_handling_free;
+ }
+ uart_info->cts_gpio = resource->start;
+
+ /* Init UART TX work queue */
+ uart_info->wq = create_singlethread_workqueue(UART_WQ_NAME);
+ if (!uart_info->wq) {
+ dev_err(MAIN_DEV, "Could not create workqueue\n");
+ err = -ECHILD; /* No child processes */
+ goto error_handling_free;
+ }
+
+ /* Initialize sleep work data */
+ uart_info->sleep_work.data = uart_info;
+ INIT_DELAYED_WORK(&uart_info->sleep_work.work, set_chip_sleep_mode);
+
+ /* Initialize wake-up work data */
+ uart_info->wakeup_work.data = uart_info;
+ INIT_WORK(&uart_info->wakeup_work.work, work_wake_up_chip);
+
+ /* Initialize after_receive work data */
+ uart_info->restart_sleep_work.data = uart_info;
+ INIT_WORK(&uart_info->restart_sleep_work.work, work_restart_sleep);
+
+ uart_info->dev = &pdev->dev;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ dev_err(MAIN_DEV, "cg2900_uart_probe: Could not allocate p\n");
+ err = -ENOMEM;
+ goto error_handling_wq;
+ }
+
+ p->dev = uart_info->dev;
+ p->id = HCI_UART_STE;
+ p->open = &cg2900_hu_open;
+ p->close = &cg2900_hu_close;
+ p->recv = &cg2900_hu_receive;
+ p->dequeue = &cg2900_hu_dequeue;
+ p->flush = &cg2900_hu_flush;
+
+ dev_set_drvdata(uart_info->dev, (void *)uart_info);
+
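+ /* Register the CG2900 protocol with the HCI UART framework. */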
+ err = hci_uart_register_proto(p);
+ if (err) {
+ dev_err(MAIN_DEV, "cg2900_uart_probe: Can not register "
+ "protocol\n");
+ kfree(p);
+ goto error_handling_wq;
+ }
+
+ goto finished;
+
+error_handling_wq:
+ destroy_workqueue(uart_info->wq);
+error_handling_free:
+ kfree(uart_info);
+ uart_info = NULL;
+finished:
+ return err;
+}
+
+/**
+ * cg2900_uart_remove() - Release CG2900 UART resources.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if success.
+ * -ECHILD if no uart_info is available.
+ */
+static int __devexit cg2900_uart_remove(struct platform_device *pdev)
+{
+ struct uart_info *uart_info = dev_get_drvdata(&pdev->dev);
+
+ pr_debug("cg2900_uart_remove");
+
+ if (!uart_info)
+ return -ECHILD;
+
+ if (uart_info->hu)
+ hci_uart_unregister_proto(uart_info->hu->proto);
+
+ pm_qos_remove_request(&uart_info->pm_qos_latency);
+ destroy_workqueue(uart_info->wq);
+
+ dev_info(MAIN_DEV, "CG2900 UART removed\n");
+ kfree(uart_info);
+ uart_info = NULL;
+ return 0;
+}
+
+static struct platform_driver cg2900_uart_driver = {
+ .driver = {
+ .name = "cg2900-uart",
+ .owner = THIS_MODULE,
+ },
+ .probe = cg2900_uart_probe,
+ .remove = __devexit_p(cg2900_uart_remove),
+#ifdef CONFIG_PM
+ .suspend = cg2900_uart_suspend,
+ .resume = cg2900_uart_resume
+#endif
+};
+
+
+/**
+ * cg2900_uart_init() - Initialize module.
+ *
+ * Registers platform driver.
+ */
+static int __init cg2900_uart_init(void)
+{
+ pr_debug("cg2900_uart_init");
+ return platform_driver_register(&cg2900_uart_driver);
+}
+
+/**
+ * cg2900_uart_exit() - Remove module.
+ *
+ * Unregisters platform driver.
+ */
+static void __exit cg2900_uart_exit(void)
+{
+ pr_debug("cg2900_uart_exit");
+ platform_driver_unregister(&cg2900_uart_driver);
+}
+
+module_init(cg2900_uart_init);
+module_exit(cg2900_uart_exit);
+
+module_param(uart_default_baud, int, S_IRUGO);
+MODULE_PARM_DESC(uart_default_baud,
+ "Default UART baud rate, e.g. 115200. If not set 115200 will "
+ "be used.");
+
+module_param(uart_high_baud, int, S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(uart_high_baud,
+ "High speed UART baud rate, e.g. 4000000. If not set 3000000 "
+ "will be used.");
+
+module_param(uart_debug, int, S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(uart_debug, "Enable/Disable debug. 0 means Debug disabled.");
+MODULE_AUTHOR("Par-Gunnar Hjalmdahl ST-Ericsson");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ST-Ericsson CG2900 UART Driver");
diff --git a/drivers/staging/cg2900/bluetooth/hci_ldisc.c b/drivers/staging/cg2900/bluetooth/hci_ldisc.c
new file mode 100644
index 00000000000..0ceb5e74255
--- /dev/null
+++ b/drivers/staging/cg2900/bluetooth/hci_ldisc.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * This file is a staging solution and shall be integrated into
+ * /drivers/bluetooth/hci_ldisc.c.
+ *
+ * Original hci_ldisc.c file:
+ * Copyright (C) 2000-2001 Qualcomm Incorporated
+ * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
+ * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org>
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/poll.h>
+
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/signal.h>
+#include <linux/ioctl.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+
+#define VERSION "2.3"
+
+#define TTY_BREAK_ON (-1)
+#define TTY_BREAK_OFF (0)
+
+static bool reset;
+
+static struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
+
+int cg2900_hci_uart_register_proto(struct hci_uart_proto *p)
+{
+ if (p->id >= HCI_UART_MAX_PROTO)
+ return -EINVAL;
+
+ if (hup[p->id])
+ return -EEXIST;
+
+ hup[p->id] = p;
+
+ return 0;
+}
+
+int cg2900_hci_uart_unregister_proto(struct hci_uart_proto *p)
+{
+ if (p->id >= HCI_UART_MAX_PROTO)
+ return -EINVAL;
+
+ if (!hup[p->id])
+ return -EINVAL;
+
+ hup[p->id] = NULL;
+
+ return 0;
+}
+
+static struct hci_uart_proto *hci_uart_get_proto(unsigned int id)
+{
+ if (id >= HCI_UART_MAX_PROTO)
+ return NULL;
+
+ return hup[id];
+}
+
+static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type)
+{
+ struct hci_dev *hdev = hu->hdev;
+
+ if (!hdev)
+ return;
+
+ /* Update HCI stat counters */
+ switch (pkt_type) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+ }
+}
+
+static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
+{
+ struct sk_buff *skb = hu->tx_skb;
+
+ if (!skb)
+ skb = hu->proto->dequeue(hu);
+ else
+ hu->tx_skb = NULL;
+
+ return skb;
+}
+
+int cg2900_hci_uart_tx_wakeup(struct hci_uart *hu)
+{
+ struct tty_struct *tty = hu->tty;
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+
+ if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
+ set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+ return 0;
+ }
+
+ BT_DBG("");
+
+restart:
+ clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+
+ while ((skb = hci_uart_dequeue(hu))) {
+ int len;
+
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ len = tty->ops->write(tty, skb->data, skb->len);
+ if (hdev)
+ hdev->stat.byte_tx += len;
+
+ skb_pull(skb, len);
+ if (skb->len) {
+ hu->tx_skb = skb;
+ break;
+ }
+
+ hci_uart_tx_complete(hu, bt_cb(skb)->pkt_type);
+ kfree_skb(skb);
+ }
+
+ if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
+ goto restart;
+
+ clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ return 0;
+}
+
+int cg2900_hci_uart_set_break(struct hci_uart *hu, bool break_on)
+{
+ struct tty_struct *tty = hu->tty;
+ int state = TTY_BREAK_OFF;
+
+ if (break_on)
+ state = TTY_BREAK_ON;
+
+ if (tty->ops->break_ctl)
+ return tty->ops->break_ctl(tty, state);
+ else
+ return -EOPNOTSUPP;
+}
+
+void cg2900_hci_uart_flow_ctrl(struct hci_uart *hu, bool flow_on)
+{
+ if (flow_on)
+ tty_unthrottle(hu->tty);
+ else
+ tty_throttle(hu->tty);
+}
+
+int cg2900_hci_uart_set_baudrate(struct hci_uart *hu, int baud)
+{
+ struct ktermios old_termios;
+ struct tty_struct *tty = hu->tty;
+
+ if (!tty->ops->set_termios)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&(tty->termios_mutex));
+ /* Start by storing the old termios. */
+ memcpy(&old_termios, tty->termios, sizeof(old_termios));
+
+ tty_encode_baud_rate(tty, baud, baud);
+
+ /* Finally inform the driver */
+ tty->ops->set_termios(tty, &old_termios);
+
+ mutex_unlock(&(tty->termios_mutex));
+
+ return 0;
+}
+
+int cg2900_hci_uart_tiocmget(struct hci_uart *hu)
+{
+ struct tty_struct *tty = hu->tty;
+
+ if (!tty->ops->tiocmget || !hu->fd)
+ return -EOPNOTSUPP;
+
+ return tty->ops->tiocmget(tty);
+}
+
+void cg2900_hci_uart_flush_buffer(struct hci_uart *hu)
+{
+ tty_driver_flush_buffer(hu->tty);
+}
+
+int cg2900_hci_uart_chars_in_buffer(struct hci_uart *hu)
+{
+ return tty_chars_in_buffer(hu->tty);
+}
+
+/* ------- Interface to HCI layer ------ */
+/* Initialize device */
+static int hci_uart_open(struct hci_dev *hdev)
+{
+ BT_DBG("%s %p", hdev->name, hdev);
+
+ /* Nothing to do for UART driver */
+
+ set_bit(HCI_RUNNING, &hdev->flags);
+
+ return 0;
+}
+
+/* Reset device */
+static int hci_uart_flush(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = (struct hci_uart *) hdev->driver_data;
+ struct tty_struct *tty = hu->tty;
+
+ BT_DBG("hdev %p tty %p", hdev, tty);
+
+ if (hu->tx_skb) {
+ kfree_skb(hu->tx_skb); hu->tx_skb = NULL;
+ }
+
+ /* Flush any pending characters in the driver and discipline. */
+ tty_ldisc_flush(tty);
+ tty_driver_flush_buffer(tty);
+
+ if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+ hu->proto->flush(hu);
+
+ return 0;
+}
+
+/* Close device */
+static int hci_uart_close(struct hci_dev *hdev)
+{
+ BT_DBG("hdev %p", hdev);
+
+ if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
+ return 0;
+
+ hci_uart_flush(hdev);
+ hdev->flush = NULL;
+ return 0;
+}
+
+/* Send frames from HCI layer */
+static int hci_uart_send_frame(struct sk_buff *skb)
+{
+ struct hci_dev *hdev = (struct hci_dev *) skb->dev;
+ struct hci_uart *hu;
+
+ if (!hdev) {
+ BT_ERR("Frame for unknown device (hdev=NULL)");
+ return -ENODEV;
+ }
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return -EBUSY;
+
+ hu = (struct hci_uart *) hdev->driver_data;
+
+ BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type,
+ skb->len);
+
+ hu->proto->enqueue(hu, skb);
+
+ hci_uart_tx_wakeup(hu);
+
+ return 0;
+}
+
+static void hci_uart_destruct(struct hci_dev *hdev)
+{
+ if (!hdev)
+ return;
+
+ BT_DBG("%s", hdev->name);
+ kfree(hdev->driver_data);
+}
+
+/* ------ LDISC part ------ */
+/* hci_uart_tty_open
+ *
+ * Called when line discipline changed to HCI_UART.
+ *
+ * Arguments:
+ * tty pointer to tty info structure
+ * Return Value:
+ * 0 if success, otherwise error code
+ */
+static int hci_uart_tty_open(struct tty_struct *tty)
+{
+ struct hci_uart *hu = (void *) tty->disc_data;
+
+ BT_DBG("tty %p", tty);
+
+ /* FIXME: This btw is bogus, nothing requires the old ldisc to clear
+ the pointer */
+ if (hu)
+ return -EEXIST;
+
+ /* Error if the tty has no write op instead of leaving an exploitable
+ hole */
+ if (tty->ops->write == NULL)
+ return -EOPNOTSUPP;
+
+ hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL);
+ if (!hu) {
+ BT_ERR("Can't allocate control structure");
+ return -ENFILE;
+ }
+
+ tty->disc_data = hu;
+ hu->tty = tty;
+ tty->receive_room = 65536;
+
+ spin_lock_init(&hu->rx_lock);
+
+ /* Flush any pending characters in the driver and line discipline. */
+
+ /* FIXME: why is this needed. Note don't use ldisc_ref here as the
+ open path is before the ldisc is referencable */
+
+ if (tty->ldisc->ops->flush_buffer)
+ tty->ldisc->ops->flush_buffer(tty);
+ tty_driver_flush_buffer(tty);
+
+ return 0;
+}
+
+/* hci_uart_tty_close()
+ *
+ * Called when the line discipline is changed to something
+ * else, the tty is closed, or the tty detects a hangup.
+ */
+static void hci_uart_tty_close(struct tty_struct *tty)
+{
+ struct hci_uart *hu = (void *)tty->disc_data;
+
+ BT_DBG("tty %p", tty);
+
+ /* Detach from the tty */
+ tty->disc_data = NULL;
+
+ if (hu) {
+ struct hci_dev *hdev = hu->hdev;
+
+ if (hdev)
+ hci_uart_close(hdev);
+
+ if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+ hu->proto->close(hu);
+ if (hdev) {
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+ }
+ }
+ }
+}
+
+/* hci_uart_tty_wakeup()
+ *
+ * Callback for transmit wakeup. Called when low level
+ * device driver can accept more send data.
+ *
+ * Arguments: tty pointer to associated tty instance data
+ * Return Value: None
+ */
+static void hci_uart_tty_wakeup(struct tty_struct *tty)
+{
+ struct hci_uart *hu = (void *)tty->disc_data;
+
+ BT_DBG("");
+
+ if (!hu)
+ return;
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+
+ if (tty != hu->tty)
+ return;
+
+ if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+ hci_uart_tx_wakeup(hu);
+}
+
+/* hci_uart_tty_receive()
+ *
+ * Called by tty low level driver when receive data is
+ * available.
+ *
+ * Arguments: tty pointer to tty instance data
+ * data pointer to received data
+ * flags pointer to flags for data
+ * count count of received data in bytes
+ *
+ * Return Value: None
+ */
+static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
+ char *flags, int count)
+{
+ struct hci_uart *hu = (void *)tty->disc_data;
+
+ if (!hu || tty != hu->tty)
+ return;
+
+ if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
+ return;
+
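+ /* Pass the received data to the registered protocol under the RX lock. */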
+ spin_lock(&hu->rx_lock);
+ hu->proto->recv(hu, (void *) data, count);
+ if (hu->hdev)
+ hu->hdev->stat.byte_rx += count;
+ spin_unlock(&hu->rx_lock);
+
+ tty_unthrottle(tty);
+}
+
+static int hci_uart_register_dev(struct hci_uart *hu)
+{
+ struct hci_dev *hdev;
+
+ BT_DBG("");
+
+ /* Initialize and register HCI device */
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ BT_ERR("Can't allocate HCI device");
+ return -ENOMEM;
+ }
+
+ hu->hdev = hdev;
+
+ hdev->bus = HCI_UART;
+ hdev->driver_data = hu;
+
+ hdev->open = hci_uart_open;
+ hdev->close = hci_uart_close;
+ hdev->flush = hci_uart_flush;
+ hdev->send = hci_uart_send_frame;
+ hdev->destruct = hci_uart_destruct;
+
+ hdev->owner = THIS_MODULE;
+
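+ /*
+ * Unless the reset module parameter is set, tell the HCI core not to
+ * send its own HCI Reset on initialization.
+ */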
+ if (!reset)
+ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+
+ if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
+ set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+
+ if (hci_register_dev(hdev) < 0) {
+ BT_ERR("Can't register HCI device");
+ hci_free_dev(hdev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int hci_uart_set_proto(struct hci_uart *hu, int id)
+{
+ struct hci_uart_proto *p;
+ int err;
+
+ p = hci_uart_get_proto(id);
+ if (!p)
+ return -EPROTONOSUPPORT;
+
+ hu->proto = p;
+
+ err = p->open(hu);
+ if (err)
+ return err;
+
+ /*
+ * Protocol might register hdev by itself.
+ * In that case, there is no need to register it here.
+ */
+ if (!hu->proto->register_hci_dev)
+ return 0;
+
+ err = hci_uart_register_dev(hu);
+ if (err) {
+ p->close(hu);
+ return err;
+ }
+
+ return 0;
+}
+
+/* hci_uart_tty_ioctl()
+ *
+ * Process IOCTL system call for the tty device.
+ *
+ * Arguments:
+ *
+ * tty pointer to tty instance data
+ * file pointer to open file object for device
+ * cmd IOCTL command code
+ * arg argument for IOCTL call (cmd dependent)
+ *
+ * Return Value: Command dependent
+ */
+static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct hci_uart *hu = (void *)tty->disc_data;
+ int err = 0;
+
+ BT_DBG("");
+
+ /* Verify the status of the device */
+ if (!hu)
+ return -EBADF;
+
+ switch (cmd) {
+ case HCIUARTSETPROTO:
+ if (!test_and_set_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+ err = hci_uart_set_proto(hu, arg);
+ if (err) {
+ clear_bit(HCI_UART_PROTO_SET, &hu->flags);
+ return err;
+ }
+ /* Keep file descriptor.*/
+ hu->fd = file;
+ } else
+ return -EBUSY;
+ break;
+
+ case HCIUARTGETPROTO:
+ if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+ return hu->proto->id;
+ return -EUNATCH;
+
+ case HCIUARTGETDEVICE:
+ if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+ if (hu->hdev)
+ return hu->hdev->id;
+ else
+ return -ENOMSG;
+ }
+ return -EUNATCH;
+
+ case HCIUARTSETFLAGS:
+ if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+ return -EBUSY;
+ hu->hdev_flags = arg;
+ break;
+
+ case HCIUARTGETFLAGS:
+ return hu->hdev_flags;
+
+ default:
+ err = n_tty_ioctl_helper(tty, file, cmd, arg);
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * We don't provide read/write/poll interface for user space.
+ */
+static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr)
+{
+ return 0;
+}
+
+static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *data, size_t count)
+{
+ return 0;
+}
+
+static unsigned int hci_uart_tty_poll(struct tty_struct *tty,
+ struct file *filp, poll_table *wait)
+{
+ return 0;
+}
+
+static int __init cg2900_hci_uart_init(void)
+{
+ static struct tty_ldisc_ops hci_uart_ldisc;
+ int err;
+
+ BT_INFO("HCI UART driver ver %s", VERSION);
+
+ /* Register the tty discipline */
+
+ memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
+ hci_uart_ldisc.magic = TTY_LDISC_MAGIC;
+ hci_uart_ldisc.name = "n_cg2900_hci";
+ hci_uart_ldisc.open = hci_uart_tty_open;
+ hci_uart_ldisc.close = hci_uart_tty_close;
+ hci_uart_ldisc.read = hci_uart_tty_read;
+ hci_uart_ldisc.write = hci_uart_tty_write;
+ hci_uart_ldisc.ioctl = hci_uart_tty_ioctl;
+ hci_uart_ldisc.poll = hci_uart_tty_poll;
+ hci_uart_ldisc.receive_buf = hci_uart_tty_receive;
+ hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup;
+ hci_uart_ldisc.owner = THIS_MODULE;
+
+ err = tty_register_ldisc(N_CG2900_HCI, &hci_uart_ldisc);
+ if (err) {
+ BT_ERR("HCI line discipline registration failed. (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit cg2900_hci_uart_exit(void)
+{
+ int err;
+
+ /* Release tty registration of line discipline */
+ err = tty_unregister_ldisc(N_CG2900_HCI);
+ if (err)
+ BT_ERR("Can't unregister HCI line discipline (%d)", err);
+}
+
+module_init(cg2900_hci_uart_init);
+module_exit(cg2900_hci_uart_exit);
+
+module_param(reset, bool, 0644);
+MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
+
+MODULE_AUTHOR("Par-Gunnar Hjalmdahl <par-gunnar.p.hjalmdahl@stericsson.com>");
+MODULE_DESCRIPTION("CG2900 Staging Bluetooth HCI UART driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_CG2900_HCI);
diff --git a/drivers/staging/cg2900/bluetooth/hci_uart.h b/drivers/staging/cg2900/bluetooth/hci_uart.h
new file mode 100644
index 00000000000..23a69519ccd
--- /dev/null
+++ b/drivers/staging/cg2900/bluetooth/hci_uart.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * This file is a staging solution and shall be integrated into
+ * /drivers/bluetooth/hci_uart.h.
+ *
+ * Original hci_uart.h file:
+ * Copyright (C) 2000-2001 Qualcomm Incorporated
+ * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
+ * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org>
+ */
+
+/*
+ * Staging CG2900 Bluetooth HCI UART. Will be replaced by normal N_HCI when
+ * moved to normal driver folder.
+ */
+#ifndef N_CG2900_HCI
+#define N_CG2900_HCI 23
+#endif /* N_CG2900_HCI */
+
+/* Ioctls */
+#define HCIUARTSETPROTO _IOW('U', 200, int)
+#define HCIUARTGETPROTO _IOR('U', 201, int)
+#define HCIUARTGETDEVICE _IOR('U', 202, int)
+#define HCIUARTSETFLAGS _IOW('U', 203, int)
+#define HCIUARTGETFLAGS _IOR('U', 204, int)
+
+/* UART protocols */
+#define HCI_UART_MAX_PROTO 7
+
+#define HCI_UART_H4 0
+#define HCI_UART_BCSP 1
+#define HCI_UART_3WIRE 2
+#define HCI_UART_H4DS 3
+#define HCI_UART_LL 4
+#define HCI_UART_ATH3K 5
+#define HCI_UART_STE 6
+
+#define HCI_UART_RAW_DEVICE 0
+
+/* UART break and flow control parameters */
+#define BREAK_ON true
+#define BREAK_OFF false
+#define FLOW_ON true
+#define FLOW_OFF false
+
+struct hci_uart;
+
+struct hci_uart_proto {
+ unsigned int id;
+ int (*open)(struct hci_uart *hu);
+ int (*close)(struct hci_uart *hu);
+ int (*flush)(struct hci_uart *hu);
+ int (*recv)(struct hci_uart *hu, void *data, int len);
+ int (*enqueue)(struct hci_uart *hu, struct sk_buff *skb);
+ struct sk_buff *(*dequeue)(struct hci_uart *hu);
+ bool register_hci_dev;
+ struct device *dev;
+};
+
+struct hci_uart {
+ struct tty_struct *tty;
+ struct hci_dev *hdev;
+ unsigned long flags;
+ unsigned long hdev_flags;
+
+ struct hci_uart_proto *proto;
+ void *priv;
+
+ struct sk_buff *tx_skb;
+ unsigned long tx_state;
+ spinlock_t rx_lock;
+
+ struct file *fd;
+};
+
+/* HCI_UART proto flag bits */
+#define HCI_UART_PROTO_SET 0
+
+/* TX states */
+#define HCI_UART_SENDING 1
+#define HCI_UART_TX_WAKEUP 2
+
+int cg2900_hci_uart_register_proto(struct hci_uart_proto *p);
+int cg2900_hci_uart_unregister_proto(struct hci_uart_proto *p);
+int cg2900_hci_uart_tx_wakeup(struct hci_uart *hu);
+int cg2900_hci_uart_set_baudrate(struct hci_uart *hu, int baud);
+int cg2900_hci_uart_set_break(struct hci_uart *hu, bool break_on);
+int cg2900_hci_uart_tiocmget(struct hci_uart *hu);
+void cg2900_hci_uart_flush_buffer(struct hci_uart *hu);
+void cg2900_hci_uart_flow_ctrl(struct hci_uart *hu, bool flow_on);
+int cg2900_hci_uart_chars_in_buffer(struct hci_uart *hu);
+
+#define hci_uart_register_proto cg2900_hci_uart_register_proto
+#define hci_uart_unregister_proto cg2900_hci_uart_unregister_proto
+#define hci_uart_tx_wakeup cg2900_hci_uart_tx_wakeup
+#define hci_uart_set_baudrate cg2900_hci_uart_set_baudrate
+#define hci_uart_set_break cg2900_hci_uart_set_break
+#define hci_uart_tiocmget cg2900_hci_uart_tiocmget
+#define hci_uart_flush_buffer cg2900_hci_uart_flush_buffer
+#define hci_uart_flow_ctrl cg2900_hci_uart_flow_ctrl
+#define hci_uart_chars_in_buffer cg2900_hci_uart_chars_in_buffer
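+
+/*
+ * Usage sketch (illustrative only; the my_* names are hypothetical): a UART
+ * protocol handler built on the declarations above would typically fill in a
+ * struct hci_uart_proto and register it through the compatibility defines:
+ *
+ *	static struct hci_uart_proto my_proto = {
+ *		.id = HCI_UART_STE,
+ *		.open = my_open,
+ *		.close = my_close,
+ *		.recv = my_recv,
+ *		.enqueue = my_enqueue,
+ *		.dequeue = my_dequeue,
+ *	};
+ *
+ *	err = hci_uart_register_proto(&my_proto);
+ *	...
+ *	hci_uart_unregister_proto(&my_proto);
+ */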
diff --git a/drivers/staging/cg2900/board-ux500-cg2900.c b/drivers/staging/cg2900/board-ux500-cg2900.c
new file mode 100644
index 00000000000..ca3902b4e2d
--- /dev/null
+++ b/drivers/staging/cg2900/board-ux500-cg2900.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Par-Gunnar Hjalmdahl <par-gunnar.p.hjalmdahl@stericsson.com>
+ * Author: Hemant Gupta <hemant.gupta@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <asm/mach-types.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/ioport.h>
+#include <linux/mfd/abx500/ab8500-gpio.h>
+#include <linux/platform_device.h>
+#include <mach/gpio.h>
+#include <mach/id.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+#include <plat/pincfg.h>
+
+#include "board-mop500.h"
+#include "cg2900.h"
+#include "devices-cg2900.h"
+#include "pins-db5500.h"
+#include "pins-db8500.h"
+#include "pins.h"
+
+#define CG2900_BT_ENABLE_GPIO 170
+#define CG2900_GBF_ENA_RESET_GPIO 171
+#define WLAN_PMU_EN_GPIO 226
+#define WLAN_PMU_EN_GPIO_SNOWBALL 161
+#define WLAN_PMU_EN_GPIO_U9500 AB8500_PIN_GPIO11
+#define CG2900_UX500_BT_CTS_GPIO 0
+#define CG2900_U5500_BT_CTS_GPIO 168
+
+enum cg2900_gpio_pull_sleep ux500_cg2900_sleep_gpio[21] = {
+ CG2900_NO_PULL, /* GPIO 0: PTA_CONFX */
+ CG2900_PULL_DN, /* GPIO 1: PTA_STATUS */
+ CG2900_NO_PULL, /* GPIO 2: UART_CTSN */
+ CG2900_PULL_UP, /* GPIO 3: UART_RTSN */
+ CG2900_PULL_UP, /* GPIO 4: UART_TXD */
+ CG2900_NO_PULL, /* GPIO 5: UART_RXD */
+ CG2900_PULL_DN, /* GPIO 6: IOM_DOUT */
+ CG2900_NO_PULL, /* GPIO 7: IOM_FSC */
+ CG2900_NO_PULL, /* GPIO 8: IOM_CLK */
+ CG2900_NO_PULL, /* GPIO 9: IOM_DIN */
+ CG2900_PULL_DN, /* GPIO 10: PWR_REQ */
+ CG2900_PULL_DN, /* GPIO 11: HOST_WAKEUP */
+ CG2900_PULL_DN, /* GPIO 12: IIS_DOUT */
+ CG2900_NO_PULL, /* GPIO 13: IIS_WS */
+ CG2900_NO_PULL, /* GPIO 14: IIS_CLK */
+ CG2900_NO_PULL, /* GPIO 15: IIS_DIN */
+ CG2900_PULL_DN, /* GPIO 16: PTA_FREQ */
+ CG2900_PULL_DN, /* GPIO 17: PTA_RF_ACTIVE */
+ CG2900_NO_PULL, /* GPIO 18: NotConnected (J6428) */
+ CG2900_NO_PULL, /* GPIO 19: EXT_DUTY_CYCLE */
+ CG2900_NO_PULL, /* GPIO 20: EXT_FRM_SYNCH */
+};
+
+static struct platform_device ux500_cg2900_device = {
+ .name = "cg2900",
+};
+
+static struct platform_device ux500_cg2900_chip_device = {
+ .name = "cg2900-chip",
+ .dev = {
+ .parent = &ux500_cg2900_device.dev,
+ },
+};
+
+static struct platform_device ux500_stlc2690_chip_device = {
+ .name = "stlc2690-chip",
+ .dev = {
+ .parent = &ux500_cg2900_device.dev,
+ },
+};
+
+static struct cg2900_platform_data ux500_cg2900_test_platform_data = {
+ .bus = HCI_VIRTUAL,
+ .gpio_sleep = ux500_cg2900_sleep_gpio,
+};
+
+static struct platform_device ux500_cg2900_test_device = {
+ .name = "cg2900-test",
+ .dev = {
+ .parent = &ux500_cg2900_device.dev,
+ .platform_data = &ux500_cg2900_test_platform_data,
+ },
+};
+
+static struct resource cg2900_uart_resources_pre_v60[] = {
+ {
+ .start = CG2900_GBF_ENA_RESET_GPIO,
+ .end = CG2900_GBF_ENA_RESET_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "gbf_ena_reset",
+ },
+ {
+ .start = CG2900_BT_ENABLE_GPIO,
+ .end = CG2900_BT_ENABLE_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "bt_enable",
+ },
+ {
+ .start = CG2900_UX500_BT_CTS_GPIO,
+ .end = CG2900_UX500_BT_CTS_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "cts_gpio",
+ },
+ {
+ .start = NOMADIK_GPIO_TO_IRQ(CG2900_UX500_BT_CTS_GPIO),
+ .end = NOMADIK_GPIO_TO_IRQ(CG2900_UX500_BT_CTS_GPIO),
+ .flags = IORESOURCE_IRQ,
+ .name = "cts_irq",
+ },
+};
+
+static struct resource cg2900_uart_resources_u5500[] = {
+ {
+ .start = CG2900_U5500_BT_CTS_GPIO,
+ .end = CG2900_U5500_BT_CTS_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "cts_gpio",
+ },
+ {
+ .start = NOMADIK_GPIO_TO_IRQ(CG2900_U5500_BT_CTS_GPIO),
+ .end = NOMADIK_GPIO_TO_IRQ(CG2900_U5500_BT_CTS_GPIO),
+ .flags = IORESOURCE_IRQ,
+ .name = "cts_irq",
+ },
+};
+
+static struct resource cg2900_uart_resources_u8500[] = {
+ {
+ .start = CG2900_GBF_ENA_RESET_GPIO,
+ .end = CG2900_GBF_ENA_RESET_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "gbf_ena_reset",
+ },
+ {
+ .start = WLAN_PMU_EN_GPIO,
+ .end = WLAN_PMU_EN_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "pmu_en",
+ },
+ {
+ .start = CG2900_UX500_BT_CTS_GPIO,
+ .end = CG2900_UX500_BT_CTS_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "cts_gpio",
+ },
+ {
+ .start = NOMADIK_GPIO_TO_IRQ(CG2900_UX500_BT_CTS_GPIO),
+ .end = NOMADIK_GPIO_TO_IRQ(CG2900_UX500_BT_CTS_GPIO),
+ .flags = IORESOURCE_IRQ,
+ .name = "cts_irq",
+ },
+};
+
+static struct resource cg2900_uart_resources_snowball[] = {
+ {
+ .start = CG2900_GBF_ENA_RESET_GPIO,
+ .end = CG2900_GBF_ENA_RESET_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "gbf_ena_reset",
+ },
+ {
+ .start = WLAN_PMU_EN_GPIO_SNOWBALL,
+ .end = WLAN_PMU_EN_GPIO_SNOWBALL,
+ .flags = IORESOURCE_IO,
+ .name = "pmu_en",
+ },
+ {
+ .start = CG2900_UX500_BT_CTS_GPIO,
+ .end = CG2900_UX500_BT_CTS_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "cts_gpio",
+ },
+ {
+ .start = NOMADIK_GPIO_TO_IRQ(CG2900_UX500_BT_CTS_GPIO),
+ .end = NOMADIK_GPIO_TO_IRQ(CG2900_UX500_BT_CTS_GPIO),
+ .flags = IORESOURCE_IRQ,
+ .name = "cts_irq",
+ },
+};
+
+static struct resource cg2900_uart_resources_u9500[] = {
+ {
+ .start = CG2900_GBF_ENA_RESET_GPIO,
+ .end = CG2900_GBF_ENA_RESET_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "gbf_ena_reset",
+ },
+ {
+ .start = WLAN_PMU_EN_GPIO_U9500,
+ .end = WLAN_PMU_EN_GPIO_U9500,
+ .flags = IORESOURCE_IO,
+ .name = "pmu_en",
+ },
+ {
+ .start = CG2900_UX500_BT_CTS_GPIO,
+ .end = CG2900_UX500_BT_CTS_GPIO,
+ .flags = IORESOURCE_IO,
+ .name = "cts_gpio",
+ },
+ {
+ .start = NOMADIK_GPIO_TO_IRQ(CG2900_UX500_BT_CTS_GPIO),
+ .end = NOMADIK_GPIO_TO_IRQ(CG2900_UX500_BT_CTS_GPIO),
+ .flags = IORESOURCE_IRQ,
+ .name = "cts_irq",
+ },
+};
+
+static pin_cfg_t u5500_cg2900_uart_enabled[] = {
+ GPIO165_U3_RXD | PIN_INPUT_PULLUP,
+ GPIO166_U3_TXD | PIN_OUTPUT_HIGH,
+ GPIO167_U3_RTSn | PIN_OUTPUT_HIGH,
+ GPIO168_U3_CTSn | PIN_INPUT_PULLUP,
+};
+
+static pin_cfg_t u5500_cg2900_uart_disabled[] = {
+ GPIO165_GPIO | PIN_INPUT_PULLUP, /* RX pull down. */
+ GPIO166_GPIO | PIN_OUTPUT_LOW, /* TX low - break on. */
+ GPIO167_GPIO | PIN_OUTPUT_HIGH, /* RTS high - flow off. */
+ GPIO168_GPIO | PIN_INPUT_PULLUP, /* CTS pull up. */
+};
+
+static pin_cfg_t ux500_cg2900_uart_enabled[] = {
+ GPIO0_U0_CTSn | PIN_INPUT_PULLUP,
+ GPIO1_U0_RTSn | PIN_OUTPUT_HIGH,
+ GPIO2_U0_RXD | PIN_INPUT_PULLUP,
+ GPIO3_U0_TXD | PIN_OUTPUT_HIGH
+};
+
+static pin_cfg_t ux500_cg2900_uart_disabled[] = {
+ GPIO0_GPIO | PIN_INPUT_PULLUP, /* CTS pull up. */
+ GPIO1_GPIO | PIN_OUTPUT_HIGH, /* RTS high - flow off. */
+ GPIO2_GPIO | PIN_INPUT_PULLUP, /* RX pull down. */
+ GPIO3_GPIO | PIN_OUTPUT_LOW /* TX low - break on. */
+};
+
+static struct cg2900_platform_data ux500_cg2900_uart_platform_data = {
+ .bus = HCI_UART,
+ .gpio_sleep = ux500_cg2900_sleep_gpio,
+ .uart = {
+ .n_uart_gpios = 4,
+ },
+};
+
+static struct platform_device ux500_cg2900_uart_device = {
+ .name = "cg2900-uart",
+ .dev = {
+ .platform_data = &ux500_cg2900_uart_platform_data,
+ .parent = &ux500_cg2900_device.dev,
+ },
+};
+
+static bool mach_supported(void)
+{
+ if (machine_is_u8500() ||
+ machine_is_u5500() ||
+ machine_is_hrefv60() ||
+ machine_is_nomadik() ||
+ machine_is_snowball())
+ return true;
+
+ return false;
+}
+
+static int __init board_cg2900_init(void)
+{
+ int err;
+
+ if (!mach_supported())
+ return 0;
+
+ dcg2900_init_platdata(&ux500_cg2900_test_platform_data);
+ if (machine_is_u5500()) {
+ ux500_cg2900_uart_platform_data.uart.uart_enabled =
+ u5500_cg2900_uart_enabled;
+ ux500_cg2900_uart_platform_data.uart.uart_disabled =
+ u5500_cg2900_uart_disabled;
+ } else {
+ ux500_cg2900_uart_platform_data.uart.uart_enabled =
+ ux500_cg2900_uart_enabled;
+ ux500_cg2900_uart_platform_data.uart.uart_disabled =
+ ux500_cg2900_uart_disabled;
+ ux500_cg2900_uart_platform_data.regulator_id = "vdd";
+ }
+ dcg2900_init_platdata(&ux500_cg2900_uart_platform_data);
+
+ if (pins_for_u9500()) {
+ /* u9500 */
+ ux500_cg2900_uart_device.num_resources =
+ ARRAY_SIZE(cg2900_uart_resources_u9500);
+ ux500_cg2900_uart_device.resource =
+ cg2900_uart_resources_u9500;
+ } else if (cpu_is_u8500()) {
+ if (machine_is_hrefv60()) {
+ /* u8500 */
+ ux500_cg2900_uart_device.num_resources =
+ ARRAY_SIZE(cg2900_uart_resources_u8500);
+ ux500_cg2900_uart_device.resource =
+ cg2900_uart_resources_u8500;
+ } else if (machine_is_snowball()) {
+ /* Snowball has a different PMU_EN GPIO */
+ ux500_cg2900_uart_device.num_resources =
+ ARRAY_SIZE(cg2900_uart_resources_snowball);
+ ux500_cg2900_uart_device.resource =
+ cg2900_uart_resources_snowball;
+ } else {
+ /* u8500 pre v60 */
+ ux500_cg2900_uart_device.num_resources =
+ ARRAY_SIZE(cg2900_uart_resources_pre_v60);
+ ux500_cg2900_uart_device.resource =
+ cg2900_uart_resources_pre_v60;
+ }
+ } else if (cpu_is_u5500()) {
+ /* u5500 */
+ ux500_cg2900_uart_device.num_resources =
+ ARRAY_SIZE(cg2900_uart_resources_u5500);
+ ux500_cg2900_uart_device.resource =
+ cg2900_uart_resources_u5500;
+ }
+
+ err = platform_device_register(&ux500_cg2900_device);
+ if (err)
+ return err;
+ err = platform_device_register(&ux500_cg2900_uart_device);
+ if (err)
+ return err;
+ err = platform_device_register(&ux500_cg2900_test_device);
+ if (err)
+ return err;
+ err = platform_device_register(&ux500_cg2900_chip_device);
+ if (err)
+ return err;
+ err = platform_device_register(&ux500_stlc2690_chip_device);
+ if (err)
+ return err;
+
+ dev_info(&ux500_cg2900_device.dev, "CG2900 initialized\n");
+ return 0;
+}
+
+static void __exit board_cg2900_exit(void)
+{
+ if (!mach_supported())
+ return;
+
+ platform_device_unregister(&ux500_stlc2690_chip_device);
+ platform_device_unregister(&ux500_cg2900_chip_device);
+ platform_device_unregister(&ux500_cg2900_test_device);
+ platform_device_unregister(&ux500_cg2900_uart_device);
+ platform_device_unregister(&ux500_cg2900_device);
+
+ dev_info(&ux500_cg2900_device.dev, "CG2900 removed\n");
+}
+
+module_init(board_cg2900_init);
+module_exit(board_cg2900_exit);
diff --git a/drivers/staging/cg2900/clock-cg2900.c b/drivers/staging/cg2900/clock-cg2900.c
new file mode 100644
index 00000000000..6e3c738e49d
--- /dev/null
+++ b/drivers/staging/cg2900/clock-cg2900.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Hemant Gupta <hemant.gupta@stericsson.com>
+ * Author: Tomasz Hliwiak <tomasz.hliwiak@tieto.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <asm/mach-types.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include "clock.h"
+#include "cg2900.h"
+
+static DEFINE_MUTEX(cg2900_clk_mutex);
+
+static struct cg2900_user_data *pf_data;
+static struct clk_lookup *cg2900_clk_lookup;
+
+/**
+ * cg2900_clk_enable() - Enables CG2900 Clock
+ *
+ * Enables CG2900 Clock by starting CG2900.
+ *
+ * Returns:
+ * 0 if success.
+ * -EINVAL if stored pf_data is NULL.
+ * Error codes generated by open.
+ */
+static int cg2900_clk_enable(struct clk *clk)
+{
+ int err = -EINVAL;
+ if (pf_data)
+ err = pf_data->open(pf_data);
+
+ return err;
+}
+
+/**
+ * cg2900_clk_disable() - Disables CG2900 Clock
+ *
+ * Disables CG2900 Clock by switching off CG2900.
+ */
+static void cg2900_clk_disable(struct clk *clk)
+{
+ if (pf_data)
+ pf_data->close(pf_data);
+}
+
+static struct clkops cg2900_clk_ops = {
+ .enable = cg2900_clk_enable,
+ .disable = cg2900_clk_disable,
+};
+
+static struct clk cg2900_clk = {
+ .name = "cg2900_clk",
+ .ops = &cg2900_clk_ops,
+ .mutex = &cg2900_clk_mutex,
+};
+
+/**
+ * cg2900_read_cb() - Dummy callback for cg2900 core read.
+ *
+ * Function is required by cg2900_core->open().
+ */
+static void cg2900_read_cb(struct cg2900_user_data *user, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+/**
+ * cg2900_core_probe() - Initialize resources.
+ *
+ * Function initializes pf_data structure and also adds the cg2900
+ * clock source.
+ */
+static int __devinit cg2900_core_probe(struct platform_device *pdev)
+{
+ cg2900_clk_lookup = clkdev_alloc(&cg2900_clk, "sys_clk_out",
+ "cw1200_wlan");
+
+ if (!cg2900_clk_lookup)
+ return -ENOMEM;
+
+ clkdev_add(cg2900_clk_lookup);
+ pf_data = dev_get_platdata(&pdev->dev);
+ pf_data->dev = &pdev->dev;
+ pf_data->read_cb = cg2900_read_cb;
+
+ return 0;
+}
+
+/**
+ * cg2900_core_remove() - Clean resources.
+ *
+ * Function cleans pf_data structure and removes the clock source.
+ */
+static int __devexit cg2900_core_remove(struct platform_device *pdev)
+{
+ clkdev_drop(cg2900_clk_lookup);
+ pf_data = NULL;
+
+ return 0;
+}
+
+static struct platform_driver cg2900_core_ctrl_driver = {
+ .driver = {
+ .name = "cg2900-core",
+ .owner = THIS_MODULE,
+ },
+ .probe = cg2900_core_probe,
+ .remove = __devexit_p(cg2900_core_remove),
+};
+
+/**
+ * clock_cg2900_init() - Register the platform driver.
+ *
+ * Registers the CG2900 clock platform driver.
+ */
+static int __init clock_cg2900_init(void)
+{
+ return platform_driver_register(&cg2900_core_ctrl_driver);
+}
+
+/**
+ * clock_cg2900_exit() - Unregister the platform driver.
+ *
+ * Unregisters the CG2900 clock platform driver.
+ */
+static void __exit clock_cg2900_exit(void)
+{
+ platform_driver_unregister(&cg2900_core_ctrl_driver);
+}
+
+module_init(clock_cg2900_init);
+module_exit(clock_cg2900_exit);
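+
+/*
+ * Usage sketch (illustrative only): the clkdev entry registered in
+ * cg2900_core_probe() exposes cg2900_clk as "sys_clk_out" to the
+ * "cw1200_wlan" device, so that driver can power the CG2900 up and down via
+ * the standard clk API. clk_enable()/clk_disable() then end up in
+ * cg2900_clk_enable()/cg2900_clk_disable() above.
+ *
+ *	struct clk *clk = clk_get(dev, "sys_clk_out");
+ *
+ *	if (!IS_ERR(clk)) {
+ *		clk_enable(clk);
+ *		...
+ *		clk_disable(clk);
+ *		clk_put(clk);
+ *	}
+ */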
diff --git a/drivers/staging/cg2900/devices-cg2900-ux500.c b/drivers/staging/cg2900/devices-cg2900-ux500.c
new file mode 100644
index 00000000000..7e7c12ce4a0
--- /dev/null
+++ b/drivers/staging/cg2900/devices-cg2900-ux500.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * Hemant Gupta (hemant.gupta@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Board specific device support for the Linux Bluetooth HCI H:4 Driver
+ * for ST-Ericsson connectivity controller.
+ */
+
+#include <asm/byteorder.h>
+#include <asm-generic/errno-base.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <plat/pincfg.h>
+
+#include "devices-cg2900.h"
+
+void dcg2900_u8500_enable_chip(struct cg2900_chip_dev *dev)
+{
+ struct dcg2900_info *info = dev->b_data;
+
+ if (info->gbf_gpio == -1)
+ return;
+
+ /*
+ * Due to a bug in CG2900 we cannot just set GPIO high to enable
+ * the chip. We must wait more than 100 msecs before enabling the
+ * chip.
+ * - Set PDB to low.
+ * - Wait for 100 msecs
+ * - Set PDB to high.
+ */
+ gpio_set_value(info->gbf_gpio, 0);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(
+ CHIP_ENABLE_PDB_LOW_TIMEOUT));
+
+ if (info->pmuen_gpio != -1) {
+ /*
+ * We must first set PMU_EN pin high and then wait 300 us before
+ * setting the GBF_EN high.
+ */
+ gpio_set_value(info->pmuen_gpio, 1);
+ udelay(CHIP_ENABLE_PMU_EN_TIMEOUT);
+ }
+
+ gpio_set_value(info->gbf_gpio, 1);
+}
+
+void dcg2900_u8500_disable_chip(struct cg2900_chip_dev *dev)
+{
+ struct dcg2900_info *info = dev->b_data;
+
+ if (info->gbf_gpio != -1)
+ gpio_set_value(info->gbf_gpio, 0);
+ if (info->pmuen_gpio != -1)
+ gpio_set_value(info->pmuen_gpio, 0);
+}
+
+int dcg2900_u8500_setup(struct cg2900_chip_dev *dev,
+ struct dcg2900_info *info)
+{
+ int err = 0;
+ struct resource *resource;
+ const char *gbf_name;
+ const char *bt_name = NULL;
+ const char *pmuen_name = NULL;
+
+ resource = platform_get_resource_byname(dev->pdev, IORESOURCE_IO,
+ "gbf_ena_reset");
+ if (!resource) {
+ dev_err(dev->dev, "GBF GPIO does not exist\n");
+ err = -EINVAL;
+ goto err_handling;
+ }
+
+ info->gbf_gpio = resource->start;
+ gbf_name = resource->name;
+
+ resource = platform_get_resource_byname(dev->pdev, IORESOURCE_IO,
+ "bt_enable");
+ /* BT Enable GPIO may not exist */
+ if (resource) {
+ info->bt_gpio = resource->start;
+ bt_name = resource->name;
+ }
+
+ resource = platform_get_resource_byname(dev->pdev, IORESOURCE_IO,
+ "pmu_en");
+ /* PMU_EN GPIO may not exist */
+ if (resource) {
+ info->pmuen_gpio = resource->start;
+ pmuen_name = resource->name;
+ }
+
+ /* Now setup the GPIOs */
+ err = gpio_request(info->gbf_gpio, gbf_name);
+ if (err < 0) {
+ dev_err(dev->dev, "gpio_request %s failed with err: %d\n",
+ gbf_name, err);
+ goto err_handling;
+ }
+
+ err = gpio_direction_output(info->gbf_gpio, 0);
+ if (err < 0) {
+ dev_err(dev->dev,
+ "gpio_direction_output %s failed with err: %d\n",
+ gbf_name, err);
+ goto err_handling_free_gpio_gbf;
+ }
+
+ if (!pmuen_name)
+ goto set_bt_gpio;
+
+ err = gpio_request(info->pmuen_gpio, pmuen_name);
+ if (err < 0) {
+ dev_err(dev->dev, "gpio_request %s failed with err: %d\n",
+ pmuen_name, err);
+ goto err_handling_free_gpio_gbf;
+ }
+
+ err = gpio_direction_output(info->pmuen_gpio, 0);
+ if (err < 0) {
+ dev_err(dev->dev,
+ "gpio_direction_output %s failed with err: %d\n",
+ pmuen_name, err);
+ goto err_handling_free_gpio_pmuen;
+ }
+
+set_bt_gpio:
+ if (!bt_name)
+ goto finished;
+
+ err = gpio_request(info->bt_gpio, bt_name);
+ if (err < 0) {
+ dev_err(dev->dev, "gpio_request %s failed with err: %d\n",
+ bt_name, err);
+ goto err_handling_free_gpio_pmuen;
+ }
+
+ err = gpio_direction_output(info->bt_gpio, 1);
+ if (err < 0) {
+ dev_err(dev->dev,
+ "gpio_direction_output %s failed with err: %d\n",
+ bt_name, err);
+ goto err_handling_free_gpio_bt;
+ }
+
+finished:
+
+ return 0;
+
+err_handling_free_gpio_bt:
+ gpio_free(info->bt_gpio);
+ info->bt_gpio = -1;
+err_handling_free_gpio_pmuen:
+ if (info->pmuen_gpio != -1) {
+ gpio_free(info->pmuen_gpio);
+ info->pmuen_gpio = -1;
+ }
+err_handling_free_gpio_gbf:
+ gpio_free(info->gbf_gpio);
+ info->gbf_gpio = -1;
+err_handling:
+
+ return err;
+}
+
+/* The PRCMU resout1 pin is used for CG2900 reset. */
+void dcg2900_u5500_enable_chip(struct cg2900_chip_dev *dev)
+{
+ struct dcg2900_info *info = dev->b_data;
+
+ clk_enable(info->lpoclk);
+ /*
+ * Due to a bug in CG2900 we cannot just set GPIO high to enable
+ * the chip. We must wait more than 100 msecs before enabling the
+ * chip.
+ * - Set PDB to low.
+ * - Wait for 100 msecs
+ * - Set PDB to high.
+ */
+ prcmu_resetout(1, 0);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(
+ CHIP_ENABLE_PDB_LOW_TIMEOUT));
+ prcmu_resetout(1, 1);
+}
+
+void dcg2900_u5500_disable_chip(struct cg2900_chip_dev *dev)
+{
+ struct dcg2900_info *info = dev->b_data;
+
+ prcmu_resetout(1, 0);
+ clk_disable(info->lpoclk);
+}
+
+int dcg2900_u5500_setup(struct cg2900_chip_dev *dev,
+ struct dcg2900_info *info)
+{
+ info->lpoclk = clk_get(dev->dev, "lpoclk");
+ if (IS_ERR(info->lpoclk))
+ return PTR_ERR(info->lpoclk);
+
+ return 0;
+}
+
diff --git a/drivers/staging/cg2900/devices-cg2900.c b/drivers/staging/cg2900/devices-cg2900.c
new file mode 100644
index 00000000000..e6703c96aa9
--- /dev/null
+++ b/drivers/staging/cg2900/devices-cg2900.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * Hemant Gupta (hemant.gupta@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Board specific device support for the Linux Bluetooth HCI H:4 Driver
+ * for ST-Ericsson connectivity controller.
+ */
+
+#define NAME "devices-cg2900"
+#define pr_fmt(fmt) NAME ": " fmt "\n"
+
+#include <asm/byteorder.h>
+#include <asm-generic/errno-base.h>
+#include <asm/mach-types.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/regulator/consumer.h>
+#include <mach/id.h>
+#include <plat/pincfg.h>
+#include "cg2900.h"
+#include "devices-cg2900.h"
+
+#define BT_VS_POWER_SWITCH_OFF 0xFD40
+
+#define H4_HEADER_LENGTH 0x01
+#define BT_HEADER_LENGTH 0x03
+
+#define STLC2690_HCI_REV 0x0600
+#define CG2900_PG1_HCI_REV 0x0101
+#define CG2900_PG2_HCI_REV 0x0200
+#define CG2900_PG1_SPECIAL_HCI_REV 0x0700
+
+struct vs_power_sw_off_cmd {
+ __le16 op_code;
+ u8 len;
+ u8 gpio_0_7_pull_up;
+ u8 gpio_8_15_pull_up;
+ u8 gpio_16_20_pull_up;
+ u8 gpio_0_7_pull_down;
+ u8 gpio_8_15_pull_down;
+ u8 gpio_16_20_pull_down;
+} __packed;
+
+static struct sk_buff *dcg2900_get_power_switch_off_cmd
+ (struct cg2900_chip_dev *dev, u16 *op_code)
+{
+ struct sk_buff *skb;
+ struct vs_power_sw_off_cmd *cmd;
+ struct dcg2900_info *info;
+ int i;
+
+ /* If the connected chip does not support the command, return NULL */
+ if (CG2900_PG1_SPECIAL_HCI_REV != dev->chip.hci_revision &&
+ CG2900_PG1_HCI_REV != dev->chip.hci_revision &&
+ CG2900_PG2_HCI_REV != dev->chip.hci_revision)
+ return NULL;
+
+ dev_dbg(dev->dev, "Generating PowerSwitchOff command\n");
+
+ info = dev->b_data;
+
+ skb = alloc_skb(sizeof(*cmd) + H4_HEADER_LENGTH, GFP_KERNEL);
+ if (!skb) {
+ dev_err(dev->dev, "Could not allocate skb\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, H4_HEADER_LENGTH);
+ cmd = (struct vs_power_sw_off_cmd *)skb_put(skb, sizeof(*cmd));
+ cmd->op_code = cpu_to_le16(BT_VS_POWER_SWITCH_OFF);
+ cmd->len = sizeof(*cmd) - BT_HEADER_LENGTH;
+ /*
+ * Enter system specific GPIO settings here:
+ * Section data[3-5] is GPIO pull-up selection
+ * Section data[6-8] is GPIO pull-down selection
+ * Each section is a bitfield where
+ * - byte 0 bit 0 is GPIO 0
+ * - byte 0 bit 1 is GPIO 1
+ * - up to
+ * - byte 2 bit 4 which is GPIO 20
+ * where each bit means:
+ * - 0: No pull-up / no pull-down
+ * - 1: Pull-up / pull-down
+ * All GPIOs are set as input.
+ */
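+ /*
+ * For example, with the ux500_cg2900_sleep_gpio table in
+ * board-ux500-cg2900.c, GPIO 10 (PWR_REQ) is CG2900_PULL_DN, so the
+ * loop below sets bit 2 (10 - 8) of gpio_8_15_pull_down.
+ */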
+ if (!info->sleep_gpio_set) {
+ struct cg2900_platform_data *pf_data;
+
+ pf_data = dev_get_platdata(dev->dev);
+ for (i = 0; i < 8; i++) {
+ if (pf_data->gpio_sleep[i] == CG2900_PULL_UP)
+ info->gpio_0_7_pull_up |= (1 << i);
+ else if (pf_data->gpio_sleep[i] == CG2900_PULL_DN)
+ info->gpio_0_7_pull_down |= (1 << i);
+ }
+ for (i = 8; i < 16; i++) {
+ if (pf_data->gpio_sleep[i] == CG2900_PULL_UP)
+ info->gpio_8_15_pull_up |= (1 << (i - 8));
+ else if (pf_data->gpio_sleep[i] == CG2900_PULL_DN)
+ info->gpio_8_15_pull_down |= (1 << (i - 8));
+ }
+ for (i = 16; i < 21; i++) {
+ if (pf_data->gpio_sleep[i] == CG2900_PULL_UP)
+ info->gpio_16_20_pull_up |= (1 << (i - 16));
+ else if (pf_data->gpio_sleep[i] == CG2900_PULL_DN)
+ info->gpio_16_20_pull_down |= (1 << (i - 16));
+ }
+ info->sleep_gpio_set = true;
+ }
+ cmd->gpio_0_7_pull_up = info->gpio_0_7_pull_up;
+ cmd->gpio_8_15_pull_up = info->gpio_8_15_pull_up;
+ cmd->gpio_16_20_pull_up = info->gpio_16_20_pull_up;
+ cmd->gpio_0_7_pull_down = info->gpio_0_7_pull_down;
+ cmd->gpio_8_15_pull_down = info->gpio_8_15_pull_down;
+ cmd->gpio_16_20_pull_down = info->gpio_16_20_pull_down;
+
+ if (op_code)
+ *op_code = BT_VS_POWER_SWITCH_OFF;
+
+ return skb;
+}
+
+static int dcg2900_init(struct cg2900_chip_dev *dev)
+{
+ int err = 0;
+ struct dcg2900_info *info;
+ struct cg2900_platform_data *pdata = dev_get_platdata(dev->dev);
+
+ /* First retrieve and save the resources */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev->dev, "Could not allocate dcg2900_info\n");
+ return -ENOMEM;
+ }
+
+ info->gbf_gpio = -1;
+ info->pmuen_gpio = -1;
+ info->bt_gpio = -1;
+
+ if (!dev->pdev->num_resources) {
+ dev_dbg(dev->dev, "No resources available\n");
+ goto finished;
+ }
+
+ if (cpu_is_u5500())
+ err = dcg2900_u5500_setup(dev, info);
+ else
+ err = dcg2900_u8500_setup(dev, info);
+
+ if (err)
+ goto err_handling;
+
+ /*
+ * Enable the power on Snowball.
+ */
+ if (machine_is_snowball()) {
+ /* Take the regulator */
+ if (pdata->regulator_id) {
+ info->regulator_wlan = regulator_get(dev->dev,
+ pdata->regulator_id);
+ if (IS_ERR(info->regulator_wlan)) {
+ err = PTR_ERR(info->regulator_wlan);
+ dev_warn(dev->dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_id);
+ info->regulator_wlan = NULL;
+ goto err_handling_free_gpios;
+ }
+ /* Enable it also */
+ err = regulator_enable(info->regulator_wlan);
+ if (err < 0) {
+ dev_warn(dev->dev, "%s: regulator_enable failed\n",
+ __func__);
+ goto err_handling_put_reg;
+ }
+ } else {
+ dev_warn(dev->dev, "%s: no regulator defined for snowball.\n",
+ __func__);
+ }
+ }
+
+finished:
+ dev->b_data = info;
+ return 0;
+err_handling_put_reg:
+ regulator_put(info->regulator_wlan);
+err_handling_free_gpios:
+ if (info->bt_gpio != -1)
+ gpio_free(info->bt_gpio);
+ if (info->pmuen_gpio != -1)
+ gpio_free(info->pmuen_gpio);
+ if (info->gbf_gpio != -1)
+ gpio_free(info->gbf_gpio);
+
+err_handling:
+ kfree(info);
+ return err;
+}
+
+static void dcg2900_exit(struct cg2900_chip_dev *dev)
+{
+ struct dcg2900_info *info = dev->b_data;
+
+ if (machine_is_snowball()) {
+ /* Turn off power if we have any */
+ if (info->regulator_wlan) {
+ regulator_disable(info->regulator_wlan);
+ regulator_put(info->regulator_wlan);
+ }
+ }
+
+ if (cpu_is_u5500())
+ dcg2900_u5500_disable_chip(dev);
+ else
+ dcg2900_u8500_disable_chip(dev);
+
+ if (info->bt_gpio != -1)
+ gpio_free(info->bt_gpio);
+ if (info->pmuen_gpio != -1)
+ gpio_free(info->pmuen_gpio);
+ if (info->gbf_gpio != -1)
+ gpio_free(info->gbf_gpio);
+ kfree(info);
+ dev->b_data = NULL;
+}
+
+static int dcg2900_disable_uart(struct cg2900_chip_dev *dev)
+{
+ int err;
+ struct cg2900_platform_data *pdata = dev_get_platdata(dev->dev);
+
+ /*
+ * Without this delay we get an interrupt on CTS immediately
+ * due to transients on this line.
+ */
+ mdelay(4);
+
+ /* Disable UART functions. */
+ err = nmk_config_pins(pdata->uart.uart_disabled,
+ pdata->uart.n_uart_gpios);
+ if (err)
+ goto error;
+
+ return 0;
+
+error:
+ (void)nmk_config_pins(pdata->uart.uart_enabled,
+ pdata->uart.n_uart_gpios);
+ dev_err(dev->dev, "Cannot set interrupt (%d)\n", err);
+ return err;
+}
+
+static int dcg2900_enable_uart(struct cg2900_chip_dev *dev)
+{
+ int err;
+ struct cg2900_platform_data *pdata = dev_get_platdata(dev->dev);
+
+ /* Restore UART settings. */
+ err = nmk_config_pins(pdata->uart.uart_enabled,
+ pdata->uart.n_uart_gpios);
+ if (err)
+ dev_err(dev->dev, "Unable to enable UART (%d)\n", err);
+
+ return err;
+}
+
+void dcg2900_init_platdata(struct cg2900_platform_data *data)
+{
+ data->init = dcg2900_init;
+ data->exit = dcg2900_exit;
+
+ if (cpu_is_u5500()) {
+ data->enable_chip = dcg2900_u5500_enable_chip;
+ data->disable_chip = dcg2900_u5500_disable_chip;
+ } else {
+ data->enable_chip = dcg2900_u8500_enable_chip;
+ data->disable_chip = dcg2900_u8500_disable_chip;
+ }
+ data->get_power_switch_off_cmd = dcg2900_get_power_switch_off_cmd;
+
+ data->uart.enable_uart = dcg2900_enable_uart;
+ data->uart.disable_uart = dcg2900_disable_uart;
+}
diff --git a/drivers/staging/cg2900/devices-cg2900.h b/drivers/staging/cg2900/devices-cg2900.h
new file mode 100644
index 00000000000..5ca95e1e0a1
--- /dev/null
+++ b/drivers/staging/cg2900/devices-cg2900.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Par-Gunnar Hjalmdahl <par-gunnar.p.hjalmdahl@stericsson.com>
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __DEVICES_CG2900_H
+#define __DEVICES_CG2900_H
+
+#include "cg2900.h"
+#include <linux/clk.h>
+
+#define CHIP_ENABLE_PDB_LOW_TIMEOUT 100 /* ms */
+#define CHIP_ENABLE_PMU_EN_TIMEOUT 300 /* us */
+
+struct dcg2900_info {
+ int gbf_gpio;
+ int pmuen_gpio;
+ int bt_gpio;
+ bool sleep_gpio_set;
+ u8 gpio_0_7_pull_up;
+ u8 gpio_8_15_pull_up;
+ u8 gpio_16_20_pull_up;
+ u8 gpio_0_7_pull_down;
+ u8 gpio_8_15_pull_down;
+ u8 gpio_16_20_pull_down;
+ struct clk *lpoclk;
+ struct regulator *regulator_wlan;
+};
+
+extern void dcg2900_u8500_enable_chip(struct cg2900_chip_dev *dev);
+extern void dcg2900_u8500_disable_chip(struct cg2900_chip_dev *dev);
+extern int dcg2900_u8500_setup(struct cg2900_chip_dev *dev,
+ struct dcg2900_info *info);
+extern void dcg2900_u5500_enable_chip(struct cg2900_chip_dev *dev);
+extern void dcg2900_u5500_disable_chip(struct cg2900_chip_dev *dev);
+extern int dcg2900_u5500_setup(struct cg2900_chip_dev *dev,
+ struct dcg2900_info *info);
+
+/**
+ * enum cg2900_gpio_pull_sleep - GPIO pull setting in sleep.
+ * @CG2900_NO_PULL: Normal input in sleep (no pull up or down).
+ * @CG2900_PULL_UP: Pull up in sleep.
+ * @CG2900_PULL_DN: Pull down in sleep.
+ */
+enum cg2900_gpio_pull_sleep {
+ CG2900_NO_PULL,
+ CG2900_PULL_UP,
+ CG2900_PULL_DN
+};
+
+/**
+ * dcg2900_init_platdata() - Initializes platform data with callback functions.
+ * @data: Platform data.
+ */
+extern void dcg2900_init_platdata(struct cg2900_platform_data *data);
+
+#endif /* __DEVICES_CG2900_H */
diff --git a/drivers/staging/cg2900/include/cg2900.h b/drivers/staging/cg2900/include/cg2900.h
new file mode 100644
index 00000000000..bd165c0048b
--- /dev/null
+++ b/drivers/staging/cg2900/include/cg2900.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth HCI H:4 Driver for ST-Ericsson CG2900 connectivity
+ * controller.
+ */
+
+#ifndef _CG2900_H_
+#define _CG2900_H_
+
+#include <linux/types.h>
+
+/* Perform reset. No parameters used */
+#define CG2900_CHAR_DEV_IOCTL_RESET _IOW('U', 210, int)
+/* Check for reset */
+#define CG2900_CHAR_DEV_IOCTL_CHECK4RESET _IOR('U', 212, int)
+/* Retrieve revision info */
+#define CG2900_CHAR_DEV_IOCTL_GET_REVISION _IOR('U', 213, \
+ struct cg2900_rev_data)
+
+#define CG2900_CHAR_DEV_IOCTL_EVENT_IDLE 0
+#define CG2900_CHAR_DEV_IOCTL_EVENT_RESET 1
+
+/**
+ * struct cg2900_rev_data - Contains revision data for the local controller.
+ * @revision: Revision of the controller, e.g. to indicate that it is
+ * a CG2900 controller.
+ * @sub_version: Subversion of the controller, e.g. to indicate a certain
+ * tape-out of the controller.
+ *
+ * The values needed to match the retrieved data to a specific controller can
+ * be obtained from the manufacturer.
+ */
+struct cg2900_rev_data {
+ int revision;
+ int sub_version;
+};
+
+#ifdef __KERNEL__
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+/* Temporary solution while in staging directory */
+#include "cg2900_hci.h"
+
+/**
+ * struct cg2900_chip_rev_info - Chip info structure.
+ * @manufacturer: Chip manufacturer.
+ * @hci_version: Bluetooth version supported over HCI.
+ * @hci_revision: Chip revision, i.e. which chip is this.
+ * @lmp_pal_version: Bluetooth version supported over air.
+ * @hci_sub_version: Chip sub-version, i.e. which tape-out is this.
+ *
+ * Note that these values match the Bluetooth Assigned Numbers,
+ * see http://www.bluetooth.org/
+ */
+struct cg2900_chip_rev_info {
+ u16 manufacturer;
+ u8 hci_version;
+ u16 hci_revision;
+ u8 lmp_pal_version;
+ u16 hci_sub_version;
+};
+
+struct cg2900_chip_dev;
+
+/**
+ * struct cg2900_id_callbacks - Chip handler identification callbacks.
+ * @check_chip_support: Called when chip is connected. If chip is supported by
+ * driver, return true and fill in @callbacks in @dev.
+ *
+ * Note that the callback may be NULL. It must always be NULL checked before
+ * calling.
+ */
+struct cg2900_id_callbacks {
+ bool (*check_chip_support)(struct cg2900_chip_dev *dev);
+};
+
+/**
+ * struct cg2900_chip_callbacks - Callback functions registered by chip handler.
+ * @data_from_chip: Called when data shall be transmitted to user.
+ * @chip_removed: Called when chip is removed.
+ *
+ * Note that some callbacks may be NULL. They must always be NULL checked before
+ * calling.
+ */
+struct cg2900_chip_callbacks {
+ void (*data_from_chip)(struct cg2900_chip_dev *dev,
+ struct sk_buff *skb);
+ void (*chip_removed)(struct cg2900_chip_dev *dev);
+};
+
+/**
+ * struct cg2900_trans_callbacks - Callback functions registered by transport.
+ * @open: CG2900 Core needs a transport.
+ * @close: CG2900 Core does not need a transport.
+ * @write: CG2900 Core transmits to the chip.
+ * @set_chip_power: CG2900 Core enables or disables the chip.
+ * @chip_startup_finished: CG2900 Chip startup finished notification.
+ *
+ * Note that some callbacks may be NULL. They must always be NULL checked before
+ * calling.
+ */
+struct cg2900_trans_callbacks {
+ int (*open)(struct cg2900_chip_dev *dev);
+ int (*close)(struct cg2900_chip_dev *dev);
+ int (*write)(struct cg2900_chip_dev *dev, struct sk_buff *skb);
+ void (*set_chip_power)(struct cg2900_chip_dev *dev, bool chip_on);
+ void (*chip_startup_finished)(struct cg2900_chip_dev *dev);
+};
+
+/**
+ * struct cg2900_chip_dev - Chip handler info structure.
+ * @dev: Device associated with this chip.
+ * @pdev: Platform device associated with this chip.
+ * @chip: Chip info such as manufacturer.
+ * @c_cb: Callback structure for the chip handler.
+ * @t_cb: Callback structure for the transport.
+ * @c_data: Arbitrary data set by chip handler.
+ * @t_data: Arbitrary data set by transport.
+ * @b_data: Arbitrary data set by board handler.
+ * @prv_data: Arbitrary data set by CG2900 Core.
+ */
+struct cg2900_chip_dev {
+ struct device *dev;
+ struct platform_device *pdev;
+ struct cg2900_chip_rev_info chip;
+ struct cg2900_chip_callbacks c_cb;
+ struct cg2900_trans_callbacks t_cb;
+ void *c_data;
+ void *t_data;
+ void *b_data;
+ void *prv_data;
+};
+
+/**
+ * struct cg2900_platform_data - Contains platform data for CG2900.
+ * @init: Callback called upon system start.
+ * @exit: Callback called upon system shutdown.
+ * @enable_chip: Callback called for enabling CG2900 chip.
+ * @disable_chip: Callback called for disabling CG2900 chip.
+ * @get_power_switch_off_cmd: Callback called to retrieve
+ * the HCI VS_Power_Switch_Off command (the
+ * command requires platform-specific GPIO data).
+ * @regulator_id: Id of the regulator that powers on the chip
+ * @bus: Transport used, see @include/net/bluetooth/hci.h.
+ * @gpio_sleep: Array of GPIO sleep settings.
+ * @enable_uart: Callback called when switching from UART GPIO to
+ * UART HW.
+ * @disable_uart: Callback called when switching from UART HW to
+ * UART GPIO.
+ * @n_uart_gpios: Number of UART GPIOs.
+ * @uart_enabled: Array of size @n_uart_gpios with GPIO setting for
+ * enabling UART HW (switching from GPIO mode).
+ * @uart_disabled: Array of size @n_uart_gpios with GPIO setting for
+ * disabling UART HW (switching to GPIO mode).
+ * @uart: Platform data structure for UART transport.
+ *
+ * Any callback may be NULL if not needed.
+ */
+struct cg2900_platform_data {
+ int (*init)(struct cg2900_chip_dev *dev);
+ void (*exit)(struct cg2900_chip_dev *dev);
+ void (*enable_chip)(struct cg2900_chip_dev *dev);
+ void (*disable_chip)(struct cg2900_chip_dev *dev);
+ struct sk_buff* (*get_power_switch_off_cmd)(struct cg2900_chip_dev *dev,
+ u16 *op_code);
+
+ char *regulator_id;
+ __u8 bus;
+ enum cg2900_gpio_pull_sleep *gpio_sleep;
+
+ struct {
+ int (*enable_uart)(struct cg2900_chip_dev *dev);
+ int (*disable_uart)(struct cg2900_chip_dev *dev);
+ int n_uart_gpios;
+ unsigned long *uart_enabled;
+ unsigned long *uart_disabled;
+ } uart;
+};
+
+/**
+ * struct cg2900_user_data - Contains platform data for CG2900 user.
+ * @dev: Current device. Set by CG2900 user upon probe.
+ * @opened: True if channel is opened.
+ * @user_data: Data set and used by CG2900 user.
+ * @private_data: Data set and used by CG2900 driver.
+ * @h4_channel: H4 channel. Set by CG2900 driver.
+ * @is_audio: True if this channel is an audio channel. Set by CG2900
+ * driver.
+ * @chip_independent: True if this channel does not require chip to be
+ * powered. Set by CG2900 driver.
+ * @bt_bus: Transport used, see @include/net/bluetooth/hci.h.
+ * @char_dev_name: Name to be used for character device.
+ * @channel_data: Input data specific to current device.
+ * @open: Open device channel. Set by CG2900 driver.
+ * @close: Close device channel. Set by CG2900 driver.
+ * @reset: Reset connectivity controller. Set by CG2900 driver.
+ * @alloc_skb: Alloc sk_buffer. Set by CG2900 driver.
+ * @write: Write to device channel. Set by CG2900 driver.
+ * @get_local_revision: Get revision data of the connected chip. Set by CG2900
+ * driver.
+ * @read_cb: Callback function called when data is received on the
+ * device channel. Set by CG2900 user. Mandatory.
+ * @reset_cb: Callback function called when the connectivity
+ * controller has been reset. Set by CG2900 user.
+ *
+ * Any callback may be NULL if not needed.
+ */
+struct cg2900_user_data {
+ struct device *dev;
+ bool opened;
+
+ void *user_data;
+ void *private_data;
+
+ int h4_channel;
+ bool is_audio;
+ bool chip_independent;
+
+ union {
+ __u8 bt_bus;
+ char *char_dev_name;
+ } channel_data;
+
+ int (*open)(struct cg2900_user_data *user_data);
+ void (*close)(struct cg2900_user_data *user_data);
+ int (*reset)(struct cg2900_user_data *user_data);
+ struct sk_buff * (*alloc_skb)(unsigned int size, gfp_t priority);
+ int (*write)(struct cg2900_user_data *user_data, struct sk_buff *skb);
+ bool (*get_local_revision)(struct cg2900_user_data *user_data,
+ struct cg2900_rev_data *rev_data);
+
+ void (*read_cb)(struct cg2900_user_data *user_data,
+ struct sk_buff *skb);
+ void (*reset_cb)(struct cg2900_user_data *user_data);
+};
+
+static inline void *cg2900_get_usr(struct cg2900_user_data *dev)
+{
+ if (dev)
+ return dev->user_data;
+ return NULL;
+}
+
+static inline void cg2900_set_usr(struct cg2900_user_data *dev, void *data)
+{
+ if (dev)
+ dev->user_data = data;
+}
+
+static inline void *cg2900_get_prv(struct cg2900_user_data *dev)
+{
+ if (dev)
+ return dev->private_data;
+ return NULL;
+}
+
+static inline void cg2900_set_prv(struct cg2900_user_data *dev, void *data)
+{
+ if (dev)
+ dev->private_data = data;
+}
+
+extern int cg2900_register_chip_driver(struct cg2900_id_callbacks *cb);
+extern void cg2900_deregister_chip_driver(struct cg2900_id_callbacks *cb);
+extern int cg2900_register_trans_driver(struct cg2900_chip_dev *dev);
+extern int cg2900_deregister_trans_driver(struct cg2900_chip_dev *dev);
+extern unsigned long cg2900_get_sleep_timeout(void);
+
+#endif /* __KERNEL__ */
+#endif /* _CG2900_H_ */
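+
+/*
+ * Usage sketch (illustrative only; the my_* names are hypothetical): a CG2900
+ * user driver receives a struct cg2900_user_data as platform data, fills in
+ * its callbacks, and opens the channel before writing, mirroring the pattern
+ * used by the staging sub-drivers.
+ *
+ *	static void my_read_cb(struct cg2900_user_data *user,
+ *			       struct sk_buff *skb)
+ *	{
+ *		... consume data received on the channel ...
+ *		kfree_skb(skb);
+ *	}
+ *
+ *	static int my_probe(struct platform_device *pdev)
+ *	{
+ *		struct cg2900_user_data *user = dev_get_platdata(&pdev->dev);
+ *
+ *		user->dev = &pdev->dev;
+ *		user->read_cb = my_read_cb;
+ *		return user->open(user);
+ *	}
+ */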
diff --git a/drivers/staging/cg2900/include/cg2900_audio.h b/drivers/staging/cg2900/include/cg2900_audio.h
new file mode 100644
index 00000000000..ff0f053fa53
--- /dev/null
+++ b/drivers/staging/cg2900/include/cg2900_audio.h
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth Audio Driver for ST-Ericsson controller.
+ */
+
+#ifndef _CG2900_AUDIO_H_
+#define _CG2900_AUDIO_H_
+
+#include <linux/types.h>
+
+/*
+ * Digital Audio Interface configuration types
+ */
+
+/**
+ * CG2900_A2DP_MAX_AVDTP_HDR_LEN - Max length of an AVDTP header.
+ *
+ * Max length of an AVDTP header for an A2DP packet.
+ */
+#define CG2900_A2DP_MAX_AVDTP_HDR_LEN 25
+
+/*
+ * Op codes used when writing commands to the audio interface from user space
+ * using the char device.
+ */
+#define CG2900_OPCODE_SET_DAI_CONF 0x01
+#define CG2900_OPCODE_GET_DAI_CONF 0x02
+#define CG2900_OPCODE_CONFIGURE_ENDPOINT 0x03
+#define CG2900_OPCODE_START_STREAM 0x04
+#define CG2900_OPCODE_STOP_STREAM 0x05
+
+/**
+ * enum cg2900_dai_dir - Contains the DAI port directions alternatives.
+ * @DAI_DIR_B_RX_A_TX: Port B as Rx and port A as Tx.
+ * @DAI_DIR_B_TX_A_RX: Port B as Tx and port A as Rx.
+ */
+enum cg2900_dai_dir {
+ DAI_DIR_B_RX_A_TX = 0x00,
+ DAI_DIR_B_TX_A_RX = 0x01
+};
+
+/**
+ * enum cg2900_dai_mode - DAI mode alternatives.
+ * @DAI_MODE_SLAVE: Slave.
+ * @DAI_MODE_MASTER: Master.
+ */
+enum cg2900_dai_mode {
+ DAI_MODE_SLAVE = 0x00,
+ DAI_MODE_MASTER = 0x01
+};
+
+/**
+ * enum cg2900_dai_stream_ratio - Voice stream ratio alternatives.
+ * @STREAM_RATIO_FM16_VOICE16: FM 16kHz, Voice 16kHz.
+ * @STREAM_RATIO_FM16_VOICE8: FM 16kHz, Voice 8kHz.
+ * @STREAM_RATIO_FM48_VOICE16: FM 48kHz, Voice 16kHz.
+ * @STREAM_RATIO_FM48_VOICE8: FM 48kHz, Voice 8kHz.
+ *
+ * Contains the alternatives for the voice stream ratio between the Audio stream
+ * sample rate and the Voice stream sample rate.
+ */
+enum cg2900_dai_stream_ratio {
+ STREAM_RATIO_FM16_VOICE16 = 0x01,
+ STREAM_RATIO_FM16_VOICE8 = 0x02,
+ STREAM_RATIO_FM48_VOICE16 = 0x03,
+ STREAM_RATIO_FM48_VOICE8 = 0x06
+};
+
+/**
+ * enum cg2900_dai_fs_duration - Frame sync duration alternatives.
+ * @SYNC_DURATION_8: 8 frames sync duration.
+ * @SYNC_DURATION_16: 16 frames sync duration.
+ * @SYNC_DURATION_24: 24 frames sync duration.
+ * @SYNC_DURATION_32: 32 frames sync duration.
+ * @SYNC_DURATION_48: 48 frames sync duration.
+ * @SYNC_DURATION_50: 50 frames sync duration.
+ * @SYNC_DURATION_64: 64 frames sync duration.
+ * @SYNC_DURATION_75: 75 frames sync duration.
+ * @SYNC_DURATION_96: 96 frames sync duration.
+ * @SYNC_DURATION_125: 125 frames sync duration.
+ * @SYNC_DURATION_128: 128 frames sync duration.
+ * @SYNC_DURATION_150: 150 frames sync duration.
+ * @SYNC_DURATION_192: 192 frames sync duration.
+ * @SYNC_DURATION_250: 250 frames sync duration.
+ * @SYNC_DURATION_256: 256 frames sync duration.
+ * @SYNC_DURATION_300: 300 frames sync duration.
+ * @SYNC_DURATION_384: 384 frames sync duration.
+ * @SYNC_DURATION_500: 500 frames sync duration.
+ * @SYNC_DURATION_512: 512 frames sync duration.
+ * @SYNC_DURATION_600: 600 frames sync duration.
+ * @SYNC_DURATION_768: 768 frames sync duration.
+ *
+ * This parameter sets the PCM frame sync duration. It is calculated as the
+ * ratio between the bit clock and the frame rate. For example, if the bit
+ * clock is 512 kHz and the stream sample rate is 8 kHz, the PCM frame sync
+ * duration is 512 / 8 = 64.
+ */
+enum cg2900_dai_fs_duration {
+ SYNC_DURATION_8 = 0,
+ SYNC_DURATION_16 = 1,
+ SYNC_DURATION_24 = 2,
+ SYNC_DURATION_32 = 3,
+ SYNC_DURATION_48 = 4,
+ SYNC_DURATION_50 = 5,
+ SYNC_DURATION_64 = 6,
+ SYNC_DURATION_75 = 7,
+ SYNC_DURATION_96 = 8,
+ SYNC_DURATION_125 = 9,
+ SYNC_DURATION_128 = 10,
+ SYNC_DURATION_150 = 11,
+ SYNC_DURATION_192 = 12,
+ SYNC_DURATION_250 = 13,
+ SYNC_DURATION_256 = 14,
+ SYNC_DURATION_300 = 15,
+ SYNC_DURATION_384 = 16,
+ SYNC_DURATION_500 = 17,
+ SYNC_DURATION_512 = 18,
+ SYNC_DURATION_600 = 19,
+ SYNC_DURATION_768 = 20
+};
+
+/**
+ * enum cg2900_dai_bit_clk - Bit Clock alternatives.
+ * @BIT_CLK_128: 128 Kbits clock.
+ * @BIT_CLK_256: 256 Kbits clock.
+ * @BIT_CLK_512: 512 Kbits clock.
+ * @BIT_CLK_768: 768 Kbits clock.
+ * @BIT_CLK_1024: 1024 Kbits clock.
+ * @BIT_CLK_1411_76: 1411.76 Kbits clock.
+ * @BIT_CLK_1536: 1536 Kbits clock.
+ * @BIT_CLK_2000: 2000 Kbits clock.
+ * @BIT_CLK_2048: 2048 Kbits clock.
+ * @BIT_CLK_2400: 2400 Kbits clock.
+ * @BIT_CLK_2823_52: 2823.52 Kbits clock.
+ * @BIT_CLK_3072: 3072 Kbits clock.
+ *
+ * This parameter sets the bit clock speed. This is the clocking of the actual
+ * data. A usual parameter for eSCO voice is 512 kHz.
+ */
+enum cg2900_dai_bit_clk {
+ BIT_CLK_128 = 0x00,
+ BIT_CLK_256 = 0x01,
+ BIT_CLK_512 = 0x02,
+ BIT_CLK_768 = 0x03,
+ BIT_CLK_1024 = 0x04,
+ BIT_CLK_1411_76 = 0x05,
+ BIT_CLK_1536 = 0x06,
+ BIT_CLK_2000 = 0x07,
+ BIT_CLK_2048 = 0x08,
+ BIT_CLK_2400 = 0x09,
+ BIT_CLK_2823_52 = 0x0A,
+ BIT_CLK_3072 = 0x0B
+};
+
+/**
+ * enum cg2900_dai_sample_rate - Sample rates alternatives.
+ * @SAMPLE_RATE_8: 8 kHz sample rate.
+ * @SAMPLE_RATE_16: 16 kHz sample rate.
+ * @SAMPLE_RATE_44_1: 44.1 kHz sample rate.
+ * @SAMPLE_RATE_48: 48 kHz sample rate.
+ */
+enum cg2900_dai_sample_rate {
+ SAMPLE_RATE_8 = 0,
+ SAMPLE_RATE_16 = 1,
+ SAMPLE_RATE_44_1 = 2,
+ SAMPLE_RATE_48 = 3
+};
+
+/**
+ * enum cg2900_dai_port_protocol - Port protocol alternatives.
+ * @PORT_PROTOCOL_PCM: Protocol PCM.
+ * @PORT_PROTOCOL_I2S: Protocol I2S.
+ */
+enum cg2900_dai_port_protocol {
+ PORT_PROTOCOL_PCM = 0x00,
+ PORT_PROTOCOL_I2S = 0x01
+};
+
+/**
+ * enum cg2900_dai_channel_sel - The channel selection alternatives.
+ * @CHANNEL_SELECTION_RIGHT: Right channel used.
+ * @CHANNEL_SELECTION_LEFT: Left channel used.
+ * @CHANNEL_SELECTION_BOTH: Both channels used.
+ */
+enum cg2900_dai_channel_sel {
+ CHANNEL_SELECTION_RIGHT = 0x00,
+ CHANNEL_SELECTION_LEFT = 0x01,
+ CHANNEL_SELECTION_BOTH = 0x02
+};
+
+/**
+ * struct cg2900_dai_conf_i2s_pcm - Port configuration structure.
+ * @mode: Operational mode of the port configured.
+ * @i2s_channel_sel: I2S channels used. Only valid if used in I2S mode.
+ * @slot_0_used: True if SCO slot 0 is used.
+ * @slot_1_used: True if SCO slot 1 is used.
+ * @slot_2_used: True if SCO slot 2 is used.
+ * @slot_3_used: True if SCO slot 3 is used.
+ * @slot_0_dir: Direction of slot 0.
+ * @slot_1_dir: Direction of slot 1.
+ * @slot_2_dir: Direction of slot 2.
+ * @slot_3_dir: Direction of slot 3.
+ * @slot_0_start: Slot 0 start (relative to the PCM frame sync).
+ * @slot_1_start: Slot 1 start (relative to the PCM frame sync).
+ * @slot_2_start: Slot 2 start (relative to the PCM frame sync).
+ * @slot_3_start: Slot 3 start (relative to the PCM frame sync).
+ * @ratio: Voice stream ratio between the Audio stream sample rate
+ * and the Voice stream sample rate.
+ * @protocol: Protocol used on port.
+ * @duration: Frame sync duration.
+ * @clk: Bit clock.
+ * @sample_rate: Sample rate.
+ */
+struct cg2900_dai_conf_i2s_pcm {
+ enum cg2900_dai_mode mode;
+ enum cg2900_dai_channel_sel i2s_channel_sel;
+ bool slot_0_used;
+ bool slot_1_used;
+ bool slot_2_used;
+ bool slot_3_used;
+ enum cg2900_dai_dir slot_0_dir;
+ enum cg2900_dai_dir slot_1_dir;
+ enum cg2900_dai_dir slot_2_dir;
+ enum cg2900_dai_dir slot_3_dir;
+ __u8 slot_0_start;
+ __u8 slot_1_start;
+ __u8 slot_2_start;
+ __u8 slot_3_start;
+ enum cg2900_dai_stream_ratio ratio;
+ enum cg2900_dai_port_protocol protocol;
+ enum cg2900_dai_fs_duration duration;
+ enum cg2900_dai_bit_clk clk;
+ enum cg2900_dai_sample_rate sample_rate;
+};
+
+/**
+ * enum cg2900_dai_half_period - Half period duration alternatives.
+ * @HALF_PER_DUR_8: 8 Bits.
+ * @HALF_PER_DUR_16: 16 Bits.
+ * @HALF_PER_DUR_24: 24 Bits.
+ * @HALF_PER_DUR_25: 25 Bits.
+ * @HALF_PER_DUR_32: 32 Bits.
+ * @HALF_PER_DUR_48: 48 Bits.
+ * @HALF_PER_DUR_64: 64 Bits.
+ * @HALF_PER_DUR_75: 75 Bits.
+ * @HALF_PER_DUR_96: 96 Bits.
+ * @HALF_PER_DUR_128: 128 Bits.
+ * @HALF_PER_DUR_150: 150 Bits.
+ * @HALF_PER_DUR_192: 192 Bits.
+ *
+ * This parameter sets the number of bits contained in each I2S half period,
+ * i.e. each channel slot. A usual value is 16 bits.
+ */
+enum cg2900_dai_half_period {
+ HALF_PER_DUR_8 = 0x00,
+ HALF_PER_DUR_16 = 0x01,
+ HALF_PER_DUR_24 = 0x02,
+ HALF_PER_DUR_25 = 0x03,
+ HALF_PER_DUR_32 = 0x04,
+ HALF_PER_DUR_48 = 0x05,
+ HALF_PER_DUR_64 = 0x06,
+ HALF_PER_DUR_75 = 0x07,
+ HALF_PER_DUR_96 = 0x08,
+ HALF_PER_DUR_128 = 0x09,
+ HALF_PER_DUR_150 = 0x0A,
+ HALF_PER_DUR_192 = 0x0B
+};
+
+/**
+ * enum cg2900_dai_word_width - Word width alternatives.
+ * @WORD_WIDTH_16: 16 bits words.
+ * @WORD_WIDTH_32: 32 bits words.
+ */
+enum cg2900_dai_word_width {
+ WORD_WIDTH_16 = 0x00,
+ WORD_WIDTH_32 = 0x01
+};
+
+/**
+ * struct cg2900_dai_conf_i2s - Port configuration struct for I2S.
+ * @mode: Operational mode of the port.
+ * @half_period: Half period duration.
+ * @channel_sel: Channel selection.
+ * @sample_rate: Sample rate.
+ * @word_width: Word width.
+ */
+struct cg2900_dai_conf_i2s {
+ enum cg2900_dai_mode mode;
+ enum cg2900_dai_half_period half_period;
+ enum cg2900_dai_channel_sel channel_sel;
+ enum cg2900_dai_sample_rate sample_rate;
+ enum cg2900_dai_word_width word_width;
+};
+
+/**
+ * union cg2900_dai_port_conf - DAI port configuration union.
+ * @i2s: The configuration struct for a port supporting only I2S.
+ * @i2s_pcm: The configuration struct for a port supporting both PCM and I2S.
+ */
+union cg2900_dai_port_conf {
+ struct cg2900_dai_conf_i2s i2s;
+ struct cg2900_dai_conf_i2s_pcm i2s_pcm;
+};
+
+/**
+ * enum cg2900_dai_ext_port_id - DAI external port id alternatives.
+ * @PORT_0_I2S: Port id is 0 and it supports only I2S.
+ * @PORT_1_I2S_PCM: Port id is 1 and it supports both I2S and PCM.
+ */
+enum cg2900_dai_ext_port_id {
+ PORT_0_I2S,
+ PORT_1_I2S_PCM
+};
+
+/**
+ * enum cg2900_audio_endpoint_id - Audio endpoint id alternatives.
+ * @ENDPOINT_PORT_0_I2S: Internal audio endpoint of the external I2S
+ * interface.
+ * @ENDPOINT_PORT_1_I2S_PCM: Internal audio endpoint of the external I2S/PCM
+ * interface.
+ * @ENDPOINT_SLIMBUS_VOICE: Internal audio endpoint of the external Slimbus
+ * voice interface. (Currently not supported)
+ * @ENDPOINT_SLIMBUS_AUDIO: Internal audio endpoint of the external Slimbus
+ * audio interface. (Currently not supported)
+ * @ENDPOINT_BT_SCO_INOUT: Bluetooth SCO bidirectional.
+ * @ENDPOINT_BT_A2DP_SRC: Bluetooth A2DP source.
+ * @ENDPOINT_BT_A2DP_SNK: Bluetooth A2DP sink.
+ * @ENDPOINT_FM_RX: FM receive.
+ * @ENDPOINT_FM_TX: FM transmit.
+ * @ENDPOINT_ANALOG_OUT: Analog out.
+ * @ENDPOINT_DSP_AUDIO_IN: DSP audio in.
+ * @ENDPOINT_DSP_AUDIO_OUT: DSP audio out.
+ * @ENDPOINT_DSP_VOICE_IN: DSP voice in.
+ * @ENDPOINT_DSP_VOICE_OUT: DSP voice out.
+ * @ENDPOINT_DSP_TONE_IN: DSP tone in.
+ * @ENDPOINT_BURST_BUFFER_IN: Burst buffer in.
+ * @ENDPOINT_BURST_BUFFER_OUT: Burst buffer out.
+ * @ENDPOINT_MUSIC_DECODER: Music decoder.
+ * @ENDPOINT_HCI_AUDIO_IN: HCI audio in.
+ */
+enum cg2900_audio_endpoint_id {
+ ENDPOINT_PORT_0_I2S,
+ ENDPOINT_PORT_1_I2S_PCM,
+ ENDPOINT_SLIMBUS_VOICE,
+ ENDPOINT_SLIMBUS_AUDIO,
+ ENDPOINT_BT_SCO_INOUT,
+ ENDPOINT_BT_A2DP_SRC,
+ ENDPOINT_BT_A2DP_SNK,
+ ENDPOINT_FM_RX,
+ ENDPOINT_FM_TX,
+ ENDPOINT_ANALOG_OUT,
+ ENDPOINT_DSP_AUDIO_IN,
+ ENDPOINT_DSP_AUDIO_OUT,
+ ENDPOINT_DSP_VOICE_IN,
+ ENDPOINT_DSP_VOICE_OUT,
+ ENDPOINT_DSP_TONE_IN,
+ ENDPOINT_BURST_BUFFER_IN,
+ ENDPOINT_BURST_BUFFER_OUT,
+ ENDPOINT_MUSIC_DECODER,
+ ENDPOINT_HCI_AUDIO_IN
+};
+
+/**
+ * struct cg2900_dai_config - Configuration struct for Digital Audio Interface.
+ * @port: The port id to configure. Acts as a discriminator for @conf parameter
+ * which is a union.
+ * @conf: The configuration union that contains the parameters for the port.
+ */
+struct cg2900_dai_config {
+ enum cg2900_dai_ext_port_id port;
+ union cg2900_dai_port_conf conf;
+};
+
+/*
+ * Endpoint configuration types
+ */
+
+/**
+ * enum cg2900_endpoint_sample_rate - Audio endpoint configuration sample rate alternatives.
+ *
+ * This enum defines the same values as @cg2900_dai_sample_rate, but
+ * is kept to preserve the API.
+ *
+ * @ENDPOINT_SAMPLE_RATE_8_KHZ: 8 kHz sample rate.
+ * @ENDPOINT_SAMPLE_RATE_16_KHZ: 16 kHz sample rate.
+ * @ENDPOINT_SAMPLE_RATE_44_1_KHZ: 44.1 kHz sample rate.
+ * @ENDPOINT_SAMPLE_RATE_48_KHZ: 48 kHz sample rate.
+ */
+enum cg2900_endpoint_sample_rate {
+ ENDPOINT_SAMPLE_RATE_8_KHZ = SAMPLE_RATE_8,
+ ENDPOINT_SAMPLE_RATE_16_KHZ = SAMPLE_RATE_16,
+ ENDPOINT_SAMPLE_RATE_44_1_KHZ = SAMPLE_RATE_44_1,
+ ENDPOINT_SAMPLE_RATE_48_KHZ = SAMPLE_RATE_48
+};
+
+
+/**
+ * struct cg2900_endpoint_config_a2dp_src - A2DP source audio endpoint configurations.
+ * @sample_rate: Sample rate.
+ * @channel_count: Number of channels.
+ */
+struct cg2900_endpoint_config_a2dp_src {
+ enum cg2900_endpoint_sample_rate sample_rate;
+ unsigned int channel_count;
+};
+
+/**
+ * struct cg2900_endpoint_config_fm - Configuration parameters for an FM endpoint.
+ * @sample_rate: The sample rate alternatives for the FM audio endpoints.
+ */
+struct cg2900_endpoint_config_fm {
+ enum cg2900_endpoint_sample_rate sample_rate;
+};
+
+
+/**
+ * struct cg2900_endpoint_config_sco_in_out - SCO audio endpoint configuration structure.
+ * @sample_rate: Sample rate, valid values are
+ * * ENDPOINT_SAMPLE_RATE_8_KHZ
+ * * ENDPOINT_SAMPLE_RATE_16_KHZ.
+ */
+struct cg2900_endpoint_config_sco_in_out {
+ enum cg2900_endpoint_sample_rate sample_rate;
+};
+
+/**
+ * union cg2900_endpoint_config - Different audio endpoint configurations.
+ * @sco: SCO audio endpoint configuration structure.
+ * @a2dp_src: A2DP source audio endpoint configuration structure.
+ * @fm: FM audio endpoint configuration structure.
+ */
+union cg2900_endpoint_config_union {
+ struct cg2900_endpoint_config_sco_in_out sco;
+ struct cg2900_endpoint_config_a2dp_src a2dp_src;
+ struct cg2900_endpoint_config_fm fm;
+};
+
+/**
+ * struct cg2900_endpoint_config - Audio endpoint configuration.
+ * @endpoint_id: Identifies the audio endpoint. Works as a discriminator
+ * for the config union.
+ * @config: Union holding the configuration parameters for
+ * the endpoint.
+ */
+struct cg2900_endpoint_config {
+ enum cg2900_audio_endpoint_id endpoint_id;
+ union cg2900_endpoint_config_union config;
+};
+
+#ifdef __KERNEL__
+#include <linux/device.h>
+
+int cg2900_audio_get_devices(struct device *devices[], __u8 size);
+int cg2900_audio_open(unsigned int *session, struct device *parent);
+int cg2900_audio_close(unsigned int *session);
+int cg2900_audio_set_dai_config(unsigned int session,
+ struct cg2900_dai_config *config);
+int cg2900_audio_get_dai_config(unsigned int session,
+ struct cg2900_dai_config *config);
+int cg2900_audio_config_endpoint(unsigned int session,
+ struct cg2900_endpoint_config *config);
+int cg2900_audio_start_stream(unsigned int session,
+ enum cg2900_audio_endpoint_id ep_1,
+ enum cg2900_audio_endpoint_id ep_2,
+ unsigned int *stream_handle);
+int cg2900_audio_stop_stream(unsigned int session,
+ unsigned int stream_handle);
+
+#endif /* __KERNEL__ */
+#endif /* _CG2900_AUDIO_H_ */
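+
+/*
+ * Usage sketch (illustrative only): a typical in-kernel caller opens an audio
+ * session, configures the DAI, and starts a stream between two endpoints.
+ * Error handling is omitted, parent_dev stands for whichever device owns the
+ * session, and .conf must also be filled in for the chosen port.
+ *
+ *	unsigned int session, stream;
+ *	struct cg2900_dai_config dai = { .port = PORT_1_I2S_PCM };
+ *
+ *	cg2900_audio_open(&session, parent_dev);
+ *	cg2900_audio_set_dai_config(session, &dai);
+ *	cg2900_audio_start_stream(session, ENDPOINT_PORT_1_I2S_PCM,
+ *				  ENDPOINT_BT_SCO_INOUT, &stream);
+ *	...
+ *	cg2900_audio_stop_stream(session, stream);
+ *	cg2900_audio_close(&session);
+ */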
diff --git a/drivers/staging/cg2900/include/cg2900_hci.h b/drivers/staging/cg2900/include/cg2900_hci.h
new file mode 100644
index 00000000000..e094a9dddbd
--- /dev/null
+++ b/drivers/staging/cg2900/include/cg2900_hci.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * This file is a staging solution and shall be integrated into
+ * /include/net/bluetooth/hci.h.
+ */
+
+#ifndef __CG2900_HCI_H
+#define __CG2900_HCI_H
+
+#define HCI_EV_HW_ERROR 0x10
+struct hci_ev_hw_error {
+ __u8 hw_code;
+} __packed;
+
+#endif /* __CG2900_HCI_H */
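A short sketch of how a Bluetooth event handler could consume the hardware-error event defined above (illustrative only; skb and the surrounding HCI event dispatch are assumed):

	struct hci_event_hdr *hdr = (struct hci_event_hdr *)skb->data;

	if (hdr->evt == HCI_EV_HW_ERROR) {
		struct hci_ev_hw_error *ev =
			(struct hci_ev_hw_error *)(skb->data + sizeof(*hdr));

		BT_ERR("Controller hardware error 0x%2.2x", ev->hw_code);
	}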
diff --git a/drivers/staging/cg2900/mfd/Makefile b/drivers/staging/cg2900/mfd/Makefile
new file mode 100644
index 00000000000..bdbd8de90ee
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for ST-Ericsson CG2900 connectivity combo controller
+#
+
+ccflags-y := \
+ -Idrivers/staging/cg2900/include
+
+obj-$(CONFIG_CG2900) += cg2900_core.o cg2900_lib.o
+export-objs := cg2900_core.o cg2900_lib.o
+
+obj-$(CONFIG_CG2900) += cg2900_char_devices.o
+
+obj-$(CONFIG_CG2900_TEST) += cg2900_test.o
+
+obj-$(CONFIG_CG2900_CHIP) += cg2900_chip.o
+obj-$(CONFIG_STLC2690_CHIP) += stlc2690_chip.o
+
+obj-$(CONFIG_CG2900_AUDIO) += cg2900_audio.o
diff --git a/drivers/staging/cg2900/mfd/cg2900_audio.c b/drivers/staging/cg2900/mfd/cg2900_audio.c
new file mode 100644
index 00000000000..2f00d79ed1e
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/cg2900_audio.c
@@ -0,0 +1,3486 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth Audio Driver for ST-Ericsson CG2900 controller.
+ */
+#define NAME "cg2900_audio"
+#define pr_fmt(fmt) NAME ": " fmt "\n"
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+
+#include "cg2900.h"
+#include "cg2900_audio.h"
+#include "cg2900_chip.h"
+
+#define MAX_NBR_OF_USERS 10
+#define FIRST_USER 1
+
+/*
+ * This is a default ACL handle. It is necessary to provide to the chip, but
+ * does not actually do anything.
+ */
+#define DEFAULT_ACL_HANDLE 0x0001
+
+/* Use a timeout of 5 seconds when waiting for a command response */
+#define RESP_TIMEOUT 5000
+
+#define BT_DEV (info->dev_bt)
+#define FM_DEV (info->dev_fm)
+
+/* Bluetooth error codes */
+#define HCI_BT_ERROR_NO_ERROR 0x00
+
+/* Used to select proper API, ignoring subrevisions etc */
+enum chip_revision {
+ CHIP_REV_PG1,
+ CHIP_REV_PG2
+};
+
+/**
+ * enum chip_resp_state - State when communicating with the CG2900 controller.
+ * @IDLE: No outstanding packets to the controller.
+ * @WAITING: Packet has been sent to the controller. Waiting for
+ * response.
+ * @RESP_RECEIVED: Response from controller has been received but not yet
+ * handled.
+ */
+enum chip_resp_state {
+ IDLE,
+ WAITING,
+ RESP_RECEIVED
+};
+
+/**
+ * enum main_state - Main state for the CG2900 Audio driver.
+ * @OPENED: Audio driver has registered to CG2900 Core.
+ * @CLOSED: Audio driver is not registered to CG2900 Core.
+ * @RESET: A reset of CG2900 Core has occurred and no user has re-opened
+ * the audio driver.
+ */
+enum main_state {
+ OPENED,
+ CLOSED,
+ RESET
+};
+
+/**
+ * struct endpoint_list - List for storing endpoint configuration nodes.
+ * @ep_list: Pointer to first node in list.
+ * @management_mutex: Mutex for handling access to list.
+ */
+struct endpoint_list {
+ struct list_head ep_list;
+ struct mutex management_mutex;
+};
+
+/**
+ * struct endpoint_config_node - Node for storing endpoint configuration.
+ * @list: list_head struct.
+ * @endpoint_id: Endpoint ID.
+ * @config: Stored configuration for this endpoint.
+ */
+struct endpoint_config_node {
+ struct list_head list;
+ enum cg2900_audio_endpoint_id endpoint_id;
+ union cg2900_endpoint_config_union config;
+};
+
+/**
+ * struct audio_info - Main CG2900 Audio driver info structure.
+ * @list: list_head struct.
+ * @state: Current state of the CG2900 Audio driver.
+ * @revision: Chip revision, used to select API.
+ * @misc_dev: The misc device created by this driver.
+ * @misc_registered: True if misc device is registered.
+ * @parent: Parent device.
+ * @dev_bt: Device registered by this driver for the BT
+ * audio channel.
+ * @dev_fm: Device registered by this driver for the FM
+ * audio channel.
+ * @filp: Current char device file pointer.
+ * @management_mutex: Mutex for handling access to CG2900 Audio driver
+ * management.
+ * @bt_mutex: Mutex for handling access to BT audio channel.
+ * @fm_mutex: Mutex for handling access to FM audio channel.
+ * @nbr_of_users_active: Number of sessions open in the CG2900 Audio
+ * driver.
+ * @i2s_config: DAI I2S configuration.
+ * @i2s_pcm_config: DAI PCM_I2S configuration.
+ * @i2s_config_known:	True if @i2s_config has been set,
+ *			false otherwise.
+ * @i2s_pcm_config_known:	True if @i2s_pcm_config has been set,
+ *			false otherwise.
+ * @endpoints: List containing the endpoint configurations.
+ * @stream_ids: Bitmask for in-use stream ids (only used with
+ * PG2 chip API).
+ */
+struct audio_info {
+ struct list_head list;
+ enum main_state state;
+ enum chip_revision revision;
+ struct miscdevice misc_dev;
+ bool misc_registered;
+ struct device *parent;
+ struct device *dev_bt;
+ struct device *dev_fm;
+ struct file *filp;
+ struct mutex management_mutex;
+ struct mutex bt_mutex;
+ struct mutex fm_mutex;
+ int nbr_of_users_active;
+ struct cg2900_dai_conf_i2s i2s_config;
+ struct cg2900_dai_conf_i2s_pcm i2s_pcm_config;
+ bool i2s_config_known;
+ bool i2s_pcm_config_known;
+ struct endpoint_list endpoints;
+ u8 stream_ids[16];
+};
+
+/**
+ * struct audio_user - CG2900 audio user info structure.
+ * @session: Stored session for the char device.
+ * @resp_state: State for controller communications.
+ * @info: CG2900 audio info structure.
+ */
+struct audio_user {
+ int session;
+ enum chip_resp_state resp_state;
+ struct audio_info *info;
+};
+
+/**
+ * struct audio_cb_info - Callback info structure registered in @user_data.
+ * @user: Audio user currently awaiting data on the channel.
+ * @wq: Wait queue for this channel.
+ * @skb_queue:	Queue of received socket buffers (skbs).
+ */
+struct audio_cb_info {
+ struct audio_user *user;
+ wait_queue_head_t wq;
+ struct sk_buff_head skb_queue;
+};
+
+/**
+ * struct char_dev_info - CG2900 character device info structure.
+ * @session: Stored session for the char device.
+ * @stored_data: Data returned when executing last command, if any.
+ * @stored_data_len: Length of @stored_data in bytes.
+ * @management_mutex: Mutex for handling access to char dev management.
+ * @rw_mutex: Mutex for handling access to char dev writes and reads.
+ * @info: CG2900 audio info struct.
+ * @rx_queue: Data queue.
+ */
+struct char_dev_info {
+ int session;
+ u8 *stored_data;
+ int stored_data_len;
+ struct mutex management_mutex;
+ struct mutex rw_mutex;
+ struct audio_info *info;
+ struct sk_buff_head rx_queue;
+};
+
+/*
+ * cg2900_audio_devices - List of active CG2900 audio devices.
+ */
+LIST_HEAD(cg2900_audio_devices);
+
+/*
+ * cg2900_audio_sessions - Pointers to currently opened sessions (maps
+ * session ID to user info).
+ */
+static struct audio_user *cg2900_audio_sessions[MAX_NBR_OF_USERS];
+
+/*
+ * Internal conversion functions
+ *
+ * Since the CG2900 APIs use several different ways to encode the
+ * same parameter in different cases, we have to use translator
+ * functions.
+ */
+
+/**
+ * session_config_sample_rate() - Convert sample rate to format used in VS_Set_SessionConfiguration.
+ * @rate: Sample rate in API encoding.
+ */
+static u8 session_config_sample_rate(enum cg2900_endpoint_sample_rate rate)
+{
+ static const u8 codes[] = {
+ [ENDPOINT_SAMPLE_RATE_8_KHZ] = CG2900_BT_SESSION_RATE_8K,
+ [ENDPOINT_SAMPLE_RATE_16_KHZ] = CG2900_BT_SESSION_RATE_16K,
+ [ENDPOINT_SAMPLE_RATE_44_1_KHZ] = CG2900_BT_SESSION_RATE_44_1K,
+ [ENDPOINT_SAMPLE_RATE_48_KHZ] = CG2900_BT_SESSION_RATE_48K
+ };
+
+ return codes[rate];
+}
+
+/**
+ * mc_i2s_sample_rate() - Convert sample rate to format used in VS_Port_Config for I2S.
+ * @rate: Sample rate in API encoding.
+ */
+static u8 mc_i2s_sample_rate(enum cg2900_dai_sample_rate rate)
+{
+ static const u8 codes[] = {
+ [SAMPLE_RATE_8] = CG2900_MC_I2S_SAMPLE_RATE_8,
+ [SAMPLE_RATE_16] = CG2900_MC_I2S_SAMPLE_RATE_16,
+ [SAMPLE_RATE_44_1] = CG2900_MC_I2S_SAMPLE_RATE_44_1,
+ [SAMPLE_RATE_48] = CG2900_MC_I2S_SAMPLE_RATE_48
+ };
+
+ return codes[rate];
+}
+
+/**
+ * mc_pcm_sample_rate() - Convert sample rate to format used in VS_Port_Config for PCM/I2S.
+ * @rate: Sample rate in API encoding.
+ */
+static u8 mc_pcm_sample_rate(enum cg2900_dai_sample_rate rate)
+{
+ static const u8 codes[] = {
+ [SAMPLE_RATE_8] = CG2900_MC_PCM_SAMPLE_RATE_8,
+ [SAMPLE_RATE_16] = CG2900_MC_PCM_SAMPLE_RATE_16,
+ [SAMPLE_RATE_44_1] = CG2900_MC_PCM_SAMPLE_RATE_44_1,
+ [SAMPLE_RATE_48] = CG2900_MC_PCM_SAMPLE_RATE_48
+ };
+
+ return codes[rate];
+}
+
+/**
+ * mc_i2s_channel_select() - Convert channel selection to format used in VS_Port_Config.
+ * @sel: Channel selection in API encoding.
+ */
+static u8 mc_i2s_channel_select(enum cg2900_dai_channel_sel sel)
+{
+ static const u8 codes[] = {
+ [CHANNEL_SELECTION_RIGHT] = CG2900_MC_I2S_RIGHT_CHANNEL,
+ [CHANNEL_SELECTION_LEFT] = CG2900_MC_I2S_LEFT_CHANNEL,
+ [CHANNEL_SELECTION_BOTH] = CG2900_MC_I2S_BOTH_CHANNELS
+ };
+ return codes[sel];
+}
+
+/**
+ * get_fs_duration() - Convert framesync-enumeration to real value.
+ * @duration:	Framesync duration (API encoding).
+ *
+ * Returns:
+ * Duration in bits.
+ */
+static u16 get_fs_duration(enum cg2900_dai_fs_duration duration)
+{
+ static const u16 values[] = {
+ [SYNC_DURATION_8] = 8,
+ [SYNC_DURATION_16] = 16,
+ [SYNC_DURATION_24] = 24,
+ [SYNC_DURATION_32] = 32,
+ [SYNC_DURATION_48] = 48,
+ [SYNC_DURATION_50] = 50,
+ [SYNC_DURATION_64] = 64,
+ [SYNC_DURATION_75] = 75,
+ [SYNC_DURATION_96] = 96,
+ [SYNC_DURATION_125] = 125,
+ [SYNC_DURATION_128] = 128,
+ [SYNC_DURATION_150] = 150,
+ [SYNC_DURATION_192] = 192,
+ [SYNC_DURATION_250] = 250,
+ [SYNC_DURATION_256] = 256,
+ [SYNC_DURATION_300] = 300,
+ [SYNC_DURATION_384] = 384,
+ [SYNC_DURATION_500] = 500,
+ [SYNC_DURATION_512] = 512,
+ [SYNC_DURATION_600] = 600,
+ [SYNC_DURATION_768] = 768
+ };
+ return values[duration];
+}
+
+/**
+ * mc_i2s_role() - Convert master/slave encoding to format for I2S-ports.
+ * @mode: Master/slave in API encoding.
+ */
+static u8 mc_i2s_role(enum cg2900_dai_mode mode)
+{
+ if (mode == DAI_MODE_SLAVE)
+ return CG2900_I2S_MODE_SLAVE;
+ else
+ return CG2900_I2S_MODE_MASTER;
+}
+
+/**
+ * mc_pcm_role() - Convert master/slave encoding to format for PCM/I2S-port.
+ * @mode: Master/slave in API encoding.
+ */
+static u8 mc_pcm_role(enum cg2900_dai_mode mode)
+{
+ if (mode == DAI_MODE_SLAVE)
+ return CG2900_PCM_MODE_SLAVE;
+ else
+ return CG2900_PCM_MODE_MASTER;
+}
+
+/**
+ * fm_get_conversion() - Convert sample rate to the up/down conversion used in X_Set_Control FM commands.
+ * @srate: Sample rate.
+ */
+static u16 fm_get_conversion(enum cg2900_endpoint_sample_rate srate)
+{
+ if (srate >= ENDPOINT_SAMPLE_RATE_44_1_KHZ)
+ return CG2900_FM_CMD_SET_CTRL_CONV_UP;
+ else
+ return CG2900_FM_CMD_SET_CTRL_CONV_DOWN;
+}
+
+/**
+ * get_info() - Return info structure for this device.
+ * @dev: Current device.
+ *
+ * This function returns the info structure on the following basis:
+ * * If dev is NULL return first info struct found. If none is found return
+ * NULL.
+ * * If dev is valid we will return corresponding info struct if dev is the
+ * parent of the info struct or if dev's parent is the parent of the info
+ * struct.
+ * * If dev is valid and no info structure is found, a new info struct is
+ * allocated, initialized, and returned.
+ *
+ * Returns:
+ * Pointer to info struct if there is no error.
+ * NULL if NULL was supplied and no info structure exists.
+ * ERR_PTR(-ENOMEM) if allocation fails.
+ */
+static struct audio_info *get_info(struct device *dev)
+{
+ struct list_head *cursor;
+ struct audio_info *tmp;
+ struct audio_info *info = NULL;
+
+ /*
+ * Find the info structure for dev. If NULL is supplied for dev
+ * just return first device found.
+ */
+ list_for_each(cursor, &cg2900_audio_devices) {
+ tmp = list_entry(cursor, struct audio_info, list);
+ if (!dev || tmp->parent == dev->parent || tmp->parent == dev) {
+ info = tmp;
+ break;
+ }
+ }
+
+ if (!dev || info)
+ return info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev, "Could not allocate info struct\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ info->parent = dev->parent;
+
+ /* Initiate the mutexes */
+ mutex_init(&(info->management_mutex));
+ mutex_init(&(info->bt_mutex));
+ mutex_init(&(info->fm_mutex));
+ mutex_init(&(info->endpoints.management_mutex));
+
+ /* Initiate the endpoint list */
+ INIT_LIST_HEAD(&info->endpoints.ep_list);
+
+ list_add_tail(&info->list, &cg2900_audio_devices);
+
+ dev_info(dev, "CG2900 device added\n");
+ return info;
+}
+
+/**
+ * flush_endpoint_list() - Deletes all stored endpoints in @list.
+ * @list: List of endpoints.
+ */
+static void flush_endpoint_list(struct endpoint_list *list)
+{
+ struct list_head *cursor, *next;
+ struct endpoint_config_node *tmp;
+
+ mutex_lock(&list->management_mutex);
+ list_for_each_safe(cursor, next, &(list->ep_list)) {
+ tmp = list_entry(cursor, struct endpoint_config_node, list);
+ list_del(cursor);
+ kfree(tmp);
+ }
+ mutex_unlock(&list->management_mutex);
+}
+
+/**
+ * device_removed() - Remove device from list if there are no channels left.
+ * @info: CG2900 audio info structure.
+ */
+static void device_removed(struct audio_info *info)
+{
+ struct list_head *cursor;
+ struct audio_info *tmp;
+
+ if (info->dev_bt || info->dev_fm)
+ /* There are still devices active */
+ return;
+
+ /* Find the stored info structure */
+ list_for_each(cursor, &cg2900_audio_devices) {
+ tmp = list_entry(cursor, struct audio_info, list);
+ if (tmp == info) {
+ list_del(cursor);
+ break;
+ }
+ }
+
+ flush_endpoint_list(&info->endpoints);
+
+ mutex_destroy(&info->management_mutex);
+ mutex_destroy(&info->bt_mutex);
+ mutex_destroy(&info->fm_mutex);
+ mutex_destroy(&info->endpoints.management_mutex);
+
+ kfree(info);
+ pr_info("CG2900 Audio device removed");
+}
+
+/**
+ * read_cb() - Handle data received from STE connectivity driver.
+ * @dev: Device receiving data.
+ * @skb:	Buffer with data coming from the device.
+ */
+static void read_cb(struct cg2900_user_data *dev, struct sk_buff *skb)
+{
+ struct audio_cb_info *cb_info;
+
+ cb_info = cg2900_get_usr(dev);
+
+ if (!(cb_info->user)) {
+ dev_err(dev->dev, "NULL supplied as cb_info->user\n");
+ return;
+ }
+
+ /* Mark that packet has been received */
+ dev_dbg(dev->dev, "New resp_state: RESP_RECEIVED");
+ cb_info->user->resp_state = RESP_RECEIVED;
+ skb_queue_tail(&cb_info->skb_queue, skb);
+ wake_up_all(&cb_info->wq);
+}
+
+/**
+ * reset_cb() - Reset callback function.
+ * @dev: CG2900_Core device resetting.
+ */
+static void reset_cb(struct cg2900_user_data *dev)
+{
+ struct audio_info *info;
+
+ dev_dbg(dev->dev, "reset_cb\n");
+
+ info = dev_get_drvdata(dev->dev);
+ mutex_lock(&info->management_mutex);
+ info->nbr_of_users_active = 0;
+ info->state = RESET;
+ mutex_unlock(&info->management_mutex);
+}
+
+/**
+ * get_session_user() - Check that supplied session is within valid range.
+ * @session: Session ID.
+ *
+ * Returns:
+ * Audio_user if there is no error.
+ * NULL for bad session ID.
+ */
+static struct audio_user *get_session_user(int session)
+{
+ struct audio_user *audio_user;
+
+ if (session < FIRST_USER || session >= MAX_NBR_OF_USERS) {
+ pr_err("Calling with invalid session %d", session);
+ return NULL;
+ }
+
+ audio_user = cg2900_audio_sessions[session];
+ if (!audio_user)
+ pr_err("Calling with non-opened session %d", session);
+ return audio_user;
+}
+
+/**
+ * del_endpoint_private() - Deletes an endpoint from @list.
+ * @endpoint_id: Endpoint ID.
+ * @list: List of endpoints.
+ *
+ * Deletes an endpoint from the supplied endpoint list.
+ * This function does not take the list's management mutex; the caller
+ * is expected to hold it.
+ */
+static void del_endpoint_private(enum cg2900_audio_endpoint_id endpoint_id,
+ struct endpoint_list *list)
+{
+ struct list_head *cursor, *next;
+ struct endpoint_config_node *tmp;
+
+ list_for_each_safe(cursor, next, &(list->ep_list)) {
+ tmp = list_entry(cursor, struct endpoint_config_node, list);
+ if (tmp->endpoint_id == endpoint_id) {
+ list_del(cursor);
+ kfree(tmp);
+ }
+ }
+}
+
+/**
+ * add_endpoint() - Add endpoint node to @list.
+ * @ep_config: Endpoint configuration.
+ * @list: List of endpoints.
+ *
+ * Adds an endpoint node to the supplied list and copies the supplied
+ * config into the node.
+ * If a node already exists for the supplied endpoint, the old node is removed
+ * and replaced by the new node.
+ */
+static void add_endpoint(struct cg2900_endpoint_config *ep_config,
+ struct endpoint_list *list)
+{
+ struct endpoint_config_node *item;
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ pr_err("add_endpoint: Failed to alloc memory");
+ return;
+ }
+
+ /* Store values */
+ item->endpoint_id = ep_config->endpoint_id;
+ memcpy(&(item->config), &(ep_config->config), sizeof(item->config));
+
+ mutex_lock(&(list->management_mutex));
+
+ /*
+	 * Check if the endpoint ID already exists in the list.
+ * If that is the case, remove it.
+ */
+ if (!list_empty(&(list->ep_list)))
+ del_endpoint_private(ep_config->endpoint_id, list);
+
+ list_add_tail(&(item->list), &(list->ep_list));
+
+ mutex_unlock(&(list->management_mutex));
+}
+
+/**
+ * find_endpoint() - Finds endpoint identified by @endpoint_id in @list.
+ * @endpoint_id: Endpoint ID.
+ * @list: List of endpoints.
+ *
+ * Returns:
+ * Endpoint configuration if there is no error.
+ * NULL if no configuration can be found for @endpoint_id.
+ */
+static union cg2900_endpoint_config_union *
+find_endpoint(enum cg2900_audio_endpoint_id endpoint_id,
+ struct endpoint_list *list)
+{
+ struct list_head *cursor, *next;
+ struct endpoint_config_node *tmp;
+ struct endpoint_config_node *ret_ep = NULL;
+
+ mutex_lock(&list->management_mutex);
+ list_for_each_safe(cursor, next, &(list->ep_list)) {
+ tmp = list_entry(cursor, struct endpoint_config_node, list);
+ if (tmp->endpoint_id == endpoint_id) {
+ ret_ep = tmp;
+ break;
+ }
+ }
+ mutex_unlock(&list->management_mutex);
+
+ if (ret_ep)
+ return &(ret_ep->config);
+ else
+ return NULL;
+}
+
+/**
+ * new_stream_id() - Allocate a new stream id.
+ * @info: Current audio info struct.
+ *
+ * Returns:
+ * A new valid stream id (0-127) on success.
+ * -ENOMEM if no id is available.
+ */
+static s8 new_stream_id(struct audio_info *info)
+{
+ int r;
+
+ mutex_lock(&info->management_mutex);
+
+ r = find_first_zero_bit(info->stream_ids,
+ 8 * sizeof(info->stream_ids));
+
+ if (r >= 8 * sizeof(info->stream_ids)) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ set_bit(r, (unsigned long int *)info->stream_ids);
+
+out:
+ mutex_unlock(&info->management_mutex);
+ return r;
+}
+
+/**
+ * release_stream_id() - Release a stream id.
+ * @info: Current audio info struct.
+ * @id: Stream to release.
+ */
+static void release_stream_id(struct audio_info *info, u8 id)
+{
+ if (id >= 8 * sizeof(info->stream_ids))
+ return;
+
+ mutex_lock(&info->management_mutex);
+ clear_bit(id, (unsigned long int *)info->stream_ids);
+ mutex_unlock(&info->management_mutex);
+}
+
+/**
+ * receive_fm_write_response() - Wait for and handle the response to an FM Legacy WriteCommand request.
+ * @audio_user: Audio user to check for.
+ * @command: FM command to wait for.
+ *
+ * This function first waits (up to 5 seconds) for a response to an FM
+ * write command and when one arrives, it checks that it is the one we
+ * are waiting for and also that no error has occurred.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ECOMM if no response was received.
+ * -EIO for other errors.
+ */
+static int receive_fm_write_response(struct audio_user *audio_user,
+ u16 command)
+{
+ int err = 0;
+ int res;
+ struct sk_buff *skb;
+ struct fm_leg_cmd_cmpl *pkt;
+ u16 rsp_cmd;
+ struct audio_cb_info *cb_info;
+ struct audio_info *info;
+ struct cg2900_user_data *pf_data;
+
+ info = audio_user->info;
+ pf_data = dev_get_platdata(info->dev_fm);
+ cb_info = cg2900_get_usr(pf_data);
+
+ /*
+ * Wait for callback to receive command complete and then wake us up
+ * again.
+ */
+ res = wait_event_timeout(cb_info->wq,
+ audio_user->resp_state == RESP_RECEIVED,
+ msecs_to_jiffies(RESP_TIMEOUT));
+ if (!res) {
+ dev_err(FM_DEV, "Timeout while waiting for return packet\n");
+ return -ECOMM;
+ } else if (res < 0) {
+ dev_err(FM_DEV,
+ "Error %d occurred while waiting for return packet\n",
+ res);
+ return -ECOMM;
+ }
+
+ /* OK, now we should have received answer. Let's check it. */
+ skb = skb_dequeue_tail(&cb_info->skb_queue);
+ if (!skb) {
+ dev_err(FM_DEV, "No skb in queue when it should be there\n");
+ return -EIO;
+ }
+
+ pkt = (struct fm_leg_cmd_cmpl *)skb->data;
+
+ /* Check if we received the correct event */
+ if (pkt->opcode != CG2900_FM_GEN_ID_LEGACY) {
+ dev_err(FM_DEV,
+ "Received unknown FM packet. 0x%X %X %X %X %X\n",
+ skb->data[0], skb->data[1], skb->data[2],
+ skb->data[3], skb->data[4]);
+ err = -EIO;
+ goto error_handling_free_skb;
+ }
+
+ /* FM Legacy Command complete event */
+ rsp_cmd = cg2900_get_fm_cmd_id(le16_to_cpu(pkt->response_head));
+
+ if (pkt->fm_function != CG2900_FM_CMD_PARAM_WRITECOMMAND ||
+ rsp_cmd != command) {
+ dev_err(FM_DEV,
+ "Received unexpected packet func 0x%X cmd 0x%04X\n",
+ pkt->fm_function, rsp_cmd);
+ err = -EIO;
+ goto error_handling_free_skb;
+ }
+
+ if (pkt->cmd_status != CG2900_FM_CMD_STATUS_COMMAND_SUCCEEDED) {
+ dev_err(FM_DEV, "FM Command failed (%d)\n", pkt->cmd_status);
+ err = -EIO;
+ goto error_handling_free_skb;
+ }
+ /* Operation succeeded. We are now done */
+
+error_handling_free_skb:
+ kfree_skb(skb);
+ return err;
+}
+
+/**
+ * receive_bt_cmd_complete() - Wait for and handle a BT Command Complete event.
+ * @audio_user: Audio user to check for.
+ * @rsp: Opcode of BT command to wait for.
+ * @data: Pointer to buffer if any received data should be stored (except
+ * status).
+ * @data_len: Length of @data in bytes.
+ *
+ * This function first waits for BT Command Complete event (up to 5 seconds)
+ * and when one arrives, it checks that it is the one we are waiting for and
+ * also that no error has occurred.
+ * If @data is supplied it also copies received data into @data.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ECOMM if no response was received.
+ * -EIO for other errors.
+ */
+static int receive_bt_cmd_complete(struct audio_user *audio_user, u16 rsp,
+ void *data, int data_len)
+{
+ int err = 0;
+ int res;
+ struct sk_buff *skb;
+ struct bt_cmd_cmpl_event *evt;
+ u16 opcode;
+ struct audio_cb_info *cb_info;
+ struct audio_info *info;
+ struct cg2900_user_data *pf_data;
+
+ info = audio_user->info;
+ pf_data = dev_get_platdata(info->dev_bt);
+ cb_info = cg2900_get_usr(pf_data);
+
+ /*
+ * Wait for callback to receive command complete and then wake us up
+ * again.
+ */
+ res = wait_event_timeout(cb_info->wq,
+ audio_user->resp_state == RESP_RECEIVED,
+ msecs_to_jiffies(RESP_TIMEOUT));
+ if (!res) {
+ dev_err(BT_DEV, "Timeout while waiting for return packet\n");
+ return -ECOMM;
+ } else if (res < 0) {
+ /* We timed out or an error occurred */
+ dev_err(BT_DEV,
+ "Error %d occurred while waiting for return packet\n",
+ res);
+ return -ECOMM;
+ }
+
+ /* OK, now we should have received answer. Let's check it. */
+ skb = skb_dequeue_tail(&cb_info->skb_queue);
+ if (!skb) {
+ dev_err(BT_DEV, "No skb in queue when it should be there\n");
+ return -EIO;
+ }
+
+ evt = (struct bt_cmd_cmpl_event *)skb->data;
+ if (evt->eventcode != HCI_EV_CMD_COMPLETE) {
+ dev_err(BT_DEV,
+ "We did not receive the event we expected (0x%X)\n",
+ evt->eventcode);
+ err = -EIO;
+ goto error_handling_free_skb;
+ }
+
+ opcode = le16_to_cpu(evt->opcode);
+ if (opcode != rsp) {
+ dev_err(BT_DEV,
+ "Received cmd complete for unexpected command: "
+ "0x%04X\n", opcode);
+ err = -EIO;
+ goto error_handling_free_skb;
+ }
+
+ if (evt->status != HCI_BT_ERROR_NO_ERROR) {
+ dev_err(BT_DEV, "Received command complete with err %d\n",
+ evt->status);
+ err = -EIO;
+ /*
+		 * The data may contain a more detailed error code;
+		 * it is copied below if a buffer was supplied.
+ */
+ }
+
+ /*
+ * Copy the rest of the parameters if a buffer has been supplied.
+ * The caller must have set the length correctly.
+ */
+ if (data)
+ memcpy(data, evt->data, data_len);
+
+ /* Operation succeeded. We are now done */
+
+error_handling_free_skb:
+ kfree_skb(skb);
+ return err;
+}
+
+/**
+ * send_vs_delete_stream() - Delete an audio stream defined by @stream_handle.
+ * @audio_user: Audio user to check for.
+ * @stream_handle: Handle of the audio stream.
+ *
+ * This function is used to delete an audio stream defined by a stream
+ * handle.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ECOMM if no response was received.
+ * -ENOMEM upon allocation errors.
+ * Errors from @cg2900_write.
+ * -EIO for other errors.
+ */
+static int send_vs_delete_stream(struct audio_user *audio_user,
+ unsigned int stream_handle)
+{
+ int err = 0;
+ struct sk_buff *skb;
+ u16 opcode;
+ struct audio_info *info = audio_user->info;
+ struct cg2900_user_data *pf_data = dev_get_platdata(info->dev_bt);
+ struct audio_cb_info *cb_info = cg2900_get_usr(pf_data);
+
+ /* Now delete the stream - format command... */
+ if (info->revision == CHIP_REV_PG1) {
+ struct bt_vs_reset_session_cfg_cmd *cmd;
+
+ dev_dbg(BT_DEV, "BT: HCI_VS_Reset_Session_Configuration\n");
+
+ skb = pf_data->alloc_skb(sizeof(*cmd), GFP_KERNEL);
+ if (!skb) {
+ dev_err(BT_DEV, "Could not allocate skb\n");
+ err = -ENOMEM;
+ return err;
+ }
+
+ cmd = (struct bt_vs_reset_session_cfg_cmd *)
+ skb_put(skb, sizeof(*cmd));
+
+ opcode = CG2900_BT_VS_RESET_SESSION_CONFIG;
+ cmd->opcode = cpu_to_le16(opcode);
+ cmd->plen = BT_PARAM_LEN(sizeof(*cmd));
+ cmd->id = (u8)stream_handle;
+ } else {
+ struct mc_vs_delete_stream_cmd *cmd;
+
+ dev_dbg(BT_DEV, "BT: HCI_VS_Delete_Stream\n");
+
+ skb = pf_data->alloc_skb(sizeof(*cmd), GFP_KERNEL);
+ if (!skb) {
+ dev_err(BT_DEV, "Could not allocate skb\n");
+ err = -ENOMEM;
+ return err;
+ }
+
+ cmd = (struct mc_vs_delete_stream_cmd *)
+ skb_put(skb, sizeof(*cmd));
+
+ opcode = CG2900_MC_VS_DELETE_STREAM;
+ cmd->opcode = cpu_to_le16(opcode);
+ cmd->plen = BT_PARAM_LEN(sizeof(*cmd));
+ cmd->stream = (u8)stream_handle;
+ }
+
+ /* ...and send it */
+ cb_info->user = audio_user;
+ dev_dbg(BT_DEV, "New resp_state: WAITING\n");
+ audio_user->resp_state = WAITING;
+
+ err = pf_data->write(pf_data, skb);
+ if (err) {
+ dev_err(BT_DEV, "Error %d occurred while transmitting skb\n",
+ err);
+ goto error_handling_free_skb;
+ }
+
+ /* wait for response */
+ if (info->revision == CHIP_REV_PG1) {
+ err = receive_bt_cmd_complete(audio_user, opcode, NULL, 0);
+ } else {
+ u8 vs_err;
+
+		/* All commands in the PG2 API return one extra status byte */
+ err = receive_bt_cmd_complete(audio_user, opcode,
+ &vs_err, sizeof(vs_err));
+
+ if (err)
+ dev_err(BT_DEV,
+ "VS_DELETE_STREAM - failed with error 0x%02X\n",
+ vs_err);
+ else
+ release_stream_id(info, stream_handle);
+ }
+
+ return err;
+
+error_handling_free_skb:
+ kfree_skb(skb);
+ return err;
+}
+
+/**
+ * send_vs_session_ctrl() - Formats and sends a CG2900_BT_VS_SESSION_CTRL command.
+ * @user: Audio user this command belongs to.
+ * @stream_handle: Handle to stream.
+ * @command: Command to execute on stream, should be one of
+ * CG2900_BT_SESSION_START, CG2900_BT_SESSION_STOP,
+ * CG2900_BT_SESSION_PAUSE, CG2900_BT_SESSION_RESUME.
+ *
+ * Packs and sends a command packet and waits for the response. Must
+ * be called with the bt_mutex held.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if not possible to allocate packet.
+ * -ECOMM if no response was received.
+ * -EIO for other errors.
+ */
+static int send_vs_session_ctrl(struct audio_user *user,
+ u8 stream_handle, u8 command)
+{
+ int err = 0;
+ struct bt_vs_session_ctrl_cmd *pkt;
+ struct sk_buff *skb;
+ struct audio_cb_info *cb_info;
+ struct audio_info *info;
+ struct cg2900_user_data *pf_data;
+
+ info = user->info;
+ pf_data = dev_get_platdata(info->dev_bt);
+ cb_info = cg2900_get_usr(pf_data);
+
+ dev_dbg(BT_DEV, "BT: HCI_VS_Session_Control handle: %d cmd: %d\n",
+ stream_handle, command);
+
+ skb = pf_data->alloc_skb(sizeof(*pkt), GFP_KERNEL);
+ if (!skb) {
+ dev_err(BT_DEV,
+ "send_vs_session_ctrl: Could not allocate skb\n");
+ return -ENOMEM;
+ }
+
+ /* Enter data into the skb */
+ pkt = (struct bt_vs_session_ctrl_cmd *) skb_put(skb, sizeof(*pkt));
+
+ pkt->opcode = cpu_to_le16(CG2900_BT_VS_SESSION_CTRL);
+ pkt->plen = BT_PARAM_LEN(sizeof(*pkt));
+ pkt->id = stream_handle;
+ pkt->control = command; /* Start/stop etc */
+
+ cb_info->user = user;
+ dev_dbg(BT_DEV, "New resp_state: WAITING\n");
+ user->resp_state = WAITING;
+
+ /* Send packet to controller */
+ err = pf_data->write(pf_data, skb);
+ if (err) {
+ dev_err(BT_DEV, "Error %d occurred while transmitting skb\n",
+ err);
+ kfree_skb(skb);
+ goto finished;
+ }
+
+ err = receive_bt_cmd_complete(user, CG2900_BT_VS_SESSION_CTRL,
+ NULL, 0);
+finished:
+ dev_dbg(BT_DEV, "New resp_state: IDLE\n");
+ user->resp_state = IDLE;
+ return err;
+}
+
+/**
+ * send_vs_session_config() - Formats and sends a CG2900_BT_VS_SESSION_CONFIG command.
+ * @user: Audio user this command belongs to.
+ * @config_stream: Custom function for configuring the stream.
+ * @priv_data: Private data passed to @config_stream untouched.
+ *
+ * Packs and sends a command packet and waits for the response. Must
+ * be called with the bt_mutex held.
+ *
+ * Space is allocated for one stream and a custom function is used to
+ * fill in the stream configuration.
+ *
+ * Returns:
+ * 0-255 stream handle if no error.
+ * -ENOMEM if not possible to allocate packet.
+ * -ECOMM if no response was received.
+ * -EIO for other errors.
+ */
+static int send_vs_session_config(struct audio_user *user,
+ void(*config_stream)(struct audio_info *, void *,
+ struct session_config_stream *),
+ void *priv_data)
+{
+ int err = 0;
+ struct sk_buff *skb;
+ struct bt_vs_session_config_cmd *pkt;
+ u8 session_id;
+ struct audio_cb_info *cb_info;
+ struct audio_info *info;
+ struct cg2900_user_data *pf_data;
+
+ info = user->info;
+ pf_data = dev_get_platdata(info->dev_bt);
+ cb_info = cg2900_get_usr(pf_data);
+
+ dev_dbg(BT_DEV, "BT: HCI_VS_Set_Session_Configuration\n");
+
+ skb = pf_data->alloc_skb(sizeof(*pkt), GFP_KERNEL);
+ if (!skb) {
+ dev_err(BT_DEV,
+ "send_vs_session_config: Could not allocate skb\n");
+ return -ENOMEM;
+ }
+
+ pkt = (struct bt_vs_session_config_cmd *)skb_put(skb, sizeof(*pkt));
+ /* zero the packet so we don't have to set all reserved fields */
+ memset(pkt, 0, sizeof(*pkt));
+
+ /* Common parameters */
+ pkt->opcode = cpu_to_le16(CG2900_BT_VS_SET_SESSION_CONFIG);
+ pkt->plen = BT_PARAM_LEN(sizeof(*pkt));
+ pkt->n_streams = 1; /* 1 stream configuration supplied */
+
+ /* Let the custom-function fill in the rest */
+ config_stream(info, priv_data, &pkt->stream);
+
+ cb_info->user = user;
+ dev_dbg(BT_DEV, "New resp_state: WAITING\n");
+ user->resp_state = WAITING;
+
+ /* Send packet to controller */
+ err = pf_data->write(pf_data, skb);
+ if (err) {
+ dev_err(BT_DEV, "Error %d occurred while transmitting skb\n",
+ err);
+ kfree_skb(skb);
+ goto finished;
+ }
+
+ err = receive_bt_cmd_complete(user,
+ CG2900_BT_VS_SET_SESSION_CONFIG,
+ &session_id, sizeof(session_id));
+ /* Return session id/stream handle if success */
+ if (!err)
+ err = session_id;
+
+finished:
+ dev_dbg(BT_DEV, "New resp_state: IDLE\n");
+ user->resp_state = IDLE;
+ return err;
+}
+
+/**
+ * send_fm_write_1_param() - Formats and sends an FM legacy write command with one parameter.
+ * @user: Audio user this command belongs to.
+ * @command: Command.
+ * @param: Parameter for command.
+ *
+ * Packs and sends a command packet and waits for the response. Must
+ * be called with the fm_mutex held.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if not possible to allocate packet.
+ * -ECOMM if no response was received.
+ * -EIO for other errors.
+ */
+static int send_fm_write_1_param(struct audio_user *user,
+ u16 command, u16 param)
+{
+ int err = 0;
+ struct sk_buff *skb;
+ struct fm_leg_cmd *cmd;
+ size_t len;
+ struct audio_cb_info *cb_info;
+ struct audio_info *info;
+ struct cg2900_user_data *pf_data;
+
+ info = user->info;
+ pf_data = dev_get_platdata(info->dev_fm);
+ cb_info = cg2900_get_usr(pf_data);
+
+ dev_dbg(FM_DEV, "send_fm_write_1_param cmd 0x%X param 0x%X\n",
+ command, param);
+
+	/* base packet + one parameter */
+ len = sizeof(*cmd) + sizeof(cmd->fm_cmd.data[0]);
+
+ skb = pf_data->alloc_skb(len, GFP_KERNEL);
+ if (!skb) {
+ dev_err(FM_DEV,
+ "send_fm_write_1_param: Could not allocate skb\n");
+ return -ENOMEM;
+ }
+
+ cmd = (struct fm_leg_cmd *)skb_put(skb, len);
+
+ cmd->length = CG2900_FM_CMD_PARAM_LEN(len);
+ cmd->opcode = CG2900_FM_GEN_ID_LEGACY;
+ cmd->read_write = CG2900_FM_CMD_LEG_PARAM_WRITE;
+ cmd->fm_function = CG2900_FM_CMD_PARAM_WRITECOMMAND;
+ /* one parameter - builtin assumption for this function */
+ cmd->fm_cmd.head = cpu_to_le16(cg2900_make_fm_cmd_id(command, 1));
+ cmd->fm_cmd.data[0] = cpu_to_le16(param);
+
+ cb_info->user = user;
+ dev_dbg(FM_DEV, "New resp_state: WAITING\n");
+ user->resp_state = WAITING;
+
+ /* Send packet to controller */
+ err = pf_data->write(pf_data, skb);
+ if (err) {
+ dev_err(FM_DEV, "Error %d occurred while transmitting skb\n",
+ err);
+ kfree_skb(skb);
+ goto finished;
+ }
+
+ err = receive_fm_write_response(user, command);
+finished:
+ dev_dbg(FM_DEV, "New resp_state: IDLE\n");
+ user->resp_state = IDLE;
+ return err;
+}
+
+/**
+ * send_vs_stream_ctrl() - Formats and sends a CG2900_MC_VS_STREAM_CONTROL command.
+ * @user: Audio user this command belongs to.
+ * @stream: Stream id.
+ * @command: Start/stop etc.
+ *
+ * Packs and sends a command packet and waits for the response. Must
+ * be called with the bt_mutex held.
+ *
+ * While the HCI command allows for multiple streams in one command,
+ * this function only handles one.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if not possible to allocate packet.
+ * -ECOMM if no response was received.
+ * -EIO for other errors.
+ */
+static int send_vs_stream_ctrl(struct audio_user *user, u8 stream, u8 command)
+{
+ int err = 0;
+ struct sk_buff *skb;
+ struct mc_vs_stream_ctrl_cmd *cmd;
+ size_t len;
+ u8 vs_err;
+ struct audio_cb_info *cb_info;
+ struct audio_info *info;
+ struct cg2900_user_data *pf_data;
+
+ info = user->info;
+ pf_data = dev_get_platdata(info->dev_bt);
+ cb_info = cg2900_get_usr(pf_data);
+
+ dev_dbg(BT_DEV, "send_vs_stream_ctrl stream %d command %d\n", stream,
+ command);
+
+ /* basic length + one stream */
+ len = sizeof(*cmd) + sizeof(cmd->stream[0]);
+
+ skb = pf_data->alloc_skb(len, GFP_KERNEL);
+ if (!skb) {
+		dev_err(BT_DEV, "send_vs_stream_ctrl: Could not allocate skb\n");
+ return -ENOMEM;
+ }
+
+ cmd = (struct mc_vs_stream_ctrl_cmd *)skb_put(skb, len);
+
+ cmd->opcode = cpu_to_le16(CG2900_MC_VS_STREAM_CONTROL);
+ cmd->plen = BT_PARAM_LEN(len);
+ cmd->command = command;
+
+ /* one stream */
+ cmd->n_streams = 1;
+ cmd->stream[0] = stream;
+
+ cb_info->user = user;
+ dev_dbg(BT_DEV, "New resp_state: WAITING\n");
+ user->resp_state = WAITING;
+
+ /* Send packet to controller */
+ err = pf_data->write(pf_data, skb);
+ if (err) {
+ dev_err(BT_DEV, "Error %d occurred while transmitting skb\n",
+ err);
+ kfree_skb(skb);
+ goto finished;
+ }
+
+	/* All commands in the PG2 API return one extra status byte */
+ err = receive_bt_cmd_complete(user,
+ CG2900_MC_VS_STREAM_CONTROL,
+ &vs_err, sizeof(vs_err));
+ if (err)
+ dev_err(BT_DEV,
+ "VS_STREAM_CONTROL - failed with error 0x%02x\n",
+ vs_err);
+
+finished:
+ dev_dbg(BT_DEV, "New resp_state: IDLE\n");
+ user->resp_state = IDLE;
+ return err;
+}
+
+/**
+ * send_vs_create_stream() - Formats and sends a CG2900_MC_VS_CREATE_STREAM command.
+ * @user: Audio user this command belongs to.
+ * @inport:	Input port id.
+ * @outport:	Output port id.
+ * @order: Activation order.
+ *
+ * Packs and sends a command packet and waits for the response. Must
+ * be called with the bt_mutex held.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if not possible to allocate packet.
+ * -ECOMM if no response was received.
+ * -EIO for other errors.
+ */
+static int send_vs_create_stream(struct audio_user *user, u8 inport,
+ u8 outport, u8 order)
+{
+ int err = 0;
+ struct sk_buff *skb;
+ struct mc_vs_create_stream_cmd *cmd;
+ s8 id;
+ u8 vs_err;
+ struct audio_cb_info *cb_info;
+ struct audio_info *info;
+ struct cg2900_user_data *pf_data;
+
+ info = user->info;
+ pf_data = dev_get_platdata(info->dev_bt);
+ cb_info = cg2900_get_usr(pf_data);
+
+ dev_dbg(BT_DEV,
+ "send_vs_create_stream inport %d outport %d order %d\n",
+ inport, outport, order);
+
+ id = new_stream_id(info);
+ if (id < 0) {
+ dev_err(BT_DEV, "No free stream id\n");
+ err = -EIO;
+ goto finished;
+ }
+
+ skb = pf_data->alloc_skb(sizeof(*cmd), GFP_KERNEL);
+ if (!skb) {
+ dev_err(BT_DEV,
+ "send_vs_create_stream: Could not allocate skb\n");
+ err = -ENOMEM;
+ goto finished_release_id;
+ }
+
+ cmd = (struct mc_vs_create_stream_cmd *)skb_put(skb, sizeof(*cmd));
+
+ cmd->opcode = cpu_to_le16(CG2900_MC_VS_CREATE_STREAM);
+ cmd->plen = BT_PARAM_LEN(sizeof(*cmd));
+ cmd->id = (u8)id;
+ cmd->inport = inport;
+ cmd->outport = outport;
+ cmd->order = order;
+
+ cb_info->user = user;
+ dev_dbg(BT_DEV, "New resp_state: WAITING\n");
+ user->resp_state = WAITING;
+
+ /* Send packet to controller */
+ err = pf_data->write(pf_data, skb);
+ if (err) {
+ dev_err(BT_DEV, "Error %d occurred while transmitting skb\n",
+ err);
+ kfree_skb(skb);
+ goto finished_release_id;
+ }
+
+	/* All commands in the PG2 API return one extra status byte */
+ err = receive_bt_cmd_complete(user,
+ CG2900_MC_VS_CREATE_STREAM,
+ &vs_err, sizeof(vs_err));
+ if (err) {
+ dev_err(BT_DEV,
+ "VS_CREATE_STREAM - failed with error 0x%02x\n",
+ vs_err);
+ goto finished_release_id;
+ }
+
+ err = id;
+ goto finished;
+
+finished_release_id:
+ release_stream_id(info, id);
+finished:
+ dev_dbg(BT_DEV, "New resp_state: IDLE\n");
+ user->resp_state = IDLE;
+ return err;
+}
+
+/**
+ * send_vs_port_cfg() - Formats and sends a CG2900_MC_VS_PORT_CONFIG command.
+ * @user: Audio user this command belongs to.
+ * @port: Port id to configure.
+ * @cfg: Pointer to specific configuration.
+ * @cfglen: Length of configuration.
+ *
+ * Packs and sends a command packet and waits for the response. Must
+ * be called with the bt_mutex held.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if not possible to allocate packet.
+ * -ECOMM if no response was received.
+ * -EIO for other errors.
+ */
+static int send_vs_port_cfg(struct audio_user *user, u8 port,
+ const void *cfg, size_t cfglen)
+{
+ int err = 0;
+ struct sk_buff *skb;
+ struct mc_vs_port_cfg_cmd *cmd;
+ void *ptr;
+ u8 vs_err;
+ struct audio_cb_info *cb_info;
+ struct audio_info *info;
+ struct cg2900_user_data *pf_data;
+
+ info = user->info;
+ pf_data = dev_get_platdata(info->dev_bt);
+ cb_info = cg2900_get_usr(pf_data);
+
+	dev_dbg(BT_DEV, "send_vs_port_cfg len %zu\n", cfglen);
+
+ skb = pf_data->alloc_skb(sizeof(*cmd) + cfglen, GFP_KERNEL);
+ if (!skb) {
+ dev_err(BT_DEV, "send_vs_port_cfg: Could not allocate skb\n");
+ return -ENOMEM;
+ }
+
+ /* Fill in common part */
+ cmd = (struct mc_vs_port_cfg_cmd *) skb_put(skb, sizeof(*cmd));
+ cmd->opcode = cpu_to_le16(CG2900_MC_VS_PORT_CONFIG);
+ cmd->plen = BT_PARAM_LEN(sizeof(*cmd) + cfglen);
+ cmd->type = port;
+
+ /* Copy specific configuration */
+ ptr = skb_put(skb, cfglen);
+ memcpy(ptr, cfg, cfglen);
+
+ /* Send */
+ cb_info->user = user;
+ dev_dbg(BT_DEV, "New resp_state: WAITING\n");
+ user->resp_state = WAITING;
+
+ err = pf_data->write(pf_data, skb);
+ if (err) {
+ dev_err(BT_DEV, "Error %d occurred while transmitting skb\n",
+ err);
+ kfree_skb(skb);
+ goto finished;
+ }
+
+	/* All commands in the PG2 API return one extra status byte */
+ err = receive_bt_cmd_complete(user, CG2900_MC_VS_PORT_CONFIG,
+ &vs_err, sizeof(vs_err));
+ if (err)
+ dev_err(BT_DEV, "VS_PORT_CONFIG - failed with error 0x%02x\n",
+ vs_err);
+
+finished:
+ dev_dbg(BT_DEV, "New resp_state: IDLE\n");
+ user->resp_state = IDLE;
+ return err;
+}
+
+/**
+ * set_dai_config_pg1() - Internal implementation of @cg2900_audio_set_dai_config for PG1 hardware.
+ * @audio_user: Pointer to audio user struct.
+ * @config: Pointer to the configuration to set.
+ *
+ * Sets the Digital Audio Interface (DAI) configuration for PG1
+ * hardware. This is an internal function and basic
+ * argument-verification should have been done by the caller.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EACCES if port is not supported.
+ * -ENOMEM if not possible to allocate packet.
+ * -ECOMM if no response was received.
+ * -EIO for other errors.
+ */
+static int set_dai_config_pg1(struct audio_user *audio_user,
+ struct cg2900_dai_config *config)
+{
+ int err = 0;
+ struct cg2900_dai_conf_i2s_pcm *i2s_pcm;
+ struct sk_buff *skb = NULL;
+ struct bt_vs_set_hw_cfg_cmd_i2s *i2s_cmd;
+ struct bt_vs_set_hw_cfg_cmd_pcm *pcm_cmd;
+ struct audio_info *info = audio_user->info;
+ struct cg2900_user_data *pf_data = dev_get_platdata(info->dev_bt);
+ struct audio_cb_info *cb_info = cg2900_get_usr(pf_data);
+
+ dev_dbg(BT_DEV, "set_dai_config_pg1 port %d\n", config->port);
+
+ /*
+ * Use mutex to assure that only ONE command is sent at any time on
+ * each channel.
+ */
+ mutex_lock(&info->bt_mutex);
+
+ /* Allocate the sk_buffer. The length is actually a max length since
+ * length varies depending on logical transport.
+ */
+ skb = pf_data->alloc_skb(CG2900_BT_LEN_VS_SET_HARDWARE_CONFIG,
+ GFP_KERNEL);
+ if (!skb) {
+ dev_err(BT_DEV, "set_dai_config_pg1: Could not allocate skb\n");
+ err = -ENOMEM;
+ goto finished_unlock_mutex;
+ }
+
+ /* Fill in hci-command according to received configuration */
+ switch (config->port) {
+ case PORT_0_I2S:
+ i2s_cmd = (struct bt_vs_set_hw_cfg_cmd_i2s *)
+ skb_put(skb, sizeof(*i2s_cmd));
+
+ i2s_cmd->opcode = cpu_to_le16(CG2900_BT_VS_SET_HARDWARE_CONFIG);
+ i2s_cmd->plen = BT_PARAM_LEN(sizeof(*i2s_cmd));
+
+ i2s_cmd->vp_type = PORT_PROTOCOL_I2S;
+ i2s_cmd->port_id = 0x00; /* First/only I2S port */
+ i2s_cmd->half_period = config->conf.i2s.half_period;
+
+ i2s_cmd->master_slave = mc_i2s_role(config->conf.i2s.mode);
+
+ /* Store the new configuration */
+ mutex_lock(&info->management_mutex);
+ memcpy(&info->i2s_config, &config->conf.i2s,
+ sizeof(config->conf.i2s));
+ info->i2s_config_known = true;
+ mutex_unlock(&info->management_mutex);
+ break;
+
+ case PORT_1_I2S_PCM:
+ pcm_cmd = (struct bt_vs_set_hw_cfg_cmd_pcm *)
+ skb_put(skb, sizeof(*pcm_cmd));
+
+ pcm_cmd->opcode = cpu_to_le16(CG2900_BT_VS_SET_HARDWARE_CONFIG);
+ pcm_cmd->plen = BT_PARAM_LEN(sizeof(*pcm_cmd));
+
+ i2s_pcm = &config->conf.i2s_pcm;
+
+ /*
+ * PG1 chips don't support I2S over the PCM/I2S bus,
+ * and PG2 chips don't use this command
+ */
+ if (i2s_pcm->protocol != PORT_PROTOCOL_PCM) {
+ dev_err(BT_DEV,
+ "I2S not supported over the PCM/I2S bus\n");
+ err = -EACCES;
+ goto error_handling_free_skb;
+ }
+
+ pcm_cmd->vp_type = PORT_PROTOCOL_PCM;
+ pcm_cmd->port_id = 0x00; /* First/only PCM port */
+
+ HWCONFIG_PCM_SET_MODE(pcm_cmd, mc_pcm_role(i2s_pcm->mode));
+
+ HWCONFIG_PCM_SET_DIR(pcm_cmd, 0, i2s_pcm->slot_0_dir);
+ HWCONFIG_PCM_SET_DIR(pcm_cmd, 1, i2s_pcm->slot_1_dir);
+ HWCONFIG_PCM_SET_DIR(pcm_cmd, 2, i2s_pcm->slot_2_dir);
+ HWCONFIG_PCM_SET_DIR(pcm_cmd, 3, i2s_pcm->slot_3_dir);
+
+ pcm_cmd->bit_clock = i2s_pcm->clk;
+ pcm_cmd->frame_len =
+ cpu_to_le16(get_fs_duration(i2s_pcm->duration));
+
+ /* Store the new configuration */
+ mutex_lock(&info->management_mutex);
+ memcpy(&info->i2s_pcm_config, &config->conf.i2s_pcm,
+ sizeof(config->conf.i2s_pcm));
+ info->i2s_pcm_config_known = true;
+ mutex_unlock(&info->management_mutex);
+ break;
+
+ default:
+ dev_err(BT_DEV, "Unknown port configuration %d\n",
+ config->port);
+ err = -EACCES;
+ goto error_handling_free_skb;
+	}
+
+ cb_info->user = audio_user;
+ dev_dbg(BT_DEV, "New resp_state: WAITING\n");
+ audio_user->resp_state = WAITING;
+
+ /* Send packet to controller */
+ err = pf_data->write(pf_data, skb);
+ if (err) {
+ dev_err(BT_DEV, "Error %d occurred while transmitting skb\n",
+ err);
+ goto error_handling_free_skb;
+ }
+
+ err = receive_bt_cmd_complete(audio_user,
+ CG2900_BT_VS_SET_HARDWARE_CONFIG,
+ NULL, 0);
+
+ goto finished_unlock_mutex;
+
+error_handling_free_skb:
+ kfree_skb(skb);
+finished_unlock_mutex:
+ dev_dbg(BT_DEV, "New resp_state: IDLE\n");
+ audio_user->resp_state = IDLE;
+ mutex_unlock(&info->bt_mutex);
+ return err;
+}
+
+/**
+ * set_dai_config_pg2() - Internal implementation of @cg2900_audio_set_dai_config for PG2 hardware.
+ * @audio_user: Pointer to audio user struct.
+ * @config: Pointer to the configuration to set.
+ *
+ * Sets the Digital Audio Interface (DAI) configuration for PG2
+ * hardware. This is an internal function and basic
+ * argument-verification should have been done by the caller.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EACCES if port is not supported.
+ * -ENOMEM if not possible to allocate packet.
+ * -ECOMM if no response was received.
+ * -EIO for other errors.
+ */
+static int set_dai_config_pg2(struct audio_user *audio_user,
+ struct cg2900_dai_config *config)
+{
+ int err = 0;
+ struct cg2900_dai_conf_i2s *i2s;
+ struct cg2900_dai_conf_i2s_pcm *i2s_pcm;
+
+ struct mc_vs_port_cfg_i2s i2s_cfg;
+ struct mc_vs_port_cfg_pcm_i2s pcm_cfg;
+ struct audio_info *info = audio_user->info;
+
+ dev_dbg(BT_DEV, "set_dai_config_pg2 port %d\n", config->port);
+
+ /*
+ * Use mutex to assure that only ONE command is sent at any time on
+ * each channel.
+ */
+ mutex_lock(&info->bt_mutex);
+
+ switch (config->port) {
+ case PORT_0_I2S:
+ i2s = &config->conf.i2s;
+
+ memset(&i2s_cfg, 0, sizeof(i2s_cfg)); /* just to be safe */
+
+ /* master/slave */
+ PORTCFG_I2S_SET_ROLE(i2s_cfg, mc_i2s_role(i2s->mode));
+
+ PORTCFG_I2S_SET_HALFPERIOD(i2s_cfg, i2s->half_period);
+ PORTCFG_I2S_SET_CHANNELS(i2s_cfg,
+ mc_i2s_channel_select(i2s->channel_sel));
+ PORTCFG_I2S_SET_SRATE(i2s_cfg,
+ mc_i2s_sample_rate(i2s->sample_rate));
+ switch (i2s->word_width) {
+ case WORD_WIDTH_16:
+ PORTCFG_I2S_SET_WORDLEN(i2s_cfg, CG2900_MC_I2S_WORD_16);
+ break;
+ case WORD_WIDTH_32:
+ PORTCFG_I2S_SET_WORDLEN(i2s_cfg, CG2900_MC_I2S_WORD_32);
+ break;
+ }
+
+ /* Store the new configuration */
+ mutex_lock(&info->management_mutex);
+ memcpy(&(info->i2s_config), &(config->conf.i2s),
+ sizeof(config->conf.i2s));
+ info->i2s_config_known = true;
+ mutex_unlock(&info->management_mutex);
+
+ /* Send */
+ err = send_vs_port_cfg(audio_user, CG2900_MC_PORT_I2S,
+ &i2s_cfg, sizeof(i2s_cfg));
+ break;
+
+ case PORT_1_I2S_PCM:
+ i2s_pcm = &config->conf.i2s_pcm;
+
+ memset(&pcm_cfg, 0, sizeof(pcm_cfg)); /* just to be safe */
+
+ /* master/slave */
+ PORTCFG_PCM_SET_ROLE(pcm_cfg, mc_pcm_role(i2s_pcm->mode));
+
+ /* set direction for all 4 slots */
+ PORTCFG_PCM_SET_DIR(pcm_cfg, 0, i2s_pcm->slot_0_dir);
+ PORTCFG_PCM_SET_DIR(pcm_cfg, 1, i2s_pcm->slot_1_dir);
+ PORTCFG_PCM_SET_DIR(pcm_cfg, 2, i2s_pcm->slot_2_dir);
+ PORTCFG_PCM_SET_DIR(pcm_cfg, 3, i2s_pcm->slot_3_dir);
+
+ /* set used SCO slots, other use cases not supported atm */
+ PORTCFG_PCM_SET_SCO_USED(pcm_cfg, 0, i2s_pcm->slot_0_used);
+ PORTCFG_PCM_SET_SCO_USED(pcm_cfg, 1, i2s_pcm->slot_1_used);
+ PORTCFG_PCM_SET_SCO_USED(pcm_cfg, 2, i2s_pcm->slot_2_used);
+ PORTCFG_PCM_SET_SCO_USED(pcm_cfg, 3, i2s_pcm->slot_3_used);
+
+ /* slot starts */
+ pcm_cfg.slot_start[0] = i2s_pcm->slot_0_start;
+ pcm_cfg.slot_start[1] = i2s_pcm->slot_1_start;
+ pcm_cfg.slot_start[2] = i2s_pcm->slot_2_start;
+ pcm_cfg.slot_start[3] = i2s_pcm->slot_3_start;
+
+ /* audio/voice sample-rate ratio */
+ PORTCFG_PCM_SET_RATIO(pcm_cfg, i2s_pcm->ratio);
+
+ /* PCM or I2S mode */
+ PORTCFG_PCM_SET_MODE(pcm_cfg, i2s_pcm->protocol);
+
+ pcm_cfg.frame_len = i2s_pcm->duration;
+
+ PORTCFG_PCM_SET_BITCLK(pcm_cfg, i2s_pcm->clk);
+ PORTCFG_PCM_SET_SRATE(pcm_cfg,
+ mc_pcm_sample_rate(i2s_pcm->sample_rate));
+
+ /* Store the new configuration */
+ mutex_lock(&info->management_mutex);
+ memcpy(&(info->i2s_pcm_config), &(config->conf.i2s_pcm),
+ sizeof(config->conf.i2s_pcm));
+ info->i2s_pcm_config_known = true;
+ mutex_unlock(&info->management_mutex);
+
+ /* Send */
+ err = send_vs_port_cfg(audio_user, CG2900_MC_PORT_PCM_I2S,
+ &pcm_cfg, sizeof(pcm_cfg));
+ break;
+
+ default:
+ dev_err(BT_DEV, "Unknown port configuration %d\n",
+ config->port);
+ err = -EACCES;
+	}
+
+ mutex_unlock(&info->bt_mutex);
+ return err;
+}
+
+/**
+ * struct i2s_fm_stream_config_priv - Helper struct for configuring I2S-FM streams.
+ * @fm_config: FM endpoint configuration.
+ * @rx: true for FM-RX, false for FM-TX.
+ */
+struct i2s_fm_stream_config_priv {
+ struct cg2900_endpoint_config_fm *fm_config;
+ bool rx;
+};
+
+/**
+ * config_i2s_fm_stream() - Callback for @send_vs_session_config.
+ * @info: Audio info structure.
+ * @_priv: Pointer to a @i2s_fm_stream_config_priv struct.
+ * @cfg: Pointer to stream config block in command packet.
+ *
+ * Fills in stream configuration for I2S-FM RX/TX.
+ */
+
+static void config_i2s_fm_stream(struct audio_info *info, void *_priv,
+ struct session_config_stream *cfg)
+{
+ struct i2s_fm_stream_config_priv *priv = _priv;
+ struct session_config_vport *fm;
+ struct session_config_vport *i2s;
+
+ cfg->media_type = CG2900_BT_SESSION_MEDIA_TYPE_AUDIO;
+
+ if (info->i2s_config.channel_sel == CHANNEL_SELECTION_BOTH)
+ SESSIONCFG_SET_CHANNELS(cfg, CG2900_BT_MEDIA_CONFIG_STEREO);
+ else
+ SESSIONCFG_SET_CHANNELS(cfg, CG2900_BT_MEDIA_CONFIG_MONO);
+
+ SESSIONCFG_I2S_SET_SRATE(cfg,
+ session_config_sample_rate(priv->fm_config->sample_rate));
+
+ cfg->codec_type = CG2900_CODEC_TYPE_NONE;
+ /* codec mode and parameters not used */
+
+ if (priv->rx) {
+ fm = &cfg->inport; /* FM is input */
+ i2s = &cfg->outport; /* I2S is output */
+ } else {
+ i2s = &cfg->inport; /* I2S is input */
+ fm = &cfg->outport; /* FM is output */
+ }
+
+ fm->type = CG2900_BT_VP_TYPE_FM;
+
+ i2s->type = CG2900_BT_VP_TYPE_I2S;
+ i2s->i2s.index = CG2900_BT_SESSION_I2S_INDEX_I2S;
+ i2s->i2s.channel = info->i2s_config.channel_sel;
+}
+
+/**
+ * conn_start_i2s_to_fm_rx() - Start an audio stream connecting FM RX to I2S.
+ * @audio_user: Audio user to check for.
+ * @stream_handle: [out] Pointer where to store the stream handle.
+ *
+ * This function sets up an FM RX to I2S stream.
+ * It does this by first setting the output mode and then the configuration of
+ * the External Sample Rate Converter.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ECOMM if no response was received.
+ * -ENOMEM upon allocation errors.
+ * -EIO for other errors.
+ */
+static int conn_start_i2s_to_fm_rx(struct audio_user *audio_user,
+ unsigned int *stream_handle)
+{
+ int err = 0;
+ union cg2900_endpoint_config_union *fm_config;
+ struct audio_info *info = audio_user->info;
+
+ dev_dbg(FM_DEV, "conn_start_i2s_to_fm_rx\n");
+
+ fm_config = find_endpoint(ENDPOINT_FM_RX, &info->endpoints);
+ if (!fm_config) {
+ dev_err(FM_DEV, "FM RX not configured before stream start\n");
+ return -EIO;
+ }
+
+ if (!(info->i2s_config_known)) {
+ dev_err(FM_DEV,
+ "I2S DAI not configured before stream start\n");
+ return -EIO;
+ }
+
+ /*
+ * Use mutex to assure that only ONE command is sent at any
+ * time on each channel.
+ */
+ mutex_lock(&info->fm_mutex);
+ mutex_lock(&info->bt_mutex);
+
+ /*
+ * Now set the output mode of the External Sample Rate Converter by
+ * sending HCI_Write command with AUP_EXT_SetMode.
+ */
+ err = send_fm_write_1_param(audio_user,
+ CG2900_FM_CMD_ID_AUP_EXT_SET_MODE,
+ CG2900_FM_CMD_AUP_EXT_SET_MODE_PARALLEL);
+ if (err)
+ goto finished_unlock_mutex;
+
+ /*
+ * Now configure the External Sample Rate Converter by sending
+ * HCI_Write command with AUP_EXT_SetControl.
+ */
+ err = send_fm_write_1_param(
+ audio_user, CG2900_FM_CMD_ID_AUP_EXT_SET_CTRL,
+ fm_get_conversion(fm_config->fm.sample_rate));
+ if (err)
+ goto finished_unlock_mutex;
+
+ /* Set up the stream */
+ if (info->revision == CHIP_REV_PG1) {
+ struct i2s_fm_stream_config_priv stream_priv;
+
+ /* Now send HCI_VS_Set_Session_Configuration command */
+ stream_priv.fm_config = &fm_config->fm;
+ stream_priv.rx = true;
+ err = send_vs_session_config(audio_user, config_i2s_fm_stream,
+ &stream_priv);
+ } else {
+ struct mc_vs_port_cfg_fm fm_cfg;
+
+ memset(&fm_cfg, 0, sizeof(fm_cfg));
+
+ /* Configure port FM RX */
+ /* Expects 0-3 - same as user API - so no conversion needed */
+ PORTCFG_FM_SET_SRATE(fm_cfg, (u8)fm_config->fm.sample_rate);
+
+ err = send_vs_port_cfg(audio_user, CG2900_MC_PORT_FM_RX_1,
+ &fm_cfg, sizeof(fm_cfg));
+ if (err)
+ goto finished_unlock_mutex;
+
+ /* CreateStream */
+ err = send_vs_create_stream(audio_user,
+ CG2900_MC_PORT_FM_RX_1,
+ CG2900_MC_PORT_I2S,
+ 0); /* chip doesn't care */
+ }
+
+ if (err < 0)
+ goto finished_unlock_mutex;
+
+ /* Store the stream handle (used for start and stop stream) */
+ *stream_handle = (u8)err;
+ dev_dbg(FM_DEV, "stream_handle set to %d\n", *stream_handle);
+
+ /* Now start the stream */
+ if (info->revision == CHIP_REV_PG1)
+ err = send_vs_session_ctrl(audio_user, *stream_handle,
+ CG2900_BT_SESSION_START);
+ else
+ err = send_vs_stream_ctrl(audio_user, *stream_handle,
+ CG2900_MC_STREAM_START);
+	/* If the stream could not be started, delete it again */
+ if (err < 0) {
+ dev_dbg(BT_DEV, "Could not start a stream.");
+ (void)send_vs_delete_stream(audio_user, *stream_handle);
+ }
+
+finished_unlock_mutex:
+ dev_dbg(FM_DEV, "New resp_state: IDLE\n");
+ audio_user->resp_state = IDLE;
+ mutex_unlock(&info->bt_mutex);
+ mutex_unlock(&info->fm_mutex);
+ return err;
+}
+
+/**
+ * conn_start_i2s_to_fm_tx() - Start an audio stream connecting FM TX to I2S.
+ * @audio_user: Audio user to check for.
+ * @stream_handle: [out] Pointer where to store the stream handle.
+ *
+ * This function sets up an I2S to FM TX stream.
+ * It does this by first setting the Audio Input source and then setting the
+ * configuration and input source of the BT sample rate converter.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ECOMM if no response was received.
+ * -ENOMEM upon allocation errors.
+ * -EIO for other errors.
+ */
+static int conn_start_i2s_to_fm_tx(struct audio_user *audio_user,
+ unsigned int *stream_handle)
+{
+ int err = 0;
+ union cg2900_endpoint_config_union *fm_config;
+ struct audio_info *info = audio_user->info;
+
+ dev_dbg(FM_DEV, "conn_start_i2s_to_fm_tx\n");
+
+ fm_config = find_endpoint(ENDPOINT_FM_TX, &info->endpoints);
+ if (!fm_config) {
+ dev_err(FM_DEV, "FM TX not configured before stream start\n");
+ return -EIO;
+ }
+
+ if (!(info->i2s_config_known)) {
+ dev_err(FM_DEV,
+ "I2S DAI not configured before stream start\n");
+ return -EIO;
+ }
+
+ /*
+ * Use mutex to assure that only ONE command is sent at any time
+ * on each channel.
+ */
+ mutex_lock(&info->fm_mutex);
+ mutex_lock(&info->bt_mutex);
+
+ /*
+ * Select Audio Input Source by sending HCI_Write command with
+ * AIP_SetMode.
+ */
+ dev_dbg(FM_DEV, "FM: AIP_SetMode\n");
+ err = send_fm_write_1_param(audio_user, CG2900_FM_CMD_ID_AIP_SET_MODE,
+ CG2900_FM_CMD_AIP_SET_MODE_INPUT_DIG);
+ if (err)
+ goto finished_unlock_mutex;
+
+ /*
+ * Now configure the BT sample rate converter by sending HCI_Write
+ * command with AIP_BT_SetControl.
+ */
+ dev_dbg(FM_DEV, "FM: AIP_BT_SetControl\n");
+ err = send_fm_write_1_param(
+ audio_user, CG2900_FM_CMD_ID_AIP_BT_SET_CTRL,
+ fm_get_conversion(fm_config->fm.sample_rate));
+ if (err)
+ goto finished_unlock_mutex;
+
+ /*
+ * Now set input of the BT sample rate converter by sending HCI_Write
+ * command with AIP_BT_SetMode.
+ */
+ dev_dbg(FM_DEV, "FM: AIP_BT_SetMode\n");
+ err = send_fm_write_1_param(audio_user,
+ CG2900_FM_CMD_ID_AIP_BT_SET_MODE,
+ CG2900_FM_CMD_AIP_BT_SET_MODE_INPUT_PAR);
+ if (err)
+ goto finished_unlock_mutex;
+
+ /* Set up the stream */
+ if (info->revision == CHIP_REV_PG1) {
+ struct i2s_fm_stream_config_priv stream_priv;
+
+ /* Now send HCI_VS_Set_Session_Configuration command */
+ stream_priv.fm_config = &fm_config->fm;
+ stream_priv.rx = false;
+ err = send_vs_session_config(audio_user, config_i2s_fm_stream,
+ &stream_priv);
+ } else {
+ struct mc_vs_port_cfg_fm fm_cfg;
+
+ memset(&fm_cfg, 0, sizeof(fm_cfg));
+
+ /* Configure port FM TX */
+ /* Expects 0-3 - same as user API - so no conversion needed */
+ PORTCFG_FM_SET_SRATE(fm_cfg, (u8)fm_config->fm.sample_rate);
+
+ err = send_vs_port_cfg(audio_user, CG2900_MC_PORT_FM_TX,
+ &fm_cfg, sizeof(fm_cfg));
+ if (err)
+ goto finished_unlock_mutex;
+
+ /* CreateStream */
+ err = send_vs_create_stream(audio_user,
+ CG2900_MC_PORT_I2S,
+ CG2900_MC_PORT_FM_TX,
+ 0); /* chip doesn't care */
+ }
+
+ if (err < 0)
+ goto finished_unlock_mutex;
+
+ /* Store the stream handle (used for start and stop stream) */
+ *stream_handle = (u8)err;
+ dev_dbg(FM_DEV, "stream_handle set to %d\n", *stream_handle);
+
+ /* Now start the stream */
+ if (info->revision == CHIP_REV_PG1)
+ err = send_vs_session_ctrl(audio_user, *stream_handle,
+ CG2900_BT_SESSION_START);
+ else
+ err = send_vs_stream_ctrl(audio_user, *stream_handle,
+ CG2900_MC_STREAM_START);
+	/* If the stream could not be started, delete it again */
+	if (err < 0) {
+		dev_dbg(FM_DEV, "Could not start stream\n");
+		(void)send_vs_delete_stream(audio_user, *stream_handle);
+	}
+
+finished_unlock_mutex:
+ dev_dbg(FM_DEV, "New resp_state: IDLE\n");
+ audio_user->resp_state = IDLE;
+ mutex_unlock(&info->bt_mutex);
+ mutex_unlock(&info->fm_mutex);
+ return err;
+}
+
+/**
+ * config_pcm_sco_stream() - Callback for @send_vs_session_config.
+ * @info: Audio info structure.
+ * @_priv: Pointer to a @cg2900_endpoint_config_sco_in_out struct.
+ * @cfg: Pointer to stream config block in command packet.
+ *
+ * Fills in stream configuration for PCM-SCO.
+ */
+static void config_pcm_sco_stream(struct audio_info *info, void *_priv,
+ struct session_config_stream *cfg)
+{
+ struct cg2900_endpoint_config_sco_in_out *sco_ep = _priv;
+
+ cfg->media_type = CG2900_BT_SESSION_MEDIA_TYPE_AUDIO;
+
+ SESSIONCFG_SET_CHANNELS(cfg, CG2900_BT_MEDIA_CONFIG_MONO);
+ SESSIONCFG_I2S_SET_SRATE(cfg,
+ session_config_sample_rate(sco_ep->sample_rate));
+
+ cfg->codec_type = CG2900_CODEC_TYPE_NONE;
+ /* codec mode and parameters not used */
+
+ cfg->inport.type = CG2900_BT_VP_TYPE_BT_SCO;
+ cfg->inport.sco.acl_handle = cpu_to_le16(DEFAULT_ACL_HANDLE);
+
+ cfg->outport.type = CG2900_BT_VP_TYPE_PCM;
+ cfg->outport.pcm.index = CG2900_BT_SESSION_PCM_INDEX_PCM_I2S;
+
+ SESSIONCFG_PCM_SET_USED(cfg->outport, 0,
+ info->i2s_pcm_config.slot_0_used);
+ SESSIONCFG_PCM_SET_USED(cfg->outport, 1,
+ info->i2s_pcm_config.slot_1_used);
+ SESSIONCFG_PCM_SET_USED(cfg->outport, 2,
+ info->i2s_pcm_config.slot_2_used);
+ SESSIONCFG_PCM_SET_USED(cfg->outport, 3,
+ info->i2s_pcm_config.slot_3_used);
+
+ cfg->outport.pcm.slot_start[0] =
+ info->i2s_pcm_config.slot_0_start;
+ cfg->outport.pcm.slot_start[1] =
+ info->i2s_pcm_config.slot_1_start;
+ cfg->outport.pcm.slot_start[2] =
+ info->i2s_pcm_config.slot_2_start;
+ cfg->outport.pcm.slot_start[3] =
+ info->i2s_pcm_config.slot_3_start;
+}
+
+/**
+ * conn_start_pcm_to_sco() - Start an audio stream connecting Bluetooth (e)SCO to PCM_I2S.
+ * @audio_user: Audio user to check for.
+ * @stream_handle: [out] Pointer where to store the stream handle.
+ *
+ * This function sets up a stream between BT (e)SCO and PCM_I2S. It does this by
+ * first setting the Session configuration and then starting the Audio
+ * Stream.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ECOMM if no response was received.
+ * -ENOMEM upon allocation errors.
+ * Errors from @cg2900_write.
+ * -EIO for other errors.
+ */
+static int conn_start_pcm_to_sco(struct audio_user *audio_user,
+ unsigned int *stream_handle)
+{
+ int err = 0;
+ union cg2900_endpoint_config_union *bt_config;
+ struct audio_info *info = audio_user->info;
+
+ dev_dbg(BT_DEV, "conn_start_pcm_to_sco\n");
+
+ bt_config = find_endpoint(ENDPOINT_BT_SCO_INOUT, &info->endpoints);
+ if (!bt_config) {
+ dev_err(BT_DEV, "BT not configured before stream start\n");
+ return -EIO;
+ }
+
+ if (!(info->i2s_pcm_config_known)) {
+ dev_err(BT_DEV,
+ "I2S_PCM DAI not configured before stream start\n");
+ return -EIO;
+ }
+
+ /*
+ * Use mutex to assure that only ONE command is sent at any time on each
+ * channel.
+ */
+ mutex_lock(&info->bt_mutex);
+
+ /* Set up the stream */
+ if (info->revision == CHIP_REV_PG1) {
+ err = send_vs_session_config(audio_user, config_pcm_sco_stream,
+ &bt_config->sco);
+ } else {
+ struct mc_vs_port_cfg_sco sco_cfg;
+
+ /* zero codec params etc */
+ memset(&sco_cfg, 0, sizeof(sco_cfg));
+ sco_cfg.acl_id = DEFAULT_ACL_HANDLE;
+ PORTCFG_SCO_SET_CODEC(sco_cfg, CG2900_CODEC_TYPE_NONE);
+
+ err = send_vs_port_cfg(audio_user, CG2900_MC_PORT_BT_SCO,
+ &sco_cfg, sizeof(sco_cfg));
+ if (err)
+ goto finished_unlock_mutex;
+
+ /* CreateStream */
+ err = send_vs_create_stream(audio_user,
+ CG2900_MC_PORT_PCM_I2S,
+ CG2900_MC_PORT_BT_SCO,
+ 0); /* chip doesn't care */
+ }
+
+ if (err < 0)
+ goto finished_unlock_mutex;
+
+ /* Store the stream handle (used for start and stop stream) */
+ *stream_handle = (u8)err;
+ dev_dbg(BT_DEV, "stream_handle set to %d\n", *stream_handle);
+
+ /* Now start the stream */
+ if (info->revision == CHIP_REV_PG1)
+ err = send_vs_session_ctrl(audio_user, *stream_handle,
+ CG2900_BT_SESSION_START);
+ else
+ err = send_vs_stream_ctrl(audio_user, *stream_handle,
+ CG2900_MC_STREAM_START);
+	/* If the stream could not be started, delete it again */
+	if (err < 0) {
+		dev_dbg(BT_DEV, "Could not start stream\n");
+		(void)send_vs_delete_stream(audio_user, *stream_handle);
+	}
+
+finished_unlock_mutex:
+ dev_dbg(BT_DEV, "New resp_state: IDLE\n");
+ audio_user->resp_state = IDLE;
+ mutex_unlock(&info->bt_mutex);
+ return err;
+}
+
+/**
+ * conn_stop_stream() - Stops an audio stream defined by @stream_handle.
+ * @audio_user: Audio user to check for.
+ * @stream_handle: Handle of the audio stream.
+ *
+ * This function is used to stop an audio stream defined by a stream
+ * handle. It does this by first stopping the stream and then
+ * resetting the session/stream.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ECOMM if no response was received.
+ * -ENOMEM upon allocation errors.
+ * Errors from @cg2900_write.
+ * -EIO for other errors.
+ */
+static int conn_stop_stream(struct audio_user *audio_user,
+ unsigned int stream_handle)
+{
+ int err = 0;
+ struct audio_info *info = audio_user->info;
+
+ dev_dbg(BT_DEV, "conn_stop_stream handle %d\n", stream_handle);
+
+ /*
+ * Use mutex to assure that only ONE command is sent at any
+ * time on each channel.
+ */
+ mutex_lock(&info->bt_mutex);
+
+ /* Now stop the stream */
+ if (info->revision == CHIP_REV_PG1)
+ err = send_vs_session_ctrl(audio_user, stream_handle,
+ CG2900_BT_SESSION_STOP);
+ else
+ err = send_vs_stream_ctrl(audio_user, stream_handle,
+ CG2900_MC_STREAM_STOP);
+ if (err)
+ goto finished_unlock_mutex;
+
+ err = send_vs_delete_stream(audio_user, stream_handle);
+
+finished_unlock_mutex:
+ dev_dbg(BT_DEV, "New resp_state: IDLE\n");
+ audio_user->resp_state = IDLE;
+ mutex_unlock(&info->bt_mutex);
+ return err;
+}
+
+/**
+ * cg2900_audio_get_devices() - Returns connected CG2900 Audio devices.
+ * @devices: Array of CG2900 Audio devices.
+ * @size: Max number of devices in array.
+ *
+ * Returns:
+ * 0 if no devices exist.
+ * > 0: the number of devices stored in the array.
+ * -EINVAL upon bad input parameter.
+ */
+int cg2900_audio_get_devices(struct device *devices[], __u8 size)
+{
+ struct list_head *cursor;
+ struct audio_info *tmp;
+ int i = 0;
+
+ if (!size) {
+ pr_err("No space to insert devices into list\n");
+ return 0;
+ }
+
+ if (!devices) {
+ pr_err("NULL submitted as devices array\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * Go through the list of connected devices and store them in the
+	 * supplied array, up to a maximum of @size entries.
+	 */
+ list_for_each(cursor, &cg2900_audio_devices) {
+ tmp = list_entry(cursor, struct audio_info, list);
+ devices[i] = tmp->parent;
+ i++;
+ if (i == size)
+ break;
+ }
+ return i;
+}
+EXPORT_SYMBOL_GPL(cg2900_audio_get_devices);
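+
+/*
+ * Illustrative sketch only, not part of the original patch: how a kernel-space
+ * client could enumerate the connected controllers before opening a session.
+ * The array size of 4 is an arbitrary assumption.
+ */
+#if 0
+static struct device *example_pick_first_device(void)
+{
+	struct device *devs[4];
+	int nbr = cg2900_audio_get_devices(devs, ARRAY_SIZE(devs));
+
+	if (nbr <= 0)
+		return NULL;	/* No device connected or bad parameter */
+
+	return devs[0];
+}
+#endif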
+
+/**
+ * cg2900_audio_open() - Opens a session to the ST-Ericsson CG2900 Audio control interface.
+ * @session: [out] Address where to store the session identifier.
+ * Allocated by caller, must not be NULL.
+ * @parent: Parent device representing the CG2900 controller connected.
+ * If NULL is supplied the first available device is used.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EACCES if no info structure can be found.
+ * -EINVAL upon bad input parameter.
+ * -ENOMEM upon allocation failure.
+ * -EMFILE if no more user sessions could be opened.
+ * -EIO upon failure to register to CG2900.
+ * Error codes from get_info.
+ */
+int cg2900_audio_open(unsigned int *session, struct device *parent)
+{
+ int err = 0;
+ int i;
+ struct audio_info *info;
+ struct cg2900_user_data *pf_data_bt;
+ struct cg2900_user_data *pf_data_fm;
+
+ pr_debug("cg2900_audio_open");
+
+ info = get_info(parent);
+ if (!info) {
+ pr_err("No audio info exist");
+ return -EACCES;
+ } else if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ if (!session) {
+ pr_err("NULL supplied as session");
+ return -EINVAL;
+ }
+
+ mutex_lock(&info->management_mutex);
+
+ *session = 0;
+
+ /*
+ * First find a free session to use and allocate the session structure.
+ */
+ for (i = FIRST_USER;
+ i < MAX_NBR_OF_USERS && cg2900_audio_sessions[i];
+ i++)
+ ; /* Just loop until found or end reached */
+
+ if (i >= MAX_NBR_OF_USERS) {
+ pr_err("Couldn't find free user");
+ err = -EMFILE;
+ goto finished;
+ }
+
+ cg2900_audio_sessions[i] =
+ kzalloc(sizeof(*(cg2900_audio_sessions[0])), GFP_KERNEL);
+ if (!cg2900_audio_sessions[i]) {
+ pr_err("Could not allocate user");
+ err = -ENOMEM;
+ goto finished;
+ }
+ pr_debug("Found free session %d", i);
+ *session = i;
+ info->nbr_of_users_active++;
+
+ cg2900_audio_sessions[*session]->resp_state = IDLE;
+ cg2900_audio_sessions[*session]->session = *session;
+ cg2900_audio_sessions[*session]->info = info;
+
+ pf_data_bt = dev_get_platdata(info->dev_bt);
+ pf_data_fm = dev_get_platdata(info->dev_fm);
+
+ if (info->nbr_of_users_active == 1) {
+ struct cg2900_rev_data rev_data;
+
+ /*
+ * First user so register to CG2900 Core.
+ * First the BT audio device.
+ */
+ err = pf_data_bt->open(pf_data_bt);
+ if (err) {
+ dev_err(BT_DEV, "Failed to open BT audio channel\n");
+ goto error_handling;
+ }
+
+ /* Then the FM audio device */
+ err = pf_data_fm->open(pf_data_fm);
+ if (err) {
+ dev_err(FM_DEV, "Failed to open FM audio channel\n");
+ goto error_handling;
+ }
+
+ /* Read chip revision data */
+ if (!pf_data_bt->get_local_revision(pf_data_bt, &rev_data)) {
+ pr_err("Couldn't retrieve revision data");
+ err = -EIO;
+ goto error_handling;
+ }
+
+ /* Decode revision data */
+ switch (rev_data.revision) {
+ case CG2900_PG1_REV:
+ case CG2900_PG1_SPECIAL_REV:
+ info->revision = CHIP_REV_PG1;
+ break;
+
+ case CG2900_PG2_REV:
+ info->revision = CHIP_REV_PG2;
+ break;
+
+ default:
+ pr_err("Chip rev 0x%04X sub 0x%04X not supported",
+ rev_data.revision, rev_data.sub_version);
+ err = -EIO;
+ goto error_handling;
+ }
+
+ info->state = OPENED;
+ }
+
+ pr_info("Session %d opened", *session);
+
+ goto finished;
+
+error_handling:
+ if (pf_data_fm->opened)
+ pf_data_fm->close(pf_data_fm);
+ if (pf_data_bt->opened)
+ pf_data_bt->close(pf_data_bt);
+ info->nbr_of_users_active--;
+ kfree(cg2900_audio_sessions[*session]);
+ cg2900_audio_sessions[*session] = NULL;
+finished:
+ mutex_unlock(&info->management_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cg2900_audio_open);
+
+/**
+ * cg2900_audio_close() - Closes an opened session to the ST-Ericsson CG2900 audio control interface.
+ * @session: [in_out] Pointer to session identifier to close.
+ * Will be 0 after this call.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL upon bad input parameter.
+ * -EIO if driver has not been opened.
+ * -EACCES if the session has not been opened.
+ */
+int cg2900_audio_close(unsigned int *session)
+{
+ int err = 0;
+ struct audio_user *audio_user;
+ struct audio_info *info;
+ struct cg2900_user_data *pf_data_bt;
+ struct cg2900_user_data *pf_data_fm;
+
+ pr_debug("cg2900_audio_close");
+
+ if (!session) {
+ pr_err("NULL pointer supplied");
+ return -EINVAL;
+ }
+
+ audio_user = get_session_user(*session);
+ if (!audio_user) {
+ pr_err("Invalid session ID");
+ return -EINVAL;
+ }
+
+ info = audio_user->info;
+
+ if (info->state != OPENED) {
+ dev_err(BT_DEV, "Audio driver not open\n");
+ return -EIO;
+ }
+
+ mutex_lock(&info->management_mutex);
+
+ pf_data_bt = dev_get_platdata(info->dev_bt);
+ pf_data_fm = dev_get_platdata(info->dev_fm);
+
+ if (!cg2900_audio_sessions[*session]) {
+ dev_err(BT_DEV, "Session %d not opened\n", *session);
+ err = -EACCES;
+ goto err_unlock_mutex;
+ }
+
+ kfree(cg2900_audio_sessions[*session]);
+ cg2900_audio_sessions[*session] = NULL;
+
+ info->nbr_of_users_active--;
+ if (info->nbr_of_users_active == 0) {
+ /* No more sessions open. Close channels */
+ pf_data_fm->close(pf_data_fm);
+ pf_data_bt->close(pf_data_bt);
+ info->state = CLOSED;
+ }
+
+ dev_info(BT_DEV, "Session %d closed\n", *session);
+
+ *session = 0;
+
+err_unlock_mutex:
+ mutex_unlock(&info->management_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cg2900_audio_close);
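+
+/*
+ * Illustrative sketch only, not part of the original patch: the intended
+ * open/close pairing of the session API above. Passing NULL as parent makes
+ * the driver pick the first available controller.
+ */
+#if 0
+static int example_session_lifetime(void)
+{
+	unsigned int session;
+	int err;
+
+	err = cg2900_audio_open(&session, NULL);
+	if (err)
+		return err;
+
+	/* ... configure the DAI and endpoints, run streams ... */
+
+	return cg2900_audio_close(&session);
+}
+#endif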
+
+/**
+ * cg2900_audio_set_dai_config() - Sets the Digital Audio Interface configuration.
+ * @session: Session identifier this call is related to.
+ * @config: Pointer to the configuration to set.
+ * Allocated by caller, must not be NULL.
+ *
+ * Sets the Digital Audio Interface (DAI) configuration. The DAI is the
+ * external interface between the combo chip and the platform, for example
+ * the PCM or I2S interface.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL upon bad input parameter.
+ * -EIO if driver has not been opened.
+ * -ENOMEM upon allocation failure.
+ * -EACCES if trying to set unsupported configuration.
+ * Errors from @receive_bt_cmd_complete.
+ */
+int cg2900_audio_set_dai_config(unsigned int session,
+ struct cg2900_dai_config *config)
+{
+ int err = 0;
+ struct audio_user *audio_user;
+ struct audio_info *info;
+
+ pr_debug("cg2900_audio_set_dai_config session %d", session);
+
+ audio_user = get_session_user(session);
+ if (!audio_user)
+ return -EINVAL;
+
+ info = audio_user->info;
+
+ if (info->state != OPENED) {
+ dev_err(BT_DEV, "Audio driver not open\n");
+ return -EIO;
+ }
+
+ /* Different commands are used for PG1 and PG2 */
+ if (info->revision == CHIP_REV_PG1)
+ err = set_dai_config_pg1(audio_user, config);
+ else if (info->revision == CHIP_REV_PG2)
+ err = set_dai_config_pg2(audio_user, config);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(cg2900_audio_set_dai_config);
+
+/**
+ * cg2900_audio_get_dai_config() - Gets the current Digital Audio Interface configuration.
+ * @session: Session identifier this call is related to.
+ * @config: [out] Pointer to the configuration to get.
+ * Allocated by caller, must not be NULL.
+ *
+ * Gets the current Digital Audio Interface configuration. Currently this
+ * function can only be called after cg2900_audio_set_dai_config() has been
+ * called; there is no way to retrieve the static settings file parameters
+ * through this function.
+ * Note that the @port parameter within @config must be set when calling this
+ * function so that the ST-Ericsson CG2900 Audio driver will know which
+ * configuration to return.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL upon bad input parameter.
+ * -EIO if driver has not been opened or configuration has not been set.
+ */
+int cg2900_audio_get_dai_config(unsigned int session,
+ struct cg2900_dai_config *config)
+{
+ int err = 0;
+ struct audio_user *audio_user;
+ struct audio_info *info;
+
+ pr_debug("cg2900_audio_get_dai_config session %d", session);
+
+ if (!config) {
+ pr_err("NULL supplied as config structure");
+ return -EINVAL;
+ }
+
+ audio_user = get_session_user(session);
+ if (!audio_user)
+ return -EINVAL;
+
+ info = audio_user->info;
+
+ if (info->state != OPENED) {
+ dev_err(BT_DEV, "Audio driver not open\n");
+ return -EIO;
+ }
+
+ /*
+ * Return DAI configuration based on the received port.
+ * If port has not been configured return error.
+ */
+ switch (config->port) {
+ case PORT_0_I2S:
+ mutex_lock(&info->management_mutex);
+ if (info->i2s_config_known)
+ memcpy(&config->conf.i2s,
+ &info->i2s_config,
+ sizeof(config->conf.i2s));
+ else
+ err = -EIO;
+ mutex_unlock(&info->management_mutex);
+ break;
+
+ case PORT_1_I2S_PCM:
+ mutex_lock(&info->management_mutex);
+ if (info->i2s_pcm_config_known)
+ memcpy(&config->conf.i2s_pcm,
+ &info->i2s_pcm_config,
+ sizeof(config->conf.i2s_pcm));
+ else
+ err = -EIO;
+ mutex_unlock(&info->management_mutex);
+ break;
+
+ default:
+ dev_err(BT_DEV, "Unknown port configuration %d\n",
+ config->port);
+ err = -EIO;
+ break;
+	}
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(cg2900_audio_get_dai_config);
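+
+/*
+ * Illustrative sketch only, not part of the original patch: reading back the
+ * I2S configuration previously set with cg2900_audio_set_dai_config(). Only
+ * the port field is filled in here; the union inside struct cg2900_dai_config
+ * is left zeroed since its exact field layout is defined in the public audio
+ * header and not shown in this file.
+ */
+#if 0
+static int example_get_i2s_conf(unsigned int session)
+{
+	struct cg2900_dai_config config;
+
+	memset(&config, 0, sizeof(config));
+	config.port = PORT_0_I2S;	/* Port to query */
+
+	return cg2900_audio_get_dai_config(session, &config);
+}
+#endif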
+
+/**
+ * cg2900_audio_config_endpoint() - Configures one endpoint in the combo chip's audio system.
+ * @session: Session identifier this call is related to.
+ * @config: Pointer to the endpoint's configuration structure.
+ *
+ * Configures one endpoint in the combo chip's audio system.
+ * Supported @endpoint_id values are:
+ * * ENDPOINT_BT_SCO_INOUT
+ * * ENDPOINT_BT_A2DP_SRC
+ * * ENDPOINT_FM_RX
+ * * ENDPOINT_FM_TX
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL upon bad input parameter.
+ * -EIO if driver has not been opened.
+ * -EACCES if the supplied cg2900_endpoint_config struct contains an
+ * unsupported endpoint_id.
+ */
+int cg2900_audio_config_endpoint(unsigned int session,
+ struct cg2900_endpoint_config *config)
+{
+ struct audio_user *audio_user;
+ struct audio_info *info;
+
+ pr_debug("cg2900_audio_config_endpoint\n");
+
+ if (!config) {
+ pr_err("NULL supplied as configuration structure");
+ return -EINVAL;
+ }
+
+ audio_user = get_session_user(session);
+ if (!audio_user)
+ return -EINVAL;
+
+ info = audio_user->info;
+
+ if (info->state != OPENED) {
+ dev_err(BT_DEV, "Audio driver not open\n");
+ return -EIO;
+ }
+
+ switch (config->endpoint_id) {
+ case ENDPOINT_BT_SCO_INOUT:
+ case ENDPOINT_BT_A2DP_SRC:
+ case ENDPOINT_FM_RX:
+ case ENDPOINT_FM_TX:
+ add_endpoint(config, &info->endpoints);
+ break;
+
+ case ENDPOINT_PORT_0_I2S:
+ case ENDPOINT_PORT_1_I2S_PCM:
+ case ENDPOINT_SLIMBUS_VOICE:
+ case ENDPOINT_SLIMBUS_AUDIO:
+ case ENDPOINT_BT_A2DP_SNK:
+ case ENDPOINT_ANALOG_OUT:
+ case ENDPOINT_DSP_AUDIO_IN:
+ case ENDPOINT_DSP_AUDIO_OUT:
+ case ENDPOINT_DSP_VOICE_IN:
+ case ENDPOINT_DSP_VOICE_OUT:
+ case ENDPOINT_DSP_TONE_IN:
+ case ENDPOINT_BURST_BUFFER_IN:
+ case ENDPOINT_BURST_BUFFER_OUT:
+ case ENDPOINT_MUSIC_DECODER:
+ case ENDPOINT_HCI_AUDIO_IN:
+ default:
+ dev_err(BT_DEV, "Unsupported endpoint_id %d\n",
+ config->endpoint_id);
+ return -EACCES;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cg2900_audio_config_endpoint);
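+
+/*
+ * Illustrative sketch only, not part of the original patch: configuring the
+ * FM RX endpoint before a stream is started. Only endpoint_id is set; the
+ * endpoint-specific configuration inside struct cg2900_endpoint_config is
+ * left zeroed here since its field names are not shown in this file.
+ */
+#if 0
+static int example_config_fm_rx(unsigned int session)
+{
+	struct cg2900_endpoint_config config;
+
+	memset(&config, 0, sizeof(config));
+	config.endpoint_id = ENDPOINT_FM_RX;
+
+	return cg2900_audio_config_endpoint(session, &config);
+}
+#endif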
+
+static bool is_dai_port(enum cg2900_audio_endpoint_id ep)
+{
+ /* These are the only supported ones */
+ return (ep == ENDPOINT_PORT_0_I2S) || (ep == ENDPOINT_PORT_1_I2S_PCM);
+}
+
+/**
+ * cg2900_audio_start_stream() - Connects two endpoints and starts the audio stream.
+ * @session: Session identifier this call is related to.
+ * @ep_1: One of the endpoints, no relation to direction or role.
+ * @ep_2: The other endpoint, no relation to direction or role.
+ * @stream_handle: Pointer where to store the stream handle.
+ * Allocated by caller, must not be NULL.
+ *
+ * Connects two endpoints and starts the audio stream.
+ * Note that the endpoints need to be configured before the stream is started;
+ * DAI endpoints, such as ENDPOINT_PORT_0_I2S, are
+ * configured through @cg2900_audio_set_dai_config() while other
+ * endpoints are configured through @cg2900_audio_config_endpoint().
+ *
+ * Supported @endpoint_id values are:
+ * * ENDPOINT_PORT_0_I2S
+ * * ENDPOINT_PORT_1_I2S_PCM
+ * * ENDPOINT_BT_SCO_INOUT
+ * * ENDPOINT_FM_RX
+ * * ENDPOINT_FM_TX
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL upon bad input parameter or unsupported configuration.
+ * -EIO if driver has not been opened.
+ * Errors from @conn_start_i2s_to_fm_rx, @conn_start_i2s_to_fm_tx, and
+ * @conn_start_pcm_to_sco.
+ */
+int cg2900_audio_start_stream(unsigned int session,
+ enum cg2900_audio_endpoint_id ep_1,
+ enum cg2900_audio_endpoint_id ep_2,
+ unsigned int *stream_handle)
+{
+ int err;
+ struct audio_user *audio_user;
+ struct audio_info *info;
+
+ pr_debug("cg2900_audio_start_stream session %d ep_1 %d ep_2 %d",
+ session, ep_1, ep_2);
+
+ audio_user = get_session_user(session);
+ if (!audio_user)
+ return -EINVAL;
+
+ info = audio_user->info;
+
+ if (info->state != OPENED) {
+ dev_err(BT_DEV, "Audio driver not open\n");
+ return -EIO;
+ }
+
+ /* Put digital interface in ep_1 to simplify comparison below */
+ if (!is_dai_port(ep_1)) {
+ /* Swap endpoints */
+ enum cg2900_audio_endpoint_id t = ep_1;
+ ep_1 = ep_2;
+ ep_2 = t;
+ }
+
+ if (ep_1 == ENDPOINT_PORT_1_I2S_PCM && ep_2 == ENDPOINT_BT_SCO_INOUT) {
+ err = conn_start_pcm_to_sco(audio_user, stream_handle);
+ } else if (ep_1 == ENDPOINT_PORT_0_I2S && ep_2 == ENDPOINT_FM_RX) {
+ err = conn_start_i2s_to_fm_rx(audio_user, stream_handle);
+ } else if (ep_1 == ENDPOINT_PORT_0_I2S && ep_2 == ENDPOINT_FM_TX) {
+ err = conn_start_i2s_to_fm_tx(audio_user, stream_handle);
+ } else {
+ dev_err(BT_DEV, "Endpoint config not handled: ep1: %d, "
+ "ep2: %d\n", ep_1, ep_2);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(cg2900_audio_start_stream);
+
+/**
+ * cg2900_audio_stop_stream() - Stops a stream and disconnects the endpoints.
+ * @session: Session identifier this call is related to.
+ * @stream_handle: Handle to the stream to stop.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL upon bad input parameter.
+ * -EIO if driver has not been opened.
+ */
+int cg2900_audio_stop_stream(unsigned int session, unsigned int stream_handle)
+{
+ struct audio_user *audio_user;
+ struct audio_info *info;
+
+ pr_debug("cg2900_audio_stop_stream handle %d", stream_handle);
+
+ audio_user = get_session_user(session);
+ if (!audio_user)
+ return -EINVAL;
+
+ info = audio_user->info;
+
+ if (info->state != OPENED) {
+ dev_err(BT_DEV, "Audio driver not open\n");
+ return -EIO;
+ }
+
+ return conn_stop_stream(audio_user, stream_handle);
+}
+EXPORT_SYMBOL_GPL(cg2900_audio_stop_stream);
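+
+/*
+ * Illustrative sketch only, not part of the original patch: connecting the
+ * I2S port to FM RX and tearing the stream down again. This assumes the DAI
+ * and the FM RX endpoint have already been configured as sketched above.
+ */
+#if 0
+static int example_run_fm_rx_stream(unsigned int session)
+{
+	unsigned int handle;
+	int err;
+
+	err = cg2900_audio_start_stream(session, ENDPOINT_PORT_0_I2S,
+					ENDPOINT_FM_RX, &handle);
+	if (err)
+		return err;
+
+	/* ... audio now flows from FM RX out on the I2S port ... */
+
+	return cg2900_audio_stop_stream(session, handle);
+}
+#endif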
+
+/**
+ * audio_dev_open() - Open char device.
+ * @inode: Device driver information.
+ * @filp: Pointer to the file struct.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL if the device cannot be identified.
+ * -ENOMEM if allocation failed.
+ * Errors from @cg2900_audio_open.
+ */
+static int audio_dev_open(struct inode *inode, struct file *filp)
+{
+ int err;
+ struct char_dev_info *char_dev_info;
+ int minor;
+ struct audio_info *info = NULL;
+ struct audio_info *tmp;
+ struct list_head *cursor;
+
+ pr_debug("audio_dev_open");
+
+ minor = iminor(inode);
+
+ /* Find the info struct for this file */
+ list_for_each(cursor, &cg2900_audio_devices) {
+ tmp = list_entry(cursor, struct audio_info, list);
+ if (tmp->misc_dev.minor == minor) {
+ info = tmp;
+ break;
+ }
+ }
+ if (!info) {
+ pr_err("Could not identify device in inode");
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate the char dev info structure. It will be stored inside
+ * the file pointer and supplied when file_ops are called.
+ * It's free'd in audio_dev_release.
+ */
+ char_dev_info = kzalloc(sizeof(*char_dev_info), GFP_KERNEL);
+ if (!char_dev_info) {
+ dev_err(BT_DEV, "Couldn't allocate char_dev_info\n");
+ return -ENOMEM;
+ }
+ filp->private_data = char_dev_info;
+ char_dev_info->info = info;
+ info->filp = filp;
+
+ mutex_init(&char_dev_info->management_mutex);
+ mutex_init(&char_dev_info->rw_mutex);
+ skb_queue_head_init(&char_dev_info->rx_queue);
+
+ mutex_lock(&char_dev_info->management_mutex);
+ err = cg2900_audio_open(&char_dev_info->session, info->dev_bt->parent);
+ mutex_unlock(&char_dev_info->management_mutex);
+ if (err) {
+ dev_err(BT_DEV, "Failed to open CG2900 Audio driver (%d)\n",
+ err);
+ goto error_handling_free_mem;
+ }
+
+ return 0;
+
+error_handling_free_mem:
+ kfree(char_dev_info);
+ filp->private_data = NULL;
+ return err;
+}
+
+/**
+ * audio_dev_release() - Release char device.
+ * @inode: Device driver information.
+ * @filp: Pointer to the file struct.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EBADF if NULL pointer was supplied in private data.
+ * Errors from @cg2900_audio_close.
+ */
+static int audio_dev_release(struct inode *inode, struct file *filp)
+{
+ int err = 0;
+ struct char_dev_info *dev = filp->private_data;
+ struct audio_info *info;
+
+ if (!dev) {
+ pr_err("audio_dev_release: Transport closed");
+ return -EBADF;
+ }
+
+ info = dev->info;
+
+ dev_dbg(BT_DEV, "audio_dev_release\n");
+
+ mutex_lock(&dev->management_mutex);
+ err = cg2900_audio_close(&dev->session);
+ if (err)
+ /*
+ * Just print the error. Still free the char_dev_info since we
+ * don't know the filp structure is valid after this call
+ */
+ dev_err(BT_DEV, "Error %d when closing CG2900 audio driver\n",
+ err);
+
+ mutex_unlock(&dev->management_mutex);
+
+ kfree(dev);
+ filp->private_data = NULL;
+ info->filp = NULL;
+
+ return err;
+}
+
+/**
+ * audio_dev_read() - Return information to the user from last @write call.
+ * @filp: Pointer to the file struct.
+ * @buf: Received buffer.
+ * @count: Size of buffer.
+ * @f_pos: Position in buffer.
+ *
+ * The audio_dev_read() function returns information from
+ * the last @write call to the same char device.
+ * The data is in the following format:
+ * * OpCode of command for this data
+ * * Data content (Length of data is determined by the command OpCode, i.e.
+ * fixed for each command)
+ *
+ * Returns:
+ * Bytes successfully read (could be 0).
+ * -EBADF if NULL pointer was supplied in private data.
+ * -EFAULT if copy_to_user fails.
+ * -ENOMEM upon allocation failure.
+ */
+static ssize_t audio_dev_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct char_dev_info *dev = filp->private_data;
+ struct audio_info *info;
+ unsigned int bytes_to_copy;
+ int err = 0;
+ struct sk_buff *skb;
+
+ if (!dev) {
+ pr_err("audio_dev_read: Transport closed");
+ return -EBADF;
+ }
+
+ info = dev->info;
+
+ dev_dbg(BT_DEV, "audio_dev_read count %d\n", count);
+
+ mutex_lock(&dev->rw_mutex);
+
+ skb = skb_dequeue(&dev->rx_queue);
+ if (!skb) {
+ /* No data to read */
+ bytes_to_copy = 0;
+ goto finished;
+ }
+
+ bytes_to_copy = min(count, (unsigned int)(skb->len));
+
+ err = copy_to_user(buf, skb->data, bytes_to_copy);
+ if (err) {
+ dev_err(BT_DEV, "copy_to_user error %d\n", err);
+ skb_queue_head(&dev->rx_queue, skb);
+ err = -EFAULT;
+ goto error_handling;
+ }
+
+ skb_pull(skb, bytes_to_copy);
+
+ if (skb->len > 0)
+ skb_queue_head(&dev->rx_queue, skb);
+ else
+ kfree_skb(skb);
+
+ goto finished;
+
+error_handling:
+ mutex_unlock(&dev->rw_mutex);
+ return (ssize_t)err;
+finished:
+ mutex_unlock(&dev->rw_mutex);
+ return bytes_to_copy;
+}
+
+/**
+ * audio_dev_write() - Call CG2900 Audio API function.
+ * @filp: Pointer to the file struct.
+ * @buf: Write buffer.
+ * @count: Size of the buffer write.
+ * @f_pos: Position of buffer.
+ *
+ * audio_dev_write() function executes supplied data and
+ * interprets it as if it was a function call to the CG2900 Audio API.
+ * The data is according to:
+ * * OpCode (4 bytes, see API).
+ * * Data according to OpCode (see API). No padding between parameters.
+ *
+ * Returns:
+ * Bytes successfully written (could be 0). Equals input @count if successful.
+ * -EBADF if NULL pointer was supplied in private data.
+ * -EFAULT if copy_from_user fails.
+ * Error codes from all CG2900 Audio API functions.
+ */
+static ssize_t audio_dev_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ u8 *rec_data;
+ struct char_dev_info *dev = filp->private_data;
+ struct audio_info *info;
+ int err = 0;
+ int op_code = 0;
+ u8 *curr_data;
+ unsigned int stream_handle;
+ struct cg2900_dai_config dai_config;
+ struct cg2900_endpoint_config ep_config;
+ enum cg2900_audio_endpoint_id ep_1;
+ enum cg2900_audio_endpoint_id ep_2;
+ int bytes_left = count;
+
+ pr_debug("audio_dev_write count %d", count);
+
+ if (!dev) {
+ pr_err("audio_dev_write: Transport closed");
+ return -EBADF;
+ }
+ info = dev->info;
+
+ rec_data = kmalloc(count, GFP_KERNEL);
+ if (!rec_data) {
+ dev_err(BT_DEV, "kmalloc failed (%d bytes)\n", count);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dev->rw_mutex);
+
+ err = copy_from_user(rec_data, buf, count);
+ if (err) {
+ dev_err(BT_DEV, "copy_from_user failed (%d)\n", err);
+ err = -EFAULT;
+ goto finished_mutex_unlock;
+ }
+
+ /* Initialize temporary data pointer used to traverse the packet */
+ curr_data = rec_data;
+
+ op_code = curr_data[0];
+ /* OpCode is int size to keep data int aligned */
+ curr_data += sizeof(unsigned int);
+ bytes_left -= sizeof(unsigned int);
+
+ switch (op_code) {
+ case CG2900_OPCODE_SET_DAI_CONF:
+ if (bytes_left < sizeof(dai_config)) {
+ dev_err(BT_DEV, "Not enough data supplied for "
+ "CG2900_OPCODE_SET_DAI_CONF\n");
+ err = -EINVAL;
+ goto finished_mutex_unlock;
+ }
+ memcpy(&dai_config, curr_data, sizeof(dai_config));
+ dev_dbg(BT_DEV, "CG2900_OPCODE_SET_DAI_CONF port %d\n",
+ dai_config.port);
+ err = cg2900_audio_set_dai_config(dev->session, &dai_config);
+ break;
+
+ case CG2900_OPCODE_GET_DAI_CONF:
+ if (bytes_left < sizeof(dai_config)) {
+ dev_err(BT_DEV, "Not enough data supplied for "
+ "CG2900_OPCODE_GET_DAI_CONF\n");
+ err = -EINVAL;
+ goto finished_mutex_unlock;
+ }
+		/*
+		 * Only the port field is actually needed, but copy the whole
+		 * struct for simplicity; this is test functionality only.
+		 */
+ memcpy(&dai_config, curr_data, sizeof(dai_config));
+ dev_dbg(BT_DEV, "CG2900_OPCODE_GET_DAI_CONF port %d\n",
+ dai_config.port);
+ err = cg2900_audio_get_dai_config(dev->session, &dai_config);
+ if (!err) {
+ int len;
+ struct sk_buff *skb;
+
+ /*
+ * Command succeeded. Store data so it can be returned
+ * when calling read.
+ */
+ len = sizeof(op_code) + sizeof(dai_config);
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb) {
+ dev_err(BT_DEV, "CG2900_OPCODE_GET_DAI_CONF: "
+ "Could not allocate skb\n");
+ err = -ENOMEM;
+ goto finished_mutex_unlock;
+ }
+ memcpy(skb_put(skb, sizeof(op_code)), &op_code,
+ sizeof(op_code));
+ memcpy(skb_put(skb, sizeof(dai_config)),
+ &dai_config, sizeof(dai_config));
+ skb_queue_tail(&dev->rx_queue, skb);
+ }
+ break;
+
+ case CG2900_OPCODE_CONFIGURE_ENDPOINT:
+ if (bytes_left < sizeof(ep_config)) {
+ dev_err(BT_DEV, "Not enough data supplied for "
+ "CG2900_OPCODE_CONFIGURE_ENDPOINT\n");
+ err = -EINVAL;
+ goto finished_mutex_unlock;
+ }
+ memcpy(&ep_config, curr_data, sizeof(ep_config));
+ dev_dbg(BT_DEV, "CG2900_OPCODE_CONFIGURE_ENDPOINT ep_id %d\n",
+ ep_config.endpoint_id);
+ err = cg2900_audio_config_endpoint(dev->session, &ep_config);
+ break;
+
+ case CG2900_OPCODE_START_STREAM:
+ if (bytes_left < (sizeof(ep_1) + sizeof(ep_2))) {
+ dev_err(BT_DEV, "Not enough data supplied for "
+ "CG2900_OPCODE_START_STREAM\n");
+ err = -EINVAL;
+ goto finished_mutex_unlock;
+ }
+ memcpy(&ep_1, curr_data, sizeof(ep_1));
+ curr_data += sizeof(ep_1);
+ memcpy(&ep_2, curr_data, sizeof(ep_2));
+ dev_dbg(BT_DEV, "CG2900_OPCODE_START_STREAM ep_1 %d ep_2 %d\n",
+ ep_1, ep_2);
+
+ err = cg2900_audio_start_stream(dev->session,
+ ep_1, ep_2, &stream_handle);
+ if (!err) {
+ int len;
+ struct sk_buff *skb;
+
+ /*
+ * Command succeeded. Store data so it can be returned
+ * when calling read.
+ */
+ len = sizeof(op_code) + sizeof(stream_handle);
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb) {
+ dev_err(BT_DEV, "CG2900_OPCODE_START_STREAM: "
+ "Could not allocate skb\n");
+ err = -ENOMEM;
+ goto finished_mutex_unlock;
+ }
+ memcpy(skb_put(skb, sizeof(op_code)), &op_code,
+ sizeof(op_code));
+ memcpy(skb_put(skb, sizeof(stream_handle)),
+ &stream_handle, sizeof(stream_handle));
+ skb_queue_tail(&dev->rx_queue, skb);
+
+ dev_dbg(BT_DEV, "stream_handle %d\n", stream_handle);
+ }
+ break;
+
+ case CG2900_OPCODE_STOP_STREAM:
+ if (bytes_left < sizeof(stream_handle)) {
+ dev_err(BT_DEV, "Not enough data supplied for "
+ "CG2900_OPCODE_STOP_STREAM\n");
+ err = -EINVAL;
+ goto finished_mutex_unlock;
+ }
+ memcpy(&stream_handle, curr_data, sizeof(stream_handle));
+ dev_dbg(BT_DEV, "CG2900_OPCODE_STOP_STREAM stream_handle %d\n",
+ stream_handle);
+ err = cg2900_audio_stop_stream(dev->session, stream_handle);
+ break;
+
+ default:
+ dev_err(BT_DEV, "Received bad op_code %d\n", op_code);
+ break;
+	}
+
+finished_mutex_unlock:
+ kfree(rec_data);
+ mutex_unlock(&dev->rw_mutex);
+
+ if (err)
+ return err;
+ else
+ return count;
+}
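+
+/*
+ * Illustrative user-space sketch only, not part of the original patch: the
+ * write format expected above is a 4-byte OpCode followed by the packed
+ * parameters. A stop-stream request could therefore be built like this, where
+ * fd is the open file descriptor, stream_handle is the value previously read
+ * back after START_STREAM, and the opcode constant comes from the user API
+ * header:
+ *
+ *	unsigned int buf[2];
+ *
+ *	buf[0] = CG2900_OPCODE_STOP_STREAM;
+ *	buf[1] = stream_handle;
+ *	write(fd, buf, sizeof(buf));
+ */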
+
+/**
+ * audio_dev_poll() - Handle POLL call to the interface.
+ * @filp: Pointer to the file struct.
+ * @wait: Poll table supplied to caller.
+ *
+ * This function is used by the User Space application to see if the device is
+ * still open and if there is any data available for reading.
+ *
+ * Returns:
+ * Mask of currently set POLL values.
+ */
+static unsigned int audio_dev_poll(struct file *filp, poll_table *wait)
+{
+ struct char_dev_info *dev = filp->private_data;
+ struct audio_info *info;
+ unsigned int mask = 0;
+
+ if (!dev) {
+ pr_err("audio_dev_poll: Transport closed");
+ return POLLERR | POLLRDHUP;
+ }
+ info = dev->info;
+
+ if (RESET == info->state)
+ mask |= POLLERR | POLLRDHUP | POLLPRI;
+ else
+ /* Unless RESET we can transmit */
+ mask |= POLLOUT;
+
+ if (!skb_queue_empty(&dev->rx_queue))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static const struct file_operations char_dev_fops = {
+ .open = audio_dev_open,
+ .release = audio_dev_release,
+ .read = audio_dev_read,
+ .write = audio_dev_write,
+ .poll = audio_dev_poll
+};
+
+/**
+ * probe_common() - Register misc device.
+ * @info: Audio info structure.
+ * @dev: Current device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if allocation fails.
+ * Error codes from misc_register.
+ */
+static int probe_common(struct audio_info *info, struct device *dev)
+{
+ struct audio_cb_info *cb_info;
+ struct cg2900_user_data *pf_data;
+ int err;
+
+ cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
+ if (!cb_info) {
+ dev_err(dev, "Failed to allocate cb_info\n");
+ return -ENOMEM;
+ }
+ init_waitqueue_head(&cb_info->wq);
+ skb_queue_head_init(&cb_info->skb_queue);
+
+ pf_data = dev_get_platdata(dev);
+ cg2900_set_usr(pf_data, cb_info);
+ pf_data->dev = dev;
+ pf_data->read_cb = read_cb;
+ pf_data->reset_cb = reset_cb;
+
+ /* Only register misc device when both devices (BT and FM) are probed */
+ if (!info->dev_bt || !info->dev_fm)
+ return 0;
+
+ /* Prepare and register MISC device */
+ info->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ info->misc_dev.name = NAME;
+ info->misc_dev.fops = &char_dev_fops;
+ info->misc_dev.parent = dev;
+ info->misc_dev.mode = S_IRUGO | S_IWUGO;
+
+ err = misc_register(&info->misc_dev);
+ if (err) {
+ dev_err(dev, "Error %d registering misc dev\n", err);
+ return err;
+ }
+ info->misc_registered = true;
+
+ dev_info(dev, "CG2900 Audio driver started\n");
+ return 0;
+}
+
+/**
+ * cg2900_audio_bt_probe() - Initialize CG2900 BT audio resources.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if allocation fails.
+ * -EEXIST if device has already been started.
+ * Error codes from probe_common.
+ */
+static int __devinit cg2900_audio_bt_probe(struct platform_device *pdev)
+{
+ int err;
+ struct audio_info *info;
+
+ dev_dbg(&pdev->dev, "cg2900_audio_bt_probe\n");
+
+ info = get_info(&pdev->dev);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->dev_bt = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, info);
+
+ err = probe_common(info, &pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Could not probe audio BT (%d)\n", err);
+ dev_set_drvdata(&pdev->dev, NULL);
+ device_removed(info);
+ }
+
+ return err;
+}
+
+/**
+ * cg2900_audio_fm_probe() - Initialize CG2900 FM audio resources.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if allocation fails.
+ * -EEXIST if device has already been started.
+ * Error codes from probe_common.
+ */
+static int __devinit cg2900_audio_fm_probe(struct platform_device *pdev)
+{
+ int err;
+ struct audio_info *info;
+
+ dev_dbg(&pdev->dev, "cg2900_audio_fm_probe\n");
+
+ info = get_info(&pdev->dev);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->dev_fm = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, info);
+
+ err = probe_common(info, &pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Could not probe audio FM (%d)\n", err);
+ dev_set_drvdata(&pdev->dev, NULL);
+ device_removed(info);
+ }
+
+ return err;
+}
+
+/**
+ * common_remove() - Deregister misc device.
+ * @info: Audio info structure.
+ * @dev: Current device.
+ *
+ * Returns:
+ * 0 if success.
+ * Error codes from misc_deregister.
+ */
+static int common_remove(struct audio_info *info, struct device *dev)
+{
+ int err;
+ struct audio_cb_info *cb_info;
+ struct cg2900_user_data *pf_data;
+
+ pf_data = dev_get_platdata(dev);
+ cb_info = cg2900_get_usr(pf_data);
+ skb_queue_purge(&cb_info->skb_queue);
+ wake_up_all(&cb_info->wq);
+ kfree(cb_info);
+
+ if (!info->misc_registered)
+ return 0;
+
+ err = misc_deregister(&info->misc_dev);
+ if (err)
+ dev_err(dev, "Error %d deregistering misc dev\n", err);
+ info->misc_registered = false;
+
+ if (info->filp)
+ info->filp->private_data = NULL;
+
+ dev_info(dev, "CG2900 Audio driver removed\n");
+ return err;
+}
+
+/**
+ * cg2900_audio_bt_remove() - Release CG2900 audio resources.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if success.
+ * Error codes from common_remove.
+ */
+static int __devexit cg2900_audio_bt_remove(struct platform_device *pdev)
+{
+ int err;
+ struct audio_info *info;
+
+ dev_dbg(&pdev->dev, "cg2900_audio_bt_remove\n");
+
+ info = dev_get_drvdata(&pdev->dev);
+
+ info->dev_bt = NULL;
+
+ err = common_remove(info, &pdev->dev);
+ if (err)
+ dev_err(&pdev->dev,
+ "cg2900_audio_bt_remove:common_remove failed\n");
+
+ device_removed(info);
+
+ return 0;
+}
+
+/**
+ * cg2900_audio_fm_remove() - Release CG2900 audio resources.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if success.
+ * Error codes from common_remove.
+ */
+static int __devexit cg2900_audio_fm_remove(struct platform_device *pdev)
+{
+ int err;
+ struct audio_info *info;
+
+ dev_dbg(&pdev->dev, "cg2900_audio_fm_remove\n");
+
+ info = dev_get_drvdata(&pdev->dev);
+
+ info->dev_fm = NULL;
+
+ err = common_remove(info, &pdev->dev);
+ if (err)
+ dev_err(&pdev->dev,
+ "cg2900_audio_fm_remove:common_remove failed\n");
+
+ device_removed(info);
+
+ return 0;
+}
+
+static struct platform_driver cg2900_audio_bt_driver = {
+ .driver = {
+ .name = "cg2900-audiobt",
+ .owner = THIS_MODULE,
+ },
+ .probe = cg2900_audio_bt_probe,
+ .remove = __devexit_p(cg2900_audio_bt_remove),
+};
+
+static struct platform_driver cg2900_audio_fm_driver = {
+ .driver = {
+ .name = "cg2900-audiofm",
+ .owner = THIS_MODULE,
+ },
+ .probe = cg2900_audio_fm_probe,
+ .remove = __devexit_p(cg2900_audio_fm_remove),
+};
+
+/**
+ * cg2900_audio_init() - Initialize module.
+ *
+ * Registers platform driver.
+ */
+static int __init cg2900_audio_init(void)
+{
+ int err;
+
+ pr_debug("cg2900_audio_init");
+
+ err = platform_driver_register(&cg2900_audio_bt_driver);
+ if (err)
+ return err;
+ return platform_driver_register(&cg2900_audio_fm_driver);
+}
+
+/**
+ * cg2900_audio_exit() - Remove module.
+ *
+ * Unregisters platform driver.
+ */
+static void __exit cg2900_audio_exit(void)
+{
+ pr_debug("cg2900_audio_exit");
+ platform_driver_unregister(&cg2900_audio_fm_driver);
+ platform_driver_unregister(&cg2900_audio_bt_driver);
+}
+
+module_init(cg2900_audio_init);
+module_exit(cg2900_audio_exit);
+
+MODULE_AUTHOR("Par-Gunnar Hjalmdahl ST-Ericsson");
+MODULE_AUTHOR("Kjell Andersson ST-Ericsson");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Linux Bluetooth Audio ST-Ericsson controller");
diff --git a/drivers/staging/cg2900/mfd/cg2900_char_devices.c b/drivers/staging/cg2900/mfd/cg2900_char_devices.c
new file mode 100644
index 00000000000..81d230227db
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/cg2900_char_devices.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth HCI H:4 Driver for ST-Ericsson connectivity controller.
+ */
+#define NAME "cg2900_char_dev"
+#define pr_fmt(fmt) NAME ": " fmt "\n"
+
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/mfd/core.h>
+
+#include "cg2900.h"
+#include "cg2900_core.h"
+
+#define MAIN_DEV (dev->dev)
+
+/**
+ * struct char_dev_user - Stores device information.
+ * @dev: Current device.
+ * @miscdev: Registered device struct.
+ * @filp: Current file pointer.
+ * @name: Name of device.
+ * @rx_queue: Data queue.
+ * @rx_wait_queue: Wait queue.
+ * @reset_wait_queue: Reset Wait queue.
+ * @read_mutex: Read mutex.
+ * @write_mutex: Write mutex.
+ * @list: List header for inserting into device list.
+ */
+struct char_dev_user {
+ struct device *dev;
+ struct miscdevice miscdev;
+ struct file *filp;
+ char *name;
+ struct sk_buff_head rx_queue;
+ wait_queue_head_t rx_wait_queue;
+ wait_queue_head_t reset_wait_queue;
+ struct mutex read_mutex;
+ struct mutex write_mutex;
+ struct list_head list;
+};
+
+/**
+ * struct char_info - Stores all current users.
+ * @open_mutex: Open mutex (used for both open and release).
+ * @man_mutex: Management mutex.
+ * @dev_users: List of char dev users.
+ */
+struct char_info {
+ struct mutex open_mutex;
+ struct mutex man_mutex;
+ struct list_head dev_users;
+};
+
+static struct char_info *char_info;
+
+/**
+ * char_dev_read_cb() - Handle data received from controller.
+ * @dev: Device receiving data.
+ * @skb: Buffer with data coming from controller.
+ *
+ * The char_dev_read_cb() function handles data received from the CG2900 driver.
+ */
+static void char_dev_read_cb(struct cg2900_user_data *dev, struct sk_buff *skb)
+{
+ struct char_dev_user *char_dev = dev_get_drvdata(dev->dev);
+
+ dev_dbg(dev->dev, "char_dev_read_cb len %d\n", skb->len);
+
+ skb_queue_tail(&char_dev->rx_queue, skb);
+
+ wake_up_interruptible(&char_dev->rx_wait_queue);
+}
+
+/**
+ * char_dev_reset_cb() - Handle reset from controller.
+ * @dev: Device resetting.
+ *
+ * The char_dev_reset_cb() function handles reset from the CG2900 driver.
+ */
+static void char_dev_reset_cb(struct cg2900_user_data *dev)
+{
+ struct char_dev_user *char_dev = dev_get_drvdata(dev->dev);
+
+ dev_dbg(dev->dev, "char_dev_reset_cb\n");
+
+ wake_up_interruptible(&char_dev->rx_wait_queue);
+ wake_up_interruptible(&char_dev->reset_wait_queue);
+}
+
+/**
+ * char_dev_open() - Open char device.
+ * @inode: Device driver information.
+ * @filp: Pointer to the file struct.
+ *
+ * The char_dev_open() function opens the char device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL if device cannot be found in device list.
+ * Error codes from cg2900->open.
+ */
+static int char_dev_open(struct inode *inode, struct file *filp)
+{
+ int err;
+ int minor;
+ struct char_dev_user *dev = NULL;
+ struct char_dev_user *tmp;
+ struct list_head *cursor;
+ struct cg2900_user_data *user;
+
+ mutex_lock(&char_info->open_mutex);
+
+ minor = iminor(inode);
+
+ /* Find the device for this file */
+ mutex_lock(&char_info->man_mutex);
+ list_for_each(cursor, &char_info->dev_users) {
+ tmp = list_entry(cursor, struct char_dev_user, list);
+ if (tmp->miscdev.minor == minor) {
+ dev = tmp;
+ break;
+ }
+ }
+ mutex_unlock(&char_info->man_mutex);
+ if (!dev) {
+ pr_err("Could not identify device in inode");
+ err = -EINVAL;
+ goto error_handling;
+ }
+
+ filp->private_data = dev;
+ dev->filp = filp;
+ user = dev_get_platdata(dev->dev);
+
+ /* First initiate wait queues for this device. */
+ init_waitqueue_head(&dev->rx_wait_queue);
+ init_waitqueue_head(&dev->reset_wait_queue);
+
+ /* Register to CG2900 Driver */
+ err = user->open(user);
+ if (err) {
+ dev_err(MAIN_DEV,
+ "Couldn't register to CG2900 for H:4 channel %s\n",
+ dev->name);
+ goto error_handling;
+ }
+ dev_info(MAIN_DEV, "char_dev %s opened\n", dev->name);
+
+error_handling:
+ mutex_unlock(&char_info->open_mutex);
+ return err;
+}
+
+/**
+ * char_dev_release() - Release char device.
+ * @inode: Device driver information.
+ * @filp: Pointer to the file struct.
+ *
+ * The char_dev_release() function releases the char device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EBADF if NULL pointer was supplied in private data.
+ */
+static int char_dev_release(struct inode *inode, struct file *filp)
+{
+ int err = 0;
+ struct char_dev_user *dev = filp->private_data;
+ struct cg2900_user_data *user;
+
+ pr_debug("char_dev_release");
+
+ if (!dev) {
+ pr_err("char_dev_release: Calling with NULL pointer");
+ return -EBADF;
+ }
+
+ mutex_lock(&char_info->open_mutex);
+ mutex_lock(&dev->read_mutex);
+ mutex_lock(&dev->write_mutex);
+
+ user = dev_get_platdata(dev->dev);
+ if (user->opened)
+ user->close(user);
+
+ dev_info(MAIN_DEV, "char_dev %s closed\n", dev->name);
+
+ filp->private_data = NULL;
+ dev->filp = NULL;
+ wake_up_interruptible(&dev->rx_wait_queue);
+ wake_up_interruptible(&dev->reset_wait_queue);
+
+ /* Purge the queue since the device is closed now */
+ skb_queue_purge(&dev->rx_queue);
+
+ mutex_unlock(&dev->write_mutex);
+ mutex_unlock(&dev->read_mutex);
+ mutex_unlock(&char_info->open_mutex);
+
+ return err;
+}
+
+/**
+ * char_dev_read() - Queue and copy buffer to user.
+ * @filp: Pointer to the file struct.
+ * @buf: Received buffer.
+ * @count: Size of buffer.
+ * @f_pos: Position in buffer.
+ *
+ * The char_dev_read() function copies received data to the user-space buffer.
+ * If no data is available, this function blocks until data arrives or the
+ * channel is closed.
+ *
+ * Returns:
+ * Bytes successfully read (could be 0).
+ * -EBADF if NULL pointer was supplied in private data.
+ * -EFAULT if copy_to_user fails.
+ * Error codes from wait_event_interruptible.
+ */
+static ssize_t char_dev_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct char_dev_user *dev = filp->private_data;
+ struct cg2900_user_data *user;
+ struct sk_buff *skb;
+ int bytes_to_copy;
+ int err = 0;
+
+ pr_debug("char_dev_read");
+
+ if (!dev) {
+ pr_err("char_dev_read: Calling with NULL pointer");
+ return -EBADF;
+ }
+ mutex_lock(&dev->read_mutex);
+
+ user = dev_get_platdata(dev->dev);
+
+ if (user->opened && skb_queue_empty(&dev->rx_queue)) {
+ err = wait_event_interruptible(dev->rx_wait_queue,
+ (!(skb_queue_empty(&dev->rx_queue))) ||
+ !user->opened);
+ if (err) {
+ dev_err(MAIN_DEV, "Failed to wait for event\n");
+ goto error_handling;
+ }
+ }
+
+ if (!user->opened) {
+ dev_err(MAIN_DEV, "Channel has been closed\n");
+ err = -EBADF;
+ goto error_handling;
+ }
+
+ skb = skb_dequeue(&dev->rx_queue);
+ if (!skb) {
+ dev_dbg(MAIN_DEV,
+ "skb queue is empty - return with zero bytes\n");
+ bytes_to_copy = 0;
+ goto finished;
+ }
+
+ bytes_to_copy = min(count, skb->len);
+
+ err = copy_to_user(buf, skb->data, bytes_to_copy);
+ if (err) {
+ dev_err(MAIN_DEV, "Error %d from copy_to_user\n", err);
+ skb_queue_head(&dev->rx_queue, skb);
+ err = -EFAULT;
+ goto error_handling;
+ }
+
+ skb_pull(skb, bytes_to_copy);
+
+ if (skb->len > 0)
+ skb_queue_head(&dev->rx_queue, skb);
+ else
+ kfree_skb(skb);
+
+ goto finished;
+
+error_handling:
+ mutex_unlock(&dev->read_mutex);
+ return (ssize_t)err;
+finished:
+ mutex_unlock(&dev->read_mutex);
+ return bytes_to_copy;
+}
+
+/**
+ * char_dev_write() - Copy buffer from user and write to CG2900 driver.
+ * @filp: Pointer to the file struct.
+ * @buf: Write buffer.
+ * @count: Size of the buffer write.
+ * @f_pos: Position of buffer.
+ *
+ * Returns:
+ * Bytes successfully written (could be 0).
+ * -EBADF if NULL pointer was supplied in private data.
+ * -EACCES if the channel has not been opened.
+ * -ENOMEM if sk_buff allocation fails.
+ * -EFAULT if copy_from_user fails.
+ * Error codes from the write function of the CG2900 driver.
+ */
+static ssize_t char_dev_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct sk_buff *skb;
+ struct char_dev_user *dev = filp->private_data;
+ struct cg2900_user_data *user;
+ int err = 0;
+
+ pr_debug("char_dev_write");
+
+ if (!dev) {
+ pr_err("char_dev_write: Calling with NULL pointer");
+ return -EBADF;
+ }
+
+ user = dev_get_platdata(dev->dev);
+ if (!user->opened) {
+ dev_err(MAIN_DEV, "char_dev_write: Channel not opened\n");
+ return -EACCES;
+ }
+
+ mutex_lock(&dev->write_mutex);
+
+ skb = user->alloc_skb(count, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(MAIN_DEV, "Couldn't allocate sk_buff with length %d\n",
+ count);
+ goto error_handling;
+ }
+
+ err = copy_from_user(skb_put(skb, count), buf, count);
+ if (err) {
+ dev_err(MAIN_DEV, "Error %d from copy_from_user\n", err);
+ kfree_skb(skb);
+ err = -EFAULT;
+ goto error_handling;
+ }
+
+ err = user->write(user, skb);
+ if (err) {
+ dev_err(MAIN_DEV, "cg2900_write failed (%d)\n", err);
+ kfree_skb(skb);
+ goto error_handling;
+ }
+
+ mutex_unlock(&dev->write_mutex);
+ return count;
+
+error_handling:
+ mutex_unlock(&dev->write_mutex);
+ return err;
+}
+
+/**
+ * char_dev_unlocked_ioctl() - Handle IOCTL call to the interface.
+ * @filp: Pointer to the file struct.
+ * @cmd: IOCTL command.
+ * @arg: IOCTL argument.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL if the supplied cmd is not supported.
+ * -EBADF if NULL pointer was supplied in private data.
+ * -EACCES, -EFAULT or -EIO upon other failures.
+ * For cmd CG2900_CHAR_DEV_IOCTL_CHECK4RESET the current event (idle or reset)
+ * is copied to the user-supplied argument.
+ */
+static long char_dev_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct char_dev_user *dev = filp->private_data;
+ struct cg2900_user_data *user;
+ struct cg2900_rev_data rev_data;
+ int err = 0;
+ int ret_val;
+ void __user *user_arg = (void __user *)arg;
+
+ if (!dev) {
+ pr_err("char_dev_unlocked_ioctl: Calling with NULL pointer");
+ return -EBADF;
+ }
+
+ dev_dbg(dev->dev, "char_dev_unlocked_ioctl for %s\n"
+ "\tDIR: %d\n"
+ "\tTYPE: %d\n"
+ "\tNR: %d\n"
+ "\tSIZE: %d",
+ dev->name, _IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd),
+ _IOC_SIZE(cmd));
+
+ user = dev_get_platdata(dev->dev);
+
+ switch (cmd) {
+ case CG2900_CHAR_DEV_IOCTL_RESET:
+ if (!user->opened)
+ return -EACCES;
+ dev_dbg(MAIN_DEV, "ioctl reset command for device %s\n",
+ dev->name);
+ err = user->reset(user);
+ break;
+
+ case CG2900_CHAR_DEV_IOCTL_CHECK4RESET:
+ if (user->opened)
+ ret_val = CG2900_CHAR_DEV_IOCTL_EVENT_IDLE;
+ else
+ ret_val = CG2900_CHAR_DEV_IOCTL_EVENT_RESET;
+
+ dev_dbg(MAIN_DEV, "ioctl check for reset command for device %s",
+ dev->name);
+
+ err = copy_to_user(user_arg, &ret_val, sizeof(ret_val));
+ if (err) {
+ dev_err(MAIN_DEV,
+ "Error %d from copy_to_user for reset\n", err);
+ return -EFAULT;
+ }
+ break;
+
+ case CG2900_CHAR_DEV_IOCTL_GET_REVISION:
+ if (!user->get_local_revision(user, &rev_data)) {
+ dev_err(MAIN_DEV, "No revision data available\n");
+ return -EIO;
+ }
+ dev_dbg(MAIN_DEV, "ioctl check for local revision info\n"
+ "\trevision 0x%04X\n"
+ "\tsub_version 0x%04X\n",
+ rev_data.revision, rev_data.sub_version);
+ err = copy_to_user(user_arg, &rev_data, sizeof(rev_data));
+ if (err) {
+ dev_err(MAIN_DEV,
+ "Error %d from copy_to_user for "
+ "revision\n", err);
+ return -EFAULT;
+ }
+ break;
+
+ default:
+ dev_err(MAIN_DEV, "Unknown ioctl command %08X\n", cmd);
+ err = -EINVAL;
+ break;
+	}
+
+ return err;
+}
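+
+/*
+ * Illustrative user-space sketch only, not part of the original patch: an
+ * application that sees POLLPRI from poll() can use the CHECK4RESET ioctl to
+ * check whether the chip has been reset. The ioctl and event constants come
+ * from the user API header, fd is the open file descriptor, and
+ * handle_reset() is a placeholder for the application's own recovery path:
+ *
+ *	int event;
+ *
+ *	if (!ioctl(fd, CG2900_CHAR_DEV_IOCTL_CHECK4RESET, &event) &&
+ *	    event == CG2900_CHAR_DEV_IOCTL_EVENT_RESET)
+ *		handle_reset();
+ */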
+
+/**
+ * char_dev_poll() - Handle POLL call to the interface.
+ * @filp: Pointer to the file struct.
+ * @wait: Poll table supplied to caller.
+ *
+ * Returns:
+ * Mask of currently set POLL values.
+ */
+static unsigned int char_dev_poll(struct file *filp, poll_table *wait)
+{
+ struct char_dev_user *dev = filp->private_data;
+ struct cg2900_user_data *user;
+ unsigned int mask = 0;
+
+ if (!dev) {
+ pr_debug("char_dev_poll: Device not open");
+ return POLLERR | POLLRDHUP;
+ }
+
+ user = dev_get_platdata(dev->dev);
+
+ poll_wait(filp, &dev->reset_wait_queue, wait);
+ poll_wait(filp, &dev->rx_wait_queue, wait);
+
+ if (!user->opened)
+ mask |= POLLERR | POLLRDHUP | POLLPRI;
+ else
+ mask |= POLLOUT; /* We can TX unless there is an error */
+
+ if (!(skb_queue_empty(&dev->rx_queue)))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+/*
+ * struct char_dev_fops - Char devices file operations.
+ * @read: Function that reads from the char device.
+ * @write: Function that writes to the char device.
+ * @unlocked_ioctl: Function that performs IO operations with
+ * the char device.
+ * @poll: Function that checks if there are possible operations
+ * with the char device.
+ * @open: Function that opens the char device.
+ * @release: Function that releases the char device.
+ */
+static const struct file_operations char_dev_fops = {
+ .read = char_dev_read,
+ .write = char_dev_write,
+ .unlocked_ioctl = char_dev_unlocked_ioctl,
+ .poll = char_dev_poll,
+ .open = char_dev_open,
+ .release = char_dev_release
+};
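+
+/*
+ * Illustrative user-space sketch only, not part of the original patch: a
+ * typical consumer of one of these char devices blocks in poll() and then
+ * reads the queued data from the H:4 channel (fd is the open file
+ * descriptor):
+ *
+ *	unsigned char buf[512];
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		read(fd, buf, sizeof(buf));
+ */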
+
+/**
+ * remove_dev() - Remove char device structure for device.
+ * @dev_usr: Char device user.
+ *
+ * The remove_dev() function releases the char_dev structure for this device.
+ */
+static void remove_dev(struct char_dev_user *dev_usr)
+{
+ if (!dev_usr)
+ return;
+
+ dev_dbg(dev_usr->dev,
+ "Removing char device %s with major %d and minor %d\n",
+ dev_usr->name,
+ MAJOR(dev_usr->miscdev.this_device->devt),
+ MINOR(dev_usr->miscdev.this_device->devt));
+
+ skb_queue_purge(&dev_usr->rx_queue);
+
+ mutex_destroy(&dev_usr->read_mutex);
+ mutex_destroy(&dev_usr->write_mutex);
+
+ dev_usr->dev = NULL;
+ if (dev_usr->filp)
+ dev_usr->filp->private_data = NULL;
+
+ /* Remove device node in file system. */
+ misc_deregister(&dev_usr->miscdev);
+ kfree(dev_usr);
+}
+
+/**
+ * cg2900_char_probe() - Initialize char device module.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if success.
+ * -ENOMEM if allocation fails.
+ * -EACCES if the device has already been initiated.
+ */
+static int __devinit cg2900_char_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct char_dev_user *dev_usr;
+ struct cg2900_user_data *user;
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "cg2900_char_probe\n");
+
+ user = dev_get_platdata(dev);
+ user->dev = dev;
+ user->read_cb = char_dev_read_cb;
+ user->reset_cb = char_dev_reset_cb;
+
+ dev_usr = kzalloc(sizeof(*dev_usr), GFP_KERNEL);
+ if (!dev_usr) {
+ dev_err(&pdev->dev, "Couldn't allocate dev_usr\n");
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(&pdev->dev, dev_usr);
+ dev_usr->dev = &pdev->dev;
+
+ /* Store device name */
+ dev_usr->name = user->channel_data.char_dev_name;
+
+ /* Prepare miscdevice struct before registering the device */
+ dev_usr->miscdev.minor = MISC_DYNAMIC_MINOR;
+ dev_usr->miscdev.name = dev_usr->name;
+ dev_usr->miscdev.nodename = dev_usr->name;
+ dev_usr->miscdev.fops = &char_dev_fops;
+ dev_usr->miscdev.parent = &pdev->dev;
+ dev_usr->miscdev.mode = S_IRUGO | S_IWUGO;
+
+ err = misc_register(&dev_usr->miscdev);
+ if (err) {
+ dev_err(&pdev->dev, "Error %d registering misc dev\n", err);
+ goto err_free_usr;
+ }
+
+ dev_dbg(&pdev->dev, "Added char device %s with major %d and minor %d\n",
+ dev_usr->name, MAJOR(dev_usr->miscdev.this_device->devt),
+ MINOR(dev_usr->miscdev.this_device->devt));
+
+ mutex_init(&dev_usr->read_mutex);
+ mutex_init(&dev_usr->write_mutex);
+
+ skb_queue_head_init(&dev_usr->rx_queue);
+
+ mutex_lock(&char_info->man_mutex);
+ list_add_tail(&dev_usr->list, &char_info->dev_users);
+ mutex_unlock(&char_info->man_mutex);
+
+ return 0;
+
+err_free_usr:
+ kfree(dev_usr);
+ dev_set_drvdata(&pdev->dev, NULL);
+ return err;
+}
+
+/**
+ * cg2900_char_remove() - Release the char device module.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if success (always success).
+ */
+static int __devexit cg2900_char_remove(struct platform_device *pdev)
+{
+ struct list_head *cursor, *next;
+ struct char_dev_user *tmp;
+ struct char_dev_user *user;
+
+ dev_dbg(&pdev->dev, "cg2900_char_remove\n");
+
+ user = dev_get_drvdata(&pdev->dev);
+
+ mutex_lock(&char_info->man_mutex);
+ list_for_each_safe(cursor, next, &char_info->dev_users) {
+ tmp = list_entry(cursor, struct char_dev_user, list);
+ if (tmp == user) {
+ list_del(cursor);
+ remove_dev(tmp);
+ dev_set_drvdata(&pdev->dev, NULL);
+ break;
+ }
+ }
+ mutex_unlock(&char_info->man_mutex);
+ return 0;
+}
+
+static struct platform_driver cg2900_char_driver = {
+ .driver = {
+ .name = "cg2900-chardev",
+ .owner = THIS_MODULE,
+ },
+ .probe = cg2900_char_probe,
+ .remove = __devexit_p(cg2900_char_remove),
+};
+
+/**
+ * cg2900_char_init() - Initialize module.
+ *
+ * Registers platform driver.
+ */
+static int __init cg2900_char_init(void)
+{
+ pr_debug("cg2900_char_init");
+
+ /* Initialize private data. */
+ char_info = kzalloc(sizeof(*char_info), GFP_ATOMIC);
+ if (!char_info) {
+ pr_err("Could not alloc char_info struct");
+ return -ENOMEM;
+ }
+
+ mutex_init(&char_info->open_mutex);
+ mutex_init(&char_info->man_mutex);
+ INIT_LIST_HEAD(&char_info->dev_users);
+
+ return platform_driver_register(&cg2900_char_driver);
+}
+
+/**
+ * cg2900_char_exit() - Remove module.
+ *
+ * Unregisters platform driver.
+ */
+static void __exit cg2900_char_exit(void)
+{
+ struct list_head *cursor, *next;
+ struct char_dev_user *tmp;
+
+ pr_debug("cg2900_char_exit");
+
+ platform_driver_unregister(&cg2900_char_driver);
+
+ if (!char_info)
+ return;
+
+ list_for_each_safe(cursor, next, &char_info->dev_users) {
+ tmp = list_entry(cursor, struct char_dev_user, list);
+ list_del(cursor);
+ remove_dev(tmp);
+ }
+
+ mutex_destroy(&char_info->open_mutex);
+ mutex_destroy(&char_info->man_mutex);
+
+ kfree(char_info);
+ char_info = NULL;
+}
+
+module_init(cg2900_char_init);
+module_exit(cg2900_char_exit);
+
+MODULE_AUTHOR("Henrik Possung ST-Ericsson");
+MODULE_AUTHOR("Par-Gunnar Hjalmdahl ST-Ericsson");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ST-Ericsson CG2900 Char Devices Driver");
diff --git a/drivers/staging/cg2900/mfd/cg2900_chip.c b/drivers/staging/cg2900/mfd/cg2900_chip.c
new file mode 100644
index 00000000000..020f7c906ad
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/cg2900_chip.c
@@ -0,0 +1,3618 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth HCI H:4 Driver for ST-Ericsson CG2900 GPS/BT/FM controller.
+ */
+#define NAME "cg2900_chip"
+#define pr_fmt(fmt) NAME ": " fmt "\n"
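+/*
+ * pr_fmt() makes every pr_* message from this file carry the module name
+ * prefix and a trailing newline.
+ */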
+
+#include <asm/byteorder.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/core.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+
+#include "cg2900.h"
+#include "cg2900_chip.h"
+#include "cg2900_core.h"
+#include "cg2900_lib.h"
+
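+/* Fallback max macro; used to pick the larger of the two MFD cell counts */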
+#ifndef MAX
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+#endif
+
+#define MAIN_DEV (main_info->dev)
+#define BOOT_DEV (info->user_in_charge->dev)
+
+#define WQ_NAME "cg2900_chip_wq"
+
+/*
+ * After the initial 500 ms wait, poll for the selftest results at most
+ * this many more times.
+ */
+#define MAX_NBR_OF_POLLS 50
+
+#define LINE_TOGGLE_DETECT_TIMEOUT 50 /* ms */
+#define CHIP_READY_TIMEOUT 100 /* ms */
+#define CHIP_STARTUP_TIMEOUT 15000 /* ms */
+#define CHIP_SHUTDOWN_TIMEOUT 15000 /* ms */
+#define POWER_SW_OFF_WAIT 500 /* ms */
+#define SELFTEST_INITIAL 500 /* ms */
+#define SELFTEST_POLLING 20 /* ms */
+
+/** CHANNEL_BT_CMD - Bluetooth HCI H:4 channel
+ * for Bluetooth commands in the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_BT_CMD 0x01
+
+/** CHANNEL_BT_ACL - Bluetooth HCI H:4 channel
+ * for Bluetooth ACL data in the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_BT_ACL 0x02
+
+/** CHANNEL_BT_EVT - Bluetooth HCI H:4 channel
+ * for Bluetooth events in the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_BT_EVT 0x04
+
+/** CHANNEL_FM_RADIO - Bluetooth HCI H:4 channel
+ * for FM radio in the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_FM_RADIO 0x08
+
+/** CHANNEL_GNSS - Bluetooth HCI H:4 channel
+ * for GNSS in the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_GNSS 0x09
+
+/** CHANNEL_DEBUG - Bluetooth HCI H:4 channel
+ * for internal debug data in the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_DEBUG 0x0B
+
+/** CHANNEL_STE_TOOLS - Bluetooth HCI H:4 channel
+ * for development tools data in the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_STE_TOOLS 0x0D
+
+/** CHANNEL_HCI_LOGGER - Bluetooth HCI H:4 channel
+ * for logging all transmitted H4 packets (on all channels).
+ */
+#define CHANNEL_HCI_LOGGER 0xFA
+
+/** CHANNEL_CORE - Bluetooth HCI H:4 channel
+ * for user space control of the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_CORE 0xFD
+
+/** CHANNEL_HCI_RAW - Bluetooth HCI H:4 channel
+ * for user space read/write on the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_HCI_RAW 0xFE
+
+/** CG2900_BT_CMD - Bluetooth HCI H4 channel for Bluetooth commands.
+ */
+#define CG2900_BT_CMD "cg2900_bt_cmd"
+
+/** CG2900_BT_ACL - Bluetooth HCI H4 channel for Bluetooth ACL data.
+ */
+#define CG2900_BT_ACL "cg2900_bt_acl"
+
+/** CG2900_BT_EVT - Bluetooth HCI H4 channel for Bluetooth events.
+ */
+#define CG2900_BT_EVT "cg2900_bt_evt"
+
+/** CG2900_FM_RADIO - Bluetooth HCI H4 channel for FM radio.
+ */
+#define CG2900_FM_RADIO "cg2900_fm_radio"
+
+/** CG2900_GNSS - Bluetooth HCI H4 channel for GNSS.
+ */
+#define CG2900_GNSS "cg2900_gnss"
+
+/** CG2900_DEBUG - Bluetooth HCI H4 channel for internal debug data.
+ */
+#define CG2900_DEBUG "cg2900_debug"
+
+/** CG2900_STE_TOOLS - Bluetooth HCI H4 channel for development tools data.
+ */
+#define CG2900_STE_TOOLS "cg2900_ste_tools"
+
+/** CG2900_HCI_LOGGER - BT channel for logging all transmitted H4 packets.
+ * Data read is a copy of all data transferred on the other channels.
+ * The only write allowed is configuration of the HCI Logger.
+ */
+#define CG2900_HCI_LOGGER "cg2900_hci_logger"
+
+/** CG2900_BT_AUDIO - HCI Channel for BT audio configuration commands.
+ * Maps to Bluetooth command and event channels.
+ */
+#define CG2900_BT_AUDIO "cg2900_bt_audio"
+
+/** CG2900_FM_AUDIO - HCI channel for FM audio configuration commands.
+ * Maps to FM Radio channel.
+ */
+#define CG2900_FM_AUDIO "cg2900_fm_audio"
+
+/** CG2900_CORE - Channel for keeping ST-Ericsson CG2900 enabled.
+ * Opening this channel forces the chip to stay powered.
+ * No data can be written to or read from this channel.
+ */
+#define CG2900_CORE "cg2900_core"
+
+/** CG2900_HCI_RAW - Channel for HCI RAW data exchange.
+ * Opening this channel prevents any other HCI channel from being opened,
+ * except the Logger channel.
+ */
+#define CG2900_HCI_RAW "cg2900_hci_raw"
+
+/**
+ * enum main_state - Main-state for CG2900 driver.
+ * @CG2900_INIT: CG2900 initializing.
+ * @CG2900_IDLE: No user registered to CG2900 driver.
+ * @CG2900_BOOTING: CG2900 booting after first user is registered.
+ * @CG2900_CLOSING: CG2900 closing after last user has deregistered.
+ * @CG2900_RESETING: CG2900 reset requested.
+ * @CG2900_ACTIVE: CG2900 up and running with at least one user.
+ */
+enum main_state {
+ CG2900_INIT,
+ CG2900_IDLE,
+ CG2900_BOOTING,
+ CG2900_CLOSING,
+ CG2900_RESETING,
+ CG2900_ACTIVE
+};
+
+/**
+ * enum boot_state - BOOT-state for CG2900 chip driver.
+ * @BOOT_NOT_STARTED: Boot has not yet started.
+ * @BOOT_SEND_BD_ADDRESS: VS Store In FS command with BD address
+ * has been sent.
+ * @BOOT_GET_FILES_TO_LOAD: CG2900 chip driver is retrieving file to
+ * load.
+ * @BOOT_DOWNLOAD_PATCH: CG2900 chip driver is downloading
+ * patches.
+ * @BOOT_ACTIVATE_PATCHES_AND_SETTINGS: CG2900 chip driver is activating patches
+ * and settings.
+ * @BOOT_READ_SELFTEST_RESULT: CG2900 is performing selftests that
+ * shall be read out.
+ * @BOOT_DISABLE_BT: Disable BT Core.
+ * @BOOT_READY: CG2900 chip driver boot is ready.
+ * @BOOT_FAILED: CG2900 chip driver boot failed.
+ */
+enum boot_state {
+ BOOT_NOT_STARTED,
+ BOOT_SEND_BD_ADDRESS,
+ BOOT_GET_FILES_TO_LOAD,
+ BOOT_DOWNLOAD_PATCH,
+ BOOT_ACTIVATE_PATCHES_AND_SETTINGS,
+ BOOT_READ_SELFTEST_RESULT,
+ BOOT_DISABLE_BT,
+ BOOT_READY,
+ BOOT_FAILED
+};
+
+/**
+ * enum closing_state - CLOSING-state for CG2900 chip driver.
+ * @CLOSING_RESET: HCI RESET_CMD has been sent.
+ * @CLOSING_POWER_SWITCH_OFF: HCI VS_POWER_SWITCH_OFF command has been sent.
+ * @CLOSING_SHUT_DOWN: We have now shut down the chip.
+ */
+enum closing_state {
+ CLOSING_RESET,
+ CLOSING_POWER_SWITCH_OFF,
+ CLOSING_SHUT_DOWN
+};
+
+/**
+ * enum file_load_state - BOOT_FILE_LOAD-state for CG2900 chip driver.
+ * @FILE_LOAD_GET_PATCH: Loading patches.
+ * @FILE_LOAD_GET_STATIC_SETTINGS: Loading static settings.
+ * @FILE_LOAD_NO_MORE_FILES: No more files to load.
+ * @FILE_LOAD_FAILED: File loading failed.
+ */
+enum file_load_state {
+ FILE_LOAD_GET_PATCH,
+ FILE_LOAD_GET_STATIC_SETTINGS,
+ FILE_LOAD_NO_MORE_FILES,
+ FILE_LOAD_FAILED
+};
+
+/**
+ * enum download_state - BOOT_DOWNLOAD state.
+ * @DOWNLOAD_PENDING: Download in progress.
+ * @DOWNLOAD_SUCCESS: Download successfully finished.
+ * @DOWNLOAD_FAILED: Downloading failed.
+ */
+enum download_state {
+ DOWNLOAD_PENDING,
+ DOWNLOAD_SUCCESS,
+ DOWNLOAD_FAILED
+};
+
+/**
+ * enum fm_radio_mode - FM Radio mode.
+ * It's needed because some FM do-commands generate interrupts only when
+ * the FM driver is in a specific mode, so we need to know whether to expect
+ * the interrupt.
+ * @FM_RADIO_MODE_IDLE: Radio mode is Idle (default).
+ * @FM_RADIO_MODE_FMT: Radio mode is set to FMT (transmitter).
+ * @FM_RADIO_MODE_FMR: Radio mode is set to FMR (receiver).
+ */
+enum fm_radio_mode {
+ FM_RADIO_MODE_IDLE = 0,
+ FM_RADIO_MODE_FMT = 1,
+ FM_RADIO_MODE_FMR = 2
+};
+
+
+/**
+ * struct cg2900_channel_item - List object for channel.
+ * @list: list_head struct.
+ * @user: User for this channel.
+ */
+struct cg2900_channel_item {
+ struct list_head list;
+ struct cg2900_user_data *user;
+};
+
+/**
+ * struct cg2900_delayed_work_struct - Work structure for CG2900 chip.
+ * @work:	Delayed work structure.
+ * @data: Pointer to private data.
+ */
+struct cg2900_delayed_work_struct {
+ struct delayed_work work;
+ void *data;
+};
+
+/**
+ * struct cg2900_skb_data - Structure for storing private data in an sk_buff.
+ * @user: CG2900 user that owns this sk_buff.
+ */
+struct cg2900_skb_data {
+ struct cg2900_user_data *user;
+};
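+/* Access the per-skb private data stored in the skb control buffer (cb) */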
+#define cg2900_skb_data(__skb) ((struct cg2900_skb_data *)((__skb)->cb))
+
+/**
+ * struct cg2900_chip_info - Main info structure for CG2900 chip driver.
+ * @dev: Current device. Same as @chip_dev->dev.
+ * @patch_file_name: Stores patch file name.
+ * @settings_file_name: Stores settings file name.
+ * @file_info: Firmware file info (patch or settings).
+ * @main_state: Current Main-state of CG2900 chip driver.
+ * @boot_state: Current BOOT-state of CG2900 chip driver.
+ * @closing_state: Current CLOSING-state of CG2900 chip driver.
+ * @file_load_state: Current BOOT_FILE_LOAD-state of CG2900 chip
+ * driver.
+ * @download_state: Current BOOT_DOWNLOAD-state of CG2900 chip
+ * driver.
+ * @wq: CG2900 chip driver workqueue.
+ * @chip_dev: Chip handler info.
+ * @tx_bt_lock: Spinlock used to protect some global structures
+ * related to internal BT command flow control.
+ * @tx_fm_lock: Spinlock used to protect some global structures
+ * related to internal FM command flow control.
+ * @tx_fm_audio_awaiting_irpt: Indicates if an FM interrupt event related to
+ * audio driver command is expected.
+ * @fm_radio_mode: Current FM radio mode.
+ * @tx_nr_pkts_allowed_bt: Number of packets allowed to send on BT HCI CMD
+ * H4 channel.
+ * @audio_bt_cmd_op: Stores the OpCode of the last sent audio driver
+ * HCI BT CMD.
+ * @audio_fm_cmd_id: Stores the command id of the last sent
+ * HCI FM RADIO command by the fm audio user.
+ * @hci_fm_cmd_func: Stores the command function of the last sent
+ * HCI FM RADIO command by the fm radio user.
+ * @tx_queue_bt: TX queue for HCI BT commands when nr of commands
+ * allowed is 0 (CG2900 internal flow control).
+ * @tx_queue_fm: TX queue for HCI FM commands when nr of commands
+ * allowed is 0 (CG2900 internal flow control).
+ * @open_channels: List of currently opened H4 channels.
+ * @user_in_charge: User currently operating. Normally used at
+ * channel open and close.
+ * @last_user: Last user of this chip. To avoid complications
+ * this will never be set for bt_audio and
+ * fm_audio.
+ * @logger: Logger user of this chip.
+ * @hci_raw: HCI Raw user of this chip.
+ * @bt_audio: BT audio command user of this chip.
+ * @fm_audio: FM audio command user of this chip.
+ * @selftest_work: Delayed work for reading selftest results.
+ * @nbr_of_polls: Number of times we should poll for selftest
+ * results.
+ * @startup: True if system is starting up.
+ * @mfd_size: Number of MFD cells.
+ * @mfd_char_size: Number of MFD char device cells.
+ */
+struct cg2900_chip_info {
+ struct device *dev;
+ char *patch_file_name;
+ char *settings_file_name;
+ struct cg2900_file_info file_info;
+ enum main_state main_state;
+ enum boot_state boot_state;
+ enum closing_state closing_state;
+ enum file_load_state file_load_state;
+ enum download_state download_state;
+ struct workqueue_struct *wq;
+ struct cg2900_chip_dev *chip_dev;
+ spinlock_t tx_bt_lock;
+ spinlock_t tx_fm_lock;
+ spinlock_t rw_lock;
+ bool tx_fm_audio_awaiting_irpt;
+ enum fm_radio_mode fm_radio_mode;
+ int tx_nr_pkts_allowed_bt;
+ u16 audio_bt_cmd_op;
+ u16 audio_fm_cmd_id;
+ u16 hci_fm_cmd_func;
+ struct sk_buff_head tx_queue_bt;
+ struct sk_buff_head tx_queue_fm;
+ struct list_head open_channels;
+ struct cg2900_user_data *user_in_charge;
+ struct cg2900_user_data *last_user;
+ struct cg2900_user_data *logger;
+ struct cg2900_user_data *hci_raw;
+ struct cg2900_user_data *bt_audio;
+ struct cg2900_user_data *fm_audio;
+ struct cg2900_delayed_work_struct selftest_work;
+ int nbr_of_polls;
+ bool startup;
+ int mfd_size;
+ int mfd_char_size;
+};
+
+/**
+ * struct main_info - Main info structure for CG2900 chip driver.
+ * @dev: Device structure.
+ * @cell_base_id: Base ID for MFD cells.
+ * @man_mutex: Management mutex.
+ */
+struct main_info {
+ struct device *dev;
+ int cell_base_id;
+ struct mutex man_mutex;
+};
+
+static struct main_info *main_info;
+
+/*
+ * main_wait_queue - Main Wait Queue in CG2900 driver.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(main_wait_queue);
+
+static struct mfd_cell cg2900_devs[];
+static struct mfd_cell cg2900_char_devs[];
+
+static void chip_startup_finished(struct cg2900_chip_info *info, int err);
+static void chip_shutdown(struct cg2900_user_data *user);
+
+/**
+ * bt_is_open() - Checks if any BT user is in open state.
+ * @info: CG2900 info.
+ *
+ * Returns:
+ * true if a BT channel is open.
+ * false if no BT channel is open.
+ */
+static bool bt_is_open(struct cg2900_chip_info *info)
+{
+ struct list_head *cursor;
+ struct cg2900_channel_item *tmp;
+
+ list_for_each(cursor, &info->open_channels) {
+ tmp = list_entry(cursor, struct cg2900_channel_item, list);
+ if (tmp->user->h4_channel == CHANNEL_BT_CMD)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * fm_is_open() - Checks if any FM user is in open state.
+ * @info: CG2900 info.
+ *
+ * Returns:
+ * true if an FM channel is open.
+ * false if no FM channel is open.
+ */
+static bool fm_is_open(struct cg2900_chip_info *info)
+{
+ struct list_head *cursor;
+ struct cg2900_channel_item *tmp;
+
+ list_for_each(cursor, &info->open_channels) {
+ tmp = list_entry(cursor, struct cg2900_channel_item, list);
+ if (tmp->user->h4_channel == CHANNEL_FM_RADIO)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * fm_irpt_expected() - Check if this FM command will generate an interrupt.
+ * @info: Chip info structure.
+ * @cmd_id: Command identifier.
+ *
+ * Returns:
+ * true if the command will generate an interrupt.
+ * false if it won't.
+ */
+static bool fm_irpt_expected(struct cg2900_chip_info *info, u16 cmd_id)
+{
+ bool retval = false;
+
+ switch (cmd_id) {
+ case CG2900_FM_DO_AIP_FADE_START:
+ if (info->fm_radio_mode == FM_RADIO_MODE_FMT)
+ retval = true;
+ break;
+
+ case CG2900_FM_DO_AUP_BT_FADE_START:
+ case CG2900_FM_DO_AUP_EXT_FADE_START:
+ case CG2900_FM_DO_AUP_FADE_START:
+ if (info->fm_radio_mode == FM_RADIO_MODE_FMR)
+ retval = true;
+ break;
+
+ case CG2900_FM_DO_FMR_SETANTENNA:
+ case CG2900_FM_DO_FMR_SP_AFSWITCH_START:
+ case CG2900_FM_DO_FMR_SP_AFUPDATE_START:
+ case CG2900_FM_DO_FMR_SP_BLOCKSCAN_START:
+ case CG2900_FM_DO_FMR_SP_PRESETPI_START:
+ case CG2900_FM_DO_FMR_SP_SCAN_START:
+ case CG2900_FM_DO_FMR_SP_SEARCH_START:
+ case CG2900_FM_DO_FMR_SP_SEARCHPI_START:
+ case CG2900_FM_DO_FMR_SP_TUNE_SETCHANNEL:
+ case CG2900_FM_DO_FMR_SP_TUNE_STEPCHANNEL:
+ case CG2900_FM_DO_FMT_PA_SETCTRL:
+ case CG2900_FM_DO_FMT_PA_SETMODE:
+ case CG2900_FM_DO_FMT_SP_TUNE_SETCHANNEL:
+ case CG2900_FM_DO_GEN_ANTENNACHECK_START:
+ case CG2900_FM_DO_GEN_GOTOMODE:
+ case CG2900_FM_DO_GEN_POWERSUPPLY_SETMODE:
+ case CG2900_FM_DO_GEN_SELECTREFERENCECLOCK:
+ case CG2900_FM_DO_GEN_SETPROCESSINGCLOCK:
+ case CG2900_FM_DO_GEN_SETREFERENCECLOCKPLL:
+ case CG2900_FM_DO_TST_TX_RAMP_START:
+ retval = true;
+ break;
+
+ default:
+ break;
+ }
+
+ if (retval)
+ dev_dbg(info->dev, "Following interrupt event expected for this"
+ " Cmd complete evt: cmd_id = 0x%X\n",
+ cmd_id);
+
+ return retval;
+}
+
+/**
+ * fm_is_do_cmd_irpt() - Check if irpt_val is one of the FM DO command related interrupts.
+ * @irpt_val: interrupt value.
+ *
+ * Returns:
+ * true if it's do-command related interrupt value.
+ * false if it's not.
+ */
+static bool fm_is_do_cmd_irpt(u16 irpt_val)
+{
+ if ((irpt_val & CG2900_FM_IRPT_OPERATION_SUCCEEDED) ||
+ (irpt_val & CG2900_FM_IRPT_OPERATION_FAILED)) {
+ dev_dbg(MAIN_DEV, "Irpt evt for FM do-command found, "
+ "irpt_val = 0x%X\n", irpt_val);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * fm_reset_flow_ctrl() - Clears up internal FM flow control.
+ * @info: Chip info structure.
+ *
+ * Resets outstanding commands, clears the FM TX queue and sets the CG2900 FM
+ * mode to idle.
+ */
+static void fm_reset_flow_ctrl(struct cg2900_chip_info *info)
+{
+ dev_dbg(info->dev, "fm_reset_flow_ctrl\n");
+
+ skb_queue_purge(&info->tx_queue_fm);
+
+ /* Reset the fm_cmd_id. */
+ info->audio_fm_cmd_id = CG2900_FM_CMD_NONE;
+ info->hci_fm_cmd_func = CG2900_FM_CMD_PARAM_NONE;
+
+ info->fm_radio_mode = FM_RADIO_MODE_IDLE;
+}
+
+
+/**
+ * fm_parse_cmd() - Parses an FM command packet.
+ * @data: FM command packet.
+ * @cmd_func: Out: FM legacy command function.
+ * @cmd_id: Out: FM legacy command ID.
+ */
+static void fm_parse_cmd(u8 *data, u8 *cmd_func, u16 *cmd_id)
+{
+ /* Move past the H4 header to the start of the actual packet */
+ struct fm_leg_cmd *pkt = (struct fm_leg_cmd *)(data + HCI_H4_SIZE);
+
+ *cmd_func = CG2900_FM_CMD_PARAM_NONE;
+ *cmd_id = CG2900_FM_CMD_NONE;
+
+ if (pkt->opcode != CG2900_FM_GEN_ID_LEGACY) {
+ dev_err(MAIN_DEV, "fm_parse_cmd: Not an FM legacy command "
+ "0x%02X\n", pkt->opcode);
+ return;
+ }
+
+ *cmd_func = pkt->fm_function;
+ if (*cmd_func == CG2900_FM_CMD_PARAM_WRITECOMMAND)
+ *cmd_id = cg2900_get_fm_cmd_id(le16_to_cpu(pkt->fm_cmd.head));
+}
+
+
+/**
+ * fm_parse_event() - Parses an FM event packet.
+ * @data: FM event packet.
+ * @event: Out: FM event.
+ * @cmd_func: Out: FM legacy command function.
+ * @cmd_id: Out: FM legacy command ID.
+ * @intr_val: Out: FM interrupt value.
+ */
+static void fm_parse_event(u8 *data, u8 *event, u8 *cmd_func, u16 *cmd_id,
+ u16 *intr_val)
+{
+ /* Parse the packet as an FM legacy event or interrupt */
+ union fm_leg_evt_or_irq *pkt = (union fm_leg_evt_or_irq *)data;
+
+ *cmd_func = CG2900_FM_CMD_PARAM_NONE;
+ *cmd_id = CG2900_FM_CMD_NONE;
+ *intr_val = 0;
+ *event = CG2900_FM_EVENT_UNKNOWN;
+
+ if (pkt->evt.opcode == CG2900_FM_GEN_ID_LEGACY &&
+ pkt->evt.read_write == CG2900_FM_CMD_LEG_PARAM_WRITE) {
+ /* Command complete */
+ *event = CG2900_FM_EVENT_CMD_COMPLETE;
+ *cmd_func = pkt->evt.fm_function;
+ if (*cmd_func == CG2900_FM_CMD_PARAM_WRITECOMMAND)
+ *cmd_id = cg2900_get_fm_cmd_id(
+ le16_to_cpu(pkt->evt.response_head));
+ } else if (pkt->irq_v2.opcode == CG2900_FM_GEN_ID_LEGACY &&
+ pkt->irq_v2.event_type == CG2900_FM_CMD_LEG_PARAM_IRQ) {
+ /* Interrupt, PG2 style */
+ *event = CG2900_FM_EVENT_INTERRUPT;
+ *intr_val = le16_to_cpu(pkt->irq_v2.irq);
+ } else if (pkt->irq_v1.opcode == CG2900_FM_GEN_ID_LEGACY) {
+ /* Interrupt, PG1 style */
+ *event = CG2900_FM_EVENT_INTERRUPT;
+ *intr_val = le16_to_cpu(pkt->irq_v1.irq);
+ } else
+ dev_err(MAIN_DEV, "fm_parse_event: Not an FM legacy command "
+ "0x%X %X %X %X\n", data[0], data[1], data[2], data[3]);
+}
+
+/**
+ * fm_update_mode - Updates the FM mode state machine.
+ * @info: Chip info structure.
+ * @data: FM command packet.
+ *
+ * Parses an FM command packet and updates the FM mode state machine.
+ */
+static void fm_update_mode(struct cg2900_chip_info *info, u8 *data)
+{
+ u8 cmd_func;
+ u16 cmd_id;
+
+ fm_parse_cmd(data, &cmd_func, &cmd_id);
+
+ if (cmd_func == CG2900_FM_CMD_PARAM_WRITECOMMAND &&
+ cmd_id == CG2900_FM_DO_GEN_GOTOMODE) {
+ /* Move past H4-header to start of actual package */
+ struct fm_leg_cmd *pkt =
+ (struct fm_leg_cmd *)(data + HCI_H4_SIZE);
+
+ info->fm_radio_mode = le16_to_cpu(pkt->fm_cmd.data[0]);
+ dev_dbg(info->dev, "FM Radio mode changed to %d\n",
+ info->fm_radio_mode);
+ }
+}
+
+
+/**
+ * transmit_skb_from_tx_queue_bt() - Check flow control info and transmit skb.
+ * @dev: Current chip device.
+ *
+ * The transmit_skb_from_tx_queue_bt() function checks if there are tickets
+ * available and commands waiting in the TX queue and, if so, transmits them
+ * to the controller.
+ * It shall always be called with the BT TX spinlock held (spin_lock_bh).
+ */
+static void transmit_skb_from_tx_queue_bt(struct cg2900_chip_dev *dev)
+{
+ struct cg2900_user_data *user;
+ struct cg2900_chip_info *info = dev->c_data;
+ struct sk_buff *skb;
+
+ dev_dbg(dev->dev, "transmit_skb_from_tx_queue_bt\n");
+
+ /* Dequeue an skb from the head of the list */
+ skb = skb_dequeue(&info->tx_queue_bt);
+ while (skb) {
+ if (info->tx_nr_pkts_allowed_bt <= 0) {
+ /*
+ * If no more packets allowed just return, we'll get
+ * back here after next Command Complete/Status event.
+ * Put skb back at head of queue.
+ */
+ skb_queue_head(&info->tx_queue_bt, skb);
+ return;
+ }
+
+ (info->tx_nr_pkts_allowed_bt)--;
+ dev_dbg(dev->dev, "tx_nr_pkts_allowed_bt = %d\n",
+ info->tx_nr_pkts_allowed_bt);
+
+ user = cg2900_skb_data(skb)->user; /* user is never NULL */
+
+ /*
+ * If it's a command from audio application, store the OpCode,
+ * it'll be used later to decide where to dispatch
+ * the Command Complete event.
+ */
+ if (info->bt_audio == user) {
+ struct hci_command_hdr *hdr = (struct hci_command_hdr *)
+ (skb->data + HCI_H4_SIZE);
+
+ info->audio_bt_cmd_op = le16_to_cpu(hdr->opcode);
+ dev_dbg(user->dev,
+ "Sending cmd from audio driver, saving "
+ "OpCode = 0x%04X\n", info->audio_bt_cmd_op);
+ }
+
+ cg2900_tx_to_chip(user, info->logger, skb);
+
+ /* Dequeue an skb from the head of the list */
+ skb = skb_dequeue(&info->tx_queue_bt);
+ }
+}
+
+/**
+ * transmit_skb_from_tx_queue_fm() - Check flow control info and transmit skb.
+ * @dev: Current chip device.
+ *
+ * The transmit_skb_from_tx_queue_fm() function checks if it is possible to
+ * transmit and if commands are waiting in the TX queue and, if so, transmits
+ * them to the controller.
+ * It shall always be called with the FM TX spinlock held (spin_lock_bh).
+ */
+static void transmit_skb_from_tx_queue_fm(struct cg2900_chip_dev *dev)
+{
+ struct cg2900_user_data *user;
+ struct cg2900_chip_info *info = dev->c_data;
+ struct sk_buff *skb;
+
+ dev_dbg(dev->dev, "transmit_skb_from_tx_queue_fm\n");
+
+ /* Dequeue an skb from the head of the list */
+ skb = skb_dequeue(&info->tx_queue_fm);
+ while (skb) {
+ u16 cmd_id;
+ u8 cmd_func;
+
+ if (info->audio_fm_cmd_id != CG2900_FM_CMD_NONE ||
+ info->hci_fm_cmd_func != CG2900_FM_CMD_PARAM_NONE) {
+ /*
+ * There are currently outstanding FM commands.
+ * Wait for them to finish. We will get back here later.
+ * Queue back the skb at head of list.
+ */
+ skb_queue_head(&info->tx_queue_fm, skb);
+ return;
+ }
+
+ user = cg2900_skb_data(skb)->user; /* user is never NULL */
+
+ if (!user->opened) {
+ /*
+ * Channel is not open. That means that the user that
+ * originally sent it has deregistered.
+ * Just throw it away and check the next skb in the
+ * queue.
+ */
+ kfree_skb(skb);
+ /* Dequeue an skb from the head of the list */
+ skb = skb_dequeue(&info->tx_queue_fm);
+ continue;
+ }
+
+ fm_parse_cmd(&(skb->data[0]), &cmd_func, &cmd_id);
+
+ /*
+ * Store the FM command function; it'll be used later to decide
+ * where to dispatch the Command Complete event.
+ */
+ if (info->fm_audio == user) {
+ info->audio_fm_cmd_id = cmd_id;
+ dev_dbg(user->dev, "Sending FM audio cmd 0x%04X\n",
+ info->audio_fm_cmd_id);
+ } else {
+ /* FM radio command */
+ info->hci_fm_cmd_func = cmd_func;
+ fm_update_mode(info, &skb->data[0]);
+ dev_dbg(user->dev, "Sending FM radio cmd 0x%04X\n",
+ info->hci_fm_cmd_func);
+ }
+
+ /*
+ * We have only one ticket on FM. Just return after
+ * sending the skb.
+ */
+ cg2900_tx_to_chip(user, info->logger, skb);
+ return;
+ }
+}
+
+/**
+ * update_flow_ctrl_bt() - Update number of outstanding commands for BT CMD.
+ * @dev: Current chip device.
+ * @skb: skb with received packet.
+ *
+ * The update_flow_ctrl_bt() function checks if the incoming data packet is a
+ * BT Command Complete/Command Status event and, if so, updates the number of
+ * tickets and the number of outstanding commands. It also calls a function to
+ * send queued commands (if the list of queued commands is not empty).
+ */
+static void update_flow_ctrl_bt(struct cg2900_chip_dev *dev,
+ const struct sk_buff * const skb)
+{
+ u8 *data = skb->data;
+ struct hci_event_hdr *event;
+ struct cg2900_chip_info *info = dev->c_data;
+
+ event = (struct hci_event_hdr *)data;
+ data += sizeof(*event);
+
+ if (HCI_EV_CMD_COMPLETE == event->evt) {
+ struct hci_ev_cmd_complete *complete;
+ complete = (struct hci_ev_cmd_complete *)data;
+
+ /*
+ * If it's an HCI Command Complete event we might get some
+ * HCI tickets back. We can also decrease the number of outstanding
+ * HCI commands (if it's not a NOP command or one of the commands
+ * that generate both a Command Status event and a Command Complete
+ * event).
+ * Check if we have any HCI commands waiting in the TX queue and
+ * send them if there are tickets available.
+ */
+ spin_lock_bh(&info->tx_bt_lock);
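+ /*
+ * ncmd is the Num_HCI_Command_Packets field: the number of HCI
+ * command packets the controller currently allows the host to send.
+ */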
+ info->tx_nr_pkts_allowed_bt = complete->ncmd;
+ dev_dbg(dev->dev, "New tx_nr_pkts_allowed_bt = %d\n",
+ info->tx_nr_pkts_allowed_bt);
+
+ if (!skb_queue_empty(&info->tx_queue_bt))
+ transmit_skb_from_tx_queue_bt(dev);
+ spin_unlock_bh(&info->tx_bt_lock);
+ } else if (HCI_EV_CMD_STATUS == event->evt) {
+ struct hci_ev_cmd_status *status;
+ status = (struct hci_ev_cmd_status *)data;
+
+ /*
+ * If it's an HCI Command Status event we might get some
+ * HCI tickets back. We can also decrease the number of outstanding
+ * HCI commands (if it's not a NOP command).
+ * Check if we have any HCI commands waiting in the TX queue and
+ * send them if there are tickets available.
+ */
+ spin_lock_bh(&info->tx_bt_lock);
+ info->tx_nr_pkts_allowed_bt = status->ncmd;
+ dev_dbg(dev->dev, "New tx_nr_pkts_allowed_bt = %d\n",
+ info->tx_nr_pkts_allowed_bt);
+
+ if (!skb_queue_empty(&info->tx_queue_bt))
+ transmit_skb_from_tx_queue_bt(dev);
+ spin_unlock_bh(&info->tx_bt_lock);
+ }
+}
+
+/**
+ * update_flow_ctrl_fm() - Update packets allowed for FM channel.
+ * @dev: Current chip device.
+ * @skb: skb with received packet.
+ *
+ * The update_flow_ctrl_fm() function checks if the incoming data packet is an
+ * FM packet indicating that the previous command has been handled and, if so,
+ * updates the number of packets allowed. It also calls a function to send
+ * queued commands (if the list of queued commands is not empty).
+ */
+static void update_flow_ctrl_fm(struct cg2900_chip_dev *dev,
+ const struct sk_buff * const skb)
+{
+ u8 cmd_func = CG2900_FM_CMD_PARAM_NONE;
+ u16 cmd_id = CG2900_FM_CMD_NONE;
+ u16 irpt_val = 0;
+ u8 event = CG2900_FM_EVENT_UNKNOWN;
+ struct cg2900_chip_info *info = dev->c_data;
+
+ fm_parse_event(&(skb->data[0]), &event, &cmd_func, &cmd_id, &irpt_val);
+
+ if (event == CG2900_FM_EVENT_CMD_COMPLETE) {
+ /* FM legacy command complete event */
+ spin_lock_bh(&info->tx_fm_lock);
+ /*
+ * Check if it's not a write command complete event, because
+ * then it cannot be a DO command.
+ * If it's a write command complete event, check that it is not a
+ * DO command complete event before setting the outstanding
+ * FM packets to none.
+ */
+ if (cmd_func != CG2900_FM_CMD_PARAM_WRITECOMMAND ||
+ !fm_irpt_expected(info, cmd_id)) {
+ info->hci_fm_cmd_func = CG2900_FM_CMD_PARAM_NONE;
+ info->audio_fm_cmd_id = CG2900_FM_CMD_NONE;
+ dev_dbg(dev->dev,
+ "FM_Write: Outstanding FM commands:\n"
+ "\tRadio: 0x%04X\n"
+ "\tAudio: 0x%04X\n",
+ info->hci_fm_cmd_func,
+ info->audio_fm_cmd_id);
+ transmit_skb_from_tx_queue_fm(dev);
+
+ /*
+ * If there was a write DO command complete event, check if it is
+ * a DO command previously sent by the FM audio user. If that's
+ * the case we need to remember it in order to be able to
+ * dispatch the interrupt to the correct user.
+ */
+ } else if (cmd_id == info->audio_fm_cmd_id) {
+ info->tx_fm_audio_awaiting_irpt = true;
+ dev_dbg(dev->dev,
+ "FM Audio waiting for interrupt = true\n");
+ }
+ spin_unlock_bh(&info->tx_fm_lock);
+ } else if (event == CG2900_FM_EVENT_INTERRUPT) {
+ /* FM legacy interrupt */
+ if (fm_is_do_cmd_irpt(irpt_val)) {
+ /*
+ * If it is an interrupt related to a DO command update
+ * the outstanding flow control and transmit blocked
+ * FM commands.
+ */
+ spin_lock_bh(&info->tx_fm_lock);
+ info->hci_fm_cmd_func = CG2900_FM_CMD_PARAM_NONE;
+ info->audio_fm_cmd_id = CG2900_FM_CMD_NONE;
+ dev_dbg(dev->dev,
+ "FM_INT: Outstanding FM commands:\n"
+ "\tRadio: 0x%04X\n"
+ "\tAudio: 0x%04X\n",
+ info->hci_fm_cmd_func,
+ info->audio_fm_cmd_id);
+ info->tx_fm_audio_awaiting_irpt = false;
+ dev_dbg(dev->dev,
+ "FM Audio waiting for interrupt = false\n");
+ transmit_skb_from_tx_queue_fm(dev);
+ spin_unlock_bh(&info->tx_fm_lock);
+ }
+ }
+}
+
+/**
+ * send_bt_enable() - Send HCI VS BT Enable command to the chip.
+ * @info: Chip info structure.
+ * @bt_enable: Value for BT Enable parameter (e.g. CG2900_BT_DISABLE).
+ */
+static void send_bt_enable(struct cg2900_chip_info *info, u8 bt_enable)
+{
+ struct bt_vs_bt_enable_cmd cmd;
+
+ cmd.op_code = cpu_to_le16(CG2900_BT_OP_VS_BT_ENABLE);
+ cmd.plen = BT_PARAM_LEN(sizeof(cmd));
+ cmd.enable = bt_enable;
+ cg2900_send_bt_cmd(info->user_in_charge, info->logger,
+ &cmd, sizeof(cmd));
+}
+
+/**
+ * send_bd_address() - Send HCI VS command with BD address to the chip.
+ * @info: Chip info structure.
+ */
+static void send_bd_address(struct cg2900_chip_info *info)
+{
+ struct bt_vs_store_in_fs_cmd *cmd;
+ u8 plen = sizeof(*cmd) + BT_BDADDR_SIZE;
+
+ cmd = kmalloc(plen, GFP_KERNEL);
+ if (!cmd) {
+ dev_err(info->dev, "send_bd_address could not allocate cmd\n");
+ return;
+ }
+
+ cmd->opcode = cpu_to_le16(CG2900_BT_OP_VS_STORE_IN_FS);
+ cmd->plen = BT_PARAM_LEN(plen);
+ cmd->user_id = CG2900_VS_STORE_IN_FS_USR_ID_BD_ADDR;
+ cmd->len = BT_BDADDR_SIZE;
+ /* Now copy the BD address received from user space control app. */
+ memcpy(cmd->data, bd_address, BT_BDADDR_SIZE);
+
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_SEND_BD_ADDRESS\n");
+ info->boot_state = BOOT_SEND_BD_ADDRESS;
+
+ cg2900_send_bt_cmd(info->user_in_charge, info->logger, cmd, plen);
+
+ kfree(cmd);
+}
+
+/**
+ * send_settings_file() - Transmit settings file.
+ * @info: Chip info structure.
+ *
+ * The send_settings_file() function transmits the settings file.
+ * The file is read in parts to fit in HCI packets. When finished,
+ * the settings file is released and the BD address is sent to the chip.
+ */
+static void send_settings_file(struct cg2900_chip_info *info)
+{
+ int bytes_sent;
+
+ bytes_sent = cg2900_read_and_send_file_part(info->user_in_charge,
+ info->logger,
+ &info->file_info);
+ if (bytes_sent > 0) {
+ /* Data sent. Wait for CmdComplete */
+ return;
+ } else if (bytes_sent < 0) {
+ dev_err(BOOT_DEV, "send_settings_file: Error %d occurred\n",
+ bytes_sent);
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ chip_startup_finished(info, bytes_sent);
+ return;
+ }
+
+ /* No data was sent. This file is finished */
+ info->download_state = DOWNLOAD_SUCCESS;
+
+ /* Settings file finished. Release used resources */
+ dev_dbg(BOOT_DEV, "Settings file finished, release used resources\n");
+ release_firmware(info->file_info.fw_file);
+ info->file_info.fw_file = NULL;
+
+ dev_dbg(BOOT_DEV, "New file_load_state: FILE_LOAD_NO_MORE_FILES\n");
+ info->file_load_state = FILE_LOAD_NO_MORE_FILES;
+
+ /* Create and send HCI VS Store In FS command with bd address. */
+ send_bd_address(info);
+}
+
+/**
+ * send_patch_file() - Transmit patch file.
+ * @dev: Current chip device.
+ *
+ * The send_patch_file() function transmits the patch file.
+ * The file is read in parts to fit in HCI packets. When the complete file has
+ * been transmitted, the file is released.
+ * When finished, continue with the settings file.
+ */
+static void send_patch_file(struct cg2900_chip_dev *dev)
+{
+ int err;
+ int bytes_sent;
+ struct cg2900_chip_info *info = dev->c_data;
+ int file_name_size = strlen("CG2900_XXXX_XXXX_settings.fw");
+
+ bytes_sent = cg2900_read_and_send_file_part(info->user_in_charge,
+ info->logger,
+ &info->file_info);
+ if (bytes_sent > 0) {
+ /* Data sent. Wait for CmdComplete */
+ return;
+ } else if (bytes_sent < 0) {
+ dev_err(BOOT_DEV, "send_patch_file: Error %d occurred\n",
+ bytes_sent);
+ err = bytes_sent;
+ goto error_handling;
+ }
+
+ /* No data was sent. This file is finished */
+ info->download_state = DOWNLOAD_SUCCESS;
+
+ dev_dbg(BOOT_DEV, "Patch file finished, release used resources\n");
+ release_firmware(info->file_info.fw_file);
+ info->file_info.fw_file = NULL;
+
+ /*
+ * Create the settings file name from HCI revision and sub_version.
+ * file_name_size does not include terminating NULL character
+ * so add 1.
+ */
+ err = snprintf(info->settings_file_name, file_name_size + 1,
+ "CG2900_%04X_%04X_settings.fw", dev->chip.hci_revision,
+ dev->chip.hci_sub_version);
+ if (err == file_name_size) {
+ dev_dbg(BOOT_DEV, "Downloading settings file %s\n",
+ info->settings_file_name);
+ } else {
+ dev_err(BOOT_DEV, "Settings file name failed! err=%d\n", err);
+ goto error_handling;
+ }
+
+ /* Retrieve the settings file */
+ err = request_firmware(&info->file_info.fw_file,
+ info->settings_file_name,
+ info->dev);
+ if (err) {
+ dev_err(BOOT_DEV, "Couldn't get settings file (%d)\n", err);
+ goto error_handling;
+ }
+ /* Now send the settings file */
+ dev_dbg(BOOT_DEV,
+ "New file_load_state: FILE_LOAD_GET_STATIC_SETTINGS\n");
+ info->file_load_state = FILE_LOAD_GET_STATIC_SETTINGS;
+ dev_dbg(BOOT_DEV, "New download_state: DOWNLOAD_PENDING\n");
+ info->download_state = DOWNLOAD_PENDING;
+ send_settings_file(info);
+ return;
+
+error_handling:
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ chip_startup_finished(info, err);
+}
+
+/**
+ * work_power_off_chip() - Work item to power off the chip.
+ * @work: Reference to work data.
+ *
+ * The work_power_off_chip() function handles transmission of the HCI command
+ * vs_power_switch_off and then informs the CG2900 Core that this chip driver is
+ * finished and the Core driver can now shut off the chip.
+ */
+static void work_power_off_chip(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ u8 *h4_header;
+ struct cg2900_platform_data *pf_data;
+ struct cg2900_work *my_work;
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+
+ if (!work) {
+ dev_err(MAIN_DEV, "work_power_off_chip: work == NULL\n");
+ return;
+ }
+
+ my_work = container_of(work, struct cg2900_work, work);
+ dev = my_work->user_data;
+ info = dev->c_data;
+
+ /*
+ * Get the VS Power Switch Off command to use based on connected
+ * connectivity controller
+ */
+ pf_data = dev_get_platdata(dev->dev);
+ if (pf_data->get_power_switch_off_cmd)
+ skb = pf_data->get_power_switch_off_cmd(dev, NULL);
+
+ /*
+ * Transmit the received command.
+ * If no command found for the device, just continue
+ */
+ if (!skb) {
+ dev_err(dev->dev,
+ "Could not retrieve PowerSwitchOff command\n");
+ goto shut_down_chip;
+ }
+
+ dev_dbg(dev->dev,
+ "Got power_switch_off command. Add H4 header and transmit\n");
+
+ /*
+ * Move the data pointer to the H:4 header position and store
+ * the H4 header
+ */
+ h4_header = skb_push(skb, CG2900_SKB_RESERVE);
+ *h4_header = CHANNEL_BT_CMD;
+
+ dev_dbg(dev->dev, "New closing_state: CLOSING_POWER_SWITCH_OFF\n");
+ info->closing_state = CLOSING_POWER_SWITCH_OFF;
+
+ if (info->user_in_charge)
+ cg2900_tx_to_chip(info->user_in_charge, info->logger, skb);
+ else
+ cg2900_tx_no_user(dev, skb);
+
+ /*
+ * Mandatory to wait 500ms after the power_switch_off command has been
+ * transmitted, in order to make sure that the controller is ready.
+ */
+ schedule_timeout_killable(msecs_to_jiffies(POWER_SW_OFF_WAIT));
+
+shut_down_chip:
+ dev_dbg(dev->dev, "New closing_state: CLOSING_SHUT_DOWN\n");
+ info->closing_state = CLOSING_SHUT_DOWN;
+
+ /* Close the transport, which will power off the chip */
+ if (dev->t_cb.close)
+ dev->t_cb.close(dev);
+
+ /* Chip shut-down finished, set correct state and wake up the chip. */
+ dev_dbg(dev->dev, "New main_state: CG2900_IDLE\n");
+ info->main_state = CG2900_IDLE;
+ wake_up_all(&main_wait_queue);
+
+ /* If this is called during system startup, register the devices. */
+ if (info->startup) {
+ int err;
+
+ err = mfd_add_devices(dev->dev, main_info->cell_base_id,
+ cg2900_devs, info->mfd_size, NULL, 0);
+ if (err) {
+ dev_err(dev->dev, "Failed to add cg2900_devs (%d)\n",
+ err);
+ goto finished;
+ }
+
+ err = mfd_add_devices(dev->dev, main_info->cell_base_id,
+ cg2900_char_devs, info->mfd_char_size,
+ NULL, 0);
+ if (err) {
+ dev_err(dev->dev, "Failed to add cg2900_char_devs (%d)"
+ "\n", err);
+ mfd_remove_devices(dev->dev);
+ goto finished;
+ }
+
+ /*
+ * Increase the base ID so the next connected transport will not get
+ * the same device IDs.
+ */
+ main_info->cell_base_id += MAX(info->mfd_size,
+ info->mfd_char_size);
+ info->startup = false;
+ }
+
+finished:
+ kfree(my_work);
+}
+
+/**
+ * work_chip_shutdown() - Shut down the chip.
+ * @work: Reference to work data.
+ */
+static void work_chip_shutdown(struct work_struct *work)
+{
+ struct cg2900_work *my_work;
+ struct cg2900_user_data *user;
+
+ if (!work) {
+ dev_err(MAIN_DEV, "work_chip_shutdown: work == NULL\n");
+ return;
+ }
+
+ my_work = container_of(work, struct cg2900_work, work);
+ user = my_work->user_data;
+
+ chip_shutdown(user);
+
+ kfree(my_work);
+}
+
+/**
+ * work_reset_after_error() - Handle reset.
+ * @work: Reference to work data.
+ *
+ * Handle a reset after received Command Complete event.
+ */
+static void work_reset_after_error(struct work_struct *work)
+{
+ struct cg2900_work *my_work;
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+
+ if (!work) {
+ dev_err(MAIN_DEV, "work_reset_after_error: work == NULL\n");
+ return;
+ }
+
+ my_work = container_of(work, struct cg2900_work, work);
+ dev = my_work->user_data;
+ info = dev->c_data;
+
+ chip_startup_finished(info, -EIO);
+
+ kfree(my_work);
+}
+
+/**
+ * work_load_patch_and_settings() - Start loading patches and settings.
+ * @work: Reference to work data.
+ */
+static void work_load_patch_and_settings(struct work_struct *work)
+{
+ int err = 0;
+ struct cg2900_work *my_work;
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+ int file_name_size = strlen("CG2900_XXXX_XXXX_patch.fw");
+
+ if (!work) {
+ dev_err(MAIN_DEV,
+ "work_load_patch_and_settings: work == NULL\n");
+ return;
+ }
+
+ my_work = container_of(work, struct cg2900_work, work);
+ dev = my_work->user_data;
+ info = dev->c_data;
+
+ /* Check that we are in the right state */
+ if (info->boot_state != BOOT_GET_FILES_TO_LOAD)
+ goto finished;
+
+ /*
+ * Create the patch file name from HCI revision and sub_version.
+ * file_name_size does not include terminating NULL character
+ * so add 1.
+ */
+ err = snprintf(info->patch_file_name, file_name_size + 1,
+ "CG2900_%04X_%04X_patch.fw", dev->chip.hci_revision,
+ dev->chip.hci_sub_version);
+ if (err == file_name_size) {
+ dev_dbg(BOOT_DEV, "Downloading patch file %s\n",
+ info->patch_file_name);
+ } else {
+ dev_err(BOOT_DEV, "Patch file name failed! err=%d\n", err);
+ goto error_handling;
+ }
+
+ /* We now have all info needed */
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_DOWNLOAD_PATCH\n");
+ info->boot_state = BOOT_DOWNLOAD_PATCH;
+ dev_dbg(BOOT_DEV, "New download_state: DOWNLOAD_PENDING\n");
+ info->download_state = DOWNLOAD_PENDING;
+ dev_dbg(BOOT_DEV, "New file_load_state: FILE_LOAD_GET_PATCH\n");
+ info->file_load_state = FILE_LOAD_GET_PATCH;
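+ /* Start the download from the beginning of the patch file */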
+ info->file_info.chunk_id = 0;
+ info->file_info.file_offset = 0;
+ info->file_info.fw_file = NULL;
+
+ /* OK. Now it is time to download the patches */
+ err = request_firmware(&(info->file_info.fw_file),
+ info->patch_file_name,
+ dev->dev);
+ if (err < 0) {
+ dev_err(BOOT_DEV, "Couldn't get patch file (%d)\n", err);
+ goto error_handling;
+ }
+ send_patch_file(dev);
+
+ goto finished;
+
+error_handling:
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ chip_startup_finished(info, -EIO);
+finished:
+ kfree(my_work);
+}
+
+/**
+ * work_cont_file_download() - A file block has been written.
+ * @work: Reference to work data.
+ *
+ * Handle a received HCI VS Write File Block Complete event.
+ * Normally this means continue to send files to the controller.
+ */
+static void work_cont_file_download(struct work_struct *work)
+{
+ struct cg2900_work *my_work;
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+
+ if (!work) {
+ dev_err(MAIN_DEV, "work_cont_file_download: work == NULL\n");
+ return;
+ }
+
+ my_work = container_of(work, struct cg2900_work, work);
+ dev = my_work->user_data;
+ info = dev->c_data;
+
+ /* Continue to send patches or settings to the controller */
+ if (info->file_load_state == FILE_LOAD_GET_PATCH)
+ send_patch_file(dev);
+ else if (info->file_load_state == FILE_LOAD_GET_STATIC_SETTINGS)
+ send_settings_file(info);
+ else
+ dev_dbg(BOOT_DEV, "No more files to load\n");
+
+ kfree(my_work);
+}
+
+/**
+ * work_send_read_selftest_cmd() - Send the HCI VS Read_SelfTests_Result command.
+ * @work: Reference to work data.
+ */
+static void work_send_read_selftest_cmd(struct work_struct *work)
+{
+ struct delayed_work *del_work;
+ struct cg2900_delayed_work_struct *current_work;
+ struct cg2900_chip_info *info;
+ struct hci_command_hdr cmd;
+
+ if (!work) {
+ dev_err(MAIN_DEV,
+ "work_send_read_selftest_cmd: work == NULL\n");
+ return;
+ }
+
+ del_work = to_delayed_work(work);
+ current_work = container_of(del_work,
+ struct cg2900_delayed_work_struct, work);
+ info = current_work->data;
+
+ if (info->boot_state != BOOT_READ_SELFTEST_RESULT)
+ return;
+
+ cmd.opcode = cpu_to_le16(CG2900_BT_OP_VS_READ_SELTESTS_RESULT);
+ cmd.plen = 0; /* No parameters for Read Selftests Result */
+ cg2900_send_bt_cmd(info->user_in_charge, info->logger, &cmd,
+ sizeof(cmd));
+}
+
+/**
+ * handle_reset_cmd_complete() - Handles HCI Reset Command Complete event.
+ * @dev: Current chip device.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_reset_cmd_complete(struct cg2900_chip_dev *dev, u8 *data)
+{
+ u8 status = data[0];
+ struct cg2900_chip_info *info = dev->c_data;
+
+ dev_dbg(BOOT_DEV, "Received Reset complete event with status 0x%X\n",
+ status);
+
+ if (CG2900_CLOSING != info->main_state &&
+ CLOSING_RESET != info->closing_state)
+ return false;
+
+ if (HCI_BT_ERROR_NO_ERROR != status) {
+ /*
+ * Continue in case of error, the chip is going to be shut down
+ * anyway.
+ */
+ dev_err(BOOT_DEV, "Command complete for HciReset received with "
+ "error 0x%X\n", status);
+ }
+
+ cg2900_create_work_item(info->wq, work_power_off_chip, dev);
+
+ return true;
+}
+
+/**
+ * handle_vs_store_in_fs_cmd_complete() - Handles HCI VS StoreInFS Command Complete event.
+ * @dev: Current chip device.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_store_in_fs_cmd_complete(struct cg2900_chip_dev *dev,
+ u8 *data)
+{
+ u8 status = data[0];
+ struct cg2900_chip_info *info = dev->c_data;
+
+ dev_dbg(BOOT_DEV,
+ "Received Store_in_FS complete event with status 0x%X\n",
+ status);
+
+ if (info->boot_state != BOOT_SEND_BD_ADDRESS)
+ return false;
+
+ if (HCI_BT_ERROR_NO_ERROR == status) {
+ struct hci_command_hdr cmd;
+
+ /* Send HCI SystemReset command to activate patches */
+ dev_dbg(BOOT_DEV,
+ "New boot_state: BOOT_ACTIVATE_PATCHES_AND_SETTINGS\n");
+ info->boot_state = BOOT_ACTIVATE_PATCHES_AND_SETTINGS;
+
+ cmd.opcode = cpu_to_le16(CG2900_BT_OP_VS_SYSTEM_RESET);
+ cmd.plen = 0; /* No parameters for System Reset */
+ cg2900_send_bt_cmd(info->user_in_charge, info->logger, &cmd,
+ sizeof(cmd));
+ } else {
+ dev_err(BOOT_DEV,
+ "Command complete for StoreInFS received with error "
+ "0x%X\n", status);
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ cg2900_create_work_item(info->wq, work_reset_after_error, dev);
+ }
+
+ return true;
+}
+
+/**
+ * handle_vs_write_file_block_cmd_complete() - Handles HCI VS WriteFileBlock Command Complete event.
+ * @dev: Current chip device.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_write_file_block_cmd_complete(struct cg2900_chip_dev *dev,
+ u8 *data)
+{
+ u8 status = data[0];
+ struct cg2900_chip_info *info = dev->c_data;
+
+ if (info->boot_state != BOOT_DOWNLOAD_PATCH ||
+ info->download_state != DOWNLOAD_PENDING)
+ return false;
+
+ if (HCI_BT_ERROR_NO_ERROR == status)
+ cg2900_create_work_item(info->wq, work_cont_file_download, dev);
+ else {
+ dev_err(BOOT_DEV,
+ "Command complete for WriteFileBlock received with"
+ " error 0x%X\n", status);
+ dev_dbg(BOOT_DEV, "New download_state: DOWNLOAD_FAILED\n");
+ info->download_state = DOWNLOAD_FAILED;
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ if (info->file_info.fw_file) {
+ release_firmware(info->file_info.fw_file);
+ info->file_info.fw_file = NULL;
+ }
+ cg2900_create_work_item(info->wq, work_reset_after_error, dev);
+ }
+
+ return true;
+}
+
+/**
+ * handle_vs_write_file_block_cmd_status() - Handles HCI VS WriteFileBlock Command Status event.
+ * @dev: Current chip device.
+ * @status: Returned status of WriteFileBlock command.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_write_file_block_cmd_status(struct cg2900_chip_dev *dev,
+ u8 status)
+{
+ struct cg2900_chip_info *info = dev->c_data;
+
+ if (info->boot_state != BOOT_DOWNLOAD_PATCH ||
+ info->download_state != DOWNLOAD_PENDING)
+ return false;
+
+ /*
+ * Only do something if there is an error. Otherwise we will wait for
+ * CmdComplete.
+ */
+ if (HCI_BT_ERROR_NO_ERROR != status) {
+ dev_err(BOOT_DEV,
+ "Command status for WriteFileBlock received with"
+ " error 0x%X\n", status);
+ dev_dbg(BOOT_DEV, "New download_state: DOWNLOAD_FAILED\n");
+ info->download_state = DOWNLOAD_FAILED;
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ if (info->file_info.fw_file) {
+ release_firmware(info->file_info.fw_file);
+ info->file_info.fw_file = NULL;
+ }
+ cg2900_create_work_item(info->wq, work_reset_after_error, dev);
+ }
+
+ return true;
+}
+
+/**
+ * handle_vs_power_switch_off_cmd_complete() - Handles HCI VS PowerSwitchOff Command Complete event.
+ * @dev: Current chip device.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_power_switch_off_cmd_complete(struct cg2900_chip_dev *dev,
+ u8 *data)
+{
+ u8 status = data[0];
+ struct cg2900_chip_info *info = dev->c_data;
+
+ if (CLOSING_POWER_SWITCH_OFF != info->closing_state)
+ return false;
+
+ dev_dbg(BOOT_DEV,
+ "handle_vs_power_switch_off_cmd_complete status %d\n", status);
+
+ /*
+ * We were waiting for this but we don't need to do anything upon
+ * reception except warn for error status
+ */
+ if (HCI_BT_ERROR_NO_ERROR != status)
+ dev_err(BOOT_DEV,
+ "Command Complete for PowerSwitchOff received with "
+ "error 0x%X", status);
+
+ return true;
+}
+
+/**
+ * handle_vs_system_reset_cmd_complete() - Handle HCI VS SystemReset Command Complete event.
+ * @dev: Current chip device.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_system_reset_cmd_complete(struct cg2900_chip_dev *dev,
+ u8 *data)
+{
+ u8 status = data[0];
+ struct cg2900_chip_info *info = dev->c_data;
+
+ if (info->boot_state != BOOT_ACTIVATE_PATCHES_AND_SETTINGS)
+ return false;
+
+ dev_dbg(BOOT_DEV, "handle_vs_system_reset_cmd_complete status %d\n",
+ status);
+
+ if (HCI_BT_ERROR_NO_ERROR == status) {
+ if (dev->chip.hci_revision == CG2900_PG2_REV) {
+ /*
+ * We must now wait for the selftest results. They will
+ * take a certain amount of time to finish so start a
+ * delayed work that will then send the command.
+ */
+ dev_dbg(BOOT_DEV,
+ "New boot_state: BOOT_READ_SELFTEST_RESULT\n");
+ info->boot_state = BOOT_READ_SELFTEST_RESULT;
+ queue_delayed_work(info->wq, &info->selftest_work.work,
+ msecs_to_jiffies(SELFTEST_INITIAL));
+ info->nbr_of_polls = 0;
+ } else {
+ /*
+ * We are now almost finished. Shut off BT Core. It will
+ * be re-enabled by the Bluetooth driver when needed.
+ */
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_DISABLE_BT\n");
+ info->boot_state = BOOT_DISABLE_BT;
+ send_bt_enable(info, CG2900_BT_DISABLE);
+ }
+ } else {
+ dev_err(BOOT_DEV,
+ "Received Reset complete event with status 0x%X\n",
+ status);
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ chip_startup_finished(info, -EIO);
+ }
+
+ return true;
+}
+
+/**
+ * handle_vs_read_selftests_cmd_complete() - Handle HCI VS ReadSelfTestsResult Command Complete event.
+ * @dev: Current chip.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_read_selftests_cmd_complete(struct cg2900_chip_dev *dev,
+ u8 *data)
+{
+ struct bt_vs_read_selftests_result_evt *evt =
+ (struct bt_vs_read_selftests_result_evt *)data;
+ struct cg2900_chip_info *info = dev->c_data;
+
+ if (info->boot_state != BOOT_READ_SELFTEST_RESULT)
+ return false;
+
+ dev_dbg(BOOT_DEV,
+ "handle_vs_read_selftests_cmd_complete status %d result %d\n",
+ evt->status, evt->result);
+
+ if (HCI_BT_ERROR_NO_ERROR != evt->status)
+ goto err_handling;
+
+ if (CG2900_BT_SELFTEST_SUCCESSFUL == evt->result ||
+ CG2900_BT_SELFTEST_FAILED == evt->result) {
+ if (CG2900_BT_SELFTEST_FAILED == evt->result)
+ dev_err(BOOT_DEV, "CG2900 self test failed\n");
+
+ /*
+ * We are now almost finished. Shut off BT Core. It will
+ * be re-enabled by the Bluetooth driver when needed.
+ */
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_DISABLE_BT\n");
+ info->boot_state = BOOT_DISABLE_BT;
+ send_bt_enable(info, CG2900_BT_DISABLE);
+ return true;
+ } else if (CG2900_BT_SELFTEST_NOT_COMPLETED == evt->result) {
+ /*
+ * Self tests are not yet finished. Wait some more time
+ * before resending the command
+ */
+ if (info->nbr_of_polls > MAX_NBR_OF_POLLS) {
+ dev_err(BOOT_DEV, "Selftest results reached max"
+ " number of polls\n");
+ goto err_handling;
+ }
+ queue_delayed_work(info->wq, &info->selftest_work.work,
+ msecs_to_jiffies(SELFTEST_POLLING));
+ info->nbr_of_polls++;
+ return true;
+ }
+
+err_handling:
+ dev_err(BOOT_DEV,
+ "Received Read SelfTests Result complete event with "
+ "status 0x%X and result 0x%X\n",
+ evt->status, evt->result);
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ chip_startup_finished(info, -EIO);
+ return true;
+}
+
+/**
+ * handle_vs_bt_enable_cmd_status() - Handles HCI VS BtEnable Command Status event.
+ * @dev: Current chip device.
+ * @status: Returned status of BtEnable command.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_bt_enable_cmd_status(struct cg2900_chip_dev *dev,
+ u8 status)
+{
+ struct cg2900_chip_info *info = dev->c_data;
+
+ if (info->boot_state != BOOT_DISABLE_BT)
+ return false;
+
+ dev_dbg(BOOT_DEV, "handle_vs_bt_enable_cmd_status status %d\n", status);
+
+ /*
+ * Only do something if there is an error. Otherwise we will wait for
+ * CmdComplete.
+ */
+ if (HCI_BT_ERROR_NO_ERROR != status) {
+ dev_err(BOOT_DEV,
+ "Received BtEnable status event with status 0x%X\n",
+ status);
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ chip_startup_finished(info, -EIO);
+ }
+
+ return true;
+}
+
+/**
+ * handle_vs_bt_enable_cmd_complete() - Handle HCI VS BtEnable Command Complete event.
+ * @dev: Current chip device.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_bt_enable_cmd_complete(struct cg2900_chip_dev *dev,
+ u8 *data)
+{
+ u8 status = data[0];
+ struct cg2900_chip_info *info = dev->c_data;
+
+ if (info->boot_state != BOOT_DISABLE_BT)
+ return false;
+
+ dev_dbg(BOOT_DEV, "handle_vs_bt_enable_cmd_complete status %d\n",
+ status);
+
+ if (HCI_BT_ERROR_NO_ERROR == status) {
+ /*
+ * The boot sequence is now finished successfully.
+ * Set states and signal to waiting thread.
+ */
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_READY\n");
+ info->boot_state = BOOT_READY;
+ chip_startup_finished(info, 0);
+ } else {
+ dev_err(BOOT_DEV,
+ "Received BtEnable complete event with status 0x%X\n",
+ status);
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ chip_startup_finished(info, -EIO);
+ }
+
+ return true;
+}
+
+/**
+ * handle_rx_data_bt_evt() - Check if received data should be handled in CG2900 chip driver.
+ * @dev: Current chip device.
+ * @skb: Data packet.
+ *
+ * The handle_rx_data_bt_evt() function checks if received data should be
+ * handled in CG2900 chip driver. If so handle it correctly.
+ * Received data is always HCI BT Event.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_rx_data_bt_evt(struct cg2900_chip_dev *dev,
+ struct sk_buff *skb)
+{
+ bool pkt_handled = false;
+ /* skb cannot be NULL here so it is safe to de-reference */
+ u8 *data = skb->data;
+ struct hci_event_hdr *evt;
+ u16 op_code;
+
+ evt = (struct hci_event_hdr *)data;
+ data += sizeof(*evt);
+
+ /* First check the event code. */
+ if (HCI_EV_CMD_COMPLETE == evt->evt) {
+ struct hci_ev_cmd_complete *cmd_complete;
+
+ cmd_complete = (struct hci_ev_cmd_complete *)data;
+ op_code = le16_to_cpu(cmd_complete->opcode);
+ dev_dbg(dev->dev,
+ "Received Command Complete: op_code = 0x%04X\n",
+ op_code);
+ /* Move to the first byte of the return parameters */
+ data += sizeof(*cmd_complete);
+
+ if (op_code == HCI_OP_RESET)
+ pkt_handled = handle_reset_cmd_complete(dev, data);
+ else if (op_code == CG2900_BT_OP_VS_STORE_IN_FS)
+ pkt_handled = handle_vs_store_in_fs_cmd_complete(dev,
+ data);
+ else if (op_code == CG2900_BT_OP_VS_WRITE_FILE_BLOCK)
+ pkt_handled =
+ handle_vs_write_file_block_cmd_complete(dev,
+ data);
+ else if (op_code == CG2900_BT_OP_VS_POWER_SWITCH_OFF)
+ pkt_handled =
+ handle_vs_power_switch_off_cmd_complete(dev,
+ data);
+ else if (op_code == CG2900_BT_OP_VS_SYSTEM_RESET)
+ pkt_handled = handle_vs_system_reset_cmd_complete(dev,
+ data);
+ else if (op_code == CG2900_BT_OP_VS_BT_ENABLE)
+ pkt_handled = handle_vs_bt_enable_cmd_complete(dev,
+ data);
+ else if (op_code == CG2900_BT_OP_VS_READ_SELTESTS_RESULT)
+ pkt_handled = handle_vs_read_selftests_cmd_complete(dev,
+ data);
+ } else if (HCI_EV_CMD_STATUS == evt->evt) {
+ struct hci_ev_cmd_status *cmd_status;
+
+ cmd_status = (struct hci_ev_cmd_status *)data;
+
+ op_code = le16_to_cpu(cmd_status->opcode);
+
+ dev_dbg(dev->dev, "Received Command Status: op_code = 0x%04X\n",
+ op_code);
+
+ if (op_code == CG2900_BT_OP_VS_WRITE_FILE_BLOCK)
+ pkt_handled = handle_vs_write_file_block_cmd_status
+ (dev, cmd_status->status);
+ else if (op_code == CG2900_BT_OP_VS_BT_ENABLE)
+ pkt_handled = handle_vs_bt_enable_cmd_status
+ (dev, cmd_status->status);
+ } else if (HCI_EV_HW_ERROR == evt->evt) {
+ struct hci_ev_hw_error *hw_error;
+
+ hw_error = (struct hci_ev_hw_error *)data;
+ /*
+ * Only do a printout. There might be a receiving stack that can
+ * handle this event
+ */
+ dev_err(dev->dev, "HW Error event received with error 0x%02X\n",
+ hw_error->hw_code);
+ return false;
+ } else
+ return false;
+
+ if (pkt_handled)
+ kfree_skb(skb);
+
+ return pkt_handled;
+}
+
+/**
+ * transmit_skb_with_flow_ctrl_bt() - Send the BT skb to the controller if it is allowed or queue it.
+ * @user: Current user.
+ * @skb: Data packet.
+ *
+ * The transmit_skb_with_flow_ctrl_bt() function checks if there are
+ * tickets available and, if so, transmits the buffer to the controller.
+ * Otherwise the skb and its user are stored in a queue for later sending.
+ * If enabled, copy the transmitted data to the HCI logger as well.
+ */
+static void transmit_skb_with_flow_ctrl_bt(struct cg2900_user_data *user,
+ struct sk_buff *skb)
+{
+ struct cg2900_chip_dev *dev = cg2900_get_prv(user);
+ struct cg2900_chip_info *info = dev->c_data;
+
+ /*
+ * Because some H4 channels have more than one user (currently the
+ * audio application for the BT command and FM channels), we need
+ * internal HCI command flow control in the CG2900 driver.
+ * So check here how many tickets we have and store the skb in a
+ * queue if there are no tickets left. The skb will be sent later
+ * when we get more ticket(s).
+ */
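+ /*
+ * Illustrative example: with one ticket available the first command is
+ * sent at once and tx_nr_pkts_allowed_bt drops to 0; the next command
+ * is queued on tx_queue_bt until more tickets are granted. Flow control
+ * is updated when BT events arrive from the chip (see
+ * update_flow_ctrl_bt(), called from data_from_chip()).
+ */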
+ spin_lock_bh(&info->tx_bt_lock);
+
+ if (info->tx_nr_pkts_allowed_bt > 0) {
+ info->tx_nr_pkts_allowed_bt--;
+ dev_dbg(user->dev, "New tx_nr_pkts_allowed_bt = %d\n",
+ info->tx_nr_pkts_allowed_bt);
+
+ /*
+ * If it is a command from the audio application, store the
+ * OpCode; it will be used later to decide where to dispatch
+ * the Command Complete event.
+ */
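+ /*
+ * Note: the saved OpCode is matched against incoming Command
+ * Complete/Command Status events in is_bt_audio_user() so that
+ * the response is routed back to the BT audio user.
+ */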
+ if (info->bt_audio == user) {
+ struct hci_command_hdr *hdr = (struct hci_command_hdr *)
+ (skb->data + HCI_H4_SIZE);
+
+ info->audio_bt_cmd_op = le16_to_cpu(hdr->opcode);
+ dev_dbg(user->dev,
+ "Sending cmd from audio driver, saving "
+ "OpCode = 0x%X\n",
+ info->audio_bt_cmd_op);
+ }
+
+ cg2900_tx_to_chip(user, info->logger, skb);
+ } else {
+ dev_dbg(user->dev, "Not allowed to send cmd to controller, "
+ "storing in TX queue\n");
+
+ cg2900_skb_data(skb)->user = user;
+ skb_queue_tail(&info->tx_queue_bt, skb);
+ }
+ spin_unlock_bh(&info->tx_bt_lock);
+}
+
+/**
+ * transmit_skb_with_flow_ctrl_fm() - Send the FM skb to the controller if it is allowed or queue it.
+ * @user: Current user.
+ * @skb: Data packet.
+ *
+ * The transmit_skb_with_flow_ctrl_fm() function checks if the chip is
+ * available and, if so, transmits the buffer to the controller. Otherwise the
+ * skb and user name are stored in a list for later sending.
+ * It also updates the FM radio mode if this is an FM GOTOMODE command; the
+ * mode is needed to know how to handle some FM DO command complete events.
+ * If enabled, copy the transmitted data to the HCI logger as well.
+ */
+static void transmit_skb_with_flow_ctrl_fm(struct cg2900_user_data *user,
+ struct sk_buff *skb)
+{
+ u8 cmd_func = CG2900_FM_CMD_PARAM_NONE;
+ u16 cmd_id = CG2900_FM_CMD_NONE;
+ struct cg2900_chip_dev *dev = cg2900_get_prv(user);
+ struct cg2900_chip_info *info = dev->c_data;
+
+ fm_parse_cmd(&(skb->data[0]), &cmd_func, &cmd_id);
+
+ /*
+ * If this is an FM IP disable or reset, send the command and also
+ * reset the flow control and audio user.
+ */
+ if (cmd_func == CG2900_FM_CMD_PARAM_DISABLE ||
+ cmd_func == CG2900_FM_CMD_PARAM_RESET) {
+ spin_lock_bh(&info->tx_fm_lock);
+ fm_reset_flow_ctrl(info);
+ spin_unlock_bh(&info->tx_fm_lock);
+ cg2900_tx_to_chip(user, info->logger, skb);
+ return;
+ }
+
+ /*
+ * If this is an FM user and no FM audio user command is pending,
+ * just send the FM command. It is up to the user of the FM channel
+ * to handle its own flow control.
+ */
+ spin_lock_bh(&info->tx_fm_lock);
+ if (info->fm_audio != user &&
+ info->audio_fm_cmd_id == CG2900_FM_CMD_NONE) {
+ info->hci_fm_cmd_func = cmd_func;
+ dev_dbg(user->dev, "Sending FM radio command 0x%04X\n",
+ info->hci_fm_cmd_func);
+ /* If this is a GotoMode command, update the FM mode */
+ fm_update_mode(info, &skb->data[0]);
+ cg2900_tx_to_chip(user, info->logger, skb);
+ } else if (info->fm_audio == user &&
+ info->hci_fm_cmd_func == CG2900_FM_CMD_PARAM_NONE &&
+ info->audio_fm_cmd_id == CG2900_FM_CMD_NONE) {
+ /*
+ * If it is a command from the FM audio user, store the
+ * command ID. It will be used later to decide where to
+ * dispatch the command complete event.
+ */
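+ /*
+ * Note: the saved command ID is matched against incoming command
+ * complete events in is_fm_audio_user() so that the response is
+ * routed back to the FM audio user.
+ */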
+ info->audio_fm_cmd_id = cmd_id;
+ dev_dbg(user->dev, "Sending FM audio command 0x%04X\n",
+ info->audio_fm_cmd_id);
+ cg2900_tx_to_chip(user, info->logger, skb);
+ } else {
+ dev_dbg(user->dev,
+ "Not allowed to send FM cmd to controller, storing in "
+ "TX queue\n");
+
+ cg2900_skb_data(skb)->user = user;
+ skb_queue_tail(&info->tx_queue_fm, skb);
+ }
+ spin_unlock_bh(&info->tx_fm_lock);
+}
+
+/**
+ * is_bt_audio_user() - Checks if this packet is for the BT audio user.
+ * @info: CG2900 info.
+ * @h4_channel: H:4 channel for this packet.
+ * @skb: Packet to check.
+ *
+ * Returns:
+ * true if packet is for BT audio user.
+ * false otherwise.
+ */
+static bool is_bt_audio_user(struct cg2900_chip_info *info, int h4_channel,
+ const struct sk_buff * const skb)
+{
+ struct hci_event_hdr *hdr;
+ u8 *payload;
+ u16 opcode;
+
+ if (h4_channel != CHANNEL_BT_EVT)
+ return false;
+
+ hdr = (struct hci_event_hdr *)skb->data;
+ payload = (u8 *)(hdr + 1); /* follows header */
+
+ if (HCI_EV_CMD_COMPLETE == hdr->evt)
+ opcode = le16_to_cpu(
+ ((struct hci_ev_cmd_complete *)payload)->opcode);
+ else if (HCI_EV_CMD_STATUS == hdr->evt)
+ opcode = le16_to_cpu(
+ ((struct hci_ev_cmd_status *)payload)->opcode);
+ else
+ return false;
+
+ if (opcode != info->audio_bt_cmd_op)
+ return false;
+
+ dev_dbg(info->bt_audio->dev, "Audio BT OpCode match = 0x%04X\n",
+ opcode);
+ info->audio_bt_cmd_op = CG2900_BT_OPCODE_NONE;
+ return true;
+}
+
+/**
+ * is_fm_audio_user() - Checks if this packet is for the FM audio user.
+ * @info: CG2900 info.
+ * @h4_channel: H:4 channel for this packet.
+ * @skb: Packet to check.
+ *
+ * Returns:
+ * true if packet is for FM audio user.
+ * false otherwise.
+ */
+static bool is_fm_audio_user(struct cg2900_chip_info *info, int h4_channel,
+ const struct sk_buff * const skb)
+{
+ u8 cmd_func;
+ u16 cmd_id;
+ u16 irpt_val;
+ u8 event;
+
+ if (h4_channel != CHANNEL_FM_RADIO)
+ return false;
+
+ cmd_func = CG2900_FM_CMD_PARAM_NONE;
+ cmd_id = CG2900_FM_CMD_NONE;
+ irpt_val = 0;
+ event = CG2900_FM_EVENT_UNKNOWN;
+
+ fm_parse_event(&skb->data[0], &event, &cmd_func, &cmd_id,
+ &irpt_val);
+ /* Check if command complete event FM legacy interface. */
+ if ((event == CG2900_FM_EVENT_CMD_COMPLETE) &&
+ (cmd_func == CG2900_FM_CMD_PARAM_WRITECOMMAND) &&
+ (cmd_id == info->audio_fm_cmd_id)) {
+ dev_dbg(info->fm_audio->dev,
+ "FM Audio Function Code match = 0x%04X\n",
+ cmd_id);
+ return true;
+ }
+
+ /* Check if Interrupt legacy interface. */
+ if ((event == CG2900_FM_EVENT_INTERRUPT) &&
+ (fm_is_do_cmd_irpt(irpt_val)) &&
+ (info->tx_fm_audio_awaiting_irpt))
+ return true;
+
+ return false;
+}
+
+/**
+ * data_from_chip() - Called when data is received from the chip.
+ * @dev: Chip info.
+ * @skb: Packet received.
+ *
+ * The data_from_chip() function updates flow control and checks if the packet
+ * is a response to a packet the driver itself has transmitted. If not, it
+ * finds the correct user and sends the packet to that user.
+ */
+static void data_from_chip(struct cg2900_chip_dev *dev,
+ struct sk_buff *skb)
+{
+ int h4_channel;
+ struct list_head *cursor;
+ struct cg2900_channel_item *tmp;
+ struct cg2900_chip_info *info = dev->c_data;
+ struct cg2900_user_data *user = NULL;
+
+ spin_lock_bh(&info->rw_lock);
+ /* Copy RX Data into logger.*/
+ if (info->logger)
+ cg2900_send_to_hci_logger(info->logger, skb,
+ LOGGER_DIRECTION_RX);
+
+ /*
+ * HCI Raw user can only have exclusive access to chip, there won't be
+ * other users once it's opened.
+ */
+ if (info->hci_raw && info->hci_raw->opened) {
+ info->hci_raw->read_cb(info->hci_raw, skb);
+ spin_unlock_bh(&info->rw_lock);
+ return;
+ }
+
+ h4_channel = skb->data[0];
+ skb_pull(skb, HCI_H4_SIZE);
+
+ /* First check if it is a BT or FM audio event */
+ if (is_bt_audio_user(info, h4_channel, skb))
+ user = info->bt_audio;
+ else if (is_fm_audio_user(info, h4_channel, skb))
+ user = info->fm_audio;
+ spin_unlock_bh(&info->rw_lock);
+
+ /* Now check if we should update flow control */
+ if (h4_channel == CHANNEL_BT_EVT)
+ update_flow_ctrl_bt(dev, skb);
+ else if (h4_channel == CHANNEL_FM_RADIO)
+ update_flow_ctrl_fm(dev, skb);
+
+ /* Then check if this is a response to data we have sent */
+ if (h4_channel == CHANNEL_BT_EVT && handle_rx_data_bt_evt(dev, skb))
+ return;
+
+ spin_lock_bh(&info->rw_lock);
+
+ if (user)
+ goto user_found;
+
+ /* Let's see if it is the last user */
+ if (info->last_user && info->last_user->h4_channel == h4_channel) {
+ user = info->last_user;
+ goto user_found;
+ }
+
+ /*
+ * Search through the list of all open channels to find the user.
+ * We skip the audio channels since they have already been checked
+ * earlier in this function.
+ */
+ list_for_each(cursor, &info->open_channels) {
+ tmp = list_entry(cursor, struct cg2900_channel_item, list);
+ if (tmp->user->h4_channel == h4_channel &&
+ !tmp->user->is_audio) {
+ user = tmp->user;
+ goto user_found;
+ }
+ }
+
+user_found:
+ if (user != info->bt_audio && user != info->fm_audio)
+ info->last_user = user;
+
+ spin_unlock_bh(&info->rw_lock);
+
+ if (user)
+ user->read_cb(user, skb);
+ else {
+ dev_err(dev->dev,
+ "Could not find corresponding user to h4_channel %d\n",
+ h4_channel);
+ kfree_skb(skb);
+ }
+}
+
+/**
+ * chip_removed() - Called when transport has been removed.
+ * @dev: Chip device.
+ *
+ * Removes registered MFD devices and frees internal resources.
+ */
+static void chip_removed(struct cg2900_chip_dev *dev)
+{
+ struct cg2900_chip_info *info = dev->c_data;
+
+ cancel_delayed_work(&info->selftest_work.work);
+ mfd_remove_devices(dev->dev);
+ kfree(info->settings_file_name);
+ kfree(info->patch_file_name);
+ destroy_workqueue(info->wq);
+ kfree(info);
+ dev->c_data = NULL;
+ dev->c_cb.chip_removed = NULL;
+ dev->c_cb.data_from_chip = NULL;
+}
+
+/**
+ * last_bt_user_removed() - Called when last BT user is removed.
+ * @info: Chip handler info.
+ *
+ * Clears out TX queue for BT.
+ */
+static void last_bt_user_removed(struct cg2900_chip_info *info)
+{
+ spin_lock_bh(&info->tx_bt_lock);
+ skb_queue_purge(&info->tx_queue_bt);
+
+ /*
+ * Reset number of packets allowed and number of outstanding
+ * BT commands.
+ */
+ info->tx_nr_pkts_allowed_bt = 1;
+ /* Reset the audio_bt_cmd_op. */
+ info->audio_bt_cmd_op = CG2900_BT_OPCODE_NONE;
+ spin_unlock_bh(&info->tx_bt_lock);
+}
+
+/**
+ * last_fm_user_removed() - Called when last FM user is removed.
+ * @info: Chip handler info.
+ *
+ * Clears out TX queue for FM.
+ */
+static void last_fm_user_removed(struct cg2900_chip_info *info)
+{
+ spin_lock_bh(&info->tx_fm_lock);
+ fm_reset_flow_ctrl(info);
+ spin_unlock_bh(&info->tx_fm_lock);
+}
+
+/**
+ * chip_shutdown() - Reset and power the chip off.
+ * @user: MFD device.
+ */
+static void chip_shutdown(struct cg2900_user_data *user)
+{
+ struct hci_command_hdr cmd;
+ struct cg2900_chip_dev *dev = cg2900_get_prv(user);
+ struct cg2900_chip_info *info = dev->c_data;
+
+ dev_dbg(user->dev, "chip_shutdown\n");
+
+ /* First do a quick power cycle of the chip to ensure a good state */
+ if (dev->t_cb.set_chip_power)
+ dev->t_cb.set_chip_power(dev, false);
+
+ /*
+ * Wait 50ms before continuing to be sure that the chip detects
+ * chip power off.
+ */
+ schedule_timeout_killable(
+ msecs_to_jiffies(LINE_TOGGLE_DETECT_TIMEOUT));
+
+ if (dev->t_cb.set_chip_power)
+ dev->t_cb.set_chip_power(dev, true);
+
+ /* Wait 100ms before continuing to be sure that the chip is ready */
+ schedule_timeout_killable(msecs_to_jiffies(CHIP_READY_TIMEOUT));
+
+ if (user != info->bt_audio && user != info->fm_audio)
+ info->last_user = user;
+ info->user_in_charge = user;
+
+ /*
+ * Transmit HCI reset command to ensure the chip is using
+ * the correct transport and to put BT part in reset.
+ */
+ dev_dbg(user->dev, "New closing_state: CLOSING_RESET\n");
+ info->closing_state = CLOSING_RESET;
+ cmd.opcode = cpu_to_le16(HCI_OP_RESET);
+ cmd.plen = 0; /* No parameters for HCI reset */
+ cg2900_send_bt_cmd(info->user_in_charge, info->logger, &cmd,
+ sizeof(cmd));
+}
+
+/**
+ * chip_startup_finished() - Called when chip startup has finished.
+ * @info: Chip handler info.
+ * @err: Result of chip startup, 0 for no error.
+ *
+ * Shuts down the chip upon error, otherwise sets the state to active. Waiting
+ * threads are woken in both cases; on success the transport is also informed
+ * that startup has finished.
+ */
+static void chip_startup_finished(struct cg2900_chip_info *info, int err)
+{
+ dev_dbg(BOOT_DEV, "chip_startup_finished (%d)\n", err);
+
+ if (err)
+ /* Shutdown the chip */
+ cg2900_create_work_item(info->wq, work_chip_shutdown,
+ info->user_in_charge);
+ else {
+ dev_dbg(BOOT_DEV, "New main_state: CG2900_ACTIVE\n");
+ info->main_state = CG2900_ACTIVE;
+ }
+
+ wake_up_all(&main_wait_queue);
+
+ if (err)
+ return;
+
+ if (!info->chip_dev->t_cb.chip_startup_finished)
+ dev_dbg(BOOT_DEV, "chip_startup_finished callback not found\n");
+ else
+ info->chip_dev->t_cb.chip_startup_finished(info->chip_dev);
+}
+
+/**
+ * cg2900_open() - Called when user wants to open an H4 channel.
+ * @user: MFD device to open.
+ *
+ * Checks that H4 channel is not already opened. If chip is not started, starts
+ * up the chip. Sets channel as opened and adds user to active users.
+ *
+ * Returns:
+ * 0 if success.
+ * -EINVAL if user is NULL or read_cb is NULL.
+ * -EBUSY if the chip is in a transit state (being started or shut down).
+ * -EACCES if H4 channel is already opened.
+ * -ENOMEM if allocation fails.
+ * -EIO if chip startup fails.
+ * Error codes generated by t_cb.open.
+ */
+static int cg2900_open(struct cg2900_user_data *user)
+{
+ int err;
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+ struct list_head *cursor;
+ struct cg2900_channel_item *tmp;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV, "cg2900_open: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ if (!user->read_cb) {
+ dev_err(user->dev, "cg2900_open: read_cb missing\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(user->dev, "cg2900_open\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ /* HCI Raw channel shall have exclusive access to chip. */
+ if (info->hci_raw && user->h4_channel != CHANNEL_HCI_RAW &&
+ user->h4_channel != CHANNEL_HCI_LOGGER) {
+ dev_err(user->dev, "cg2900_open: Cannot open %s "
+ "channel while HCI Raw channel is opened\n",
+ user->channel_data.char_dev_name);
+ return -EACCES;
+ }
+
+ mutex_lock(&main_info->man_mutex);
+
+ /*
+ * Add a short wait to avoid busy-looping the CPU on repeated open
+ * attempts.
+ * Note there will of course be no wait if we are already in the right
+ * state.
+ */
+ err = wait_event_timeout(main_wait_queue,
+ (CG2900_IDLE == info->main_state ||
+ CG2900_ACTIVE == info->main_state),
+ msecs_to_jiffies(LINE_TOGGLE_DETECT_TIMEOUT));
+ if (err <= 0) {
+ if (CG2900_INIT == info->main_state)
+ dev_err(user->dev, "Transport not opened\n");
+ else
+ dev_err(user->dev, "cg2900_open currently busy (0x%X). "
+ "Try again\n", info->main_state);
+ err = -EBUSY;
+ goto err_free_mutex;
+ }
+
+ err = 0;
+
+ list_for_each(cursor, &info->open_channels) {
+ tmp = list_entry(cursor, struct cg2900_channel_item, list);
+ if (tmp->user->h4_channel == user->h4_channel &&
+ tmp->user->is_audio == user->is_audio) {
+ dev_err(user->dev, "Channel %d is already opened\n",
+ user->h4_channel);
+ err = -EACCES;
+ goto err_free_mutex;
+ }
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ dev_err(user->dev, "Could not allocate tmp\n");
+ err = -ENOMEM;
+ goto err_free_mutex;
+ }
+ tmp->user = user;
+
+ if (CG2900_ACTIVE != info->main_state &&
+ !user->chip_independent) {
+ /* Open transport and start-up the chip */
+ if (dev->t_cb.set_chip_power)
+ dev->t_cb.set_chip_power(dev, true);
+
+ /* Wait to be sure that the chip is ready */
+ schedule_timeout_killable(
+ msecs_to_jiffies(CHIP_READY_TIMEOUT));
+
+ if (dev->t_cb.open) {
+ err = dev->t_cb.open(dev);
+ if (err) {
+ if (dev->t_cb.set_chip_power)
+ dev->t_cb.set_chip_power(dev, false);
+ goto err_free_list_item;
+ }
+ }
+
+ /* Start the boot sequence */
+ info->user_in_charge = user;
+ if (user != info->bt_audio && user != info->fm_audio)
+ info->last_user = user;
+ dev_dbg(user->dev, "New boot_state: BOOT_GET_FILES_TO_LOAD\n");
+ info->boot_state = BOOT_GET_FILES_TO_LOAD;
+ dev_dbg(user->dev, "New main_state: CG2900_BOOTING\n");
+ info->main_state = CG2900_BOOTING;
+ cg2900_create_work_item(info->wq, work_load_patch_and_settings,
+ dev);
+
+ dev_dbg(user->dev, "Wait up to 15 seconds for chip to start\n");
+ wait_event_timeout(main_wait_queue,
+ (CG2900_ACTIVE == info->main_state ||
+ CG2900_IDLE == info->main_state),
+ msecs_to_jiffies(CHIP_STARTUP_TIMEOUT));
+ if (CG2900_ACTIVE != info->main_state) {
+ dev_err(user->dev, "CG2900 driver failed to start\n");
+
+ if (dev->t_cb.close)
+ dev->t_cb.close(dev);
+
+ dev_dbg(user->dev, "New main_state: CG2900_IDLE\n");
+ info->main_state = CG2900_IDLE;
+ err = -EIO;
+ goto err_free_list_item;
+ }
+ }
+
+ list_add_tail(&tmp->list, &info->open_channels);
+
+ user->opened = true;
+
+ dev_dbg(user->dev, "H:4 channel opened\n");
+
+ mutex_unlock(&main_info->man_mutex);
+ return 0;
+err_free_list_item:
+ kfree(tmp);
+err_free_mutex:
+ mutex_unlock(&main_info->man_mutex);
+ return err;
+}
+
+/**
+ * cg2900_hci_log_open() - Called when user wants to open HCI logger channel.
+ * @user: MFD device to open.
+ *
+ * Registers user as hci_logger and calls @cg2900_open to open the channel.
+ *
+ * Returns:
+ * 0 if success.
+ * -EINVAL if user is NULL.
+ * -EEXIST if H4 channel is already opened.
+ * Error codes generated by cg2900_open.
+ */
+static int cg2900_hci_log_open(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+ int err;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "cg2900_hci_log_open: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(user->dev, "cg2900_hci_log_open\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ if (info->logger) {
+ dev_err(user->dev, "HCI Logger already stored\n");
+ return -EEXIST;
+ }
+
+ info->logger = user;
+ err = cg2900_open(user);
+ if (err)
+ info->logger = NULL;
+ return err;
+}
+
+/**
+ * cg2900_hci_raw_open() - Called when user wants to open HCI Raw channel.
+ * @user: MFD device to open.
+ *
+ * Registers user as hci_raw and calls @cg2900_open to open the channel.
+ *
+ * Returns:
+ * 0 if success.
+ * -EINVAL if user is NULL.
+ * -EEXIST if H4 channel is already opened.
+ * -EACCES if an H4 channel other than HCI Raw is already opened.
+ * Error codes generated by cg2900_open.
+ */
+static int cg2900_hci_raw_open(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+ struct list_head *cursor;
+ struct cg2900_channel_item *tmp;
+ int err;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "cg2900_hci_raw_open: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(user->dev, "cg2900_hci_raw_open\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ if (info->hci_raw) {
+ dev_err(user->dev, "HCI Raw Channel already stored\n");
+ return -EEXIST;
+ }
+
+ if (!list_empty(&info->open_channels)) {
+ /*
+ * Go through each open channel to check if it is logger
+ * channel or some other channel.
+ */
+ list_for_each(cursor, &info->open_channels) {
+ tmp = list_entry(cursor,
+ struct cg2900_channel_item, list);
+ if (tmp->user->h4_channel != CHANNEL_HCI_LOGGER) {
+ dev_err(user->dev, "Other channels other than "
+ "Logger is already opened. Cannot open "
+ "HCI Raw Channel\n");
+ return -EACCES;
+ }
+ }
+ }
+
+ info->hci_raw = user;
+ err = cg2900_open(user);
+ if (err)
+ info->hci_raw = NULL;
+ return err;
+}
+
+/**
+ * cg2900_bt_audio_open() - Called when user wants to open BT audio channel.
+ * @user: MFD device to open.
+ *
+ * Registers user as bt_audio and calls @cg2900_open to open the channel.
+ *
+ * Returns:
+ * 0 if success.
+ * -EINVAL if user is NULL.
+ * -EEXIST if H4 channel is already opened.
+ * Error codes generated by cg2900_open.
+ */
+static int cg2900_bt_audio_open(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+ int err;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "cg2900_bt_audio_open: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(user->dev, "cg2900_bt_audio_open\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ if (info->bt_audio) {
+ dev_err(user->dev, "BT Audio already stored\n");
+ return -EEXIST;
+ }
+
+ info->bt_audio = user;
+ err = cg2900_open(user);
+ if (err)
+ info->bt_audio = NULL;
+ return err;
+}
+
+/**
+ * cg2900_fm_audio_open() - Called when user wants to open FM audio channel.
+ * @user: MFD device to open.
+ *
+ * Registers user as fm_audio and calls @cg2900_open to open the channel.
+ *
+ * Returns:
+ * 0 if success.
+ * -EINVAL if user is NULL.
+ * -EEXIST if H4 channel is already opened.
+ * Error codes generated by cg2900_open.
+ */
+static int cg2900_fm_audio_open(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+ int err;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "cg2900_fm_audio_open: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(user->dev, "cg2900_fm_audio_open\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ if (info->fm_audio) {
+ dev_err(user->dev, "FM Audio already stored\n");
+ return -EEXIST;
+ }
+
+ info->fm_audio = user;
+ err = cg2900_open(user);
+ if (err)
+ info->fm_audio = NULL;
+ return err;
+}
+
+/**
+ * cg2900_close() - Called when user wants to close an H4 channel.
+ * @user: MFD device to close.
+ *
+ * Cleans up internal resources, sets the channel as closed, and shuts down the
+ * chip if this was the last user.
+ */
+static void cg2900_close(struct cg2900_user_data *user)
+{
+ bool keep_powered = false;
+ struct list_head *cursor, *next;
+ struct cg2900_channel_item *tmp;
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV, "cg2900_close: Calling with NULL pointer\n");
+ return;
+ }
+
+ dev_dbg(user->dev, "cg2900_close\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ mutex_lock(&main_info->man_mutex);
+
+ /*
+ * Go through each open channel. Remove our channel and check if there
+ * is any other channel that wants to keep the chip running.
+ */
+ list_for_each_safe(cursor, next, &info->open_channels) {
+ tmp = list_entry(cursor, struct cg2900_channel_item, list);
+ if (tmp->user == user) {
+ list_del(cursor);
+ kfree(tmp);
+ } else if (!tmp->user->chip_independent)
+ keep_powered = true;
+ }
+
+ if (user->h4_channel == CHANNEL_BT_CMD && !bt_is_open(info))
+ last_bt_user_removed(info);
+ else if (user->h4_channel == CHANNEL_FM_RADIO && !fm_is_open(info))
+ last_fm_user_removed(info);
+
+ if (keep_powered)
+ /* This was not the last user, we're done. */
+ goto finished;
+
+ if (CG2900_IDLE == info->main_state)
+ /* Chip has already been shut down. */
+ goto finished;
+
+ dev_dbg(user->dev, "New main_state: CG2900_CLOSING\n");
+ info->main_state = CG2900_CLOSING;
+ chip_shutdown(user);
+
+ dev_dbg(user->dev, "Wait up to 15 seconds for chip to shut-down\n");
+ wait_event_timeout(main_wait_queue,
+ (CG2900_IDLE == info->main_state),
+ msecs_to_jiffies(CHIP_SHUTDOWN_TIMEOUT));
+
+ /* Force shutdown if we timed out */
+ if (CG2900_IDLE != info->main_state) {
+ dev_err(user->dev,
+ "ST-Ericsson CG2900 Core Driver was shut-down with "
+ "problems\n");
+
+ if (dev->t_cb.close)
+ dev->t_cb.close(dev);
+
+ dev_dbg(user->dev, "New main_state: CG2900_IDLE\n");
+ info->main_state = CG2900_IDLE;
+ }
+
+finished:
+ mutex_unlock(&main_info->man_mutex);
+ user->opened = false;
+ dev_dbg(user->dev, "H:4 channel closed\n");
+}
+
+/**
+ * cg2900_hci_log_close() - Called when user wants to close HCI logger channel.
+ * @user: MFD device to close.
+ *
+ * Clears hci_logger user and calls @cg2900_close to close the channel.
+ */
+static void cg2900_hci_log_close(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "cg2900_hci_log_close: Calling with NULL pointer\n");
+ return;
+ }
+
+ dev_dbg(user->dev, "cg2900_hci_log_close\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ if (user != info->logger) {
+ dev_err(user->dev, "cg2900_hci_log_close: Trying to remove "
+ "another user\n");
+ return;
+ }
+
+ info->logger = NULL;
+ cg2900_close(user);
+}
+
+/**
+ * cg2900_hci_raw_close() - Called when user wants to close HCI Raw channel.
+ * @user: MFD device to close.
+ *
+ * Clears hci_raw user and calls @cg2900_close to close the channel.
+ */
+static void cg2900_hci_raw_close(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "cg2900_hci_raw_close: Calling with NULL pointer\n");
+ return;
+ }
+
+ dev_dbg(user->dev, "cg2900_hci_raw_close\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ if (user != info->hci_raw) {
+ dev_err(user->dev, "cg2900_hci_raw_close: Trying to remove "
+ "another user\n");
+ return;
+ }
+
+ info->hci_raw = NULL;
+ cg2900_close(user);
+}
+
+/**
+ * cg2900_bt_audio_close() - Called when user wants to close BT audio channel.
+ * @user: MFD device to close.
+ *
+ * Clears bt_audio user and calls @cg2900_close to close the channel.
+ */
+static void cg2900_bt_audio_close(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "cg2900_bt_audio_close: Calling with NULL pointer\n");
+ return;
+ }
+
+ dev_dbg(user->dev, "cg2900_bt_audio_close\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ if (user != info->bt_audio) {
+ dev_err(user->dev, "cg2900_bt_audio_close: Trying to remove "
+ "another user\n");
+ return;
+ }
+
+ info->bt_audio = NULL;
+ cg2900_close(user);
+}
+
+/**
+ * cg2900_fm_audio_close() - Called when user wants to close FM audio channel.
+ * @user: MFD device to close.
+ *
+ * Clears fm_audio user and calls @cg2900_close to close the channel.
+ */
+static void cg2900_fm_audio_close(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "cg2900_fm_audio_close: Calling with NULL pointer\n");
+ return;
+ }
+
+ dev_dbg(user->dev, "cg2900_fm_audio_close\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ if (user != info->fm_audio) {
+ dev_err(user->dev, "cg2900_fm_audio_close: Trying to remove "
+ "another user\n");
+ return;
+ }
+
+ info->fm_audio = NULL;
+ cg2900_close(user);
+}
+
+/**
+ * cg2900_reset() - Called when user wants to reset the chip.
+ * @user: MFD device to reset.
+ *
+ * Closes down the chip and calls reset_cb for all open users.
+ *
+ * Returns:
+ * 0 if success.
+ * -EINVAL if user is NULL.
+ */
+static int cg2900_reset(struct cg2900_user_data *user)
+{
+ struct list_head *cursor, *next;
+ struct cg2900_channel_item *tmp;
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+
+ if (!user) {
+ dev_err(MAIN_DEV, "cg2900_reset: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ dev_info(user->dev, "cg2900_reset\n");
+
+ BUG_ON(!main_info);
+
+ mutex_lock(&main_info->man_mutex);
+
+ dev_dbg(user->dev, "New main_state: CG2900_RESETING\n");
+ info->main_state = CG2900_RESETING;
+
+ chip_shutdown(user);
+
+ /*
+ * Inform all opened channels about the reset and free the user devices
+ */
+ list_for_each_safe(cursor, next, &info->open_channels) {
+ tmp = list_entry(cursor, struct cg2900_channel_item, list);
+ list_del(cursor);
+ tmp->user->opened = false;
+ tmp->user->reset_cb(tmp->user);
+ kfree(tmp);
+ }
+
+ /* Reset finished. We are now idle until first channel is opened */
+ dev_dbg(user->dev, "New main_state: CG2900_IDLE\n");
+ info->main_state = CG2900_IDLE;
+
+ mutex_unlock(&main_info->man_mutex);
+
+ /*
+ * Send wake-up since this might have been called from a failed boot.
+ * No harm done if it is a CG2900 chip user who called.
+ */
+ wake_up_all(&main_wait_queue);
+
+ return 0;
+}
+
+/**
+ * cg2900_alloc_skb() - Allocates socket buffer.
+ * @size: Sk_buffer size in bytes.
+ * @priority: GFP priority for allocation.
+ *
+ * Allocates a sk_buffer and reserves space for H4 header.
+ *
+ * Returns:
+ * sk_buffer if success.
+ * NULL if allocation fails.
+ */
+static struct sk_buff *cg2900_alloc_skb(unsigned int size, gfp_t priority)
+{
+ struct sk_buff *skb;
+
+ dev_dbg(MAIN_DEV, "cg2900_alloc_skb size %d bytes\n", size);
+
+ /* Allocate the SKB and reserve space for the header */
+ skb = alloc_skb(size + CG2900_SKB_RESERVE, priority);
+ if (skb)
+ skb_reserve(skb, CG2900_SKB_RESERVE);
+
+ return skb;
+}
+
+/**
+ * cg2900_write() - Called when user wants to write to the chip.
+ * @user: MFD device representing H4 channel to write to.
+ * @skb: Sk_buffer to transmit.
+ *
+ * Transmits the sk_buffer to the chip. If it is a BT command or FM radio
+ * packet, it is checked that it is allowed to transmit to the chip.
+ * Note that if error is returned it is up to the user to free the skb.
+ *
+ * Returns:
+ * 0 if success.
+ * -EINVAL if user or skb is NULL.
+ * -EACCES if channel is closed.
+ */
+static int cg2900_write(struct cg2900_user_data *user, struct sk_buff *skb)
+{
+ u8 *h4_header;
+ struct cg2900_chip_dev *dev;
+ struct cg2900_chip_info *info;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV, "cg2900_write: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ if (!skb) {
+ dev_err(user->dev, "cg2900_write with no sk_buffer\n");
+ return -EINVAL;
+ }
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ dev_dbg(user->dev, "cg2900_write length %d bytes\n", skb->len);
+
+ if (!user->opened) {
+ dev_err(user->dev,
+ "Trying to transmit data on a closed channel\n");
+ return -EACCES;
+ }
+
+ if (user->h4_channel == CHANNEL_HCI_RAW) {
+ /*
+ * Since the data transmitted on the HCI Raw channel can
+ * arrive byte by byte, flow control cannot be used here.
+ * It should be handled by the user space application using
+ * the HCI Raw channel, so just transmit the received data
+ * to the chip.
+ */
+ cg2900_tx_to_chip(user, info->logger, skb);
+ return 0;
+ }
+
+ /*
+ * Move the data pointer to the H:4 header position and
+ * store the H4 header.
+ */
+ h4_header = skb_push(skb, CG2900_SKB_RESERVE);
+ *h4_header = (u8)user->h4_channel;
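+ /*
+ * The buffer now begins with the H:4 channel ID byte (written into
+ * the reserved headroom) followed by the original HCI payload, i.e.
+ * the H:4 framing expected by the underlying transport.
+ */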
+
+ if (user->h4_channel == CHANNEL_BT_CMD)
+ transmit_skb_with_flow_ctrl_bt(user, skb);
+ else if (user->h4_channel == CHANNEL_FM_RADIO)
+ transmit_skb_with_flow_ctrl_fm(user, skb);
+ else
+ cg2900_tx_to_chip(user, info->logger, skb);
+
+ return 0;
+}
+
+/**
+ * cg2900_no_write() - Used for channels where it is not allowed to write.
+ * @user: MFD device representing H4 channel to write to.
+ * @skb: Sk_buffer to transmit.
+ *
+ * Returns:
+ * -EPERM.
+ */
+static int cg2900_no_write(struct cg2900_user_data *user,
+ __attribute__((unused)) struct sk_buff *skb)
+{
+ dev_err(user->dev, "Not allowed to send on this channel\n");
+ return -EPERM;
+}
+
+/**
+ * cg2900_get_local_revision() - Called to retrieve revision data for the chip.
+ * @user: MFD device to check.
+ * @rev_data: Revision data to fill in.
+ *
+ * Returns:
+ * true if success.
+ * false upon failure.
+ */
+static bool cg2900_get_local_revision(struct cg2900_user_data *user,
+ struct cg2900_rev_data *rev_data)
+{
+ struct cg2900_chip_dev *dev;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV, "cg2900_get_local_revision: Calling with "
+ "NULL pointer\n");
+ return false;
+ }
+
+ if (!rev_data) {
+ dev_err(user->dev, "Calling with rev_data NULL\n");
+ return false;
+ }
+
+ dev = cg2900_get_prv(user);
+
+ rev_data->revision = dev->chip.hci_revision;
+ rev_data->sub_version = dev->chip.hci_sub_version;
+
+ return true;
+}
+
+static struct cg2900_user_data btcmd_data = {
+ .h4_channel = CHANNEL_BT_CMD,
+};
+static struct cg2900_user_data btacl_data = {
+ .h4_channel = CHANNEL_BT_ACL,
+};
+static struct cg2900_user_data btevt_data = {
+ .h4_channel = CHANNEL_BT_EVT,
+};
+static struct cg2900_user_data fm_data = {
+ .h4_channel = CHANNEL_FM_RADIO,
+};
+static struct cg2900_user_data gnss_data = {
+ .h4_channel = CHANNEL_GNSS,
+};
+static struct cg2900_user_data debug_data = {
+ .h4_channel = CHANNEL_DEBUG,
+};
+static struct cg2900_user_data ste_tools_data = {
+ .h4_channel = CHANNEL_STE_TOOLS,
+};
+static struct cg2900_user_data hci_logger_data = {
+ .h4_channel = CHANNEL_HCI_LOGGER,
+ .chip_independent = true,
+ .write = cg2900_no_write,
+ .open = cg2900_hci_log_open,
+ .close = cg2900_hci_log_close,
+};
+static struct cg2900_user_data core_data = {
+ .h4_channel = CHANNEL_CORE,
+ .write = cg2900_no_write,
+};
+static struct cg2900_user_data audio_bt_data = {
+ .h4_channel = CHANNEL_BT_CMD,
+ .is_audio = true,
+ .open = cg2900_bt_audio_open,
+ .close = cg2900_bt_audio_close,
+};
+static struct cg2900_user_data audio_fm_data = {
+ .h4_channel = CHANNEL_FM_RADIO,
+ .is_audio = true,
+ .open = cg2900_fm_audio_open,
+ .close = cg2900_fm_audio_close,
+};
+static struct cg2900_user_data hci_raw_data = {
+ .h4_channel = CHANNEL_HCI_RAW,
+ .open = cg2900_hci_raw_open,
+ .close = cg2900_hci_raw_close,
+};
+
+static struct mfd_cell cg2900_devs[] = {
+ {
+ .name = "cg2900-btcmd",
+ .platform_data = &btcmd_data,
+ .pdata_size = sizeof(btcmd_data),
+ },
+ {
+ .name = "cg2900-btacl",
+ .platform_data = &btacl_data,
+ .pdata_size = sizeof(btacl_data),
+ },
+ {
+ .name = "cg2900-btevt",
+ .platform_data = &btevt_data,
+ .pdata_size = sizeof(btevt_data),
+ },
+ {
+ .name = "cg2900-fm",
+ .platform_data = &fm_data,
+ .pdata_size = sizeof(fm_data),
+ },
+ {
+ .name = "cg2900-gnss",
+ .platform_data = &gnss_data,
+ .pdata_size = sizeof(gnss_data),
+ },
+ {
+ .name = "cg2900-debug",
+ .platform_data = &debug_data,
+ .pdata_size = sizeof(debug_data),
+ },
+ {
+ .name = "cg2900-stetools",
+ .platform_data = &ste_tools_data,
+ .pdata_size = sizeof(ste_tools_data),
+ },
+ {
+ .name = "cg2900-hcilogger",
+ .platform_data = &hci_logger_data,
+ .pdata_size = sizeof(hci_logger_data),
+ },
+ {
+ .name = "cg2900-core",
+ .platform_data = &core_data,
+ .pdata_size = sizeof(core_data),
+ },
+ {
+ .name = "cg2900-audiobt",
+ .platform_data = &audio_bt_data,
+ .pdata_size = sizeof(audio_bt_data),
+ },
+ {
+ .name = "cg2900-audiofm",
+ .platform_data = &audio_fm_data,
+ .pdata_size = sizeof(audio_fm_data),
+ },
+ {
+ .name = "cg2900-hciraw",
+ .platform_data = &hci_raw_data,
+ .pdata_size = sizeof(hci_raw_data),
+ },
+};
+
+static struct cg2900_user_data char_btcmd_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_BT_CMD,
+ },
+ .h4_channel = CHANNEL_BT_CMD,
+};
+static struct cg2900_user_data char_btacl_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_BT_ACL,
+ },
+ .h4_channel = CHANNEL_BT_ACL,
+};
+static struct cg2900_user_data char_btevt_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_BT_EVT,
+ },
+ .h4_channel = CHANNEL_BT_EVT,
+};
+static struct cg2900_user_data char_fm_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_FM_RADIO,
+ },
+ .h4_channel = CHANNEL_FM_RADIO,
+};
+static struct cg2900_user_data char_gnss_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_GNSS,
+ },
+ .h4_channel = CHANNEL_GNSS,
+};
+static struct cg2900_user_data char_debug_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_DEBUG,
+ },
+ .h4_channel = CHANNEL_DEBUG,
+};
+static struct cg2900_user_data char_ste_tools_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_STE_TOOLS,
+ },
+ .h4_channel = CHANNEL_STE_TOOLS,
+};
+static struct cg2900_user_data char_hci_logger_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_HCI_LOGGER,
+ },
+ .h4_channel = CHANNEL_HCI_LOGGER,
+ .chip_independent = true,
+ .write = cg2900_no_write,
+ .open = cg2900_hci_log_open,
+ .close = cg2900_hci_log_close,
+};
+static struct cg2900_user_data char_core_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_CORE,
+ },
+ .h4_channel = CHANNEL_CORE,
+ .write = cg2900_no_write,
+};
+static struct cg2900_user_data char_audio_bt_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_BT_AUDIO,
+ },
+ .h4_channel = CHANNEL_BT_CMD,
+ .is_audio = true,
+};
+static struct cg2900_user_data char_audio_fm_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_FM_AUDIO,
+ },
+ .h4_channel = CHANNEL_FM_RADIO,
+ .is_audio = true,
+};
+static struct cg2900_user_data char_hci_raw_data = {
+ .channel_data = {
+ .char_dev_name = CG2900_HCI_RAW,
+ },
+ .h4_channel = CHANNEL_HCI_RAW,
+ .open = cg2900_hci_raw_open,
+ .close = cg2900_hci_raw_close,
+};
+
+
+static struct mfd_cell cg2900_char_devs[] = {
+ {
+ .name = "cg2900-chardev",
+ .id = 0,
+ .platform_data = &char_btcmd_data,
+ .pdata_size = sizeof(char_btcmd_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 1,
+ .platform_data = &char_btacl_data,
+ .pdata_size = sizeof(char_btacl_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 2,
+ .platform_data = &char_btevt_data,
+ .pdata_size = sizeof(char_btevt_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 3,
+ .platform_data = &char_fm_data,
+ .pdata_size = sizeof(char_fm_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 4,
+ .platform_data = &char_gnss_data,
+ .pdata_size = sizeof(char_gnss_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 5,
+ .platform_data = &char_debug_data,
+ .pdata_size = sizeof(char_debug_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 6,
+ .platform_data = &char_ste_tools_data,
+ .pdata_size = sizeof(char_ste_tools_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 7,
+ .platform_data = &char_hci_logger_data,
+ .pdata_size = sizeof(char_hci_logger_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 8,
+ .platform_data = &char_core_data,
+ .pdata_size = sizeof(char_core_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 9,
+ .platform_data = &char_audio_bt_data,
+ .pdata_size = sizeof(char_audio_bt_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 10,
+ .platform_data = &char_audio_fm_data,
+ .pdata_size = sizeof(char_audio_fm_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 11,
+ .platform_data = &char_hci_raw_data,
+ .pdata_size = sizeof(char_hci_raw_data),
+ },
+};
+
+/**
+ * set_plat_data() - Initializes data for an MFD cell.
+ * @cell: MFD cell.
+ * @dev: Current chip.
+ *
+ * Sets each callback to default function unless already set.
+ */
+static void set_plat_data(struct mfd_cell *cell, struct cg2900_chip_dev *dev)
+{
+ struct cg2900_user_data *pf_data = cell->platform_data;
+
+ if (!pf_data->open)
+ pf_data->open = cg2900_open;
+ if (!pf_data->close)
+ pf_data->close = cg2900_close;
+ if (!pf_data->reset)
+ pf_data->reset = cg2900_reset;
+ if (!pf_data->alloc_skb)
+ pf_data->alloc_skb = cg2900_alloc_skb;
+ if (!pf_data->write)
+ pf_data->write = cg2900_write;
+ if (!pf_data->get_local_revision)
+ pf_data->get_local_revision = cg2900_get_local_revision;
+
+ cg2900_set_prv(pf_data, dev);
+}
+
+/**
+ * check_chip_support() - Checks if connected chip is handled by this driver.
+ * @dev: Chip info structure.
+ *
+ * First check if the chip is supported by this driver. If that is the case,
+ * fill in the callbacks in @dev and initialize internal variables. Finally,
+ * create MFD devices for all supported H4 channels. When finished, power off
+ * the chip.
+ *
+ * Returns:
+ * true if chip is handled by this driver.
+ * false otherwise.
+ */
+static bool check_chip_support(struct cg2900_chip_dev *dev)
+{
+ struct cg2900_platform_data *pf_data;
+ struct cg2900_chip_info *info;
+ int i;
+
+ dev_dbg(dev->dev, "check_chip_support\n");
+
+ /*
+ * Check if this is a CG2900 revision.
+ * We do not care about the sub-version at the moment. Change this if
+ * necessary.
+ */
+ if ((dev->chip.manufacturer != CG2900_SUPP_MANUFACTURER) ||
+ (dev->chip.hci_revision != CG2900_PG1_SPECIAL_REV &&
+ (dev->chip.hci_revision < CG2900_SUPP_REVISION_MIN ||
+ dev->chip.hci_revision > CG2900_SUPP_REVISION_MAX))) {
+ dev_dbg(dev->dev, "Chip not supported by CG2900 driver\n"
+ "\tMan: 0x%02X\n"
+ "\tRev: 0x%04X\n"
+ "\tSub: 0x%04X\n",
+ dev->chip.manufacturer, dev->chip.hci_revision,
+ dev->chip.hci_sub_version);
+ return false;
+ }
+
+ /* Store needed data */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev->dev, "Couldn't allocate info struct\n");
+ return false;
+ }
+
+ /* Initialize all variables */
+ skb_queue_head_init(&info->tx_queue_bt);
+ skb_queue_head_init(&info->tx_queue_fm);
+
+ INIT_LIST_HEAD(&info->open_channels);
+
+ spin_lock_init(&info->tx_bt_lock);
+ spin_lock_init(&info->tx_fm_lock);
+ spin_lock_init(&info->rw_lock);
+
+ info->tx_nr_pkts_allowed_bt = 1;
+ info->audio_bt_cmd_op = CG2900_BT_OPCODE_NONE;
+ info->audio_fm_cmd_id = CG2900_FM_CMD_NONE;
+ info->hci_fm_cmd_func = CG2900_FM_CMD_PARAM_NONE;
+ info->fm_radio_mode = FM_RADIO_MODE_IDLE;
+ info->chip_dev = dev;
+ info->dev = dev->dev;
+
+ info->wq = create_singlethread_workqueue(WQ_NAME);
+ if (!info->wq) {
+ dev_err(dev->dev, "Could not create workqueue\n");
+ goto err_handling_free_info;
+ }
+
+ info->patch_file_name = kzalloc(NAME_MAX + 1, GFP_ATOMIC);
+ if (!info->patch_file_name) {
+ dev_err(dev->dev,
+ "Couldn't allocate name buffer for patch file\n");
+ goto err_handling_destroy_wq;
+ }
+
+ info->settings_file_name = kzalloc(NAME_MAX + 1, GFP_ATOMIC);
+ if (!info->settings_file_name) {
+ dev_err(dev->dev,
+ "Couldn't allocate name buffers settings file\n");
+ goto err_handling_free_patch_name;
+ }
+
+ info->selftest_work.data = info;
+ INIT_DELAYED_WORK(&info->selftest_work.work,
+ work_send_read_selftest_cmd);
+
+ dev->c_data = info;
+ /* Set the callbacks */
+ dev->c_cb.data_from_chip = data_from_chip;
+ dev->c_cb.chip_removed = chip_removed;
+
+ mutex_lock(&main_info->man_mutex);
+
+ pf_data = dev_get_platdata(dev->dev);
+ btcmd_data.channel_data.bt_bus = pf_data->bus;
+ btacl_data.channel_data.bt_bus = pf_data->bus;
+ btevt_data.channel_data.bt_bus = pf_data->bus;
+
+ for (i = 0; i < ARRAY_SIZE(cg2900_devs); i++)
+ set_plat_data(&cg2900_devs[i], dev);
+ for (i = 0; i < ARRAY_SIZE(cg2900_char_devs); i++)
+ set_plat_data(&cg2900_char_devs[i], dev);
+
+ info->startup = true;
+ info->mfd_size = ARRAY_SIZE(cg2900_devs);
+ info->mfd_char_size = ARRAY_SIZE(cg2900_char_devs);
+
+ /*
+ * The devices will be registered when the chip has been powered down,
+ * i.e. when system startup has completed.
+ */
+
+ mutex_unlock(&main_info->man_mutex);
+
+ dev_info(dev->dev, "Chip supported by the CG2900 chip driver\n");
+
+ /* Finish by turning off the chip */
+ cg2900_create_work_item(info->wq, work_power_off_chip, dev);
+
+ return true;
+
+err_handling_free_patch_name:
+ kfree(info->patch_file_name);
+err_handling_destroy_wq:
+ destroy_workqueue(info->wq);
+err_handling_free_info:
+ kfree(info);
+ return false;
+}
+
+static struct cg2900_id_callbacks chip_support_callbacks = {
+ .check_chip_support = check_chip_support,
+};
+
+/**
+ * cg2900_chip_probe() - Initialize CG2900 chip handler resources.
+ * @pdev: Platform device.
+ *
+ * This function initializes the CG2900 driver, then registers to
+ * the CG2900 Core.
+ *
+ * Returns:
+ * 0 if success.
+ * -ENOMEM for failed alloc or structure creation.
+ * Error codes generated by cg2900_register_chip_driver.
+ */
+static int __devinit cg2900_chip_probe(struct platform_device *pdev)
+{
+ int err;
+
+ dev_dbg(&pdev->dev, "cg2900_chip_probe\n");
+
+ main_info = kzalloc(sizeof(*main_info), GFP_ATOMIC);
+ if (!main_info) {
+ dev_err(&pdev->dev, "Couldn't allocate main_info\n");
+ return -ENOMEM;
+ }
+
+ main_info->dev = &pdev->dev;
+ mutex_init(&main_info->man_mutex);
+
+ err = cg2900_register_chip_driver(&chip_support_callbacks);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Couldn't register chip driver (%d)\n", err);
+ goto error_handling;
+ }
+
+ dev_info(&pdev->dev, "CG2900 chip driver started\n");
+
+ return 0;
+
+error_handling:
+ mutex_destroy(&main_info->man_mutex);
+ kfree(main_info);
+ main_info = NULL;
+ return err;
+}
+
+/**
+ * cg2900_chip_remove() - Release CG2900 chip handler resources.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if success (always success).
+ */
+static int __devexit cg2900_chip_remove(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "CG2900 chip driver removed\n");
+
+ cg2900_deregister_chip_driver(&chip_support_callbacks);
+
+ if (!main_info)
+ return 0;
+ mutex_destroy(&main_info->man_mutex);
+ kfree(main_info);
+ main_info = NULL;
+ return 0;
+}
+
+static struct platform_driver cg2900_chip_driver = {
+ .driver = {
+ .name = "cg2900-chip",
+ .owner = THIS_MODULE,
+ },
+ .probe = cg2900_chip_probe,
+ .remove = __devexit_p(cg2900_chip_remove),
+};
+
+/**
+ * cg2900_chip_init() - Initialize module.
+ *
+ * Registers platform driver.
+ */
+static int __init cg2900_chip_init(void)
+{
+ pr_debug("cg2900_chip_init");
+ return platform_driver_register(&cg2900_chip_driver);
+}
+
+/**
+ * cg2900_chip_exit() - Remove module.
+ *
+ * Unregisters platform driver.
+ */
+static void __exit cg2900_chip_exit(void)
+{
+ pr_debug("cg2900_chip_exit");
+ platform_driver_unregister(&cg2900_chip_driver);
+}
+
+module_init(cg2900_chip_init);
+module_exit(cg2900_chip_exit);
+
+MODULE_AUTHOR("Par-Gunnar Hjalmdahl ST-Ericsson");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Linux CG2900 Connectivity Device Driver");
diff --git a/drivers/staging/cg2900/mfd/cg2900_chip.h b/drivers/staging/cg2900/mfd/cg2900_chip.h
new file mode 100644
index 00000000000..b3fd556b7d7
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/cg2900_chip.h
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth HCI H:4 Driver for ST-Ericsson CG2900 GPS/BT/FM controller.
+ */
+
+#ifndef _CG2900_CHIP_H_
+#define _CG2900_CHIP_H_
+
+/*
+ * Utility
+ */
+
+static inline void set_low_nibble(__u8 *var, __u8 value)
+{
+ *var = (*var & 0xf0) | (value & 0x0f);
+}
+
+static inline void set_high_nibble(__u8 *var, __u8 value)
+{
+ *var = (*var & 0x0f) | (value << 4);
+}
+
+static inline void store_bit(__u8 *var, size_t bit, __u8 value)
+{
+ *var = (*var & ~(1u << bit)) | (value << bit);
+}
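+
+/*
+ * Illustrative example: starting from var = 0x00, set_high_nibble(&var, 0x2)
+ * gives 0x20 and store_bit(&var, 0, 1) then gives 0x21. The HWCONFIG_*,
+ * SESSIONCFG_* and PORTCFG_* macros below compose command fields using these
+ * helpers.
+ */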
+
+/*
+ * General chip defines
+ */
+
+/* Supported chips */
+#define CG2900_SUPP_MANUFACTURER 0x30
+#define CG2900_SUPP_REVISION_MIN 0x0100
+#define CG2900_SUPP_REVISION_MAX 0x0200
+
+/* Specific chip version data */
+#define CG2900_PG1_REV 0x0101
+#define CG2900_PG2_REV 0x0200
+#define CG2900_PG1_SPECIAL_REV 0x0700
+
+/*
+ * Bluetooth
+ */
+
+#define BT_SIZE_OF_HDR (sizeof(__le16) + sizeof(__u8))
+#define BT_PARAM_LEN(__pkt_len) (__pkt_len - BT_SIZE_OF_HDR)
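+/*
+ * Illustrative note: BT_SIZE_OF_HDR evaluates to 3 (2-byte opcode plus 1-byte
+ * parameter length), so BT_PARAM_LEN(__pkt_len) gives the number of parameter
+ * bytes that follow the HCI command header.
+ */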
+
+struct bt_cmd_cmpl_event {
+ __u8 eventcode;
+ __u8 plen;
+ __u8 n_commands;
+ __le16 opcode;
+ /*
+ * According to the BT specification, what follows is "parameters",
+ * which are unique to every command; however, all commands start the
+ * parameters with the status field, so it is included here for
+ * convenience.
+ */
+ __u8 status;
+ __u8 data[];
+} __packed;
+
+/* BT VS Store In FS command */
+#define CG2900_BT_OP_VS_STORE_IN_FS 0xFC22
+struct bt_vs_store_in_fs_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 user_id;
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+#define CG2900_VS_STORE_IN_FS_USR_ID_BD_ADDR 0xFE
+
+/* BT VS Write File Block command */
+#define CG2900_BT_OP_VS_WRITE_FILE_BLOCK 0xFC2E
+struct bt_vs_write_file_block_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 id;
+ __u8 data[];
+} __packed;
+
+#define CG2900_BT_DISABLE 0x00
+#define CG2900_BT_ENABLE 0x01
+
+/* BT VS BT Enable command */
+#define CG2900_BT_OP_VS_BT_ENABLE 0xFF10
+struct bt_vs_bt_enable_cmd {
+ __le16 op_code;
+ u8 plen;
+ u8 enable;
+} __packed;
+
+/* Bytes in the command Hci_Cmd_ST_Set_Uart_Baud_Rate */
+#define CG2900_BAUD_RATE_57600 0x03
+#define CG2900_BAUD_RATE_115200 0x02
+#define CG2900_BAUD_RATE_230400 0x01
+#define CG2900_BAUD_RATE_460800 0x00
+#define CG2900_BAUD_RATE_921600 0x20
+#define CG2900_BAUD_RATE_2000000 0x25
+#define CG2900_BAUD_RATE_3000000 0x27
+#define CG2900_BAUD_RATE_3250000 0x28
+#define CG2900_BAUD_RATE_4000000 0x2B
+
+/* BT VS SetBaudRate command */
+#define CG2900_BT_OP_VS_SET_BAUD_RATE 0xFC09
+struct bt_vs_set_baud_rate_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 baud_rate;
+} __packed;
+
+#define CG2900_BT_SELFTEST_SUCCESSFUL 0x00
+#define CG2900_BT_SELFTEST_FAILED 0x01
+#define CG2900_BT_SELFTEST_NOT_COMPLETED 0x02
+
+/* BT VS ReadSelfTestsResult command & event */
+#define CG2900_BT_OP_VS_READ_SELTESTS_RESULT 0xFC10
+struct bt_vs_read_selftests_result_evt {
+ __u8 status;
+ __u8 result;
+} __packed;
+
+/* Bluetooth Vendor Specific Opcodes */
+#define CG2900_BT_OP_VS_POWER_SWITCH_OFF 0xFD40
+#define CG2900_BT_OP_VS_SYSTEM_RESET 0xFF12
+
+#define CG2900_BT_OPCODE_NONE 0xFFFF
+
+/*
+ * Common multimedia
+ */
+
+#define CG2900_CODEC_TYPE_NONE 0x00
+#define CG2900_CODEC_TYPE_SBC 0x01
+
+#define CG2900_PCM_MODE_SLAVE 0x00
+#define CG2900_PCM_MODE_MASTER 0x01
+
+#define CG2900_I2S_MODE_MASTER 0x00
+#define CG2900_I2S_MODE_SLAVE 0x01
+
+/*
+ * CG2900 PG1 multimedia API
+ */
+
+#define CG2900_BT_VP_TYPE_PCM 0x00
+#define CG2900_BT_VP_TYPE_I2S 0x01
+#define CG2900_BT_VP_TYPE_SLIMBUS 0x02
+#define CG2900_BT_VP_TYPE_FM 0x03
+#define CG2900_BT_VP_TYPE_BT_SCO 0x04
+#define CG2900_BT_VP_TYPE_BT_A2DP 0x05
+#define CG2900_BT_VP_TYPE_ANALOG 0x07
+
+#define CG2900_BT_VS_SET_HARDWARE_CONFIG 0xFD54
+/* These don't have the same length, so a union won't work */
+struct bt_vs_set_hw_cfg_cmd_pcm {
+ __le16 opcode;
+ __u8 plen;
+ __u8 vp_type;
+ __u8 port_id;
+ __u8 mode_dir; /* NB: mode is in bit 1 (not 0) */
+ __u8 bit_clock;
+ __le16 frame_len;
+} __packed;
+#define HWCONFIG_PCM_SET_MODE(pcfg, mode) \
+ set_low_nibble(&(pcfg)->mode_dir, (mode) << 1)
+#define HWCONFIG_PCM_SET_DIR(pcfg, idx, dir) \
+ store_bit(&(pcfg)->mode_dir, (idx) + 4, (dir))
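+/*
+ * For example (illustrative): HWCONFIG_PCM_SET_MODE(pcfg, CG2900_PCM_MODE_MASTER)
+ * writes 0x2 into the low nibble of mode_dir (the mode value 0x01 shifted into
+ * bit 1), and HWCONFIG_PCM_SET_DIR(pcfg, 0, 1) sets bit 4.
+ */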
+
+struct bt_vs_set_hw_cfg_cmd_i2s {
+ __le16 opcode;
+ __u8 plen;
+ __u8 vp_type;
+ __u8 port_id;
+ __u8 half_period;
+ __u8 master_slave;
+} __packed;
+
+/* Max length for allocating */
+#define CG2900_BT_LEN_VS_SET_HARDWARE_CONFIG \
+ (sizeof(struct bt_vs_set_hw_cfg_cmd_pcm))
+
+#define CG2900_BT_VS_SET_SESSION_CONFIG 0xFD55
+struct session_config_vport {
+ __u8 type;
+ union {
+ struct {
+ __le16 acl_handle;
+ __u8 reserved[10];
+ } sco;
+ struct {
+ __u8 reserved[12];
+ } fm;
+ struct {
+ __u8 index;
+ __u8 slots_used;
+ __u8 slot_start[4];
+ __u8 reserved[6];
+ } pcm;
+ struct {
+ __u8 index;
+ __u8 channel;
+ __u8 reserved[10];
+ } i2s;
+ };
+} __packed;
+#define SESSIONCFG_PCM_SET_USED(port, idx, use) \
+ store_bit(&(port).pcm.slots_used, (idx), (use))
+
+struct session_config_stream {
+ __u8 media_type;
+ __u8 csel_srate;
+ __u8 codec_type;
+ __u8 codec_mode;
+ __u8 codec_params[3];
+ struct session_config_vport inport;
+ struct session_config_vport outport;
+} __packed;
+#define SESSIONCFG_SET_CHANNELS(pcfg, chnl) \
+ set_low_nibble(&(pcfg)->csel_srate, (chnl))
+#define SESSIONCFG_I2S_SET_SRATE(pcfg, rate) \
+ set_high_nibble(&(pcfg)->csel_srate, (rate))
+
+struct bt_vs_session_config_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 n_streams; /* we only support one here */
+ struct session_config_stream stream;
+} __packed;
+
+#define CG2900_BT_SESSION_MEDIA_TYPE_AUDIO 0x00
+
+#define CG2900_BT_SESSION_RATE_8K 0x01
+#define CG2900_BT_SESSION_RATE_16K 0x02
+#define CG2900_BT_SESSION_RATE_44_1K 0x04
+#define CG2900_BT_SESSION_RATE_48K 0x05
+
+#define CG2900_BT_MEDIA_CONFIG_MONO 0x00
+#define CG2900_BT_MEDIA_CONFIG_STEREO 0x01
+#define CG2900_BT_MEDIA_CONFIG_JOINT_STEREO 0x02
+#define CG2900_BT_MEDIA_CONFIG_DUAL_CHANNEL 0x03
+
+#define CG2900_BT_SESSION_I2S_INDEX_I2S 0x00
+#define CG2900_BT_SESSION_PCM_INDEX_PCM_I2S 0x00
+
+
+#define CG2900_BT_VS_SESSION_CTRL 0xFD57
+struct bt_vs_session_ctrl_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 id;
+ __u8 control;
+} __packed;
+
+#define CG2900_BT_SESSION_START 0x00
+#define CG2900_BT_SESSION_STOP 0x01
+#define CG2900_BT_SESSION_PAUSE 0x02
+#define CG2900_BT_SESSION_RESUME 0x03
+
+#define CG2900_BT_VS_RESET_SESSION_CONFIG 0xFD56
+struct bt_vs_reset_session_cfg_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 id;
+} __packed;
+
+/*
+ * CG2900 PG2 multimedia API
+ */
+
+#define CG2900_MC_PORT_PCM_I2S 0x00
+#define CG2900_MC_PORT_I2S 0x01
+#define CG2900_MC_PORT_BT_SCO 0x04
+#define CG2900_MC_PORT_FM_RX_0 0x07
+#define CG2900_MC_PORT_FM_RX_1 0x08
+#define CG2900_MC_PORT_FM_TX 0x09
+
+#define CG2900_MC_VS_PORT_CONFIG 0xFD64
+struct mc_vs_port_cfg_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 type;
+ /*
+ * one of the following configuration structs should follow, but they
+ * have different lengths so a union will not work
+ */
+} __packed;
+
+struct mc_vs_port_cfg_pcm_i2s {
+ __u8 role_dir;
+ __u8 sco_a2dp_slots_used;
+ __u8 fm_slots_used;
+ __u8 ring_slots_used;
+ __u8 slot_start[4];
+ __u8 ratio_mode;
+ __u8 frame_len;
+ __u8 bitclk_srate;
+} __packed;
+#define PORTCFG_PCM_SET_ROLE(cfg, role) \
+ set_low_nibble(&(cfg).role_dir, (role))
+#define PORTCFG_PCM_SET_DIR(cfg, idx, dir) \
+ store_bit(&(cfg).role_dir, (idx) + 4, (dir))
+static inline void portcfg_pcm_set_sco_used(struct mc_vs_port_cfg_pcm_i2s *cfg,
+ size_t index, __u8 use)
+{
+ if (use) {
+ /* clear corresponding slot in all cases */
+ cfg->sco_a2dp_slots_used &= ~(0x11 << index);
+ cfg->fm_slots_used &= ~(0x11 << index);
+ cfg->ring_slots_used &= ~(0x11 << index);
+ /* set for sco */
+ cfg->sco_a2dp_slots_used |= (1u << index);
+ } else {
+ /* only clear for sco */
+ cfg->sco_a2dp_slots_used &= ~(1u << index);
+ }
+}
+#define PORTCFG_PCM_SET_SCO_USED(cfg, idx, use) \
+ portcfg_pcm_set_sco_used(&cfg, idx, use)
+#define PORTCFG_PCM_SET_RATIO(cfg, r) \
+ set_low_nibble(&(cfg).ratio_mode, (r))
+#define PORTCFG_PCM_SET_MODE(cfg, mode) \
+ set_high_nibble(&(cfg).ratio_mode, (mode))
+#define PORTCFG_PCM_SET_BITCLK(cfg, clk) \
+ set_low_nibble(&(cfg).bitclk_srate, (clk))
+#define PORTCFG_PCM_SET_SRATE(cfg, rate) \
+ set_high_nibble(&(cfg).bitclk_srate, (rate))
+
+#define CG2900_MC_PCM_SAMPLE_RATE_8 1
+#define CG2900_MC_PCM_SAMPLE_RATE_16 2
+#define CG2900_MC_PCM_SAMPLE_RATE_44_1 4
+#define CG2900_MC_PCM_SAMPLE_RATE_48 6
+
+struct mc_vs_port_cfg_i2s {
+ __u8 role_hper;
+ __u8 csel_srate;
+ __u8 wordlen;
+};
+#define PORTCFG_I2S_SET_ROLE(cfg, role) \
+ set_low_nibble(&(cfg).role_hper, (role))
+#define PORTCFG_I2S_SET_HALFPERIOD(cfg, hper) \
+ set_high_nibble(&(cfg).role_hper, (hper))
+#define PORTCFG_I2S_SET_CHANNELS(cfg, chnl) \
+ set_low_nibble(&(cfg).csel_srate, (chnl))
+#define PORTCFG_I2S_SET_SRATE(cfg, rate) \
+ set_high_nibble(&(cfg).csel_srate, (rate))
+#define PORTCFG_I2S_SET_WORDLEN(cfg, len) \
+ set_low_nibble(&(cfg).wordlen, len)
+
+#define CG2900_MC_I2S_RIGHT_CHANNEL 1
+#define CG2900_MC_I2S_LEFT_CHANNEL 2
+#define CG2900_MC_I2S_BOTH_CHANNELS 3
+
+#define CG2900_MC_I2S_SAMPLE_RATE_8 0
+#define CG2900_MC_I2S_SAMPLE_RATE_16 1
+#define CG2900_MC_I2S_SAMPLE_RATE_44_1 2
+#define CG2900_MC_I2S_SAMPLE_RATE_48 4
+
+#define CG2900_MC_I2S_WORD_16 1
+#define CG2900_MC_I2S_WORD_32 3
+
+struct mc_vs_port_cfg_fm {
+ __u8 srate; /* NB: value goes in _upper_ nibble! */
+};
+#define PORTCFG_FM_SET_SRATE(cfg, rate) \
+ set_high_nibble(&(cfg).srate, (rate))
+
+struct mc_vs_port_cfg_sco {
+ __le16 acl_id;
+ __u8 wbs_codec;
+} __packed;
+#define PORTCFG_SCO_SET_CODEC(cfg, codec) \
+ set_high_nibble(&(cfg).wbs_codec, (codec))
+
+#define CG2900_MC_VS_CREATE_STREAM 0xFD66
+struct mc_vs_create_stream_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 id;
+ __u8 inport;
+ __u8 outport;
+ __u8 order; /* NB: not used by chip */
+} __packed;
+
+#define CG2900_MC_VS_DELETE_STREAM 0xFD67
+struct mc_vs_delete_stream_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 stream;
+} __packed;
+
+#define CG2900_MC_VS_STREAM_CONTROL 0xFD68
+struct mc_vs_stream_ctrl_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 command;
+ __u8 n_streams;
+ __u8 stream[];
+} __packed;
+
+#define CG2900_MC_STREAM_START 0x00
+#define CG2900_MC_STREAM_STOP 0x01
+#define CG2900_MC_STREAM_STOP_FLUSH 0x02
+
+#define CG2900_MC_VS_SET_FM_START_MODE 0xFD69
+
+/*
+ * FM
+ */
+
+/* FM legacy command packet */
+struct fm_leg_cmd {
+ __u8 length;
+ __u8 opcode;
+ __u8 read_write;
+ __u8 fm_function;
+ union { /* Payload varies with function */
+ __le16 irqmask;
+ struct fm_leg_fm_cmd {
+ __le16 head;
+ __le16 data[];
+ } fm_cmd;
+ };
+} __packed;
+
+/* FM legacy command complete packet */
+struct fm_leg_cmd_cmpl {
+ __u8 param_length;
+ __u8 status;
+ __u8 opcode;
+ __u8 read_write;
+ __u8 cmd_status;
+ __u8 fm_function;
+ __le16 response_head;
+ __le16 data[];
+} __packed;
+
+/* FM legacy interrupt packet, PG2 style */
+struct fm_leg_irq_v2 {
+ __u8 param_length;
+ __u8 status;
+ __u8 opcode;
+ __u8 event_type;
+ __u8 event_id;
+ __le16 irq;
+} __packed;
+
+/* FM legacy interrupt packet, PG1 style */
+struct fm_leg_irq_v1 {
+ __u8 param_length;
+ __u8 opcode;
+ __u8 event_id;
+ __le16 irq;
+} __packed;
+
+union fm_leg_evt_or_irq {
+ __u8 param_length;
+ struct fm_leg_cmd_cmpl evt;
+ struct fm_leg_irq_v2 irq_v2;
+ struct fm_leg_irq_v1 irq_v1;
+} __packed;
+
+/* FM Opcode generic */
+#define CG2900_FM_GEN_ID_LEGACY 0xFE
+
+/* FM event */
+#define CG2900_FM_EVENT_UNKNOWN 0
+#define CG2900_FM_EVENT_CMD_COMPLETE 1
+#define CG2900_FM_EVENT_INTERRUPT 2
+
+/* FM do-command identifiers. */
+#define CG2900_FM_DO_AIP_FADE_START 0x0046
+#define CG2900_FM_DO_AUP_BT_FADE_START 0x01C2
+#define CG2900_FM_DO_AUP_EXT_FADE_START 0x0102
+#define CG2900_FM_DO_AUP_FADE_START 0x00A2
+#define CG2900_FM_DO_FMR_SETANTENNA 0x0663
+#define CG2900_FM_DO_FMR_SP_AFSWITCH_START 0x04A3
+#define CG2900_FM_DO_FMR_SP_AFUPDATE_START 0x0463
+#define CG2900_FM_DO_FMR_SP_BLOCKSCAN_START 0x0683
+#define CG2900_FM_DO_FMR_SP_PRESETPI_START 0x0443
+#define CG2900_FM_DO_FMR_SP_SCAN_START 0x0403
+#define CG2900_FM_DO_FMR_SP_SEARCH_START 0x03E3
+#define CG2900_FM_DO_FMR_SP_SEARCHPI_START 0x0703
+#define CG2900_FM_DO_FMR_SP_TUNE_SETCHANNEL 0x03C3
+#define CG2900_FM_DO_FMR_SP_TUNE_STEPCHANNEL 0x04C3
+#define CG2900_FM_DO_FMT_PA_SETCTRL 0x01A4
+#define CG2900_FM_DO_FMT_PA_SETMODE 0x01E4
+#define CG2900_FM_DO_FMT_SP_TUNE_SETCHANNEL 0x0064
+#define CG2900_FM_DO_GEN_ANTENNACHECK_START 0x02A1
+#define CG2900_FM_DO_GEN_GOTOMODE 0x0041
+#define CG2900_FM_DO_GEN_POWERSUPPLY_SETMODE 0x0221
+#define CG2900_FM_DO_GEN_SELECTREFERENCECLOCK 0x0201
+#define CG2900_FM_DO_GEN_SETPROCESSINGCLOCK 0x0241
+#define CG2900_FM_DO_GEN_SETREFERENCECLOCKPLL 0x01A1
+#define CG2900_FM_DO_TST_TX_RAMP_START 0x0147
+#define CG2900_FM_CMD_NONE 0xFFFF
+#define CG2900_FM_CMD_ID_GEN_GOTO_POWER_DOWN 0x0081
+#define CG2900_FM_CMD_ID_GEN_GOTO_STANDBY 0x0061
+
+/* FM Command IDs */
+#define CG2900_FM_CMD_ID_AUP_EXT_SET_MODE 0x0162
+#define CG2900_FM_CMD_ID_AUP_EXT_SET_CTRL 0x0182
+#define CG2900_FM_CMD_ID_AIP_SET_MODE 0x01C6
+#define CG2900_FM_CMD_ID_AIP_BT_SET_CTRL 0x01A6
+#define CG2900_FM_CMD_ID_AIP_BT_SET_MODE 0x01E6
+
+/* FM Command Parameters. */
+#define CG2900_FM_CMD_PARAM_ENABLE 0x00
+#define CG2900_FM_CMD_PARAM_DISABLE 0x01
+#define CG2900_FM_CMD_PARAM_RESET 0x02
+#define CG2900_FM_CMD_PARAM_WRITECOMMAND 0x10
+#define CG2900_FM_CMD_PARAM_SET_INT_MASK_ALL 0x20
+#define CG2900_FM_CMD_PARAM_GET_INT_MASK_ALL 0x21
+#define CG2900_FM_CMD_PARAM_SET_INT_MASK 0x22
+#define CG2900_FM_CMD_PARAM_GET_INT_MASK 0x23
+#define CG2900_FM_CMD_PARAM_FM_FW_DOWNLOAD 0x30
+#define CG2900_FM_CMD_PARAM_NONE 0xFF
+
+/* FM Legacy Command Parameters */
+#define CG2900_FM_CMD_LEG_PARAM_WRITE 0x00
+#define CG2900_FM_CMD_LEG_PARAM_IRQ 0x01
+
+/* FM Command Status. */
+#define CG2900_FM_CMD_STATUS_COMMAND_SUCCEEDED 0x00
+#define CG2900_FM_CMD_STATUS_HW_FAILURE 0x03
+#define CG2900_FM_CMD_STATUS_INVALID_PARAMS 0x12
+#define CG2900_FM_CMD_STATUS_UNINITILIZED 0x15
+#define CG2900_FM_CMD_STATUS_UNSPECIFIED_ERROR 0x1F
+#define CG2900_FM_CMD_STATUS_COMMAND_DISALLOWED 0x0C
+#define CG2900_FM_CMD_STATUS_FW_WRONG_SEQUENCE_NR 0xF1
+#define CG2900_FM_CMD_STATUS_FW_UNKNOWN_FILE 0xF2
+#define CG2900_FM_CMD_STATUS_FW_FILE_VER_MISMATCH 0xF3
+
+/* FM Interrupts. */
+#define CG2900_FM_IRPT_FIQ 0x0000
+#define CG2900_FM_IRPT_OPERATION_SUCCEEDED 0x0001
+#define CG2900_FM_IRPT_OPERATION_FAILED 0x0002
+#define CG2900_FM_IRPT_BUFFER_FULL 0x0008
+#define CG2900_FM_IRPT_BUFFER_EMPTY 0x0008
+#define CG2900_FM_IRPT_SIGNAL_QUALITY_LOW 0x0010
+#define CG2900_FM_IRPT_MUTE_STATUS_CHANGED 0x0010
+#define CG2900_FM_IRPT_MONO_STEREO_TRANSITION 0x0020
+#define CG2900_FM_IRPT_OVER_MODULATION 0x0020
+#define CG2900_FM_IRPT_RDS_SYNC_FOUND 0x0040
+#define CG2900_FM_IRPT_INPUT_OVERDRIVE 0x0040
+#define CG2900_FM_IRPT_RDS_SYNC_LOST 0x0080
+#define CG2900_FM_IRPT_PI_CODE_CHANGED 0x0100
+#define CG2900_FM_IRPT_REQUEST_BLOCK_AVALIBLE 0x0200
+#define CG2900_FM_IRPT_BUFFER_CLEARED 0x2000
+#define CG2900_FM_IRPT_WARM_BOOT_READY 0x4000
+#define CG2900_FM_IRPT_COLD_BOOT_READY 0x8000
+
+/* FM Legacy Function Command Parameters */
+
+/* AUP_EXT_SetMode Output enum */
+#define CG2900_FM_CMD_AUP_EXT_SET_MODE_DISABLED 0x0000
+#define CG2900_FM_CMD_AUP_EXT_SET_MODE_I2S 0x0001
+#define CG2900_FM_CMD_AUP_EXT_SET_MODE_PARALLEL 0x0002
+
+/* SetControl Conversion enum */
+#define CG2900_FM_CMD_SET_CTRL_CONV_UP 0x0000
+#define CG2900_FM_CMD_SET_CTRL_CONV_DOWN 0x0001
+
+/* AIP_SetMode Input enum */
+#define CG2900_FM_CMD_AIP_SET_MODE_INPUT_ANA 0x0000
+#define CG2900_FM_CMD_AIP_SET_MODE_INPUT_DIG 0x0001
+
+/* AIP_BT_SetMode Input enum */
+#define CG2900_FM_CMD_AIP_BT_SET_MODE_INPUT_RESERVED 0x0000
+#define CG2900_FM_CMD_AIP_BT_SET_MODE_INPUT_I2S 0x0001
+#define CG2900_FM_CMD_AIP_BT_SET_MODE_INPUT_PAR 0x0002
+#define CG2900_FM_CMD_AIP_BT_SET_MODE_INPUT_FIFO 0x0003
+
+/* FM Parameter Lengths = FM command length - length field (1 byte) */
+#define CG2900_FM_CMD_PARAM_LEN(len) ((len) - 1)
+
+/*
+ * FM command IDs are encoded in the opcode shifted 3 bits left;
+ * the lowest 3 bits hold the number of parameters.
+ */
+static inline __u16 cg2900_get_fm_cmd_id(__u16 opcode)
+{
+ return opcode >> 3;
+}
+
+static inline __u16 cg2900_make_fm_cmd_id(__u16 id, __u8 num_params)
+{
+ return (id << 3) | num_params;
+}
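+
+/*
+ * Example: CG2900_FM_DO_GEN_GOTOMODE (0x0041) decodes to command ID 0x0008
+ * with 1 parameter, since 0x0041 == (0x0008 << 3) | 1.
+ */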
+
+/*
+ * GNSS
+ */
+
+struct gnss_hci_hdr {
+ __u8 op_code;
+ __le16 plen;
+} __packed;
+
+#endif /* _CG2900_CHIP_H_ */
diff --git a/drivers/staging/cg2900/mfd/cg2900_core.c b/drivers/staging/cg2900/mfd/cg2900_core.c
new file mode 100644
index 00000000000..6ac27748e44
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/cg2900_core.c
@@ -0,0 +1,715 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth HCI H:4 Driver for ST-Ericsson CG2900 GPS/BT/FM controller.
+ */
+#define NAME "cg2900_core"
+#define pr_fmt(fmt) NAME ": " fmt "\n"
+
+#include <asm/byteorder.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/core.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+
+#include "cg2900.h"
+#include "cg2900_core.h"
+
+/* Device names */
+#define CG2900_CDEV_NAME "cg2900_core_test"
+#define CG2900_CLASS_NAME "cg2900_class"
+#define CG2900_DEVICE_NAME "cg2900_driver"
+#define CORE_WQ_NAME "cg2900_core_wq"
+
+#define LOGGER_DIRECTION_TX 0
+#define LOGGER_DIRECTION_RX 1
+
+/*
+ * Timeout values
+ */
+#define CHIP_READY_TIMEOUT (100) /* ms */
+#define REVISION_READOUT_TIMEOUT (500) /* ms */
+#define SLEEP_TIMEOUT_MS (10000) /* ms */
+
+/**
+ * enum boot_state - BOOT-state for CG2900 Core.
+ * @BOOT_RESET: HCI Reset has been sent.
+ * @BOOT_READ_LOCAL_VERSION_INFORMATION: ReadLocalVersionInformation
+ * command has been sent.
+ * @BOOT_READY: CG2900 Core boot is ready.
+ * @BOOT_FAILED: CG2900 Core boot failed.
+ */
+enum boot_state {
+ BOOT_RESET,
+ BOOT_READ_LOCAL_VERSION_INFORMATION,
+ BOOT_READY,
+ BOOT_FAILED
+};
+
+/**
+ * struct chip_handler_item - Structure to store chip handler cb.
+ * @list: list_head struct.
+ * @cb: Chip handler callback struct.
+ */
+struct chip_handler_item {
+ struct list_head list;
+ struct cg2900_id_callbacks cb;
+};
+
+/**
+ * struct core_info - Main info structure for CG2900 Core.
+ * @boot_state: Current BOOT-state of CG2900 Core.
+ * @wq: CG2900 Core workqueue.
+ * @chip_dev: Device structure for chip driver.
+ * @work: Work structure.
+ */
+struct core_info {
+ enum boot_state boot_state;
+ struct workqueue_struct *wq;
+ struct cg2900_chip_dev *chip_dev;
+ struct work_struct work;
+};
+
+/**
+ * struct main_info - Main info structure for CG2900 Core.
+ * @dev: Device structure for STE Connectivity driver.
+ * @man_mutex: Management mutex.
+ * @chip_handlers: List of the register handlers for different chips.
+ * @wq: Wait queue.
+ */
+struct main_info {
+ struct device *dev;
+ struct mutex man_mutex;
+ struct list_head chip_handlers;
+ wait_queue_head_t wq;
+};
+
+/* main_info - Main information object for CG2900 Core. */
+static struct main_info *main_info;
+
+/* Module parameters */
+u8 bd_address[] = {0x00, 0xBE, 0xAD, 0xDE, 0x80, 0x00};
+EXPORT_SYMBOL_GPL(bd_address);
+int bd_addr_count = BT_BDADDR_SIZE;
+
+static int sleep_timeout_ms = SLEEP_TIMEOUT_MS;
+
+/**
+ * send_bt_cmd() - Copy and send sk_buffer with no assigned user.
+ * @dev: Current chip to transmit to.
+ * @data: Data to send.
+ * @length: Length in bytes of data.
+ *
+ * The send_bt_cmd() function allocates an sk_buffer, copies the supplied
+ * data to it, and sends the sk_buffer to the controller.
+ */
+void send_bt_cmd(struct cg2900_chip_dev *dev, void *data, int length)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = alloc_skb(length + HCI_H4_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(dev->dev, "send_bt_cmd: Couldn't alloc sk_buff with "
+ "length %d\n", length);
+ return;
+ }
+
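+ /*
+ * Reserve room for the one byte H:4 header, copy in the payload and then
+ * push the header back with the BT command channel ID.
+ */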
+ skb_reserve(skb, HCI_H4_SIZE);
+ memcpy(skb_put(skb, length), data, length);
+ skb_push(skb, HCI_H4_SIZE);
+ skb->data[0] = HCI_BT_CMD_H4_CHANNEL;
+
+ err = dev->t_cb.write(dev, skb);
+ if (err) {
+ dev_err(dev->dev, "send_bt_cmd: Transport write failed (%d)\n",
+ err);
+ kfree_skb(skb);
+ }
+}
+
+/**
+ * handle_reset_cmd_complete_evt() - Handle a received HCI Command Complete event for a Reset command.
+ * @dev: Current device.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * True if the packet was handled internally,
+ * false otherwise.
+ */
+static bool handle_reset_cmd_complete_evt(struct cg2900_chip_dev *dev, u8 *data)
+{
+ bool pkt_handled = false;
+ u8 status = data[0];
+ struct hci_command_hdr cmd;
+ struct core_info *info = dev->prv_data;
+
+ dev_dbg(dev->dev, "Received Reset complete event with status 0x%X\n",
+ status);
+
+ if (info->boot_state == BOOT_RESET) {
+ /* Transmit HCI Read Local Version Information command */
+ dev_dbg(dev->dev, "New boot_state: "
+ "BOOT_READ_LOCAL_VERSION_INFORMATION\n");
+ info->boot_state = BOOT_READ_LOCAL_VERSION_INFORMATION;
+ cmd.opcode = cpu_to_le16(HCI_OP_READ_LOCAL_VERSION);
+ cmd.plen = 0; /* No parameters for ReadLocalVersionInformation */
+ send_bt_cmd(dev, &cmd, sizeof(cmd));
+
+ pkt_handled = true;
+ }
+
+ return pkt_handled;
+}
+
+/**
+ * handle_read_local_version_info_cmd_complete_evt() - Handle a received HCI Command Complete event for a ReadLocalVersionInformation command.
+ * @dev: Current device.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * True if the packet was handled internally,
+ * false otherwise.
+ */
+static bool
+handle_read_local_version_info_cmd_complete_evt(struct cg2900_chip_dev *dev,
+ u8 *data)
+{
+ struct hci_rp_read_local_version *evt;
+ struct core_info *info = dev->prv_data;
+
+ /* Check we're in the right state */
+ if (info->boot_state != BOOT_READ_LOCAL_VERSION_INFORMATION)
+ return false;
+
+ /* We got an answer for our HCI command. Extract data */
+ evt = (struct hci_rp_read_local_version *)data;
+
+ /* We will handle the packet */
+ if (HCI_BT_ERROR_NO_ERROR != evt->status) {
+ dev_err(dev->dev, "Received Read Local Version Information "
+ "with status 0x%X\n", evt->status);
+ dev_dbg(dev->dev, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ wake_up_all(&main_info->wq);
+ return true;
+ }
+
+ /* The command worked. Store the data */
+ dev->chip.hci_version = evt->hci_ver;
+ dev->chip.hci_revision = le16_to_cpu(evt->hci_rev);
+ dev->chip.lmp_pal_version = evt->lmp_ver;
+ dev->chip.manufacturer = le16_to_cpu(evt->manufacturer);
+ dev->chip.hci_sub_version = le16_to_cpu(evt->lmp_subver);
+ dev_info(dev->dev, "Received Read Local Version Information with:\n"
+ "\thci_version: 0x%02X\n"
+ "\thci_revision: 0x%04X\n"
+ "\tlmp_pal_version: 0x%02X\n"
+ "\tmanufacturer: 0x%04X\n"
+ "\thci_sub_version: 0x%04X\n",
+ dev->chip.hci_version, dev->chip.hci_revision,
+ dev->chip.lmp_pal_version, dev->chip.manufacturer,
+ dev->chip.hci_sub_version);
+
+ dev_dbg(dev->dev, "New boot_state: BOOT_READY\n");
+ info->boot_state = BOOT_READY;
+ wake_up_all(&main_info->wq);
+
+ return true;
+}
+
+/**
+ * handle_rx_data_bt_evt() - Check if data should be handled in CG2900 Core.
+ * @dev: Current chip
+ * @skb: Data packet
+ *
+ * The handle_rx_data_bt_evt() function checks if received data should be
+ * handled in CG2900 Core and, if so, handles it there.
+ * Received data is always HCI BT Event.
+ *
+ * Returns:
+ * True if the packet was handled internally,
+ * false otherwise.
+ */
+static bool handle_rx_data_bt_evt(struct cg2900_chip_dev *dev,
+ struct sk_buff *skb)
+{
+ bool pkt_handled = false;
+ u8 *data = &skb->data[CG2900_SKB_RESERVE];
+ struct hci_event_hdr *evt;
+ struct hci_ev_cmd_complete *cmd_complete;
+ u16 op_code;
+
+ evt = (struct hci_event_hdr *)data;
+
+ /* First check the event code */
+ if (HCI_EV_CMD_COMPLETE != evt->evt)
+ return false;
+
+ data += sizeof(*evt);
+ cmd_complete = (struct hci_ev_cmd_complete *)data;
+
+ op_code = le16_to_cpu(cmd_complete->opcode);
+
+ dev_dbg(dev->dev, "Received Command Complete: op_code = 0x%04X\n",
+ op_code);
+ data += sizeof(*cmd_complete); /* Move to first parameter byte after the Command Complete header */
+
+ if (op_code == HCI_OP_RESET)
+ pkt_handled = handle_reset_cmd_complete_evt(dev, data);
+ else if (op_code == HCI_OP_READ_LOCAL_VERSION)
+ pkt_handled = handle_read_local_version_info_cmd_complete_evt
+ (dev, data);
+
+ if (pkt_handled)
+ kfree_skb(skb);
+
+ return pkt_handled;
+}
+
+static void cg2900_data_from_chip(struct cg2900_chip_dev *dev,
+ struct sk_buff *skb)
+{
+ u8 h4_channel;
+
+ dev_dbg(dev->dev, "cg2900_data_from_chip\n");
+
+ if (!skb) {
+ dev_err(dev->dev, "No data supplied\n");
+ return;
+ }
+
+ h4_channel = skb->data[0];
+
+ /*
+ * First check if this is the response for something
+ * we have sent internally.
+ */
+ if (HCI_BT_EVT_H4_CHANNEL == h4_channel &&
+ handle_rx_data_bt_evt(dev, skb)) {
+ dev_dbg(dev->dev, "Received packet handled internally\n");
+ } else {
+ dev_err(dev->dev,
+ "cg2900_data_from_chip: Received unexpected packet\n");
+ kfree_skb(skb);
+ }
+}
+
+/**
+ * work_hw_registered() - Called when the interface to HW has been established.
+ * @work: Reference to work data.
+ *
+ * Since there now is a transport, identify the connected chip and decide
+ * which chip handler to use.
+ */
+static void work_hw_registered(struct work_struct *work)
+{
+ struct hci_command_hdr cmd;
+ struct cg2900_chip_dev *dev;
+ struct core_info *info;
+ bool chip_handled = false;
+ struct list_head *cursor;
+ struct chip_handler_item *tmp;
+
+ dev_dbg(main_info->dev, "work_hw_registered\n");
+
+ if (!work) {
+ dev_err(main_info->dev, "work_hw_registered: work == NULL\n");
+ return;
+ }
+
+ info = container_of(work, struct core_info, work);
+ dev = info->chip_dev;
+
+ /*
+ * This might look strange, but we need to read out
+ * the revision info in order to be able to shut down the chip properly.
+ */
+ if (dev->t_cb.set_chip_power)
+ dev->t_cb.set_chip_power(dev, true);
+
+ /* Wait 100ms before continuing to be sure that the chip is ready */
+ schedule_timeout_killable(msecs_to_jiffies(CHIP_READY_TIMEOUT));
+
+ /* Set our function to receive data from chip */
+ dev->c_cb.data_from_chip = cg2900_data_from_chip;
+
+ /*
+ * Transmit HCI reset command to ensure the chip is using
+ * the correct transport
+ */
+ dev_dbg(dev->dev, "New boot_state: BOOT_RESET\n");
+ info->boot_state = BOOT_RESET;
+ cmd.opcode = cpu_to_le16(HCI_OP_RESET);
+ cmd.plen = 0; /* No parameters for HCI reset */
+ send_bt_cmd(dev, &cmd, sizeof(cmd));
+
+ dev_dbg(dev->dev,
+ "Wait up to 500 milliseconds for revision to be read\n");
+ wait_event_timeout(main_info->wq,
+ (BOOT_READY == info->boot_state ||
+ BOOT_FAILED == info->boot_state),
+ msecs_to_jiffies(REVISION_READOUT_TIMEOUT));
+
+ if (BOOT_READY != info->boot_state) {
+ dev_err(dev->dev,
+ "Could not read out revision from the chip\n");
+ info->boot_state = BOOT_FAILED;
+ if (dev->t_cb.set_chip_power)
+ dev->t_cb.set_chip_power(dev, false);
+ return;
+ }
+
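+ /* Revision readout done; the core no longer needs to intercept incoming data */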
+ dev->c_cb.data_from_chip = NULL;
+
+ mutex_lock(&main_info->man_mutex);
+ list_for_each(cursor, &main_info->chip_handlers) {
+ tmp = list_entry(cursor, struct chip_handler_item, list);
+ chip_handled = tmp->cb.check_chip_support(dev);
+ if (chip_handled) {
+ dev_info(dev->dev, "Chip handler found\n");
+ break;
+ }
+ }
+ mutex_unlock(&main_info->man_mutex);
+
+ if (!chip_handled)
+ dev_info(dev->dev, "No chip handler found\n");
+}
+
+/**
+ * cg2900_register_chip_driver() - Register a chip handler.
+ * @cb: Callbacks to call when chip is connected.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL if NULL is supplied as @cb.
+ * -ENOMEM if allocation fails.
+ */
+int cg2900_register_chip_driver(struct cg2900_id_callbacks *cb)
+{
+ struct chip_handler_item *item;
+
+ dev_dbg(main_info->dev, "cg2900_register_chip_driver\n");
+
+ if (!cb) {
+ dev_err(main_info->dev, "NULL supplied as cb\n");
+ return -EINVAL;
+ }
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ dev_err(main_info->dev,
+ "cg2900_register_chip_driver: "
+ "Failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ memcpy(&item->cb, cb, sizeof(item->cb));
+ mutex_lock(&main_info->man_mutex);
+ list_add_tail(&item->list, &main_info->chip_handlers);
+ mutex_unlock(&main_info->man_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cg2900_register_chip_driver);
+
+/**
+ * cg2900_deregister_chip_driver() - Deregister a chip handler.
+ * @cb: Callbacks to call when chip is connected.
+ */
+void cg2900_deregister_chip_driver(struct cg2900_id_callbacks *cb)
+{
+ struct chip_handler_item *tmp;
+ struct list_head *cursor, *next;
+
+ dev_dbg(main_info->dev, "cg2900_deregister_chip_driver\n");
+
+ if (!cb) {
+ dev_err(main_info->dev, "NULL supplied as cb\n");
+ return;
+ }
+ mutex_lock(&main_info->man_mutex);
+ list_for_each_safe(cursor, next, &main_info->chip_handlers) {
+ tmp = list_entry(cursor, struct chip_handler_item, list);
+ if (tmp->cb.check_chip_support == cb->check_chip_support) {
+ list_del(cursor);
+ kfree(tmp);
+ break;
+ }
+ }
+ mutex_unlock(&main_info->man_mutex);
+}
+EXPORT_SYMBOL_GPL(cg2900_deregister_chip_driver);
+
+/**
+ * cg2900_register_trans_driver() - Register a transport driver.
+ * @dev: Transport device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL if @dev is NULL, or mandatory callbacks or platform data are missing.
+ * -ENOMEM if allocation fails or work queue can't be created.
+ * -EACCES if work can't be queued.
+ */
+int cg2900_register_trans_driver(struct cg2900_chip_dev *dev)
+{
+ int err;
+ struct cg2900_platform_data *pf_data;
+ struct core_info *info;
+
+ BUG_ON(!main_info);
+
+ if (!dev || !dev->dev) {
+ dev_err(main_info->dev, "cg2900_register_trans_driver: "
+ "Received NULL pointer\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev->dev, "cg2900_register_trans_driver\n");
+
+ if (!dev->t_cb.write) {
+ dev_err(dev->dev, "cg2900_register_trans_driver: Write function"
+ " missing\n");
+ return -EINVAL;
+ }
+
+ pf_data = dev_get_platdata(dev->dev);
+ if (!pf_data) {
+ dev_err(dev->dev, "cg2900_register_trans_driver: Missing "
+ "platform data\n");
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev->dev, "Couldn't allocate info\n");
+ return -ENOMEM;
+ }
+
+ if (pf_data->init) {
+ err = pf_data->init(dev);
+ if (err) {
+ dev_err(dev->dev, "Platform init failed (%d)\n", err);
+ goto error_handling;
+ }
+ }
+
+ info->chip_dev = dev;
+ dev->prv_data = info;
+
+ info->wq = create_singlethread_workqueue(CORE_WQ_NAME);
+ if (!info->wq) {
+ dev_err(dev->dev, "Could not create workqueue\n");
+ err = -ENOMEM;
+ goto error_handling_exit;
+ }
+
+ dev_info(dev->dev, "Transport connected\n");
+
+ INIT_WORK(&info->work, work_hw_registered);
+ if (!queue_work(info->wq, &info->work)) {
+ dev_err(dev->dev, "Failed to queue work_hw_registered because "
+ "it's already in the queue\n");
+ err = -EACCES;
+ goto error_handling_wq;
+ }
+
+ return 0;
+
+error_handling_wq:
+ destroy_workqueue(info->wq);
+error_handling_exit:
+ if (pf_data->exit)
+ pf_data->exit(dev);
+error_handling:
+ kfree(info);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cg2900_register_trans_driver);
+
+/**
+ * cg2900_deregister_trans_driver() - Deregister a transport driver.
+ * @dev: Transport device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -EINVAL if platform data is missing in the device.
+ */
+int cg2900_deregister_trans_driver(struct cg2900_chip_dev *dev)
+{
+ struct cg2900_platform_data *pf_data;
+ struct core_info *info = dev->prv_data;
+
+ BUG_ON(!main_info);
+
+ dev_dbg(dev->dev, "cg2900_deregister_trans_driver\n");
+
+ if (dev->c_cb.chip_removed)
+ dev->c_cb.chip_removed(dev);
+
+ destroy_workqueue(info->wq);
+
+ dev->prv_data = NULL;
+ kfree(info);
+
+ dev_info(dev->dev, "Transport disconnected\n");
+
+ pf_data = dev_get_platdata(dev->dev);
+ if (!pf_data) {
+ dev_err(dev->dev, "Missing platform data\n");
+ return -EINVAL;
+ }
+
+ if (pf_data->exit)
+ pf_data->exit(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cg2900_deregister_trans_driver);
+
+/**
+ * cg2900_get_sleep_timeout() - Return sleep timeout in jiffies.
+ *
+ * Returns:
+ * Sleep timeout in jiffies. 0 means that sleep timeout shall not be used.
+ */
+unsigned long cg2900_get_sleep_timeout(void)
+{
+ if (!sleep_timeout_ms)
+ return 0;
+
+ return msecs_to_jiffies(sleep_timeout_ms);
+}
+EXPORT_SYMBOL_GPL(cg2900_get_sleep_timeout);
+
+/**
+ * cg2900_probe() - Initialize module.
+ *
+ * @pdev: Platform device.
+ *
+ * This function allocates and initializes the main information structure
+ * used by CG2900 Core.
+ *
+ * Returns:
+ * 0 if success.
+ * -ENOMEM for failed alloc or structure creation.
+ */
+static int __devinit cg2900_probe(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "cg2900_probe\n");
+
+ main_info = kzalloc(sizeof(*main_info), GFP_KERNEL);
+ if (!main_info) {
+ dev_err(&pdev->dev, "Couldn't allocate main_info\n");
+ return -ENOMEM;
+ }
+
+ main_info->dev = &pdev->dev;
+ mutex_init(&main_info->man_mutex);
+ INIT_LIST_HEAD(&main_info->chip_handlers);
+ init_waitqueue_head(&main_info->wq);
+
+ dev_info(&pdev->dev, "CG2900 Core driver started\n");
+
+ return 0;
+}
+
+/**
+ * cg2900_remove() - Remove module.
+ *
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if success.
+ */
+static int __devexit cg2900_remove(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "cg2900_remove\n");
+
+ kfree(main_info);
+ main_info = NULL;
+
+ dev_info(&pdev->dev, "CG2900 Core driver removed\n");
+
+ return 0;
+}
+
+static struct platform_driver cg2900_driver = {
+ .driver = {
+ .name = "cg2900",
+ .owner = THIS_MODULE,
+ },
+ .probe = cg2900_probe,
+ .remove = __devexit_p(cg2900_remove),
+};
+
+/**
+ * cg2900_init() - Initialize module.
+ *
+ * Registers platform driver.
+ */
+static int __init cg2900_init(void)
+{
+ pr_debug("cg2900_init");
+ return platform_driver_register(&cg2900_driver);
+}
+
+/**
+ * cg2900_exit() - Remove module.
+ *
+ * Unregisters platform driver.
+ */
+static void __exit cg2900_exit(void)
+{
+ pr_debug("cg2900_exit");
+ platform_driver_unregister(&cg2900_driver);
+}
+
+module_init(cg2900_init);
+module_exit(cg2900_exit);
+
+module_param(sleep_timeout_ms, int, S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(sleep_timeout_ms,
+ "Sleep timeout for data transmissions:\n"
+ "\tDefault 10000 ms\n"
+ "\t0 = disable\n"
+ "\t>0 = sleep timeout in milliseconds");
+
+module_param_array(bd_address, byte, &bd_addr_count,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(bd_address,
+ "Bluetooth Device address. "
+ "Default 0x00 0x80 0xDE 0xAD 0xBE 0xEF. "
+ "Enter as comma separated value.");
+
+MODULE_AUTHOR("Par-Gunnar Hjalmdahl ST-Ericsson");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Linux Bluetooth HCI H:4 CG2900 Connectivity Device Driver");
diff --git a/drivers/staging/cg2900/mfd/cg2900_core.h b/drivers/staging/cg2900/mfd/cg2900_core.h
new file mode 100644
index 00000000000..bdd951a501d
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/cg2900_core.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth HCI H:4 Driver for ST-Ericsson CG2900 GPS/BT/FM controller.
+ */
+
+#ifndef _CG2900_CORE_H_
+#define _CG2900_CORE_H_
+
+#include <linux/device.h>
+#include <linux/skbuff.h>
+
+/* Reserve 1 byte for the HCI H:4 header */
+#define HCI_H4_SIZE 1
+#define CG2900_SKB_RESERVE HCI_H4_SIZE
+
+/* Number of bytes to reserve at start of sk_buffer when receiving packet */
+#define RX_SKB_RESERVE 8
+
+#define BT_BDADDR_SIZE 6
+
+/* Standardized Bluetooth H:4 channels */
+#define HCI_BT_CMD_H4_CHANNEL 0x01
+#define HCI_BT_ACL_H4_CHANNEL 0x02
+#define HCI_BT_SCO_H4_CHANNEL 0x03
+#define HCI_BT_EVT_H4_CHANNEL 0x04
+
+/* Default H4 channels which may change depending on connected controller */
+#define HCI_FM_RADIO_H4_CHANNEL 0x08
+#define HCI_GNSS_H4_CHANNEL 0x09
+
+/* Bluetooth error codes */
+#define HCI_BT_ERROR_NO_ERROR 0x00
+
+/* Bluetooth lengths */
+#define HCI_BT_SEND_FILE_MAX_CHUNK_SIZE 254
+
+#define LOGGER_DIRECTION_TX 0
+#define LOGGER_DIRECTION_RX 1
+
+/* module_param declared in cg2900_core.c */
+extern u8 bd_address[BT_BDADDR_SIZE];
+
+#endif /* _CG2900_CORE_H_ */
diff --git a/drivers/staging/cg2900/mfd/cg2900_lib.c b/drivers/staging/cg2900/mfd/cg2900_lib.c
new file mode 100644
index 00000000000..84f7eb5eb62
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/cg2900_lib.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth HCI H:4 Driver for ST-Ericsson CG2900 GPS/BT/FM controller.
+ */
+#define NAME "cg2900_lib"
+#define pr_fmt(fmt) NAME ": " fmt "\n"
+
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "cg2900.h"
+#include "cg2900_chip.h"
+#include "cg2900_core.h"
+#include "cg2900_lib.h"
+
+/*
+ * Max length in bytes for line buffer used to parse settings and patch file.
+ * Must be max length of name plus characters used to define chip version.
+ */
+#define LINE_BUFFER_LENGTH (NAME_MAX + 30)
+#define LOGGER_HEADER_SIZE 1
+
+/**
+ * cg2900_tx_to_chip() - Transmit buffer to the transport.
+ * @user: User data for BT command channel.
+ * @logger: User data for logger channel.
+ * @skb: Data packet.
+ *
+ * The cg2900_tx_to_chip() function transmits the buffer to the transport.
+ * If a logger is supplied, the transmitted data is copied to the HCI logger
+ * as well.
+ */
+void cg2900_tx_to_chip(struct cg2900_user_data *user,
+ struct cg2900_user_data *logger, struct sk_buff *skb)
+{
+ int err;
+ struct cg2900_chip_dev *chip_dev;
+
+ dev_dbg(user->dev, "cg2900_tx_to_chip %d bytes.\n", skb->len);
+
+ if (logger)
+ cg2900_send_to_hci_logger(logger, skb, LOGGER_DIRECTION_TX);
+
+ chip_dev = cg2900_get_prv(user);
+ err = chip_dev->t_cb.write(chip_dev, skb);
+ if (err) {
+ dev_err(user->dev, "cg2900_tx_to_chip: Transport write failed "
+ "(%d)\n", err);
+ kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL_GPL(cg2900_tx_to_chip);
+
+/**
+ * cg2900_tx_no_user() - Transmit buffer to the transport.
+ * @dev: Current chip to transmit to.
+ * @skb: Data packet.
+ *
+ * This function transmits the buffer to the transport when no user exists
+ * (for example during system startup).
+ */
+void cg2900_tx_no_user(struct cg2900_chip_dev *dev, struct sk_buff *skb)
+{
+ int err;
+
+ dev_dbg(dev->dev, "cg2900_tx_no_user %d bytes.\n", skb->len);
+
+ err = dev->t_cb.write(dev, skb);
+ if (err) {
+ dev_err(dev->dev, "cg2900_tx_no_user: Transport write failed "
+ "(%d)\n", err);
+ kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL_GPL(cg2900_tx_no_user);
+
+/**
+ * cg2900_send_bt_cmd() - Copy and send sk_buffer.
+ * @user: User data for current channel.
+ * @logger: User data for logger channel.
+ * @data: Data to send.
+ * @length: Length in bytes of data.
+ *
+ * The cg2900_send_bt_cmd() function allocates an sk_buffer, copies the
+ * supplied data to it, and sends the sk_buffer to the controller.
+ */
+void cg2900_send_bt_cmd(struct cg2900_user_data *user,
+ struct cg2900_user_data *logger,
+ void *data, int length)
+{
+ struct sk_buff *skb;
+
+ skb = user->alloc_skb(length, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(user->dev, "cg2900_send_bt_cmd: Couldn't alloc "
+ "sk_buff with length %d\n", length);
+ return;
+ }
+
+ memcpy(skb_put(skb, length), data, length);
+ skb_push(skb, HCI_H4_SIZE);
+ skb->data[0] = HCI_BT_CMD_H4_CHANNEL;
+
+ cg2900_tx_to_chip(user, logger, skb);
+}
+EXPORT_SYMBOL_GPL(cg2900_send_bt_cmd);
+
+/**
+ * cg2900_send_bt_cmd_no_user() - Copy and send sk_buffer with no assigned user.
+ * @dev: Current chip to transmit to.
+ * @data: Data to send.
+ * @length: Length in bytes of data.
+ *
+ * The cg2900_send_bt_cmd_no_user() function allocates an sk_buffer, copies
+ * the supplied data to it, and sends the sk_buffer to the controller.
+ */
+void cg2900_send_bt_cmd_no_user(struct cg2900_chip_dev *dev, void *data,
+ int length)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length + HCI_H4_SIZE, GFP_KERNEL);
+ if (!skb) {
+ dev_err(dev->dev, "cg2900_send_bt_cmd_no_user: Couldn't alloc "
+ "sk_buff with length %d\n", length);
+ return;
+ }
+
+ skb_reserve(skb, HCI_H4_SIZE);
+ memcpy(skb_put(skb, length), data, length);
+ skb_push(skb, HCI_H4_SIZE);
+ skb->data[0] = HCI_BT_CMD_H4_CHANNEL;
+
+ cg2900_tx_no_user(dev, skb);
+}
+EXPORT_SYMBOL_GPL(cg2900_send_bt_cmd_no_user);
+
+/**
+ * cg2900_create_work_item() - Create work item and add it to the work queue.
+ * @wq: Work queue.
+ * @work_func: Work function.
+ * @user_data: Arbitrary data set by user.
+ *
+ * The cg2900_create_work_item() function creates a work item and adds it to
+ * the work queue.
+ * Note that the work item is allocated with kmalloc and must be freed by the
+ * work function once it starts.
+ */
+void cg2900_create_work_item(struct workqueue_struct *wq, work_func_t work_func,
+ void *user_data)
+{
+ struct cg2900_work *new_work;
+ int err;
+
+ new_work = kmalloc(sizeof(*new_work), GFP_ATOMIC);
+ if (!new_work) {
+ pr_err("Failed to alloc memory for new_work");
+ return;
+ }
+
+ INIT_WORK(&new_work->work, work_func);
+ new_work->user_data = user_data;
+
+ err = queue_work(wq, &new_work->work);
+ if (!err) {
+ pr_err("Failed to queue work_struct because it's already "
+ "in the queue");
+ kfree(new_work);
+ }
+}
+EXPORT_SYMBOL_GPL(cg2900_create_work_item);
+
+/**
+ * cg2900_read_and_send_file_part() - Transmit a part of the supplied file.
+ * @user: User data for current channel.
+ * @logger: User data for logger channel.
+ * @info: File information.
+ *
+ * The cg2900_read_and_send_file_part() function transmits a part of the
+ * supplied file to the controller.
+ *
+ * Returns:
+ * 0 if there is no more data in the file.
+ * >0 for number of bytes sent.
+ * -ENOMEM if skb allocation failed.
+ */
+int cg2900_read_and_send_file_part(struct cg2900_user_data *user,
+ struct cg2900_user_data *logger,
+ struct cg2900_file_info *info)
+{
+ int bytes_to_copy;
+ struct sk_buff *skb;
+ struct bt_vs_write_file_block_cmd *cmd;
+ int plen;
+
+ /*
+ * Calculate number of bytes to copy;
+ * either max bytes for HCI packet or number of bytes left in file
+ */
+ bytes_to_copy = min((int)HCI_BT_SEND_FILE_MAX_CHUNK_SIZE,
+ (int)(info->fw_file->size - info->file_offset));
+
+ if (bytes_to_copy <= 0) {
+ /* Nothing more to read in file. */
+ dev_dbg(user->dev, "File download finished\n");
+ info->chunk_id = 0;
+ info->file_offset = 0;
+ return 0;
+ }
+
+ /* There is more data to send */
+ plen = sizeof(*cmd) + bytes_to_copy;
+ skb = user->alloc_skb(plen, GFP_KERNEL);
+ if (!skb) {
+ dev_err(user->dev, "Couldn't allocate sk_buffer\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, plen);
+
+ cmd = (struct bt_vs_write_file_block_cmd *)skb->data;
+ cmd->opcode = cpu_to_le16(CG2900_BT_OP_VS_WRITE_FILE_BLOCK);
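+ /* HCI parameter length excludes the command header (opcode + length field) */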
+ cmd->plen = BT_PARAM_LEN(plen);
+ cmd->id = info->chunk_id;
+ info->chunk_id++;
+
+ /* Copy the data from offset position */
+ memcpy(cmd->data,
+ &(info->fw_file->data[info->file_offset]),
+ bytes_to_copy);
+
+ /* Increase offset with number of bytes copied */
+ info->file_offset += bytes_to_copy;
+
+ skb_push(skb, CG2900_SKB_RESERVE);
+ skb->data[0] = HCI_BT_CMD_H4_CHANNEL;
+
+ cg2900_tx_to_chip(user, logger, skb);
+
+ return bytes_to_copy;
+}
+EXPORT_SYMBOL_GPL(cg2900_read_and_send_file_part);
+
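+/**
+ * cg2900_send_to_hci_logger() - Copy a packet to the HCI logger channel.
+ * @logger:	User data for logger channel.
+ * @skb:	Data packet to copy.
+ * @direction:	LOGGER_DIRECTION_TX or LOGGER_DIRECTION_RX.
+ *
+ * A copy of @skb, prefixed with a one byte direction marker, is handed to the
+ * logger's read_cb if one is registered; otherwise the copy is freed.
+ */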
+void cg2900_send_to_hci_logger(struct cg2900_user_data *logger,
+ struct sk_buff *skb,
+ u8 direction)
+{
+ struct sk_buff *skb_log;
+ u8 *p;
+
+ /*
+ * Alloc a new sk_buff and copy the data into it. Then send it to
+ * the HCI logger.
+ */
+ skb_log = alloc_skb(skb->len + LOGGER_HEADER_SIZE, GFP_NOWAIT);
+ if (!skb_log) {
+ pr_err("cg2900_send_to_hci_logger:\
+ Couldn't allocate skb_log\n");
+ return;
+ }
+ /* Reserve 1 byte for direction.*/
+ skb_reserve(skb_log, LOGGER_HEADER_SIZE);
+
+ memcpy(skb_put(skb_log, skb->len), skb->data, skb->len);
+ p = skb_push(skb_log, LOGGER_HEADER_SIZE);
+ *p = (u8) direction;
+
+ if (logger->read_cb)
+ logger->read_cb(logger, skb_log);
+ else
+ kfree_skb(skb_log);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(cg2900_send_to_hci_logger);
+
+MODULE_AUTHOR("Par-Gunnar Hjalmdahl ST-Ericsson");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Linux CG2900 Library functions");
diff --git a/drivers/staging/cg2900/mfd/cg2900_lib.h b/drivers/staging/cg2900/mfd/cg2900_lib.h
new file mode 100644
index 00000000000..99d5ce6cfdb
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/cg2900_lib.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth HCI H:4 Driver for ST-Ericsson CG2900 GPS/BT/FM controller.
+ */
+
+#ifndef _CG2900_LIB_H_
+#define _CG2900_LIB_H_
+
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+
+#include "cg2900.h"
+
+/**
+ * struct cg2900_work - Generic work structure.
+ * @work: Work structure.
+ * @user_data: Arbitrary data set by user.
+ */
+struct cg2900_work {
+ struct work_struct work;
+ void *user_data;
+};
+
+/**
+ * struct cg2900_file_info - Info structure for file to download.
+ * @fw_file: Stores firmware file.
+ * @file_offset: Current read offset in firmware file.
+ * @chunk_id: Stores current chunk ID of write file
+ * operations.
+ */
+struct cg2900_file_info {
+ const struct firmware *fw_file;
+ int file_offset;
+ u8 chunk_id;
+};
+
+extern void cg2900_tx_to_chip(struct cg2900_user_data *user,
+ struct cg2900_user_data *logger,
+ struct sk_buff *skb);
+extern void cg2900_tx_no_user(struct cg2900_chip_dev *dev, struct sk_buff *skb);
+extern void cg2900_send_bt_cmd(struct cg2900_user_data *user,
+ struct cg2900_user_data *logger,
+ void *data, int length);
+extern void cg2900_send_bt_cmd_no_user(struct cg2900_chip_dev *dev, void *data,
+ int length);
+extern void cg2900_create_work_item(struct workqueue_struct *wq,
+ work_func_t work_func,
+ void *user_data);
+extern int cg2900_read_and_send_file_part(struct cg2900_user_data *user,
+ struct cg2900_user_data *logger,
+ struct cg2900_file_info *info);
+extern void cg2900_send_to_hci_logger(struct cg2900_user_data *logger,
+ struct sk_buff *skb,
+ u8 direction);
+
+#endif /* _CG2900_LIB_H_ */
diff --git a/drivers/staging/cg2900/mfd/cg2900_test.c b/drivers/staging/cg2900/mfd/cg2900_test.c
new file mode 100644
index 00000000000..58ac6166af6
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/cg2900_test.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Driver for ST-Ericsson CG2900 test character device.
+ */
+#define NAME "cg2900_test"
+#define pr_fmt(fmt) NAME ": " fmt "\n"
+
+#include <asm/byteorder.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+#include "cg2900.h"
+#include "cg2900_core.h"
+
+#define MISC_DEV (info->misc_dev.this_device)
+
+/* Device names */
+#define CG2900_CDEV_NAME "cg2900_core_test"
+
+/**
+ * struct test_info - Main info structure for CG2900 test char device.
+ * @misc_dev: Registered Misc Device.
+ * @rx_queue: RX data queue.
+ * @dev: Device structure for STE Connectivity driver.
+ * @pdev: Platform device structure for STE Connectivity driver.
+ */
+struct test_info {
+ struct miscdevice misc_dev;
+ struct sk_buff_head rx_queue;
+ struct device *dev;
+ struct platform_device *pdev;
+};
+
+static struct test_info *test_info;
+
+/*
+ * main_wait_queue - Char device Wait Queue in CG2900 Core.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(char_wait_queue);
+
+/**
+ * tx_to_char_dev() - Handle data received from CG2900 Core.
+ * @dev: Current chip device information.
+ * @skb: Buffer with data coming from the device.
+ */
+static int tx_to_char_dev(struct cg2900_chip_dev *dev, struct sk_buff *skb)
+{
+ struct test_info *info = dev->t_data;
+ skb_queue_tail(&info->rx_queue, skb);
+ wake_up_interruptible_all(&char_wait_queue);
+ return 0;
+}
+
+/**
+ * cg2900_test_open() - User space char device has been opened.
+ * @inode: Device driver information.
+ * @filp: Pointer to the file struct.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if allocation fails.
+ * Errors from cg2900_register_trans_driver.
+ */
+static int cg2900_test_open(struct inode *inode, struct file *filp)
+{
+ struct test_info *info = test_info;
+ struct cg2900_chip_dev *dev;
+
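+ /* Each open registers a new transport device towards CG2900 Core */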
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(MISC_DEV, "Cannot allocate test_dev\n");
+ return -ENOMEM;
+ }
+ dev->dev = info->dev;
+ dev->pdev = info->pdev;
+ dev->t_data = info;
+ dev->t_cb.write = tx_to_char_dev;
+ filp->private_data = dev;
+
+ dev_info(MISC_DEV, "CG2900 test char dev opened\n");
+ return cg2900_register_trans_driver(dev);
+}
+
+/**
+ * cg2900_test_release() - User space char device has been closed.
+ * @inode: Device driver information.
+ * @filp: Pointer to the file struct.
+ *
+ * Returns:
+ * 0 if there is no error.
+ */
+static int cg2900_test_release(struct inode *inode, struct file *filp)
+{
+ struct cg2900_chip_dev *dev = filp->private_data;
+ struct test_info *info = dev->t_data;
+
+ dev_info(MISC_DEV, "CG2900 test char dev closed\n");
+ skb_queue_purge(&info->rx_queue);
+ cg2900_deregister_trans_driver(dev);
+ kfree(dev);
+
+ return 0;
+}
+
+/**
+ * cg2900_test_read() - Dequeue and copy a received buffer to user space.
+ * @filp: Pointer to the file struct.
+ * @buf: User space buffer to copy data into.
+ * @count: Maximum number of bytes to read.
+ * @f_pos: Position in buffer.
+ *
+ * Returns:
+ * >= 0 is number of bytes read.
+ * -EFAULT if copy_to_user fails.
+ */
+static ssize_t cg2900_test_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct sk_buff *skb;
+ int bytes_to_copy;
+ int err;
+ struct cg2900_chip_dev *dev = filp->private_data;
+ struct test_info *info = dev->t_data;
+ struct sk_buff_head *rx_queue = &info->rx_queue;
+
+ dev_dbg(MISC_DEV, "cg2900_test_read count %d\n", count);
+
+ if (skb_queue_empty(rx_queue))
+ wait_event_interruptible(char_wait_queue,
+ !(skb_queue_empty(rx_queue)));
+
+ skb = skb_dequeue(rx_queue);
+ if (!skb) {
+ dev_dbg(MISC_DEV,
+ "skb queue is empty - return with zero bytes\n");
+ bytes_to_copy = 0;
+ goto finished;
+ }
+
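+ /* Copy at most count bytes; any remaining data is requeued for the next read */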
+ bytes_to_copy = min_t(size_t, count, skb->len);
+ err = copy_to_user(buf, skb->data, bytes_to_copy);
+ if (err) {
+ skb_queue_head(rx_queue, skb);
+ return -EFAULT;
+ }
+
+ skb_pull(skb, bytes_to_copy);
+
+ if (skb->len > 0)
+ skb_queue_head(rx_queue, skb);
+ else
+ kfree_skb(skb);
+
+finished:
+ return bytes_to_copy;
+}
+
+/**
+ * cg2900_test_write() - Copy buffer from user and write to CG2900 Core.
+ * @filp: Pointer to the file struct.
+ * @buf: User space buffer to read data from.
+ * @count: Number of bytes to write.
+ * @f_pos: Position in buffer.
+ *
+ * Returns:
+ * >= 0 is number of bytes written.
+ * -EFAULT if copy_from_user fails.
+ * -ENOMEM if skb allocation fails.
+ */
+static ssize_t cg2900_test_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct sk_buff *skb;
+ struct cg2900_chip_dev *dev = filp->private_data;
+ struct test_info *info = dev->t_data;
+
+ dev_dbg(MISC_DEV, "cg2900_test_write count %d\n", count);
+
+ /* Allocate the SKB and reserve space for the header */
+ skb = alloc_skb(count + RX_SKB_RESERVE, GFP_KERNEL);
+ if (!skb) {
+ dev_err(MISC_DEV, "cg2900_test_write: Failed to alloc skb\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, RX_SKB_RESERVE);
+
+ if (copy_from_user(skb_put(skb, count), buf, count)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ dev->c_cb.data_from_chip(dev, skb);
+
+ return count;
+}
+
+/**
+ * cg2900_test_poll() - Handle POLL call to the interface.
+ * @filp: Pointer to the file struct.
+ * @wait: Poll table supplied to caller.
+ *
+ * Returns:
+ * Mask of current set POLL values (0 or (POLLIN | POLLRDNORM))
+ */
+static unsigned int cg2900_test_poll(struct file *filp, poll_table *wait)
+{
+ struct cg2900_chip_dev *dev = filp->private_data;
+ struct test_info *info = dev->t_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &char_wait_queue, wait);
+
+ if (!(skb_queue_empty(&info->rx_queue)))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static const struct file_operations test_char_dev_fops = {
+ .open = cg2900_test_open,
+ .release = cg2900_test_release,
+ .read = cg2900_test_read,
+ .write = cg2900_test_write,
+ .poll = cg2900_test_poll
+};
+
+/**
+ * test_char_dev_create() - Create a char device for testing.
+ * @info: Test device info.
+ *
+ * Creates a separate char device that will interact directly with userspace
+ * test application.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * Error codes from misc_register.
+ */
+static int test_char_dev_create(struct test_info *info)
+{
+ int err;
+
+ /* Initialize the RX queue */
+ skb_queue_head_init(&info->rx_queue);
+
+ /* Prepare miscdevice struct before registering the device */
+ info->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ info->misc_dev.name = CG2900_CDEV_NAME;
+ info->misc_dev.fops = &test_char_dev_fops;
+ info->misc_dev.parent = info->dev;
+ info->misc_dev.mode = S_IRUGO | S_IWUGO;
+
+ err = misc_register(&info->misc_dev);
+ if (err) {
+ dev_err(info->dev, "Error %d registering misc dev", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * test_char_dev_destroy() - Clean up after test_char_dev_create().
+ * @info: Test device info.
+ */
+static void test_char_dev_destroy(struct test_info *info)
+{
+ int err;
+
+ err = misc_deregister(&info->misc_dev);
+ if (err)
+ dev_err(info->dev, "Error %d deregistering misc dev\n", err);
+
+ /* Clean the message queue */
+ skb_queue_purge(&info->rx_queue);
+}
+
+/**
+ * cg2900_test_probe() - Initialize module.
+ *
+ * @pdev: Platform device.
+ *
+ * This function initializes and registers the test misc char device.
+ *
+ * Returns:
+ * 0 if success.
+ * -ENOMEM for failed alloc or structure creation.
+ * -EEXIST if device already exists.
+ * Error codes generated by test_char_dev_create.
+ */
+static int __devinit cg2900_test_probe(struct platform_device *pdev)
+{
+ int err;
+
+ dev_dbg(&pdev->dev, "cg2900_test_probe\n");
+
+ if (test_info) {
+ dev_err(&pdev->dev, "test_info exists\n");
+ return -EEXIST;
+ }
+
+ test_info = kzalloc(sizeof(*test_info), GFP_KERNEL);
+ if (!test_info) {
+ dev_err(&pdev->dev, "Couldn't allocate test_info\n");
+ return -ENOMEM;
+ }
+
+ test_info->dev = &pdev->dev;
+ test_info->pdev = pdev;
+
+ /* Create and add test char device. */
+ err = test_char_dev_create(test_info);
+ if (err) {
+ kfree(test_info);
+ test_info = NULL;
+ return err;
+ }
+
+ dev_set_drvdata(&pdev->dev, test_info);
+
+ dev_info(&pdev->dev, "CG2900 test char device driver started\n");
+
+ return 0;
+}
+
+/**
+ * cg2900_test_remove() - Remove module.
+ *
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if success.
+ */
+static int __devexit cg2900_test_remove(struct platform_device *pdev)
+{
+ struct test_info *test_info;
+
+ dev_dbg(&pdev->dev, "cg2900_test_remove\n");
+ test_info = dev_get_drvdata(&pdev->dev);
+ test_char_dev_destroy(test_info);
+ dev_set_drvdata(&pdev->dev, NULL);
+ kfree(test_info);
+ test_info = NULL;
+ dev_info(&pdev->dev, "CG2900 Test char device driver removed\n");
+ return 0;
+}
+
+static struct platform_driver cg2900_test_driver = {
+ .driver = {
+ .name = "cg2900-test",
+ .owner = THIS_MODULE,
+ },
+ .probe = cg2900_test_probe,
+ .remove = __devexit_p(cg2900_test_remove),
+};
+
+/**
+ * cg2900_test_init() - Initialize module.
+ *
+ * Registers platform driver.
+ */
+static int __init cg2900_test_init(void)
+{
+ pr_debug("cg2900_test_init");
+ return platform_driver_register(&cg2900_test_driver);
+}
+
+/**
+ * cg2900_test_exit() - Remove module.
+ *
+ * Unregisters platform driver.
+ */
+static void __exit cg2900_test_exit(void)
+{
+ pr_debug("cg2900_test_exit");
+ platform_driver_unregister(&cg2900_test_driver);
+}
+
+module_init(cg2900_test_init);
+module_exit(cg2900_test_exit);
+
+MODULE_AUTHOR("Par-Gunnar Hjalmdahl ST-Ericsson");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Linux CG2900 Test Char Device Driver");
diff --git a/drivers/staging/cg2900/mfd/stlc2690_chip.c b/drivers/staging/cg2900/mfd/stlc2690_chip.c
new file mode 100644
index 00000000000..84de0d7a976
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/stlc2690_chip.c
@@ -0,0 +1,1671 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth HCI H:4 Driver for ST-Ericsson STLC2690 BT/FM controller.
+ */
+#define NAME "stlc2690_chip"
+#define pr_fmt(fmt) NAME ": " fmt "\n"
+
+#include <asm/byteorder.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/core.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+
+#include "cg2900.h"
+#include "cg2900_core.h"
+#include "cg2900_lib.h"
+#include "stlc2690_chip.h"
+
+#ifndef MAX
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+#endif
+
+#define MAIN_DEV (main_info->dev)
+#define BOOT_DEV (info->user_in_charge->dev)
+
+#define WQ_NAME "stlc2690_chip_wq"
+
+#define LINE_TOGGLE_DETECT_TIMEOUT 50 /* ms */
+#define CHIP_READY_TIMEOUT 100 /* ms */
+#define CHIP_STARTUP_TIMEOUT 15000 /* ms */
+#define CHIP_SHUTDOWN_TIMEOUT 15000 /* ms */
+
+/** CHANNEL_BT_CMD - Bluetooth HCI H:4 channel
+ * for Bluetooth commands in the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_BT_CMD 0x01
+
+/** CHANNEL_BT_ACL - Bluetooth HCI H:4 channel
+ * for Bluetooth ACL data in the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_BT_ACL 0x02
+
+/** CHANNEL_BT_EVT - Bluetooth HCI H:4 channel
+ * for Bluetooth events in the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_BT_EVT 0x04
+
+/** CHANNEL_HCI_LOGGER - Bluetooth HCI H:4 channel
+ * for logging all transmitted H4 packets (on all channels).
+ */
+#define CHANNEL_HCI_LOGGER 0xFA
+
+/** CHANNEL_CORE - Bluetooth HCI H:4 channel
+ * for user space control of the ST-Ericsson connectivity controller.
+ */
+#define CHANNEL_CORE 0xFD
+
+/*
+ * For the char dev names we keep the same names in order to be able to reuse
+ * the users and to keep a consistent interface.
+ */
+
+/** STLC2690_BT_CMD - Bluetooth HCI H4 channel for Bluetooth commands.
+ */
+#define STLC2690_BT_CMD "cg2900_bt_cmd"
+
+/** STLC2690_BT_ACL - Bluetooth HCI H4 channel for Bluetooth ACL data.
+ */
+#define STLC2690_BT_ACL "cg2900_bt_acl"
+
+/** STLC2690_BT_EVT - Bluetooth HCI H4 channel for Bluetooth events.
+ */
+#define STLC2690_BT_EVT "cg2900_bt_evt"
+
+/** STLC2690_HCI_LOGGER - BT channel for logging all transmitted H4 packets.
+ * Data read is copy of all data transferred on the other channels.
+ * Only write allowed is configuration of the HCI Logger.
+ */
+#define STLC2690_HCI_LOGGER "cg2900_hci_logger"
+
+/** STLC2690_CORE- Channel for keeping ST-Ericsson STLC2690 enabled.
+ * Opening this channel forces the chip to stay powered.
+ * No data can be written to or read from this channel.
+ */
+#define STLC2690_CORE "cg2900_core"
+
+/**
+ * enum main_state - Main-state for STLC2690 driver.
+ * @STLC2690_INIT: STLC2690 initializing.
+ * @STLC2690_IDLE: No user registered to STLC2690 driver.
+ * @STLC2690_BOOTING: STLC2690 booting after first user is registered.
+ * @STLC2690_CLOSING: STLC2690 closing after last user has deregistered.
+ * @STLC2690_RESETING: STLC2690 reset requested.
+ * @STLC2690_ACTIVE: STLC2690 up and running with at least one user.
+ */
+enum main_state {
+ STLC2690_INIT,
+ STLC2690_IDLE,
+ STLC2690_BOOTING,
+ STLC2690_CLOSING,
+ STLC2690_RESETING,
+ STLC2690_ACTIVE
+};
+
+/**
+ * enum boot_state - BOOT-state for STLC2690 chip driver.
+ * @BOOT_RESET: HCI Reset has been sent.
+ * @BOOT_SEND_BD_ADDRESS: VS Store In FS command with BD address
+ * has been sent.
+ * @BOOT_GET_FILES_TO_LOAD: STLC2690 chip driver is retrieving file
+ * to load.
+ * @BOOT_DOWNLOAD_PATCH: STLC2690 chip driver is downloading
+ * patches.
+ * @BOOT_ACTIVATE_PATCHES_AND_SETTINGS: STLC2690 chip driver is activating
+ * patches and settings.
+ * @BOOT_READY: STLC2690 chip driver boot is ready.
+ * @BOOT_FAILED: STLC2690 chip driver boot failed.
+ */
+enum boot_state {
+ BOOT_RESET,
+ BOOT_SEND_BD_ADDRESS,
+ BOOT_GET_FILES_TO_LOAD,
+ BOOT_DOWNLOAD_PATCH,
+ BOOT_ACTIVATE_PATCHES_AND_SETTINGS,
+ BOOT_READY,
+ BOOT_FAILED
+};
+
+/**
+ * enum file_load_state - BOOT_FILE_LOAD-state for STLC2690 chip driver.
+ * @FILE_LOAD_GET_PATCH: Loading patches.
+ * @FILE_LOAD_GET_STATIC_SETTINGS: Loading static settings.
+ * @FILE_LOAD_NO_MORE_FILES: No more files to load.
+ * @FILE_LOAD_FAILED: File loading failed.
+ */
+enum file_load_state {
+ FILE_LOAD_GET_PATCH,
+ FILE_LOAD_GET_STATIC_SETTINGS,
+ FILE_LOAD_NO_MORE_FILES,
+ FILE_LOAD_FAILED
+};
+
+/**
+ * enum download_state - BOOT_DOWNLOAD state.
+ * @DOWNLOAD_PENDING: Download in progress.
+ * @DOWNLOAD_SUCCESS: Download successfully finished.
+ * @DOWNLOAD_FAILED: Downloading failed.
+ */
+enum download_state {
+ DOWNLOAD_PENDING,
+ DOWNLOAD_SUCCESS,
+ DOWNLOAD_FAILED
+};
+
+
+/**
+ * struct stlc2690_channel_item - List object for channel.
+ * @list: list_head struct.
+ * @user: User for this channel.
+ */
+struct stlc2690_channel_item {
+ struct list_head list;
+ struct cg2900_user_data *user;
+};
+
+/**
+ * struct stlc2690_skb_data - Structure for storing private data in an sk_buffer.
+ * @user: STLC2690 user for this sk_buffer.
+ */
+struct stlc2690_skb_data {
+ struct cg2900_user_data *user;
+};
+#define stlc2690_skb_data(__skb) ((struct stlc2690_skb_data *)((__skb)->cb))
+
+/**
+ * struct stlc2690_chip_info - Main info structure for STLC2690 chip driver.
+ * @patch_file_name: Stores patch file name.
+ * @settings_file_name: Stores settings file name.
+ * @file_info: Firmware file info (patch or settings).
+ * @main_state: Current MAIN-state of STLC2690 chip driver.
+ * @boot_state: Current BOOT-state of STLC2690 chip driver.
+ * @file_load_state: Current BOOT_FILE_LOAD-state of STLC2690 chip
+ * driver.
+ * @download_state: Current BOOT_DOWNLOAD-state of STLC2690 chip
+ * driver.
+ * @wq: STLC2690 chip driver workqueue.
+ * @chip_dev: Chip handler info.
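+ * @rw_lock:		Spinlock used when accessing shared data.
+ * @open_channels:	List of currently opened channels.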
+ * @user_in_charge: User currently operating. Normally used at
+ * channel open and close.
+ * @last_user: Last user of this chip.
+ * @logger: Logger user of this chip.
+ * @startup: True if system is starting up.
+ * @mfd_size: Number of MFD cells.
+ * @mfd_char_size: Number of MFD char device cells.
+ */
+struct stlc2690_chip_info {
+ char *patch_file_name;
+ char *settings_file_name;
+ struct cg2900_file_info file_info;
+ enum main_state main_state;
+ enum boot_state boot_state;
+ enum file_load_state file_load_state;
+ enum download_state download_state;
+ struct workqueue_struct *wq;
+ struct cg2900_chip_dev *chip_dev;
+ spinlock_t rw_lock;
+ struct list_head open_channels;
+ struct cg2900_user_data *user_in_charge;
+ struct cg2900_user_data *last_user;
+ struct cg2900_user_data *logger;
+ bool startup;
+ int mfd_size;
+ int mfd_char_size;
+};
+
+/**
+ * struct main_info - Main info structure for STLC2690 chip driver.
+ * @dev: Device structure.
+ * @cell_base_id: Base ID for MFD cells.
+ * @man_mutex: Management mutex.
+ */
+struct main_info {
+ struct device *dev;
+ int cell_base_id;
+ struct mutex man_mutex;
+};
+
+static struct main_info *main_info;
+
+/*
+ * main_wait_queue - Main Wait Queue in STLC2690 driver.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(main_wait_queue);
+
+static struct mfd_cell stlc2690_devs[];
+static struct mfd_cell stlc2690_char_devs[];
+
+static void chip_startup_finished(struct stlc2690_chip_info *info, int err);
+
+/**
+ * send_bd_address() - Send HCI VS command with BD address to the chip.
+ * @info: Chip info structure.
+ */
+static void send_bd_address(struct stlc2690_chip_info *info)
+{
+ struct bt_vs_store_in_fs_cmd *cmd;
+ u8 plen = sizeof(*cmd) + BT_BDADDR_SIZE;
+
+ cmd = kmalloc(plen, GFP_KERNEL);
+ if (!cmd)
+ return;
+
+ cmd->opcode = cpu_to_le16(STLC2690_BT_OP_VS_STORE_IN_FS);
+ cmd->plen = BT_PARAM_LEN(plen);
+ cmd->user_id = STLC2690_VS_STORE_IN_FS_USR_ID_BD_ADDR;
+ cmd->len = BT_BDADDR_SIZE;
+ /* Now copy the BD address received from user space control app. */
+ memcpy(cmd->data, bd_address, BT_BDADDR_SIZE);
+
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_SEND_BD_ADDRESS\n");
+ info->boot_state = BOOT_SEND_BD_ADDRESS;
+
+ cg2900_send_bt_cmd(info->user_in_charge, info->logger, cmd, plen);
+
+ kfree(cmd);
+}
+
+/**
+ * send_settings_file() - Transmit settings file.
+ * @info: Chip info structure.
+ *
+ * The send_settings_file() function transmits the settings file.
+ * The file is read in parts to fit in HCI packets. When finished, the
+ * settings file is released and the BD address is sent to the chip.
+ */
+static void send_settings_file(struct stlc2690_chip_info *info)
+{
+ int bytes_sent;
+
+ bytes_sent = cg2900_read_and_send_file_part(info->user_in_charge,
+ info->logger,
+ &info->file_info);
+ if (bytes_sent > 0) {
+ /* Data sent. Wait for CmdComplete */
+ return;
+ } else if (bytes_sent < 0) {
+ dev_err(BOOT_DEV, "send_settings_file: Error %d occurred\n",
+ bytes_sent);
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ chip_startup_finished(info, bytes_sent);
+ return;
+ }
+
+ /* No data was sent. This file is finished */
+ info->download_state = DOWNLOAD_SUCCESS;
+
+ /* Settings file finished. Release used resources */
+ dev_dbg(BOOT_DEV, "Settings file finished, release used resources\n");
+ release_firmware(info->file_info.fw_file);
+ info->file_info.fw_file = NULL;
+
+ dev_dbg(BOOT_DEV, "New file_load_state: FILE_LOAD_NO_MORE_FILES\n");
+ info->file_load_state = FILE_LOAD_NO_MORE_FILES;
+
+ /* Create and send HCI VS Store In FS command with bd address. */
+ send_bd_address(info);
+}
+
+/**
+ * send_patch_file() - Transmit patch file.
+ * @dev: Chip info structure.
+ *
+ * The send_patch_file() function transmits the patch file.
+ * The file is read in parts to fit in HCI packets. When the complete file has
+ * been transmitted, the file is closed.
+ * When finished, continue with the settings file.
+ */
+static void send_patch_file(struct cg2900_chip_dev *dev)
+{
+ int err;
+ int bytes_sent;
+ struct stlc2690_chip_info *info = dev->c_data;
+ int file_name_size = strlen("STLC2690_XXXX_XXXX_settings.fw");
+
+ bytes_sent = cg2900_read_and_send_file_part(info->user_in_charge,
+ info->logger,
+ &info->file_info);
+ if (bytes_sent > 0) {
+ /* Data sent. Wait for CmdComplete */
+ return;
+ } else if (bytes_sent < 0) {
+ dev_err(BOOT_DEV, "send_patch_file: Error %d occurred\n",
+ bytes_sent);
+ err = bytes_sent;
+ goto error_handling;
+ }
+
+ /* No data was sent. This file is finished */
+ info->download_state = DOWNLOAD_SUCCESS;
+
+ dev_dbg(BOOT_DEV, "Patch file finished, release used resources\n");
+ release_firmware(info->file_info.fw_file);
+ info->file_info.fw_file = NULL;
+
+ /*
+ * Create the settings file name from HCI revision and sub_version.
+ * file_name_size does not include terminating NULL character
+ * so add 1.
+ */
+ err = snprintf(info->settings_file_name, file_name_size + 1,
+ "STLC2690_%04X_%04X_settings.fw",
+ dev->chip.hci_revision, dev->chip.hci_sub_version);
+ if (err == file_name_size) {
+ dev_dbg(BOOT_DEV, "Downloading settings file %s\n",
+ info->settings_file_name);
+ } else {
+ dev_err(BOOT_DEV, "Settings file name failed! err=%d\n", err);
+ goto error_handling;
+ }
+
+ /* Retrieve the settings file */
+ err = request_firmware(&info->file_info.fw_file,
+ info->settings_file_name,
+ info->chip_dev->dev);
+ if (err) {
+ dev_err(BOOT_DEV, "Couldn't get settings file (%d)\n", err);
+ goto error_handling;
+ }
+ /* Now send the settings file */
+ dev_dbg(BOOT_DEV,
+ "New file_load_state: FILE_LOAD_GET_STATIC_SETTINGS\n");
+ info->file_load_state = FILE_LOAD_GET_STATIC_SETTINGS;
+ dev_dbg(BOOT_DEV, "New download_state: DOWNLOAD_PENDING\n");
+ info->download_state = DOWNLOAD_PENDING;
+ send_settings_file(info);
+ return;
+
+error_handling:
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ chip_startup_finished(info, err);
+}
+
+/**
+ * work_reset_after_error() - Handle reset.
+ * @work: Reference to work data.
+ *
+ * Handle a reset after received Command Complete event.
+ */
+static void work_reset_after_error(struct work_struct *work)
+{
+ struct cg2900_work *my_work;
+ struct cg2900_chip_dev *dev;
+ struct stlc2690_chip_info *info;
+
+ if (!work) {
+ dev_err(MAIN_DEV, "work_reset_after_error: work == NULL\n");
+ return;
+ }
+
+ my_work = container_of(work, struct cg2900_work, work);
+ dev = my_work->user_data;
+ info = dev->c_data;
+
+ chip_startup_finished(info, -EIO);
+
+ kfree(my_work);
+}
+
+/**
+ * work_load_patch_and_settings() - Start loading patches and settings.
+ * @work: Reference to work data.
+ */
+static void work_load_patch_and_settings(struct work_struct *work)
+{
+ int err = 0;
+ struct cg2900_work *my_work;
+ struct cg2900_chip_dev *dev;
+ struct stlc2690_chip_info *info;
+ int file_name_size = strlen("STLC2690_XXXX_XXXX_patch.fw");
+
+ if (!work) {
+ dev_err(MAIN_DEV,
+ "work_load_patch_and_settings: work == NULL\n");
+ return;
+ }
+
+ my_work = container_of(work, struct cg2900_work, work);
+ dev = my_work->user_data;
+ info = dev->c_data;
+
+ /* Check that we are in the right state */
+ if (info->boot_state != BOOT_GET_FILES_TO_LOAD)
+ goto finished;
+
+ /*
+ * Create the patch file name from HCI revision and sub_version.
+ * file_name_size does not include terminating NULL character
+ * so add 1.
+ */
+ err = snprintf(info->patch_file_name, file_name_size + 1,
+ "STLC2690_%04X_%04X_patch.fw", dev->chip.hci_revision,
+ dev->chip.hci_sub_version);
+ if (err == file_name_size) {
+ dev_dbg(BOOT_DEV, "Downloading patch file %s\n",
+ info->patch_file_name);
+ } else {
+ dev_err(BOOT_DEV, "Patch file name failed! err=%d\n", err);
+ goto error_handling;
+ }
+
+ /* We now have all the info needed */
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_DOWNLOAD_PATCH\n");
+ info->boot_state = BOOT_DOWNLOAD_PATCH;
+ dev_dbg(BOOT_DEV, "New download_state: DOWNLOAD_PENDING\n");
+ info->download_state = DOWNLOAD_PENDING;
+ dev_dbg(BOOT_DEV, "New file_load_state: FILE_LOAD_GET_PATCH\n");
+ info->file_load_state = FILE_LOAD_GET_PATCH;
+ info->file_info.chunk_id = 0;
+ info->file_info.file_offset = 0;
+ info->file_info.fw_file = NULL;
+
+ /* OK. Now it is time to download the patches */
+ err = request_firmware(&(info->file_info.fw_file),
+ info->patch_file_name,
+ dev->dev);
+ if (err < 0) {
+ dev_err(BOOT_DEV, "Couldn't get patch file (%d)\n", err);
+ goto error_handling;
+ }
+ send_patch_file(dev);
+
+ goto finished;
+
+error_handling:
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ chip_startup_finished(info, -EIO);
+finished:
+ kfree(my_work);
+}
+
+/**
+ * work_cont_file_download() - A file block has been written.
+ * @work: Reference to work data.
+ *
+ * Handle a received HCI VS Write File Block Complete event.
+ * Normally this means continue to send files to the controller.
+ */
+static void work_cont_file_download(struct work_struct *work)
+{
+ struct cg2900_work *my_work;
+ struct cg2900_chip_dev *dev;
+ struct stlc2690_chip_info *info;
+
+ if (!work) {
+ dev_err(MAIN_DEV, "work_cont_file_download: work == NULL\n");
+ return;
+ }
+
+ my_work = container_of(work, struct cg2900_work, work);
+ dev = my_work->user_data;
+ info = dev->c_data;
+
+ /* Continue to send patches or settings to the controller */
+ if (info->file_load_state == FILE_LOAD_GET_PATCH)
+ send_patch_file(dev);
+ else if (info->file_load_state == FILE_LOAD_GET_STATIC_SETTINGS)
+ send_settings_file(info);
+ else
+ dev_dbg(BOOT_DEV, "No more files to load\n");
+
+ kfree(my_work);
+}
+
+/**
+ * handle_reset_cmd_complete() - Handles HCI Reset Command Complete event.
+ * @dev: Chip info.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_reset_cmd_complete(struct cg2900_chip_dev *dev, u8 *data)
+{
+ u8 status = data[0];
+ struct stlc2690_chip_info *info = dev->c_data;
+
+ dev_dbg(BOOT_DEV, "Received Reset complete event with status 0x%X\n",
+ status);
+
+ if (BOOT_RESET != info->boot_state &&
+ BOOT_ACTIVATE_PATCHES_AND_SETTINGS != info->boot_state)
+ return false;
+
+ if (HCI_BT_ERROR_NO_ERROR != status) {
+ dev_err(BOOT_DEV, "Command complete for HciReset received with "
+ "error 0x%X\n", status);
+ cg2900_create_work_item(info->wq, work_reset_after_error, dev);
+ return true;
+ }
+
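+ /* After the first Reset (BOOT_RESET) start loading patch and settings files */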
+ if (BOOT_RESET == info->boot_state) {
+ info->boot_state = BOOT_GET_FILES_TO_LOAD;
+ cg2900_create_work_item(info->wq, work_load_patch_and_settings,
+ dev);
+ } else {
+ /*
+ * The boot sequence is now finished successfully.
+ * Set states and signal to waiting thread.
+ */
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_READY\n");
+ info->boot_state = BOOT_READY;
+ chip_startup_finished(info, 0);
+ }
+
+ return true;
+}
+
+/**
+ * handle_vs_store_in_fs_cmd_complete() - Handles HCI VS StoreInFS Command Complete event.
+ * @dev: Chip info.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_store_in_fs_cmd_complete(struct cg2900_chip_dev *dev,
+ u8 *data)
+{
+ u8 status = data[0];
+ struct stlc2690_chip_info *info = dev->c_data;
+
+ dev_dbg(BOOT_DEV,
+ "Received Store_in_FS complete event with status 0x%X\n",
+ status);
+
+ if (info->boot_state != BOOT_SEND_BD_ADDRESS)
+ return false;
+
+ if (HCI_BT_ERROR_NO_ERROR == status) {
+ struct hci_command_hdr cmd;
+
+ /* Send HCI Reset command to activate patches */
+ dev_dbg(BOOT_DEV,
+ "New boot_state: BOOT_ACTIVATE_PATCHES_AND_SETTINGS\n");
+ info->boot_state = BOOT_ACTIVATE_PATCHES_AND_SETTINGS;
+
+ cmd.opcode = cpu_to_le16(HCI_OP_RESET);
+ cmd.plen = 0; /* No parameters for Reset */
+ cg2900_send_bt_cmd(info->user_in_charge, info->logger, &cmd,
+ sizeof(cmd));
+ } else {
+ dev_err(BOOT_DEV,
+ "Command complete for StoreInFS received with error "
+ "0x%X\n", status);
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ cg2900_create_work_item(info->wq, work_reset_after_error, dev);
+ }
+
+ return true;
+}
+
+/**
+ * handle_vs_write_file_block_cmd_complete() - Handles HCI VS WriteFileBlock Command Complete event.
+ * @dev: Chip info.
+ * @data: Pointer to received HCI data packet.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_write_file_block_cmd_complete(struct cg2900_chip_dev *dev,
+ u8 *data)
+{
+ u8 status = data[0];
+ struct stlc2690_chip_info *info = dev->c_data;
+
+ if (info->boot_state != BOOT_DOWNLOAD_PATCH ||
+ info->download_state != DOWNLOAD_PENDING)
+ return false;
+
+ if (HCI_BT_ERROR_NO_ERROR == status)
+ cg2900_create_work_item(info->wq, work_cont_file_download, dev);
+ else {
+ dev_err(BOOT_DEV,
+ "Command complete for WriteFileBlock received with"
+ " error 0x%X\n", status);
+ dev_dbg(BOOT_DEV, "New download_state: DOWNLOAD_FAILED\n");
+ info->download_state = DOWNLOAD_FAILED;
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ if (info->file_info.fw_file) {
+ release_firmware(info->file_info.fw_file);
+ info->file_info.fw_file = NULL;
+ }
+ cg2900_create_work_item(info->wq, work_reset_after_error, dev);
+ }
+
+ return true;
+}
+
+/**
+ * handle_vs_write_file_block_cmd_status() - Handles HCI VS WriteFileBlock Command Status event.
+ * @dev: Chip info.
+ * @status: Returned status of WriteFileBlock command.
+ *
+ * Returns:
+ * true, if packet was handled internally,
+ * false, otherwise.
+ */
+static bool handle_vs_write_file_block_cmd_status(struct cg2900_chip_dev *dev,
+ u8 status)
+{
+ struct stlc2690_chip_info *info = dev->c_data;
+
+ if (info->boot_state != BOOT_DOWNLOAD_PATCH ||
+ info->download_state != DOWNLOAD_PENDING)
+ return false;
+
+ /*
+ * Only do something if there is an error. Otherwise we will wait for
+ * CmdComplete.
+ */
+ if (HCI_BT_ERROR_NO_ERROR != status) {
+ dev_err(BOOT_DEV,
+ "Command status for WriteFileBlock received with"
+ " error 0x%X\n", status);
+ dev_dbg(BOOT_DEV, "New download_state: DOWNLOAD_FAILED\n");
+ info->download_state = DOWNLOAD_FAILED;
+ dev_dbg(BOOT_DEV, "New boot_state: BOOT_FAILED\n");
+ info->boot_state = BOOT_FAILED;
+ if (info->file_info.fw_file) {
+ release_firmware(info->file_info.fw_file);
+ info->file_info.fw_file = NULL;
+ }
+ cg2900_create_work_item(info->wq, work_reset_after_error, dev);
+ }
+
+ return true;
+}
+
+/**
+ * handle_rx_data_bt_evt() - Check if received data should be handled in STLC2690 chip driver.
+ * @dev: Chip info.
+ * @skb: Data packet.
+ *
+ * The handle_rx_data_bt_evt() function checks if received data should be
+ * handled in STLC2690 chip driver. If so handle it correctly.
+ * Received data is always HCI BT Event.
+ *
+ * Returns:
+ * True, if packet was handled internally,
+ * False, otherwise.
+ */
+static bool handle_rx_data_bt_evt(struct cg2900_chip_dev *dev,
+ struct sk_buff *skb)
+{
+ bool pkt_handled = false;
+ /* skb cannot be NULL here so it is safe to de-reference */
+ u8 *data = skb->data;
+ struct hci_event_hdr *evt;
+ u16 op_code;
+
+ evt = (struct hci_event_hdr *)data;
+ data += sizeof(*evt);
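+ /* data now points to the event parameters */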
+
+ /* First check the event code. */
+ if (HCI_EV_CMD_COMPLETE == evt->evt) {
+ struct hci_ev_cmd_complete *cmd_complete;
+
+ cmd_complete = (struct hci_ev_cmd_complete *)data;
+ op_code = le16_to_cpu(cmd_complete->opcode);
+ dev_dbg(dev->dev,
+ "Received Command Complete: op_code = 0x%04X\n",
+ op_code);
+ /* Move to the first byte after the opcode, i.e. the return parameters */
+ data += sizeof(*cmd_complete);
+
+ if (op_code == HCI_OP_RESET)
+ pkt_handled = handle_reset_cmd_complete(dev, data);
+ else if (op_code == STLC2690_BT_OP_VS_STORE_IN_FS)
+ pkt_handled = handle_vs_store_in_fs_cmd_complete(dev,
+ data);
+ else if (op_code == STLC2690_BT_OP_VS_WRITE_FILE_BLOCK)
+ pkt_handled =
+ handle_vs_write_file_block_cmd_complete(dev,
+ data);
+ } else if (HCI_EV_CMD_STATUS == evt->evt) {
+ struct hci_ev_cmd_status *cmd_status;
+
+ cmd_status = (struct hci_ev_cmd_status *)data;
+
+ op_code = le16_to_cpu(cmd_status->opcode);
+
+ dev_dbg(dev->dev, "Received Command Status: op_code = 0x%04X\n",
+ op_code);
+
+ if (op_code == STLC2690_BT_OP_VS_WRITE_FILE_BLOCK)
+ pkt_handled = handle_vs_write_file_block_cmd_status
+ (dev, cmd_status->status);
+ } else if (HCI_EV_HW_ERROR == evt->evt) {
+ struct hci_ev_hw_error *hw_error;
+
+ hw_error = (struct hci_ev_hw_error *)data;
+ /*
+ * Only do a printout. There might be a receiving stack that can
+ * handle this event
+ */
+ dev_err(dev->dev, "HW Error event received with error 0x%02X\n",
+ hw_error->hw_code);
+ return false;
+ } else
+ return false;
+
+ if (pkt_handled)
+ kfree_skb(skb);
+
+ return pkt_handled;
+}
+
+/**
+ * data_from_chip() - Called when data is received from the chip.
+ * @dev: Chip info.
+ * @skb: Packet received.
+ *
+ * The data_from_chip() function checks if packet is a response for a packet it
+ * itself has transmitted. If not it finds the correct user and sends the packet
+ * to the user.
+ */
+static void data_from_chip(struct cg2900_chip_dev *dev,
+ struct sk_buff *skb)
+{
+ int h4_channel;
+ struct list_head *cursor;
+ struct stlc2690_channel_item *tmp;
+ struct stlc2690_chip_info *info = dev->c_data;
+ struct cg2900_user_data *user = NULL;
+
+ h4_channel = skb->data[0];
+ skb_pull(skb, HCI_H4_SIZE);
+
+ /* Then check if this is a response to data we have sent */
+ if (h4_channel == CHANNEL_BT_EVT && handle_rx_data_bt_evt(dev, skb))
+ return;
+
+ spin_lock_bh(&info->rw_lock);
+
+ /* Let's see if this packet has the same user as the last one */
+ if (info->last_user && info->last_user->h4_channel == h4_channel) {
+ user = info->last_user;
+ goto user_found;
+ }
+
+ /* Search through the list of all open channels to find the user */
+ list_for_each(cursor, &info->open_channels) {
+ tmp = list_entry(cursor, struct stlc2690_channel_item, list);
+ if (tmp->user->h4_channel == h4_channel) {
+ user = tmp->user;
+ goto user_found;
+ }
+ }
+
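+ /* user stays NULL if no open channel matched; the packet is then dropped below */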
+user_found:
+ info->last_user = user;
+ spin_unlock_bh(&info->rw_lock);
+
+ if (user)
+ user->read_cb(user, skb);
+ else {
+ dev_err(dev->dev,
+ "Could not find corresponding user to h4_channel %d\n",
+ h4_channel);
+ kfree_skb(skb);
+ }
+}
+
+static void chip_removed(struct cg2900_chip_dev *dev)
+{
+ struct stlc2690_chip_info *info = dev->c_data;
+
+ mfd_remove_devices(dev->dev);
+ kfree(info->settings_file_name);
+ kfree(info->patch_file_name);
+ destroy_workqueue(info->wq);
+ kfree(info);
+ dev->c_data = NULL;
+ dev->c_cb.chip_removed = NULL;
+ dev->c_cb.data_from_chip = NULL;
+}
+
+/**
+ * chip_shutdown() - Reset and power the chip off.
+ * @user: User requesting the shutdown.
+ */
+static void chip_shutdown(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev = cg2900_get_prv(user);
+ struct stlc2690_chip_info *info = dev->c_data;
+
+ dev_dbg(user->dev, "chip_shutdown\n");
+
+ /* Close the transport, which will power off the chip */
+ if (dev->t_cb.close)
+ dev->t_cb.close(dev);
+
+ /* Chip shut-down finished, set correct state and wake up waiting threads. */
+ dev_dbg(dev->dev, "New main_state: STLC2690_IDLE\n");
+ info->main_state = STLC2690_IDLE;
+ wake_up_all(&main_wait_queue);
+}
+
+static void chip_startup_finished(struct stlc2690_chip_info *info, int err)
+{
+ dev_dbg(BOOT_DEV, "chip_startup_finished (%d)\n", err);
+
+ if (err)
+ /* Shutdown the chip */
+ chip_shutdown(info->user_in_charge);
+ else {
+ dev_dbg(BOOT_DEV, "New main_state: STLC2690_ACTIVE\n");
+ info->main_state = STLC2690_ACTIVE;
+ }
+
+ wake_up_all(&main_wait_queue);
+
+ if (err)
+ return;
+
+ if (!info->chip_dev->t_cb.chip_startup_finished)
+ dev_err(BOOT_DEV, "chip_startup_finished callback not found\n");
+ else
+ info->chip_dev->t_cb.chip_startup_finished(info->chip_dev);
+}
+
+static int stlc2690_open(struct cg2900_user_data *user)
+{
+ int err;
+ struct cg2900_chip_dev *dev;
+ struct stlc2690_chip_info *info;
+ struct list_head *cursor;
+ struct stlc2690_channel_item *tmp;
+ struct hci_command_hdr cmd;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV, "stlc2690_open: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(user->dev, "stlc2690_open\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ mutex_lock(&main_info->man_mutex);
+
+ /* Add a short wait to avoid blocking the CPU on repeated open attempts */
+ err = wait_event_timeout(main_wait_queue,
+ (STLC2690_IDLE == info->main_state ||
+ STLC2690_ACTIVE == info->main_state),
+ msecs_to_jiffies(LINE_TOGGLE_DETECT_TIMEOUT));
+ if (err <= 0) {
+ if (STLC2690_INIT == info->main_state)
+ dev_err(user->dev, "Transport not opened\n");
+ else
+ dev_err(user->dev, "stlc2690_open currently busy "
+ "(0x%X). Try again\n", info->main_state);
+ err = -EBUSY;
+ goto err_free_mutex;
+ }
+
+ err = 0;
+
+ list_for_each(cursor, &info->open_channels) {
+ tmp = list_entry(cursor, struct stlc2690_channel_item, list);
+ if (tmp->user->h4_channel == user->h4_channel) {
+ dev_err(user->dev, "Channel %d is already opened\n",
+ user->h4_channel);
+ err = -EACCES;
+ goto err_free_mutex;
+ }
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ dev_err(user->dev, "Could not allocate tmp\n");
+ err = -ENOMEM;
+ goto err_free_mutex;
+ }
+ tmp->user = user;
+
+ if (STLC2690_ACTIVE != info->main_state &&
+ !user->chip_independent) {
+ /* Open transport and start-up the chip */
+ if (dev->t_cb.set_chip_power)
+ dev->t_cb.set_chip_power(dev, true);
+
+ /* Wait to be sure that the chip is ready */
+ schedule_timeout_killable(
+ msecs_to_jiffies(CHIP_READY_TIMEOUT));
+
+ if (dev->t_cb.open)
+ err = dev->t_cb.open(dev);
+ if (err) {
+ if (dev->t_cb.set_chip_power)
+ dev->t_cb.set_chip_power(dev, false);
+ goto err_free_list_item;
+ }
+
+ /* Start the boot sequence */
+ info->user_in_charge = user;
+ info->last_user = user;
+ dev_dbg(user->dev, "New boot_state: BOOT_RESET\n");
+ info->boot_state = BOOT_RESET;
+ dev_dbg(user->dev, "New main_state: STLC2690_BOOTING\n");
+ info->main_state = STLC2690_BOOTING;
+ cmd.opcode = cpu_to_le16(HCI_OP_RESET);
+ cmd.plen = 0; /* No parameters for HCI reset */
+ cg2900_send_bt_cmd(user, info->logger, &cmd, sizeof(cmd));
+
+ dev_dbg(user->dev, "Wait up to 15 seconds for chip to start\n");
+ wait_event_timeout(main_wait_queue,
+ (STLC2690_ACTIVE == info->main_state ||
+ STLC2690_IDLE == info->main_state),
+ msecs_to_jiffies(CHIP_STARTUP_TIMEOUT));
+ if (STLC2690_ACTIVE != info->main_state) {
+ dev_err(user->dev, "STLC2690 driver failed to start\n");
+
+ if (dev->t_cb.close)
+ dev->t_cb.close(dev);
+
+ dev_dbg(user->dev, "New main_state: STLC2690_IDLE\n");
+ info->main_state = STLC2690_IDLE;
+ err = -EIO;
+ goto err_free_list_item;
+ }
+ }
+
+ list_add_tail(&tmp->list, &info->open_channels);
+
+ user->opened = true;
+
+ dev_dbg(user->dev, "H:4 channel opened\n");
+
+ mutex_unlock(&main_info->man_mutex);
+ return 0;
+err_free_list_item:
+ kfree(tmp);
+err_free_mutex:
+ mutex_unlock(&main_info->man_mutex);
+ return err;
+}
+
+static int stlc2690_hci_log_open(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev;
+ struct stlc2690_chip_info *info;
+ int err;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "stlc2690_hci_log_open: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(user->dev, "stlc2690_hci_log_open\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ if (info->logger) {
+ dev_err(user->dev, "HCI Logger already stored\n");
+ return -EACCES;
+ }
+
+ info->logger = user;
+ err = stlc2690_open(user);
+ if (err)
+ info->logger = NULL;
+ return err;
+}
+
+static void stlc2690_close(struct cg2900_user_data *user)
+{
+ bool keep_powered = false;
+ struct list_head *cursor, *next;
+ struct stlc2690_channel_item *tmp;
+ struct cg2900_chip_dev *dev;
+ struct stlc2690_chip_info *info;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "stlc2690_close: Calling with NULL pointer\n");
+ return;
+ }
+
+ dev_dbg(user->dev, "stlc2690_close\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ mutex_lock(&main_info->man_mutex);
+
+ /*
+ * Go through each open channel. Remove our channel and check if there
+ * is any other channel that want to keep the chip running
+ */
+ list_for_each_safe(cursor, next, &info->open_channels) {
+ tmp = list_entry(cursor, struct stlc2690_channel_item, list);
+ if (tmp->user == user) {
+ list_del(cursor);
+ kfree(tmp);
+ } else if (!tmp->user->chip_independent)
+ keep_powered = true;
+ }
+
+ if (keep_powered)
+ /* This was not the last user, we're done. */
+ goto finished;
+
+ if (STLC2690_IDLE == info->main_state)
+ /* Chip has already been shut down. */
+ goto finished;
+
+ dev_dbg(user->dev, "New main_state: STLC2690_CLOSING\n");
+ info->main_state = STLC2690_CLOSING;
+ chip_shutdown(user);
+
+ dev_dbg(user->dev, "Wait up to 15 seconds for chip to shut-down\n");
+ wait_event_timeout(main_wait_queue,
+ STLC2690_IDLE == info->main_state,
+ msecs_to_jiffies(CHIP_SHUTDOWN_TIMEOUT));
+
+ /* Force shutdown if we timed out */
+ if (STLC2690_IDLE != info->main_state) {
+ dev_err(user->dev,
+ "ST-Ericsson STLC2690 Core Driver was shut-down with "
+ "problems\n");
+
+ if (dev->t_cb.close)
+ dev->t_cb.close(dev);
+
+ dev_dbg(user->dev, "New main_state: STLC2690_IDLE\n");
+ info->main_state = STLC2690_IDLE;
+ }
+
+finished:
+ mutex_unlock(&main_info->man_mutex);
+ user->opened = false;
+ dev_dbg(user->dev, "H:4 channel closed\n");
+}
+
+static void stlc2690_hci_log_close(struct cg2900_user_data *user)
+{
+ struct cg2900_chip_dev *dev;
+ struct stlc2690_chip_info *info;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "stlc2690_hci_log_close: Calling with NULL pointer\n");
+ return;
+ }
+
+ dev_dbg(user->dev, "stlc2690_hci_log_close\n");
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ info->logger = NULL;
+ stlc2690_close(user);
+}
+
+static int stlc2690_reset(struct cg2900_user_data *user)
+{
+ struct list_head *cursor, *next;
+ struct stlc2690_channel_item *tmp;
+ struct cg2900_chip_dev *dev;
+ struct stlc2690_chip_info *info;
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "stlc2690_reset: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ dev_info(user->dev, "stlc2690_reset\n");
+
+ BUG_ON(!main_info);
+
+ mutex_lock(&main_info->man_mutex);
+
+ dev_dbg(user->dev, "New main_state: STLC2690_RESETING\n");
+ info->main_state = STLC2690_RESETING;
+
+ chip_shutdown(user);
+
+ /*
+ * Inform all opened channels about the reset and free the user devices
+ */
+ list_for_each_safe(cursor, next, &info->open_channels) {
+ tmp = list_entry(cursor, struct stlc2690_channel_item, list);
+ list_del(cursor);
+ tmp->user->opened = false;
+ tmp->user->reset_cb(tmp->user);
+ kfree(tmp);
+ }
+
+ /* Reset finished. We are now idle until first channel is opened */
+ dev_dbg(user->dev, "New main_state: STLC2690_IDLE\n");
+ info->main_state = STLC2690_IDLE;
+
+ mutex_unlock(&main_info->man_mutex);
+
+ /*
+ * Send wake-up since this might have been called from a failed boot.
+ * No harm done if it is a STLC2690 chip user who called.
+ */
+ wake_up_all(&main_wait_queue);
+
+ return 0;
+}
+
+static struct sk_buff *stlc2690_alloc_skb(unsigned int size, gfp_t priority)
+{
+ struct sk_buff *skb;
+
+ dev_dbg(MAIN_DEV, "stlc2690_alloc_skb size %d bytes\n", size);
+
+ /* Allocate the SKB and reserve space for the header */
+ skb = alloc_skb(size + CG2900_SKB_RESERVE, priority);
+ if (skb)
+ skb_reserve(skb, CG2900_SKB_RESERVE);
+
+ return skb;
+}
+
+static int stlc2690_write(struct cg2900_user_data *user, struct sk_buff *skb)
+{
+ int err = 0;
+ u8 *h4_header;
+ struct cg2900_chip_dev *dev;
+ struct stlc2690_chip_info *info;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV,
+ "stlc2690_write: Calling with NULL pointer\n");
+ return -EINVAL;
+ }
+
+ if (!skb) {
+ dev_err(user->dev, "stlc2690_write with no sk_buffer\n");
+ return -EINVAL;
+ }
+
+ dev = cg2900_get_prv(user);
+ info = dev->c_data;
+
+ dev_dbg(user->dev, "stlc2690_write length %d bytes\n", skb->len);
+
+ if (!user->opened) {
+ dev_err(user->dev,
+ "Trying to transmit data on a closed channel\n");
+ return -EACCES;
+ }
+
+ /*
+ * Move the data pointer to the H:4 header position and
+ * store the H4 header.
+ */
+ h4_header = skb_push(skb, CG2900_SKB_RESERVE);
+ *h4_header = (u8)user->h4_channel;
+ cg2900_tx_to_chip(user, info->logger, skb);
+
+ return err;
+}
+
+static int stlc2690_no_write(struct cg2900_user_data *user,
+ struct sk_buff *skb)
+{
+ dev_err(user->dev, "Not allowed to send on this channel\n");
+ return -EPERM;
+}
+
+static bool stlc2690_get_local_revision(struct cg2900_user_data *user,
+ struct cg2900_rev_data *rev_data)
+{
+ struct cg2900_chip_dev *dev;
+
+ BUG_ON(!main_info);
+
+ if (!user) {
+ dev_err(MAIN_DEV, "stlc2690_get_local_revision: Calling with "
+ "NULL pointer\n");
+ return false;
+ }
+
+ if (!rev_data) {
+ dev_err(user->dev, "Calling with rev_data NULL\n");
+ return false;
+ }
+
+ dev = cg2900_get_prv(user);
+
+ rev_data->revision = dev->chip.hci_revision;
+ rev_data->sub_version = dev->chip.hci_sub_version;
+
+ return true;
+}
+
+static struct cg2900_user_data btcmd_data = {
+ .h4_channel = CHANNEL_BT_CMD,
+};
+static struct cg2900_user_data btacl_data = {
+ .h4_channel = CHANNEL_BT_ACL,
+};
+static struct cg2900_user_data btevt_data = {
+ .h4_channel = CHANNEL_BT_EVT,
+};
+static struct cg2900_user_data hci_logger_data = {
+ .h4_channel = CHANNEL_HCI_LOGGER,
+ .chip_independent = true,
+ .write = stlc2690_no_write,
+ .open = stlc2690_hci_log_open,
+ .close = stlc2690_hci_log_close,
+};
+static struct cg2900_user_data core_data = {
+ .h4_channel = CHANNEL_CORE,
+ .write = stlc2690_no_write,
+};
+
+static struct mfd_cell stlc2690_devs[] = {
+ {
+ .name = "cg2900-btcmd",
+ .platform_data = &btcmd_data,
+ .pdata_size = sizeof(btcmd_data),
+ },
+ {
+ .name = "cg2900-btacl",
+ .platform_data = &btacl_data,
+ .pdata_size = sizeof(btacl_data),
+ },
+ {
+ .name = "cg2900-btevt",
+ .platform_data = &btevt_data,
+ .pdata_size = sizeof(btevt_data),
+ },
+ {
+ .name = "cg2900-hcilogger",
+ .platform_data = &hci_logger_data,
+ .pdata_size = sizeof(hci_logger_data),
+ },
+ {
+ .name = "cg2900-core",
+ .platform_data = &core_data,
+ .pdata_size = sizeof(core_data),
+ },
+};
+
+static struct cg2900_user_data char_btcmd_data = {
+ .channel_data = {
+ .char_dev_name = STLC2690_BT_CMD,
+ },
+ .h4_channel = CHANNEL_BT_CMD,
+};
+static struct cg2900_user_data char_btacl_data = {
+ .channel_data = {
+ .char_dev_name = STLC2690_BT_ACL,
+ },
+ .h4_channel = CHANNEL_BT_ACL,
+};
+static struct cg2900_user_data char_btevt_data = {
+ .channel_data = {
+ .char_dev_name = STLC2690_BT_EVT,
+ },
+ .h4_channel = CHANNEL_BT_EVT,
+};
+static struct cg2900_user_data char_hci_logger_data = {
+ .channel_data = {
+ .char_dev_name = STLC2690_HCI_LOGGER,
+ },
+ .h4_channel = CHANNEL_HCI_LOGGER,
+ .chip_independent = true,
+ .write = stlc2690_no_write,
+ .open = stlc2690_hci_log_open,
+ .close = stlc2690_hci_log_close,
+};
+static struct cg2900_user_data char_core_data = {
+ .channel_data = {
+ .char_dev_name = STLC2690_CORE,
+ },
+ .h4_channel = CHANNEL_CORE,
+ .write = stlc2690_no_write,
+};
+
+static struct mfd_cell stlc2690_char_devs[] = {
+ {
+ .name = "cg2900-chardev",
+ .id = 0,
+ .platform_data = &char_btcmd_data,
+ .pdata_size = sizeof(char_btcmd_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 1,
+ .platform_data = &char_btacl_data,
+ .pdata_size = sizeof(char_btacl_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 2,
+ .platform_data = &char_btevt_data,
+ .pdata_size = sizeof(char_btevt_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 7,
+ .platform_data = &char_hci_logger_data,
+ .pdata_size = sizeof(char_hci_logger_data),
+ },
+ {
+ .name = "cg2900-chardev",
+ .id = 8,
+ .platform_data = &char_core_data,
+ .pdata_size = sizeof(char_core_data),
+ },
+};
+
+/**
+ * set_plat_data() - Initializes data for an MFD cell.
+ * @cell: MFD cell.
+ * @dev: Current chip.
+ *
+ * Sets each callback to default function unless already set.
+ */
+static void set_plat_data(struct mfd_cell *cell, struct cg2900_chip_dev *dev)
+{
+ struct cg2900_user_data *user = cell->platform_data;
+
+ if (!user->open)
+ user->open = stlc2690_open;
+ if (!user->close)
+ user->close = stlc2690_close;
+ if (!user->reset)
+ user->reset = stlc2690_reset;
+ if (!user->alloc_skb)
+ user->alloc_skb = stlc2690_alloc_skb;
+ if (!user->write)
+ user->write = stlc2690_write;
+ if (!user->get_local_revision)
+ user->get_local_revision = stlc2690_get_local_revision;
+
+ cg2900_set_prv(user, dev);
+}
+
+/**
+ * check_chip_support() - Checks if connected chip is handled by this driver.
+ * @dev: Chip info structure.
+ *
+ * If the chip is supported, return true and fill in the callbacks in @dev.
+ *
+ * Returns:
+ * true if chip is handled by this driver.
+ * false otherwise.
+ */
+static bool check_chip_support(struct cg2900_chip_dev *dev)
+{
+ struct cg2900_platform_data *pf_data;
+ struct stlc2690_chip_info *info;
+ int i;
+ int err;
+
+ dev_dbg(dev->dev, "check_chip_support\n");
+
+ /*
+ * Check if this is a STLC2690 revision.
+ * We do not care about the sub-version at the moment. Change this if
+ * necessary.
+ */
+ if (dev->chip.manufacturer != STLC2690_SUPP_MANUFACTURER ||
+ dev->chip.hci_revision < STLC2690_SUPP_REVISION_MIN ||
+ dev->chip.hci_revision > STLC2690_SUPP_REVISION_MAX) {
+ dev_dbg(dev->dev, "Chip not supported by STLC2690 driver\n"
+ "\tMan: 0x%02X\n"
+ "\tRev: 0x%04X\n"
+ "\tSub: 0x%04X\n",
+ dev->chip.manufacturer, dev->chip.hci_revision,
+ dev->chip.hci_sub_version);
+ return false;
+ }
+
+ /* Store needed data */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev->dev, "Couldn't allocate info struct\n");
+ return false;
+ }
+
+ /* Initialize all variables */
+ INIT_LIST_HEAD(&info->open_channels);
+ spin_lock_init(&info->rw_lock);
+ info->chip_dev = dev;
+
+ info->wq = create_singlethread_workqueue(WQ_NAME);
+ if (!info->wq) {
+ dev_err(dev->dev, "Could not create workqueue\n");
+ goto err_handling_free_info;
+ }
+
+ info->patch_file_name = kzalloc(NAME_MAX + 1, GFP_ATOMIC);
+ if (!info->patch_file_name) {
+ dev_err(dev->dev,
+ "Couldn't allocate name buffer for patch file\n");
+ goto err_handling_destroy_wq;
+ }
+
+ info->settings_file_name = kzalloc(NAME_MAX + 1, GFP_ATOMIC);
+ if (!info->settings_file_name) {
+ dev_err(dev->dev,
+ "Couldn't allocate name buffer for settings file\n");
+ goto err_handling_free_patch_name;
+ }
+
+ dev->c_data = info;
+ /* Set the callbacks */
+ dev->c_cb.data_from_chip = data_from_chip;
+ dev->c_cb.chip_removed = chip_removed;
+ info->chip_dev = dev;
+
+ mutex_lock(&main_info->man_mutex);
+
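+ /* Propagate the BT bus type from platform data to the BT channel user data */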
+ pf_data = dev_get_platdata(dev->dev);
+ btcmd_data.channel_data.bt_bus = pf_data->bus;
+ btacl_data.channel_data.bt_bus = pf_data->bus;
+ btevt_data.channel_data.bt_bus = pf_data->bus;
+
+ for (i = 0; i < ARRAY_SIZE(stlc2690_devs); i++)
+ set_plat_data(&stlc2690_devs[i], dev);
+ for (i = 0; i < ARRAY_SIZE(stlc2690_char_devs); i++)
+ set_plat_data(&stlc2690_char_devs[i], dev);
+ mutex_unlock(&main_info->man_mutex);
+
+ dev_info(dev->dev, "Chip supported by the STLC2690 chip driver\n");
+
+ /* Close the transport, which will power off the chip */
+ if (dev->t_cb.close)
+ dev->t_cb.close(dev);
+
+ dev_dbg(dev->dev, "New main_state: STLC2690_IDLE\n");
+ info->main_state = STLC2690_IDLE;
+
+ err = mfd_add_devices(dev->dev, main_info->cell_base_id, stlc2690_devs,
+ ARRAY_SIZE(stlc2690_devs), NULL, 0);
+ if (err) {
+ dev_err(dev->dev, "Failed to add stlc2690_devs (%d)\n", err);
+ goto err_handling_free_settings_name;
+ }
+
+ err = mfd_add_devices(dev->dev, main_info->cell_base_id,
+ stlc2690_char_devs,
+ ARRAY_SIZE(stlc2690_char_devs), NULL, 0);
+ if (err) {
+ dev_err(dev->dev, "Failed to add stlc2690_char_devs (%d)\n",
+ err);
+ goto err_handling_remove_devs;
+ }
+
+ /*
+ * Increase base ID so next connected transport will not get the
+ * same device IDs.
+ */
+ main_info->cell_base_id += MAX(ARRAY_SIZE(stlc2690_devs),
+ ARRAY_SIZE(stlc2690_char_devs));
+
+ return true;
+
+err_handling_remove_devs:
+ mfd_remove_devices(dev->dev);
+err_handling_free_settings_name:
+ kfree(info->settings_file_name);
+err_handling_free_patch_name:
+ kfree(info->patch_file_name);
+err_handling_destroy_wq:
+ destroy_workqueue(info->wq);
+err_handling_free_info:
+ kfree(info);
+ return false;
+}
+
+static struct cg2900_id_callbacks chip_support_callbacks = {
+ .check_chip_support = check_chip_support,
+};
+
+/**
+ * stlc2690_chip_probe() - Initialize STLC2690 chip handler resources.
+ * @pdev: Platform device.
+ *
+ * This function initializes the STLC2690 driver, then registers to
+ * the CG2900 Core.
+ *
+ * Returns:
+ * 0 if success.
+ * -ENOMEM for failed alloc or structure creation.
+ * Error codes generated by cg2900_register_chip_driver.
+ */
+static int __devinit stlc2690_chip_probe(struct platform_device *pdev)
+{
+ int err;
+
+ dev_dbg(&pdev->dev, "stlc2690_chip_probe\n");
+
+ main_info = kzalloc(sizeof(*main_info), GFP_ATOMIC);
+ if (!main_info) {
+ dev_err(&pdev->dev, "Couldn't allocate main_info\n");
+ return -ENOMEM;
+ }
+
+ main_info->dev = &pdev->dev;
+ mutex_init(&main_info->man_mutex);
+
+ err = cg2900_register_chip_driver(&chip_support_callbacks);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Couldn't register chip driver (%d)\n", err);
+ goto error_handling;
+ }
+
+ dev_info(&pdev->dev, "STLC2690 chip driver started\n");
+
+ return 0;
+
+error_handling:
+ mutex_destroy(&main_info->man_mutex);
+ kfree(main_info);
+ main_info = NULL;
+ return err;
+}
+
+/**
+ * stlc2690_chip_remove() - Release STLC2690 chip handler resources.
+ * @pdev: Platform device.
+ *
+ * Returns:
+ * 0 if success (always success).
+ */
+static int __devexit stlc2690_chip_remove(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "STLC2690 chip driver removed\n");
+
+ cg2900_deregister_chip_driver(&chip_support_callbacks);
+
+ if (!main_info)
+ return 0;
+
+ mutex_destroy(&main_info->man_mutex);
+ kfree(main_info);
+ main_info = NULL;
+ return 0;
+}
+
+static struct platform_driver stlc2690_chip_driver = {
+ .driver = {
+ .name = "stlc2690-chip",
+ .owner = THIS_MODULE,
+ },
+ .probe = stlc2690_chip_probe,
+ .remove = __devexit_p(stlc2690_chip_remove),
+};
+
+/**
+ * stlc2690_chip_init() - Initialize module.
+ *
+ * Registers platform driver.
+ */
+static int __init stlc2690_chip_init(void)
+{
+ pr_debug("stlc2690_chip_init\n");
+ return platform_driver_register(&stlc2690_chip_driver);
+}
+
+/**
+ * stlc2690_chip_exit() - Remove module.
+ *
+ * Unregisters platform driver.
+ */
+static void __exit stlc2690_chip_exit(void)
+{
+ pr_debug("stlc2690_chip_exit\n");
+ platform_driver_unregister(&stlc2690_chip_driver);
+}
+
+module_init(stlc2690_chip_init);
+module_exit(stlc2690_chip_exit);
+
+MODULE_AUTHOR("Par-Gunnar Hjalmdahl ST-Ericsson");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Linux STLC2690 Connectivity Device Driver");
diff --git a/drivers/staging/cg2900/mfd/stlc2690_chip.h b/drivers/staging/cg2900/mfd/stlc2690_chip.h
new file mode 100644
index 00000000000..d14e7737636
--- /dev/null
+++ b/drivers/staging/cg2900/mfd/stlc2690_chip.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors:
+ * Par-Gunnar Hjalmdahl (par-gunnar.p.hjalmdahl@stericsson.com) for ST-Ericsson.
+ * Henrik Possung (henrik.possung@stericsson.com) for ST-Ericsson.
+ * Josef Kindberg (josef.kindberg@stericsson.com) for ST-Ericsson.
+ * Dariusz Szymszak (dariusz.xd.szymczak@stericsson.com) for ST-Ericsson.
+ * Kjell Andersson (kjell.k.andersson@stericsson.com) for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Linux Bluetooth HCI H:4 Driver for ST-Ericsson STLC2690 BT/FM controller.
+ */
+
+#ifndef _STLC2690_CHIP_H_
+#define _STLC2690_CHIP_H_
+
+/* Supported chips */
+#define STLC2690_SUPP_MANUFACTURER 0x30
+#define STLC2690_SUPP_REVISION_MIN 0x0500
+#define STLC2690_SUPP_REVISION_MAX 0x06FF
+
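+/* HCI BT command header: 2 byte opcode followed by 1 byte parameter length */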
+#define BT_SIZE_OF_HDR (sizeof(__le16) + sizeof(__u8))
+#define BT_PARAM_LEN(__pkt_len) (__pkt_len - BT_SIZE_OF_HDR)
+
+/* BT VS Store In FS command */
+#define STLC2690_BT_OP_VS_STORE_IN_FS 0xFC22
+struct bt_vs_store_in_fs_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 user_id;
+ __u8 len;
+ __u8 data[];
+} __packed;
+
+/* BT VS Write File Block command */
+#define STLC2690_BT_OP_VS_WRITE_FILE_BLOCK 0xFC2E
+struct bt_vs_write_file_block_cmd {
+ __le16 opcode;
+ __u8 plen;
+ __u8 id;
+ __u8 data[];
+} __packed;
+
+/* User ID for storing BD address in chip using Store_In_FS command */
+#define STLC2690_VS_STORE_IN_FS_USR_ID_BD_ADDR 0xFE
+
+#endif /* _STLC2690_CHIP_H_ */
diff --git a/drivers/staging/cw1200/.gitignore b/drivers/staging/cw1200/.gitignore
new file mode 100644
index 00000000000..6ad0d1ec58e
--- /dev/null
+++ b/drivers/staging/cw1200/.gitignore
@@ -0,0 +1,10 @@
+*.o
+*.ko
+*.ko.cmd
+.tmp_versions
+modules.order
+Module.symvers
+Module.markers
+*.o.cmd
+*.mod.c
+*.swp
diff --git a/drivers/staging/cw1200/Kconfig b/drivers/staging/cw1200/Kconfig
new file mode 100644
index 00000000000..4be840c1576
--- /dev/null
+++ b/drivers/staging/cw1200/Kconfig
@@ -0,0 +1,105 @@
+config CW1200
+ tristate "CW1200 WLAN support"
+ select MAC80211
+ select CFG80211
+ help
+
+ This is an experimental driver for the cw1200 chip-set.
+ Enabling this option enables the generic driver without
+ any platform support.
+
+ Please select the appropriate platform below.
+
+if CW1200
+
+config CW1200_NON_POWER_OF_TWO_BLOCKSIZES
+ bool "Platform supports non-power-of-two SDIO transfer"
+ depends on CW1200
+ help
+ Say N here only if you are running the driver on a platform
+ which does not have support for non-power-of-two SDIO transfer.
+ If unsure, say Y.
+
+config CW1200_USE_GPIO_IRQ
+ bool "Use GPIO interrupt"
+ depends on CW1200
+ help
+ Say Y here if you want to include GPIO IRQ support instead of SDIO IRQ.
+ If unsure, say N.
+
+config CW1200_5GHZ_SUPPORT
+ bool "5GHz band support"
+ depends on CW1200
+ help
+ Say Y if your device supports 5GHz band. Should be disabled for
+ CW1100 silicon.
+ If unsure, say N.
+
+config CW1200_WAPI_SUPPORT
+ bool "WAPI support"
+ depends on CW1200
+ help
+ Say Y if your compat-wireless supports WAPI.
+ If unsure, say N.
+
+config CW1200_USE_STE_EXTENSIONS
+ bool "STE extensions"
+ depends on CW1200
+ help
+ Say Y if you want to include STE extensions.
+ If unsure, say N.
+
+config CW1200_DISABLE_BEACON_HINTS
+ bool "Disable 11d beacon hints"
+ depends on CW1200
+ help
+ Say Y if you want to disable 11d beacon hints.
+ If unsure, say N.
+
+config CW1200_U5500_SUPPORT
+ bool "Enable U5500 support"
+ depends on CW1200
+ help
+ Say Y if you want to enable WLAN support on the U5500 platform.
+ If unsure, say N.
+
+menu "Driver debug features"
+ depends on CW1200
+
+config CW1200_DEBUGFS
+ bool "Expose driver internals to DebugFS (DEVELOPMENT)"
+
+config CW1200_BH_DEBUG
+ bool "Enable low-level device communication logs (DEVELOPMENT)"
+
+config CW1200_WSM_DEBUG
+ bool "Enable WSM API debug messages (DEVELOPMENT)"
+
+config CW1200_WSM_DUMPS
+ bool "Verbose WSM API logging (DEVELOPMENT)"
+
+config CW1200_WSM_DUMPS_SHORT
+ bool "Dump only first x bytes (default 20) (DEVELOPMENT)"
+
+config CW1200_TXRX_DEBUG
+ bool "Enable TX/RX debug messages (DEVELOPMENT)"
+
+config CW1200_TX_POLICY_DEBUG
+ bool "Enable TX policy debug (DEVELOPMENT)"
+
+config CW1200_STA_DEBUG
+ bool "Enable STA/AP debug (DEVELOPMENT)"
+
+config CW1200_DUMP_ON_ERROR
+ bool "Dump kernel in case of critical error (DEVELOPMENT)"
+
+endmenu
+
+config CW1200_ITP
+ bool "Enable ITP DebugFS"
+ depends on CW1200
+ help
+ Say Y if you want to include ITP code.
+ If unsure, say N.
+
+endif
diff --git a/drivers/staging/cw1200/Makefile b/drivers/staging/cw1200/Makefile
new file mode 100644
index 00000000000..67d7867c1b5
--- /dev/null
+++ b/drivers/staging/cw1200/Makefile
@@ -0,0 +1,20 @@
+cw1200_core-y := \
+ fwio.o \
+ txrx.o \
+ main.o \
+ queue.o \
+ hwio.o \
+ bh.o \
+ wsm.o \
+ sta.o \
+ ap.o \
+ scan.o
+cw1200_core-$(CONFIG_CW1200_DEBUGFS) += debug.o
+cw1200_core-$(CONFIG_CW1200_ITP) += itp.o
+cw1200_core-$(CONFIG_PM) += pm.o
+
+cw1200_wlan-y := cw1200_sdio.o
+
+obj-$(CONFIG_CW1200) += cw1200_core.o
+obj-$(CONFIG_CW1200) += cw1200_wlan.o
+
diff --git a/drivers/staging/cw1200/TODO b/drivers/staging/cw1200/TODO
new file mode 100644
index 00000000000..0d2be40e1f4
--- /dev/null
+++ b/drivers/staging/cw1200/TODO
@@ -0,0 +1,10 @@
+TODO:
+ - IBSS: Not implemented (3-10 m*d).
+ - 11n: Almost done. WSM API upgrade is required to finish the implementation. (2-3 m*d).
+ - 11n: verification (??? m*d Resources? WLAN RF lab? 11n sniffers
+ availability? Bring up of the test equipment?).
+ - memory leakage verification and proper cleanup: not done (1-3 m*d).
+ - AP (hot-spot) mode: Implemented, some problems with WEP104/WPA/WPA2 security.
+ FW bug? To be investigated.
+ - U-APSD configuration (0.5-1 m*d).
+ - Cleanup of debug printouts (1 m*d).
diff --git a/drivers/staging/cw1200/ap.c b/drivers/staging/cw1200/ap.c
new file mode 100644
index 00000000000..55096cf0e2c
--- /dev/null
+++ b/drivers/staging/cw1200/ap.c
@@ -0,0 +1,1149 @@
+/*
+ * mac80211 STA and AP API for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cw1200.h"
+#include "sta.h"
+#include "ap.h"
+#include "bh.h"
+
+#if defined(CONFIG_CW1200_STA_DEBUG)
+#define ap_printk(...) printk(__VA_ARGS__)
+#else
+#define ap_printk(...)
+#endif
+
+#define CW1200_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
+
+#ifndef ERP_INFO_BYTE_OFFSET
+#define ERP_INFO_BYTE_OFFSET 2
+#endif
+
+static int cw1200_upload_beacon(struct cw1200_common *priv);
+static int cw1200_upload_pspoll(struct cw1200_common *priv);
+static int cw1200_upload_null(struct cw1200_common *priv);
+static int cw1200_start_ap(struct cw1200_common *priv);
+static int cw1200_update_beaconing(struct cw1200_common *priv);
+static int cw1200_enable_beaconing(struct cw1200_common *priv,
+ bool enable);
+static void __cw1200_sta_notify(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd,
+ int link_id);
+
+/* ******************************************************************** */
+/* AP API */
+
+int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct cw1200_sta_priv *sta_priv =
+ (struct cw1200_sta_priv *)&sta->drv_priv;
+ struct cw1200_link_entry *entry;
+ struct sk_buff *skb;
+
+ if (priv->mode != NL80211_IFTYPE_AP)
+ return 0;
+
+ sta_priv->link_id = cw1200_find_link_id(priv, sta->addr);
+ if (WARN_ON(!sta_priv->link_id)) {
+ /* Impossible error */
+ wiphy_info(priv->hw->wiphy,
+ "[AP] No more link IDs available.\n");
+ return -ENOENT;
+ }
+
+ entry = &priv->link_id_db[sta_priv->link_id - 1];
+ spin_lock_bh(&priv->ps_state_lock);
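+ /* If all ACs are U-APSD, treat the station as asleep from the start */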
+ if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+ priv->sta_asleep_mask |= BIT(sta_priv->link_id);
+ entry->status = CW1200_LINK_HARD;
+ while ((skb = skb_dequeue(&entry->rx_queue)))
+ ieee80211_rx_irqsafe(priv->hw, skb);
+ spin_unlock_bh(&priv->ps_state_lock);
+ return 0;
+}
+
+int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct cw1200_sta_priv *sta_priv =
+ (struct cw1200_sta_priv *)&sta->drv_priv;
+ struct cw1200_link_entry *entry;
+
+ if (priv->mode != NL80211_IFTYPE_AP || !sta_priv->link_id)
+ return 0;
+
+ entry = &priv->link_id_db[sta_priv->link_id - 1];
+ spin_lock_bh(&priv->ps_state_lock);
+ entry->status = CW1200_LINK_RESERVE;
+ entry->timestamp = jiffies;
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
+ wsm_unlock_tx(priv);
+ spin_unlock_bh(&priv->ps_state_lock);
+ flush_workqueue(priv->workqueue);
+ return 0;
+}
+
+static void __cw1200_sta_notify(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd,
+ int link_id)
+{
+ struct cw1200_common *priv = dev->priv;
+ u32 bit, prev;
+
+ /* Zero link id means "for all link IDs" */
+ if (link_id)
+ bit = BIT(link_id);
+ else if (WARN_ON_ONCE(notify_cmd != STA_NOTIFY_AWAKE))
+ bit = 0;
+ else
+ bit = priv->link_id_map;
+ prev = priv->sta_asleep_mask & bit;
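+ /* Non-zero prev means the link(s) were already marked asleep */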
+
+ switch (notify_cmd) {
+ case STA_NOTIFY_SLEEP:
+ if (!prev) {
+ if (priv->buffered_multicasts &&
+ !priv->sta_asleep_mask)
+ queue_work(priv->workqueue,
+ &priv->multicast_start_work);
+ priv->sta_asleep_mask |= bit;
+ }
+ break;
+ case STA_NOTIFY_AWAKE:
+ if (prev) {
+ priv->sta_asleep_mask &= ~bit;
+ priv->pspoll_mask &= ~bit;
+ if (priv->tx_multicast && link_id &&
+ !priv->sta_asleep_mask)
+ queue_work(priv->workqueue,
+ &priv->multicast_stop_work);
+ cw1200_bh_wakeup(priv);
+ }
+ break;
+ }
+}
+
+void cw1200_sta_notify(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd,
+ struct ieee80211_sta *sta)
+{
+ struct cw1200_common *priv = dev->priv;
+ struct cw1200_sta_priv *sta_priv =
+ (struct cw1200_sta_priv *)&sta->drv_priv;
+
+ spin_lock_bh(&priv->ps_state_lock);
+ __cw1200_sta_notify(dev, vif, notify_cmd, sta_priv->link_id);
+ spin_unlock_bh(&priv->ps_state_lock);
+}
+
+static void cw1200_ps_notify(struct cw1200_common *priv,
+ int link_id, bool ps)
+{
+ if (link_id > CW1200_MAX_STA_IN_AP_MODE)
+ return;
+
+ txrx_printk(KERN_DEBUG "%s for LinkId: %d. STAs asleep: %.8X\n",
+ ps ? "Stop" : "Start",
+ link_id, priv->sta_asleep_mask);
+
+ __cw1200_sta_notify(priv->hw, priv->vif,
+ ps ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, link_id);
+}
+
+static int cw1200_set_tim_impl(struct cw1200_common *priv, bool aid0_bit_set)
+{
+ struct sk_buff *skb;
+ struct wsm_update_ie update_ie = {
+ .what = WSM_UPDATE_IE_BEACON,
+ .count = 1,
+ };
+ u16 tim_offset, tim_length;
+
+ ap_printk(KERN_DEBUG "[AP] %s mcast: %s.\n",
+ __func__, aid0_bit_set ? "ena" : "dis");
+
+ skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
+ &tim_offset, &tim_length);
+ if (!skb) {
+ if (!__cw1200_flush(priv, true))
+ wsm_unlock_tx(priv);
+ return -ENOENT;
+ }
+
+ if (tim_offset && tim_length >= 6) {
+ /* Ignore DTIM count from mac80211:
+ * firmware handles DTIM internally. */
+ skb->data[tim_offset + 2] = 0;
+
+ /* Set/reset aid0 bit */
+ if (aid0_bit_set)
+ skb->data[tim_offset + 4] |= 1;
+ else
+ skb->data[tim_offset + 4] &= ~1;
+ }
+
+ update_ie.ies = &skb->data[tim_offset];
+ update_ie.length = tim_length;
+ WARN_ON(wsm_update_ie(priv, &update_ie));
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+void cw1200_set_tim_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, set_tim_work);
+ (void)cw1200_set_tim_impl(priv, priv->aid0_bit_set);
+}
+
+int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct cw1200_common *priv = dev->priv;
+ queue_work(priv->workqueue, &priv->set_tim_work);
+ return 0;
+}
+
+void cw1200_set_cts_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, set_cts_work.work);
+
+ u8 erp_ie[3] = {WLAN_EID_ERP_INFO, 0x1, 0};
+ struct wsm_update_ie update_ie = {
+ .what = WSM_UPDATE_IE_BEACON,
+ .count = 1,
+ .ies = erp_ie,
+ .length = 3,
+ };
+ u32 erp_info;
+ __le32 use_cts_prot;
+ mutex_lock(&priv->conf_mutex);
+ erp_info = priv->erp_info;
+ mutex_unlock(&priv->conf_mutex);
+ use_cts_prot =
+ erp_info & WLAN_ERP_USE_PROTECTION ?
+ __cpu_to_le32(1) : 0;
+
+ erp_ie[ERP_INFO_BYTE_OFFSET] = erp_info;
+
+ ap_printk(KERN_DEBUG "[STA] ERP information 0x%x\n", erp_info);
+
+ WARN_ON(wsm_write_mib(priv, WSM_MIB_ID_NON_ERP_PROTECTION,
+ &use_cts_prot, sizeof(use_cts_prot)));
+ WARN_ON(wsm_update_ie(priv, &update_ie));
+
+ return;
+}
+
+static int cw1200_set_btcoexinfo(struct cw1200_common *priv)
+{
+ struct wsm_override_internal_txrate arg;
+ int ret = 0;
+
+ if (priv->mode == NL80211_IFTYPE_STATION) {
+ /* Plumb PSPOLL and NULL template */
+ WARN_ON(cw1200_upload_pspoll(priv));
+ WARN_ON(cw1200_upload_null(priv));
+ } else {
+ return 0;
+ }
+
+ memset(&arg, 0, sizeof(struct wsm_override_internal_txrate));
+
+ if (!priv->vif->p2p) {
+ /* STATION mode */
+ if (priv->bss_params.operationalRateSet & ~0xF) {
+ ap_printk(KERN_DEBUG "[STA] STA has ERP rates\n");
+ /* G or BG mode */
+ arg.internalTxRate = (__ffs(
+ priv->bss_params.operationalRateSet & ~0xF));
+ } else {
+ ap_printk(KERN_DEBUG "[STA] STA has non ERP rates\n");
+ /* B only mode */
+ arg.internalTxRate = (__ffs(
+ priv->association_mode.basicRateSet));
+ }
+ arg.nonErpInternalTxRate = (__ffs(
+ priv->association_mode.basicRateSet));
+ } else {
+ /* P2P mode */
+ arg.internalTxRate = (__ffs(
+ priv->bss_params.operationalRateSet & ~0xF));
+ arg.nonErpInternalTxRate = (__ffs(
+ priv->bss_params.operationalRateSet & ~0xF));
+ }
+
+ ap_printk(KERN_DEBUG "[STA] BTCOEX_INFO"
+ "MODE %d, internalTxRate : %x, nonErpInternalTxRate: %x\n",
+ priv->mode,
+ arg.internalTxRate,
+ arg.nonErpInternalTxRate);
+
+ ret = WARN_ON(wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE,
+ &arg, sizeof(arg)));
+
+ return ret;
+}
+
+void cw1200_bss_info_changed(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct cw1200_common *priv = dev->priv;
+
+ mutex_lock(&priv->conf_mutex);
+ if (changed & BSS_CHANGED_BSSID) {
+ memcpy(priv->bssid, info->bssid, ETH_ALEN);
+ cw1200_setup_mac(priv);
+ }
+
+ /* TODO: BSS_CHANGED_IBSS */
+
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ struct wsm_arp_ipv4_filter filter = {0};
+ int i;
+
+ ap_printk(KERN_DEBUG "[STA] BSS_CHANGED_ARP_FILTER "
+ "enabled: %d, cnt: %d\n",
+ info->arp_filter_enabled,
+ info->arp_addr_cnt);
+
+ if (info->arp_filter_enabled)
+ filter.enable = __cpu_to_le32(1);
+
+ /* Currently only one IP address is supported by firmware.
+ * In case of more IPs arp filtering will be disabled. */
+ if (info->arp_addr_cnt > 0 &&
+ info->arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) {
+ for (i = 0; i < info->arp_addr_cnt; i++) {
+ filter.ipv4Address[i] = info->arp_addr_list[i];
+ ap_printk(KERN_DEBUG "[STA] addr[%d]: 0x%X\n",
+ i, filter.ipv4Address[i]);
+ }
+ } else
+ filter.enable = 0;
+
+ ap_printk(KERN_DEBUG "[STA] arp ip filter enable: %d\n",
+ __le32_to_cpu(filter.enable));
+
+ if (wsm_set_arp_ipv4_filter(priv, &filter))
+ WARN_ON(1);
+ }
+
+
+ if (changed & BSS_CHANGED_BEACON) {
+ ap_printk(KERN_DEBUG "BSS_CHANGED_BEACON\n");
+ WARN_ON(cw1200_update_beaconing(priv));
+ WARN_ON(cw1200_upload_beacon(priv));
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ ap_printk(KERN_DEBUG "BSS_CHANGED_BEACON_ENABLED\n");
+
+ if (priv->enable_beacon != info->enable_beacon) {
+ WARN_ON(cw1200_enable_beaconing(priv,
+ info->enable_beacon));
+ priv->enable_beacon = info->enable_beacon;
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ ap_printk(KERN_DEBUG "CHANGED_BEACON_INT\n");
+ /* Restart AP only when connected */
+ if (priv->join_status == CW1200_JOIN_STATUS_AP)
+ WARN_ON(cw1200_update_beaconing(priv));
+ }
+
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ wsm_lock_tx(priv);
+ priv->wep_default_key_id = -1;
+ wsm_unlock_tx(priv);
+
+ if (!info->assoc /* && !info->ibss_joined */) {
+ priv->cqm_link_loss_count = 60;
+ priv->cqm_beacon_loss_count = 20;
+ priv->cqm_tx_failure_thold = 0;
+ }
+ priv->cqm_tx_failure_count = 0;
+ }
+
+ if (changed &
+ (BSS_CHANGED_ASSOC |
+ BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_ERP_PREAMBLE |
+ BSS_CHANGED_HT |
+ BSS_CHANGED_ERP_SLOT)) {
+ ap_printk(KERN_DEBUG "BSS_CHANGED_ASSOC.\n");
+ if (info->assoc) { /* TODO: ibss_joined */
+ struct ieee80211_sta *sta = NULL;
+ priv->join_dtim_period = info->dtim_period;
+ priv->beacon_int = info->beacon_int;
+
+ /* Associated: kill join timeout */
+ cancel_delayed_work_sync(&priv->join_timeout);
+
+ rcu_read_lock();
+ if (info->bssid)
+ sta = ieee80211_find_sta(vif, info->bssid);
+ if (sta) {
+ BUG_ON(!priv->channel);
+ priv->ht_info.ht_cap = sta->ht_cap;
+ priv->bss_params.operationalRateSet =
+ __cpu_to_le32(
+ cw1200_rate_mask_to_wsm(priv,
+ sta->supp_rates[priv->channel->band]));
+ priv->ht_info.channel_type =
+ info->channel_type;
+ priv->ht_info.operation_mode =
+ info->ht_operation_mode;
+ } else {
+ memset(&priv->ht_info, 0,
+ sizeof(priv->ht_info));
+ priv->bss_params.operationalRateSet = -1;
+ }
+ rcu_read_unlock();
+
+ if (sta) {
+ __le32 val = 0;
+ if (priv->ht_info.operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) {
+ ap_printk(KERN_DEBUG "[STA]"
+ " Non-GF STA present\n");
+ /* Non Green field capable STA */
+ val = __cpu_to_le32(BIT(1));
+ }
+ WARN_ON(wsm_write_mib(priv,
+ WSM_MID_ID_SET_HT_PROTECTION,
+ &val, sizeof(val)));
+ }
+
+ priv->association_mode.greenfieldMode =
+ cw1200_ht_greenfield(&priv->ht_info);
+ priv->association_mode.flags =
+ WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES |
+ WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE |
+ WSM_ASSOCIATION_MODE_USE_HT_MODE |
+ WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET |
+ WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING;
+ priv->association_mode.preambleType =
+ info->use_short_preamble ?
+ WSM_JOIN_PREAMBLE_SHORT :
+ WSM_JOIN_PREAMBLE_LONG;
+ priv->association_mode.basicRateSet = __cpu_to_le32(
+ cw1200_rate_mask_to_wsm(priv,
+ info->basic_rates));
+ priv->association_mode.mpduStartSpacing =
+ cw1200_ht_ampdu_density(&priv->ht_info);
+
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ priv->cqm_beacon_loss_count =
+ info->cqm_beacon_miss_thold;
+ priv->cqm_tx_failure_thold =
+ info->cqm_tx_fail_thold;
+ priv->cqm_tx_failure_count = 0;
+ cancel_delayed_work_sync(&priv->bss_loss_work);
+ cancel_delayed_work_sync(&priv->connection_loss_work);
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+
+ priv->bss_params.beaconLostCount =
+ priv->cqm_beacon_loss_count ?
+ priv->cqm_beacon_loss_count :
+ priv->cqm_link_loss_count;
+
+ priv->bss_params.aid = info->aid;
+
+ if (priv->join_dtim_period < 1)
+ priv->join_dtim_period = 1;
+
+ ap_printk(KERN_DEBUG "[STA] DTIM %d, interval: %d\n",
+ priv->join_dtim_period, priv->beacon_int);
+ ap_printk(KERN_DEBUG "[STA] Preamble: %d, " \
+ "Greenfield: %d, Aid: %d, " \
+ "Rates: 0x%.8X, Basic: 0x%.8X\n",
+ priv->association_mode.preambleType,
+ priv->association_mode.greenfieldMode,
+ priv->bss_params.aid,
+ priv->bss_params.operationalRateSet,
+ priv->association_mode.basicRateSet);
+ WARN_ON(wsm_set_association_mode(priv,
+ &priv->association_mode));
+ WARN_ON(wsm_keep_alive_period(priv, 30 /* sec */));
+ WARN_ON(wsm_set_bss_params(priv, &priv->bss_params));
+ priv->setbssparams_done = true;
+ WARN_ON(wsm_set_beacon_wakeup_period(priv,
+ priv->beacon_int * priv->join_dtim_period >
+ MAX_BEACON_SKIP_TIME_MS ? 1 :
+ priv->join_dtim_period, 0));
+
+ cw1200_set_pm(priv, &priv->powersave_mode);
+ if (priv->vif->p2p) {
+ ap_printk(KERN_DEBUG
+ "[STA] Setting p2p powersave "
+ "configuration.\n");
+ WARN_ON(wsm_set_p2p_ps_modeinfo(priv,
+ &priv->p2p_ps_modeinfo));
+ }
+
+ if (priv->is_BT_Present)
+ WARN_ON(cw1200_set_btcoexinfo(priv));
+#if 0
+ /* It's better to override internal TX rate; otherwise
+ * device sends RTS at too high rate. However device
+ * can't receive CTS at 1 and 2 Mbps. Well, 5.5 is a
+ * good choice for RTS/CTS, but that means PS poll
+ * will be sent at the same rate - impact on link
+ * budget. Not sure what is better.. */
+
+ /* Update: internal rate selection algorithm is not
+ * bad: if device is not receiving CTS at high rate,
+ * it drops RTS rate.
+ * So, conclusion: if-0 the code. Keep code just for
+ * information:
+ * Do not touch WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE! */
+
+ /* ~3 is a bug in device: RTS/CTS is not working at
+ * low rates */
+
+ __le32 internal_tx_rate = __cpu_to_le32(__ffs(
+ priv->association_mode.basicRateSet & ~3));
+ WARN_ON(wsm_write_mib(priv,
+ WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE,
+ &internal_tx_rate,
+ sizeof(internal_tx_rate)));
+#endif
+ } else {
+ memset(&priv->association_mode, 0,
+ sizeof(priv->association_mode));
+ memset(&priv->bss_params, 0, sizeof(priv->bss_params));
+ }
+ }
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT)) {
+ u32 prev_erp_info = priv->erp_info;
+
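+		/* Note (added): protection is kept enabled while non-ERP
+		 * stations are present, even if mac80211 stops requesting
+		 * CTS protection. */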
+ if (info->use_cts_prot)
+ priv->erp_info |= WLAN_ERP_USE_PROTECTION;
+ else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
+ priv->erp_info &= ~WLAN_ERP_USE_PROTECTION;
+
+ if (prev_erp_info != priv->erp_info)
+ queue_delayed_work(priv->workqueue,
+ &priv->set_cts_work, 0*HZ);
+ }
+
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT)) {
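+		/* 802.11 slot time: 9 us when short slot is in use,
+		 * 20 us otherwise. */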
+ __le32 slot_time = info->use_short_slot ?
+ __cpu_to_le32(9) : __cpu_to_le32(20);
+ ap_printk(KERN_DEBUG "[STA] Slot time :%d us.\n",
+ __le32_to_cpu(slot_time));
+ WARN_ON(wsm_write_mib(priv, WSM_MIB_ID_DOT11_SLOT_TIME,
+ &slot_time, sizeof(slot_time)));
+ }
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
+ struct wsm_rcpi_rssi_threshold threshold = {
+ .rollingAverageCount = 8,
+ };
+
+#if 0
+ /* For verification purposes */
+ info->cqm_rssi_thold = -50;
+ info->cqm_rssi_hyst = 4;
+#endif /* 0 */
+
+ ap_printk(KERN_DEBUG "[CQM] RSSI threshold "
+ "subscribe: %d +- %d\n",
+ info->cqm_rssi_thold, info->cqm_rssi_hyst);
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ ap_printk(KERN_DEBUG "[CQM] Beacon loss subscribe: %d\n",
+ info->cqm_beacon_miss_thold);
+ ap_printk(KERN_DEBUG "[CQM] TX failure subscribe: %d\n",
+ info->cqm_tx_fail_thold);
+ priv->cqm_rssi_thold = info->cqm_rssi_thold;
+ priv->cqm_rssi_hyst = info->cqm_rssi_hyst;
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+ if (info->cqm_rssi_thold || info->cqm_rssi_hyst) {
+ /* RSSI subscription enabled */
+			/* TODO: This is not the correct way of setting the
+			 * threshold. Upper and lower must be set equal here and
+			 * adjusted in the callback. However, the current
+			 * implementation is much more reliable and stable. */
+ threshold.upperThreshold =
+ info->cqm_rssi_thold + info->cqm_rssi_hyst;
+ threshold.lowerThreshold =
+ info->cqm_rssi_thold;
+ threshold.rssiRcpiMode |=
+ WSM_RCPI_RSSI_THRESHOLD_ENABLE;
+ } else {
+ /* There is a bug in FW, see sta.c. We have to enable
+ * dummy subscription to get correct RSSI values. */
+ threshold.rssiRcpiMode |=
+ WSM_RCPI_RSSI_THRESHOLD_ENABLE |
+ WSM_RCPI_RSSI_DONT_USE_UPPER |
+ WSM_RCPI_RSSI_DONT_USE_LOWER;
+ }
+ WARN_ON(wsm_set_rcpi_rssi_threshold(priv, &threshold));
+
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ priv->cqm_tx_failure_thold = info->cqm_tx_fail_thold;
+ priv->cqm_tx_failure_count = 0;
+
+ if (priv->cqm_beacon_loss_count !=
+ info->cqm_beacon_miss_thold) {
+ priv->cqm_beacon_loss_count =
+ info->cqm_beacon_miss_thold;
+ priv->bss_params.beaconLostCount =
+ priv->cqm_beacon_loss_count ?
+ priv->cqm_beacon_loss_count :
+ priv->cqm_link_loss_count;
+ /* Make sure we are associated before sending
+ * set_bss_params to firmware */
+ if (priv->bss_params.aid) {
+ WARN_ON(wsm_set_bss_params(priv,
+ &priv->bss_params));
+ priv->setbssparams_done = true;
+ }
+ }
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+ }
+ mutex_unlock(&priv->conf_mutex);
+}
+
+void cw1200_multicast_start_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, multicast_start_work);
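+	/* Roughly one DTIM interval plus a 20 TU margin, converted to
+	 * jiffies (beacon_int is in TU, i.e. ~1.024 ms each). */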
+ long tmo = priv->join_dtim_period *
+ (priv->beacon_int + 20) * HZ / 1024;
+
+ cancel_work_sync(&priv->multicast_stop_work);
+
+ if (!priv->aid0_bit_set) {
+ wsm_lock_tx(priv);
+ cw1200_set_tim_impl(priv, true);
+ priv->aid0_bit_set = true;
+ mod_timer(&priv->mcast_timeout, jiffies + tmo);
+ wsm_unlock_tx(priv);
+ }
+}
+
+void cw1200_multicast_stop_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, multicast_stop_work);
+
+ if (priv->aid0_bit_set) {
+ del_timer_sync(&priv->mcast_timeout);
+ wsm_lock_tx(priv);
+ priv->aid0_bit_set = false;
+ cw1200_set_tim_impl(priv, false);
+ wsm_unlock_tx(priv);
+ }
+}
+
+void cw1200_mcast_timeout(unsigned long arg)
+{
+ struct cw1200_common *priv =
+ (struct cw1200_common *)arg;
+
+ wiphy_warn(priv->hw->wiphy,
+ "Multicast delivery timeout.\n");
+ spin_lock_bh(&priv->ps_state_lock);
+ priv->tx_multicast = priv->aid0_bit_set &&
+ priv->buffered_multicasts;
+ if (priv->tx_multicast)
+ cw1200_bh_wakeup(priv);
+ spin_unlock_bh(&priv->ps_state_lock);
+}
+
+int cw1200_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+	/* Aggregation is implemented fully in firmware,
+	 * including block ack negotiation. Do not allow the
+	 * mac80211 stack to do anything: it interferes with
+	 * the firmware. */
+ return -ENOTSUPP;
+}
+
+/* ******************************************************************** */
+/* WSM callback */
+void cw1200_suspend_resume(struct cw1200_common *priv,
+ struct wsm_suspend_resume *arg)
+{
+ ap_printk(KERN_DEBUG "[AP] %s: %s\n",
+ arg->stop ? "stop" : "start",
+ arg->multicast ? "broadcast" : "unicast");
+
+ if (arg->multicast) {
+ bool cancel_tmo = false;
+ spin_lock_bh(&priv->ps_state_lock);
+ if (arg->stop) {
+ priv->tx_multicast = false;
+ } else {
+			/* Firmware sends this indication every DTIM if there
+			 * is a STA connected in powersave. There is no reason
+			 * to suspend; the following wakeup would consume much
+			 * more power than could be saved. */
+ cw1200_pm_stay_awake(&priv->pm_state,
+ priv->join_dtim_period *
+ (priv->beacon_int + 20) * HZ / 1024);
+ priv->tx_multicast = priv->aid0_bit_set &&
+ priv->buffered_multicasts;
+ if (priv->tx_multicast) {
+ cancel_tmo = true;
+ cw1200_bh_wakeup(priv);
+ }
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+ if (cancel_tmo)
+ del_timer_sync(&priv->mcast_timeout);
+ } else {
+ spin_lock_bh(&priv->ps_state_lock);
+ cw1200_ps_notify(priv, arg->link_id, arg->stop);
+ spin_unlock_bh(&priv->ps_state_lock);
+ if (!arg->stop)
+ cw1200_bh_wakeup(priv);
+ }
+ return;
+}
+
+/* ******************************************************************** */
+/* AP privates */
+
+static int cw1200_upload_beacon(struct cw1200_common *priv)
+{
+ int ret = 0;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_BEACON,
+ };
+ struct ieee80211_mgmt *mgmt;
+ u8 *erp_inf, *ies;
+ u32 ies_len;
+
+ if (priv->vif->p2p)
+ frame.rate = WSM_TRANSMIT_RATE_6;
+
+ frame.skb = ieee80211_beacon_get(priv->hw, priv->vif);
+ if (WARN_ON(!frame.skb))
+ return -ENOMEM;
+
+ mgmt = (void *)frame.skb->data;
+ ies = mgmt->u.beacon.variable;
+ ies_len = frame.skb->len - (u32)(ies - (u8 *)mgmt);
+ erp_inf = (u8 *)cfg80211_find_ie(WLAN_EID_ERP_INFO, ies, ies_len);
+ if (erp_inf) {
+ if (erp_inf[ERP_INFO_BYTE_OFFSET]
+ & WLAN_ERP_BARKER_PREAMBLE)
+ priv->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
+ else
+ priv->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;
+
+ if (erp_inf[ERP_INFO_BYTE_OFFSET]
+ & WLAN_ERP_NON_ERP_PRESENT) {
+ priv->erp_info |= WLAN_ERP_USE_PROTECTION;
+ priv->erp_info |= WLAN_ERP_NON_ERP_PRESENT;
+ } else {
+ priv->erp_info &= ~WLAN_ERP_USE_PROTECTION;
+ priv->erp_info &= ~WLAN_ERP_NON_ERP_PRESENT;
+ }
+ }
+
+ ret = wsm_set_template_frame(priv, &frame);
+ if (!ret) {
+		/* TODO: Distill the probe resp: remove the TIM
+		 * and other beacon-specific IEs */
+ *(__le16 *)frame.skb->data =
+ __cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+ frame.frame_type = WSM_FRAME_TYPE_PROBE_RESPONSE;
+ if (priv->vif->p2p)
+ frame.disable = true;
+ ret = wsm_set_template_frame(priv, &frame);
+ }
+ dev_kfree_skb(frame.skb);
+
+ return ret;
+}
+
+static int cw1200_upload_pspoll(struct cw1200_common *priv)
+{
+ int ret = 0;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_PS_POLL,
+ .rate = 0xFF,
+ };
+
+
+ frame.skb = ieee80211_pspoll_get(priv->hw, priv->vif);
+ if (WARN_ON(!frame.skb))
+ return -ENOMEM;
+
+ ret = wsm_set_template_frame(priv, &frame);
+
+ dev_kfree_skb(frame.skb);
+
+ return ret;
+}
+
+static int cw1200_upload_null(struct cw1200_common *priv)
+{
+ int ret = 0;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_NULL,
+ .rate = 0xFF,
+ };
+
+
+ frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+ if (WARN_ON(!frame.skb))
+ return -ENOMEM;
+
+ ret = wsm_set_template_frame(priv, &frame);
+
+ dev_kfree_skb(frame.skb);
+
+ return ret;
+}
+
+static int cw1200_enable_beaconing(struct cw1200_common *priv,
+ bool enable)
+{
+ struct wsm_beacon_transmit transmit = {
+ .enableBeaconing = enable,
+ };
+
+ return wsm_beacon_transmit(priv, &transmit);
+}
+
+static int cw1200_start_ap(struct cw1200_common *priv)
+{
+ int ret;
+ const u8 *ssidie;
+ struct sk_buff *skb;
+ int offset;
+ struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
+ struct wsm_start start = {
+ .mode = priv->vif->p2p ?
+ WSM_START_MODE_P2P_GO : WSM_START_MODE_AP,
+ .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G,
+ .channelNumber = priv->channel->hw_value,
+ .beaconInterval = conf->beacon_int,
+ .DTIMPeriod = conf->dtim_period,
+ .preambleType = conf->use_short_preamble ?
+ WSM_JOIN_PREAMBLE_SHORT :
+ WSM_JOIN_PREAMBLE_LONG,
+ .probeDelay = 100,
+ .basicRateSet = cw1200_rate_mask_to_wsm(priv,
+ conf->basic_rates),
+ };
+ struct wsm_operational_mode mode = {
+ .power_mode = wsm_power_mode_quiescent,
+ .disableMoreFlagUsage = true,
+ };
+
+ /* Get SSID */
+ skb = ieee80211_beacon_get(priv->hw, priv->vif);
+ if (WARN_ON(!skb))
+ return -ENOMEM;
+
+ offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ ssidie = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
+ skb->len - offset);
+
+ memset(priv->ssid, 0, sizeof(priv->ssid));
+ if (ssidie) {
+ priv->ssid_length = ssidie[1];
+ if (WARN_ON(priv->ssid_length > sizeof(priv->ssid)))
+ priv->ssid_length = sizeof(priv->ssid);
+ memcpy(priv->ssid, &ssidie[2], priv->ssid_length);
+ } else {
+ priv->ssid_length = 0;
+ }
+ dev_kfree_skb(skb);
+
+ priv->beacon_int = conf->beacon_int;
+ priv->join_dtim_period = conf->dtim_period;
+
+ start.ssidLength = priv->ssid_length;
+ memcpy(&start.ssid[0], priv->ssid, start.ssidLength);
+
+ memset(&priv->link_id_db, 0, sizeof(priv->link_id_db));
+
+ ap_printk(KERN_DEBUG "[AP] ch: %d(%d), bcn: %d(%d), "
+ "brt: 0x%.8X, ssid: %.*s.\n",
+ start.channelNumber, start.band,
+ start.beaconInterval, start.DTIMPeriod,
+ start.basicRateSet,
+ start.ssidLength, start.ssid);
+ ret = WARN_ON(wsm_start(priv, &start));
+ if (!ret)
+ ret = WARN_ON(cw1200_upload_keys(priv));
+ if (!ret && priv->vif->p2p) {
+ ap_printk(KERN_DEBUG
+ "[AP] Setting p2p powersave "
+ "configuration.\n");
+ WARN_ON(wsm_set_p2p_ps_modeinfo(priv,
+ &priv->p2p_ps_modeinfo));
+ }
+ if (!ret) {
+ WARN_ON(wsm_set_block_ack_policy(priv,
+ 0, 0));
+ priv->join_status = CW1200_JOIN_STATUS_AP;
+ cw1200_update_filtering(priv);
+ }
+ WARN_ON(wsm_set_operational_mode(priv, &mode));
+ return ret;
+}
+
+static int cw1200_update_beaconing(struct cw1200_common *priv)
+{
+ struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
+ struct wsm_reset reset = {
+ .link_id = 0,
+ .reset_statistics = true,
+ };
+
+ if (priv->mode == NL80211_IFTYPE_AP) {
+		/* TODO: check whether the channel or band changed */
+ if (priv->join_status != CW1200_JOIN_STATUS_AP ||
+ priv->beacon_int != conf->beacon_int) {
+ ap_printk(KERN_DEBUG "ap restarting\n");
+ wsm_lock_tx(priv);
+ if (priv->join_status != CW1200_JOIN_STATUS_PASSIVE)
+ WARN_ON(wsm_reset(priv, &reset));
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ WARN_ON(cw1200_start_ap(priv));
+ wsm_unlock_tx(priv);
+ } else
+ ap_printk(KERN_DEBUG "ap started join_status: %d\n",
+ priv->join_status);
+ }
+ return 0;
+}
+
+int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac)
+{
+ int i, ret = 0;
+ spin_lock_bh(&priv->ps_state_lock);
+ for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+ if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) &&
+ priv->link_id_db[i].status) {
+ priv->link_id_db[i].timestamp = jiffies;
+ ret = i + 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+ return ret;
+}
+
+int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac)
+{
+ int i, ret = 0;
+ unsigned long max_inactivity = 0;
+ unsigned long now = jiffies;
+
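+	/* Prefer a free slot; failing that, evict the least recently used
+	 * entry that is not CW1200_LINK_HARD and has nothing queued for it. */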
+ spin_lock_bh(&priv->ps_state_lock);
+ for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+ if (!priv->link_id_db[i].status) {
+ ret = i + 1;
+ break;
+ } else if (priv->link_id_db[i].status != CW1200_LINK_HARD &&
+ !priv->tx_queue_stats.link_map_cache[i + 1]) {
+
+ unsigned long inactivity =
+ now - priv->link_id_db[i].timestamp;
+ if (inactivity < max_inactivity)
+ continue;
+ max_inactivity = inactivity;
+ ret = i + 1;
+ }
+ }
+ if (ret) {
+ struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1];
+ ap_printk(KERN_DEBUG "[AP] STA added, link_id: %d\n",
+ ret);
+ entry->status = CW1200_LINK_RESERVE;
+ memcpy(&entry->mac, mac, ETH_ALEN);
+ memset(&entry->buffered, 0, CW1200_MAX_TID);
+ skb_queue_head_init(&entry->rx_queue);
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
+ wsm_unlock_tx(priv);
+ } else {
+ wiphy_info(priv->hw->wiphy,
+ "[AP] Early: no more link IDs available.\n");
+ }
+
+ spin_unlock_bh(&priv->ps_state_lock);
+ return ret;
+}
+
+void cw1200_link_id_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, link_id_work);
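+	/* Flush pending TX, then run a GC pass right away so the newly
+	 * reserved link ID gets mapped to the firmware. */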
+ wsm_flush_tx(priv);
+ cw1200_link_id_gc_work(&priv->link_id_gc_work.work);
+ wsm_unlock_tx(priv);
+}
+
+void cw1200_link_id_gc_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, link_id_gc_work.work);
+ struct wsm_reset reset = {
+ .reset_statistics = false,
+ };
+ struct wsm_map_link map_link = {
+ .link_id = 0,
+ };
+ unsigned long now = jiffies;
+ unsigned long next_gc = -1;
+ long ttl;
+ bool need_reset;
+ u32 mask;
+ int i;
+
+ if (priv->join_status != CW1200_JOIN_STATUS_AP)
+ return;
+
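+	/* Walk the link ID table: map freshly reserved entries to firmware,
+	 * expire idle SOFT entries after CW1200_LINK_ID_GC_TIMEOUT, and
+	 * re-arm this work for the nearest pending expiry. */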
+ wsm_lock_tx(priv);
+ spin_lock_bh(&priv->ps_state_lock);
+ for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+ need_reset = false;
+ mask = BIT(i + 1);
+ if (priv->link_id_db[i].status == CW1200_LINK_RESERVE ||
+ (priv->link_id_db[i].status == CW1200_LINK_HARD &&
+ !(priv->link_id_map & mask))) {
+ if (priv->link_id_map & mask) {
+ priv->sta_asleep_mask &= ~mask;
+ priv->pspoll_mask &= ~mask;
+ need_reset = true;
+ }
+ priv->link_id_map |= mask;
+ if (priv->link_id_db[i].status != CW1200_LINK_HARD)
+ priv->link_id_db[i].status = CW1200_LINK_SOFT;
+ memcpy(map_link.mac_addr, priv->link_id_db[i].mac,
+ ETH_ALEN);
+ spin_unlock_bh(&priv->ps_state_lock);
+ if (need_reset) {
+ reset.link_id = i + 1;
+ WARN_ON(wsm_reset(priv, &reset));
+ }
+ map_link.link_id = i + 1;
+ WARN_ON(wsm_map_link(priv, &map_link));
+ next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT);
+ spin_lock_bh(&priv->ps_state_lock);
+ } else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) {
+ ttl = priv->link_id_db[i].timestamp - now +
+ CW1200_LINK_ID_GC_TIMEOUT;
+ if (ttl <= 0) {
+ need_reset = true;
+ priv->link_id_db[i].status = CW1200_LINK_OFF;
+ priv->link_id_map &= ~mask;
+ priv->sta_asleep_mask &= ~mask;
+ priv->pspoll_mask &= ~mask;
+ memset(map_link.mac_addr, 0, ETH_ALEN);
+ spin_unlock_bh(&priv->ps_state_lock);
+ reset.link_id = i + 1;
+ WARN_ON(wsm_reset(priv, &reset));
+ spin_lock_bh(&priv->ps_state_lock);
+ } else {
+ next_gc = min_t(unsigned long, next_gc, ttl);
+ }
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ } else if (priv->link_id_db[i].status == CW1200_LINK_RESET ||
+ priv->link_id_db[i].status ==
+ CW1200_LINK_RESET_REMAP) {
+ int status = priv->link_id_db[i].status;
+ priv->link_id_db[i].status =
+ priv->link_id_db[i].prev_status;
+ priv->link_id_db[i].timestamp = now;
+ reset.link_id = i + 1;
+ spin_unlock_bh(&priv->ps_state_lock);
+ WARN_ON(wsm_reset(priv, &reset));
+ if (status == CW1200_LINK_RESET_REMAP) {
+ memcpy(map_link.mac_addr,
+ priv->link_id_db[i].mac,
+ ETH_ALEN);
+ map_link.link_id = i + 1;
+ WARN_ON(wsm_map_link(priv, &map_link));
+ next_gc = min(next_gc,
+ CW1200_LINK_ID_GC_TIMEOUT);
+ }
+ spin_lock_bh(&priv->ps_state_lock);
+#endif
+ }
+ if (need_reset) {
+ skb_queue_purge(&priv->link_id_db[i].rx_queue);
+ ap_printk(KERN_DEBUG "[AP] STA removed, link_id: %d\n",
+ reset.link_id);
+ }
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+ if (next_gc != -1)
+ queue_delayed_work(priv->workqueue,
+ &priv->link_id_gc_work, next_gc);
+ wsm_unlock_tx(priv);
+}
+
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+void cw1200_notify_noa(struct cw1200_common *priv, int delay)
+{
+ struct cfg80211_p2p_ps p2p_ps = {0};
+ struct wsm_p2p_ps_modeinfo *modeinfo;
+ modeinfo = &priv->p2p_ps_modeinfo;
+
+ ap_printk(KERN_DEBUG "[AP]: %s called\n", __func__);
+
+ if (priv->join_status != CW1200_JOIN_STATUS_AP)
+ return;
+
+ if (delay)
+ msleep(delay);
+
+ if (!WARN_ON(wsm_get_p2p_ps_modeinfo(priv, modeinfo))) {
+#if defined(CONFIG_CW1200_STA_DEBUG)
+ print_hex_dump_bytes("[AP] p2p_get_ps_modeinfo: ",
+ DUMP_PREFIX_NONE,
+ (u8 *)modeinfo,
+ sizeof(*modeinfo));
+#endif /* CONFIG_CW1200_STA_DEBUG */
+ p2p_ps.opp_ps = !!(modeinfo->oppPsCTWindow & BIT(7));
+ p2p_ps.ctwindow = modeinfo->oppPsCTWindow & (~BIT(7));
+ p2p_ps.count = modeinfo->count;
+ p2p_ps.start = __le32_to_cpu(modeinfo->startTime);
+ p2p_ps.duration = __le32_to_cpu(modeinfo->duration);
+ p2p_ps.interval = __le32_to_cpu(modeinfo->interval);
+ p2p_ps.index = modeinfo->reserved;
+
+ ieee80211_p2p_noa_notify(priv->vif,
+ &p2p_ps,
+ GFP_KERNEL);
+ }
+}
+#endif
diff --git a/drivers/staging/cw1200/ap.h b/drivers/staging/cw1200/ap.h
new file mode 100644
index 00000000000..c10e4ef16d2
--- /dev/null
+++ b/drivers/staging/cw1200/ap.h
@@ -0,0 +1,49 @@
+/*
+ * mac80211 STA and AP API for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef AP_H_INCLUDED
+#define AP_H_INCLUDED
+
+int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
+ bool set);
+int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd,
+ struct ieee80211_sta *sta);
+void cw1200_bss_info_changed(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed);
+int cw1200_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
+
+void cw1200_suspend_resume(struct cw1200_common *priv,
+ struct wsm_suspend_resume *arg);
+void cw1200_set_tim_work(struct work_struct *work);
+void cw1200_set_cts_work(struct work_struct *work);
+void cw1200_multicast_start_work(struct work_struct *work);
+void cw1200_multicast_stop_work(struct work_struct *work);
+void cw1200_mcast_timeout(unsigned long arg);
+int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac);
+int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac);
+void cw1200_link_id_work(struct work_struct *work);
+void cw1200_link_id_gc_work(struct work_struct *work);
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+void cw1200_notify_noa(struct cw1200_common *priv, int delay);
+#endif
+
+#endif
diff --git a/drivers/staging/cw1200/bh.c b/drivers/staging/cw1200/bh.c
new file mode 100644
index 00000000000..427f3e2ba52
--- /dev/null
+++ b/drivers/staging/cw1200/bh.c
@@ -0,0 +1,622 @@
+/*
+ * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver, which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+#include <linux/kthread.h>
+
+#include "cw1200.h"
+#include "bh.h"
+#include "hwio.h"
+#include "wsm.h"
+#include "sbus.h"
+#include "debug.h"
+
+#if defined(CONFIG_CW1200_BH_DEBUG)
+#define bh_printk(...) printk(__VA_ARGS__)
+#else
+#define bh_printk(...)
+#endif
+
+static int cw1200_bh(void *arg);
+
+/* TODO: Verify these numbers with WSM specification. */
+#define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4)
+/* An SPI message cannot be bigger than (2^12 - 1) * 2 bytes
+ * ("*2" converts 16-bit words to bytes). */
+#define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2)
+#define PIGGYBACK_CTRL_REG (2)
+#define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)
+
+/* Suspend state privates */
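+/* The host requests CW1200_BH_SUSPEND / CW1200_BH_RESUME and the BH thread
+ * acknowledges by switching to CW1200_BH_SUSPENDED / CW1200_BH_RESUMED
+ * (see cw1200_bh_suspend() and cw1200_bh_resume() below). */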
+enum cw1200_bh_pm_state {
+ CW1200_BH_RESUMED = 0,
+ CW1200_BH_SUSPEND,
+ CW1200_BH_SUSPENDED,
+ CW1200_BH_RESUME,
+};
+
+typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv,
+ u8 *data, size_t size);
+
+
+int cw1200_register_bh(struct cw1200_common *priv)
+{
+ int err = 0;
+ struct sched_param param = { .sched_priority = 1 };
+ bh_printk(KERN_DEBUG "[BH] register.\n");
+ BUG_ON(priv->bh_thread);
+ atomic_set(&priv->bh_rx, 0);
+ atomic_set(&priv->bh_tx, 0);
+ atomic_set(&priv->bh_term, 0);
+ atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
+ priv->buf_id_tx = 0;
+ priv->buf_id_rx = 0;
+ init_waitqueue_head(&priv->bh_wq);
+ init_waitqueue_head(&priv->bh_evt_wq);
+ priv->bh_thread = kthread_create(&cw1200_bh, priv, "cw1200_bh");
+ if (IS_ERR(priv->bh_thread)) {
+ err = PTR_ERR(priv->bh_thread);
+ priv->bh_thread = NULL;
+ } else {
+ WARN_ON(sched_setscheduler(priv->bh_thread,
+ SCHED_FIFO, &param));
+#ifdef HAS_PUT_TASK_STRUCT
+ get_task_struct(priv->bh_thread);
+#endif
+ wake_up_process(priv->bh_thread);
+ }
+ return err;
+}
+
+void cw1200_unregister_bh(struct cw1200_common *priv)
+{
+ struct task_struct *thread = priv->bh_thread;
+ if (WARN_ON(!thread))
+ return;
+
+ priv->bh_thread = NULL;
+ bh_printk(KERN_DEBUG "[BH] unregister.\n");
+ atomic_add(1, &priv->bh_term);
+ wake_up(&priv->bh_wq);
+ kthread_stop(thread);
+#ifdef HAS_PUT_TASK_STRUCT
+ put_task_struct(thread);
+#endif
+}
+
+void cw1200_irq_handler(struct cw1200_common *priv)
+{
+ bh_printk(KERN_DEBUG "[BH] irq.\n");
+ if (/* WARN_ON */(priv->bh_error))
+ return;
+
+ if (atomic_add_return(1, &priv->bh_rx) == 1)
+ wake_up(&priv->bh_wq);
+}
+
+void cw1200_bh_wakeup(struct cw1200_common *priv)
+{
+ bh_printk(KERN_DEBUG "[BH] wakeup.\n");
+ if (WARN_ON(priv->bh_error))
+ return;
+
+ if (atomic_add_return(1, &priv->bh_tx) == 1)
+ wake_up(&priv->bh_wq);
+}
+
+int cw1200_bh_suspend(struct cw1200_common *priv)
+{
+ bh_printk(KERN_DEBUG "[BH] suspend.\n");
+ if (WARN_ON(priv->bh_error))
+ return 0;
+
+ atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
+ wake_up(&priv->bh_wq);
+ return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
+ (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
+ 1 * HZ) ? 0 : -ETIMEDOUT;
+}
+
+int cw1200_bh_resume(struct cw1200_common *priv)
+{
+ bh_printk(KERN_DEBUG "[BH] resume.\n");
+ if (WARN_ON(priv->bh_error))
+ return 0;
+
+ atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
+ wake_up(&priv->bh_wq);
+ return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
+ (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
+ 1 * HZ) ? 0 : -ETIMEDOUT;
+}
+
+static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
+{
+ ++priv->hw_bufs_used;
+}
+
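+/* Returns 1 if the device buffer pool was full before this release (so a TX
+ * attempt is worth retrying), 0 otherwise, or -1 if the count went negative;
+ * wakes waiters once all buffers have been returned. */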
+int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
+{
+ int ret = 0;
+ int hw_bufs_used = priv->hw_bufs_used;
+
+ priv->hw_bufs_used -= count;
+ if (WARN_ON(priv->hw_bufs_used < 0))
+ ret = -1;
+ else if (hw_bufs_used >= priv->wsm_caps.numInpChBufs)
+ ret = 1;
+ if (!priv->hw_bufs_used)
+ wake_up(&priv->bh_evt_wq);
+ return ret;
+}
+
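+/* One-entry RX skb cache: reads of up to one SDIO block reuse the cached
+ * skb when available; cw1200_put_skb() below refills the cache. */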
+static struct sk_buff *cw1200_get_skb(struct cw1200_common *priv, size_t len)
+{
+ struct sk_buff *skb;
+ size_t alloc_len = (len > SDIO_BLOCK_SIZE) ? len : SDIO_BLOCK_SIZE;
+
+ if (len > SDIO_BLOCK_SIZE || !priv->skb_cache) {
+ skb = dev_alloc_skb(alloc_len
+ + WSM_TX_EXTRA_HEADROOM
+ + 8 /* TKIP IV */
+ + 12 /* TKIP ICV + MIC */
+ - 2 /* Piggyback */);
+		/* In AP mode RXed SKB can be looped back as a broadcast.
+		 * Here we reserve enough space for headers. */
+		if (skb)
+			skb_reserve(skb, WSM_TX_EXTRA_HEADROOM
+					+ 8 /* TKIP IV */
+					- WSM_RX_EXTRA_HEADROOM);
+ } else {
+ skb = priv->skb_cache;
+ priv->skb_cache = NULL;
+ }
+ return skb;
+}
+
+static void cw1200_put_skb(struct cw1200_common *priv, struct sk_buff *skb)
+{
+ if (priv->skb_cache)
+ dev_kfree_skb(skb);
+ else
+ priv->skb_cache = skb;
+}
+
+static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
+ u16 *ctrl_reg)
+{
+ int ret;
+
+ ret = cw1200_reg_read_16(priv,
+ ST90TDS_CONTROL_REG_ID, ctrl_reg);
+ if (ret) {
+ ret = cw1200_reg_read_16(priv,
+ ST90TDS_CONTROL_REG_ID, ctrl_reg);
+ if (ret)
+ printk(KERN_ERR
+ "[BH] Failed to read control register.\n");
+ }
+
+ return ret;
+}
+
+static int cw1200_device_wakeup(struct cw1200_common *priv)
+{
+ u16 ctrl_reg;
+ int ret;
+
+ bh_printk(KERN_DEBUG "[BH] Device wakeup.\n");
+
+ /* To force the device to be always-on, the host sets WLAN_UP to 1 */
+ ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
+ ST90TDS_CONT_WUP_BIT);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
+ if (WARN_ON(ret))
+ return ret;
+
+ /* If the device returns WLAN_RDY as 1, the device is active and will
+ * remain active. */
+ if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
+ bh_printk(KERN_DEBUG "[BH] Device awake.\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Must be called from the BH thread. */
+void cw1200_enable_powersave(struct cw1200_common *priv,
+ bool enable)
+{
+	bh_printk(KERN_DEBUG "[BH] Powersave is %s.\n",
+ enable ? "enabled" : "disabled");
+ priv->powersave_enabled = enable;
+}
+
+static int cw1200_bh(void *arg)
+{
+ struct cw1200_common *priv = arg;
+ struct sk_buff *skb_rx = NULL;
+ size_t read_len = 0;
+ int rx, tx, term, suspend;
+ struct wsm_hdr *wsm;
+ size_t wsm_len;
+ int wsm_id;
+ u8 wsm_seq;
+ int rx_resync = 1;
+ u16 ctrl_reg = 0;
+ int tx_allowed;
+ int pending_tx = 0;
+ int tx_burst;
+ int rx_burst = 0;
+ long status;
+#if defined(CONFIG_CW1200_WSM_DUMPS)
+ size_t wsm_dump_max = -1;
+#endif
+ u32 dummy;
+
+ for (;;) {
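+		/* Pick the wait timeout: poll every second while the device
+		 * may be put to sleep or while TX confirmations are pending
+		 * (to catch lost interrupts); otherwise sleep indefinitely. */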
+ if (!priv->hw_bufs_used
+ && priv->powersave_enabled
+ && !priv->device_can_sleep)
+ status = 1 * HZ;
+ else if (priv->hw_bufs_used)
+ /* Interrupt loss detection */
+ status = 1 * HZ;
+ else
+ status = MAX_SCHEDULE_TIMEOUT;
+
+		/* Dummy read for SDIO retry mechanism */
+ if (((atomic_read(&priv->bh_rx) == 0) &&
+ (atomic_read(&priv->bh_tx) == 0)))
+ cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
+ &dummy, sizeof(dummy));
+#if defined(CONFIG_CW1200_WSM_DUMPS_SHORT)
+ wsm_dump_max = priv->wsm_dump_max_size;
+#endif /* CONFIG_CW1200_WSM_DUMPS_SHORT */
+
+ status = wait_event_interruptible_timeout(priv->bh_wq, ({
+ rx = atomic_xchg(&priv->bh_rx, 0);
+ tx = atomic_xchg(&priv->bh_tx, 0);
+ term = atomic_xchg(&priv->bh_term, 0);
+ suspend = pending_tx ?
+ 0 : atomic_read(&priv->bh_suspend);
+ (rx || tx || term || suspend || priv->bh_error);
+ }), status);
+
+ if (status < 0 || term || priv->bh_error)
+ break;
+
+ if (!status && priv->hw_bufs_used) {
+ unsigned long timestamp = jiffies;
+ long timeout;
+ bool pending = false;
+ int i;
+
+ wiphy_warn(priv->hw->wiphy, "Missed interrupt?\n");
+ rx = 1;
+
+ /* Get a timestamp of "oldest" frame */
+ for (i = 0; i < 4; ++i)
+ pending |= cw1200_queue_get_xmit_timestamp(
+ &priv->tx_queue[i],
+ &timestamp);
+
+			/* Check whether frame transmission has timed out.
+			 * Add an extra second to allow for possible
+			 * interrupt loss. */
+ timeout = timestamp +
+ WSM_CMD_LAST_CHANCE_TIMEOUT +
+ 1 * HZ -
+ jiffies;
+
+			/* And terminate the BH thread if the frame is "stuck" */
+ if (pending && timeout < 0) {
+ wiphy_warn(priv->hw->wiphy,
+ "Timeout waiting for TX confirm.\n");
+ break;
+ }
+
+#if defined(CONFIG_CW1200_DUMP_ON_ERROR)
+ BUG_ON(1);
+#endif /* CONFIG_CW1200_DUMP_ON_ERROR */
+ } else if (!status) {
+ bh_printk(KERN_DEBUG "[BH] Device wakedown.\n");
+ WARN_ON(cw1200_reg_write_16(priv,
+ ST90TDS_CONTROL_REG_ID, 0));
+ priv->device_can_sleep = true;
+ continue;
+ } else if (suspend) {
+ bh_printk(KERN_DEBUG "[BH] Device suspend.\n");
+ if (priv->powersave_enabled) {
+ WARN_ON(cw1200_reg_write_16(priv,
+ ST90TDS_CONTROL_REG_ID, 0));
+ priv->device_can_sleep = true;
+ }
+
+ atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
+ wake_up(&priv->bh_evt_wq);
+ status = wait_event_interruptible(priv->bh_wq,
+ CW1200_BH_RESUME == atomic_read(
+ &priv->bh_suspend));
+ if (status < 0) {
+ wiphy_err(priv->hw->wiphy,
+ "%s: Failed to wait for resume: %ld.\n",
+ __func__, status);
+ break;
+ }
+ bh_printk(KERN_DEBUG "[BH] Device resume.\n");
+ atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
+ wake_up(&priv->bh_evt_wq);
+ atomic_add(1, &priv->bh_rx);
+ continue;
+ }
+
+ tx += pending_tx;
+ pending_tx = 0;
+
+ if (rx) {
+ size_t alloc_len;
+ u8 *data;
+
+ if (WARN_ON(cw1200_bh_read_ctrl_reg(
+ priv, &ctrl_reg)))
+ break;
+rx:
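+			/* The control register apparently reports the length of
+			 * the next message in 16-bit words; convert to bytes. */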
+ read_len = (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
+ if (!read_len) {
+ rx_burst = 0;
+ goto tx;
+ }
+
+ if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
+ (read_len > EFFECTIVE_BUF_SIZE))) {
+				printk(KERN_DEBUG "Invalid read len: %zu\n",
+					read_len);
+ break;
+ }
+
+			/* Add the size of the piggybacked CONTROL register
+			 * (2 bytes) to the next message length; the skb is
+			 * sized to include it. */
+
+#if defined(CONFIG_CW1200_NON_POWER_OF_TWO_BLOCKSIZES)
+ alloc_len = priv->sbus_ops->align_size(
+ priv->sbus_priv, read_len);
+#else /* CONFIG_CW1200_NON_POWER_OF_TWO_BLOCKSIZES */
+ /* Platform's SDIO workaround */
+ alloc_len = read_len & ~(SDIO_BLOCK_SIZE - 1);
+ if (read_len & (SDIO_BLOCK_SIZE - 1))
+ alloc_len += SDIO_BLOCK_SIZE;
+#endif /* CONFIG_CW1200_NON_POWER_OF_TWO_BLOCKSIZES */
+
+ /* Check if not exceeding CW1200 capabilities */
+ if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
+				printk(KERN_DEBUG "Read aligned len: %zu\n",
+					alloc_len);
+ }
+
+ skb_rx = cw1200_get_skb(priv, alloc_len);
+ if (WARN_ON(!skb_rx))
+ break;
+
+ skb_trim(skb_rx, 0);
+ skb_put(skb_rx, read_len);
+ data = skb_rx->data;
+ if (WARN_ON(!data))
+ break;
+
+ if (WARN_ON(cw1200_data_read(priv, data, alloc_len)))
+ break;
+
+ /* Piggyback */
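+			/* (the last two bytes of each read carry an updated copy
+			 * of the control register, saving a separate read) */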
+ ctrl_reg = __le16_to_cpu(
+ ((__le16 *)data)[alloc_len / 2 - 1]);
+
+ wsm = (struct wsm_hdr *)data;
+ wsm_len = __le32_to_cpu(wsm->len);
+ if (WARN_ON(wsm_len > read_len))
+ break;
+
+#if defined(CONFIG_CW1200_WSM_DUMPS)
+ if (unlikely(priv->wsm_enable_wsm_dumps))
+ print_hex_dump_bytes("<-- ",
+ DUMP_PREFIX_NONE,
+ data, min(wsm_len, wsm_dump_max));
+#endif /* CONFIG_CW1200_WSM_DUMPS */
+
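+			/* In the WSM header "id" field, bits 0..11 carry the
+			 * message ID and bits 13..15 the RX sequence number. */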
+ wsm_id = __le32_to_cpu(wsm->id) & 0xFFF;
+ wsm_seq = (__le32_to_cpu(wsm->id) >> 13) & 7;
+
+ skb_trim(skb_rx, wsm_len);
+
+ if (unlikely(wsm_id == 0x0800)) {
+ wsm_handle_exception(priv,
+ &data[sizeof(*wsm)],
+ wsm_len - sizeof(*wsm));
+ break;
+ } else if (unlikely(!rx_resync)) {
+ if (WARN_ON(wsm_seq != priv->wsm_rx_seq)) {
+#if defined(CONFIG_CW1200_DUMP_ON_ERROR)
+ BUG_ON(1);
+#endif /* CONFIG_CW1200_DUMP_ON_ERROR */
+ break;
+ }
+ }
+ priv->wsm_rx_seq = (wsm_seq + 1) & 7;
+ rx_resync = 0;
+
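+			/* Bit 10 of the ID appears to mark a confirmation;
+			 * each confirmation releases one device TX buffer. */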
+ if (wsm_id & 0x0400) {
+ int rc = wsm_release_tx_buffer(priv, 1);
+ if (WARN_ON(rc < 0))
+ break;
+ else if (rc > 0)
+ tx = 1;
+ }
+
+			/* wsm_handle_rx() takes care of the SKB lifetime */
+ if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
+ break;
+
+ if (skb_rx) {
+ cw1200_put_skb(priv, skb_rx);
+ skb_rx = NULL;
+ }
+
+ read_len = 0;
+
+ if (rx_burst) {
+ cw1200_debug_rx_burst(priv);
+ --rx_burst;
+ goto rx;
+ }
+ }
+
+tx:
+ BUG_ON(priv->hw_bufs_used > priv->wsm_caps.numInpChBufs);
+ tx_burst = priv->wsm_caps.numInpChBufs - priv->hw_bufs_used;
+ tx_allowed = tx_burst > 0;
+
+ if (tx && tx_allowed) {
+ size_t tx_len;
+ u8 *data;
+ int ret;
+
+ if (priv->device_can_sleep) {
+ ret = cw1200_device_wakeup(priv);
+ if (WARN_ON(ret < 0))
+ break;
+ else if (ret)
+ priv->device_can_sleep = false;
+ else {
+ /* Wait for "awake" interrupt */
+ pending_tx = tx;
+ continue;
+ }
+ }
+
+ wsm_alloc_tx_buffer(priv);
+ ret = wsm_get_tx(priv, &data, &tx_len, &tx_burst);
+ if (ret <= 0) {
+ wsm_release_tx_buffer(priv, 1);
+ if (WARN_ON(ret < 0))
+ break;
+ } else {
+ wsm = (struct wsm_hdr *)data;
+ BUG_ON(tx_len < sizeof(*wsm));
+ BUG_ON(__le32_to_cpu(wsm->len) != tx_len);
+
+#if 0 /* count is not implemented */
+ if (ret > 1)
+ atomic_add(1, &priv->bh_tx);
+#else
+ atomic_add(1, &priv->bh_tx);
+#endif
+
+
+#if defined(CONFIG_CW1200_NON_POWER_OF_TWO_BLOCKSIZES)
+ tx_len = priv->sbus_ops->align_size(
+ priv->sbus_priv, tx_len);
+#else /* CONFIG_CW1200_NON_POWER_OF_TWO_BLOCKSIZES */
+ /* HACK!!! Platform limitation.
+ * It is also supported by upper layer:
+ * there is always enough space at the
+ * end of the buffer. */
+ if (tx_len & (SDIO_BLOCK_SIZE - 1)) {
+ tx_len &= ~(SDIO_BLOCK_SIZE - 1);
+ tx_len += SDIO_BLOCK_SIZE;
+ }
+#endif /* CONFIG_CW1200_NON_POWER_OF_TWO_BLOCKSIZES */
+
+ /* Check if not exceeding CW1200
+ capabilities */
+ if (WARN_ON_ONCE(
+ tx_len > EFFECTIVE_BUF_SIZE)) {
+					printk(KERN_DEBUG "Write aligned len:"
+						" %zu\n", tx_len);
+ }
+
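+				/* Stamp the frame with the current TX sequence
+				 * number before writing it out. */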
+ wsm->id &= __cpu_to_le32(
+ ~WSM_TX_SEQ(WSM_TX_SEQ_MAX));
+ wsm->id |= cpu_to_le32(
+ WSM_TX_SEQ(priv->wsm_tx_seq));
+
+ if (WARN_ON(cw1200_data_write(priv,
+ data, tx_len))) {
+ wsm_release_tx_buffer(priv, 1);
+ break;
+ }
+
+#if defined(CONFIG_CW1200_WSM_DUMPS)
+ if (unlikely(priv->wsm_enable_wsm_dumps))
+ print_hex_dump_bytes("--> ",
+ DUMP_PREFIX_NONE,
+ data,
+ min(__le32_to_cpu(wsm->len),
+ wsm_dump_max));
+#endif /* CONFIG_CW1200_WSM_DUMPS */
+
+ wsm_txed(priv, data);
+ priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) &
+ WSM_TX_SEQ_MAX;
+
+ if (tx_burst > 1) {
+ cw1200_debug_tx_burst(priv);
+ ++rx_burst;
+ goto tx;
+ }
+ }
+ }
+
+ if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
+ goto rx;
+ }
+
+ if (skb_rx) {
+ cw1200_put_skb(priv, skb_rx);
+ skb_rx = NULL;
+ }
+
+
+ if (!term) {
+		cw1200_dbg(CW1200_DBG_ERROR, "[BH] Fatal error, exiting.\n");
+#if defined(CONFIG_CW1200_DUMP_ON_ERROR)
+ BUG_ON(1);
+#endif /* CONFIG_CW1200_DUMP_ON_ERROR */
+ priv->bh_error = 1;
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ ieee80211_driver_hang_notify(priv->vif, GFP_KERNEL);
+ cw1200_pm_stay_awake(&priv->pm_state, 3*HZ);
+#endif
+ /* TODO: schedule_work(recovery) */
+#ifndef HAS_PUT_TASK_STRUCT
+		/* The only reason for having this stupid code here is that
+		 * __put_task_struct is not exported by the kernel. */
+ for (;;) {
+ int status = wait_event_interruptible(priv->bh_wq, ({
+ term = atomic_xchg(&priv->bh_term, 0);
+ (term);
+ }));
+
+ if (status || term)
+ break;
+ }
+#endif
+ }
+ return 0;
+}
diff --git a/drivers/staging/cw1200/bh.h b/drivers/staging/cw1200/bh.h
new file mode 100644
index 00000000000..ea4598afc43
--- /dev/null
+++ b/drivers/staging/cw1200/bh.h
@@ -0,0 +1,30 @@
+/*
+ * Device handling thread interface for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_BH_H
+#define CW1200_BH_H
+
+/* extern */ struct cw1200_common;
+
+#define SDIO_BLOCK_SIZE (512)
+
+int cw1200_register_bh(struct cw1200_common *priv);
+void cw1200_unregister_bh(struct cw1200_common *priv);
+void cw1200_irq_handler(struct cw1200_common *priv);
+void cw1200_bh_wakeup(struct cw1200_common *priv);
+int cw1200_bh_suspend(struct cw1200_common *priv);
+int cw1200_bh_resume(struct cw1200_common *priv);
+/* Must be called from BH thread. */
+void cw1200_enable_powersave(struct cw1200_common *priv,
+ bool enable);
+int wsm_release_tx_buffer(struct cw1200_common *priv, int count);
+
+#endif /* CW1200_BH_H */
diff --git a/drivers/staging/cw1200/cw1200.h b/drivers/staging/cw1200/cw1200.h
new file mode 100644
index 00000000000..5869c917fe6
--- /dev/null
+++ b/drivers/staging/cw1200/cw1200.h
@@ -0,0 +1,313 @@
+/*
+ * Common private data for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * Based on the mac80211 Prism54 code, which is
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ *
+ * Based on the islsm (softmac prism54) driver, which is:
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_H
+#define CW1200_H
+
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <net/mac80211.h>
+
+#include "queue.h"
+#include "wsm.h"
+#include "scan.h"
+#include "txrx.h"
+#include "ht.h"
+#include "pm.h"
+
+/* extern */ struct sbus_ops;
+/* extern */ struct task_struct;
+/* extern */ struct cw1200_debug_priv;
+/* extern */ struct firmware;
+
+#if defined(CONFIG_CW1200_TXRX_DEBUG)
+#define txrx_printk(...) printk(__VA_ARGS__)
+#else
+#define txrx_printk(...)
+#endif
+
+#define CW1200_MAX_CTRL_FRAME_LEN (0x1000)
+
+#define CW1200_MAX_STA_IN_AP_MODE (5)
+#define CW1200_LINK_ID_AFTER_DTIM (CW1200_MAX_STA_IN_AP_MODE + 1)
+#define CW1200_LINK_ID_UAPSD (CW1200_MAX_STA_IN_AP_MODE + 2)
+#define CW1200_LINK_ID_MAX (CW1200_MAX_STA_IN_AP_MODE + 3)
+#define CW1200_MAX_REQUEUE_ATTEMPTS (5)
+
+#define CW1200_MAX_TID (8)
+
+#define CW1200_BLOCK_ACK_CNT (30)
+#define CW1200_BLOCK_ACK_THLD (800)
+#define CW1200_BLOCK_ACK_HIST (3)
+#define CW1200_BLOCK_ACK_INTERVAL (1 * HZ / CW1200_BLOCK_ACK_HIST)
+
+/* Please keep order */
+enum cw1200_join_status {
+ CW1200_JOIN_STATUS_PASSIVE = 0,
+ CW1200_JOIN_STATUS_MONITOR,
+ CW1200_JOIN_STATUS_STA,
+ CW1200_JOIN_STATUS_AP,
+};
+
+enum cw1200_link_status {
+ CW1200_LINK_OFF,
+ CW1200_LINK_RESERVE,
+ CW1200_LINK_SOFT,
+ CW1200_LINK_HARD,
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ CW1200_LINK_RESET,
+ CW1200_LINK_RESET_REMAP,
+#endif
+};
+
+enum cw1200_bss_loss_status {
+ CW1200_BSS_LOSS_NONE,
+ CW1200_BSS_LOSS_CHECKING,
+ CW1200_BSS_LOSS_CONFIRMING,
+ CW1200_BSS_LOSS_CONFIRMED,
+};
+
+struct cw1200_link_entry {
+ unsigned long timestamp;
+ enum cw1200_link_status status;
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ enum cw1200_link_status prev_status;
+#endif
+ u8 mac[ETH_ALEN];
+ u8 buffered[CW1200_MAX_TID];
+ struct sk_buff_head rx_queue;
+};
+
+struct cw1200_common {
+ struct cw1200_queue tx_queue[4];
+ struct cw1200_queue_stats tx_queue_stats;
+ int tx_burst_idx;
+ struct cw1200_debug_priv *debug;
+
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ struct device *pdev;
+ struct workqueue_struct *workqueue;
+
+ struct mutex conf_mutex;
+
+ const struct sbus_ops *sbus_ops;
+ struct sbus_priv *sbus_priv;
+
+ /* HW type (HIF_...) */
+ int hw_type;
+ int hw_revision;
+
+ /* firmware/hardware info */
+ unsigned int tx_hdr_len;
+
+ /* Radio data */
+ int output_power;
+ int noise;
+
+	/* calibration, output power limit and rssi<->dBm conversion data */
+
+ /* BBP/MAC state */
+ const struct firmware *sdd;
+ struct ieee80211_rate *rates;
+ struct ieee80211_rate *mcs_rates;
+ u8 mac_addr[ETH_ALEN];
+ struct ieee80211_channel *channel;
+ u8 bssid[ETH_ALEN];
+ struct wsm_edca_params edca;
+ struct wsm_tx_queue_params tx_queue_params;
+ struct wsm_association_mode association_mode;
+ struct wsm_set_bss_params bss_params;
+ struct cw1200_ht_info ht_info;
+ struct wsm_set_pm powersave_mode;
+ struct wsm_set_pm firmware_ps_mode;
+ int cqm_rssi_thold;
+ unsigned cqm_rssi_hyst;
+ unsigned cqm_tx_failure_thold;
+ unsigned cqm_tx_failure_count;
+ bool cqm_use_rssi;
+ int cqm_link_loss_count;
+ int cqm_beacon_loss_count;
+ int channel_switch_in_progress;
+ wait_queue_head_t channel_switch_done;
+ u8 long_frame_max_tx_count;
+ u8 short_frame_max_tx_count;
+ int mode;
+ bool enable_beacon;
+ int beacon_int;
+ size_t ssid_length;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool listening;
+ struct wsm_rx_filter rx_filter;
+ struct wsm_beacon_filter_table bf_table;
+ struct wsm_beacon_filter_control bf_control;
+ struct wsm_multicast_filter multicast_filter;
+ bool has_multicast_subscription;
+ bool disable_beacon_filter;
+ struct work_struct update_filtering_work;
+ u8 ba_tid_mask;
+ int ba_acc;
+ int ba_cnt;
+ int ba_hist;
+ struct timer_list ba_timer;
+ spinlock_t ba_lock;
+ bool ba_ena;
+ struct work_struct ba_work;
+ struct cw1200_pm_state pm_state;
+ struct wsm_p2p_ps_modeinfo p2p_ps_modeinfo;
+ struct wsm_uapsd_info uapsd_info;
+ bool setbssparams_done;
+ bool is_BT_Present;
+ u8 conf_listen_interval;
+ u32 listen_interval;
+ u32 erp_info;
+
+ /* BH */
+ atomic_t bh_rx;
+ atomic_t bh_tx;
+ atomic_t bh_term;
+ atomic_t bh_suspend;
+ struct task_struct *bh_thread;
+ int bh_error;
+ wait_queue_head_t bh_wq;
+ wait_queue_head_t bh_evt_wq;
+ int buf_id_tx; /* byte */
+ int buf_id_rx; /* byte */
+ int wsm_rx_seq; /* byte */
+ int wsm_tx_seq; /* byte */
+ int hw_bufs_used;
+ struct sk_buff *skb_cache;
+ bool powersave_enabled;
+ bool device_can_sleep;
+
+ /* WSM */
+ struct wsm_caps wsm_caps;
+ struct mutex wsm_cmd_mux;
+ struct wsm_buf wsm_cmd_buf;
+ struct wsm_cmd wsm_cmd;
+ wait_queue_head_t wsm_cmd_wq;
+ wait_queue_head_t wsm_startup_done;
+ struct wsm_cbc wsm_cbc;
+ atomic_t tx_lock;
+
+ /* WSM debug */
+ int wsm_enable_wsm_dumps;
+ u32 wsm_dump_max_size;
+
+ /* Scan status */
+ struct cw1200_scan scan;
+
+ /* WSM Join */
+ enum cw1200_join_status join_status;
+ u8 join_bssid[ETH_ALEN];
+ u32 pending_frame_id;
+ struct work_struct join_work;
+ struct delayed_work join_timeout;
+ struct work_struct unjoin_work;
+ struct work_struct offchannel_work;
+ int join_dtim_period;
+ bool delayed_unjoin;
+
+ /* TX/RX and security */
+ s8 wep_default_key_id;
+ struct work_struct wep_key_work;
+ u32 key_map;
+ struct wsm_add_key keys[WSM_KEY_MAX_INDEX + 1];
+ unsigned long rx_timestamp;
+
+ /* AP powersave */
+ u32 link_id_map;
+ struct cw1200_link_entry link_id_db[CW1200_MAX_STA_IN_AP_MODE];
+ struct work_struct link_id_work;
+ struct delayed_work link_id_gc_work;
+ u32 sta_asleep_mask;
+ u32 pspoll_mask;
+ bool aid0_bit_set;
+ spinlock_t ps_state_lock;
+ bool buffered_multicasts;
+ bool tx_multicast;
+ struct work_struct set_tim_work;
+ struct delayed_work set_cts_work;
+ struct work_struct multicast_start_work;
+ struct work_struct multicast_stop_work;
+ struct timer_list mcast_timeout;
+
+
+ /* WSM events and CQM implementation */
+ spinlock_t event_queue_lock;
+ struct list_head event_queue;
+ struct work_struct event_handler;
+ struct delayed_work bss_loss_work;
+ struct delayed_work connection_loss_work;
+ struct work_struct tx_failure_work;
+ int delayed_link_loss;
+ spinlock_t bss_loss_lock;
+ int bss_loss_status;
+ int bss_loss_confirm_id;
+
+ /* TX rate policy cache */
+ struct tx_policy_cache tx_policy_cache;
+ struct work_struct tx_policy_upload_work;
+
+ /* cryptographic engine information */
+
+ /* bit field of glowing LEDs */
+ u16 softled_state;
+
+ /* statistics */
+ struct ieee80211_low_level_stats stats;
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ /* Workaround for WFD testcase 6.1.10*/
+ struct work_struct linkid_reset_work;
+ u8 action_frame_sa[ETH_ALEN];
+ u8 action_linkid;
+#endif
+};
+
+struct cw1200_sta_priv {
+ int link_id;
+};
+
+/* interfaces for the drivers */
+int cw1200_core_probe(const struct sbus_ops *sbus_ops,
+ struct sbus_priv *sbus,
+ struct device *pdev,
+ struct cw1200_common **pself);
+void cw1200_core_release(struct cw1200_common *self);
+
+#define CW1200_DBG_MSG 0x00000001
+#define CW1200_DBG_NIY 0x00000002
+#define CW1200_DBG_SBUS 0x00000004
+#define CW1200_DBG_INIT 0x00000008
+#define CW1200_DBG_ERROR 0x00000010
+#define CW1200_DBG_LEVEL 0xFFFFFFFF
+
+#define cw1200_dbg(level, ...) \
+ do { \
+ if ((level) & CW1200_DBG_LEVEL) \
+ printk(KERN_DEBUG __VA_ARGS__); \
+ } while (0)
+
+#define STUB() \
+ do { \
+ cw1200_dbg(CW1200_DBG_NIY, "%s: STUB at line %d.\n", \
+ __func__, __LINE__); \
+ } while (0)
+
+#endif /* CW1200_H */
diff --git a/drivers/staging/cw1200/cw1200_plat.h b/drivers/staging/cw1200/cw1200_plat.h
new file mode 100644
index 00000000000..3a73183c9f8
--- /dev/null
+++ b/drivers/staging/cw1200/cw1200_plat.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CW1200_PLAT_H_INCLUDED
+#define CW1200_PLAT_H_INCLUDED
+
+#include <linux/ioport.h>
+
+struct cw1200_platform_data {
+ const char *mmc_id;
+ const struct resource *irq;
+ const struct resource *reset;
+ int (*power_ctrl)(const struct cw1200_platform_data *pdata,
+ bool enable);
+ int (*clk_ctrl)(const struct cw1200_platform_data *pdata,
+ bool enable);
+ int (*prcmu_ctrl)(const struct cw1200_platform_data *pdata,
+ bool enable);
+};
+
+/* Declaration only. Should be implemented in arch/xxx/mach-yyy */
+const struct cw1200_platform_data *cw1200_get_platform_data(void);
+
+#endif /* CW1200_PLAT_H_INCLUDED */
diff --git a/drivers/staging/cw1200/cw1200_sdio.c b/drivers/staging/cw1200/cw1200_sdio.c
new file mode 100644
index 00000000000..4b9d622689f
--- /dev/null
+++ b/drivers/staging/cw1200/cw1200_sdio.c
@@ -0,0 +1,469 @@
+/*
+ * Mac80211 SDIO driver for ST-Ericsson CW1200 device
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <linux/spinlock.h>
+#include <asm/mach-types.h>
+#include <net/mac80211.h>
+
+#include "cw1200.h"
+#include "sbus.h"
+#include "cw1200_plat.h"
+
+MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>");
+MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SDIO driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("cw1200_wlan");
+
+struct sbus_priv {
+ struct sdio_func *func;
+ struct cw1200_common *core;
+ const struct cw1200_platform_data *pdata;
+ spinlock_t lock;
+ sbus_irq_handler irq_handler;
+ void *irq_priv;
+};
+
+static const struct sdio_device_id cw1200_sdio_ids[] = {
+ { SDIO_DEVICE(SDIO_ANY_ID, SDIO_ANY_ID) },
+ { /* end: all zeroes */ },
+};
+
+/* sbus_ops implementation */
+
+static int cw1200_sdio_memcpy_fromio(struct sbus_priv *self,
+ unsigned int addr,
+ void *dst, int count)
+{
+ return sdio_memcpy_fromio(self->func, dst, addr, count);
+}
+
+static int cw1200_sdio_memcpy_toio(struct sbus_priv *self,
+ unsigned int addr,
+ const void *src, int count)
+{
+ return sdio_memcpy_toio(self->func, addr, (void *)src, count);
+}
+
+static void cw1200_sdio_lock(struct sbus_priv *self)
+{
+ sdio_claim_host(self->func);
+}
+
+static void cw1200_sdio_unlock(struct sbus_priv *self)
+{
+ sdio_release_host(self->func);
+}
+
+#ifndef CONFIG_CW1200_USE_GPIO_IRQ
+static void cw1200_sdio_irq_handler(struct sdio_func *func)
+{
+ struct sbus_priv *self = sdio_get_drvdata(func);
+ unsigned long flags;
+
+ BUG_ON(!self);
+ spin_lock_irqsave(&self->lock, flags);
+ if (self->irq_handler)
+ self->irq_handler(self->irq_priv);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+#else /* CONFIG_CW1200_USE_GPIO_IRQ */
+static irqreturn_t cw1200_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct sbus_priv *self = dev_id;
+
+ BUG_ON(!self);
+ if (self->irq_handler)
+ self->irq_handler(self->irq_priv);
+ return IRQ_HANDLED;
+}
+
+static int cw1200_request_irq(struct sbus_priv *self,
+ irq_handler_t handler)
+{
+ int ret;
+ int func_num;
+ const struct resource *irq = self->pdata->irq;
+ u8 cccr;
+
+ ret = request_any_context_irq(irq->start, handler,
+ IRQF_TRIGGER_RISING, irq->name, self);
+ if (WARN_ON(ret < 0))
+ goto exit;
+
+	/* Hack to access Function 0 */
+ func_num = self->func->num;
+ self->func->num = 0;
+
+ cccr = sdio_readb(self->func, SDIO_CCCR_IENx, &ret);
+ if (WARN_ON(ret))
+ goto set_func;
+
+ /* Master interrupt enable ... */
+ cccr |= BIT(0);
+
+ /* ... for our function */
+ cccr |= BIT(func_num);
+
+ sdio_writeb(self->func, cccr, SDIO_CCCR_IENx, &ret);
+ if (WARN_ON(ret))
+ goto set_func;
+
+ /* Restore the WLAN function number */
+ self->func->num = func_num;
+ return 0;
+
+set_func:
+ self->func->num = func_num;
+ free_irq(irq->start, self);
+exit:
+ return ret;
+}
+#endif /* CONFIG_CW1200_USE_GPIO_IRQ */
+
+static int cw1200_sdio_irq_subscribe(struct sbus_priv *self,
+ sbus_irq_handler handler,
+ void *priv)
+{
+ int ret;
+ unsigned long flags;
+
+ if (!handler)
+ return -EINVAL;
+
+ spin_lock_irqsave(&self->lock, flags);
+ self->irq_priv = priv;
+ self->irq_handler = handler;
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ printk(KERN_DEBUG "SW IRQ subscribe\n");
+ sdio_claim_host(self->func);
+#ifndef CONFIG_CW1200_USE_GPIO_IRQ
+ ret = sdio_claim_irq(self->func, cw1200_sdio_irq_handler);
+#else
+ ret = cw1200_request_irq(self, cw1200_gpio_irq_handler);
+#endif
+ sdio_release_host(self->func);
+ return ret;
+}
+
+static int cw1200_sdio_irq_unsubscribe(struct sbus_priv *self)
+{
+ int ret = 0;
+ unsigned long flags;
+#ifdef CONFIG_CW1200_USE_GPIO_IRQ
+ const struct resource *irq = self->pdata->irq;
+#endif
+
+ WARN_ON(!self->irq_handler);
+ if (!self->irq_handler)
+ return 0;
+
+ printk(KERN_DEBUG "SW IRQ unsubscribe\n");
+#ifndef CONFIG_CW1200_USE_GPIO_IRQ
+ sdio_claim_host(self->func);
+ ret = sdio_release_irq(self->func);
+ sdio_release_host(self->func);
+#else
+ free_irq(irq->start, self);
+#endif
+
+ spin_lock_irqsave(&self->lock, flags);
+ self->irq_priv = NULL;
+ self->irq_handler = NULL;
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return ret;
+}
+
+static int cw1200_detect_card(const struct cw1200_platform_data *pdata)
+{
+	/* HACK!!!
+	 * Rely on mmc->class_dev.class being set in mmc_alloc_host.
+	 * Tricky part: a new mmc host is temporarily created just to
+	 * discover the mmc_host class.
+	 * Is there a more elegant way to enumerate mmc_hosts?
+	 */
+
+ struct mmc_host *mmc = NULL;
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ mmc = mmc_alloc_host(0, NULL);
+ if (!mmc)
+ return -ENOMEM;
+
+ BUG_ON(!mmc->class_dev.class);
+ class_dev_iter_init(&iter, mmc->class_dev.class, NULL, NULL);
+ for (;;) {
+ dev = class_dev_iter_next(&iter);
+ if (!dev) {
+ printk(KERN_ERR "cw1200: %s is not found.\n",
+ pdata->mmc_id);
+ break;
+ } else {
+ struct mmc_host *host = container_of(dev,
+ struct mmc_host, class_dev);
+
+ if (dev_name(&host->class_dev) &&
+ strcmp(dev_name(&host->class_dev),
+ pdata->mmc_id))
+ continue;
+
+ mmc_detect_change(host, 10);
+ break;
+ }
+ }
+ mmc_free_host(mmc);
+ return 0;
+}
+
+static int cw1200_sdio_off(const struct cw1200_platform_data *pdata)
+{
+ int ret = 0;
+#ifndef CONFIG_CW1200_U5500_SUPPORT
+ const struct resource *reset = pdata->reset;
+ gpio_set_value(reset->start, 0);
+ gpio_free(reset->start);
+#else
+ if (pdata->prcmu_ctrl)
+ ret = pdata->prcmu_ctrl(pdata, false);
+ msleep(50);
+#endif
+ cw1200_detect_card(pdata);
+ return ret;
+}
+
+static int cw1200_sdio_on(const struct cw1200_platform_data *pdata)
+{
+ int ret = 0;
+#ifndef CONFIG_CW1200_U5500_SUPPORT
+ const struct resource *reset = pdata->reset;
+ gpio_request(reset->start, reset->name);
+ gpio_direction_output(reset->start, 1);
+	/* It is not stated in the datasheet, but at least some devices
+	 * have problems with reset if this stage is omitted. */
+ msleep(50);
+ gpio_direction_output(reset->start, 0);
+	/* A valid reset shall be obtained by maintaining WRESETN
+	 * active (low) for at least two cycles of LP_CLK after VDDIO
+	 * is stable within its operating range. */
+ usleep_range(1000, 20000);
+ gpio_set_value(reset->start, 1);
+ /* The host should wait 32 ms after the WRESETN release
+ * for the on-chip LDO to stabilize */
+ msleep(32);
+#else
+ if (pdata->prcmu_ctrl)
+ ret = pdata->prcmu_ctrl(pdata, true);
+ msleep(50);
+#endif
+ cw1200_detect_card(pdata);
+ return ret;
+}
+
+static int cw1200_sdio_reset(struct sbus_priv *self)
+{
+ cw1200_sdio_off(self->pdata);
+ msleep(1000);
+ cw1200_sdio_on(self->pdata);
+ return 0;
+}
+
+static size_t cw1200_sdio_align_size(struct sbus_priv *self, size_t size)
+{
+ size_t aligned = sdio_align_size(self->func, size);
+ return aligned;
+}
+
+int cw1200_sdio_set_block_size(struct sbus_priv *self, size_t size)
+{
+ return sdio_set_block_size(self->func, size);
+}
+
+static int cw1200_sdio_pm(struct sbus_priv *self, bool suspend)
+{
+ int ret = 0;
+ const struct resource *irq = self->pdata->irq;
+
+ if (irq)
+ ret = irq_set_irq_wake(irq->start, suspend);
+
+ return ret;
+}
+
+static struct sbus_ops cw1200_sdio_sbus_ops = {
+ .sbus_memcpy_fromio = cw1200_sdio_memcpy_fromio,
+ .sbus_memcpy_toio = cw1200_sdio_memcpy_toio,
+ .lock = cw1200_sdio_lock,
+ .unlock = cw1200_sdio_unlock,
+ .irq_subscribe = cw1200_sdio_irq_subscribe,
+ .irq_unsubscribe = cw1200_sdio_irq_unsubscribe,
+ .reset = cw1200_sdio_reset,
+ .align_size = cw1200_sdio_align_size,
+ .power_mgmt = cw1200_sdio_pm,
+ .set_block_size = cw1200_sdio_set_block_size,
+};
+
+/* Probe Function to be called by SDIO stack when device is discovered */
+static int cw1200_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct sbus_priv *self;
+ int status;
+
+ cw1200_dbg(CW1200_DBG_INIT, "Probe called\n");
+
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ cw1200_dbg(CW1200_DBG_ERROR, "Can't allocate SDIO sbus_priv.");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&self->lock);
+ self->pdata = cw1200_get_platform_data();
+ self->func = func;
+ sdio_set_drvdata(func, self);
+ sdio_claim_host(func);
+ sdio_enable_func(func);
+ sdio_release_host(func);
+
+ status = cw1200_core_probe(&cw1200_sdio_sbus_ops,
+ self, &func->dev, &self->core);
+ if (status) {
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ sdio_set_drvdata(func, NULL);
+ kfree(self);
+ }
+
+ return status;
+}
+
+/* Disconnect Function to be called by SDIO stack when
+ * device is disconnected */
+static void cw1200_sdio_disconnect(struct sdio_func *func)
+{
+ struct sbus_priv *self = sdio_get_drvdata(func);
+
+ if (self) {
+ if (self->core) {
+ cw1200_core_release(self->core);
+ self->core = NULL;
+ }
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ sdio_set_drvdata(func, NULL);
+ kfree(self);
+ }
+}
+
+static int cw1200_suspend(struct device *dev)
+{
+ int ret;
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
+ /* Notify SDIO that CW1200 will remain powered during suspend */
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret)
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "Error setting SDIO pm flags: %i\n", ret);
+
+ return ret;
+}
+
+static int cw1200_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops cw1200_pm_ops = {
+ .suspend = cw1200_suspend,
+ .resume = cw1200_resume,
+};
+
+static struct sdio_driver sdio_driver = {
+ .name = "cw1200_wlan",
+ .id_table = cw1200_sdio_ids,
+ .probe = cw1200_sdio_probe,
+ .remove = cw1200_sdio_disconnect,
+ .drv = {
+ .pm = &cw1200_pm_ops,
+ }
+};
+
+/* Init Module function -> Called by insmod */
+static int __init cw1200_sdio_init(void)
+{
+ const struct cw1200_platform_data *pdata;
+ int ret;
+
+ pdata = cw1200_get_platform_data();
+
+ ret = sdio_register_driver(&sdio_driver);
+ if (ret)
+ goto err_reg;
+
+ if (pdata->clk_ctrl) {
+ ret = pdata->clk_ctrl(pdata, true);
+ if (ret)
+ goto err_clk;
+ }
+
+ if (pdata->power_ctrl) {
+ ret = pdata->power_ctrl(pdata, true);
+ if (ret)
+ goto err_power;
+ }
+
+ ret = cw1200_sdio_on(pdata);
+ if (ret)
+ goto err_on;
+
+ return 0;
+
+err_on:
+ if (pdata->power_ctrl)
+ pdata->power_ctrl(pdata, false);
+err_power:
+ if (pdata->clk_ctrl)
+ pdata->clk_ctrl(pdata, false);
+err_clk:
+ sdio_unregister_driver(&sdio_driver);
+err_reg:
+ return ret;
+}
+
+/* Called at Driver Unloading */
+static void __exit cw1200_sdio_exit(void)
+{
+ const struct cw1200_platform_data *pdata;
+ pdata = cw1200_get_platform_data();
+ sdio_unregister_driver(&sdio_driver);
+ cw1200_sdio_off(pdata);
+ if (pdata->power_ctrl)
+ pdata->power_ctrl(pdata, false);
+ if (pdata->clk_ctrl)
+ pdata->clk_ctrl(pdata, false);
+}
+
+
+module_init(cw1200_sdio_init);
+module_exit(cw1200_sdio_exit);
diff --git a/drivers/staging/cw1200/debug.c b/drivers/staging/cw1200/debug.c
new file mode 100644
index 00000000000..3da603ac87d
--- /dev/null
+++ b/drivers/staging/cw1200/debug.c
@@ -0,0 +1,611 @@
+/*
+ * DebugFS code for the ST-Ericsson CW1200 mac80211 driver
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "cw1200.h"
+#include "debug.h"
+
+/* join_status */
+static const char * const cw1200_debug_join_status[] = {
+ "passive",
+ "monitor",
+ "station",
+ "access point",
+};
+
+/* WSM_JOIN_PREAMBLE_... */
+static const char * const cw1200_debug_preamble[] = {
+ "long",
+ "short",
+ "long on 1 and 2 Mbps",
+};
+
+static const char * const cw1200_debug_fw_types[] = {
+ "ETF",
+ "WFM",
+ "WSM",
+ "HI test",
+ "Platform test",
+};
+
+static const char * const cw1200_debug_link_id[] = {
+ "OFF",
+ "REQ",
+ "SOFT",
+ "HARD",
+};
+
+static const char *cw1200_debug_mode(int mode)
+{
+ switch (mode) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ return "unspecified";
+ case NL80211_IFTYPE_MONITOR:
+ return "monitor";
+ case NL80211_IFTYPE_STATION:
+ return "station";
+ case NL80211_IFTYPE_ADHOC:
+ return "ad-hok";
+ case NL80211_IFTYPE_MESH_POINT:
+ return "mesh point";
+ case NL80211_IFTYPE_AP:
+ return "access point";
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return "p2p client";
+ case NL80211_IFTYPE_P2P_GO:
+ return "p2p go";
+ default:
+ return "unsupported";
+ }
+}
+
+static void cw1200_queue_status_show(struct seq_file *seq,
+ struct cw1200_queue *q)
+{
+ int i;
+ seq_printf(seq, "Queue %d:\n", q->queue_id);
+ seq_printf(seq, " capacity: %d\n", q->capacity);
+ seq_printf(seq, " queued: %d\n", q->num_queued);
+ seq_printf(seq, " pending: %d\n", q->num_pending);
+ seq_printf(seq, " sent: %d\n", q->num_sent);
+ seq_printf(seq, " locked: %s\n", q->tx_locked_cnt ? "yes" : "no");
+ seq_printf(seq, " overfull: %s\n", q->overfull ? "yes" : "no");
+ seq_puts(seq, " link map: 0-> ");
+ for (i = 0; i < q->stats->map_capacity; ++i)
+ seq_printf(seq, "%.2d ", q->link_map_cache[i]);
+ seq_printf(seq, "<-%d\n", q->stats->map_capacity);
+}
+
+static void cw1200_debug_print_map(struct seq_file *seq,
+ struct cw1200_common *priv,
+ const char *label,
+ u32 map)
+{
+ int i;
+ seq_printf(seq, "%s0-> ", label);
+ for (i = 0; i < priv->tx_queue_stats.map_capacity; ++i)
+ seq_printf(seq, "%s ", (map & BIT(i)) ? "**" : "..");
+ seq_printf(seq, "<-%d\n", priv->tx_queue_stats.map_capacity - 1);
+}
+
+static int cw1200_status_show(struct seq_file *seq, void *v)
+{
+ int i;
+ struct list_head *item;
+ struct cw1200_common *priv = seq->private;
+ struct cw1200_debug_priv *d = priv->debug;
+ int ba_cnt, ba_acc, ba_avg = 0;
+ bool ba_ena;
+
+ spin_lock_bh(&priv->ba_lock);
+ ba_cnt = priv->debug->ba_cnt;
+ ba_acc = priv->debug->ba_acc;
+ ba_ena = priv->ba_ena;
+ if (ba_cnt)
+ ba_avg = ba_acc / ba_cnt;
+ spin_unlock_bh(&priv->ba_lock);
+
+ seq_puts(seq, "CW1200 Wireless LAN driver status\n");
+ seq_printf(seq, "Hardware: %d.%d\n",
+ priv->wsm_caps.hardwareId,
+ priv->wsm_caps.hardwareSubId);
+ seq_printf(seq, "Firmware: %s %d.%d\n",
+ cw1200_debug_fw_types[priv->wsm_caps.firmwareType],
+ priv->wsm_caps.firmwareVersion,
+ priv->wsm_caps.firmwareBuildNumber);
+ seq_printf(seq, "FW API: %d\n",
+ priv->wsm_caps.firmwareApiVer);
+ seq_printf(seq, "FW caps: 0x%.4X\n",
+ priv->wsm_caps.firmwareCap);
+ seq_printf(seq, "Mode: %s%s\n",
+ cw1200_debug_mode(priv->mode),
+ priv->listening ? " (listening)" : "");
+ seq_printf(seq, "Assoc: %s\n",
+ cw1200_debug_join_status[priv->join_status]);
+ if (priv->channel)
+ seq_printf(seq, "Channel: %d%s\n",
+ priv->channel->hw_value,
+ priv->channel_switch_in_progress ?
+ " (switching)" : "");
+ if (priv->rx_filter.promiscuous)
+ seq_puts(seq, "Filter: promisc\n");
+ else if (priv->rx_filter.fcs)
+ seq_puts(seq, "Filter: fcs\n");
+ if (priv->rx_filter.bssid)
+ seq_puts(seq, "Filter: bssid\n");
+ if (priv->bf_control.bcn_count)
+ seq_puts(seq, "Filter: beacons\n");
+
+ if (priv->enable_beacon ||
+ priv->mode == NL80211_IFTYPE_AP ||
+ priv->mode == NL80211_IFTYPE_ADHOC ||
+ priv->mode == NL80211_IFTYPE_MESH_POINT ||
+ priv->mode == NL80211_IFTYPE_P2P_GO)
+ seq_printf(seq, "Beaconing: %s\n",
+ priv->enable_beacon ?
+ "enabled" : "disabled");
+ if (priv->ssid_length ||
+ priv->mode == NL80211_IFTYPE_AP ||
+ priv->mode == NL80211_IFTYPE_ADHOC ||
+ priv->mode == NL80211_IFTYPE_MESH_POINT ||
+ priv->mode == NL80211_IFTYPE_P2P_GO)
+ seq_printf(seq, "SSID: %.*s\n",
+ priv->ssid_length, priv->ssid);
+
+ for (i = 0; i < 4; ++i) {
+ seq_printf(seq, "EDCA(%d): %d, %d, %d, %d, %d\n", i,
+ priv->edca.params[i].cwMin,
+ priv->edca.params[i].cwMax,
+ priv->edca.params[i].aifns,
+ priv->edca.params[i].txOpLimit,
+ priv->edca.params[i].maxReceiveLifetime);
+ }
+ if (priv->join_status == CW1200_JOIN_STATUS_STA) {
+ const char *pmMode = "unknown";
+ switch (priv->powersave_mode.pmMode) {
+ case WSM_PSM_ACTIVE:
+ pmMode = "off";
+ break;
+ case WSM_PSM_PS:
+ pmMode = "on";
+ break;
+ case WSM_PSM_FAST_PS:
+ pmMode = "dynamic";
+ break;
+ }
+ seq_printf(seq, "Preamble: %s\n",
+ cw1200_debug_preamble[
+ priv->association_mode.preambleType]);
+ seq_printf(seq, "AMPDU spcn: %d\n",
+ priv->association_mode.mpduStartSpacing);
+ seq_printf(seq, "Basic rate: 0x%.8X\n",
+ le32_to_cpu(priv->association_mode.basicRateSet));
+ seq_printf(seq, "Bss lost: %d beacons\n",
+ priv->bss_params.beaconLostCount);
+ seq_printf(seq, "AID: %d\n",
+ priv->bss_params.aid);
+ seq_printf(seq, "Rates: 0x%.8X\n",
+ priv->bss_params.operationalRateSet);
+ seq_printf(seq, "Powersave: %s\n", pmMode);
+ }
+ seq_printf(seq, "HT: %s\n",
+ cw1200_is_ht(&priv->ht_info) ? "on" : "off");
+ if (cw1200_is_ht(&priv->ht_info)) {
+ seq_printf(seq, "Greenfield: %s\n",
+ cw1200_ht_greenfield(&priv->ht_info) ? "yes" : "no");
+ seq_printf(seq, "AMPDU dens: %d\n",
+ cw1200_ht_ampdu_density(&priv->ht_info));
+ }
+ seq_printf(seq, "RSSI thold: %d\n",
+ priv->cqm_rssi_thold);
+ seq_printf(seq, "RSSI hyst: %d\n",
+ priv->cqm_rssi_hyst);
+ seq_printf(seq, "TXFL thold: %d\n",
+ priv->cqm_tx_failure_thold);
+ seq_printf(seq, "Linkloss: %d\n",
+ priv->cqm_link_loss_count);
+ seq_printf(seq, "Bcnloss: %d\n",
+ priv->cqm_beacon_loss_count);
+ seq_printf(seq, "Long retr: %d\n",
+ priv->long_frame_max_tx_count);
+ seq_printf(seq, "Short retr: %d\n",
+ priv->short_frame_max_tx_count);
+ spin_lock_bh(&priv->tx_policy_cache.lock);
+ i = 0;
+ list_for_each(item, &priv->tx_policy_cache.used)
+ ++i;
+ spin_unlock_bh(&priv->tx_policy_cache.lock);
+ seq_printf(seq, "RC in use: %d\n", i);
+ seq_printf(seq, "BA stat: %d, %d (%d)\n",
+ ba_cnt, ba_acc, ba_avg);
+ seq_printf(seq, "Block ACK: %s\n", ba_ena ? "on" : "off");
+
+ seq_puts(seq, "\n");
+ for (i = 0; i < 4; ++i) {
+ cw1200_queue_status_show(seq, &priv->tx_queue[i]);
+ seq_puts(seq, "\n");
+ }
+
+ cw1200_debug_print_map(seq, priv, "Link map: ",
+ priv->link_id_map);
+ cw1200_debug_print_map(seq, priv, "Asleep map: ",
+ priv->sta_asleep_mask);
+ cw1200_debug_print_map(seq, priv, "PSPOLL map: ",
+ priv->pspoll_mask);
+
+ seq_puts(seq, "\n");
+
+ for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+ if (priv->link_id_db[i].status) {
+ seq_printf(seq, "Link %d: %s, %pM\n",
+ i + 1, cw1200_debug_link_id[
+ priv->link_id_db[i].status],
+ priv->link_id_db[i].mac);
+ }
+ }
+
+ seq_puts(seq, "\n");
+
+ seq_printf(seq, "BH status: %s\n",
+ atomic_read(&priv->bh_term) ? "terminated" : "alive");
+ seq_printf(seq, "Pending RX: %d\n",
+ atomic_read(&priv->bh_rx));
+ seq_printf(seq, "Pending TX: %d\n",
+ atomic_read(&priv->bh_tx));
+ if (priv->bh_error)
+ seq_printf(seq, "BH errcode: %d\n",
+ priv->bh_error);
+ seq_printf(seq, "TX bufs: %d x %d bytes\n",
+ priv->wsm_caps.numInpChBufs,
+ priv->wsm_caps.sizeInpChBuf);
+ seq_printf(seq, "Used bufs: %d\n",
+ priv->hw_bufs_used);
+ seq_printf(seq, "Powermgmt: %s\n",
+ priv->powersave_enabled ? "on" : "off");
+ seq_printf(seq, "Device: %s\n",
+ priv->device_can_sleep ? "alseep" : "awake");
+
+ spin_lock(&priv->wsm_cmd.lock);
+ seq_printf(seq, "WSM status: %s\n",
+ priv->wsm_cmd.done ? "idle" : "active");
+ seq_printf(seq, "WSM cmd: 0x%.4X (%d bytes)\n",
+ priv->wsm_cmd.cmd, priv->wsm_cmd.len);
+ seq_printf(seq, "WSM retval: %d\n",
+ priv->wsm_cmd.ret);
+ spin_unlock(&priv->wsm_cmd.lock);
+
+ seq_printf(seq, "Datapath: %s\n",
+ atomic_read(&priv->tx_lock) ? "locked" : "unlocked");
+ if (atomic_read(&priv->tx_lock))
+ seq_printf(seq, "TXlock cnt: %d\n",
+ atomic_read(&priv->tx_lock));
+
+ seq_printf(seq, "TXed: %d\n",
+ d->tx);
+ seq_printf(seq, "AGG TXed: %d\n",
+ d->tx_agg);
+ seq_printf(seq, "MULTI TXed: %d (%d)\n",
+ d->tx_multi, d->tx_multi_frames);
+ seq_printf(seq, "RXed: %d\n",
+ d->rx);
+ seq_printf(seq, "AGG RXed: %d\n",
+ d->rx_agg);
+ seq_printf(seq, "TX miss: %d\n",
+ d->tx_cache_miss);
+ seq_printf(seq, "TX align: %d\n",
+ d->tx_align);
+ seq_printf(seq, "TX burst: %d\n",
+ d->tx_burst);
+ seq_printf(seq, "RX burst: %d\n",
+ d->rx_burst);
+ seq_printf(seq, "TX TTL: %d\n",
+ d->tx_ttl);
+ seq_printf(seq, "Scan: %s\n",
+ atomic_read(&priv->scan.in_progress) ? "active" : "idle");
+ seq_printf(seq, "Led state: 0x%.2X\n",
+ priv->softled_state);
+
+ return 0;
+}
+
+static int cw1200_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, &cw1200_status_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_status = {
+ .open = cw1200_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int cw1200_counters_show(struct seq_file *seq, void *v)
+{
+ int ret;
+ struct cw1200_common *priv = seq->private;
+ struct wsm_counters_table counters;
+
+ ret = wsm_get_counters_table(priv, &counters);
+ if (ret)
+ return ret;
+
+#define CAT_STR(x, y) x ## y
+#define PUT_COUNTER(tab, name) \
+ seq_printf(seq, "%s:" tab "%d\n", #name, \
+ __le32_to_cpu(counters.CAT_STR(count, name)))
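+/*
+ * For illustration: PUT_COUNTER("\t\t", PlcpErrors) expands to
+ * seq_printf(seq, "%s:\t\t%d\n", "PlcpErrors",
+ * __le32_to_cpu(counters.countPlcpErrors)),
+ * since CAT_STR() pastes the "count" prefix onto the field name.
+ */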
+
+ PUT_COUNTER("\t\t", PlcpErrors);
+ PUT_COUNTER("\t\t", FcsErrors);
+ PUT_COUNTER("\t\t", TxPackets);
+ PUT_COUNTER("\t\t", RxPackets);
+ PUT_COUNTER("\t\t", RxPacketErrors);
+ PUT_COUNTER("\t", RxDecryptionFailures);
+ PUT_COUNTER("\t\t", RxMicFailures);
+ PUT_COUNTER("\t", RxNoKeyFailures);
+ PUT_COUNTER("\t", TxMulticastFrames);
+ PUT_COUNTER("\t", TxFramesSuccess);
+ PUT_COUNTER("\t", TxFrameFailures);
+ PUT_COUNTER("\t", TxFramesRetried);
+ PUT_COUNTER("\t", TxFramesMultiRetried);
+ PUT_COUNTER("\t", RxFrameDuplicates);
+ PUT_COUNTER("\t\t", RtsSuccess);
+ PUT_COUNTER("\t\t", RtsFailures);
+ PUT_COUNTER("\t\t", AckFailures);
+ PUT_COUNTER("\t", RxMulticastFrames);
+ PUT_COUNTER("\t", RxFramesSuccess);
+ PUT_COUNTER("\t", RxCMACICVErrors);
+ PUT_COUNTER("\t\t", RxCMACReplays);
+ PUT_COUNTER("\t", RxMgmtCCMPReplays);
+
+#undef PUT_COUNTER
+#undef CAT_STR
+
+ return 0;
+}
+
+static int cw1200_counters_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, &cw1200_counters_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_counters = {
+ .open = cw1200_counters_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int cw1200_generic_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t cw1200_11n_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct cw1200_common *priv = file->private_data;
+ struct ieee80211_supported_band *band =
+ priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ return simple_read_from_buffer(user_buf, count, ppos,
+ band->ht_cap.ht_supported ? "1\n" : "0\n", 2);
+}
+
+static ssize_t cw1200_11n_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct cw1200_common *priv = file->private_data;
+ struct ieee80211_supported_band *band[2] = {
+ priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ],
+ priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ],
+ };
+ char buf[1];
+ int ena = 0;
+
+ if (!count)
+ return -EINVAL;
+ if (copy_from_user(buf, user_buf, 1))
+ return -EFAULT;
+ if (buf[0] == '1')
+ ena = 1;
+
+ band[0]->ht_cap.ht_supported = ena;
+#ifdef CONFIG_CW1200_5GHZ_SUPPORT
+ band[1]->ht_cap.ht_supported = ena;
+#endif /* CONFIG_CW1200_5GHZ_SUPPORT */
+
+ return count;
+}
+
+static const struct file_operations fops_11n = {
+ .open = cw1200_generic_open,
+ .read = cw1200_11n_read,
+ .write = cw1200_11n_write,
+ .llseek = default_llseek,
+};
+
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+static ssize_t cw1200_hang_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct cw1200_common *priv = file->private_data;
+ char buf[1];
+
+ if (!count)
+ return -EINVAL;
+ if (copy_from_user(buf, user_buf, 1))
+ return -EFAULT;
+
+ if (priv->vif) {
+ cw1200_pm_stay_awake(&priv->pm_state, 3*HZ);
+ ieee80211_driver_hang_notify(priv->vif, GFP_KERNEL);
+ } else
+ return -ENODEV;
+
+ return count;
+}
+
+static const struct file_operations fops_hang = {
+ .open = cw1200_generic_open,
+ .write = cw1200_hang_write,
+ .llseek = default_llseek,
+};
+#endif
+
+static ssize_t cw1200_wsm_dumps(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct cw1200_common *priv = file->private_data;
+ char buf[1];
+
+ if (!count)
+ return -EINVAL;
+ if (copy_from_user(buf, user_buf, 1))
+ return -EFAULT;
+
+ if (buf[0] == '1')
+ priv->wsm_enable_wsm_dumps = 1;
+ else
+ priv->wsm_enable_wsm_dumps = 0;
+
+ return count;
+}
+
+static const struct file_operations fops_wsm_dumps = {
+ .open = cw1200_generic_open,
+ .write = cw1200_wsm_dumps,
+ .llseek = default_llseek,
+};
+
+#if defined(CONFIG_CW1200_WSM_DUMPS_SHORT)
+static ssize_t cw1200_short_dump_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct cw1200_common *priv = file->private_data;
+ char buf[20];
+ size_t size = 0;
+
+ sprintf(buf, "Size: %u\n", priv->wsm_dump_max_size);
+ size = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, size);
+}
+
+static ssize_t cw1200_short_dump_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct cw1200_common *priv = file->private_data;
+ char buf[20];
+ unsigned long dump_size = 0;
+
+ if (!count || count > 20)
+ return -EINVAL;
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (kstrtoul(buf, 10, &dump_size))
+ return -EINVAL;
+ printk(KERN_ERR "%s get %lu\n", __func__, dump_size);
+
+ priv->wsm_dump_max_size = dump_size;
+
+ return count;
+}
+
+static const struct file_operations fops_short_dump = {
+ .open = cw1200_generic_open,
+ .write = cw1200_short_dump_write,
+ .read = cw1200_short_dump_read,
+ .llseek = default_llseek,
+};
+#endif /* CONFIG_CW1200_WSM_DUMPS_SHORT */
+
+int cw1200_debug_init(struct cw1200_common *priv)
+{
+ int ret = -ENOMEM;
+ struct cw1200_debug_priv *d = kzalloc(sizeof(struct cw1200_debug_priv),
+ GFP_KERNEL);
+ priv->debug = d;
+ if (!d)
+ return ret;
+
+ d->debugfs_phy = debugfs_create_dir("cw1200",
+ priv->hw->wiphy->debugfsdir);
+ if (!d->debugfs_phy)
+ goto err;
+
+ if (!debugfs_create_file("status", S_IRUSR, d->debugfs_phy,
+ priv, &fops_status))
+ goto err;
+
+ if (!debugfs_create_file("counters", S_IRUSR, d->debugfs_phy,
+ priv, &fops_counters))
+ goto err;
+
+ if (!debugfs_create_file("11n", S_IRUSR | S_IWUSR,
+ d->debugfs_phy, priv, &fops_11n))
+ goto err;
+
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ if (!debugfs_create_file("hang", S_IWUSR, d->debugfs_phy,
+ priv, &fops_hang))
+ goto err;
+#endif
+
+ if (!debugfs_create_file("wsm_dumps", S_IWUSR, d->debugfs_phy,
+ priv, &fops_wsm_dumps))
+ goto err;
+
+#if defined(CONFIG_CW1200_WSM_DUMPS_SHORT)
+ if (!debugfs_create_file("wsm_dump_size", S_IRUSR | S_IWUSR,
+ d->debugfs_phy, priv, &fops_short_dump))
+ goto err;
+#endif /* CONFIG_CW1200_WSM_DUMPS_SHORT */
+
+ ret = cw1200_itp_init(priv);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ priv->debug = NULL;
+ debugfs_remove_recursive(d->debugfs_phy);
+ kfree(d);
+ return ret;
+}
+
+void cw1200_debug_release(struct cw1200_common *priv)
+{
+ struct cw1200_debug_priv *d = priv->debug;
+ if (d) {
+ cw1200_itp_release(priv);
+ priv->debug = NULL;
+ kfree(d);
+ }
+}
+
+int cw1200_print_fw_version(struct cw1200_common *priv, u8 *buf, size_t len)
+{
+ return snprintf(buf, len, "%s %d.%d",
+ cw1200_debug_fw_types[priv->wsm_caps.firmwareType],
+ priv->wsm_caps.firmwareVersion,
+ priv->wsm_caps.firmwareBuildNumber);
+}
diff --git a/drivers/staging/cw1200/debug.h b/drivers/staging/cw1200/debug.h
new file mode 100644
index 00000000000..72b827f296b
--- /dev/null
+++ b/drivers/staging/cw1200/debug.h
@@ -0,0 +1,168 @@
+/*
+ * DebugFS code for ST-Ericsson CW1200 mac80211 driver
+ *
+ * Copyright (c) 2011, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_DEBUG_H_INCLUDED
+#define CW1200_DEBUG_H_INCLUDED
+
+#include "itp.h"
+
+struct cw1200_common;
+
+#ifdef CONFIG_CW1200_DEBUGFS
+
+struct cw1200_debug_priv {
+ struct dentry *debugfs_phy;
+ int tx;
+ int tx_agg;
+ int rx;
+ int rx_agg;
+ int tx_multi;
+ int tx_multi_frames;
+ int tx_cache_miss;
+ int tx_align;
+ int tx_ttl;
+ int tx_burst;
+ int rx_burst;
+ int ba_cnt;
+ int ba_acc;
+#ifdef CONFIG_CW1200_ITP
+ struct cw1200_itp itp;
+#endif /* CONFIG_CW1200_ITP */
+};
+
+int cw1200_debug_init(struct cw1200_common *priv);
+void cw1200_debug_release(struct cw1200_common *priv);
+
+static inline void cw1200_debug_txed(struct cw1200_common *priv)
+{
+ ++priv->debug->tx;
+}
+
+static inline void cw1200_debug_txed_agg(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_agg;
+}
+
+static inline void cw1200_debug_txed_multi(struct cw1200_common *priv,
+ int count)
+{
+ ++priv->debug->tx_multi;
+ priv->debug->tx_multi_frames += count;
+}
+
+static inline void cw1200_debug_rxed(struct cw1200_common *priv)
+{
+ ++priv->debug->rx;
+}
+
+static inline void cw1200_debug_rxed_agg(struct cw1200_common *priv)
+{
+ ++priv->debug->rx_agg;
+}
+
+static inline void cw1200_debug_tx_cache_miss(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_cache_miss;
+}
+
+static inline void cw1200_debug_tx_align(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_align;
+}
+
+static inline void cw1200_debug_tx_ttl(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_ttl;
+}
+
+static inline void cw1200_debug_tx_burst(struct cw1200_common *priv)
+{
+ ++priv->debug->tx_burst;
+}
+
+static inline void cw1200_debug_rx_burst(struct cw1200_common *priv)
+{
+ ++priv->debug->rx_burst;
+}
+
+static inline void cw1200_debug_ba(struct cw1200_common *priv,
+ int ba_cnt, int ba_acc)
+{
+ priv->debug->ba_cnt = ba_cnt;
+ priv->debug->ba_acc = ba_acc;
+}
+
+int cw1200_print_fw_version(struct cw1200_common *priv, u8 *buf, size_t len);
+
+#else /* CONFIG_CW1200_DEBUGFS */
+
+static inline int cw1200_debug_init(struct cw1200_common *priv)
+{
+ return 0;
+}
+
+static inline void cw1200_debug_release(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_debug_txed(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_debug_txed_agg(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_debug_txed_multi(struct cw1200_common *priv,
+ int count)
+{
+}
+
+static inline void cw1200_debug_rxed(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_debug_rxed_agg(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_debug_tx_cache_miss(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_debug_tx_align(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_debug_tx_ttl(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_debug_tx_burst(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_debug_rx_burst(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_debug_ba(struct cw1200_common *priv,
+ int ba_cnt, int ba_acc)
+{
+}
+
+static inline int cw1200_print_fw_version(struct cw1200_common *priv,
+ u8 *buf, size_t len)
+{
+ return 0;
+}
+
+#endif /* CONFIG_CW1200_DEBUGFS */
+
+#endif /* CW1200_DEBUG_H_INCLUDED */
diff --git a/drivers/staging/cw1200/fwio.c b/drivers/staging/cw1200/fwio.c
new file mode 100644
index 00000000000..72b77bc8bb0
--- /dev/null
+++ b/drivers/staging/cw1200/fwio.c
@@ -0,0 +1,594 @@
+/*
+ * Firmware I/O code for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+
+#include "cw1200.h"
+#include "fwio.h"
+#include "hwio.h"
+#include "sbus.h"
+#include "bh.h"
+
+static int cw1200_get_hw_type(u32 config_reg_val, int *major_revision)
+{
+ int hw_type = -1;
+ u32 silicon_type = (config_reg_val >> 24) & 0x3;
+ u32 silicon_vers = (config_reg_val >> 31) & 0x1;
+
+ /* Check if we have CW1200 or STLC9000 */
+ if ((silicon_type == 0x1) || (silicon_type == 0x2)) {
+ *major_revision = silicon_type;
+ if (silicon_vers)
+ hw_type = HIF_8601_VERSATILE;
+ else
+ hw_type = HIF_8601_SILICON;
+ } else {
+ *major_revision = 1;
+ hw_type = HIF_9000_SILICON_VERSTAILE;
+ }
+
+ return hw_type;
+}
+
+static int config_reg_read_stlc9000(struct cw1200_common *priv,
+ u16 reg, u32 *val)
+{
+ u16 val16;
+ int ret = cw1200_reg_read_16(priv, reg, &val16);
+ if (ret < 0)
+ return ret;
+ *val = val16;
+ return 0;
+}
+
+static int config_reg_write_stlc9000(struct cw1200_common *priv,
+ u16 reg, u32 val)
+{
+ return cw1200_reg_write_16(priv, reg, (u16)val);
+}
+
+static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
+{
+ int ret, block, num_blocks;
+ unsigned i;
+ u32 val32;
+ u32 put = 0, get = 0;
+ u8 *buf = NULL;
+ const char *fw_path;
+ const struct firmware *firmware = NULL;
+
+ /* These macros are local to this function. */
+#define APB_WRITE(reg, val) \
+ do { \
+ ret = cw1200_apb_write_32(priv, CW12000_APB(reg), (val)); \
+ if (ret < 0) { \
+ cw1200_dbg(CW1200_DBG_ERROR, \
+ "%s: can't write %s at line %d.\n", \
+ __func__, #reg, __LINE__); \
+ goto error; \
+ } \
+ } while (0)
+#define APB_READ(reg, val) \
+ do { \
+ ret = cw1200_apb_read_32(priv, CW12000_APB(reg), &(val)); \
+ if (ret < 0) { \
+ cw1200_dbg(CW1200_DBG_ERROR, \
+ "%s: can't read %s at line %d.\n", \
+ __func__, #reg, __LINE__); \
+ goto error; \
+ } \
+ } while (0)
+#define REG_WRITE(reg, val) \
+ do { \
+ ret = cw1200_reg_write_32(priv, (reg), (val)); \
+ if (ret < 0) { \
+ cw1200_dbg(CW1200_DBG_ERROR, \
+ "%s: can't write %s at line %d.\n", \
+ __func__, #reg, __LINE__); \
+ goto error; \
+ } \
+ } while (0)
+#define REG_READ(reg, val) \
+ do { \
+ ret = cw1200_reg_read_32(priv, (reg), &(val)); \
+ if (ret < 0) { \
+ cw1200_dbg(CW1200_DBG_ERROR, \
+ "%s: can't read %s at line %d.\n", \
+ __func__, #reg, __LINE__); \
+ goto error; \
+ } \
+ } while (0)
+
+ switch (priv->hw_revision) {
+ case CW1200_HW_REV_CUT10:
+ fw_path = FIRMWARE_CUT10;
+ break;
+ case CW1200_HW_REV_CUT11:
+ fw_path = FIRMWARE_CUT11;
+ break;
+ case CW1200_HW_REV_CUT20:
+ fw_path = FIRMWARE_CUT20;
+ break;
+ case CW1200_HW_REV_CUT22:
+ fw_path = FIRMWARE_CUT22;
+ break;
+ default:
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: invalid silicon revision %d.\n",
+ __func__, priv->hw_revision);
+ return -EINVAL;
+ }
+
+ /* Initialize common registers */
+ APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, DOWNLOAD_ARE_YOU_HERE);
+ APB_WRITE(DOWNLOAD_PUT_REG, 0);
+ APB_WRITE(DOWNLOAD_GET_REG, 0);
+ APB_WRITE(DOWNLOAD_STATUS_REG, DOWNLOAD_PENDING);
+ APB_WRITE(DOWNLOAD_FLAGS_REG, 0);
+
+ /* Write the NOP Instruction */
+ REG_WRITE(ST90TDS_SRAM_BASE_ADDR_REG_ID, 0xFFF20000);
+ REG_WRITE(ST90TDS_AHB_DPORT_REG_ID, 0xEAFFFFFE);
+
+ /* Release CPU from RESET */
+ REG_READ(ST90TDS_CONFIG_REG_ID, val32);
+ val32 &= ~ST90TDS_CONFIG_CPU_RESET_BIT;
+ REG_WRITE(ST90TDS_CONFIG_REG_ID, val32);
+
+ /* Enable Clock */
+ val32 &= ~ST90TDS_CONFIG_CPU_CLK_DIS_BIT;
+ REG_WRITE(ST90TDS_CONFIG_REG_ID, val32);
+
+ /* Load a firmware file */
+ ret = request_firmware(&firmware, fw_path, priv->pdev);
+ if (ret) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: can't load firmware file %s.\n",
+ __func__, fw_path);
+ goto error;
+ }
+ BUG_ON(!firmware->data);
+
+ buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: can't allocate firmware buffer.\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Check if the bootloader is ready */
+ for (i = 0; i < 100; i += 1 + i / 2) {
+ APB_READ(DOWNLOAD_IMAGE_SIZE_REG, val32);
+ if (val32 == DOWNLOAD_I_AM_HERE)
+ break;
+ mdelay(i);
+ } /* End of for loop */
+
+ if (val32 != DOWNLOAD_I_AM_HERE) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: bootloader is not ready.\n", __func__);
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* Calculate the number of download blocks */
+ num_blocks = (firmware->size - 1) / DOWNLOAD_BLOCK_SIZE + 1;
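+ /* e.g. a 305000-byte image rounds up to 298 blocks of
+ * DOWNLOAD_BLOCK_SIZE (1024) bytes each. */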
+
+ /* Updating the length in Download Ctrl Area */
+ val32 = firmware->size; /* narrowed from size_t to u32 */
+ APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, val32);
+
+ /* Firmware downloading loop */
+ for (block = 0; block < num_blocks ; block++) {
+ size_t tx_size;
+ size_t block_size;
+
+ /* check the download status */
+ APB_READ(DOWNLOAD_STATUS_REG, val32);
+ if (val32 != DOWNLOAD_PENDING) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: bootloader reported error %d.\n",
+ __func__, val32);
+ ret = -EIO;
+ goto error;
+ }
+
+ /* wait until the download FIFO has room for another block */
+ for (i = 0; i < 100; i++) {
+ APB_READ(DOWNLOAD_GET_REG, get);
+ if ((put - get) <=
+ (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE))
+ break;
+ mdelay(i);
+ }
+
+ if ((put - get) > (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Timeout waiting for FIFO.\n",
+ __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* calculate the block size */
+ tx_size = block_size = min((size_t)(firmware->size - put),
+ (size_t)DOWNLOAD_BLOCK_SIZE);
+
+ memcpy(buf, &firmware->data[put], block_size);
+ if (block_size < DOWNLOAD_BLOCK_SIZE) {
+ memset(&buf[block_size],
+ 0, DOWNLOAD_BLOCK_SIZE - block_size);
+ tx_size = DOWNLOAD_BLOCK_SIZE;
+ }
+
+ /* send the block to sram */
+ ret = cw1200_apb_write(priv,
+ CW12000_APB(DOWNLOAD_FIFO_OFFSET +
+ (put & (DOWNLOAD_FIFO_SIZE - 1))),
+ buf, tx_size);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: can't write block at line %d.\n",
+ __func__, __LINE__);
+ goto error;
+ }
+
+ /* update the put register */
+ put += block_size;
+ APB_WRITE(DOWNLOAD_PUT_REG, put);
+ } /* End of firmware download loop */
+
+ /* Wait for the download completion */
+ for (i = 0; i < 300; i += 1 + i / 2) {
+ APB_READ(DOWNLOAD_STATUS_REG, val32);
+ if (val32 != DOWNLOAD_PENDING)
+ break;
+ mdelay(i);
+ }
+ if (val32 != DOWNLOAD_SUCCESS) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: wait for download completion failed. " \
+ "Read: 0x%.8X\n", __func__, val32);
+ ret = -ETIMEDOUT;
+ goto error;
+ } else {
+ cw1200_dbg(CW1200_DBG_MSG,
+ "Firmware download completed.\n");
+ ret = 0;
+ }
+
+error:
+ kfree(buf);
+ if (firmware)
+ release_firmware(firmware);
+ return ret;
+
+#undef APB_WRITE
+#undef APB_READ
+#undef REG_WRITE
+#undef REG_READ
+}
+
+int cw1200_load_firmware(struct cw1200_common *priv)
+{
+ int ret;
+ int i;
+ u32 val32;
+ u16 val16;
+ u32 dpll = 0;
+ int major_revision;
+ int (*config_reg_read)(struct cw1200_common *priv, u16 reg, u32 *val);
+ int (*config_reg_write)(struct cw1200_common *priv, u16 reg, u32 val);
+
+ BUG_ON(!priv);
+
+ /* Read CONFIG Register Value - We will read 32 bits */
+ ret = cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: can't read config register.\n", __func__);
+ goto out;
+ }
+
+ priv->hw_type = cw1200_get_hw_type(val32, &major_revision);
+ if (priv->hw_type < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: can't deduct hardware type.\n", __func__);
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ switch (priv->hw_type) {
+ case HIF_8601_VERSATILE:
+ case HIF_8601_SILICON:
+ dpll = DPLL_INIT_VAL_CW1200;
+ config_reg_read = cw1200_reg_read_32;
+ config_reg_write = cw1200_reg_write_32;
+ break;
+ case HIF_9000_SILICON_VERSTAILE:
+ dpll = DPLL_INIT_VAL_9000;
+ config_reg_read = config_reg_read_stlc9000;
+ config_reg_write = config_reg_write_stlc9000;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, dpll);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: can't write DPLL register.\n", __func__);
+ goto out;
+ }
+
+ msleep(20);
+
+ /* Read DPLL Reg value and compare with value written */
+ ret = cw1200_reg_read_32(priv,
+ ST90TDS_TSET_GEN_R_W_REG_ID, &val32);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: can't read DPLL register.\n", __func__);
+ goto out;
+ }
+
+ if (val32 != dpll) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: unable to initialise " \
+ "DPLL register. Wrote 0x%.8X, read 0x%.8X.\n",
+ __func__, dpll, val32);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Set wakeup bit in device */
+ ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, &val16);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: set_wakeup: can't read " \
+ "control register.\n", __func__);
+ goto out;
+ }
+
+ ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
+ val16 | ST90TDS_CONT_WUP_BIT);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: set_wakeup: can't write " \
+ "control register.\n", __func__);
+ goto out;
+ }
+
+ /* Wait for wakeup */
+ for (i = 0 ; i < 300 ; i += 1 + i / 2) {
+ ret = cw1200_reg_read_16(priv,
+ ST90TDS_CONTROL_REG_ID, &val16);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: wait_for_wakeup: can't read " \
+ "control register.\n", __func__);
+ goto out;
+ }
+
+ if (val16 & ST90TDS_CONT_RDY_BIT) {
+ cw1200_dbg(CW1200_DBG_MSG,
+ "WLAN device is ready.\n");
+ break;
+ }
+ msleep(i);
+ }
+
+ if ((val16 & ST90TDS_CONT_RDY_BIT) == 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: wait_for_wakeup: device is not responding.\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (major_revision == 1) {
+ /* CW1200 Hardware detection logic : Check for CUT1.1 */
+ ret = cw1200_ahb_read_32(priv, CW1200_CUT_ID_ADDR, &val32);
+ if (ret) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: HW detection: can't read CUT ID.\n",
+ __func__);
+ goto out;
+ }
+
+ switch (val32) {
+ case CW1200_CUT_11_ID_STR:
+ cw1200_dbg(CW1200_DBG_MSG,
+ "Cut 1.1 silicon is detected.\n");
+ priv->hw_revision = CW1200_HW_REV_CUT11;
+ break;
+ default:
+ cw1200_dbg(CW1200_DBG_MSG,
+ "Cut 1.0 silicon is detected.\n");
+ priv->hw_revision = CW1200_HW_REV_CUT10;
+ break;
+ }
+ } else if (major_revision == 2) {
+ u32 ar1, ar2, ar3;
+ cw1200_dbg(CW1200_DBG_MSG, "Cut 2.x silicon is detected.\n");
+
+ ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR, &ar1);
+ if (ret) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: (1) HW detection: can't read CUT ID.\n",
+ __func__);
+ goto out;
+ }
+ ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 4, &ar2);
+ if (ret) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: (2) HW detection: can't read CUT ID.\n",
+ __func__);
+ goto out;
+ }
+
+ ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 8, &ar3);
+ if (ret) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: (3) HW detection: can't read CUT ID.\n",
+ __func__);
+ goto out;
+ }
+
+ if (ar1 == CW1200_CUT_22_ID_STR1 &&
+ ar2 == CW1200_CUT_22_ID_STR2 &&
+ ar3 == CW1200_CUT_22_ID_STR3) {
+ cw1200_dbg(CW1200_DBG_MSG, "Cut 2.2 detected.\n");
+ priv->hw_revision = CW1200_HW_REV_CUT22;
+ } else {
+ cw1200_dbg(CW1200_DBG_MSG, "Cut 2.0 detected.\n");
+ priv->hw_revision = CW1200_HW_REV_CUT20;
+ }
+ } else {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: unsupported silicon major revision %d.\n",
+ __func__, major_revision);
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ /* Checking for access mode */
+ ret = config_reg_read(priv, ST90TDS_CONFIG_REG_ID, &val32);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: check_access_mode: can't read " \
+ "config register.\n", __func__);
+ goto out;
+ }
+
+ if (val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT) {
+ switch (priv->hw_type) {
+ case HIF_8601_SILICON:
+ cw1200_dbg(CW1200_DBG_MSG,
+ "%s: CW1200 detected.\n", __func__);
+ ret = cw1200_load_firmware_cw1200(priv);
+ break;
+ case HIF_8601_VERSATILE:
+ /* TODO: Not implemented yet!
+ ret = cw1200_load_firmware_cw1100(priv);
+ */
+ ret = -ENOTSUPP;
+ goto out;
+ case HIF_9000_SILICON_VERSTAILE:
+ /* TODO: Not implemented yet!
+ ret = cw1200_load_firmware_stlc9000(priv);
+ */
+ ret = -ENOTSUPP;
+ goto out;
+ default:
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Unknown hardware: %d.\n",
+ __func__, priv->hw_type);
+ }
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: can't download firmware.\n", __func__);
+ goto out;
+ }
+ } else {
+ cw1200_dbg(CW1200_DBG_MSG,
+ "%s: check_access_mode: device is already " \
+ "in QUEUE mode.\n", __func__);
+ /* TODO: verify this branch. Do we need something to do? */
+ }
+
+ /* Register Interrupt Handler */
+ ret = priv->sbus_ops->irq_subscribe(priv->sbus_priv,
+ (sbus_irq_handler)cw1200_irq_handler, priv);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: can't register IRQ handler.\n", __func__);
+ goto out;
+ }
+
+ if (HIF_8601_SILICON == priv->hw_type) {
+ /* If device is CW1200 the IRQ enable/disable bits
+ * are in CONFIG register */
+ ret = config_reg_read(priv, ST90TDS_CONFIG_REG_ID, &val32);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: enable_irq: can't read " \
+ "config register.\n", __func__);
+ goto unsubscribe;
+ }
+ ret = config_reg_write(priv, ST90TDS_CONFIG_REG_ID,
+ val32 | ST90TDS_CONF_IRQ_RDY_ENABLE);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: enable_irq: can't write " \
+ "config register.\n", __func__);
+ goto unsubscribe;
+ }
+ } else {
+ /* If device is STLC9000 the IRQ enable/disable bits
+ * are in CONTROL register */
+ /* Enable device interrupts - Both DATA_RDY and WLAN_RDY */
+ ret = cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: enable_irq: can't read " \
+ "control register.\n", __func__);
+ goto unsubscribe;
+ }
+ ret = cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID,
+ val16 | ST90TDS_CONT_IRQ_RDY_ENABLE);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: enable_irq: can't write " \
+ "control register.\n", __func__);
+ goto unsubscribe;
+ }
+
+ }
+
+ /* Configure device for MESSAGE MODE */
+ ret = config_reg_read(priv, ST90TDS_CONFIG_REG_ID, &val32);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: set_mode: can't read config register.\n",
+ __func__);
+ goto unsubscribe;
+ }
+ ret = config_reg_write(priv, ST90TDS_CONFIG_REG_ID,
+ val32 & ~ST90TDS_CONFIG_ACCESS_MODE_BIT);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: set_mode: can't write config register.\n",
+ __func__);
+ goto unsubscribe;
+ }
+
+ /* The device cannot raise an interrupt until the
+ * CONFIG register has been read. */
+ mdelay(10);
+ config_reg_read(priv, ST90TDS_CONFIG_REG_ID, &val32);
+
+out:
+ return ret;
+
+unsubscribe:
+ priv->sbus_ops->irq_unsubscribe(priv->sbus_priv);
+ return ret;
+}
+
diff --git a/drivers/staging/cw1200/fwio.h b/drivers/staging/cw1200/fwio.h
new file mode 100644
index 00000000000..cb91b8dc481
--- /dev/null
+++ b/drivers/staging/cw1200/fwio.h
@@ -0,0 +1,36 @@
+/*
+ * Firmware API for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FWIO_H_INCLUDED
+#define FWIO_H_INCLUDED
+
+#define FIRMWARE_CUT22 ("cw1200/wsm_22.bin")
+#define FIRMWARE_CUT20 ("cw1200/wsm_20.bin")
+#define FIRMWARE_CUT11 ("cw1200/wsm_11.bin")
+#define FIRMWARE_CUT10 ("cw1200/wsm_10.bin")
+#define SDD_FILE_22 ("cw1200/sdd_22.bin")
+#define SDD_FILE_20 ("cw1200/sdd_20.bin")
+#define SDD_FILE_11 ("cw1200/sdd_11.bin")
+#define SDD_FILE_10 ("cw1200/sdd_10.bin")
+
+#define CW1200_HW_REV_CUT10 (10)
+#define CW1200_HW_REV_CUT11 (11)
+#define CW1200_HW_REV_CUT20 (20)
+#define CW1200_HW_REV_CUT22 (22)
+
+int cw1200_load_firmware(struct cw1200_common *priv);
+
+#endif
diff --git a/drivers/staging/cw1200/ht.h b/drivers/staging/cw1200/ht.h
new file mode 100644
index 00000000000..5c486a634c7
--- /dev/null
+++ b/drivers/staging/cw1200/ht.h
@@ -0,0 +1,43 @@
+/*
+ * HT-related code for ST-Ericsson CW1200 driver
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_HT_H_INCLUDED
+#define CW1200_HT_H_INCLUDED
+
+#include <net/mac80211.h>
+
+struct cw1200_ht_info {
+ struct ieee80211_sta_ht_cap ht_cap;
+ enum nl80211_channel_type channel_type;
+ u16 operation_mode;
+};
+
+static inline int cw1200_is_ht(const struct cw1200_ht_info *ht_info)
+{
+ return ht_info->channel_type != NL80211_CHAN_NO_HT;
+}
+
+static inline int cw1200_ht_greenfield(const struct cw1200_ht_info *ht_info)
+{
+ return cw1200_is_ht(ht_info) &&
+ (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ht_info->operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+}
+
+static inline int cw1200_ht_ampdu_density(const struct cw1200_ht_info *ht_info)
+{
+ if (!cw1200_is_ht(ht_info))
+ return 0;
+ return ht_info->ht_cap.ampdu_density;
+}
+
+#endif /* CW1200_HT_H_INCLUDED */
diff --git a/drivers/staging/cw1200/hwio.c b/drivers/staging/cw1200/hwio.c
new file mode 100644
index 00000000000..b544a0a4a80
--- /dev/null
+++ b/drivers/staging/cw1200/hwio.c
@@ -0,0 +1,287 @@
+/*
+ * Low-level device IO routines for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver, which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+
+#include "cw1200.h"
+#include "hwio.h"
+#include "sbus.h"
+
+/* The SDIO address is 4 * the SPI address */
+#define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2)
+#define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \
+ ((((buf_id) & 0x1F) << 7) \
+ | (((mpf) & 1) << 6) \
+ | (((rfu) & 1) << 5) \
+ | (((reg_id_ofs) & 0x1F) << 0))
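+/*
+ * Worked example: ST90TDS_IN_OUT_QUEUE_REG_ID (0x0002) becomes SDIO
+ * address 0x0008 after the <<2 shift; with buf_id = 1 the 17-bit
+ * encoding above yields (1 << 7) | 0x08 = 0x88.
+ */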
+#define MAX_RETRY 3
+
+
+static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr,
+ void *buf, size_t buf_len, int buf_id)
+{
+ u16 addr_sdio;
+ u32 sdio_reg_addr_17bit;
+
+ /* Check if buffer is aligned to 4 byte boundary */
+ if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: buffer is not aligned.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Convert to SDIO Register Address */
+ addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
+ sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);
+
+ BUG_ON(!priv->sbus_ops);
+ return priv->sbus_ops->sbus_memcpy_fromio(priv->sbus_priv,
+ sdio_reg_addr_17bit,
+ buf, buf_len);
+}
+
+static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr,
+ const void *buf, size_t buf_len, int buf_id)
+{
+ u16 addr_sdio;
+ u32 sdio_reg_addr_17bit;
+
+#if 0
+ /* Check if buffer is aligned to 4 byte boundary */
+ if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) {
+ cw1200_dbg(CW1200_DBG_ERROR, "%s: buffer is not aligned.\n",
+ __func__);
+ return -EINVAL;
+ }
+#endif
+
+ /* Convert to SDIO Register Address */
+ addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
+ sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);
+
+ BUG_ON(!priv->sbus_ops);
+ return priv->sbus_ops->sbus_memcpy_toio(priv->sbus_priv,
+ sdio_reg_addr_17bit,
+ buf, buf_len);
+}
+
+static inline int __cw1200_reg_read_32(struct cw1200_common *priv,
+ u16 addr, u32 *val)
+{
+ return __cw1200_reg_read(priv, addr, val, sizeof(*val), 0);
+}
+
+static inline int __cw1200_reg_write_32(struct cw1200_common *priv,
+ u16 addr, u32 val)
+{
+ return __cw1200_reg_write(priv, addr, &val, sizeof(val), 0);
+}
+
+int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf,
+ size_t buf_len)
+{
+ int ret;
+ BUG_ON(!priv->sbus_ops);
+ priv->sbus_ops->lock(priv->sbus_priv);
+ ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0);
+ priv->sbus_ops->unlock(priv->sbus_priv);
+ return ret;
+}
+
+int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf,
+ size_t buf_len)
+{
+ int ret;
+ BUG_ON(!priv->sbus_ops);
+ priv->sbus_ops->lock(priv->sbus_priv);
+ ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0);
+ priv->sbus_ops->unlock(priv->sbus_priv);
+ return ret;
+}
+
+int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len)
+{
+ int ret, retry = 1;
+ BUG_ON(!priv->sbus_ops);
+ priv->sbus_ops->lock(priv->sbus_priv);
+ {
+ int buf_id_rx = priv->buf_id_rx;
+ while (retry <= MAX_RETRY) {
+ ret = __cw1200_reg_read(priv,
+ ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
+ buf_len, buf_id_rx + 1);
+ if (!ret) {
+ buf_id_rx = (buf_id_rx + 1) & 3;
+ priv->buf_id_rx = buf_id_rx;
+ break;
+ } else {
+ retry++;
+ mdelay(1);
+ cw1200_dbg(CW1200_DBG_ERROR, "%s,error :[%d]\n",
+ __func__, ret);
+ }
+ }
+ }
+ priv->sbus_ops->unlock(priv->sbus_priv);
+ return ret;
+}
+
+int cw1200_data_write(struct cw1200_common *priv, const void *buf,
+ size_t buf_len)
+{
+ int ret, retry = 1;
+ BUG_ON(!priv->sbus_ops);
+ priv->sbus_ops->lock(priv->sbus_priv);
+ {
+ int buf_id_tx = priv->buf_id_tx;
+ while (retry <= MAX_RETRY) {
+ ret = __cw1200_reg_write(priv,
+ ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
+ buf_len, buf_id_tx);
+ if (!ret) {
+ buf_id_tx = (buf_id_tx + 1) & 31;
+ priv->buf_id_tx = buf_id_tx;
+ break;
+ } else {
+ retry++;
+ mdelay(1);
+ cw1200_dbg(CW1200_DBG_ERROR, "%s,error :[%d]\n",
+ __func__, ret);
+ }
+ }
+ }
+ priv->sbus_ops->unlock(priv->sbus_priv);
+ return ret;
+}
+
+int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,
+ size_t buf_len, u32 prefetch, u16 port_addr)
+{
+ u32 val32 = 0;
+ int i, ret;
+
+ if ((buf_len / 2) >= 0x1000) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Can't read more than 0xfff words.\n",
+ __func__);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ priv->sbus_ops->lock(priv->sbus_priv);
+ /* Write address */
+ ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Can't write address register.\n",
+ __func__);
+ goto out;
+ }
+
+ /* Read CONFIG Register Value - We will read 32 bits */
+ ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Can't read config register.\n",
+ __func__);
+ goto out;
+ }
+
+ /* Set PREFETCH bit */
+ ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID,
+ val32 | prefetch);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Can't write prefetch bit.\n",
+ __func__);
+ goto out;
+ }
+
+ /* Check for PRE-FETCH bit to be cleared */
+ for (i = 0; i < 20; i++) {
+ ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Can't check prefetch bit.\n",
+ __func__);
+ goto out;
+ }
+ if (!(val32 & prefetch))
+ break;
+
+ mdelay(i);
+ }
+
+ if (val32 & prefetch) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Prefetch bit is not cleared.\n",
+ __func__);
+ goto out;
+ }
+
+ /* Read data port */
+ ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Can't read data port.\n",
+ __func__);
+ goto out;
+ }
+
+out:
+ priv->sbus_ops->unlock(priv->sbus_priv);
+ return ret;
+}
+
+int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf,
+ size_t buf_len)
+{
+ int ret;
+
+ if ((buf_len / 2) >= 0x1000) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Can't wrire more than 0xfff words.\n",
+ __func__);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ priv->sbus_ops->lock(priv->sbus_priv);
+
+ /* Write address */
+ ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: Can't write address register.\n",
+ __func__);
+ goto out;
+ }
+
+ /* Write data port */
+ ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID,
+ buf, buf_len, 0);
+ if (ret < 0) {
+ cw1200_dbg(CW1200_DBG_ERROR, "%s: Can't write data port.\n",
+ __func__);
+ goto out;
+ }
+
+out:
+ priv->sbus_ops->unlock(priv->sbus_priv);
+ return ret;
+}
+
diff --git a/drivers/staging/cw1200/hwio.h b/drivers/staging/cw1200/hwio.h
new file mode 100644
index 00000000000..25c8f6b4c28
--- /dev/null
+++ b/drivers/staging/cw1200/hwio.h
@@ -0,0 +1,243 @@
+/*
+ * Low-level API for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_HWIO_H_INCLUDED
+#define CW1200_HWIO_H_INCLUDED
+
+/* extern */ struct cw1200_common;
+
+/* DPLL initial values */
+#define DPLL_INIT_VAL_9000 (0x00000191)
+#define DPLL_INIT_VAL_CW1200 (0x0EC4F121)
+
+/* Hardware Type Definitions */
+#define HIF_8601_VERSATILE (0)
+#define HIF_8601_SILICON (1)
+#define HIF_9000_SILICON_VERSTAILE (2)
+
+#define CW1200_CUT_11_ID_STR (0x302E3830)
+#define CW1200_CUT_22_ID_STR1 (0x302e3132)
+#define CW1200_CUT_22_ID_STR2 (0x32302e30)
+#define CW1200_CUT_22_ID_STR3 (0x3335)
+#define CW1200_CUT_ID_ADDR (0xFFF17F90)
+#define CW1200_CUT2_ID_ADDR (0xFFF1FF90)
+
+/* Download control area */
+/* boot loader start address in SRAM */
+#define DOWNLOAD_BOOT_LOADER_OFFSET (0x00000000)
+/* 32K, 0x4000 to 0xDFFF */
+#define DOWNLOAD_FIFO_OFFSET (0x00004000)
+/* 32K */
+#define DOWNLOAD_FIFO_SIZE (0x00008000)
+/* 128 bytes, 0xFF80 to 0xFFFF */
+#define DOWNLOAD_CTRL_OFFSET (0x0000FF80)
+#define DOWNLOAD_CTRL_DATA_DWORDS (32-6)
+
+struct download_cntl_t {
+ /* size of whole firmware file (including checksum), host init */
+ u32 ImageSize;
+ /* downloading flags */
+ u32 Flags;
+ /* No. of bytes put into the download, init & updated by host */
+ u32 Put;
+ /* last traced program counter, last ARM reg_pc */
+ u32 TracePc;
+ /* No. of bytes read from the download, host init, device updates */
+ u32 Get;
+ /* r0, boot loader status, host init to pending, device updates */
+ u32 Status;
+ /* Extra debug info, r1 to r14 if status=r0=DOWNLOAD_EXCEPTION */
+ u32 DebugData[DOWNLOAD_CTRL_DATA_DWORDS];
+};
+
+#define DOWNLOAD_IMAGE_SIZE_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, ImageSize))
+#define DOWNLOAD_FLAGS_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, Flags))
+#define DOWNLOAD_PUT_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, Put))
+#define DOWNLOAD_TRACE_PC_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, TracePc))
+#define DOWNLOAD_GET_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, Get))
+#define DOWNLOAD_STATUS_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, Status))
+#define DOWNLOAD_DEBUG_DATA_REG \
+ (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, DebugData))
+#define DOWNLOAD_DEBUG_DATA_LEN (108)
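+/*
+ * With DOWNLOAD_CTRL_OFFSET at 0xFF80 and the field layout above
+ * (assuming no padding in struct download_cntl_t), these resolve to
+ * 0xFF80 (ImageSize), 0xFF84 (Flags), 0xFF88 (Put), 0xFF8C (TracePc),
+ * 0xFF90 (Get) and 0xFF94 (Status).
+ */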
+
+#define DOWNLOAD_BLOCK_SIZE (1024)
+
+/* For boot loader detection */
+#define DOWNLOAD_ARE_YOU_HERE (0x87654321)
+#define DOWNLOAD_I_AM_HERE (0x12345678)
+
+/* Download error code */
+#define DOWNLOAD_PENDING (0xFFFFFFFF)
+#define DOWNLOAD_SUCCESS (0)
+#define DOWNLOAD_EXCEPTION (1)
+#define DOWNLOAD_ERR_MEM_1 (2)
+#define DOWNLOAD_ERR_MEM_2 (3)
+#define DOWNLOAD_ERR_SOFTWARE (4)
+#define DOWNLOAD_ERR_FILE_SIZE (5)
+#define DOWNLOAD_ERR_CHECKSUM (6)
+#define DOWNLOAD_ERR_OVERFLOW (7)
+#define DOWNLOAD_ERR_IMAGE (8)
+#define DOWNLOAD_ERR_HOST (9)
+#define DOWNLOAD_ERR_ABORT (10)
+
+
+#define SYS_BASE_ADDR_SILICON (0)
+#define PAC_BASE_ADDRESS_SILICON (SYS_BASE_ADDR_SILICON + 0x09000000)
+#define PAC_SHARED_MEMORY_SILICON (PAC_BASE_ADDRESS_SILICON)
+
+#define CW12000_APB(addr) (PAC_SHARED_MEMORY_SILICON + (addr))
+
+/* ***************************************************************
+ * Device register definitions
+ *************************************************************** */
+/* WBF - SPI Register Addresses */
+#define ST90TDS_ADDR_ID_BASE (0x0000)
+/* 16/32 bits */
+#define ST90TDS_CONFIG_REG_ID (0x0000)
+/* 16/32 bits */
+#define ST90TDS_CONTROL_REG_ID (0x0001)
+/* 16 bits, Q mode W/R */
+#define ST90TDS_IN_OUT_QUEUE_REG_ID (0x0002)
+/* 32 bits, AHB bus R/W */
+#define ST90TDS_AHB_DPORT_REG_ID (0x0003)
+/* 16/32 bits */
+#define ST90TDS_SRAM_BASE_ADDR_REG_ID (0x0004)
+/* 32 bits, APB bus R/W */
+#define ST90TDS_SRAM_DPORT_REG_ID (0x0005)
+/* 32 bits, t_settle/general */
+#define ST90TDS_TSET_GEN_R_W_REG_ID (0x0006)
+/* 16 bits, Q mode read, no length */
+#define ST90TDS_FRAME_OUT_REG_ID (0x0007)
+#define ST90TDS_ADDR_ID_MAX (ST90TDS_FRAME_OUT_REG_ID)
+
+/* WBF - Control register bit set */
+/* next o/p length, bit 11 to 0 */
+#define ST90TDS_CONT_NEXT_LEN_MASK (0x0FFF)
+#define ST90TDS_CONT_WUP_BIT (BIT(12))
+#define ST90TDS_CONT_RDY_BIT (BIT(13))
+#define ST90TDS_CONT_IRQ_ENABLE (BIT(14))
+#define ST90TDS_CONT_RDY_ENABLE (BIT(15))
+#define ST90TDS_CONT_IRQ_RDY_ENABLE (BIT(14)|BIT(15))
+
+/* SPI Config register bit set */
+#define ST90TDS_CONFIG_FRAME_BIT (BIT(2))
+#define ST90TDS_CONFIG_WORD_MODE_BITS (BIT(3)|BIT(4))
+#define ST90TDS_CONFIG_WORD_MODE_1 (BIT(3))
+#define ST90TDS_CONFIG_WORD_MODE_2 (BIT(4))
+#define ST90TDS_CONFIG_ERROR_0_BIT (BIT(5))
+#define ST90TDS_CONFIG_ERROR_1_BIT (BIT(6))
+#define ST90TDS_CONFIG_ERROR_2_BIT (BIT(7))
+/* TBD: Sure??? */
+#define ST90TDS_CONFIG_CSN_FRAME_BIT (BIT(7))
+#define ST90TDS_CONFIG_ERROR_3_BIT (BIT(8))
+#define ST90TDS_CONFIG_ERROR_4_BIT (BIT(9))
+/* QueueM */
+#define ST90TDS_CONFIG_ACCESS_MODE_BIT (BIT(10))
+/* AHB bus */
+#define ST90TDS_CONFIG_AHB_PFETCH_BIT (BIT(11))
+#define ST90TDS_CONFIG_CPU_CLK_DIS_BIT (BIT(12))
+/* APB bus */
+#define ST90TDS_CONFIG_PFETCH_BIT (BIT(13))
+/* cpu reset */
+#define ST90TDS_CONFIG_CPU_RESET_BIT (BIT(14))
+#define ST90TDS_CONFIG_CLEAR_INT_BIT (BIT(15))
+
+/* For CW1200 the IRQ Enable and Ready Bits are in CONFIG register */
+#define ST90TDS_CONF_IRQ_RDY_ENABLE (BIT(16)|BIT(17))
+
+int cw1200_data_read(struct cw1200_common *priv,
+ void *buf, size_t buf_len);
+int cw1200_data_write(struct cw1200_common *priv,
+ const void *buf, size_t buf_len);
+
+int cw1200_reg_read(struct cw1200_common *priv, u16 addr,
+ void *buf, size_t buf_len);
+int cw1200_reg_write(struct cw1200_common *priv, u16 addr,
+ const void *buf, size_t buf_len);
+
+static inline int cw1200_reg_read_16(struct cw1200_common *priv,
+ u16 addr, u16 *val)
+{
+ u32 bigVal;
+ int ret;
+ ret = cw1200_reg_read(priv, addr, &bigVal, sizeof(bigVal));
+ *val = (u16)bigVal;
+ return ret;
+}
+
+static inline int cw1200_reg_write_16(struct cw1200_common *priv,
+ u16 addr, u16 val)
+{
+ u32 bigVal = (u32)val;
+ return cw1200_reg_write(priv, addr, &bigVal, sizeof(bigVal));
+}
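+/*
+ * Note: the 16-bit helpers above stage the value in a u32 temporary,
+ * so the underlying sbus transfer is always sizeof(u32) bytes.
+ */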
+
+static inline int cw1200_reg_read_32(struct cw1200_common *priv,
+ u16 addr, u32 *val)
+{
+ return cw1200_reg_read(priv, addr, val, sizeof(*val));
+}
+
+static inline int cw1200_reg_write_32(struct cw1200_common *priv,
+ u16 addr, u32 val)
+{
+ return cw1200_reg_write(priv, addr, &val, sizeof(val));
+}
+
+int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,
+ size_t buf_len, u32 prefetch, u16 port_addr);
+int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf,
+ size_t buf_len);
+
+static inline int cw1200_apb_read(struct cw1200_common *priv, u32 addr,
+ void *buf, size_t buf_len)
+{
+ return cw1200_indirect_read(priv, addr, buf, buf_len,
+ ST90TDS_CONFIG_PFETCH_BIT, ST90TDS_SRAM_DPORT_REG_ID);
+}
+
+static inline int cw1200_ahb_read(struct cw1200_common *priv, u32 addr,
+ void *buf, size_t buf_len)
+{
+ return cw1200_indirect_read(priv, addr, buf, buf_len,
+ ST90TDS_CONFIG_AHB_PFETCH_BIT, ST90TDS_AHB_DPORT_REG_ID);
+}
+
+static inline int cw1200_apb_read_32(struct cw1200_common *priv,
+ u32 addr, u32 *val)
+{
+ return cw1200_apb_read(priv, addr, val, sizeof(*val));
+}
+
+static inline int cw1200_apb_write_32(struct cw1200_common *priv,
+ u32 addr, u32 val)
+{
+ return cw1200_apb_write(priv, addr, &val, sizeof(val));
+}
+
+static inline int cw1200_ahb_read_32(struct cw1200_common *priv,
+ u32 addr, u32 *val)
+{
+ return cw1200_ahb_read(priv, addr, val, sizeof(*val));
+}
+
+#endif /* CW1200_HWIO_H_INCLUDED */
diff --git a/drivers/staging/cw1200/itp.c b/drivers/staging/cw1200/itp.c
new file mode 100644
index 00000000000..eb7e53bf096
--- /dev/null
+++ b/drivers/staging/cw1200/itp.c
@@ -0,0 +1,739 @@
+/*
+ * ITP code for the ST-Ericsson CW1200 mac80211 driver
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/poll.h>
+#include <linux/time.h>
+#include <linux/kallsyms.h>
+#include <net/mac80211.h>
+#include "cw1200.h"
+#include "debug.h"
+#include "itp.h"
+#include "sta.h"
+
+static int __cw1200_itp_open(struct cw1200_common *priv);
+static int __cw1200_itp_close(struct cw1200_common *priv);
+static void cw1200_itp_rx_start(struct cw1200_common *priv);
+static void cw1200_itp_rx_stop(struct cw1200_common *priv);
+static void cw1200_itp_rx_stats(struct cw1200_common *priv);
+static void cw1200_itp_rx_reset(struct cw1200_common *priv);
+static void cw1200_itp_tx_stop(struct cw1200_common *priv);
+static void cw1200_itp_handle(struct cw1200_common *priv,
+ struct sk_buff *skb);
+static void cw1200_itp_err(struct cw1200_common *priv,
+ int err,
+ int arg);
+static void __cw1200_itp_tx_stop(struct cw1200_common *priv);
+
+static ssize_t cw1200_itp_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct cw1200_common *priv = file->private_data;
+ struct cw1200_itp *itp = &priv->debug->itp;
+ struct sk_buff *skb;
+ int ret;
+
+ if (skb_queue_empty(&itp->log_queue))
+ return 0;
+
+ skb = skb_dequeue(&itp->log_queue);
+ ret = copy_to_user(user_buf, skb->data, skb->len);
+ *ppos += skb->len;
+ skb->data[skb->len] = 0;
+ itp_printk(KERN_DEBUG "[ITP] >>> %s", skb->data);
+ ret = skb->len - ret;
+ consume_skb(skb);
+
+ return ret;
+}
+
+static ssize_t cw1200_itp_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct cw1200_common *priv = file->private_data;
+ struct sk_buff *skb;
+
+ if (!count || count > 1024)
+ return -EINVAL;
+ skb = dev_alloc_skb(count + 1);
+ if (!skb)
+ return -ENOMEM;
+ skb_trim(skb, 0);
+ skb_put(skb, count + 1);
+ if (copy_from_user(skb->data, user_buf, count)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+ skb->data[count] = 0;
+
+ cw1200_itp_handle(priv, skb);
+ consume_skb(skb);
+ return count;
+}
+
+static unsigned int cw1200_itp_poll(struct file *file, poll_table *wait)
+{
+ struct cw1200_common *priv = file->private_data;
+ struct cw1200_itp *itp = &priv->debug->itp;
+ unsigned int mask = 0;
+
+ poll_wait(file, &itp->read_wait, wait);
+
+ if (!skb_queue_empty(&itp->log_queue))
+ mask |= POLLIN | POLLRDNORM;
+
+ mask |= POLLOUT | POLLWRNORM;
+
+ return mask;
+}
+
+static int cw1200_itp_open(struct inode *inode, struct file *file)
+{
+ struct cw1200_common *priv = inode->i_private;
+ struct cw1200_itp *itp = &priv->debug->itp;
+ int ret = 0;
+
+ file->private_data = priv;
+ if (atomic_inc_return(&itp->open_count) == 1) {
+ ret = __cw1200_itp_open(priv);
+ if (ret && !atomic_dec_return(&itp->open_count))
+ __cw1200_itp_close(priv);
+ } else {
+ atomic_dec(&itp->open_count);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+static int cw1200_itp_close(struct inode *inode, struct file *file)
+{
+ struct cw1200_common *priv = file->private_data;
+ struct cw1200_itp *itp = &priv->debug->itp;
+ if (!atomic_dec_return(&itp->open_count)) {
+ __cw1200_itp_close(priv);
+ wake_up(&itp->close_wait);
+ }
+ return 0;
+}
+
+static const struct file_operations fops_itp = {
+ .open = cw1200_itp_open,
+ .read = cw1200_itp_read,
+ .write = cw1200_itp_write,
+ .poll = cw1200_itp_poll,
+ .release = cw1200_itp_close,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
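+
+/*
+ * Illustrative summary (derived from the sscanf() formats in
+ * cw1200_itp_handle() below) of the text commands accepted on the "itp"
+ * debugfs file:
+ *   "1,<band>,<ch>"  - start an RX test on the given band/channel
+ *   "2"              - queue the current RX statistics for reading
+ *   "3"              - stop a running RX or TX test
+ *   "4,<band>,<ch>,<rate>,<preamble>,<number>,<data_mode>,<interval_us>,
+ *      <power>,<data_len>" - start a TX test
+ *   "5" / "6"        - queue version information (ITP_CHIP_ID / ITP_FW_VER)
+ * Results and error codes are read back from the same file as
+ * comma-separated lines.
+ */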
+
+static void cw1200_itp_fill_pattern(u8 *data, int size,
+ enum cw1200_itp_data_modes mode)
+{
+ u8 *p = data;
+
+ if (size <= 0)
+ return;
+
+ switch (mode) {
+ default:
+ case ITP_DATA_ZEROS:
+ memset(data, 0x0, size);
+ break;
+ case ITP_DATA_ONES:
+ memset(data, 0xff, size);
+ break;
+ case ITP_DATA_ZERONES:
+ memset(data, 0x55, size);
+ break;
+ case ITP_DATA_RANDOM:
+ while (p < data+size-sizeof(u32)) {
+ (*(u32 *)p) = random32();
+ p += sizeof(u32);
+ }
+ while (p < data+size) {
+ (*p) = random32() & 0xFF;
+ p++;
+ }
+ break;
+ }
+ return;
+}
+
+static void cw1200_itp_tx_work(struct work_struct *work)
+{
+ struct cw1200_itp *itp = container_of(work, struct cw1200_itp,
+ tx_work.work);
+ struct cw1200_common *priv = itp->priv;
+ atomic_set(&priv->bh_tx, 1);
+ wake_up(&priv->bh_wq);
+}
+
+static void cw1200_itp_tx_finish(struct work_struct *work)
+{
+ struct cw1200_itp *itp = container_of(work, struct cw1200_itp,
+ tx_finish.work);
+ __cw1200_itp_tx_stop(itp->priv);
+}
+
+int cw1200_itp_init(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+
+ itp->priv = priv;
+ atomic_set(&itp->open_count, 0);
+ atomic_set(&itp->stop_tx, 0);
+ atomic_set(&itp->awaiting_confirm, 0);
+ skb_queue_head_init(&itp->log_queue);
+ spin_lock_init(&itp->tx_lock);
+ init_waitqueue_head(&itp->read_wait);
+ init_waitqueue_head(&itp->write_wait);
+ init_waitqueue_head(&itp->close_wait);
+ INIT_DELAYED_WORK(&itp->tx_work, cw1200_itp_tx_work);
+ INIT_DELAYED_WORK(&itp->tx_finish, cw1200_itp_tx_finish);
+ itp->data = NULL;
+ itp->hdr_len = WSM_TX_EXTRA_HEADROOM +
+ sizeof(struct ieee80211_hdr_3addr);
+
+ if (!debugfs_create_file("itp", S_IRUSR | S_IWUSR,
+ priv->debug->debugfs_phy, priv, &fops_itp))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void cw1200_itp_release(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+
+ wait_event_interruptible(itp->close_wait,
+ !atomic_read(&itp->open_count));
+
+ WARN_ON(atomic_read(&itp->open_count));
+
+ skb_queue_purge(&itp->log_queue);
+ cw1200_itp_tx_stop(priv);
+}
+
+static int __cw1200_itp_open(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+
+ if (!priv->vif)
+ return -EINVAL;
+ if (priv->join_status)
+ return -EINVAL;
+ itp->saved_channel = priv->channel;
+ if (!priv->channel)
+ priv->channel = &priv->hw->
+ wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
+ wsm_set_bssid_filtering(priv, false);
+ cw1200_itp_rx_reset(priv);
+ return 0;
+}
+
+static int __cw1200_itp_close(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ if (atomic_read(&itp->test_mode) == TEST_MODE_RX_TEST)
+ cw1200_itp_rx_stop(priv);
+ cw1200_itp_tx_stop(priv);
+ cw1200_disable_listening(priv);
+ cw1200_update_filtering(priv);
+ priv->channel = itp->saved_channel;
+ return 0;
+}
+
+bool cw1200_is_itp(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ return atomic_read(&itp->open_count) != 0;
+}
+
+static void cw1200_itp_rx_reset(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ itp->rx_cnt = 0;
+ itp->rx_rssi = 0;
+ itp->rx_rssi_max = -1000;
+ itp->rx_rssi_min = 1000;
+}
+
+static void cw1200_itp_rx_start(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+
+ itp_printk(KERN_DEBUG "[ITP] RX start, band = %d, ch = %d\n",
+ itp->band, itp->ch);
+ atomic_set(&itp->test_mode, TEST_MODE_RX_TEST);
+ cw1200_update_listening(priv, false);
+ priv->channel = &priv->hw->
+ wiphy->bands[itp->band]->channels[itp->ch];
+ cw1200_update_listening(priv, true);
+ wsm_set_bssid_filtering(priv, false);
+}
+
+static void cw1200_itp_rx_stop(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ itp_printk(KERN_DEBUG "[ITP] RX stop\n");
+ atomic_set(&itp->test_mode, TEST_MODE_NO_TEST);
+ cw1200_itp_rx_reset(priv);
+}
+
+static void cw1200_itp_rx_stats(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ struct sk_buff *skb;
+ char buf[128];
+ int len, ret;
+ struct wsm_counters_table counters;
+
+ ret = wsm_get_counters_table(priv, &counters);
+
+ if (ret)
+ cw1200_itp_err(priv, -EBUSY, 20);
+
+ if (!itp->rx_cnt)
+ len = snprintf(buf, sizeof(buf), "1,0,0,0,0,%d\n",
+ counters.countRxPacketErrors);
+ else
+ len = snprintf(buf, sizeof(buf), "1,%d,%ld,%d,%d,%d\n",
+ itp->rx_cnt,
+ itp->rx_cnt ? itp->rx_rssi / itp->rx_cnt : 0,
+ itp->rx_rssi_min, itp->rx_rssi_max,
+ counters.countRxPacketErrors);
+
+ if (len <= 0) {
+ cw1200_itp_err(priv, -EBUSY, 21);
+ return;
+ }
+
+ skb = dev_alloc_skb(len);
+ if (!skb) {
+ cw1200_itp_err(priv, -ENOMEM, 22);
+ return;
+ }
+
+ itp->rx_cnt = 0;
+ itp->rx_rssi = 0;
+ itp->rx_rssi_max = -1000;
+ itp->rx_rssi_min = 1000;
+
+ skb_trim(skb, 0);
+ skb_put(skb, len);
+
+ memcpy(skb->data, buf, len);
+ skb_queue_tail(&itp->log_queue, skb);
+ wake_up(&itp->read_wait);
+}
+
+static void cw1200_itp_tx_start(struct cw1200_common *priv)
+{
+ struct wsm_tx *tx;
+ struct ieee80211_hdr_3addr *hdr;
+ struct cw1200_itp *itp = &priv->debug->itp;
+ struct wsm_association_mode assoc_mode = {
+ .flags = WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE,
+ .preambleType = itp->preamble,
+ };
+ int len;
+ u8 da_addr[6] = ITP_DEFAULT_DA_ADDR;
+
+ /* Rates index 4 and 5 are not supported */
+ if (itp->rate > 3)
+ itp->rate += 2;
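+ /* (In cw1200_rates[] in main.c, hw_value jumps from 3 to 6, so user
+ * rate indices above 3 are shifted past the two unused slots.) */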
+
+ itp_printk(KERN_DEBUG "[ITP] TX start: band = %d, ch = %d, rate = %d,"
+ " preamble = %d, number = %d, data_mode = %d,"
+ " interval = %d, power = %d, data_len = %d\n",
+ itp->band, itp->ch, itp->rate, itp->preamble,
+ itp->number, itp->data_mode, itp->interval_us,
+ itp->power, itp->data_len);
+
+ len = itp->hdr_len + itp->data_len;
+
+ itp->data = kmalloc(len, GFP_KERNEL);
+ if (!itp->data)
+ return;
+ tx = (struct wsm_tx *)itp->data;
+ tx->hdr.len = itp->data_len + itp->hdr_len;
+ tx->hdr.id = __cpu_to_le16(0x0004 | 1 << 6);
+ tx->maxTxRate = itp->rate;
+ tx->queueId = 3;
+ tx->more = 0;
+ tx->flags = 0xc;
+ tx->packetID = 0x55ff55;
+ tx->reserved = 0;
+ tx->expireTime = 1;
+
+ if (itp->preamble == ITP_PREAMBLE_GREENFIELD)
+ tx->htTxParameters = WSM_HT_TX_GREENFIELD;
+ else if (itp->preamble == ITP_PREAMBLE_MIXED)
+ tx->htTxParameters = WSM_HT_TX_MIXED;
+
+ hdr = (struct ieee80211_hdr_3addr *)&itp->data[sizeof(struct wsm_tx)];
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_FCTL_TODS);
+ memcpy(hdr->addr1, da_addr, ETH_ALEN);
+ memcpy(hdr->addr2, priv->vif->addr, ETH_ALEN);
+ memcpy(hdr->addr3, da_addr, ETH_ALEN);
+
+ cw1200_itp_fill_pattern(&itp->data[itp->hdr_len],
+ itp->data_len, itp->data_mode);
+
+ cw1200_update_listening(priv, false);
+ priv->channel = &priv->hw->
+ wiphy->bands[itp->band]->channels[itp->ch];
+ WARN_ON(wsm_set_output_power(priv, itp->power));
+ if (itp->preamble == ITP_PREAMBLE_SHORT ||
+ itp->preamble == ITP_PREAMBLE_LONG)
+ WARN_ON(wsm_set_association_mode(priv,
+ &assoc_mode));
+ wsm_set_bssid_filtering(priv, false);
+ cw1200_update_listening(priv, true);
+
+ spin_lock_bh(&itp->tx_lock);
+ atomic_set(&itp->test_mode, TEST_MODE_TX_TEST);
+ atomic_set(&itp->awaiting_confirm, 0);
+ atomic_set(&itp->stop_tx, 0);
+ atomic_set(&priv->bh_tx, 1);
+ ktime_get_ts(&itp->last_sent);
+ wake_up(&priv->bh_wq);
+ spin_unlock_bh(&itp->tx_lock);
+}
+
+void __cw1200_itp_tx_stop(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ spin_lock_bh(&itp->tx_lock);
+ kfree(itp->data);
+ itp->data = NULL;
+ atomic_set(&itp->test_mode, TEST_MODE_NO_TEST);
+ spin_unlock_bh(&itp->tx_lock);
+}
+
+static void cw1200_itp_tx_stop(struct cw1200_common *priv)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ itp_printk(KERN_DEBUG "[ITP] TX stop\n");
+ atomic_set(&itp->stop_tx, 1);
+ flush_workqueue(priv->workqueue);
+
+ /* time for FW to confirm all tx requests */
+ msleep(500);
+
+ __cw1200_itp_tx_stop(priv);
+}
+
+static void cw1200_itp_get_version(struct cw1200_common *priv,
+ enum cw1200_itp_version_type type)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ struct sk_buff *skb;
+ char buf[ITP_BUF_SIZE];
+ size_t size = 0;
+ int len;
+ itp_printk(KERN_DEBUG "[ITP] print %s version\n", type == ITP_CHIP_ID ?
+ "chip" : "firmware");
+
+ len = snprintf(buf, ITP_BUF_SIZE, "2,");
+ if (len <= 0) {
+ cw1200_itp_err(priv, -EINVAL, 40);
+ return;
+ }
+ size += len;
+
+ switch (type) {
+ case ITP_CHIP_ID:
+ len = cw1200_print_fw_version(priv, buf+size,
+ ITP_BUF_SIZE - size);
+
+ if (len <= 0) {
+ cw1200_itp_err(priv, -EINVAL, 41);
+ return;
+ }
+ size += len;
+ break;
+ case ITP_FW_VER:
+ len = snprintf(buf+size, ITP_BUF_SIZE - size,
+ "%d.%d", priv->wsm_caps.hardwareId,
+ priv->wsm_caps.hardwareSubId);
+ if (len <= 0) {
+ cw1200_itp_err(priv, -EINVAL, 42);
+ return;
+ }
+ size += len;
+ break;
+ default:
+ cw1200_itp_err(priv, -EINVAL, 43);
+ break;
+ }
+
+ len = snprintf(buf+size, ITP_BUF_SIZE-size, "\n");
+ if (len <= 0) {
+ cw1200_itp_err(priv, -EINVAL, 44);
+ return;
+ }
+ size += len;
+
+ skb = dev_alloc_skb(size);
+ if (!skb) {
+ cw1200_itp_err(priv, -ENOMEM, 45);
+ return;
+ }
+
+ skb_trim(skb, 0);
+ skb_put(skb, size);
+
+ memcpy(skb->data, buf, size);
+ skb_queue_tail(&itp->log_queue, skb);
+ wake_up(&itp->read_wait);
+}
+
+int cw1200_itp_get_tx(struct cw1200_common *priv, u8 **data,
+ size_t *tx_len, int *burst)
+{
+ struct cw1200_itp *itp;
+ struct timespec now;
+ int time_left_us;
+
+ if (!priv->debug)
+ return 0;
+
+ itp = &priv->debug->itp;
+
+ spin_lock_bh(&itp->tx_lock);
+ if (atomic_read(&itp->test_mode) != TEST_MODE_TX_TEST)
+ goto out;
+
+ if (atomic_read(&itp->stop_tx))
+ goto out;
+
+ if (itp->number == 0) {
+ atomic_set(&itp->stop_tx, 1);
+ queue_delayed_work(priv->workqueue, &itp->tx_finish,
+ HZ/10);
+ goto out;
+ }
+
+ if (!itp->data)
+ goto out;
+
+ if (priv->hw_bufs_used >= 2) {
+ if (!atomic_read(&priv->bh_rx))
+ atomic_set(&priv->bh_rx, 1);
+ atomic_set(&priv->bh_tx, 1);
+ goto out;
+ }
+
+ ktime_get_ts(&now);
+ time_left_us = (itp->last_sent.tv_sec -
+ now.tv_sec)*1000000 +
+ (itp->last_sent.tv_nsec - now.tv_nsec)/1000
+ + itp->interval_us;
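+ /* time_left_us: microseconds until the next frame is due
+ * (last_sent + interval_us - now). Long waits are deferred to the
+ * delayed tx_work; short remainders are busy-waited with udelay(). */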
+
+ if (time_left_us > ITP_TIME_THRES_US) {
+ queue_delayed_work(priv->workqueue, &itp->tx_work,
+ ITP_US_TO_MS(time_left_us)*HZ/1000);
+ goto out;
+ }
+
+ if (time_left_us > 50)
+ udelay(time_left_us);
+
+ if (itp->number > 0)
+ itp->number--;
+
+ *data = itp->data;
+ *tx_len = itp->data_len + itp->hdr_len;
+
+ if (itp->data_mode == ITP_DATA_RANDOM)
+ cw1200_itp_fill_pattern(&itp->data[itp->hdr_len],
+ itp->data_len, itp->data_mode);
+ *burst = 2;
+ atomic_set(&priv->bh_tx, 1);
+ ktime_get_ts(&itp->last_sent);
+ atomic_add(1, &itp->awaiting_confirm);
+ spin_unlock_bh(&itp->tx_lock);
+ return 1;
+
+out:
+ spin_unlock_bh(&itp->tx_lock);
+ return 0;
+}
+
+bool cw1200_itp_rxed(struct cw1200_common *priv, struct sk_buff *skb)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ struct ieee80211_rx_status *rx = IEEE80211_SKB_RXCB(skb);
+ int signal;
+
+ if (atomic_read(&itp->test_mode) != TEST_MODE_RX_TEST)
+ return cw1200_is_itp(priv);
+ if (rx->freq != priv->channel->center_freq)
+ return true;
+
+ signal = rx->signal;
+ itp->rx_cnt++;
+ itp->rx_rssi += signal;
+ if (itp->rx_rssi_min > rx->signal)
+ itp->rx_rssi_min = rx->signal;
+ if (itp->rx_rssi_max < rx->signal)
+ itp->rx_rssi_max = rx->signal;
+
+ return true;
+}
+
+void cw1200_itp_wake_up_tx(struct cw1200_common *priv)
+{
+ wake_up(&priv->debug->itp.write_wait);
+}
+
+bool cw1200_itp_tx_running(struct cw1200_common *priv)
+{
+ if (atomic_read(&priv->debug->itp.awaiting_confirm) ||
+ atomic_read(&priv->debug->itp.test_mode) ==
+ TEST_MODE_TX_TEST) {
+ atomic_sub(1, &priv->debug->itp.awaiting_confirm);
+ return true;
+ }
+ return false;
+}
+
+static void cw1200_itp_handle(struct cw1200_common *priv,
+ struct sk_buff *skb)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ const struct wiphy *wiphy = priv->hw->wiphy;
+ int cmd;
+ int ret;
+
+ itp_printk(KERN_DEBUG "[ITP] <<< %s", skb->data);
+ if (sscanf(skb->data, "%d", &cmd) != 1) {
+ cw1200_itp_err(priv, -EINVAL, 1);
+ return;
+ }
+
+ switch (cmd) {
+ case 1: /* RX test */
+ if (atomic_read(&itp->test_mode)) {
+ cw1200_itp_err(priv, -EBUSY, 0);
+ return;
+ }
+ ret = sscanf(skb->data, "%d,%d,%d",
+ &cmd, &itp->band, &itp->ch);
+ if (ret != 3) {
+ cw1200_itp_err(priv, -EINVAL, ret + 1);
+ return;
+ }
+ if (itp->band >= 2)
+ cw1200_itp_err(priv, -EINVAL, 2);
+ else if (!wiphy->bands[itp->band])
+ cw1200_itp_err(priv, -EINVAL, 2);
+ else if (itp->ch >=
+ wiphy->bands[itp->band]->n_channels)
+ cw1200_itp_err(priv, -EINVAL, 3);
+ else {
+ cw1200_itp_rx_stats(priv);
+ cw1200_itp_rx_start(priv);
+ }
+ break;
+ case 2: /* RX stat */
+ cw1200_itp_rx_stats(priv);
+ break;
+ case 3: /* RX/TX stop */
+ if (atomic_read(&itp->test_mode) == TEST_MODE_RX_TEST) {
+ cw1200_itp_rx_stats(priv);
+ cw1200_itp_rx_stop(priv);
+ } else if (atomic_read(&itp->test_mode) == TEST_MODE_TX_TEST) {
+ cw1200_itp_tx_stop(priv);
+ } else
+ cw1200_itp_err(priv, -EBUSY, 0);
+ break;
+ case 4: /* TX start */
+ if (atomic_read(&itp->test_mode) != TEST_MODE_NO_TEST) {
+ cw1200_itp_err(priv, -EBUSY, 0);
+ return;
+ }
+ ret = sscanf(skb->data, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d",
+ &cmd, &itp->band, &itp->ch, &itp->rate,
+ &itp->preamble, &itp->number, &itp->data_mode,
+ &itp->interval_us, &itp->power, &itp->data_len);
+ if (ret != 10) {
+ cw1200_itp_err(priv, -EINVAL, ret + 1);
+ return;
+ }
+ if (itp->band >= 2)
+ cw1200_itp_err(priv, -EINVAL, 2);
+ else if (!wiphy->bands[itp->band])
+ cw1200_itp_err(priv, -EINVAL, 2);
+ else if (itp->ch >=
+ wiphy->bands[itp->band]->n_channels)
+ cw1200_itp_err(priv, -EINVAL, 3);
+ else if (itp->rate >= 20)
+ cw1200_itp_err(priv, -EINVAL, 4);
+ else if (itp->preamble >= ITP_PREAMBLE_MAX)
+ cw1200_itp_err(priv, -EINVAL, 5);
+ else if (itp->data_mode >= ITP_DATA_MAX_MODE)
+ cw1200_itp_err(priv, -EINVAL, 7);
+ else if (itp->data_len < ITP_MIN_DATA_SIZE ||
+ itp->data_len > priv->wsm_caps.sizeInpChBuf -
+ itp->hdr_len)
+ cw1200_itp_err(priv, -EINVAL, 8);
+ else {
+ cw1200_itp_tx_start(priv);
+ }
+ break;
+ case 5:
+ cw1200_itp_get_version(priv, ITP_CHIP_ID);
+ break;
+ case 6:
+ cw1200_itp_get_version(priv, ITP_FW_VER);
+ break;
+
+ }
+}
+
+static void cw1200_itp_err(struct cw1200_common *priv,
+ int err, int arg)
+{
+ struct cw1200_itp *itp = &priv->debug->itp;
+ struct sk_buff *skb;
+ static char buf[255];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "%d,%d\n",
+ err, arg);
+ if (len <= 0)
+ return;
+
+ skb = dev_alloc_skb(len);
+ if (!skb)
+ return;
+
+ skb_trim(skb, 0);
+ skb_put(skb, len);
+
+ memcpy(skb->data, buf, len);
+ skb_queue_tail(&itp->log_queue, skb);
+ wake_up(&itp->read_wait);
+
+ len = sprint_symbol(buf,
+ (unsigned long)__builtin_return_address(0));
+ if (len <= 0)
+ return;
+ itp_printk(KERN_DEBUG "[ITP] error %d,%d from %s\n",
+ err, arg, buf);
+}
diff --git a/drivers/staging/cw1200/itp.h b/drivers/staging/cw1200/itp.h
new file mode 100644
index 00000000000..635e7f85ff9
--- /dev/null
+++ b/drivers/staging/cw1200/itp.h
@@ -0,0 +1,151 @@
+/*
+ * ITP code for ST-Ericsson CW1200 mac80211 driver
+ *
+ * Copyright (c) 2011, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_ITP_H_INCLUDED
+#define CW1200_ITP_H_INCLUDED
+
+struct cw1200_common;
+struct wsm_tx_confirm;
+struct dentry;
+
+#ifdef CONFIG_CW1200_ITP
+
+/*extern*/ struct ieee80211_channel;
+
+#define TEST_MODE_NO_TEST (0)
+#define TEST_MODE_RX_TEST (1)
+#define TEST_MODE_TX_TEST (2)
+
+#define itp_printk(...) printk(__VA_ARGS__)
+#define ITP_DEFAULT_DA_ADDR {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+#define ITP_MIN_DATA_SIZE 6
+#define ITP_MAX_DATA_SIZE 1600
+#define ITP_TIME_THRES_US 10000
+#define ITP_US_TO_MS(x) ((x)/1000)
+#define ITP_MS_TO_US(x) ((x)*1000)
+#if ((ITP_US_TO_MS(ITP_TIME_THRES_US))*HZ/1000) < 1
+#warning Unable to achieve non-busywaiting ITP_TIME_THRES_US \
+precision with the current HZ value!
+#endif
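+/* Example: with HZ = 100, ITP_TIME_THRES_US = 10000 us maps to 10 ms,
+ * i.e. exactly 1 jiffy, the minimum for the non-busywaiting path above. */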
+#define ITP_BUF_SIZE 255
+
+
+enum cw1200_itp_data_modes {
+ ITP_DATA_ZEROS,
+ ITP_DATA_ONES,
+ ITP_DATA_ZERONES,
+ ITP_DATA_RANDOM,
+ ITP_DATA_MAX_MODE,
+};
+
+enum cw1200_itp_version_type {
+ ITP_CHIP_ID,
+ ITP_FW_VER,
+};
+
+enum cw1200_itp_preamble_type {
+ ITP_PREAMBLE_LONG,
+ ITP_PREAMBLE_SHORT,
+ ITP_PREAMBLE_OFDM,
+ ITP_PREAMBLE_MIXED,
+ ITP_PREAMBLE_GREENFIELD,
+ ITP_PREAMBLE_MAX,
+};
+
+
+struct cw1200_itp {
+ struct cw1200_common *priv;
+ atomic_t open_count;
+ atomic_t awaiting_confirm;
+ struct sk_buff_head log_queue;
+ wait_queue_head_t read_wait;
+ wait_queue_head_t write_wait;
+ wait_queue_head_t close_wait;
+ struct ieee80211_channel *saved_channel;
+ atomic_t stop_tx;
+ struct delayed_work tx_work;
+ struct delayed_work tx_finish;
+ spinlock_t tx_lock;
+ struct timespec last_sent;
+ atomic_t test_mode;
+ int rx_cnt;
+ long rx_rssi;
+ int rx_rssi_max;
+ int rx_rssi_min;
+ unsigned band;
+ unsigned ch;
+ unsigned rate;
+ unsigned preamble;
+ unsigned int number;
+ unsigned data_mode;
+ int interval_us;
+ int power;
+ u8 *data;
+ int hdr_len;
+ int data_len;
+};
+
+int cw1200_itp_init(struct cw1200_common *priv);
+void cw1200_itp_release(struct cw1200_common *priv);
+
+bool cw1200_is_itp(struct cw1200_common *priv);
+bool cw1200_itp_rxed(struct cw1200_common *priv, struct sk_buff *skb);
+void cw1200_itp_wake_up_tx(struct cw1200_common *priv);
+int cw1200_itp_get_tx(struct cw1200_common *priv, u8 **data,
+ size_t *tx_len, int *burst);
+bool cw1200_itp_tx_running(struct cw1200_common *priv);
+
+#else /* CONFIG_CW1200_ITP */
+
+static inline int
+cw1200_itp_init(struct cw1200_common *priv)
+{
+ return 0;
+}
+
+static inline void cw1200_itp_release(struct cw1200_common *priv)
+{
+}
+
+static inline bool cw1200_is_itp(struct cw1200_common *priv)
+{
+ return false;
+}
+
+static inline bool cw1200_itp_rxed(struct cw1200_common *priv,
+ struct sk_buff *skb)
+{
+ return false;
+}
+
+
+static inline void cw1200_itp_consume_txed(struct cw1200_common *priv)
+{
+}
+
+static inline void cw1200_itp_wake_up_tx(struct cw1200_common *priv)
+{
+}
+
+static inline int cw1200_itp_get_tx(struct cw1200_common *priv, u8 **data,
+ size_t *tx_len, int *burst)
+{
+ return 0;
+}
+
+static inline bool cw1200_itp_tx_running(struct cw1200_common *priv)
+{
+ return false;
+}
+
+#endif /* CONFIG_CW1200_ITP */
+
+#endif /* CW1200_ITP_H_INCLUDED */
diff --git a/drivers/staging/cw1200/main.c b/drivers/staging/cw1200/main.c
new file mode 100644
index 00000000000..06434629dcd
--- /dev/null
+++ b/drivers/staging/cw1200/main.c
@@ -0,0 +1,567 @@
+/*
+ * mac80211 glue code for the ST-Ericsson CW1200 driver
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * Based on:
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * Based on:
+ * - the islsm (softmac prism54) driver, which is:
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ * - stlc45xx driver
+ * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "cw1200.h"
+#include "txrx.h"
+#include "sbus.h"
+#include "fwio.h"
+#include "hwio.h"
+#include "bh.h"
+#include "sta.h"
+#include "ap.h"
+#include "scan.h"
+#include "debug.h"
+#include "pm.h"
+
+MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>");
+MODULE_DESCRIPTION("Softmac ST-Ericsson CW1200 common code");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("cw1200_core");
+
+/* Accept MAC address of the form macaddr=0x00,0x80,0xE1,0x30,0x40,0x50 */
+static u8 cw1200_mac_template[ETH_ALEN] = {0x00, 0x80, 0xe1, 0x00, 0x00, 0x00};
+module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(macaddr, "MAC address");
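+/* Usage sketch (module name assumed from MODULE_ALIAS above):
+ *   insmod cw1200_core.ko macaddr=0x00,0x80,0xE1,0x30,0x40,0x50
+ * If the last three bytes are left at zero, cw1200_init_common()
+ * randomizes them. */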
+
+/* TODO: use rates and channels from the device */
+#define RATETAB_ENT(_rate, _rateid, _flags) \
+ { \
+ .bitrate = (_rate), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+ }
+
+static struct ieee80211_rate cw1200_rates[] = {
+ RATETAB_ENT(10, 0, 0),
+ RATETAB_ENT(20, 1, 0),
+ RATETAB_ENT(55, 2, 0),
+ RATETAB_ENT(110, 3, 0),
+ RATETAB_ENT(60, 6, 0),
+ RATETAB_ENT(90, 7, 0),
+ RATETAB_ENT(120, 8, 0),
+ RATETAB_ENT(180, 9, 0),
+ RATETAB_ENT(240, 10, 0),
+ RATETAB_ENT(360, 11, 0),
+ RATETAB_ENT(480, 12, 0),
+ RATETAB_ENT(540, 13, 0),
+};
+
+static struct ieee80211_rate cw1200_mcs_rates[] = {
+ RATETAB_ENT(65, 14, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(130, 15, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(195, 16, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(260, 17, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(390, 18, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(520, 19, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(585, 20, IEEE80211_TX_RC_MCS),
+ RATETAB_ENT(650, 21, IEEE80211_TX_RC_MCS),
+};
+
+#define cw1200_a_rates (cw1200_rates + 4)
+#define cw1200_a_rates_size (ARRAY_SIZE(cw1200_rates) - 4)
+#define cw1200_g_rates (cw1200_rates + 0)
+#define cw1200_g_rates_size (ARRAY_SIZE(cw1200_rates))
+#define cw1200_n_rates (cw1200_mcs_rates)
+#define cw1200_n_rates_size (ARRAY_SIZE(cw1200_mcs_rates))
+
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
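+/* For example, CHAN5G(36, 0) expands to center_freq = 5000 + 5 * 36 = 5180
+ * MHz (illustrative only; the actual channel tables follow below). */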
+
+static struct ieee80211_channel cw1200_2ghz_chantable[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+#ifdef CONFIG_CW1200_5GHZ_SUPPORT
+static struct ieee80211_channel cw1200_5ghz_chantable[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+#endif /* CONFIG_CW1200_5GHZ_SUPPORT */
+
+static struct ieee80211_supported_band cw1200_band_2ghz = {
+ .channels = cw1200_2ghz_chantable,
+ .n_channels = ARRAY_SIZE(cw1200_2ghz_chantable),
+ .bitrates = cw1200_g_rates,
+ .n_bitrates = cw1200_g_rates_size,
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) |
+ IEEE80211_HT_CAP_MAX_AMSDU,
+ .ht_supported = 1,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+ .mcs = {
+ .rx_mask[0] = 0xFF,
+ .rx_highest = __cpu_to_le16(0x41),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+ },
+};
+
+#ifdef CONFIG_CW1200_5GHZ_SUPPORT
+static struct ieee80211_supported_band cw1200_band_5ghz = {
+ .channels = cw1200_5ghz_chantable,
+ .n_channels = ARRAY_SIZE(cw1200_5ghz_chantable),
+ .bitrates = cw1200_a_rates,
+ .n_bitrates = cw1200_a_rates_size,
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) |
+ IEEE80211_HT_CAP_MAX_AMSDU,
+ .ht_supported = 1,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+ .mcs = {
+ .rx_mask[0] = 0xFF,
+ .rx_highest = __cpu_to_le16(0x41),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+ },
+};
+#endif /* CONFIG_CW1200_5GHZ_SUPPORT */
+
+static const unsigned long cw1200_ttl[] = {
+ 1 * HZ, /* VO */
+ 2 * HZ, /* VI */
+ 5 * HZ, /* BE */
+ 10 * HZ /* BK */
+};
+
+static const struct ieee80211_ops cw1200_ops = {
+ .start = cw1200_start,
+ .stop = cw1200_stop,
+ .add_interface = cw1200_add_interface,
+ .remove_interface = cw1200_remove_interface,
+ .tx = cw1200_tx,
+ .hw_scan = cw1200_hw_scan,
+ .set_tim = cw1200_set_tim,
+ .sta_notify = cw1200_sta_notify,
+ .sta_add = cw1200_sta_add,
+ .sta_remove = cw1200_sta_remove,
+ .set_key = cw1200_set_key,
+ .set_rts_threshold = cw1200_set_rts_threshold,
+ .config = cw1200_config,
+ .bss_info_changed = cw1200_bss_info_changed,
+ .prepare_multicast = cw1200_prepare_multicast,
+ .configure_filter = cw1200_configure_filter,
+ .conf_tx = cw1200_conf_tx,
+ .get_stats = cw1200_get_stats,
+ .ampdu_action = cw1200_ampdu_action,
+ .flush = cw1200_flush,
+#ifdef CONFIG_PM
+ .suspend = cw1200_wow_suspend,
+ .resume = cw1200_wow_resume,
+#endif /* CONFIG_PM */
+ /* Intentionally not offloaded: */
+ /*.channel_switch = cw1200_channel_switch, */
+ /*.remain_on_channel = cw1200_remain_on_channel, */
+ /*.cancel_remain_on_channel = cw1200_cancel_remain_on_channel, */
+};
+
+struct ieee80211_hw *cw1200_init_common(size_t priv_data_len)
+{
+ int i;
+ struct ieee80211_hw *hw;
+ struct cw1200_common *priv;
+
+ hw = ieee80211_alloc_hw(priv_data_len, &cw1200_ops);
+ if (!hw)
+ return NULL;
+
+ priv = hw->priv;
+ priv->hw = hw;
+ priv->mode = NL80211_IFTYPE_UNSPECIFIED;
+ priv->rates = cw1200_rates; /* TODO: fetch from FW */
+ priv->mcs_rates = cw1200_n_rates;
+ /* Enable block ACK for every TID but voice. */
+ priv->ba_tid_mask = 0x3F;
+
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_SUPPORTS_UAPSD |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_SUPPORTS_CQM_RSSI |
+ IEEE80211_HW_NEED_DTIM_PERIOD |
+ /* Aggregation is fully controlled by firmware;
+ * no support from the mac80211 stack is needed. */
+ /* IEEE80211_HW_AMPDU_AGGREGATION | */
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ IEEE80211_HW_SUPPORTS_P2P_PS |
+ IEEE80211_HW_SUPPORTS_CQM_BEACON_MISS |
+ IEEE80211_HW_SUPPORTS_CQM_TX_FAIL |
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+ IEEE80211_HW_BEACON_FILTER;
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
+ /* Only limited WoWLAN functionality is supported. */
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY |
+ WIPHY_WOWLAN_DISCONNECT;
+ hw->wiphy->wowlan.n_patterns = 0;
+
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+
+#if defined(CONFIG_CW1200_DISABLE_BEACON_HINTS)
+ hw->wiphy->flags |= WIPHY_FLAG_DISABLE_BEACON_HINTS;
+#endif
+
+ hw->channel_change_time = 1000; /* TODO: find actual value */
+ /* priv->beacon_req_id = cpu_to_le32(0); */
+ hw->queues = 4;
+ priv->noise = -94;
+
+ hw->max_rates = 8;
+ hw->max_rate_tries = 15;
+ hw->extra_tx_headroom = WSM_TX_EXTRA_HEADROOM +
+ 8 /* TKIP IV */ +
+ 12 /* TKIP ICV and MIC */;
+
+ hw->sta_data_size = sizeof(struct cw1200_sta_priv);
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
+#ifdef CONFIG_CW1200_5GHZ_SUPPORT
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;
+#endif /* CONFIG_CW1200_5GHZ_SUPPORT */
+
+ hw->wiphy->max_scan_ssids = 2;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+
+ SET_IEEE80211_PERM_ADDR(hw, cw1200_mac_template);
+
+ if (hw->wiphy->perm_addr[3] == 0 &&
+ hw->wiphy->perm_addr[4] == 0 &&
+ hw->wiphy->perm_addr[5] == 0) {
+ get_random_bytes(&hw->wiphy->perm_addr[3], 3);
+ }
+
+ mutex_init(&priv->wsm_cmd_mux);
+ mutex_init(&priv->conf_mutex);
+ priv->workqueue = create_singlethread_workqueue("cw1200_wq");
+ if (!priv->workqueue) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+ sema_init(&priv->scan.lock, 1);
+ INIT_WORK(&priv->scan.work, cw1200_scan_work);
+ INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
+ INIT_DELAYED_WORK(&priv->scan.timeout, cw1200_scan_timeout);
+ INIT_WORK(&priv->join_work, cw1200_join_work);
+ INIT_DELAYED_WORK(&priv->join_timeout, cw1200_join_timeout);
+ INIT_WORK(&priv->unjoin_work, cw1200_unjoin_work);
+ INIT_WORK(&priv->offchannel_work, cw1200_offchannel_work);
+ INIT_WORK(&priv->wep_key_work, cw1200_wep_key_work);
+ INIT_WORK(&priv->tx_policy_upload_work, tx_policy_upload_work);
+ spin_lock_init(&priv->event_queue_lock);
+ INIT_LIST_HEAD(&priv->event_queue);
+ INIT_WORK(&priv->event_handler, cw1200_event_handler);
+ INIT_DELAYED_WORK(&priv->bss_loss_work, cw1200_bss_loss_work);
+ INIT_DELAYED_WORK(&priv->connection_loss_work,
+ cw1200_connection_loss_work);
+ spin_lock_init(&priv->bss_loss_lock);
+ INIT_WORK(&priv->tx_failure_work, cw1200_tx_failure_work);
+ spin_lock_init(&priv->ps_state_lock);
+ INIT_DELAYED_WORK(&priv->set_cts_work, cw1200_set_cts_work);
+ INIT_WORK(&priv->set_tim_work, cw1200_set_tim_work);
+ INIT_WORK(&priv->multicast_start_work, cw1200_multicast_start_work);
+ INIT_WORK(&priv->multicast_stop_work, cw1200_multicast_stop_work);
+ INIT_WORK(&priv->link_id_work, cw1200_link_id_work);
+ INIT_DELAYED_WORK(&priv->link_id_gc_work, cw1200_link_id_gc_work);
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ INIT_WORK(&priv->linkid_reset_work, cw1200_link_id_reset);
+#endif
+ INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work);
+ INIT_WORK(&priv->ba_work, cw1200_ba_work);
+ init_timer(&priv->mcast_timeout);
+ priv->mcast_timeout.data = (unsigned long)priv;
+ priv->mcast_timeout.function = cw1200_mcast_timeout;
+ spin_lock_init(&priv->ba_lock);
+ init_timer(&priv->ba_timer);
+ priv->ba_timer.data = (unsigned long)priv;
+ priv->ba_timer.function = cw1200_ba_timer;
+
+ if (unlikely(cw1200_queue_stats_init(&priv->tx_queue_stats,
+ CW1200_LINK_ID_MAX,
+ cw1200_skb_dtor,
+ priv))) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
+ for (i = 0; i < 4; ++i) {
+ if (unlikely(cw1200_queue_init(&priv->tx_queue[i],
+ &priv->tx_queue_stats, i, 16,
+ cw1200_ttl[i]))) {
+ for (; i > 0; i--)
+ cw1200_queue_deinit(&priv->tx_queue[i - 1]);
+ cw1200_queue_stats_deinit(&priv->tx_queue_stats);
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+ }
+
+ init_waitqueue_head(&priv->channel_switch_done);
+ init_waitqueue_head(&priv->wsm_cmd_wq);
+ init_waitqueue_head(&priv->wsm_startup_done);
+ wsm_buf_init(&priv->wsm_cmd_buf);
+ spin_lock_init(&priv->wsm_cmd.lock);
+ tx_policy_init(priv);
+#if defined(CONFIG_CW1200_WSM_DUMPS_SHORT)
+ priv->wsm_dump_max_size = 20;
+#endif /* CONFIG_CW1200_WSM_DUMPS_SHORT */
+
+ return hw;
+}
+EXPORT_SYMBOL_GPL(cw1200_init_common);
+
+int cw1200_register_common(struct ieee80211_hw *dev)
+{
+ struct cw1200_common *priv = dev->priv;
+ int err;
+
+ err = cw1200_pm_init(&priv->pm_state, priv);
+ if (err) {
+ cw1200_dbg(CW1200_DBG_ERROR, "Cannot init PM. (%d).\n",
+ err);
+ return err;
+ }
+
+ err = ieee80211_register_hw(dev);
+ if (err) {
+ cw1200_dbg(CW1200_DBG_ERROR, "Cannot register device (%d).\n",
+ err);
+ cw1200_pm_deinit(&priv->pm_state);
+ return err;
+ }
+
+#ifdef CONFIG_CW1200_LEDS
+ err = cw1200_init_leds(priv);
+ if (err) {
+ cw1200_pm_deinit(&priv->pm_state);
+ ieee80211_unregister_hw(dev);
+ return err;
+ }
+#endif /* CONFIG_CW1200_LEDS */
+
+ cw1200_debug_init(priv);
+
+ cw1200_dbg(CW1200_DBG_MSG, "is registered as '%s'\n",
+ wiphy_name(dev->wiphy));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cw1200_register_common);
+
+void cw1200_free_common(struct ieee80211_hw *dev)
+{
+ /* struct cw1200_common *priv = dev->priv; */
+ /* unsigned int i; */
+
+ ieee80211_free_hw(dev);
+}
+EXPORT_SYMBOL_GPL(cw1200_free_common);
+
+void cw1200_unregister_common(struct ieee80211_hw *dev)
+{
+ struct cw1200_common *priv = dev->priv;
+ int i;
+
+ ieee80211_unregister_hw(dev);
+
+ del_timer_sync(&priv->mcast_timeout);
+ del_timer_sync(&priv->ba_timer);
+
+ priv->sbus_ops->irq_unsubscribe(priv->sbus_priv);
+ cw1200_unregister_bh(priv);
+
+ cw1200_debug_release(priv);
+
+#ifdef CONFIG_CW1200_LEDS
+ cw1200_unregister_leds(priv);
+#endif /* CONFIG_CW1200_LEDS */
+
+ mutex_destroy(&priv->conf_mutex);
+
+ wsm_buf_deinit(&priv->wsm_cmd_buf);
+
+ destroy_workqueue(priv->workqueue);
+ priv->workqueue = NULL;
+
+ if (priv->skb_cache) {
+ dev_kfree_skb(priv->skb_cache);
+ priv->skb_cache = NULL;
+ }
+
+ if (priv->sdd) {
+ release_firmware(priv->sdd);
+ priv->sdd = NULL;
+ }
+
+ for (i = 0; i < 4; ++i)
+ cw1200_queue_deinit(&priv->tx_queue[i]);
+ cw1200_queue_stats_deinit(&priv->tx_queue_stats);
+ cw1200_pm_deinit(&priv->pm_state);
+}
+EXPORT_SYMBOL_GPL(cw1200_unregister_common);
+
+int cw1200_core_probe(const struct sbus_ops *sbus_ops,
+ struct sbus_priv *sbus,
+ struct device *pdev,
+ struct cw1200_common **pself)
+{
+ int err = -ENOMEM;
+ struct ieee80211_hw *dev;
+ struct cw1200_common *priv;
+ struct wsm_operational_mode mode = {
+ .power_mode = wsm_power_mode_quiescent,
+ .disableMoreFlagUsage = true,
+ };
+
+ dev = cw1200_init_common(sizeof(struct cw1200_common));
+ if (!dev)
+ goto err;
+
+ priv = dev->priv;
+
+ priv->sbus_ops = sbus_ops;
+ priv->sbus_priv = sbus;
+ priv->pdev = pdev;
+ SET_IEEE80211_DEV(priv->hw, pdev);
+
+ /* WSM callbacks. */
+ priv->wsm_cbc.scan_complete = cw1200_scan_complete_cb;
+ priv->wsm_cbc.tx_confirm = cw1200_tx_confirm_cb;
+ priv->wsm_cbc.rx = cw1200_rx_cb;
+ priv->wsm_cbc.suspend_resume = cw1200_suspend_resume;
+ /* priv->wsm_cbc.set_pm_complete = cw1200_set_pm_complete_cb; */
+ priv->wsm_cbc.channel_switch = cw1200_channel_switch_cb;
+
+ err = cw1200_register_bh(priv);
+ if (err)
+ goto err1;
+
+ err = cw1200_load_firmware(priv);
+ if (err)
+ goto err2;
+ priv->sbus_ops->lock(priv->sbus_priv);
+ WARN_ON(priv->sbus_ops->set_block_size(priv->sbus_priv,
+ SDIO_BLOCK_SIZE));
+ priv->sbus_ops->unlock(priv->sbus_priv);
+
+ if (wait_event_interruptible_timeout(priv->wsm_startup_done,
+ priv->wsm_caps.firmwareReady, 3*HZ) <= 0) {
+ /* TODO: find out how to reset the device
+ * in QUEUE mode properly. */
+ err = -ETIMEDOUT;
+ goto err3;
+ }
+
+ /* Set low-power mode. */
+ WARN_ON(wsm_set_operational_mode(priv, &mode));
+
+ /* Enable multi-TX confirmation */
+ WARN_ON(wsm_use_multi_tx_conf(priv, true));
+
+ err = cw1200_register_common(dev);
+ if (err) {
+ priv->sbus_ops->irq_unsubscribe(priv->sbus_priv);
+ goto err3;
+ }
+
+ *pself = dev->priv;
+ return err;
+
+err3:
+ sbus_ops->reset(sbus);
+err2:
+ cw1200_unregister_bh(priv);
+err1:
+ cw1200_free_common(dev);
+err:
+ return err;
+}
+EXPORT_SYMBOL_GPL(cw1200_core_probe);
+
+void cw1200_core_release(struct cw1200_common *self)
+{
+ cw1200_unregister_common(self->hw);
+ cw1200_free_common(self->hw);
+ return;
+}
+EXPORT_SYMBOL_GPL(cw1200_core_release);
diff --git a/drivers/staging/cw1200/pm.c b/drivers/staging/cw1200/pm.c
new file mode 100644
index 00000000000..d95e5d3dc9e
--- /dev/null
+++ b/drivers/staging/cw1200/pm.c
@@ -0,0 +1,459 @@
+/*
+ * Mac80211 power management API for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2011, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include "cw1200.h"
+#include "pm.h"
+#include "sta.h"
+#include "bh.h"
+#include "sbus.h"
+
+#define CW1200_BEACON_SKIPPING_MULTIPLIER 3
+
+struct cw1200_udp_port_filter {
+ struct wsm_udp_port_filter_hdr hdr;
+ struct wsm_udp_port_filter dhcp;
+ struct wsm_udp_port_filter upnp;
+} __packed;
+
+struct cw1200_ether_type_filter {
+ struct wsm_ether_type_filter_hdr hdr;
+ struct wsm_ether_type_filter ip;
+ struct wsm_ether_type_filter pae;
+ struct wsm_ether_type_filter wapi;
+} __packed;
+
+static struct cw1200_udp_port_filter cw1200_udp_port_filter_on = {
+ .hdr.nrFilters = 2,
+ .dhcp = {
+ .filterAction = WSM_FILTER_ACTION_FILTER_OUT,
+ .portType = WSM_FILTER_PORT_TYPE_DST,
+ .udpPort = __cpu_to_le16(67),
+ },
+ .upnp = {
+ .filterAction = WSM_FILTER_ACTION_FILTER_OUT,
+ .portType = WSM_FILTER_PORT_TYPE_DST,
+ .udpPort = __cpu_to_le16(1900),
+ },
+ /* Please add other known ports to be filtered out here and
+ * update nrFilters field in the header.
+ * Up to 4 filters are allowed. */
+};
+
+static struct wsm_udp_port_filter_hdr cw1200_udp_port_filter_off = {
+ .nrFilters = 0,
+};
+
+#ifndef ETH_P_WAPI
+#define ETH_P_WAPI 0x88B4
+#endif
+
+static struct cw1200_ether_type_filter cw1200_ether_type_filter_on = {
+ .hdr.nrFilters = 3,
+ .ip = {
+ .filterAction = WSM_FILTER_ACTION_FILTER_IN,
+ .etherType = __cpu_to_le16(ETH_P_IP),
+ },
+ .pae = {
+ .filterAction = WSM_FILTER_ACTION_FILTER_IN,
+ .etherType = __cpu_to_le16(ETH_P_PAE),
+ },
+ .wapi = {
+ .filterAction = WSM_FILTER_ACTION_FILTER_IN,
+ .etherType = __cpu_to_le16(ETH_P_WAPI),
+ },
+ /* Please add other known ether types to be filtered out here and
+ * update nrFilters field in the header.
+ * Up to 4 filters are allowed. */
+};
+
+static struct wsm_ether_type_filter_hdr cw1200_ether_type_filter_off = {
+ .nrFilters = 0,
+};
+
+static int cw1200_suspend_late(struct device *dev);
+static void cw1200_pm_release(struct device *dev);
+static int cw1200_pm_probe(struct platform_device *pdev);
+
+/* private */
+struct cw1200_suspend_state {
+ unsigned long bss_loss_tmo;
+ unsigned long connection_loss_tmo;
+ unsigned long join_tmo;
+ unsigned long direct_probe;
+ unsigned long link_id_gc;
+ bool beacon_skipping;
+};
+
+static const struct dev_pm_ops cw1200_pm_ops = {
+ .suspend_noirq = cw1200_suspend_late,
+};
+
+static struct platform_driver cw1200_power_driver = {
+ .probe = cw1200_pm_probe,
+ .driver = {
+ .name = "cw1200_power",
+ .pm = &cw1200_pm_ops,
+ },
+};
+
+static int cw1200_pm_init_common(struct cw1200_pm_state *pm,
+ struct cw1200_common *priv)
+{
+ int ret;
+
+ spin_lock_init(&pm->lock);
+ ret = platform_driver_register(&cw1200_power_driver);
+ if (ret)
+ return ret;
+ pm->pm_dev = platform_device_alloc("cw1200_power", 0);
+ if (!pm->pm_dev) {
+ platform_driver_unregister(&cw1200_power_driver);
+ return -ENOMEM;
+ }
+
+ pm->pm_dev->dev.platform_data = priv;
+ ret = platform_device_add(pm->pm_dev);
+ if (ret) {
+ platform_device_put(pm->pm_dev);
+ pm->pm_dev = NULL;
+ platform_driver_unregister(&cw1200_power_driver);
+ }
+
+ return ret;
+}
+
+static void cw1200_pm_deinit_common(struct cw1200_pm_state *pm)
+{
+ platform_driver_unregister(&cw1200_power_driver);
+ if (pm->pm_dev) {
+ pm->pm_dev->dev.platform_data = NULL;
+ platform_device_unregister(pm->pm_dev);
+ pm->pm_dev = NULL;
+ }
+}
+
+#ifdef CONFIG_WAKELOCK
+
+int cw1200_pm_init(struct cw1200_pm_state *pm,
+ struct cw1200_common *priv)
+{
+ int ret = cw1200_pm_init_common(pm, priv);
+ if (!ret)
+ wake_lock_init(&pm->wakelock,
+ WAKE_LOCK_SUSPEND, "cw1200_wlan");
+ return ret;
+}
+
+void cw1200_pm_deinit(struct cw1200_pm_state *pm)
+{
+ if (wake_lock_active(&pm->wakelock))
+ wake_unlock(&pm->wakelock);
+ wake_lock_destroy(&pm->wakelock);
+ cw1200_pm_deinit_common(pm);
+}
+
+void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+ unsigned long tmo)
+{
+ long cur_tmo;
+ spin_lock_bh(&pm->lock);
+ cur_tmo = pm->wakelock.expires - jiffies;
+ if (!wake_lock_active(&pm->wakelock) ||
+ cur_tmo < (long)tmo)
+ wake_lock_timeout(&pm->wakelock, tmo);
+ spin_unlock_bh(&pm->lock);
+}
+
+#else /* CONFIG_WAKELOCK */
+
+static void cw1200_pm_stay_awake_tmo(unsigned long arg)
+{
+}
+
+int cw1200_pm_init(struct cw1200_pm_state *pm,
+ struct cw1200_common *priv)
+{
+ int ret = cw1200_pm_init_common(pm, priv);
+ if (!ret) {
+ init_timer(&pm->stay_awake);
+ pm->stay_awake.data = (unsigned long)pm;
+ pm->stay_awake.function = cw1200_pm_stay_awake_tmo;
+ }
+ return ret;
+}
+
+void cw1200_pm_deinit(struct cw1200_pm_state *pm)
+{
+ del_timer_sync(&pm->stay_awake);
+ cw1200_pm_deinit_common(pm);
+}
+
+void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+ unsigned long tmo)
+{
+ long cur_tmo;
+ spin_lock_bh(&pm->lock);
+ cur_tmo = pm->stay_awake.expires - jiffies;
+ if (!timer_pending(&pm->stay_awake) ||
+ cur_tmo < (long)tmo)
+ mod_timer(&pm->stay_awake, jiffies + tmo);
+ spin_unlock_bh(&pm->lock);
+}
+
+#endif /* CONFIG_WAKELOCK */
+
+static long cw1200_suspend_work(struct delayed_work *work)
+{
+ int ret = cancel_delayed_work(work);
+ long tmo;
+ if (ret > 0) {
+ /* Timer is pending */
+ tmo = work->timer.expires - jiffies;
+ if (tmo < 0)
+ tmo = 0;
+ } else {
+ tmo = -1;
+ }
+ return tmo;
+}
+
+static int cw1200_resume_work(struct cw1200_common *priv,
+ struct delayed_work *work,
+ unsigned long tmo)
+{
+ if ((long)tmo < 0)
+ return 1;
+
+ return queue_delayed_work(priv->workqueue, work, tmo);
+}
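+
+/*
+ * Sketch of the pattern used by the WoW path below: a pending delayed work
+ * is cancelled and its remaining timeout recorded (-1 when it was not
+ * pending); on resume, cw1200_resume_work() re-queues it with the saved
+ * timeout so the original deadline is roughly preserved.
+ */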
+
+static int cw1200_suspend_late(struct device *dev)
+{
+ struct cw1200_common *priv = dev->platform_data;
+ if (atomic_read(&priv->bh_rx)) {
+ wiphy_dbg(priv->hw->wiphy,
+ "%s: Suspend interrupted.\n",
+ __func__);
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static void cw1200_pm_release(struct device *dev)
+{
+}
+
+static int cw1200_pm_probe(struct platform_device *pdev)
+{
+ pdev->dev.release = cw1200_pm_release;
+ return 0;
+}
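+
+/*
+ * Rough flow of cw1200_wow_suspend() below: bail out while the datapath,
+ * configuration, join or scan machinery is busy; lock TX and scanning;
+ * install the UDP port and ether-type filters; park the delayed works and
+ * optionally enable beacon skipping; suspend the BH thread and enable IRQ
+ * wake through the sbus layer. Failures unwind through the revert labels
+ * in reverse order.
+ */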
+
+int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct cw1200_pm_state *pm_state = &priv->pm_state;
+ struct cw1200_suspend_state *state;
+ int ret;
+
+#ifndef CONFIG_WAKELOCK
+ spin_lock_bh(&pm_state->lock);
+ ret = timer_pending(&pm_state->stay_awake);
+ spin_unlock_bh(&pm_state->lock);
+ if (ret)
+ return -EAGAIN;
+#endif
+
+ /* Do not suspend when datapath is not idle */
+ if (priv->tx_queue_stats.num_queued)
+ return -EBUSY;
+
+ /* Make sure there are no configuration requests in progress. */
+ if (!mutex_trylock(&priv->conf_mutex))
+ return -EBUSY;
+
+ /* Ensure pending operations are done.
+ * Note also that wow_suspend must return in ~2.5sec, before
+ * the watchdog is triggered. */
+ if (priv->channel_switch_in_progress)
+ goto revert1;
+
+ /* Do not suspend when join work is scheduled */
+ if (work_pending(&priv->join_work))
+ goto revert1;
+
+ /* Do not suspend when scanning */
+ if (down_trylock(&priv->scan.lock))
+ goto revert1;
+
+ /* Lock TX. */
+ wsm_lock_tx_async(priv);
+
+ /* Wait to avoid possible race with bh code.
+ * But do not wait too long... */
+ if (wait_event_timeout(priv->bh_evt_wq,
+ !priv->hw_bufs_used, HZ / 10) <= 0)
+ goto revert2;
+
+ /* Set UDP filter */
+ wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_on.hdr);
+
+ /* Set ethernet frame type filter */
+ wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_on.hdr);
+
+ /* Allocate state */
+ state = kzalloc(sizeof(struct cw1200_suspend_state), GFP_KERNEL);
+ if (!state)
+ goto revert3;
+
+ /* Store delayed work states. */
+ state->bss_loss_tmo =
+ cw1200_suspend_work(&priv->bss_loss_work);
+ state->connection_loss_tmo =
+ cw1200_suspend_work(&priv->connection_loss_work);
+ state->join_tmo =
+ cw1200_suspend_work(&priv->join_timeout);
+ state->direct_probe =
+ cw1200_suspend_work(&priv->scan.probe_work);
+ state->link_id_gc =
+ cw1200_suspend_work(&priv->link_id_gc_work);
+
+ /* Enable beacon skipping */
+ if (priv->join_status == CW1200_JOIN_STATUS_STA
+ && priv->join_dtim_period
+ && !priv->has_multicast_subscription) {
+ state->beacon_skipping = true;
+ wsm_set_beacon_wakeup_period(priv,
+ priv->join_dtim_period,
+ CW1200_BEACON_SKIPPING_MULTIPLIER *
+ priv->join_dtim_period);
+ }
+
+ /* Stop serving thread */
+ if (cw1200_bh_suspend(priv))
+ goto revert4;
+
+ ret = timer_pending(&priv->mcast_timeout);
+ if (ret)
+ goto revert5;
+
+ /* Cancel block ack stat timer */
+ del_timer_sync(&priv->ba_timer);
+
+ /* Store suspend state */
+ pm_state->suspend_state = state;
+
+ /* Enable IRQ wake */
+ ret = priv->sbus_ops->power_mgmt(priv->sbus_priv, true);
+ if (ret) {
+ wiphy_err(priv->hw->wiphy,
+ "%s: PM request failed: %d. WoW is disabled.\n",
+ __func__, ret);
+ cw1200_wow_resume(hw);
+ return -EBUSY;
+ }
+
+ /* Force resume if event is coming from the device. */
+ if (atomic_read(&priv->bh_rx)) {
+ cw1200_wow_resume(hw);
+ return -EAGAIN;
+ }
+
+ return 0;
+
+revert5:
+ WARN_ON(cw1200_bh_resume(priv));
+revert4:
+ cw1200_resume_work(priv, &priv->bss_loss_work,
+ state->bss_loss_tmo);
+ cw1200_resume_work(priv, &priv->connection_loss_work,
+ state->connection_loss_tmo);
+ cw1200_resume_work(priv, &priv->join_timeout,
+ state->join_tmo);
+ cw1200_resume_work(priv, &priv->scan.probe_work,
+ state->direct_probe);
+ cw1200_resume_work(priv, &priv->link_id_gc_work,
+ state->link_id_gc);
+ kfree(state);
+revert3:
+ wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
+ wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off);
+revert2:
+ wsm_unlock_tx(priv);
+ up(&priv->scan.lock);
+revert1:
+ mutex_unlock(&priv->conf_mutex);
+ return -EBUSY;
+}
+
+int cw1200_wow_resume(struct ieee80211_hw *hw)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct cw1200_pm_state *pm_state = &priv->pm_state;
+ struct cw1200_suspend_state *state;
+
+ state = pm_state->suspend_state;
+ pm_state->suspend_state = NULL;
+
+ /* Disable IRQ wake */
+ priv->sbus_ops->power_mgmt(priv->sbus_priv, false);
+
+ /* Resume BH thread */
+ WARN_ON(cw1200_bh_resume(priv));
+
+ if (state->beacon_skipping) {
+ wsm_set_beacon_wakeup_period(priv, priv->beacon_int *
+ priv->join_dtim_period >
+ MAX_BEACON_SKIP_TIME_MS ? 1 :
+ priv->join_dtim_period, 0);
+ state->beacon_skipping = false;
+ }
+
+ /* Resume delayed work */
+ cw1200_resume_work(priv, &priv->bss_loss_work,
+ state->bss_loss_tmo);
+ cw1200_resume_work(priv, &priv->connection_loss_work,
+ state->connection_loss_tmo);
+ cw1200_resume_work(priv, &priv->join_timeout,
+ state->join_tmo);
+ cw1200_resume_work(priv, &priv->scan.probe_work,
+ state->direct_probe);
+ cw1200_resume_work(priv, &priv->link_id_gc_work,
+ state->link_id_gc);
+
+ /* Restart block ack stat */
+ spin_lock_bh(&priv->ba_lock);
+ if (priv->ba_cnt)
+ mod_timer(&priv->ba_timer,
+ jiffies + CW1200_BLOCK_ACK_INTERVAL);
+ spin_unlock_bh(&priv->ba_lock);
+
+ /* Remove UDP port filter */
+ wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
+
+ /* Remove ethernet frame type filter */
+ wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off);
+
+ /* Unlock datapath */
+ wsm_unlock_tx(priv);
+
+ /* Unlock scan */
+ up(&priv->scan.lock);
+
+ /* Unlock configuration mutex */
+ mutex_unlock(&priv->conf_mutex);
+
+ /* Free memory */
+ kfree(state);
+
+ return 0;
+}
diff --git a/drivers/staging/cw1200/pm.h b/drivers/staging/cw1200/pm.h
new file mode 100644
index 00000000000..0515f6cfb92
--- /dev/null
+++ b/drivers/staging/cw1200/pm.h
@@ -0,0 +1,49 @@
+/*
+ * Power management interface for the ST-Ericsson CW1200 mac80211 driver
+ *
+ * Copyright (c) 2011, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef PM_H_INCLUDED
+#define PM_H_INCLUDED
+
+#ifdef CONFIG_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+/* ******************************************************************** */
+/* mac80211 API */
+
+#ifdef CONFIG_PM
+
+/* extern */ struct cw1200_common;
+/* private */ struct cw1200_suspend_state;
+
+struct cw1200_pm_state {
+ struct cw1200_suspend_state *suspend_state;
+#ifdef CONFIG_WAKELOCK
+ struct wake_lock wakelock;
+#else
+ struct timer_list stay_awake;
+#endif
+ struct platform_device *pm_dev;
+ spinlock_t lock;
+};
+
+int cw1200_pm_init(struct cw1200_pm_state *pm,
+ struct cw1200_common *priv);
+void cw1200_pm_deinit(struct cw1200_pm_state *pm);
+void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+ unsigned long tmo);
+int cw1200_wow_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int cw1200_wow_resume(struct ieee80211_hw *hw);
+
+#endif /* CONFIG_PM */
+
+#endif
diff --git a/drivers/staging/cw1200/queue.c b/drivers/staging/cw1200/queue.c
new file mode 100644
index 00000000000..014e6b2a8a4
--- /dev/null
+++ b/drivers/staging/cw1200/queue.c
@@ -0,0 +1,584 @@
+/*
+ * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+#include <linux/sched.h>
+#include "queue.h"
+#include "cw1200.h"
+#include "debug.h"
+
+/* private */ struct cw1200_queue_item
+{
+ struct list_head head;
+ struct sk_buff *skb;
+ u32 packetID;
+ unsigned long queue_timestamp;
+ unsigned long xmit_timestamp;
+ struct cw1200_txpriv txpriv;
+ u8 generation;
+};
+
+static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
+{
+ struct cw1200_queue_stats *stats = queue->stats;
+ if (queue->tx_locked_cnt++ == 0) {
+ txrx_printk(KERN_DEBUG "[TX] Queue %d is locked.\n",
+ queue->queue_id);
+ ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
+ }
+}
+
+static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
+{
+ struct cw1200_queue_stats *stats = queue->stats;
+ BUG_ON(!queue->tx_locked_cnt);
+ if (--queue->tx_locked_cnt == 0) {
+ txrx_printk(KERN_DEBUG "[TX] Queue %d is unlocked.\n",
+ queue->queue_id);
+ ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
+ }
+}
+
+static inline void cw1200_queue_parse_id(u32 packetID, u8 *queue_generation,
+ u8 *queue_id,
+ u8 *item_generation,
+ u8 *item_id)
+{
+ *item_id = (packetID >> 0) & 0xFF;
+ *item_generation = (packetID >> 8) & 0xFF;
+ *queue_id = (packetID >> 16) & 0xFF;
+ *queue_generation = (packetID >> 24) & 0xFF;
+}
+
+static inline u32 cw1200_queue_make_packet_id(u8 queue_generation, u8 queue_id,
+ u8 item_generation, u8 item_id)
+{
+ return ((u32)item_id << 0) |
+ ((u32)item_generation << 8) |
+ ((u32)queue_id << 16) |
+ ((u32)queue_generation << 24);
+}
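+
+/*
+ * Illustrative only: a packetID is four packed bytes, e.g.
+ * cw1200_queue_make_packet_id(0x01, 0x02, 0x03, 0x04) == 0x01020304, and
+ * cw1200_queue_parse_id(0x01020304, ...) recovers queue_generation 0x01,
+ * queue_id 0x02, item_generation 0x03 and item_id 0x04.
+ */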
+
+static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
+ struct list_head *gc_list)
+{
+ struct cw1200_queue_item *item;
+
+ while (!list_empty(gc_list)) {
+ item = list_first_entry(
+ gc_list, struct cw1200_queue_item, head);
+ list_del(&item->head);
+ stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
+ kfree(item);
+ }
+}
+
+static void cw1200_queue_register_post_gc(struct list_head *gc_list,
+ struct cw1200_queue_item *item)
+{
+ struct cw1200_queue_item *gc_item;
+ gc_item = kmalloc(sizeof(struct cw1200_queue_item),
+ GFP_ATOMIC);
+ BUG_ON(!gc_item);
+ memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
+ list_add_tail(&gc_item->head, gc_list);
+}
+
+static void __cw1200_queue_gc(struct cw1200_queue *queue,
+ struct list_head *head,
+ bool unlock)
+{
+ struct cw1200_queue_stats *stats = queue->stats;
+ struct cw1200_queue_item *item = NULL;
+ bool wakeup_stats = false;
+
+ while (!list_empty(&queue->queue)) {
+ item = list_first_entry(
+ &queue->queue, struct cw1200_queue_item, head);
+ if (jiffies - item->queue_timestamp < queue->ttl)
+ break;
+ --queue->num_queued;
+ --queue->link_map_cache[item->txpriv.link_id];
+ spin_lock_bh(&stats->lock);
+ --stats->num_queued;
+ if (!--stats->link_map_cache[item->txpriv.link_id])
+ wakeup_stats = true;
+ spin_unlock_bh(&stats->lock);
+ cw1200_debug_tx_ttl(stats->priv);
+ cw1200_queue_register_post_gc(head, item);
+ item->skb = NULL;
+ list_move_tail(&item->head, &queue->free_pool);
+ }
+
+ if (wakeup_stats)
+ wake_up(&stats->wait_link_id_empty);
+
+ if (queue->overfull) {
+ if (queue->num_queued <= (queue->capacity >> 1)) {
+ queue->overfull = false;
+ if (unlock)
+ __cw1200_queue_unlock(queue);
+ } else {
+ unsigned long tmo = item->queue_timestamp + queue->ttl;
+ mod_timer(&queue->gc, tmo);
+ cw1200_pm_stay_awake(&stats->priv->pm_state,
+ tmo - jiffies);
+ }
+ }
+}
+
+static void cw1200_queue_gc(unsigned long arg)
+{
+ LIST_HEAD(list);
+ struct cw1200_queue *queue =
+ (struct cw1200_queue *)arg;
+
+ spin_lock_bh(&queue->lock);
+ __cw1200_queue_gc(queue, &list, true);
+ spin_unlock_bh(&queue->lock);
+ cw1200_queue_post_gc(queue->stats, &list);
+}
+
+int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
+ size_t map_capacity,
+ cw1200_queue_skb_dtor_t skb_dtor,
+ struct cw1200_common *priv)
+{
+ memset(stats, 0, sizeof(*stats));
+ stats->map_capacity = map_capacity;
+ stats->skb_dtor = skb_dtor;
+ stats->priv = priv;
+ spin_lock_init(&stats->lock);
+ init_waitqueue_head(&stats->wait_link_id_empty);
+
+ stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
+ GFP_KERNEL);
+ if (!stats->link_map_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int cw1200_queue_init(struct cw1200_queue *queue,
+ struct cw1200_queue_stats *stats,
+ u8 queue_id,
+ size_t capacity,
+ unsigned long ttl)
+{
+ size_t i;
+
+ memset(queue, 0, sizeof(*queue));
+ queue->stats = stats;
+ queue->capacity = capacity;
+ queue->queue_id = queue_id;
+ queue->ttl = ttl;
+ INIT_LIST_HEAD(&queue->queue);
+ INIT_LIST_HEAD(&queue->pending);
+ INIT_LIST_HEAD(&queue->free_pool);
+ spin_lock_init(&queue->lock);
+ init_timer(&queue->gc);
+ queue->gc.data = (unsigned long)queue;
+ queue->gc.function = cw1200_queue_gc;
+
+ queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
+ GFP_KERNEL);
+ if (!queue->pool)
+ return -ENOMEM;
+
+ queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
+ GFP_KERNEL);
+ if (!queue->link_map_cache) {
+ kfree(queue->pool);
+ queue->pool = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < capacity; ++i)
+ list_add_tail(&queue->pool[i].head, &queue->free_pool);
+
+ return 0;
+}
+
+int cw1200_queue_clear(struct cw1200_queue *queue)
+{
+ int i;
+ LIST_HEAD(gc_list);
+ struct cw1200_queue_stats *stats = queue->stats;
+
+ spin_lock_bh(&queue->lock);
+ queue->generation++;
+ list_splice_tail_init(&queue->queue, &queue->pending);
+ while (!list_empty(&queue->pending)) {
+ struct cw1200_queue_item *item = list_first_entry(
+ &queue->pending, struct cw1200_queue_item, head);
+ WARN_ON(!item->skb);
+ cw1200_queue_register_post_gc(&gc_list, item);
+ item->skb = NULL;
+ list_move_tail(&item->head, &queue->free_pool);
+ }
+ queue->num_queued = 0;
+ queue->num_pending = 0;
+
+ spin_lock_bh(&stats->lock);
+ for (i = 0; i < stats->map_capacity; ++i) {
+ stats->num_queued -= queue->link_map_cache[i];
+ stats->link_map_cache[i] -= queue->link_map_cache[i];
+ queue->link_map_cache[i] = 0;
+ }
+ spin_unlock_bh(&stats->lock);
+ if (unlikely(queue->overfull)) {
+ queue->overfull = false;
+ __cw1200_queue_unlock(queue);
+ }
+ spin_unlock_bh(&queue->lock);
+ wake_up(&stats->wait_link_id_empty);
+ cw1200_queue_post_gc(stats, &gc_list);
+ return 0;
+}
+
+void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
+{
+ kfree(stats->link_map_cache);
+ stats->link_map_cache = NULL;
+}
+
+void cw1200_queue_deinit(struct cw1200_queue *queue)
+{
+ cw1200_queue_clear(queue);
+ del_timer_sync(&queue->gc);
+ INIT_LIST_HEAD(&queue->free_pool);
+ kfree(queue->pool);
+ kfree(queue->link_map_cache);
+ queue->pool = NULL;
+ queue->link_map_cache = NULL;
+ queue->capacity = 0;
+}
+
+size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
+ u32 link_id_map)
+{
+ size_t ret;
+ int i, bit;
+ size_t map_capacity = queue->stats->map_capacity;
+
+ if (!link_id_map)
+ return 0;
+
+ spin_lock_bh(&queue->lock);
+ if (likely(link_id_map == (u32) -1))
+ ret = queue->num_queued - queue->num_pending;
+ else {
+ ret = 0;
+ for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
+ if (link_id_map & bit)
+ ret += queue->link_map_cache[i];
+ }
+ }
+ spin_unlock_bh(&queue->lock);
+ return ret;
+}
+
+int cw1200_queue_put(struct cw1200_queue *queue,
+ struct sk_buff *skb,
+ struct cw1200_txpriv *txpriv)
+{
+ int ret = 0;
+ LIST_HEAD(gc_list);
+ struct cw1200_queue_stats *stats = queue->stats;
+
+ if (txpriv->link_id >= queue->stats->map_capacity)
+ return -EINVAL;
+
+ spin_lock_bh(&queue->lock);
+ if (!WARN_ON(list_empty(&queue->free_pool))) {
+ struct cw1200_queue_item *item = list_first_entry(
+ &queue->free_pool, struct cw1200_queue_item, head);
+ BUG_ON(item->skb);
+
+ list_move_tail(&item->head, &queue->queue);
+ item->skb = skb;
+ item->txpriv = *txpriv;
+ item->generation = 0;
+ item->packetID = cw1200_queue_make_packet_id(
+ queue->generation, queue->queue_id,
+ item->generation, item - queue->pool);
+ item->queue_timestamp = jiffies;
+
+ ++queue->num_queued;
+ ++queue->link_map_cache[txpriv->link_id];
+
+ spin_lock_bh(&stats->lock);
+ ++stats->num_queued;
+ ++stats->link_map_cache[txpriv->link_id];
+ spin_unlock_bh(&stats->lock);
+
+ /*
+ * TX may happen in parallel sometimes.
+ * Leave extra queue slots so we don't overflow.
+ */
+ if (!queue->overfull &&
+ queue->num_queued >=
+ (queue->capacity - (num_present_cpus() - 1))) {
+ queue->overfull = true;
+ __cw1200_queue_lock(queue);
+ mod_timer(&queue->gc, jiffies);
+ }
+ } else {
+ ret = -ENOENT;
+ }
+ spin_unlock_bh(&queue->lock);
+ return ret;
+}
+
+int cw1200_queue_get(struct cw1200_queue *queue,
+ u32 link_id_map,
+ struct wsm_tx **tx,
+ struct ieee80211_tx_info **tx_info,
+ const struct cw1200_txpriv **txpriv)
+{
+ int ret = -ENOENT;
+ struct cw1200_queue_item *item;
+ struct cw1200_queue_stats *stats = queue->stats;
+ bool wakeup_stats = false;
+
+ spin_lock_bh(&queue->lock);
+ list_for_each_entry(item, &queue->queue, head) {
+ if (link_id_map & BIT(item->txpriv.link_id)) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (!WARN_ON(ret)) {
+ *tx = (struct wsm_tx *)item->skb->data;
+ *tx_info = IEEE80211_SKB_CB(item->skb);
+ *txpriv = &item->txpriv;
+ (*tx)->packetID = __cpu_to_le32(item->packetID);
+ list_move_tail(&item->head, &queue->pending);
+ ++queue->num_pending;
+ --queue->link_map_cache[item->txpriv.link_id];
+ item->xmit_timestamp = jiffies;
+
+ spin_lock_bh(&stats->lock);
+ --stats->num_queued;
+ if (!--stats->link_map_cache[item->txpriv.link_id])
+ wakeup_stats = true;
+ spin_unlock_bh(&stats->lock);
+ }
+ spin_unlock_bh(&queue->lock);
+ if (wakeup_stats)
+ wake_up(&stats->wait_link_id_empty);
+ return ret;
+}
+
+int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packetID)
+{
+ int ret = 0;
+ u8 queue_generation, queue_id, item_generation, item_id;
+ struct cw1200_queue_item *item;
+ struct cw1200_queue_stats *stats = queue->stats;
+
+ cw1200_queue_parse_id(packetID, &queue_generation, &queue_id,
+ &item_generation, &item_id);
+
+ item = &queue->pool[item_id];
+
+ spin_lock_bh(&queue->lock);
+ BUG_ON(queue_id != queue->queue_id);
+ if (unlikely(queue_generation != queue->generation)) {
+ ret = -ENOENT;
+ } else if (unlikely(item_id >= (unsigned) queue->capacity)) {
+ WARN_ON(1);
+ ret = -EINVAL;
+ } else if (unlikely(item->generation != item_generation)) {
+ WARN_ON(1);
+ ret = -ENOENT;
+ } else {
+ --queue->num_pending;
+ ++queue->link_map_cache[item->txpriv.link_id];
+
+ spin_lock_bh(&stats->lock);
+ ++stats->num_queued;
+ ++stats->link_map_cache[item->txpriv.link_id];
+ spin_unlock_bh(&stats->lock);
+
+ item->generation = ++item_generation;
+ item->packetID = cw1200_queue_make_packet_id(
+ queue_generation, queue_id, item_generation, item_id);
+ list_move(&item->head, &queue->queue);
+ }
+ spin_unlock_bh(&queue->lock);
+ return ret;
+}
+
+int cw1200_queue_requeue_all(struct cw1200_queue *queue)
+{
+ struct cw1200_queue_stats *stats = queue->stats;
+ spin_lock_bh(&queue->lock);
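+ /*
+ * Walk the pending list from its tail and move each item back to the
+ * head of the TX queue. Processing newest-first this way preserves the
+ * original submission order once all items have been requeued.
+ */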
+ while (!list_empty(&queue->pending)) {
+ struct cw1200_queue_item *item = list_entry(
+ queue->pending.prev, struct cw1200_queue_item, head);
+
+ --queue->num_pending;
+ ++queue->link_map_cache[item->txpriv.link_id];
+
+ spin_lock_bh(&stats->lock);
+ ++stats->num_queued;
+ ++stats->link_map_cache[item->txpriv.link_id];
+ spin_unlock_bh(&stats->lock);
+
+ ++item->generation;
+ item->packetID = cw1200_queue_make_packet_id(
+ queue->generation, queue->queue_id,
+ item->generation, item - queue->pool);
+ list_move(&item->head, &queue->queue);
+ }
+ spin_unlock_bh(&queue->lock);
+
+ return 0;
+}
+
+int cw1200_queue_remove(struct cw1200_queue *queue, u32 packetID)
+{
+ int ret = 0;
+ u8 queue_generation, queue_id, item_generation, item_id;
+ struct cw1200_queue_item *item;
+ struct cw1200_queue_stats *stats = queue->stats;
+ struct sk_buff *gc_skb = NULL;
+ struct cw1200_txpriv gc_txpriv;
+
+ cw1200_queue_parse_id(packetID, &queue_generation, &queue_id,
+ &item_generation, &item_id);
+
+ item = &queue->pool[item_id];
+
+ spin_lock_bh(&queue->lock);
+ BUG_ON(queue_id != queue->queue_id);
+ if (unlikely(queue_generation != queue->generation)) {
+ ret = -ENOENT;
+ } else if (unlikely(item_id >= (unsigned) queue->capacity)) {
+ WARN_ON(1);
+ ret = -EINVAL;
+ } else if (unlikely(item->generation != item_generation)) {
+ WARN_ON(1);
+ ret = -ENOENT;
+ } else {
+ gc_txpriv = item->txpriv;
+ gc_skb = item->skb;
+ item->skb = NULL;
+ --queue->num_pending;
+ --queue->num_queued;
+ ++queue->num_sent;
+ ++item->generation;
+ /* Do not use list_move_tail here, but list_move:
+ * try to utilize cache locality.
+ */
+ list_move(&item->head, &queue->free_pool);
+
+ if (unlikely(queue->overfull) &&
+ (queue->num_queued <= (queue->capacity >> 1))) {
+ queue->overfull = false;
+ __cw1200_queue_unlock(queue);
+ }
+ }
+ spin_unlock_bh(&queue->lock);
+
+ if (gc_skb)
+ stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);
+
+ return ret;
+}
+
+int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packetID,
+ struct sk_buff **skb,
+ const struct cw1200_txpriv **txpriv)
+{
+ int ret = 0;
+ u8 queue_generation, queue_id, item_generation, item_id;
+ struct cw1200_queue_item *item;
+ cw1200_queue_parse_id(packetID, &queue_generation, &queue_id,
+ &item_generation, &item_id);
+
+ item = &queue->pool[item_id];
+
+ spin_lock_bh(&queue->lock);
+ BUG_ON(queue_id != queue->queue_id);
+ if (unlikely(queue_generation != queue->generation)) {
+ ret = -ENOENT;
+ } else if (unlikely(item_id >= (unsigned) queue->capacity)) {
+ WARN_ON(1);
+ ret = -EINVAL;
+ } else if (unlikely(item->generation != item_generation)) {
+ WARN_ON(1);
+ ret = -ENOENT;
+ } else {
+ *skb = item->skb;
+ *txpriv = &item->txpriv;
+ }
+ spin_unlock_bh(&queue->lock);
+ return ret;
+}
+
+void cw1200_queue_lock(struct cw1200_queue *queue)
+{
+ spin_lock_bh(&queue->lock);
+ __cw1200_queue_lock(queue);
+ spin_unlock_bh(&queue->lock);
+}
+
+void cw1200_queue_unlock(struct cw1200_queue *queue)
+{
+ spin_lock_bh(&queue->lock);
+ __cw1200_queue_unlock(queue);
+ spin_unlock_bh(&queue->lock);
+}
+
+bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
+ unsigned long *timestamp)
+{
+ struct cw1200_queue_item *item;
+ bool ret;
+
+ spin_lock_bh(&queue->lock);
+ ret = !list_empty(&queue->pending);
+ if (ret) {
+ list_for_each_entry(item, &queue->pending, head) {
+ if (time_before(item->xmit_timestamp, *timestamp))
+ *timestamp = item->xmit_timestamp;
+ }
+ }
+ spin_unlock_bh(&queue->lock);
+ return ret;
+}
+
+bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
+ u32 link_id_map)
+{
+ bool empty = true;
+
+ spin_lock_bh(&stats->lock);
+ if (link_id_map == (u32)-1)
+ empty = stats->num_queued == 0;
+ else {
+ int i;
+ for (i = 0; i < stats->map_capacity; ++i) {
+ if (link_id_map & BIT(i)) {
+ if (stats->link_map_cache[i]) {
+ empty = false;
+ break;
+ }
+ }
+ }
+ }
+ spin_unlock_bh(&stats->lock);
+
+ return empty;
+}
diff --git a/drivers/staging/cw1200/queue.h b/drivers/staging/cw1200/queue.h
new file mode 100644
index 00000000000..aa9a7f9444c
--- /dev/null
+++ b/drivers/staging/cw1200/queue.h
@@ -0,0 +1,116 @@
+/*
+ * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_QUEUE_H_INCLUDED
+#define CW1200_QUEUE_H_INCLUDED
+
+/* private */ struct cw1200_queue_item;
+
+/* extern */ struct sk_buff;
+/* extern */ struct wsm_tx;
+/* extern */ struct cw1200_common;
+/* extern */ struct ieee80211_tx_queue_stats;
+/* extern */ struct cw1200_txpriv;
+
+/* forward */ struct cw1200_queue_stats;
+
+typedef void (*cw1200_queue_skb_dtor_t)(struct cw1200_common *priv,
+ struct sk_buff *skb,
+ const struct cw1200_txpriv *txpriv);
+
+struct cw1200_queue {
+ struct cw1200_queue_stats *stats;
+ size_t capacity;
+ size_t num_queued;
+ size_t num_pending;
+ size_t num_sent;
+ struct cw1200_queue_item *pool;
+ struct list_head queue;
+ struct list_head free_pool;
+ struct list_head pending;
+ int tx_locked_cnt;
+ int *link_map_cache;
+ bool overfull;
+ spinlock_t lock;
+ u8 queue_id;
+ u8 generation;
+ struct timer_list gc;
+ unsigned long ttl;
+};
+
+struct cw1200_queue_stats {
+ spinlock_t lock;
+ int *link_map_cache;
+ int num_queued;
+ size_t map_capacity;
+ wait_queue_head_t wait_link_id_empty;
+ cw1200_queue_skb_dtor_t skb_dtor;
+ struct cw1200_common *priv;
+};
+
+struct cw1200_txpriv {
+ u8 link_id;
+ u8 raw_link_id;
+ u8 tid;
+ u8 rate_id;
+ u8 offset;
+};
+
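+/*
+ * Typical usage, as suggested by the API below (informal sketch only):
+ *
+ *	cw1200_queue_stats_init(&stats, ...);
+ *	cw1200_queue_init(&queue, &stats, queue_id, capacity, ttl);
+ *	...
+ *	cw1200_queue_put() on TX, cw1200_queue_get() to fetch a frame for
+ *	the device, then cw1200_queue_requeue() or cw1200_queue_remove()
+ *	once the outcome is known;
+ *	...
+ *	cw1200_queue_deinit(&queue);
+ *	cw1200_queue_stats_deinit(&stats);
+ */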
+int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
+ size_t map_capacity,
+ cw1200_queue_skb_dtor_t skb_dtor,
+ struct cw1200_common *priv);
+int cw1200_queue_init(struct cw1200_queue *queue,
+ struct cw1200_queue_stats *stats,
+ u8 queue_id,
+ size_t capacity,
+ unsigned long ttl);
+int cw1200_queue_clear(struct cw1200_queue *queue);
+void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats);
+void cw1200_queue_deinit(struct cw1200_queue *queue);
+
+size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
+ u32 link_id_map);
+int cw1200_queue_put(struct cw1200_queue *queue,
+ struct sk_buff *skb,
+ struct cw1200_txpriv *txpriv);
+int cw1200_queue_get(struct cw1200_queue *queue,
+ u32 link_id_map,
+ struct wsm_tx **tx,
+ struct ieee80211_tx_info **tx_info,
+ const struct cw1200_txpriv **txpriv);
+int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packetID);
+int cw1200_queue_requeue_all(struct cw1200_queue *queue);
+int cw1200_queue_remove(struct cw1200_queue *queue,
+ u32 packetID);
+int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packetID,
+ struct sk_buff **skb,
+ const struct cw1200_txpriv **txpriv);
+void cw1200_queue_lock(struct cw1200_queue *queue);
+void cw1200_queue_unlock(struct cw1200_queue *queue);
+bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
+ unsigned long *timestamp);
+
+
+bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
+ u32 link_id_map);
+
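+/*
+ * packetID layout, as inferred from the accessors below and from the
+ * make/parse helpers in queue.c (informational only):
+ *
+ *	bits 31..24: queue generation
+ *	bits 23..16: queue id
+ *	bits 15..8:  item generation
+ *	bits  7..0:  item index in the pool
+ */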
+static inline u8 cw1200_queue_get_queue_id(u32 packetID)
+{
+ return (packetID >> 16) & 0xFF;
+}
+
+static inline u8 cw1200_queue_get_generation(u32 packetID)
+{
+ return (packetID >> 8) & 0xFF;
+}
+
+#endif /* CW1200_QUEUE_H_INCLUDED */
diff --git a/drivers/staging/cw1200/sbus.h b/drivers/staging/cw1200/sbus.h
new file mode 100644
index 00000000000..49bd06d20e5
--- /dev/null
+++ b/drivers/staging/cw1200/sbus.h
@@ -0,0 +1,39 @@
+/*
+ * Common sbus abstraction layer interface for cw1200 wireless driver
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_SBUS_H
+#define CW1200_SBUS_H
+
+/*
+ * Forward declaration of the sbus private context.
+ * It is implemented and instantiated by the particular bus modules.
+ */
+struct sbus_priv;
+
+typedef void (*sbus_irq_handler)(void *priv);
+
+struct sbus_ops {
+ int (*sbus_memcpy_fromio)(struct sbus_priv *self, unsigned int addr,
+ void *dst, int count);
+ int (*sbus_memcpy_toio)(struct sbus_priv *self, unsigned int addr,
+ const void *src, int count);
+ void (*lock)(struct sbus_priv *self);
+ void (*unlock)(struct sbus_priv *self);
+ int (*irq_subscribe)(struct sbus_priv *self, sbus_irq_handler handler,
+ void *priv);
+ int (*irq_unsubscribe)(struct sbus_priv *self);
+ int (*reset)(struct sbus_priv *self);
+ size_t (*align_size)(struct sbus_priv *self, size_t size);
+ int (*power_mgmt)(struct sbus_priv *self, bool suspend);
+ int (*set_block_size)(struct sbus_priv *self, size_t size);
+};
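+
+/*
+ * A bus glue module (e.g. SDIO or SPI) is expected to implement these
+ * callbacks and hand a filled-in ops table, together with its sbus_priv
+ * context, to the driver core at probe time. Illustrative sketch only;
+ * the callback names below are hypothetical:
+ *
+ *	static struct sbus_ops cw1200_sdio_sbus_ops = {
+ *		.sbus_memcpy_fromio	= cw1200_sdio_memcpy_fromio,
+ *		.sbus_memcpy_toio	= cw1200_sdio_memcpy_toio,
+ *		.lock			= cw1200_sdio_lock,
+ *		.unlock			= cw1200_sdio_unlock,
+ *		.irq_subscribe		= cw1200_sdio_irq_subscribe,
+ *		.irq_unsubscribe	= cw1200_sdio_irq_unsubscribe,
+ *		.reset			= cw1200_sdio_reset,
+ *		.align_size		= cw1200_sdio_align_size,
+ *		.power_mgmt		= cw1200_sdio_pm,
+ *		.set_block_size		= cw1200_sdio_set_block_size,
+ *	};
+ */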
+
+#endif /* CW1200_SBUS_H */
diff --git a/drivers/staging/cw1200/scan.c b/drivers/staging/cw1200/scan.c
new file mode 100644
index 00000000000..b12af9ded62
--- /dev/null
+++ b/drivers/staging/cw1200/scan.c
@@ -0,0 +1,446 @@
+/*
+ * Scan implementation for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include "cw1200.h"
+#include "scan.h"
+#include "sta.h"
+#include "pm.h"
+
+static void cw1200_scan_restart_delayed(struct cw1200_common *priv);
+
+static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan)
+{
+ int ret, i;
+ int tmo = 2000;
+
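+ /*
+ * Rough overall scan duration estimate: 2 s of slack plus the
+ * per-channel dwell time (maxChannelTime plus a small margin),
+ * treated as milliseconds and converted to jiffies below for both
+ * the PM stay-awake request and the scan-timeout work.
+ */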
+ for (i = 0; i < scan->numOfChannels; ++i)
+ tmo += scan->ch[i].maxChannelTime + 10;
+
+ atomic_set(&priv->scan.in_progress, 1);
+ cw1200_pm_stay_awake(&priv->pm_state, tmo * HZ / 1000);
+ queue_delayed_work(priv->workqueue, &priv->scan.timeout,
+ tmo * HZ / 1000);
+ ret = wsm_scan(priv, scan);
+ if (unlikely(ret)) {
+ atomic_set(&priv->scan.in_progress, 0);
+ cancel_delayed_work_sync(&priv->scan.timeout);
+ cw1200_scan_restart_delayed(priv);
+ }
+ return ret;
+}
+
+int cw1200_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct cw1200_common *priv = hw->priv;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
+ };
+ int i;
+
+ if (!priv->vif)
+ return -EINVAL;
+
+ /* Scanning while acting as P2P_GO corrupts the firmware's MiniAP mode */
+ if (priv->join_status == CW1200_JOIN_STATUS_AP)
+ return -EOPNOTSUPP;
+
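+ /*
+ * A scan request with a single zero-length SSID is mac80211's way of
+ * asking for a wildcard (broadcast) scan; normalize it to "no SSIDs"
+ * so no directed SSID is sent to the firmware.
+ */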
+ if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
+ req->n_ssids = 0;
+
+ wiphy_dbg(hw->wiphy, "[SCAN] Scan request for %d SSIDs.\n",
+ req->n_ssids);
+
+ if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
+ return -EINVAL;
+
+ frame.skb = ieee80211_probereq_get(hw, priv->vif, NULL, 0,
+ req->ie, req->ie_len);
+ if (!frame.skb)
+ return -ENOMEM;
+
+ /* will be unlocked in cw1200_scan_work() */
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+ if (frame.skb) {
+ int ret = wsm_set_template_frame(priv, &frame);
+ if (!ret) {
+ /*
+ * set empty probe response template in order
+ * to receive probe requests from firmware
+ */
+ frame.frame_type = WSM_FRAME_TYPE_PROBE_RESPONSE;
+ frame.disable = true;
+ ret = wsm_set_template_frame(priv, &frame);
+ }
+ if (ret) {
+ mutex_unlock(&priv->conf_mutex);
+ up(&priv->scan.lock);
+ dev_kfree_skb(frame.skb);
+ return ret;
+ }
+ }
+
+ wsm_lock_tx(priv);
+
+ BUG_ON(priv->scan.req);
+ priv->scan.req = req;
+ priv->scan.n_ssids = 0;
+ priv->scan.status = 0;
+ priv->scan.begin = &req->channels[0];
+ priv->scan.curr = priv->scan.begin;
+ priv->scan.end = &req->channels[req->n_channels];
+ priv->scan.output_power = priv->output_power;
+
+ for (i = 0; i < req->n_ssids; ++i) {
+ struct wsm_ssid *dst =
+ &priv->scan.ssids[priv->scan.n_ssids];
+ BUG_ON(req->ssids[i].ssid_len > sizeof(dst->ssid));
+ memcpy(&dst->ssid[0], req->ssids[i].ssid,
+ sizeof(dst->ssid));
+ dst->length = req->ssids[i].ssid_len;
+ ++priv->scan.n_ssids;
+ }
+
+ mutex_unlock(&priv->conf_mutex);
+
+ if (frame.skb)
+ dev_kfree_skb(frame.skb);
+ queue_work(priv->workqueue, &priv->scan.work);
+ return 0;
+}
+
+void cw1200_scan_work(struct work_struct *work)
+{
+ struct cw1200_common *priv = container_of(work, struct cw1200_common,
+ scan.work);
+ struct ieee80211_channel **it;
+ struct wsm_scan scan = {
+ .scanType = WSM_SCAN_TYPE_FOREGROUND,
+ .scanFlags = WSM_SCAN_FLAG_SPLIT_METHOD,
+ };
+ bool first_run = priv->scan.begin == priv->scan.curr &&
+ priv->scan.begin != priv->scan.end;
+ int i;
+
+ if (first_run) {
+ /* Firmware misbehaves if a scan request is sent
+ * while the STA is joined but not yet associated.
+ * Force unjoin in this case. */
+ if (cancel_delayed_work_sync(&priv->join_timeout) > 0)
+ cw1200_join_timeout(&priv->join_timeout.work);
+ }
+
+ mutex_lock(&priv->conf_mutex);
+
+ if (first_run) {
+ if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+ !(priv->powersave_mode.pmMode & WSM_PSM_PS)) {
+ struct wsm_set_pm pm = priv->powersave_mode;
+ pm.pmMode = WSM_PSM_PS;
+ cw1200_set_pm(priv, &pm);
+ } else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
+ /* FW bug: driver has to restart p2p-dev mode
+ * after scan */
+ cw1200_disable_listening(priv);
+ }
+ }
+
+ if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) {
+ if (priv->scan.output_power != priv->output_power)
+ WARN_ON(wsm_set_output_power(priv,
+ priv->output_power * 10));
+ if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+ !(priv->powersave_mode.pmMode & WSM_PSM_PS))
+ cw1200_set_pm(priv, &priv->powersave_mode);
+
+ if (priv->scan.status < 0)
+ wiphy_dbg(priv->hw->wiphy,
+ "[SCAN] Scan failed (%d).\n",
+ priv->scan.status);
+ else if (priv->scan.req)
+ wiphy_dbg(priv->hw->wiphy,
+ "[SCAN] Scan completed.\n");
+ else
+ wiphy_dbg(priv->hw->wiphy,
+ "[SCAN] Scan canceled.\n");
+
+ priv->scan.req = NULL;
+ cw1200_scan_restart_delayed(priv);
+ wsm_unlock_tx(priv);
+ mutex_unlock(&priv->conf_mutex);
+ ieee80211_scan_completed(priv->hw, priv->scan.status ? 1 : 0);
+ up(&priv->scan.lock);
+ return;
+ } else {
+ struct ieee80211_channel *first = *priv->scan.curr;
+ for (it = priv->scan.curr + 1, i = 1;
+ it != priv->scan.end && i < WSM_SCAN_MAX_NUM_OF_CHANNELS;
+ ++it, ++i) {
+ if ((*it)->band != first->band)
+ break;
+ if (((*it)->flags ^ first->flags) &
+ IEEE80211_CHAN_PASSIVE_SCAN)
+ break;
+ if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ (*it)->max_power != first->max_power)
+ break;
+ }
+ scan.band = first->band;
+
+ if (priv->scan.req->no_cck)
+ scan.maxTransmitRate = WSM_TRANSMIT_RATE_6;
+ else
+ scan.maxTransmitRate = WSM_TRANSMIT_RATE_1;
+ /* TODO: Is it optimal? */
+ scan.numOfProbeRequests =
+ (first->flags & IEEE80211_CHAN_PASSIVE_SCAN) ? 0 : 2;
+ scan.numOfSSIDs = priv->scan.n_ssids;
+ scan.ssids = &priv->scan.ssids[0];
+ scan.numOfChannels = it - priv->scan.curr;
+ /* TODO: Is it optimal? */
+ scan.probeDelay = 100;
+ /* It is not stated in the WSM specification, but the
+ * FW team says the driver must not use a foreground (FG)
+ * scan while joined. */
+ if (priv->join_status == CW1200_JOIN_STATUS_STA) {
+ scan.scanType = WSM_SCAN_TYPE_BACKGROUND;
+ scan.scanFlags = WSM_SCAN_FLAG_FORCE_BACKGROUND;
+ }
+ scan.ch = kcalloc(it - priv->scan.curr,
+ sizeof(struct wsm_scan_ch),
+ GFP_KERNEL);
+ if (!scan.ch) {
+ priv->scan.status = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < scan.numOfChannels; ++i) {
+ scan.ch[i].number = priv->scan.curr[i]->hw_value;
+ scan.ch[i].minChannelTime = 50;
+ scan.ch[i].maxChannelTime = 110;
+ }
+ if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ priv->scan.output_power != first->max_power) {
+ priv->scan.output_power = first->max_power;
+ WARN_ON(wsm_set_output_power(priv,
+ priv->scan.output_power * 10));
+ }
+ priv->scan.status = cw1200_scan_start(priv, &scan);
+ kfree(scan.ch);
+ if (priv->scan.status)
+ goto fail;
+ priv->scan.curr = it;
+ }
+ mutex_unlock(&priv->conf_mutex);
+ return;
+
+fail:
+ priv->scan.curr = priv->scan.end;
+ mutex_unlock(&priv->conf_mutex);
+ queue_work(priv->workqueue, &priv->scan.work);
+ return;
+}
+
+static void cw1200_scan_restart_delayed(struct cw1200_common *priv)
+{
+ if (priv->delayed_link_loss) {
+ int tmo = priv->cqm_beacon_loss_count;
+
+ if (priv->scan.direct_probe)
+ tmo = 0;
+
+ priv->delayed_link_loss = 0;
+ /* Restart beacon loss timer and requeue
+ BSS loss work. */
+ wiphy_dbg(priv->hw->wiphy,
+ "[CQM] Requeue BSS loss in %d "
+ "beacons.\n", tmo);
+ spin_lock(&priv->bss_loss_lock);
+ priv->bss_loss_status = CW1200_BSS_LOSS_NONE;
+ spin_unlock(&priv->bss_loss_lock);
+ cancel_delayed_work_sync(&priv->bss_loss_work);
+ queue_delayed_work(priv->workqueue,
+ &priv->bss_loss_work,
+ tmo * HZ / 10);
+ }
+
+ /* FW bug: driver has to restart p2p-dev mode after scan. */
+ if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
+ cw1200_enable_listening(priv);
+ cw1200_update_filtering(priv);
+ }
+
+ if (priv->delayed_unjoin) {
+ priv->delayed_unjoin = false;
+ if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+ }
+}
+
+static void cw1200_scan_complete(struct cw1200_common *priv)
+{
+ if (priv->scan.direct_probe) {
+ wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe complete.\n");
+ cw1200_scan_restart_delayed(priv);
+ priv->scan.direct_probe = 0;
+ up(&priv->scan.lock);
+ wsm_unlock_tx(priv);
+ } else {
+ cw1200_scan_work(&priv->scan.work);
+ }
+}
+
+void cw1200_scan_complete_cb(struct cw1200_common *priv,
+ struct wsm_scan_complete *arg)
+{
+ if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
+ /* STA is stopped. */
+ return;
+
+ if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) {
+ priv->scan.status = 1;
+ queue_delayed_work(priv->workqueue,
+ &priv->scan.timeout, 0);
+ }
+}
+
+void cw1200_scan_timeout(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, scan.timeout.work);
+ if (likely(atomic_xchg(&priv->scan.in_progress, 0))) {
+ if (priv->scan.status > 0)
+ priv->scan.status = 0;
+ else if (!priv->scan.status) {
+ wiphy_warn(priv->hw->wiphy,
+ "Timeout waiting for scan "
+ "complete notification.\n");
+ priv->scan.status = -ETIMEDOUT;
+ priv->scan.curr = priv->scan.end;
+ WARN_ON(wsm_stop_scan(priv));
+ }
+ cw1200_scan_complete(priv);
+ }
+}
+
+void cw1200_probe_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, scan.probe_work.work);
+ u8 queueId = cw1200_queue_get_queue_id(priv->pending_frame_id);
+ struct cw1200_queue *queue = &priv->tx_queue[queueId];
+ const struct cw1200_txpriv *txpriv;
+ struct wsm_tx *wsm;
+ struct wsm_template_frame frame = {
+ .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
+ };
+ struct wsm_ssid ssids[1] = {{
+ .length = 0,
+ } };
+ struct wsm_scan_ch ch[1] = {{
+ .minChannelTime = 0,
+ .maxChannelTime = 10,
+ } };
+ struct wsm_scan scan = {
+ .scanType = WSM_SCAN_TYPE_FOREGROUND,
+ .numOfProbeRequests = 1,
+ .probeDelay = 0,
+ .numOfChannels = 1,
+ .ssids = ssids,
+ .ch = ch,
+ };
+ u8 *ies;
+ size_t ies_len;
+ int ret;
+
+ wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe work.\n");
+
+ BUG_ON(queueId >= 4);
+ BUG_ON(!priv->channel);
+
+ mutex_lock(&priv->conf_mutex);
+ if (unlikely(down_trylock(&priv->scan.lock))) {
+ /* Scan is already in progress. Requeue self. */
+ schedule();
+ queue_delayed_work(priv->workqueue,
+ &priv->scan.probe_work, HZ / 10);
+ mutex_unlock(&priv->conf_mutex);
+ return;
+ }
+
+ if (cw1200_queue_get_skb(queue, priv->pending_frame_id,
+ &frame.skb, &txpriv)) {
+ up(&priv->scan.lock);
+ mutex_unlock(&priv->conf_mutex);
+ wsm_unlock_tx(priv);
+ return;
+ }
+ wsm = (struct wsm_tx *)frame.skb->data;
+ scan.maxTransmitRate = wsm->maxTxRate;
+ scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
+ if (priv->join_status == CW1200_JOIN_STATUS_STA) {
+ scan.scanType = WSM_SCAN_TYPE_BACKGROUND;
+ scan.scanFlags = WSM_SCAN_FLAG_FORCE_BACKGROUND;
+ }
+ ch[0].number = priv->channel->hw_value;
+
+ skb_pull(frame.skb, txpriv->offset);
+
+ ies = &frame.skb->data[sizeof(struct ieee80211_hdr_3addr)];
+ ies_len = frame.skb->len - sizeof(struct ieee80211_hdr_3addr);
+
+ if (ies_len) {
+ u8 *ssidie =
+ (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len);
+ if (ssidie && ssidie[1] && ssidie[1] <= sizeof(ssids[0].ssid)) {
+ u8 *nextie = &ssidie[2 + ssidie[1]];
+ /* Remove the SSID from the IE list. It has to be provided
+ * as a separate argument in the cw1200_scan_start call */
+
+ /* Store the SSID locally */
+ ssids[0].length = ssidie[1];
+ memcpy(ssids[0].ssid, &ssidie[2], ssids[0].length);
+ scan.numOfSSIDs = 1;
+
+ /* Remove SSID from IE list */
+ ssidie[1] = 0;
+ memmove(&ssidie[2], nextie, &ies[ies_len] - nextie);
+ skb_trim(frame.skb, frame.skb->len - ssids[0].length);
+ }
+ }
+
+ /* FW bug: driver has to restart p2p-dev mode after scan */
+ if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
+ cw1200_disable_listening(priv);
+ ret = WARN_ON(wsm_set_template_frame(priv, &frame));
+ priv->scan.direct_probe = 1;
+ if (!ret) {
+ wsm_flush_tx(priv);
+ ret = WARN_ON(cw1200_scan_start(priv, &scan));
+ }
+ mutex_unlock(&priv->conf_mutex);
+
+ skb_push(frame.skb, txpriv->offset);
+ if (!ret)
+ IEEE80211_SKB_CB(frame.skb)->flags |= IEEE80211_TX_STAT_ACK;
+ BUG_ON(cw1200_queue_remove(queue, priv->pending_frame_id));
+
+ if (ret) {
+ priv->scan.direct_probe = 0;
+ up(&priv->scan.lock);
+ wsm_unlock_tx(priv);
+ }
+
+ return;
+}
diff --git a/drivers/staging/cw1200/scan.h b/drivers/staging/cw1200/scan.h
new file mode 100644
index 00000000000..abffd1d796f
--- /dev/null
+++ b/drivers/staging/cw1200/scan.h
@@ -0,0 +1,54 @@
+/*
+ * Scan interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SCAN_H_INCLUDED
+#define SCAN_H_INCLUDED
+
+#include <linux/semaphore.h>
+#include "wsm.h"
+
+/* external */ struct sk_buff;
+/* external */ struct cfg80211_scan_request;
+/* external */ struct ieee80211_channel;
+/* external */ struct ieee80211_hw;
+/* external */ struct work_struct;
+
+struct cw1200_scan {
+ struct semaphore lock;
+ struct work_struct work;
+ struct delayed_work timeout;
+ struct cfg80211_scan_request *req;
+ struct ieee80211_channel **begin;
+ struct ieee80211_channel **curr;
+ struct ieee80211_channel **end;
+ struct wsm_ssid ssids[WSM_SCAN_MAX_NUM_OF_SSIDS];
+ int output_power;
+ int n_ssids;
+ int status;
+ atomic_t in_progress;
+ /* Direct probe requests workaround */
+ struct delayed_work probe_work;
+ int direct_probe;
+};
+
+int cw1200_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void cw1200_scan_work(struct work_struct *work);
+void cw1200_scan_timeout(struct work_struct *work);
+void cw1200_scan_complete_cb(struct cw1200_common *priv,
+ struct wsm_scan_complete *arg);
+
+/* ******************************************************************** */
+/* Raw probe requests TX workaround */
+void cw1200_probe_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/staging/cw1200/sta.c b/drivers/staging/cw1200/sta.c
new file mode 100644
index 00000000000..25d414c39f7
--- /dev/null
+++ b/drivers/staging/cw1200/sta.c
@@ -0,0 +1,1638 @@
+/*
+ * Mac80211 STA API for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+
+#include "cw1200.h"
+#include "sta.h"
+#include "ap.h"
+#include "fwio.h"
+#include "bh.h"
+#include "debug.h"
+
+#if defined(CONFIG_CW1200_STA_DEBUG)
+#define sta_printk(...) printk(__VA_ARGS__)
+#else
+#define sta_printk(...)
+#endif
+
+static inline void __cw1200_free_event_queue(struct list_head *list)
+{
+ while (!list_empty(list)) {
+ struct cw1200_wsm_event *event =
+ list_first_entry(list, struct cw1200_wsm_event,
+ link);
+ list_del(&event->link);
+ kfree(event);
+ }
+}
+
+/* ******************************************************************** */
+/* STA API */
+
+int cw1200_start(struct ieee80211_hw *dev)
+{
+ struct cw1200_common *priv = dev->priv;
+ int ret = 0;
+
+ mutex_lock(&priv->conf_mutex);
+
+ /* default EDCA */
+ WSM_EDCA_SET(&priv->edca, 0, 0x0002, 0x0003, 0x0007, 47, 0xc8, false);
+ WSM_EDCA_SET(&priv->edca, 1, 0x0002, 0x0007, 0x000f, 94, 0xc8, false);
+ WSM_EDCA_SET(&priv->edca, 2, 0x0003, 0x000f, 0x03ff, 0, 0xc8, false);
+ WSM_EDCA_SET(&priv->edca, 3, 0x0007, 0x000f, 0x03ff, 0, 0xc8, false);
+ ret = wsm_set_edca_params(priv, &priv->edca);
+ if (WARN_ON(ret))
+ goto out;
+
+ ret = cw1200_set_uapsd_param(priv, &priv->edca);
+ if (WARN_ON(ret))
+ goto out;
+
+ priv->setbssparams_done = false;
+
+ memset(priv->bssid, ~0, ETH_ALEN);
+ memcpy(priv->mac_addr, dev->wiphy->perm_addr, ETH_ALEN);
+ priv->mode = NL80211_IFTYPE_MONITOR;
+ priv->softled_state = 0;
+ priv->wep_default_key_id = -1;
+
+ priv->cqm_link_loss_count = 60;
+ priv->cqm_beacon_loss_count = 20;
+
+ /* Temporary configuration - beacon filter table */
+ priv->bf_table.numOfIEs = __cpu_to_le32(2);
+ priv->bf_table.entry[0].ieId = WLAN_EID_VENDOR_SPECIFIC;
+ priv->bf_table.entry[0].actionFlags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+ WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+ WSM_BEACON_FILTER_IE_HAS_APPEARED;
+ priv->bf_table.entry[0].oui[0] = 0x50;
+ priv->bf_table.entry[0].oui[1] = 0x6F;
+ priv->bf_table.entry[0].oui[2] = 0x9A;
+ priv->bf_table.entry[1].ieId = WLAN_EID_ERP_INFO;
+ priv->bf_table.entry[1].actionFlags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+ WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+ WSM_BEACON_FILTER_IE_HAS_APPEARED;
+
+ priv->bf_control.enabled = 1;
+ ret = cw1200_setup_mac(priv);
+ if (WARN_ON(ret))
+ goto out;
+
+ /* err = cw1200_set_leds(priv); */
+
+out:
+ mutex_unlock(&priv->conf_mutex);
+ return ret;
+}
+
+void cw1200_stop(struct ieee80211_hw *dev)
+{
+ struct cw1200_common *priv = dev->priv;
+ LIST_HEAD(list);
+ int i;
+
+ wsm_lock_tx(priv);
+
+ while (down_trylock(&priv->scan.lock)) {
+ /* Scan is in progress. Force it to stop. */
+ priv->scan.req = NULL;
+ schedule();
+ }
+ up(&priv->scan.lock);
+
+ cancel_delayed_work_sync(&priv->scan.probe_work);
+ cancel_delayed_work_sync(&priv->scan.timeout);
+ cancel_delayed_work_sync(&priv->join_timeout);
+ cancel_delayed_work_sync(&priv->bss_loss_work);
+ cancel_delayed_work_sync(&priv->connection_loss_work);
+ cancel_delayed_work_sync(&priv->link_id_gc_work);
+ flush_workqueue(priv->workqueue);
+ del_timer_sync(&priv->mcast_timeout);
+ del_timer_sync(&priv->ba_timer);
+
+ mutex_lock(&priv->conf_mutex);
+ priv->mode = NL80211_IFTYPE_UNSPECIFIED;
+ priv->listening = false;
+
+ priv->softled_state = 0;
+ /* cw1200_set_leds(priv); */
+
+ spin_lock(&priv->event_queue_lock);
+ list_splice_init(&priv->event_queue, &list);
+ spin_unlock(&priv->event_queue_lock);
+ __cw1200_free_event_queue(&list);
+
+ priv->delayed_link_loss = 0;
+
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+
+ for (i = 0; i < 4; i++)
+ cw1200_queue_clear(&priv->tx_queue[i]);
+
+ /* HACK! */
+ if (atomic_xchg(&priv->tx_lock, 1) != 1)
+ sta_printk(KERN_DEBUG "[STA] TX is force-unlocked "
+ "due to stop request.\n");
+
+ wsm_unlock_tx(priv);
+
+ mutex_unlock(&priv->conf_mutex);
+}
+
+int cw1200_add_interface(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif)
+{
+ int ret;
+ struct cw1200_common *priv = dev->priv;
+ /* __le32 auto_calibration_mode = __cpu_to_le32(1); */
+
+ mutex_lock(&priv->conf_mutex);
+
+ if (priv->mode != NL80211_IFTYPE_MONITOR) {
+ mutex_unlock(&priv->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ priv->mode = vif->type;
+ break;
+ default:
+ mutex_unlock(&priv->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ priv->vif = vif;
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
+
+ ret = WARN_ON(cw1200_setup_mac(priv));
+ /* Enable auto-calibration */
+ /* Causes an exception in a subsequent channel switch; disabled.
+ WARN_ON(wsm_write_mib(priv, WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE,
+ &auto_calibration_mode, sizeof(auto_calibration_mode)));
+ */
+
+ mutex_unlock(&priv->conf_mutex);
+ return ret;
+}
+
+void cw1200_remove_interface(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif)
+{
+ struct cw1200_common *priv = dev->priv;
+ struct wsm_reset reset = {
+ .reset_statistics = true,
+ };
+ int i;
+
+ mutex_lock(&priv->conf_mutex);
+ wsm_lock_tx(priv);
+ switch (priv->join_status) {
+ case CW1200_JOIN_STATUS_STA:
+ wsm_lock_tx(priv);
+ if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+ break;
+ case CW1200_JOIN_STATUS_AP:
+ for (i = 0; priv->link_id_map; ++i) {
+ if (priv->link_id_map & BIT(i)) {
+ reset.link_id = i;
+ wsm_reset(priv, &reset);
+ priv->link_id_map &= ~BIT(i);
+ }
+ }
+ memset(priv->link_id_db, 0,
+ sizeof(priv->link_id_db));
+ priv->sta_asleep_mask = 0;
+ priv->enable_beacon = false;
+ priv->tx_multicast = false;
+ priv->aid0_bit_set = false;
+ priv->buffered_multicasts = false;
+ priv->pspoll_mask = 0;
+ reset.link_id = 0;
+ wsm_reset(priv, &reset);
+ break;
+ case CW1200_JOIN_STATUS_MONITOR:
+ cw1200_update_listening(priv, false);
+ break;
+ default:
+ break;
+ }
+ priv->vif = NULL;
+ priv->mode = NL80211_IFTYPE_MONITOR;
+ memset(priv->mac_addr, 0, ETH_ALEN);
+ memset(priv->bssid, 0, ETH_ALEN);
+ cw1200_free_keys(priv);
+ cw1200_setup_mac(priv);
+ priv->listening = false;
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ if (!__cw1200_flush(priv, true))
+ wsm_unlock_tx(priv);
+ wsm_unlock_tx(priv);
+
+ mutex_unlock(&priv->conf_mutex);
+}
+
+int cw1200_config(struct ieee80211_hw *dev, u32 changed)
+{
+ int ret = 0;
+ struct cw1200_common *priv = dev->priv;
+ struct ieee80211_conf *conf = &dev->conf;
+
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+ /* TODO: IEEE80211_CONF_CHANGE_QOS */
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ priv->output_power = conf->power_level;
+ sta_printk(KERN_DEBUG "[STA] TX power: %d\n",
+ priv->output_power);
+ WARN_ON(wsm_set_output_power(priv, priv->output_power * 10));
+ }
+
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) &&
+ (priv->channel != conf->channel)) {
+ struct ieee80211_channel *ch = conf->channel;
+ struct wsm_switch_channel channel = {
+ .newChannelNumber = ch->hw_value,
+ };
+ sta_printk(KERN_DEBUG "[STA] Freq %d (wsm ch: %d).\n",
+ ch->center_freq, ch->hw_value);
+
+ ret = WARN_ON(__cw1200_flush(priv, false));
+ if (!ret) {
+ ret = WARN_ON(wsm_switch_channel(priv, &channel));
+ if (!ret) {
+ ret = wait_event_timeout(
+ priv->channel_switch_done,
+ !priv->channel_switch_in_progress,
+ 3 * HZ);
+ /* TODO: We should also check the switch-channel
+ * complete indication.
+ */
+ if (ret) {
+ priv->channel = ch;
+ ret = 0;
+ } else
+ ret = -ETIMEDOUT;
+ } else
+ wsm_unlock_tx(priv);
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (!(conf->flags & IEEE80211_CONF_PS))
+ priv->powersave_mode.pmMode = WSM_PSM_ACTIVE;
+ else if (conf->dynamic_ps_timeout <= 0)
+ priv->powersave_mode.pmMode = WSM_PSM_PS;
+ else
+ priv->powersave_mode.pmMode = WSM_PSM_FAST_PS;
+
+ /* Firmware requires that the value of this 1-byte field be
+ * specified in units of 500us. Values above the 128ms
+ * threshold are not supported. */
+ if (conf->dynamic_ps_timeout >= 0x80)
+ priv->powersave_mode.fastPsmIdlePeriod = 0xFF;
+ else
+ priv->powersave_mode.fastPsmIdlePeriod =
+ conf->dynamic_ps_timeout << 1;
+
+ if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+ priv->bss_params.aid)
+ cw1200_set_pm(priv, &priv->powersave_mode);
+ }
+
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ if (changed & IEEE80211_CONF_CHANGE_P2P_PS) {
+ struct wsm_p2p_ps_modeinfo *modeinfo;
+ modeinfo = &priv->p2p_ps_modeinfo;
+ sta_printk(KERN_DEBUG "[STA] IEEE80211_CONF_CHANGE_P2P_PS\n");
+ sta_printk(KERN_DEBUG "[STA] Legacy PS: %d for AID %d "
+ "in %d mode.\n", conf->p2p_ps.legacy_ps,
+ priv->bss_params.aid, priv->join_status);
+
+ if (conf->p2p_ps.legacy_ps >= 0) {
+ if (conf->p2p_ps.legacy_ps > 0)
+ priv->powersave_mode.pmMode = WSM_PSM_PS;
+ else
+ priv->powersave_mode.pmMode = WSM_PSM_ACTIVE;
+
+ if (priv->join_status == CW1200_JOIN_STATUS_STA)
+ cw1200_set_pm(priv, &priv->powersave_mode);
+ }
+
+ sta_printk(KERN_DEBUG "[STA] CTWindow: %d\n",
+ conf->p2p_ps.ctwindow);
+ if (conf->p2p_ps.ctwindow >= 128)
+ modeinfo->oppPsCTWindow = 127;
+ else if (conf->p2p_ps.ctwindow >= 0)
+ modeinfo->oppPsCTWindow = conf->p2p_ps.ctwindow;
+
+ sta_printk(KERN_DEBUG "[STA] Opportunistic: %d\n",
+ conf->p2p_ps.opp_ps);
+ switch (conf->p2p_ps.opp_ps) {
+ case 0:
+ modeinfo->oppPsCTWindow &= ~(BIT(7));
+ break;
+ case 1:
+ modeinfo->oppPsCTWindow |= BIT(7);
+ break;
+ default:
+ break;
+ }
+
+ sta_printk(KERN_DEBUG "[STA] NOA: %d, %d, %d, %d\n",
+ conf->p2p_ps.count,
+ conf->p2p_ps.start,
+ conf->p2p_ps.duration,
+ conf->p2p_ps.interval);
+ /* Notice of Absence */
+ modeinfo->count = conf->p2p_ps.count;
+
+ if (conf->p2p_ps.count) {
+ /* In the P2P_GO case we need some extra time to make sure
+ * the beacon/probe_resp IEs are updated correctly */
+#define NOA_DELAY_START_MS 300
+ if (priv->join_status == CW1200_JOIN_STATUS_AP)
+ modeinfo->startTime =
+ __cpu_to_le32(conf->p2p_ps.start +
+ NOA_DELAY_START_MS);
+ else
+ modeinfo->startTime =
+ __cpu_to_le32(conf->p2p_ps.start);
+ modeinfo->duration =
+ __cpu_to_le32(conf->p2p_ps.duration);
+ modeinfo->interval =
+ __cpu_to_le32(conf->p2p_ps.interval);
+ modeinfo->dtimCount = 1;
+ modeinfo->reserved = 0;
+ } else {
+ modeinfo->dtimCount = 0;
+ modeinfo->startTime = 0;
+ modeinfo->reserved = 0;
+ modeinfo->duration = 0;
+ modeinfo->interval = 0;
+ }
+
+#if defined(CONFIG_CW1200_STA_DEBUG)
+ print_hex_dump_bytes("p2p_set_ps_modeinfo: ",
+ DUMP_PREFIX_NONE,
+ (u8 *)modeinfo,
+ sizeof(*modeinfo));
+#endif /* CONFIG_CW1200_STA_DEBUG */
+ if (priv->join_status == CW1200_JOIN_STATUS_STA ||
+ priv->join_status == CW1200_JOIN_STATUS_AP) {
+ WARN_ON(wsm_set_p2p_ps_modeinfo(priv, modeinfo));
+ }
+
+ /* Temporary solution until the firmware supports NOA change
+ * notification */
+ cw1200_notify_noa(priv, 10);
+ }
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ /* TBD: It looks like this is handled transparently.
+ * A monitor interface is present -- use this
+ * to determine, for example, whether to calculate
+ * timestamps for packets or not; do not use it instead
+ * of filter flags! */
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ struct wsm_operational_mode mode = {
+ .power_mode = wsm_power_mode_quiescent,
+ .disableMoreFlagUsage = true,
+ };
+
+ wsm_lock_tx(priv);
+ /* Disable p2p-dev mode forced by TX request */
+ if ((priv->join_status == CW1200_JOIN_STATUS_MONITOR) &&
+ (conf->flags & IEEE80211_CONF_IDLE) &&
+ !priv->listening) {
+ cw1200_disable_listening(priv);
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ }
+ WARN_ON(wsm_set_operational_mode(priv, &mode));
+ wsm_unlock_tx(priv);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ sta_printk(KERN_DEBUG "[STA] Retry limits: %d (long), " \
+ "%d (short).\n",
+ conf->long_frame_max_tx_count,
+ conf->short_frame_max_tx_count);
+ spin_lock_bh(&priv->tx_policy_cache.lock);
+ priv->long_frame_max_tx_count = conf->long_frame_max_tx_count;
+ priv->short_frame_max_tx_count =
+ (conf->short_frame_max_tx_count < 0x0F) ?
+ conf->short_frame_max_tx_count : 0x0F;
+ priv->hw->max_rate_tries = priv->short_frame_max_tx_count;
+ spin_unlock_bh(&priv->tx_policy_cache.lock);
+ /* TBD: tx_policy_force_upload() is probably not needed here;
+ * outdated policies will leave the cache in the normal way. */
+ /* WARN_ON(tx_policy_force_upload(priv)); */
+ }
+ mutex_unlock(&priv->conf_mutex);
+ up(&priv->scan.lock);
+ return ret;
+}
+
+void cw1200_update_filtering(struct cw1200_common *priv)
+{
+ int ret;
+ bool bssid_filtering = !priv->rx_filter.bssid;
+ static struct wsm_beacon_filter_control bf_disabled = {
+ .enabled = 0,
+ .bcn_count = 1,
+ };
+
+ if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE)
+ return;
+ else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
+ bssid_filtering = false;
+
+ /*
+ * When acting as a p2p client connected to a p2p GO, turn off the
+ * bssid filter in order to receive frames from a different p2p device.
+ *
+ * WARNING: FW dependency!
+ * This can only be used with FW WSM371 and its successors.
+ * In that FW version, even with the bssid filter turned off,
+ * the device will block most of the unwanted frames.
+ */
+ if (priv->vif->p2p)
+ bssid_filtering = false;
+
+ ret = wsm_set_rx_filter(priv, &priv->rx_filter);
+ if (!ret)
+ ret = wsm_set_beacon_filter_table(priv, &priv->bf_table);
+ if (!ret) {
+ if (priv->disable_beacon_filter)
+ ret = wsm_beacon_filter_control(priv,
+ &bf_disabled);
+ else
+ ret = wsm_beacon_filter_control(priv,
+ &priv->bf_control);
+ }
+ if (!ret)
+ ret = wsm_set_bssid_filtering(priv, bssid_filtering);
+ if (!ret)
+ ret = wsm_set_multicast_filter(priv, &priv->multicast_filter);
+ if (ret)
+ wiphy_err(priv->hw->wiphy,
+ "%s: Update filtering failed: %d.\n",
+ __func__, ret);
+ return;
+}
+
+void cw1200_update_filtering_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common,
+ update_filtering_work);
+
+ cw1200_update_filtering(priv);
+}
+
+u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ static u8 broadcast_ipv6[ETH_ALEN] = {
+ 0x33, 0x33, 0x00, 0x00, 0x00, 0x01
+ };
+ static u8 broadcast_ipv4[ETH_ALEN] = {
+ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01
+ };
+ struct cw1200_common *priv = hw->priv;
+ struct netdev_hw_addr *ha;
+ int count = 0;
+
+ /* Disable multicast filtering */
+ priv->has_multicast_subscription = false;
+ memset(&priv->multicast_filter, 0x00, sizeof(priv->multicast_filter));
+
+ if (netdev_hw_addr_list_count(mc_list) > WSM_MAX_GRP_ADDRTABLE_ENTRIES)
+ return 0;
+
+ /* Enable if requested */
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ sta_printk(KERN_DEBUG "[STA] multicast: %pM\n", ha->addr);
+ memcpy(&priv->multicast_filter.macAddress[count],
+ ha->addr, ETH_ALEN);
+ if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) &&
+ memcmp(ha->addr, broadcast_ipv6, ETH_ALEN))
+ priv->has_multicast_subscription = true;
+ count++;
+ }
+
+ if (count) {
+ priv->multicast_filter.enable = __cpu_to_le32(1);
+ priv->multicast_filter.numOfAddresses = __cpu_to_le32(count);
+ }
+
+ return netdev_hw_addr_list_count(mc_list);
+}
+
+void cw1200_configure_filter(struct ieee80211_hw *dev,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct cw1200_common *priv = dev->priv;
+ bool listening = !!(*total_flags &
+ (FIF_PROMISC_IN_BSS |
+ FIF_OTHER_BSS |
+ FIF_BCN_PRBRESP_PROMISC |
+ FIF_PROBE_REQ));
+
+ *total_flags &= FIF_PROMISC_IN_BSS |
+ FIF_OTHER_BSS |
+ FIF_FCSFAIL |
+ FIF_BCN_PRBRESP_PROMISC |
+ FIF_PROBE_REQ;
+
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+
+ priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS)
+ ? 1 : 0;
+ priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS |
+ FIF_PROBE_REQ)) ? 1 : 0;
+ priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 1 : 0;
+ priv->bf_control.bcn_count = (*total_flags &
+ (FIF_BCN_PRBRESP_PROMISC |
+ FIF_PROMISC_IN_BSS |
+ FIF_PROBE_REQ)) ? 1 : 0;
+ if (priv->listening ^ listening) {
+ priv->listening = listening;
+ wsm_lock_tx(priv);
+ cw1200_update_listening(priv, listening);
+ wsm_unlock_tx(priv);
+ }
+ cw1200_update_filtering(priv);
+ mutex_unlock(&priv->conf_mutex);
+ up(&priv->scan.lock);
+}
+
+int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct cw1200_common *priv = dev->priv;
+ int ret = 0;
+ /* To avoid re-applying the PM request OID unnecessarily */
+ bool old_uapsdFlags;
+
+ mutex_lock(&priv->conf_mutex);
+
+ if (queue < dev->queues) {
+ old_uapsdFlags = priv->uapsd_info.uapsdFlags;
+
+ WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0);
+ ret = wsm_set_tx_queue_params(priv,
+ &priv->tx_queue_params.params[queue], queue);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ WSM_EDCA_SET(&priv->edca, queue, params->aifs,
+ params->cw_min, params->cw_max, params->txop, 0xc8,
+ params->uapsd);
+ ret = wsm_set_edca_params(priv, &priv->edca);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (priv->mode == NL80211_IFTYPE_STATION) {
+ ret = cw1200_set_uapsd_param(priv, &priv->edca);
+ if (!ret && priv->setbssparams_done &&
+ (priv->join_status == CW1200_JOIN_STATUS_STA) &&
+ (old_uapsdFlags != priv->uapsd_info.uapsdFlags))
+ cw1200_set_pm(priv, &priv->powersave_mode);
+ }
+ } else
+ ret = -EINVAL;
+
+out:
+ mutex_unlock(&priv->conf_mutex);
+ return ret;
+}
+
+int cw1200_get_stats(struct ieee80211_hw *dev,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct cw1200_common *priv = dev->priv;
+
+ memcpy(stats, &priv->stats, sizeof(*stats));
+ return 0;
+}
+
+/*
+int cw1200_get_tx_stats(struct ieee80211_hw *dev,
+ struct ieee80211_tx_queue_stats *stats)
+{
+ int i;
+ struct cw1200_common *priv = dev->priv;
+
+ for (i = 0; i < dev->queues; ++i)
+ cw1200_queue_get_stats(&priv->tx_queue[i], &stats[i]);
+
+ return 0;
+}
+*/
+
+int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg)
+{
+ struct wsm_set_pm pm = *arg;
+
+ if (priv->uapsd_info.uapsdFlags != 0)
+ pm.pmMode &= ~WSM_PSM_FAST_PS_FLAG;
+
+ if (memcmp(&pm, &priv->firmware_ps_mode,
+ sizeof(struct wsm_set_pm))) {
+ priv->firmware_ps_mode = pm;
+ return wsm_set_pm(priv, &pm);
+ } else {
+ return 0;
+ }
+}
+
+int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int ret = -EOPNOTSUPP;
+ struct cw1200_common *priv = dev->priv;
+
+ mutex_lock(&priv->conf_mutex);
+
+ if (cmd == SET_KEY) {
+ u8 *peer_addr = NULL;
+ int pairwise = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) ?
+ 1 : 0;
+ int idx = cw1200_alloc_key(priv);
+ struct wsm_add_key *wsm_key = &priv->keys[idx];
+
+ if (idx < 0) {
+ ret = -EINVAL;
+ goto finally;
+ }
+
+ BUG_ON(pairwise && !sta);
+ if (sta)
+ peer_addr = sta->addr;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ if (key->keylen > 16) {
+ cw1200_free_key(priv, idx);
+ ret = -EINVAL;
+ goto finally;
+ }
+
+ if (pairwise) {
+ wsm_key->type = WSM_KEY_TYPE_WEP_PAIRWISE;
+ memcpy(wsm_key->wepPairwiseKey.peerAddress,
+ peer_addr, ETH_ALEN);
+ memcpy(wsm_key->wepPairwiseKey.keyData,
+ &key->key[0], key->keylen);
+ wsm_key->wepPairwiseKey.keyLength = key->keylen;
+ } else {
+ wsm_key->type = WSM_KEY_TYPE_WEP_DEFAULT;
+ memcpy(wsm_key->wepGroupKey.keyData,
+ &key->key[0], key->keylen);
+ wsm_key->wepGroupKey.keyLength = key->keylen;
+ wsm_key->wepGroupKey.keyId = key->keyidx;
+ }
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (pairwise) {
+ wsm_key->type = WSM_KEY_TYPE_TKIP_PAIRWISE;
+ memcpy(wsm_key->tkipPairwiseKey.peerAddress,
+ peer_addr, ETH_ALEN);
+ memcpy(wsm_key->tkipPairwiseKey.tkipKeyData,
+ &key->key[0], 16);
+ memcpy(wsm_key->tkipPairwiseKey.txMicKey,
+ &key->key[16], 8);
+ memcpy(wsm_key->tkipPairwiseKey.rxMicKey,
+ &key->key[24], 8);
+ } else {
+ size_t mic_offset =
+ (priv->mode == NL80211_IFTYPE_AP) ?
+ 16 : 24;
+ wsm_key->type = WSM_KEY_TYPE_TKIP_GROUP;
+ memcpy(wsm_key->tkipGroupKey.tkipKeyData,
+ &key->key[0], 16);
+ memcpy(wsm_key->tkipGroupKey.rxMicKey,
+ &key->key[mic_offset], 8);
+
+ /* TODO: Where can I find TKIP SEQ? */
+ memset(wsm_key->tkipGroupKey.rxSeqCounter,
+ 0, 8);
+ wsm_key->tkipGroupKey.keyId = key->keyidx;
+
+ print_hex_dump_bytes("TKIP: ", DUMP_PREFIX_NONE,
+ key->key, key->keylen);
+ }
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (pairwise) {
+ wsm_key->type = WSM_KEY_TYPE_AES_PAIRWISE;
+ memcpy(wsm_key->aesPairwiseKey.peerAddress,
+ peer_addr, ETH_ALEN);
+ memcpy(wsm_key->aesPairwiseKey.aesKeyData,
+ &key->key[0], 16);
+ } else {
+ wsm_key->type = WSM_KEY_TYPE_AES_GROUP;
+ memcpy(wsm_key->aesGroupKey.aesKeyData,
+ &key->key[0], 16);
+ /* TODO: Where can I find AES SEQ? */
+ memset(wsm_key->aesGroupKey.rxSeqCounter,
+ 0, 8);
+ wsm_key->aesGroupKey.keyId = key->keyidx;
+ }
+ break;
+#ifdef CONFIG_CW1200_WAPI_SUPPORT
+ case WLAN_CIPHER_SUITE_SMS4:
+ if (pairwise) {
+ wsm_key->type = WSM_KEY_TYPE_WAPI_PAIRWISE;
+ memcpy(wsm_key->wapiPairwiseKey.peerAddress,
+ peer_addr, ETH_ALEN);
+ memcpy(wsm_key->wapiPairwiseKey.wapiKeyData,
+ &key->key[0], 16);
+ memcpy(wsm_key->wapiPairwiseKey.micKeyData,
+ &key->key[16], 16);
+ wsm_key->wapiPairwiseKey.keyId = key->keyidx;
+ } else {
+ wsm_key->type = WSM_KEY_TYPE_WAPI_GROUP;
+ memcpy(wsm_key->wapiGroupKey.wapiKeyData,
+ &key->key[0], 16);
+ memcpy(wsm_key->wapiGroupKey.micKeyData,
+ &key->key[16], 16);
+ wsm_key->wapiGroupKey.keyId = key->keyidx;
+ }
+ break;
+#endif /* CONFIG_CW1200_WAPI_SUPPORT */
+ default:
+ WARN_ON(1);
+ cw1200_free_key(priv, idx);
+ ret = -EOPNOTSUPP;
+ goto finally;
+ }
+ ret = WARN_ON(wsm_add_key(priv, wsm_key));
+ if (!ret)
+ key->hw_key_idx = idx;
+ else
+ cw1200_free_key(priv, idx);
+ } else if (cmd == DISABLE_KEY) {
+ struct wsm_remove_key wsm_key = {
+ .entryIndex = key->hw_key_idx,
+ };
+
+ if (wsm_key.entryIndex > WSM_KEY_MAX_INDEX) {
+ ret = -EINVAL;
+ goto finally;
+ }
+
+ cw1200_free_key(priv, wsm_key.entryIndex);
+ ret = wsm_remove_key(priv, &wsm_key);
+ } else {
+ BUG_ON("Unsupported command");
+ }
+
+finally:
+ mutex_unlock(&priv->conf_mutex);
+ return ret;
+}
+
+void cw1200_wep_key_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, wep_key_work);
+ u8 queueId = cw1200_queue_get_queue_id(priv->pending_frame_id);
+ struct cw1200_queue *queue = &priv->tx_queue[queueId];
+ __le32 wep_default_key_id = __cpu_to_le32(
+ priv->wep_default_key_id);
+
+ BUG_ON(queueId >= 4);
+
+ sta_printk(KERN_DEBUG "[STA] Setting default WEP key: %d\n",
+ priv->wep_default_key_id);
+ wsm_flush_tx(priv);
+ WARN_ON(wsm_write_mib(priv, WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
+ &wep_default_key_id, sizeof(wep_default_key_id)));
+ cw1200_queue_requeue(queue, priv->pending_frame_id);
+ wsm_unlock_tx(priv);
+}
+
+int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ int ret;
+ __le32 val32;
+
+ if (value != (u32) -1)
+ val32 = __cpu_to_le32(value);
+ else
+ val32 = 0; /* disabled */
+
+ /* mutex_lock(&priv->conf_mutex); */
+ ret = WARN_ON(wsm_write_mib(hw->priv, WSM_MIB_ID_DOT11_RTS_THRESHOLD,
+ &val32, sizeof(val32)));
+ /* mutex_unlock(&priv->conf_mutex); */
+ return ret;
+}
+
+int __cw1200_flush(struct cw1200_common *priv, bool drop)
+{
+ int i, ret;
+
+ for (;;) {
+ /* TODO: Correct flush handling is required for dev_stop.
+ * Temporary workaround: a 2 s timeout.
+ */
+ if (drop) {
+ for (i = 0; i < 4; ++i)
+ cw1200_queue_clear(&priv->tx_queue[i]);
+ } else {
+ ret = wait_event_timeout(
+ priv->tx_queue_stats.wait_link_id_empty,
+ cw1200_queue_stats_is_empty(
+ &priv->tx_queue_stats, -1),
+ 2 * HZ);
+ }
+
+ if (!drop && unlikely(ret <= 0)) {
+ ret = -ETIMEDOUT;
+ break;
+ } else {
+ ret = 0;
+ }
+
+ wsm_lock_tx(priv);
+ if (unlikely(!cw1200_queue_stats_is_empty(
+ &priv->tx_queue_stats, -1))) {
+ /* Highly unlikely: WSM requeued frames. */
+ wsm_unlock_tx(priv);
+ continue;
+ }
+ break;
+ }
+ return ret;
+}
+
+void cw1200_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct cw1200_common *priv = hw->priv;
+
+ switch (priv->mode) {
+ case NL80211_IFTYPE_MONITOR:
+ drop = true;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (!priv->enable_beacon)
+ drop = true;
+ break;
+ }
+
+ if (!WARN_ON(__cw1200_flush(priv, drop)))
+ wsm_unlock_tx(priv);
+
+ return;
+}
+
+/* ******************************************************************** */
+/* WSM callbacks */
+
+void cw1200_channel_switch_cb(struct cw1200_common *priv)
+{
+ wsm_unlock_tx(priv);
+}
+
+void cw1200_free_event_queue(struct cw1200_common *priv)
+{
+ LIST_HEAD(list);
+
+ spin_lock(&priv->event_queue_lock);
+ list_splice_init(&priv->event_queue, &list);
+ spin_unlock(&priv->event_queue_lock);
+
+ __cw1200_free_event_queue(&list);
+}
+
+void cw1200_event_handler(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, event_handler);
+ struct cw1200_wsm_event *event;
+ LIST_HEAD(list);
+
+ spin_lock(&priv->event_queue_lock);
+ list_splice_init(&priv->event_queue, &list);
+ spin_unlock(&priv->event_queue_lock);
+
+ list_for_each_entry(event, &list, link) {
+ switch (event->evt.eventId) {
+ case WSM_EVENT_ERROR:
+ /* It is not clear yet what this event indicates. */
+ STUB();
+ break;
+ case WSM_EVENT_BSS_LOST:
+ {
+ spin_lock(&priv->bss_loss_lock);
+ if (priv->bss_loss_status > CW1200_BSS_LOSS_NONE) {
+ spin_unlock(&priv->bss_loss_lock);
+ break;
+ }
+ priv->bss_loss_status = CW1200_BSS_LOSS_CHECKING;
+ spin_unlock(&priv->bss_loss_lock);
+
+ sta_printk(KERN_DEBUG "[CQM] BSS lost.\n");
+ cancel_delayed_work_sync(&priv->bss_loss_work);
+ cancel_delayed_work_sync(&priv->connection_loss_work);
+ if (!down_trylock(&priv->scan.lock)) {
+ up(&priv->scan.lock);
+ priv->delayed_link_loss = 0;
+ queue_delayed_work(priv->workqueue,
+ &priv->bss_loss_work, 0);
+ } else {
+ /* Scan is in progress. Delay reporting. */
+ /* Scan complete will trigger bss_loss_work */
+ priv->delayed_link_loss = 1;
+ /* Also we're starting watchdog. */
+ queue_delayed_work(priv->workqueue,
+ &priv->bss_loss_work, 10 * HZ);
+ }
+ break;
+ }
+ case WSM_EVENT_BSS_REGAINED:
+ {
+ sta_printk(KERN_DEBUG "[CQM] BSS regained.\n");
+ priv->delayed_link_loss = 0;
+ spin_lock(&priv->bss_loss_lock);
+ priv->bss_loss_status = CW1200_BSS_LOSS_NONE;
+ spin_unlock(&priv->bss_loss_lock);
+ cancel_delayed_work_sync(&priv->bss_loss_work);
+ cancel_delayed_work_sync(&priv->connection_loss_work);
+ break;
+ }
+ case WSM_EVENT_RADAR_DETECTED:
+ STUB();
+ break;
+ case WSM_EVENT_RCPI_RSSI:
+ {
+ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+ * RSSI = RCPI / 2 - 110 */
+ int rcpiRssi = (int)(event->evt.eventData & 0xFF);
+ int cqm_evt;
+ if (priv->cqm_use_rssi)
+ rcpiRssi = (s8)rcpiRssi;
+ else
+ rcpiRssi = rcpiRssi / 2 - 110;
+
+ cqm_evt = (rcpiRssi <= priv->cqm_rssi_thold) ?
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+ sta_printk(KERN_DEBUG "[CQM] RSSI event: %d", rcpiRssi);
+ ieee80211_cqm_rssi_notify(priv->vif, cqm_evt,
+ GFP_KERNEL);
+ break;
+ }
+ case WSM_EVENT_BT_INACTIVE:
+ STUB();
+ break;
+ case WSM_EVENT_BT_ACTIVE:
+ STUB();
+ break;
+ }
+ }
+ __cw1200_free_event_queue(&list);
+}
+
+void cw1200_bss_loss_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, bss_loss_work.work);
+ int timeout; /* in beacons */
+ struct sk_buff *skb;
+
+ timeout = priv->cqm_link_loss_count -
+ priv->cqm_beacon_loss_count;
+
+ /* Skip the confirmation procedure in the P2P case */
+ if (priv->vif->p2p)
+ goto report;
+
+ spin_lock(&priv->bss_loss_lock);
+ if (priv->bss_loss_status == CW1200_BSS_LOSS_CHECKING) {
+ spin_unlock(&priv->bss_loss_lock);
+ skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+ if (!(WARN_ON(!skb))) {
+ cw1200_tx(priv->hw, skb);
+ /* Start watchdog -- if nullfunc TX doesn't fail
+ * in 1 sec, forward event to upper layers */
+ queue_delayed_work(priv->workqueue,
+ &priv->bss_loss_work, 1 * HZ);
+ }
+ return;
+ } else if (priv->bss_loss_status == CW1200_BSS_LOSS_CONFIRMING) {
+ priv->bss_loss_status = CW1200_BSS_LOSS_NONE;
+ spin_unlock(&priv->bss_loss_lock);
+ return;
+ }
+ spin_unlock(&priv->bss_loss_lock);
+
+report:
+ if (priv->cqm_beacon_loss_count) {
+ sta_printk(KERN_DEBUG "[CQM] Beacon loss.\n");
+ if (timeout <= 0)
+ timeout = 0;
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ ieee80211_cqm_beacon_miss_notify(priv->vif, GFP_KERNEL);
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+ } else {
+ timeout = 0;
+ }
+
+ cancel_delayed_work_sync(&priv->connection_loss_work);
+ queue_delayed_work(priv->workqueue,
+ &priv->connection_loss_work,
+ timeout * HZ / 10);
+
+ spin_lock(&priv->bss_loss_lock);
+ priv->bss_loss_status = CW1200_BSS_LOSS_NONE;
+ spin_unlock(&priv->bss_loss_lock);
+}
+
+void cw1200_connection_loss_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common,
+ connection_loss_work.work);
+ sta_printk(KERN_DEBUG "[CQM] Reporting connection loss.\n");
+ ieee80211_connection_loss(priv->vif);
+}
+
+void cw1200_tx_failure_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, tx_failure_work);
+ sta_printk(KERN_DEBUG "[CQM] Reporting TX failure.\n");
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ ieee80211_cqm_tx_fail_notify(priv->vif, GFP_KERNEL);
+#else /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+ (void)priv;
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+}
+
+/* ******************************************************************** */
+/* Internal API */
+
+/*
+ * Parse the SDD file to extract the listen interval and the PTA
+ * (BT coexistence) related information.
+ */
+static int cw1200_parse_SDD_file(struct cw1200_common *priv)
+{
+ u8 *sdd_data = (u8 *)priv->sdd->data;
+ struct cw1200_sdd {
+ u8 id;
+ u8 length;
+ u8 data[];
+ } *pElement;
+ int parsedLength = 0;
+ #define SDD_PTA_CFG_ELT_ID 0xEB
+ #define FIELD_OFFSET(type, field) ((u8 *)&((type *)0)->field - (u8 *)0)
+
+ priv->is_BT_Present = false;
+
+ pElement = (struct cw1200_sdd *)sdd_data;
+
+ pElement = (struct cw1200_sdd *)((u8 *)pElement +
+ FIELD_OFFSET(struct cw1200_sdd, data) + pElement->length);
+
+ parsedLength += (FIELD_OFFSET(struct cw1200_sdd, data) +
+ pElement->length);
+
+ while (parsedLength <= priv->sdd->size) {
+ switch (pElement->id) {
+ case SDD_PTA_CFG_ELT_ID:
+ {
+ priv->conf_listen_interval =
+ (*((u16 *)pElement->data+1) >> 7) & 0x1F;
+ priv->is_BT_Present = true;
+ sta_printk(KERN_DEBUG "PTA element found.\n");
+ sta_printk(KERN_DEBUG "Listen Interval %d\n",
+ priv->conf_listen_interval);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ pElement = (struct cw1200_sdd *)
+ ((u8 *)pElement + FIELD_OFFSET(struct cw1200_sdd, data)
+ + pElement->length);
+ parsedLength +=
+ (FIELD_OFFSET(struct cw1200_sdd, data) + pElement->length);
+ }
+
+ if (priv->is_BT_Present == false) {
+ sta_printk(KERN_DEBUG "PTA element NOT found.\n");
+ priv->conf_listen_interval = 0;
+ }
+ return 0;
+
+ #undef SDD_PTA_CFG_ELT_ID
+ #undef FIELD_OFFSET
+}
+
+
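+/*
+ * MAC setup: on the first call the SDD file matching the detected hardware
+ * cut is loaded, pushed to the firmware via wsm_configuration() and parsed
+ * for the PTA (BT coexistence) element; the RCPI/RSSI threshold reporting
+ * is (re)configured on every call.
+ */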
+int cw1200_setup_mac(struct cw1200_common *priv)
+{
+ int ret = 0;
+
+ /* NOTE: There is a bug in FW: it reports signal
+ * as RSSI if RSSI subscription is enabled.
+ * It's not enough to set WSM_RCPI_RSSI_USE_RSSI. */
+ /* NOTE2: RSSI-based reports have been switched to RCPI, since
+ * the FW has a bug and the reported RSSI values are not stable,
+ * which can lead to signal level oscillations in user applications. */
+ struct wsm_rcpi_rssi_threshold threshold = {
+ .rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE |
+ WSM_RCPI_RSSI_DONT_USE_UPPER |
+ WSM_RCPI_RSSI_DONT_USE_LOWER,
+ .rollingAverageCount = 16,
+ };
+
+ /* Remember the decision here to make sure we handle
+ * the RCPI/RSSI value correctly on WSM_EVENT_RCPI_RSSI. */
+ if (threshold.rssiRcpiMode & WSM_RCPI_RSSI_USE_RSSI)
+ priv->cqm_use_rssi = true;
+
+ if (!priv->sdd) {
+ const char *sdd_path = NULL;
+ struct wsm_configuration cfg = {
+ .dot11StationId = &priv->mac_addr[0],
+ };
+
+ switch (priv->hw_revision) {
+ case CW1200_HW_REV_CUT10:
+ sdd_path = SDD_FILE_10;
+ break;
+ case CW1200_HW_REV_CUT11:
+ sdd_path = SDD_FILE_11;
+ break;
+ case CW1200_HW_REV_CUT20:
+ sdd_path = SDD_FILE_20;
+ break;
+ case CW1200_HW_REV_CUT22:
+ sdd_path = SDD_FILE_22;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ ret = request_firmware(&priv->sdd,
+ sdd_path, priv->pdev);
+
+ if (unlikely(ret)) {
+ cw1200_dbg(CW1200_DBG_ERROR,
+ "%s: can't load sdd file %s.\n",
+ __func__, sdd_path);
+ return ret;
+ }
+
+ cfg.dpdData = priv->sdd->data;
+ cfg.dpdData_size = priv->sdd->size;
+ ret = WARN_ON(wsm_configuration(priv, &cfg));
+ /* Parse SDD file for PTA element */
+ cw1200_parse_SDD_file(priv);
+ }
+ if (ret)
+ return ret;
+
+ /* Configure RCPI/RSSI threshold reporting. */
+ WARN_ON(wsm_set_rcpi_rssi_threshold(priv, &threshold));
+
+ /* TODO: */
+ switch (priv->mode) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ /* TODO: Not verified yet. */
+ STUB();
+ break;
+ }
+
+ return 0;
+}
+
+void cw1200_offchannel_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, offchannel_work);
+ u8 queueId = cw1200_queue_get_queue_id(priv->pending_frame_id);
+ struct cw1200_queue *queue = &priv->tx_queue[queueId];
+
+ BUG_ON(queueId >= 4);
+ BUG_ON(!priv->channel);
+
+ mutex_lock(&priv->conf_mutex);
+ if (likely(!priv->join_status)) {
+ wsm_flush_tx(priv);
+ cw1200_update_listening(priv, true);
+ cw1200_update_filtering(priv);
+ }
+ if (unlikely(!priv->join_status))
+ cw1200_queue_remove(queue, priv->pending_frame_id);
+ else
+ cw1200_queue_requeue(queue, priv->pending_frame_id);
+ mutex_unlock(&priv->conf_mutex);
+ wsm_unlock_tx(priv);
+}
+
+void cw1200_join_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, join_work);
+ u8 queueId = cw1200_queue_get_queue_id(priv->pending_frame_id);
+ struct cw1200_queue *queue = &priv->tx_queue[queueId];
+ const struct cw1200_txpriv *txpriv = NULL;
+ struct sk_buff *skb = NULL;
+ const struct wsm_tx *wsm;
+ const struct ieee80211_hdr *frame;
+ const u8 *bssid;
+ struct cfg80211_bss *bss;
+ const u8 *ssidie;
+ const u8 *dtimie;
+ const struct ieee80211_tim_ie *tim = NULL;
+ struct wsm_protected_mgmt_policy mgmt_policy;
+
+ BUG_ON(queueId >= 4);
+ if (cw1200_queue_get_skb(queue, priv->pending_frame_id,
+ &skb, &txpriv)) {
+ wsm_unlock_tx(priv);
+ return;
+ }
+ wsm = (struct wsm_tx *)&skb->data[0];
+ frame = (struct ieee80211_hdr *)&skb->data[txpriv->offset];
+ bssid = &frame->addr1[0]; /* AP SSID in a 802.11 frame */
+
+ BUG_ON(!wsm);
+ BUG_ON(!priv->channel);
+
+ if (unlikely(priv->join_status)) {
+ wsm_lock_tx(priv);
+ cw1200_unjoin_work(&priv->unjoin_work);
+ }
+
+ cancel_delayed_work_sync(&priv->join_timeout);
+
+ bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel,
+ bssid, NULL, 0, 0, 0);
+ if (!bss) {
+ cw1200_queue_remove(queue, priv->pending_frame_id);
+ wsm_unlock_tx(priv);
+ return;
+ }
+ ssidie = cfg80211_find_ie(WLAN_EID_SSID,
+ bss->information_elements,
+ bss->len_information_elements);
+ dtimie = cfg80211_find_ie(WLAN_EID_TIM,
+ bss->information_elements,
+ bss->len_information_elements);
+ if (dtimie)
+ tim = (struct ieee80211_tim_ie *)&dtimie[2];
+
+ mutex_lock(&priv->conf_mutex);
+ {
+ struct wsm_join join = {
+ .mode = (bss->capability & WLAN_CAPABILITY_IBSS) ?
+ WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS,
+ .preambleType = WSM_JOIN_PREAMBLE_SHORT,
+ .probeForJoin = 1,
+ /* dtimPeriod will be updated after association */
+ .dtimPeriod = 1,
+ .beaconInterval = bss->beacon_interval,
+ /* basicRateSet will be updated after association */
+ .basicRateSet = 7,
+ };
+
+ /* BT Coex related changes */
+ if (priv->is_BT_Present) {
+ if (((priv->conf_listen_interval * 100) %
+ bss->beacon_interval) == 0)
+ priv->listen_interval =
+ ((priv->conf_listen_interval * 100) /
+ bss->beacon_interval);
+ else
+ priv->listen_interval =
+ ((priv->conf_listen_interval * 100) /
+ bss->beacon_interval + 1);
+ }
+
+ if (tim && tim->dtim_period > 1) {
+ join.dtimPeriod = tim->dtim_period;
+ priv->join_dtim_period = tim->dtim_period;
+ }
+ priv->beacon_int = bss->beacon_interval;
+ sta_printk(KERN_DEBUG "[STA] Join DTIM: %d, interval: %d\n",
+ join.dtimPeriod, priv->beacon_int);
+
+ join.channelNumber = priv->channel->hw_value;
+ join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
+
+ memcpy(&join.bssid[0], bssid, sizeof(join.bssid));
+ memcpy(&priv->join_bssid[0], bssid, sizeof(priv->join_bssid));
+
+ if (ssidie) {
+ join.ssidLength = ssidie[1];
+ if (WARN_ON(join.ssidLength > sizeof(join.ssid)))
+ join.ssidLength = sizeof(join.ssid);
+ memcpy(&join.ssid[0], &ssidie[2], join.ssidLength);
+ }
+
+ if (priv->vif->p2p) {
+ join.flags |= WSM_JOIN_FLAGS_P2P_GO;
+ join.basicRateSet =
+ cw1200_rate_mask_to_wsm(priv, 0xFF0);
+ }
+
+ wsm_flush_tx(priv);
+
+ /* Queue unjoin if not associated in 3 sec. */
+ queue_delayed_work(priv->workqueue,
+ &priv->join_timeout, 3 * HZ);
+ /* Stay awake for the join timeout. */
+ cw1200_pm_stay_awake(&priv->pm_state, 3 * HZ);
+
+ cw1200_update_listening(priv, false);
+ /* BlockACK policy will be updated when assoc is done */
+ WARN_ON(wsm_set_block_ack_policy(priv,
+ 0, priv->ba_tid_mask));
+
+ spin_lock_bh(&priv->ba_lock);
+ priv->ba_ena = false;
+ priv->ba_cnt = 0;
+ priv->ba_acc = 0;
+ priv->ba_hist = 0;
+ spin_unlock_bh(&priv->ba_lock);
+
+ mgmt_policy.protectedMgmtEnable = 0;
+ mgmt_policy.unprotectedMgmtFramesAllowed = 1;
+ mgmt_policy.encryptionForAuthFrame = 1;
+ wsm_set_protected_mgmt_policy(priv, &mgmt_policy);
+
+ if (wsm_join(priv, &join)) {
+ memset(&priv->join_bssid[0],
+ 0, sizeof(priv->join_bssid));
+ cw1200_queue_remove(queue, priv->pending_frame_id);
+ cancel_delayed_work_sync(&priv->join_timeout);
+ cw1200_update_listening(priv, priv->listening);
+ } else {
+ /* Upload keys */
+ WARN_ON(cw1200_upload_keys(priv));
+ cw1200_queue_requeue(queue, priv->pending_frame_id);
+ priv->join_status = CW1200_JOIN_STATUS_STA;
+
+ /* Due to beacon filtering it is possible that the
+ * AP's beacon is not known to the mac80211 stack.
+ * Disable filtering temporarily to make sure the stack
+ * receives at least one beacon. */
+ priv->disable_beacon_filter = true;
+
+ }
+ cw1200_update_filtering(priv);
+ }
+ mutex_unlock(&priv->conf_mutex);
+ cfg80211_put_bss(bss);
+ wsm_unlock_tx(priv);
+}
+
+void cw1200_join_timeout(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, join_timeout.work);
+ sta_printk(KERN_DEBUG "[WSM] Issue unjoin command (TMO).\n");
+ wsm_lock_tx(priv);
+ cw1200_unjoin_work(&priv->unjoin_work);
+}
+
+void cw1200_unjoin_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, unjoin_work);
+
+ struct wsm_reset reset = {
+ .reset_statistics = true,
+ };
+
+ del_timer_sync(&priv->ba_timer);
+ mutex_lock(&priv->conf_mutex);
+ if (unlikely(atomic_read(&priv->scan.in_progress))) {
+ if (priv->delayed_unjoin) {
+ wiphy_dbg(priv->hw->wiphy,
+ "%s: Delayed unjoin "
+ "is already scheduled.\n",
+ __func__);
+ wsm_unlock_tx(priv);
+ } else {
+ priv->delayed_unjoin = true;
+ }
+ mutex_unlock(&priv->conf_mutex);
+ return;
+ }
+
+ if (priv->join_status &&
+ priv->join_status > CW1200_JOIN_STATUS_STA) {
+ wiphy_err(priv->hw->wiphy,
+ "%s: Unexpected: join status: %d\n",
+ __func__, priv->join_status);
+ BUG_ON(1);
+ }
+ if (priv->join_status) {
+ cancel_work_sync(&priv->update_filtering_work);
+ memset(&priv->join_bssid[0], 0, sizeof(priv->join_bssid));
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+
+ /* Unjoin is a reset. */
+ wsm_flush_tx(priv);
+ WARN_ON(wsm_keep_alive_period(priv, 0));
+ WARN_ON(wsm_reset(priv, &reset));
+ priv->join_dtim_period = 0;
+ WARN_ON(cw1200_setup_mac(priv));
+ cw1200_free_event_queue(priv);
+ cancel_work_sync(&priv->event_handler);
+ cancel_delayed_work_sync(&priv->connection_loss_work);
+ cw1200_update_listening(priv, priv->listening);
+ WARN_ON(wsm_set_block_ack_policy(priv,
+ 0, priv->ba_tid_mask));
+ priv->disable_beacon_filter = false;
+ cw1200_update_filtering(priv);
+ priv->setbssparams_done = false;
+ memset(&priv->association_mode, 0,
+ sizeof(priv->association_mode));
+ memset(&priv->bss_params, 0, sizeof(priv->bss_params));
+ memset(&priv->firmware_ps_mode, 0,
+ sizeof(priv->firmware_ps_mode));
+ sta_printk(KERN_DEBUG "[STA] Unjoin.\n");
+ }
+ mutex_unlock(&priv->conf_mutex);
+ wsm_unlock_tx(priv);
+}
+
+int cw1200_enable_listening(struct cw1200_common *priv)
+{
+ struct wsm_start start = {
+ .mode = WSM_START_MODE_P2P_DEV,
+ .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G,
+ .channelNumber = priv->channel->hw_value,
+ .beaconInterval = 100,
+ .DTIMPeriod = 1,
+ .probeDelay = 0,
+ .basicRateSet = 0x0F,
+ };
+ return wsm_start(priv, &start);
+}
+
+int cw1200_disable_listening(struct cw1200_common *priv)
+{
+ int ret;
+ struct wsm_reset reset = {
+ .reset_statistics = true,
+ };
+ ret = wsm_reset(priv, &reset);
+ return ret;
+}
+
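+/*
+ * "Listening" is implemented above as a WSM start in P2P device mode on
+ * the current channel; the helper below only toggles it between the
+ * PASSIVE and MONITOR join states.
+ */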
+void cw1200_update_listening(struct cw1200_common *priv, bool enabled)
+{
+ if (enabled) {
+ switch (priv->join_status) {
+ case CW1200_JOIN_STATUS_PASSIVE:
+ if (!WARN_ON(cw1200_enable_listening(priv)))
+ priv->join_status = CW1200_JOIN_STATUS_MONITOR;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (priv->join_status) {
+ case CW1200_JOIN_STATUS_MONITOR:
+ if (!WARN_ON(cw1200_disable_listening(priv)))
+ priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+int cw1200_set_uapsd_param(struct cw1200_common *priv,
+ const struct wsm_edca_params *arg)
+{
+ int ret;
+ u16 uapsdFlags = 0;
+
+ /* AC to [queue, flag bit] mapping:
+ * VO [0, 3], VI [1, 2], BE [2, 1], BK [3, 0] */
+
+ if (arg->params[0].uapsdEnable)
+ uapsdFlags |= 1 << 3;
+
+ if (arg->params[1].uapsdEnable)
+ uapsdFlags |= 1 << 2;
+
+ if (arg->params[2].uapsdEnable)
+ uapsdFlags |= 1 << 1;
+
+ if (arg->params[3].uapsdEnable)
+ uapsdFlags |= 1;
+
+ /* Pseudo U-APSD operation is currently not supported, so set
+ * MinAutoTriggerInterval, MaxAutoTriggerInterval and
+ * AutoTriggerStep to 0. */
+
+ priv->uapsd_info.uapsdFlags = cpu_to_le16(uapsdFlags);
+ priv->uapsd_info.minAutoTriggerInterval = 0;
+ priv->uapsd_info.maxAutoTriggerInterval = 0;
+ priv->uapsd_info.autoTriggerStep = 0;
+
+ ret = wsm_set_uapsd_info(priv, &priv->uapsd_info);
+ return ret;
+}
+
+void cw1200_ba_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, ba_work);
+ u8 tx_ba_tid_mask;
+
+ if (priv->join_status != CW1200_JOIN_STATUS_STA)
+ return;
+ if (!priv->setbssparams_done)
+ return;
+
+ spin_lock_bh(&priv->ba_lock);
+ tx_ba_tid_mask = priv->ba_ena ? priv->ba_tid_mask : 0;
+ spin_unlock_bh(&priv->ba_lock);
+
+ WARN_ON(wsm_set_block_ack_policy(priv,
+ tx_ba_tid_mask, priv->ba_tid_mask));
+}
+
+void cw1200_ba_timer(unsigned long arg)
+{
+ bool ba_ena;
+ struct cw1200_common *priv =
+ (struct cw1200_common *)arg;
+
+ spin_lock_bh(&priv->ba_lock);
+ cw1200_debug_ba(priv, priv->ba_cnt, priv->ba_acc);
+
+ if (atomic_read(&priv->scan.in_progress))
+ goto skip_statistic_update;
+
+ ba_ena = (priv->ba_cnt >= CW1200_BLOCK_ACK_CNT &&
+ priv->ba_acc / priv->ba_cnt >= CW1200_BLOCK_ACK_THLD);
+ priv->ba_cnt = 0;
+ priv->ba_acc = 0;
+
+ if (ba_ena != priv->ba_ena) {
+ if (ba_ena || ++priv->ba_hist >= CW1200_BLOCK_ACK_HIST) {
+ priv->ba_ena = ba_ena;
+ priv->ba_hist = 0;
+ sta_printk(KERN_DEBUG "[STA] %s block ACK:\n",
+ ba_ena ? "enable" : "disable");
+ queue_work(priv->workqueue, &priv->ba_work);
+ }
+ } else if (priv->ba_hist)
+ --priv->ba_hist;
+
+skip_statistic_update:
+ spin_unlock_bh(&priv->ba_lock);
+}
diff --git a/drivers/staging/cw1200/sta.h b/drivers/staging/cw1200/sta.h
new file mode 100644
index 00000000000..4e4833afcf7
--- /dev/null
+++ b/drivers/staging/cw1200/sta.h
@@ -0,0 +1,87 @@
+/*
+ * Mac80211 STA interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef STA_H_INCLUDED
+#define STA_H_INCLUDED
+
+/* ******************************************************************** */
+/* mac80211 API */
+
+int cw1200_start(struct ieee80211_hw *dev);
+void cw1200_stop(struct ieee80211_hw *dev);
+int cw1200_add_interface(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif);
+void cw1200_remove_interface(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif);
+int cw1200_config(struct ieee80211_hw *dev, u32 changed);
+void cw1200_configure_filter(struct ieee80211_hw *dev,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast);
+int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+int cw1200_get_stats(struct ieee80211_hw *dev,
+ struct ieee80211_low_level_stats *stats);
+/* No longer part of the mac80211 interface?
+int cw1200_get_tx_stats(struct ieee80211_hw *dev,
+ struct ieee80211_tx_queue_stats *stats);
+*/
+int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+
+int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+
+void cw1200_flush(struct ieee80211_hw *hw, bool drop);
+
+u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list);
+
+int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg);
+
+/* ******************************************************************** */
+/* WSM callbacks */
+
+/* void cw1200_set_pm_complete_cb(struct cw1200_common *priv,
+ struct wsm_set_pm_complete *arg); */
+void cw1200_channel_switch_cb(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* WSM events */
+
+void cw1200_free_event_queue(struct cw1200_common *priv);
+void cw1200_event_handler(struct work_struct *work);
+void cw1200_bss_loss_work(struct work_struct *work);
+void cw1200_connection_loss_work(struct work_struct *work);
+void cw1200_keep_alive_work(struct work_struct *work);
+void cw1200_tx_failure_work(struct work_struct *work);
+
+/* ******************************************************************** */
+/* Internal API */
+
+int cw1200_setup_mac(struct cw1200_common *priv);
+void cw1200_join_work(struct work_struct *work);
+void cw1200_join_timeout(struct work_struct *work);
+void cw1200_unjoin_work(struct work_struct *work);
+void cw1200_offchannel_work(struct work_struct *work);
+void cw1200_wep_key_work(struct work_struct *work);
+void cw1200_update_listening(struct cw1200_common *priv, bool enabled);
+void cw1200_update_filtering(struct cw1200_common *priv);
+void cw1200_update_filtering_work(struct work_struct *work);
+int __cw1200_flush(struct cw1200_common *priv, bool drop);
+int cw1200_enable_listening(struct cw1200_common *priv);
+int cw1200_disable_listening(struct cw1200_common *priv);
+int cw1200_set_uapsd_param(struct cw1200_common *priv,
+ const struct wsm_edca_params *arg);
+void cw1200_ba_work(struct work_struct *work);
+void cw1200_ba_timer(unsigned long arg);
+
+#endif
diff --git a/drivers/staging/cw1200/txrx.c b/drivers/staging/cw1200/txrx.c
new file mode 100644
index 00000000000..7c0fa0b0f2a
--- /dev/null
+++ b/drivers/staging/cw1200/txrx.c
@@ -0,0 +1,1372 @@
+/*
+ * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+
+#include "cw1200.h"
+#include "wsm.h"
+#include "bh.h"
+#include "ap.h"
+#include "debug.h"
+
+#if defined(CONFIG_CW1200_TX_POLICY_DEBUG)
+#define tx_policy_printk(...) printk(__VA_ARGS__)
+#else
+#define tx_policy_printk(...)
+#endif
+
+#define CW1200_INVALID_RATE_ID (0xFF)
+
+static int cw1200_handle_action_rx(struct cw1200_common *priv,
+ struct sk_buff *skb);
+static const struct ieee80211_rate *
+cw1200_get_tx_rate(const struct cw1200_common *priv,
+ const struct ieee80211_tx_rate *rate);
+
+/* ******************************************************************** */
+/* TX queue lock / unlock */
+
+static inline void cw1200_tx_queues_lock(struct cw1200_common *priv)
+{
+ int i;
+ for (i = 0; i < 4; ++i)
+ cw1200_queue_lock(&priv->tx_queue[i]);
+}
+
+static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv)
+{
+ int i;
+ for (i = 0; i < 4; ++i)
+ cw1200_queue_unlock(&priv->tx_queue[i]);
+}
+
+/* ******************************************************************** */
+/* TX policy cache implementation */
+
+static void tx_policy_dump(struct tx_policy *policy)
+{
+ tx_policy_printk(KERN_DEBUG "[TX policy] "
+ "%.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X"
+ "%.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X"
+ "%.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n",
+ policy->raw[0] & 0x0F, policy->raw[0] >> 4,
+ policy->raw[1] & 0x0F, policy->raw[1] >> 4,
+ policy->raw[2] & 0x0F, policy->raw[2] >> 4,
+ policy->raw[3] & 0x0F, policy->raw[3] >> 4,
+ policy->raw[4] & 0x0F, policy->raw[4] >> 4,
+ policy->raw[5] & 0x0F, policy->raw[5] >> 4,
+ policy->raw[6] & 0x0F, policy->raw[6] >> 4,
+ policy->raw[7] & 0x0F, policy->raw[7] >> 4,
+ policy->raw[8] & 0x0F, policy->raw[8] >> 4,
+ policy->raw[9] & 0x0F, policy->raw[9] >> 4,
+ policy->raw[10] & 0x0F, policy->raw[10] >> 4,
+ policy->raw[11] & 0x0F, policy->raw[11] >> 4,
+ policy->defined);
+}
+
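+/*
+ * Build a TX retry policy from the mac80211 rate table: sort the rates in
+ * descending order, merge duplicate entries, clamp the total retry count
+ * to short_frame_max_tx_count, optionally inject intermediate g-rates
+ * (see the HACK below) and pack the per-rate retry counts as 4-bit
+ * nibbles into policy->tbl.
+ */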
+static void tx_policy_build(const struct cw1200_common *priv,
+ /* [out] */ struct tx_policy *policy,
+ struct ieee80211_tx_rate *rates, size_t count)
+{
+ int i, j;
+ unsigned limit = priv->short_frame_max_tx_count;
+ unsigned total = 0;
+ BUG_ON(rates[0].idx < 0);
+ memset(policy, 0, sizeof(*policy));
+
+ /* minstrel is a little bit buggy, so distill the
+ * incoming rates first. */
+
+ /* Sort rates in descending order. */
+ for (i = 1; i < count; ++i) {
+ if (rates[i].idx < 0) {
+ count = i;
+ break;
+ }
+ if (rates[i].idx > rates[i - 1].idx) {
+ struct ieee80211_tx_rate tmp = rates[i - 1];
+ rates[i - 1] = rates[i];
+ rates[i] = tmp;
+ }
+ }
+
+ /* Eliminate duplicates. */
+ total = rates[0].count;
+ for (i = 0, j = 1; j < count; ++j) {
+ if (rates[j].idx == rates[i].idx) {
+ rates[i].count += rates[j].count;
+ } else if (rates[j].idx > rates[i].idx) {
+ break;
+ } else {
+ ++i;
+ if (i != j)
+ rates[i] = rates[j];
+ }
+ total += rates[j].count;
+ }
+ count = i + 1;
+
+ /* Re-fill the policy, trying to keep every requested rate while
+ * respecting the global max TX retransmission count. */
+ if (limit < count)
+ limit = count;
+ if (total > limit) {
+ for (i = 0; i < count; ++i) {
+ int left = count - i - 1;
+ if (rates[i].count > limit - left)
+ rates[i].count = limit - left;
+ limit -= rates[i].count;
+ }
+ }
+
+ /* HACK!!! The device has problems (at least) switching from
+ * 54Mbps CTS to 1Mbps. This switch takes an enormous amount
+ * of time (100-200 ms), leading to a noticeable throughput drop.
+ * As a workaround, additional g-rates are injected into the
+ * policy.
+ */
+ if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
+ rates[0].idx > 4 && rates[0].count > 2 &&
+ rates[1].idx < 2) {
+ /* ">> 1" is an equivalent of "/ 2", but faster */
+ int mid_rate = (rates[0].idx + 4) >> 1;
+
+ /* Decrease number of retries for the initial rate */
+ rates[0].count -= 2;
+
+ if (mid_rate != 4) {
+ /* Keep fallback rate at 1Mbps. */
+ rates[3] = rates[1];
+
+ /* Inject 1 transmission on lowest g-rate */
+ rates[2].idx = 4;
+ rates[2].count = 1;
+ rates[2].flags = rates[1].flags;
+
+ /* Inject 1 transmission on mid-rate */
+ rates[1].idx = mid_rate;
+ rates[1].count = 1;
+
+ /* Falling back to 1 Mbps is a really bad thing,
+ * so try to increase the probability of a
+ * successful transmission on the lowest g-rate
+ * even more. */
+ if (rates[0].count >= 3) {
+ --rates[0].count;
+ ++rates[2].count;
+ }
+
+ /* Adjust amount of rates defined */
+ count += 2;
+ } else {
+ /* Keep fallback rate at 1Mbps. */
+ rates[2] = rates[1];
+
+ /* Inject 2 transmissions on lowest g-rate */
+ rates[1].idx = 4;
+ rates[1].count = 2;
+
+ /* Adjust amount of rates defined */
+ count += 1;
+ }
+ }
+
+ policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1;
+
+ for (i = 0; i < count; ++i) {
+ register unsigned rateid, off, shift, retries;
+
+ rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value;
+ off = rateid >> 3; /* eq. rateid / 8 */
+ shift = (rateid & 0x07) << 2; /* eq. (rateid % 8) * 4 */
+
+ retries = rates[i].count;
+ if (unlikely(retries > 0x0F))
+ rates[i].count = retries = 0x0F;
+ policy->tbl[off] |= __cpu_to_le32(retries << shift);
+ policy->retry_count += retries;
+ }
+
+ tx_policy_printk(KERN_DEBUG "[TX policy] Policy (%d): " \
+ "%d:%d, %d:%d, %d:%d, %d:%d, %d:%d\n",
+ count,
+ rates[0].idx, rates[0].count,
+ rates[1].idx, rates[1].count,
+ rates[2].idx, rates[2].count,
+ rates[3].idx, rates[3].count,
+ rates[4].idx, rates[4].count);
+}
+
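+/*
+ * Policies are compared nibble-wise: "defined" holds the number of 4-bit
+ * retry fields in use, so the comparison covers defined / 2 whole bytes
+ * plus the low nibble of the next byte when "defined" is odd.
+ */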
+static inline bool tx_policy_is_equal(const struct tx_policy *wanted,
+ const struct tx_policy *cached)
+{
+ size_t count = wanted->defined >> 1;
+ if (wanted->defined > cached->defined)
+ return false;
+ if (count) {
+ if (memcmp(wanted->raw, cached->raw, count))
+ return false;
+ }
+ if (wanted->defined & 1) {
+ if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F))
+ return false;
+ }
+ return true;
+}
+
+static int tx_policy_find(struct tx_policy_cache *cache,
+ const struct tx_policy *wanted)
+{
+ /* O(n) complexity. Not so good, but there are only 8 entries in
+ * the cache.
+ * LRU ordering also helps to reduce the search time. */
+ struct tx_policy_cache_entry *it;
+ /* First search for the policy in the "used" list. */
+ list_for_each_entry(it, &cache->used, link) {
+ if (tx_policy_is_equal(wanted, &it->policy))
+ return it - cache->cache;
+ }
+ /* Then search the "free" list. */
+ list_for_each_entry(it, &cache->free, link) {
+ if (tx_policy_is_equal(wanted, &it->policy))
+ return it - cache->cache;
+ }
+ return -1;
+}
+
+static inline void tx_policy_use(struct tx_policy_cache *cache,
+ struct tx_policy_cache_entry *entry)
+{
+ ++entry->policy.usage_count;
+ list_move(&entry->link, &cache->used);
+}
+
+static inline int tx_policy_release(struct tx_policy_cache *cache,
+ struct tx_policy_cache_entry *entry)
+{
+ int ret = --entry->policy.usage_count;
+ if (!ret)
+ list_move(&entry->link, &cache->free);
+ return ret;
+}
+
+/* ******************************************************************** */
+/* External TX policy cache API */
+
+void tx_policy_init(struct cw1200_common *priv)
+{
+ struct tx_policy_cache *cache = &priv->tx_policy_cache;
+ int i;
+
+ memset(cache, 0, sizeof(*cache));
+
+ spin_lock_init(&cache->lock);
+ INIT_LIST_HEAD(&cache->used);
+ INIT_LIST_HEAD(&cache->free);
+
+ for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i)
+ list_add(&cache->cache[i].link, &cache->free);
+}
+
+static int tx_policy_get(struct cw1200_common *priv,
+ struct ieee80211_tx_rate *rates,
+ size_t count, bool *renew)
+{
+ int idx;
+ struct tx_policy_cache *cache = &priv->tx_policy_cache;
+ struct tx_policy wanted;
+
+ tx_policy_build(priv, &wanted, rates, count);
+
+ spin_lock_bh(&cache->lock);
+ if (WARN_ON_ONCE(list_empty(&cache->free))) {
+ spin_unlock_bh(&cache->lock);
+ return CW1200_INVALID_RATE_ID;
+ }
+ idx = tx_policy_find(cache, &wanted);
+ if (idx >= 0) {
+ tx_policy_printk(KERN_DEBUG "[TX policy] Used TX policy: %d\n",
+ idx);
+ *renew = false;
+ } else {
+ struct tx_policy_cache_entry *entry;
+ *renew = true;
+ /* If the policy is not found, create a new one
+ * using the oldest entry in the "free" list. */
+ entry = list_entry(cache->free.prev,
+ struct tx_policy_cache_entry, link);
+ entry->policy = wanted;
+ idx = entry - cache->cache;
+ tx_policy_printk(KERN_DEBUG "[TX policy] New TX policy: %d\n",
+ idx);
+ tx_policy_dump(&entry->policy);
+ }
+ tx_policy_use(cache, &cache->cache[idx]);
+ if (unlikely(list_empty(&cache->free))) {
+ /* Lock TX queues. */
+ cw1200_tx_queues_lock(priv);
+ }
+ spin_unlock_bh(&cache->lock);
+ return idx;
+}
+
+static void tx_policy_put(struct cw1200_common *priv, int idx)
+{
+ int usage, locked;
+ struct tx_policy_cache *cache = &priv->tx_policy_cache;
+
+ spin_lock_bh(&cache->lock);
+ locked = list_empty(&cache->free);
+ usage = tx_policy_release(cache, &cache->cache[idx]);
+ if (unlikely(locked) && !usage) {
+ /* Unlock TX queues. */
+ cw1200_tx_queues_unlock(priv);
+ }
+ spin_unlock_bh(&cache->lock);
+}
+
+/*
+bool tx_policy_cache_full(struct cw1200_common *priv)
+{
+ bool ret;
+ struct tx_policy_cache *cache = &priv->tx_policy_cache;
+ spin_lock_bh(&cache->lock);
+ ret = list_empty(&cache->free);
+ spin_unlock_bh(&cache->lock);
+ return ret;
+}
+*/
+
+static int tx_policy_upload(struct cw1200_common *priv)
+{
+ struct tx_policy_cache *cache = &priv->tx_policy_cache;
+ int i;
+ struct wsm_set_tx_rate_retry_policy arg = {
+ .hdr = {
+ .numTxRatePolicies = 0,
+ }
+ };
+ spin_lock_bh(&cache->lock);
+
+ /* Upload only modified entries. */
+ for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) {
+ struct tx_policy *src = &cache->cache[i].policy;
+ if (src->retry_count && !src->uploaded) {
+ struct wsm_set_tx_rate_retry_policy_policy *dst =
+ &arg.tbl[arg.hdr.numTxRatePolicies];
+ dst->policyIndex = i;
+ dst->shortRetryCount = priv->short_frame_max_tx_count;
+ dst->longRetryCount = priv->long_frame_max_tx_count;
+
+ /* BIT(2) - Terminate retries when Tx rate retry policy
+ * finishes.
+ * BIT(3) - Count initial frame transmission as part of
+ * rate retry counting but not as a retry
+ * attempt */
+ dst->policyFlags = BIT(2) | BIT(3);
+
+ memcpy(dst->rateCountIndices, src->tbl,
+ sizeof(dst->rateCountIndices));
+ src->uploaded = 1;
+ ++arg.hdr.numTxRatePolicies;
+ }
+ }
+ spin_unlock_bh(&cache->lock);
+ cw1200_debug_tx_cache_miss(priv);
+ tx_policy_printk(KERN_DEBUG "[TX policy] Upload %d policies\n",
+ arg.hdr.numTxRatePolicies);
+ return wsm_set_tx_rate_retry_policy(priv, &arg);
+}
+
+void tx_policy_upload_work(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, tx_policy_upload_work);
+
+ tx_policy_printk(KERN_DEBUG "[TX] TX policy upload.\n");
+ WARN_ON(tx_policy_upload(priv));
+
+ wsm_unlock_tx(priv);
+ cw1200_tx_queues_unlock(priv);
+}
+
+/* ******************************************************************** */
+/* cw1200 TX implementation */
+
+struct cw1200_txinfo {
+ struct sk_buff *skb;
+ unsigned queue;
+ struct ieee80211_tx_info *tx_info;
+ const struct ieee80211_rate *rate;
+ struct ieee80211_hdr *hdr;
+ size_t hdrlen;
+ const u8 *da;
+ struct cw1200_sta_priv *sta_priv;
+ struct cw1200_txpriv txpriv;
+};
+
+u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates)
+{
+ u32 ret = 0;
+ int i;
+ for (i = 0; i < 32; ++i) {
+ if (rates & BIT(i))
+ ret |= BIT(priv->rates[i].hw_value);
+ }
+ return ret;
+}
+
+static const struct ieee80211_rate *
+cw1200_get_tx_rate(const struct cw1200_common *priv,
+ const struct ieee80211_tx_rate *rate)
+{
+ if (rate->idx < 0)
+ return NULL;
+ if (rate->flags & IEEE80211_TX_RC_MCS)
+ return &priv->mcs_rates[rate->idx];
+ return &priv->hw->wiphy->bands[priv->channel->band]->
+ bitrates[rate->idx];
+}
+
+static int
+cw1200_tx_h_calc_link_ids(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+
+ if (likely(t->tx_info->control.sta && t->sta_priv->link_id))
+ t->txpriv.raw_link_id =
+ t->txpriv.link_id =
+ t->sta_priv->link_id;
+ else if (priv->mode != NL80211_IFTYPE_AP)
+ t->txpriv.raw_link_id =
+ t->txpriv.link_id = 0;
+ else if (is_multicast_ether_addr(t->da)) {
+ if (priv->enable_beacon) {
+ t->txpriv.raw_link_id = 0;
+ t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM;
+ } else {
+ t->txpriv.raw_link_id = 0;
+ t->txpriv.link_id = 0;
+ }
+ } else {
+ t->txpriv.link_id = cw1200_find_link_id(priv, t->da);
+ if (!t->txpriv.link_id)
+ t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da);
+ if (!t->txpriv.link_id) {
+ wiphy_err(priv->hw->wiphy,
+ "%s: No more link IDs available.\n",
+ __func__);
+ return -ENOENT;
+ }
+ t->txpriv.raw_link_id = t->txpriv.link_id;
+ }
+ if (t->txpriv.raw_link_id)
+ priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp =
+ jiffies;
+
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ if (t->tx_info->control.sta &&
+ (t->tx_info->control.sta->uapsd_queues & BIT(t->queue)))
+ t->txpriv.link_id = CW1200_LINK_ID_UAPSD;
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+ return 0;
+}
+
+static void
+cw1200_tx_h_pm(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ if (unlikely(ieee80211_is_auth(t->hdr->frame_control))) {
+ u32 mask = ~BIT(t->txpriv.raw_link_id);
+ spin_lock_bh(&priv->ps_state_lock);
+ priv->sta_asleep_mask &= mask;
+ priv->pspoll_mask &= mask;
+ spin_unlock_bh(&priv->ps_state_lock);
+ }
+}
+
+static void
+cw1200_tx_h_calc_tid(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ if (ieee80211_is_data_qos(t->hdr->frame_control)) {
+ u8 *qos = ieee80211_get_qos_ctl(t->hdr);
+ t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK;
+ } else if (ieee80211_is_data(t->hdr->frame_control)) {
+ t->txpriv.tid = 0;
+ }
+}
+
+/* IV/ICV injection. */
+/* TODO: Quite suboptimal. It would be better to modify mac80211
+ * to reserve space for the IV. */
+static int
+cw1200_tx_h_crypt(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ size_t iv_len;
+ size_t icv_len;
+ u8 *icv;
+ u8 *newhdr;
+
+ if (!t->tx_info->control.hw_key ||
+ !(t->hdr->frame_control &
+ __cpu_to_le32(IEEE80211_FCTL_PROTECTED)))
+ return 0;
+
+ iv_len = t->tx_info->control.hw_key->iv_len;
+ icv_len = t->tx_info->control.hw_key->icv_len;
+
+ if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ icv_len += 8; /* MIC */
+
+ if ((skb_headroom(t->skb) + skb_tailroom(t->skb) <
+ iv_len + icv_len + WSM_TX_EXTRA_HEADROOM) ||
+ (skb_headroom(t->skb) <
+ iv_len + WSM_TX_EXTRA_HEADROOM)) {
+ wiphy_err(priv->hw->wiphy,
+ "Bug: no space allocated for crypto headers.\n"
+ "headroom: %d, tailroom: %d, "
+ "req_headroom: %d, req_tailroom: %d\n"
+ "Please fix it in cw1200_get_skb().\n",
+ skb_headroom(t->skb), skb_tailroom(t->skb),
+ iv_len + WSM_TX_EXTRA_HEADROOM, icv_len);
+ return -ENOMEM;
+ } else if (skb_tailroom(t->skb) < icv_len) {
+ size_t offset = icv_len - skb_tailroom(t->skb);
+ u8 *p;
+ wiphy_warn(priv->hw->wiphy,
+ "Slowpath: tailroom is not big enough. "
+ "Req: %d, got: %d.\n",
+ icv_len, skb_tailroom(t->skb));
+
+ p = skb_push(t->skb, offset);
+ memmove(p, &p[offset], t->skb->len - offset);
+ skb_trim(t->skb, t->skb->len - offset);
+ }
+
+ newhdr = skb_push(t->skb, iv_len);
+ memmove(newhdr, newhdr + iv_len, t->hdrlen);
+ t->hdr = (struct ieee80211_hdr *) newhdr;
+ t->hdrlen += iv_len;
+ icv = skb_put(t->skb, icv_len);
+
+ return 0;
+}
+
+static int
+cw1200_tx_h_align(struct cw1200_common *priv,
+ struct cw1200_txinfo *t,
+ u8 *flags)
+{
+ size_t offset = (size_t)t->skb->data & 3;
+
+ if (!offset)
+ return 0;
+
+ if (offset & 1) {
+ wiphy_err(priv->hw->wiphy,
+ "Bug: attempt to transmit a frame "
+ "with wrong alignment: %d\n",
+ offset);
+ return -EINVAL;
+ }
+
+ if (skb_headroom(t->skb) < offset) {
+ wiphy_err(priv->hw->wiphy,
+ "Bug: no space allocated "
+ "for DMA alignment.\n"
+ "headroom: %d\n",
+ skb_headroom(t->skb));
+ return -ENOMEM;
+ }
+ skb_push(t->skb, offset);
+ t->hdrlen += offset;
+ t->txpriv.offset += offset;
+ *flags |= WSM_TX_2BYTES_SHIFT;
+ cw1200_debug_tx_align(priv);
+ return 0;
+}
+
+static int
+cw1200_tx_h_action(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ struct ieee80211_mgmt *mgmt =
+ (struct ieee80211_mgmt *)t->hdr;
+ if (ieee80211_is_action(t->hdr->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK)
+ return 1;
+ else
+ return 0;
+}
+
+/* Add WSM header */
+static struct wsm_tx *
+cw1200_tx_h_wsm(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ struct wsm_tx *wsm;
+
+ if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
+ wiphy_err(priv->hw->wiphy,
+ "Bug: no space allocated "
+ "for WSM header.\n"
+ "headroom: %d\n",
+ skb_headroom(t->skb));
+ return NULL;
+ }
+
+ wsm = (struct wsm_tx *)skb_push(t->skb, sizeof(struct wsm_tx));
+ t->txpriv.offset += sizeof(struct wsm_tx);
+ memset(wsm, 0, sizeof(*wsm));
+ wsm->hdr.len = __cpu_to_le16(t->skb->len);
+ wsm->hdr.id = __cpu_to_le16(0x0004);
+ wsm->queueId = wsm_queue_id_to_wsm(t->queue);
+ return wsm;
+}
+
+/* BT Coex specific handling */
+static void
+cw1200_tx_h_bt(struct cw1200_common *priv,
+ struct cw1200_txinfo *t,
+ struct wsm_tx *wsm)
+{
+ u8 priority = 0;
+
+ if (!priv->is_BT_Present)
+ return;
+
+ if (unlikely(ieee80211_is_nullfunc(t->hdr->frame_control)))
+ priority = WSM_EPTA_PRIORITY_MGT;
+ else if (ieee80211_is_data(t->hdr->frame_control)) {
+ /* Skip LLC SNAP header (+6) */
+ u8 *payload = &t->skb->data[t->hdrlen];
+ u16 *ethertype = (u16 *) &payload[6];
+ if (unlikely(*ethertype == __be16_to_cpu(ETH_P_PAE)))
+ priority = WSM_EPTA_PRIORITY_EAPOL;
+ } else if (unlikely(ieee80211_is_assoc_req(t->hdr->frame_control) ||
+ ieee80211_is_reassoc_req(t->hdr->frame_control))) {
+ struct ieee80211_mgmt *mgt_frame =
+ (struct ieee80211_mgmt *)t->hdr;
+
+ if (mgt_frame->u.assoc_req.listen_interval <
+ priv->listen_interval) {
+ txrx_printk(KERN_DEBUG
+ "Modified Listen Interval to %d from %d\n",
+ priv->listen_interval,
+ mgt_frame->u.assoc_req.listen_interval);
+ /* Replace the listen interval with the one
+ * derived from the SDD file. */
+ mgt_frame->u.assoc_req.listen_interval =
+ priv->listen_interval;
+ }
+ }
+
+ if (likely(!priority)) {
+ if (ieee80211_is_action(t->hdr->frame_control))
+ priority = WSM_EPTA_PRIORITY_ACTION;
+ else if (ieee80211_is_mgmt(t->hdr->frame_control))
+ priority = WSM_EPTA_PRIORITY_MGT;
+ else if (wsm->queueId == WSM_QUEUE_VOICE)
+ priority = WSM_EPTA_PRIORITY_VOICE;
+ else if (wsm->queueId == WSM_QUEUE_VIDEO)
+ priority = WSM_EPTA_PRIORITY_VIDEO;
+ else
+ priority = WSM_EPTA_PRIORITY_DATA;
+ }
+
+ txrx_printk(KERN_DEBUG "[TX] EPTA priority %d.\n",
+ priority);
+
+ wsm->flags |= priority << 1;
+}
+
+static int
+cw1200_tx_h_rate_policy(struct cw1200_common *priv,
+ struct cw1200_txinfo *t,
+ struct wsm_tx *wsm)
+{
+ bool tx_policy_renew = false;
+
+ t->txpriv.rate_id = tx_policy_get(priv,
+ t->tx_info->control.rates, IEEE80211_TX_MAX_RATES,
+ &tx_policy_renew);
+ if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID)
+ return -EFAULT;
+
+ wsm->flags |= t->txpriv.rate_id << 4;
+
+ t->rate = cw1200_get_tx_rate(priv,
+ &t->tx_info->control.rates[0]);
+ wsm->maxTxRate = t->rate->hw_value;
+ if (t->rate->flags & IEEE80211_TX_RC_MCS) {
+ if (cw1200_ht_greenfield(&priv->ht_info))
+ wsm->htTxParameters |=
+ __cpu_to_le32(WSM_HT_TX_GREENFIELD);
+ else
+ wsm->htTxParameters |=
+ __cpu_to_le32(WSM_HT_TX_MIXED);
+ }
+
+ if (tx_policy_renew) {
+ tx_policy_printk(KERN_DEBUG "[TX] TX policy renew.\n");
+ /* It's not optimal to stop the TX queues every now and then.
+ * Maybe it's better to reimplement task scheduling with
+ * a counter. */
+ /* cw1200_tx_queues_lock(priv); */
+ /* Definitely better. TODO. */
+ wsm_lock_tx_async(priv);
+ cw1200_tx_queues_lock(priv);
+ if (queue_work(priv->workqueue,
+ &priv->tx_policy_upload_work) <= 0) {
+ cw1200_tx_queues_unlock(priv);
+ wsm_unlock_tx(priv);
+ }
+ }
+ return 0;
+}
+
+static bool
+cw1200_tx_h_pm_state(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ int was_buffered = 1;
+
+ if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM &&
+ !priv->buffered_multicasts) {
+ priv->buffered_multicasts = true;
+ if (priv->sta_asleep_mask)
+ queue_work(priv->workqueue,
+ &priv->multicast_start_work);
+ }
+
+ if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID)
+ was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1]
+ .buffered[t->txpriv.tid]++;
+
+ return !was_buffered;
+}
+
+static void
+cw1200_tx_h_ba_stat(struct cw1200_common *priv,
+ struct cw1200_txinfo *t)
+{
+ if (priv->join_status != CW1200_JOIN_STATUS_STA)
+ return;
+ if (!cw1200_is_ht(&priv->ht_info))
+ return;
+ if (!priv->setbssparams_done)
+ return;
+ if (!ieee80211_is_data(t->hdr->frame_control))
+ return;
+
+ spin_lock_bh(&priv->ba_lock);
+ priv->ba_acc += t->skb->len - t->hdrlen;
+ if (!priv->ba_cnt++) {
+ mod_timer(&priv->ba_timer,
+ jiffies + CW1200_BLOCK_ACK_INTERVAL);
+ }
+ spin_unlock_bh(&priv->ba_lock);
+}
+
+/* ******************************************************************** */
+
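+/*
+ * TX entry point. The frame is passed through a chain of helpers
+ * (link-id lookup, PM bookkeeping, TID extraction, IV/ICV space injection,
+ * DMA alignment, BACK action filtering, WSM header prepending, BT-coex
+ * priority tagging and rate-policy selection) and is then queued for
+ * the bottom half.
+ */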
+void cw1200_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+{
+ struct cw1200_common *priv = dev->priv;
+ struct cw1200_txinfo t = {
+ .skb = skb,
+ .queue = skb_get_queue_mapping(skb),
+ .tx_info = IEEE80211_SKB_CB(skb),
+ .hdr = (struct ieee80211_hdr *)skb->data,
+ .txpriv.tid = CW1200_MAX_TID,
+ .txpriv.rate_id = CW1200_INVALID_RATE_ID,
+ };
+ struct ieee80211_sta *sta;
+ struct wsm_tx *wsm;
+ bool tid_update = 0;
+ u8 flags = 0;
+ int ret;
+
+ t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control);
+ t.da = ieee80211_get_DA(t.hdr);
+ t.sta_priv =
+ (struct cw1200_sta_priv *)&t.tx_info->control.sta->drv_priv;
+
+ if (WARN_ON(t.queue >= 4))
+ goto drop;
+
+ ret = cw1200_tx_h_calc_link_ids(priv, &t);
+ if (ret)
+ goto drop;
+
+ txrx_printk(KERN_DEBUG "[TX] TX %d bytes "
+ "(queue: %d, link_id: %d (%d)).\n",
+ skb->len, t.queue, t.txpriv.link_id,
+ t.txpriv.raw_link_id);
+
+ cw1200_tx_h_pm(priv, &t);
+ cw1200_tx_h_calc_tid(priv, &t);
+ ret = cw1200_tx_h_crypt(priv, &t);
+ if (ret)
+ goto drop;
+ ret = cw1200_tx_h_align(priv, &t, &flags);
+ if (ret)
+ goto drop;
+ ret = cw1200_tx_h_action(priv, &t);
+ if (ret)
+ goto drop;
+ wsm = cw1200_tx_h_wsm(priv, &t);
+ if (!wsm) {
+ ret = -ENOMEM;
+ goto drop;
+ }
+ wsm->flags |= flags;
+ cw1200_tx_h_bt(priv, &t, wsm);
+ ret = cw1200_tx_h_rate_policy(priv, &t, wsm);
+ if (ret)
+ goto drop;
+
+ rcu_read_lock();
+ sta = rcu_dereference(t.tx_info->control.sta);
+
+ cw1200_tx_h_ba_stat(priv, &t);
+ spin_lock_bh(&priv->ps_state_lock);
+ {
+ tid_update = cw1200_tx_h_pm_state(priv, &t);
+ BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue],
+ t.skb, &t.txpriv));
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ if (tid_update && sta)
+ ieee80211_sta_set_buffered(sta,
+ t.txpriv.tid, true);
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+
+ rcu_read_unlock();
+
+ cw1200_bh_wakeup(priv);
+
+ return;
+
+drop:
+ cw1200_skb_dtor(priv, skb, &t.txpriv);
+ return;
+}
+
+/* ******************************************************************** */
+
+static int cw1200_handle_action_rx(struct cw1200_common *priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ /* Filter block ACK negotiation: fully controlled by firmware */
+ if (mgmt->u.action.category == WLAN_CATEGORY_BACK)
+ return 1;
+
+ return 0;
+}
+
+static int cw1200_handle_pspoll(struct cw1200_common *priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_pspoll *pspoll =
+ (struct ieee80211_pspoll *) skb->data;
+ int link_id = 0;
+ u32 pspoll_mask = 0;
+ int drop = 1;
+ int i;
+
+ if (priv->join_status != CW1200_JOIN_STATUS_AP)
+ goto done;
+ if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN))
+ goto done;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, pspoll->ta);
+ if (sta) {
+ struct cw1200_sta_priv *sta_priv;
+ sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv;
+ link_id = sta_priv->link_id;
+ pspoll_mask = BIT(sta_priv->link_id);
+ }
+ rcu_read_unlock();
+ if (!link_id)
+ goto done;
+
+ priv->pspoll_mask |= pspoll_mask;
+ drop = 0;
+
+ /* Do not report pspolls if data for the given link id
+ * is already queued. */
+ for (i = 0; i < 4; ++i) {
+ if (cw1200_queue_get_num_queued(
+ &priv->tx_queue[i],
+ pspoll_mask)) {
+ cw1200_bh_wakeup(priv);
+ drop = 1;
+ break;
+ }
+ }
+ txrx_printk(KERN_DEBUG "[RX] PSPOLL: %s\n", drop ? "local" : "fwd");
+done:
+ return drop;
+}
+
+/* ******************************************************************** */
+
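+/*
+ * TX confirmation from the firmware: a WSM_REQUEUE status is treated as an
+ * implicit "suspend" for the link and the frame is re-queued; otherwise
+ * the mac80211 TX status is filled in, the BSS-loss confirmation and the
+ * TX-failure CQM counters are updated and the frame is removed from the
+ * queue.
+ */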
+void cw1200_tx_confirm_cb(struct cw1200_common *priv,
+ struct wsm_tx_confirm *arg)
+{
+ u8 queue_id = cw1200_queue_get_queue_id(arg->packetID);
+ struct cw1200_queue *queue = &priv->tx_queue[queue_id];
+ struct sk_buff *skb;
+ const struct cw1200_txpriv *txpriv;
+
+ txrx_printk(KERN_DEBUG "[TX] TX confirm: %d, %d.\n",
+ arg->status, arg->ackFailures);
+
+ if (unlikely(cw1200_itp_tx_running(priv)))
+ return;
+
+ if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) {
+ /* STA is stopped. */
+ return;
+ }
+
+ if (WARN_ON(queue_id >= 4))
+ return;
+
+ if (arg->status)
+ txrx_printk(KERN_DEBUG "TX failed: %d.\n",
+ arg->status);
+
+ if ((arg->status == WSM_REQUEUE) &&
+ (arg->flags & WSM_TX_STATUS_REQUEUE)) {
+ /* "Requeue" means "implicit suspend" */
+ struct wsm_suspend_resume suspend = {
+ .link_id = arg->link_id,
+ .stop = 1,
+ .multicast = !arg->link_id,
+ };
+ cw1200_suspend_resume(priv, &suspend);
+ wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d)."
+ " STAs asleep: 0x%.8X\n",
+ arg->link_id,
+ cw1200_queue_get_generation(arg->packetID) + 1,
+ priv->sta_asleep_mask);
+ WARN_ON(cw1200_queue_requeue(queue,
+ arg->packetID));
+ spin_lock_bh(&priv->ps_state_lock);
+ if (!arg->link_id) {
+ priv->buffered_multicasts = true;
+ if (priv->sta_asleep_mask) {
+ queue_work(priv->workqueue,
+ &priv->multicast_start_work);
+ }
+ }
+ spin_unlock_bh(&priv->ps_state_lock);
+ } else if (!WARN_ON(cw1200_queue_get_skb(
+ queue, arg->packetID, &skb, &txpriv))) {
+ struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb);
+ int tx_count = arg->ackFailures;
+ u8 ht_flags = 0;
+ int i;
+
+ if (cw1200_ht_greenfield(&priv->ht_info))
+ ht_flags |= IEEE80211_TX_RC_GREEN_FIELD;
+
+ if (likely(!arg->status)) {
+ tx->flags |= IEEE80211_TX_STAT_ACK;
+ priv->cqm_tx_failure_count = 0;
+ ++tx_count;
+ cw1200_debug_txed(priv);
+ if (arg->flags & WSM_TX_STATUS_AGGREGATION) {
+ /* Do not report aggregation to mac80211:
+ * it confuses minstrel a lot. */
+ /* tx->flags |= IEEE80211_TX_STAT_AMPDU; */
+ cw1200_debug_txed_agg(priv);
+ }
+ } else {
+ spin_lock(&priv->bss_loss_lock);
+ if (priv->bss_loss_status ==
+ CW1200_BSS_LOSS_CONFIRMING &&
+ priv->bss_loss_confirm_id ==
+ arg->packetID) {
+ priv->bss_loss_status =
+ CW1200_BSS_LOSS_CONFIRMED;
+ spin_unlock(&priv->bss_loss_lock);
+ cancel_delayed_work(&priv->bss_loss_work);
+ queue_delayed_work(priv->workqueue,
+ &priv->bss_loss_work, 0);
+ } else
+ spin_unlock(&priv->bss_loss_lock);
+
+ /* TODO: Update TX failure counters */
+ if (unlikely(priv->cqm_tx_failure_thold &&
+ (++priv->cqm_tx_failure_count >
+ priv->cqm_tx_failure_thold))) {
+ priv->cqm_tx_failure_thold = 0;
+ queue_work(priv->workqueue,
+ &priv->tx_failure_work);
+ }
+ if (tx_count)
+ ++tx_count;
+ }
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
+ if (tx->status.rates[i].count >= tx_count) {
+ tx->status.rates[i].count = tx_count;
+ break;
+ }
+ tx_count -= tx->status.rates[i].count;
+ if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS)
+ tx->status.rates[i].flags |= ht_flags;
+ }
+
+ for (++i; i < IEEE80211_TX_MAX_RATES; ++i) {
+ tx->status.rates[i].count = 0;
+ tx->status.rates[i].idx = -1;
+ }
+
+ cw1200_queue_remove(queue, arg->packetID);
+ }
+}
+
+static void cw1200_notify_buffered_tx(struct cw1200_common *priv,
+ struct sk_buff *skb, int link_id, int tid)
+{
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ u8 *buffered;
+ u8 still_buffered = 0;
+
+ if (link_id && tid < CW1200_MAX_TID) {
+ buffered = priv->link_id_db
+ [link_id - 1].buffered;
+
+ spin_lock_bh(&priv->ps_state_lock);
+ if (!WARN_ON(!buffered[tid]))
+ still_buffered = --buffered[tid];
+ spin_unlock_bh(&priv->ps_state_lock);
+
+ if (!still_buffered && tid < CW1200_MAX_TID) {
+ hdr = (struct ieee80211_hdr *) skb->data;
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ if (sta)
+ ieee80211_sta_set_buffered(sta, tid, false);
+ rcu_read_unlock();
+ }
+ }
+#endif /* CONFIG_CW1200_USE_STE_EXTENSIONS */
+}
+
+void cw1200_skb_dtor(struct cw1200_common *priv,
+ struct sk_buff *skb,
+ const struct cw1200_txpriv *txpriv)
+{
+ skb_pull(skb, txpriv->offset);
+ if (txpriv->rate_id != CW1200_INVALID_RATE_ID) {
+ cw1200_notify_buffered_tx(priv, skb,
+ txpriv->raw_link_id, txpriv->tid);
+ tx_policy_put(priv, txpriv->rate_id);
+ }
+ if (likely(!cw1200_is_itp(priv)))
+ ieee80211_tx_status(priv->hw, skb);
+}
+
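+/*
+ * RX entry point: update the per-link-id database, filter PS-Poll and
+ * BACK action frames, translate channel/rate/RCPI into the mac80211 RX
+ * status, strip IV/ICV/MIC from hardware-decrypted frames, re-enable
+ * beacon filtering once a beacon from the joined BSS has been delivered,
+ * and hand the frame to mac80211 (or buffer it for links still in the
+ * SOFT state).
+ */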
+void cw1200_rx_cb(struct cw1200_common *priv,
+ struct wsm_rx *arg,
+ struct sk_buff **skb_p)
+{
+ struct sk_buff *skb = *skb_p;
+ struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+#endif
+ struct cw1200_link_entry *entry = NULL;
+ unsigned long grace_period;
+ bool early_data = false;
+ hdr->flag = 0;
+
+ if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) {
+ /* STA is stopped. */
+ goto drop;
+ }
+
+ if (arg->link_id && arg->link_id <= CW1200_MAX_STA_IN_AP_MODE) {
+ entry = &priv->link_id_db[arg->link_id - 1];
+ if (entry->status == CW1200_LINK_SOFT &&
+ ieee80211_is_data(frame->frame_control))
+ early_data = true;
+ entry->timestamp = jiffies;
+ }
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ else if ((priv->vif->p2p == WSM_START_MODE_P2P_GO)
+ && ieee80211_is_action(frame->frame_control)
+ && (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
+ txrx_printk(KERN_DEBUG "[RX] Going to MAP&RESET link ID\n");
+
+ if (work_pending(&priv->linkid_reset_work))
+ WARN_ON(1);
+
+ memcpy(&priv->action_frame_sa[0],
+ ieee80211_get_SA(frame), ETH_ALEN);
+ priv->action_linkid = 0;
+ schedule_work(&priv->linkid_reset_work);
+ }
+
+ if (arg->link_id && (priv->vif->p2p == WSM_START_MODE_P2P_GO)
+ && ieee80211_is_action(frame->frame_control)
+ && (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
+ /* Link ID already exists for the ACTION frame.
+ * Reset and Remap */
+ if (work_pending(&priv->linkid_reset_work))
+ WARN_ON(1);
+ memcpy(&priv->action_frame_sa[0],
+ ieee80211_get_SA(frame), ETH_ALEN);
+ priv->action_linkid = arg->link_id;
+ schedule_work(&priv->linkid_reset_work);
+ }
+#endif
+ if (unlikely(arg->status)) {
+ if (arg->status == WSM_STATUS_MICFAILURE) {
+ txrx_printk(KERN_DEBUG "[RX] MIC failure.\n");
+ hdr->flag |= RX_FLAG_MMIC_ERROR;
+ } else if (arg->status == WSM_STATUS_NO_KEY_FOUND) {
+ txrx_printk(KERN_DEBUG "[RX] No key found.\n");
+ goto drop;
+ } else {
+ txrx_printk(KERN_DEBUG "[RX] Receive failure: %d.\n",
+ arg->status);
+ goto drop;
+ }
+ }
+
+ if (skb->len < sizeof(struct ieee80211_pspoll)) {
+ wiphy_warn(priv->hw->wiphy, "Mailformed SDU rx'ed. "
+ "Size is lesser than IEEE header.\n");
+ goto drop;
+ }
+
+ if (unlikely(ieee80211_is_pspoll(frame->frame_control)))
+ if (cw1200_handle_pspoll(priv, skb))
+ goto drop;
+
+ hdr->mactime = 0; /* Not supported by WSM */
+ hdr->band = (arg->channelNumber > 14) ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ hdr->freq = ieee80211_channel_to_frequency(
+ arg->channelNumber,
+ hdr->band);
+
+ if (arg->rxedRate >= 14) {
+ hdr->flag |= RX_FLAG_HT;
+ hdr->rate_idx = arg->rxedRate - 14;
+ } else if (arg->rxedRate >= 4) {
+ hdr->rate_idx = arg->rxedRate - 2;
+ } else {
+ hdr->rate_idx = arg->rxedRate;
+ }
+
+ hdr->signal = (s8)arg->rcpiRssi;
+ hdr->antenna = 0;
+
+ if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
+ size_t iv_len = 0, icv_len = 0;
+ size_t hdrlen = ieee80211_hdrlen(frame->frame_control);
+
+ hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;
+
+ /* Oops... There is no fast way to ask mac80211 about
+ * IV/ICV lengths. Even the defines are not exposed. */
+ switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
+ case WSM_RX_STATUS_WEP:
+ iv_len = 4 /* WEP_IV_LEN */;
+ icv_len = 4 /* WEP_ICV_LEN */;
+ break;
+ case WSM_RX_STATUS_TKIP:
+ iv_len = 8 /* TKIP_IV_LEN */;
+ icv_len = 4 /* TKIP_ICV_LEN */
+ + 8 /*MICHAEL_MIC_LEN*/;
+ hdr->flag |= RX_FLAG_MMIC_STRIPPED;
+ break;
+ case WSM_RX_STATUS_AES:
+ iv_len = 8 /* CCMP_HDR_LEN */;
+ icv_len = 8 /* CCMP_MIC_LEN */;
+ break;
+ case WSM_RX_STATUS_WAPI:
+ iv_len = 18 /* WAPI_HDR_LEN */;
+ icv_len = 16 /* WAPI_MIC_LEN */;
+ break;
+ default:
+ WARN_ON("Unknown encryption type");
+ goto drop;
+ }
+
+ /* Firmware strips ICV in case of MIC failure. */
+ if (arg->status == WSM_STATUS_MICFAILURE)
+ icv_len = 0;
+
+ if (skb->len < hdrlen + iv_len + icv_len) {
+ wiphy_warn(priv->hw->wiphy, "Mailformed SDU rx'ed. "
+ "Size is lesser than crypto headers.\n");
+ goto drop;
+ }
+
+ /* Remove IV, ICV and MIC */
+ skb_trim(skb, skb->len - icv_len);
+ memmove(skb->data + iv_len, skb->data, hdrlen);
+ skb_pull(skb, iv_len);
+ }
+
+ cw1200_debug_rxed(priv);
+ if (arg->flags & WSM_RX_STATUS_AGGREGATE)
+ cw1200_debug_rxed_agg(priv);
+
+ if (ieee80211_is_action(frame->frame_control) &&
+ (arg->flags & WSM_RX_STATUS_ADDRESS1)) {
+ if (cw1200_handle_action_rx(priv, skb))
+ return;
+ } else if (unlikely(priv->disable_beacon_filter) &&
+ !arg->status &&
+ ieee80211_is_beacon(frame->frame_control) &&
+ !memcmp(ieee80211_get_SA(frame), priv->join_bssid,
+ ETH_ALEN)) {
+ priv->disable_beacon_filter = false;
+ queue_work(priv->workqueue, &priv->update_filtering_work);
+ }
+
+ /* Stay awake for 1 sec after a frame is received to give
+ * userspace a chance to react and acquire the appropriate
+ * wakelock. */
+ if (ieee80211_is_auth(frame->frame_control))
+ grace_period = 5 * HZ;
+ else if (ieee80211_is_deauth(frame->frame_control))
+ grace_period = 5 * HZ;
+ else
+ grace_period = 1 * HZ;
+ cw1200_pm_stay_awake(&priv->pm_state, grace_period);
+
+ if (unlikely(cw1200_itp_rxed(priv, skb)))
+ consume_skb(skb);
+ else if (unlikely(early_data)) {
+ spin_lock_bh(&priv->ps_state_lock);
+ /* Double-check status with lock held */
+ if (entry->status == CW1200_LINK_SOFT)
+ skb_queue_tail(&entry->rx_queue, skb);
+ else
+ ieee80211_rx_irqsafe(priv->hw, skb);
+ spin_unlock_bh(&priv->ps_state_lock);
+ } else {
+ ieee80211_rx_irqsafe(priv->hw, skb);
+ }
+ *skb_p = NULL;
+
+ return;
+
+drop:
+ /* TODO: update failure counters */
+ return;
+}
+
+/* ******************************************************************** */
+/* Security */
+
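+/* key_map is a bitmask of WSM key slots in use; allocation picks the
+ * lowest free index via ffs(~key_map). */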
+int cw1200_alloc_key(struct cw1200_common *priv)
+{
+ int idx;
+
+ idx = ffs(~priv->key_map) - 1;
+ if (idx < 0 || idx > WSM_KEY_MAX_INDEX)
+ return -1;
+
+ priv->key_map |= BIT(idx);
+ priv->keys[idx].entryIndex = idx;
+ return idx;
+}
+
+void cw1200_free_key(struct cw1200_common *priv, int idx)
+{
+ BUG_ON(!(priv->key_map & BIT(idx)));
+ memset(&priv->keys[idx], 0, sizeof(priv->keys[idx]));
+ priv->key_map &= ~BIT(idx);
+}
+
+void cw1200_free_keys(struct cw1200_common *priv)
+{
+ memset(&priv->keys, 0, sizeof(priv->keys));
+ priv->key_map = 0;
+}
+
+int cw1200_upload_keys(struct cw1200_common *priv)
+{
+ int idx, ret = 0;
+ for (idx = 0; idx <= WSM_KEY_MAX_INDEX; ++idx)
+ if (priv->key_map & BIT(idx)) {
+ ret = wsm_add_key(priv, &priv->keys[idx]);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+/* Workaround for WFD test case 6.1.10 */
+void cw1200_link_id_reset(struct work_struct *work)
+{
+ struct cw1200_common *priv =
+ container_of(work, struct cw1200_common, linkid_reset_work);
+ int temp_linkid;
+
+ if (!priv->action_linkid) {
+ /* In GO mode we can receive ACTION frames without a linkID */
+ temp_linkid = cw1200_alloc_link_id(priv,
+ &priv->action_frame_sa[0]);
+ WARN_ON(!temp_linkid);
+ if (temp_linkid) {
+ /* Make sure we execute the WQ */
+ flush_workqueue(priv->workqueue);
+ /* Release the link ID */
+ spin_lock_bh(&priv->ps_state_lock);
+ priv->link_id_db[temp_linkid - 1].prev_status =
+ priv->link_id_db[temp_linkid - 1].status;
+ priv->link_id_db[temp_linkid - 1].status =
+ CW1200_LINK_RESET;
+ spin_unlock_bh(&priv->ps_state_lock);
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue,
+ &priv->link_id_work) <= 0)
+ wsm_unlock_tx(priv);
+ }
+ } else {
+ spin_lock_bh(&priv->ps_state_lock);
+ priv->link_id_db[priv->action_linkid - 1].prev_status =
+ priv->link_id_db[priv->action_linkid - 1].status;
+ priv->link_id_db[priv->action_linkid - 1].status =
+ CW1200_LINK_RESET_REMAP;
+ spin_unlock_bh(&priv->ps_state_lock);
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
+ wsm_unlock_tx(priv);
+ flush_workqueue(priv->workqueue);
+ }
+}
+#endif
diff --git a/drivers/staging/cw1200/txrx.h b/drivers/staging/cw1200/txrx.h
new file mode 100644
index 00000000000..f3b2023ff2b
--- /dev/null
+++ b/drivers/staging/cw1200/txrx.h
@@ -0,0 +1,95 @@
+/*
+ * Datapath interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_TXRX_H
+#define CW1200_TXRX_H
+
+#include <linux/list.h>
+
+/* extern */ struct ieee80211_hw;
+/* extern */ struct sk_buff;
+/* extern */ struct wsm_tx;
+/* extern */ struct wsm_rx;
+/* extern */ struct wsm_tx_confirm;
+/* extern */ struct cw1200_txpriv;
+
+struct tx_policy {
+ union {
+ __le32 tbl[3];
+ u8 raw[12];
+ };
+ u8 defined; /* TODO: u32 or u8, profile and select best */
+ u8 usage_count; /* --// -- */
+ u8 retry_count; /* --// -- */
+ u8 uploaded;
+};
+
+struct tx_policy_cache_entry {
+ struct tx_policy policy;
+ struct list_head link;
+};
+
+#define TX_POLICY_CACHE_SIZE (8)
+struct tx_policy_cache {
+ struct tx_policy_cache_entry cache[TX_POLICY_CACHE_SIZE];
+ struct list_head used;
+ struct list_head free;
+ spinlock_t lock;
+};
+
+/* ******************************************************************** */
+/* TX policy cache */
+/* The TX policy cache exists because of an overcomplicated WSM API:
+ * the device does not accept a per-PDU TX retry sequence.
+ * It uses a "tx retry policy id" instead, so the driver has to sync
+ * Linux TX retry sequences with the retry policy table in the device.
+ */
+void tx_policy_init(struct cw1200_common *priv);
+void tx_policy_upload_work(struct work_struct *work);
+
+/* ******************************************************************** */
+/* TX implementation */
+
+u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv,
+ u32 rates);
+void cw1200_tx(struct ieee80211_hw *dev, struct sk_buff *skb);
+void cw1200_skb_dtor(struct cw1200_common *priv,
+ struct sk_buff *skb,
+ const struct cw1200_txpriv *txpriv);
+
+/* ******************************************************************** */
+/* WSM callbacks */
+
+void cw1200_tx_confirm_cb(struct cw1200_common *priv,
+ struct wsm_tx_confirm *arg);
+void cw1200_rx_cb(struct cw1200_common *priv,
+ struct wsm_rx *arg,
+ struct sk_buff **skb_p);
+
+/* ******************************************************************** */
+/* Timeout */
+
+void cw1200_tx_timeout(struct work_struct *work);
+
+/* ******************************************************************** */
+/* Security */
+int cw1200_alloc_key(struct cw1200_common *priv);
+void cw1200_free_key(struct cw1200_common *priv, int idx);
+void cw1200_free_keys(struct cw1200_common *priv);
+int cw1200_upload_keys(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* Workaround for WFD test case 6.1.10 */
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+void cw1200_link_id_reset(struct work_struct *work);
+#endif
+
+#endif /* CW1200_TXRX_H */
diff --git a/drivers/staging/cw1200/wsm.c b/drivers/staging/cw1200/wsm.c
new file mode 100644
index 00000000000..3cf53704f07
--- /dev/null
+++ b/drivers/staging/cw1200/wsm.c
@@ -0,0 +1,1836 @@
+/*
+ * WSM host interface (HI) implementation for
+ * ST-Ericsson CW1200 mac80211 drivers.
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "cw1200.h"
+#include "wsm.h"
+#include "bh.h"
+#include "debug.h"
+#include "itp.h"
+
+#if defined(CONFIG_CW1200_WSM_DEBUG)
+#define wsm_printk(...) printk(__VA_ARGS__)
+#else
+#define wsm_printk(...)
+#endif
+
+#define WSM_CMD_TIMEOUT (2 * HZ) /* With respect to interrupt loss */
+#define WSM_CMD_JOIN_TIMEOUT (7 * HZ) /* Join timeout is 5 sec. in FW */
+#define WSM_CMD_START_TIMEOUT (7 * HZ)
+#define WSM_CMD_RESET_TIMEOUT (3 * HZ) /* 2 sec. timeout was observed. */
+
+#define WSM_SKIP(buf, size) \
+ do { \
+ if (unlikely((buf)->data + size > (buf)->end)) \
+ goto underflow; \
+ (buf)->data += size; \
+ } while (0)
+
+#define WSM_GET(buf, ptr, size) \
+ do { \
+ if (unlikely((buf)->data + size > (buf)->end)) \
+ goto underflow; \
+ memcpy(ptr, (buf)->data, size); \
+ (buf)->data += size; \
+ } while (0)
+
+#define __WSM_GET(buf, type, cvt) \
+ ({ \
+ type val; \
+ if (unlikely((buf)->data + sizeof(type) > (buf)->end)) \
+ goto underflow; \
+ val = cvt(*(type *)(buf)->data); \
+ (buf)->data += sizeof(type); \
+ val; \
+ })
+
+#define WSM_GET8(buf) __WSM_GET(buf, u8, (u8))
+#define WSM_GET16(buf) __WSM_GET(buf, u16, __le16_to_cpu)
+#define WSM_GET32(buf) __WSM_GET(buf, u32, __le32_to_cpu)
+
+#define WSM_PUT(buf, ptr, size) \
+ do { \
+ if (unlikely((buf)->data + size > (buf)->end)) \
+ if (unlikely(wsm_buf_reserve((buf), size))) \
+ goto nomem; \
+ memcpy((buf)->data, ptr, size); \
+ (buf)->data += size; \
+ } while (0)
+
+#define __WSM_PUT(buf, val, type, cvt) \
+ do { \
+ if (unlikely((buf)->data + sizeof(type) > (buf)->end)) \
+ if (unlikely(wsm_buf_reserve((buf), sizeof(type)))) \
+ goto nomem; \
+ *(type *)(buf)->data = cvt(val); \
+ (buf)->data += sizeof(type); \
+ } while (0)
+
+#define WSM_PUT8(buf, val) __WSM_PUT(buf, val, u8, (u8))
+#define WSM_PUT16(buf, val) __WSM_PUT(buf, val, u16, __cpu_to_le16)
+#define WSM_PUT32(buf, val) __WSM_PUT(buf, val, u32, __cpu_to_le32)
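+
+/*
+ * Usage idiom for the helpers above (illustrative sketch, not a real
+ * command; "arg->some_field" is a placeholder): serializers jump to a
+ * local "nomem" label when the buffer cannot be grown, parsers jump to
+ * "underflow" when the message is shorter than expected, e.g.:
+ *
+ *	wsm_cmd_lock(priv);
+ *	WSM_PUT32(buf, arg->some_field);
+ *	ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_TIMEOUT);
+ *	wsm_cmd_unlock(priv);
+ *	return ret;
+ * nomem:
+ *	wsm_cmd_unlock(priv);
+ *	return -ENOMEM;
+ */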
+
+static void wsm_buf_reset(struct wsm_buf *buf);
+static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size);
+
+static int wsm_cmd_send(struct cw1200_common *priv,
+ struct wsm_buf *buf,
+ void *arg, u16 cmd, long tmo);
+
+static inline void wsm_cmd_lock(struct cw1200_common *priv)
+{
+ mutex_lock(&priv->wsm_cmd_mux);
+}
+
+static inline void wsm_cmd_unlock(struct cw1200_common *priv)
+{
+ mutex_unlock(&priv->wsm_cmd_mux);
+}
+
+/* ******************************************************************** */
+/* WSM API implementation */
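+/* All WSM requests share priv->wsm_cmd_buf and are serialized by
+ * priv->wsm_cmd_mux: every wsm_* command below builds its payload in
+ * that buffer between wsm_cmd_lock() and wsm_cmd_unlock(). */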
+
+static int wsm_generic_confirm(struct cw1200_common *priv,
+ void *arg,
+ struct wsm_buf *buf)
+{
+ u32 status = WSM_GET32(buf);
+ if (status != WSM_STATUS_SUCCESS)
+ return -EINVAL;
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+int wsm_configuration(struct cw1200_common *priv, struct wsm_configuration *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT32(buf, arg->dot11MaxTransmitMsduLifeTime);
+ WSM_PUT32(buf, arg->dot11MaxReceiveLifeTime);
+ WSM_PUT32(buf, arg->dot11RtsThreshold);
+
+ /* DPD block. */
+ WSM_PUT16(buf, arg->dpdData_size + 12);
+ WSM_PUT16(buf, 1); /* DPD version */
+ WSM_PUT(buf, arg->dot11StationId, ETH_ALEN);
+ WSM_PUT16(buf, 5); /* DPD flags */
+ WSM_PUT(buf, arg->dpdData, arg->dpdData_size);
+
+ ret = wsm_cmd_send(priv, buf, arg, 0x0009, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+static int wsm_configuration_confirm(struct cw1200_common *priv,
+ struct wsm_configuration *arg,
+ struct wsm_buf *buf)
+{
+ int i;
+ int status;
+
+ status = WSM_GET32(buf);
+ if (WARN_ON(status != WSM_STATUS_SUCCESS))
+ return -EINVAL;
+
+ WSM_GET(buf, arg->dot11StationId, ETH_ALEN);
+ arg->dot11FrequencyBandsSupported = WSM_GET8(buf);
+ WSM_SKIP(buf, 1);
+ arg->supportedRateMask = WSM_GET32(buf);
+ for (i = 0; i < 2; ++i) {
+ arg->txPowerRange[i].min_power_level = WSM_GET32(buf);
+ arg->txPowerRange[i].max_power_level = WSM_GET32(buf);
+ arg->txPowerRange[i].stepping = WSM_GET32(buf);
+ }
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+/* ******************************************************************** */
+
+int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ u16 cmd = 0x000A | WSM_TX_LINK_ID(arg->link_id);
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT32(buf, arg->reset_statistics ? 0 : 1);
+ ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_RESET_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+struct wsm_mib {
+ u16 mibId;
+ void *buf;
+ size_t buf_size;
+};
+
+int wsm_read_mib(struct cw1200_common *priv, u16 mibId, void *_buf,
+ size_t buf_size)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ struct wsm_mib mib_buf = {
+ .mibId = mibId,
+ .buf = _buf,
+ .buf_size = buf_size,
+ };
+ wsm_cmd_lock(priv);
+
+ WSM_PUT16(buf, mibId);
+ WSM_PUT16(buf, 0);
+
+ ret = wsm_cmd_send(priv, buf, &mib_buf, 0x0005, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+static int wsm_read_mib_confirm(struct cw1200_common *priv,
+ struct wsm_mib *arg,
+ struct wsm_buf *buf)
+{
+ u16 size;
+ if (WARN_ON(WSM_GET32(buf) != WSM_STATUS_SUCCESS))
+ return -EINVAL;
+
+ if (WARN_ON(WSM_GET16(buf) != arg->mibId))
+ return -EINVAL;
+
+ size = WSM_GET16(buf);
+ if (size > arg->buf_size)
+ size = arg->buf_size;
+
+ WSM_GET(buf, arg->buf, size);
+ arg->buf_size = size;
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+/* ******************************************************************** */
+
+int wsm_write_mib(struct cw1200_common *priv, u16 mibId, void *_buf,
+ size_t buf_size)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ struct wsm_mib mib_buf = {
+ .mibId = mibId,
+ .buf = _buf,
+ .buf_size = buf_size,
+ };
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT16(buf, mibId);
+ WSM_PUT16(buf, buf_size);
+ WSM_PUT(buf, _buf, buf_size);
+
+ ret = wsm_cmd_send(priv, buf, &mib_buf, 0x0006, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+static int wsm_write_mib_confirm(struct cw1200_common *priv,
+ struct wsm_mib *arg,
+ struct wsm_buf *buf)
+{
+ int ret;
+
+ ret = wsm_generic_confirm(priv, arg, buf);
+ if (ret)
+ return ret;
+
+ if (arg->mibId == 0x1006) {
+ /* OperationalMode: update PM status. */
+ const char *p = arg->buf;
+ cw1200_enable_powersave(priv,
+ (p[0] & 0x0F) ? true : false);
+ }
+ return 0;
+}
+
+/* ******************************************************************** */
+
+int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg)
+{
+ int i;
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ if (unlikely(arg->numOfChannels > 48))
+ return -EINVAL;
+
+ if (unlikely(arg->numOfSSIDs > 2))
+ return -EINVAL;
+
+ if (unlikely(arg->band > 1))
+ return -EINVAL;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->band);
+ WSM_PUT8(buf, arg->scanType);
+ WSM_PUT8(buf, arg->scanFlags);
+ WSM_PUT8(buf, arg->maxTransmitRate);
+ WSM_PUT32(buf, arg->autoScanInterval);
+ WSM_PUT8(buf, arg->numOfProbeRequests);
+ WSM_PUT8(buf, arg->numOfChannels);
+ WSM_PUT8(buf, arg->numOfSSIDs);
+ WSM_PUT8(buf, arg->probeDelay);
+
+ for (i = 0; i < arg->numOfChannels; ++i) {
+ WSM_PUT16(buf, arg->ch[i].number);
+ WSM_PUT16(buf, 0);
+ WSM_PUT32(buf, arg->ch[i].minChannelTime);
+ WSM_PUT32(buf, arg->ch[i].maxChannelTime);
+ WSM_PUT32(buf, 0);
+ }
+
+ for (i = 0; i < arg->numOfSSIDs; ++i) {
+ WSM_PUT32(buf, arg->ssids[i].length);
+ WSM_PUT(buf, &arg->ssids[i].ssid[0],
+ sizeof(arg->ssids[i].ssid));
+ }
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0007, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_stop_scan(struct cw1200_common *priv)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ wsm_cmd_lock(priv);
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0008, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+}
+
+
+static int wsm_tx_confirm(struct cw1200_common *priv,
+ struct wsm_buf *buf,
+ int link_id)
+{
+ struct wsm_tx_confirm tx_confirm;
+
+ tx_confirm.packetID = WSM_GET32(buf);
+ tx_confirm.status = WSM_GET32(buf);
+ tx_confirm.txedRate = WSM_GET8(buf);
+ tx_confirm.ackFailures = WSM_GET8(buf);
+ tx_confirm.flags = WSM_GET16(buf);
+ tx_confirm.mediaDelay = WSM_GET32(buf);
+ tx_confirm.txQueueDelay = WSM_GET32(buf);
+ tx_confirm.link_id = link_id;
+
+ if (priv->wsm_cbc.tx_confirm)
+ priv->wsm_cbc.tx_confirm(priv, &tx_confirm);
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static int wsm_multi_tx_confirm(struct cw1200_common *priv,
+ struct wsm_buf *buf, int link_id)
+{
+ int ret;
+ int count;
+ int i;
+
+ count = WSM_GET32(buf);
+ if (WARN_ON(count <= 0))
+ return -EINVAL;
+ else if (count > 1) {
+ ret = wsm_release_tx_buffer(priv, count - 1);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ cw1200_bh_wakeup(priv);
+ }
+
+ cw1200_debug_txed_multi(priv, count);
+ for (i = 0; i < count; ++i) {
+ ret = wsm_tx_confirm(priv, buf, link_id);
+ if (ret)
+ return ret;
+ }
+ return ret;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+/* ******************************************************************** */
+
+static int wsm_join_confirm(struct cw1200_common *priv,
+ struct wsm_join *arg,
+ struct wsm_buf *buf)
+{
+ if (WARN_ON(WSM_GET32(buf) != WSM_STATUS_SUCCESS))
+ return -EINVAL;
+
+ arg->minPowerLevel = WSM_GET32(buf);
+ arg->maxPowerLevel = WSM_GET32(buf);
+
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+int wsm_join(struct cw1200_common *priv, struct wsm_join *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->mode);
+ WSM_PUT8(buf, arg->band);
+ WSM_PUT16(buf, arg->channelNumber);
+ WSM_PUT(buf, &arg->bssid[0], sizeof(arg->bssid));
+ WSM_PUT16(buf, arg->atimWindow);
+ WSM_PUT8(buf, arg->preambleType);
+ WSM_PUT8(buf, arg->probeForJoin);
+ WSM_PUT8(buf, arg->dtimPeriod);
+ WSM_PUT8(buf, arg->flags);
+ WSM_PUT32(buf, arg->ssidLength);
+ WSM_PUT(buf, &arg->ssid[0], sizeof(arg->ssid));
+ WSM_PUT32(buf, arg->beaconInterval);
+ WSM_PUT32(buf, arg->basicRateSet);
+
+ priv->tx_burst_idx = -1;
+ ret = wsm_cmd_send(priv, buf, arg, 0x000B, WSM_CMD_JOIN_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_bss_params(struct cw1200_common *priv,
+ const struct wsm_set_bss_params *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, 0);
+ WSM_PUT8(buf, arg->beaconLostCount);
+ WSM_PUT16(buf, arg->aid);
+ WSM_PUT32(buf, arg->operationalRateSet);
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0011, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT(buf, arg, sizeof(*arg));
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x000C, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_remove_key(struct cw1200_common *priv, const struct wsm_remove_key *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->entryIndex);
+ WSM_PUT8(buf, 0);
+ WSM_PUT16(buf, 0);
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x000D, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_tx_queue_params(struct cw1200_common *priv,
+ const struct wsm_set_tx_queue_params *arg, u8 id)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ static const u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1};
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, queue_id_to_wmm_aci[id]);
+ WSM_PUT8(buf, 0);
+ WSM_PUT8(buf, arg->ackPolicy);
+ WSM_PUT8(buf, 0);
+ WSM_PUT32(buf, arg->maxTransmitLifetime);
+ WSM_PUT16(buf, arg->allowedMediumTime);
+ WSM_PUT16(buf, 0);
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0012, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_edca_params(struct cw1200_common *priv,
+ const struct wsm_edca_params *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ /* Implemented according to specification. */
+
+ WSM_PUT16(buf, arg->params[3].cwMin);
+ WSM_PUT16(buf, arg->params[2].cwMin);
+ WSM_PUT16(buf, arg->params[1].cwMin);
+ WSM_PUT16(buf, arg->params[0].cwMin);
+
+ WSM_PUT16(buf, arg->params[3].cwMax);
+ WSM_PUT16(buf, arg->params[2].cwMax);
+ WSM_PUT16(buf, arg->params[1].cwMax);
+ WSM_PUT16(buf, arg->params[0].cwMax);
+
+ WSM_PUT8(buf, arg->params[3].aifns);
+ WSM_PUT8(buf, arg->params[2].aifns);
+ WSM_PUT8(buf, arg->params[1].aifns);
+ WSM_PUT8(buf, arg->params[0].aifns);
+
+ WSM_PUT16(buf, arg->params[3].txOpLimit);
+ WSM_PUT16(buf, arg->params[2].txOpLimit);
+ WSM_PUT16(buf, arg->params[1].txOpLimit);
+ WSM_PUT16(buf, arg->params[0].txOpLimit);
+
+ WSM_PUT32(buf, arg->params[3].maxReceiveLifetime);
+ WSM_PUT32(buf, arg->params[2].maxReceiveLifetime);
+ WSM_PUT32(buf, arg->params[1].maxReceiveLifetime);
+ WSM_PUT32(buf, arg->params[0].maxReceiveLifetime);
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0013, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_switch_channel(struct cw1200_common *priv,
+ const struct wsm_switch_channel *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_lock_tx(priv);
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->channelMode);
+ WSM_PUT8(buf, arg->channelSwitchCount);
+ WSM_PUT16(buf, arg->newChannelNumber);
+
+ priv->channel_switch_in_progress = 1;
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0016, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ if (ret) {
+ wsm_unlock_tx(priv);
+ priv->channel_switch_in_progress = 0;
+ }
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ wsm_unlock_tx(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->pmMode);
+ WSM_PUT8(buf, arg->fastPsmIdlePeriod);
+ WSM_PUT8(buf, arg->apPsmChangePeriod);
+ WSM_PUT8(buf, arg->minAutoPsPollPeriod);
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0010, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT8(buf, arg->mode);
+ WSM_PUT8(buf, arg->band);
+ WSM_PUT16(buf, arg->channelNumber);
+ WSM_PUT32(buf, arg->CTWindow);
+ WSM_PUT32(buf, arg->beaconInterval);
+ WSM_PUT8(buf, arg->DTIMPeriod);
+ WSM_PUT8(buf, arg->preambleType);
+ WSM_PUT8(buf, arg->probeDelay);
+ WSM_PUT8(buf, arg->ssidLength);
+ WSM_PUT(buf, arg->ssid, sizeof(arg->ssid));
+ WSM_PUT32(buf, arg->basicRateSet);
+
+ priv->tx_burst_idx = -1;
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0017, WSM_CMD_START_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_beacon_transmit(struct cw1200_common *priv,
+ const struct wsm_beacon_transmit *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT32(buf, arg->enableBeaconing ? 1 : 0);
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0018, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_start_find(struct cw1200_common *priv)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+ ret = wsm_cmd_send(priv, buf, NULL, 0x0019, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+}
+
+/* ******************************************************************** */
+
+int wsm_stop_find(struct cw1200_common *priv)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+ ret = wsm_cmd_send(priv, buf, NULL, 0x001A, WSM_CMD_TIMEOUT);
+ wsm_cmd_unlock(priv);
+ return ret;
+}
+
+/* ******************************************************************** */
+
+int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+ u16 cmd = 0x001C | WSM_TX_LINK_ID(arg->link_id);
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT(buf, &arg->mac_addr[0], sizeof(arg->mac_addr));
+ WSM_PUT16(buf, 0);
+
+ ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_update_ie(struct cw1200_common *priv,
+ const struct wsm_update_ie *arg)
+{
+ int ret;
+ struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+ wsm_cmd_lock(priv);
+
+ WSM_PUT16(buf, arg->what);
+ WSM_PUT16(buf, arg->count);
+ WSM_PUT(buf, arg->ies, arg->length);
+
+ ret = wsm_cmd_send(priv, buf, NULL, 0x001B, WSM_CMD_TIMEOUT);
+
+ wsm_cmd_unlock(priv);
+ return ret;
+
+nomem:
+ wsm_cmd_unlock(priv);
+ return -ENOMEM;
+
+}
+
+/* ******************************************************************** */
+/* WSM indication events implementation */
+
+static int wsm_startup_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ u16 status;
+ char fw_label[129];
+ static const char * const fw_types[] = {
+ "ETF",
+ "WFM",
+ "WSM",
+ "HI test",
+ "Platform test"
+ };
+
+ priv->wsm_caps.numInpChBufs = WSM_GET16(buf);
+ priv->wsm_caps.sizeInpChBuf = WSM_GET16(buf);
+ priv->wsm_caps.hardwareId = WSM_GET16(buf);
+ priv->wsm_caps.hardwareSubId = WSM_GET16(buf);
+ status = WSM_GET16(buf);
+ priv->wsm_caps.firmwareCap = WSM_GET16(buf);
+ priv->wsm_caps.firmwareType = WSM_GET16(buf);
+ priv->wsm_caps.firmwareApiVer = WSM_GET16(buf);
+ priv->wsm_caps.firmwareBuildNumber = WSM_GET16(buf);
+ priv->wsm_caps.firmwareVersion = WSM_GET16(buf);
+ WSM_GET(buf, &fw_label[0], sizeof(fw_label) - 1);
+ fw_label[sizeof(fw_label) - 1] = 0; /* Do not trust FW too much. */
+
+ if (WARN_ON(status))
+ return -EINVAL;
+
+ if (WARN_ON(priv->wsm_caps.firmwareType > 4))
+ return -EINVAL;
+
+ printk(KERN_INFO "CW1200 WSM init done.\n"
+ " Input buffers: %d x %d bytes\n"
+ " Hardware: %d.%d\n"
+ " %s firmware [%s], ver: %d, build: %d,"
+ " api: %d, cap: 0x%.4X\n",
+ priv->wsm_caps.numInpChBufs, priv->wsm_caps.sizeInpChBuf,
+ priv->wsm_caps.hardwareId, priv->wsm_caps.hardwareSubId,
+ fw_types[priv->wsm_caps.firmwareType],
+ &fw_label[0], priv->wsm_caps.firmwareVersion,
+ priv->wsm_caps.firmwareBuildNumber,
+ priv->wsm_caps.firmwareApiVer, priv->wsm_caps.firmwareCap);
+
+ priv->wsm_caps.firmwareReady = 1;
+
+ wake_up(&priv->wsm_startup_done);
+ return 0;
+
+underflow:
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static int wsm_receive_indication(struct cw1200_common *priv,
+ int link_id,
+ struct wsm_buf *buf,
+ struct sk_buff **skb_p)
+{
+ priv->rx_timestamp = jiffies;
+ if (priv->wsm_cbc.rx) {
+ struct wsm_rx rx;
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ __le16 fctl;
+
+ rx.status = WSM_GET32(buf);
+ rx.channelNumber = WSM_GET16(buf);
+ rx.rxedRate = WSM_GET8(buf);
+ rx.rcpiRssi = WSM_GET8(buf);
+ rx.flags = WSM_GET32(buf);
+
+ /* FW Workaround: Drop probe resp or
+ beacon when RSSI is 0 */
+ hdr = (struct ieee80211_hdr *) (*skb_p)->data;
+
+ if (!rx.rcpiRssi &&
+ (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ return 0;
+
+ /* If no RSSI subscription has been made,
+ * convert RCPI to RSSI here */
+ if (!priv->cqm_use_rssi)
+ rx.rcpiRssi = rx.rcpiRssi / 2 - 110;
+
+ rx.link_id = link_id;
+ fctl = *(__le16 *)buf->data;
+ hdr_len = buf->data - buf->begin;
+ skb_pull(*skb_p, hdr_len);
+ if (!rx.status && unlikely(ieee80211_is_deauth(fctl))) {
+ if (priv->join_status == CW1200_JOIN_STATUS_STA) {
+ /* Schedule unjoin work */
+ wsm_printk(KERN_DEBUG \
+ "[WSM] Issue unjoin command (RX).\n");
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue,
+ &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+ }
+ }
+ priv->wsm_cbc.rx(priv, &rx, skb_p);
+ if (*skb_p)
+ skb_push(*skb_p, hdr_len);
+ }
+ return 0;
+
+underflow:
+ return -EINVAL;
+}
+
+static int wsm_event_indication(struct cw1200_common *priv, struct wsm_buf *buf)
+{
+ int first;
+ struct cw1200_wsm_event *event;
+
+ if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) {
+ /* STA is stopped. */
+ return 0;
+ }
+
+ event = kzalloc(sizeof(struct cw1200_wsm_event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ event->evt.eventId = WSM_GET32(buf);
+ event->evt.eventData = WSM_GET32(buf);
+
+ wsm_printk(KERN_DEBUG "[WSM] Event: %d(%d)\n",
+ event->evt.eventId, event->evt.eventData);
+
+ spin_lock(&priv->event_queue_lock);
+ first = list_empty(&priv->event_queue);
+ list_add_tail(&event->link, &priv->event_queue);
+ spin_unlock(&priv->event_queue_lock);
+
+ if (first)
+ queue_work(priv->workqueue, &priv->event_handler);
+
+ return 0;
+
+underflow:
+ kfree(event);
+ return -EINVAL;
+}
+
+static int wsm_channel_switch_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ wsm_unlock_tx(priv); /* Re-enable datapath */
+ WARN_ON(WSM_GET32(buf));
+
+ priv->channel_switch_in_progress = 0;
+ wake_up(&priv->channel_switch_done);
+
+ if (priv->wsm_cbc.channel_switch)
+ priv->wsm_cbc.channel_switch(priv);
+ return 0;
+
+underflow:
+ return -EINVAL;
+}
+
+static int wsm_set_pm_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ return 0;
+}
+
+static int wsm_scan_complete_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ if (priv->wsm_cbc.scan_complete) {
+ struct wsm_scan_complete arg;
+ arg.status = WSM_GET32(buf);
+ arg.psm = WSM_GET8(buf);
+ arg.numChannels = WSM_GET8(buf);
+ priv->wsm_cbc.scan_complete(priv, &arg);
+ }
+ return 0;
+
+underflow:
+ return -EINVAL;
+}
+
+static int wsm_find_complete_indication(struct cw1200_common *priv,
+ struct wsm_buf *buf)
+{
+ /* TODO: Implement me. */
+ STUB();
+ return 0;
+}
+
+static int wsm_suspend_resume_indication(struct cw1200_common *priv,
+ int link_id, struct wsm_buf *buf)
+{
+ if (priv->wsm_cbc.suspend_resume) {
+ u32 flags;
+ struct wsm_suspend_resume arg;
+
+ flags = WSM_GET32(buf);
+ arg.link_id = link_id;
+ arg.stop = !(flags & 1);
+ arg.multicast = !!(flags & 8);
+ arg.queue = (flags >> 1) & 3;
+
+ priv->wsm_cbc.suspend_resume(priv, &arg);
+ }
+ return 0;
+
+underflow:
+ return -EINVAL;
+}
+
+
+/* ******************************************************************** */
+/* WSM TX */
+
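+/* Serialize a WSM request: the caller has already built the payload in
+ * priv->wsm_cmd_buf under wsm_cmd_lock(). The HI header is filled in
+ * here, the buffer is handed to the BH thread via priv->wsm_cmd, and the
+ * function sleeps on wsm_cmd_wq until the matching confirmation arrives
+ * (see wsm_handle_rx()) or the timeout expires. */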
+int wsm_cmd_send(struct cw1200_common *priv,
+ struct wsm_buf *buf,
+ void *arg, u16 cmd, long tmo)
+{
+ size_t buf_len = buf->data - buf->begin;
+ int ret;
+
+ if (cmd == 0x0006) /* Write MIB */
+ wsm_printk(KERN_DEBUG "[WSM] >>> 0x%.4X [MIB: 0x%.4X] (%d)\n",
+ cmd, __le16_to_cpu(((__le16 *)buf->begin)[2]),
+ buf_len);
+ else
+ wsm_printk(KERN_DEBUG "[WSM] >>> 0x%.4X (%d)\n", cmd, buf_len);
+
+ /* Fill HI message header */
+ /* BH will add sequence number */
+ ((__le16 *)buf->begin)[0] = __cpu_to_le16(buf_len);
+ ((__le16 *)buf->begin)[1] = __cpu_to_le16(cmd);
+
+ spin_lock(&priv->wsm_cmd.lock);
+ BUG_ON(priv->wsm_cmd.ptr);
+ priv->wsm_cmd.done = 0;
+ priv->wsm_cmd.ptr = buf->begin;
+ priv->wsm_cmd.len = buf_len;
+ priv->wsm_cmd.arg = arg;
+ priv->wsm_cmd.cmd = cmd;
+ spin_unlock(&priv->wsm_cmd.lock);
+
+ cw1200_bh_wakeup(priv);
+
+ if (unlikely(priv->bh_error)) {
+ /* Do not wait for timeout if BH is dead. Exit immediately. */
+ ret = 0;
+ } else {
+ long rx_timestamp;
+ /* Firmware prioritizes data traffic over control confirm.
+ * Loop below checks if data was RXed and increases timeout
+ * accordingly. */
+ do {
+ /* It's safe to use unprotected access to
+ * wsm_cmd.done here */
+ ret = wait_event_timeout(
+ priv->wsm_cmd_wq,
+ priv->wsm_cmd.done, tmo);
+ rx_timestamp = jiffies - priv->rx_timestamp;
+ if (unlikely(rx_timestamp < 0))
+ rx_timestamp = tmo + 1;
+ } while (!ret && rx_timestamp <= tmo);
+ }
+
+ if (unlikely(ret == 0)) {
+ u16 raceCheck;
+
+ spin_lock(&priv->wsm_cmd.lock);
+ raceCheck = priv->wsm_cmd.cmd;
+ priv->wsm_cmd.arg = NULL;
+ priv->wsm_cmd.ptr = NULL;
+ spin_unlock(&priv->wsm_cmd.lock);
+
+ /* Race condition check to make sure _confirm is not called
+ * after exit of _send */
+ if (raceCheck == 0xFFFF) {
+ /* If wsm_handle_rx got stuck in _confirm we will hang
+ * the system here. It's better than silently corrupting
+ * the stack or heap, isn't it? */
+ BUG_ON(wait_event_timeout(
+ priv->wsm_cmd_wq, priv->wsm_cmd.done,
+ WSM_CMD_LAST_CHANCE_TIMEOUT) <= 0);
+ }
+
+ /* Kill BH thread to report the error to the top layer. */
+ priv->bh_error = 1;
+ wake_up(&priv->bh_wq);
+ ret = -ETIMEDOUT;
+ } else {
+ spin_lock(&priv->wsm_cmd.lock);
+ BUG_ON(!priv->wsm_cmd.done);
+ ret = priv->wsm_cmd.ret;
+ spin_unlock(&priv->wsm_cmd.lock);
+ }
+ wsm_buf_reset(buf);
+ return ret;
+}
+
+/* ******************************************************************** */
+/* WSM TX port control */
+
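+/* priv->tx_lock is a counting lock on the TX datapath. wsm_lock_tx()
+ * additionally flushes frames already queued to the device, while
+ * wsm_lock_tx_async() only increments the counter; wsm_unlock_tx()
+ * re-enables TX and wakes the BH once the counter drops back to zero. */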
+void wsm_lock_tx(struct cw1200_common *priv)
+{
+ wsm_cmd_lock(priv);
+ if (atomic_add_return(1, &priv->tx_lock) == 1) {
+ if (wsm_flush_tx(priv))
+ wsm_printk(KERN_DEBUG "[WSM] TX is locked.\n");
+ }
+ wsm_cmd_unlock(priv);
+}
+
+void wsm_lock_tx_async(struct cw1200_common *priv)
+{
+ if (atomic_add_return(1, &priv->tx_lock) == 1)
+ wsm_printk(KERN_DEBUG "[WSM] TX is locked (async).\n");
+}
+
+bool wsm_flush_tx(struct cw1200_common *priv)
+{
+ unsigned long timestamp = jiffies;
+ bool pending = false;
+ long timeout;
+ int i;
+
+ /* Flush must be called with TX lock held. */
+ BUG_ON(!atomic_read(&priv->tx_lock));
+
+ /* First check if we really need to do something.
+ * It is safe to use unprotected access, as hw_bufs_used
+ * can only decrease. */
+ if (!priv->hw_bufs_used)
+ return true;
+
+ if (priv->bh_error) {
+ /* In case of failure do not wait for magic. */
+ wsm_printk(KERN_ERR "[WSM] Fatal error occured, "
+ "will not flush TX.\n");
+ return false;
+ } else {
+ /* Get a timestamp of "oldest" frame */
+ for (i = 0; i < 4; ++i)
+ pending |= cw1200_queue_get_xmit_timestamp(
+ &priv->tx_queue[i],
+ &timestamp);
+ /* It is allowed to lock TX with only a command in the pipe. */
+ if (!pending)
+ return true;
+
+ timeout = timestamp + WSM_CMD_LAST_CHANCE_TIMEOUT - jiffies;
+ if (timeout < 0 || wait_event_timeout(priv->bh_evt_wq,
+ !priv->hw_bufs_used,
+ timeout) <= 0) {
+ /* Hmmm... Not good. A frame got stuck in the firmware. */
+ priv->bh_error = 1;
+ wake_up(&priv->bh_wq);
+ return false;
+ }
+
+ /* Ok, everything is flushed. */
+ return true;
+ }
+}
+
+void wsm_unlock_tx(struct cw1200_common *priv)
+{
+ int tx_lock;
+ if (priv->bh_error)
+ wsm_printk(KERN_ERR "fatal error occured, unlock is unsafe\n");
+ else {
+ tx_lock = atomic_sub_return(1, &priv->tx_lock);
+ if (tx_lock < 0) {
+ BUG_ON(1);
+ } else if (tx_lock == 0) {
+ cw1200_bh_wakeup(priv);
+ wsm_printk(KERN_DEBUG "[WSM] TX is unlocked.\n");
+ }
+ }
+}
+
+/* ******************************************************************** */
+/* WSM RX */
+
+int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len)
+{
+ struct wsm_buf buf;
+ u32 reason;
+ u32 reg[18];
+ char fname[48];
+ size_t i;
+
+ static const char * const reason_str[] = {
+ "undefined instruction",
+ "prefetch abort",
+ "data abort",
+ "unknown error",
+ };
+
+#if defined(CONFIG_CW1200_USE_STE_EXTENSIONS)
+ /* Send the event upwards on the FW exception */
+ cw1200_pm_stay_awake(&priv->pm_state, 3*HZ);
+ ieee80211_driver_hang_notify(priv->vif, GFP_KERNEL);
+#endif
+
+ buf.begin = buf.data = data;
+ buf.end = &buf.begin[len];
+
+ reason = WSM_GET32(&buf);
+ for (i = 0; i < ARRAY_SIZE(reg); ++i)
+ reg[i] = WSM_GET32(&buf);
+ WSM_GET(&buf, fname, sizeof(fname));
+
+ if (reason < 4)
+ wiphy_err(priv->hw->wiphy,
+ "Firmware exception: %s.\n",
+ reason_str[reason]);
+ else
+ wiphy_err(priv->hw->wiphy,
+ "Firmware assert at %.*s, line %d\n",
+ (int)sizeof(fname), fname, reg[1]);
+
+ for (i = 0; i < 12; i += 4)
+ wiphy_err(priv->hw->wiphy,
+ "R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X,\n",
+ i + 0, reg[i + 0], i + 1, reg[i + 1],
+ i + 2, reg[i + 2], i + 3, reg[i + 3]);
+ wiphy_err(priv->hw->wiphy,
+ "R12: 0x%.8X, SP: 0x%.8X, LR: 0x%.8X, PC: 0x%.8X,\n",
+ reg[i + 0], reg[i + 1], reg[i + 2], reg[i + 3]);
+ i += 4;
+ wiphy_err(priv->hw->wiphy,
+ "CPSR: 0x%.8X, SPSR: 0x%.8X\n",
+ reg[i + 0], reg[i + 1]);
+
+ print_hex_dump_bytes("R1: ", DUMP_PREFIX_NONE,
+ fname, sizeof(fname));
+ return 0;
+
+underflow:
+ wiphy_err(priv->hw->wiphy,
+ "Firmware exception.\n");
+ print_hex_dump_bytes("Exception: ", DUMP_PREFIX_NONE,
+ data, len);
+ return -EINVAL;
+}
+
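+/* Demultiplex an incoming WSM message by its ID: 0x04xx messages are
+ * confirmations of the corresponding 0x00xx requests above, 0x08xx
+ * messages are unsolicited indications, and bits 6..9 of the raw ID
+ * carry the link ID. */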
+int wsm_handle_rx(struct cw1200_common *priv, int id,
+ struct wsm_hdr *wsm, struct sk_buff **skb_p)
+{
+ int ret = 0;
+ struct wsm_buf wsm_buf;
+ int link_id = (id >> 6) & 0x0F;
+
+ /* Strip link id. */
+ id &= ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX);
+
+ wsm_buf.begin = (u8 *)&wsm[0];
+ wsm_buf.data = (u8 *)&wsm[1];
+ wsm_buf.end = &wsm_buf.begin[__le32_to_cpu(wsm->len)];
+
+ wsm_printk(KERN_DEBUG "[WSM] <<< 0x%.4X (%d)\n", id,
+ wsm_buf.end - wsm_buf.begin);
+
+ if (id == 0x404) {
+ ret = wsm_tx_confirm(priv, &wsm_buf, link_id);
+ } else if (id == 0x41E) {
+ ret = wsm_multi_tx_confirm(priv, &wsm_buf, link_id);
+ } else if (id & 0x0400) {
+ void *wsm_arg;
+ u16 wsm_cmd;
+
+ /* Do not trust FW too much: protect against repeated
+ * confirmations and the race condition described above. */
+ spin_lock(&priv->wsm_cmd.lock);
+ wsm_arg = priv->wsm_cmd.arg;
+ wsm_cmd = priv->wsm_cmd.cmd &
+ ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX);
+ priv->wsm_cmd.cmd = 0xFFFF;
+ spin_unlock(&priv->wsm_cmd.lock);
+
+ if (WARN_ON((id & ~0x0400) != wsm_cmd)) {
+ /* Note that any non-zero is a fatal retcode. */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (id) {
+ case 0x0409:
+ /* Note that wsm_arg can be NULL in case of timeout in
+ * wsm_cmd_send(). */
+ if (likely(wsm_arg))
+ ret = wsm_configuration_confirm(priv, wsm_arg,
+ &wsm_buf);
+ break;
+ case 0x0405:
+ if (likely(wsm_arg))
+ ret = wsm_read_mib_confirm(priv, wsm_arg,
+ &wsm_buf);
+ break;
+ case 0x0406:
+ if (likely(wsm_arg))
+ ret = wsm_write_mib_confirm(priv, wsm_arg,
+ &wsm_buf);
+ break;
+ case 0x040B:
+ if (likely(wsm_arg))
+ ret = wsm_join_confirm(priv, wsm_arg, &wsm_buf);
+ break;
+ case 0x0407: /* start-scan */
+ case 0x0408: /* stop-scan */
+ case 0x040A: /* wsm_reset */
+ case 0x040C: /* add_key */
+ case 0x040D: /* remove_key */
+ case 0x0410: /* wsm_set_pm */
+ case 0x0411: /* set_bss_params */
+ case 0x0412: /* set_tx_queue_params */
+ case 0x0413: /* set_edca_params */
+ case 0x0416: /* switch_channel */
+ case 0x0417: /* start */
+ case 0x0418: /* beacon_transmit */
+ case 0x0419: /* start_find */
+ case 0x041A: /* stop_find */
+ case 0x041B: /* update_ie */
+ case 0x041C: /* map_link */
+ WARN_ON(wsm_arg != NULL);
+ ret = wsm_generic_confirm(priv, wsm_arg, &wsm_buf);
+ if (ret)
+ wiphy_warn(priv->hw->wiphy,
+ "wsm_generic_confirm "
+ "failed for request 0x%.4X.\n",
+ id & ~0x0400);
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ spin_lock(&priv->wsm_cmd.lock);
+ priv->wsm_cmd.ret = ret;
+ priv->wsm_cmd.done = 1;
+ spin_unlock(&priv->wsm_cmd.lock);
+ ret = 0; /* An error response from the device should not stop the BH. */
+
+ wake_up(&priv->wsm_cmd_wq);
+ } else if (id & 0x0800) {
+ switch (id) {
+ case 0x0801:
+ ret = wsm_startup_indication(priv, &wsm_buf);
+ break;
+ case 0x0804:
+ ret = wsm_receive_indication(priv, link_id,
+ &wsm_buf, skb_p);
+ break;
+ case 0x0805:
+ ret = wsm_event_indication(priv, &wsm_buf);
+ break;
+ case 0x080A:
+ ret = wsm_channel_switch_indication(priv, &wsm_buf);
+ break;
+ case 0x0809:
+ ret = wsm_set_pm_indication(priv, &wsm_buf);
+ break;
+ case 0x0806:
+ ret = wsm_scan_complete_indication(priv, &wsm_buf);
+ break;
+ case 0x080B:
+ ret = wsm_find_complete_indication(priv, &wsm_buf);
+ break;
+ case 0x080C:
+ ret = wsm_suspend_resume_indication(priv,
+ link_id, &wsm_buf);
+ break;
+ default:
+ STUB();
+ }
+ } else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+out:
+ return ret;
+}
+
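+/* Inspect a frame dequeued for TX and decide whether it can go to the
+ * device as-is. Probe requests, frames requiring a join, off-channel
+ * frames and WEP frames keyed with a non-default key are intercepted
+ * here and handed over to the corresponding work items instead; the
+ * function returns true when the frame has been handled (or dropped)
+ * this way. */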
+static bool wsm_handle_tx_data(struct cw1200_common *priv,
+ const struct wsm_tx *wsm,
+ const struct ieee80211_tx_info *tx_info,
+ const struct cw1200_txpriv *txpriv,
+ struct cw1200_queue *queue)
+{
+ bool handled = false;
+ const struct ieee80211_hdr *frame =
+ (struct ieee80211_hdr *) &((u8 *)wsm)[txpriv->offset];
+ __le16 fctl = frame->frame_control;
+ enum {
+ doProbe,
+ doDrop,
+ doJoin,
+ doOffchannel,
+ doWep,
+ doTx,
+ } action = doTx;
+
+ switch (priv->mode) {
+ case NL80211_IFTYPE_STATION:
+ if (unlikely((priv->join_status == CW1200_JOIN_STATUS_STA) &&
+ ieee80211_is_nullfunc(fctl))) {
+ spin_lock(&priv->bss_loss_lock);
+ if (priv->bss_loss_status == CW1200_BSS_LOSS_CHECKING) {
+ priv->bss_loss_status =
+ CW1200_BSS_LOSS_CONFIRMING;
+ priv->bss_loss_confirm_id = wsm->packetID;
+ }
+ spin_unlock(&priv->bss_loss_lock);
+ } else if (unlikely(
+ (priv->join_status <= CW1200_JOIN_STATUS_MONITOR) ||
+ memcmp(frame->addr1, priv->join_bssid,
+ sizeof(priv->join_bssid)))) {
+ if (ieee80211_is_auth(fctl))
+ action = doJoin;
+ else if (ieee80211_is_probe_req(fctl))
+ action = doTx;
+ else if (priv->join_status >=
+ CW1200_JOIN_STATUS_MONITOR)
+ action = doTx;
+ else
+ action = doOffchannel;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ if (unlikely(!priv->join_status))
+ action = doDrop;
+ else if (unlikely(!(BIT(txpriv->raw_link_id) &
+ (BIT(0) | priv->link_id_map)))) {
+ wiphy_warn(priv->hw->wiphy,
+ "A frame with expired link id "
+ "is dropped.\n");
+ action = doDrop;
+ }
+ if (cw1200_queue_get_generation(wsm->packetID) >
+ CW1200_MAX_REQUEUE_ATTEMPTS) {
+ /* HACK!!! WSM324 firmware has a tendency to requeue
+ * multicast frames in a loop, causing a performance
+ * drop and high power consumption in the driver.
+ * In this situation it is better just to drop
+ * the problematic frame. */
+ wiphy_warn(priv->hw->wiphy,
+ "Too many attempts "
+ "to requeue a frame. "
+ "Frame is dropped.\n");
+ action = doDrop;
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ STUB();
+ case NL80211_IFTYPE_MONITOR:
+ default:
+ action = doDrop;
+ break;
+ }
+
+ if (action == doTx) {
+ if (unlikely(ieee80211_is_probe_req(fctl)))
+ action = doProbe;
+ else if ((fctl & __cpu_to_le16(IEEE80211_FCTL_PROTECTED)) &&
+ tx_info->control.hw_key &&
+ unlikely(tx_info->control.hw_key->keyidx !=
+ priv->wep_default_key_id) &&
+ (tx_info->control.hw_key->cipher ==
+ WLAN_CIPHER_SUITE_WEP40 ||
+ tx_info->control.hw_key->cipher ==
+ WLAN_CIPHER_SUITE_WEP104))
+ action = doWep;
+ }
+
+ switch (action) {
+ case doProbe:
+ {
+ /* An interesting FW "feature": the device filters
+ * probe responses. The easiest way to get them
+ * through is to convert the probe request into a
+ * WSM start_scan command. */
+ wsm_printk(KERN_DEBUG \
+ "[WSM] Convert probe request to scan.\n");
+ wsm_lock_tx_async(priv);
+ priv->pending_frame_id = __le32_to_cpu(wsm->packetID);
+ queue_delayed_work(priv->workqueue,
+ &priv->scan.probe_work, 0);
+ handled = true;
+ }
+ break;
+ case doDrop:
+ {
+ /* See detailed description of "join" below.
+ * We are dropping everything except AUTH in non-joined mode. */
+ wsm_printk(KERN_DEBUG "[WSM] Drop frame (0x%.4X).\n", fctl);
+ BUG_ON(cw1200_queue_remove(queue,
+ __le32_to_cpu(wsm->packetID)));
+ handled = true;
+ }
+ break;
+ case doJoin:
+ {
+ /* There is one more interesting "feature"
+ * in FW: it can't do RX/TX before "join".
+ * "Join" here is not an association,
+ * but just a synchronization between AP and STA.
+ * priv->join_status is used only in bh thread and does
+ * not require protection */
+ wsm_printk(KERN_DEBUG "[WSM] Issue join command.\n");
+ wsm_lock_tx_async(priv);
+ priv->pending_frame_id = __le32_to_cpu(wsm->packetID);
+ if (queue_work(priv->workqueue, &priv->join_work) <= 0)
+ wsm_unlock_tx(priv);
+ handled = true;
+ }
+ break;
+ case doOffchannel:
+ {
+ wsm_printk(KERN_DEBUG "[WSM] Offchannel TX request.\n");
+ wsm_lock_tx_async(priv);
+ priv->pending_frame_id = __le32_to_cpu(wsm->packetID);
+ if (queue_work(priv->workqueue, &priv->offchannel_work) <= 0)
+ wsm_unlock_tx(priv);
+ handled = true;
+ }
+ break;
+ case doWep:
+ {
+ wsm_printk(KERN_DEBUG "[WSM] Issue set_default_wep_key.\n");
+ wsm_lock_tx_async(priv);
+ priv->wep_default_key_id = tx_info->control.hw_key->keyidx;
+ priv->pending_frame_id = __le32_to_cpu(wsm->packetID);
+ if (queue_work(priv->workqueue, &priv->wep_key_work) <= 0)
+ wsm_unlock_tx(priv);
+ handled = true;
+ }
+ break;
+ case doTx:
+ {
+#if 0
+ /* Kept for history. If you want to implement wsm->more,
+ * make sure you are able to send a frame after that. */
+ wsm->more = (count > 1) ? 1 : 0;
+ if (wsm->more) {
+ /* HACK!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ * It's undocumented in WSM spec, but CW1200 hangs
+ * if 'more' is set and no TX is performed due to TX
+ * buffers limitation. */
+ if (priv->hw_bufs_used + 1 ==
+ priv->wsm_caps.numInpChBufs)
+ wsm->more = 0;
+ }
+
+ /* BUG!!! FIXME: we can't use 'more' at all: we don't know the
+ * future. It could be a request from the upper layer with TX lock
+ * requirements (scan, for example). If "more" is set device
+ * will not send data and wsm_tx_lock() will fail...
+ * It's not obvious how to fix this deadlock. Any ideas?
+ * As a workaround more is set to 0. */
+ wsm->more = 0;
+#endif /* 0 */
+
+ if (ieee80211_is_deauth(fctl) &&
+ priv->mode != NL80211_IFTYPE_AP) {
+ /* Schedule unjoin work */
+ wsm_printk(KERN_DEBUG "[WSM] Issue unjoin command"
+ " (TX).\n");
+#if 0
+ wsm->more = 0;
+#endif /* 0 */
+ wsm_lock_tx_async(priv);
+ if (queue_work(priv->workqueue,
+ &priv->unjoin_work) <= 0)
+ wsm_unlock_tx(priv);
+ }
+ }
+ break;
+ }
+ return handled;
+}
+
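+/* Pick the next TX queue by emulating EDCA contention in software:
+ * each non-empty queue gets a score built from its fixed (AIFSN + CWmin)
+ * part plus a random component scaled by (CWmax - CWmin), and the lowest
+ * score wins. An ongoing burst may override the winner below. */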
+static int cw1200_get_prio_queue(struct cw1200_common *priv,
+ u32 link_id_map, int *total)
+{
+ static const int urgent = BIT(CW1200_LINK_ID_AFTER_DTIM) |
+ BIT(CW1200_LINK_ID_UAPSD);
+ struct wsm_edca_queue_params *edca;
+ unsigned score, best = -1;
+ int winner = -1;
+ int queued;
+ int i;
+
+ /* search for a winner using edca params */
+ for (i = 0; i < 4; ++i) {
+ queued = cw1200_queue_get_num_queued(&priv->tx_queue[i],
+ link_id_map);
+ if (!queued)
+ continue;
+ *total += queued;
+ edca = &priv->edca.params[i];
+ score = ((edca->aifns + edca->cwMin) << 16) +
+ (edca->cwMax - edca->cwMin) *
+ (random32() & 0xFFFF);
+ if (score < best && (winner < 0 || i != 3)) {
+ best = score;
+ winner = i;
+ }
+ }
+
+ /* override winner if bursting */
+ if (winner >= 0 && priv->tx_burst_idx >= 0 &&
+ winner != priv->tx_burst_idx &&
+ !cw1200_queue_get_num_queued(
+ &priv->tx_queue[winner],
+ link_id_map & urgent) &&
+ cw1200_queue_get_num_queued(
+ &priv->tx_queue[priv->tx_burst_idx],
+ link_id_map))
+ winner = priv->tx_burst_idx;
+
+ return winner;
+}
+
+static int wsm_get_tx_queue_and_mask(struct cw1200_common *priv,
+ struct cw1200_queue **queue_p,
+ u32 *tx_allowed_mask_p,
+ bool *more)
+{
+ int idx;
+ u32 tx_allowed_mask;
+ int total = 0;
+
+ /* Search for a queue with multicast frames buffered */
+ if (priv->tx_multicast) {
+ tx_allowed_mask = BIT(CW1200_LINK_ID_AFTER_DTIM);
+ idx = cw1200_get_prio_queue(priv,
+ tx_allowed_mask, &total);
+ if (idx >= 0) {
+ *more = total > 1;
+ goto found;
+ }
+ }
+
+ /* Search for unicast traffic */
+ tx_allowed_mask = ~priv->sta_asleep_mask;
+ tx_allowed_mask |= BIT(CW1200_LINK_ID_UAPSD);
+ if (priv->sta_asleep_mask) {
+ tx_allowed_mask |= priv->pspoll_mask;
+ tx_allowed_mask &= ~BIT(CW1200_LINK_ID_AFTER_DTIM);
+ } else {
+ tx_allowed_mask |= BIT(CW1200_LINK_ID_AFTER_DTIM);
+ }
+ idx = cw1200_get_prio_queue(priv,
+ tx_allowed_mask, &total);
+ if (idx < 0)
+ return -ENOENT;
+
+found:
+ *queue_p = &priv->tx_queue[idx];
+ *tx_allowed_mask_p = tx_allowed_mask;
+ return 0;
+}
+
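+/* Called from the BH thread to fetch the next buffer to transmit.
+ * A pending WSM command (priv->wsm_cmd.ptr) always takes priority over
+ * data frames; otherwise a queue is picked by wsm_get_tx_queue_and_mask()
+ * and the frame may first be intercepted by wsm_handle_tx_data(). */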
+int wsm_get_tx(struct cw1200_common *priv, u8 **data,
+ size_t *tx_len, int *burst)
+{
+ struct wsm_tx *wsm = NULL;
+ struct ieee80211_tx_info *tx_info;
+ struct cw1200_queue *queue = NULL;
+ int queue_num;
+ u32 tx_allowed_mask = 0;
+ const struct cw1200_txpriv *txpriv = NULL;
+ /*
+ * Count was intended as an input for wsm->more flag.
+ * During implementation it was found that wsm->more
+ * is not usable, see details above. It is kept just
+ * in case you would like to try to implement it again.
+ */
+ int count = 0;
+
+ /* More is used only for broadcasts. */
+ bool more = false;
+
+ count = cw1200_itp_get_tx(priv, data, tx_len, burst);
+ if (count)
+ return count;
+
+ if (priv->wsm_cmd.ptr) {
+ ++count;
+ spin_lock(&priv->wsm_cmd.lock);
+ BUG_ON(!priv->wsm_cmd.ptr);
+ *data = priv->wsm_cmd.ptr;
+ *tx_len = priv->wsm_cmd.len;
+ *burst = 1;
+ spin_unlock(&priv->wsm_cmd.lock);
+ } else {
+ for (;;) {
+ int ret;
+
+ if (atomic_add_return(0, &priv->tx_lock))
+ break;
+
+ spin_lock_bh(&priv->ps_state_lock);
+
+ ret = wsm_get_tx_queue_and_mask(priv, &queue,
+ &tx_allowed_mask, &more);
+ queue_num = queue - priv->tx_queue;
+
+ if (priv->buffered_multicasts &&
+ (ret || !more) &&
+ (priv->tx_multicast ||
+ !priv->sta_asleep_mask)) {
+ priv->buffered_multicasts = false;
+ if (priv->tx_multicast) {
+ priv->tx_multicast = false;
+ queue_work(priv->workqueue,
+ &priv->multicast_stop_work);
+ }
+ }
+
+ spin_unlock_bh(&priv->ps_state_lock);
+
+ if (ret)
+ break;
+
+ if (cw1200_queue_get(queue,
+ tx_allowed_mask,
+ &wsm, &tx_info, &txpriv))
+ continue;
+
+ if (wsm_handle_tx_data(priv, wsm,
+ tx_info, txpriv, queue))
+ continue; /* Handled by WSM */
+
+ wsm->hdr.id &= __cpu_to_le16(
+ ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX));
+ wsm->hdr.id |= cpu_to_le16(
+ WSM_TX_LINK_ID(txpriv->raw_link_id));
+ priv->pspoll_mask &= ~BIT(txpriv->raw_link_id);
+
+ *data = (u8 *)wsm;
+ *tx_len = __le16_to_cpu(wsm->hdr.len);
+
+ /* allow bursting if txop is set */
+ if (priv->edca.params[queue_num].txOpLimit)
+ *burst = min(*burst,
+ (int)cw1200_queue_get_num_queued(
+ queue, tx_allowed_mask) + 1);
+ else
+ *burst = 1;
+
+ /* store index of bursting queue */
+ if (*burst > 1)
+ priv->tx_burst_idx = queue_num;
+ else
+ priv->tx_burst_idx = -1;
+
+ if (more) {
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *)
+ &((u8 *)wsm)[txpriv->offset];
+ /* more buffered multicast/broadcast frames
+ * ==> set MoreData flag in IEEE 802.11 header
+ * to inform PS STAs */
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ }
+
+ wsm_printk(KERN_DEBUG "[WSM] >>> 0x%.4X (%d) %p %c\n",
+ 0x0004, *tx_len, *data,
+ wsm->more ? 'M' : ' ');
+ ++count;
+ break;
+ }
+ }
+
+ return count;
+}
+
+void wsm_txed(struct cw1200_common *priv, u8 *data)
+{
+ if (data == priv->wsm_cmd.ptr) {
+ spin_lock(&priv->wsm_cmd.lock);
+ priv->wsm_cmd.ptr = NULL;
+ spin_unlock(&priv->wsm_cmd.lock);
+ }
+}
+
+/* ******************************************************************** */
+/* WSM buffer */
+
+void wsm_buf_init(struct wsm_buf *buf)
+{
+ BUG_ON(buf->begin);
+ buf->begin = kmalloc(SDIO_BLOCK_SIZE, GFP_KERNEL | GFP_DMA);
+ buf->end = buf->begin ? &buf->begin[SDIO_BLOCK_SIZE] : buf->begin;
+ wsm_buf_reset(buf);
+}
+
+void wsm_buf_deinit(struct wsm_buf *buf)
+{
+ kfree(buf->begin);
+ buf->begin = buf->data = buf->end = NULL;
+}
+
+static void wsm_buf_reset(struct wsm_buf *buf)
+{
+ if (buf->begin) {
+ buf->data = &buf->begin[4];
+ *(u32 *)buf->begin = 0;
+ } else
+ buf->data = buf->begin;
+}
+
+static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size)
+{
+ size_t pos = buf->data - buf->begin;
+ size_t size = pos + extra_size;
+
+
+ if (size & (SDIO_BLOCK_SIZE - 1)) {
+ size &= ~(SDIO_BLOCK_SIZE - 1);
+ size += SDIO_BLOCK_SIZE;
+ }
+
+ buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
+ if (buf->begin) {
+ buf->data = &buf->begin[pos];
+ buf->end = &buf->begin[size];
+ return 0;
+ } else {
+ buf->end = buf->data = buf->begin;
+ return -ENOMEM;
+ }
+}
+
+
diff --git a/drivers/staging/cw1200/wsm.h b/drivers/staging/cw1200/wsm.h
new file mode 100644
index 00000000000..c3bd002d432
--- /dev/null
+++ b/drivers/staging/cw1200/wsm.h
@@ -0,0 +1,1833 @@
+/*
+ * WSM host interface (HI) interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ *
+ * Based on CW1200 UMAC WSM API, which is
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Stewart Mathers <stewart.mathers@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_WSM_H_INCLUDED
+#define CW1200_WSM_H_INCLUDED
+
+#include <linux/spinlock.h>
+
+struct cw1200_common;
+
+/* Bands */
+/* Radio band 2.412 - 2.484 GHz. */
+#define WSM_PHY_BAND_2_4G (0)
+
+/* Radio band 4.9375-5.8250 GHz. */
+#define WSM_PHY_BAND_5G (1)
+
+/* Transmit rates */
+/* 1 Mbps ERP-DSSS */
+#define WSM_TRANSMIT_RATE_1 (0)
+
+/* 2 Mbps ERP-DSSS */
+#define WSM_TRANSMIT_RATE_2 (1)
+
+/* 5.5 Mbps ERP-CCK, ERP-PBCC (Not supported) */
+/* #define WSM_TRANSMIT_RATE_5 (2) */
+
+/* 11 Mbps ERP-CCK, ERP-PBCC (Not supported) */
+/* #define WSM_TRANSMIT_RATE_11 (3) */
+
+/* 22 Mbps ERP-PBCC (Not supported) */
+/* #define WSM_TRANSMIT_RATE_22 (4) */
+
+/* 33 Mbps ERP-PBCC (Not supported) */
+/* #define WSM_TRANSMIT_RATE_33 (5) */
+
+/* 6 Mbps (3 Mbps) ERP-OFDM, BPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_6 (6)
+
+/* 9 Mbps (4.5 Mbps) ERP-OFDM, BPSK coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_9 (7)
+
+/* 12 Mbps (6 Mbps) ERP-OFDM, QPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_12 (8)
+
+/* 18 Mbps (9 Mbps) ERP-OFDM, QPSK coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_18 (9)
+
+/* 24 Mbps (12 Mbps) ERP-OFDM, 16QAM coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_24 (10)
+
+/* 36 Mbps (18 Mbps) ERP-OFDM, 16QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_36 (11)
+
+/* 48 Mbps (24 Mbps) ERP-OFDM, 64QAM coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_48 (12)
+
+/* 54 Mbps (27 Mbps) ERP-OFDM, 64QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_54 (13)
+
+/* 6.5 Mbps HT-OFDM, BPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_HT_6 (14)
+
+/* 13 Mbps HT-OFDM, QPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_HT_13 (15)
+
+/* 19.5 Mbps HT-OFDM, QPSK coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_HT_19 (16)
+
+/* 26 Mbps HT-OFDM, 16QAM coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_HT_26 (17)
+
+/* 39 Mbps HT-OFDM, 16QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_HT_39 (18)
+
+/* 52 Mbps HT-OFDM, 64QAM coding rate 2/3 */
+#define WSM_TRANSMIT_RATE_HT_52 (19)
+
+/* 58.5 Mbps HT-OFDM, 64QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_HT_58 (20)
+
+/* 65 Mbps HT-OFDM, 64QAM coding rate 5/6 */
+#define WSM_TRANSMIT_RATE_HT_65 (21)
+
+/* Scan types */
+/* Foreground scan */
+#define WSM_SCAN_TYPE_FOREGROUND (0)
+
+/* Background scan */
+#define WSM_SCAN_TYPE_BACKGROUND (1)
+
+/* Auto scan */
+#define WSM_SCAN_TYPE_AUTO (2)
+
+/* Scan flags */
+/* Forced background scan means that if the station cannot */
+/* enter power-save mode, it is forced to perform a */
+/* background scan. Only valid when ScanType is */
+/* background scan. */
+#define WSM_SCAN_FLAG_FORCE_BACKGROUND (BIT(0))
+
+/* The WLAN device scans one channel at a time so */
+/* that disturbance to the data traffic is minimized. */
+#define WSM_SCAN_FLAG_SPLIT_METHOD (BIT(1))
+
+/* Preamble Type. Long if not set. */
+#define WSM_SCAN_FLAG_SHORT_PREAMBLE (BIT(2))
+
+/* 11n Tx Mode. Mixed if not set. */
+#define WSM_SCAN_FLAG_11N_GREENFIELD (BIT(3))
+
+/* Scan constraints */
+/* Maximum number of channels to be scanned. */
+#define WSM_SCAN_MAX_NUM_OF_CHANNELS (48)
+
+/* The maximum number of SSIDs that the device can scan for. */
+#define WSM_SCAN_MAX_NUM_OF_SSIDS (2)
+
+/* Power management modes */
+/* 802.11 Active mode */
+#define WSM_PSM_ACTIVE (0)
+
+/* 802.11 PS mode */
+#define WSM_PSM_PS BIT(0)
+
+/* Fast Power Save bit */
+#define WSM_PSM_FAST_PS_FLAG BIT(7)
+
+/* Dynamic aka Fast power save */
+#define WSM_PSM_FAST_PS (BIT(0) | BIT(7))
+
+/* Undetermined */
+/* Note : Undetermined status is reported when the */
+/* NULL data frame used to advertise the PM mode to */
+/* the AP at Pre or Post Background Scan is not Acknowledged */
+#define WSM_PSM_UNKNOWN BIT(1)
+
+/* Queue IDs */
+/* best effort/legacy */
+#define WSM_QUEUE_BEST_EFFORT (0)
+
+/* background */
+#define WSM_QUEUE_BACKGROUND (1)
+
+/* video */
+#define WSM_QUEUE_VIDEO (2)
+
+/* voice */
+#define WSM_QUEUE_VOICE (3)
+
+/* HT TX parameters */
+/* Non-HT */
+#define WSM_HT_TX_NON_HT (0)
+
+/* Mixed format */
+#define WSM_HT_TX_MIXED (1)
+
+/* Greenfield format */
+#define WSM_HT_TX_GREENFIELD (2)
+
+/* STBC allowed */
+#define WSM_HT_TX_STBC (BIT(7))
+
+/* EPTA priority flags for BT Coex */
+/* default epta priority */
+#define WSM_EPTA_PRIORITY_DEFAULT 4
+/* use for normal data */
+#define WSM_EPTA_PRIORITY_DATA 4
+/* use for connect/disconnect/roaming*/
+#define WSM_EPTA_PRIORITY_MGT 5
+/* use for action frames */
+#define WSM_EPTA_PRIORITY_ACTION 5
+/* use for AC_VI data */
+#define WSM_EPTA_PRIORITY_VIDEO 5
+/* use for AC_VO data */
+#define WSM_EPTA_PRIORITY_VOICE 6
+/* use for EAPOL exchange */
+#define WSM_EPTA_PRIORITY_EAPOL 7
+
+/* TX status */
+/* Frame was sent aggregated */
+/* Only valid for WSM_SUCCESS status. */
+#define WSM_TX_STATUS_AGGREGATION (BIT(0))
+
+/* Host should requeue this frame later. */
+/* Valid only when status is WSM_REQUEUE. */
+#define WSM_TX_STATUS_REQUEUE (BIT(1))
+
+/* Normal Ack */
+#define WSM_TX_STATUS_NORMAL_ACK (0<<2)
+
+/* No Ack */
+#define WSM_TX_STATUS_NO_ACK (1<<2)
+
+/* No explicit acknowledgement */
+#define WSM_TX_STATUS_NO_EXPLICIT_ACK (2<<2)
+
+/* Block Ack */
+/* Only valid for WSM_SUCCESS status. */
+#define WSM_TX_STATUS_BLOCK_ACK (3<<2)
+
+/* RX status */
+/* Unencrypted */
+#define WSM_RX_STATUS_UNENCRYPTED (0<<0)
+
+/* WEP */
+#define WSM_RX_STATUS_WEP (1<<0)
+
+/* TKIP */
+#define WSM_RX_STATUS_TKIP (2<<0)
+
+/* AES */
+#define WSM_RX_STATUS_AES (3<<0)
+
+/* WAPI */
+#define WSM_RX_STATUS_WAPI (4<<0)
+
+/* Macro to fetch encryption subfield. */
+#define WSM_RX_STATUS_ENCRYPTION(status) ((status) & 0x07)
+
+/* Frame was part of an aggregation */
+#define WSM_RX_STATUS_AGGREGATE (BIT(3))
+
+/* Frame was first in the aggregation */
+#define WSM_RX_STATUS_AGGREGATE_FIRST (BIT(4))
+
+/* Frame was last in the aggregation */
+#define WSM_RX_STATUS_AGGREGATE_LAST (BIT(5))
+
+/* Indicates a defragmented frame */
+#define WSM_RX_STATUS_DEFRAGMENTED (BIT(6))
+
+/* Indicates a Beacon frame */
+#define WSM_RX_STATUS_BEACON (BIT(7))
+
+/* Indicates STA bit beacon TIM field */
+#define WSM_RX_STATUS_TIM (BIT(8))
+
+/* Indicates Beacon frame's virtual bitmap contains multicast bit */
+#define WSM_RX_STATUS_MULTICAST (BIT(9))
+
+/* Indicates frame contains a matching SSID */
+#define WSM_RX_STATUS_MATCHING_SSID (BIT(10))
+
+/* Indicates frame contains a matching BSSI */
+#define WSM_RX_STATUS_MATCHING_BSSI (BIT(11))
+
+/* Indicates More bit set in Framectl field */
+#define WSM_RX_STATUS_MORE_DATA (BIT(12))
+
+/* Indicates frame received during a measurement process */
+#define WSM_RX_STATUS_MEASUREMENT (BIT(13))
+
+/* Indicates frame received as an HT packet */
+#define WSM_RX_STATUS_HT (BIT(14))
+
+/* Indicates frame received with STBC */
+#define WSM_RX_STATUS_STBC (BIT(15))
+
+/* Indicates Address 1 field matches dot11StationId */
+#define WSM_RX_STATUS_ADDRESS1 (BIT(16))
+
+/* Indicates Group address present in the Address 1 field */
+#define WSM_RX_STATUS_GROUP (BIT(17))
+
+/* Indicates Broadcast address present in the Address 1 field */
+#define WSM_RX_STATUS_BROADCAST (BIT(18))
+
+/* Indicates group key used with encrypted frames */
+#define WSM_RX_STATUS_GROUP_KEY (BIT(19))
+
+/* Macro to fetch encryption key index. */
+#define WSM_RX_STATUS_KEY_IDX(status) (((status) >> 20) & 0x0F)
+
+/* Frame Control field starts at Frame offset + 2 */
+#define WSM_TX_2BYTES_SHIFT (BIT(7))
+
+/* Join mode */
+/* IBSS */
+#define WSM_JOIN_MODE_IBSS (0)
+
+/* BSS */
+#define WSM_JOIN_MODE_BSS (1)
+
+/* PLCP preamble type */
+/* For long preamble */
+#define WSM_JOIN_PREAMBLE_LONG (0)
+
+/* For short preamble (Long for 1Mbps) */
+#define WSM_JOIN_PREAMBLE_SHORT (1)
+
+/* For short preamble (Long for 1 and 2Mbps) */
+#define WSM_JOIN_PREAMBLE_SHORT_2 (2)
+
+/* Join flags */
+/* Unsynchronized */
+#define WSM_JOIN_FLAGS_UNSYNCRONIZED BIT(0)
+/* The BSS owner is a P2P GO */
+#define WSM_JOIN_FLAGS_P2P_GO BIT(1)
+/* Force to join BSS with the BSSID and the
+ * SSID specified without waiting for beacons. The
+ * ProbeForJoin parameter is ignored. */
+#define WSM_JOIN_FLAGS_FORCE BIT(2)
+/* Give probe request/response higher
+ * priority over the BT traffic */
+#define WSM_JOIN_FLAGS_PRIO BIT(3)
+
+/* Key types */
+#define WSM_KEY_TYPE_WEP_DEFAULT (0)
+#define WSM_KEY_TYPE_WEP_PAIRWISE (1)
+#define WSM_KEY_TYPE_TKIP_GROUP (2)
+#define WSM_KEY_TYPE_TKIP_PAIRWISE (3)
+#define WSM_KEY_TYPE_AES_GROUP (4)
+#define WSM_KEY_TYPE_AES_PAIRWISE (5)
+#define WSM_KEY_TYPE_WAPI_GROUP (6)
+#define WSM_KEY_TYPE_WAPI_PAIRWISE (7)
+
+/* Key indexes */
+#define WSM_KEY_MAX_INDEX (10)
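+/* cw1200_alloc_key()/cw1200_free_key() in txrx.c hand out entry indexes
+ * from this range and track them in priv->key_map; cw1200_upload_keys()
+ * replays the cached entries to the device with wsm_add_key(). */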
+
+/* ACK policy */
+#define WSM_ACK_POLICY_NORMAL (0)
+#define WSM_ACK_POLICY_NO_ACK (1)
+
+/* Start modes */
+#define WSM_START_MODE_AP (0) /* Mini AP */
+#define WSM_START_MODE_P2P_GO (1) /* P2P GO */
+#define WSM_START_MODE_P2P_DEV (2) /* P2P device */
+
+/* SetAssociationMode MIB flags */
+#define WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE (BIT(0))
+#define WSM_ASSOCIATION_MODE_USE_HT_MODE (BIT(1))
+#define WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET (BIT(2))
+#define WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING (BIT(3))
+#define WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES (BIT(4))
+
+/* RcpiRssiThreshold MIB flags */
+#define WSM_RCPI_RSSI_THRESHOLD_ENABLE (BIT(0))
+#define WSM_RCPI_RSSI_USE_RSSI (BIT(1))
+#define WSM_RCPI_RSSI_DONT_USE_UPPER (BIT(2))
+#define WSM_RCPI_RSSI_DONT_USE_LOWER (BIT(3))
+
+/* Update-ie constants */
+#define WSM_UPDATE_IE_BEACON (BIT(0))
+#define WSM_UPDATE_IE_PROBE_RESP (BIT(1))
+#define WSM_UPDATE_IE_PROBE_REQ (BIT(2))
+
+/* WSM events */
+/* Error */
+#define WSM_EVENT_ERROR (0)
+
+/* BSS lost */
+#define WSM_EVENT_BSS_LOST (1)
+
+/* BSS regained */
+#define WSM_EVENT_BSS_REGAINED (2)
+
+/* Radar detected */
+#define WSM_EVENT_RADAR_DETECTED (3)
+
+/* RCPI or RSSI threshold triggered */
+#define WSM_EVENT_RCPI_RSSI (4)
+
+/* BT inactive */
+#define WSM_EVENT_BT_INACTIVE (5)
+
+/* BT active */
+#define WSM_EVENT_BT_ACTIVE (6)
+
+/* MIB IDs */
+/* 4.1 dot11StationId */
+#define WSM_MIB_ID_DOT11_STATION_ID 0x0000
+
+/* 4.2 dot11MaxTransmitMsduLifeTime */
+#define WSM_MIB_ID_DOT11_MAX_TRANSMIT_LIFTIME 0x0001
+
+/* 4.3 dot11MaxReceiveLifeTime */
+#define WSM_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME 0x0002
+
+/* 4.4 dot11SlotTime */
+#define WSM_MIB_ID_DOT11_SLOT_TIME 0x0003
+
+/* 4.5 dot11GroupAddressesTable */
+#define WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE 0x0004
+#define WSM_MAX_GRP_ADDRTABLE_ENTRIES 8
+
+/* 4.6 dot11WepDefaultKeyId */
+#define WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID 0x0005
+
+/* 4.7 dot11CurrentTxPowerLevel */
+#define WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL 0x0006
+
+/* 4.8 dot11RTSThreshold */
+#define WSM_MIB_ID_DOT11_RTS_THRESHOLD 0x0007
+
+/* 4.9 NonErpProtection */
+#define WSM_MIB_ID_NON_ERP_PROTECTION 0x1000
+
+/* 4.10 ArpIpAddressesTable */
+#define WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE 0x1001
+#define WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES 1
+
+/* 4.11 TemplateFrame */
+#define WSM_MIB_ID_TEMPLATE_FRAME 0x1002
+
+/* 4.12 RxFilter */
+#define WSM_MIB_ID_RX_FILTER 0x1003
+
+/* 4.13 BeaconFilterTable */
+#define WSM_MIB_ID_BEACON_FILTER_TABLE 0x1004
+
+/* 4.14 BeaconFilterEnable */
+#define WSM_MIB_ID_BEACON_FILTER_ENABLE 0x1005
+
+/* 4.15 OperationalPowerMode */
+#define WSM_MIB_ID_OPERATIONAL_POWER_MODE 0x1006
+
+/* 4.16 BeaconWakeUpPeriod */
+#define WSM_MIB_ID_BEACON_WAKEUP_PERIOD 0x1007
+
+/* 4.17 RcpiRssiThreshold */
+#define WSM_MIB_ID_RCPI_RSSI_THRESHOLD 0x1009
+
+/* 4.18 StatisticsTable */
+#define WSM_MIB_ID_STATISTICS_TABLE 0x100A
+
+/* 4.19 IbssPsConfig */
+#define WSM_MIB_ID_IBSS_PS_CONFIG 0x100B
+
+/* 4.20 CountersTable */
+#define WSM_MIB_ID_COUNTERS_TABLE 0x100C
+
+/* 4.21 BlockAckPolicy */
+#define WSM_MIB_ID_BLOCK_ACK_POLICY 0x100E
+
+/* 4.22 OverrideInternalTxRate */
+#define WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE 0x100F
+
+/* 4.23 SetAssociationMode */
+#define WSM_MIB_ID_SET_ASSOCIATION_MODE 0x1010
+
+/* 4.24 UpdateEptaConfigData */
+#define WSM_MIB_ID_UPDATE_EPTA_CONFIG_DATA 0x1011
+
+/* 4.25 SelectCcaMethod */
+#define WSM_MIB_ID_SELECT_CCA_METHOD 0x1012
+
+/* 4.26 SetUapsdInformation */
+#define WSM_MIB_ID_SET_UAPSD_INFORMATION 0x1013
+
+/* 4.27 SetAutoCalibrationMode WBF00004073 */
+#define WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE 0x1015
+
+/* 4.28 SetTxRateRetryPolicy */
+#define WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY 0x1016
+
+/* 4.29 SetHostMessageTypeFilter */
+#define WSM_MIB_ID_SET_HOST_MSG_TYPE_FILTER 0x1017
+
+/* 4.30 P2PFindInfo */
+#define WSM_MIB_ID_P2P_FIND_INFO 0x1018
+
+/* 4.31 P2PPsModeInfo */
+#define WSM_MIB_ID_P2P_PS_MODE_INFO 0x1019
+
+/* 4.32 SetEtherTypeDataFrameFilter */
+#define WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER 0x101A
+
+/* 4.33 SetUDPPortDataFrameFilter */
+#define WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER 0x101B
+
+/* 4.34 SetMagicDataFrameFilter */
+#define WSM_MIB_ID_SET_MAGIC_DATAFRAME_FILTER 0x101C
+
+/* This is the end of the specification. */
+
+/* 4.35 P2PDeviceInfo */
+#define WSM_MIB_ID_P2P_DEVICE_INFO 0x101D
+
+/* 4.36 SetWCDMABand */
+#define WSM_MIB_ID_SET_WCDMA_BAND 0x101E
+
+/* 4.37 GroupTxSequenceCounter */
+#define WSM_MIB_ID_GRP_SEQ_COUNTER 0x101F
+
+/* 4.38 ProtectedMgmtPolicy */
+#define WSM_MIB_ID_PROTECTED_MGMT_POLICY 0x1020
+
+/* 4.39 SetHtProtection */
+#define WSM_MID_ID_SET_HT_PROTECTION 0x1021
+
+/* 4.40 GPIO Command */
+#define WSM_MIB_ID_GPIO_COMMAND 0x1022
+
+/* 4.41 TSF Counter Value */
+#define WSM_MIB_ID_TSF_COUNTER 0x1023
+
+/* Test Purposes Only */
+#define WSM_MIB_ID_BLOCK_ACK_INFO 0x100D
+
+/* 4.42 UseMultiTxConfMessage */
+#define WSM_MIB_USE_MULTI_TX_CONF 0x1024
+
+/* 4.43 Keep-alive period */
+#define WSM_MIB_ID_KEEP_ALIVE_PERIOD 0x1025
+
+/* 4.44 Disable BSSID filter */
+#define WSM_MIB_ID_DISABLE_BSSID_FILTER 0x1026
+
+/* Frame template types */
+#define WSM_FRAME_TYPE_PROBE_REQUEST (0)
+#define WSM_FRAME_TYPE_BEACON (1)
+#define WSM_FRAME_TYPE_NULL (2)
+#define WSM_FRAME_TYPE_QOS_NULL (3)
+#define WSM_FRAME_TYPE_PS_POLL (4)
+#define WSM_FRAME_TYPE_PROBE_RESPONSE (5)
+
+#define WSM_FRAME_GREENFIELD (0x80) /* See 4.11 */
+
+/* Status */
+/* The WSM firmware has completed a request */
+/* successfully. */
+#define WSM_STATUS_SUCCESS (0)
+
+/* This is a generic failure code if other error codes do */
+/* not apply. */
+#define WSM_STATUS_FAILURE (1)
+
+/* A request contains one or more invalid parameters. */
+#define WSM_INVALID_PARAMETER (2)
+
+/* The request cannot be performed because the device is in */
+/* an inappropriate mode. */
+#define WSM_ACCESS_DENIED (3)
+
+/* The frame received includes a decryption error. */
+#define WSM_STATUS_DECRYPTFAILURE (4)
+
+/* A MIC failure is detected in the received packets. */
+#define WSM_STATUS_MICFAILURE (5)
+
+/* The transmit request failed due to retry limit being */
+/* exceeded. */
+#define WSM_STATUS_RETRY_EXCEEDED (6)
+
+/* The transmit request failed due to MSDU life time */
+/* being exceeded. */
+#define WSM_STATUS_TX_LIFETIME_EXCEEDED (7)
+
+/* The link to the AP is lost. */
+#define WSM_STATUS_LINK_LOST (8)
+
+/* No key was found for the encrypted frame */
+#define WSM_STATUS_NO_KEY_FOUND (9)
+
+/* Jammer was detected when transmitting this frame */
+#define WSM_STATUS_JAMMER_DETECTED (10)
+
+/* The message should be requeued later. */
+/* This is applicable only to Transmit */
+#define WSM_REQUEUE (11)
+
+/* Advanced filtering options */
+#define WSM_MAX_FILTER_ELEMENTS (4)
+
+#define WSM_FILTER_ACTION_IGNORE (0)
+#define WSM_FILTER_ACTION_FILTER_IN (1)
+#define WSM_FILTER_ACTION_FILTER_OUT (2)
+
+#define WSM_FILTER_PORT_TYPE_DST (0)
+#define WSM_FILTER_PORT_TYPE_SRC (1)
+
+
+
+struct wsm_hdr {
+ __le16 len;
+ __le16 id;
+};
+
+#define WSM_TX_SEQ_MAX (7)
+#define WSM_TX_SEQ(seq) \
+ ((seq & WSM_TX_SEQ_MAX) << 13)
+#define WSM_TX_LINK_ID_MAX (0x0F)
+#define WSM_TX_LINK_ID(link_id) \
+ ((link_id & WSM_TX_LINK_ID_MAX) << 6)
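+
+/*
+ * Illustrative sketch (not part of the original header): the macros above
+ * pack a host sequence number into bits 15:13 and a link ID into bits 9:6
+ * of the 16-bit wsm_hdr.id field; the low bits are assumed here to carry
+ * the WSM message ID. wsm_example_make_id() is a hypothetical helper.
+ */
+static inline __le16 wsm_example_make_id(u16 msg_id, u8 link_id, u8 seq)
+{
+	return __cpu_to_le16(msg_id | WSM_TX_LINK_ID(link_id) |
+			     WSM_TX_SEQ(seq));
+}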
+
+#define MAX_BEACON_SKIP_TIME_MS 1000
+
+#define WSM_CMD_LAST_CHANCE_TIMEOUT (HZ * 3 / 2)
+
+/* ******************************************************************** */
+/* WSM capability */
+
+struct wsm_caps {
+ u16 numInpChBufs;
+ u16 sizeInpChBuf;
+ u16 hardwareId;
+ u16 hardwareSubId;
+ u16 firmwareCap;
+ u16 firmwareType;
+ u16 firmwareApiVer;
+ u16 firmwareBuildNumber;
+ u16 firmwareVersion;
+ int firmwareReady;
+};
+
+/* ******************************************************************** */
+/* WSM commands */
+
+struct wsm_tx_power_range {
+ int min_power_level;
+ int max_power_level;
+ u32 stepping;
+};
+
+/* 3.1 */
+struct wsm_configuration {
+ /* [in] */ u32 dot11MaxTransmitMsduLifeTime;
+ /* [in] */ u32 dot11MaxReceiveLifeTime;
+ /* [in] */ u32 dot11RtsThreshold;
+ /* [in, out] */ u8 *dot11StationId;
+ /* [in] */ const void *dpdData;
+ /* [in] */ size_t dpdData_size;
+ /* [out] */ u8 dot11FrequencyBandsSupported;
+ /* [out] */ u32 supportedRateMask;
+ /* [out] */ struct wsm_tx_power_range txPowerRange[2];
+};
+
+int wsm_configuration(struct cw1200_common *priv,
+ struct wsm_configuration *arg);
+
+/* 3.3 */
+struct wsm_reset {
+ /* [in] */ int link_id;
+ /* [in] */ bool reset_statistics;
+};
+
+int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg);
+
+/* 3.5 */
+int wsm_read_mib(struct cw1200_common *priv, u16 mibId, void *buf,
+ size_t buf_size);
+
+/* 3.7 */
+int wsm_write_mib(struct cw1200_common *priv, u16 mibId, void *buf,
+ size_t buf_size);
+
+/* 3.9 */
+struct wsm_ssid {
+ u8 ssid[32];
+ u32 length;
+};
+
+struct wsm_scan_ch {
+ u16 number;
+ u32 minChannelTime;
+ u32 maxChannelTime;
+ u32 txPowerLevel;
+};
+
+/* 3.13 */
+struct wsm_scan_complete {
+ /* WSM_STATUS_... */
+ u32 status;
+
+ /* WSM_PSM_... */
+ u8 psm;
+
+ /* Number of channels that the scan operation completed. */
+ u8 numChannels;
+};
+
+typedef void (*wsm_scan_complete_cb) (struct cw1200_common *priv,
+ struct wsm_scan_complete *arg);
+
+/* 3.9 */
+struct wsm_scan {
+ /* WSM_PHY_BAND_... */
+ /* [in] */ u8 band;
+
+ /* WSM_SCAN_TYPE_... */
+ /* [in] */ u8 scanType;
+
+ /* WSM_SCAN_FLAG_... */
+ /* [in] */ u8 scanFlags;
+
+ /* WSM_TRANSMIT_RATE_... */
+ /* [in] */ u8 maxTransmitRate;
+
+	/* Time interval in TUs after which the device shall */
+	/* re-execute the requested scan. The maximum value */
+	/* supported by the device is 256s. */
+ /* [in] */ u32 autoScanInterval;
+
+ /* Number of probe requests (per SSID) sent to one (1) */
+	/* channel. Zero (0) means that none is sent, which */
+ /* means that a passive scan is to be done. Value */
+ /* greater than zero (0) means that an active scan is to */
+ /* be done. */
+ /* [in] */ u32 numOfProbeRequests;
+
+ /* Number of channels to be scanned. */
+ /* Maximum value is WSM_SCAN_MAX_NUM_OF_CHANNELS. */
+ /* [in] */ u8 numOfChannels;
+
+ /* Number of SSID provided in the scan command (this */
+ /* is zero (0) in broadcast scan) */
+ /* The maximum number of SSIDs is WSM_SCAN_MAX_NUM_OF_SSIDS. */
+ /* [in] */ u8 numOfSSIDs;
+
+	/* The delay time (in microseconds) to wait */
+ /* before sending a probe-request. */
+ /* [in] */ u8 probeDelay;
+
+ /* SSIDs to be scanned [numOfSSIDs]; */
+ /* [in] */ struct wsm_ssid *ssids;
+
+ /* Channels to be scanned [numOfChannels]; */
+ /* [in] */ struct wsm_scan_ch *ch;
+};
+
+int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg);
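+
+/*
+ * Illustrative sketch (not part of the original header): a minimal,
+ * single-channel passive scan request. The values are example choices,
+ * not defaults mandated by the API; unspecified fields stay zero.
+ */
+static inline int wsm_example_passive_scan(struct cw1200_common *priv)
+{
+	struct wsm_scan_ch ch = {
+		.number = 1,		/* channel 1 */
+		.minChannelTime = 50,	/* example dwell times */
+		.maxChannelTime = 110,
+	};
+	struct wsm_scan scan = {
+		/* band/scanType left at 0 for brevity; see WSM_PHY_BAND_...
+		 * and WSM_SCAN_TYPE_... */
+		.numOfProbeRequests = 0,	/* 0 probes => passive scan */
+		.numOfChannels = 1,
+		.ch = &ch,
+	};
+	return wsm_scan(priv, &scan);
+}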
+
+/* 3.11 */
+int wsm_stop_scan(struct cw1200_common *priv);
+
+/* 3.14 */
+struct wsm_tx_confirm {
+ /* Packet identifier used in wsm_tx. */
+ /* [out] */ u32 packetID;
+
+ /* WSM_STATUS_... */
+ /* [out] */ u32 status;
+
+ /* WSM_TRANSMIT_RATE_... */
+ /* [out] */ u8 txedRate;
+
+ /* The number of times the frame was transmitted */
+ /* without receiving an acknowledgement. */
+ /* [out] */ u8 ackFailures;
+
+ /* WSM_TX_STATUS_... */
+ /* [out] */ u16 flags;
+
+ /* The total time in microseconds that the frame spent in */
+	/* the WLAN device before transmission was completed. */
+ /* [out] */ u32 mediaDelay;
+
+ /* The total time in microseconds that the frame spent in */
+ /* the WLAN device before transmission was started. */
+ /* [out] */ u32 txQueueDelay;
+
+ /* [out]*/ u32 link_id;
+};
+
+/* 3.15 */
+typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv,
+ struct wsm_tx_confirm *arg);
+
+/* Note that the wsm_tx struct differs from the rest of the WSM API.
+ * wsm_hdr is /not/ a caller-adapted struct to be used as an input
+ * argument for a WSM call, but a prepared bytestream to be sent to firmware.
+ * It is filled partly in cw1200_tx and partly in the low-level WSM code.
+ *
+ * Legend:
+ * - [in]: cw1200_tx must fill this field.
+ * - [wsm]: the field is filled by the low-level WSM code.
+ */
+struct wsm_tx {
+ /* common WSM header */
+ /* [in/wsm] */ struct wsm_hdr hdr;
+
+	/* Packet identifier that is meant to be used in the completion. */
+ /* [in] */ __le32 packetID;
+
+ /* WSM_TRANSMIT_RATE_... */
+ /* [in] */ u8 maxTxRate;
+
+ /* WSM_QUEUE_... */
+ /* [in] */ u8 queueId;
+
+ /* True: another packet is pending on the host for transmission. */
+ /* [wsm] */ u8 more;
+
+ /* Bit 0 = 0 - Start expiry time from first Tx attempt (default) */
+ /* Bit 0 = 1 - Start expiry time from receipt of Tx Request */
+ /* Bits 3:1 - PTA Priority */
+ /* Bits 6:4 - Tx Rate Retry Policy */
+ /* Bit 7 - Reserved */
+ /* [in] */ u8 flags;
+
+ /* Should be 0. */
+ /* [in] */ __le32 reserved;
+
+ /* The elapsed time in TUs, after the initial transmission */
+ /* of an MSDU, after which further attempts to transmit */
+ /* the MSDU shall be terminated. Overrides the global */
+ /* dot11MaxTransmitMsduLifeTime setting [optional] */
+ /* Device will set the default value if this is 0. */
+ /* [wsm] */ __le32 expireTime;
+
+ /* WSM_HT_TX_... */
+ /* [in] */ __le32 htTxParameters;
+};
+
+/* = sizeof(generic hi hdr) + sizeof(wsm hdr) + sizeof(alignment) */
+#define WSM_TX_EXTRA_HEADROOM (28)
+
+/* 3.16 */
+struct wsm_rx {
+ /* WSM_STATUS_... */
+ /* [out] */ u32 status;
+
+ /* Specifies the channel of the received packet. */
+ /* [out] */ u16 channelNumber;
+
+ /* WSM_TRANSMIT_RATE_... */
+ /* [out] */ u8 rxedRate;
+
+ /* This value is expressed in signed Q8.0 format for */
+ /* RSSI and unsigned Q7.1 format for RCPI. */
+ /* [out] */ u8 rcpiRssi;
+
+ /* WSM_RX_STATUS_... */
+ /* [out] */ u32 flags;
+
+ /* An 802.11 frame. */
+ /* [out] */ void *frame;
+
+ /* Size of the frame */
+ /* [out] */ size_t frame_size;
+
+ /* Link ID */
+ /* [out] */ int link_id;
+};
+
+/* = sizeof(generic hi hdr) + sizeof(wsm hdr) */
+#define WSM_RX_EXTRA_HEADROOM (16)
+
+typedef void (*wsm_rx_cb) (struct cw1200_common *priv, struct wsm_rx *arg,
+ struct sk_buff **skb_p);
+
+/* 3.17 */
+struct wsm_event {
+ /* WSM_STATUS_... */
+ /* [out] */ u32 eventId;
+
+ /* Indication parameters. */
+ /* For error indication, this shall be a 32-bit WSM status. */
+ /* For RCPI or RSSI indication, this should be an 8-bit */
+ /* RCPI or RSSI value. */
+ /* [out] */ u32 eventData;
+};
+
+struct cw1200_wsm_event {
+ struct list_head link;
+ struct wsm_event evt;
+};
+
+/* 3.18 - 3.22 */
+/* Measurement. Skipped for now. Irrelevant. */
+
+typedef void (*wsm_event_cb) (struct cw1200_common *priv,
+ struct wsm_event *arg);
+
+/* 3.23 */
+struct wsm_join {
+ /* WSM_JOIN_MODE_... */
+ /* [in] */ u8 mode;
+
+ /* WSM_PHY_BAND_... */
+ /* [in] */ u8 band;
+
+ /* Specifies the channel number to join. The channel */
+ /* number will be mapped to an actual frequency */
+ /* according to the band */
+ /* [in] */ u16 channelNumber;
+
+ /* Specifies the BSSID of the BSS or IBSS to be joined */
+ /* or the IBSS to be started. */
+ /* [in] */ u8 bssid[6];
+
+ /* ATIM window of IBSS */
+ /* When ATIM window is zero the initiated IBSS does */
+ /* not support power saving. */
+ /* [in] */ u16 atimWindow;
+
+ /* WSM_JOIN_PREAMBLE_... */
+ /* [in] */ u8 preambleType;
+
+	/* Specifies if a probe request should be sent with the */
+ /* specified SSID when joining to the network. */
+ /* [in] */ u8 probeForJoin;
+
+ /* DTIM Period (In multiples of beacon interval) */
+ /* [in] */ u8 dtimPeriod;
+
+ /* WSM_JOIN_FLAGS_... */
+ /* [in] */ u8 flags;
+
+ /* Length of the SSID */
+ /* [in] */ u32 ssidLength;
+
+ /* Specifies the SSID of the IBSS to join or start */
+ /* [in] */ u8 ssid[32];
+
+ /* Specifies the time between TBTTs in TUs */
+ /* [in] */ u32 beaconInterval;
+
+ /* A bit mask that defines the BSS basic rate set. */
+ /* [in] */ u32 basicRateSet;
+
+ /* Minimum transmission power level in units of 0.1dBm */
+ /* [out] */ int minPowerLevel;
+
+ /* Maximum transmission power level in units of 0.1dBm */
+ /* [out] */ int maxPowerLevel;
+};
+
+int wsm_join(struct cw1200_common *priv, struct wsm_join *arg);
+
+/* 3.25 */
+struct wsm_set_pm {
+ /* WSM_PSM_... */
+ /* [in] */ u8 pmMode;
+
+ /* in unit of 500us; 0 to use default */
+ /* [in] */ u8 fastPsmIdlePeriod;
+
+ /* in unit of 500us; 0 to use default */
+ /* [in] */ u8 apPsmChangePeriod;
+
+ /* in unit of 500us; 0 to disable auto-pspoll */
+ /* [in] */ u8 minAutoPsPollPeriod;
+};
+
+int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg);
+
+/* 3.27 */
+struct wsm_set_pm_complete {
+ u8 psm; /* WSM_PSM_... */
+};
+
+typedef void (*wsm_set_pm_complete_cb) (struct cw1200_common *priv,
+ struct wsm_set_pm_complete *arg);
+
+/* 3.28 */
+struct wsm_set_bss_params {
+ /* The number of lost consecutive beacons after which */
+ /* the WLAN device should indicate the BSS-Lost event */
+ /* to the WLAN host driver. */
+ u8 beaconLostCount;
+
+ /* The AID received during the association process. */
+ u16 aid;
+
+ /* The operational rate set mask */
+ u32 operationalRateSet;
+};
+
+int wsm_set_bss_params(struct cw1200_common *priv,
+ const struct wsm_set_bss_params *arg);
+
+/* 3.30 */
+struct wsm_add_key {
+ u8 type; /* WSM_KEY_TYPE_... */
+ u8 entryIndex; /* Key entry index: 0 -- WSM_KEY_MAX_INDEX */
+ u16 reserved;
+ union {
+ struct {
+ u8 peerAddress[6]; /* MAC address of the
+ * peer station */
+ u8 reserved;
+ u8 keyLength; /* Key length in bytes */
+ u8 keyData[16]; /* Key data */
+ } __packed wepPairwiseKey;
+ struct {
+ u8 keyId; /* Unique per key identifier
+ * (0..3) */
+ u8 keyLength; /* Key length in bytes */
+ u16 reserved;
+ u8 keyData[16]; /* Key data */
+ } __packed wepGroupKey;
+ struct {
+ u8 peerAddress[6]; /* MAC address of the
+ * peer station */
+ u8 reserved[2];
+ u8 tkipKeyData[16]; /* TKIP key data */
+ u8 rxMicKey[8]; /* Rx MIC key */
+ u8 txMicKey[8]; /* Tx MIC key */
+ } __packed tkipPairwiseKey;
+ struct {
+ u8 tkipKeyData[16]; /* TKIP key data */
+ u8 rxMicKey[8]; /* Rx MIC key */
+ u8 keyId; /* Key ID */
+ u8 reserved[3];
+ u8 rxSeqCounter[8]; /* Receive Sequence Counter */
+ } __packed tkipGroupKey;
+ struct {
+ u8 peerAddress[6]; /* MAC address of the
+ * peer station */
+ u16 reserved;
+ u8 aesKeyData[16]; /* AES key data */
+ } __packed aesPairwiseKey;
+ struct {
+ u8 aesKeyData[16]; /* AES key data */
+ u8 keyId; /* Key ID */
+ u8 reserved[3];
+ u8 rxSeqCounter[8]; /* Receive Sequence Counter */
+ } __packed aesGroupKey;
+ struct {
+ u8 peerAddress[6]; /* MAC address of the
+ * peer station */
+ u8 keyId; /* Key ID */
+ u8 reserved;
+ u8 wapiKeyData[16]; /* WAPI key data */
+ u8 micKeyData[16]; /* MIC key data */
+ } __packed wapiPairwiseKey;
+ struct {
+ u8 wapiKeyData[16]; /* WAPI key data */
+ u8 micKeyData[16]; /* MIC key data */
+ u8 keyId; /* Key ID */
+ u8 reserved[3];
+ } __packed wapiGroupKey;
+ } __packed;
+} __packed;
+
+int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg);
+
+/* 3.32 */
+struct wsm_remove_key {
+ /* Key entry index : 0-10 */
+ u8 entryIndex;
+};
+
+int wsm_remove_key(struct cw1200_common *priv,
+ const struct wsm_remove_key *arg);
+
+/* 3.34 */
+struct wsm_set_tx_queue_params {
+ /* WSM_ACK_POLICY_... */
+ u8 ackPolicy;
+
+ /* Medium Time of TSPEC (in 32us units) allowed per */
+ /* One Second Averaging Period for this queue. */
+ u16 allowedMediumTime;
+
+ /* dot11MaxTransmitMsduLifetime to be used for the */
+ /* specified queue. */
+ u32 maxTransmitLifetime;
+};
+
+struct wsm_tx_queue_params {
+ /* NOTE: index is a linux queue id. */
+ struct wsm_set_tx_queue_params params[4];
+};
+
+
+#define WSM_TX_QUEUE_SET(queue_params, queue, ack_policy, allowed_time,\
+ max_life_time) \
+do { \
+ struct wsm_set_tx_queue_params *p = &(queue_params)->params[queue]; \
+ p->ackPolicy = (ack_policy); \
+ p->allowedMediumTime = (allowed_time); \
+ p->maxTransmitLifetime = (max_life_time); \
+} while (0)
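+
+/*
+ * Illustrative sketch (not part of the original header): initializing all
+ * four Linux queues with a normal ACK policy and zeroed medium time and
+ * lifetime (example values only). wsm_example_* is a hypothetical helper.
+ */
+static inline void wsm_example_init_tx_queue_params(
+				struct wsm_tx_queue_params *tx_queue_params)
+{
+	int i;
+
+	for (i = 0; i < 4; ++i)
+		WSM_TX_QUEUE_SET(tx_queue_params, i,
+				 WSM_ACK_POLICY_NORMAL, 0, 0);
+}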
+
+int wsm_set_tx_queue_params(struct cw1200_common *priv,
+ const struct wsm_set_tx_queue_params *arg, u8 id);
+
+/* 3.36 */
+struct wsm_edca_queue_params {
+ /* CWmin (in slots) for the access class. */
+ /* [in] */ u16 cwMin;
+
+ /* CWmax (in slots) for the access class. */
+ /* [in] */ u16 cwMax;
+
+ /* AIFS (in slots) for the access class. */
+ /* [in] */ u8 aifns;
+
+ /* TX OP Limit (in microseconds) for the access class. */
+ /* [in] */ u16 txOpLimit;
+
+ /* dot11MaxReceiveLifetime to be used for the specified */
+	/* access class. Overrides the global */
+ /* dot11MaxReceiveLifetime value */
+ /* [in] */ u32 maxReceiveLifetime;
+
+ /* UAPSD trigger support for the access class. */
+ /* [in] */ bool uapsdEnable;
+};
+
+struct wsm_edca_params {
+ /* NOTE: index is a linux queue id. */
+ struct wsm_edca_queue_params params[4];
+};
+
+#define TXOP_UNIT 32
+#define WSM_EDCA_SET(edca, queue, aifs, cw_min, cw_max, txop, life_time,\
+ uapsd) \
+ do { \
+ struct wsm_edca_queue_params *p = &(edca)->params[queue]; \
+ p->cwMin = (cw_min); \
+ p->cwMax = (cw_max); \
+ p->aifns = (aifs); \
+ p->txOpLimit = ((txop) * TXOP_UNIT); \
+ p->maxReceiveLifetime = (life_time); \
+ p->uapsdEnable = (uapsd); \
+ } while (0)
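+
+/*
+ * Illustrative sketch (not part of the original header): filling the
+ * best-effort entry (Linux queue 2, see the queue mapping at the end of
+ * this file) with example EDCA parameters. The numbers are examples only.
+ */
+static inline void wsm_example_edca_best_effort(struct wsm_edca_params *edca)
+{
+	/* queue 2, AIFS 3, CWmin 15, CWmax 1023, TXOP 0, lifetime 0, no UAPSD */
+	WSM_EDCA_SET(edca, 2, 3, 15, 1023, 0, 0, false);
+}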
+
+int wsm_set_edca_params(struct cw1200_common *priv,
+ const struct wsm_edca_params *arg);
+
+int wsm_set_uapsd_param(struct cw1200_common *priv,
+ const struct wsm_edca_params *arg);
+
+/* 3.38 */
+/* Set-System info. Skipped for now. Irrelevant. */
+
+/* 3.40 */
+struct wsm_switch_channel {
+ /* 1 - means the STA shall not transmit any further */
+ /* frames until the channel switch has completed */
+ /* [in] */ u8 channelMode;
+
+ /* Number of TBTTs until channel switch occurs. */
+ /* 0 - indicates switch shall occur at any time */
+ /* 1 - occurs immediately before the next TBTT */
+ /* [in] */ u8 channelSwitchCount;
+
+ /* The new channel number to switch to. */
+ /* Note this is defined as per section 2.7. */
+ /* [in] */ u16 newChannelNumber;
+};
+
+int wsm_switch_channel(struct cw1200_common *priv,
+ const struct wsm_switch_channel *arg);
+
+typedef void (*wsm_channel_switch_cb) (struct cw1200_common *priv);
+
+struct wsm_start {
+ /* WSM_START_MODE_... */
+ /* [in] */ u8 mode;
+
+ /* WSM_PHY_BAND_... */
+ /* [in] */ u8 band;
+
+ /* Channel number */
+ /* [in] */ u16 channelNumber;
+
+ /* Client Traffic window in units of TU */
+ /* Valid only when mode == ..._P2P */
+ /* [in] */ u32 CTWindow;
+
+ /* Interval between two consecutive */
+ /* beacon transmissions in TU. */
+ /* [in] */ u32 beaconInterval;
+
+ /* DTIM period in terms of beacon intervals */
+ /* [in] */ u8 DTIMPeriod;
+
+ /* WSM_JOIN_PREAMBLE_... */
+ /* [in] */ u8 preambleType;
+
+	/* The delay time (in microseconds) to wait */
+ /* before sending a probe-request. */
+ /* [in] */ u8 probeDelay;
+
+ /* Length of the SSID */
+ /* [in] */ u8 ssidLength;
+
+ /* SSID of the BSS or P2P_GO to be started now. */
+ /* [in] */ u8 ssid[32];
+
+ /* The basic supported rates for the MiniAP. */
+ /* [in] */ u32 basicRateSet;
+};
+
+int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg);
+
+struct wsm_beacon_transmit {
+ /* 1: enable; 0: disable */
+ /* [in] */ u8 enableBeaconing;
+};
+
+int wsm_beacon_transmit(struct cw1200_common *priv,
+ const struct wsm_beacon_transmit *arg);
+
+int wsm_start_find(struct cw1200_common *priv);
+
+int wsm_stop_find(struct cw1200_common *priv);
+
+typedef void (*wsm_find_complete_cb) (struct cw1200_common *priv, u32 status);
+
+struct wsm_suspend_resume {
+ /* See 3.52 */
+ /* Link ID */
+ /* [out] */ int link_id;
+ /* Stop sending further Tx requests down to device for this link */
+ /* [out] */ bool stop;
+ /* Transmit multicast Frames */
+ /* [out] */ bool multicast;
+	/* The AC on which Tx is to be suspended/resumed. */
+ /* This is applicable only for U-APSD */
+ /* WSM_QUEUE_... */
+ /* [out] */ int queue;
+};
+
+typedef void (*wsm_suspend_resume_cb) (struct cw1200_common *priv,
+ struct wsm_suspend_resume *arg);
+
+/* 3.54 Update-IE request. */
+struct wsm_update_ie {
+ /* WSM_UPDATE_IE_... */
+ /* [in] */ u16 what;
+ /* [in] */ u16 count;
+ /* [in] */ u8 *ies;
+ /* [in] */ size_t length;
+};
+
+int wsm_update_ie(struct cw1200_common *priv,
+ const struct wsm_update_ie *arg);
+
+/* 3.56 */
+struct wsm_map_link {
+ /* MAC address of the remote device */
+ /* [in] */ u8 mac_addr[6];
+ /* [in] */ u8 link_id;
+};
+
+int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg);
+
+struct wsm_cbc {
+ wsm_scan_complete_cb scan_complete;
+ wsm_tx_confirm_cb tx_confirm;
+ wsm_rx_cb rx;
+ wsm_event_cb event;
+ wsm_set_pm_complete_cb set_pm_complete;
+ wsm_channel_switch_cb channel_switch;
+ wsm_find_complete_cb find_complete;
+ wsm_suspend_resume_cb suspend_resume;
+};
+
+/* ******************************************************************** */
+/* MIB shortcuts */
+
+static inline int wsm_set_output_power(struct cw1200_common *priv,
+ int power_level)
+{
+ __le32 val = __cpu_to_le32(power_level);
+ return wsm_write_mib(priv, WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL,
+ &val, sizeof(val));
+}
+
+static inline int wsm_set_beacon_wakeup_period(struct cw1200_common *priv,
+ unsigned dtim_interval,
+ unsigned listen_interval)
+{
+ struct {
+ u8 numBeaconPeriods;
+ u8 reserved;
+ __le16 listenInterval;
+ } val = {
+ dtim_interval, 0, __cpu_to_le16(listen_interval)};
+ if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
+ return -EINVAL;
+ else
+ return wsm_write_mib(priv, WSM_MIB_ID_BEACON_WAKEUP_PERIOD,
+ &val, sizeof(val));
+}
+
+struct wsm_rcpi_rssi_threshold {
+ u8 rssiRcpiMode; /* WSM_RCPI_RSSI_... */
+ u8 lowerThreshold;
+ u8 upperThreshold;
+ u8 rollingAverageCount;
+};
+
+static inline int wsm_set_rcpi_rssi_threshold(struct cw1200_common *priv,
+ struct wsm_rcpi_rssi_threshold *arg)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_RCPI_RSSI_THRESHOLD, arg,
+ sizeof(*arg));
+}
+
+struct wsm_counters_table {
+ __le32 countPlcpErrors;
+ __le32 countFcsErrors;
+ __le32 countTxPackets;
+ __le32 countRxPackets;
+ __le32 countRxPacketErrors;
+ __le32 countRxDecryptionFailures;
+ __le32 countRxMicFailures;
+ __le32 countRxNoKeyFailures;
+ __le32 countTxMulticastFrames;
+ __le32 countTxFramesSuccess;
+ __le32 countTxFrameFailures;
+ __le32 countTxFramesRetried;
+ __le32 countTxFramesMultiRetried;
+ __le32 countRxFrameDuplicates;
+ __le32 countRtsSuccess;
+ __le32 countRtsFailures;
+ __le32 countAckFailures;
+ __le32 countRxMulticastFrames;
+ __le32 countRxFramesSuccess;
+ __le32 countRxCMACICVErrors;
+ __le32 countRxCMACReplays;
+ __le32 countRxMgmtCCMPReplays;
+};
+
+static inline int wsm_get_counters_table(struct cw1200_common *priv,
+ struct wsm_counters_table *arg)
+{
+ return wsm_read_mib(priv, WSM_MIB_ID_COUNTERS_TABLE,
+ arg, sizeof(*arg));
+}
+
+static inline int wsm_get_station_id(struct cw1200_common *priv, u8 *mac)
+{
+ return wsm_read_mib(priv, WSM_MIB_ID_DOT11_STATION_ID, mac, ETH_ALEN);
+}
+
+struct wsm_rx_filter {
+ bool promiscuous;
+ bool bssid;
+ bool fcs;
+};
+
+static inline int wsm_set_rx_filter(struct cw1200_common *priv,
+ const struct wsm_rx_filter *arg)
+{
+ __le32 val = 0;
+ if (arg->promiscuous)
+ val |= __cpu_to_le32(BIT(0));
+ if (arg->bssid)
+ val |= __cpu_to_le32(BIT(1));
+ if (arg->fcs)
+ val |= __cpu_to_le32(BIT(2));
+ return wsm_write_mib(priv, WSM_MIB_ID_RX_FILTER, &val, sizeof(val));
+}
+
+#define WSM_BEACON_FILTER_IE_HAS_CHANGED BIT(0)
+#define WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT BIT(1)
+#define WSM_BEACON_FILTER_IE_HAS_APPEARED BIT(2)
+
+struct wsm_beacon_filter_table_entry {
+ u8 ieId;
+ u8 actionFlags;
+ u8 oui[3];
+ u8 matchData[3];
+} __packed;
+
+struct wsm_beacon_filter_table {
+ __le32 numOfIEs;
+ struct wsm_beacon_filter_table_entry entry[10];
+} __packed;
+
+static inline int wsm_set_beacon_filter_table(struct cw1200_common *priv,
+ struct wsm_beacon_filter_table *ft)
+{
+ size_t size = __le32_to_cpu(ft->numOfIEs) *
+ sizeof(struct wsm_beacon_filter_table_entry) +
+ sizeof(__le32);
+
+ return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_TABLE, ft, size);
+}
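+
+/*
+ * Illustrative sketch (not part of the original header): registering a
+ * single beacon-filter entry for the TIM element (802.11 element ID 5)
+ * with the HAS_CHANGED action flag. The exact filtering semantics are
+ * defined by the firmware specification; the values are examples only.
+ */
+static inline int wsm_example_filter_on_tim(struct cw1200_common *priv)
+{
+	struct wsm_beacon_filter_table ft = {
+		.numOfIEs = __cpu_to_le32(1),
+	};
+
+	ft.entry[0].ieId = 5;	/* TIM element ID */
+	ft.entry[0].actionFlags = WSM_BEACON_FILTER_IE_HAS_CHANGED;
+	return wsm_set_beacon_filter_table(priv, &ft);
+}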
+
+struct wsm_beacon_filter_control {
+ int enabled;
+ int bcn_count;
+};
+
+static inline int wsm_beacon_filter_control(struct cw1200_common *priv,
+ struct wsm_beacon_filter_control *arg)
+{
+ struct {
+ __le32 enabled;
+ __le32 bcn_count;
+ } val;
+ val.enabled = __cpu_to_le32(arg->enabled);
+ val.bcn_count = __cpu_to_le32(arg->bcn_count);
+ return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_ENABLE, &val,
+ sizeof(val));
+}
+
+enum wsm_power_mode {
+ wsm_power_mode_active = 0,
+ wsm_power_mode_doze = 1,
+ wsm_power_mode_quiescent = 2,
+};
+
+struct wsm_operational_mode {
+ enum wsm_power_mode power_mode;
+ int disableMoreFlagUsage;
+ int performAntDiversity;
+};
+
+static inline int wsm_set_operational_mode(struct cw1200_common *priv,
+ const struct wsm_operational_mode *arg)
+{
+ u8 val = arg->power_mode;
+ if (arg->disableMoreFlagUsage)
+ val |= BIT(4);
+ if (arg->performAntDiversity)
+ val |= BIT(5);
+ return wsm_write_mib(priv, WSM_MIB_ID_OPERATIONAL_POWER_MODE, &val,
+ sizeof(val));
+}
+
+struct wsm_template_frame {
+ u8 frame_type;
+ u8 rate;
+ bool disable;
+ struct sk_buff *skb;
+};
+
+static inline int wsm_set_template_frame(struct cw1200_common *priv,
+ struct wsm_template_frame *arg)
+{
+ int ret;
+ u8 *p = skb_push(arg->skb, 4);
+ p[0] = arg->frame_type;
+ p[1] = arg->rate;
+ if (arg->disable)
+ ((u16 *) p)[1] = 0;
+ else
+ ((u16 *) p)[1] = __cpu_to_le16(arg->skb->len - 4);
+ ret = wsm_write_mib(priv, WSM_MIB_ID_TEMPLATE_FRAME, p, arg->skb->len);
+ skb_pull(arg->skb, 4);
+ return ret;
+}
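+
+/*
+ * Illustrative sketch (not part of the original header): installing a
+ * probe-request template. The caller-provided skb must have at least
+ * 4 bytes of headroom for the header pushed by wsm_set_template_frame();
+ * rate 0 is an example value.
+ */
+static inline int wsm_example_set_probe_req_template(
+				struct cw1200_common *priv,
+				struct sk_buff *skb)
+{
+	struct wsm_template_frame frame = {
+		.frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
+		.rate = 0,
+		.disable = false,
+		.skb = skb,
+	};
+
+	return wsm_set_template_frame(priv, &frame);
+}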
+
+
+struct wsm_protected_mgmt_policy {
+ bool protectedMgmtEnable;
+ bool unprotectedMgmtFramesAllowed;
+ bool encryptionForAuthFrame;
+};
+
+static inline int wsm_set_protected_mgmt_policy(struct cw1200_common *priv,
+ struct wsm_protected_mgmt_policy *arg)
+{
+ __le32 val = 0;
+ int ret;
+ if (arg->protectedMgmtEnable)
+ val |= __cpu_to_le32(BIT(0));
+ if (arg->unprotectedMgmtFramesAllowed)
+ val |= __cpu_to_le32(BIT(1));
+ if (arg->encryptionForAuthFrame)
+ val |= __cpu_to_le32(BIT(2));
+ ret = wsm_write_mib(priv, WSM_MIB_ID_PROTECTED_MGMT_POLICY,
+ &val, sizeof(val));
+ return ret;
+}
+
+static inline int wsm_set_block_ack_policy(struct cw1200_common *priv,
+ u8 blockAckTxTidPolicy,
+ u8 blockAckRxTidPolicy)
+{
+ struct {
+ u8 blockAckTxTidPolicy;
+ u8 reserved1;
+ u8 blockAckRxTidPolicy;
+ u8 reserved2;
+ } val = {
+ .blockAckTxTidPolicy = blockAckTxTidPolicy,
+ .blockAckRxTidPolicy = blockAckRxTidPolicy,
+ };
+ return wsm_write_mib(priv, WSM_MIB_ID_BLOCK_ACK_POLICY, &val,
+ sizeof(val));
+}
+
+struct wsm_association_mode {
+ u8 flags; /* WSM_ASSOCIATION_MODE_... */
+ u8 preambleType; /* WSM_JOIN_PREAMBLE_... */
+ u8 greenfieldMode; /* 1 for greenfield */
+ u8 mpduStartSpacing;
+ __le32 basicRateSet;
+};
+
+static inline int wsm_set_association_mode(struct cw1200_common *priv,
+ struct wsm_association_mode *arg)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_SET_ASSOCIATION_MODE, arg,
+ sizeof(*arg));
+}
+
+struct wsm_set_tx_rate_retry_policy_header {
+ u8 numTxRatePolicies;
+ u8 reserved[3];
+} __packed;
+
+struct wsm_set_tx_rate_retry_policy_policy {
+ u8 policyIndex;
+ u8 shortRetryCount;
+ u8 longRetryCount;
+ u8 policyFlags;
+ u8 rateRecoveryCount;
+ u8 reserved[3];
+ __le32 rateCountIndices[3];
+} __packed;
+
+struct wsm_set_tx_rate_retry_policy {
+ struct wsm_set_tx_rate_retry_policy_header hdr;
+ struct wsm_set_tx_rate_retry_policy_policy tbl[8];
+} __packed;
+
+static inline int wsm_set_tx_rate_retry_policy(struct cw1200_common *priv,
+ struct wsm_set_tx_rate_retry_policy *arg)
+{
+ size_t size = sizeof(struct wsm_set_tx_rate_retry_policy_header) +
+ arg->hdr.numTxRatePolicies *
+ sizeof(struct wsm_set_tx_rate_retry_policy_policy);
+ return wsm_write_mib(priv, WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg,
+ size);
+}
+
+/* 4.32 SetEtherTypeDataFrameFilter */
+struct wsm_ether_type_filter_hdr {
+ u8 nrFilters; /* Up to WSM_MAX_FILTER_ELEMENTS */
+ u8 reserved[3];
+} __packed;
+
+struct wsm_ether_type_filter {
+ u8 filterAction; /* WSM_FILTER_ACTION_XXX */
+ u8 reserved;
+ __le16 etherType; /* Type of ethernet frame */
+} __packed;
+
+static inline int wsm_set_ether_type_filter(struct cw1200_common *priv,
+ struct wsm_ether_type_filter_hdr *arg)
+{
+ size_t size = sizeof(struct wsm_ether_type_filter_hdr) +
+ arg->nrFilters * sizeof(struct wsm_ether_type_filter);
+ return wsm_write_mib(priv, WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER,
+ arg, size);
+}
+
+
+/* 4.33 SetUDPPortDataFrameFilter */
+struct wsm_udp_port_filter_hdr {
+ u8 nrFilters; /* Up to WSM_MAX_FILTER_ELEMENTS */
+ u8 reserved[3];
+} __packed;
+
+struct wsm_udp_port_filter {
+ u8 filterAction; /* WSM_FILTER_ACTION_XXX */
+ u8 portType; /* WSM_FILTER_PORT_TYPE_XXX */
+ __le16 udpPort; /* Port number */
+} __packed;
+
+static inline int wsm_set_udp_port_filter(struct cw1200_common *priv,
+ struct wsm_udp_port_filter_hdr *arg)
+{
+ size_t size = sizeof(struct wsm_udp_port_filter_hdr) +
+ arg->nrFilters * sizeof(struct wsm_udp_port_filter);
+ return wsm_write_mib(priv, WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER,
+ arg, size);
+}
+
+/* Undocumented MIBs: */
+/* 4.35 P2PDeviceInfo */
+#define D11_MAX_SSID_LEN (32)
+
+struct wsm_p2p_device_type {
+ __le16 categoryId;
+ u8 oui[4];
+ __le16 subCategoryId;
+} __packed;
+
+struct wsm_p2p_device_info {
+ struct wsm_p2p_device_type primaryDevice;
+ u8 reserved1[3];
+ u8 devNameSize;
+ u8 localDevName[D11_MAX_SSID_LEN];
+ u8 reserved2[3];
+ u8 numSecDevSupported;
+ struct wsm_p2p_device_type secondaryDevices[0];
+} __packed;
+
+/* 4.36 SetWCDMABand - WO */
+struct wsm_cdma_band {
+ u8 WCDMA_Band;
+ u8 reserved[3];
+} __packed;
+
+/* 4.37 GroupTxSequenceCounter - RO */
+struct wsm_group_tx_seq {
+ __le32 bits_47_16;
+ __le16 bits_15_00;
+ __le16 reserved;
+} __packed;
+
+/* 4.39 SetHtProtection - WO */
+#define WSM_DUAL_CTS_PROT_ENB (1 << 0)
+#define WSM_NON_GREENFIELD_STA_PRESENT (1 << 1)
+#define WSM_HT_PROT_MODE__NO_PROT (0 << 2)
+#define WSM_HT_PROT_MODE__NON_MEMBER (1 << 2)
+#define WSM_HT_PROT_MODE__20_MHZ (2 << 2)
+#define WSM_HT_PROT_MODE__NON_HT_MIXED (3 << 2)
+#define WSM_LSIG_TXOP_PROT_FULL (1 << 4)
+#define WSM_LARGE_L_LENGTH_PROT (1 << 5)
+
+struct wsm_ht_protection {
+ __le32 flags;
+} __packed;
+
+/* 4.40 GPIO Command - R/W */
+#define WSM_GPIO_COMMAND_SETUP 0
+#define WSM_GPIO_COMMAND_READ 1
+#define WSM_GPIO_COMMAND_WRITE 2
+#define WSM_GPIO_COMMAND_RESET 3
+#define WSM_GPIO_ALL_PINS 0xFF
+
+struct wsm_gpio_command {
+ u8 GPIO_Command;
+ u8 pin;
+ __le16 config;
+} __packed;
+
+/* 4.41 TSFCounter - RO */
+struct wsm_tsf_counter {
+ __le64 TSF_Counter;
+} __packed;
+
+/* 4.43 Keep alive period */
+struct wsm_keep_alive_period {
+ __le16 keepAlivePeriod;
+ u8 reserved[2];
+} __packed;
+
+static inline int wsm_keep_alive_period(struct cw1200_common *priv,
+ int period)
+{
+ struct wsm_keep_alive_period arg = {
+ .keepAlivePeriod = __cpu_to_le16(period),
+ };
+ return wsm_write_mib(priv, WSM_MIB_ID_KEEP_ALIVE_PERIOD,
+ &arg, sizeof(arg));
+}
+
+/* BSSID filtering */
+struct wsm_set_bssid_filtering {
+ u8 filter;
+ u8 reserved[3];
+} __packed;
+
+static inline int wsm_set_bssid_filtering(struct cw1200_common *priv,
+ bool enabled)
+{
+ struct wsm_set_bssid_filtering arg = {
+ .filter = !enabled,
+ };
+ return wsm_write_mib(priv, WSM_MIB_ID_DISABLE_BSSID_FILTER,
+ &arg, sizeof(arg));
+}
+
+/* Multicast filtering - 4.5 */
+struct wsm_multicast_filter {
+ __le32 enable;
+ __le32 numOfAddresses;
+ u8 macAddress[WSM_MAX_GRP_ADDRTABLE_ENTRIES][ETH_ALEN];
+} __packed;
+
+static inline int wsm_set_multicast_filter(struct cw1200_common *priv,
+ struct wsm_multicast_filter *fp)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE,
+ fp, sizeof(*fp));
+}
+
+/* ARP IPv4 filtering - 4.10 */
+struct wsm_arp_ipv4_filter {
+ __le32 enable;
+ __be32 ipv4Address[WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES];
+} __packed;
+
+static inline int wsm_set_arp_ipv4_filter(struct cw1200_common *priv,
+ struct wsm_arp_ipv4_filter *fp)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE,
+ fp, sizeof(*fp));
+}
+
+/* P2P Power Save Mode Info - 4.31 */
+struct wsm_p2p_ps_modeinfo {
+ u8 oppPsCTWindow;
+ u8 count;
+ u8 reserved;
+ u8 dtimCount;
+ __le32 duration;
+ __le32 interval;
+ __le32 startTime;
+} __packed;
+
+static inline int wsm_set_p2p_ps_modeinfo(struct cw1200_common *priv,
+ struct wsm_p2p_ps_modeinfo *mi)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO,
+ mi, sizeof(*mi));
+}
+
+static inline int wsm_get_p2p_ps_modeinfo(struct cw1200_common *priv,
+ struct wsm_p2p_ps_modeinfo *mi)
+{
+ return wsm_read_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO,
+ mi, sizeof(*mi));
+}
+
+/* UseMultiTxConfMessage */
+
+static inline int wsm_use_multi_tx_conf(struct cw1200_common *priv,
+ bool enabled)
+{
+ __le32 arg = enabled ? __cpu_to_le32(1) : 0;
+
+ return wsm_write_mib(priv, WSM_MIB_USE_MULTI_TX_CONF,
+ &arg, sizeof(arg));
+}
+
+
+/* 4.26 SetUapsdInformation */
+struct wsm_uapsd_info {
+ __le16 uapsdFlags;
+ __le16 minAutoTriggerInterval;
+ __le16 maxAutoTriggerInterval;
+ __le16 autoTriggerStep;
+};
+
+static inline int wsm_set_uapsd_info(struct cw1200_common *priv,
+ struct wsm_uapsd_info *arg)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_SET_UAPSD_INFORMATION,
+ arg, sizeof(*arg));
+}
+
+/* 4.22 OverrideInternalTxRate */
+struct wsm_override_internal_txrate {
+ u8 internalTxRate;
+ u8 nonErpInternalTxRate;
+ u8 reserved[2];
+} __packed;
+
+static inline int wsm_set_override_internal_txrate(struct cw1200_common *priv,
+ struct wsm_override_internal_txrate *arg)
+{
+ return wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE,
+ arg, sizeof(*arg));
+}
+
+/* ******************************************************************** */
+/* WSM TX port control */
+
+void wsm_lock_tx(struct cw1200_common *priv);
+void wsm_lock_tx_async(struct cw1200_common *priv);
+bool wsm_flush_tx(struct cw1200_common *priv);
+void wsm_unlock_tx(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* WSM / BH API */
+
+int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len);
+int wsm_handle_rx(struct cw1200_common *priv, int id, struct wsm_hdr *wsm,
+ struct sk_buff **skb_p);
+
+/* ******************************************************************** */
+/* wsm_buf API */
+
+struct wsm_buf {
+ u8 *begin;
+ u8 *data;
+ u8 *end;
+};
+
+void wsm_buf_init(struct wsm_buf *buf);
+void wsm_buf_deinit(struct wsm_buf *buf);
+
+/* ******************************************************************** */
+/* wsm_cmd API */
+
+struct wsm_cmd {
+ spinlock_t lock;
+ int done;
+ u8 *ptr;
+ size_t len;
+ void *arg;
+ int ret;
+ u16 cmd;
+};
+
+/* ******************************************************************** */
+/* WSM TX buffer access */
+
+int wsm_get_tx(struct cw1200_common *priv, u8 **data,
+ size_t *tx_len, int *burst);
+void wsm_txed(struct cw1200_common *priv, u8 *data);
+
+/* ******************************************************************** */
+/* Queue mapping: WSM <---> linux */
+/* Linux: VO VI BE BK */
+/* WSM: BE BK VI VO */
+
+static inline u8 wsm_queue_id_to_linux(u8 queueId)
+{
+ static const u8 queue_mapping[] = {
+ 2, 3, 1, 0
+ };
+ return queue_mapping[queueId];
+}
+
+static inline u8 wsm_queue_id_to_wsm(u8 queueId)
+{
+ static const u8 queue_mapping[] = {
+ 3, 2, 0, 1
+ };
+ return queue_mapping[queueId];
+}
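+
+/*
+ * Example (illustrative): with the tables above, Linux VO (0) maps to
+ * WSM queue 3 and back, and in general
+ * wsm_queue_id_to_wsm(wsm_queue_id_to_linux(q)) == q for q = 0..3.
+ */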
+
+#endif /* CW1200_HWIO_H_INCLUDED */
diff --git a/drivers/staging/mmio/Kconfig b/drivers/staging/mmio/Kconfig
new file mode 100644
index 00000000000..d6a5a9ad918
--- /dev/null
+++ b/drivers/staging/mmio/Kconfig
@@ -0,0 +1,11 @@
+
+config U8500_MMIO
+ bool "ST-Ericsson MMIO (Camera) Driver"
+ depends on ARCH_U8500
+ help
+ Enables the ST-Ericsson MMIO (Camera) Driver
+
+config U5500_MMIO
+ bool "ST-Ericsson U5500 MMIO (Camera) Driver"
+ depends on UX500_SOC_DB5500
+
diff --git a/drivers/staging/mmio/Makefile b/drivers/staging/mmio/Makefile
new file mode 100644
index 00000000000..bec2a6efe63
--- /dev/null
+++ b/drivers/staging/mmio/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_U8500_MMIO) := st_mmio.o
diff --git a/drivers/staging/mmio/mmio.h b/drivers/staging/mmio/mmio.h
new file mode 100644
index 00000000000..1c6f68e3556
--- /dev/null
+++ b/drivers/staging/mmio/mmio.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Joakim Axelsson <joakim.axelsson@stericsson.com> for ST-Ericsson
+ * Author: Rajat Verma <rajat.verma@stericsson.com> for ST-Ericsson
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef MMIO_H
+#define MMIO_H
+
+#include <linux/ioctl.h>
+
+#define MMIO_NAME "mmio_camera"
+#define SRA_SUPPORT 1
+
+#ifdef SRA_SUPPORT
+#define SREG_16_BIT (0x1)
+#define SREG_32_BIT (0x2)
+#endif
+/* Kernel side interface for MMIO */
+/* Which camera is currently active */
+enum camera_slot_t {
+ PRIMARY_CAMERA = 0,
+ SECONDARY_CAMERA,
+ CAMERA_SLOT_END
+};
+struct mmio_gpio {
+ int gpio; /* Set to zero if not in use */
+ int active_high;/* Set if pin is active high */
+ int udelay; /* Time to wait when activating the pin, in usec */
+};
+enum mmio_select_i2c_t {
+ MMIO_ACTIVATE_IPI2C2 = 0,
+ MMIO_ACTIVATE_I2C_HOST,
+ MMIO_DEACTIVATE_I2C
+};
+
+enum mmio_select_xshutdown_t {
+ MMIO_ENABLE_XSHUTDOWN_FW = 0,
+ MMIO_ENABLE_XSHUTDOWN_HOST,
+ MMIO_DISABLE_XSHUTDOWN
+};
+struct mmio_platform_data {
+ struct device *dev;
+ enum camera_slot_t camera_slot; /* Which camera is currently used,
+ * Primary/Secondary */
+ void *extra; /* Board's private data structure
+ * placeholder */
+ int reset_ipgpio[CAMERA_SLOT_END]; /* Contains logical IP GPIO for
+ * reset pin */
+ int sia_base;
+ int cr_base;
+ int (*platform_init)(struct mmio_platform_data *pdata);
+ void (*platform_exit)(struct mmio_platform_data *pdata);
+ int (*power_enable)(struct mmio_platform_data *pdata);
+ void (*power_disable)(struct mmio_platform_data *pdata);
+ int (*config_xshutdown_pins)(struct mmio_platform_data *pdata,
+ enum mmio_select_xshutdown_t select, int is_active_high);
+ int (*config_i2c_pins)(struct mmio_platform_data *pdata,
+ enum mmio_select_i2c_t select);
+ int (*clock_enable)(struct mmio_platform_data *pdata);
+ void (*clock_disable)(struct mmio_platform_data *pdata);
+ void (*set_xshutdown)(struct mmio_platform_data *pdata);
+};
+
+#define USER_SIDE_INTERFACE 1
+/* User side is only allowed to access code in USER_SIDE_INTERFACE block */
+#ifdef USER_SIDE_INTERFACE
+enum mmio_bool_t {
+ MMIO_FALSE = 0,
+ MMIO_TRUE = !MMIO_FALSE,
+ MMIO_BOOL_MAX = 0x7FFFFFFF
+};
+
+struct xshutdown_info_t {
+ int ip_gpio;
+ int camera_function;
+};
+
+struct xp70_fw_t {
+ void __iomem *addr_sdram_ext;
+ void __iomem *addr_esram_ext;
+ void __iomem *addr_split;
+ void __iomem *addr_data;
+ unsigned int size_sdram_ext;
+ unsigned int size_esram_ext;
+ unsigned int size_split;
+ unsigned int size_data;
+};
+
+struct isp_write_t {
+ unsigned long t1_dest;
+ unsigned long *data;
+ unsigned long count;
+};
+
+struct trace_buf_t {
+ void *address;
+ unsigned int size;
+};
+
+#ifdef SRA_SUPPORT
+struct s_reg {
+ unsigned int addr;
+ unsigned int value;
+ unsigned int mask;
+};
+
+struct s_reg_list {
+ unsigned int access_mode;
+ unsigned int entries;
+ struct s_reg *s_regs_p;
+};
+#endif
+struct mmio_input_output_t {
+ union {
+ enum mmio_bool_t power_on;
+ struct xp70_fw_t xp70_fw;
+ struct isp_write_t isp_write;
+ unsigned int addr_to_map;
+ struct xshutdown_info_t xshutdown_info;
+ enum camera_slot_t camera_slot;
+ struct trace_buf_t trace_buf;
+#ifdef SRA_SUPPORT
+ struct s_reg_list s_reg_list;
+#endif
+ } mmio_arg;
+};
+
+#define MMIO_TRUE (1)
+#define MMIO_FALSE (0)
+#define MMIO_INVALID (~0)
+
+/* Xshutdown from host takes two arguments */
+#define MMIO_XSHUTDOWN_ENABLE (0x1)
+#define MMIO_XSHUTDOWN_ACTIVE_HIGH (0x2)
+
+#define MMIO_MAGIC_NUMBER 0x15
+
+#define MMIO_CAM_INITBOARD _IOW(MMIO_MAGIC_NUMBER, 1,\
+struct mmio_input_output_t*)
+#define MMIO_CAM_PWR_SENSOR _IOW(MMIO_MAGIC_NUMBER, 2,\
+struct mmio_input_output_t*)
+#define MMIO_CAM_SET_EXT_CLK _IOW(MMIO_MAGIC_NUMBER, 3,\
+struct mmio_input_output_t*)
+#define MMIO_CAM_SET_PRI_HWIF _IO(MMIO_MAGIC_NUMBER, 4)
+#define MMIO_CAM_SET_SEC_HWIF _IO(MMIO_MAGIC_NUMBER, 5)
+#define MMIO_CAM_INITMMDSPTIMER _IO(MMIO_MAGIC_NUMBER, 6)
+#define MMIO_CAM_LOAD_XP70_FW _IOW(MMIO_MAGIC_NUMBER, 7,\
+struct mmio_input_output_t*)
+#define MMIO_CAM_MAP_STATS_AREA _IOWR(MMIO_MAGIC_NUMBER, 8,\
+struct mmio_input_output_t*)
+#define MMIO_ACTIVATE_I2C2 _IOW(MMIO_MAGIC_NUMBER, 9, int*)
+#define MMIO_ENABLE_XSHUTDOWN_FROM_HOST _IOW(MMIO_MAGIC_NUMBER, 10, int*)
+#define MMIO_CAM_ISP_WRITE _IOW(MMIO_MAGIC_NUMBER, 11,\
+struct mmio_input_output_t*)
+#define MMIO_CAM_GET_IP_GPIO _IOWR(MMIO_MAGIC_NUMBER, 12,\
+struct mmio_input_output_t*)
+#define MMIO_CAM_DESINITBOARD _IO(MMIO_MAGIC_NUMBER, 13)
+#define MMIO_CAM_SET_TRACE_BUFFER _IOW(MMIO_MAGIC_NUMBER, 14,\
+struct mmio_input_output_t*)
+
+#ifdef SRA_SUPPORT
+#define MMIO_CAM_READ_REGS _IOWR(MMIO_MAGIC_NUMBER, 15,\
+struct mmio_input_output_t*)
+#define MMIO_CAM_MODIFY_REGS _IOWR(MMIO_MAGIC_NUMBER, 16,\
+struct mmio_input_output_t*)
+#define MMIO_CAM_WRITE_REGS _IOWR(MMIO_MAGIC_NUMBER, 17,\
+struct mmio_input_output_t*)
+#endif
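+
+/*
+ * Illustrative usage sketch (not part of the original interface): a user
+ * space client would typically open the misc device registered under
+ * MMIO_NAME and issue the ioctls above, e.g.:
+ *
+ *	struct mmio_input_output_t io = {0};
+ *	int fd = open("/dev/" MMIO_NAME, O_RDWR);
+ *
+ *	io.mmio_arg.camera_slot = PRIMARY_CAMERA;
+ *	ioctl(fd, MMIO_CAM_INITBOARD, &io);
+ *	io.mmio_arg.power_on = MMIO_TRUE;
+ *	ioctl(fd, MMIO_CAM_PWR_SENSOR, &io);
+ *
+ * The device node path is an assumption based on MMIO_NAME; the actual
+ * node name depends on how the driver registers its misc device.
+ */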
+
+#endif /* USER_SIDE_INTERFACE */
+
+#endif
+/* MMIO_H */
diff --git a/drivers/staging/mmio/st_mmio.c b/drivers/staging/mmio/st_mmio.c
new file mode 100644
index 00000000000..a006c55c544
--- /dev/null
+++ b/drivers/staging/mmio/st_mmio.c
@@ -0,0 +1,1173 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pankaj Chauhan <pankaj.chauhan@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#include <linux/delay.h>
+#include <linux/init.h> /* Initialisation support */
+#include <linux/module.h> /* Module support */
+#include <linux/kernel.h> /* Kernel support */
+#include <linux/version.h> /* Kernel version */
+#include <linux/fs.h> /* File operations (fops) defines */
+#include <linux/errno.h> /* Defines standard err codes */
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/ratelimit.h>
+#include "mmio.h"
+
+#define ISP_REGION_IO (0xE0000000)
+#define SIA_ISP_REG_ADDR (0x521E4)
+#define SIA_BASE_ADDR (0x54000)
+#define SIA_ISP_MEM (0x56000)
+#define SIA_TIMER_ITC (0x5BC00)
+#define SIA_ISP_MCU_SYS_SIZE (0x100000)
+#define SIA_ISP_MEM_PAGE_REG (0x54070)
+#define SIA_ISP_MCU_SYS_ADDR0_OFFSET (SIA_BASE_ADDR + 0x40)
+#define SIA_ISP_MCU_SYS_SIZE0_OFFSET (SIA_BASE_ADDR + 0x42)
+#define SIA_ISP_MCU_SYS_ADDR1_OFFSET (SIA_ISP_MCU_SYS_ADDR0_OFFSET + 0x04)
+#define SIA_ISP_MCU_SYS_SIZE1_OFFSET (SIA_ISP_MCU_SYS_SIZE0_OFFSET + 0x04)
+#define SIA_ISP_MCU_IO_ADDR0_HI (SIA_BASE_ADDR + 0x60)
+
+/* HTimer enable in CR register */
+#define CR_REG0_HTIMEN (1 << 26)
+#define PICTOR_IN_XP70_L2_MEM_BASE_ADDR (0x40000)
+#define PICTOR_IN_XP70_TCDM_MEM_BASE_ADDR (0x60000)
+#define L2_PSRAM_MEM_SIZE (0x10000)
+
+#define FW_TO_HOST_ADDR_MASK (0x00001FFF)
+#define FW_TO_HOST_ADDR_SHIFT (0xD)
+#define FW_TO_HOST_CLR_MASK (0x3F)
+#define PHY_TO_ISP_MCU_IO_ADDR0_HI(x) (((x) >> 24) << 8)
+#define XP70_ADDR_MASK (0x00FFFFFF)
+
+#define CLOCK_ENABLE_DELAY (0x2)
+
+#define MAX_PRCMU_QOS_APP (0x64)
+
+#define ISP_WRITE_DATA_SIZE (0x4)
+
+#define clrbits32(_addr, _clear) \
+ writel(readl(_addr) & ~(u32)(_clear), _addr)
+#define setbits32(_addr, _set) \
+ writel(readl(_addr) | (u32)(_set), _addr)
+
+#define XP70_BLOCK_SIZE 124
+#define XP70_NB_BLOCK 50
+/*
+ * For 30 fps video there is a 33 msec delay between every two frames.
+ * The MMIO driver reads traces from the trace buffer every
+ * XP70_TIMEOUT_MSEC. If traces are not read from the trace buffer in
+ * time, the camera firmware will start overwriting the traces, as the
+ * size of the trace buffer is limited.
+ */
+#define XP70_TIMEOUT_MSEC 30
+#define XP70_DEFAULT_MSG_ID (0xCDCDCDCD)
+#define XP70_MAX_BLOCK_ID (0xFFFFFFFF)
+
+#define upper_16_bits(n) ((u16)((u32)(n) >> 16))
+
+struct trace_block {
+ u32 msg_id;
+ char data[XP70_BLOCK_SIZE];
+};
+
+struct mmio_trace {
+ u32 nb_block;
+ u32 block_size;
+ u32 block_id;
+ u32 overwrite_count;
+ struct trace_block block[XP70_NB_BLOCK];
+};
+
+struct trace_buffer_status {
+ u32 prev_overwrite_count;
+ u32 prev_block_id;
+};
+
+struct mmio_info {
+ struct mmio_platform_data *pdata; /* Config from board */
+ struct device *dev; /* My device */
+ /* Runtime variables */
+ struct miscdevice misc_dev;
+ void __iomem *siabase;
+ void __iomem *crbase;
+ /* States */
+ int xshutdown_enabled;
+ int xshutdown_is_active_high;
+ /* tracing */
+ struct trace_buffer_status trace_status;
+ struct mmio_trace *trace_buffer;
+ struct delayed_work trace_work;
+ int trace_allowed;
+ struct mutex lock;
+};
+
+/*
+ * The one and only private data holder. Default inited to NULL.
+ * Declare it here so no code above can use it directly.
+ */
+static struct mmio_info *info;
+
+/*
+ * This function converts a given logical memory region size
+ * to the appropriate ISP_MCU_SYS_SIZEx register value.
+ */
+static int get_mcu_sys_size(u32 size, u32 *val)
+{
+ int ret = 0;
+
+ if (size > 0 && size <= SZ_4K)
+ *val = 4;
+ else if (size > SZ_4K && size <= SZ_8K)
+ *val = 5;
+ else if (size > SZ_8K && size <= SZ_16K)
+ *val = 6;
+ else if (size > SZ_16K && size <= SZ_32K)
+ *val = 7;
+ else if (size > SZ_32K && size <= SZ_64K)
+ *val = 0;
+ else if (size > SZ_64K && size <= SZ_1M)
+ *val = 1;
+ else if (size > SZ_1M && size <= SZ_16M)
+ *val = 2;
+ else if (size > SZ_16M && size <= SZ_256M)
+ *val = 3;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int mmio_cam_pwr_sensor(struct mmio_info *info, int on)
+{
+ int err = 0;
+
+ if (on) {
+ err = info->pdata->power_enable(info->pdata);
+
+ if (err)
+ dev_err(info->dev,
+ "power_enable failed. err = %d\n", err);
+
+ /*
+ * When switching from secondary YUV camera
+ * to primary Raw Bayer Camera, a hang is observed without the
+		 * below delay. I2C access failures are observed while
+		 * communicating with the primary camera sensor, indicating
+		 * the camera sensor was not powered up correctly.
+ */
+ mdelay(CLOCK_ENABLE_DELAY);
+ } else {
+ info->pdata->power_disable(info->pdata);
+ }
+
+ return err;
+}
+
+static int mmio_cam_control_clocks(struct mmio_info *info,
+ enum mmio_bool_t power_on)
+{
+ int err = 0;
+
+ if (power_on) {
+ err = info->pdata->clock_enable(info->pdata);
+
+ if (err)
+ dev_err(info->dev,
+ "clock_enable failed, err = %d\n",
+ err);
+ } else {
+ info->pdata->clock_disable(info->pdata);
+ }
+
+ return err;
+}
+
+static int mmio_cam_set_pri_hwif(struct mmio_info *info)
+{
+ if (info->xshutdown_enabled)
+ info->pdata->set_xshutdown(info->pdata);
+
+ return 0;
+}
+
+static int mmio_cam_set_sec_hwif(struct mmio_info *info)
+{
+ if (info->xshutdown_enabled)
+ info->pdata->set_xshutdown(info->pdata);
+
+ return 0;
+}
+
+static int mmio_cam_init_mmdsp_timer(struct mmio_info *info)
+{
+ /* Disabling Accelerators timers */
+ clrbits32(info->crbase, CR_REG0_HTIMEN);
+ /* Write MMDSPTimer */
+ writel(0, info->siabase + SIA_TIMER_ITC);
+ /* Enabling Accelerators timers */
+ setbits32(info->crbase, CR_REG0_HTIMEN);
+ return 0;
+}
+
+static u32 t1_to_arm(u32 t1_addr, void __iomem *smia_base_address,
+ u16 *p_mem_page)
+{
+ u16 mem_page_update = 0;
+ mem_page_update = (t1_addr >> FW_TO_HOST_ADDR_SHIFT) &
+ FW_TO_HOST_CLR_MASK;
+
+ if (mem_page_update != *p_mem_page) {
+ /* Update sia_mem_page register */
+ dev_dbg(info->dev, "mem_page_update=0x%x, mem_page=0x%x\n",
+ mem_page_update, *p_mem_page);
+ writew(mem_page_update, smia_base_address +
+ SIA_ISP_MEM_PAGE_REG);
+ *p_mem_page = mem_page_update;
+ }
+
+ return SIA_ISP_MEM + (t1_addr & FW_TO_HOST_ADDR_MASK);
+}
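+
+/*
+ * Worked example (illustrative): for t1_addr = PICTOR_IN_XP70_L2_MEM_BASE_ADDR
+ * (0x40000), the page select is (0x40000 >> 13) & 0x3F = 0x20 and the
+ * returned host offset is SIA_ISP_MEM + (0x40000 & 0x1FFF) = SIA_ISP_MEM.
+ */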
+
+static int copy_user_buffer(void __iomem **dest_buf,
+ void __iomem *src_buf, u32 size)
+{
+ int err = 0;
+
+ if (!src_buf)
+ return -EINVAL;
+
+ *dest_buf = kmalloc(size, GFP_KERNEL);
+
+ if (!*dest_buf) {
+ err = -ENOMEM;
+ goto nomem;
+ }
+
+ if (copy_from_user(*dest_buf, src_buf, size)) {
+ err = -EFAULT;
+ goto cp_failed;
+ }
+
+ return err;
+cp_failed:
+ kfree(*dest_buf);
+nomem:
+ return err;
+}
+static int mmio_load_xp70_fw(struct mmio_info *info,
+ struct xp70_fw_t *xp70_fw)
+{
+ u32 i = 0;
+ u32 offset = 0;
+ u32 itval = 0;
+ u16 mem_page = 0;
+ void __iomem *addr_split = NULL;
+ void __iomem *addr_data = NULL;
+ int err = 0;
+
+ if (xp70_fw->size_split != 0) {
+ err = copy_user_buffer(&addr_split, xp70_fw->addr_split,
+ xp70_fw->size_split);
+
+ if (err)
+ goto err_exit;
+
+ writel(0x0, info->siabase + SIA_ISP_REG_ADDR);
+
+ /* Put the low 64k IRP firmware in ISP MCU L2 PSRAM */
+ for (i = PICTOR_IN_XP70_L2_MEM_BASE_ADDR;
+ i < (PICTOR_IN_XP70_L2_MEM_BASE_ADDR +
+ L2_PSRAM_MEM_SIZE); i = i + 2) {
+ itval = t1_to_arm(i, info->siabase, &mem_page);
+ itval = ((u32) info->siabase) + itval;
+ /* Copy fw in L2 */
+ writew((*((u16 *) addr_split + offset++)), itval);
+ }
+
+ kfree(addr_split);
+ }
+
+ if (xp70_fw->size_data != 0) {
+ mem_page = 0;
+ offset = 0;
+ err = copy_user_buffer(&addr_data, xp70_fw->addr_data,
+ xp70_fw->size_data);
+
+ if (err)
+ goto err_exit;
+
+ writel(0x0, info->siabase + SIA_ISP_REG_ADDR);
+
+ for (i = PICTOR_IN_XP70_TCDM_MEM_BASE_ADDR;
+ i < (PICTOR_IN_XP70_TCDM_MEM_BASE_ADDR +
+ (xp70_fw->size_data)); i = i + 2) {
+ itval = t1_to_arm(i, info->siabase, &mem_page);
+ itval = ((u32) info->siabase) + itval;
+ /* Copy fw data in TCDM */
+ writew((*((u16 *) addr_data + offset++)), itval);
+ }
+
+ kfree(addr_data);
+ }
+
+ if (xp70_fw->size_esram_ext != 0) {
+ /*
+ * ISP_MCU_SYS_ADDRx XP70 register (@ of ESRAM where the
+		 * external code has been loaded)
+ */
+ writew(upper_16_bits(xp70_fw->addr_esram_ext),
+ info->siabase + SIA_ISP_MCU_SYS_ADDR0_OFFSET);
+ /* ISP_MCU_SYS_SIZEx XP70 register (size of the code =64KB) */
+ writew(0x0, info->siabase + SIA_ISP_MCU_SYS_SIZE0_OFFSET);
+ }
+
+ if (xp70_fw->size_sdram_ext != 0) {
+ /*
+ * ISP_MCU_SYS_ADDRx XP70 register (@ of SDRAM where the
+		 * external code has been loaded)
+ */
+ writew(upper_16_bits(xp70_fw->addr_sdram_ext),
+ info->siabase + SIA_ISP_MCU_SYS_ADDR1_OFFSET);
+ /* ISP_MCU_SYS_SIZEx XP70 register */
+ err = get_mcu_sys_size(xp70_fw->size_sdram_ext, &itval);
+
+ if (err)
+ goto err_exit;
+
+ writew(itval, info->siabase + SIA_ISP_MCU_SYS_SIZE1_OFFSET);
+ }
+
+ return 0;
+err_exit:
+ dev_err(info->dev, "Loading XP70 fw failed\n");
+ return -EFAULT;
+}
+
+static int mmio_map_statistics_mem_area(struct mmio_info *info,
+ void __iomem *addr_to_map)
+{
+ u16 value;
+ BUG_ON(addr_to_map == NULL);
+ /* 16 Mbyte aligned page */
+ value = PHY_TO_ISP_MCU_IO_ADDR0_HI(*((u32 *)addr_to_map));
+ writew(value, info->siabase + SIA_ISP_MCU_IO_ADDR0_HI);
+ /* Return the address in the XP70 address space */
+ *((u32 *)addr_to_map) = (*((u32 *)addr_to_map) & XP70_ADDR_MASK) |
+ ISP_REGION_IO;
+ return 0;
+}
+
+static int mmio_activate_i2c2(struct mmio_info *info, unsigned long enable)
+{
+ int err = 0;
+
+ switch (enable) {
+ case MMIO_ACTIVATE_I2C_HOST:
+ /* Select I2C-2 */
+ err = info->pdata->config_i2c_pins(info->pdata,
+ MMIO_ACTIVATE_I2C_HOST);
+
+ if (err) {
+ dev_err(info->dev, "Failed to Enable I2C-2, err %d\n",
+ err);
+ goto out;
+ }
+
+ break;
+ case MMIO_ACTIVATE_IPI2C2:
+ /* Select IPI2C */
+ err = info->pdata->config_i2c_pins(info->pdata,
+ MMIO_ACTIVATE_IPI2C2);
+
+ if (err) {
+ dev_err(info->dev, "Failed to Enable IPI2C, err %d\n",
+ err);
+ goto out;
+ }
+
+ break;
+ case MMIO_DEACTIVATE_I2C: {
+ info->pdata->config_i2c_pins(info->pdata, MMIO_DEACTIVATE_I2C);
+ }
+ break;
+ default:
+ dev_warn(info->dev, "Invalid I2C2 config\n");
+ err = -EINVAL;
+ break;
+ }
+
+out:
+ return err;
+}
+
+static int mmio_enable_xshutdown_from_host(struct mmio_info *info,
+ unsigned long enable)
+{
+ int err = 0;
+ info->xshutdown_is_active_high = enable & MMIO_XSHUTDOWN_ACTIVE_HIGH;
+
+ if (enable & MMIO_XSHUTDOWN_ENABLE) {
+ err = info->pdata->config_xshutdown_pins(info->pdata,
+ MMIO_ENABLE_XSHUTDOWN_HOST, enable &
+ MMIO_XSHUTDOWN_ACTIVE_HIGH);
+ } else {
+ info->pdata->config_xshutdown_pins(info->pdata,
+ MMIO_ENABLE_XSHUTDOWN_FW, -1);
+ /*
+ * XShutdown is controlled by firmware, initial output value is
+ * provided by firmware
+ */
+ }
+
+ info->xshutdown_enabled = enable & MMIO_XSHUTDOWN_ENABLE;
+ return 0;
+}
+
+static int mmio_cam_initboard(struct mmio_info *info)
+{
+ int err = 0;
+ err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, MMIO_NAME,
+ MAX_PRCMU_QOS_APP);
+
+ if (err) {
+ dev_err(info->dev, "Error adding PRCMU QoS requirement %d\n",
+ err);
+ goto out;
+ }
+
+ /* Configure xshutdown to be disabled by default */
+ err = mmio_enable_xshutdown_from_host(info, 0);
+
+ if (err)
+ goto out;
+
+ /* Enable IPI2C */
+ err = mmio_activate_i2c2(info, MMIO_ACTIVATE_IPI2C2);
+out:
+ return err;
+}
+
+static int mmio_cam_desinitboard(struct mmio_info *info)
+{
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, MMIO_NAME);
+ return 0;
+}
+
+static int mmio_isp_write(struct mmio_info *info,
+ struct isp_write_t *isp_write_p)
+{
+ int err = 0, i;
+ void __iomem *data = NULL;
+ void __iomem *addr = NULL;
+ u16 mem_page = 0;
+
+ if (!isp_write_p->count) {
+ dev_warn(info->dev, "no data to write to isp\n");
+ return -EINVAL;
+ }
+
+ err = copy_user_buffer(&data, isp_write_p->data,
+ isp_write_p->count * ISP_WRITE_DATA_SIZE);
+
+ if (err)
+ goto out;
+
+ for (i = 0; i < isp_write_p->count; i++) {
+ addr = (void *)(info->siabase + t1_to_arm(isp_write_p->t1_dest
+ + ISP_WRITE_DATA_SIZE * i,
+ info->siabase, &mem_page));
+ *((u32 *)addr) = *((u32 *)data + i);
+ }
+
+ kfree(data);
+out:
+ return err;
+}
+
+static int mmio_set_trace_buffer(struct mmio_info *info,
+ struct trace_buf_t *buf)
+{
+ u32 i;
+ int ret = 0;
+
+ if (info->trace_allowed != 1) {
+ dev_warn(info->dev, "trace disabled in kernel\n");
+ ret = -EPERM;
+ goto out;
+ }
+
+ if (!buf->size || !buf->address
+ || buf->size < sizeof(struct mmio_trace)) {
+ dev_err(info->dev, "invalid xp70 trace buffer\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&info->lock);
+ if (info->trace_buffer) {
+ dev_info(info->dev, "unmap old buffer");
+ iounmap(info->trace_buffer);
+ info->trace_buffer = NULL;
+ }
+
+ info->trace_buffer = ioremap((u32)buf->address, buf->size);
+
+ if (!info->trace_buffer) {
+ dev_err(info->dev, "failed to map trace buffer\n");
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+	dev_info(info->dev, "xp70 overwrite_cnt=%d (0x%x) blk_id=%d (0x%x)\n",
+		 info->trace_buffer->overwrite_count,
+		 info->trace_buffer->overwrite_count,
+		 info->trace_buffer->block_id, info->trace_buffer->block_id);
+#ifndef CAM_SHARED_MEM_DEBUG
+
+	/* Reset the allocated buffer contents */
+	for (i = 0; i < XP70_NB_BLOCK; i++)
+		info->trace_buffer->block[i].msg_id = XP70_DEFAULT_MSG_ID;
+
+#endif /* CAM_SHARED_MEM_DEBUG */
+ info->trace_status.prev_overwrite_count = 0;
+ info->trace_status.prev_block_id = 0;
+
+ /* schedule work */
+ if (!schedule_delayed_work(&info->trace_work,
+ msecs_to_jiffies(XP70_TIMEOUT_MSEC)))
+ dev_err(info->dev, "failed to schedule work\n");
+
+out_unlock:
+ mutex_unlock(&info->lock);
+out:
+ return ret;
+}
+
+static long mmio_ioctl(struct file *filp, u32 cmd,
+ unsigned long arg)
+{
+ struct mmio_input_output_t data;
+ int no_of_bytes;
+ int enable;
+ int ret = 0;
+ struct mmio_info *info = (struct mmio_info *)filp->private_data;
+ BUG_ON(info == NULL);
+
+ switch (cmd) {
+ case MMIO_CAM_INITBOARD:
+ no_of_bytes = sizeof(struct mmio_input_output_t);
+ memset(&data, 0, sizeof(struct mmio_input_output_t));
+
+ if (copy_from_user(&data, (struct mmio_input_output_t *)arg,
+ no_of_bytes)) {
+ dev_err(info->dev,
+ "Copy from userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ info->pdata->camera_slot = data.mmio_arg.camera_slot;
+ ret = mmio_cam_initboard(info);
+ break;
+ case MMIO_CAM_DESINITBOARD:
+ ret = mmio_cam_desinitboard(info);
+ break;
+ case MMIO_CAM_PWR_SENSOR:
+ no_of_bytes = sizeof(struct mmio_input_output_t);
+ memset(&data, 0, sizeof(struct mmio_input_output_t));
+
+ if (copy_from_user
+ (&data, (struct mmio_input_output_t *)arg,
+ no_of_bytes)) {
+ dev_err(info->dev,
+ "Copy from userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mmio_cam_pwr_sensor(info, data.mmio_arg.power_on);
+ break;
+ case MMIO_CAM_SET_EXT_CLK:
+ no_of_bytes = sizeof(struct mmio_input_output_t);
+ memset(&data, 0, sizeof(struct mmio_input_output_t));
+
+ if (copy_from_user
+ (&data, (struct mmio_input_output_t *)arg,
+ no_of_bytes)) {
+ dev_err(info->dev,
+ "Copy from userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mmio_cam_control_clocks(info, data.mmio_arg.power_on);
+ break;
+ case MMIO_CAM_LOAD_XP70_FW:
+ no_of_bytes = sizeof(struct mmio_input_output_t);
+ memset(&data, 0, sizeof(struct mmio_input_output_t));
+
+ if (copy_from_user
+ (&data, (struct mmio_input_output_t *)arg,
+ no_of_bytes)) {
+ dev_err(info->dev,
+ "Copy from userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mmio_load_xp70_fw(info, &data.mmio_arg.xp70_fw);
+ break;
+ case MMIO_CAM_MAP_STATS_AREA:
+ no_of_bytes = sizeof(struct mmio_input_output_t);
+ memset(&data, 0, sizeof(struct mmio_input_output_t));
+
+ if (copy_from_user
+ (&data, (struct mmio_input_output_t *)arg,
+ no_of_bytes)) {
+ dev_err(info->dev,
+ "Copy from userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mmio_map_statistics_mem_area(info,
+ &data.mmio_arg.addr_to_map);
+
+		if (ret) {
+			dev_err(info->dev,
+				"Unable to map Statistics Mem area\n");
+			break;
+		}
+
+		if (copy_to_user((struct mmio_input_output_t *)arg,
+				 &data, no_of_bytes)) {
+ dev_err(info->dev,
+ "Copy to userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ break;
+ case MMIO_CAM_SET_PRI_HWIF:
+ ret = mmio_cam_set_pri_hwif(info);
+ break;
+ case MMIO_CAM_SET_SEC_HWIF:
+ ret = mmio_cam_set_sec_hwif(info);
+ break;
+ case MMIO_CAM_INITMMDSPTIMER:
+ ret = mmio_cam_init_mmdsp_timer(info);
+ break;
+ case MMIO_CAM_ISP_WRITE:
+ no_of_bytes = sizeof(struct mmio_input_output_t);
+ memset(&data, 0, sizeof(struct mmio_input_output_t));
+
+ if (copy_from_user
+ (&data, (struct mmio_input_output_t *)arg,
+ no_of_bytes)) {
+ dev_err(info->dev,
+ "Copy from userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mmio_isp_write(info, &data.mmio_arg.isp_write);
+ break;
+ case MMIO_ACTIVATE_I2C2:
+ no_of_bytes = sizeof(struct mmio_input_output_t);
+ memset(&data, 0, sizeof(struct mmio_input_output_t));
+
+ if (copy_from_user
+ (&enable, (int *)arg, sizeof(enable))) {
+ dev_err(info->dev,
+ "Copy from userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mmio_activate_i2c2(info, enable);
+ break;
+ case MMIO_ENABLE_XSHUTDOWN_FROM_HOST:
+ no_of_bytes = sizeof(struct mmio_input_output_t);
+ memset(&data, 0, sizeof(struct mmio_input_output_t));
+
+ if (copy_from_user
+ (&enable, (int *)arg, sizeof(enable))) {
+ dev_err(info->dev,
+ "Copy from userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mmio_enable_xshutdown_from_host(info, enable);
+ break;
+ case MMIO_CAM_GET_IP_GPIO:
+ no_of_bytes = sizeof(struct mmio_input_output_t);
+ memset(&data, 0, sizeof(struct mmio_input_output_t));
+
+ if (copy_from_user
+ (&data, (struct mmio_input_output_t *)arg,
+ no_of_bytes)) {
+ dev_err(info->dev,
+ "Copy from userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ data.mmio_arg.xshutdown_info.ip_gpio =
+ info->pdata->reset_ipgpio
+ [data.mmio_arg.xshutdown_info.camera_function];
+
+		if (copy_to_user((struct mmio_input_output_t *)arg,
+				 &data, no_of_bytes)) {
+ dev_err(info->dev,
+ "Copy to userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ break;
+ case MMIO_CAM_SET_TRACE_BUFFER:
+ no_of_bytes = sizeof(struct mmio_input_output_t);
+ memset(&data, 0, sizeof(struct mmio_input_output_t));
+
+ if (copy_from_user
+ (&data, (struct mmio_input_output_t *) arg,
+ no_of_bytes)) {
+ dev_err(info->dev,
+ "Copy from userspace failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = mmio_set_trace_buffer(info, &data.mmio_arg.trace_buf);
+ break;
+ default:
+ dev_err(info->dev, "Not an ioctl for this module\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mmio_release(struct inode *node, struct file *filp)
+{
+ struct mmio_info *info = filp->private_data;
+ BUG_ON(info == NULL);
+ mmio_activate_i2c2(info, MMIO_DEACTIVATE_I2C);
+ info->pdata->config_xshutdown_pins(info->pdata, MMIO_DISABLE_XSHUTDOWN,
+ -1);
+
+ mutex_lock(&info->lock);
+ if (info->trace_buffer) {
+ flush_delayed_work_sync(&info->trace_work);
+ iounmap(info->trace_buffer);
+ info->trace_buffer = NULL;
+ }
+ mutex_unlock(&info->lock);
+ return 0;
+}
+
+static int mmio_open(struct inode *node, struct file *filp)
+{
+ filp->private_data = info; /* Hook our mmio info */
+ return 0;
+}
+
+static const struct file_operations mmio_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = mmio_ioctl,
+ .open = mmio_open,
+ .release = mmio_release,
+};
+
+
+static ssize_t xp70_data_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ int len;
+ int size = 0;
+ int count = 0;
+ int first_index;
+ mutex_lock(&info->lock);
+ first_index = info->trace_status.prev_block_id + 1;
+
+ if (!info->trace_buffer || info->trace_buffer->block_id ==
+ XP70_MAX_BLOCK_ID)
+ goto out_unlock;
+
+ if (info->trace_allowed != 1) {
+ dev_warn(info->dev, "xp70 trace disabled in kernel\n");
+ size = sprintf(buf, "xp70 trace disabled in kernel, "
+ "use sysfs to enable\n");
+ goto out_unlock;
+ }
+
+ count = info->trace_buffer->block_id - info->trace_status.prev_block_id;
+
+ if ((info->trace_buffer->overwrite_count -
+ info->trace_status.prev_overwrite_count) * XP70_NB_BLOCK
+ + (info->trace_buffer->block_id -
+ info->trace_status.prev_block_id)
+ >= XP70_NB_BLOCK) {
+ /* overflow case */
+ info->trace_status.prev_block_id =
+ info->trace_buffer->block_id - XP70_NB_BLOCK;
+ first_index = info->trace_buffer->block_id + 1;
+ count = XP70_NB_BLOCK;
+ len = sprintf(buf, "XP70 trace overflow\n");
+ size += len;
+ buf += len;
+ }
+
+ for (i = first_index; count; count--) {
+ int msg_len;
+
+ if (i < 0 || i >= XP70_NB_BLOCK || count > XP70_NB_BLOCK) {
+ dev_err(info->dev, "trace index out-of-bounds\n");
+ goto out_unlock;
+ }
+
+ msg_len = strnlen(info->trace_buffer->block[i].data,
+ XP70_BLOCK_SIZE);
+
+ if (msg_len > 0) {
+ /* zero terminate full length message */
+ if (msg_len == XP70_BLOCK_SIZE)
+ info->trace_buffer->block[i].data[
+ XP70_BLOCK_SIZE - 1] = '\0';
+
+ len = snprintf(buf, PAGE_SIZE - size, "%d %s\n",
+ info->trace_buffer->block[i].msg_id,
+ info->trace_buffer->block[i].data);
+
+ if (len > PAGE_SIZE - size) {
+ dev_err(info->dev, "sysfs buffer overflow\n");
+ size = PAGE_SIZE;
+ goto out_unlock;
+ }
+
+ size += len;
+ buf += len;
+ }
+
+ i = (i + 1) % XP70_NB_BLOCK;
+ }
+
+out_unlock:
+ mutex_unlock(&info->lock);
+ return size;
+}
+
+static ssize_t xp70_trace_allowed_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len;
+ len = sprintf(buf, "%d\n", info->trace_allowed);
+ return len;
+}
+
+static ssize_t xp70_trace_allowed_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (count <= 0) {
+ dev_err(info->dev, "empty buffer to store\n");
+ return 0;
+ }
+
+ if (buf[0] == '1')
+ info->trace_allowed = 1;
+ else if (buf[0] == '0')
+ info->trace_allowed = 0;
+ else
+ dev_err(info->dev, "illegal trace_allowed val %c\n",
+ buf[0]);
+
+ return count;
+}
+
+static struct device_attribute xp70_device_attrs[] = {
+ __ATTR_RO(xp70_data),
+ __ATTR(trace_allowed, S_IRUGO | S_IWUSR, xp70_trace_allowed_show,
+ xp70_trace_allowed_store),
+ __ATTR_NULL
+};
+
+static void xp70_buffer_wqtask(struct work_struct *data)
+{
+ int i;
+ int first_index = info->trace_status.prev_block_id + 1;
+ int count;
+ mutex_lock(&info->lock);
+
+ if (!info->trace_buffer)
+ goto out_err;
+
+ dev_dbg(info->dev, "xp70 overwrite_cnt=%d (0x%x) blk_id=%d (0x%x)",
+ info->trace_buffer->overwrite_count,
+ info->trace_buffer->overwrite_count,
+ info->trace_buffer->block_id, info->trace_buffer->block_id);
+
+ /* check if trace already started */
+ if (info->trace_buffer->block_id == XP70_MAX_BLOCK_ID ||
+ info->trace_buffer->block_id == XP70_DEFAULT_MSG_ID ||
+ info->trace_buffer->overwrite_count == XP70_DEFAULT_MSG_ID)
+ goto out;
+
+ if ((info->trace_buffer->overwrite_count -
+ info->trace_status.prev_overwrite_count) * XP70_NB_BLOCK
+ + (info->trace_buffer->block_id -
+ info->trace_status.prev_block_id)
+ >= XP70_NB_BLOCK) {
+ /* overflow case */
+ info->trace_status.prev_block_id =
+ info->trace_buffer->block_id - XP70_NB_BLOCK;
+ first_index = info->trace_buffer->block_id + 1;
+ count = XP70_NB_BLOCK;
+
+ pr_info_ratelimited("XP70 trace overflow\n");
+ } else if (info->trace_buffer->block_id
+ >= info->trace_status.prev_block_id) {
+ count = info->trace_buffer->block_id -
+ info->trace_status.prev_block_id;
+ } else {
+ u32 block_id, prev_block_id, diff;
+ block_id = (u32)(info->trace_buffer->block_id);
+ prev_block_id = (u32)(info->trace_status.prev_block_id);
+ diff = (block_id + XP70_NB_BLOCK) - prev_block_id;
+ count = (u32)diff;
+ }
+
+ for (i = first_index; count; count--) {
+ if (i < 0 || i >= XP70_NB_BLOCK || count > XP70_NB_BLOCK) {
+			pr_info_ratelimited("trace index out-of-bounds "
+ "i=%d count=%d XP70_NB_BLOCK=%d\n",
+ i, count, XP70_NB_BLOCK);
+
+ break;
+ }
+
+ if (info->trace_buffer->block[i].msg_id !=
+ XP70_DEFAULT_MSG_ID) {
+ int msg_len = strnlen(
+ info->trace_buffer->block[i].data,
+ XP70_BLOCK_SIZE);
+
+ /* zero terminate full length message */
+ if (msg_len > 0) {
+ if (msg_len == XP70_BLOCK_SIZE)
+ info->trace_buffer->block[i].data[
+ XP70_BLOCK_SIZE - 1] = '\0';
+
+ dev_info(info->dev, "%d %s\n",
+ info->trace_buffer->block[i].msg_id,
+ info->trace_buffer->block[i].data);
+ }
+ }
+
+ i = (i + 1) % XP70_NB_BLOCK;
+ }
+
+ info->trace_status.prev_overwrite_count =
+ info->trace_buffer->overwrite_count;
+ info->trace_status.prev_block_id = info->trace_buffer->block_id;
+out:
+ /* Schedule work */
+ if (!schedule_delayed_work(&info->trace_work,
+ msecs_to_jiffies(XP70_TIMEOUT_MSEC)))
+ dev_info(info->dev, "failed to schedule work\n");
+
+out_err:
+ mutex_unlock(&info->lock);
+ return;
+}
+
+/**
+ * mmio_probe() - Initialize MMIO Camera resources.
+ * @pdev: Platform device.
+ *
+ * Initialize the module and register the misc device.
+ *
+ * Returns:
+ * 0 if there is no error.
+ * -ENOMEM if allocation fails.
+ * -EEXIST if the device has already been started.
+ * Error codes from misc_register.
+ */
+static int __devinit mmio_probe(struct platform_device *pdev)
+{
+ int err;
+ int i;
+ int ret;
+ printk(KERN_INFO "%s\n", __func__);
+ /* Initialize private data. */
+ info = kzalloc(sizeof(struct mmio_info), GFP_KERNEL);
+
+ if (!info) {
+ dev_err(&pdev->dev, "Could not alloc info struct\n");
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* Fill in private data */
+ info->pdata = pdev->dev.platform_data;
+ info->dev = &pdev->dev;
+ info->pdata->dev = &pdev->dev;
+ info->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ info->misc_dev.name = MMIO_NAME;
+ info->misc_dev.fops = &mmio_fops;
+ info->misc_dev.parent = pdev->dev.parent;
+ mutex_init(&info->lock);
+ info->xshutdown_enabled = 0;
+ info->xshutdown_is_active_high = 0;
+ info->trace_allowed = 0;
+ /* Register Misc character device */
+ err = misc_register(&(info->misc_dev));
+
+ if (err) {
+ dev_err(&pdev->dev, "Error %d registering misc dev!", err);
+ goto err_miscreg;
+ }
+
+ /* Memory mapping */
+ info->siabase = ioremap(info->pdata->sia_base, SIA_ISP_MCU_SYS_SIZE);
+
+ if (!info->siabase) {
+ dev_err(info->dev, "Could not ioremap SIA_BASE\n");
+ err = -ENOMEM;
+ goto err_ioremap_sia_base;
+ }
+
+ info->crbase = ioremap(info->pdata->cr_base, PAGE_SIZE);
+
+ if (!info->crbase) {
+ dev_err(info->dev, "Could not ioremap CR_BASE\n");
+ err = -ENOMEM;
+ goto err_ioremap_cr_base;
+ }
+
+ /* Initialize platform specific data */
+ err = info->pdata->platform_init(info->pdata);
+
+ if (err)
+ goto err_platform_init;
+
+ /* create sysfs entries */
+ for (i = 0; attr_name(xp70_device_attrs[i]); i++) {
+ ret = device_create_file(info->misc_dev.this_device,
+ &xp70_device_attrs[i]);
+
+ if (ret) {
+ dev_err(info->dev, "Error creating SYSFS entry"
+ " %s (%d)\n", xp70_device_attrs[i].attr.name,
+ ret);
+ }
+ }
+
+ INIT_DELAYED_WORK(&info->trace_work, xp70_buffer_wqtask);
+ dev_info(&pdev->dev, "MMIO driver initialized with minor=%d\n",
+ info->misc_dev.minor);
+ return 0;
+err_platform_init:
+ iounmap(info->crbase);
+err_ioremap_cr_base:
+ iounmap(info->siabase);
+err_ioremap_sia_base:
+ misc_deregister(&info->misc_dev);
+err_miscreg:
+ kfree(info);
+ info = NULL;
+err_alloc:
+ return err;
+}
+
+/**
+ * mmio_remove() - Release MMIO Camera resources.
+ * @pdev: Platform device.
+ *
+ * Remove the misc device and free resources.
+ *
+ * Returns:
+ * 0 on success.
+ * Error codes from misc_deregister.
+ */
+static int __devexit mmio_remove(struct platform_device *pdev)
+{
+ int err;
+ int i;
+
+ if (!info)
+ return 0;
+
+ flush_scheduled_work();
+
+ /* sysfs parameters */
+ for (i = 0; attr_name(xp70_device_attrs[i]); i++)
+ device_remove_file(info->misc_dev.this_device,
+ &xp70_device_attrs[i]);
+
+ err = misc_deregister(&info->misc_dev);
+
+ if (err)
+ dev_err(&pdev->dev, "Error %d deregistering misc dev", err);
+
+ info->pdata->platform_exit(info->pdata);
+ iounmap(info->siabase);
+ iounmap(info->crbase);
+ mutex_destroy(&info->lock);
+ kfree(info);
+ info = NULL;
+ return 0;
+}
+
+static struct platform_driver mmio_driver = {
+ .driver = {
+ .name = MMIO_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = mmio_probe,
+ .remove = __devexit_p(mmio_remove)
+};
+
+/**
+ * mmio_init() - Initialize module.
+ *
+ * Registers platform driver.
+ */
+static int __init mmio_init(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ return platform_driver_register(&mmio_driver);
+}
+
+/**
+ * mmio_exit() - Remove module.
+ *
+ * Unregisters platform driver.
+ */
+static void __exit mmio_exit(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ platform_driver_unregister(&mmio_driver);
+}
+
+module_init(mmio_init);
+module_exit(mmio_exit);
+
+MODULE_AUTHOR("Joakim Axelsson ST-Ericsson");
+MODULE_AUTHOR("Pankaj Chauhan ST-Ericsson");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MMIO Camera driver");
diff --git a/drivers/staging/nmf-cm/Kconfig b/drivers/staging/nmf-cm/Kconfig
new file mode 100644
index 00000000000..9545fd5acd1
--- /dev/null
+++ b/drivers/staging/nmf-cm/Kconfig
@@ -0,0 +1,12 @@
+
+config U8500_CM
+ tristate "U8500 Component Manager driver"
+ depends on UX500_SOC_DB8500
+ help
+ This is the Component Manager driver. It is part of the
+ Nomadik Multiprocessing Framework.
+
+	  Note: This option allows kernel developers to build the driver
+	  into the kernel to ease their work. By default, this driver
+	  must be built outside this kernel source tree.
+
diff --git a/drivers/staging/nmf-cm/Make.config b/drivers/staging/nmf-cm/Make.config
new file mode 100644
index 00000000000..ccbc150158b
--- /dev/null
+++ b/drivers/staging/nmf-cm/Make.config
@@ -0,0 +1,8 @@
+# Copyright (C) ST-Ericsson SA 2011. All rights reserved.
+# This code is ST-Ericsson proprietary and confidential.
+# Any use of the code for whatever purpose is subject to
+# specific written permission of ST-Ericsson SA.
+
+#CM driver file to copy but not to compile
+CMENGINESRC_COPY_NO_BUILD = cm/engine/elf/src/elfxx.c
+
diff --git a/drivers/staging/nmf-cm/Makefile b/drivers/staging/nmf-cm/Makefile
new file mode 100644
index 00000000000..b1a5b9afe1f
--- /dev/null
+++ b/drivers/staging/nmf-cm/Makefile
@@ -0,0 +1,99 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+# License terms: GNU General Public License (GPL), version 2.
+#
+
+#
+# Rules to build kernel modules
+#
+ifneq ($(findstring KERNELRELEASE,$(.VARIABLES)),)
+
+ # $(src): current relative dir; $(kbuild-dir): cur absolute dir
+ ifdef kbuild-dir
+ SRCDIR = $(realpath $(kbuild-dir))
+ else
+ SRCDIR = $(realpath $(src))
+ endif
+ include $(SRCDIR)/Make.config
+ ifndef FIXED_CPPFLAGS
+    # In the Android environment, we cannot depend on files outside the
+    # kernel tree, and thus we cannot include
+    # $(SRCDIR)/../../../../mmenv/SharedARMFlags.mk where FIXED_CPPFLAGS
+    # is defined. So, define FIXED_CPPFLAGS here.
+ FIXED_CPPFLAGS=-D__STN_8500=30 -DLINUX -D__ARM_LINUX
+ endif
+ EXTRA_CFLAGS := -I$(SRCDIR) $(FIXED_CPPFLAGS)
+ EXTRA_CFLAGS += -Wall -Werror
+ #EXTRA_CFLAGS += -DCM_DEBUG_ALLOC
+
+ #
+ # CM object files to compile with
+ #
+ GENERIC_CM_FILES:=$(shell cd $(SRCDIR); find cm -name "*.c")
+ GENERIC_CM_FILES := $(filter-out $(CMENGINESRC_COPY_NO_BUILD), $(GENERIC_CM_FILES))
+
+ CM_OBJS := $(GENERIC_CM_FILES:.c=.o)
+ CM_OBJS += cmld.o cm_syscall.o osal-kernel.o cm_service.o cm_debug.o configuration.o
+ CM_OBJS += cm_dma.o
+
+ obj-$(CONFIG_U8500_CM) := cm.o
+
+ #Note: build system prepends the $(PWD) directory to these objects paths
+ cm-objs := $(CM_OBJS)
+
+else
+
+ # CM module is built in kernel in android env
+ # or as module otherwise (OSI env, ...)
+ export CONFIG_U8500_CM ?= m
+
+ ifeq ($(findstring install,$(MAKECMDGOALS)),)
+ # If not only performing install then include needed files for build
+ include $(MM_MAKEFILES_DIR)/SharedARMFlags.mk
+ export FIXED_CPPFLAGS
+ -include $(MM_MAKEFILES_DIR)/KernelConfig.mk
+
+ ifeq ($(findstring clean,$(MAKECMDGOALS)),)
+ ifndef KERNEL_BUILD_DIR
+ $(error KERNEL_BUILD_DIR not defined)
+ endif
+ endif
+ endif
+
+ include $(MM_MAKEFILES_DIR)/SharedConfig.mk
+
+ module:
+ $(MAKE) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) -C $(KERNEL_BUILD_DIR) \
+ M=$(PWD) INSTALL_HEADER_DIR=$(INSTALL_HEADER_DIR) \
+ modules
+
+ all: module
+ $(MAKE) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) -C $(KERNEL_BUILD_DIR) \
+ M=$(PWD) INSTALL_HEADER_DIR=$(INSTALL_HEADER_DIR) \
+ INSTALL_MOD_PATH=$(PWD)/lib/$(PLATFORM) \
+ modules_install
+ rm -f $(PWD)/lib/$(PLATFORM)/lib/modules/*/modules.*
+
+ #
+ # Rules to clean and install
+ #
+ clean:
+ @rm -rf $(PLATFORM) $(CM_OBJS) .built-in.o.cmd .cm*o.cmd Module.symvers \
+ .tmp_versions modules.order cm.ko cm.o cm.mod.* lib \
+ $(foreach f,$(CM_OBJS), $(dir $f).$(notdir $f).cmd)
+
+ realclean: clean
+ $(foreach platform, \
+ $(shell grep property ../../component/component.xml | cut -d\" -f 4), \
+ rm -rf $(platform);)
+ @rm -rf *~
+
+ install:
+ $(GEN_LN) -d lib/$(PLATFORM)/lib $(INSTALL_LIB_DIR)/lib
+
+ uninstall:
+ $(GEN_LN) -r -d lib/$(PLATFORM)/lib $(INSTALL_LIB_DIR)/lib
+
+endif #ifdef KERNELRELEASE
+
diff --git a/drivers/staging/nmf-cm/cm/engine/api/channel_engine.h b/drivers/staging/nmf-cm/cm/engine/api/channel_engine.h
new file mode 100644
index 00000000000..19353ee7328
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/channel_engine.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief Communication Component Manager internal API type.
+ */
+
+#ifndef CHANNEL_ENGINE_H
+#define CHANNEL_ENGINE_H
+
+#include <nmf/inc/channel_type.h>
+#include <nmf/inc/service_type.h>
+#include <cm/engine/communication/inc/communication_type.h>
+
+/*!
+ * \brief Internal channel identification.
+ *
+ * Same meaning as t_nmf_channel, but this is the channel used internally by
+ * the OS Integration part.
+ *
+ * \ingroup CM_OS_API
+ */
+typedef t_uint32 t_os_channel;
+
+/*!
+ * \brief Invalid value for os_channel
+ *
+ * Invalid value for os channel.
+ *
+ * \ingroup CM_OS_API
+ */
+#define NMF_OS_CHANNEL_INVALID_HANDLE 0xffffffff
+
+/*!
+ * \brief Structure used for storing required parameters for Interface Callback
+ * messages.
+ *
+ * This structure is used internally by CM_GetMessage() and CM_ExecuteMessage() as
+ * the message content in the given buffer.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+typedef struct {
+ t_nmf_mpc2host_handle THIS; //!< Context of interface implementation
+ t_uint32 methodIndex; //!< Method index in interface
+ char params[1]; //!< Is of variable length concretely
+} t_interface_data;
+
+/*!
+ * \brief Structure used for storing required parameters for Service Callback
+ * messages.
+ *
+ * This structure is used internally by CM_GetMessage() and CM_ExecuteMessage() as
+ * the message content in the given buffer.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+typedef struct {
+ t_nmf_service_type type; //!< Type of the service message
+ t_nmf_service_data data;
+} t_service_data;
+
+typedef enum {
+ MSG_INTERFACE,
+ MSG_SERVICE
+} t_message_type;
+
+/*!
+ * \brief Structure used for storing required parameters for the internal NMF
+ * messages.
+ *
+ * This structure is used internally by CM_GetMessage() and CM_ExecuteMessage() as
+ * the message content in the given buffer.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+typedef struct {
+ t_message_type type; //!< Type of the nmf message
+ union {
+ t_interface_data itf;
+ t_service_data srv;
+ } data;
+} t_os_message;
+
+/*!
+ * \brief Structure wrapping an internal NMF message together with its channel.
+ *
+ * This structure is used internally by CM_GetMessage() and CM_ExecuteMessage() as
+ * the message content in the given buffer.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+typedef struct {
+ t_nmf_channel channel; //!< Channel (required to handle service message)
+ t_os_message osMsg;
+} t_nmf_message;
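+
+/*
+ * Hypothetical usage sketch (not part of the API): how an OS integration
+ * layer might dispatch a t_nmf_message obtained from CM_GetMessage().
+ * The handler calls in the comments are illustrative only:
+ *
+ *   t_nmf_message msg;
+ *
+ *   // ... fill msg via CM_GetMessage() ...
+ *   switch (msg.osMsg.type) {
+ *   case MSG_INTERFACE:
+ *       // invoke method msg.osMsg.data.itf.methodIndex on the interface
+ *       // context msg.osMsg.data.itf.THIS, unmarshalling
+ *       // msg.osMsg.data.itf.params
+ *       break;
+ *   case MSG_SERVICE:
+ *       // handle the service callback described by msg.osMsg.data.srv
+ *       break;
+ *   }
+ */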
+
+#endif /* CHANNEL_ENGINE_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/api/cm_engine.h b/drivers/staging/nmf-cm/cm/engine/api/cm_engine.h
new file mode 100644
index 00000000000..0f4c1e4219e
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/cm_engine.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief CM Engine API.
+ *
+ * This file contains the Component Manager Engine API.
+ */
+
+/*!
+ * \defgroup CM_ENGINE_MODULE CM Engine
+ */
+/*!
+ * \defgroup CM_ENGINE_API CM Engine API
+ *
+ * \note This API is not for application developers; it is an internal API only.
+ *
+ * \warning All out parameters of this API are references to data that is filled in by the call.
+ *
+ * This API is provided by the CM Engine and is required by the kernel part of the driver.
+ * \ingroup CM_ENGINE_MODULE
+ */
+
+#ifndef CM_ENGINE_H_
+#define CM_ENGINE_H_
+
+#include <cm/engine/api/configuration_engine.h>
+
+#include <cm/engine/api/component_engine.h>
+
+#include <cm/engine/api/memory_engine.h>
+
+#include <cm/engine/api/communication_engine.h>
+
+#include <cm/engine/api/perfmeter_engine.h>
+
+#include <cm/engine/api/executive_engine_mgt_engine.h>
+
+#include <cm/engine/api/repository_mgt_engine.h>
+
+#include <cm/engine/api/domain_engine.h>
+
+#include <cm/engine/api/migration_engine.h>
+
+#endif /*CM_ENGINE_H_*/
+
diff --git a/drivers/staging/nmf-cm/cm/engine/api/communication_engine.h b/drivers/staging/nmf-cm/cm/engine/api/communication_engine.h
new file mode 100644
index 00000000000..477a66a4002
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/communication_engine.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief Communication User Engine API.
+ *
+ * This file contains the Communication Engine API for manipulating components.
+ *
+ */
+#ifndef COMMUNICATION_ENGINE_H_
+#define COMMUNICATION_ENGINE_H_
+
+#include <cm/engine/communication/inc/communication_type.h>
+
+/*!
+ * \brief Allocate an event buffer in which parameters will be marshalled.
+ *
+ * To optimize calls, this method does not need to be exported to user space,
+ * but it must be used by the CM driver.
+ *
+ * See \ref HOST2MPC "Host->MPC binding" for an integration example.
+ *
+ * \note This method is not called from user space.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_event_params_handle CM_ENGINE_AllocEvent(t_cm_bf_host2mpc_handle host2mpcId);
+
+/*!
+ * \brief Push an event into the FIFO.
+ *
+ * To optimize calls, this method does not need to be exported to user space,
+ * but it must be used by the CM driver.
+ *
+ * See \ref HOST2MPC "Host->MPC binding" for an integration example.
+ *
+ * \note This method is not called from user space.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_PushEvent(t_cm_bf_host2mpc_handle host2mpcId, t_event_params_handle h, t_uint32 methodIndex);
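+
+/*
+ * Hedged sketch of the typical Host->MPC path as seen from a CM driver
+ * (illustration only; the parameter layout is defined by the generated
+ * binding code and is not shown here):
+ *
+ *   t_event_params_handle ev = CM_ENGINE_AllocEvent(host2mpcId);
+ *
+ *   if (ev != NULL) {
+ *       // ... marshal the method parameters into the buffer behind ev ...
+ *       CM_ENGINE_PushEvent(host2mpcId, ev, methodIndex);
+ *   }
+ */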
+
+/*!
+ * \brief Push an event into the FIFO.
+ *
+ * To optimize calls, this method needs to be exported to user space
+ * and must be implemented by the CM driver.
+ *
+ * See \ref HOST2MPC "Host->MPC binding" for an integration example.
+ *
+ * \note No implementation of this method is provided in the kernel CM engine.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC t_cm_error CM_ENGINE_PushEventWithSize(t_cm_bf_host2mpc_handle host2mpcId, t_event_params_handle h, t_uint32 size, t_uint32 methodIndex);
+
+/*!
+ * \brief Acknowledge to the FIFO that the received event has been demarshalled.
+ *
+ * To optimize calls, this method does not need to be exported to user space,
+ * but it must be used by the CM driver.
+ *
+ * See \ref MPC2HOST "MPC->Host binding" for an integration example.
+ *
+ * \note This method is not called from user space.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED void CM_ENGINE_AcknowledgeEvent(t_cm_bf_mpc2host_handle mpc2hostId);
+
+#endif /*COMMUNICATION_ENGINE_H_*/
diff --git a/drivers/staging/nmf-cm/cm/engine/api/component_engine.h b/drivers/staging/nmf-cm/cm/engine/api/component_engine.h
new file mode 100644
index 00000000000..cbd61769597
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/component_engine.h
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief Components Component Manager User Engine API.
+ *
+ * This file contains the Component Manager Engine API for manipulating components.
+ *
+ */
+
+#ifndef COMPONENT_ENGINE_H_
+#define COMPONENT_ENGINE_H_
+
+#include <cm/engine/memory/inc/domain_type.h>
+#include <cm/engine/component/inc/component_type.h>
+#include <cm/engine/communication/inc/communication_type.h>
+#include <inc/nmf-limits.h>
+
+/*!
+ * \brief Instantiate a new component.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_InstantiateComponent(
+ const char templateName[MAX_TEMPLATE_NAME_LENGTH], //!< [in] Null terminated string (Max size=\ref MAX_TEMPLATE_NAME_LENGTH)
+ t_cm_domain_id domainId, //!< [in] Domain
+ t_nmf_client_id clientId, //!< [in] Client ID (aka PID)
+ t_nmf_ee_priority priority, //!< [in] Component priority
+ const char localName[MAX_COMPONENT_NAME_LENGTH], //!< [in] Null terminated string (Max size=\ref MAX_COMPONENT_NAME_LENGTH)
+ const char *dataFile, //!< [in] Optional reference on file where component is stored
+ t_cm_instance_handle *component //!< [out] component
+ );
+
+/*!
+ * \brief Start a component.
+ *
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_StartComponent(
+ t_cm_instance_handle component,
+ t_nmf_client_id clientId);
+
+/*!
+ * \brief Stop a component.
+ *
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_StopComponent(
+ t_cm_instance_handle component,
+ t_nmf_client_id clientId);
+
+/*!
+ * \brief Destroy a component.
+ *
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_DestroyComponent(
+ t_cm_instance_handle component,
+ t_nmf_client_id clientId);
+
+/*!
+ * \brief Stop and destroy all components belonging to the given client.
+ *
+ * \param[in] client
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_FlushComponents(
+ t_nmf_client_id client);
+
+/*!
+ * \brief Bind two components together.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_BindComponent(
+ const t_cm_instance_handle client, //!<
+ const char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH], //!< Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ const t_cm_instance_handle server, //!<
+ const char providedItfServerName[MAX_INTERFACE_NAME_LENGTH], //!< Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ t_bool traced, //!< FALSE for synchronous binding, TRUE for traced one
+ t_nmf_client_id clientId, //!< Client ID
+        const char *dataFileTrace                               //!< Component file data in case of traced binding (Note: may be null if the file is already in cache)
+ );
+
+/*!
+ * \brief Unbind a component.
+ *
+ * \param[in] client
+ * \param[in] requiredItfClientName Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_UnbindComponent(
+ const t_cm_instance_handle client,
+ const char * requiredItfClientName,
+ t_nmf_client_id clientId);
+
+/*!
+ * \brief Bind a component to void (calls on this interface are silently ignored).
+ *
+ * \param[in] client
+ * \param[in] requiredItfClientName Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_BindComponentToVoid(
+ const t_cm_instance_handle client,
+ const char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH],
+ t_nmf_client_id clientId);
+
+/*!
+ * \brief Bind two components together in an asynchronous way
+ * (the components can be on the same MPC or on two different MPCs)
+ *
+ * \param[in] client
+ * \param[in] requiredItfClientName Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ * \param[in] server
+ * \param[in] providedItfServerName Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ * \param[in] fifosize
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_BindComponentAsynchronous(
+ const t_cm_instance_handle client,
+ const char * requiredItfClientName,
+ const t_cm_instance_handle server,
+ const char * providedItfServerName,
+ t_uint32 fifosize,
+ t_cm_mpc_memory_type eventMemType,
+ t_nmf_client_id clientId,
+ const char *dataFileSkeletonOrEvent,
+ const char *dataFileStub);
+
+/*!
+ * \brief Unbind a component previously bound asynchronously
+ *
+ * \param[in] client
+ * \param[in] requiredItfClientName Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_UnbindComponentAsynchronous(
+ const t_cm_instance_handle client,
+ const char * requiredItfClientName,
+ t_nmf_client_id clientId);
+
+/*!
+ * \brief Bind the Host to a component.
+ *
+ * \param[in] server
+ * \param[in] providedItfServerName Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ * \param[in] fifosize
+ * \param[out] host2mpcId
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_BindComponentFromCMCore(
+ const t_cm_instance_handle server,
+ const char * providedItfServerName,
+ t_uint32 fifosize,
+ t_cm_mpc_memory_type eventMemType,
+ t_cm_bf_host2mpc_handle *host2mpcId,
+ t_nmf_client_id clientId,
+ const char *dataFileSkeleton);
+
+/*!
+ * \brief Unbind a component from the Host.
+ *
+ * \param[in] host2mpcId
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_UnbindComponentFromCMCore(
+ t_cm_bf_host2mpc_handle host2mpcId);
+
+/*!
+ * \brief Bind a component to the Host, see \ref CM_ENGINE_BindComponentToCMCore.
+ *
+ * See \ref MPC2HOST "MPC->Host binding" for seeing an integration example.
+ *
+ * \note This method is not called from the CM Proxy; it is only there for wrapping purposes.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_BindComponentToCMCore(
+ const t_cm_instance_handle client,
+ const char *requiredItfClientName,
+ t_uint32 fifosize,
+ t_nmf_mpc2host_handle upLayerThis,
+ const char *dataFileStub,
+ t_cm_bf_mpc2host_handle *mpc2hostId,
+ t_nmf_client_id clientId);
+
+/*!
+ * \brief Unbind a component from the Host, see \ref CM_ENGINE_BindComponentToCMCore.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_UnbindComponentToCMCore(
+ const t_cm_instance_handle client,
+ const char *requiredItfClientName,
+ t_nmf_mpc2host_handle *upLayerThis,
+ t_nmf_client_id clientId);
+
+/*!
+ * \brief Read a value on an attribute exported by a component instance.
+ *
+ * \param[in] component
+ * \param[in] attrName Null terminated string (Max size=\ref MAX_ATTRIBUTE_NAME_LENGTH).
+ * \param[out] value
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_ReadComponentAttribute(
+ const t_cm_instance_handle component,
+ const char* attrName,
+ t_uint24 *value);
+
+/*!
+ * \brief Get the oldest component (the head of the client's component list).
+ *
+ * \param[in] client
+ * \param[out] headerComponent
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentListHeader(
+ const t_nmf_client_id client,
+ t_cm_instance_handle *headerComponent);
+
+/*!
+ * \brief Get the next component.
+ *
+ * \param[in] client
+ * \param[in] prevComponent
+ * \param[out] nextComponent
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentListNext(
+ const t_nmf_client_id client,
+ const t_cm_instance_handle prevComponent,
+ t_cm_instance_handle *nextComponent);
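+
+/*
+ * Illustrative-only iteration over a client's component list using the two
+ * calls above (assuming a null handle marks the end of the list; error
+ * handling is reduced to a termination check):
+ *
+ *   t_cm_instance_handle comp;
+ *
+ *   if (CM_ENGINE_GetComponentListHeader(client, &comp) == CM_OK) {
+ *       while (comp != 0) {
+ *           // ... inspect comp, e.g. with CM_ENGINE_GetComponentDescription() ...
+ *           if (CM_ENGINE_GetComponentListNext(client, comp, &comp) != CM_OK)
+ *               break;
+ *       }
+ *   }
+ */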
+
+/*!
+ * \brief Get a component description
+ *
+ * \param[in] component
+ * \param[in] templateNameLength
+ * \param[in] localNameLength
+ * \param[out] templateName Null terminated string (Size=templateNameLength, Max size=\ref MAX_TEMPLATE_NAME_LENGTH).
+ * \param[out] coreId
+ * \param[out] localName Null terminated string (Size=localNameLength, Max size=\ref MAX_COMPONENT_NAME_LENGTH).
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentDescription(
+ const t_cm_instance_handle component,
+ char *templateName,
+ t_uint32 templateNameLength,
+ t_nmf_core_id *coreId,
+ char *localName,
+ t_uint32 localNameLength,
+ t_nmf_ee_priority *priority);
+
+/*!
+ * \brief Get number of interface required by a component.
+ *
+ * \param[in] component
+ * \param[out] numberRequiredInterfaces
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentRequiredInterfaceNumber(
+ const t_cm_instance_handle component,
+ t_uint8 *numberRequiredInterfaces);
+
+/*!
+ * \brief Return information about required interface.
+ *
+ * \param[in] component
+ * \param[in] index
+ * \param[in] itfNameLength
+ * \param[in] itfTypeLength
+ * \param[out] itfName Null terminated string (Size=itfNameLength, Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ * \param[out] itfType Null terminated string (Size=itfTypeLength, Max size=\ref MAX_INTERFACE_TYPE_NAME_LENGTH).
+ * \param[out] collectionSize
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentRequiredInterface(
+ const t_cm_instance_handle component,
+ const t_uint8 index,
+ char *itfName,
+ t_uint32 itfNameLength,
+ char *itfType,
+ t_uint32 itfTypeLength,
+ t_cm_require_state *requireState,
+ t_sint16 *collectionSize);
+
+/*!
+ * \brief Get the component binded to a required interface.
+ *
+ * \param[in] component
+ * \param[in] itfName Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ * \param[in] serverItfNameLength
+ * \param[out] server
+ * \param[out] serverItfName Null terminated string (Size=serverItfNameLength, Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentRequiredInterfaceBinding(
+ const t_cm_instance_handle component,
+ const char *itfName,
+ t_cm_instance_handle *server,
+ char *serverItfName,
+ t_uint32 serverItfNameLength);
+
+/*!
+ * \brief Get number of interface provided by a component.
+ *
+ * \param[in] component
+ * \param[out] numberProvidedInterfaces
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentProvidedInterfaceNumber(
+ const t_cm_instance_handle component,
+ t_uint8 *numberProvidedInterfaces);
+
+/*!
+ * \brief Return information about provided interface.
+ *
+ * \param[in] component
+ * \param[in] index
+ * \param[in] itfNameLength
+ * \param[in] itfTypeLength
+ * \param[out] itfName Null terminated string (Size=itfNameLength, Max size=\ref MAX_INTERFACE_NAME_LENGTH).
+ * \param[out] itfType Null terminated string (Size=itfTypeLength, Max size=\ref MAX_INTERFACE_TYPE_NAME_LENGTH).
+ * \param[out] collectionSize
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentProvidedInterface(
+ const t_cm_instance_handle component,
+ const t_uint8 index,
+ char *itfName,
+ t_uint32 itfNameLength,
+ char *itfType,
+ t_uint32 itfTypeLength,
+ t_sint16 *collectionSize);
+
+/*!
+ * \brief Get number of properties of a component.
+ *
+ * \param[in] component
+ * \param[out] numberProperties
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentPropertyNumber(
+ const t_cm_instance_handle component,
+ t_uint8 *numberProperties);
+
+/*!
+ * \brief Return the name of a property.
+ *
+ * \param[in] component
+ * \param[in] index
+ * \param[in] propertyNameLength
+ * \param[out] propertyName Null terminated string (Size=propertyNameLength, Max size=\ref MAX_PROPERTY_NAME_LENGTH).
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentPropertyName(
+ const t_cm_instance_handle component,
+ const t_uint8 index,
+ char *propertyName,
+ t_uint32 propertyNameLength);
+
+/*!
+ * \brief Get property value of a component.
+ *
+ * \param[in] component
+ * \param[in] propertyName
+ * \param[in] propertyValueLength
+ * \param[out] propertyValue Null terminated string (Size=propertyValueLength, Max size=\ref MAX_PROPERTY_VALUE_LENGTH).
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetComponentPropertyValue(
+ const t_cm_instance_handle component,
+ const char *propertyName,
+ char *propertyValue,
+ t_uint32 propertyValueLength);
+
+#endif /*COMPONENT_ENGINE_H_*/
diff --git a/drivers/staging/nmf-cm/cm/engine/api/configuration_engine.h b/drivers/staging/nmf-cm/cm/engine/api/configuration_engine.h
new file mode 100644
index 00000000000..0336f62265e
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/configuration_engine.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief Configuration Component Manager User Engine API.
+ *
+ * This file contains the Configuration CM Engine API for manipulating CM.
+ *
+ */
+
+#ifndef CONFIGURATION_ENGINE_H
+#define CONFIGURATION_ENGINE_H
+
+#include <cm/engine/configuration/inc/configuration_type.h>
+
+/*!
+ * \brief Dynamically set some debug parameters of the CM
+ *
+ * \param[in] aCmdID The command for the parameter to update
+ * \param[in] aParam The actual value to set for the given command
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_SetMode(t_cm_cmd_id aCmdID, t_sint32 aParam);
+
+#endif /* CONFIGURATION_ENGINE_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/api/control/configuration_engine.h b/drivers/staging/nmf-cm/cm/engine/api/control/configuration_engine.h
new file mode 100644
index 00000000000..a9543a2af39
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/control/configuration_engine.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief Configuration Component Manager User Engine API.
+ *
+ * This file contains the Configuration CM Engine API for manipulating CM.
+ */
+
+#ifndef CONTROL_CONFIGURATION_ENGINE_H
+#define CONTROL_CONFIGURATION_ENGINE_H
+
+#include <cm/engine/memory/inc/domain_type.h>
+#include <cm/engine/memory/inc/memory_type.h>
+#include <cm/engine/communication/inc/communication_type.h>
+
+/*****************************************************************************************/
+/* Component Manager dedicated (for Configuration purpose) structured types definition */
+/*****************************************************************************************/
+
+/*!
+ * \brief Description of the Nomadik HW mapping configuration
+ *
+ * Describe the Nomadik mapping, that is to say:
+ * - the ESRAM memory managed by the CM (The ESRAM address space SHALL BE declared as non cacheable, non bufferable inside host MMU table)
+ * - the mapping of the System HW Semaphore IP
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+typedef struct {
+ t_nmf_memory_segment esramDesc; //!< Description of the ESRAM memory mapping into Nomadik SOC
+ t_cm_system_address hwSemaphoresMappingBaseAddr; //!< Description of the System HW Semaphores IP mapping into Nomadik SOC
+} t_nmf_hw_mapping_desc;
+
+/*!
+ * @defgroup t_nmf_nomadik_version t_nmf_nomadik_version
+ * \brief Description of the various supported Nomadik SOC version
+ * @{
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+typedef t_uint8 t_nmf_nomadik_version; //!< Fake enumeration type
+#define NOMADIK_8810 ((t_nmf_nomadik_version)0) //!< STn8810 chip (any cut)
+#define NOMADIK_8815A0 ((t_nmf_nomadik_version)1) //!< STn8815 chip (cut A0)
+#define NOMADIK_8815 ((t_nmf_nomadik_version)2) //!< STn8815 chip (other cuts)
+#define NOMADIK_8820 ((t_nmf_nomadik_version)3) //!< STn8820 chip
+#define NOMADIK_8500 ((t_nmf_nomadik_version)4) //!< STn8500 chip
+/* @} */
+
+/*!
+ * \brief Description of the configuration parameters of the Component Manager
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+typedef struct {
+ t_nmf_coms_location comsLocation; //!< Configure where CM Communications objects are put (see \ref t_nmf_coms_location)
+} t_nmf_config_desc;
+
+/*!
+ * @defgroup t_nmf_power_ctx t_nmf_power_ctx
+ * \brief Definition of the CM-engine context
+ *
+ * The OS integrator uses this value to know the context in which the associated OSAL routine is called
+ *
+ * @{
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+
+typedef t_uint32 t_nmf_power_ctx; //!< Fake enumeration type
+#define PWR_FLUSH_REQ_INTERRUPT_CTX ((t_nmf_power_ctx)0x00) //!< Interrupt context - called by \ref CM_ProcessMpcEvent
+#define PWR_FLUSH_REQ_NORMAL_CTX ((t_nmf_power_ctx)0x01) //!< Normal context (CM user call)
+
+/* @} */
+
+
+/****************************************************************************************************************/
+/* Component Manager dedicated (for Media Processors Cores Configuration purpose) structured types definition */
+/****************************************************************************************************************/
+/*!
+ * @defgroup t_nmf_executive_engine_id t_nmf_executive_engine_id
+ * \brief Identification of the Media Processor Executive Engine to deploy
+ * @{
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+typedef t_uint8 t_nmf_executive_engine_id; //!< Fake enumeration type
+#define SYNCHRONOUS_EXECUTIVE_ENGINE ((t_nmf_executive_engine_id)0) //!< MPC Synchronous executive engine
+#define HYBRID_EXECUTIVE_ENGINE ((t_nmf_executive_engine_id)1) //!< MPC Hybrid synchronous executive engine
+/* @} */
+
+/*!
+ * @defgroup t_nmf_semaphore_type_id t_nmf_semaphore_type_id
+ * \brief Definition of which type semaphore shall be used for the given Media Processor communication mechanism
+ * @{
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+typedef t_uint8 t_nmf_semaphore_type_id; //!< Fake enumeration type
+#define LOCAL_SEMAPHORES ((t_nmf_semaphore_type_id)0) //!< Embedded MMDSP macrocell semaphore, so CM_ProcessMpcEvent(<coreId>) shall be called under ISR connected to local MMDSP IRQ0
+#define SYSTEM_SEMAPHORES ((t_nmf_semaphore_type_id)1) //!< Shared system HW Semaphores, so CM_ProcessMpcEvent(ARM_CORE_ID) shall be called under ISR connected to shared HW Sem Host IRQ
+/* @} */
+
+
+/*!
+ * \brief Opaque type for allocator, returned at CM configuration.
+ */
+typedef t_uint32 t_cfg_allocator_id;
+
+/********************************************************************************/
+/* Configuration Component Manager API prototypes */
+/********************************************************************************/
+
+/*!
+ * \brief Initialisation part
+ *
+ * This routine initializes and configures the Component Manager.
+ *
+ * \param[in] pNmfHwMappingDesc hardware mapping description
+ * \param[in] pNmfConfigDesc NMF (mainly CM) Configuration description
+ *
+ * \exception TBD
+ * \return exception number.
+ *
+ * \warning The ESRAM address space SHALL BE declared as non cacheable, non bufferable inside host MMU table
+ *
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+PUBLIC t_cm_error CM_ENGINE_Init(
+ const t_nmf_hw_mapping_desc *pNmfHwMappingDesc,
+ const t_nmf_config_desc *pNmfConfigDesc
+ );
+
+
+/*!
+ * \brief Media Processor core initialisation part
+ *
+ * This routine configures a given Media Processor core
+ *
+ * \param[in] coreId Media Processor identifier
+ * \param[in] executiveEngineId Media Processor Executive Engine identifier
+ * \param[in] semaphoreTypeId Media Processor semaphores (to be used by communication mechanism) identifier
+ * \param[in] nbYramBanks Number of TCM RAM banks to reserve for Y memory
+ * \param[in] mediaProcessorMappingBaseAddr Media Processor mapping into host CPU addressable space
+ * \param[in] eeDomain Domain for EE instantiation
+ * \param[in] sdramCodeAllocId Allocator Id for the SDRAM Code segment
+ * \param[in] sdramDataAllocId Allocator Id for the SDRAM Data segment
+ *
+ * \exception TBD
+ * \return exception number.
+ *
+ * \warning The Media Processor mapping address space SHALL BE declared as non cacheable, non bufferable inside host MMU table
+ *
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+PUBLIC t_cm_error CM_ENGINE_ConfigureMediaProcessorCore(
+ t_nmf_core_id coreId,
+ t_nmf_executive_engine_id executiveEngineId,
+ t_nmf_semaphore_type_id semaphoreTypeId,
+ t_uint8 nbYramBanks,
+ const t_cm_system_address *mediaProcessorMappingBaseAddr,
+ const t_cm_domain_id eeDomain,
+ const t_cfg_allocator_id sdramCodeAllocId,
+ const t_cfg_allocator_id sdramDataAllocId
+ );
+
+/*!
+ * \brief Configure a memory segment for later use by MPC SDRAM allocations.
+ *
+ * \exception TBD
+ * \return TBD
+ *
+ * \warning
+ *
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+PUBLIC t_cm_error CM_ENGINE_AddMpcSdramSegment(
+ const t_nmf_memory_segment *pDesc, //!< [in] Memory segment description.
+ t_cfg_allocator_id *allocId, //!< [out] Identifier of the created allocator.
+ const char *memoryname //!< [in] Memory purpose name
+ );
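+
+/*
+ * Non-normative bring-up sketch (descriptor values are placeholders): a
+ * typical integration first initialises the CM, then declares the MPC SDRAM
+ * segment(s), then configures each MPC core with the resulting allocator id:
+ *
+ *   t_cfg_allocator_id sdramAllocId;
+ *
+ *   CM_ENGINE_Init(&hwMappingDesc, &configDesc);
+ *   CM_ENGINE_AddMpcSdramSegment(&sdramSeg, &sdramAllocId, "mpc-sdram");
+ *   // ... then CM_ENGINE_ConfigureMediaProcessorCore() for each core,
+ *   //     passing sdramAllocId for the code and data segments ...
+ */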
+
+/********************************************************************************/
+/* Destruction Component Manager API prototypes */
+/********************************************************************************/
+/*!
+ * \brief Destruction part
+ *
+ * This routine destroys and releases all resources used by the Component Manager.
+ *
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+PUBLIC void CM_ENGINE_Destroy(void);
+
+
+#endif /* CONTROL_CONFIGURATION_ENGINE_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/api/control/control_engine.h b/drivers/staging/nmf-cm/cm/engine/api/control/control_engine.h
new file mode 100644
index 00000000000..1d823b27fc1
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/control/control_engine.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief CM Engine API.
+ *
+ * This file contains the Component Manager Engine API.
+ */
+/*!
+ * \defgroup CM_ENGINE_CONTROL_API CM Engine Control API
+ * \note This API is not for OS integrator, it's only for low level system integration.
+ * \ingroup CM_ENGINE_MODULE
+ */
+
+#ifndef CM_CONTROL_H_
+#define CM_CONTROL_H_
+
+#include <cm/engine/api/control/configuration_engine.h>
+
+#include <cm/engine/api/control/irq_engine.h>
+
+#include <cm/engine/api/control/power_engine.h>
+
+#endif /*CM_CONTROL_H_*/
diff --git a/drivers/staging/nmf-cm/cm/engine/api/control/irq_engine.h b/drivers/staging/nmf-cm/cm/engine/api/control/irq_engine.h
new file mode 100644
index 00000000000..e3974764e91
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/control/irq_engine.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief NMF API for interrupt handler.
+ *
+ * This file contains the Component Manager API for interrupt handler.
+ */
+#ifndef CONTROL_IRQ_ENGINE_H
+#define CONTROL_IRQ_ENGINE_H
+
+#include <share/inc/nmf.h>
+#include <cm/inc/cm_type.h>
+#include <nmf/inc/service_type.h>
+#include <ee/api/trace.idt>
+
+/*!
+ * \brief MPCs -> HOST communication handler
+ *
+ * This routine shall be integrated as interrupt handler into the OS
+ *
+ * If the given Media Processor Core has been configured (through CM_ConfigureMediaProcessorCore()) as using \ref LOCAL_SEMAPHORES, then
+ * the NMF communication mechanism will use the embedded MMDSP macrocell semaphore,
+ * so CM_ProcessMpcEvent(<\e coreId>) shall be called under ISR connected to local MMDSP IRQ0, with the related \e coreId as parameter.
+ *
+ * If the given Media Processor Core has been configured (through CM_ConfigureMediaProcessorCore()) as using \ref SYSTEM_SEMAPHORES, then
+ * the NMF communication mechanism will use the shared system HW Semaphores,
+ * so CM_ProcessMpcEvent(\ref ARM_CORE_ID) shall be called under ISR connected to shared HW Sem Host IRQ, with \ref ARM_CORE_ID as parameter.
+ *
+ * NB: A Media Processor Core belonging to the distribution pool shall be configured with \ref SYSTEM_SEMAPHORES
+ *
+ * \see t_nmf_semaphore_type_id description
+ *
+ * \param[in] coreId identification of the source of the interrupt
+ *
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+PUBLIC IMPORT_SHARED void CM_ProcessMpcEvent(t_nmf_core_id coreId);
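+
+/*
+ * Minimal, Linux-flavoured sketch (handler name is illustrative) of hooking
+ * CM_ProcessMpcEvent() into an interrupt handler for a core configured with
+ * SYSTEM_SEMAPHORES:
+ *
+ *   static irqreturn_t hwsem_irq_handler(int irq, void *dev_id)
+ *   {
+ *       CM_ProcessMpcEvent(ARM_CORE_ID);
+ *       return IRQ_HANDLED;
+ *   }
+ */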
+
+/*!
+ * \brief Service type
+ *
+ * \note We use an enumeration in a structure since this description remains inside the kernel
+ * and we assume that everything in the kernel is compiled with the same compiler and options.
+ *
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+typedef enum { // Allowed since it stays inside the kernel
+ CM_MPC_SERVICE_NONE = 0, //!< No service found
+ CM_MPC_SERVICE_PANIC = 1, //!< Panic service found
+ CM_MPC_SERVICE_PRINT = 2, //!< Print service found
+ CM_MPC_SERVICE_TRACE = 3 //!< Trace service found
+} t_cm_service_type; //!< Service description type
+/*!
+ * \brief Service description data
+ *
+ *
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+typedef struct {
+ union {
+ t_nmf_panic_data panic; //!< Panic description
+ struct {
+ t_uint32 dspAddress;
+ t_uint32 value1;
+ t_uint32 value2;
+ } print; //!< Printf like description
+ } u; //!< Union of service description
+} t_cm_service_description;
+
+/*!
+ * \brief MPC Panic handler
+ *
+ * This routine shall be integrated as an interrupt handler into the OS.
+ *
+ * CM_getServiceDescription() shall be called under the ISR connected to the local MMDSP IRQ1, with the related \e coreId as parameter.
+ *
+ * \param[in] coreId identification of the source of the interrupt
+ * \param[out] srcType Pointer on service type
+ * \param[out] srcDescr Pointer on service description
+ *
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_getServiceDescription(
+ t_nmf_core_id coreId,
+ t_cm_service_type *srcType,
+ t_cm_service_description *srcDescr);
+
+/*!
+ * \brief Read a null terminated string inside an MPC
+ *
+ * This routine can be used to read the MPC string given as parameter during a CM_MPC_SERVICE_PRINT
+ *
+ * \param[in] coreId Identification of the core from which to read the string
+ * \param[in] dspAddress Address of the string in the MPC
+ * \param[out] buffer Buffer in which the null terminated string is returned
+ * \param[in] bufferSize Buffer size
+ *
+ * \ingroup CM_ENGINE_CONTROL_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ReadMPCString(
+ t_nmf_core_id coreId,
+ t_uint32 dspAddress,
+ char * buffer,
+ t_uint32 bufferSize);
+
+typedef enum {
+ CM_MPC_TRACE_NONE = 0,
+ CM_MPC_TRACE_READ = 1,
+ CM_MPC_TRACE_READ_OVERRUN = 2
+} t_cm_trace_type;
+
+PUBLIC IMPORT_SHARED t_cm_trace_type CM_ENGINE_GetNextTrace(
+ t_nmf_core_id coreId,
+ struct t_nmf_trace *trace);
+
+#endif /* CONTROL_IRQ_ENGINE_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/api/domain_engine.h b/drivers/staging/nmf-cm/cm/engine/api/domain_engine.h
new file mode 100644
index 00000000000..7cc6f33ed90
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/domain_engine.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief Public Component Manager Domain User Engine API.
+ *
+ * This file contains the Component Manager Engine API for manipulating domains.
+ *
+ */
+
+#ifndef __INC_DOMAIN_ENGINE_H
+#define __INC_DOMAIN_ENGINE_H
+
+#include <cm/engine/memory/inc/domain_type.h>
+
+/*!
+ * \brief Create a domain.
+ *
+ * Create a memory domain for use in the CM for component instantiation and memory allocation.
+ *
+ * \param[in] client Id of the client.
+ * \param[in] domain Description of domain memories.
+ * \param[out] handle Identifier of the created domain
+ *
+ * \exception CM_INVALID_DOMAIN_DEFINITION
+ * \exception CM_INTERNAL_DOMAIN_OVERFLOW
+ * \exception CM_OK
+ *
+ * \return Error code.
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_CreateMemoryDomain(
+ const t_nmf_client_id client,
+ const t_cm_domain_memory *domain,
+ t_cm_domain_id *handle
+ );
+
+/*!
+ * \brief Create a scratch domain.
+ *
+ * Create a scratch memory domain. Scratch domains
+ * are used to perform overlapping allocations.
+ *
+ * \param[in] client Id of the client.
+ * \param[in] parentId Identifier of the parent domain.
+ * \param[in] domain Description of domain memories.
+ * \param[out] handle Identifier of the created domain
+ *
+ * \exception CM_INVALID_DOMAIN_DEFINITION
+ * \exception CM_INTERNAL_DOMAIN_OVERFLOW
+ * \exception CM_NO_MORE_MEMORY
+ * \exception CM_OK
+ *
+ * \return Error code.
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_CreateMemoryDomainScratch(
+ const t_nmf_client_id client,
+ const t_cm_domain_id parentId,
+ const t_cm_domain_memory *domain,
+ t_cm_domain_id *handle
+ );
+
+/*!
+ * \brief Destroy a memory domain.
+ *
+ * \param[in] handle Domain identifier to destroy.
+ *
+ * \exception CM_INVALID_DOMAIN_HANDLE
+ * \exception CM_OK
+ *
+ * \return Error code.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_DestroyMemoryDomain(
+ t_cm_domain_id handle);
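+
+/*
+ * Usage sketch (illustrative only): typical life cycle of a domain. How the
+ * t_cm_domain_memory descriptor is filled (memory banks, offsets, sizes) is
+ * platform integration data; 'desc' and 'clientId' are assumed to be prepared
+ * by the caller.
+ *
+ *   t_cm_domain_id domain;
+ *   t_cm_error err;
+ *
+ *   err = CM_ENGINE_CreateMemoryDomain(clientId, &desc, &domain);
+ *   if (err == CM_OK) {
+ *       // ... instantiate components / allocate memory in 'domain' ...
+ *       (void)CM_ENGINE_DestroyMemoryDomain(domain);
+ *   }
+ */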
+
+/*!
+ * \brief Destroy all domains belonging to a given client.
+ *
+ * \param[in] client Identifier of the client whose domains are destroyed.
+ *
+ * \return Error code.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_FlushMemoryDomains(
+ t_nmf_client_id client);
+
+/*!
+ * \brief Utility: retrieve the coreId for a given domain.
+ *
+ * \param[in] domainId Domain identifier.
+ * \param[out] coreId Core identifier.
+ *
+ * \exception CM_INVALID_DOMAIN_HANDLE Invalid domain handle
+ * \exception CM_OK
+ *
+ * \return Error code.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetDomainCoreId(const t_cm_domain_id domainId, t_nmf_core_id *coreId);
+
+#endif /* __INC_DOMAIN_ENGINE_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/api/executive_engine_mgt_engine.h b/drivers/staging/nmf-cm/cm/engine/api/executive_engine_mgt_engine.h
new file mode 100644
index 00000000000..9cb8bc1481b
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/executive_engine_mgt_engine.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief CM executive engine management Engine API.
+ *
+ * This file contains the Component Manager executive engine management Engine API.
+ */
+#ifndef CM_EXECUTIVE_ENGINE_MANAGEMENT_ENGINE_H_
+#define CM_EXECUTIVE_ENGINE_MANAGEMENT_ENGINE_H_
+
+#include <cm/inc/cm_type.h>
+
+/*!
+ * \brief Return executive engine handle for given core
+ *
+ * \param[in] domainId Domain identifying the core for which the executive engine handle is requested.
+ * \param[out] executiveEngineHandle Executive engine instance (null if the executive engine is not loaded)
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetExecutiveEngineHandle(
+ t_cm_domain_id domainId,
+ t_cm_instance_handle *executiveEngineHandle);
+
+#endif /*CM_EXECUTIVE_ENGINE_MANAGEMENT_ENGINE_H_*/
diff --git a/drivers/staging/nmf-cm/cm/engine/api/memory_engine.h b/drivers/staging/nmf-cm/cm/engine/api/memory_engine.h
new file mode 100644
index 00000000000..9f5e25b3ebf
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/memory_engine.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief Public Component Manager Memory User Engine API.
+ *
+ * This file contains the Component Manager Engine API for manipulating memory.
+ *
+ */
+
+#ifndef CM_MEMORY_ENGINE_H_
+#define CM_MEMORY_ENGINE_H_
+
+#include <cm/engine/memory/inc/domain_type.h>
+#include <cm/engine/memory/inc/memory_type.h>
+
+/*!
+ * \brief Allocate memory in a Media Processor Core memory
+ *
+ * \param[in] domainId
+ * \param[in] memType
+ * \param[in] size
+ * \param[in] memAlignment
+ * \param[out] pHandle
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_AllocMpcMemory(
+ t_cm_domain_id domainId,
+ t_nmf_client_id clientId, //!< [in] Client ID (aka PID)
+ t_cm_mpc_memory_type memType,
+ t_cm_size size,
+ t_cm_mpc_memory_alignment memAlignment,
+ t_cm_memory_handle *pHandle
+ );
+
+
+/*!
+ * \brief Free a MPC memory block.
+ *
+ * \param[in] handle
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_FreeMpcMemory(t_cm_memory_handle handle);
+
+/*!
+ * \brief Get the start address of the MPC memory block seen by the host CPU (physical and logical)
+ *
+ * The logical system address returned by this method is valid only in kernel space and the physical
+ * address is accessible only from kernel space too.
+ *
+ * \see OSMem "OS Memory management" for an integration example.
+ *
+ * \param[in] handle
+ * \param[out] pSystemAddress
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetMpcMemorySystemAddress(
+ t_cm_memory_handle handle,
+ t_cm_system_address *pSystemAddress);
+
+/*!
+ * \brief Get the start address of the memory block seen by the Media Processor Core
+ *
+ * \param[in] handle
+ * \param[out] pMpcAddress
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetMpcMemoryMpcAddress(
+ t_cm_memory_handle handle,
+ t_uint32 *pMpcAddress);
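+
+/*
+ * Usage sketch (illustrative only): allocate an MPC memory block, retrieve the
+ * address seen by the MPC, then release it. The values of domainId, clientId,
+ * memType, size and memAlignment are caller choices that depend on the platform
+ * memory map and are assumptions here.
+ *
+ *   t_cm_memory_handle handle;
+ *   t_uint32 mpcAddr;
+ *
+ *   if (CM_ENGINE_AllocMpcMemory(domainId, clientId, memType, size,
+ *                                memAlignment, &handle) == CM_OK) {
+ *       CM_ENGINE_GetMpcMemoryMpcAddress(handle, &mpcAddr);
+ *       // ... let the MPC use the block at mpcAddr ...
+ *       (void)CM_ENGINE_FreeMpcMemory(handle);
+ *   }
+ */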
+
+/*!
+ * \brief Get the memory status for given memory type of a given Media Processor Core
+ *
+ * \param[in] domainId
+ * \param[in] memType
+ * \param[out] pStatus
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetMpcMemoryStatus(
+ t_cm_domain_id domainId,
+ t_cm_mpc_memory_type memType,
+ t_cm_allocator_status *pStatus);
+
+#endif /* CM_MEMORY_ENGINE_H_ */
+
diff --git a/drivers/staging/nmf-cm/cm/engine/api/migration_engine.h b/drivers/staging/nmf-cm/cm/engine/api/migration_engine.h
new file mode 100644
index 00000000000..77a266d4459
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/migration_engine.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#ifndef CM_MIGRATION_ENGINE_H
+#define CM_MIGRATION_ENGINE_H
+
+#include <cm/inc/cm_type.h>
+#include <cm/engine/memory/inc/domain_type.h>
+
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_Migrate(const t_cm_domain_id srcShared, const t_cm_domain_id src, const t_cm_domain_id dst);
+
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_Unmigrate(void);
+
+#endif /* CM_MIGRATION_ENGINE_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/api/perfmeter_engine.h b/drivers/staging/nmf-cm/cm/engine/api/perfmeter_engine.h
new file mode 100644
index 00000000000..bead49dc81e
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/perfmeter_engine.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief CM Performance Meter Engine API.
+ *
+ * This file contains the Component Manager Performance Meter Engine API.
+ */
+#ifndef CM_ENGINE_PERFMETER_ENGINE_H_
+#define CM_ENGINE_PERFMETER_ENGINE_H_
+
+#include <cm/engine/perfmeter/inc/perfmeter_type.h>
+
+/*!
+ * \brief Get the MPC CPU load
+ *
+ * \param[in] coreId Identification of the MPC from which the CPU load is read
+ * \param[out] mpcLoadCounter Filled with the MPC CPU load counter values on success
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_getMpcLoadCounter(
+ t_nmf_core_id coreId,
+ t_cm_mpc_load_counter *mpcLoadCounter);
+
+/*!
+ * \brief Get the MPC CPU load
+ * Same as \ref CM_ENGINE_getMpcLoadCounter() but without taking the lock
+ *
+ * \param[in] coreId Identification of the MPC from which the CPU load is read
+ * \param[out] mpcLoadCounter Filled with the MPC CPU load counter values on success
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_GetMpcLoadCounter(
+ t_nmf_core_id coreId,
+ t_cm_mpc_load_counter *mpcLoadCounter);
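+
+/*
+ * Usage sketch (illustrative only): periodic sampling of an MPC load counter.
+ * The layout of t_cm_mpc_load_counter and how the sample is reported are not
+ * defined here and are left to the integration.
+ *
+ *   t_cm_mpc_load_counter load;
+ *
+ *   if (CM_ENGINE_getMpcLoadCounter(coreId, &load) == CM_OK) {
+ *       // export 'load' through a sysfs/debugfs entry or a trace channel
+ *   }
+ */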
+
+#endif /*CM_ENGINE_PERFMETER_ENGINE_H_*/
diff --git a/drivers/staging/nmf-cm/cm/engine/api/repository_mgt_engine.h b/drivers/staging/nmf-cm/cm/engine/api/repository_mgt_engine.h
new file mode 100644
index 00000000000..b63c60d85eb
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/api/repository_mgt_engine.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief Repository Component Manager User Engine API.
+ *
+ * This file contains the Component Manager Engine API for manipulating the components files.
+ */
+
+#ifndef REPOSITORY_MGT_ENGINE_H_
+#define REPOSITORY_MGT_ENGINE_H_
+
+#include <inc/nmf-limits.h>
+#include <cm/engine/repository_mgt/inc/repository_type.h>
+
+/*!
+ * \brief Get the name(s) of the component(s) to load.
+ *
+ * \param[in] client Handle of the client component (optional)
+ * \param[in] requiredItfClientName Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH) (optional).
+ * \param[in] server Handle of the server component (optional)
+ * \param[in] providedItfServerName Null terminated string (Max size=\ref MAX_INTERFACE_NAME_LENGTH) (optional).
+ * \param[out] fileList List of required component(s).
+ * \param[in,out] listSize Initial size of the list as input. Updated with the number of entries really used.
+ * \param[out] type Interface type of the client required or server provided interface. Null terminated string (Max size=\ref MAX_INTERFACE_TYPE_NAME_LENGTH) (optional).
+ * \param[out] methodNumber Number of methods in the interface type of the client required interface (only used when called from CM_BindComponentToUser) (optional)
+ *
+ * \note It returns the component(s) name(s) to load, depending on the first four parameters.
+ *
+ * - If all 4 are NULL, it returns the name of the Executive Engine components to load
+ * - If 'client' is NULL, it returns the name of the required components for a Bind From CMCore.
+ * - If 'server' is NULL, it returns the name of the required components for a Bind To CMCore.
+ * - If none is NULL, it returns the name of the required components for an asynchronous binding
+ *
+ * The names are returned in fileList, whose initial size is specified in listSize.
+ * (listSize must be the number of provided entries, each of \ref MAX_INTERFACE_TYPE_NAME_LENGTH length).
+ * If not enough space is provided, CM_NO_MORE_MEMORY is returned.
+ *
+ * listSize is updated with the number of entries really filled.
+ *
+ * This method is also used to retrieve the interface type when called from CM_BindComponentToUser and CM_BindComponentFromUser
+ * and the number of methods when called from CM_BindComponentToUser.
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_GetRequiredComponentFiles(
+ // IN
+ t_action_to_do action,
+ const t_cm_instance_handle client,
+ const char *requiredItfClientName,
+ const t_cm_instance_handle server,
+ const char *providedItfServerName,
+ // OUT component to be pushed
+ char fileList[][MAX_INTERFACE_TYPE_NAME_LENGTH],
+ // IN max component allowed to be pushed
+ t_uint32 listSize,
+ // OUT interface information
+ char type[MAX_INTERFACE_TYPE_NAME_LENGTH],
+ t_uint32 *methodNumber);
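+
+/*
+ * Usage sketch (illustrative only): querying the component files needed for the
+ * Executive Engine (the "all NULL" case of the note above). The 'action' value
+ * and the use of 0/NULL to denote the absent handles and names are assumptions
+ * of this sketch.
+ *
+ *   char files[4][MAX_INTERFACE_TYPE_NAME_LENGTH];
+ *   char itfType[MAX_INTERFACE_TYPE_NAME_LENGTH];
+ *   t_uint32 nbMethods;
+ *
+ *   if (CM_ENGINE_GetRequiredComponentFiles(action, 0, NULL, 0, NULL,
+ *                                           files, 4, itfType, &nbMethods) == CM_OK) {
+ *       // each filled entry of 'files' names one component to push in the cache
+ *   }
+ */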
+
+/*!
+ * \brief Push a component into the CM Component Cache.
+ *
+ * \param[in] name Component name, null terminated string (Max size=\ref MAX_INTERFACE_TYPE_NAME_LENGTH)
+ * \param[in] data Pointer to _user_ data of the component.
+ * \param[in] size Size of the data.
+ *
+ * \note Push a component into the Component Cache.
+ * The 'data' must be provided in such a way that it can be freed by a call to OSAL_Free().
+ * The caller does not need to, and must NOT, free the data, even in case of failure.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_PushComponent(const char *name, const void *data, t_cm_size size);
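+
+/*
+ * Usage sketch (illustrative only) of the ownership rule stated above: the data
+ * handed to CM_ENGINE_PushComponent() is allocated with OSAL_Alloc() and is
+ * never freed by the caller. The origin of 'image' and 'imageSize' (e.g. a
+ * firmware request) is an assumption of this sketch.
+ *
+ *   void *data = OSAL_Alloc(imageSize);
+ *
+ *   if (data != NULL) {
+ *       memcpy(data, image, imageSize);
+ *       (void)CM_ENGINE_PushComponent(name, data, imageSize);
+ *       // no OSAL_Free(data) here, whatever the return code
+ *   }
+ */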
+
+/*!
+ * \brief Remove a component from the CM Component Cache.
+ *
+ * \param[in] name Component name, null terminated string (Max size=\ref MAX_INTERFACE_TYPE_NAME_LENGTH)
+ *
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_cm_error CM_ENGINE_ReleaseComponent (const char *name);
+
+/*!
+ * \brief Check if the CM Component Cache is empty.
+ *
+ * \return a boolean value TRUE or FALSE.
+ * \ingroup CM_ENGINE_API
+ */
+PUBLIC IMPORT_SHARED t_bool CM_ENGINE_IsComponentCacheEmpty(void);
+#endif /*REPOSITORY_MGT_ENGINE_H_*/
diff --git a/drivers/staging/nmf-cm/cm/engine/communication/fifo/inc/nmf_fifo_arm.h b/drivers/staging/nmf-cm/cm/engine/communication/fifo/inc/nmf_fifo_arm.h
new file mode 100644
index 00000000000..0463d6a71a5
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/communication/fifo/inc/nmf_fifo_arm.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ */
+#ifndef __INC_NMF_FIFO_ARM
+#define __INC_NMF_FIFO_ARM
+
+#include <cm/inc/cm_type.h>
+#include <share/communication/inc/nmf_fifo_desc.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/component/inc/instance.h>
+#include <cm/engine/memory/inc/domain.h>
+
+/*
+ * ARM Fifo descriptor (encapsulate the share one)
+ */
+typedef struct
+{
+ t_uint32 magic;
+ t_memory_handle chunkHandle;
+ t_nmf_core_id pusherCoreId;
+ t_nmf_core_id poperCoreId;
+ t_shared_addr dspAdress;
+ t_dsp_address_info dspAddressInfo;
+ t_nmf_fifo_desc *fifoDesc; //used for all fifo operations and systematically updated by the migrated offset (see cm_AllocEvent)
+ t_nmf_fifo_desc *fifoDescShadow; //shadow desc, used to restore state after migration and to update the real desc
+
+ // ExtendedField
+ t_memory_handle extendedFieldHandle;
+ t_shared_field *extendedField;
+} t_nmf_fifo_arm_desc;
+
+PUBLIC t_uint32 fifo_isFifoIdValid(t_nmf_fifo_arm_desc *pArmFifo);
+PUBLIC t_nmf_fifo_arm_desc* fifo_alloc(
+ t_nmf_core_id pusherCoreId, t_nmf_core_id poperCoreId,
+ t_uint16 size_in_16bit, t_uint16 nbElem, t_uint16 nbExtendedSharedFields,
+ t_dsp_memory_type_id memType, t_dsp_memory_type_id memExtendedFieldType, t_cm_domain_id domainId);
+PUBLIC void fifo_free(t_nmf_fifo_arm_desc *pArmFifo);
+PUBLIC t_uint16 fifo_normalizeDepth(t_uint16 requestedDepth);
+
+PUBLIC t_shared_addr fifo_getAndAckNextElemToWritePointer(t_nmf_fifo_arm_desc *pArmFifo);
+PUBLIC t_shared_addr fifo_getAndAckNextElemToReadPointer(t_nmf_fifo_arm_desc *pArmFifo);
+PUBLIC t_shared_addr fifo_getNextElemToWritePointer(t_nmf_fifo_arm_desc *pArmFifo);
+PUBLIC t_shared_addr fifo_getNextElemToReadPointer(t_nmf_fifo_arm_desc *pArmFifo);
+PUBLIC void fifo_acknowledgeRead(t_nmf_fifo_arm_desc *pArmFifo);
+PUBLIC void fifo_acknowledgeWrite(t_nmf_fifo_arm_desc *pArmFifo);
+PUBLIC void fifo_coms_acknowledgeWriteAndInterruptGeneration(t_nmf_fifo_arm_desc *pArmFifo);
+
+PUBLIC t_cm_error fifo_params_setSharedField(t_nmf_fifo_arm_desc *pArmFifo, t_uint32 sharedFieldIndex, t_shared_field value);
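+
+/*
+ * Usage sketch (illustrative only): the producer-side pattern used by the
+ * communication layer on top of this API: reserve a slot, fill it, then
+ * acknowledge the write and raise the popper interrupt. The payload layout
+ * inside the slot is an assumption.
+ *
+ *   t_shared_addr slot = fifo_getNextElemToWritePointer(fifo);
+ *
+ *   if (slot != 0) {
+ *       // write the element payload at 'slot'
+ *       fifo_coms_acknowledgeWriteAndInterruptGeneration(fifo);
+ *   }
+ */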
+
+#endif /* __INC_NMF_FIFO_ARM */
diff --git a/drivers/staging/nmf-cm/cm/engine/communication/fifo/src/nmf_fifo_arm.c b/drivers/staging/nmf-cm/cm/engine/communication/fifo/src/nmf_fifo_arm.c
new file mode 100644
index 00000000000..48d7f9e9f03
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/communication/fifo/src/nmf_fifo_arm.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ */
+#include <share/communication/inc/nmf_fifo_desc.h>
+#include <cm/engine/semaphores/inc/semaphores.h>
+#include <cm/engine/component/inc/instance.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+#include "../inc/nmf_fifo_arm.h"
+
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/trace/inc/trace.h>
+
+/* define value of fifo magic number */
+#define NMF_FIFO_MAGIC_NB 0xF1F0BEEF
+
+PRIVATE t_uint16 fifo_getCount(
+ t_uint16 writeIndex,
+ t_uint16 readIndex,
+ t_uint16 fifoSize
+)
+{
+ if (writeIndex >= readIndex) {return writeIndex - readIndex;}
+ else {return fifoSize - readIndex + writeIndex;}
+}
+
+PRIVATE t_uint16 fifo_incrementIndex(
+ t_uint16 index,
+ t_uint16 wrappingValue
+)
+{
+ if (++index == wrappingValue) {index = 0;}
+
+ return index;
+}
+
+PUBLIC t_uint16 fifo_normalizeDepth(t_uint16 requestedDepth)
+{
+ /* with new implementation we don't align on power of two */
+ return requestedDepth;
+}
+
+PUBLIC t_nmf_fifo_arm_desc* fifo_alloc(
+ t_nmf_core_id pusherCoreId, t_nmf_core_id poperCoreId,
+ t_uint16 size_in_16bit, t_uint16 nbElem, t_uint16 nbExtendedSharedFields,
+ t_dsp_memory_type_id memType, t_dsp_memory_type_id memExtendedFieldType, t_cm_domain_id domainId)
+{
+ t_uint16 realNbElem = nbElem + 1;/* we need one more elem in new implementation */
+ t_uint16 sizeToAlloc = sizeof(t_nmf_fifo_desc) + ((size_in_16bit<<1)*realNbElem);
+ t_nmf_fifo_arm_desc *pArmFifoDesc;
+
+ pArmFifoDesc = (t_nmf_fifo_arm_desc*)OSAL_Alloc(sizeof (t_nmf_fifo_arm_desc));
+ if (pArmFifoDesc == NULL)
+ goto errorde;
+
+ pArmFifoDesc->chunkHandle = cm_DM_Alloc(domainId, memType,
+ (sizeToAlloc/2), CM_MM_ALIGN_2WORDS, TRUE); /* size in 16-bit since we use EXT16 memory */
+ if (pArmFifoDesc->chunkHandle == INVALID_MEMORY_HANDLE)
+ goto errorsh;
+
+ pArmFifoDesc->magic = NMF_FIFO_MAGIC_NB;
+ pArmFifoDesc->pusherCoreId = pusherCoreId;
+ pArmFifoDesc->poperCoreId = poperCoreId;
+
+ pArmFifoDesc->fifoDesc = (t_nmf_fifo_desc *)cm_DSP_GetHostLogicalAddress(pArmFifoDesc->chunkHandle);
+ cm_DSP_GetDspAddress(pArmFifoDesc->chunkHandle, &pArmFifoDesc->dspAdress);
+
+ pArmFifoDesc->fifoDescShadow = pArmFifoDesc->fifoDesc;
+ cm_DSP_GetDspDataAddressInfo(cm_DM_GetDomainCoreId(domainId), pArmFifoDesc->dspAdress, &pArmFifoDesc->dspAddressInfo);
+
+ pArmFifoDesc->extendedFieldHandle = INVALID_MEMORY_HANDLE;
+ pArmFifoDesc->extendedField = NULL;
+
+ pArmFifoDesc->fifoDesc->elemSize = size_in_16bit;
+ pArmFifoDesc->fifoDesc->fifoFullValue = nbElem;
+ pArmFifoDesc->fifoDesc->wrappingValue = realNbElem;
+
+ pArmFifoDesc->fifoDesc->semId = cm_SEM_Alloc(pusherCoreId, poperCoreId);
+ pArmFifoDesc->fifoDesc->readIndex = 0;
+ pArmFifoDesc->fifoDesc->writeIndex = 0;
+
+ LOG_INTERNAL(2, "\n##### Fifo alloc 0x%x (0x%x)\n\n", pArmFifoDesc, pArmFifoDesc->fifoDesc, 0, 0, 0, 0);
+
+ if (nbExtendedSharedFields >= 1)
+ {
+ if(poperCoreId == ARM_CORE_ID)
+ {
+ /* Optimization: Don't put the extended field in DSP memory since it is only used by the ARM when it is the popper */
+ pArmFifoDesc->extendedField = (t_shared_field*)OSAL_Alloc(nbExtendedSharedFields * sizeof(t_shared_field));
+ if (pArmFifoDesc->extendedField == NULL)
+ goto errorex;
+
+ pArmFifoDesc->fifoDesc->extendedField = (t_uint32)pArmFifoDesc->extendedField;
+ }
+ else
+ {
+ pArmFifoDesc->extendedFieldHandle = cm_DM_Alloc(domainId, memExtendedFieldType,
+ nbExtendedSharedFields * sizeof(t_shared_field) / 4, CM_MM_ALIGN_WORD, TRUE);
+ if (pArmFifoDesc->extendedFieldHandle == INVALID_MEMORY_HANDLE)
+ goto errorex;
+
+ pArmFifoDesc->extendedField = (t_shared_field*)cm_DSP_GetHostLogicalAddress(pArmFifoDesc->extendedFieldHandle);
+ cm_DSP_GetDspAddress(pArmFifoDesc->extendedFieldHandle, (t_uint32*)&pArmFifoDesc->fifoDesc->extendedField);
+ }
+
+ pArmFifoDesc->extendedField[EXTENDED_FIELD_BCTHIS_OR_TOP] = (t_shared_field)0;
+ }
+
+ return pArmFifoDesc;
+
+errorex:
+ (void)cm_DM_Free(pArmFifoDesc->chunkHandle, TRUE);
+errorsh:
+ OSAL_Free(pArmFifoDesc);
+errorde:
+ return NULL;
+}
+
+PUBLIC t_uint32 fifo_isFifoIdValid(t_nmf_fifo_arm_desc *pArmFifo)
+{
+ if (((t_uint32)pArmFifo & CM_MM_ALIGN_WORD) != 0) {return FALSE;}
+ if (pArmFifo->magic == NMF_FIFO_MAGIC_NB) {return TRUE;}
+ else {return FALSE;}
+}
+
+PUBLIC void fifo_free(t_nmf_fifo_arm_desc *pArmFifo)
+{
+ CM_ASSERT(pArmFifo->pusherCoreId != ARM_CORE_ID || pArmFifo->poperCoreId != ARM_CORE_ID);
+
+ pArmFifo->magic = ~NMF_FIFO_MAGIC_NB;
+
+ if(pArmFifo->extendedFieldHandle != INVALID_MEMORY_HANDLE)
+ (void)cm_DM_Free(pArmFifo->extendedFieldHandle, TRUE);
+ else if(pArmFifo->extendedField != NULL)
+ OSAL_Free(pArmFifo->extendedField);
+
+ (void)cm_DM_Free(pArmFifo->chunkHandle, TRUE);
+ OSAL_Free(pArmFifo);
+}
+
+PUBLIC t_shared_addr fifo_getAndAckNextElemToWritePointer(t_nmf_fifo_arm_desc *pArmFifo)
+{
+ t_shared_addr retValue;
+
+ retValue = fifo_getNextElemToWritePointer(pArmFifo);
+ if (retValue != 0)
+ {
+ fifo_acknowledgeWrite(pArmFifo);
+ }
+
+ return retValue;
+}
+
+PUBLIC t_shared_addr fifo_getAndAckNextElemToReadPointer(t_nmf_fifo_arm_desc *pArmFifo)
+{
+ t_shared_addr retValue;
+
+ retValue = fifo_getNextElemToReadPointer(pArmFifo);
+ if (retValue != 0)
+ {
+ fifo_acknowledgeRead(pArmFifo);
+ }
+
+ return retValue;
+}
+
+PUBLIC t_shared_addr fifo_getNextElemToWritePointer(t_nmf_fifo_arm_desc *pArmFifo)
+{
+ t_shared_addr retValue = 0;
+ t_nmf_fifo_desc *pDesc;
+ t_uint16 count;
+
+ if ((NULL == pArmFifo) || (NULL == (pDesc = pArmFifo->fifoDesc)))
+ return 0;
+
+ count = fifo_getCount(pDesc->writeIndex, pDesc->readIndex,pDesc->wrappingValue);
+ if (count < pDesc->fifoFullValue)
+ {
+ retValue = ((t_shared_addr)pDesc + sizeof(t_nmf_fifo_desc) + (pDesc->writeIndex*(pDesc->elemSize<<1)));
+ }
+
+ return retValue;
+}
+
+PUBLIC t_shared_addr fifo_getNextElemToReadPointer(t_nmf_fifo_arm_desc *pArmFifo)
+{
+ t_shared_addr retValue = 0;
+ t_nmf_fifo_desc *pDesc;
+ t_uint16 count;
+
+ if ((NULL == pArmFifo) || (NULL == (pDesc = pArmFifo->fifoDesc)))
+ return 0;
+
+ count = fifo_getCount(pDesc->writeIndex, pDesc->readIndex,pDesc->wrappingValue);
+ if (count != 0)
+ {
+ retValue = ((t_shared_addr)pDesc+ sizeof(t_nmf_fifo_desc) + (pDesc->readIndex*(pDesc->elemSize<<1)));
+ }
+
+ return retValue;
+}
+
+PUBLIC void fifo_acknowledgeRead(t_nmf_fifo_arm_desc *pArmFifo)
+{
+ t_nmf_fifo_desc *pDesc = pArmFifo->fifoDesc;
+
+ pDesc->readIndex = fifo_incrementIndex(pDesc->readIndex, pDesc->wrappingValue);
+}
+
+PUBLIC void fifo_acknowledgeWrite(t_nmf_fifo_arm_desc *pArmFifo)
+{
+ t_nmf_fifo_desc *pDesc = pArmFifo->fifoDesc;
+
+ pDesc->writeIndex = fifo_incrementIndex(pDesc->writeIndex, pDesc->wrappingValue);
+}
+
+PUBLIC void fifo_coms_acknowledgeWriteAndInterruptGeneration(t_nmf_fifo_arm_desc *pArmFifo)
+{
+ t_nmf_fifo_desc *pDesc = pArmFifo->fifoDesc;
+
+ fifo_acknowledgeWrite(pArmFifo);
+ //Be sure the fifo has been updated before generating the irq
+ OSAL_mb();
+ cm_SEM_GenerateIrq[pArmFifo->poperCoreId](pArmFifo->poperCoreId, pDesc->semId);
+ //cm_SEM_Take[pArmFifo->poperCoreId](pArmFifo->poperCoreId, pDesc->semId);
+ //cm_SEM_GiveWithInterruptGeneration[pArmFifo->poperCoreId](pArmFifo->poperCoreId, pDesc->semId);
+}
+
+PUBLIC t_cm_error fifo_params_setSharedField(t_nmf_fifo_arm_desc *pArmFifo, t_uint32 sharedFieldIndex, t_shared_field value)
+{
+ pArmFifo->extendedField[sharedFieldIndex] = value;
+
+ return CM_OK;
+}
+
diff --git a/drivers/staging/nmf-cm/cm/engine/communication/inc/communication.h b/drivers/staging/nmf-cm/cm/engine/communication/inc/communication.h
new file mode 100644
index 00000000000..53ab87b7096
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/communication/inc/communication.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Components Management internal methods - Communication part.
+ *
+ */
+#ifndef __INC_NMF_COM
+#define __INC_NMF_COM
+
+#include <cm/inc/cm_type.h>
+#include <cm/engine/communication/fifo/inc/nmf_fifo_arm.h>
+#include <cm/engine/memory/inc/memory.h>
+
+#include <cm/engine/communication/inc/communication_type.h>
+
+extern t_dsp_memory_type_id comsLocation;
+extern t_dsp_memory_type_id paramsLocation;
+extern t_dsp_memory_type_id extendedFieldLocation;
+
+PUBLIC t_cm_error cm_COM_Init(t_nmf_coms_location comsLocation);
+PUBLIC t_cm_error cm_COM_AllocateMpc(t_nmf_core_id coreId);
+PUBLIC void cm_COM_InitMpc(t_nmf_core_id coreId);
+PUBLIC void cm_COM_FreeMpc(t_nmf_core_id coreId);
+
+PUBLIC t_cm_error cm_PushEventTrace(t_nmf_fifo_arm_desc*, t_event_params_handle h, t_uint32 methodIndex, t_uint32 isTrace);
+PUBLIC t_cm_error cm_PushEvent(t_nmf_fifo_arm_desc *pArmFifo, t_event_params_handle h, t_uint32 methodIndex);
+PUBLIC void cm_AcknowledgeEvent(t_nmf_fifo_arm_desc *pArmFifo);
+PUBLIC t_event_params_handle cm_AllocEvent(t_nmf_fifo_arm_desc *pArmFifo);
+
+/*!
+ * \internal
+ * \brief Definition of custom value for userTHIS parameter of PostDfc OSAL call
+ *
+ * This value is used as 1st parameter of a pPostDfc call to indicate that a given interrupt is linked to an internal Component Manager event
+ */
+#define NMF_INTERNAL_USERTHIS ((void*)MASK_ALL32)
+
+typedef void (*t_callback_method)(t_nmf_core_id coreId, t_event_params_handle pParam);
+
+#endif /* __INC_NMF_COM */
diff --git a/drivers/staging/nmf-cm/cm/engine/communication/inc/communication_type.h b/drivers/staging/nmf-cm/cm/engine/communication/inc/communication_type.h
new file mode 100644
index 00000000000..53a6ff39b07
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/communication/inc/communication_type.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Communication Component Manager API type.
+ */
+#ifndef COMMUNICATION_TYPE_H_
+#define COMMUNICATION_TYPE_H_
+
+#include <cm/inc/cm_type.h>
+
+
+/*!
+ * \brief Buffer type used for (un)marshalling parameters.
+ *
+ * This buffer type is used for (un)marshalling parameters. It can either be a
+ * shared memory buffer (ESRAM or SDRAM) or a pure host software memory (stack).
+ *
+ * \ingroup CM_ENGINE_API
+ */
+typedef t_uint16 *t_event_params_handle;
+
+/*!
+ * \brief Component manager handle to Host -> MPC communication.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+typedef t_uint32 t_cm_bf_host2mpc_handle;
+
+/*!
+ * \brief Component manager handle to MPC -> Host communication.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+typedef t_uint32 t_cm_bf_mpc2host_handle;
+
+/*!
+ * \brief Component manager proxy handle to MPC -> Host skeleton context.
+ *
+ * \ingroup CM_ENGINE_API
+ */
+typedef t_uint32 t_nmf_mpc2host_handle;
+
+/*!
+ * @defgroup t_nmf_coms_location t_nmf_coms_location
+ * \brief Definition of the location of the internal CM communication objects
+ *
+ * @{
+ * \ingroup CM_ENGINE_API
+ */
+typedef t_uint8 t_nmf_coms_location; //!< Fake enumeration type
+#define COMS_IN_ESRAM ((t_nmf_coms_location)0) //!< All coms objects (coms and params fifos) will be in embedded RAM
+#define COMS_IN_SDRAM ((t_nmf_coms_location)1) //!< All coms objects (coms and params fifos) will be in external RAM
+/* @} */
+
+#endif /*COMMUNICATION_TYPE_H_*/
diff --git a/drivers/staging/nmf-cm/cm/engine/communication/src/communication.c b/drivers/staging/nmf-cm/cm/engine/communication/src/communication.c
new file mode 100644
index 00000000000..ead1e090d7c
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/communication/src/communication.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#include <cm/inc/cm_type.h>
+#include "../inc/communication.h"
+#include <share/communication/inc/communication_fifo.h>
+#include <cm/engine/api/control/irq_engine.h>
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/communication/fifo/inc/nmf_fifo_arm.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/memory/inc/migration.h>
+#include <cm/engine/semaphores/inc/semaphores.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/trace/inc/xtitrace.h>
+
+#include <cm/engine/component/inc/initializer.h>
+
+#define ARM_DSP_EVENT_FIFO_SIZE 128
+
+t_dsp_memory_type_id comsLocation;
+t_dsp_memory_type_id paramsLocation;
+t_dsp_memory_type_id extendedFieldLocation;
+
+#define __DEBUG
+
+#ifdef __DEBUG
+PRIVATE volatile t_uint32 armdspCounter = 0;
+PRIVATE volatile t_uint32 armdspIrqCounter = 0;
+PRIVATE volatile t_uint32 dsparmCounter = 0;
+PRIVATE volatile t_uint32 dsparmIrqCounter = 0;
+#endif /* __DEBUG */
+
+t_nmf_fifo_arm_desc* mpc2mpcComsFifoId[NB_CORE_IDS][NB_CORE_IDS];
+
+PRIVATE const t_callback_method internalHostJumptable[] = {
+ processAsyncAcknowledge,
+ processAsyncAcknowledge,
+ processAsyncAcknowledge,
+ processSyncAcknowledge,
+ processAsyncAcknowledge,
+ processAsyncAcknowledge,
+ processAsyncAcknowledge,
+ processSyncAcknowledge,
+ processAsyncAcknowledge,
+ processSyncAcknowledge,
+ processSyncAcknowledge, // Start sync
+ processSyncAcknowledge // Stop sync
+};
+
+PUBLIC t_cm_error cm_COM_Init(t_nmf_coms_location _comsLocation)
+{
+ t_nmf_core_id coreId, localCoreId;
+
+ /*
+ * Configure the default location of the coms and params fifos (configured by the user)
+ */
+ switch(_comsLocation)
+ {
+ case COMS_IN_SDRAM:
+ comsLocation = SDRAM_EXT16;
+ paramsLocation = SDRAM_EXT16;
+ extendedFieldLocation = SDRAM_EXT24;
+ break;
+ case COMS_IN_ESRAM:
+ comsLocation = ESRAM_EXT16;
+ paramsLocation = ESRAM_EXT16;
+ extendedFieldLocation = ESRAM_EXT24;
+ break;
+ default: CM_ASSERT(0);
+ }
+
+ for (coreId = ARM_CORE_ID; coreId < NB_CORE_IDS; coreId++)
+ {
+ for (localCoreId = ARM_CORE_ID; localCoreId < NB_CORE_IDS; localCoreId++)
+ {
+ mpc2mpcComsFifoId[coreId][localCoreId] = NULL;
+ }
+ }
+
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_COM_AllocateMpc(t_nmf_core_id coreId)
+{
+ t_nmf_core_id localCoreId;
+
+ /*
+ * Allocation of the coms fifo with neighbor MPCs
+ * if they are already initialized (known through initializedCoresMask)
+ */
+ for (localCoreId = ARM_CORE_ID; localCoreId < NB_CORE_IDS; localCoreId++)
+ {
+ if (localCoreId == coreId) continue; /* no coms fifo with itself ;) */
+ if(cm_DSP_GetState(localCoreId)->state != MPC_STATE_BOOTED) continue;
+
+ /*
+ * coms fifo from other initialized MPCs to the given one
+ */
+ if (mpc2mpcComsFifoId[coreId][localCoreId] != NULL) continue; /* coms fifo already allocated */
+
+ mpc2mpcComsFifoId[coreId][localCoreId] = fifo_alloc(
+ coreId, localCoreId,
+ EVENT_ELEM_SIZE_IN_BYTE/2, ARM_DSP_EVENT_FIFO_SIZE,
+ 0, comsLocation, extendedFieldLocation, cm_DSP_GetState(coreId)->domainEE
+ );
+ if (mpc2mpcComsFifoId[coreId][localCoreId] == NULL)
+ goto oom;
+
+ /*
+ * coms fifo from the given MPC to the other initialized ones
+ */
+ if (mpc2mpcComsFifoId[localCoreId][coreId] != NULL) continue; /* coms fifo already allocated */
+
+ mpc2mpcComsFifoId[localCoreId][coreId] = fifo_alloc(
+ localCoreId, coreId,
+ EVENT_ELEM_SIZE_IN_BYTE/2, ARM_DSP_EVENT_FIFO_SIZE,
+ 0, comsLocation, extendedFieldLocation, cm_DSP_GetState(coreId)->domainEE
+ );
+ if (mpc2mpcComsFifoId[localCoreId][coreId] == NULL)
+ goto oom;
+ }
+
+ return CM_OK;
+oom:
+ cm_COM_FreeMpc(coreId);
+ ERROR("CM_NO_MORE_MEMORY: fifo_alloc() failed in cm_COM_AllocateMpc()\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+}
+
+PUBLIC void cm_COM_InitMpc(t_nmf_core_id coreId)
+{
+ // Here we assume that attributes are in XRAM, thus we don't need the memory type
+ t_uint32* toNeighborsComsFifoIdSharedVar[NB_CORE_IDS];
+ t_uint32* fromNeighborsComsFifoIdSharedVar[NB_CORE_IDS];
+
+ t_nmf_core_id localCoreId;
+
+ /*
+ * Initialization of the core identifier of a given Executive Engine
+ * Used in the communication scheme, so the init is done here; it may be moved into the EE loading module later.
+ */
+ cm_writeAttribute(cm_EEM_getExecutiveEngine(coreId)->instance, "semaphores/myCoreId", coreId);
+
+ /*
+ * Initialization of the coms fifo with the Host for the given coreId
+ */
+ for (localCoreId = FIRST_MPC_ID/* NOT ARM*/; localCoreId <= LAST_CORE_ID; localCoreId++)
+ {
+ // Note: This loop will also include coreId in order to fill
+ if(cm_DSP_GetState(localCoreId)->state != MPC_STATE_BOOTED) continue;/* no coms fifo initialisation with a non-booted MPC */
+
+ toNeighborsComsFifoIdSharedVar[localCoreId] = (t_uint32*)cm_getAttributeHostAddr(cm_EEM_getExecutiveEngine(localCoreId)->instance, "comms/toNeighborsComsFifoId");
+
+ fromNeighborsComsFifoIdSharedVar[localCoreId] = (t_uint32*)cm_getAttributeHostAddr(cm_EEM_getExecutiveEngine(localCoreId)->instance, "comms/fromNeighborsComsFifoId");
+ }
+
+ toNeighborsComsFifoIdSharedVar[coreId][ARM_CORE_ID] = mpc2mpcComsFifoId[coreId][ARM_CORE_ID]->dspAdress;
+ fromNeighborsComsFifoIdSharedVar[coreId][ARM_CORE_ID] = mpc2mpcComsFifoId[ARM_CORE_ID][coreId]->dspAdress;
+
+ for (localCoreId = FIRST_MPC_ID/* NOT ARM*/; localCoreId <= LAST_CORE_ID; localCoreId++)
+ {
+ if (localCoreId == coreId) continue; /* no coms fifo with itself ;) */
+ if(cm_DSP_GetState(localCoreId)->state != MPC_STATE_BOOTED) continue;/* no coms fifo initialisation with a non-booted MPC */
+
+ toNeighborsComsFifoIdSharedVar[coreId][localCoreId] = mpc2mpcComsFifoId[coreId][localCoreId]->dspAdress;
+ fromNeighborsComsFifoIdSharedVar[localCoreId][coreId] = mpc2mpcComsFifoId[coreId][localCoreId]->dspAdress;
+
+ fromNeighborsComsFifoIdSharedVar[coreId][localCoreId] = mpc2mpcComsFifoId[localCoreId][coreId]->dspAdress;
+ toNeighborsComsFifoIdSharedVar[localCoreId][coreId] = mpc2mpcComsFifoId[localCoreId][coreId]->dspAdress;
+ }
+}
+
+PUBLIC void cm_COM_FreeMpc(t_nmf_core_id coreId)
+{
+ t_nmf_core_id localCoreId;
+
+ for (localCoreId = ARM_CORE_ID; localCoreId < NB_CORE_IDS; localCoreId++)
+ {
+ /*
+ * Free coms fifo from other initialized MPCs to the given one
+ */
+ if ( mpc2mpcComsFifoId[coreId][localCoreId] != NULL)
+ {
+ fifo_free(mpc2mpcComsFifoId[coreId][localCoreId]);
+ mpc2mpcComsFifoId[coreId][localCoreId] = NULL;
+ }
+
+ /*
+ * Free coms fifo from the given MPC to the other initialized ones
+ */
+ if ( mpc2mpcComsFifoId[localCoreId][coreId] != NULL)
+ {
+ fifo_free(mpc2mpcComsFifoId[localCoreId][coreId]);
+ mpc2mpcComsFifoId[localCoreId][coreId] = NULL;
+ }
+ }
+}
+
+PUBLIC t_event_params_handle cm_AllocEvent(t_nmf_fifo_arm_desc *pArmFifo)
+
+{
+ t_uint32 retValue;
+
+ //migration impacts the ARM-side address of the fifoDesc,
+ //thus translate the fifo desc address systematically.
+ pArmFifo->fifoDesc = (t_nmf_fifo_desc*)cm_migration_translate(pArmFifo->dspAddressInfo.segmentType, (t_shared_addr)pArmFifo->fifoDescShadow);
+
+ retValue = fifo_getAndAckNextElemToWritePointer(pArmFifo);
+
+ return (t_event_params_handle)retValue;
+}
+
+PUBLIC void cm_AcknowledgeEvent(t_nmf_fifo_arm_desc *pArmFifo)
+{
+ fifo_acknowledgeRead(pArmFifo);
+}
+
+PUBLIC t_cm_error cm_PushEventTrace(t_nmf_fifo_arm_desc *pArmFifo, t_event_params_handle h, t_uint32 methodIndex, t_uint32 isTrace)
+{
+ t_uint32 retValue;
+
+ retValue = fifo_getNextElemToWritePointer(mpc2mpcComsFifoId[ARM_CORE_ID][pArmFifo->poperCoreId]);
+
+ if(retValue != 0x0) {
+ t_shared_field *pEvent = (t_shared_field *)retValue;
+
+#ifdef __DEBUG
+ armdspCounter++;
+#endif /* __DEBUG */
+
+ pEvent[EVENT_ELEM_METHOD_IDX] = (t_shared_addr)methodIndex;
+ pEvent[EVENT_ELEM_PARAM_IDX] = pArmFifo->dspAdress + (((t_cm_logical_address)h - (t_cm_logical_address)pArmFifo->fifoDesc) >> 1); //note byte to half-word conversion
+ pEvent[EVENT_ELEM_EXTFIELD_IDX] = pArmFifo->fifoDesc->extendedField;
+
+ if (isTrace)
+ {
+ cm_TRC_traceCommunication(
+ TRACE_COMMUNICATION_COMMAND_SEND,
+ ARM_CORE_ID,
+ pArmFifo->poperCoreId);
+ }
+ fifo_coms_acknowledgeWriteAndInterruptGeneration(mpc2mpcComsFifoId[ARM_CORE_ID][pArmFifo->poperCoreId]);
+
+ return CM_OK;
+ }
+
+ ERROR("CM_MPC_NOT_RESPONDING: FIFO COM full '%s'\n", 0, 0, 0, 0, 0, 0);
+ return CM_MPC_NOT_RESPONDING;
+}
+
+PUBLIC t_cm_error cm_PushEvent(t_nmf_fifo_arm_desc *pArmFifo, t_event_params_handle h, t_uint32 methodIndex)
+{
+ return cm_PushEventTrace(pArmFifo,h,methodIndex,1);
+}
+
+static void cmProcessMPCFifo(t_nmf_core_id coreId)
+{
+ t_shared_field *pEvent;
+
+ while((pEvent = (t_shared_field *)fifo_getNextElemToReadPointer(mpc2mpcComsFifoId[coreId][ARM_CORE_ID])) != NULL)
+ {
+ t_event_params_handle pParamsAddr;
+ t_shared_field *pParamsFifoESFDesc;
+
+ pParamsAddr = (t_event_params_handle)cm_DSP_ConvertDspAddressToHostLogicalAddress(
+ coreId,
+ pEvent[EVENT_ELEM_PARAM_IDX]);
+ pParamsFifoESFDesc = (t_shared_field *)pEvent[EVENT_ELEM_EXTFIELD_IDX];
+#ifdef __DEBUG
+ dsparmCounter++;
+#endif /* __DEBUG */
+
+ if(pParamsFifoESFDesc[EXTENDED_FIELD_BCTHIS_OR_TOP] == (t_shared_field)NMF_INTERNAL_USERTHIS)
+ {
+ internalHostJumptable[pEvent[EVENT_ELEM_METHOD_IDX]](coreId, pParamsAddr);
+ }
+ else
+ {
+ cm_TRC_traceCommunication(
+ TRACE_COMMUNICATION_COMMAND_RECEIVE,
+ ARM_CORE_ID,
+ coreId);
+
+ OSAL_PostDfc(
+ pParamsFifoESFDesc[EXTENDED_FIELD_BCTHIS_OR_TOP],
+ pEvent[EVENT_ELEM_METHOD_IDX],
+ pParamsAddr,
+ pParamsFifoESFDesc[EXTENDED_FIELD_BCDESC]);
+ }
+
+ // [Pwr] The mpc2mpcComsFifoId value is checked to support the case where
+ // the CM_PostCleanUpAndFlush method is called under interrupt context
+ // -> mpc2mpcComsFifoId can be released.
+ if (mpc2mpcComsFifoId[coreId][ARM_CORE_ID] != NULL)
+ fifo_acknowledgeRead(mpc2mpcComsFifoId[coreId][ARM_CORE_ID]);
+ else
+ break;
+ }
+}
+
+PUBLIC EXPORT_SHARED void CM_ProcessMpcEvent(t_nmf_core_id coreId)
+{
+#ifdef __DEBUG
+ dsparmIrqCounter++;
+#endif /* __DEBUG */
+
+ if (coreId != ARM_CORE_ID)
+ {
+ /* Acknowledge DSP communication interrupt */
+ cm_DSP_AcknowledgeDspIrq(coreId, DSP2ARM_IRQ_0);
+
+ cmProcessMPCFifo(coreId);
+ }
+ else
+ {
+ while((coreId = cm_HSEM_GetCoreIdFromIrqSrc()) <= LAST_MPC_ID)
+ cmProcessMPCFifo(coreId);
+ }
+}
+
diff --git a/drivers/staging/nmf-cm/cm/engine/component/inc/bind.h b/drivers/staging/nmf-cm/cm/engine/component/inc/bind.h
new file mode 100644
index 00000000000..325703e3367
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/inc/bind.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ * \brief Binding Factories internal API.
+ *
+ * \defgroup BF_COMMON Binding factories: Common API
+ * \defgroup BF_PRIMITIVE Binding Factories: Primitive API
+ * \defgroup BF_TRACE Binding Factories: Trace API
+ * \defgroup BF_ASYNCHRONOUS Binding Factories: Asynchronous API
+ * \defgroup BF_DISTRIBUTED Binding Factories: Distributed API
+ */
+#ifndef __INC_CM_BIND_H
+#define __INC_CM_BIND_H
+
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/communication/inc/communication.h>
+#include <cm/engine/utils/inc/table.h>
+
+/**
+ * \internal
+ * \ingroup BF_COMMON
+ *
+ * \brief Identification number of predefined Binding Factories
+ */
+typedef enum {
+ BF_SYNCHRONOUS, //!< Intra-DSP Synchronous Binding Factory Identifier
+ BF_TRACE, //!< Intra-DSP trace synchronous Binding Factory Identifier
+ BF_ASYNCHRONOUS, //!< Intra-DSP Asynchronous Binding Factory Identifier
+ BF_DSP2HOST, //!< DSP to Host Binding Factory Identifier
+ BF_HOST2DSP, //!< Host to DSP Binding Factory Identifier
+ BF_DSP2DSP, //!< DSP to DSP Binding Factory Identifier
+} t_bf_info_ID;
+
+/*!
+ * \internal
+ * \brief Description of a provided interface
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct _t_interface_reference {
+ const t_component_instance *instance; //!< Component instance that provide this interface
+ t_uint8 provideIndex; //!< Index of the interface in the provide array
+ t_uint8 collectionIndex;//!< Index in the collection if provided interface is a collection
+ t_bf_info_ID bfInfoID; //!< Identification of BF used for creating binding
+ void* bfInfo; //!< Storage of the binding factory info
+} t_interface_reference;
+
+/**
+ * \internal
+ * \ingroup BF_COMMON
+ *
+ * Make some basic sanity check for a client:
+ * - component stopped
+ * - Interface really required
+ *
+ * \param[in] client The client component instance handle.
+ * \param[in] requiredItfClientName The client required interface name
+ * \param[out] itfRequire return the required interface (avoid user searching)
+ */
+t_cm_error cm_checkValidClient(
+ const t_component_instance* client,
+ const char* requiredItfClientName,
+ t_interface_require_description *itfRequire,
+ t_bool *bindable);
+/**
+ * \internal
+ * \ingroup BF_COMMON
+ *
+ * Make some basic sanity check for a server:
+ * - Interface really provided
+ *
+ * \param[in] server The server component instance handle.
+ * \param[in] providedItfServerName The server provided interface name
+ * \param[out] itfProvide return the provided interface (avoid user searching)
+ */
+t_cm_error cm_checkValidServer(
+ const t_component_instance* server,
+ const char* providedItfServerName,
+ t_interface_provide_description *itfProvide);
+
+/**
+ * \internal
+ * \ingroup BF_COMMON
+ *
+ * Make some basic sanity check for a binding:
+ * - Sanity check for a server
+ * - Sanity check for a client (and potentially wait initialisation)
+ * - Provided and required interface matches
+ *
+ * \param[in] client The client component instance handle
+ * \param[in] requiredItfClientName The client required interface name
+ * \param[in] server The server component instance handle
+ * \param[in] providedItfServerName The server provided interface name
+ * \param[out] itfRequire return the required interface (avoid user searching)
+ * \param[out] itfProvide return the provided interface (avoid user searching)
+ */
+t_cm_error cm_checkValidBinding(
+ const t_component_instance* client,
+ const char* requiredItfClientName,
+ const t_component_instance* server,
+ const char* requiredItfServerName,
+ t_interface_require_description *itfRequire,
+ t_interface_provide_description *itfProvide,
+ t_bool *bindable);
+
+/**
+ * \internal
+ * \ingroup BF_COMMON
+ *
+ * Make some basic sanity check for each unbinding:
+ * - Interface really required
+ * - Component stopped
+ *
+ * \param[in] client The client component instance handle
+ * \param[in] requiredItfClientName The client required interface name
+ * \param[out] itfRequire return the previously bound required interface (avoid user searching)
+ * \param[out] itfProvide return the previously bound provided interface (avoid user searching)
+ * \param[out] bfInfoID return the identifier of the binding factory that created the previous binding
+ * \param[out] bfInfo return the information of the binding factory that created the previous binding
+ */
+t_cm_error cm_checkValidUnbinding(
+ const t_component_instance* client,
+ const char* requiredItfClientName,
+ t_interface_require_description *itfRequire,
+ t_interface_provide_description *itfProvide);
+
+/**
+ * \internal
+ * \ingroup BF_PRIMITIVE
+ *
+ * Create a primitive binding between a client to a server interface.
+ *
+ * \param[in] itfRequire The client required interface description
+ * \param[in] itfProvide The server provided interface description
+ */
+t_cm_error cm_bindInterface(
+ const t_interface_require_description *itfRequire,
+ const t_interface_provide_description *itfProvide);
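+
+/*
+ * Usage sketch (illustrative only): the check-then-bind sequence. The component
+ * handles and the interface names "output"/"input" are placeholders, and
+ * 'bindable' is assumed to report whether the binding may proceed.
+ *
+ *   t_interface_require_description itfRequire;
+ *   t_interface_provide_description itfProvide;
+ *   t_bool bindable;
+ *
+ *   if (cm_checkValidBinding(client, "output", server, "input",
+ *                            &itfRequire, &itfProvide, &bindable) == CM_OK &&
+ *       bindable)
+ *       (void)cm_bindInterface(&itfRequire, &itfProvide);
+ */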
+
+/**
+ * \internal
+ * \ingroup BF_PRIMITIVE
+ *
+ * Unbind a previously bound client.
+ *
+ * \param[in] itfRequire The client required interface description
+ */
+void cm_unbindInterface(
+ const t_interface_require_description *itfRequire);
+
+/**
+ * \internal
+ * \ingroup BF_PRIMITIVE
+ *
+ * Get the server interface previously bound to a client
+ *
+ * \param[in] itfRequire The client required interface description
+ * \param[out] itfProvide The server provided interface description
+ */
+t_cm_error cm_lookupInterface(
+ const t_interface_require_description *itfRequire,
+ t_interface_provide_description *itfProvide);
+
+/**
+ * \internal
+ * \ingroup BF_PRIMITIVE
+ *
+ * Create a void binding.
+ *
+ * \param[in] client The client component instance handle
+ * \param[in] requiredItfClientName The client required interface name
+ */
+t_cm_error cm_bindInterfaceToVoid(
+ const t_interface_require_description *itfRequire);
+
+/**
+ * \internal
+ * \ingroup BF_TRACE
+ *
+ * Trace synchronous binding factory Information
+ */
+typedef struct {
+ t_component_instance *traceInstance; //!< Trace binding component instance
+} t_trace_bf_info;
+
+/**
+ * \internal
+ * \ingroup BF_TRACE
+ *
+ * Create a traced binding between a client to a server interface.
+ *
+ * \param[in] itfRequire The client required interface description
+ * \param[in] itfProvide The server provided interface description
+ */
+t_cm_error cm_bindInterfaceTrace(
+ const t_interface_require_description *itfRequire,
+ const t_interface_provide_description *itfProvide,
+ t_elfdescription *elfhandleTrace);
+
+/**
+ * \internal
+ * \ingroup BF_TRACE
+ *
+ * Unbind a previously bound client.
+ *
+ * \param[in] itfRequire The client required interface description
+ */
+void cm_unbindInterfaceTrace(
+ const t_interface_require_description *itfRequire,
+ t_trace_bf_info *bfInfo);
+
+
+/**
+ * \internal
+ * \ingroup BF_ASYNCHRONOUS
+ *
+ * Asynchronous binding factory Information
+ */
+typedef struct {
+ t_component_instance *eventInstance; //!< Event binding component instance
+ t_memory_handle dspfifoHandle; //!< Memory handle of allocated event fifo (pass to the event binding component)
+} t_async_bf_info;
+
+/**
+ * \internal
+ * \ingroup BF_ASYNCHRONOUS
+ *
+ * Create an asynchronous binding between a client and a server interface.
+ * \param[in] itfRequire The client required interface description
+ * \param[in] itfProvide The server provided interface description
+ * \param[in] fifosize Number of awaited events in the fifo
+ */
+t_cm_error cm_bindInterfaceAsynchronous(
+ const t_interface_require_description *itfRequire,
+ const t_interface_provide_description *itfProvide,
+ t_uint32 fifosize,
+ t_dsp_memory_type_id dspEventMemType,
+ t_elfdescription *elfhandleEvent);
+/**
+ * \internal
+ * \ingroup BF_ASYNCHRONOUS
+ *
+ * Destroy an asynchronous binding between a client and a server interface.
+ * \param[in] itfRequire the required interface
+ */
+void cm_unbindInterfaceAsynchronous(
+ const t_interface_require_description *itfRequire,
+ t_async_bf_info *bfInfo);
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * Stub information in distributed binding factory (client side)
+ */
+typedef struct {
+ t_component_instance *stubInstance; //!< Stub
+} t_dspstub_bf_info;
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * Skeleton information in distributed binding factory (server side)
+ */
+typedef struct {
+ t_component_instance *skelInstance; //!< Skeleton binding component instance
+ t_memory_handle dspfifoHandle; //!< Memory handle of allocated event fifo (pass to the event binding component)
+} t_dspskel_bf_info;
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * Host to DSP distributed binding factory Information
+ */
+typedef struct {
+ t_dspskel_bf_info dspskeleton; //!< Information about the DSP skeleton (server side)
+ t_nmf_fifo_arm_desc* fifo; //!< Handle of the fifo params
+ t_nmf_client_id clientId; //!< Client ID of the host client
+} t_host2mpc_bf_info;
+
+/*
+ * Table of instantiated of host2mpc bindings
+ */
+extern t_nmf_table Host2MpcBindingTable; /**< list (table) of host2mpc bindings */
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * Create a Host to DSP distributed binding between a host client interface and a server interface.
+ * (Not managed in the same way as a distributed binding since the Host programming model is not component aware.)
+ * \param[in] itfProvide The server provided interface description
+ * \param[in] fifosize Number of awaited events in the fifo
+ * \param[in] dspEventMemType The type of memory to use
+ * \param[out] bfInfo Returned binding factory information
+ */
+t_cm_error cm_bindComponentFromCMCore(
+ const t_interface_provide_description *itfProvide,
+ t_uint32 fifosize,
+ t_dsp_memory_type_id dspEventMemType,
+ t_elfdescription *elfhandleSkeleton,
+ t_host2mpc_bf_info **bfInfo);
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * Destroy a Host to DSP distributed binding between a host client interface to a server interface.
+ * \param[in] bfInfo The Host to DSP distributed binding factory information
+ */
+void cm_unbindComponentFromCMCore(
+ t_host2mpc_bf_info *bfInfo);
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * DSP to Host distributed binding factory Information
+ */
+typedef struct {
+ t_dspstub_bf_info dspstub; //!< Information about the DSP stub (client side)
+ t_nmf_fifo_arm_desc* fifo; //!< Handle of the fifo params
+ t_uint32 context;
+} t_mpc2host_bf_info;
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * Create a DSP to Host distributed binding between a client interface and a host server interface.
+ * (Not managed in the same way as a distributed binding since the Host programming model is not component aware.)
+ * \param[in] itfRequire The client required interface description
+ * \param[in] fifosize Number of awaited events in the fifo
+ */
+t_cm_error cm_bindComponentToCMCore(
+ const t_interface_require_description *itfRequire,
+ t_uint32 fifosize,
+ t_uint32 context,
+ t_elfdescription *elfhandleStub,
+ t_mpc2host_bf_info ** bfInfo);
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * Destroy a DSP to Host distributed binding between a client interface to a server interface.
+ * \param[in] itfRequire The required interface
+ * \param[out] upLayerThis The 'THIS' context of upper layer
+ */
+void cm_unbindComponentToCMCore(
+ const t_interface_require_description *itfRequire,
+ t_mpc2host_bf_info *bfInfo);
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * Asynchronous distributed binding factory Information
+ */
+typedef struct {
+ t_nmf_fifo_arm_desc* fifo; //!< Handle of the fifo params
+ t_dspstub_bf_info dspstub; //!< Information about the DSP stub (client side)
+ t_dspskel_bf_info dspskeleton; //!< Information about the DSP skeleton (server side)
+} t_mpc2mpc_bf_info;
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * Create an asynchronous distributed binding between a client interface and a server interface.
+ * \param[in] itfRequire The client required interface description
+ * \param[in] itfProvide The server provided interface description
+ * \param[in] fifosize Number of awaited events in the fifo
+ */
+t_cm_error cm_bindInterfaceDistributed(
+ const t_interface_require_description *itfRequire,
+ const t_interface_provide_description *itfProvide,
+ t_uint32 fifosize,
+ t_dsp_memory_type_id dspEventMemType,
+ t_elfdescription *elfhandleSkeleton,
+ t_elfdescription *elfhandleStub);
+
+/**
+ * \internal
+ * \ingroup BF_DISTRIBUTED
+ *
+ * Destroy an asynchronous distributed binding between a client interface and a server interface.
+ * \param[in] itfRequire The required interface
+ */
+void cm_unbindInterfaceDistributed(
+ const t_interface_require_description *itfRequire,
+ t_mpc2mpc_bf_info *bfInfo);
+
+/**
+ * \internal
+ *
+ * Bind a static interrupt to a server provided interface name.
+ * \param[in] coreId The core on which the component is loaded
+ * \param[in] interruptLine Interrupt line number to use
+ * \param[in] server Server instance that provides the interrupt service
+ * \param[in] providedItfServerName Interface name that provides the interrupt service
+ */
+t_cm_error cm_bindInterfaceStaticInterrupt(
+ const t_nmf_core_id coreId,
+ const int interruptLine,
+ const t_component_instance *server,
+ const char* providedItfServerName);
+
+/**
+ * \internal
+ *
+ * Unbind a static interrupt.
+ * \param[in] coreId The core on which the component is loaded
+ * \param[in] interruptLine Interrupt line number to use
+ */
+t_cm_error cm_unbindInterfaceStaticInterrupt(
+ const t_nmf_core_id coreId,
+ const int interruptLine);
+
+void cm_destroyRequireInterface(t_component_instance* component, t_nmf_client_id clientId);
+void cm_registerSingletonBinding(
+ t_component_instance* component,
+ t_interface_require_description* itfRequire,
+ t_interface_provide_description* itfProvide,
+ t_nmf_client_id clientId);
+t_bool cm_unregisterSingletonBinding(
+ t_component_instance* component,
+ t_interface_require_description* itfRequire,
+ t_interface_provide_description* itfProvide,
+ t_nmf_client_id clientId);
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/component/inc/component_type.h b/drivers/staging/nmf-cm/cm/engine/component/inc/component_type.h
new file mode 100644
index 00000000000..2e769ec8b22
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/inc/component_type.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Components Component Manager API type.
+ *
+ * \defgroup COMPONENT CM Components API
+ * \ingroup CM_USER_API
+ */
+
+#ifndef COMPONENT_TYPE_H_
+#define COMPONENT_TYPE_H_
+
+#include <cm/inc/cm_type.h>
+#include <nmf/inc/component_type.h>
+
+/*!
+ * @defgroup t_nmf_ee_priority t_nmf_ee_priority
+ * \brief Identification of the execution engine priority and sub priority.
+ * @{
+ * \ingroup COMPONENT
+ */
+typedef t_uint32 t_nmf_ee_priority; //!< Fake enumeration type
+
+#define NMF_SCHED_BACKGROUND ((t_nmf_ee_priority)0) //!< Background priority
+#define NMF_SCHED_NORMAL ((t_nmf_ee_priority)1) //!< Normal priority
+#define NMF_SCHED_URGENT ((t_nmf_ee_priority)2) //!< Urgent priority
+/* @} */
+
+
+/*!
+ * \brief Identification of host component returned during introspection
+ *
+ * \ingroup COMPONENT_INTROSPECTION
+ */
+#define NMF_HOST_COMPONENT ((t_cm_instance_handle)0xFFFFFFFF)
+
+/*!
+ * \brief Identification of void component returned during introspection
+ *
+ * \ingroup COMPONENT_INTROSPECTION
+ */
+#define NMF_VOID_COMPONENT ((t_cm_instance_handle)0xFFFFFFFE)
+
+
+/*!
+ * @defgroup t_cm_require_state t_cm_require_state
+ * \brief Identification of the required interface state (static, optional or collection).
+ * @{
+ * \ingroup COMPONENT
+ */
+typedef t_uint8 t_cm_require_state; //!< Fake enumeration type
+
+#define CM_REQUIRE_STATIC ((t_cm_require_state)0) //!< Required interface is static
+#define CM_REQUIRE_OPTIONAL ((t_cm_require_state)1) //!< Required interface is optional
+#define CM_REQUIRE_COLLECTION ((t_cm_require_state)2) //!< Required interface is a collection
+
+/* @} */
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/component/inc/description.h b/drivers/staging/nmf-cm/cm/engine/component/inc/description.h
new file mode 100644
index 00000000000..882dc1ea873
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/inc/description.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#ifndef __INC_CM_COMPONENT_DESCRIPTION_H
+#define __INC_CM_COMPONENT_DESCRIPTION_H
+
+#include <cm/engine/elf/inc/memory.h>
+#include <cm/engine/utils/inc/string.h>
+
+#include <inc/nmf-limits.h>
+
+/*!
+ * \internal
+ * \brief Description of an interface
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct _t_interface_description {
+ t_dup_char type; //!< Type of the interface
+ t_uint16 referenceCounter; //!< Number of templates referencing the interface
+ t_uint8 methodNumber; //!< Number of methods in the interface
+ struct _t_interface_description* next;
+ t_dup_char methodNames[1]; //!< Array of method names
+} t_interface_description;
+
+/*!
+ * \internal
+ * \brief Description of a variable memory on a collection index
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+ t_uint32 offset; //!< Offset in the memory
+ const t_elfmemory *memory; //!< Memory
+} t_memory_reference;
+
+/*!
+ * \internal
+ * \brief Description of a required interface on a collection index
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+    t_uint32 numberOfClient;            //!< Number of interface descriptors actually connected to this interface
+ t_memory_reference *memories; /*!< Memory where each interface reference descriptor resides
+ \note memories[numberOfClient] */
+} t_interface_require_index;
+
+/*!
+ * \internal
+ * \brief Description of a required interface
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+ t_dup_char name; //!< Name of the interface
+ t_interface_description *interface; //!< Description of the interface
+ t_uint8 requireTypes; //!< Mask of t_elf_interface_require_type
+ t_uint8 collectionSize; //!< Size of the collection (1 if not a collection)
+ t_interface_require_index *indexes; /*!< Require information for each collection index
+ \note indexes[collectionSize] */
+} t_interface_require;
+
+/*!
+ * \internal
+ * \brief Description of a provided interface method on a collection index
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+ t_memory_reference memory; //!< Memory of the method
+} t_interface_provide_index;
+
+/*!
+ * \internal
+ * \brief Description of a provided interface
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+ t_dup_char name; //!< Name of the interface
+ t_interface_description *interface; //!< Description of the interface
+ t_uint8 provideTypes; //!< Mask of t_elf_interface_provide_type
+ t_uint8 interruptLine; //!< Interrupt line if interrupt (0 if not)
+ t_uint8 collectionSize; //!< Size of the collection (1 if not a collection)
+ t_interface_provide_index **indexes; //!< Provide information for each collection index
+} t_interface_provide;
+
+/*!
+ * \internal
+ * \brief Description of an attribute
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+ t_dup_char name; //!< Name of the attribute
+    t_memory_reference memory;          //!< Memory where the attribute resides
+} t_attribute;
+
+/*!
+ * \internal
+ * \brief Description of a property
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+    t_dup_char name;                    //!< Name of this property
+ t_dup_char value; //!< String of the value
+} t_property;
+
+
+
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/component/inc/dspevent.h b/drivers/staging/nmf-cm/cm/engine/component/inc/dspevent.h
new file mode 100644
index 00000000000..bb47363c0ae
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/inc/dspevent.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef __INC_DSP_EVENT
+#define __INC_DSP_EVENT
+
+#include <cm/inc/cm_type.h>
+#include <cm/engine/component/inc/instance.h>
+#include <cm/engine/memory/inc/memory.h>
+
+/* value should be size of t_remote_event in mmdsp word */
+#define DSP_REMOTE_EVENT_SIZE_IN_DSPWORD 5
+
+t_cm_error dspevent_createDspEventFifo(
+ const t_component_instance *pComp,
+ const char* nameOfTOP,
+ t_uint32 fifoNbElem,
+ t_uint32 fifoElemSizeInWord,
+ t_dsp_memory_type_id dspEventMemType,
+ t_memory_handle *pHandle);
+void dspevent_destroyDspEventFifo(t_memory_handle handle);
+
+#endif /* __INC_DSP_EVENT */
diff --git a/drivers/staging/nmf-cm/cm/engine/component/inc/initializer.h b/drivers/staging/nmf-cm/cm/engine/component/inc/initializer.h
new file mode 100644
index 00000000000..5ac9ec453b7
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/inc/initializer.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef __INC_NMF_INITIALIZER
+#define __INC_NMF_INITIALIZER
+
+#include <cm/inc/cm_type.h>
+#include <cm/engine/component/inc/instance.h>
+#include <share/communication/inc/initializer.h>
+
+PUBLIC t_cm_error cm_COMP_INIT_Init(t_nmf_core_id coreId);
+PUBLIC t_cm_error cm_COMP_CallService(int serviceIndex, t_component_instance *pComp, t_uint32 methodAddress);
+PUBLIC void cm_COMP_Flush(t_nmf_core_id coreId);
+PUBLIC void cm_COMP_INIT_Close(t_nmf_core_id coreId);
+PUBLIC t_cm_error cm_COMP_UpdateStack(t_nmf_core_id coreId, t_uint32 stackSize);
+PUBLIC t_cm_error cm_COMP_ULPForceWakeup(t_nmf_core_id coreId);
+PUBLIC t_cm_error cm_COMP_ULPAllowSleep(t_nmf_core_id coreId);
+PUBLIC t_cm_error cm_COMP_InstructionCacheLock(t_nmf_core_id coreId, t_uint32 mmdspAddr, t_uint32 mmdspSize);
+PUBLIC t_cm_error cm_COMP_InstructionCacheUnlock(t_nmf_core_id coreId, t_uint32 mmdspAddr, t_uint32 mmdspSize);
+
+
+PUBLIC void processAsyncAcknowledge(t_nmf_core_id coreId, t_event_params_handle pParam);
+PUBLIC void processSyncAcknowledge(t_nmf_core_id coreId, t_event_params_handle pParam);
+
+#endif /* __INC_NMF_INITIALIZER */
diff --git a/drivers/staging/nmf-cm/cm/engine/component/inc/instance.h b/drivers/staging/nmf-cm/cm/engine/component/inc/instance.h
new file mode 100644
index 00000000000..0a7d80e2e02
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/inc/instance.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Components Management internal methods - Instance API.
+ *
+ */
+#ifndef __INC_CM_INSTANCE_H
+#define __INC_CM_INSTANCE_H
+
+#include <cm/engine/component/inc/template.h>
+#include <cm/engine/repository_mgt/inc/repository_mgt.h>
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/utils/inc/table.h>
+#include <cm/engine/utils/inc/string.h>
+
+/*----------------------------------------------------------------------------
+ * Component Instance API.
+ *----------------------------------------------------------------------------*/
+struct _t_interface_reference;
+
+/*!
+ * \internal
+ * \brief Component life cycle state
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef enum {
+ STATE_NONE,
+ STATE_STOPPED,
+ STATE_RUNNABLE,
+    // STATE_DESTROYED is implied once the component is removed from the component list
+} t_component_state;
+
+struct t_client_of_singleton
+{
+ struct t_client_of_singleton *next;
+ t_nmf_client_id clientId;
+ t_uint16 numberOfInstance;
+ t_uint16 numberOfStart;
+ t_uint16 numberOfBind;
+};
+
+/*!
+ * \internal
+ * \brief Description of a component instance
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct t_component_instance {
+ t_dup_char pathname; //!< Path Name of this component in the components architecture
+
+ t_component_state state; //!< Component state
+ t_nmf_ee_priority priority; //!< Executive engine component priority
+ t_component_template *Template; //!< Component template
+
+ t_uint32 thisAddress; //!< Cached value of cm_DSP_GetDspAddress(component->memories[data], &thisAddress);
+
+    t_memory_handle memories[NUMBER_OF_MMDSP_MEMORY]; //!< References in the different memories where data reside (YES, we fix the implementation to MMDSP)
+
+ struct _t_interface_reference **interfaceReferences; /*!< Interface references
+ (Share same index as template->u.p.requires)
+ type == targets[interface_index][collection_index] */
+
+    t_uint16 providedItfUsedCount;      //!< Use count referencing the number of components bound to this one, i.e. the number of provided interfaces in use
+ t_cm_instance_handle instance; //!< index of this component within the ComponentTable
+ t_cm_domain_id domainId; //!< Domain where the component has been installed
+
+ struct t_client_of_singleton *clientOfSingleton; //!< Client of singleton list
+ t_memory_handle loadMapHandle; // handle of allocated memory for the loadMap structure and name;
+ void *dbgCooky; //!< pointer to OS internal data
+} t_component_instance;
+
+t_component_template* cm_lookupTemplate(t_nmf_core_id dspId, t_dup_char str);
+
+/*!
+ * \internal
+ * \brief Load a component template.
+ *
+ * ...
+ *
+ * \param[in] templateName name of the template to load
+ * \param[in] coreId DSP where template must be loaded
+ * \param[in] pRepComponent Pointer to the component entry stored in the Component Cache Repository
+ * \param[in, out] template reference to put the loaded template (null if first instance)
+ *
+ * \exception CM_COMPONENT_NOT_FOUND
+ * \exception CM_NO_MORE_MEMORY
+ *
+ * \return exception number.
+ *
+ * \warning For Component manager use only.
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_loadComponent(
+ t_dup_char templateName,
+ t_cm_domain_id domainId,
+ t_elfdescription* elfhandle,
+ t_component_template **reftemplate);
+
+/*!
+ * \internal
+ * \brief Unload a component template.
+ *
+ * ...
+ *
+ * \param[in] template template to be unloaded
+ * \param[in] Private memories that have been created from the component binary file
+ *
+ * \return exception number.
+ *
+ * \warning For Component manager use only.
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_unloadComponent(
+ t_component_template *reftemplate);
+
+/*!
+ * \internal
+ * \brief Instantiate a component.
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_instantiateComponent(const char* templateName,
+ t_cm_domain_id domainId,
+ t_nmf_ee_priority priority,
+ const char* pathName,
+ t_elfdescription *elfhandle,
+ t_component_instance** refcomponent);
+
+struct t_client_of_singleton* cm_getClientOfSingleton(t_component_instance* component, t_bool createdIfNotExist, t_nmf_client_id clientId);
+
+/*!
+ * \internal
+ * \brief Start a component.
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_startComponent(t_component_instance* component, t_nmf_client_id clientId);
+
+/*!
+ * \internal
+ * \brief Stop a component.
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_stopComponent(t_component_instance* component, t_nmf_client_id clientId);
+
+/*!
+ * \internal
+ */
+typedef enum {
+ DESTROY_NORMAL,
+ DESTROY_WITHOUT_CHECK,
+ DESTROY_WITHOUT_CHECK_CALL
+} t_destroy_state;
+
+/*!
+ * \internal
+ * \brief Destroy a component instance.
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_destroyInstance(t_component_instance* component, t_destroy_state forceDestroy);
+
+/*!
+ * \internal
+ * \brief Destroy a component instance.
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_destroyInstanceForClient(t_component_instance* component, t_destroy_state forceDestroy, t_nmf_client_id clientId);
+
+/*!
+ * \internal
+ * \brief
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+void cm_delayedDestroyComponent(t_component_instance *component);
+
+/*!
+ * \internal
+ * \brief
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_component_instance *cm_lookupComponent(const t_cm_instance_handle hdl);
+
+/*!
+ * \internal
+ * \brief
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_bool cm_isComponentOnCoreId(t_nmf_core_id coreId);
+
+/*!
+ * \internal
+ * \brief
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_COMP_Init(void);
+
+/*!
+ * \internal
+ * \brief
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+void cm_COMP_Destroy(void);
+
+/*
+ * Table of instantiated components.
+ */
+extern t_nmf_table ComponentTable; /**< list (table) of components */
+#define componentEntry(i) ((t_component_instance *)ComponentTable.entries[i])
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/component/inc/introspection.h b/drivers/staging/nmf-cm/cm/engine/component/inc/introspection.h
new file mode 100644
index 00000000000..cfb55c91779
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/inc/introspection.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Components Management internal methods - Introspection.
+ *
+ */
+#ifndef __INC_CM_INTROSPECTION_H
+#define __INC_CM_INTROSPECTION_H
+
+#include <cm/engine/component/inc/instance.h>
+
+/*!
+ * \internal
+ * \brief Description of a required interface reference
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+    const t_component_instance *client;    //!< Component that requires this interface
+ t_uint8 requireIndex; //!< Index of the interface in the require array
+ t_uint8 collectionIndex; //!< Index in the collection if required interface is a collection
+ const char* origName; //!< Name of the component interface
+} t_interface_require_description;
+
+/*!
+ * \internal
+ * \brief Description of a provided interface
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+    const t_component_instance *server;    //!< Component that provides this interface
+ t_uint8 provideIndex; //!< Index of the interface in the provide array
+ t_uint8 collectionIndex; //!< Index in the collection if provided interface is a collection
+ const char* origName; //!< Name of the component interface
+} t_interface_provide_description;
+
+
+/*!
+ * \internal
+ * \brief Get property of a component.
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_getComponentProperty(
+ const t_component_instance *component,
+ const char *propName,
+ char value[MAX_PROPERTY_VALUE_LENGTH],
+ t_uint32 valueLength);
+
+
+t_dsp_address cm_getAttributeMpcAddress(
+ const t_component_instance *component,
+ const char *attrName);
+
+t_cm_logical_address cm_getAttributeHostAddr(
+ const t_component_instance *component,
+ const char *attrName);
+
+t_uint32 cm_readAttributeNoError(
+ const t_component_instance *component,
+ const char *attrName);
+
+t_cm_error cm_readAttribute(
+ const t_component_instance *component,
+ const char *attrName,
+ t_uint32 *value);
+
+t_cm_error cm_writeAttribute(
+ const t_component_instance *component,
+ const char *attrName,
+ t_uint32 value);
+
+/*!
+ * \internal
+ * \brief Get internal component symbol
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_dsp_address cm_getFunction(
+ const t_component_instance* component,
+ const char* interfaceName,
+ const char* methodName);
+
+/*!
+ * \internal
+ * \brief Get interface provided by a component instance.
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_getProvidedInterface(const t_component_instance* server,
+ const char* itfName,
+ t_interface_provide_description *itfProvide);
+
+/*!
+ * \internal
+ * \brief Get interface required by a component instance.
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+t_cm_error cm_getRequiredInterface(const t_component_instance* server,
+ const char* itfName,
+ t_interface_require_description *itfRequire);
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/component/inc/nmfheaderabi.h b/drivers/staging/nmf-cm/cm/engine/component/inc/nmfheaderabi.h
new file mode 100644
index 00000000000..9eae19b2f70
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/inc/nmfheaderabi.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief NMF component description ABI
+ *
+ * \defgroup NMF_HEADER NMF Component Description ABI
+ * The NMF component description ABI is stored in the nmf_segment in the ELF component file.
+ * The NMF component description section starts with the t_elf_component_header structure.
+ *
+ * \warning <B>The format of this section is not fixed and may change without notice.</B>
+ * \note You can use the nmfHeaderVersion to check if the format has changed.
+ * \note Each pointer in this section is relative to the beginning of the section and must be relocated before use.
+ * \ingroup NMF_ABI
+ */
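+/*
+ * Minimal sketch of what "relocated before use" means above, under the
+ * assumption that the nmf_segment has been copied to a host-accessible buffer
+ * pointed to by sectionBase (a hypothetical name, not part of this ABI): each
+ * pointer field holds an offset from the start of the section and must be
+ * rebased before being dereferenced.
+ *
+ * \code
+ * char *sectionBase;                  // start of the copied nmf_segment
+ * t_elf_component_header *header = (t_elf_component_header *)sectionBase;
+ * if (header->attributeNumber != 0)
+ *     header->attributes = (t_elf_attribute *)
+ *             (sectionBase + (unsigned long)header->attributes);
+ * \endcode
+ */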
+#ifndef __INC_CM_NMF_HEADERABI_H
+#define __INC_CM_NMF_HEADERABI_H
+
+#include <cm/inc/cm_type.h>
+
+/*!
+ * \brief Description of an interface
+ * \ingroup NMF_HEADER
+ */
+typedef struct {
+ char *type; //!< Type of this Interface
+    t_uint8 methodNumber;               //!< Number of methods in the interface
+ t_uint8 reserved1, reserved2, reserved3;
+ char *methodNames[1]; //!< Array of method names [methodNumber]
+} t_elf_interface_description;
+
+/*!
+ * \brief Description of required interface type (values can be combined)
+ * \ingroup NMF_HEADER
+ */
+typedef enum {
+ COLLECTION_REQUIRE = 1, //!< Required interface is a collection
+    OPTIONAL_REQUIRE = 2,               //!< Required interface is optional
+ STATIC_REQUIRE = 4, //!< Required interface is static
+ VIRTUAL_REQUIRE = 8, //!< Required interface is virtual (only for introspection purpose)
+    INTRINSEC_REQUIRE = 16              //!< Required interface is intrinsic (binding is done automatically by the runtime)
+} t_elf_interface_require_type;
+
+/*!
+ * \brief Description of a required interface on a collection index
+ * \ingroup NMF_HEADER
+ */
+typedef struct {
+ t_uint32 numberOfClient; //!< Number of interface descriptor really connected to this interface
+    t_uint32 symbols[1];                /*!< Symbol of the real name of each interface reference
+ \note Real type symbols[numberOfClient]
+ \note Use relocation in order to get symbol information */
+} t_elf_interface_require_index;
+
+/*!
+ * \brief Description of an interface required
+ * \ingroup NMF_HEADER
+ */
+typedef struct {
+ char *name; //!< name of the interface: offset in string segment
+ t_uint8 requireTypes; //!< Mask of t_elf_interface_require_type
+ t_uint8 collectionSize; //!< Size of the collection (1 if not a collection)
+ t_uint8 reserved1, reserved2;
+ t_elf_interface_description *interface; //!< Interface description
+ t_elf_interface_require_index indexes[1]; /*!< Require information for each collection index
+ \note Real type: indexes[collectionSize],
+ available only if not static interface */
+} t_elf_required_interface;
+
+/*!
+ * \brief Description of provided interface type (values can be combined)
+ * \ingroup NMF_HEADER
+ */
+typedef enum {
+ COLLECTION_PROVIDE = 1, //!< Provided interface is a collection
+ VIRTUAL_PROVIDE = 2 //!< Provided interface is virtual (only for introspection purpose)
+} t_elf_interface_provide_type;
+
+/*!
+ * \brief Description of an interface provided
+ * \ingroup NMF_HEADER
+ */
+typedef struct {
+ char* name; //!< name of the interface: offset in string segment
+ t_uint8 provideTypes; //!< Mask of t_elf_interface_provide_type
+ t_uint8 interruptLine; //!< Interrupt line if interrupt (0 if not)
+ t_uint8 collectionSize; //!< Size of the collection (1 if not a collection)
+ t_uint8 reserved1;
+ t_elf_interface_description *interface; //!< Interface description
+ t_uint32 methodSymbols[1]; /*!< Symbol of the real name of methods of the interface for each collection index
+ \note Real type: methodSymbols[collectionSize][methodNumber]
+ \note Use relocation in order to get symbol information*/
+} t_elf_provided_interface;
+
+/*!
+ * \brief Description of an attribute
+ * \ingroup NMF_HEADER
+ */
+typedef struct {
+ char* name; //!< Name of this attribute
+ t_uint32 symbols; /*!< Symbol of the real name of the attribute
+ \note Use relocation in order to get symbol information */
+} t_elf_attribute;
+
+/*!
+ * \brief Description of a property
+ * \ingroup NMF_HEADER
+ */
+typedef struct {
+    char* name;                         //!< Name of this property
+ char* value; //!< String of the value
+} t_elf_property;
+
+#define MAGIC_COMPONENT 0x123 //!< Magic Number for a component \ingroup NMF_HEADER
+#define MAGIC_SINGLETON 0x321 //!< Magic Number for a singleton component \ingroup NMF_HEADER
+#define MAGIC_FIRMWARE 0x456 //!< Magic Number for Execution Engine Component \ingroup NMF_HEADER
+
+/*!
+ * \brief Description of a ELF component header
+ *
+ * The NMF component description section starts with this structure.
+ *
+ * \ingroup NMF_HEADER
+ */
+typedef struct {
+ t_uint32 magic; //!< Magic Number
+ t_uint32 nmfVersion; //!< Version of the NMF Header
+
+ char* templateName; //!< Name of the component template
+
+ t_uint32 LCCConstruct; //!< Life cycle Constructor offset
+ t_uint32 LCCStart; //!< Life cycle Starter offset
+ t_uint32 LCCStop; //!< Life cycle Stopper offset
+    t_uint32 LCCDestroy;                //!< Life cycle Destructor offset
+
+ t_uint32 minStackSize; //!< Minimum stack size
+
+ t_uint32 attributeNumber;//!< Number of attributes
+ t_elf_attribute *attributes; //!< Array of attributes (be careful, this reference must be relocated before use)
+
+ t_uint32 propertyNumber; //!< Number of properties
+ t_elf_property *properties; //!< Array of properties (be careful, this reference must be relocated before use)
+
+ t_uint32 provideNumber; //!< Number of interfaces provided
+ t_elf_provided_interface *provides; //!< Array of interfaces provided (be careful, this reference must be relocated before use)
+
+    t_uint32 requireNumber;             //!< Number of interfaces required
+ t_elf_required_interface *requires; //!< Array of interfaces required (be careful, this reference must be relocated before use)
+
+} t_elf_component_header;
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/component/inc/template.h b/drivers/staging/nmf-cm/cm/engine/component/inc/template.h
new file mode 100644
index 00000000000..2718d8ae9fb
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/inc/template.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Components Management internal methods - Template API.
+ *
+ * \defgroup COMPONENT_INTERNAL Private component instances API
+ */
+#ifndef __INC_CM_TEMPLATE_H
+#define __INC_CM_TEMPLATE_H
+
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/component/inc/description.h>
+#include <cm/engine/elf/inc/elfapi.h>
+#include <cm/engine/utils/inc/string.h>
+
+
+/*!
+ * \internal
+ * \brief Class of a component
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef enum {
+ COMPONENT, //!< Primitive component
+ SINGLETON, //!< Singleton component
+ FIRMWARE, //!< Firmware composite component
+} t_component_classe;
+
+/*!
+ * \internal
+ * \brief Description of delayed relocation
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct _t_function_relocation {
+ t_dup_char symbol_name;
+ t_uint32 type;
+ char *reloc_addr;
+ struct _t_function_relocation *next;
+} t_function_relocation;
+
+struct t_component_instance;
+
+/*!
+ * \internal
+ * \brief Description of a provided interface method on a collection index ; Available only when template loaded
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+ t_uint32 methodAddresses; //!< Address of each method
+} t_interface_provide_index_loaded;
+
+/*!
+ * \internal
+ * \brief Description of a provided interface ; Available only when template loaded
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct {
+ t_interface_provide_index_loaded **indexesLoaded; //!< Provide information for each collection index
+} t_interface_provide_loaded;
+
+
+/*!
+ * \internal
+ * \brief Description of a component template
+ * \ingroup COMPONENT_INTERNAL
+ */
+typedef struct _t_component_template {
+ t_dup_char name; //!< Template name (a.k.a component type)
+
+ t_component_classe classe; //!< Class of the component
+ //TODO, juraj, remove dspId
+ t_nmf_core_id dspId; //!< Reference on DSP where template is loaded
+
+    t_uint8 numberOfInstance;           //!< Number of instances (or singleton copies) created from this template
+
+ t_uint8 propertyNumber; //!< Number of properties in this template
+ t_uint8 attributeNumber; //!< Number of attributes in this template
+    t_uint8 provideNumber;              //!< Number of interfaces provided by this template
+    t_uint8 requireNumber;              //!< Number of interfaces required by this template
+
+ t_uint32 LCCConstructAddress; //!< Life cycle Constructor address
+ t_uint32 LCCStartAddress; //!< Life cycle Starter address
+ t_uint32 LCCStopAddress; //!< Life cycle Stopper address
+    t_uint32 LCCDestroyAddress;         //!< Life cycle Destructor address
+
+ t_uint32 minStackSize; //!< Minimum stack size
+
+    t_memory_handle memories[NUMBER_OF_MMDSP_MEMORY]; //!< References in the different memories where data reside (YES, we fix the implementation to MMDSP)
+ const t_elfmemory *thisMemory; //!< Memory used to determine this
+ const t_elfmemory *codeMemory; //!< Memory used to determine code
+
+    t_function_relocation *delayedRelocation; //!< List of references that cannot be relocated until the appropriate binding is done
+
+ t_property *properties; //!< Array of properties in this template
+ t_attribute *attributes; //!< Array of attributes in this template
+    t_interface_provide *provides;      //!< Array of interfaces provided by this template
+    t_interface_require *requires;      //!< Array of interfaces required by this template
+
+    t_interface_provide_loaded *providesLoaded; //!< Array of interfaces provided by this template; available once the template is loaded
+
+ t_bool descriptionAssociatedWithTemplate;
+
+ struct _t_component_template *prev, *next;
+ struct t_component_instance *singletonIfAvaliable;
+} t_component_template;
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/component/src/binder.c b/drivers/staging/nmf-cm/cm/engine/component/src/binder.c
new file mode 100644
index 00000000000..5f08713833b
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/src/binder.c
@@ -0,0 +1,1313 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include "../inc/bind.h"
+#include "../inc/dspevent.h"
+#include <cm/engine/communication/fifo/inc/nmf_fifo_arm.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+#include <cm/engine/component/inc/introspection.h>
+
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/trace/inc/xtitrace.h>
+
+#include <cm/engine/utils/inc/string.h>
+
+#define CM_IT_NAME_MAX_LENGTH 8
+
+t_nmf_table Host2MpcBindingTable; /**< list (table) of host2mpc bindings */
+
+static void cm_fillItName(int interruptLine, char *itName);
+static t_uint16 getNumberOfBind(t_component_instance* component);
+
+/*
+ * Bind a virtual interface; here we assume that:
+ * - the client component requires this interface as its last one and without collection,
+ * - the server component provides only this interface and without collection.
+ * This is fixed in loader.c.
+ */
+static void cm_bindVirtualInterface(
+ t_component_instance* client,
+ const t_component_instance* server) {
+ t_interface_require_description itfRequire;
+
+ if(cm_getRequiredInterface(client, "coms", &itfRequire) == CM_OK)
+ {
+ t_interface_reference* itfRef = client->interfaceReferences[itfRequire.requireIndex];
+
+ /*
+ * Memorise this reference
+ */
+ itfRef->provideIndex = 0;
+ itfRef->collectionIndex = 0;
+ itfRef->instance = server;
+ itfRef->bfInfoID = (t_bf_info_ID)0;
+ itfRef->bfInfo = (void*)-1; // TODO
+ }
+ else
+ {
+ ERROR("Internal Error in cm_bindVirtualInterface\n", 0, 0, 0, 0, 0, 0);
+ }
+}
+
+static void cm_unbindVirtualInterface(
+ t_component_instance* client) {
+ t_interface_require_description itfRequire;
+
+ if(cm_getRequiredInterface(client, "coms", &itfRequire) == CM_OK)
+ {
+ t_interface_reference* itfRef = client->interfaceReferences[itfRequire.requireIndex];
+ itfRef->instance = NULL;
+ }
+ else
+ {
+ ERROR("Internal Error in cm_unbindVirtualInterface\n", 0, 0, 0, 0, 0, 0);
+ }
+}
+
+/*
+ * Bind component
+ */
+static void cm_bindLowLevelInterface(
+ const t_interface_require_description *itfRequire,
+ const t_interface_provide_description *itfLocalBC, /* On the same DSP */
+ t_bf_info_ID bfInfoID, void* bfInfo)
+{
+ const t_component_instance* client = itfRequire->client;
+ t_component_instance* server = (t_component_instance*)itfLocalBC->server;
+ t_interface_require *require = &client->Template->requires[itfRequire->requireIndex];
+ t_interface_provide* provide = &server->Template->provides[itfLocalBC->provideIndex];
+ t_interface_provide_loaded* provideLoaded = &server->Template->providesLoaded[itfLocalBC->provideIndex];
+ int k, j;
+
+ if(require->indexes != NULL)
+ {
+ t_interface_require_index *requireindex = &require->indexes[itfRequire->collectionIndex];
+
+ for(k = 0; k < requireindex->numberOfClient; k++) {
+ t_uint32 *hostAddr;
+
+ hostAddr = (t_uint32*)(
+ cm_DSP_GetHostLogicalAddress(client->memories[requireindex->memories[k].memory->id]) +
+ requireindex->memories[k].offset * requireindex->memories[k].memory->memEntSize);
+
+ LOG_INTERNAL(2, "Fill ItfRef %s.%s mem=%s Off=%x @=%x\n",
+ client->pathname, require->name,
+ requireindex->memories[k].memory->memoryName,
+ requireindex->memories[k].offset,
+ hostAddr, 0);
+
+ /*
+             * Fill the interface references. We start with This and then the methods, in order to
+             * keep the "Unbinded" panic as long as possible and avoid calling a method with a wrong
+             * This. This is relevant only for optional interfaces, since we must go to the stop
+             * state before rebinding any other required interface.
+             *
+             * Direct write to DSP memory without going through the DSP abstraction, since we know we are in 24-bit mode.
+ */
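+            /*
+             * Resulting layout at hostAddr (effectively one 24-bit DSP word per entry, seen from
+             * the host as consecutive 32-bit words):
+             *   [0] This of the server, [1..methodNumber] method addresses.
+             */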
+ // Write THIS reference into the Data field of the interface reference
+ // Write the interface methods reference
+
+ if(((t_uint32)hostAddr & 0x7) == 0 && require->interface->methodNumber > 0)
+ {
+ // We are 64word byte aligned, combine this write with first method
+ *(volatile t_uint64*)hostAddr =
+ ((t_uint64)server->thisAddress << 0) |
+ ((t_uint64)provideLoaded->indexesLoaded[itfLocalBC->collectionIndex][0].methodAddresses << 32);
+ hostAddr += 2;
+ j = 1;
+ }
+ else
+ {
+ // We are not, write this which will align us
+ *hostAddr++ = (t_uint32)server->thisAddress;
+ j = 0;
+ }
+
+ // Word align copy
+ for(; j < require->interface->methodNumber - 1; j+=2) {
+ *(volatile t_uint64*)hostAddr =
+ ((t_uint64)provideLoaded->indexesLoaded[itfLocalBC->collectionIndex][j].methodAddresses << 0) |
+ ((t_uint64)provideLoaded->indexesLoaded[itfLocalBC->collectionIndex][j+1].methodAddresses << 32);
+ hostAddr += 2;
+ }
+
+ // Last word align if required
+ if(j < require->interface->methodNumber)
+ *hostAddr = provideLoaded->indexesLoaded[itfLocalBC->collectionIndex][j].methodAddresses;
+ }
+ }
+ else
+ {
+ t_function_relocation *reloc = client->Template->delayedRelocation;
+ while(reloc != NULL) {
+ for(j = 0; j < provide->interface->methodNumber; j++)
+ {
+ if(provide->interface->methodNames[j] == reloc->symbol_name) {
+ cm_ELF_performRelocation(
+ reloc->type,
+ reloc->symbol_name,
+ provideLoaded->indexesLoaded[itfLocalBC->collectionIndex][j].methodAddresses,
+ reloc->reloc_addr);
+ break;
+ }
+ }
+
+ reloc = reloc -> next;
+ }
+ }
+
+ /*
+ * Memorise this reference
+ */
+ {
+ t_interface_reference* itfRef = &client->interfaceReferences[itfRequire->requireIndex][itfRequire->collectionIndex];
+
+ itfRef->provideIndex = itfLocalBC->provideIndex;
+ itfRef->collectionIndex = itfLocalBC->collectionIndex;
+ itfRef->instance = itfLocalBC->server;
+ itfRef->bfInfoID = bfInfoID;
+ itfRef->bfInfo = bfInfo;
+
+ /*
+         * Do not count bindings from the EE (i.e. interrupt lines), as this would prevent
+         * cm_destroyInstance() of the server from succeeding (interrupt line bindings are
+         * destroyed after the check in cm_destroyInstance()).
+ */
+ if (client->Template->classe != FIRMWARE)
+ server->providedItfUsedCount++;
+ }
+}
+
+static void cm_registerLowLevelInterfaceToConst(
+ const t_interface_require_description *itfRequire,
+ const t_component_instance* targetInstance)
+{
+ const t_component_instance* client = itfRequire->client;
+
+ /*
+     * Record that there is no reference any more
+ */
+ {
+ t_interface_reference* itfRef = &client->interfaceReferences[itfRequire->requireIndex][itfRequire->collectionIndex];
+
+ // This is an unbind from a true component (not to void)
+ // Do not count bindings from EE (ie interrupt line)
+ if ((targetInstance == NULL)
+ && (client->Template->classe != FIRMWARE)
+ && (itfRef->instance != (t_component_instance *)NMF_VOID_COMPONENT)
+ && (itfRef->instance != NULL))
+ {
+ ((t_component_instance*)itfRef->instance)->providedItfUsedCount--;
+ }
+
+ itfRef->instance = targetInstance;
+        itfRef->bfInfoID = BF_SYNCHRONOUS;        // Just to record that no binding component is used and keep unbind-to-void happy ;-).
+ }
+}
+
+static void cm_bindLowLevelInterfaceToConst(
+ const t_interface_require_description *itfRequire,
+ const t_dsp_address functionAddress,
+ const t_component_instance* targetInstance) {
+ const t_component_instance* client = itfRequire->client;
+ t_interface_require *require = &client->Template->requires[itfRequire->requireIndex];
+ int j, k;
+
+
+ // If DSP is off/panic/... -> write nothing
+ if(
+ require->indexes != NULL
+ && cm_DSP_GetState(client->Template->dspId)->state == MPC_STATE_BOOTED)
+ {
+ t_interface_require_index *requireindex = &require->indexes[itfRequire->collectionIndex];
+
+ for(k = 0; k < requireindex->numberOfClient; k++) {
+ t_uint32 *hostAddr;
+
+ hostAddr = (t_uint32*)(
+ cm_DSP_GetHostLogicalAddress(client->memories[requireindex->memories[k].memory->id]) +
+ requireindex->memories[k].offset * requireindex->memories[k].memory->memEntSize);
+
+ /*
+             * Fill the interface references. We start with the methods and then This, in order to
+             * switch to the "Unbinded" panic as fast as possible and avoid calling a method with a
+             * wrong This. This is relevant only for optional interfaces, since we must go to the
+             * stop state before rebinding any other required interface.
+             *
+             * Direct write to DSP memory without going through the DSP abstraction, since we know we are in 24-bit mode.
+ */
+ /*
+ * Write THIS reference into the Data field of the interface reference
+             * Hack to simplify debugging: just keep the THIS reference equal to the caller's one
+             * (could be removed if a __return_address MMDSP intrinsic were provided by the compiler).
+ */
+ // Write the interface methods reference
+
+ if(((t_uint32)hostAddr & 0x7) == 0 && require->interface->methodNumber > 0)
+ {
+ // We are 64word byte aligned, combine this write with first method
+ *(volatile t_uint64*)hostAddr =
+ ((t_uint64)client->thisAddress << 0) |
+ ((t_uint64)functionAddress << 32);
+ hostAddr += 2;
+ j = 1;
+ }
+ else
+ {
+ // We are not, write this which will align us
+ *hostAddr++ = (t_uint32)client->thisAddress;
+ j = 0;
+ }
+
+ // Word align copy
+ for(; j < require->interface->methodNumber - 1; j+=2) {
+ *(volatile t_uint64*)hostAddr =
+ ((t_uint64)functionAddress << 0) |
+ ((t_uint64)functionAddress << 32);
+ hostAddr += 2;
+ }
+
+ // Last word align if required
+ if(j < require->interface->methodNumber)
+ *hostAddr = functionAddress;
+ }
+ }
+
+ cm_registerLowLevelInterfaceToConst(itfRequire, targetInstance);
+}
+
+/*
+ * Bind User component though primitive binding factory
+ */
+t_cm_error cm_bindInterface(
+ const t_interface_require_description *itfRequire,
+ const t_interface_provide_description *itfProvide) {
+
+ LOG_INTERNAL(1, "\n##### Bind Synchronous %s/%x.%s -> %s/%x.%s #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName,
+ itfProvide->server->pathname, itfProvide->server, itfProvide->origName);
+
+ cm_bindLowLevelInterface(
+ itfRequire,
+ itfProvide,
+ BF_SYNCHRONOUS, NULL);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_BIND_SYNCHRONOUS,
+ itfRequire->client, itfProvide->server,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ itfProvide->server->Template->provides[itfProvide->provideIndex].name);
+
+ return CM_OK;
+}
+
+/*
+ *
+ */
+void cm_unbindInterface(
+ const t_interface_require_description *itfRequire) {
+
+ LOG_INTERNAL(1, "\n##### UnBind synchronous %s/%x.%s #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName, 0, 0, 0);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_UNBIND_SYNCHRONOUS,
+ itfRequire->client, NULL,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ NULL);
+
+ cm_bindLowLevelInterfaceToConst(itfRequire,
+ 0x0,
+ NULL);
+}
+
+/*
+ *
+ */
+t_cm_error cm_bindInterfaceToVoid(
+ const t_interface_require_description *itfRequire) {
+ LOG_INTERNAL(1, "\n##### Bind %s/%x.%s -> Void #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName, 0, 0, 0);
+
+ cm_bindLowLevelInterfaceToConst(itfRequire,
+ cm_EEM_getExecutiveEngine(itfRequire->client->Template->dspId)->voidAddr,
+ (t_component_instance*)NMF_VOID_COMPONENT);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_BIND_SYNCHRONOUS,
+ itfRequire->client, NULL,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ NULL);
+
+ return CM_OK;
+}
+/*
+ * Find the server and its interface bound to a given required interface of a given component
+ */
+t_cm_error cm_lookupInterface(
+ const t_interface_require_description *itfRequire,
+ t_interface_provide_description *itfProvide) {
+ const t_component_instance* client = itfRequire->client;
+ t_interface_reference* itfRef = &client->interfaceReferences[itfRequire->requireIndex][itfRequire->collectionIndex];
+
+ if(itfRef->instance != NULL)
+ {
+ itfProvide->server = itfRef->instance;
+ itfProvide->provideIndex = itfRef->provideIndex;
+ itfProvide->collectionIndex = itfRef->collectionIndex;
+
+ return CM_OK;
+ } else {
+ itfProvide->server = NULL;
+ return CM_INTERFACE_NOT_BINDED;
+ }
+}
+
+/*
+ *
+ */
+t_cm_error cm_bindInterfaceTrace(
+ const t_interface_require_description *itfRequire,
+ const t_interface_provide_description *itfProvide,
+ t_elfdescription *elfhandleTrace)
+{
+ t_interface_require *require = &itfRequire->client->Template->requires[itfRequire->requireIndex];
+ t_interface_require_description bcitfRequire;
+ t_interface_provide_description bcitfProvide;
+ t_trace_bf_info *bfInfo;
+ t_cm_error error;
+
+ LOG_INTERNAL(1, "\n##### Bind Synchronous Trace %s/%x.%s -> %s/%x.%s #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName,
+ itfProvide->server->pathname, itfProvide->server, itfProvide->origName);
+
+    /* Allocate trace binding factory information */
+ bfInfo = (t_trace_bf_info*)OSAL_Alloc(sizeof(t_trace_bf_info));
+ if(bfInfo == 0)
+ return CM_NO_MORE_MEMORY;
+
+ /*
+ * Instantiate related trace on dsp
+ */
+ {
+ char traceTemplateName[4 + MAX_INTERFACE_TYPE_NAME_LENGTH + 1];
+
+ cm_StringCopy(traceTemplateName,"_tr.", sizeof(traceTemplateName));
+ cm_StringConcatenate(traceTemplateName, require->interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+
+ if ((error = cm_instantiateComponent(
+ traceTemplateName,
+ itfRequire->client->domainId,
+ itfProvide->server->priority,
+ traceDup,
+ elfhandleTrace,
+ &bfInfo->traceInstance)) != CM_OK) {
+ OSAL_Free(bfInfo);
+ return (error == CM_COMPONENT_NOT_FOUND)?CM_BINDING_COMPONENT_NOT_FOUND : error;
+ }
+ }
+
+    /* Bind the trace instance to the server interface (error must not occur) */
+ CM_ASSERT(cm_getRequiredInterface(bfInfo->traceInstance, "target", &bcitfRequire) == CM_OK);
+
+ cm_bindLowLevelInterface(&bcitfRequire, itfProvide, BF_SYNCHRONOUS, NULL);
+
+    /* Get the trace instance interface (error must not occur) */
+ CM_ASSERT(cm_getProvidedInterface(bfInfo->traceInstance, "target", &bcitfProvide) == CM_OK);
+
+    /* Bind the client to the trace instance (error must not occur) */
+ cm_bindLowLevelInterface(itfRequire, &bcitfProvide, BF_TRACE, bfInfo);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_BIND_SYNCHRONOUS,
+ itfRequire->client, itfProvide->server,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ itfProvide->server->Template->provides[itfProvide->provideIndex].name);
+
+ return CM_OK;
+}
+
+void cm_unbindInterfaceTrace(
+ const t_interface_require_description *itfRequire,
+ t_trace_bf_info *bfInfo)
+{
+ t_interface_require_description traceitfRequire;
+
+ LOG_INTERNAL(1, "\n##### UnBind trace synchronous %s/%x.%s #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName, 0, 0, 0);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_UNBIND_SYNCHRONOUS,
+ itfRequire->client, NULL,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ NULL);
+
+    /* Unbind the client from the trace binding component */
+ cm_bindLowLevelInterfaceToConst(itfRequire, 0x0, NULL);
+
+    /* Explicitly unbind the trace instance from the server component */
+    /* This is mandatory to fix the providedItfUsedCount of the server */
+ CM_ASSERT(cm_getRequiredInterface(bfInfo->traceInstance, "target", &traceitfRequire) == CM_OK);
+
+ cm_registerLowLevelInterfaceToConst(&traceitfRequire, NULL);
+
+    /* Destroy the trace binding component */
+ cm_destroyInstance(bfInfo->traceInstance, DESTROY_WITHOUT_CHECK);
+
+ /* Free BF info */
+ OSAL_Free(bfInfo);
+}
+
+
+/*
+ *
+ */
+t_cm_error cm_bindInterfaceAsynchronous(
+ const t_interface_require_description *itfRequire,
+ const t_interface_provide_description *itfProvide,
+ t_uint32 fifosize,
+ t_dsp_memory_type_id dspEventMemType,
+ t_elfdescription *elfhandleEvent) {
+ t_interface_require *require = &itfRequire->client->Template->requires[itfRequire->requireIndex];
+ t_interface_require_description eventitfRequire;
+ t_interface_provide_description eventitfProvide;
+ t_async_bf_info *bfInfo;
+ t_cm_error error;
+
+ LOG_INTERNAL(1, "\n##### Bind Asynchronous %s/%x.%s -> %s/%x.%s #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName,
+ itfProvide->server->pathname, itfProvide->server, itfProvide->origName);
+
+    /* Allocate asynchronous binding factory information */
+ bfInfo = (t_async_bf_info*)OSAL_Alloc(sizeof(t_async_bf_info));
+ if(bfInfo == 0)
+ return CM_NO_MORE_MEMORY;
+
+ /*
+ * Instantiate related event on dsp
+ */
+ {
+ char eventTemplateName[4 + MAX_INTERFACE_TYPE_NAME_LENGTH + 1];
+
+ cm_StringCopy(eventTemplateName,"_ev.", sizeof(eventTemplateName));
+ cm_StringConcatenate(eventTemplateName, require->interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+
+ if ((error = cm_instantiateComponent(
+ eventTemplateName,
+ itfRequire->client->domainId,
+ itfProvide->server->priority,
+ eventDup,
+ elfhandleEvent,
+ &bfInfo->eventInstance)) != CM_OK) {
+ OSAL_Free(bfInfo);
+ return (error == CM_COMPONENT_NOT_FOUND)?CM_BINDING_COMPONENT_NOT_FOUND : error;
+ }
+ }
+
+ /*
+ * Initialize the event component
+ */
+ {
+ unsigned int size;
+
+        // Get fifo elem size (which was stored in TOP by convention)
+ size = cm_readAttributeNoError(bfInfo->eventInstance, "TOP");
+ LOG_INTERNAL(3, "DspEvent Fifo element size = %d\n", size, 0, 0, 0, 0, 0);
+
+ // Allocate fifo
+ if ((error = dspevent_createDspEventFifo(bfInfo->eventInstance,
+ "TOP",
+ fifosize, size,
+ dspEventMemType,
+ &bfInfo->dspfifoHandle)) != CM_OK)
+ {
+ cm_destroyInstance(bfInfo->eventInstance, DESTROY_WITHOUT_CHECK);
+ OSAL_Free(bfInfo);
+ return error;
+ }
+ }
+
+    /* Bind the event to the server interface (error must not occur) */
+ CM_ASSERT(cm_getRequiredInterface(bfInfo->eventInstance, "target", &eventitfRequire) == CM_OK);
+
+ cm_bindLowLevelInterface(&eventitfRequire, itfProvide, BF_SYNCHRONOUS, NULL);
+
+    /* Get the event interface (error must not occur) */
+ CM_ASSERT(cm_getProvidedInterface(bfInfo->eventInstance, "target", &eventitfProvide) == CM_OK);
+
+    /* Bind the client to the event (error must not occur) */
+ cm_bindLowLevelInterface(itfRequire, &eventitfProvide, BF_ASYNCHRONOUS, bfInfo);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_BIND_ASYNCHRONOUS,
+ itfRequire->client, itfProvide->server,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ itfProvide->server->Template->provides[itfProvide->provideIndex].name);
+
+ return CM_OK;
+}
+
+void cm_unbindInterfaceAsynchronous(
+ const t_interface_require_description *itfRequire,
+ t_async_bf_info *bfInfo)
+{
+ t_interface_require_description eventitfRequire;
+
+ LOG_INTERNAL(1, "\n##### UnBind asynchronous %s/%x.%s #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName, 0, 0, 0);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_UNBIND_ASYNCHRONOUS,
+ itfRequire->client, NULL,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ NULL);
+
+ /* Unbind Client from Event Binding Component */
+ cm_bindLowLevelInterfaceToConst(itfRequire, 0x0, NULL);
+
+ /* Unbind explicitly Event from Server Binding Component */
+ /* This is mandatory to fix the providedItfUsedCount of the server */
+ CM_ASSERT(cm_getRequiredInterface(bfInfo->eventInstance, "target", &eventitfRequire) == CM_OK);
+
+ cm_registerLowLevelInterfaceToConst(&eventitfRequire, NULL);
+
+ /* Destroy Event fifo */
+ dspevent_destroyDspEventFifo(bfInfo->dspfifoHandle);
+
+ /* Destroy Event Binding Component */
+ cm_destroyInstance(bfInfo->eventInstance, DESTROY_WITHOUT_CHECK);
+
+ /* Free BF info */
+ OSAL_Free(bfInfo);
+}
+
+/*!
+ * Create Shared FIFO and set stub and skeleton to it
+ */
+PRIVATE t_cm_error cm_createParamsFifo(t_component_instance *stub,
+ t_component_instance *skeleton,
+ t_cm_domain_id domainId,
+ t_uint32 fifosize,
+ t_nmf_fifo_arm_desc **fifo,
+ t_uint32 *fifoElemSize,
+ t_uint32 bcDescSize)
+{
+ t_nmf_core_id stubcore = (stub != NULL) ?(stub->Template->dspId): ARM_CORE_ID;
+ t_nmf_core_id skelcore = (skeleton != NULL) ?(skeleton->Template->dspId) : ARM_CORE_ID;
+ t_component_instance *bcnotnull = (stub != NULL) ? stub : skeleton;
+ int _fifoelemsize;
+
+ CM_ASSERT(bcnotnull != NULL);
+
+    /* Get fifo param elem size (which was stored in FIFO by convention) */
+ _fifoelemsize = cm_readAttributeNoError(bcnotnull, "FIFO");
+ LOG_INTERNAL(3, "Fifo Params element size = %d\n", _fifoelemsize, 0, 0, 0, 0, 0);
+ if(fifoElemSize != NULL)
+ *fifoElemSize = _fifoelemsize;
+
+ /* Allocation of the fifo params */
+    *fifo = fifo_alloc(stubcore, skelcore, _fifoelemsize, fifosize, 1+bcDescSize, paramsLocation, extendedFieldLocation, domainId); /* 1+nbMethods for hostBCThis_or_TOP space */
+ if(*fifo == NULL) {
+ ERROR("CM_NO_MORE_MEMORY: fifo_alloc() failed in cm_createParamsFifo()\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ if(stub != NULL)
+ {
+        /* Set stub FIFO attribute (error must not occur) */
+ cm_writeAttribute(stub, "FIFO", (*fifo)->dspAdress);
+
+ LOG_INTERNAL(2, " FIFO param %x:%x\n", *fifo, (*fifo)->dspAdress, 0, 0, 0, 0);
+ }
+
+ if(skeleton != NULL)
+ {
+        /* Set skeleton FIFO attribute (error must not occur) */
+ cm_writeAttribute(skeleton, "FIFO", (*fifo)->dspAdress);
+
+ LOG_INTERNAL(2, " FIFO param %x:%x\n", *fifo, (*fifo)->dspAdress, 0, 0, 0, 0);
+ }
+
+ return CM_OK;
+}
+/**
+ *
+ */
+static void cm_destroyParamsFifo(t_nmf_fifo_arm_desc *fifo) {
+ fifo_free(fifo);
+}
+
+/*!
+ * Create DSP skeleton
+ */
+PRIVATE t_cm_error cm_createDSPSkeleton(
+ const t_interface_provide_description *itfProvide,
+ t_uint32 fifosize,
+ t_dsp_memory_type_id dspEventMemType, //INTERNAL_XRAM24
+ t_elfdescription *elfhandleSkeleton,
+ t_dspskel_bf_info *bfInfo)
+{
+ t_interface_provide *provide = &itfProvide->server->Template->provides[itfProvide->provideIndex];
+ t_interface_require_description skelitfRequire;
+ t_cm_error error;
+ unsigned int fifoeventsize = 0;
+
+    /* Instantiate the related skeleton on the DSP */
+ {
+ char stubTemplateName[4 + MAX_INTERFACE_TYPE_NAME_LENGTH + 1];
+
+ cm_StringCopy(stubTemplateName,"_sk.", sizeof(stubTemplateName));
+ cm_StringConcatenate(stubTemplateName, provide->interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+
+ if ((error = cm_instantiateComponent(
+ stubTemplateName,
+ itfProvide->server->domainId,
+ itfProvide->server->priority,
+ skeletonDup,
+ elfhandleSkeleton,
+ &bfInfo->skelInstance)) != CM_OK) {
+ return ((error == CM_COMPONENT_NOT_FOUND)?CM_BINDING_COMPONENT_NOT_FOUND:error);
+ }
+ }
+
+    /* Get fifo elem size (which was stored in TOP by convention) */
+ fifoeventsize = cm_readAttributeNoError(bfInfo->skelInstance, "TOP");
+ LOG_INTERNAL(3, "DspEvent Fifo element size = %d\n", fifoeventsize, 0, 0, 0, 0, 0);
+
+ /* Allocation of the itf event dsp fifo */
+ if ((error = dspevent_createDspEventFifo(
+ bfInfo->skelInstance,
+ "TOP",
+ fifosize,
+ fifoeventsize,
+ dspEventMemType,
+ &bfInfo->dspfifoHandle)) != CM_OK)
+ {
+ cm_destroyInstance(bfInfo->skelInstance, DESTROY_WITHOUT_CHECK);
+ return error;
+ }
+
+    /* Bind the skeleton to the server component (error must not occur) */
+ CM_ASSERT(cm_getRequiredInterface(bfInfo->skelInstance, "target", &skelitfRequire) == CM_OK);
+
+ cm_bindLowLevelInterface(&skelitfRequire, itfProvide, BF_SYNCHRONOUS, NULL);
+
+ return CM_OK;
+}
+
+/**
+ * Destroy DSP Skeleton
+ */
+PRIVATE t_cm_error cm_destroyDSPSkeleton(t_dspskel_bf_info *bfInfo) {
+ t_interface_require_description skelitfRequire;
+
+    /* Explicitly unbind the skeleton from the server component (error must not occur) */
+ /* This is mandatory to fix the providedItfUsedCount of the server */
+ CM_ASSERT(cm_getRequiredInterface(bfInfo->skelInstance, "target", &skelitfRequire) == CM_OK);
+
+ cm_registerLowLevelInterfaceToConst(&skelitfRequire, NULL);
+
+ /* Destroy Event fifo */
+ dspevent_destroyDspEventFifo(bfInfo->dspfifoHandle);
+
+ /* Destroy Event Binding Component */
+ return cm_destroyInstance(bfInfo->skelInstance, DESTROY_WITHOUT_CHECK);
+}
+
+/*
+ * Bind the host (CM core) to an MPC provided interface: a "_sk." skeleton is
+ * instantiated next to the server, and a shared params FIFO is created and
+ * filled with the skeleton method addresses.
+ */
+t_cm_error cm_bindComponentFromCMCore(
+ const t_interface_provide_description *itfProvide,
+ t_uint32 fifosize,
+ t_dsp_memory_type_id dspEventMemType,
+ t_elfdescription *elfhandleSkeleton,
+ t_host2mpc_bf_info **bfInfo) {
+ t_interface_provide *provide = &itfProvide->server->Template->provides[itfProvide->provideIndex];
+ t_dsp_offset shareVarOffset;
+ t_cm_error error;
+
+ LOG_INTERNAL(1, "\n##### Bind HOST -> %s/%x.%s #####\n",
+ itfProvide->server->pathname, itfProvide->server, itfProvide->origName, 0, 0, 0);
+
+ /* Allocate host2dsp binding factory information */
+ *bfInfo = (t_host2mpc_bf_info*)OSAL_Alloc(sizeof(t_host2mpc_bf_info));
+ if((*bfInfo) == 0)
+ return CM_NO_MORE_MEMORY;
+
+ /* Create the Skeleton */
+ if ((error = cm_createDSPSkeleton(itfProvide,
+ fifo_normalizeDepth(fifosize), /* We SHALL create DSP Skeleton before creating the Params Fifo, but we need in advance the real depth of this fifo */
+ dspEventMemType,
+ elfhandleSkeleton,
+ &(*bfInfo)->dspskeleton)) != CM_OK)
+ {
+ OSAL_Free((*bfInfo));
+ return error;
+ }
+
+ /* Create the FIFO Params */
+ if ((error = cm_createParamsFifo(NULL,
+ (*bfInfo)->dspskeleton.skelInstance,
+ itfProvide->server->domainId,
+ fifosize,
+ &(*bfInfo)->fifo,
+ NULL,
+ provide->interface->methodNumber)) != CM_OK)
+ {
+ cm_destroyDSPSkeleton(&(*bfInfo)->dspskeleton);
+ OSAL_Free((*bfInfo));
+ return error;
+ }
+
+ /* Set Target info in FIFO param to TOP */
+ shareVarOffset = cm_getAttributeMpcAddress((*bfInfo)->dspskeleton.skelInstance, "TOP");
+
+ /*
+ * Set Target info in FIFO param to armThis
+ * Should not return any error
+ */
+ fifo_params_setSharedField((*bfInfo)->fifo, 0, (t_shared_field)shareVarOffset /* ArmBCThis_or_TOP */);
+
+ /* Initialise FIFO Param bcDesc with Skeleton methods */
+ {
+ int i;
+ t_component_instance *skel = (*bfInfo)->dspskeleton.skelInstance;
+ for (i=0; i < provide->interface->methodNumber; i++)
+ {
+ /* should not return error */
+ fifo_params_setSharedField(
+ (*bfInfo)->fifo,
+ 1+i,
+ skel->Template->providesLoaded[0].indexesLoaded[0][i].methodAddresses
+ );
+ }
+ }
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_BIND_ASYNCHRONOUS,
+ ARM_TRACE_COMPONENT, itfProvide->server,
+ NULL,
+ itfProvide->server->Template->provides[itfProvide->provideIndex].name);
+
+ return CM_OK;
+}
+
+void cm_unbindComponentFromCMCore(
+ t_host2mpc_bf_info* bfInfo) {
+ t_component_instance *skel = bfInfo->dspskeleton.skelInstance;
+ t_interface_reference* itfProvide = &skel->interfaceReferences[0][0];
+ t_interface_provide *provide = &itfProvide->instance->Template->provides[itfProvide->provideIndex];
+
+ LOG_INTERNAL(1, "\n##### UnBind HOST -> %s/%x.%s #####\n",
+ itfProvide->instance->pathname, itfProvide->instance, provide->name, 0, 0, 0);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_UNBIND_ASYNCHRONOUS,
+ ARM_TRACE_COMPONENT, itfProvide->instance,
+ NULL,
+ itfProvide->instance->Template->provides[itfProvide->provideIndex].name);
+
+ // Destroy FIFO params
+ cm_destroyParamsFifo(bfInfo->fifo);
+
+    // Destroy Skeleton
+ cm_destroyDSPSkeleton(&bfInfo->dspskeleton);
+
+ // Free BF info (which contains bcDecr(==dspfct) and arm This)
+ OSAL_Free(bfInfo);
+}
+
+/**
+ * Create DSP Stub
+ */
+PRIVATE t_cm_error cm_createDSPStub(
+ const t_interface_require_description *itfRequire,
+ const char* itfType,
+ t_dspstub_bf_info* bfInfo,
+ t_elfdescription *elfhandleStub,
+ t_interface_provide_description *itfstubProvide) {
+ t_cm_error error;
+
+ /*
+     * Instantiate the related stub on the DSP
+ */
+ {
+ char skelTemplateName[4 + MAX_INTERFACE_TYPE_NAME_LENGTH + 1];
+
+ cm_StringCopy(skelTemplateName, "_st.", sizeof(skelTemplateName));
+ cm_StringConcatenate(skelTemplateName, itfType, MAX_INTERFACE_TYPE_NAME_LENGTH);
+
+ if ((error = cm_instantiateComponent(
+ skelTemplateName,
+ itfRequire->client->domainId,
+ itfRequire->client->priority,
+ stubDup,
+ elfhandleStub,
+ &bfInfo->stubInstance)) != CM_OK) {
+ return (error == CM_COMPONENT_NOT_FOUND)?CM_BINDING_COMPONENT_NOT_FOUND : error;
+ }
+ }
+
+    /* Get the internal component that serves this interface (error must not occur) */
+ (void)cm_getProvidedInterface(bfInfo->stubInstance, "source", itfstubProvide);
+
+ return CM_OK;
+}
+
+PRIVATE t_cm_error cm_destroyDSPStub(
+ const t_interface_require_description *itfRequire,
+ t_dspstub_bf_info* bfInfo) {
+
+    /* Unbind the client from the stub binding component */
+ cm_bindLowLevelInterfaceToConst(itfRequire,
+ 0x0,
+ NULL);
+
+    /* Destroy the stub binding component */
+ return cm_destroyInstance(bfInfo->stubInstance, DESTROY_WITHOUT_CHECK);
+}
+/*
+ *
+ */
+t_cm_error cm_bindComponentToCMCore(
+ const t_interface_require_description *itfRequire,
+ t_uint32 fifosize,
+ t_uint32 context,
+ t_elfdescription *elfhandleStub,
+ t_mpc2host_bf_info ** bfInfo) {
+ t_interface_require *require = &itfRequire->client->Template->requires[itfRequire->requireIndex];
+ t_interface_provide_description itfstubProvide;
+ t_cm_error error;
+ t_uint32 fifoelemsize;
+
+ LOG_INTERNAL(1, "\n##### Bind %s/%x.%s -> HOST #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName, 0, 0, 0);
+
+ /* Allocate dsp2host binding factory information */
+ *bfInfo = (t_mpc2host_bf_info*)OSAL_Alloc(sizeof(t_mpc2host_bf_info));
+ if(*bfInfo == 0)
+ return CM_NO_MORE_MEMORY;
+ (*bfInfo)->context = context;
+
+ if ((error = cm_createDSPStub(itfRequire,
+ require->interface->type,
+ &(*bfInfo)->dspstub,
+ elfhandleStub,
+ &itfstubProvide)) != CM_OK)
+ {
+ OSAL_Free(*bfInfo);
+ return error;
+ }
+
+ /* Create the FIFO Params */
+ if ((error = cm_createParamsFifo(
+ (*bfInfo)->dspstub.stubInstance,
+ NULL,
+ itfRequire->client->domainId,
+ fifosize,
+ &(*bfInfo)->fifo,
+ &fifoelemsize,
+ 1)) != CM_OK) /* 1 => we used first field as max params size */
+ {
+ cm_destroyDSPStub(itfRequire, &(*bfInfo)->dspstub);
+ OSAL_Free(*bfInfo);
+ return error;
+ }
+
+    /* Bind the client to the stub component (error must not occur) */
+ cm_bindLowLevelInterface(itfRequire, &itfstubProvide, BF_DSP2HOST, *bfInfo);
+
+ /* Bind stub component to host (virtual bind) */
+ cm_bindVirtualInterface((*bfInfo)->dspstub.stubInstance, (t_component_instance*)NMF_HOST_COMPONENT);
+
+ /*
+ * Set Target info in FIFO param to armThis
+ * Initialise FIFO Param bcDesc with Jumptable
+ * Should not return any error
+ */
+ fifo_params_setSharedField((*bfInfo)->fifo, 0, (t_shared_field)context /* ArmBCThis_or_TOP */);
+ fifo_params_setSharedField((*bfInfo)->fifo, 1, (t_shared_field)fifoelemsize * 2/* bcDescRef */);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_BIND_ASYNCHRONOUS,
+ itfRequire->client, ARM_TRACE_COMPONENT,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ NULL);
+
+ return error;
+}
+
+void cm_unbindComponentToCMCore(
+ const t_interface_require_description *itfRequire,
+ t_mpc2host_bf_info *bfInfo)
+{
+ LOG_INTERNAL(1, "\n##### UnBind %s/%x.%s -> HOST #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName, 0, 0, 0);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_UNBIND_ASYNCHRONOUS,
+ itfRequire->client, ARM_TRACE_COMPONENT,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ NULL);
+
+ /* Unbind virtual interface coms */
+ cm_unbindVirtualInterface(bfInfo->dspstub.stubInstance);
+
+ // Destroy FIFO params
+ cm_destroyParamsFifo(bfInfo->fifo);
+
+ // Destroy DSP Stub
+ cm_destroyDSPStub(itfRequire, &bfInfo->dspstub);
+
+ /* Free BF info */
+ OSAL_Free(bfInfo);
+}
+
+/*!
+ *
+ */
+t_cm_error cm_bindInterfaceDistributed(
+ const t_interface_require_description *itfRequire,
+ const t_interface_provide_description *itfProvide,
+ t_uint32 fifosize,
+ t_dsp_memory_type_id dspEventMemType,
+ t_elfdescription *elfhandleSkeleton,
+ t_elfdescription *elfhandleStub) {
+ t_interface_require *require = &itfRequire->client->Template->requires[itfRequire->requireIndex];
+ t_interface_provide_description itfstubProvide;
+ t_cm_error error;
+ t_mpc2mpc_bf_info *bfInfo;
+ t_dsp_offset shareVarOffset;
+
+ LOG_INTERNAL(1, "\n##### Bind Distributed %s/%x.%s -> %s/%x.%s #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName,
+ itfProvide->server->pathname, itfProvide->server, itfProvide->origName);
+
+    /* Allocate asynchronous binding factory information */
+ bfInfo = (t_mpc2mpc_bf_info*)OSAL_Alloc(sizeof(t_mpc2mpc_bf_info));
+ if(bfInfo == 0)
+ return CM_NO_MORE_MEMORY;
+
+ /* Create the Skeleton */
+ if ((error = cm_createDSPSkeleton(itfProvide,
+            fifo_normalizeDepth(fifosize), /* The DSP skeleton SHALL be created before the params FIFO, but its real depth is needed in advance */
+ dspEventMemType,
+ elfhandleSkeleton,
+ &bfInfo->dspskeleton)) != CM_OK)
+ {
+ OSAL_Free(bfInfo);
+ return error;
+ }
+
+ // Create DSP Stub
+ if ((error = cm_createDSPStub(itfRequire,
+ require->interface->type,
+ &bfInfo->dspstub,
+ elfhandleStub,
+ &itfstubProvide)) != CM_OK)
+ {
+ cm_destroyDSPSkeleton(&bfInfo->dspskeleton);
+ OSAL_Free(bfInfo);
+ return error;
+ }
+
+    /* Bind client to stub component (error must not occur) */
+ cm_bindLowLevelInterface(itfRequire, &itfstubProvide, BF_DSP2DSP, bfInfo);
+
+ /* Create the FIFO Params */
+ if ((error = cm_createParamsFifo(
+ bfInfo->dspstub.stubInstance,
+ bfInfo->dspskeleton.skelInstance,
+ itfProvide->server->domainId,
+ fifosize,
+ &bfInfo->fifo,
+ NULL,
+ require->interface->methodNumber)) != CM_OK)
+ {
+ cm_destroyDSPStub(itfRequire, &bfInfo->dspstub);
+ cm_destroyDSPSkeleton(&bfInfo->dspskeleton);
+ OSAL_Free(bfInfo);
+ return error;
+ }
+
+    /* Bind stub component to skeleton component (virtual bind) */
+ cm_bindVirtualInterface(bfInfo->dspstub.stubInstance, bfInfo->dspskeleton.skelInstance);
+
+ /* Set Target info in FIFO param to TOP */
+ shareVarOffset = cm_getAttributeMpcAddress(bfInfo->dspskeleton.skelInstance, "TOP");
+
+ /*
+     * Set Target info in FIFO param to the skeleton's TOP address
+ * Should not return any error
+ */
+ fifo_params_setSharedField(bfInfo->fifo, 0, (t_shared_field)shareVarOffset /* ArmBCThis_or_TOP */);
+
+ /* Initialise FIFO Param bcDesc with Skeleton methods */
+ {
+ int i;
+ t_component_instance *skel = bfInfo->dspskeleton.skelInstance;
+ for (i=0; i < require->interface->methodNumber; i++)
+ {
+ /* should not return error */
+ fifo_params_setSharedField(
+ bfInfo->fifo,
+ 1+i,
+ skel->Template->providesLoaded[0].indexesLoaded[0][i].methodAddresses
+ );
+ }
+ }
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_BIND_ASYNCHRONOUS,
+ itfRequire->client, itfProvide->server,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ itfProvide->server->Template->provides[itfProvide->provideIndex].name);
+
+ return CM_OK;
+}
+
+/*!
+ *
+ */
+void cm_unbindInterfaceDistributed(
+ const t_interface_require_description *itfRequire,
+ t_mpc2mpc_bf_info *bfInfo)
+{
+ LOG_INTERNAL(1, "\n##### UnBind distributed %s/%x.%s #####\n",
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName, 0, 0, 0);
+
+ cm_TRC_traceBinding(TRACE_BIND_COMMAND_UNBIND_ASYNCHRONOUS,
+ itfRequire->client, NULL,
+ itfRequire->client->Template->requires[itfRequire->requireIndex].name,
+ NULL);
+
+ /* Unbind virtual interface */
+ cm_unbindVirtualInterface(bfInfo->dspstub.stubInstance);
+
+ // Destroy FIFO params
+ cm_destroyParamsFifo(bfInfo->fifo);
+
+ // Destroy DSP Stub
+ cm_destroyDSPStub(itfRequire, &bfInfo->dspstub);
+
+    // Destroy DSP Skeleton
+ cm_destroyDSPSkeleton(&bfInfo->dspskeleton);
+
+ // Destroy BF Info
+ OSAL_Free(bfInfo);
+}
+
+t_cm_error cm_bindInterfaceStaticInterrupt(
+ const t_nmf_core_id coreId,
+ const int interruptLine,
+ const t_component_instance *server,
+ const char* providedItfServerName
+)
+{
+ char requiredItfClientName[CM_IT_NAME_MAX_LENGTH];
+ t_component_instance *client = cm_EEM_getExecutiveEngine(coreId)->instance;
+ t_interface_require_description itfRequire;
+ t_interface_provide_description itfProvide;
+ t_cm_error error;
+
+ //build it[%d] name
+ if (interruptLine < 0 || interruptLine > 255) {return CM_OUT_OF_LIMITS;}
+ cm_fillItName(interruptLine, requiredItfClientName);
+
+ //do binding
+ if ((error = cm_getRequiredInterface(client,requiredItfClientName,&itfRequire)) != CM_OK) {return error;}
+ if ((error = cm_getProvidedInterface(server,providedItfServerName,&itfProvide)) != CM_OK) {return error;}
+ if((error = cm_bindInterface(&itfRequire, &itfProvide)) != CM_OK) {return error;}
+
+ return CM_OK;
+}
+
+t_cm_error cm_unbindInterfaceStaticInterrupt(
+ const t_nmf_core_id coreId,
+ const int interruptLine
+)
+{
+ char requiredItfClientName[CM_IT_NAME_MAX_LENGTH];
+ t_component_instance *client = cm_EEM_getExecutiveEngine(coreId)->instance;
+ t_interface_require_description itfRequire;
+ t_cm_error error;
+
+ //build it[%d] name
+ if (interruptLine < 0 || interruptLine > 255) {return CM_OUT_OF_LIMITS;}
+ cm_fillItName(interruptLine, requiredItfClientName);
+
+ //do unbinding
+ if ((error = cm_getRequiredInterface(client,requiredItfClientName,&itfRequire)) != CM_OK) {return error;}
+ cm_unbindInterface(&itfRequire);
+
+ return CM_OK;
+}
+
+void cm_destroyRequireInterface(t_component_instance* component, t_nmf_client_id clientId)
+{
+ int i, j;
+
+ /*
+ * Special code for SINGLETON handling
+ */
+ if(component->Template->classe == SINGLETON)
+ {
+ if(getNumberOfBind(component) > 0)
+ return;
+ }
+
+ for(i = 0; i < component->Template->requireNumber; i++)
+ {
+ int nb = component->Template->requires[i].collectionSize;
+ for(j = 0; j < nb; j++)
+ {
+ if(component->interfaceReferences[i][j].instance != NULL)
+ {
+ t_interface_reference* itfRef = &component->interfaceReferences[i][j];
+ t_interface_require_description itfRequire;
+
+ itfRequire.client = component;
+ itfRequire.requireIndex = i;
+ itfRequire.collectionIndex = j;
+ itfRequire.origName = component->Template->requires[i].name;
+
+ switch (itfRef->bfInfoID) {
+ case BF_SYNCHRONOUS:
+ /* Error ignored as it is always OK */
+ cm_unbindInterface(&itfRequire);
+ break;
+ case BF_TRACE:
+ cm_unbindInterfaceTrace(&itfRequire,
+ (t_trace_bf_info*)itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfo);
+ break;
+ case BF_ASYNCHRONOUS:
+ cm_unbindInterfaceAsynchronous(&itfRequire,
+ (t_async_bf_info*)itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfo);
+ break;
+ case BF_DSP2HOST:
+                    /* This 'mpc2host handle' is provided by the host at OS integration level.
+                       It must therefore be handled and released in the OS-specific part.
+                     */
+ cm_unbindComponentToCMCore(&itfRequire,
+ (t_mpc2host_bf_info*)itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfo);
+ break;
+ case BF_HOST2DSP:
+                    /* These bindings go from the CM Core to the DSP; they are not listed
+                       here and must be handled/freed by the host at OS integration level
+                     */
+ break;
+ case BF_DSP2DSP:
+ cm_unbindInterfaceDistributed(&itfRequire,
+ (t_mpc2mpc_bf_info*)itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfo);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+}
+
+void cm_registerSingletonBinding(
+ t_component_instance* component,
+ t_interface_require_description* itfRequire,
+ t_interface_provide_description* itfProvide,
+ t_nmf_client_id clientId)
+{
+ if(component->Template->classe == SINGLETON)
+ {
+ struct t_client_of_singleton* cl = cm_getClientOfSingleton(component, FALSE, clientId);
+ if(cl != NULL)
+ cl->numberOfBind++;
+
+ if(itfProvide != NULL)
+ LOG_INTERNAL(1, " -> Singleton[%d] : Register binding %s/%x.%s -> %s/%x\n",
+ clientId,
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName,
+ itfProvide->server->pathname, itfProvide->server);
+ else
+ LOG_INTERNAL(1, " -> Singleton[%d] : Register binding %s/%x.%s -> ARM/VOID\n",
+ clientId,
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName, 0, 0);
+ }
+}
+
+t_bool cm_unregisterSingletonBinding(
+ t_component_instance* component,
+ t_interface_require_description* itfRequire,
+ t_interface_provide_description* itfProvide,
+ t_nmf_client_id clientId)
+{
+ if(component->Template->classe == SINGLETON)
+ {
+ struct t_client_of_singleton* cl = cm_getClientOfSingleton(component, FALSE, clientId);
+ if(cl != NULL)
+ cl->numberOfBind--;
+
+ if(itfProvide->server == (t_component_instance *)NMF_VOID_COMPONENT)
+ LOG_INTERNAL(1, " -> Singleton[%d] : Unregister binding %s/%x.%s -> ARM/VOID\n",
+ clientId,
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName, 0, 0);
+ else if(itfProvide->server == NULL)
+ LOG_INTERNAL(1, " -> Singleton[%d] : Unregister binding %s/%x.%s -> ?? <already unbound>\n",
+ clientId,
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName, 0, 0);
+ else
+ LOG_INTERNAL(1, " -> Singleton[%d] : Unregister binding %s/%x.%s -> %s/%x\n",
+ clientId,
+ itfRequire->client->pathname, itfRequire->client, itfRequire->origName,
+ itfProvide->server->pathname, itfProvide->server);
+
+ if(getNumberOfBind(component) == 0)
+ {
+ LOG_INTERNAL(1, " -> Singleton[%d] : All required of %s/%x logically unbound, perform physical unbind\n",
+ clientId, itfRequire->client->pathname, itfRequire->client, 0, 0, 0);
+
+ (void)cm_EEM_ForceWakeup(component->Template->dspId);
+
+            // This was the last binding, unbind everything
+ cm_destroyRequireInterface(component, clientId);
+
+ cm_EEM_AllowSleep(component->Template->dspId);
+ }
+ else if(itfProvide->server != NULL)
+ {
+ t_interface_require* itfReq;
+ itfReq = &itfRequire->client->Template->requires[itfRequire->requireIndex];
+ if((itfReq->requireTypes & OPTIONAL_REQUIRE) != 0x0)
+ return TRUE;
+ }
+
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static t_uint16 getNumberOfBind(t_component_instance* component)
+{
+ t_uint16 bindNumber = 0;
+ struct t_client_of_singleton* cur = component->clientOfSingleton;
+
+ for( ; cur != NULL ; cur = cur->next)
+ {
+ bindNumber += cur->numberOfBind;
+ }
+
+ return bindNumber;
+}
+
+static void cm_fillItName(int interruptLine, char *itName)
+{
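+    /* Produces the interface name "it[<line>]", e.g. 42 -> "it[42]"; itName must hold at least CM_IT_NAME_MAX_LENGTH bytes */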
+ int divider = 10000;
+
+ *itName++ = 'i';
+ *itName++ = 't';
+ *itName++ = '[';
+
+    // Find the first significant divider (keep at least 1 to avoid a divide-by-zero for line 0)
+    while(divider > interruptLine && divider > 1)
+        divider /= 10;
+
+ // Compute number
+ do
+ {
+ *itName++ = "0123456789"[interruptLine / divider];
+ interruptLine %= divider;
+ divider /= 10;
+ } while(divider != 0);
+
+ *itName++ = ']';
+ *itName++ = '\0';
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/component/src/binder_check.c b/drivers/staging/nmf-cm/cm/engine/component/src/binder_check.c
new file mode 100644
index 00000000000..373fea0cd47
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/src/binder_check.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include "../inc/bind.h"
+#include <cm/engine/trace/inc/trace.h>
+
+#include <cm/engine/utils/inc/string.h>
+
+t_cm_error cm_checkValidClient(
+ const t_component_instance* client,
+ const char* requiredItfClientName,
+ t_interface_require_description *itfRequire,
+ t_bool *bindable) {
+ t_cm_error error;
+
+ // Component LC state check
+ if (NULL == client)
+ return CM_INVALID_COMPONENT_HANDLE;
+
+ // Check if the requiredItfClientName is required by client component
+ if ((error = cm_getRequiredInterface(client, requiredItfClientName, itfRequire)) != CM_OK)
+ return error;
+
+    // Check that the required interface is not already bound
+ {
+ t_interface_reference* itfRef = &client->interfaceReferences[itfRequire->requireIndex][itfRequire->collectionIndex];
+
+ if(itfRef->instance != (t_component_instance*)NULL)
+ {
+ if(client->Template->classe == SINGLETON)
+ {
+            // A singleton is immutable, so it cannot be rebound; this is not an error, though
+ *bindable = FALSE;
+ return CM_OK;
+ }
+ else
+ {
+ t_interface_reference* itfRef = &client->interfaceReferences[itfRequire->requireIndex][itfRequire->collectionIndex];
+
+ if(itfRef->instance == (const t_component_instance*)NMF_VOID_COMPONENT)
+ ERROR("CM_INTERFACE_ALREADY_BINDED(): Component (%s<%s>.%s) already bound to VOID\n",
+ client->pathname, client->Template->name, requiredItfClientName, 0, 0, 0);
+ else
+ ERROR("CM_INTERFACE_ALREADY_BINDED(): Component (%s<%s>.%s) already bound to another server (%s<%s>.%s)\n",
+ client->pathname, client->Template->name, requiredItfClientName,
+ itfRef->instance->pathname, itfRef->instance->Template->name, itfRef->instance->Template->provides[itfRef->provideIndex].name);
+ return CM_INTERFACE_ALREADY_BINDED;
+ }
+ }
+ }
+
+    // Delayed component lifecycle state check, done only if the required interface is neither optional nor an intrinsic one already resolved by the loader
+ {
+ t_interface_require* itfReq = &client->Template->requires[itfRequire->requireIndex];
+
+ if((itfReq->requireTypes & (OPTIONAL_REQUIRE | INTRINSEC_REQUIRE)) == 0) {
+ if(client->state == STATE_RUNNABLE)
+ return CM_COMPONENT_NOT_STOPPED;
+ }
+ }
+
+ *bindable = TRUE;
+
+ return CM_OK;
+}
+
+t_cm_error cm_checkValidServer(
+ const t_component_instance* server,
+ const char* providedItfServerName,
+ t_interface_provide_description *itfProvide) {
+ t_cm_error error;
+
+ // Check if the components are initialized
+ //if (server->state == STATE_INSTANCIATED)
+ // return CM_COMPONENT_NOT_INITIALIZED;
+ if(NULL == server)
+ return CM_INVALID_COMPONENT_HANDLE;
+
+ // Check if the providedItfServerName is provided by server component
+ if((error = cm_getProvidedInterface(server, providedItfServerName, itfProvide)) != CM_OK)
+ return error;
+
+ return CM_OK;
+}
+
+t_cm_error cm_checkValidBinding(
+ const t_component_instance* client,
+ const char* requiredItfClientName,
+ const t_component_instance* server,
+ const char* providedItfServerName,
+ t_interface_require_description *itfRequire,
+ t_interface_provide_description *itfProvide,
+ t_bool *bindable) {
+ t_interface_require *require;
+ t_interface_provide *provide;
+ t_cm_error error;
+
+ // Check Server
+ if((error = cm_checkValidServer(server, providedItfServerName, itfProvide)) != CM_OK)
+ return error;
+
+ // Check Client
+ if((error = cm_checkValidClient(client, requiredItfClientName, itfRequire, bindable)) != CM_OK)
+ return error;
+
+    // If this singleton has already been bound, check that the new binding targets the same server
+ if(*bindable == FALSE
+ && client->Template->classe == SINGLETON)
+ {
+ t_interface_reference* itfRef = &client->interfaceReferences[itfRequire->requireIndex][itfRequire->collectionIndex];
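+        /* Walk through any interposed event/trace binding components (via their "target"
+           required interface) until the effective server of the existing binding is reached */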
+ while( itfRef->instance != server
+ || itfRef->provideIndex != itfProvide->provideIndex
+ || itfRef->collectionIndex != itfProvide->collectionIndex )
+ {
+ if(itfRef->instance == (const t_component_instance*)NMF_VOID_COMPONENT)
+ {
+ ERROR("CM_INTERFACE_ALREADY_BINDED(): Singleton (%s<%s>.%s) already bound to VOID\n",
+ client->pathname, client->Template->name, requiredItfClientName, 0, 0, 0);
+ return CM_INTERFACE_ALREADY_BINDED;
+ }
+ else if(itfRef->bfInfoID == BF_ASYNCHRONOUS || itfRef->bfInfoID == BF_TRACE)
+ {
+ t_interface_require_description eventitfRequire;
+ CM_ASSERT(cm_getRequiredInterface(itfRef->instance, "target", &eventitfRequire) == CM_OK);
+ itfRef = &itfRef->instance->interfaceReferences[eventitfRequire.requireIndex][eventitfRequire.collectionIndex];
+
+                // Follow the event/trace component's own binding and check whether it ends at the same server
+ }
+ else
+ {
+ ERROR("CM_INTERFACE_ALREADY_BINDED(): Singleton (%s<%s>.%s) already bound to different server (%s<%s>.%s)\n",
+ client->pathname, client->Template->name, requiredItfClientName,
+ itfRef->instance->pathname, itfRef->instance->Template->name, itfRef->instance->Template->provides[itfRef->provideIndex].name);
+ return CM_INTERFACE_ALREADY_BINDED;
+ }
+ }
+ }
+
+    // Check that the provided and required interface types match
+ require = &client->Template->requires[itfRequire->requireIndex];
+ provide = &server->Template->provides[itfProvide->provideIndex];
+ if(require->interface != provide->interface)
+ {
+ ERROR("CM_ILLEGAL_BINDING(%s, %s)\n", require->interface->type, provide->interface->type, 0, 0, 0, 0);
+ return CM_ILLEGAL_BINDING;
+ }
+
+    // Check if a static required interface is bound to a singleton component
+ if((require->requireTypes & STATIC_REQUIRE) &&
+ (server->Template->classe != SINGLETON))
+ {
+        ERROR("CM_ILLEGAL_BINDING(): Can't bind static required interface to a non-singleton component\n",
+ 0, 0, 0, 0, 0, 0);
+ return CM_ILLEGAL_BINDING;
+ }
+
+ return CM_OK;
+}
+
+t_cm_error cm_checkValidUnbinding(
+ const t_component_instance* client,
+ const char* requiredItfClientName,
+ t_interface_require_description *itfRequire,
+ t_interface_provide_description *itfProvide) {
+ t_cm_error error;
+ t_interface_require* itfReq;
+
+ // Component LC state check
+ if (NULL == client)
+ return CM_INVALID_COMPONENT_HANDLE;
+
+ // Check if the requiredItfClientName is required by client component
+ if ((error = cm_getRequiredInterface(client, requiredItfClientName, itfRequire)) != CM_OK)
+ return error;
+
+ itfReq = &client->Template->requires[itfRequire->requireIndex];
+
+    // Look up what the required interface is currently bound to
+ if ((error = cm_lookupInterface(itfRequire, itfProvide)) != CM_OK)
+ {
+        // Allow unbinding an optional required interface of a singleton even if it is not bound: it may have been
+        // unbound previously, and we must not break the singleton binding reference counter
+ if((client->Template->classe == SINGLETON) &&
+ (itfReq->requireTypes & OPTIONAL_REQUIRE) != 0x0)
+ return CM_OK;
+
+ return error;
+ }
+
+ // Singleton is immutable, don't unbind it
+ if(client->Template->classe == SINGLETON)
+ return CM_OK;
+
+    /* if the interface is optional, allow unbinding even if the component is not stopped */
+ if((itfReq->requireTypes & OPTIONAL_REQUIRE) == 0x0)
+ {
+ if(client->state == STATE_RUNNABLE)
+ return CM_COMPONENT_NOT_STOPPED;
+ }
+
+ return CM_OK;
+}
+
diff --git a/drivers/staging/nmf-cm/cm/engine/component/src/component_wrapper.c b/drivers/staging/nmf-cm/cm/engine/component/src/component_wrapper.c
new file mode 100644
index 00000000000..88e6b4749ec
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/src/component_wrapper.c
@@ -0,0 +1,1298 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/api/component_engine.h>
+#include <cm/engine/api/communication_engine.h>
+
+#include <cm/engine/component/inc/bind.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/utils/inc/string.h>
+#include <cm/engine/memory/inc/domain.h>
+
+#include <cm/engine/configuration/inc/configuration.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+
+/*
+ * Component management wrapping.
+ */
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_InstantiateComponent(
+ const char* templateName,
+ t_cm_domain_id domainId,
+ t_nmf_client_id clientId,
+ t_nmf_ee_priority priority,
+ const char localName[MAX_COMPONENT_NAME_LENGTH],
+ const char *dataFile,
+ t_cm_instance_handle *instance) {
+ t_cm_error error;
+ t_nmf_core_id coreId;
+ t_component_instance *comp;
+ t_elfdescription *elfhandle = NULL;
+
+ OSAL_LOCK_API();
+
+ /*
+ * Load Elf File
+ */
+ if(dataFile != NULL &&
+ (error = cm_ELF_CheckFile(
+ dataFile,
+ TRUE,
+ &elfhandle)) != CM_OK)
+ goto out;
+
+    // only allow instantiation in non-scratch domains (i.e. DOMAIN_NORMAL)
+ if ((error = cm_DM_CheckDomainWithClient(domainId, DOMAIN_NORMAL, clientId)) != CM_OK)
+ goto out;
+
+ coreId = cm_DM_GetDomainCoreId(domainId);
+
+ if(coreId < FIRST_MPC_ID || coreId > LAST_CORE_ID)
+ {
+ error = CM_INVALID_PARAMETER;
+ goto out;
+ }
+
+ if ((error = cm_CFG_CheckMpcStatus(coreId)) != CM_OK)
+ goto out;
+
+ if ((error = cm_EEM_ForceWakeup(coreId)) != CM_OK)
+ goto out;
+
+ error = cm_instantiateComponent(
+ templateName,
+ domainId,
+ priority,
+ localName,
+ elfhandle,
+ &comp);
+ if(error == CM_OK)
+ *instance = comp->instance;
+
+ cm_EEM_AllowSleep(coreId);
+
+out:
+ cm_ELF_CloseFile(TRUE, elfhandle);
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_StartComponent(
+ t_cm_instance_handle instance,
+ t_nmf_client_id clientId) {
+ t_cm_error error;
+ t_component_instance *component;
+
+ OSAL_LOCK_API();
+
+ component = cm_lookupComponent(instance);
+ if (NULL == component)
+ error = CM_INVALID_COMPONENT_HANDLE;
+ else
+ {
+ error = cm_startComponent(component, clientId);
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_StopComponent(
+ t_cm_instance_handle instance,
+ t_nmf_client_id clientId) {
+ t_cm_error error;
+ t_component_instance *component;
+
+ OSAL_LOCK_API();
+
+ component = cm_lookupComponent(instance);
+ if (NULL == component)
+ error = CM_INVALID_COMPONENT_HANDLE;
+ else
+ {
+ error = cm_stopComponent(component, clientId);
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_DestroyComponent(
+ t_cm_instance_handle instance,
+ t_nmf_client_id clientId)
+{
+ t_cm_error error;
+ t_component_instance *component;
+
+ OSAL_LOCK_API();
+
+ component = cm_lookupComponent(instance);
+ if (NULL == component)
+ {
+ error = CM_INVALID_COMPONENT_HANDLE;
+ }
+ else
+ {
+ t_nmf_core_id coreId = component->Template->dspId;
+
+ (void)cm_EEM_ForceWakeup(coreId);
+
+ error = cm_destroyInstanceForClient(component, DESTROY_NORMAL, clientId);
+
+ cm_CFG_ReleaseMpc(coreId);
+
+ cm_EEM_AllowSleep(coreId);
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_FlushComponents(t_nmf_client_id clientId)
+{
+ t_cm_error error = CM_OK;
+ t_component_instance *instance;
+ t_uint32 i;
+
+ if (clientId == 0)
+ return CM_INVALID_PARAMETER;
+
+ OSAL_LOCK_API();
+
+ // We don't know exactly where components will be, wake up everybody !!
+ (void)cm_EEM_ForceWakeup(SVA_CORE_ID);
+ (void)cm_EEM_ForceWakeup(SIA_CORE_ID);
+
+ /* Destroy all host2mpc bindings */
+ OSAL_LOCK_COM();
+ for (i=0; i<Host2MpcBindingTable.idxMax; i++)
+ {
+ t_host2mpc_bf_info* bfInfo;
+ bfInfo = Host2MpcBindingTable.entries[i];
+ if ((bfInfo != NULL) && (bfInfo->clientId == clientId)) {
+ cm_delEntry(&Host2MpcBindingTable, i);
+ OSAL_UNLOCK_COM();
+ cm_unbindComponentFromCMCore(bfInfo);
+ OSAL_LOCK_COM();
+ }
+ }
+ OSAL_UNLOCK_COM();
+
+ /* First, stop all remaining components for this client */
+ for (i=0; i<ComponentTable.idxMax; i++)
+ {
+ if ((instance = componentEntry(i)) == NULL)
+ continue;
+ if (/* skip EE */
+ (instance->Template->classe == FIRMWARE) ||
+ /* Skip all binding components */
+ (cm_StringCompare(instance->Template->name, "_ev.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_st.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_sk.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_tr.", 4) == 0))
+ continue;
+
+ /*
+ * Special code for SINGLETON handling
+ */
+ if(instance->Template->classe == SINGLETON)
+ {
+ struct t_client_of_singleton* cl = cm_getClientOfSingleton(instance, FALSE, clientId);
+ if(cl == NULL)
+ continue;
+
+ cl->numberOfStart = 1; // == 1 since it will go to 0 in cm_stopComponent
+ cl->numberOfInstance = 1; // == 1 since it will go to 0 in cm_destroyInstanceForClient
+ cl->numberOfBind = 0; // == 0 since we don't want anymore binding for this component
+ }
+ else if(domainDesc[instance->domainId].client != clientId)
+ /* Skip all components not belonging to our client */
+ continue;
+
+ // Stop the component
+ error = cm_stopComponent(instance, clientId);
+ if (error != CM_OK && error != CM_COMPONENT_NOT_STARTED)
+ LOG_INTERNAL(0, "Error stopping component %s/%x (%s, error=%d, client=%u)\n", instance->pathname, instance, instance->Template->name, error, clientId, 0);
+
+ // Destroy dependencies
+ cm_destroyRequireInterface(instance, clientId);
+ }
+
+ /* Destroy all remaining components for this client */
+ for (i=0; i<ComponentTable.idxMax; i++)
+ {
+ if ((instance = componentEntry(i)) == NULL)
+ continue;
+ if (/* skip EE */
+ (instance->Template->classe == FIRMWARE) ||
+ /* Skip all binding components */
+ (cm_StringCompare(instance->Template->name, "_ev.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_st.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_sk.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_tr.", 4) == 0)) {
+ continue;
+ }
+
+
+ /*
+ * Special code for SINGLETON handling
+ */
+ if(instance->Template->classe == SINGLETON)
+ {
+ struct t_client_of_singleton* cl = cm_getClientOfSingleton(instance, FALSE, clientId);
+ if(cl == NULL)
+ continue;
+ }
+ else if(domainDesc[instance->domainId].client != clientId)
+ /* Skip all components not belonging to our client */
+ continue;
+
+
+ // Destroy the component
+ error = cm_destroyInstanceForClient(instance, DESTROY_WITHOUT_CHECK, clientId);
+
+ if (error != CM_OK)
+ {
+            /* FIXME: add the component instance name to the log message, but a copy must be made before
+             * cm_flushComponent() because it is no longer available afterwards.
+             */
+ LOG_INTERNAL(0, "Error flushing component (error=%d, client=%u)\n", error, clientId, 0, 0, 0, 0);
+ }
+ }
+
+ cm_CFG_ReleaseMpc(SVA_CORE_ID);
+ cm_CFG_ReleaseMpc(SIA_CORE_ID);
+
+ cm_EEM_AllowSleep(SVA_CORE_ID);
+ cm_EEM_AllowSleep(SIA_CORE_ID);
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+/*
+ * Component binding wrapping.
+ */
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_BindComponent(
+ const t_cm_instance_handle clientInstance,
+ const char* requiredItfClientName,
+ const t_cm_instance_handle serverInstance,
+ const char* providedItfServerName,
+ t_bool traced,
+ t_nmf_client_id clientId,
+ const char *dataFileTrace) {
+ t_interface_require_description itfRequire;
+ t_interface_provide_description itfProvide;
+ t_bool bindable;
+ t_cm_error error;
+ t_component_instance *client, *server;
+ t_elfdescription *elfhandleTrace = NULL;
+
+ OSAL_LOCK_API();
+
+ /*
+ * Load Elf File
+ */
+ if(dataFileTrace != NULL &&
+ (error = cm_ELF_CheckFile(
+ dataFileTrace,
+ TRUE,
+ &elfhandleTrace)) != CM_OK)
+ goto out;
+
+ client = cm_lookupComponent(clientInstance);
+ server = cm_lookupComponent(serverInstance);
+ // Sanity check
+ if((error = cm_checkValidBinding(client, requiredItfClientName,
+ server, providedItfServerName,
+ &itfRequire, &itfProvide, &bindable)) != CM_OK)
+ goto out;
+
+ // Check that client and server component run on same DSP
+ if (itfRequire.client->Template->dspId != itfProvide.server->Template->dspId)
+ {
+ error = CM_ILLEGAL_BINDING;
+ goto out;
+ }
+
+ // Check if we really need to bind
+ if(bindable)
+ {
+ if ((error = cm_EEM_ForceWakeup(itfRequire.client->Template->dspId)) != CM_OK)
+ goto out;
+
+ /*
+ * Synchronous binding, so no binding component
+ */
+ if(traced)
+ error = cm_bindInterfaceTrace(&itfRequire, &itfProvide, elfhandleTrace);
+ else
+ error = cm_bindInterface(&itfRequire, &itfProvide);
+
+ cm_EEM_AllowSleep(itfRequire.client->Template->dspId);
+ }
+
+ cm_registerSingletonBinding(client, &itfRequire, &itfProvide, clientId);
+
+out:
+ cm_ELF_CloseFile(TRUE, elfhandleTrace);
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_UnbindComponent(
+ const t_cm_instance_handle clientInstance,
+ const char* requiredItfClientName,
+ t_nmf_client_id clientId) {
+ t_interface_require_description itfRequire;
+ t_interface_provide_description itfProvide;
+ t_bf_info_ID bfInfoID;
+ t_cm_error error;
+ t_component_instance *client;
+
+ OSAL_LOCK_API();
+
+ client = cm_lookupComponent(clientInstance);
+ // Sanity check
+ if((error = cm_checkValidUnbinding(client, requiredItfClientName,
+ &itfRequire, &itfProvide)) != CM_OK)
+ goto out;
+
+ // Check if this is a Primitive binding
+ bfInfoID = itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfoID;
+ if(bfInfoID != BF_SYNCHRONOUS && bfInfoID != BF_TRACE)
+ {
+ error = CM_ILLEGAL_UNBINDING;
+ goto out;
+ }
+
+ // Check if we really need to unbind
+ if(cm_unregisterSingletonBinding(client, &itfRequire, &itfProvide, clientId))
+ {
+ (void)cm_EEM_ForceWakeup(itfRequire.client->Template->dspId);
+
+ if(bfInfoID == BF_SYNCHRONOUS)
+ cm_unbindInterface(&itfRequire);
+ else
+ cm_unbindInterfaceTrace(
+ &itfRequire,
+ (t_trace_bf_info*)itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfo);
+
+ cm_EEM_AllowSleep(itfRequire.client->Template->dspId);
+
+ error = CM_OK;
+ }
+
+out:
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_BindComponentToVoid(
+ const t_cm_instance_handle clientInstance,
+ const char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH],
+ t_nmf_client_id clientId)
+{
+ t_interface_require_description itfRequire;
+ t_bool bindable;
+ t_cm_error error;
+ t_component_instance *client;
+
+ OSAL_LOCK_API();
+
+ client = cm_lookupComponent(clientInstance);
+ // Check invalid binding
+ if((error = cm_checkValidClient(client, requiredItfClientName,
+ &itfRequire, &bindable)) != CM_OK)
+ goto out;
+
+ // Check if we really need to bind
+ if(bindable)
+ {
+ if ((error = cm_EEM_ForceWakeup(itfRequire.client->Template->dspId)) != CM_OK)
+ goto out;
+
+ error = cm_bindInterfaceToVoid(&itfRequire);
+
+ cm_EEM_AllowSleep(itfRequire.client->Template->dspId);
+ }
+
+ cm_registerSingletonBinding(client, &itfRequire, NULL, clientId);
+
+out:
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_BindComponentAsynchronous(
+ const t_cm_instance_handle clientInstance,
+ const char* requiredItfClientName,
+ const t_cm_instance_handle serverInstance,
+ const char* providedItfServerName,
+ t_uint32 fifosize,
+ t_cm_mpc_memory_type eventMemType,
+ t_nmf_client_id clientId,
+ const char *dataFileSkeletonOrEvent,
+ const char *dataFileStub) {
+ t_interface_require_description itfRequire;
+ t_interface_provide_description itfProvide;
+ t_dsp_memory_type_id dspEventMemType;
+ t_bool bindable;
+ t_cm_error error;
+ t_component_instance *client, *server;
+ t_elfdescription *elfhandleSkeletonOrEvent = NULL;
+ t_elfdescription *elfhandleStub = NULL;
+
+ OSAL_LOCK_API();
+
+ /*
+ * Load Elf File
+ */
+ if(dataFileSkeletonOrEvent != NULL &&
+ (error = cm_ELF_CheckFile(
+ dataFileSkeletonOrEvent,
+ TRUE,
+ &elfhandleSkeletonOrEvent)) != CM_OK)
+ goto out;
+ if(dataFileStub != NULL &&
+ (error = cm_ELF_CheckFile(
+ dataFileStub,
+ TRUE,
+ &elfhandleStub)) != CM_OK)
+ goto out;
+
+ client = cm_lookupComponent(clientInstance);
+ server = cm_lookupComponent(serverInstance);
+ // Check invalid binding
+ if((error = cm_checkValidBinding(client, requiredItfClientName,
+ server, providedItfServerName,
+ &itfRequire, &itfProvide, &bindable)) != CM_OK)
+ goto out;
+
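+    /* Map the API-level MPC memory type onto the DSP memory bank used for the binding's event elements */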
+ switch(eventMemType)
+ {
+ case CM_MM_MPC_TCM24_X:
+ dspEventMemType = INTERNAL_XRAM24;
+ break;
+ case CM_MM_MPC_ESRAM24:
+ dspEventMemType = ESRAM_EXT24;
+ break;
+ case CM_MM_MPC_SDRAM24:
+ dspEventMemType = SDRAM_EXT24;
+ break;
+ default:
+ error = CM_INVALID_PARAMETER;
+ goto out;
+ }
+
+ // Check if we really need to bind
+ if(bindable)
+ {
+        // Create the binding and bind it to the client (or to all sub-component clients)
+ if (itfRequire.client->Template->dspId != itfProvide.server->Template->dspId)
+ {
+ if ((error = cm_EEM_ForceWakeup(itfRequire.client->Template->dspId)) != CM_OK)
+ goto out;
+ if ((error = cm_EEM_ForceWakeup(itfProvide.server->Template->dspId)) != CM_OK)
+ {
+ cm_EEM_AllowSleep(itfRequire.client->Template->dspId);
+ goto out;
+ }
+
+            // This is a distributed communication
+ error = cm_bindInterfaceDistributed(
+ &itfRequire,
+ &itfProvide,
+ fifosize,
+ dspEventMemType,
+ elfhandleSkeletonOrEvent,
+ elfhandleStub);
+
+ cm_EEM_AllowSleep(itfRequire.client->Template->dspId);
+ cm_EEM_AllowSleep(itfProvide.server->Template->dspId);
+ }
+ else
+ {
+ if ((error = cm_EEM_ForceWakeup(itfRequire.client->Template->dspId)) != CM_OK)
+ goto out;
+
+            // This is an asynchronous communication
+ error = cm_bindInterfaceAsynchronous(
+ &itfRequire,
+ &itfProvide,
+ fifosize,
+ dspEventMemType,
+ elfhandleSkeletonOrEvent);
+
+ cm_EEM_AllowSleep(itfRequire.client->Template->dspId);
+ }
+ }
+
+ cm_registerSingletonBinding(client, &itfRequire, &itfProvide, clientId);
+
+out:
+ cm_ELF_CloseFile(TRUE, elfhandleSkeletonOrEvent);
+ cm_ELF_CloseFile(TRUE, elfhandleStub);
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_UnbindComponentAsynchronous(
+ const t_cm_instance_handle instance,
+ const char* requiredItfClientName,
+ t_nmf_client_id clientId) {
+ t_interface_require_description itfRequire;
+ t_interface_provide_description itfProvide;
+ t_bf_info_ID bfInfoID;
+ t_cm_error error;
+ t_component_instance *client;
+
+ OSAL_LOCK_API();
+
+ client = cm_lookupComponent(instance);
+ // Sanity check
+ if((error = cm_checkValidUnbinding(client, requiredItfClientName,
+ &itfRequire, &itfProvide)) != CM_OK)
+ goto out;
+
+ bfInfoID = itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfoID;
+
+ // Check if we really need to unbind
+ if(cm_unregisterSingletonBinding(client, &itfRequire, &itfProvide, clientId))
+ {
+        // Check if this is an asynchronous binding
+ if(bfInfoID == BF_DSP2DSP)
+ {
+ t_nmf_core_id clientDsp = itfRequire.client->Template->dspId;
+ t_nmf_core_id serverDsp = itfProvide.server->Template->dspId;
+
+ (void)cm_EEM_ForceWakeup(clientDsp);
+ (void)cm_EEM_ForceWakeup(serverDsp);
+
+ cm_unbindInterfaceDistributed(
+ &itfRequire,
+ (t_mpc2mpc_bf_info*)itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfo);
+
+ cm_EEM_AllowSleep(clientDsp);
+ cm_EEM_AllowSleep(serverDsp);
+
+ error = CM_OK;
+ }
+ else if(bfInfoID == BF_ASYNCHRONOUS)
+ {
+ t_nmf_core_id clientDsp = itfRequire.client->Template->dspId;
+
+ (void)cm_EEM_ForceWakeup(clientDsp);
+
+ cm_unbindInterfaceAsynchronous(
+ &itfRequire,
+ (t_async_bf_info*)itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfo);
+
+ cm_EEM_AllowSleep(clientDsp);
+
+ error = CM_OK;
+ }
+ else
+ error = CM_ILLEGAL_UNBINDING;
+ }
+
+ out:
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_BindComponentFromCMCore(
+ const t_cm_instance_handle server,
+ const char* providedItfServerName,
+ t_uint32 fifosize,
+ t_cm_mpc_memory_type eventMemType,
+ t_cm_bf_host2mpc_handle *bfHost2mpcHdl,
+ t_nmf_client_id clientId,
+ const char *dataFileSkeleton) {
+ t_interface_provide_description itfProvide;
+ t_dsp_memory_type_id dspEventMemType;
+ t_cm_error error;
+ t_component_instance* component;
+ t_host2mpc_bf_info *bfInfo;
+ t_elfdescription *elfhandleSkeleton = NULL;
+
+ OSAL_LOCK_API();
+
+ /*
+ * Load Elf File
+ */
+ if(dataFileSkeleton != NULL &&
+ (error = cm_ELF_CheckFile(
+ dataFileSkeleton,
+ TRUE,
+ &elfhandleSkeleton)) != CM_OK)
+ goto out;
+
+ component = cm_lookupComponent(server);
+ // Check server validity
+ if((error = cm_checkValidServer(component, providedItfServerName,
+ &itfProvide)) != CM_OK)
+ goto out;
+
+ if ((error = cm_EEM_ForceWakeup(itfProvide.server->Template->dspId)) != CM_OK)
+ goto out;
+
+ switch(eventMemType)
+ {
+ case CM_MM_MPC_TCM24_X:
+ dspEventMemType = INTERNAL_XRAM24;
+ break;
+ case CM_MM_MPC_ESRAM24:
+ dspEventMemType = ESRAM_EXT24;
+ break;
+ case CM_MM_MPC_SDRAM24:
+ dspEventMemType = SDRAM_EXT24;
+ break;
+ default:
+ goto out;
+ }
+
+ error = cm_bindComponentFromCMCore(&itfProvide,
+ fifosize,
+ dspEventMemType,
+ elfhandleSkeleton,
+ &bfInfo);
+
+ cm_EEM_AllowSleep(itfProvide.server->Template->dspId);
+
+out:
+ cm_ELF_CloseFile(TRUE, elfhandleSkeleton);
+ OSAL_UNLOCK_API();
+
+ if (error == CM_OK) {
+ bfInfo->clientId = clientId;
+ OSAL_LOCK_COM();
+ *bfHost2mpcHdl = cm_addEntry(&Host2MpcBindingTable, bfInfo);
+ if (*bfHost2mpcHdl == 0)
+ error = CM_NO_MORE_MEMORY;
+ OSAL_UNLOCK_COM();
+
+ if (error != CM_OK) {
+ OSAL_LOCK_API();
+ (void)cm_EEM_ForceWakeup(itfProvide.server->Template->dspId);
+ cm_unbindComponentFromCMCore(bfInfo);
+ cm_EEM_AllowSleep(itfProvide.server->Template->dspId);
+ OSAL_UNLOCK_API();
+ }
+ }
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_UnbindComponentFromCMCore(
+ t_cm_bf_host2mpc_handle bfHost2mpcId) {
+ t_host2mpc_bf_info* bfInfo;
+ t_nmf_core_id coreId;
+
+ OSAL_LOCK_COM();
+ bfInfo = cm_lookupEntry(&Host2MpcBindingTable, bfHost2mpcId);
+ if (bfInfo)
+ cm_delEntry(&Host2MpcBindingTable, bfHost2mpcId & INDEX_MASK);
+ OSAL_UNLOCK_COM();
+ if (NULL == bfInfo)
+ return CM_INVALID_PARAMETER;
+
+ OSAL_LOCK_API();
+
+ // Check if this is a DSP to Host binding
+ //if(bfInfo->id != BF_HOST2DSP)
+ // return CM_ILLEGAL_UNBINDING;
+ coreId = bfInfo->dspskeleton.skelInstance->Template->dspId;
+
+ (void)cm_EEM_ForceWakeup(coreId);
+
+ cm_unbindComponentFromCMCore(bfInfo);
+
+ cm_EEM_AllowSleep(coreId);
+
+ OSAL_UNLOCK_API();
+ return CM_OK;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_BindComponentToCMCore(
+ const t_cm_instance_handle instance,
+ const char *requiredItfClientName,
+ t_uint32 fifosize,
+ t_nmf_mpc2host_handle upLayerThis,
+ const char *dataFileStub,
+ t_cm_bf_mpc2host_handle *mpc2hostId,
+ t_nmf_client_id clientId) {
+ t_interface_require_description itfRequire;
+ t_bool bindable;
+ t_cm_error error;
+ t_component_instance* client;
+ t_elfdescription *elfhandleStub = NULL;
+
+ OSAL_LOCK_API();
+
+ /*
+ * Load Elf File
+ */
+ if(dataFileStub != NULL &&
+ (error = cm_ELF_CheckFile(
+ dataFileStub,
+ TRUE,
+ &elfhandleStub)) != CM_OK)
+ goto out;
+
+ client = cm_lookupComponent(instance);
+ // Check invalid binding
+ if((error = cm_checkValidClient(client, requiredItfClientName,
+ &itfRequire, &bindable)) != CM_OK)
+ goto out;
+
+ // Check if we really need to bind
+ if(bindable)
+ {
+ if ((error = cm_EEM_ForceWakeup(itfRequire.client->Template->dspId)) != CM_OK)
+ goto out;
+
+ error = cm_bindComponentToCMCore(
+ &itfRequire,
+ fifosize,
+ upLayerThis,
+ elfhandleStub,
+ (t_mpc2host_bf_info**)mpc2hostId);
+
+ cm_EEM_AllowSleep(itfRequire.client->Template->dspId);
+ }
+
+ cm_registerSingletonBinding(client, &itfRequire, NULL, clientId);
+
+out:
+ cm_ELF_CloseFile(TRUE, elfhandleStub);
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_UnbindComponentToCMCore(
+ const t_cm_instance_handle instance,
+ const char *requiredItfClientName,
+ t_nmf_mpc2host_handle *upLayerThis,
+ t_nmf_client_id clientId) {
+ t_interface_require_description itfRequire;
+ t_interface_provide_description itfProvide;
+ t_cm_error error;
+ t_mpc2host_bf_info *bfInfo;
+ t_component_instance* client;
+
+ OSAL_LOCK_API();
+
+ client = cm_lookupComponent(instance);
+ // Sanity check
+ if((error = cm_checkValidUnbinding(client, requiredItfClientName,
+ &itfRequire, &itfProvide)) != CM_OK)
+ goto out;
+
+ // Check if this is a DSP to Host binding
+ if(itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfoID != BF_DSP2HOST)
+ {
+ error = CM_ILLEGAL_UNBINDING;
+ goto out;
+ }
+
+ bfInfo = (t_mpc2host_bf_info*)itfRequire.client->interfaceReferences[itfRequire.requireIndex][itfRequire.collectionIndex].bfInfo;
+
+ // Get client information
+ *upLayerThis = bfInfo->context;
+
+ // Check if we really need to unbind
+ if(cm_unregisterSingletonBinding(client, &itfRequire, &itfProvide, clientId))
+ {
+ (void)cm_EEM_ForceWakeup(itfRequire.client->Template->dspId);
+
+ cm_unbindComponentToCMCore(&itfRequire, bfInfo);
+
+ cm_EEM_AllowSleep(itfRequire.client->Template->dspId);
+
+ error = CM_OK;
+ }
+out:
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_event_params_handle CM_ENGINE_AllocEvent(t_cm_bf_host2mpc_handle host2mpcId) {
+ t_host2mpc_bf_info* bfInfo;
+ t_event_params_handle eventHandle;
+
+ OSAL_LOCK_COM();
+ bfInfo = cm_lookupEntry(&Host2MpcBindingTable, host2mpcId);
+ if (NULL == bfInfo) {
+ OSAL_UNLOCK_COM();
+ return NULL;
+ }
+
+ if(bfInfo->dspskeleton.skelInstance->interfaceReferences[0][0].instance->state != STATE_RUNNABLE) {
+        ERROR("CM_COMPONENT_NOT_STARTED: Interface called before component %s<%s> was started\n",
+ bfInfo->dspskeleton.skelInstance->pathname,
+ bfInfo->dspskeleton.skelInstance->Template->name, 0, 0, 0, 0);
+ }
+
+ eventHandle = cm_AllocEvent(bfInfo->fifo);
+
+ OSAL_UNLOCK_COM();
+
+ return eventHandle;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_PushEvent(t_cm_bf_host2mpc_handle host2mpcId, t_event_params_handle h, t_uint32 methodIndex) {
+ t_host2mpc_bf_info* bfInfo;
+ t_cm_error error;
+
+ OSAL_LOCK_COM();
+ bfInfo = cm_lookupEntry(&Host2MpcBindingTable, host2mpcId);
+ if (NULL == bfInfo) {
+ OSAL_UNLOCK_COM();
+ return CM_INVALID_PARAMETER;
+ }
+ error = cm_PushEvent(bfInfo->fifo, h, methodIndex);
+ OSAL_UNLOCK_COM();
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED void CM_ENGINE_AcknowledgeEvent(t_cm_bf_mpc2host_handle mpc2hostId) {
+ t_mpc2host_bf_info* bfInfo = (t_mpc2host_bf_info*)mpc2hostId;
+
+ //t_dsp2host_bf_info* bfInfo = (t_host2mpc_bf_info*)mpc2hostId;
+ OSAL_LOCK_COM();
+ cm_AcknowledgeEvent(bfInfo->fifo);
+ OSAL_UNLOCK_COM();
+}
+
+/*
+ * Get a reference on a given attribute of a given component
+ */
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_ReadComponentAttribute(
+ const t_cm_instance_handle instance,
+ const char* attrName,
+ t_uint24 *attrValue)
+{
+ t_cm_error error;
+ t_component_instance* component;
+
+ OSAL_LOCK_API();
+
+ component = cm_lookupComponent(instance);
+ if (NULL == component)
+ error = CM_INVALID_COMPONENT_HANDLE;
+ else
+ {
+ if ((error = cm_EEM_ForceWakeup(component->Template->dspId)) != CM_OK)
+ goto out;
+
+        // t_uint24 -> t_uint32 cast is possible since we know they have the same size
+ error = cm_readAttribute(component, attrName, (t_uint32*)attrValue);
+
+ cm_EEM_AllowSleep(component->Template->dspId);
+ }
+
+out:
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+/*===============================================================================
+ * Introspection API
+ *===============================================================================*/
+/*
+ * Component
+ */
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentListHeader(
+ const t_nmf_client_id client,
+ t_cm_instance_handle *headerComponent) {
+ t_uint32 i;
+
+ OSAL_LOCK_API();
+
+ *headerComponent = 0;
+ for (i=0; i < ComponentTable.idxMax; i++) {
+ if ((componentEntry(i) != NULL) &&
+ (componentEntry(i)->Template->classe != FIRMWARE) &&
+ (domainDesc[componentEntry(i)->domainId].client == client)) {
+            *headerComponent = ENTRY2HANDLE(componentEntry(i), i);
+ break;
+ }
+ }
+
+ OSAL_UNLOCK_API();
+
+ return CM_OK;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentListNext(
+ const t_nmf_client_id client,
+ const t_cm_instance_handle prevComponent,
+ t_cm_instance_handle *nextComponent){
+ t_cm_error error;
+ t_uint32 i = prevComponent & INDEX_MASK;
+
+ OSAL_LOCK_API();
+
+ // Sanity check
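+    // An instance handle packs the table index in its low bits and the entry pointer (shifted by
+    // INDEX_SHIFT) in its upper bits; both parts must still match the current table entry.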
+ if ((i >= ComponentTable.idxMax)
+ || (((unsigned int)componentEntry(i) << INDEX_SHIFT) != (prevComponent & ~INDEX_MASK)))
+ error = CM_INVALID_COMPONENT_HANDLE;
+ else {
+ *nextComponent = 0;
+ for (i++; i < ComponentTable.idxMax; i++) {
+ if ((componentEntry(i) != NULL) &&
+ (componentEntry(i)->Template->classe != FIRMWARE) &&
+ (domainDesc[componentEntry(i)->domainId].client == client)) {
+                *nextComponent = ENTRY2HANDLE(componentEntry(i), i);
+ break;
+ }
+ }
+
+ error = CM_OK;
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentDescription(
+ const t_cm_instance_handle instance,
+ char *templateName,
+ t_uint32 templateNameLength,
+ t_nmf_core_id *coreId,
+ char *localName,
+ t_uint32 localNameLength,
+ t_nmf_ee_priority *priority) {
+ t_component_instance *comp;
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+
+ comp = cm_lookupComponent(instance);
+ // Sanity check
+ if (NULL == comp) {
+ error = CM_INVALID_COMPONENT_HANDLE;
+ } else {
+ cm_StringCopy(
+ templateName,
+ comp->Template->name,
+ templateNameLength);
+ *coreId = comp->Template->dspId;
+ cm_StringCopy(
+ localName,
+ comp->pathname,
+ localNameLength);
+ if (priority)
+ *priority = comp->priority;
+ error = CM_OK;
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+/*
+ * Require interface
+ */
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentRequiredInterfaceNumber(
+ const t_cm_instance_handle instance,
+ t_uint8 *numberRequiredInterfaces) {
+ t_component_instance *comp;
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+
+ comp = cm_lookupComponent(instance);
+ // Sanity check
+ if (NULL == comp) {
+ error = CM_INVALID_COMPONENT_HANDLE;
+ } else {
+ *numberRequiredInterfaces = comp->Template->requireNumber;
+
+ error = CM_OK;
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentRequiredInterface(
+ const t_cm_instance_handle instance,
+ const t_uint8 index,
+ char *itfName,
+ t_uint32 itfNameLength,
+ char *itfType,
+ t_uint32 itfTypeLength,
+ t_cm_require_state *requireState,
+ t_sint16 *collectionSize) {
+ t_component_instance *comp;
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+
+ comp = cm_lookupComponent(instance);
+ // Sanity check
+ if (NULL == comp) {
+ error = CM_INVALID_COMPONENT_HANDLE;
+ } else if(index >= comp->Template->requireNumber) {
+ error = CM_NO_SUCH_REQUIRED_INTERFACE;
+ } else {
+ cm_StringCopy(
+ itfName,
+ comp->Template->requires[index].name,
+ itfNameLength);
+ cm_StringCopy(
+ itfType,
+ comp->Template->requires[index].interface->type,
+ itfTypeLength);
+ if(comp->Template->requires[index].requireTypes & COLLECTION_REQUIRE)
+ *collectionSize = comp->Template->requires[index].collectionSize;
+ else
+ *collectionSize = -1;
+
+ if(requireState != NULL) {
+ *requireState = 0;
+ if(comp->Template->requires[index].requireTypes & COLLECTION_REQUIRE)
+ *requireState |= CM_REQUIRE_COLLECTION;
+ if(comp->Template->requires[index].requireTypes & OPTIONAL_REQUIRE)
+ *requireState |= CM_REQUIRE_OPTIONAL;
+ if(comp->Template->requires[index].requireTypes & STATIC_REQUIRE)
+ *requireState |= CM_REQUIRE_STATIC;
+ }
+
+ error = CM_OK;
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentRequiredInterfaceBinding(
+ const t_cm_instance_handle instance,
+ const char *itfName,
+ t_cm_instance_handle *server,
+ char *serverItfName,
+ t_uint32 serverItfNameLength) {
+ t_component_instance *comp;
+ t_interface_require_description itfRequire;
+ t_interface_provide_description itfProvide;
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+
+ comp = cm_lookupComponent(instance);
+ // Sanity check
+ if(NULL == comp) {
+ error = CM_INVALID_COMPONENT_HANDLE;
+ } else if ((error = cm_getRequiredInterface(comp, itfName, &itfRequire)) != CM_OK) {
+ // Check if the requiredItfClientName is required by client component
+ } else if ((error = cm_lookupInterface(&itfRequire, &itfProvide)) != CM_OK) {
+        // Look up what the required interface is currently bound to
+ } else {
+ if ((t_cm_instance_handle)itfProvide.server == NMF_HOST_COMPONENT
+ || (t_cm_instance_handle)itfProvide.server == NMF_VOID_COMPONENT)
+ *server = (t_cm_instance_handle)itfProvide.server;
+ else
+ *server = itfProvide.server->instance;
+ if(*server == NMF_HOST_COMPONENT) {
+ cm_StringCopy(
+ serverItfName,
+ "unknown",
+ serverItfNameLength);
+ } else if(*server == NMF_VOID_COMPONENT) {
+ cm_StringCopy(
+ serverItfName,
+ "void",
+ serverItfNameLength);
+ } else if(*server != 0) {
+ cm_StringCopy(
+ serverItfName,
+ itfProvide.server->Template->provides[itfProvide.provideIndex].name,
+ serverItfNameLength);
+ if(itfProvide.server->Template->provides[itfProvide.provideIndex].provideTypes & COLLECTION_PROVIDE) {
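+            /* Append the collection index as a decimal suffix, e.g. "myitf[12]" */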
+ int len = cm_StringLength(serverItfName, serverItfNameLength);
+ serverItfName[len++] = '[';
+ if(itfProvide.collectionIndex >= 100)
+ serverItfName[len++] = '0' + (itfProvide.collectionIndex / 100);
+ if(itfProvide.collectionIndex >= 10)
+ serverItfName[len++] = '0' + ((itfProvide.collectionIndex % 100) / 10);
+ serverItfName[len++] = '0' + (itfProvide.collectionIndex % 10);
+ serverItfName[len++] = ']';
+ serverItfName[len] = 0;
+ }
+ }
+
+ error = CM_OK;
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+/*
+ * Provide interface
+ */
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentProvidedInterfaceNumber(
+ const t_cm_instance_handle instance,
+ t_uint8 *numberProvidedInterfaces) {
+ t_component_instance *comp;
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+
+ comp = cm_lookupComponent(instance);
+ // Sanity check
+ if (NULL == comp) {
+ error = CM_INVALID_COMPONENT_HANDLE;
+ } else {
+ *numberProvidedInterfaces = comp->Template->provideNumber;
+
+ error = CM_OK;
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentProvidedInterface(
+ const t_cm_instance_handle instance,
+ const t_uint8 index,
+ char *itfName,
+ t_uint32 itfNameLength,
+ char *itfType,
+ t_uint32 itfTypeLength,
+ t_sint16 *collectionSize) {
+ t_component_instance *comp;
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+
+ comp = cm_lookupComponent(instance);
+ // Sanity check
+ if (NULL == comp) {
+ error = CM_INVALID_COMPONENT_HANDLE;
+ } else if(index >= comp->Template->provideNumber) {
+ error = CM_NO_SUCH_PROVIDED_INTERFACE;
+ } else {
+ cm_StringCopy(
+ itfName,
+ comp->Template->provides[index].name,
+ itfNameLength);
+ cm_StringCopy(
+ itfType,
+ comp->Template->provides[index].interface->type,
+ itfTypeLength);
+ if(comp->Template->provides[index].provideTypes & COLLECTION_PROVIDE)
+ *collectionSize = comp->Template->provides[index].collectionSize;
+ else
+ *collectionSize = -1;
+
+ error = CM_OK;
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+/*
+ * Component Property
+ */
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentPropertyNumber(
+ const t_cm_instance_handle instance,
+ t_uint8 *numberProperties) {
+ t_component_instance *comp;
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+
+ comp = cm_lookupComponent(instance);
+ // Sanity check
+ if (NULL == comp) {
+ error = CM_INVALID_COMPONENT_HANDLE;
+ } else {
+ *numberProperties = comp->Template->propertyNumber;
+
+ error = CM_OK;
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentPropertyName(
+ const t_cm_instance_handle instance,
+ const t_uint8 index,
+ char *propertyName,
+ t_uint32 propertyNameLength) {
+ t_component_instance *comp;
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+
+ comp = cm_lookupComponent(instance);
+ // Sanity check
+ if (NULL == comp) {
+ error = CM_INVALID_COMPONENT_HANDLE;
+ } else if(index >= comp->Template->propertyNumber) {
+ error = CM_NO_SUCH_PROPERTY;
+ } else {
+ cm_StringCopy(
+ propertyName,
+ comp->Template->properties[index].name,
+ propertyNameLength);
+
+ error = CM_OK;
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetComponentPropertyValue(
+ const t_cm_instance_handle instance,
+ const char *propertyName,
+ char *propertyValue,
+ t_uint32 propertyValueLength)
+{
+ t_component_instance *comp;
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+
+ comp = cm_lookupComponent(instance);
+ if (NULL == comp)
+ error = CM_INVALID_COMPONENT_HANDLE;
+ else
+ {
+ error = cm_getComponentProperty(
+ comp,
+ propertyName,
+ propertyValue,
+ propertyValueLength);
+
+ if(error == CM_NO_SUCH_PROPERTY)
+ ERROR("CM_NO_SUCH_PROPERTY(%s, %s)\n", comp->pathname, propertyName, 0, 0, 0, 0);
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/component/src/dspevent.c b/drivers/staging/nmf-cm/cm/engine/component/src/dspevent.c
new file mode 100644
index 00000000000..0d5e89e0515
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/src/dspevent.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/inc/cm_type.h>
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/communication/inc/communication.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/dsp/mmdsp/inc/mmdsp_hwp.h>
+#include <cm/engine/trace/inc/trace.h>
+#include "../inc/dspevent.h"
+
+
+#define DSP_REMOTE_EVENT_SIZE_IN_BYTE (4*DSP_REMOTE_EVENT_SIZE_IN_DSPWORD)
+#define DSP_REMOTE_EVENT_NEXT_FIELD_OFFSET 0
+#define DSP_REMOTE_EVENT_REACTION_FIELD_OFFSET 1
+#define DSP_REMOTE_EVENT_THIS_FIELD_OFFSET 2
+#define DSP_REMOTE_EVENT_PRIORITY_FIELD_OFFSET 3
+#define DSP_REMOTE_EVENT_DATA_FIELD_OFFSET 4
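+/* Word offsets inside one remote event element: next pointer, reaction (method), this, priority, then the event data */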
+
+t_cm_error dspevent_createDspEventFifo(
+ const t_component_instance *pComp,
+ const char* nameOfTOP,
+ t_uint32 fifoNbElem,
+ t_uint32 fifoElemSizeInWord,
+ t_dsp_memory_type_id dspEventMemType,
+ t_memory_handle *pHandle)
+{
+ t_uint32 dspElementAddr;
+ t_uint32 *elemAddr32;
+ int i;
+
+ // Allocate fifo
+ *pHandle = cm_DM_Alloc(pComp->domainId, dspEventMemType, fifoNbElem*fifoElemSizeInWord, CM_MM_ALIGN_2WORDS, TRUE);
+ if(*pHandle == INVALID_MEMORY_HANDLE) {
+ ERROR("CM_NO_MORE_MEMORY: dspevent_createDspEventFifo()\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ cm_DSP_GetDspAddress(*pHandle, &dspElementAddr);
+
+ elemAddr32 = (t_uint32*)cm_DSP_GetHostLogicalAddress(*pHandle);
+
+ LOG_INTERNAL(2, "\n##### FIFO (dsp event): ARM=0x%x DSP=0x%x\n", elemAddr32, dspElementAddr, 0, 0, 0, 0);
+
+    // Publish the fifo head address through the TOP attribute (we assume the variable is in XRAM)
+ cm_writeAttribute(pComp, nameOfTOP, dspElementAddr);
+
+ // Initialise the linked list (next...)
+ for (i = 0; i < fifoNbElem - 1; i++)
+ {
+ dspElementAddr += fifoElemSizeInWord;
+
+ /* Write next field */
+ *elemAddr32 = dspElementAddr;
+ /* Write THIS field & priority field */
+ *(volatile t_uint64*)&elemAddr32[DSP_REMOTE_EVENT_THIS_FIELD_OFFSET] =
+ ((t_uint64)pComp->thisAddress | (((t_uint64)pComp->priority) << 32));
+
+ elemAddr32 += fifoElemSizeInWord;
+ }
+
+ /* Last element: Write next field */
+ *elemAddr32 = 0x0 /* NULL */;
+ /* Last element: Write THIS field & priority field */
+ *(volatile t_uint64*)&elemAddr32[DSP_REMOTE_EVENT_THIS_FIELD_OFFSET] =
+ ((t_uint64)pComp->thisAddress | (((t_uint64)pComp->priority) << 32));
+
+ return CM_OK;
+}
+
+
+
+void dspevent_destroyDspEventFifo(t_memory_handle handle)
+{
+ (void)cm_DM_Free(handle, TRUE);
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/component/src/initializer.c b/drivers/staging/nmf-cm/cm/engine/component/src/initializer.c
new file mode 100644
index 00000000000..7f99b710401
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/src/initializer.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/inc/cm_type.h>
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+#include <cm/engine/communication/inc/communication.h>
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/dsp/mmdsp/inc/mmdsp_hwp.h>
+
+#include <cm/engine/power_mgt/inc/power.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+
+#include <cm/engine/trace/inc/trace.h>
+
+#include "../inc/dspevent.h"
+#include "../inc/initializer.h"
+
+// Since calls are now synchronous thanks to the semaphore, a fifo size of three is enough
+// (due to updateStack + (InstructionCacheLock or InstructionCacheUnlock))
+#define DEFAULT_INITIALIZER_FIFO_SIZE 3
+
+/* private prototype */
+PRIVATE t_cm_error cm_COMP_generic(t_nmf_core_id coreId, t_event_params_handle paramArray, t_uint32 paramNumber, t_uint32 serviceIndex);
+PRIVATE void cm_COMP_generatePanic(t_nmf_core_id coreId);
+
+/*
+ * This module is tightly coupled with cm_DSP_components one (communication/initializer)
+ */
+static struct {
+ t_nmf_fifo_arm_desc* downlinkFifo;
+ t_nmf_fifo_arm_desc* uplinkFifo;
+ t_memory_handle dspfifoHandle;
+ t_nmf_osal_sem_handle fifoSemHandle;
+ t_uint32 servicePending; // TODO : Use sem counter instead of defining such variable (need to create new OSAL)
+} initializerDesc[NB_CORE_IDS];
+
+PUBLIC t_cm_error cm_COMP_INIT_Init(t_nmf_core_id coreId)
+{
+ t_uint32 i;
+ t_cm_error error;
+ t_component_instance *ee;
+ t_dsp_offset sharedVarOffset;
+ t_interface_provide_description itfProvide;
+ t_interface_provide* provide;
+ t_interface_provide_loaded* provideLoaded;
+
+ ee = cm_EEM_getExecutiveEngine(coreId)->instance;
+
+ // Get interface description
+ if((error = cm_getProvidedInterface(ee, "service", &itfProvide)) != CM_OK)
+ return error;
+ provide = &ee->Template->provides[itfProvide.provideIndex];
+ provideLoaded = &ee->Template->providesLoaded[itfProvide.provideIndex];
+
+
+ if ((error = dspevent_createDspEventFifo(
+ ee, "comms/TOP",
+ DEFAULT_INITIALIZER_FIFO_SIZE,
+ DSP_REMOTE_EVENT_SIZE_IN_DSPWORD,
+ INTERNAL_XRAM24,
+ &initializerDesc[coreId].dspfifoHandle)) != CM_OK)
+ return error;
+
+ /* create fifo semaphore */
+ initializerDesc[coreId].servicePending = 0;
+ initializerDesc[coreId].fifoSemHandle = OSAL_CreateSemaphore(DEFAULT_INITIALIZER_FIFO_SIZE);
+ if (initializerDesc[coreId].fifoSemHandle == 0) {
+ dspevent_destroyDspEventFifo(initializerDesc[coreId].dspfifoHandle);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ /* static armThis initialisation */
+ /*
+ * In the two fifo_alloc() calls below, (1+n) means that we manage the hostThis_or_TOP field plus one method field per params fifo */
+ initializerDesc[coreId].downlinkFifo =
+ fifo_alloc(ARM_CORE_ID, coreId,
+ INIT_COMPONENT_CMD_SIZE, DEFAULT_INITIALIZER_FIFO_SIZE,
+ (1+provide->interface->methodNumber), paramsLocation, extendedFieldLocation, cm_DSP_GetState(coreId)->domainEE);
+ if (initializerDesc[coreId].downlinkFifo == NULL)
+ {
+ OSAL_DestroySemaphore(initializerDesc[coreId].fifoSemHandle);
+ dspevent_destroyDspEventFifo(initializerDesc[coreId].dspfifoHandle);
+ ERROR("CM_NO_MORE_MEMORY: fifo_alloc() failed in cm_COMP_INIT_Init()\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ initializerDesc[coreId].uplinkFifo =
+ fifo_alloc(coreId, ARM_CORE_ID,
+ INIT_COMPONENT_ACK_SIZE, DEFAULT_INITIALIZER_FIFO_SIZE,
+ (1), paramsLocation, extendedFieldLocation, cm_DSP_GetState(coreId)->domainEE); /* 1 is mandatory to compute internally the indexMask */
+ /* This shortcut is intentional (see the indexMask note above) */
+ /* bcDescRef is not used, since the params size is not needed here */
+ if (initializerDesc[coreId].uplinkFifo == NULL)
+ {
+ OSAL_DestroySemaphore(initializerDesc[coreId].fifoSemHandle);
+ fifo_free(initializerDesc[coreId].downlinkFifo);
+ dspevent_destroyDspEventFifo(initializerDesc[coreId].dspfifoHandle);
+ ERROR("CM_NO_MORE_MEMORY: fifo_alloc() failed in cm_COMP_INIT_Init()\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ cm_writeAttribute(ee, "comms/FIFOcmd", initializerDesc[coreId].downlinkFifo->dspAdress);
+
+ cm_writeAttribute(ee, "comms/FIFOack", initializerDesc[coreId].uplinkFifo->dspAdress);
+
+ sharedVarOffset = cm_getAttributeMpcAddress(ee, "comms/TOP");
+
+ /* HOST->DSP ParamsFifo extended fields initialisation */
+ fifo_params_setSharedField(
+ initializerDesc[coreId].downlinkFifo,
+ 0,
+ (t_shared_field)sharedVarOffset /* TOP DSP Address */
+ );
+ for(i=0; i<provide->interface->methodNumber; i++)
+ {
+ fifo_params_setSharedField(
+ initializerDesc[coreId].downlinkFifo,
+ i + 1,
+ provideLoaded->indexesLoaded[itfProvide.collectionIndex][i].methodAddresses);
+ }
+
+ /* DSP->HOST ParamsFifo extended fields initialisation */
+ fifo_params_setSharedField(
+ initializerDesc[coreId].uplinkFifo,
+ 0,
+ (t_shared_field)NMF_INTERNAL_USERTHIS
+ );
+
+ return CM_OK;
+}
+
+
+PUBLIC t_cm_error cm_COMP_CallService(
+ int serviceIndex,
+ t_component_instance *pComp,
+ t_uint32 methodAddress) {
+ t_cm_error error;
+ t_uint16 params[INIT_COMPONENT_CMD_SIZE];
+ t_bool isSynchronous = (serviceIndex == NMF_CONSTRUCT_SYNC_INDEX ||
+ serviceIndex == NMF_START_SYNC_INDEX ||
+ serviceIndex == NMF_STOP_SYNC_INDEX ||
+ serviceIndex == NMF_DESTROY_INDEX)?TRUE:FALSE;
+
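+ /* Marshall the 32-bit component handle, THIS address and method address into 16-bit words for the DSP command FIFO */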
+ params[INIT_COMPONENT_CMD_HANDLE_INDEX] = (t_uint16)((unsigned int)pComp & 0xFFFF);
+ params[INIT_COMPONENT_CMD_HANDLE_INDEX+1] = (t_uint16)((unsigned int)pComp >> 16);
+ params[INIT_COMPONENT_CMD_THIS_INDEX] = (t_uint16)(pComp->thisAddress & 0xFFFF);
+ params[INIT_COMPONENT_CMD_THIS_INDEX+1] = (t_uint16)(pComp->thisAddress >> 16);
+ params[INIT_COMPONENT_CMD_METHOD_INDEX] = (t_uint16)(methodAddress & 0xFFFF);
+ params[INIT_COMPONENT_CMD_METHOD_INDEX+1] = (t_uint16)(methodAddress >> 16);
+
+ error = cm_COMP_generic(pComp->Template->dspId, params, sizeof(params) / sizeof(t_uint16), serviceIndex);
+
+ if (isSynchronous == TRUE && error == CM_OK) {
+ if (OSAL_SEMAPHORE_WAIT_TIMEOUT(semHandle) != SYNC_OK) {
+ cm_COMP_generatePanic(pComp->Template->dspId);
+ error = CM_MPC_NOT_RESPONDING;
+ }
+ }
+
+ return error;
+}
+
+PUBLIC void cm_COMP_Flush(t_nmf_core_id coreId) {
+
+ if(initializerDesc[coreId].servicePending > 0)
+ {
+ t_uint16 params[INIT_COMPONENT_CMD_SIZE];
+ t_uint32 methodAddress = cm_EEM_getExecutiveEngine(coreId)->voidAddr;
+
+ // If a service is still pending on the MMDSP side, send a flush command (today we reuse Destroy to avoid creating a new, empty service)
+ // When the result comes back, it means that all previous requests have been flushed.
+
+ params[INIT_COMPONENT_CMD_HANDLE_INDEX] = (t_uint16)(0x0 & 0xFFFF);
+ params[INIT_COMPONENT_CMD_HANDLE_INDEX+1] = (t_uint16)(0x0 >> 16);
+ params[INIT_COMPONENT_CMD_THIS_INDEX] = (t_uint16)(0x0 & 0xFFFF);
+ params[INIT_COMPONENT_CMD_THIS_INDEX+1] = (t_uint16)(0x0 >> 16);
+ params[INIT_COMPONENT_CMD_METHOD_INDEX] = (t_uint16)(methodAddress & 0xFFFF);
+ params[INIT_COMPONENT_CMD_METHOD_INDEX+1] = (t_uint16)(methodAddress >> 16);
+
+ if (cm_COMP_generic(coreId, params, sizeof(params) / sizeof(t_uint16), NMF_DESTROY_INDEX) != CM_OK ||
+ OSAL_SEMAPHORE_WAIT_TIMEOUT(semHandle) != SYNC_OK)
+ {
+ cm_COMP_generatePanic(coreId);
+ ERROR("CM_MPC_NOT_RESPONDING: can't call flush service\n", 0, 0, 0, 0, 0, 0);
+ }
+ }
+}
+
+PUBLIC void cm_COMP_INIT_Close(t_nmf_core_id coreId)
+{
+ unsigned int i;
+
+ /* wait for the semaphore to be sure it will not be touched later on */
+ /* in case of timeout we break and try to clean up everything */
+ for(i = 0; i < DEFAULT_INITIALIZER_FIFO_SIZE; i++) {
+ if (OSAL_SEMAPHORE_WAIT_TIMEOUT(initializerDesc[coreId].fifoSemHandle) != SYNC_OK)
+ break;
+ }
+
+ /* destroy semaphore */
+ OSAL_DestroySemaphore(initializerDesc[coreId].fifoSemHandle);
+
+ /* Free initializerDesc[coreId].uplinkFifo */
+ /* (used in this particular case to store a dummy DSP->HOST params fifo with no data space, only the descriptor) */
+ fifo_free(initializerDesc[coreId].uplinkFifo);
+
+ /* Free initializerDesc[coreId].downlinkFifo */
+ fifo_free(initializerDesc[coreId].downlinkFifo);
+
+ /* Free initializerDesc[coreId].dspfifoHandle */
+ dspevent_destroyDspEventFifo(initializerDesc[coreId].dspfifoHandle);
+}
+
+PUBLIC void processAsyncAcknowledge(t_nmf_core_id coreId, t_event_params_handle pParam)
+{
+ cm_AcknowledgeEvent(initializerDesc[coreId].uplinkFifo);
+
+ initializerDesc[coreId].servicePending--;
+ OSAL_SemaphorePost(initializerDesc[coreId].fifoSemHandle,1);
+}
+
+PUBLIC void processSyncAcknowledge(t_nmf_core_id coreId, t_event_params_handle pParam)
+{
+ cm_AcknowledgeEvent(initializerDesc[coreId].uplinkFifo);
+
+ initializerDesc[coreId].servicePending--;
+ OSAL_SemaphorePost(initializerDesc[coreId].fifoSemHandle,1);
+ OSAL_SemaphorePost(semHandle,1);
+}
+
+PUBLIC t_cm_error cm_COMP_UpdateStack(
+ t_nmf_core_id coreId,
+ t_uint32 stackSize
+)
+{
+ t_uint16 params[2];
+
+ // Marshall parameter
+ params[0] = (t_uint16)((unsigned int)stackSize & 0xFFFF);
+ params[1] = (t_uint16)((unsigned int)stackSize >> 16);
+
+ return cm_COMP_generic(coreId, params, sizeof(params) / sizeof(t_uint16), NMF_UPDATE_STACK);
+}
+
+PUBLIC t_cm_error cm_COMP_ULPForceWakeup(
+ t_nmf_core_id coreId
+)
+{
+ t_cm_error error;
+
+ error = cm_COMP_generic(coreId, NULL, 0, NMF_ULP_FORCEWAKEUP);
+
+ if (error == CM_OK) {
+ if (OSAL_SEMAPHORE_WAIT_TIMEOUT(semHandle) != SYNC_OK) {
+ cm_COMP_generatePanic(coreId);
+ error = CM_MPC_NOT_RESPONDING;
+ }
+ }
+
+ return error;
+}
+
+PUBLIC t_cm_error cm_COMP_ULPAllowSleep(
+ t_nmf_core_id coreId
+)
+{
+ return cm_COMP_generic(coreId, NULL, 0, NMF_ULP_ALLOWSLEEP);
+}
+
+PUBLIC t_cm_error cm_COMP_InstructionCacheLock(
+ t_nmf_core_id coreId,
+ t_uint32 mmdspAddr,
+ t_uint32 mmdspSize
+)
+{
+ t_uint16 params[4];
+ t_uint32 startAddr = cm_DSP_GetState(coreId)->locked_offset;
+ int way;
+
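+ /* Walk the cache ways starting at the locked offset and send a lock command for each way overlapping [mmdspAddr, mmdspAddr + mmdspSize[ */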
+ for(way = 1; startAddr < mmdspAddr + mmdspSize; startAddr += MMDSP_CODE_CACHE_WAY_SIZE, way++)
+ {
+ if(mmdspAddr < startAddr + MMDSP_CODE_CACHE_WAY_SIZE)
+ {
+ t_cm_error error;
+
+ // Marshall parameter
+ params[0] = (t_uint16)((unsigned int)startAddr & 0xFFFF);
+ params[1] = (t_uint16)((unsigned int)startAddr >> 16);
+ params[2] = (t_uint16)((unsigned int)way & 0xFFFF);
+ params[3] = (t_uint16)((unsigned int)way >> 16);
+
+ if((error = cm_COMP_generic(coreId, params, sizeof(params) / sizeof(t_uint16), NMF_LOCK_CACHE)) != CM_OK)
+ return error;
+ }
+ }
+
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_COMP_InstructionCacheUnlock(
+ t_nmf_core_id coreId,
+ t_uint32 mmdspAddr,
+ t_uint32 mmdspSize
+)
+{
+ t_uint16 params[2];
+ t_uint32 startAddr = cm_DSP_GetState(coreId)->locked_offset;
+ int way;
+
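+ /* Same way-walk as cm_COMP_InstructionCacheLock, but sending unlock commands */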
+ for(way = 1; startAddr < mmdspAddr + mmdspSize; startAddr += MMDSP_CODE_CACHE_WAY_SIZE, way++)
+ {
+ if(mmdspAddr < startAddr + MMDSP_CODE_CACHE_WAY_SIZE)
+ {
+ t_cm_error error;
+
+ // Marshall parameter
+ params[0] = (t_uint16)((unsigned int)way & 0xFFFF);
+ params[1] = (t_uint16)((unsigned int)way >> 16);
+
+ if((error = cm_COMP_generic(coreId, params, sizeof(params) / sizeof(t_uint16), NMF_UNLOCK_CACHE)) != CM_OK)
+ return error;
+ }
+ }
+
+ return CM_OK;
+}
+
+/* private method */
+PRIVATE t_cm_error cm_COMP_generic(
+ t_nmf_core_id coreId,
+ t_event_params_handle paramArray,
+ t_uint32 paramNumber,
+ t_uint32 serviceIndex
+)
+{
+ t_event_params_handle _xyuv_data;
+ t_cm_error error;
+ t_uint32 i;
+
+ // wait for an event in fifo
+ if (OSAL_SEMAPHORE_WAIT_TIMEOUT(initializerDesc[coreId].fifoSemHandle) != SYNC_OK) {
+ cm_COMP_generatePanic(coreId);
+ return CM_MPC_NOT_RESPONDING;
+ }
+
+
+ // AllocEvent
+ if((_xyuv_data = cm_AllocEvent(initializerDesc[coreId].downlinkFifo)) == NULL)
+ {
+ ERROR("CM_INTERNAL_FIFO_OVERFLOW: service FIFO full\n", 0, 0, 0, 0, 0, 0);
+ error = CM_INTERNAL_FIFO_OVERFLOW;
+ goto unlock;
+ }
+
+ // Copy param
+ for(i=0;i<paramNumber;i++)
+ _xyuv_data[i] = paramArray[i];
+
+ OSAL_LOCK_COM();
+
+ // Send Command
+ error = cm_PushEventTrace(initializerDesc[coreId].downlinkFifo, _xyuv_data, serviceIndex,0);
+ if(error == CM_OK)
+ initializerDesc[coreId].servicePending++;
+
+unlock:
+ OSAL_UNLOCK_COM();
+
+ return error;
+}
+
+PRIVATE void cm_COMP_generatePanic(t_nmf_core_id coreId)
+{
+ const t_dsp_desc* pDspDesc = cm_DSP_GetState(coreId);
+
+ if (pDspDesc->state != MPC_STATE_PANIC) {
+ cm_DSP_SetStatePanic(coreId);
+ OSAL_GeneratePanic(coreId, 0);
+ }
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/component/src/instantiater.c b/drivers/staging/nmf-cm/cm/engine/component/src/instantiater.c
new file mode 100644
index 00000000000..92c28b63171
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/src/instantiater.c
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/component/inc/instance.h>
+#include <cm/engine/component/inc/bind.h>
+#include <cm/engine/component/inc/initializer.h>
+
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+#include <cm/engine/configuration/inc/configuration_status.h>
+
+#include <cm/engine/dsp/inc/dsp.h>
+
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/trace/inc/xtitrace.h>
+
+#include <cm/engine/memory/inc/domain.h>
+
+#include <cm/engine/utils/inc/string.h>
+#include <cm/engine/utils/inc/mem.h>
+#include <cm/engine/utils/inc/convert.h>
+
+#include <cm/engine/power_mgt/inc/power.h>
+
+
+t_nmf_table ComponentTable; /**< list (table) of components */
+
+static t_uint32 cm_getMaxStackValue(t_component_instance *pComponent);
+static t_uint16 getNumberOfInstance(t_component_instance* component);
+static t_uint16 getNumberOfStart(t_component_instance* component);
+
+
+t_cm_error cm_COMP_Init(void) {
+ t_cm_error error;
+ error = cm_initTable(&ComponentTable);
+ if (error == CM_OK)
+ error = cm_initTable(&Host2MpcBindingTable);
+ return error;
+}
+
+void cm_COMP_Destroy(void) {
+ cm_destroyTable(&ComponentTable);
+ cm_destroyTable(&Host2MpcBindingTable);
+}
+
+/** cm_addComponent - Add an internal handler to the list
+ *
+ * 1. Increase the size of the list if it's full
+ * 2. Search an empty entry
+ * 3. Add the element to the list
+ * 4. Compute and return the "user handle" (= t_cm_instance_handle)
+ */
+static t_cm_instance_handle cm_addComponent(t_component_instance *comp)
+{
+ OSAL_DisableServiceMessages();
+ comp->instance = cm_addEntry(&ComponentTable, comp);
+ OSAL_EnableServiceMessages();
+
+ return comp->instance;
+}
+
+/** cm_delComponent - remove the given component from the list
+ *
+ * 1. Check if the handle is valid
+ * 2. Search the entry and free it
+ */
+static void cm_delComponent(t_component_instance *comp)
+{
+ if (comp == NULL)
+ return;
+
+ OSAL_DisableServiceMessages();
+ cm_delEntry(&ComponentTable, comp->instance & INDEX_MASK);
+ OSAL_EnableServiceMessages();
+}
+
+/** cm_lookupComponent - search the component corresponding to
+ * the component instance.
+ *
+ * 1. Check if the instance is valid
+ * 2. Return a pointer to the component
+ */
+t_component_instance *cm_lookupComponent(const t_cm_instance_handle hdl)
+{
+ return cm_lookupEntry(&ComponentTable, hdl);
+}
+
+static void cm_DestroyComponentMemory(t_component_instance *component)
+{
+ int i;
+
+ /*
+ * Remove instance from list
+ */
+ cm_delComponent(component);
+
+ /*
+ * Destroy instance
+ */
+ {
+ struct t_client_of_singleton* cur = component->clientOfSingleton;
+
+ for( ; cur != NULL ; )
+ {
+ struct t_client_of_singleton* tmp = cur;
+ cur = cur->next;
+
+ OSAL_Free(tmp);
+ }
+ }
+
+ for(i = 0; i < component->Template->requireNumber; i++)
+ {
+ OSAL_Free(component->interfaceReferences[i]);
+ }
+
+ cm_StringRelease(component->pathname);
+
+ cm_ELF_FreeInstance(component->Template->dspId, component->Template->memories, component->memories);
+
+ cm_unloadComponent(component->Template);
+ OSAL_Free(component);
+}
+
+/**
+ * Non-Require:
+ * - MMDSP may be asleep (since we access it only through HSEM)
+ */
+void cm_delayedDestroyComponent(t_component_instance *component) {
+ int i;
+
+ if (osal_debug_ops.component_destroy)
+ osal_debug_ops.component_destroy(component);
+
+ /*
+ * Remove component from load map here
+ */
+ cm_DSPABI_RemoveLoadMap(
+ component->domainId,
+ component->Template->name,
+ component->memories,
+ component->pathname,
+ component);
+
+ // Generate XTI/STM trace
+ cm_TRC_traceLoadMap(TRACE_COMPONENT_COMMAND_REMOVE, component);
+
+ /*
+ * disconnect interrupt handler if needed
+ */
+ for(i = 0; i < component->Template->provideNumber; i++)
+ {
+ if(component->Template->provides[i].interruptLine)
+ {
+ cm_unbindInterfaceStaticInterrupt(component->Template->dspId, component->Template->provides[i].interruptLine);
+ }
+ }
+
+ /*
+ * Update dsp stack size if needed
+ */
+ if (component->Template->minStackSize > MIN_STACK_SIZE)
+ {
+ if (cm_EEM_isStackUpdateNeed(component->Template->dspId, component->priority, FALSE, component->Template->minStackSize))
+ {
+ t_uint32 newStackValue;
+ t_uint32 maxComponentStackSize;
+
+ maxComponentStackSize = cm_getMaxStackValue(component);
+ cm_EEM_UpdateStack(component->Template->dspId, component->priority, maxComponentStackSize, &newStackValue);
+ if (cm_DSP_GetState(component->Template->dspId)->state == MPC_STATE_BOOTED)
+ cm_COMP_UpdateStack(component->Template->dspId, newStackValue);
+ }
+ }
+
+ cm_DestroyComponentMemory(component);
+}
+
+/**
+ * Pre-Require:
+ * - MMDSP wakeup (when loading in TCM)
+ */
+t_cm_error cm_instantiateComponent(const char* templateName,
+ t_cm_domain_id domainId,
+ t_nmf_ee_priority priority,
+ const char* pathName,
+ t_elfdescription *elfhandle,
+ t_component_instance** refcomponent)
+{
+ t_nmf_core_id coreId = cm_DM_GetDomainCoreId(domainId);
+ t_dup_char templateNameDup;
+ t_component_template* template;
+ t_component_instance *component;
+ /* coverity[var_decl] */
+ t_cm_error error;
+ int i, j, k;
+
+ *refcomponent = NULL;
+
+ templateNameDup = cm_StringDuplicate(templateName);
+ if(templateNameDup == NULL)
+ return CM_NO_MORE_MEMORY;
+
+ /*
+ * Lookup in template list
+ */
+ template = cm_lookupTemplate(coreId, templateNameDup);
+ if(template != NULL)
+ {
+ if(template->classe == SINGLETON)
+ {
+ // Return same handle for singleton component
+ struct t_client_of_singleton* cl;
+
+ cm_StringRelease(templateNameDup);
+
+ cl = cm_getClientOfSingleton(template->singletonIfAvaliable, TRUE, domainDesc[domainId].client);
+ if(cl == NULL)
+ return CM_NO_MORE_MEMORY;
+ cl->numberOfInstance++;
+
+ *refcomponent = template->singletonIfAvaliable;
+ LOG_INTERNAL(1, "##### Singleton : New handle of %s/%x component on %s provItf=%d#####\n",
+ template->singletonIfAvaliable->pathname, template->singletonIfAvaliable, cm_getDspName(coreId),
+ template->singletonIfAvaliable->providedItfUsedCount, 0, 0);
+ return CM_OK;
+ }
+ }
+
+ // Get the dataFile (identity if already passed as a parameter)
+ if((elfhandle = cm_REP_getComponentFile(templateNameDup, elfhandle)) == NULL)
+ {
+ cm_StringRelease(templateNameDup);
+ return CM_COMPONENT_NOT_FOUND;
+ }
+
+ // Load template
+ if((error = cm_loadComponent(templateNameDup, domainId, elfhandle, &template)) != CM_OK)
+ {
+ cm_StringRelease(templateNameDup);
+ return error;
+ }
+
+ // templateNameDup is no longer used, release it
+ cm_StringRelease(templateNameDup);
+
+ // Allocate the component
+ component = (t_component_instance*)OSAL_Alloc_Zero(
+ sizeof(t_component_instance) +
+ sizeof(t_interface_reference*) * template->requireNumber);
+ if(component == NULL)
+ {
+ cm_unloadComponent(template);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ component->interfaceReferences = (t_interface_reference**)((char*)component + sizeof(t_component_instance));
+ component->Template = template;
+
+ /*
+ * Update linked list
+ */
+ if (cm_addComponent(component) == 0) {
+ cm_unloadComponent(template);
+ OSAL_Free(component);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ // NOTE: From here use cm_DestroyComponentMemory
+
+ component->pathname = pathName ? cm_StringDuplicate(pathName) : cm_StringReference(anonymousDup);
+ if(component->pathname == NULL)
+ {
+ cm_DestroyComponentMemory(component);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ LOG_INTERNAL(1, "\n##### Instantiate %s/%x (%s) component on %s at priority %d #####\n", component->pathname, component, template->name, cm_getDspName(coreId), priority, 0);
+
+ if((error = cm_ELF_LoadInstance(domainId, elfhandle, template->memories, component->memories, template->classe == SINGLETON)) != CM_OK)
+ {
+ cm_DestroyComponentMemory(component);
+ return error;
+ }
+
+ if((error = cm_ELF_relocatePrivateSegments(
+ component->memories,
+ elfhandle,
+ template)) != CM_OK)
+ {
+ cm_DestroyComponentMemory(component);
+ return error;
+ }
+
+ cm_ELF_FlushInstance(coreId, template->memories, component->memories);
+
+ /*
+ * Create a new component instance
+ */
+ component->priority = priority;
+ component->thisAddress = 0xFFFFFFFF;
+ component->state = STATE_NONE;
+
+ if(component->Template->classe == SINGLETON)
+ { // Return same handle for singleton component
+ struct t_client_of_singleton* cl = cm_getClientOfSingleton(component, TRUE, domainDesc[domainId].client);
+ if(cl == NULL)
+ {
+ cm_DestroyComponentMemory(component);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ cl->numberOfInstance = 1;
+ template->singletonIfAvaliable = component;
+ if (cm_DM_GetDomainCoreId(domainId) == SVA_CORE_ID)
+ component->domainId = DEFAULT_SVA_DOMAIN;
+ else
+ component->domainId = DEFAULT_SIA_DOMAIN;
+ } else {
+ component->domainId = domainId;
+ }
+
+ if(component->memories[template->thisMemory->id] != INVALID_MEMORY_HANDLE)
+ cm_DSP_GetDspAddress(component->memories[template->thisMemory->id], &component->thisAddress);
+ else {
+ // In case of singleton or component without data
+ component->thisAddress = 0;
+ }
+
+ /*
+ * Create empty required interfaces array and set method interface to Panic
+ */
+ for(i = 0; i < template->requireNumber; i++) // For all required interface
+ {
+ component->interfaceReferences[i] =
+ (t_interface_reference*)OSAL_Alloc_Zero(sizeof(t_interface_reference) * template->requires[i].collectionSize);
+ if(component->interfaceReferences[i] == NULL)
+ {
+ cm_DestroyComponentMemory(component);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ for(j = 0; j < template->requires[i].collectionSize; j++) // ... and for each index in collection (set THIS&method for each client)
+ {
+ component->interfaceReferences[i][j].instance = NULL;
+ component->interfaceReferences[i][j].bfInfoID = BF_SYNCHRONOUS; // Just to record that no binding component is used, and to keep unbind-to-void happy
+
+ if(template->classe == COMPONENT && template->requires[i].indexes != NULL)
+ {
+ // If component, fill THIS with itself to detect an UNBINDED panic on the right DSP
+ t_interface_require_index *requireindex = &template->requires[i].indexes[j];
+ for(k = 0; k < requireindex->numberOfClient; k++)
+ {
+ t_uint32 *hostAddr;
+
+ hostAddr = (t_uint32*)(
+ cm_DSP_GetHostLogicalAddress(
+ component->memories[requireindex->memories[k].memory->id]) +
+ requireindex->memories[k].offset * requireindex->memories[k].memory->memEntSize);
+ *hostAddr++ = (t_uint32)component->thisAddress;
+ }
+ }
+ }
+ }
+
+ /*
+ * Inform debugger about new component
+ */
+ if ((error = cm_DSPABI_AddLoadMap(
+ domainId,
+ template->name,
+ component->pathname,
+ component->memories,
+ component)) != CM_OK)
+ {
+ cm_DestroyComponentMemory(component);
+ return error;
+ }
+
+ // Generate XTI/STM trace
+ cm_TRC_traceLoadMap(TRACE_COMPONENT_COMMAND_ADD, component);
+
+ // NOTE: From here use cm_delayedDestroyComponent
+
+ /*
+ * Relocate interrupt if this is an interrupt
+ */
+ for(i = 0; i < template->provideNumber; i++)
+ {
+ if(template->provides[i].interruptLine)
+ {
+ if ((error = cm_bindInterfaceStaticInterrupt(coreId,
+ template->provides[i].interruptLine,
+ component,
+ template->provides[i].name)) != CM_OK)
+ {
+ cm_delayedDestroyComponent(component);
+ return error;
+ }
+ }
+ }
+
+ /*
+ * For the first instance of a component, update the EE stack size if needed
+ */
+ if(template->classe != FIRMWARE && template->numberOfInstance == 1 && template->minStackSize > MIN_STACK_SIZE)
+ {
+ t_uint32 newStackValue;
+
+ if (cm_EEM_isStackUpdateNeed(template->dspId, priority, TRUE, template->minStackSize))
+ {
+ error = cm_EEM_UpdateStack(template->dspId, priority, template->minStackSize, &newStackValue);
+ if (error != CM_OK)
+ {
+ cm_delayedDestroyComponent(component);
+ return error;
+ }
+ cm_COMP_UpdateStack(template->dspId, newStackValue);
+ }
+ }
+
+
+ /*
+ * For component or first instance
+ */
+ if(template->classe == SINGLETON || template->classe == COMPONENT)
+ {
+ /*
+ * Call init function generated by the compiler (one per .elf)
+ */
+ LOG_INTERNAL(2, "constructor call(s) <%s>\n", template->name, 0, 0, 0, 0, 0);
+ if (cm_DSP_GetState(template->dspId)->state != MPC_STATE_BOOTED)
+ {
+ cm_delayedDestroyComponent(component);
+ return CM_MPC_NOT_RESPONDING;
+ }
+ else if ((error = cm_COMP_CallService(
+ (priority > cm_EEM_getExecutiveEngine(coreId)->instance->priority)?NMF_CONSTRUCT_SYNC_INDEX:NMF_CONSTRUCT_INDEX,
+ component,
+ template->LCCConstructAddress)) != CM_OK)
+ {
+ if (error == CM_MPC_NOT_RESPONDING)
+ ERROR("CM_MPC_NOT_RESPONDING: can't call constructor '%s'\n", component->pathname, 0, 0, 0, 0, 0);
+ cm_delayedDestroyComponent(component);
+ return error;
+ }
+ }
+ else
+ {
+ /* be sure everything is written into memory; not required elsewhere since it will be done by cm_COMP_CallService */
+ OSAL_mb();
+ }
+
+ // For firmware, switch directly to the RUNNABLE state; it does not need to be started
+ if (template->classe == FIRMWARE)
+ component->state = STATE_RUNNABLE;
+ else
+ component->state = STATE_STOPPED;
+
+ if (osal_debug_ops.component_create)
+ osal_debug_ops.component_create(component);
+
+ *refcomponent = component;
+ return CM_OK;
+}
+
+struct t_client_of_singleton* cm_getClientOfSingleton(t_component_instance* component, t_bool createdIfNotExist, t_nmf_client_id clientId)
+{
+ struct t_client_of_singleton* cur = component->clientOfSingleton;
+
+ for( ; cur != NULL ; cur = cur->next)
+ {
+ if(cur->clientId == clientId)
+ {
+ return cur;
+ }
+ }
+
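+ /* Note: createdIfNotExist is not checked here, a new client entry is always created */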
+ //if(createdIfNotExist)
+ {
+ cur = OSAL_Alloc(sizeof(struct t_client_of_singleton));
+ if(cur != NULL)
+ {
+ cur->clientId = clientId;
+ cur->next = component->clientOfSingleton;
+ cur->numberOfBind = 0;
+ cur->numberOfInstance= 0;
+ cur->numberOfStart = 0;
+ component->clientOfSingleton = cur;
+ }
+ }
+ return cur;
+}
+
+/**
+ * Non-Require:
+ * - MMDSP may be asleep (since we access it only through HSEM)
+ */
+t_cm_error cm_startComponent(t_component_instance* component, t_nmf_client_id clientId)
+{
+ t_cm_error error;
+ char value[MAX_PROPERTY_VALUE_LENGTH];
+ int i;
+
+ /*
+ * Special code for SINGLETON handling
+ */
+ if(component->Template->classe == SINGLETON)
+ {
+ struct t_client_of_singleton* cl = cm_getClientOfSingleton(component, FALSE, clientId);
+ if(cl != NULL)
+ cl->numberOfStart++;
+ // A singleton could be started twice, thus really start it only for the first client starter
+ if(getNumberOfStart(component) > 1)
+ return CM_OK;
+
+ // Fall through and really start the singleton.
+ }
+
+ if(component->state == STATE_RUNNABLE)
+ return CM_COMPONENT_NOT_STOPPED;
+
+ // CM_ASSERT component->state == STATE_STOPPED
+
+ /*
+ * Check that all required interfaces have been bound!
+ */
+ for(i = 0; i < component->Template->requireNumber; i++)
+ {
+ int nb = component->Template->requires[i].collectionSize, j;
+ for(j = 0; j < nb; j++)
+ {
+ if(component->interfaceReferences[i][j].instance == NULL &&
+ (component->Template->requires[i].requireTypes & (OPTIONAL_REQUIRE | INTRINSEC_REQUIRE)) == 0)
+ {
+ ERROR("CM_REQUIRE_INTERFACE_UNBINDED: Required interface '%s'.'%s' binded\n", component->pathname, component->Template->requires[i].name, 0, 0, 0, 0);
+ return CM_REQUIRE_INTERFACE_UNBINDED;
+ }
+ }
+ }
+
+ component->state = STATE_RUNNABLE;
+
+ /*
+ * Power on HW resources if required
+ */
+ if(cm_getComponentProperty(
+ component,
+ "hardware",
+ value,
+ sizeof(value)) == CM_OK)
+ {
+ error = cm_PWR_EnableMPC(MPC_PWR_HWIP, component->Template->dspId);
+ if(error != CM_OK)
+ return error;
+ }
+
+ /*
+ * Call starter if available
+ */
+ if(component->Template->LCCStartAddress != 0)
+ {
+ if (cm_DSP_GetState(component->Template->dspId)->state != MPC_STATE_BOOTED)
+ {
+ return CM_MPC_NOT_RESPONDING;
+ }
+ else if ((error = cm_COMP_CallService(
+ (component->priority > cm_EEM_getExecutiveEngine(component->Template->dspId)->instance->priority)?NMF_START_SYNC_INDEX:NMF_START_INDEX,
+ component,
+ component->Template->LCCStartAddress)) != CM_OK)
+ {
+ if (error == CM_MPC_NOT_RESPONDING)
+ ERROR("CM_MPC_NOT_RESPONDING: can't call starter '%s'\n", component->pathname, 0, 0, 0, 0, 0);
+ return error;
+ }
+ }
+
+ return CM_OK;
+}
+
+/**
+ * Non-Require:
+ * - MMDSP may be asleep (since we access it only through HSEM)
+ */
+t_cm_error cm_stopComponent(t_component_instance* component, t_nmf_client_id clientId)
+{
+ char value[MAX_PROPERTY_VALUE_LENGTH];
+ t_cm_error error = CM_OK;
+ t_bool isHwProperty;
+
+ /*
+ * Special code for SINGLETON handling
+ */
+ if(component->Template->classe == SINGLETON)
+ {
+ struct t_client_of_singleton* cl = cm_getClientOfSingleton(component, FALSE, clientId);
+ if(cl != NULL)
+ cl->numberOfStart--;
+ // A singleton could be started twice, thus really stop it only when no client has it started anymore
+ if(getNumberOfStart(component) > 0)
+ return CM_OK;
+
+ // Fall through and really stop the singleton.
+ }
+
+ /*
+ * Component life cycle sanity check
+ */
+ if(component->state == STATE_STOPPED)
+ return CM_COMPONENT_NOT_STARTED;
+
+ // CM_ASSERT component->state == STATE_RUNNABLE
+ component->state = STATE_STOPPED;
+
+ isHwProperty = (cm_getComponentProperty(
+ component,
+ "hardware",
+ value,
+ sizeof(value)) == CM_OK);
+
+ if (cm_DSP_GetState(component->Template->dspId)->state != MPC_STATE_BOOTED)
+ {
+ error = CM_MPC_NOT_RESPONDING;
+ }
+ else
+ {
+ /*
+ * Call stopper if available
+ */
+ if(component->Template->LCCStopAddress != 0)
+ {
+ if ((error = cm_COMP_CallService(
+ isHwProperty ? NMF_STOP_SYNC_INDEX : NMF_STOP_INDEX,
+ component,
+ component->Template->LCCStopAddress)) != CM_OK)
+ {
+ if (error == CM_MPC_NOT_RESPONDING)
+ ERROR("CM_MPC_NOT_RESPONDING: can't call stopper '%s'\n", component->pathname, 0, 0, 0, 0, 0);
+ }
+ }
+ }
+
+ /*
+ * Power off HW resources if required
+ */
+ if(isHwProperty)
+ {
+ cm_PWR_DisableMPC(MPC_PWR_HWIP, component->Template->dspId);
+ }
+
+ return error;
+}
+
+t_cm_error cm_destroyInstance(t_component_instance* component, t_destroy_state forceDestroy)
+{
+ int i, j;
+
+ LOG_INTERNAL(1, "\n##### Destroy %s/%x (%s) component on %s #####\n",
+ component->pathname, component, component->Template->name, cm_getDspName(component->Template->dspId), 0, 0);
+
+ /*
+ * Component life cycle sanity check; do it only when destroying last reference.
+ */
+ if(forceDestroy == DESTROY_NORMAL)
+ {
+ if (component->state == STATE_RUNNABLE)
+ return CM_COMPONENT_NOT_STOPPED;
+
+ // CM_ASSERT component->state == STATE_STOPPED
+
+ // Check that all required bindings have been unbound!
+ for(i = 0; i < component->Template->requireNumber; i++)
+ {
+ int nb = component->Template->requires[i].collectionSize;
+ for(j = 0; j < nb; j++)
+ {
+ if(component->interfaceReferences[i][j].instance != NULL)
+ {
+ ERROR("CM_COMPONENT_NOT_UNBINDED: Required interface %s/%x.%s still binded\n",
+ component->pathname, component, component->Template->requires[i].name, 0, 0, 0);
+ return CM_COMPONENT_NOT_UNBINDED;
+ }
+ }
+ }
+
+ // Check that all provided bindings have been unbound!
+ if (component->providedItfUsedCount != 0)
+ {
+ unsigned idx;
+
+ ERROR("CM_COMPONENT_NOT_UNBINDED: Still %d binding to %s/%x provided interface\n",
+ component->providedItfUsedCount, component->pathname, component, 0, 0, 0);
+
+ /* Find which interfaces are still bound, to print a helpful error message */
+ for (idx=0; idx<ComponentTable.idxMax; idx++)
+ {
+ if ((componentEntry(idx) == NULL) || (componentEntry(idx) == component))
+ continue;
+ for (i = 0; i < componentEntry(idx)->Template->requireNumber; i++)
+ {
+ for (j = 0; j < componentEntry(idx)->Template->requires[i].collectionSize; j++)
+ {
+ if(componentEntry(idx)->interfaceReferences[i][j].instance == component
+ && component->Template->provides[componentEntry(idx)->interfaceReferences[i][j].provideIndex].interruptLine == 0)
+ {
+ ERROR(" -> %s/%x.%s still used by %s/%x.%s\n",
+ component->pathname, component,
+ component->Template->provides[componentEntry(idx)->interfaceReferences[i][j].provideIndex].name,
+ componentEntry(idx)->pathname,
+ componentEntry(idx),
+ componentEntry(idx)->Template->requires[i].name);
+ }
+ }
+ }
+ }
+
+ return CM_COMPONENT_NOT_UNBINDED;
+ }
+ }
+
+ // Sanity checks finished; from here on, do the job whatever the error
+
+ if (cm_DSP_GetState(component->Template->dspId)->state == MPC_STATE_BOOTED)
+ {
+ /*
+ * Call destroy if available
+ */
+ /* Call the destructor only if we don't want to force the destruction */
+ if(forceDestroy != DESTROY_WITHOUT_CHECK_CALL && component->Template->LCCDestroyAddress != 0)
+ {
+ if (cm_COMP_CallService(
+ NMF_DESTROY_INDEX,
+ component,
+ component->Template->LCCDestroyAddress) != CM_OK)
+ {
+ ERROR("CM_MPC_NOT_RESPONDING: can't call destroy '%s'\n", component->pathname, 0, 0, 0, 0, 0);
+ }
+ }
+ else
+ {
+ cm_COMP_Flush(component->Template->dspId);
+ }
+ }
+
+ cm_delayedDestroyComponent(component);
+
+ return CM_OK;
+}
+
+/**
+ * Pre-Require:
+ * - MMDSP wakeup (when accessing loadmap)
+ */
+t_cm_error cm_destroyInstanceForClient(t_component_instance* component, t_destroy_state forceDestroy, t_nmf_client_id clientId)
+{
+ /*
+ * Special code for SINGLETON handling
+ */
+ if(component->Template->classe == SINGLETON)
+ {
+ struct t_client_of_singleton* cl = cm_getClientOfSingleton(component, FALSE, clientId);
+ int nbinstance;
+ if(cl != NULL)
+ cl->numberOfInstance--;
+
+ // A singleton could be instantiated twice, thus destroy it only when no client instance remains
+ nbinstance = getNumberOfInstance(component);
+ if(nbinstance > 0)
+ {
+ LOG_INTERNAL(1, "##### Singleton : Delete handle of %s/%x (%s) component on %s [%d] provItf=%d #####\n",
+ component->pathname, component, component->Template->name, cm_getDspName(component->Template->dspId),
+ nbinstance, component->providedItfUsedCount);
+ return CM_OK;
+ }
+
+ // Fall through
+ }
+
+ return cm_destroyInstance(component, forceDestroy);
+}
+
+
+static t_uint32 cm_getMaxStackValue(t_component_instance *pComponent)
+{
+ t_nmf_executive_engine_id executiveEngineId = cm_EEM_getExecutiveEngine(pComponent->Template->dspId)->executiveEngineId;
+ t_uint32 res = MIN_STACK_SIZE;
+ unsigned int i;
+
+ for (i=0; i<ComponentTable.idxMax; i++)
+ {
+ if ((componentEntry(i) != NULL) &&
+ (componentEntry(i) != pComponent) &&
+ (pComponent->Template->dspId == componentEntry(i)->Template->dspId) &&
+ (executiveEngineId == SYNCHRONOUS_EXECUTIVE_ENGINE || componentEntry(i)->priority == pComponent->priority))
+ {
+ if (componentEntry(i)->Template->minStackSize > res)
+ res = componentEntry(i)->Template->minStackSize;
+ }
+ }
+
+ return res;
+}
+
+static t_uint16 getNumberOfInstance(t_component_instance* component)
+{
+ t_uint16 instanceNumber = 0;
+ struct t_client_of_singleton* cur = component->clientOfSingleton;
+
+ for( ; cur != NULL ; cur = cur->next)
+ {
+ instanceNumber += cur->numberOfInstance;
+ }
+
+ return instanceNumber;
+}
+
+static t_uint16 getNumberOfStart(t_component_instance* component)
+{
+ t_uint16 startNumber = 0;
+ struct t_client_of_singleton* cur = component->clientOfSingleton;
+
+ for( ; cur != NULL ; cur = cur->next)
+ {
+ startNumber += cur->numberOfStart;
+ }
+
+ return startNumber;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/component/src/introspection.c b/drivers/staging/nmf-cm/cm/engine/component/src/introspection.c
new file mode 100644
index 00000000000..4aaf8dff889
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/src/introspection.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/utils/inc/string.h>
+
+/*
+ *
+ */
+t_cm_error cm_getComponentProperty(
+ const t_component_instance *component,
+ const char *propName,
+ char value[MAX_PROPERTY_VALUE_LENGTH],
+ t_uint32 valueLength){
+ t_component_template* template = component->Template;
+ int i;
+
+ for(i = 0; i < template->propertyNumber; i++) {
+ if(cm_StringCompare(template->properties[i].name, propName, MAX_PROPERTY_NAME_LENGTH) == 0) {
+ cm_StringCopy(
+ value,
+ template->properties[i].value,
+ valueLength);
+ return CM_OK;
+ }
+ }
+
+ return CM_NO_SUCH_PROPERTY;
+}
+
+/**
+ *
+ */
+static t_attribute* cm_getAttributeDescriptor(
+ const t_component_instance *component,
+ const char *attrName)
+{
+ int i;
+
+ for(i = 0; i < component->Template->attributeNumber; i++)
+ {
+ if(cm_StringCompare(component->Template->attributes[i].name, attrName, MAX_ATTRIBUTE_NAME_LENGTH) == 0)
+ {
+ return &component->Template->attributes[i];
+ }
+ }
+
+ return NULL;
+}
+
+t_dsp_address cm_getAttributeMpcAddress(
+ const t_component_instance *component,
+ const char *attrName)
+{
+ t_attribute* attribute;
+ t_uint32 dspAddress;
+
+ if((attribute = cm_getAttributeDescriptor(component, attrName)) == NULL)
+ return 0x0;
+
+ cm_DSP_GetDspAddress(component->memories[attribute->memory.memory->id], &dspAddress);
+
+ return (dspAddress +
+ attribute->memory.offset);
+}
+
+t_cm_logical_address cm_getAttributeHostAddr(
+ const t_component_instance *component,
+ const char *attrName)
+{
+ t_attribute* attribute;
+
+ if((attribute = cm_getAttributeDescriptor(component, attrName)) == NULL)
+ return 0x0;
+
+ // TODO JPF: component->Template->attributes[i].memory.offset could be converted to bytes during load
+ return cm_DSP_GetHostLogicalAddress(component->memories[attribute->memory.memory->id]) +
+ attribute->memory.offset * attribute->memory.memory->memEntSize;
+}
+
+
+t_cm_error cm_readAttribute(
+ const t_component_instance* component,
+ const char* attrName,
+ t_uint32* value)
+{
+ t_attribute* attribute;
+ t_cm_logical_address hostAddr;
+
+ if((attribute = cm_getAttributeDescriptor(component, attrName)) == NULL)
+ {
+ ERROR("CM_NO_SUCH_ATTRIBUTE(%s, %s)\n", component->pathname, attrName, 0, 0, 0, 0);
+ return CM_NO_SUCH_ATTRIBUTE;
+ }
+
+ // TODO JPF: component->Template->attributes[i].memory.offset could be converted to bytes during load
+ hostAddr = cm_DSP_GetHostLogicalAddress(component->memories[attribute->memory.memory->id]) +
+ attribute->memory.offset * attribute->memory.memory->memEntSize;
+
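+ /* Entries that are not 16-bit wide are read as 32-bit host words with the top byte masked (24-bit DSP data) */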
+ if(attribute->memory.memory->memEntSize != 2)
+ *value = *((t_uint32 *)hostAddr) & ~MASK_BYTE3;
+ else
+ *value = *((t_uint16 *)hostAddr);
+
+ LOG_INTERNAL(3, "cm_readAttribute: [%s:%s, %x]=%x\n",
+ component->pathname, attrName, hostAddr, *value, 0, 0);
+
+ return CM_OK;
+}
+
+t_uint32 cm_readAttributeNoError(
+ const t_component_instance* component,
+ const char* attrName)
+{
+ t_uint32 value;
+
+ if(cm_readAttribute(component, attrName, &value) != CM_OK)
+ value = 0;
+
+ return value;
+}
+
+t_cm_error cm_writeAttribute(
+ const t_component_instance* component,
+ const char* attrName,
+ t_uint32 value)
+{
+ t_attribute* attribute;
+ t_cm_logical_address hostAddr;
+
+ if((attribute = cm_getAttributeDescriptor(component, attrName)) == NULL)
+ {
+ ERROR("CM_NO_SUCH_ATTRIBUTE(%s, %s)\n", component->pathname, attrName, 0, 0, 0, 0);
+ return CM_NO_SUCH_ATTRIBUTE;
+ }
+
+ // TODO JPF: component->Template->attributes[i].memory.offset could be converted to bytes during load
+ hostAddr = cm_DSP_GetHostLogicalAddress(component->memories[attribute->memory.memory->id]) +
+ attribute->memory.offset * attribute->memory.memory->memEntSize;
+
+ if(attribute->memory.memory->memEntSize != 2)
+ *((t_uint32 *)hostAddr) = value & ~MASK_BYTE3;
+ else
+ *((t_uint16 *)hostAddr) = value;
+
+ /* be sure attribute is write into memory */
+ OSAL_mb();
+
+ LOG_INTERNAL(3, "cm_writeAttribute: [%s:%s, %x]=%x\n",
+ component->pathname, attrName, hostAddr, value, 0, 0);
+
+ return CM_OK;
+}
+
+
+/**
+ *
+ */
+t_dsp_address cm_getFunction(
+ const t_component_instance* component,
+ const char* interfaceName,
+ const char* methodName)
+{
+ t_interface_provide_description itfProvide;
+ t_interface_provide* provide;
+ t_interface_provide_loaded* provideLoaded;
+ t_cm_error error;
+ int i;
+
+ // Get interface description
+ if((error = cm_getProvidedInterface(component, interfaceName, &itfProvide)) != CM_OK)
+ return error;
+
+ provide = &component->Template->provides[itfProvide.provideIndex];
+ provideLoaded = &component->Template->providesLoaded[itfProvide.provideIndex];
+
+ for(i = 0; i < provide->interface->methodNumber; i++)
+ {
+ if(cm_StringCompare(provide->interface->methodNames[i], methodName, MAX_INTERFACE_METHOD_NAME_LENGTH) == 0)
+ {
+ return provideLoaded->indexesLoaded[itfProvide.collectionIndex][i].methodAddresses;
+ }
+ }
+
+ return 0x0;
+}
+
+/**
+ * Compare a simple interface name against a possibly indexed name of the form "name[index]".
+ * Return 0 on match and store the collection index (-1 when the name is not indexed).
+ */
+PRIVATE t_uint8 compareItfName(const char* simplename, const char* complexname, int *collectionIndex) {
+ int i;
+
+ // Check whether simplename is a prefix of complexname
+ for(i = 0; simplename[i] != 0; i++) {
+ if(simplename[i] != complexname[i])
+ return 1; // NO
+ }
+
+ // YES
+ if(complexname[i] == '[') {
+ // This is a collection
+ int value = 0;
+ i++;
+ if(complexname[i] < '0' || complexname[i] > '9') {
+ return 1;
+ }
+ for(; complexname[i] >= '0' && complexname[i] <= '9'; i++) {
+ value = value * 10 + (complexname[i] - '0');
+ }
+ if(complexname[i++] != ']')
+ return 1;
+ *collectionIndex = value;
+ } else
+ *collectionIndex = -1;
+
+ if(complexname[i] != 0) {
+ // Complex name has not been fully parsed -> different name
+ return 1;
+ }
+
+ return 0;
+}
+
+
+/**
+ *
+ */
+PUBLIC t_cm_error cm_getProvidedInterface(const t_component_instance* server,
+ const char* itfName,
+ t_interface_provide_description *itfProvide){
+ int i;
+
+ for(i = 0; i < server->Template->provideNumber; i++)
+ {
+ int collectionIndex;
+ if(compareItfName(server->Template->provides[i].name, itfName, &collectionIndex) == 0)
+ {
+ t_interface_provide *provide = &server->Template->provides[i];
+ if(collectionIndex >= 0)
+ {
+ if(! (provide->provideTypes & COLLECTION_PROVIDE)) {
+ ERROR("CM_NO_SUCH_PROVIDED_INTERFACE(%s, %s)\n",
+ server->pathname, itfName, 0, 0, 0, 0);
+ goto out;
+ }
+ if(collectionIndex >= provide->collectionSize) {
+ ERROR("CM_NO_SUCH_PROVIDED_INTERFACE(%s, %s): out of range [0..%d[\n",
+ server->pathname, itfName, provide->collectionSize,
+ 0, 0, 0);
+ goto out;
+ }
+ }
+ else
+ {
+ if(provide->provideTypes & COLLECTION_PROVIDE) {
+ ERROR("CM_NO_SUCH_PROVIDED_INTERFACE(%s, %s): interface is a collection [0..%d[\n",
+ server->pathname, itfName, provide->collectionSize,
+ 0, 0, 0);
+ goto out;
+ }
+ collectionIndex = 0;
+ }
+ itfProvide->provideIndex = i;
+ itfProvide->server = server;
+ itfProvide->collectionIndex = collectionIndex;
+ itfProvide->origName = itfName;
+ return CM_OK;
+ }
+ }
+
+ ERROR("CM_NO_SUCH_PROVIDED_INTERFACE(%s, %s)\n", server->pathname, itfName, 0, 0, 0, 0);
+out:
+ itfProvide->provideIndex = 0;
+ itfProvide->server = NULL;
+ itfProvide->collectionIndex = 0;
+ itfProvide->origName = NULL;
+ return CM_NO_SUCH_PROVIDED_INTERFACE;
+}
+
+/**
+ *
+ */
+t_cm_error cm_getRequiredInterface(const t_component_instance* client,
+ const char* itfName,
+ t_interface_require_description *itfRequire){
+ int i;
+
+ for(i = 0; i < client->Template->requireNumber; i++) {
+ int collectionIndex;
+ if(compareItfName(client->Template->requires[i].name, itfName, &collectionIndex) == 0) {
+ t_interface_require *require = &client->Template->requires[i];
+ if(collectionIndex >= 0) {
+ if(! (require->requireTypes & COLLECTION_REQUIRE)) {
+ ERROR("CM_NO_SUCH_REQUIRED_INTERFACE(%s, %s)\n",
+ client->pathname, itfName, 0, 0, 0, 0);
+ return CM_NO_SUCH_REQUIRED_INTERFACE;
+ }
+ if(collectionIndex >= require->collectionSize) {
+ ERROR("CM_NO_SUCH_REQUIRED_INTERFACE(%s, %s): out of range [0..%d[\n",
+ client->pathname, itfName, require->collectionSize,
+ 0, 0, 0);
+ return CM_NO_SUCH_REQUIRED_INTERFACE;
+ }
+ } else {
+ if(require->requireTypes & COLLECTION_REQUIRE) {
+ ERROR("CM_NO_SUCH_REQUIRED_INTERFACE(%s, %s): interface is a collection [0..%d[\n",
+ client->pathname, itfName, require->collectionSize,
+ 0, 0, 0);
+ return CM_NO_SUCH_REQUIRED_INTERFACE;
+ }
+ collectionIndex = 0;
+ }
+ itfRequire->client = client;
+ itfRequire->requireIndex = i;
+ itfRequire->collectionIndex = collectionIndex;
+ itfRequire->origName = itfName;
+ return CM_OK;
+ }
+ }
+
+ ERROR("CM_NO_SUCH_REQUIRED_INTERFACE(%s, %s)\n", client->pathname, itfName, 0, 0, 0, 0);
+ return CM_NO_SUCH_REQUIRED_INTERFACE;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/component/src/loader.c b/drivers/staging/nmf-cm/cm/engine/component/src/loader.c
new file mode 100644
index 00000000000..3d81e8308f9
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/component/src/loader.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/component/inc/instance.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+#include <cm/engine/component/inc/bind.h>
+
+#include <cm/engine/utils/inc/string.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/utils/inc/convert.h>
+
+void START(void);
+void END(char*);
+
+#undef NHASH
+#define NHASH 79 //Use a prime number!
+#define MULT 17
+
+static t_component_template *templates[NB_CORE_IDS][NHASH];
+
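+/* Simple multiplicative string hash (h = MULT*h + c), reduced modulo the prime table size NHASH */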
+static unsigned int templateHash(const char *str)
+{
+ unsigned int h = 0;
+ for(; *str; str++)
+ h = MULT * h + *str;
+ return h % NHASH;
+}
+
+static void templateAdd(t_component_template *template)
+{
+ unsigned int h = templateHash(template->name);
+
+ if(templates[template->dspId][h] != NULL)
+ templates[template->dspId][h]->prev = template;
+ template->next = templates[template->dspId][h];
+ template->prev = NULL;
+ templates[template->dspId][h] = template;
+}
+
+static void templateRemove(t_component_template *template)
+{
+ unsigned int h = templateHash(template->name);
+
+ if(template->prev != NULL)
+ template->prev->next = template->next;
+ if(template->next != NULL)
+ template->next->prev = template->prev;
+ if(template == templates[template->dspId][h])
+ templates[template->dspId][h] = template->next;
+}
+
+
+t_component_template* cm_lookupTemplate(t_nmf_core_id dspId, t_dup_char str)
+{
+ t_component_template *template;
+
+ for(template = templates[dspId][templateHash(str)]; template != NULL; template = template->next)
+ {
+ if(str == template->name)
+ return template;
+ }
+
+ return NULL;
+}
+
+t_bool cm_isComponentOnCoreId(t_nmf_core_id coreId) {
+ t_uint32 i;
+
+ for(i = 0; i < NHASH; i++)
+ {
+ if ((templates[coreId][i] != NULL)
+ && (templates[coreId][i]->classe != FIRMWARE)) // Skip firmware
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
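+/* Translate a memory reference (segment + offset) into a DSP address; a NULL segment means the offset already is an absolute DSP address */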
+static t_dsp_address MemoryToDspAdress(t_component_template *template, t_memory_reference *memory)
+{
+ if(memory->memory == NULL)
+ return (t_dsp_address)memory->offset;
+ else
+ {
+ t_dsp_address address;
+
+ cm_DSP_GetDspAddress(template->memories[memory->memory->id], &address);
+
+ return (t_dsp_address)(address + memory->offset);
+ }
+}
+
+/*
+ * Method callback
+ */
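+/*
+ * Returns the method address when the symbol is provided by the executive engine,
+ * 0xFFFFFFFF when the relocation is delayed until bind (static require),
+ * 0xFFFFFFFE on out-of-memory, and 0x0 when the symbol is unknown.
+ */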
+t_uint32 cm_resolvSymbol(
+ void* context,
+ t_uint32 type,
+ t_dup_char symbolName,
+ char* reloc_addr)
+{
+ t_component_template *template = (t_component_template*)context;
+ t_component_instance* ee = cm_EEM_getExecutiveEngine(template->dspId)->instance;
+ int i, j;
+
+ // Search if this method is provided by EE and resolve it directly
+ for(i = 0; i < ee->Template->provideNumber; i++)
+ {
+ t_interface_provide* provide = &ee->Template->provides[i];
+ t_interface_provide_loaded* provideLoaded = &ee->Template->providesLoaded[i];
+
+ for(j = 0; j < provide->interface->methodNumber; j++)
+ {
+ if(provide->interface->methodNames[j] == symbolName)
+ {
+ return provideLoaded->indexesLoaded[0][j].methodAddresses; // Here we assume no collection provided !!
+ }
+ }
+ }
+
+ // Look up whether the method is statically required, and delay the relocation until the bind occurs
+ for(i = 0; i < template->requireNumber; i++)
+ {
+ if((template->requires[i].requireTypes & STATIC_REQUIRE) == 0)
+ continue;
+
+ for(j = 0; j < template->requires[i].interface->methodNumber; j++)
+ {
+ if(template->requires[i].interface->methodNames[j] == symbolName)
+ {
+ t_function_relocation* delayedRelocation = (t_function_relocation*)OSAL_Alloc(sizeof(t_function_relocation));
+ if(delayedRelocation == NULL)
+ return 0xFFFFFFFE;
+
+ delayedRelocation->type = type;
+ delayedRelocation->symbol_name = cm_StringReference(symbolName);
+ delayedRelocation->reloc_addr = reloc_addr;
+ delayedRelocation->next = template->delayedRelocation;
+ template->delayedRelocation = delayedRelocation;
+
+ return 0xFFFFFFFF;
+ }
+ }
+ }
+
+ //Symbol not found
+ return 0x0;
+}
+
+/*
+ * Template Management
+ */
+t_cm_error cm_loadComponent(
+ t_dup_char templateName,
+ t_cm_domain_id domainId,
+ t_elfdescription* elfhandle,
+ t_component_template **reftemplate)
+{
+ t_nmf_core_id coreId = cm_DM_GetDomainCoreId(domainId);
+ t_cm_error error;
+ int i, j, k;
+
+ /*
+ * Allocate new component template if first instance
+ */
+ if(*reftemplate == NULL)
+ {
+ t_component_template *template;
+
+ LOG_INTERNAL(1, "\n##### Load template %s on %s #####\n", templateName, cm_getDspName(coreId), 0, 0, 0, 0);
+
+ /*
+ * Sanity check
+ */
+ if(elfhandle->foundedTemplateName != templateName)
+ {
+ ERROR("CM_INVALID_ELF_FILE: template name %s != %s\n", templateName, elfhandle->foundedTemplateName, 0, 0, 0, 0);
+ return CM_INVALID_ELF_FILE;
+ }
+
+ // Allocate & zero the template so that cm_unloadComponent() can also be used on a partially constructed template
+ *reftemplate = template = (t_component_template*)OSAL_Alloc_Zero(sizeof(t_component_template));
+ if(template == NULL)
+ return CM_NO_MORE_MEMORY;
+ template->name = cm_StringReference(elfhandle->foundedTemplateName);
+
+ // Get information from elfhandle
+ template->descriptionAssociatedWithTemplate = elfhandle->temporaryDescription;
+ template->requireNumber = elfhandle->requireNumber;
+ template->requires = elfhandle->requires;
+ template->attributeNumber = elfhandle->attributeNumber;
+ template->attributes = elfhandle->attributes;
+ template->propertyNumber = elfhandle->propertyNumber;
+ template->properties = elfhandle->properties;
+ template->provideNumber = elfhandle->provideNumber;
+ template->provides = elfhandle->provides;
+ if(template->descriptionAssociatedWithTemplate)
+ {
+ elfhandle->requires = NULL;
+ elfhandle->attributes = NULL;
+ elfhandle->properties = NULL;
+ elfhandle->provides = NULL;
+ }
+
+ // Compute simple information
+ template->numberOfInstance = 1;
+ template->dspId = coreId;
+ LOG_INTERNAL(3, "load<%x> = %s\n", (int)template, template->name, 0, 0, 0, 0);
+ switch(elfhandle->magicNumber) {
+ case MAGIC_COMPONENT:
+ template->classe = COMPONENT;
+ break;
+ case MAGIC_SINGLETON:
+ template->classe = SINGLETON;
+ break;
+ case MAGIC_FIRMWARE:
+ template->classe = FIRMWARE;
+ break;
+ }
+ template->minStackSize = elfhandle->minStackSize;
+
+ /*
+ * Load shared memory from file
+ */
+ // START();
+ if((error = cm_ELF_LoadTemplate(domainId, elfhandle, template->memories, template->classe == SINGLETON)) != CM_OK)
+ goto out;
+ MMDSP_serializeMemories(elfhandle->instanceProperty, &template->codeMemory, &template->thisMemory);
+ // END("cm_ELF_LoadTemplate");
+
+ /*
+ * Copy LCC functions information
+ * Since MMDSP requires the Constructor & Destructor to be called (for cache flush and debug purposes)
+ * even when they are not provided by the user (to allow deferred breakpoints), the Void method is used instead.
+ */
+ template->LCCConstructAddress = MemoryToDspAdress(template, &elfhandle->memoryForConstruct);
+ template->LCCStartAddress = MemoryToDspAdress(template, &elfhandle->memoryForStart);
+ template->LCCStopAddress = MemoryToDspAdress(template, &elfhandle->memoryForStop);
+ template->LCCDestroyAddress = MemoryToDspAdress(template, &elfhandle->memoryForDestroy);
+ if(template->LCCConstructAddress == 0 && template->classe != FIRMWARE)
+ template->LCCConstructAddress = cm_EEM_getExecutiveEngine(coreId)->voidAddr;
+
+ // Compute provide methodIndex
+ if(template->provideNumber != 0)
+ {
+ template->providesLoaded =
+ (t_interface_provide_loaded*)OSAL_Alloc_Zero(sizeof(t_interface_provide_loaded) * template->provideNumber);
+ if(template->providesLoaded == NULL)
+ goto oom;
+
+ for(i = 0; i < template->provideNumber; i++)
+ {
+ template->providesLoaded[i].indexesLoaded = (t_interface_provide_index_loaded**)OSAL_Alloc_Zero(
+ sizeof(t_interface_provide_index_loaded*) * template->provides[i].collectionSize);
+ if(template->providesLoaded[i].indexesLoaded == NULL)
+ goto oom;
+
+ if(template->provides[i].interface->methodNumber != 0)
+ {
+ for(j = 0; j < template->provides[i].collectionSize; j++)
+ {
+ template->providesLoaded[i].indexesLoaded[j] = (t_interface_provide_index_loaded*)OSAL_Alloc(
+ sizeof(t_interface_provide_index_loaded) * template->provides[i].interface->methodNumber);
+ if(template->providesLoaded[i].indexesLoaded[j] == NULL)
+ goto oom;
+
+ for(k = 0; k < template->provides[i].interface->methodNumber; k++)
+ {
+ template->providesLoaded[i].indexesLoaded[j][k].methodAddresses =
+ MemoryToDspAdress(template, &template->provides[i].indexes[j][k].memory);
+
+ LOG_INTERNAL(2, " [%d, %d] method '%s' @ %x\n",
+ j, k, template->provides[i].interface->methodNames[k],
+ template->providesLoaded[i].indexesLoaded[j][k].methodAddresses, 0, 0);
+ }
+
+ }
+ }
+ }
+ }
+
+ /*
+ * TODO
+
+ if((error = elfhandle->errorOccured) != CM_OK)
+ goto out;
+ */
+
+ // START();
+ if(template->classe != FIRMWARE)
+ {
+ if((error = cm_ELF_relocateSharedSegments(
+ template->memories,
+ elfhandle,
+ template)) != CM_OK)
+ goto out;
+ }
+ // END("cm_ELF_relocateSharedSegments");
+
+ cm_ELF_FlushTemplate(coreId, template->memories);
+
+ templateAdd(template);
+
+ return CM_OK;
+ oom:
+ error = CM_NO_MORE_MEMORY;
+ out:
+ cm_unloadComponent(template);
+ return error;
+ }
+ else
+ {
+ (*reftemplate)->numberOfInstance++;
+ }
+
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_unloadComponent(
+ t_component_template *template)
+{
+ /*
+ * Destroy template if last instance
+ */
+ if(--template->numberOfInstance == 0) {
+ t_function_relocation* reloc;
+
+ LOG_INTERNAL(3, "unload<%s>\n", template->name, 0, 0, 0, 0, 0);
+
+ templateRemove(template);
+
+ // Free delayedRelocation
+ reloc = template->delayedRelocation;
+ while(reloc != NULL)
+ {
+ t_function_relocation *tofree = reloc;
+ reloc = reloc->next;
+ cm_StringRelease(tofree->symbol_name);
+ OSAL_Free(tofree);
+ }
+
+ if(template->providesLoaded != NULL)
+ {
+ int i, j;
+
+ for(i = 0; i < template->provideNumber; i++)
+ {
+ if(template->providesLoaded[i].indexesLoaded != NULL)
+ {
+ for(j = 0; j < template->provides[i].collectionSize; j++)
+ {
+ OSAL_Free(template->providesLoaded[i].indexesLoaded[j]);
+ }
+ OSAL_Free(template->providesLoaded[i].indexesLoaded);
+ }
+ }
+
+ OSAL_Free(template->providesLoaded);
+ }
+
+ if(template->descriptionAssociatedWithTemplate)
+ {
+ cm_ELF_ReleaseDescription(
+ template->requireNumber, template->requires,
+ template->attributeNumber, template->attributes,
+ template->propertyNumber, template->properties,
+ template->provideNumber, template->provides);
+ }
+
+ // Free shared memories
+ cm_ELF_FreeTemplate(template->dspId, template->memories);
+
+ cm_StringRelease(template->name);
+
+ OSAL_Free(template);
+ }
+
+ return CM_OK;
+}
+
diff --git a/drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration.h b/drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration.h
new file mode 100644
index 00000000000..98d22bba743
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef __INC_CONFIGURATION_H_
+#define __INC_CONFIGURATION_H_
+
+#include <cm/engine/api/control/configuration_engine.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <inc/nmf-limits.h>
+#include <cm/engine/dsp/inc/dsp.h>
+
+/******************************************************************************/
+/************************ FUNCTIONS PROTOTYPES ********************************/
+/******************************************************************************/
+
+PUBLIC t_cm_error cm_CFG_ConfigureMediaProcessorCore(t_nmf_core_id coreId,
+ t_nmf_executive_engine_id executiveEngineId,
+ t_nmf_semaphore_type_id semaphoreTypeId, t_uint8 nbYramBanks,
+ const t_cm_system_address *mediaProcessorMappingBaseAddr,
+ const t_cm_domain_id eeDomain,
+ t_dsp_allocator_desc* sdramCodeAllocId,
+ t_dsp_allocator_desc* sdramDataAllocId
+ );
+
+PUBLIC t_cm_error cm_CFG_AddMpcSdramSegment(const t_nmf_memory_segment *pDesc,
+ const char *memoryname, t_dsp_allocator_desc **allocDesc);
+
+PUBLIC t_cm_error cm_CFG_CheckMpcStatus(t_nmf_core_id coreId);
+
+void cm_CFG_ReleaseMpc(t_nmf_core_id coreId);
+
+#endif /* __INC_CONFIGURATION_H_ */
diff --git a/drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration_status.h b/drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration_status.h
new file mode 100644
index 00000000000..0c75b9c49b0
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration_status.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef __INC_CONFIGSTATUS_H_
+#define __INC_CONFIGSTATUS_H_
+
+#include <cm/inc/cm_type.h>
+#include <cm/engine/utils/inc/string.h>
+
+/*
+ * Variable to activate intensive checks
+ *
+ * \ingroup CM_CONFIGURATION_API
+ */
+extern t_sint32 cmIntensiveCheckState;
+
+/*
+ * Variable to set the trace level
+ *
+ * \ingroup CM_CONFIGURATION_API
+ */
+extern t_sint32 cm_debug_level;
+
+/*
+ * Variable to activate break on error
+ *
+ * \ingroup CM_CONFIGURATION_API
+ */
+extern t_sint32 cm_error_break;
+
+/*
+ * Variable to activate Ulp
+ *
+ * \ingroup CM_CONFIGURATION_API
+ */
+extern t_bool cmUlpEnable;
+
+extern t_dup_char anonymousDup, eventDup, skeletonDup, stubDup, traceDup;
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration_type.h b/drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration_type.h
new file mode 100644
index 00000000000..af29d584ba4
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/configuration/inc/configuration_type.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Configuration Component Manager API type.
+ */
+#ifndef CONFIGURATION_TYPE_H
+#define CONFIGURATION_TYPE_H
+
+#include <cm/inc/cm_type.h>
+
+/*!
+ * @defgroup t_cm_cmd_id t_cm_cmd_id
+ * \brief Definition of the command ID
+ * \ingroup CM_CONFIGURATION_API
+ *
+ * CM_CMD_XXX designates the command ID used by the \ref CM_SetMode routine.
+ *
+ * \remarks Other command IDs are not yet implemented.
+ */
+
+typedef t_uint32 t_cm_cmd_id; //!< Fake enumeration type \ingroup t_cm_cmd_id
+#define CM_CMD_SYNC ((t_cm_cmd_id)0x01) //!< Synchronize on-going operations (no parameter) \ingroup t_cm_cmd_id
+
+#define CM_CMD_WARM_RESET ((t_cm_cmd_id)0x02) //!< Reset a part of the CM-engine (parameter indicates the part which must be reset) \ingroup t_cm_cmd_id
+
+#define CM_CMD_PWR_MGR ((t_cm_cmd_id)0x10) //!< Enable/Disable the internal power management module (0=Disable, 1=Enable) \ingroup t_cm_cmd_id
+
+#define CM_CMD_DBG_MODE ((t_cm_cmd_id)0x40) //!< Enable/Disable DEBUG mode, Pwr Mgr is also disabled (0=Disable, 1=Enable) \ingroup t_cm_cmd_id
+
+#define CM_CMD_TRACE_ON ((t_cm_cmd_id)0x41) //!< Enable STM/XTI tracing and force network resetting and dumping \note Since MPC traces become usable, you may enable them if they are not already \ingroup t_cm_cmd_id
+#define CM_CMD_TRACE_OFF ((t_cm_cmd_id)0x42) //!< Disable STM/XTI tracing \note Since MPC traces will no longer be usable, you may also disable them \ingroup t_cm_cmd_id
+
+#define CM_CMD_MPC_TRACE_ON ((t_cm_cmd_id)0x50) //!< Enable MPC STM/XTI tracing (param == coreId). \note This command is not executed if the execution engine is not started on the given coreId \ingroup t_cm_cmd_id
+#define CM_CMD_MPC_TRACE_OFF ((t_cm_cmd_id)0x51) //!< Disable MPC STM/XTI tracing (param == coreId). This is the default configuration. \note This command is not executed if the execution engine is not started on the given coreId \ingroup t_cm_cmd_id
+
+#define CM_CMD_MPC_PRINT_OFF ((t_cm_cmd_id)0x52) //!< Set the level of MPC traces to OFF (param == coreId) \note This command is not executed if the execution engine is not started on the given coreId \ingroup t_cm_cmd_id
+#define CM_CMD_MPC_PRINT_ERROR ((t_cm_cmd_id)0x53) //!< Set the level of MPC traces to ERROR (param == coreId) \note This command is not executed if the execution engine is not started on the given coreId \ingroup t_cm_cmd_id
+#define CM_CMD_MPC_PRINT_WARNING ((t_cm_cmd_id)0x54) //!< Set the level of MPC traces to WARNING (param == coreId) \note This command is not executed if the execution engine is not started on the given coreId \ingroup t_cm_cmd_id
+#define CM_CMD_MPC_PRINT_INFO ((t_cm_cmd_id)0x55) //!< Set the level of MPC traces to INFO (param == coreId). This is the default configuration. \note This command is not executed if the execution engine is not started on the given coreId \ingroup t_cm_cmd_id
+#define CM_CMD_MPC_PRINT_VERBOSE ((t_cm_cmd_id)0x56) //!< Set the level of MPC traces to VERBOSE (param == coreId) \note This command is not executed if the execution engine is not started on the given coreId \ingroup t_cm_cmd_id
+
+/*!
+ * \brief Define the level of internal CM log traces
+ *
+ * Define the level of internal CM log traces (-1 to 3)
+ * -# <b>-1 </b> all internal LOG/ERROR traces are disabled
+ * -# <b> 0 </b> all internal LOG traces are disabled (<b>default/reset value</b>)
+ * -# <b> 1, 2, 3 </b> increasingly verbose internal LOG traces are enabled
+ *
+ * \ingroup t_cm_cmd_id
+ */
+#define CM_CMD_TRACE_LEVEL ((t_cm_cmd_id)0x80)
+
+/*!
+ * \brief Enable/Disable intensive internal check
+ *
+ * Enable/Disable intensive internal check (0=Disable, 1=Enable):
+ * - Component handle checking
+ *
+ * Should only be used during the integration phase (the additional checking is time consuming).
+ *
+ * \ingroup t_cm_cmd_id
+ */
+#define CM_CMD_INTENSIVE_CHECK ((t_cm_cmd_id)0x100)
+
+/*!
+ * \brief Enable/Disable ulp mode
+ *
+ * Enable/Disable Ultra Low Power mode.
+ *
+ * \ingroup t_cm_cmd_id
+ */
+#define CM_CMD_ULP_MODE_ON ((t_cm_cmd_id)0x111) //!< Enable ULP mode \ingroup t_cm_cmd_id
+#define CM_CMD_ULP_MODE_OFF ((t_cm_cmd_id)0x110) //!< Deprecated (must be removed in 2.10) !!!
+
+#endif /* CONFIGURATION_TYPE_H */
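For illustration only (not part of the patch): a minimal sketch of how the command IDs above reach the CM engine through CM_ENGINE_SetMode(), which is implemented in configuration_wrapper.c later in this patch. The helper function, the error handling and the include path below are assumptions made for the example.

#include <cm/engine/api/configuration_engine.h>   /* declaration location assumed */

/* Illustrative sketch: raise CM trace verbosity and enable intensive checks. */
static t_cm_error enable_cm_diagnostics(void)
{
    t_cm_error error;

    /* -1..3, see CM_CMD_TRACE_LEVEL above; 3 is the most verbose. */
    error = CM_ENGINE_SetMode(CM_CMD_TRACE_LEVEL, 3);
    if (error != CM_OK)
        return error;

    /* 1 = enable component handle checking (integration phase only). */
    return CM_ENGINE_SetMode(CM_CMD_INTENSIVE_CHECK, 1);
}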
diff --git a/drivers/staging/nmf-cm/cm/engine/configuration/src/configuration.c b/drivers/staging/nmf-cm/cm/engine/configuration/src/configuration.c
new file mode 100644
index 00000000000..f092c7061b4
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/configuration/src/configuration.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/configuration/inc/configuration.h>
+#include <cm/engine/component/inc/initializer.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+#include <cm/engine/semaphores/inc/semaphores.h>
+#include <cm/engine/communication/inc/communication.h>
+#include <cm/engine/utils/inc/string.h>
+#include <cm/engine/repository_mgt/inc/repository_mgt.h>
+#include <inc/nmf-limits.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/memory/inc/domain.h>
+
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/utils/inc/convert.h>
+
+#include <cm/engine/power_mgt/inc/power.h>
+
+t_sint32 cmIntensiveCheckState = 0;
+t_sint32 cm_debug_level = 1;
+t_sint32 cm_error_break = 0;
+t_bool cmUlpEnable = FALSE;
+
+
+#define MAX_EE_NAME_LENGTH 32
+typedef struct {
+ char eeName[MAX_EE_NAME_LENGTH];
+ t_nmf_executive_engine_id executiveEngineId;
+ t_uint32 EEmemoryCount;
+} t_cfg_mpc_desc;
+
+static t_cfg_mpc_desc cfgMpcDescArray[NB_CORE_IDS];
+
+PUBLIC t_cm_error cm_CFG_ConfigureMediaProcessorCore(
+ t_nmf_core_id coreId,
+ t_nmf_executive_engine_id executiveEngineId,
+ t_nmf_semaphore_type_id semaphoreTypeId,
+ t_uint8 nbYramBanks,
+ const t_cm_system_address *mediaProcessorMappingBaseAddr,
+ const t_cm_domain_id eeDomain,
+ t_dsp_allocator_desc *sdramCodeAllocDesc,
+ t_dsp_allocator_desc *sdramDataAllocDesc)
+{
+ /* Process requested configuration (save it) */
+ cfgMpcDescArray[coreId].EEmemoryCount = 0;
+ cfgMpcDescArray[coreId].executiveEngineId = executiveEngineId;
+ /* Build Executive Engine Name */
+ switch(executiveEngineId)
+ {
+ case SYNCHRONOUS_EXECUTIVE_ENGINE:
+ cm_StringCopy(cfgMpcDescArray[coreId].eeName, "synchronous_", MAX_EE_NAME_LENGTH);
+ break;
+ case HYBRID_EXECUTIVE_ENGINE:
+ cm_StringCopy(cfgMpcDescArray[coreId].eeName, "hybrid_", MAX_EE_NAME_LENGTH);
+ break;
+ }
+
+ switch(semaphoreTypeId)
+ {
+ case LOCAL_SEMAPHORES:
+ cm_StringConcatenate(cfgMpcDescArray[coreId].eeName, "lsem", MAX_EE_NAME_LENGTH);
+ break;
+ case SYSTEM_SEMAPHORES:
+ cm_StringConcatenate(cfgMpcDescArray[coreId].eeName, "hsem", MAX_EE_NAME_LENGTH);
+ break;
+ }
+
+ cm_SEM_InitMpc(coreId, semaphoreTypeId);
+
+ return cm_DSP_Add(coreId, nbYramBanks, mediaProcessorMappingBaseAddr, eeDomain, sdramCodeAllocDesc, sdramDataAllocDesc);
+}
+
+// TODO JPF: Move in dsp.c
+PUBLIC t_cm_error cm_CFG_AddMpcSdramSegment(const t_nmf_memory_segment *pDesc, const char* memoryname, t_dsp_allocator_desc **allocDesc)
+{
+ t_dsp_allocator_desc *desc;
+ if ( (pDesc == NULL) ||
+ ((pDesc->systemAddr.logical & CM_MM_ALIGN_64BYTES) != 0) )
+ return CM_INVALID_PARAMETER;
+
+ //TODO, juraj, the right place and way to do this?
+ desc = (t_dsp_allocator_desc*)OSAL_Alloc(sizeof (t_dsp_allocator_desc));
+ if (desc == 0)
+ return CM_NO_MORE_MEMORY;
+
+ desc->allocDesc = cm_MM_CreateAllocator(pDesc->size, 0, memoryname);
+ if (desc->allocDesc == 0) {
+ OSAL_Free(desc);
+ return CM_NO_MORE_MEMORY;
+ }
+ desc->baseAddress = pDesc->systemAddr;
+ desc->referenceCounter = 0;
+
+ *allocDesc = desc;
+
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_CFG_CheckMpcStatus(t_nmf_core_id coreId)
+{
+ t_cm_error error;
+
+ if (cm_DSP_GetState(coreId)->state == MPC_STATE_BOOTABLE)
+ {
+ /* Allocate coms fifo for a given MPC */
+ if ((error = cm_COM_AllocateMpc(coreId)) != CM_OK)
+ return error;
+
+ /* Launch EE */
+ if ((error = cm_EEM_Init(coreId,
+ cfgMpcDescArray[coreId].eeName,
+ cfgMpcDescArray[coreId].executiveEngineId)) != CM_OK)
+ {
+ cm_COM_FreeMpc(coreId);
+ return error;
+ }
+
+ /* Initialize coms fifo for a given MPC */
+ cm_COM_InitMpc(coreId);
+
+ /* Initialisation of the dedicated communication channel for component initialization */
+ if((error = cm_COMP_INIT_Init(coreId)) != CM_OK)
+ {
+ cm_EEM_Close(coreId);
+ cm_COM_FreeMpc(coreId);
+ return error;
+ }
+
+ cfgMpcDescArray[coreId].EEmemoryCount = cm_PWR_GetMPCMemoryCount(coreId);
+
+ if(cmUlpEnable)
+ {
+ // We have finished booting, allow the MMDSP to go into auto idle
+ cm_EEM_AllowSleep(coreId);
+ }
+ }
+
+ if (cm_DSP_GetState(coreId)->state != MPC_STATE_BOOTED)
+ return CM_MPC_NOT_INITIALIZED;
+
+ return CM_OK;
+}
+
+void cm_CFG_ReleaseMpc(t_nmf_core_id coreId)
+{
+ t_uint32 memoryCount = cm_PWR_GetMPCMemoryCount(coreId);
+
+ // If no more memory and no more components are in use (avoid switching off when a component uses no memory)
+ if(
+ cm_PWR_GetMode() == NORMAL_PWR_MODE &&
+ memoryCount != 0 /* Just to see if there is something */ &&
+ memoryCount == cfgMpcDescArray[coreId].EEmemoryCount &&
+ cm_isComponentOnCoreId(coreId) == FALSE)
+ {
+ LOG_INTERNAL(1, "\n##### Shutdown %s #####\n", cm_getDspName(coreId), 0, 0, 0, 0, 0);
+
+ (void)cm_EEM_ForceWakeup(coreId);
+
+ /* remove ee from load map here */
+ cm_COMP_INIT_Close(coreId);
+ cm_EEM_Close(coreId);
+ cm_COM_FreeMpc(coreId);
+
+ cfgMpcDescArray[coreId].EEmemoryCount = 0; // For debug purpose
+ }
+}
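For illustration only (not part of the patch), the sketch below chains the three entry points above in their expected order: register the SDRAM code/data segments, record the core configuration, then let cm_CFG_CheckMpcStatus() boot the EE lazily. The segment descriptors, domain id, allocator names and YRAM bank count are placeholders.

/* Illustrative sketch: typical configuration order for one MPC. */
static t_cm_error configure_one_mpc(t_nmf_core_id coreId,
                                    const t_nmf_memory_segment *codeSeg,
                                    const t_nmf_memory_segment *dataSeg,
                                    const t_cm_system_address *mpcBase,
                                    t_cm_domain_id eeDomain)
{
    t_dsp_allocator_desc *codeAlloc, *dataAlloc;
    t_cm_error error;

    if ((error = cm_CFG_AddMpcSdramSegment(codeSeg, "mpc-sdram-code", &codeAlloc)) != CM_OK)
        return error;
    if ((error = cm_CFG_AddMpcSdramSegment(dataSeg, "mpc-sdram-data", &dataAlloc)) != CM_OK)
        return error;

    if ((error = cm_CFG_ConfigureMediaProcessorCore(coreId,
                                                    HYBRID_EXECUTIVE_ENGINE,
                                                    SYSTEM_SEMAPHORES,
                                                    2 /* nbYramBanks, placeholder */,
                                                    mpcBase, eeDomain,
                                                    codeAlloc, dataAlloc)) != CM_OK)
        return error;

    /* Boots the EE on the core if it is only BOOTABLE so far. */
    return cm_CFG_CheckMpcStatus(coreId);
}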
diff --git a/drivers/staging/nmf-cm/cm/engine/configuration/src/configuration_wrapper.c b/drivers/staging/nmf-cm/cm/engine/configuration/src/configuration_wrapper.c
new file mode 100644
index 00000000000..bc3952e63b4
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/configuration/src/configuration_wrapper.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/api/configuration_engine.h>
+#include <cm/engine/communication/inc/communication.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/memory/inc/chunk_mgr.h>
+#include <cm/engine/repository_mgt/inc/repository_mgt.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/semaphores/inc/semaphores.h>
+#include <cm/engine/semaphores/hw_semaphores/inc/hw_semaphores.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+#include <cm/engine/configuration/inc/configuration.h>
+#include <cm/engine/power_mgt/inc/power.h>
+#include <cm/engine/utils/inc/string.h>
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/component/inc/bind.h>
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/api/executive_engine_mgt_engine.h>
+
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/trace/inc/xtitrace.h>
+
+t_dup_char anonymousDup, eventDup, skeletonDup, stubDup, traceDup;
+
+PUBLIC t_cm_error CM_ENGINE_Init(
+ const t_nmf_hw_mapping_desc *pNmfHwMappingDesc,
+ const t_nmf_config_desc *pNmfConfigDesc
+ )
+{
+ t_cm_error error;
+
+ // Pre-duplicate frequently used strings so they are not repeatedly allocated and freed
+ anonymousDup = cm_StringDuplicate("anonymous");
+ eventDup = cm_StringDuplicate("event");
+ skeletonDup = cm_StringDuplicate("skeleton");
+ stubDup = cm_StringDuplicate("stub");
+ traceDup = cm_StringDuplicate("trace");
+
+ if ((
+ error = cm_OSAL_Init()
+ ) != CM_OK) { return error; }
+
+ if ((
+ error = cm_COMP_Init()
+ ) != CM_OK) { return error; }
+
+ if ((
+ error = cm_PWR_Init()
+ ) != CM_OK) { return error; }
+
+ cm_TRC_traceReset();
+
+ if ((
+ error = cm_DM_Init()
+ ) != CM_OK) {return error; }
+
+ if ((
+ error = cm_SEM_Init(&pNmfHwMappingDesc->hwSemaphoresMappingBaseAddr)
+ ) != CM_OK) { return error; }
+
+ if ((error = cm_COM_Init(pNmfConfigDesc->comsLocation)) != CM_OK)
+ return error;
+
+ cm_DSP_Init(&pNmfHwMappingDesc->esramDesc);
+
+ return CM_OK;
+}
+
+PUBLIC void CM_ENGINE_Destroy(void)
+{
+ t_component_instance *instance;
+ t_cm_error error;
+ t_uint32 i;
+
+ /* PP: Well, on Linux (and probably on Symbian too), this is called when the driver is removed
+ * => the module (driver) can't be removed if there are still some pending clients
+ * => all remaining components should have been destroyed in CM_ENGINE_FlushClient()
+ * => so, if we find any components here, we are in BIG trouble ...
+ */
+ /* First, stop all remaining components */
+ for (i=0; i<ComponentTable.idxMax; i++)
+ {
+ t_nmf_client_id clientId;
+
+ if ((instance = componentEntry(i)) == NULL)
+ continue;
+ clientId = domainDesc[instance->domainId].client;
+ LOG_INTERNAL(0, "Found a remaining component %s (%s) when destroying the CM !!!\n", instance->pathname, instance->Template->name, 0, 0, 0, 0);
+ if (/* skip EE */
+ (instance->Template->classe == FIRMWARE) ||
+ /* Skip all binding components */
+ (cm_StringCompare(instance->Template->name, "_ev.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_st.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_sk.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_tr.", 4) == 0))
+ continue;
+
+ /*
+ * Special code for SINGLETON handling
+ */
+ if(instance->Template->classe == SINGLETON)
+ {
+ struct t_client_of_singleton* cl = instance->clientOfSingleton;
+
+ clientId = instance->clientOfSingleton->clientId;
+ for( ; cl != NULL ; cl = cl->next)
+ {
+ if(cl == instance->clientOfSingleton)
+ {
+ cl->numberOfStart = 1; // == 1 since it will go to 0 in cm_stopComponent
+ cl->numberOfInstance = 1; // == 1 since it will go to 0 in cm_destroyInstanceForClient
+ }
+ else
+ {
+ cl->numberOfStart = 0;
+ cl->numberOfInstance = 0;
+ }
+ cl->numberOfBind = 0;
+ }
+ }
+
+ // Stop the component
+ error = cm_stopComponent(instance, clientId);
+ if (error != CM_OK && error != CM_COMPONENT_NOT_STARTED)
+ LOG_INTERNAL(0, "Error stopping component %s/%x (%s, error=%d, client=%u)\n", instance->pathname, instance, instance->Template->name, error, clientId, 0);
+
+ // Destroy dependencies
+ cm_destroyRequireInterface(instance, clientId);
+ }
+
+ /* Destroy all remaining components */
+ for (i=0; i<ComponentTable.idxMax; i++)
+ {
+ t_nmf_client_id clientId;
+
+ if ((instance = componentEntry(i)) == NULL)
+ continue;
+ clientId = domainDesc[instance->domainId].client;
+
+ if (/* skip EE */
+ (instance->Template->classe == FIRMWARE) ||
+ /* Skip all binding components */
+ (cm_StringCompare(instance->Template->name, "_ev.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_st.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_sk.", 4) == 0) ||
+ (cm_StringCompare(instance->Template->name, "_tr.", 4) == 0)) {
+ continue;
+ }
+
+ if(instance->Template->classe == SINGLETON)
+ {
+ clientId = instance->clientOfSingleton->clientId;
+ }
+
+ // Destroy the component
+ error = cm_destroyInstanceForClient(instance, DESTROY_WITHOUT_CHECK, clientId);
+
+ if (error != CM_OK)
+ {
+ /* FIXME : add the component instance name in the log message, but a copy must be made before cm_flushComponent()
+ * because it is no longer available afterwards.
+ */
+ LOG_INTERNAL(0, "Error flushing component (error=%d, client=%u)\n", error, clientId, 0, 0, 0, 0);
+ }
+ }
+
+ /* This will power off all resources and destroy the EE */
+ cm_PWR_SetMode(NORMAL_PWR_MODE);
+ cm_DSP_Destroy();
+ cm_DM_Destroy();
+ /* Nothing to do about SEM */
+ //cm_MM_Destroy();
+ cm_REP_Destroy();
+ cm_COMP_Destroy();
+ cm_OSAL_Destroy();
+
+ cm_StringRelease(traceDup);
+ cm_StringRelease(stubDup);
+ cm_StringRelease(skeletonDup);
+ cm_StringRelease(eventDup);
+ cm_StringRelease(anonymousDup);
+}
+
+PUBLIC t_cm_error CM_ENGINE_ConfigureMediaProcessorCore(
+ t_nmf_core_id coreId,
+ t_nmf_executive_engine_id executiveEngineId,
+ t_nmf_semaphore_type_id semaphoreTypeId,
+ t_uint8 nbYramBanks,
+ const t_cm_system_address *mediaProcessorMappingBaseAddr,
+ const t_cm_domain_id eeDomain,
+ const t_cfg_allocator_id sdramCodeAllocId,
+ const t_cfg_allocator_id sdramDataAllocId
+ )
+{
+ return cm_CFG_ConfigureMediaProcessorCore(
+ coreId,
+ executiveEngineId,
+ semaphoreTypeId,
+ nbYramBanks,
+ mediaProcessorMappingBaseAddr,
+ eeDomain,
+ (t_dsp_allocator_desc*)sdramCodeAllocId,
+ (t_dsp_allocator_desc*)sdramDataAllocId
+ );
+}
+
+PUBLIC t_cm_error CM_ENGINE_AddMpcSdramSegment(
+ const t_nmf_memory_segment *pDesc,
+ t_cfg_allocator_id *id,
+ const char *memoryname
+ )
+{
+ return cm_CFG_AddMpcSdramSegment(pDesc, memoryname == NULL ? "" : memoryname, (t_dsp_allocator_desc**)id);
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_SetMode(t_cm_cmd_id aCmdID, t_sint32 aParam)
+{
+ t_cm_error error = CM_OK;
+ int i;
+
+ OSAL_LOCK_API();
+
+ switch(aCmdID) {
+ case CM_CMD_DBG_MODE:
+ cm_PWR_SetMode(( aParam==1 ) ? DISABLE_PWR_MODE : NORMAL_PWR_MODE);
+ switch(cm_PWR_GetMode())
+ {
+ case NORMAL_PWR_MODE:
+ // Release the MPC (which will switch it off if no more used)
+ for (i=FIRST_MPC_ID; i<NB_CORE_IDS; i++)
+ {
+ cm_CFG_ReleaseMpc(i);
+ }
+ break;
+ case DISABLE_PWR_MODE:
+ // Force the load of the EE if not already done.
+ for (i=FIRST_MPC_ID; i<NB_CORE_IDS;i++)
+ {
+ if((error = cm_CFG_CheckMpcStatus(i)) != CM_OK)
+ break;
+ }
+ break;
+ }
+ break;
+ case CM_CMD_TRACE_LEVEL:
+ if (aParam<-1) cm_debug_level = -1;
+ else cm_debug_level = aParam;
+ break;
+ case CM_CMD_INTENSIVE_CHECK:
+ cmIntensiveCheckState = aParam;
+ break;
+
+ case CM_CMD_TRACE_ON:
+ cm_trace_enabled = TRUE;
+ cm_TRC_Dump();
+ break;
+ case CM_CMD_TRACE_OFF:
+ cm_trace_enabled = FALSE;
+ break;
+
+ case CM_CMD_MPC_TRACE_ON:
+ cm_EEM_setTraceMode((t_nmf_core_id)aParam, 1);
+ break;
+ case CM_CMD_MPC_TRACE_OFF:
+ cm_EEM_setTraceMode((t_nmf_core_id)aParam, 0);
+ break;
+
+ case CM_CMD_MPC_PRINT_OFF:
+ cm_EEM_setPrintLevel((t_nmf_core_id)aParam, 0);
+ break;
+ case CM_CMD_MPC_PRINT_ERROR:
+ cm_EEM_setPrintLevel((t_nmf_core_id)aParam, 1);
+ break;
+ case CM_CMD_MPC_PRINT_WARNING:
+ cm_EEM_setPrintLevel((t_nmf_core_id)aParam, 2);
+ break;
+ case CM_CMD_MPC_PRINT_INFO:
+ cm_EEM_setPrintLevel((t_nmf_core_id)aParam, 3);
+ break;
+ case CM_CMD_MPC_PRINT_VERBOSE:
+ cm_EEM_setPrintLevel((t_nmf_core_id)aParam, 4);
+ break;
+
+ case CM_CMD_ULP_MODE_ON:
+ cmUlpEnable = TRUE;
+ break;
+
+ default:
+ error = CM_INVALID_PARAMETER;
+ break;
+ }
+
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
diff --git a/drivers/staging/nmf-cm/cm/engine/dsp/inc/dsp.h b/drivers/staging/nmf-cm/cm/engine/dsp/inc/dsp.h
new file mode 100644
index 00000000000..439deac115f
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/dsp/inc/dsp.h
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief DSP abstraction layer
+ *
+ * \defgroup DSP_INTERNAL Private DSP Abstraction Layer API.
+ *
+ */
+#ifndef __INC_CM_DSP_H
+#define __INC_CM_DSP_H
+
+#include <cm/inc/cm_type.h>
+#include <share/inc/nmf.h>
+#include <cm/engine/memory/inc/domain_type.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/memory/inc/remote_allocator.h>
+
+
+#define SxA_NB_BLOCK_RAM 8 /*32kworks (24-bit) */
+
+#define SxA_LOCKED_WAY 1
+
+/*
+ * Type definition to handle a DSP offset in words
+ */
+typedef t_uint32 t_dsp_offset;
+
+typedef t_uint32 t_dsp_address;
+
+typedef enum {
+ DSP2ARM_IRQ_0,
+ DSP2ARM_IRQ_1
+} t_mpc2host_irq_num;
+
+typedef enum {
+ ARM2DSP_IRQ_0,
+ ARM2DSP_IRQ_1,
+ ARM2DSP_IRQ_2,
+ ARM2DSP_IRQ_3
+} t_host2mpc_irq_num;
+
+typedef enum {
+ INTERNAL_XRAM24 = 0, /* 24-bit XRAM */
+ INTERNAL_XRAM16 = 1, /* 16-bit XRAM */
+ INTERNAL_YRAM24 = 2, /* 24-bit YRAM */
+ INTERNAL_YRAM16 = 3, /* 16-bit YRAM */
+ SDRAM_EXT24 = 4, /* 24-bit external "X" memory */
+ SDRAM_EXT16 = 5, /* 16-bit external "X" memory */
+ ESRAM_EXT24 = 6, /* ESRAM24 */
+ ESRAM_EXT16 = 7, /* ESRAM16 */
+ SDRAM_CODE = 8, /* Program memory */
+ ESRAM_CODE = 9, /* ESRAM code */
+ LOCKED_CODE = 10, /* For way locking */
+ NB_DSP_MEMORY_TYPE,
+ DEFAULT_DSP_MEM_TYPE = MASK_ALL16
+} t_dsp_memory_type_id;
+
+typedef struct {
+ t_cm_allocator_desc *allocDesc;
+ t_cm_system_address baseAddress;
+ t_uint32 referenceCounter;
+} t_dsp_allocator_desc;
+
+typedef struct {
+ t_cm_system_address base;
+ t_uint32 size;
+} t_dsp_segment;
+
+typedef enum {
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ SDRAM_CODE_EE,
+ SDRAM_CODE_USER,
+ SDRAM_DATA_EE,
+ SDRAM_DATA_USER,
+ NB_MIGRATION_SEGMENT,
+ ESRAM_CODE_EE = NB_MIGRATION_SEGMENT,
+ ESRAM_CODE_USER,
+ ESRAM_DATA_EE,
+ ESRAM_DATA_USER,
+#else
+ SDRAM_CODE_EE,
+ SDRAM_DATA_EE,
+ ESRAM_CODE_EE,
+ ESRAM_DATA_EE,
+#endif
+ NB_DSP_SEGMENT_TYPE
+} t_dsp_segment_type;
+
+typedef struct {
+ t_dsp_segment_type segmentType;
+ t_uint32 baseOffset;
+} t_dsp_address_info;
+
+typedef enum {
+ MPC_STATE_UNCONFIGURED,
+ MPC_STATE_BOOTABLE,
+ MPC_STATE_BOOTED,
+ MPC_STATE_PANIC,
+} t_dsp_state;
+
+typedef struct {
+ t_dsp_state state;
+ t_uint8 nbYramBank;
+ t_cm_domain_id domainEE;
+ t_dsp_allocator_desc *allocator[NB_DSP_MEMORY_TYPE];
+ t_dsp_segment segments[NB_DSP_SEGMENT_TYPE];
+ t_uint32 yram_offset;
+ t_uint32 yram_size;
+ t_uint32 locked_offset;
+ t_uint32 locked_size;
+} t_dsp_desc;
+
+typedef struct {
+ t_nmf_core_id coreId;
+ t_dsp_memory_type_id memType; // Index in MPC desc allocator
+ t_cm_allocator_desc *alloc;
+} t_dsp_chunk_info;
+
+PUBLIC const t_dsp_desc* cm_DSP_GetState(t_nmf_core_id coreId);
+PUBLIC void cm_DSP_SetStatePanic(t_nmf_core_id coreId);
+
+PUBLIC void cm_DSP_Init(const t_nmf_memory_segment *pEsramDesc);
+PUBLIC void cm_DSP_Destroy(void);
+
+/*!
+ * \brief Initialize the memory segments management of a given MPC
+ *
+ * \param[in] coreId Identifier of the DSP to initialize
+ * \param[in] nbYramBanks Number of Y-RAM banks reserved on this DSP
+ * \param[in] pDspMapDesc DSP mapping into host space
+ * \param[in] eeDomain Memory domain used for the Executive Engine
+ * \param[in] sdramCodeAllocDesc SDRAM code allocator descriptor
+ * \param[in] sdramDataAllocDesc SDRAM data allocator descriptor
+ *
+ * \retval t_cm_error
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_error cm_DSP_Add(t_nmf_core_id coreId,
+ t_uint8 nbYramBanks,
+ const t_cm_system_address *pDspMapDesc,
+ const t_cm_domain_id eeDomain,
+ t_dsp_allocator_desc *sdramCodeAllocDesc,
+ t_dsp_allocator_desc *sdramDataAllocDesc);
+
+
+
+/*!
+ * \brief Configure a given Media Processor Core
+ *
+ * This routine programs the configuration (caches, ahb wrapper, ...) registers of a given MPC.
+ *
+ * \param[in] coreId Identifier of the DSP to initialize
+ *
+ * \retval t_cm_error
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_error cm_DSP_Boot(t_nmf_core_id coreId);
+
+/*!
+ * \brief Boot a given DSP
+ *
+ * After the EE has been initialized and loaded into a given DSP, this routine starts (boots) it
+ *
+ * \param[in] coreId identifier of the DSP to boot
+ * \param[in] panicReasonOffset offset of the panic reason, which is set to NONE_PANIC once the DSP has booted.
+ *
+ * \retval t_cm_error
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC void cm_DSP_ConfigureAfterBoot(t_nmf_core_id coreId);
+
+PUBLIC void cm_DSP_Start(t_nmf_core_id coreId);
+
+PUBLIC void cm_DSP_Stop(t_nmf_core_id coreId);
+
+/*!
+ * \brief Shutdown a given DSP
+ *
+ * This routine stops and shuts down a given DSP
+ *
+ * \param[in] coreId identifier of the DSP to shutdown
+ *
+ * \retval t_cm_error
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC void cm_DSP_Shutdown(t_nmf_core_id coreId);
+
+PUBLIC t_uint32 cm_DSP_ReadXRamWord(t_nmf_core_id coreId, t_uint32 dspOffset);
+PUBLIC void cm_DSP_WriteXRamWord(t_nmf_core_id coreId, t_uint32 dspOffset, t_uint32 value);
+
+/*!
+ * \brief Convert a DSP address (offset inside a given DSP memory segment) into the host (logical) address
+ *
+ * \param[in] coreId identifier of the given DSP
+ * \param[in] dspAddress dsp address to be converted
+ * \param[in] memType memory type identifier
+ *
+ * \retval t_cm_logical_address
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_logical_address cm_DSP_ConvertDspAddressToHostLogicalAddress(t_nmf_core_id coreId, t_shared_addr dspAddress);
+
+/*!
+ * \brief Acknowledge the local interrupt of a given DSP (when not using HW semaphore mechanisms)
+ *
+ * \param[in] coreId identifier of the given DSP
+ * \param[in] irqNum irq identifier
+ *
+ * \retval void
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC void cm_DSP_AcknowledgeDspIrq(t_nmf_core_id coreId, t_mpc2host_irq_num irqNum);
+
+
+/*
+ * Memory Management API routines
+ */
+
+/*!
+ * \brief Retrieve DSP information for a memory chunk.
+ *
+ * This function retrieves information stored in user-data of the allocated chunk.
+ * See also \ref{t_dsp_chunk_info}.
+ *
+ * \param[in] memHandle Handle to the allocated chunk.
+ * \param[out] info Dsp information structure.
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC void cm_DSP_GetDspChunkInfo(t_memory_handle memHandle, t_dsp_chunk_info *info);
+
+/*!
+ * \brief Get memory allocator for a given memory type on a DSP.
+ *
+ * \param[in] coreId Dsp identifier.
+ * \param[in] memType Memory type identifier.
+ *
+ * \retval reference to the allocator descriptor (or null)
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_allocator_desc* cm_DSP_GetAllocator(t_nmf_core_id coreId, t_dsp_memory_type_id memType);
+
+/*!
+ * \brief Get DSP internal memory (TCM) information for allocation.
+ *
+ * For DSP-internal memories (TCMX, Y 16/24), return the offset and size of the allocation zone (for domain
+ * mechanism) and the allocation memory type.
+ *
+ * \param[in] domainId Domain identifier.
+ * \param[in] memType Memory type identifier.
+ * \param[out] offset Offset of the allocation zone.
+ * \param[out] size Size of the allocation zone.
+ *
+ * \retval CM_OK
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_error cm_DSP_GetInternalMemoriesInfo(t_cm_domain_id domainId, t_dsp_memory_type_id memType,
+ t_uint32 *offset, t_uint32 *size);
+
+
+/*!
+ * \brief Convert word size to byte size.
+ *
+ * \param[in] memType Memory type identifier.
+ * \param[in] wordSize Word size to be converted.
+ *
+ * \retval Byte size.
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_uint32 cm_DSP_ConvertSize(t_dsp_memory_type_id memType, t_uint32 wordSize);
+
+/*!
+ * \brief Provide the Memory status of a given memory type for a given DSP
+ *
+ * \param[in] coreId dsp identifier.
+ * \param[in] memType Type of memory.
+ * \param[out] pStatus requested memory status
+ *
+ * \retval t_cm_error
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_error cm_DSP_GetAllocatorStatus(t_nmf_core_id coreId, t_dsp_memory_type_id memType, t_uint32 offset, t_uint32 size, t_cm_allocator_status *pStatus);
+
+/*!
+ * \brief Provide DSP memory host shared address
+ *
+ * \param[in] memHandle Allocated block handle
+ * \param[out] pAddr Returned system address.
+ *
+ * \retval t_cm_error
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC void cm_DSP_GetHostSystemAddress( t_memory_handle memHandle, t_cm_system_address *pAddr);
+
+/*!
+ * \brief Get physical address of a memory chunk.
+ *
+ * \param[in] memHandle Memory handle.
+ *
+ * \retval Physical address.
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_physical_address cm_DSP_GetPhysicalAdress(t_memory_handle memHandle);
+
+/*!
+ * \brief Return Logical Address of an allocated memory chunk.
+ *
+ * \param[in] memHandle Allocated chunk handle
+ * \retval t_cm_error
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_logical_address cm_DSP_GetHostLogicalAddress(t_memory_handle memHandle);
+
+/*!
+ * \brief Provide DSP memory DSP address (offset inside a given DSP memory segment)
+ *
+ * \param[in] memHandle Allocated block handle
+ * \param[out] dspAddress allocated block address seen by the given DSP
+ *
+ * \retval t_cm_error
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC void cm_DSP_GetDspAddress(t_memory_handle handle, t_uint32 *pDspAddress);
+
+/*!
+ * \brief Return the address of the DSP base associated with the memory type.
+ * Caution: this information is valid only in the normal state (not when migrated).
+ *
+ * \param[in] coreId DSP Identifier.
+ * \param[in] memType Type of memory.
+ * \param[out] pAddr Base address.
+ *
+ * \retval t_cm_error
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_error cm_DSP_GetDspBaseAddress(t_nmf_core_id coreId, t_dsp_memory_type_id memType, t_cm_system_address *pAddr);
+
+/*!
+ * \brief Return DSP memory handle offset (offset inside a given DSP memory)
+ *
+ * \param[in] coreId dsp identifier.
+ * \param[in] memType Type of memory.
+ * \param[in] memHandle Allocated block handle
+ *
+ * \retval t_uint32: Offset of memory handle inside memory
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_uint32 cm_DSP_GetDspMemoryHandleOffset(
+ t_nmf_core_id coreId,
+ t_dsp_memory_type_id dspMemType,
+ t_memory_handle memHandle);
+
+/*!
+ * \brief Provide DSP memory handle size
+ *
+ * \param[in] memHandle Allocated block handle
+ * \param[out] pDspSize Size of the given memory handle
+ *
+ * \retval t_cm_error
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC void cm_DSP_GetDspMemoryHandleSize(t_memory_handle memHandle, t_uint32 *pDspSize);
+
+/*!
+ * \brief Resize the XRAM allocator to reserve space for the stack.
+ *
+ * \param[in] coreId dsp identifier.
+ * \param[in] newStackSize New required stack size.
+ *
+ * \retval t_cm_error
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_error cm_DSP_setStackSize(t_nmf_core_id coreId, t_uint32 newStackSize);
+
+/*!
+ * \brief Check whether the nbYramBanks parameter is valid for coreId. This API is needed since the use of nbYramBanks
+ * is deferred.
+ *
+ * \param[in] coreId dsp identifier.
+ * \param[in] nbYramBanks number of yramBanks to use.
+ *
+ * \retval t_cm_error
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_error cm_DSP_IsNbYramBanksValid(t_nmf_core_id coreId, t_uint8 nbYramBanks);
+
+/*!
+ * \brief Return the stack base address according to coreId and the number of YRAM banks in use.
+ *
+ * \param[in] coreId dsp identifier.
+ * \param[in] nbYramBanks number of yramBanks to use.
+ *
+ * \retval t_uint32 return stack address
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_uint32 cm_DSP_getStackAddr(t_nmf_core_id coreId);
+
+/*!
+ * \brief For a given DSP address, return the offset from the hardware base that the address is relative to.
+ *
+ * \param[in] coreId DSP identifier.
+ * \param[in] adr DSP address.
+ * \param[out] info Info structure containing (hw base id, offset)
+ *
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_error cm_DSP_GetDspDataAddressInfo(t_nmf_core_id coreId, t_uint32 adr, t_dsp_address_info *info);
+
+/*!
+ * \brief Modify the mapping of a code hardware base. Used for memory migration.
+ *
+ * The function calculates the new hardware base so that in the DSP address-space,
+ * the source address will be mapped to the destination address.
+ *
+ * \param[in] coreId DSP Identifier.
+ * \param[in] hwSegment Identifier of the hardware segment (thus hardware base).
+ * \param[in] src Source address
+ * \param[in] dst Destination address
+ *
+ * \retval t_cm_error
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_error cm_DSP_updateCodeBase(t_nmf_core_id coreId, t_dsp_segment_type hwSegment, t_cm_system_address src, t_cm_system_address dst);
+
+/*!
+ * \brief Modify the mapping of a data hardware base. Used for memory migration.
+ *
+ * The function calculates the new hardware base so that in the DSP address-space,
+ * the source address will be mapped to the destination address.
+ *
+ * \param[in] coreId DSP Identifier.
+ * \param[in] hwSegment Identifier of the hardware segment (thus hardware base).
+ * \param[in] src Source address
+ * \param[in] dst Destination address
+ *
+ * \retval t_cm_error
+ * \ingroup DSP_INTERNAL
+ */
+PUBLIC t_cm_error cm_DSP_updateDataBase(t_nmf_core_id coreId, t_dsp_segment_type hwSegment, t_cm_system_address src, t_cm_system_address dst);
+
+#endif /* __INC_CM_DSP_H */
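As an illustration only (not part of the patch), a minimal sketch combining two of the helpers declared above; it assumes the DSP word offset can be passed directly where a t_shared_addr is expected.

/* Illustrative sketch: map a DSP X-RAM offset to a host pointer and read the word. */
static t_uint32 peek_xram(t_nmf_core_id coreId, t_uint32 dspWordOffset,
                          t_cm_logical_address *hostAddr)
{
    /* Translate the DSP-side offset to a host logical address ... */
    *hostAddr = cm_DSP_ConvertDspAddressToHostLogicalAddress(coreId, dspWordOffset);

    /* ... then read the 24-bit word through the XRAM accessor. */
    return cm_DSP_ReadXRamWord(coreId, dspWordOffset);
}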
diff --git a/drivers/staging/nmf-cm/cm/engine/dsp/inc/semaphores_dsp.h b/drivers/staging/nmf-cm/cm/engine/dsp/inc/semaphores_dsp.h
new file mode 100644
index 00000000000..1bb1c34cced
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/dsp/inc/semaphores_dsp.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef __INC_CM_SEMAPHORES_DSP_H
+#define __INC_CM_SEMAPHORES_DSP_H
+
+#include <share/semaphores/inc/semaphores.h>
+#include <cm/engine/dsp/inc/dsp.h>
+
+PUBLIC void cm_DSP_SEM_Take(t_nmf_core_id coreId, t_semaphore_id semId);
+PUBLIC void cm_DSP_SEM_Give(t_nmf_core_id coreId, t_semaphore_id semId);
+PUBLIC void cm_DSP_SEM_GenerateIrq(t_nmf_core_id coreId, t_semaphore_id semId);
+PUBLIC void cm_DSP_AssertDspIrq(t_nmf_core_id coreId, t_host2mpc_irq_num irqNum);
+
+PUBLIC void cm_DSP_AcknowledgeDspIrq(t_nmf_core_id coreId, t_mpc2host_irq_num irqNum);
+
+#endif /* __INC_CM_SEMAPHORES_DSP_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/dsp/mmdsp/inc/mmdsp_hwp.h b/drivers/staging/nmf-cm/cm/engine/dsp/mmdsp/inc/mmdsp_hwp.h
new file mode 100644
index 00000000000..0ddc71d2c4f
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/dsp/mmdsp/inc/mmdsp_hwp.h
@@ -0,0 +1,959 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef __INC_MMDSP_HWP_H
+#define __INC_MMDSP_HWP_H
+
+#include <cm/inc/cm_type.h>
+
+#define MMDSP_NB_BLOCK_RAM 8
+#define MMDSP_RAM_BLOCK_SIZE 4096 /* 0x1000 */
+#define MMDSP_NB_TIMER 3
+#define MMDSP_NB_BIT_SEM 8
+#define MMDSP_NB_DMA_IF 8
+#define MMDSP_NB_DMA_CTRL 4
+#define MMDSP_NB_ITREMAP_REG 32
+
+#define MMDSP_INSTRUCTION_WORD_SIZE (sizeof(t_uint64))
+#define MMDSP_ICACHE_LINE_SIZE_IN_INST (4)
+#define MMDSP_ICACHE_LINE_SIZE (MMDSP_ICACHE_LINE_SIZE_IN_INST * MMDSP_INSTRUCTION_WORD_SIZE)
+
+#define MMDSP_DATA_WORD_SIZE (3)
+#define MMDSP_DATA_WORD_SIZE_IN_HOST_SPACE (sizeof(t_uint32))
+#define MMDSP_DATA_WORD_SIZE_IN_EXT24 (sizeof(t_uint32))
+#define MMDSP_DATA_WORD_SIZE_IN_EXT16 (sizeof(t_uint16))
+#define MMDSP_DCACHE_LINE_SIZE_IN_WORDS (8)
+#define MMDSP_DCACHE_LINE_SIZE (MMDSP_DCACHE_LINE_SIZE_IN_WORDS * sizeof(t_uint32))
+
+#define MMDSP_NB_IO 16
+
+#define MMDSP_CODE_CACHE_WAY_SIZE 256
+
+//#define MMDSP_ESRAM_DSP_BASE_ADDR 0xE0000 /* 64-bit words */
+//#define MMDSP_DATA24_DSP_BASE_ADDR 0x10000
+//#define MMDSP_DATA16_DSP_BASE_ADDR 0x800000
+//#define MMDSP_MMIO_DSP_BASE_ADDR 0xF80000
+
+/* Specified according MMDSP & ELF convention */
+/* Note: Here we assume that ESRAM is less than 2MB */
+#define SDRAMTEXT_BASE_ADDR 0x00000000
+#define ESRAMTEXT_BASE_ADDR 0x000E0000
+
+#define SDRAMMEM24_BASE_ADDR 0x00010000
+#define ESRAMMEM24_BASE_ADDR 0x00600000 /* ELF == 0x00400000 TODO: Update it in MMDSP ELF compiler */
+#define SDRAMMEM16_BASE_ADDR 0x00800000
+#define ESRAMMEM16_BASE_ADDR 0x00D80000 /* ELF == 0x00BC0000 TODO: Update it in MMDSP ELF compiler */
+
+#define MMIO_BASE_ADDR 0x00F80000
+
+/*
+ * Definition of indirect host registers
+ */
+#define IHOST_ICACHE_FLUSH_REG 0x0
+#define IHOST_ICACHE_FLUSH_CMD_ENABLE (t_uint64)MASK_BIT0
+#define IHOST_ICACHE_FLUSH_ALL_ENTRIES_CMD (t_uint64)0x0
+#if 0
+#define IHOST_ICACHE_INVALID_ALL_UNLOCKED_L2_LINES_CMD (t_uint64)0x8
+#define IHOST_ICACHE_INVALID_ALL_LOCKED_L2_LINES_CMD (t_uint64)0xA
+#define IHOST_ICACHE_UNLOCK_ALL_LOCKED_L2_LINES_CMD (t_uint64)0xC
+#define IHOST_ICACHE_LOCK_ALL_WAYS_LESSER_THAN_LOCK_V_CMD (t_uint64)0xE
+#else
+#define IHOST_ICACHE_INVALID_ALL_UNLOCKED_L2_LINES_CMD (t_uint64)0x10
+#define IHOST_ICACHE_INVALID_ALL_LOCKED_L2_LINES_CMD (t_uint64)0x12
+#define IHOST_ICACHE_UNLOCK_ALL_LOCKED_L2_LINES_CMD (t_uint64)0x14
+#define IHOST_ICACHE_LOCK_ALL_WAYS_LESSER_THAN_LOCK_V_CMD (t_uint64)0x16
+#define IHOST_ICACHE_FLUSH_BY_SERVICE (t_uint64)0x18
+#define IHOST_ICACHE_FLUSH_OUTSIDE_RANGE (t_uint64)0x1A
+#endif
+
+#define IHOST_ICACHE_LOCK_V_REG 0x1
+
+#define IHOST_ICACHE_MODE_REG 0x2
+#define IHOST_ICACHE_MODE_PERFMETER_ON (t_uint64)MASK_BIT0
+#define IHOST_ICACHE_MODE_PERFMETER_OFF (t_uint64)0x0
+#define IHOST_ICACHE_MODE_L2_CACHE_ON (t_uint64)MASK_BIT1
+#define IHOST_ICACHE_MODE_L2_CACHE_OFF (t_uint64)0x0
+#define IHOST_ICACHE_MODE_L1_CACHE_ON (t_uint64)MASK_BIT2
+#define IHOST_ICACHE_MODE_L1_CACHE_OFF (t_uint64)0x0
+#define IHOST_ICACHE_MODE_FILL_MODE_ON (t_uint64)MASK_BIT3
+#define IHOST_ICACHE_MODE_FILL_MODE_OFF (t_uint64)0x0
+
+#define IHOST_CLEAR_PERFMETER_REG 0x3
+#define IHOST_CLEAR_PERFMETER_ON (t_uint64)0x1
+#define IHOST_CLEAR_PERFMETER_OFF (t_uint64)0x0
+
+#define IHOST_PERF_HIT_STATUS_REG 0x4
+
+#define IHOST_PERF_MISS_STATUS_REG 0x5
+
+#define IHOST_FILL_START_WAY_REG 0x6
+#define IHOST_FILL_START_ADDR_VALUE_SHIFT 0U
+#define IHOST_FILL_WAY_NUMBER_SHIFT 20U
+
+#define IHOST_PRG_BASE_ADDR_REG 0x7
+#define IHOST_PRG_BASE1_ADDR_SHIFT 0
+#define IHOST_PRG_BASE2_ADDR_SHIFT 32
+
+#if defined(__STN_8500) && (__STN_8500>10)
+#define IHOST_PRG_BASE_34_ADDR_REG 0x1A
+#define IHOST_PRG_BASE3_ADDR_SHIFT 0
+#define IHOST_PRG_BASE4_ADDR_SHIFT 32
+#endif
+
+#if defined(__STN_8815) /* __STN_8815 */
+#define IHOST_PRG_AHB_CONF_REG 0x8
+#define IHOST_PRG_AHB_LOCKED_SHIFT 0U
+#define IHOST_PRG_AHB_PROT_SHIFT 1U
+
+#define AHB_LOCKED_ON (t_uint64)1
+#define AHB_LOCKED_OFF (t_uint64)0
+
+#define AHB_PROT_USER (t_uint64)0
+#define AHB_PROT_PRIVILEGED (t_uint64)MASK_BIT0
+#define AHB_PROT_NONBUFFERABLE (t_uint64)0
+#define AHB_PROT_BUFFERABLE (t_uint64)MASK_BIT1
+#define AHB_PROT_NONCACHEABLE (t_uint64)0
+#define AHB_PROT_CACHEABLE (t_uint64)MASK_BIT2
+
+
+#define IHOST_DATA_AHB_CONF_REG 0x9
+#define IHOST_DATA_AHB_LOCKED_SHIFT 0U
+#define IHOST_DATA_AHB_PROT_SHIFT 1U
+#else /* def __STN_8820 or __STN_8500 */
+#define IHOST_STBUS_ID_CONF_REG 0x8
+#define SAA_STBUS_ID 176 /* = 0xB0 */
+#define SVA_STBUS_ID 4 /* = 0x4 */
+#define SIA_STBUS_ID 180 /* = 0xB4 */
+
+#define IHOST_STBUF_CONF_REG 0x9 /* RESERVED */
+#endif /* __STN_8820 or __STN_8500 */
+
+#define IHOST_DATA_EXT_BUS_BASE_REG 0xA
+#define IHOST_DATA_EXT_BUS_BASE_16_SHIFT 32ULL
+#define IHOST_DATA_EXT_BUS_BASE_24_SHIFT 0ULL
+
+#define IHOST_EXT_MMIO_BASE_DATA_EXT_BUS_TOP_REG 0xB
+#define IHOST_EXT_MMIO_DATA_EXT_BUS_TOP_SHIFT 0ULL
+#define IHOST_EXT_MMIO_BASE_ADDR_SHIFT 32ULL
+
+#define IHOST_DATA_EXT_BUS_BASE2_REG 0xC
+#define IHOST_DATA_EXT_BUS_BASE2_16_SHIFT 32ULL
+#define IHOST_DATA_EXT_BUS_BASE2_24_SHIFT 0ULL
+
+#if defined(__STN_8500) && (__STN_8500>10)
+
+#define IHOST_DATA_EXT_BUS_BASE3_REG 0x1B
+#define IHOST_DATA_EXT_BUS_BASE3_16_SHIFT 32ULL
+#define IHOST_DATA_EXT_BUS_BASE3_24_SHIFT 0ULL
+
+#define IHOST_DATA_EXT_BUS_BASE4_REG 0x1C
+#define IHOST_DATA_EXT_BUS_BASE4_16_SHIFT 32ULL
+#define IHOST_DATA_EXT_BUS_BASE4_24_SHIFT 0ULL
+
+#endif
+
+#define IHOST_ICACHE_STATE_REG 0xD
+#define IHOST_ICACHE_STATE_RESET 0x0
+#define IHOST_ICACHE_STATE_INITAGL2 0x1
+#define IHOST_ICACHE_STATE_READY_TO_START 0x2
+#define IHOST_ICACHE_STATE_WAIT_FOR_MISS 0x3
+#define IHOST_ICACHE_STATE_FILLDATARAM0 0x4
+#define IHOST_ICACHE_STATE_FILLDATARAM1 0x5
+#define IHOST_ICACHE_STATE_FILLDATARAM2 0x6
+#define IHOST_ICACHE_STATE_FILLDATARAM3 0x7
+#define IHOST_ICACHE_STATE_FLUSH 0x8
+#define IHOST_ICACHE_STATE_FILL_INIT 0x9
+#define IHOST_ICACHE_STATE_FILL_LOOP 0xA
+#define IHOST_ICACHE_STATE_FILL_LOOP0 0xB
+#define IHOST_ICACHE_STATE_FILL_LOOP1 0xC
+#define IHOST_ICACHE_STATE_FILL_LOOP2 0xD
+#define IHOST_ICACHE_STATE_FILL_LOOP3 0xE
+#define IHOST_ICACHE_STATE_FILL_END 0xF
+#define IHOST_ICACHE_STATE_SPECIFIC_FLUSH_R 0x10
+#define IHOST_ICACHE_STATE_SPECIFIC_FLUSH_W 0x11
+#define IHOST_ICACHE_STATE_SPECIFIC_FLUSH_END 0x12
+#define IHOST_ICACHE_STATE_OTHERS 0x1F
+
+#define IHOST_EN_EXT_BUS_TIMEOUT_REG 0xE
+#define IHOST_TIMEOUT_ENABLE 1ULL
+#define IHOST_TIMEOUT_DISABLE 0ULL
+
+#define IHOST_DATA2_1624_XA_BASE_REG 0xF
+#define IHOST_DATA2_24_XA_BASE_SHIFT 0ULL
+#define IHOST_DATA2_16_XA_BASE_SHIFT 32ULL
+#if defined(__STN_8500) && (__STN_8500>10)
+#define IHOST_DATA3_24_XA_BASE_SHIFT 8ULL
+#define IHOST_DATA3_16_XA_BASE_SHIFT 40ULL
+#define IHOST_DATA4_24_XA_BASE_SHIFT 16ULL
+#define IHOST_DATA4_16_XA_BASE_SHIFT 48ULL
+#endif
+
+#define IHOST_PERFMETERS_MODE_REG 0x10
+
+#if defined(__STN_8815) /* __STN_8815 */
+#define IHOST_EXT_MMIO_AHB_CONF_REG 0x11
+#define IHOST_EXT_MMIO_AHB_LOCKED_SHIFT 0U
+#define IHOST_EXT_MMIO_AHB_PROT_SHIFT 1U
+#else /* def __STN_8820 or __STN_8500 */
+#define IHOST_EXT_MMIO_STBS_CONF_REG 0x11 /* RESERVED */
+#endif /* __STN_8820 or __STN_8500 */
+
+#define IHOST_PRG_BASE_SEL_REG 0x12
+#define IHOST_PRG_BASE_SEL_OFF (t_uint64)0
+#define IHOST_PRG_BASE_SEL_ON (t_uint64)1
+
+#define IHOST_PRG_BASE2_ACTIV_REG 0x13
+#define IHOST_PRG_BASE2_ACTIV_OFF (t_uint64)0
+#if defined(__STN_8500) && (__STN_8500>10)
+/* TODO : for the moment, just divide the MMDSP address space into 4 fixed spaces */
+ #define IHOST_PRG_BASE2_ACTIV_ON (t_uint64)((((t_uint64)0xf0000>>10)<<48) | (((t_uint64)0xe0000>>10)<<32) | (((t_uint64)0x70000>>10)<<16) | 1)
+#else
+ #define IHOST_PRG_BASE2_ACTIV_ON (t_uint64)1
+#endif
+
+#define IHOST_DATA_EXT_BUS_TOP_16_24_REG 0x14
+#define IHOST_DATA_EXT_BUS_TOP_24_SHIFT 0ULL
+#define IHOST_DATA_EXT_BUS_TOP_16_SHIFT 32ULL
+
+#define IHOST_DATA_TOP_16_24_CHK_REG 0x16
+#define IHOST_DATA_TOP_16_24_CHK_OFF (t_uint64)0
+#define IHOST_DATA_TOP_16_24_CHK_ON (t_uint64)1
+
+#define IHOST_EXT_BUS_TOP2_16_24_REG 0x15
+#define IHOST_DATA_EXT_BUS_TOP2_24_SHIFT 0ULL
+#define IHOST_DATA_EXT_BUS_TOP2_16_SHIFT 32ULL
+
+#if defined(__STN_8500) && (__STN_8500>10)
+
+#define IHOST_EXT_BUS_TOP3_16_24_REG 0x1D
+#define IHOST_DATA_EXT_BUS_TOP3_24_SHIFT 0ULL
+#define IHOST_DATA_EXT_BUS_TOP3_16_SHIFT 32ULL
+
+#define IHOST_EXT_BUS_TOP4_16_24_REG 0x1E
+#define IHOST_DATA_EXT_BUS_TOP4_24_SHIFT 0ULL
+#define IHOST_DATA_EXT_BUS_TOP4_16_SHIFT 32ULL
+
+#endif
+
+#define IHOST_DATA_BASE2_ACTIV_REG 0x17
+#define IHOST_DATA_BASE2_ACTIV_OFF (t_uint64)0
+#define IHOST_DATA_BASE2_ACTIV_ON (t_uint64)1
+
+#define IHOST_INST_BURST_SZ_REG 0x18
+#define IHOST_INST_BURST_SZ_ALWAYS_1_LINE (t_uint64)0x0
+#define IHOST_INST_BURST_SZ_ALWAYS_2_LINES (t_uint64)0x1
+#define IHOST_INST_BURST_SZ_AUTO (t_uint64)0x2 /* 2 lines for SDRAM [0, 0xE0000[, 1 line for ESRAM [0xE0000, 0xFFFFF] */
+
+#define IHOST_ICACHE_END_CLEAR_REG 0x19
+#define IHOST_ICACHE_START_CLEAR_REG IHOST_FILL_START_WAY_REG
+
+/*
+ * Definition of value of the ucmd register
+ */
+#define MMDSP_UCMD_WRITE 0
+#define MMDSP_UCMD_READ 4
+#define MMDSP_UCMD_CTRL_STATUS_ACCESS 0x10 // (MASK_BIT4 | !MASK_BIT3 | !MASK_BIT0)
+#define MMDSP_UCMD_DECREMENT_ADDR MASK_BIT5
+#define MMDSP_UCMD_INCREMENT_ADDR MASK_BIT1
+
+/*
+ * Definition of value of the ubkcmd register
+ */
+#define MMDSP_UBKCMD_EXT_CODE_MEM_ACCESS_ENABLE MASK_BIT3
+#define MMDSP_UBKCMD_EXT_CODE_MEM_ACCESS_DISABLE 0
+
+/*
+ * Definition of value of the clockcmd register
+ */
+#define MMDSP_CLOCKCMD_STOP_CLOCK MASK_BIT0
+#define MMDSP_CLOCKCMD_START_CLOCK 0
+
+/*
+ * Definition of macros used to access indirect addressed host register
+ */
+#define WRITE_INDIRECT_HOST_REG(pRegs, addr, value64) \
+{ \
+ (pRegs)->host_reg.emul_uaddrl = addr; \
+ (pRegs)->host_reg.emul_uaddrm = 0; \
+ (pRegs)->host_reg.emul_uaddrh = 0; \
+ (pRegs)->host_reg.emul_udata[0] = ((value64 >> 0ULL) & MASK_BYTE0); \
+ (pRegs)->host_reg.emul_udata[1] = ((value64 >> 8ULL) & MASK_BYTE0); \
+ (pRegs)->host_reg.emul_udata[2] = ((value64 >> 16ULL) & MASK_BYTE0); \
+ (pRegs)->host_reg.emul_udata[3] = ((value64 >> 24ULL) & MASK_BYTE0); \
+ (pRegs)->host_reg.emul_udata[4] = ((value64 >> 32ULL) & MASK_BYTE0); \
+ (pRegs)->host_reg.emul_udata[5] = ((value64 >> 40ULL) & MASK_BYTE0); \
+ (pRegs)->host_reg.emul_udata[6] = ((value64 >> 48ULL) & MASK_BYTE0); \
+ (pRegs)->host_reg.emul_udata[7] = ((value64 >> 56ULL) & MASK_BYTE0); \
+ (pRegs)->host_reg.emul_ucmd = (MMDSP_UCMD_CTRL_STATUS_ACCESS | MMDSP_UCMD_WRITE); \
+}
+
+#define READ_INDIRECT_HOST_REG(pRegs, addr, value64) \
+{ \
+ (pRegs)->host_reg.emul_udata[0] = 0; \
+ (pRegs)->host_reg.emul_udata[1] = 0; \
+ (pRegs)->host_reg.emul_udata[2] = 0; \
+ (pRegs)->host_reg.emul_udata[3] = 0; \
+ (pRegs)->host_reg.emul_udata[4] = 0; \
+ (pRegs)->host_reg.emul_udata[5] = 0; \
+ (pRegs)->host_reg.emul_udata[6] = 0; \
+ (pRegs)->host_reg.emul_udata[7] = 0; \
+ (pRegs)->host_reg.emul_uaddrl = addr; \
+ (pRegs)->host_reg.emul_uaddrm = 0; \
+ (pRegs)->host_reg.emul_uaddrh = 0; \
+ (pRegs)->host_reg.emul_ucmd = (MMDSP_UCMD_CTRL_STATUS_ACCESS | MMDSP_UCMD_READ); \
+ value64 = (((t_uint64)((pRegs)->host_reg.emul_udata[0])) << 0ULL) | \
+ (((t_uint64)((pRegs)->host_reg.emul_udata[1])) << 8ULL) | \
+ (((t_uint64)((pRegs)->host_reg.emul_udata[2])) << 16ULL) | \
+ (((t_uint64)((pRegs)->host_reg.emul_udata[3])) << 24ULL) | \
+ (((t_uint64)((pRegs)->host_reg.emul_udata[4])) << 32ULL) | \
+ (((t_uint64)((pRegs)->host_reg.emul_udata[5])) << 40ULL) | \
+ (((t_uint64)((pRegs)->host_reg.emul_udata[6])) << 48ULL) | \
+ (((t_uint64)((pRegs)->host_reg.emul_udata[7])) << 56ULL); \
+}
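For illustration only (not part of the patch), a sketch of how these two macros are meant to be used together. The container structure below is a placeholder introduced for the example; only its host_reg member layout matters.

/* Hypothetical container exposing the host_reg block (for the sketch only). */
struct mmdsp_regs_placeholder {
    t_mmdsp_host_regs_16 host_reg;
};

/* Illustrative sketch: enable the L1/L2 instruction caches through the
 * indirect ICACHE mode register, then read the value back. */
static void icache_enable(struct mmdsp_regs_placeholder *regs)
{
    t_uint64 mode = IHOST_ICACHE_MODE_L1_CACHE_ON |
                    IHOST_ICACHE_MODE_L2_CACHE_ON |
                    IHOST_ICACHE_MODE_FILL_MODE_OFF;

    WRITE_INDIRECT_HOST_REG(regs, IHOST_ICACHE_MODE_REG, mode);

    /* Read the register back to confirm the value latched. */
    READ_INDIRECT_HOST_REG(regs, IHOST_ICACHE_MODE_REG, mode);
}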
+
+/* Common type to handle 64-bit modulo field in 32-bit mode */
+typedef struct {
+ t_uint32 value;
+ t_uint32 dummy;
+} t_mmdsp_field_32;
+
+typedef struct {
+ t_uint16 value;
+ t_uint16 dummy;
+} t_mmdsp_field_16;
+
+/* DCache registers */
+#define DCACHE_MODE_ENABLE MASK_BIT0
+#define DCACHE_MODE_DISABLE 0
+#define DCACHE_MODE_DIVIDE_PER_2 MASK_BIT1
+#define DCACHE_MODE_DIVIDE_PER_4 MASK_BIT2
+#define DCACHE_MODE_CHECK_TAG_ENABLE MASK_BIT3
+#define DCACHE_MODE_CHECK_TAG_DISABLE 0
+#define DCACHE_MODE_FORCE_LOCK_MODE MASK_BIT4
+#define DCACHE_MODE_LOCK_BIT MASK_BIT5
+
+#define DCACHE_CONTROL_PREFETCH_LINE MASK_BIT0
+#define DCACHE_CONTROL_NON_BLOCKING_REFILL 0
+#define DCACHE_CONTROL_FAST_READ_DISABLE MASK_BIT1
+#define DCACHE_CONTROL_FAST_READ_ENABLE 0
+#define DCACHE_CONTROL_ON_FLY_FILL_ACCESS_OFF MASK_BIT2
+#define DCACHE_CONTROL_ON_FLY_FILL_ACCESS_ON 0
+#define DCACHE_CONTROL_BURST_1_WRAP8 MASK_BIT3
+#define DCACHE_CONTROL_BURST_2_WRAP4 0
+#define DCACHE_CONTROL_NOT_USE_DATA_BUFFER MASK_BIT4
+#define DCACHE_CONTROL_USE_DATA_BUFFER 0
+#define DCACHE_CONTROL_WRITE_POSTING_ENABLE MASK_BIT5
+#define DCACHE_CONTROL_WRITE_POSTING_DISABLE 0
+
+#define DCACHE_CMD_NOP 0
+#define DCACHE_CMD_DISCARD_WAY 2 //see Dcache_way reg
+#define DCACHE_CMD_DISCARD_LINE 3 //see Dcache_line reg
+#define DCACHE_CMD_FREE_WAY 4 //see Dcache_way reg
+#define DCACHE_CMD_FREE_LINE 5 //see Dchache_line reg
+#define DCACHE_CMD_FLUSH 7
+
+#define DCACHE_STATUS_CURRENT_WAY_MASK (MASK_BIT2 | MASK_BIT1 | MASK_BIT0)
+#define DCACHE_STATUS_TAG_HIT_MASK MASK_BIT3
+#define DCACHE_STATUS_TAG_LOCKED_MASK MASK_BIT4
+#define DCACHE_STATUS_PROTECTION_ERROR_MASK MASK_BIT5
+
+#define DCACHE_CPTRSEL_COUNTER_1_MASK (MASK_BIT3 | MASK_BIT2 | MASK_BIT1 | MASK_BIT0)
+#define DCACHE_CPTRSEL_COUNTER_1_SHIFT 0
+#define DCACHE_CPTRSEL_COUNTER_2_MASK (MASK_BIT7 | MASK_BIT6 | MASK_BIT5 | MASK_BIT4)
+#define DCACHE_CPTRSEL_COUNTER_2_SHIFT 4
+#define DCACHE_CPTRSEL_COUNTER_3_MASK (MASK_BIT11 | MASK_BIT10 | MASK_BIT9 | MASK_BIT8)
+#define DCACHE_CPTRSEL_COUNTER_3_SHIFT 8
+#define DCACHE_CPTRSEL_XBUS_ACCESS_TO_CACHE_RAM 1
+#define DCACHE_CPTRSEL_CACHE_HIT 2
+#define DCACHE_CPTRSEL_LINE_MATCH 3
+#define DCACHE_CPTRSEL_XBUS_WS 4
+#define DCACHE_CPTRSEL_EXTMEM_WS 5
+#define DCACHE_CPTRSEL_CACHE_READ 6
+#define DCACHE_CPTRSEL_CACHE_WRITE 7
+#define DCACHE_CPTRSEL_TAG_HIT_READ 8
+#define DCACHE_CPTRSEL_TAG_LOCKED_ACCESS 9
+#define DCACHE_CPTRSEL_TAG_MEM_READ_CYCLE 10
+#define DCACHE_CPTRSEL_TAG_MEM_WRITE_CYCLE 11
+
+
+typedef volatile struct {
+ t_uint16 padding_1[5];
+ t_uint16 mode;
+ t_uint16 control;
+ t_uint16 way;
+ t_uint16 line;
+ t_uint16 command;
+ t_uint16 status;
+ t_uint16 cptr1l;
+ t_uint16 cptr1h;
+ t_uint16 cptr2l;
+ t_uint16 cptr2h;
+ t_uint16 cptr3l;
+ t_uint16 cptr3h;
+ t_uint16 cptrsel;
+ t_uint16 flush_base_lsb; /* only on STn8820 and STn8500 */
+ t_uint16 flush_base_msb; /* only on STn8820 and STn8500 */
+ t_uint16 flush_top_lsb; /* only on STn8820 and STn8500 */
+ t_uint16 flush_top_msb; /* only on STn8820 and STn8500 */
+ t_uint16 padding_2[10];
+} t_mmdsp_dcache_regs_16;
+
+typedef volatile struct {
+ t_uint32 padding_1[5];
+ t_uint32 mode;
+ t_uint32 control;
+ t_uint32 way;
+ t_uint32 line;
+ t_uint32 command;
+ t_uint32 status;
+ t_uint32 cptr1l;
+ t_uint32 cptr1h;
+ t_uint32 cptr2l;
+ t_uint32 cptr2h;
+ t_uint32 cptr3l;
+ t_uint32 cptr3h;
+ t_uint32 cptrsel;
+ t_uint32 flush_base_lsb; /* only on STn8820 and STn8500 */
+ t_uint32 flush_base_msb; /* only on STn8820 and STn8500 */
+ t_uint32 flush_top_lsb; /* only on STn8820 and STn8500 */
+ t_uint32 flush_top_msb; /* only on STn8820 and STn8500 */
+ t_uint32 padding_2[10];
+} t_mmdsp_dcache_regs_32;
+
+/* TIMER Registers */
+typedef volatile struct {
+ t_mmdsp_field_16 timer_msb;
+ t_mmdsp_field_16 timer_lsb;
+} t_mmdsp_timer_regs_16;
+
+typedef volatile struct {
+ t_mmdsp_field_32 timer_msb;
+ t_mmdsp_field_32 timer_lsb;
+} t_mmdsp_timer_regs_32;
+
+
+/* DMA interface Registers */
+typedef volatile struct {
+ t_uint16 arm_dma_sreq; /* dma0: 5e800, dma1: +0x20 ...*/
+ t_uint16 arm_dma_breq; /* ... 5e802 */
+ t_uint16 arm_dma_lsreq; /* ... 5e804 */
+ t_uint16 arm_dma_lbreq;
+ t_uint16 arm_dma_maskit;
+ t_uint16 arm_dma_it;
+ t_uint16 arm_dma_auto;
+ t_uint16 arm_dma_lauto;
+ t_uint16 dma_reserved[8];
+} t_mmdsp_dma_if_regs_16;
+
+typedef volatile struct {
+ t_uint32 arm_dma_sreq; /* dma0: 3a800, dma1: +0x40 ...*/
+ t_uint32 arm_dma_breq; /* ... 3a804 */
+ t_uint32 arm_dma_lsreq; /* ... 3a808 */
+ t_uint32 arm_dma_lbreq;
+ t_uint32 arm_dma_maskit;
+ t_uint32 arm_dma_it;
+ t_uint32 arm_dma_auto;
+ t_uint32 arm_dma_lauto;
+ t_uint32 dma_reserved[8];
+} t_mmdsp_dma_if_regs_32;
+
+/* MMDSP DMA controller Registers */
+typedef volatile struct {
+ t_uint16 dma_ctrl; /* dma0: 0x5d400, dma1: +0x10 ... */
+ t_uint16 dma_int_base; /* ... 0x5d402 */
+ t_uint16 dma_int_length; /* ... 0x5d404 */
+ t_uint16 dma_ext_baseh;
+ t_uint16 dma_ext_basel;
+ t_uint16 dma_count;
+ t_uint16 dma_ext_length;
+ t_uint16 dma_it_status;
+} t_mmdsp_dma_ctrl_regs_16;
+
+typedef volatile struct {
+ t_uint32 dma_ctrl; /* dma0: 0x3a800, dma1: +0x20 ... */
+ t_uint32 dma_int_base; /* ... 0x3a804 */
+ t_uint32 dma_int_length; /* ... 0x3a808 */
+ t_uint32 dma_ext_baseh;
+ t_uint32 dma_ext_basel;
+ t_uint32 dma_count;
+ t_uint32 dma_ext_length;
+ t_uint32 dma_it_status;
+} t_mmdsp_dma_ctrl_regs_32;
+
+/* IO registers */
+typedef volatile struct {
+ t_mmdsp_field_16 io_bit[MMDSP_NB_IO];
+ t_mmdsp_field_16 io_lsb;
+ t_mmdsp_field_16 io_msb;
+ t_mmdsp_field_16 io_all;
+ t_mmdsp_field_16 io_en;
+} t_mmdsp_io_regs_16;
+
+typedef volatile struct {
+ t_mmdsp_field_32 io_bit[MMDSP_NB_IO];
+ t_mmdsp_field_32 io_lsb;
+ t_mmdsp_field_32 io_msb;
+ t_mmdsp_field_32 io_all;
+ t_mmdsp_field_32 io_en;
+} t_mmdsp_io_regs_32;
+
+/* HOST Registers bit mapping */
+#define HOST_GATEDCLK_ITREMAP MASK_BIT0
+#define HOST_GATEDCLK_SYSDMA MASK_BIT1
+#define HOST_GATEDCLK_INTEG_REGS MASK_BIT2
+#define HOST_GATEDCLK_TIMER_GPIO MASK_BIT3
+#define HOST_GATEDCLK_XBUSDMA MASK_BIT4
+#define HOST_GATEDCLK_STACKCTRL MASK_BIT5
+#define HOST_GATEDCLK_ITC MASK_BIT6
+
+/* Only for STn8820 and STn8500 */
+#define HOST_PWR_DBG_MODE MASK_BIT0
+#define HOST_PWR_DC_STATUS (MASK_BIT1 | MASK_BIT2 | MASK_BIT3 | MASK_BIT4 | MASK_BIT5)
+#define HOST_PWR_DE_STATUS MASK_BIT6
+#define HOST_PWR_STOV_STATUS MASK_BIT7
+
+/* HOST Registers */
+typedef volatile struct {
+ t_uint16 ident; /*0x...60000*/
+ t_uint16 identx[4]; /*0x...60002..8*/
+ t_uint16 r5; /*0x...6000a*/
+ t_uint16 r6; /*0x...6000c*/
+ t_uint16 inte[2]; /*0x...6000e..10*/
+ t_uint16 intx[2]; /*0x...60012..14*/
+ t_uint16 int_ris[2]; /*0x...60016..18*/
+ t_uint16 intpol; /*0x...6001a*/
+ t_uint16 pwr; /*0x...6001c*/ /* only on STn8820 and STn8500 */
+ t_uint16 gatedclk; /*0x...6001e*/
+ t_uint16 softreset; /*0x...60020*/
+ t_uint16 int_icr[2]; /*0x...60022..24*/
+ t_uint16 cmd[4]; /*0x...60026..2c*/
+ t_uint16 RESERVED4;
+ t_uint16 int_mis0; /*0x...60030*/
+ t_uint16 RESERVED5;
+ t_uint16 RESERVED6;
+ t_uint16 RESERVED7;
+ t_uint16 i2cdiv; /*0x...60038*/
+ t_uint16 int_mis1; /*0x...6003a*/
+ t_uint16 RESERVED8;
+ t_uint16 RESERVED9;
+ t_uint16 emul_udata[8]; /*0x...60040..4e*/
+ t_uint16 emul_uaddrl; /*0x...60050*/
+ t_uint16 emul_uaddrm; /*0x...60052*/
+ t_uint16 emul_ucmd; /*0x...60054*/
+ t_uint16 emul_ubkcmd; /*0x...60056*/
+ t_uint16 emul_bk2addl; /*0x...60058*/
+ t_uint16 emul_bk2addm; /*0x...6005a*/
+ t_uint16 emul_bk2addh; /*0x...6005c*/
+ t_uint16 emul_mdata[3]; /*0x...6005e..62*/
+ t_uint16 emul_maddl; /*0x...60064*/
+ t_uint16 emul_maddm; /*0x...60066*/
+ t_uint16 emul_mcmd; /*0x...60068*/
+ t_uint16 emul_maddh; /*0x...6006a*/
+ t_uint16 emul_uaddrh; /*0x...6006c*/
+ t_uint16 emul_bk_eql; /*0x...6006e*/
+ t_uint16 emul_bk_eqh; /*0x...60070*/
+ t_uint16 emul_bk_combi; /*0x...60072*/
+ t_uint16 emul_clockcmd; /*0x...60074*/
+ t_uint16 emul_stepcmd; /*0x...60076*/
+ t_uint16 emul_scanreg; /*0x...60078*/
+ t_uint16 emul_breakcountl; /*0x...6007a*/
+ t_uint16 emul_breakcounth; /*0x...6007c*/
+ t_uint16 emul_forcescan; /*0x...6007e*/
+ t_uint16 user_area[(0x200 - 0x80)>>1];
+} t_mmdsp_host_regs_16;
+
+typedef volatile struct {
+ t_uint32 ident; /*0x...60000*/
+ t_uint32 identx[4]; /*0x...60004..10*/
+ t_uint32 r5; /*0x...60014*/
+ t_uint32 r6; /*0x...60018*/
+ t_uint32 inte[2]; /*0x...6001c..20*/
+ t_uint32 intx[2]; /*0x...60024..28*/
+ t_uint32 int_ris[2]; /*0x...6002c..30*/
+ t_uint32 intpol; /*0x...60034*/
+ t_uint32 pwr; /*0x...60038*/ /* only on STn8820 and STn8500 */
+ t_uint32 gatedclk; /*0x...6003c*/
+ t_uint32 softreset; /*0x...60040*/
+ t_uint32 int_icr[2]; /*0x...60044..48*/
+ t_uint32 cmd[4]; /*0x...6004c..58*/
+ t_uint32 RESERVED4;
+ t_uint32 int_mis0; /*0x...60060*/
+ t_uint32 RESERVED5;
+ t_uint32 RESERVED6;
+ t_uint32 RESERVED7;
+ t_uint32 i2cdiv; /*0x...60070*/
+ t_uint32 int_mis1; /*0x...60074*/
+ t_uint32 RESERVED8;
+ t_uint32 RESERVED9;
+ t_uint32 emul_udata[8]; /*0x...60080..9c*/
+ t_uint32 emul_uaddrl; /*0x...600a0*/
+ t_uint32 emul_uaddrm; /*0x...600a4*/
+ t_uint32 emul_ucmd; /*0x...600a8*/
+ t_uint32 emul_ubkcmd; /*0x...600ac*/
+ t_uint32 emul_bk2addl; /*0x...600b0*/
+ t_uint32 emul_bk2addm; /*0x...600b4*/
+ t_uint32 emul_bk2addh; /*0x...600b8*/
+ t_uint32 emul_mdata[3]; /*0x...600bc..c4*/
+ t_uint32 emul_maddl; /*0x...600c8*/
+ t_uint32 emul_maddm; /*0x...600cc*/
+ t_uint32 emul_mcmd; /*0x...600d0*/
+ t_uint32 emul_maddh; /*0x...600d4*/
+ t_uint32 emul_uaddrh; /*0x...600d8*/
+ t_uint32 emul_bk_eql; /*0x...600dc*/
+ t_uint32 emul_bk_eqh; /*0x...600e0*/
+ t_uint32 emul_bk_combi; /*0x...600e4*/
+ t_uint32 emul_clockcmd; /*0x...600e8*/
+ t_uint32 emul_stepcmd; /*0x...600ec*/
+ t_uint32 emul_scanreg; /*0x...600f0*/
+ t_uint32 emul_breakcountl; /*0x...600f4*/
+ t_uint32 emul_breakcounth; /*0x...600f8*/
+ t_uint32 emul_forcescan; /*0x...600fc*/
+ t_uint32 user_area[(0x400 - 0x100)>>2];
+} t_mmdsp_host_regs_32;
+
+/* MMIO blocks */
+#if defined(__STN_8820) || defined(__STN_8500)
+typedef volatile struct {
+ t_uint16 RESERVED1[(0xD400-0x8000)>>1];
+
+ t_mmdsp_dma_ctrl_regs_16 dma_ctrl[MMDSP_NB_DMA_CTRL];
+
+ t_uint16 RESERVED2[(0xD800-0xD440)>>1];
+
+ t_mmdsp_dcache_regs_16 dcache;
+
+ t_uint16 RESERVED3[(0xE000-0xD840)>>1];
+
+ t_mmdsp_io_regs_16 io;
+
+ t_uint16 RESERVED4[(0x60-0x50)>>1];
+
+ t_mmdsp_timer_regs_16 timer[MMDSP_NB_TIMER];
+
+ t_uint16 RESERVED5[(0x410-0x78)>>1];
+
+ t_mmdsp_field_16 sem[MMDSP_NB_BIT_SEM];
+
+ t_uint16 RESERVED6[(0x450-0x430)>>1];
+
+ t_mmdsp_field_16 ipen;
+ t_uint16 itip_0;
+ t_uint16 itip_1;
+ t_uint16 itip_2;
+ t_uint16 itip_3;
+ t_uint16 itop_0;
+ t_uint16 itop_1;
+ t_uint16 itop_2;
+ t_uint16 itop_3;
+ t_uint16 RESERVED7[(0x8a-0x64)>>1];
+ t_uint16 itip_4;
+ t_uint16 itop_4;
+
+ t_uint16 RESERVED8[(0x7e0-0x48e)>>1];
+
+ t_mmdsp_field_16 id[4];
+ t_mmdsp_field_16 idp[4];
+
+ t_mmdsp_dma_if_regs_16 dma_if[MMDSP_NB_DMA_IF];
+
+ t_uint16 RESERVED9[(0xC00-0x900)>>1];
+
+ t_mmdsp_field_16 emu_unit_maskit;
+ t_mmdsp_field_16 RESERVED[3];
+ t_mmdsp_field_16 config_data_mem;
+ t_mmdsp_field_16 compatibility;
+
+ t_uint16 RESERVED10[(0xF000-0xEC18)>>1];
+
+ t_uint16 stbus_if_config;
+ t_uint16 stbus_if_mode;
+ t_uint16 stbus_if_status;
+ t_uint16 stbus_if_security;
+ t_uint16 stbus_if_flush;
+ t_uint16 stbus_reserved;
+ t_uint16 stbus_if_priority;
+ t_uint16 stbus_msb_attribut;
+
+ t_uint16 RESERVED11[(0xFC00-0xF010)>>1];
+
+ t_mmdsp_field_16 itremap_reg[MMDSP_NB_ITREMAP_REG];
+ t_mmdsp_field_16 itmsk_l_reg;
+ t_mmdsp_field_16 itmsk_h_reg;
+
+ t_uint16 RESERVED12[(0xfc9c - 0xfc88)>>1];
+
+ t_mmdsp_field_16 itmemo_l_reg;
+ t_mmdsp_field_16 itmeme_h_reg;
+
+ t_uint16 RESERVED13[(0xfd00 - 0xfca4)>>1];
+
+ t_mmdsp_field_16 itremap1_reg[MMDSP_NB_ITREMAP_REG];
+
+ t_uint16 RESERVED14[(0x60000 - 0x5fd80)>>1];
+} t_mmdsp_mmio_regs_16;
+
+
+typedef volatile struct {
+ t_uint32 RESERVED1[(0xa800)>>2];
+
+ t_mmdsp_dma_ctrl_regs_32 dma_ctrl[MMDSP_NB_DMA_CTRL];
+
+ t_uint32 RESERVED2[(0xb000-0xa880)>>2];
+
+ t_mmdsp_dcache_regs_32 dcache;
+
+ t_uint32 RESERVED3[(0xc000-0xb080)>>2];
+
+ t_mmdsp_io_regs_32 io;
+
+ t_uint32 RESERVED4[(0xc0-0xa0)>>2];
+
+ t_mmdsp_timer_regs_32 timer[MMDSP_NB_TIMER];
+
+ t_uint32 RESERVED5[(0x820-0x0f0)>>2];
+
+ t_mmdsp_field_32 sem[MMDSP_NB_BIT_SEM];
+
+ t_uint32 RESERVED6[(0x8a0-0x860)>>2];
+
+ t_mmdsp_field_32 ipen;
+ t_uint32 itip_0;
+ t_uint32 itip_1;
+ t_uint32 itip_2;
+ t_uint32 itip_3;
+ t_uint32 itop_0;
+ t_uint32 itop_1;
+ t_uint32 itop_2;
+ t_uint32 itop_3;
+ t_uint32 RESERVED7[(0x914-0x8c8)>>2];
+ t_uint32 itip_4;
+ t_uint32 itop_4;
+
+ t_uint32 RESERVED8[(0xcfc0-0xc91c)>>2];
+
+ t_mmdsp_field_32 id[4];
+ t_mmdsp_field_32 idp[4];
+
+ t_mmdsp_dma_if_regs_32 dma_if[MMDSP_NB_DMA_IF];
+
+ t_uint32 RESERVED9[(0x800-0x200)>>2];
+
+ t_mmdsp_field_32 emu_unit_maskit;
+ t_mmdsp_field_32 RESERVED[3];
+ t_mmdsp_field_32 config_data_mem;
+ t_mmdsp_field_32 compatibility;
+
+ t_uint32 RESERVED10[(0xE000-0xD830)>>2];
+
+ t_uint32 stbus_if_config;
+ t_uint32 stbus_if_mode;
+ t_uint32 stbus_if_status;
+ t_uint32 stbus_if_security;
+ t_uint32 stbus_if_flush;
+ t_uint32 stbus_reserved;
+ t_uint32 stbus_if_priority;
+ t_uint32 stbus_msb_attribut;
+
+ t_uint32 RESERVED11[(0xF800-0xE020)>>2];
+
+ t_mmdsp_field_32 itremap_reg[MMDSP_NB_ITREMAP_REG];
+ t_mmdsp_field_32 itmsk_l_reg;
+ t_mmdsp_field_32 itmsk_h_reg;
+
+ t_uint32 RESERVED12[(0xf938 - 0xf910)>>2];
+
+ t_mmdsp_field_32 itmemo_l_reg;
+ t_mmdsp_field_32 itmeme_h_reg;
+
+ t_uint32 RESERVED13[(0xfa00 - 0xf948)>>2];
+
+ t_mmdsp_field_32 itremap1_reg[MMDSP_NB_ITREMAP_REG];
+
+ t_uint32 RESERVED14[(0x40000 - 0x3fb00)>>2];
+} t_mmdsp_mmio_regs_32;
+#endif /* __STN_8820 or __STN_8500 */
+
+#ifdef __STN_8815
+typedef volatile struct {
+ t_uint16 RESERVED1[(0xD400-0x8000)>>1];
+
+ t_mmdsp_dma_ctrl_regs_16 dma_ctrl[MMDSP_NB_DMA_CTRL];
+
+ t_uint16 RESERVED2[(0xD800-0xD440)>>1];
+
+ t_mmdsp_dcache_regs_16 dcache;
+
+ t_uint16 RESERVED3[(0xE000-0xD840)>>1];
+
+ t_mmdsp_io_regs_16 io;
+
+ t_uint16 RESERVED4[(0x60-0x50)>>1];
+
+ t_mmdsp_timer_regs_16 timer[MMDSP_NB_TIMER];
+
+ t_uint16 RESERVED5[(0x410-0x78)>>1];
+
+ t_mmdsp_field_16 sem[MMDSP_NB_BIT_SEM];
+
+ t_uint16 RESERVED6[(0x450-0x430)>>1];
+
+ t_mmdsp_field_16 ipen;
+ t_uint16 itip_0;
+ t_uint16 itip_1;
+ t_uint16 itip_2;
+ t_uint16 itip_3;
+ t_uint16 itop_0;
+ t_uint16 itop_1;
+ t_uint16 itop_2;
+ t_uint16 itop_3;
+ t_uint16 RESERVED7[(0x8a-0x64)>>1];
+ t_uint16 itip_4;
+ t_uint16 itop_4;
+
+ t_uint16 RESERVED8[(0x7e0-0x48e)>>1];
+
+ t_mmdsp_field_16 id[4];
+ t_mmdsp_field_16 idp[4];
+
+ t_mmdsp_dma_if_regs_16 dma_if[MMDSP_NB_DMA_IF];
+
+ t_uint16 RESERVED9[(0xC00-0x900)>>1];
+
+ t_mmdsp_field_16 emu_unit_maskit;
+ t_mmdsp_field_16 RESERVED[3];
+ t_mmdsp_field_16 config_data_mem;
+ t_mmdsp_field_16 compatibility;
+
+ t_uint16 RESERVED10[(0xF000-0xEC18)>>1];
+
+ t_uint16 ahb_if_config;
+ t_uint16 ahb_if_mode;
+ t_uint16 ahb_if_status;
+ t_uint16 ahb_if_security;
+ t_uint16 ahb_if_flush;
+
+ t_uint16 RESERVED11[(0xFC00-0xF00A)>>1];
+
+ t_mmdsp_field_16 itremap_reg[MMDSP_NB_ITREMAP_REG];
+ t_mmdsp_field_16 itmsk_l_reg;
+ t_mmdsp_field_16 itmsk_h_reg;
+
+ t_uint16 RESERVED12[(0xfc9c - 0xfc88)>>1];
+
+ t_mmdsp_field_16 itmemo_l_reg;
+ t_mmdsp_field_16 itmeme_h_reg;
+
+ t_uint16 RESERVED13[(0xfd00 - 0xfca4)>>1];
+
+ t_mmdsp_field_16 itremap1_reg[MMDSP_NB_ITREMAP_REG];
+
+ t_uint16 RESERVED14[(0x60000 - 0x5fd80)>>1];
+} t_mmdsp_mmio_regs_16;
+
+
+typedef volatile struct {
+ t_uint32 RESERVED1[(0xa800)>>2];
+
+ t_mmdsp_dma_ctrl_regs_32 dma_ctrl[MMDSP_NB_DMA_CTRL];
+
+ t_uint32 RESERVED2[(0xb000-0xa880)>>2];
+
+ t_mmdsp_dcache_regs_32 dcache;
+
+ t_uint32 RESERVED3[(0xc000-0xb080)>>2];
+
+ t_mmdsp_io_regs_32 io;
+
+ t_uint32 RESERVED4[(0xc0-0xa0)>>2];
+
+ t_mmdsp_timer_regs_32 timer[MMDSP_NB_TIMER];
+
+ t_uint32 RESERVED5[(0x820-0x0f0)>>2];
+
+ t_mmdsp_field_32 sem[MMDSP_NB_BIT_SEM];
+
+ t_uint32 RESERVED6[(0x8a0-0x860)>>2];
+
+ t_mmdsp_field_32 ipen;
+ t_uint32 itip_0;
+ t_uint32 itip_1;
+ t_uint32 itip_2;
+ t_uint32 itip_3;
+ t_uint32 itop_0;
+ t_uint32 itop_1;
+ t_uint32 itop_2;
+ t_uint32 itop_3;
+ t_uint32 RESERVED7[(0x914-0x8c8)>>2];
+ t_uint32 itip_4;
+ t_uint32 itop_4;
+
+ t_uint32 RESERVED8[(0xcfc0-0xc91c)>>2];
+
+ t_mmdsp_field_32 id[4];
+ t_mmdsp_field_32 idp[4];
+
+ t_mmdsp_dma_if_regs_32 dma_if[MMDSP_NB_DMA_IF];
+
+ t_uint32 RESERVED9[(0x800-0x200)>>2];
+
+ t_mmdsp_field_32 emu_unit_maskit;
+ t_mmdsp_field_32 RESERVED[3];
+ t_mmdsp_field_32 config_data_mem;
+ t_mmdsp_field_32 compatibility;
+
+ t_uint32 RESERVED10[(0xE000-0xD830)>>2];
+
+ t_uint32 ahb_if_config;
+ t_uint32 ahb_if_mode;
+ t_uint32 ahb_if_status;
+ t_uint32 ahb_if_security;
+ t_uint32 ahb_if_flush;
+
+ t_uint32 RESERVED11[(0xF800-0xE014)>>2];
+
+ t_mmdsp_field_32 itremap_reg[MMDSP_NB_ITREMAP_REG];
+ t_mmdsp_field_32 itmsk_l_reg;
+ t_mmdsp_field_32 itmsk_h_reg;
+
+ t_uint32 RESERVED12[(0xf938 - 0xf910)>>2];
+
+ t_mmdsp_field_32 itmemo_l_reg;
+ t_mmdsp_field_32 itmeme_h_reg;
+
+ t_uint32 RESERVED13[(0xfa00 - 0xf948)>>2];
+
+ t_mmdsp_field_32 itremap1_reg[MMDSP_NB_ITREMAP_REG];
+
+ t_uint32 RESERVED14[(0x40000 - 0x3fb00)>>2];
+} t_mmdsp_mmio_regs_32;
+#endif /* __STN_8815 */
+
+/* SxA (Smart Video/Imaging Accelerator) memory map */
+typedef volatile struct {
+ t_uint32 mem24[MMDSP_NB_BLOCK_RAM*MMDSP_RAM_BLOCK_SIZE]; /* 0x0000 -> 0x20000 */
+
+ t_uint32 RESERVED1[(0x30000 - 0x20000)>>2];
+
+ t_mmdsp_mmio_regs_32 mmio_32;
+
+ t_uint16 mem16[MMDSP_NB_BLOCK_RAM*MMDSP_RAM_BLOCK_SIZE]; /* 0x40000 -> 0x50000 */
+
+ t_uint32 RESERVED2[(0x58000 - 0x50000)>>2];
+
+ t_mmdsp_mmio_regs_16 mmio_16;
+
+ t_mmdsp_host_regs_16 host_reg;
+ /*
+ union host_reg {
+ t_mmdsp_host_regs_16 reg16;
+ t_mmdsp_host_regs_32 reg32;
+ };
+ */
+} t_mmdsp_hw_regs;
+
+#endif // __INC_MMDSP_HWP_H
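The register structures above pad each field to its hardware offset with RESERVED arrays whose sizes are computed from address differences ((end - start) >> shift). A minimal, self-contained illustration of the idiom, with made-up offsets rather than the real MMDSP map:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy register block: the softreset field is padded to offset 0x40 with the
 * same (end - start) >> shift idiom used in the structs above. */
typedef volatile struct {
    uint32_t ident;                          /* 0x00 */
    uint32_t RESERVED[(0x40 - 0x04) >> 2];   /* 0x04 .. 0x3c */
    uint32_t softreset;                      /* 0x40 */
} toy_regs_32;

int main(void)
{
    printf("softreset offset = 0x%zx\n", offsetof(toy_regs_32, softreset));
    return 0;
}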
diff --git a/drivers/staging/nmf-cm/cm/engine/dsp/mmdsp/inc/mmdsp_macros.h b/drivers/staging/nmf-cm/cm/engine/dsp/mmdsp/inc/mmdsp_macros.h
new file mode 100644
index 00000000000..b8911d27609
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/dsp/mmdsp/inc/mmdsp_macros.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef __INC_MMDSP_DSP_MACROS
+#define __INC_MMDSP_DSP_MACROS
+
+#include <cm/engine/dsp/mmdsp/inc/mmdsp_hwp.h>
+
+#define MMDSP_ENABLE_WRITE_POSTING(pRegs) \
+{ \
+ (pRegs)->mmio_16.dcache.control |= DCACHE_CONTROL_WRITE_POSTING_ENABLE; \
+}
+
+#define MMDSP_FLUSH_DCACHE(pRegs) \
+{ /* Today, only full cache flush (clear all the ways) */ \
+ (pRegs)->mmio_16.dcache.command = DCACHE_CMD_FLUSH; \
+}
+
+#define MMDSP_FLUSH_DCACHE_BY_SERVICE(pRegs, startAddr, endAddr)
+
+#define MMDSP_FLUSH_ICACHE(pRegs) \
+{ /* Flush the Instruction cache */ \
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_ICACHE_FLUSH_REG, (IHOST_ICACHE_FLUSH_ALL_ENTRIES_CMD | IHOST_ICACHE_FLUSH_CMD_ENABLE)); \
+}
+
+#ifndef __STN_8810
+#define MMDSP_FLUSH_ICACHE_BY_SERVICE(pRegs, startAddr, endAddr) \
+{ /* Flush the Instruction cache by service */ \
+ /*t_uint64 start_clear_addr = startAddr & ~(MMDSP_ICACHE_LINE_SIZE_IN_INST - 1);*/ \
+ t_uint64 start_clear_addr = (startAddr)>>2; \
+ t_uint64 end_clear_addr = ((endAddr) + MMDSP_ICACHE_LINE_SIZE_IN_INST) & ~(MMDSP_ICACHE_LINE_SIZE_IN_INST - 1); \
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_ICACHE_START_CLEAR_REG, start_clear_addr); \
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_ICACHE_END_CLEAR_REG, end_clear_addr); \
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_ICACHE_FLUSH_REG, (IHOST_ICACHE_FLUSH_BY_SERVICE | IHOST_ICACHE_FLUSH_CMD_ENABLE)); \
+}
+#else
+#define MMDSP_FLUSH_ICACHE_BY_SERVICE(pRegs, startAddr, endAddr) {(void)pRegs; (void)startAddr; (void)endAddr; }
+#endif
+
+#define MMDSP_RESET_CORE(pRegs) \
+{ /* Assert DSP core soft reset */ \
+ (pRegs)->host_reg.softreset = 1; \
+}
+
+#define MMDSP_START_CORE(pRegs) \
+{ \
+ /* Enable external memory access (set bit 3 of ubkcmd) */ \
+ (pRegs)->host_reg.emul_ubkcmd |= MMDSP_UBKCMD_EXT_CODE_MEM_ACCESS_ENABLE; \
+ \
+ /* Start core clock */ \
+ (pRegs)->host_reg.emul_clockcmd = MMDSP_CLOCKCMD_START_CLOCK; \
+}
+
+#define MMDSP_STOP_CORE(pRegs) \
+{ \
+ /* Disable external memory access (reset bit 3 of ubkcmd) */ \
+ (pRegs)->host_reg.emul_ubkcmd = MMDSP_UBKCMD_EXT_CODE_MEM_ACCESS_DISABLE; \
+ \
+ /* Stop core clock */ \
+ (pRegs)->host_reg.emul_clockcmd = MMDSP_CLOCKCMD_STOP_CLOCK; \
+}
+
+#define MMDSP_ASSERT_IRQ(pRegs, irqNum) \
+{ \
+ (pRegs)->host_reg.cmd[irqNum] = 1; \
+}
+
+#define MMDSP_ACKNOWLEDGE_IRQ(pRegs, irqNum) \
+{ \
+ volatile t_uint16 dummy; \
+ dummy =(pRegs)->host_reg.intx[irqNum]; \
+}
+
+#define MMDSP_WRITE_XWORD(pRegs, offset, value) \
+{ \
+ (pRegs)->mem24[offset] = value; \
+}
+
+#define MMDSP_READ_XWORD(pRegs, offset) (pRegs)->mem24[offset]
+
+#endif /* __INC_MMDSP_DSP_MACROS */
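MMDSP_FLUSH_ICACHE_BY_SERVICE above shifts the start address and rounds the end address up to the next instruction-cache line before programming the clear range. A standalone sketch of that arithmetic, assuming a hypothetical line size of 8 instructions (the real constant is MMDSP_ICACHE_LINE_SIZE_IN_INST, defined elsewhere in the driver):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical value, for illustration only; the driver uses
 * MMDSP_ICACHE_LINE_SIZE_IN_INST from its own headers. */
#define LINE_SIZE_IN_INST 8ULL

int main(void)
{
    uint64_t start = 0x1234;   /* first instruction of the flushed range */
    uint64_t end   = 0x1301;   /* last instruction of the flushed range  */

    /* Same arithmetic as MMDSP_FLUSH_ICACHE_BY_SERVICE: convert the start
     * address (>> 2) and round the end address up to the next line. */
    uint64_t start_clear = start >> 2;
    uint64_t end_clear   = (end + LINE_SIZE_IN_INST) & ~(LINE_SIZE_IN_INST - 1);

    printf("start_clear = 0x%llx, end_clear = 0x%llx\n",
           (unsigned long long)start_clear, (unsigned long long)end_clear);
    return 0;
}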
diff --git a/drivers/staging/nmf-cm/cm/engine/dsp/src/dsp.c b/drivers/staging/nmf-cm/cm/engine/dsp/src/dsp.c
new file mode 100644
index 00000000000..ef11a5265aa
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/dsp/src/dsp.c
@@ -0,0 +1,1083 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/dsp/mmdsp/inc/mmdsp_macros.h>
+
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/semaphores/inc/semaphores.h>
+#include <cm/engine/power_mgt/inc/power.h>
+#include <cm/engine/memory/inc/migration.h>
+#include <cm/engine/trace/inc/trace.h>
+
+#include <share/inc/nomadik_mapping.h>
+
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+#include <cm/engine/component/inc/component_type.h>
+
+static t_dsp_allocator_desc esramDesc;
+static t_dsp_desc mpcDesc[NB_CORE_IDS];
+static t_mmdsp_hw_regs *pMmdspRegs[NB_CORE_IDS];
+
+struct s_base_descr
+{
+ t_uint32 startAddress[2 /* DSP16 = 0, DSP24 = 1*/];
+ t_dsp_segment_type segmentType;
+};
+
+#if defined(__STN_8500) && (__STN_8500 > 10)
+
+#define DATA_BASE_NUMBER 4
+
+// In bytes
+#define SDRAM_CODE_SPACE_SPLIT 0x8000
+#define ESRAM_CODE_SPACE_SPLIT 0x4000
+#define SDRAM_DATA_SPACE_SPLIT 0x40000 // This is the modulo constraint of mmdsp
+#define ESRAM_DATA_SPACE_SPLIT 0x40000
+
+// In MMDSP word
+static const struct s_base_descr DATA_ADDRESS_BASE[DATA_BASE_NUMBER + 1 /* For guard */] = {
+ {{SDRAMMEM16_BASE_ADDR, SDRAMMEM24_BASE_ADDR}, SDRAM_DATA_EE},
+ {{SDRAMMEM16_BASE_ADDR + (SDRAM_DATA_SPACE_SPLIT / 2), SDRAMMEM24_BASE_ADDR + (SDRAM_DATA_SPACE_SPLIT / 4)}, SDRAM_DATA_USER},
+ {{ESRAMMEM16_BASE_ADDR, ESRAMMEM24_BASE_ADDR}, ESRAM_DATA_EE},
+ {{ESRAMMEM16_BASE_ADDR + (ESRAM_DATA_SPACE_SPLIT / 2), ESRAMMEM24_BASE_ADDR + (ESRAM_DATA_SPACE_SPLIT / 4)}, ESRAM_DATA_USER},
+ {{MMIO_BASE_ADDR, SDRAMMEM16_BASE_ADDR}, NB_DSP_SEGMENT_TYPE /* Not used*/}
+};
+
+#else
+
+#define DATA_BASE_NUMBER 2
+
+// In MMDSP word
+static const struct s_base_descr DATA_ADDRESS_BASE[DATA_BASE_NUMBER + 1 /* For guard */] = {
+ {{SDRAMMEM16_BASE_ADDR, SDRAMMEM24_BASE_ADDR}, SDRAM_DATA_EE},
+ {{ESRAMMEM16_BASE_ADDR, ESRAMMEM24_BASE_ADDR}, ESRAM_DATA_EE},
+ {{MMIO_BASE_ADDR, SDRAMMEM16_BASE_ADDR}, NB_DSP_SEGMENT_TYPE /* Not used*/}
+};
+
+#endif
+
+#if defined(__STN_8500) && (__STN_8500 > 10)
+// In word
+static const t_uint32 CODE_ADDRESS_BASE[4] = {
+ SDRAMTEXT_BASE_ADDR,
+ SDRAMTEXT_BASE_ADDR + (SDRAM_CODE_SPACE_SPLIT / 8),
+ ESRAMTEXT_BASE_ADDR,
+ ESRAMTEXT_BASE_ADDR + (ESRAM_CODE_SPACE_SPLIT / 8)
+};
+#endif
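The split constants above are given in bytes and then divided by 2, 4 or 8 when turned into MMDSP word addresses; my reading is that a 16-bit data word is presented to the host as 2 bytes, a 24-bit data word as one 32-bit host word, and an instruction as one 64-bit host word. A small standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t data_split_bytes = 0x40000; /* SDRAM_DATA_SPACE_SPLIT above */
    const uint32_t code_split_bytes = 0x8000;  /* SDRAM_CODE_SPACE_SPLIT above */

    /* Assumed host view: 16-bit data word = 2 host bytes, 24-bit data word
     * = one 32-bit host word, one instruction = one 64-bit host word. */
    printf("data split: 0x%x 16-bit words, 0x%x 24-bit words\n",
           data_split_bytes / 2, data_split_bytes / 4);
    printf("code split: 0x%x instructions\n", code_split_bytes / 8);
    return 0;
}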
+
+static void arm_Init(void);
+static t_cm_error mmdsp_Init(const t_cm_system_address *dspSystemAddr,
+ t_uint8 nbXramBlocks, t_uint8 nbYramBlocks,
+ t_dsp_allocator_desc *sdramCodeDesc,
+ t_dsp_allocator_desc *sdramDataDesc,
+ t_cm_domain_id eeDomain,
+ t_dsp_desc *pDspDesc,
+ t_mmdsp_hw_regs **pRegs);
+static t_cm_error mmdsp_Configure(t_nmf_core_id coreId, t_mmdsp_hw_regs *pRegs, const t_dsp_desc *pDspDesc);
+static t_cm_error mmdsp_ConfigureAfterBoot(t_nmf_core_id coreId, t_uint8 nbXramBlocks, t_uint8 nbYramBlocks);
+static void cm_DSP_SEM_Init(t_nmf_core_id coreId);
+
+PUBLIC const t_dsp_desc* cm_DSP_GetState(t_nmf_core_id coreId)
+{
+ return &mpcDesc[coreId];
+}
+PUBLIC void cm_DSP_SetStatePanic(t_nmf_core_id coreId)
+{
+ mpcDesc[coreId].state = MPC_STATE_PANIC;
+}
+
+PUBLIC void cm_DSP_Init(const t_nmf_memory_segment *pEsramDesc)
+{
+ t_nmf_core_id coreId;
+ int i;
+
+ /* Create esram desc */
+ esramDesc.allocDesc = cm_MM_CreateAllocator(pEsramDesc->size, 0, "esram");
+ esramDesc.baseAddress = pEsramDesc->systemAddr;
+ esramDesc.referenceCounter = 1; // Don't free it with destroy mechanism
+
+ /* Create ARM */
+ arm_Init();
+
+ mpcDesc[ARM_CORE_ID].state = MPC_STATE_BOOTED;
+
+ /* Reset MPC configuration */
+ for (coreId = FIRST_MPC_ID; coreId <= LAST_CORE_ID; coreId++)
+ {
+ mpcDesc[coreId].state = MPC_STATE_UNCONFIGURED;
+
+ for(i = 0; i < NB_DSP_MEMORY_TYPE; i++)
+ mpcDesc[coreId].allocator[i] = NULL;
+ }
+
+}
+
+PUBLIC void cm_DSP_Destroy(void)
+{
+ t_nmf_core_id coreId;
+ int i;
+
+ for (coreId = ARM_CORE_ID; coreId <= LAST_CORE_ID; coreId++)
+ {
+ for(i = 0; i < NB_DSP_MEMORY_TYPE; i++)
+ {
+ if (mpcDesc[coreId].allocator[i] != NULL)
+ {
+ if(--mpcDesc[coreId].allocator[i]->referenceCounter == 0)
+ {
+ cm_MM_DeleteAllocator(mpcDesc[coreId].allocator[i]->allocDesc);
+
+ OSAL_Free(mpcDesc[coreId].allocator[i]);
+ }
+ }
+ }
+ }
+
+ cm_MM_DeleteAllocator(esramDesc.allocDesc);
+}
+
+
+PUBLIC t_cm_error cm_DSP_Add(t_nmf_core_id coreId,
+ t_uint8 nbYramBanks,
+ const t_cm_system_address *pDspMapDesc,
+ const t_cm_domain_id eeDomain,
+ t_dsp_allocator_desc *sdramCodeAllocDesc,
+ t_dsp_allocator_desc *sdramDataAllocDesc)
+{
+ t_cm_error error;
+
+ /* checking nbYramBanks is valid */
+ if (nbYramBanks >= SxA_NB_BLOCK_RAM)
+ return CM_MPC_INVALID_CONFIGURATION;
+
+ if((error = cm_DM_CheckDomain(eeDomain, DOMAIN_NORMAL)) != CM_OK)
+ return error;
+
+ mpcDesc[coreId].domainEE = eeDomain;
+ mpcDesc[coreId].nbYramBank = nbYramBanks;
+ mpcDesc[coreId].state = MPC_STATE_BOOTABLE;
+
+ return mmdsp_Init(
+ pDspMapDesc,
+ SxA_NB_BLOCK_RAM, /* nb of data tcm bank minus one (reserved for cache) */
+ nbYramBanks,
+ sdramCodeAllocDesc,
+ sdramDataAllocDesc,
+ eeDomain,
+ &mpcDesc[coreId],
+ &pMmdspRegs[coreId]
+ );
+}
+
+PUBLIC t_cm_error cm_DSP_Boot(t_nmf_core_id coreId)
+{
+ t_cm_error error;
+
+ // Enable the associated power domain
+ if((error = cm_PWR_EnableMPC(MPC_PWR_CLOCK, coreId)) != CM_OK)
+ return error;
+
+ cm_SEM_PowerOn[coreId](coreId);
+
+ if((error = mmdsp_Configure(
+ coreId,
+ pMmdspRegs[coreId],
+ &mpcDesc[coreId])) != CM_OK)
+ {
+ cm_PWR_DisableMPC(MPC_PWR_CLOCK, coreId);
+ }
+
+ // Put it in auto-idle mode; this is the default in step 2 of the power implementation
+ if((error = cm_PWR_EnableMPC(MPC_PWR_AUTOIDLE, coreId)) != CM_OK)
+ return error;
+
+ return error;
+}
+
+/*
+ * This method is required because the MMDSP C bootstrap sets some values that must be set differently.
+ */
+PUBLIC void cm_DSP_ConfigureAfterBoot(t_nmf_core_id coreId)
+{
+ mpcDesc[coreId].state = MPC_STATE_BOOTED;
+
+ mmdsp_ConfigureAfterBoot(coreId, SxA_NB_BLOCK_RAM, mpcDesc[coreId].nbYramBank);
+
+ cm_DSP_SEM_Init(coreId);
+}
+
+PUBLIC void cm_DSP_Stop(t_nmf_core_id coreId)
+{
+ MMDSP_STOP_CORE(pMmdspRegs[coreId]);
+
+ {
+ volatile t_uint32 loopme = 0xfff;
+ while(loopme--) ;
+ }
+}
+
+PUBLIC void cm_DSP_Start(t_nmf_core_id coreId)
+{
+ MMDSP_START_CORE(pMmdspRegs[coreId]);
+
+ {
+ volatile t_uint32 loopme = 0xfff;
+ while(loopme--) ;
+ }
+}
+
+PUBLIC void cm_DSP_Shutdown(t_nmf_core_id coreId)
+{
+ MMDSP_FLUSH_DCACHE(pMmdspRegs[coreId]);
+ MMDSP_FLUSH_ICACHE(pMmdspRegs[coreId]);
+
+ // Due to a hardware bug that breaks the MTU when the DSPs are powered off,
+ // don't do that on mop500_ed for now
+#if !defined(__STN_8500) || (__STN_8500 > 10)
+ MMDSP_RESET_CORE(pMmdspRegs[coreId]);
+ {
+ volatile t_uint32 loopme = 0xfff;
+ while(loopme--) ;
+ }
+ MMDSP_STOP_CORE(pMmdspRegs[coreId]);
+ {
+ volatile t_uint32 loopme = 0xfff;
+ while(loopme--) ;
+ }
+#endif
+
+ mpcDesc[coreId].state = MPC_STATE_BOOTABLE;
+
+ cm_SEM_PowerOff[coreId](coreId);
+
+ cm_PWR_DisableMPC(MPC_PWR_AUTOIDLE, coreId);
+ cm_PWR_DisableMPC(MPC_PWR_CLOCK, coreId);
+}
+
+PUBLIC t_uint32 cm_DSP_ReadXRamWord(t_nmf_core_id coreId, t_uint32 dspOffset)
+{
+ t_uint32 value;
+
+ value = pMmdspRegs[coreId]->mem24[dspOffset];
+
+ LOG_INTERNAL(3, "cm_DSP_ReadXRamWord: [%x]=%x\n",
+ dspOffset, value,
+ 0, 0, 0, 0);
+
+ return value;
+}
+
+
+PUBLIC void cm_DSP_WriteXRamWord(t_nmf_core_id coreId, t_uint32 dspOffset, t_uint32 value)
+{
+ LOG_INTERNAL(3, "cm_DSP_WriteXRamWord: [%x]<-%x\n",
+ dspOffset, value,
+ 0, 0, 0, 0);
+
+ pMmdspRegs[coreId]->mem24[dspOffset] = value;
+}
+
+static void cm_DSP_SEM_Init(t_nmf_core_id coreId)
+{
+ pMmdspRegs[coreId]->mmio_16.sem[1].value = 1;
+}
+
+PUBLIC void cm_DSP_SEM_Take(t_nmf_core_id coreId, t_semaphore_id semId)
+{
+ /* take semaphore */
+ while(pMmdspRegs[coreId]->mmio_16.sem[1].value) ;
+}
+
+PUBLIC void cm_DSP_SEM_Give(t_nmf_core_id coreId, t_semaphore_id semId)
+{
+ /* release semaphore */
+ pMmdspRegs[coreId]->mmio_16.sem[1].value = 1;
+}
+
+PUBLIC void cm_DSP_SEM_GenerateIrq(t_nmf_core_id coreId, t_semaphore_id semId)
+{
+ MMDSP_ASSERT_IRQ(pMmdspRegs[coreId], ARM2DSP_IRQ_0);
+}
+
+
+PUBLIC void cm_DSP_AssertDspIrq(t_nmf_core_id coreId, t_host2mpc_irq_num irqNum)
+{
+ MMDSP_ASSERT_IRQ(pMmdspRegs[coreId], irqNum);
+ return;
+}
+
+PUBLIC void cm_DSP_AcknowledgeDspIrq(t_nmf_core_id coreId, t_mpc2host_irq_num irqNum)
+{
+ MMDSP_ACKNOWLEDGE_IRQ(pMmdspRegs[coreId], irqNum);
+ return;
+}
+
+//TODO, juraj, cleanup INTERNAL_XRAM vs INTERNAL_XRAM16/24
+static const t_uint32 dspMemoryTypeId2OffsetShifter[NB_DSP_MEMORY_TYPE] =
+{
+ 2, /* INTERNAL_XRAM24: Internal X memory but seen by host as 32-bit memory */
+ 2, /* INTERNAL_XRAM16: Internal X memory but seen by host as 16-bit memory */
+ 2, /* INTERNAL_YRAM24: Internal Y memory but seen by host as 32-bit memory */
+ 2, /* INTERNAL_YRAM16: Internal Y memory but seen by host as 16-bit memory */
+ 2, /* SDRAM_EXT24: 24-bit external "X" memory */
+ 1, /* SDRAM_EXT16: 16-bit external "X" memory */
+ 2, /* ESRAM_EXT24: ESRAM24 */
+ 1, /* ESRAM_EXT16: ESRAM16 */
+ 3, /* SDRAM_CODE: Program memory */
+ 3, /* ESRAM_CODE: ESRAM code */
+ 3, /* LOCKED_CODE: locked code at the end of the SDRAM code area */
+};
+
+//TODO, juraj, use these values in mmdsp_Configure
+static const t_uint32 dspMemoryTypeId2DspAddressOffset[NB_DSP_MEMORY_TYPE] =
+{
+ 0, /* INTERNAL_XRAM24 */
+ 0, /* INTERNAL_XRAM16 */
+ 0, /* INTERNAL_YRAM24 */
+ 0, /* INTERNAL_YRAM16 */
+ SDRAMMEM24_BASE_ADDR, /* SDRAM_EXT24: 24-bit external "X" memory */
+ SDRAMMEM16_BASE_ADDR, /* SDRAM_EXT16: 16-bit external "X" memory */
+ ESRAMMEM24_BASE_ADDR, /* ESRAM_EXT24: ESRAM24 */
+ ESRAMMEM16_BASE_ADDR, /* ESRAM_EXT16: ESRAM16 */
+ SDRAMTEXT_BASE_ADDR, /* SDRAM_CODE: Program memory */
+ ESRAMTEXT_BASE_ADDR, /* ESRAM_CODE: ESRAM code */
+ SDRAMTEXT_BASE_ADDR, /* LOCKED_CODE: locked code at the end of the SDRAM code area */
+};
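Taken together, the two tables above convert a byte offset inside an allocator into a DSP word address: shift the offset right by the per-memory-type shifter, then add the per-memory-type DSP base (this is what cm_DSP_GetDspAddress() does further down). A standalone sketch with stand-in values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Stand-in values; the real ones are SDRAM_EXT24's entries in the two
     * tables above (shift 2, base SDRAMMEM24_BASE_ADDR). */
    const uint32_t shift       = 2;        /* one 24-bit word = 4 host bytes */
    const uint32_t dsp_base    = 0x10000;  /* stand-in for SDRAMMEM24_BASE_ADDR */
    const uint32_t byte_offset = 0x100;    /* offset returned by the allocator */

    uint32_t dsp_address = (byte_offset >> shift) + dsp_base;

    printf("byte offset 0x%x -> DSP word address 0x%x\n", byte_offset, dsp_address);
    return 0;
}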
+
+PUBLIC t_cm_allocator_desc* cm_DSP_GetAllocator(t_nmf_core_id coreId, t_dsp_memory_type_id memType)
+{
+ return mpcDesc[coreId].allocator[memType] ? mpcDesc[coreId].allocator[memType]->allocDesc : NULL;
+}
+
+PUBLIC void cm_DSP_GetDspChunkInfo(t_memory_handle memHandle, t_dsp_chunk_info *info)
+{
+ t_uint16 userData;
+
+ cm_MM_GetMemoryHandleUserData(memHandle, &userData, &info->alloc);
+
+ info->coreId = (t_nmf_core_id) ((userData >> SHIFT_BYTE1) & MASK_BYTE0);
+ info->memType = (t_dsp_memory_type_id)((userData >> SHIFT_BYTE0) & MASK_BYTE0);
+}
+
+PUBLIC t_cm_error cm_DSP_GetInternalMemoriesInfo(t_cm_domain_id domainId, t_dsp_memory_type_id memType,
+ t_uint32 *offset, t_uint32 *size)
+{
+ t_nmf_core_id coreId = domainDesc[domainId].domain.coreId;
+
+ switch(memType)
+ {
+ case INTERNAL_XRAM24:
+ case INTERNAL_XRAM16:
+ *offset = 0;
+ *size = mpcDesc[coreId].yram_offset;
+ break;
+ case INTERNAL_YRAM24:
+ case INTERNAL_YRAM16:
+ *offset = mpcDesc[coreId].yram_offset;
+ *size = mpcDesc[coreId].yram_size;
+ break;
+ case LOCKED_CODE:
+ *offset = mpcDesc[coreId].locked_offset;
+ *size = mpcDesc[coreId].locked_size;
+ break;
+ case SDRAM_EXT24:
+ case SDRAM_EXT16:
+ *offset = domainDesc[domainId].domain.sdramData.offset;
+ *size = domainDesc[domainId].domain.sdramData.size;
+ break;
+ case ESRAM_EXT24:
+ case ESRAM_EXT16:
+ *offset = domainDesc[domainId].domain.esramData.offset;
+ *size = domainDesc[domainId].domain.esramData.size;
+ break;
+ case SDRAM_CODE:
+ *offset = domainDesc[domainId].domain.sdramCode.offset;
+ *size = domainDesc[domainId].domain.sdramCode.size;
+
+ // update domain size to take into account .locked section
+ if(*offset + *size > mpcDesc[coreId].locked_offset)
+ *size = mpcDesc[coreId].locked_offset - *offset;
+ break;
+ case ESRAM_CODE:
+ *offset = domainDesc[domainId].domain.esramCode.offset;
+ *size = domainDesc[domainId].domain.esramCode.size;
+ break;
+ default:
+ //return CM_INVALID_PARAMETER;
+ //params are checked at the level above, so this should never occur
+ ERROR("Invalid memType\n",0,0,0,0,0,0);
+ *offset = 0;
+ *size = 0;
+ CM_ASSERT(0);
+ }
+
+ return CM_OK;
+}
+
+
+PUBLIC t_uint32 cm_DSP_ConvertSize(t_dsp_memory_type_id memType, t_uint32 wordSize)
+{
+ return wordSize << dspMemoryTypeId2OffsetShifter[memType];
+}
+
+PUBLIC t_cm_logical_address cm_DSP_ConvertDspAddressToHostLogicalAddress(t_nmf_core_id coreId, t_shared_addr dspAddress)
+{
+ t_dsp_address_info info;
+ cm_DSP_GetDspDataAddressInfo(coreId, dspAddress, &info);
+ return mpcDesc[coreId].segments[info.segmentType].base.logical + info.baseOffset;
+}
+
+PUBLIC t_cm_error cm_DSP_GetAllocatorStatus(t_nmf_core_id coreId, t_dsp_memory_type_id dspMemType, t_uint32 offset, t_uint32 size, t_cm_allocator_status *pStatus)
+{
+ t_cm_error error;
+
+ if(mpcDesc[coreId].allocator[dspMemType] == NULL)
+ return CM_UNKNOWN_MEMORY_HANDLE;
+
+ error = cm_MM_GetAllocatorStatus(cm_DSP_GetAllocator(coreId, dspMemType), offset, size, pStatus);
+ if (error != CM_OK)
+ return error;
+
+ // complete status with stack sizes, for all dsps
+ //NOTE: this isn't very clean, as the DSP and the memory allocator are different things
+ {
+ t_uint8 i;
+ for (i = 0; i < NB_CORE_IDS; i++) {
+ //*(pStatus->stack[i].sizes) = *(eeState[i].currentStackSize);
+ pStatus->stack[i].sizes[0] = eeState[i].currentStackSize[0];
+ pStatus->stack[i].sizes[1] = eeState[i].currentStackSize[1];
+ pStatus->stack[i].sizes[2] = eeState[i].currentStackSize[2];
+ }
+ }
+
+ // Change bytes to words
+ pStatus->global.accumulate_free_memory = pStatus->global.accumulate_free_memory >> dspMemoryTypeId2OffsetShifter[dspMemType];
+ pStatus->global.accumulate_used_memory = pStatus->global.accumulate_used_memory >> dspMemoryTypeId2OffsetShifter[dspMemType];
+ pStatus->global.maximum_free_size = pStatus->global.maximum_free_size >> dspMemoryTypeId2OffsetShifter[dspMemType];
+ pStatus->global.minimum_free_size = pStatus->global.minimum_free_size >> dspMemoryTypeId2OffsetShifter[dspMemType];
+
+ return error;
+}
+
+PUBLIC void cm_DSP_GetHostSystemAddress(t_memory_handle memHandle, t_cm_system_address *pAddr)
+{
+ t_dsp_chunk_info chunk_info;
+ t_uint32 offset; //in bytes
+
+ cm_DSP_GetDspChunkInfo(memHandle, &chunk_info);
+
+ offset = cm_MM_GetOffset(memHandle);
+
+ /* The MMDSP mem16 array is very specific with respect to host access. */
+ /* We compute the host system address by hand to take into account the specificities of the mmdsp mem16 array: */
+ /* 1 dsp word = 2 host bytes AND the mem16 array is "exported" by the MMDSP External Bus wrapper at the 0x40000 offset. */
+ if (chunk_info.memType == INTERNAL_XRAM16 || chunk_info.memType == INTERNAL_YRAM16) {
+ offset = (offset >> 1) + FIELD_OFFSET(t_mmdsp_hw_regs, mem16);
+ }
+
+ //TODO, juraj, calculate correct value here - based on segments desc etc..
+ pAddr->logical = mpcDesc[chunk_info.coreId].allocator[chunk_info.memType]->baseAddress.logical + offset;
+ pAddr->physical = mpcDesc[chunk_info.coreId].allocator[chunk_info.memType]->baseAddress.physical + offset;
+}
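A worked example of the mem16 correction above, under my reading that the allocator offset is expressed in the 32-bit mem24 view (4 host bytes per DSP word), so it is halved for the 16-bit view and rebased onto the mem16 array, which the comments in mmdsp_hwp.h place at offset 0x40000 of the register block:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical allocator offset, expressed in the mem24 view
     * (4 host bytes per DSP word), i.e. DSP word 0x20. */
    const uint32_t offset_mem24_bytes = 0x80;
    /* Offset of the mem16 array inside t_mmdsp_hw_regs, per the
     * comments in mmdsp_hwp.h (0x40000 -> 0x50000). */
    const uint32_t mem16_array_offset = 0x40000;

    /* Same correction as cm_DSP_GetHostSystemAddress(): one DSP word is
     * only 2 host bytes in the mem16 view, hence the halving. */
    uint32_t offset_mem16_bytes = (offset_mem24_bytes >> 1) + mem16_array_offset;

    printf("mem24 offset 0x%x -> mem16 offset 0x%x\n",
           offset_mem24_bytes, offset_mem16_bytes);
    return 0;
}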
+
+
+PUBLIC t_physical_address cm_DSP_GetPhysicalAdress(t_memory_handle memHandle)
+{
+ t_cm_system_address addr;
+ cm_DSP_GetHostSystemAddress(memHandle, &addr);
+ return addr.physical;
+}
+
+PUBLIC t_cm_logical_address cm_DSP_GetHostLogicalAddress(t_memory_handle memHandle)
+{
+ t_cm_system_address addr;
+ cm_DSP_GetHostSystemAddress(memHandle, &addr);
+ return addr.logical;
+}
+
+PUBLIC void cm_DSP_GetDspAddress(t_memory_handle memHandle, t_uint32 *pDspAddress)
+{
+ t_dsp_chunk_info chunk_info;
+
+ cm_DSP_GetDspChunkInfo(memHandle, &chunk_info);
+
+ *pDspAddress =
+ (cm_MM_GetOffset(memHandle) >> dspMemoryTypeId2OffsetShifter[chunk_info.memType]) +
+ dspMemoryTypeId2DspAddressOffset[chunk_info.memType];
+}
+
+PUBLIC t_cm_error cm_DSP_GetDspBaseAddress(t_nmf_core_id coreId, t_dsp_memory_type_id memType, t_cm_system_address *pAddr)
+{
+ cm_migration_check_state(coreId, STATE_NORMAL);
+ if (mpcDesc[coreId].allocator[memType] == NULL)
+ return CM_INVALID_PARAMETER;
+ *pAddr = mpcDesc[coreId].allocator[memType]->baseAddress;
+ return CM_OK;
+}
+
+PUBLIC void cm_DSP_GetDspMemoryHandleSize(t_memory_handle memHandle, t_uint32 *pDspSize)
+{
+ t_dsp_chunk_info chunk_info;
+ cm_DSP_GetDspChunkInfo(memHandle, &chunk_info);
+ *pDspSize = cm_MM_GetSize(memHandle) >> dspMemoryTypeId2OffsetShifter[chunk_info.memType];
+}
+
+PUBLIC t_cm_error cm_DSP_setStackSize(t_nmf_core_id coreId, t_uint32 newStackSize)
+{
+ t_uint8 nbXramBanks;
+ t_uint32 xramSize;
+ t_cm_error error;
+
+ /* compute size of xram allocator */
+ nbXramBanks = SxA_NB_BLOCK_RAM - mpcDesc[coreId].nbYramBank;
+
+ /* check first that the required stack size is less than the xram memory */
+ if (newStackSize >= nbXramBanks * 4 * ONE_KB) {
+ ERROR("CM_NO_MORE_MEMORY: cm_DSP_setStackSize(), required stack size doesn't fit in XRAM.\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ /* compute new xram allocator size */
+ xramSize = nbXramBanks * 4 * ONE_KB - newStackSize;
+
+ /* try to resize it */
+ if ((error = cm_MM_ResizeAllocator(cm_DSP_GetAllocator(coreId, INTERNAL_XRAM24),
+ xramSize << dspMemoryTypeId2OffsetShifter[INTERNAL_XRAM24])) == CM_NO_MORE_MEMORY) {
+ ERROR("CM_NO_MORE_MEMORY: Couldn't resize stack in cm_DSP_setStackSize()\n", 0, 0, 0, 0, 0, 0);
+ }
+
+ return error;
+}
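A numeric sketch of the resizing arithmetic above, assuming 4K DSP words per TCM bank and ONE_KB == 1024 (the real bank count, SxA_NB_BLOCK_RAM, is platform dependent):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t one_kb        = 1024;        /* assumed value of ONE_KB        */
    const uint32_t nb_xram_banks = 6;           /* hypothetical: SxA_NB_BLOCK_RAM
                                                   minus the YRAM banks           */
    const uint32_t new_stack     = 2 * one_kb;  /* requested stack, in DSP words  */

    uint32_t xram_words = nb_xram_banks * 4 * one_kb - new_stack;
    uint32_t xram_bytes = xram_words << 2;      /* INTERNAL_XRAM24 shifter is 2   */

    printf("new XRAM allocator size: %u words (%u host bytes)\n",
           xram_words, xram_bytes);
    return 0;
}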
+
+PUBLIC t_cm_error cm_DSP_IsNbYramBanksValid(t_nmf_core_id coreId, t_uint8 nbYramBanks)
+{
+ /* we use one bank for cache */
+ t_uint8 nbOfRamBanksWithCacheReserved = SxA_NB_BLOCK_RAM;
+
+ /* we want to keep at least one bank of xram */
+ if (nbYramBanks < nbOfRamBanksWithCacheReserved) {return CM_OK;}
+ else {return CM_MPC_INVALID_CONFIGURATION;}
+}
+
+PUBLIC t_uint32 cm_DSP_getStackAddr(t_nmf_core_id coreId)
+{
+ /* we use one bank for cache */
+ //t_uint8 nbOfRamBanksWithCacheReserved = SxA_NB_BLOCK_RAM;
+ /* */
+ //return ((nbOfRamBanksWithCacheReserved * MMDSP_RAM_BLOCK_SIZE * MMDSP_DATA_WORD_SIZE_IN_HOST_SPACE) - mpcDesc[coreId].yram_offset);
+ return mpcDesc[coreId].yram_offset / MMDSP_DATA_WORD_SIZE_IN_HOST_SPACE;
+}
+
+static void arm_Init(void)
+{
+ mpcDesc[ARM_CORE_ID].allocator[INTERNAL_XRAM24] = 0;
+ mpcDesc[ARM_CORE_ID].allocator[INTERNAL_XRAM16] = 0;
+
+ mpcDesc[ARM_CORE_ID].allocator[INTERNAL_YRAM24] = 0;
+ mpcDesc[ARM_CORE_ID].allocator[INTERNAL_YRAM16] = 0;
+
+ mpcDesc[ARM_CORE_ID].allocator[SDRAM_CODE] = 0;
+ mpcDesc[ARM_CORE_ID].allocator[ESRAM_CODE] = 0;
+
+ mpcDesc[ARM_CORE_ID].allocator[SDRAM_EXT16] = 0;
+ mpcDesc[ARM_CORE_ID].allocator[SDRAM_EXT24] = 0;
+
+ mpcDesc[ARM_CORE_ID].allocator[ESRAM_EXT16] = &esramDesc;
+ mpcDesc[ARM_CORE_ID].allocator[ESRAM_EXT16]->referenceCounter++;
+ mpcDesc[ARM_CORE_ID].allocator[ESRAM_EXT24] = &esramDesc;
+ mpcDesc[ARM_CORE_ID].allocator[ESRAM_EXT24]->referenceCounter++;
+}
+
+static void _init_Segment(
+ t_dsp_segment *seg,
+ const t_cm_system_address base, const t_uint32 arm_offset,
+ const t_uint32 size)
+{
+ seg->base.logical = base.logical + arm_offset;
+ seg->base.physical = base.physical + arm_offset;
+ seg->size = size;
+}
+
+static t_cm_error mmdsp_Init(
+ const t_cm_system_address *dspSystemAddr,
+ t_uint8 nbXramBlocks, t_uint8 nbYramBlocks,
+ t_dsp_allocator_desc *sdramCodeDesc,
+ t_dsp_allocator_desc *sdramDataDesc,
+ t_cm_domain_id eeDomain,
+ t_dsp_desc *pDspDesc,
+ t_mmdsp_hw_regs **pRegs)
+{
+ t_cm_system_address xramSysAddr;
+ t_uint32 sizeInBytes;
+
+ /* Initialize references to the hw resources */
+ *pRegs = (t_mmdsp_hw_regs *) dspSystemAddr->logical;
+
+ /* Initialize memory segments management */
+ xramSysAddr.logical = (t_cm_logical_address)(((t_mmdsp_hw_regs *)dspSystemAddr->logical)->mem24);
+ xramSysAddr.physical = (t_cm_physical_address)(((t_mmdsp_hw_regs *)dspSystemAddr->physical)->mem24);
+
+ /* The last (x)ram block will be used by cache, so ... */
+ /* And the NB_YRAM_BLOCKS last available block(s) will be used as YRAM */
+
+ /* XRAM*/
+ pDspDesc->allocator[INTERNAL_XRAM16] = pDspDesc->allocator[INTERNAL_XRAM24] = (t_dsp_allocator_desc*)OSAL_Alloc(sizeof (t_dsp_allocator_desc));
+ if (pDspDesc->allocator[INTERNAL_XRAM24] == NULL)
+ return CM_NO_MORE_MEMORY;
+
+ pDspDesc->allocator[INTERNAL_XRAM24]->allocDesc = cm_MM_CreateAllocator(
+ ((nbXramBlocks-nbYramBlocks)*MMDSP_RAM_BLOCK_SIZE)*MMDSP_DATA_WORD_SIZE_IN_HOST_SPACE,
+ 0,
+ "XRAM");
+ pDspDesc->allocator[INTERNAL_XRAM24]->baseAddress = xramSysAddr;
+ pDspDesc->allocator[INTERNAL_XRAM24]->referenceCounter = 2;
+
+ /* YRAM */
+ pDspDesc->allocator[INTERNAL_YRAM16] = pDspDesc->allocator[INTERNAL_YRAM24] = (t_dsp_allocator_desc*)OSAL_Alloc(sizeof (t_dsp_allocator_desc));
+ if (pDspDesc->allocator[INTERNAL_YRAM24] == 0) {
+ OSAL_Free(pDspDesc->allocator[INTERNAL_XRAM24]);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ pDspDesc->allocator[INTERNAL_YRAM24]->allocDesc = cm_MM_CreateAllocator(
+ (nbYramBlocks*MMDSP_RAM_BLOCK_SIZE)*MMDSP_DATA_WORD_SIZE_IN_HOST_SPACE,
+ ((nbXramBlocks-nbYramBlocks)*MMDSP_RAM_BLOCK_SIZE)*MMDSP_DATA_WORD_SIZE_IN_HOST_SPACE,
+ "YRAM");
+ pDspDesc->allocator[INTERNAL_YRAM24]->baseAddress = xramSysAddr; /* use xram base address but offset is not null */
+ pDspDesc->allocator[INTERNAL_YRAM24]->referenceCounter = 2;
+
+ pDspDesc->yram_offset = ((nbXramBlocks-nbYramBlocks)*MMDSP_RAM_BLOCK_SIZE)*MMDSP_DATA_WORD_SIZE_IN_HOST_SPACE;
+ pDspDesc->yram_size = (nbYramBlocks*MMDSP_RAM_BLOCK_SIZE)*MMDSP_DATA_WORD_SIZE_IN_HOST_SPACE;
+
+ /* SDRAM & ESRAM */
+ pDspDesc->allocator[SDRAM_CODE] = sdramCodeDesc;
+ pDspDesc->allocator[SDRAM_CODE]->referenceCounter++;
+ pDspDesc->allocator[ESRAM_CODE] = &esramDesc;
+ pDspDesc->allocator[ESRAM_CODE]->referenceCounter++;
+
+ /* LOCKED CODE at end of SDRAM code*/
+ pDspDesc->allocator[LOCKED_CODE] = sdramCodeDesc;
+ pDspDesc->allocator[LOCKED_CODE]->referenceCounter++;
+
+ pDspDesc->locked_offset = cm_MM_GetAllocatorSize(pDspDesc->allocator[SDRAM_CODE]->allocDesc) - MMDSP_CODE_CACHE_WAY_SIZE * 8 * SxA_LOCKED_WAY;
+ pDspDesc->locked_size = MMDSP_CODE_CACHE_WAY_SIZE * 8 * SxA_LOCKED_WAY;
+
+ /* Data_16/24 memory management */
+ pDspDesc->allocator[SDRAM_EXT16] = sdramDataDesc;
+ pDspDesc->allocator[SDRAM_EXT16]->referenceCounter++;
+ pDspDesc->allocator[SDRAM_EXT24] = sdramDataDesc;
+ pDspDesc->allocator[SDRAM_EXT24]->referenceCounter++;
+
+ pDspDesc->allocator[ESRAM_EXT16] = &esramDesc;
+ pDspDesc->allocator[ESRAM_EXT16]->referenceCounter++;
+ pDspDesc->allocator[ESRAM_EXT24] = &esramDesc;
+ pDspDesc->allocator[ESRAM_EXT24]->referenceCounter++;
+
+ sizeInBytes = cm_MM_GetAllocatorSize(pDspDesc->allocator[SDRAM_CODE]->allocDesc);
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ _init_Segment(&pDspDesc->segments[SDRAM_CODE_EE],
+ pDspDesc->allocator[SDRAM_CODE]->baseAddress,
+ domainDesc[eeDomain].domain.sdramCode.offset,
+ domainDesc[eeDomain].domain.sdramCode.size);
+ _init_Segment(&pDspDesc->segments[SDRAM_CODE_USER],
+ pDspDesc->allocator[SDRAM_CODE]->baseAddress,
+ domainDesc[eeDomain].domain.sdramCode.offset + domainDesc[eeDomain].domain.sdramCode.size,
+ sizeInBytes - domainDesc[eeDomain].domain.sdramCode.size);
+#else
+ _init_Segment(&pDspDesc->segments[SDRAM_CODE_EE],
+ pDspDesc->allocator[SDRAM_CODE]->baseAddress,
+ 0x0,
+ sizeInBytes);
+#endif
+
+ sizeInBytes = cm_MM_GetAllocatorSize(pDspDesc->allocator[ESRAM_CODE]->allocDesc);
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ _init_Segment(&pDspDesc->segments[ESRAM_CODE_EE],
+ pDspDesc->allocator[ESRAM_CODE]->baseAddress,
+ domainDesc[eeDomain].domain.esramCode.offset,
+ domainDesc[eeDomain].domain.esramCode.size);
+ _init_Segment(&pDspDesc->segments[ESRAM_CODE_USER],
+ pDspDesc->allocator[ESRAM_CODE]->baseAddress,
+ domainDesc[eeDomain].domain.esramCode.offset + domainDesc[eeDomain].domain.esramCode.size,
+ sizeInBytes - domainDesc[eeDomain].domain.esramCode.size);
+#else
+ _init_Segment(&pDspDesc->segments[ESRAM_CODE_EE],
+ pDspDesc->allocator[ESRAM_CODE]->baseAddress,
+ 0x0,
+ sizeInBytes);
+#endif
+
+ // The difference in the following code is the segment size used to calculate the top.
+ sizeInBytes = cm_MM_GetAllocatorSize(pDspDesc->allocator[SDRAM_EXT16]->allocDesc);
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ _init_Segment(&pDspDesc->segments[SDRAM_DATA_EE],
+ pDspDesc->allocator[SDRAM_EXT16]->baseAddress,
+ domainDesc[eeDomain].domain.sdramData.offset,
+ domainDesc[eeDomain].domain.sdramData.size);
+ _init_Segment(&pDspDesc->segments[SDRAM_DATA_USER],
+ pDspDesc->allocator[SDRAM_EXT16]->baseAddress,
+ domainDesc[eeDomain].domain.sdramData.offset + domainDesc[eeDomain].domain.sdramData.size,
+ sizeInBytes - domainDesc[eeDomain].domain.sdramData.size);
+#else
+ _init_Segment(&pDspDesc->segments[SDRAM_DATA_EE],
+ pDspDesc->allocator[SDRAM_EXT16]->baseAddress,
+ 0x0,
+ sizeInBytes);
+#endif
+
+ sizeInBytes = cm_MM_GetAllocatorSize(pDspDesc->allocator[ESRAM_EXT16]->allocDesc);
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ _init_Segment(&pDspDesc->segments[ESRAM_DATA_EE],
+ pDspDesc->allocator[ESRAM_EXT16]->baseAddress,
+ domainDesc[eeDomain].domain.esramData.offset,
+ domainDesc[eeDomain].domain.esramData.size);
+ _init_Segment(&pDspDesc->segments[ESRAM_DATA_USER],
+ pDspDesc->allocator[ESRAM_EXT16]->baseAddress,
+ domainDesc[eeDomain].domain.esramData.offset + domainDesc[eeDomain].domain.esramData.size,
+ sizeInBytes - domainDesc[eeDomain].domain.esramData.size);
+#else
+ _init_Segment(&pDspDesc->segments[ESRAM_DATA_EE],
+ pDspDesc->allocator[ESRAM_EXT16]->baseAddress,
+ 0x0,
+ sizeInBytes);
+#endif
+
+ return CM_OK;
+}
+
+//TODO, juraj, reuse cm_DSP_UpdateBase functions
+static t_cm_error mmdsp_Configure(t_nmf_core_id coreId, t_mmdsp_hw_regs *pRegs, const t_dsp_desc *pDspDesc)
+{
+ t_uint64 regValue;
+ static const t_uint64 coreId2stbusId[NB_CORE_IDS] =
+ {
+ 0, /* ARM_CORE_ID no meaning */
+ SVA_STBUS_ID, /* SVA_CORE_ID */
+ SIA_STBUS_ID /* SIA_CORE_ID */
+ };
+
+ //t_cm_system_address sysAddr;
+ //t_cm_size sizeInBytes;
+
+ /* Stop core (stop clock) */
+ MMDSP_RESET_CORE(pRegs);
+ {
+ volatile t_uint32 loopme = 0xfff;
+ while(loopme--) ;
+ }
+ MMDSP_STOP_CORE(pRegs);
+ {
+ volatile t_uint32 loopme = 0xfff;
+ while(loopme--) ;
+ }
+
+#if 0
+ /* Reset DSP internal memory (xram) */
+ {
+ t_uint32 *pSrc = (t_uint32 *)(pRegs->mem24);
+ t_uint32 tcmSize;
+ int i;
+ cm_MM_GetAllocatorSize(pDspDesc->allocator[INTERNAL_XRAM], &sizeInBytes);
+ tcmSize = sizeInBytes;
+ cm_MM_GetAllocatorSize(pDspDesc->allocator[INTERNAL_YRAM], &sizeInBytes);
+ tcmSize += sizeInBytes;
+ for (i = 0; i < (tcmSize/sizeof(t_uint32)); i++)
+ *(pSrc++) = 0;
+ }
+#endif
+
+ /* Configure all blocks as X only, except the Y ones (moved to mmdsp_ConfigureAfterBoot()) */
+
+ /* __STN_8815 --> __STN_8820 or __STN_8500 */
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_STBUS_ID_CONF_REG, coreId2stbusId[coreId]);
+
+ /* Configure External Bus timeout reg */
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_EN_EXT_BUS_TIMEOUT_REG, IHOST_TIMEOUT_ENABLE);
+
+ /* Program memory management */
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ {
+ const t_uint32 r0 = CODE_ADDRESS_BASE[1] >> 10;
+ const t_uint32 r1 = CODE_ADDRESS_BASE[2] >> 10;
+ const t_uint32 r2 = CODE_ADDRESS_BASE[3] >> 10;
+ const t_uint32 sdram0 = pDspDesc->segments[SDRAM_CODE_EE].base.physical;
+ const t_uint32 sdram1 = pDspDesc->segments[SDRAM_CODE_USER].base.physical;
+ const t_uint32 esram0 = pDspDesc->segments[ESRAM_CODE_EE].base.physical;
+ const t_uint32 esram1 = pDspDesc->segments[ESRAM_CODE_USER].base.physical;
+
+ /* Bases for first two segments, going to sdram */
+ regValue = ((t_uint64)(sdram1) << IHOST_PRG_BASE2_ADDR_SHIFT) + (t_uint64)sdram0;
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_PRG_BASE_ADDR_REG, regValue);
+
+ /* Bases for second two segments, going to esram */
+ regValue = ((t_uint64)(esram1) << IHOST_PRG_BASE4_ADDR_SHIFT) + (t_uint64)esram0;
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_PRG_BASE_34_ADDR_REG, regValue);
+
+ /* Split the mmdsp program address space and activate the mechanism */
+ regValue = (t_uint64)((t_uint64)(r2) << 48 | (t_uint64)(r1) <<32 | (t_uint64)(r0) << 16 | 1);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_PRG_BASE2_ACTIV_REG, regValue);
+ }
+#else
+ {
+ const t_uint32 sdram0 = pDspDesc->segments[SDRAM_CODE_EE].base.physical;
+ const t_uint32 esram0 = pDspDesc->segments[ESRAM_CODE_EE].base.physical;
+
+ regValue = (t_uint64)sdram0 | ( ((t_uint64)esram0) << IHOST_PRG_BASE2_ADDR_SHIFT );
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_PRG_BASE_ADDR_REG, regValue);
+
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_PRG_BASE2_ACTIV_REG, IHOST_PRG_BASE2_ACTIV_ON);
+ }
+#endif
+
+ /* Data_16/24 memory management */
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ /* Segments 1 and 2 for 16/24 map to sdram continuously */
+ /* Base 1 */
+ regValue = (((t_uint64)pDspDesc->segments[SDRAM_DATA_EE].base.physical) << IHOST_DATA_EXT_BUS_BASE_24_SHIFT) |
+ (((t_uint64)pDspDesc->segments[SDRAM_DATA_EE].base.physical) << IHOST_DATA_EXT_BUS_BASE_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA_EXT_BUS_BASE_REG, regValue);
+ /* Top 1 */
+ regValue = (((t_uint64)(pDspDesc->segments[SDRAM_DATA_EE].base.physical + pDspDesc->segments[SDRAM_DATA_EE].size - 1)) << IHOST_DATA_EXT_BUS_TOP_24_SHIFT) |
+ (((t_uint64)(pDspDesc->segments[SDRAM_DATA_EE].base.physical + pDspDesc->segments[SDRAM_DATA_EE].size - 1)) << IHOST_DATA_EXT_BUS_TOP_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA_EXT_BUS_TOP_16_24_REG, regValue);
+
+ /* Base 2 */
+ regValue = (((t_uint64)pDspDesc->segments[SDRAM_DATA_USER].base.physical) << IHOST_DATA_EXT_BUS_BASE2_24_SHIFT) |
+ (((t_uint64)pDspDesc->segments[SDRAM_DATA_USER].base.physical) << IHOST_DATA_EXT_BUS_BASE2_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA_EXT_BUS_BASE2_REG, regValue);
+ /* Top 2 */
+ regValue = (((t_uint64)(pDspDesc->segments[SDRAM_DATA_USER].base.physical + pDspDesc->segments[SDRAM_DATA_USER].size - 1)) << IHOST_DATA_EXT_BUS_TOP2_24_SHIFT) |
+ (((t_uint64)(pDspDesc->segments[SDRAM_DATA_USER].base.physical + pDspDesc->segments[SDRAM_DATA_USER].size - 1)) << IHOST_DATA_EXT_BUS_TOP2_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_EXT_BUS_TOP2_16_24_REG, regValue);
+
+ /* Segments 3 and 4 for 16/24 map to esram continuously */
+ /* Base 3 */
+ regValue = (((t_uint64)pDspDesc->segments[ESRAM_DATA_EE].base.physical) << IHOST_DATA_EXT_BUS_BASE3_24_SHIFT) |
+ (((t_uint64)pDspDesc->segments[ESRAM_DATA_EE].base.physical) << IHOST_DATA_EXT_BUS_BASE3_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA_EXT_BUS_BASE3_REG, regValue);
+ /* Top 3 */
+ regValue = (((t_uint64)(pDspDesc->segments[ESRAM_DATA_EE].base.physical + pDspDesc->segments[ESRAM_DATA_EE].size - 1)) << IHOST_DATA_EXT_BUS_TOP3_24_SHIFT) |
+ (((t_uint64)(pDspDesc->segments[ESRAM_DATA_EE].base.physical + pDspDesc->segments[ESRAM_DATA_EE].size - 1)) << IHOST_DATA_EXT_BUS_TOP3_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_EXT_BUS_TOP3_16_24_REG, regValue);
+
+ /* Base 4 */
+ regValue = (((t_uint64)pDspDesc->segments[ESRAM_DATA_USER].base.physical) << IHOST_DATA_EXT_BUS_BASE4_24_SHIFT) |
+ (((t_uint64)pDspDesc->segments[ESRAM_DATA_USER].base.physical) << IHOST_DATA_EXT_BUS_BASE4_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA_EXT_BUS_BASE4_REG, regValue);
+ /* Top 4 */
+ regValue = (((t_uint64)(pDspDesc->segments[ESRAM_DATA_USER].base.physical + pDspDesc->segments[ESRAM_DATA_USER].size - 1)) << IHOST_DATA_EXT_BUS_TOP4_24_SHIFT) |
+ (((t_uint64)(pDspDesc->segments[ESRAM_DATA_USER].base.physical + pDspDesc->segments[ESRAM_DATA_USER].size - 1)) << IHOST_DATA_EXT_BUS_TOP4_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_EXT_BUS_TOP4_16_24_REG, regValue);
+
+ /* Define base 2 thresholds/offset (1MB for each up segment) */
+ regValue = ((t_uint64)DATA_ADDRESS_BASE[1].startAddress[1]>>SHIFT_HALFWORD1)<< IHOST_DATA2_24_XA_BASE_SHIFT;
+ regValue |= ((t_uint64)DATA_ADDRESS_BASE[1].startAddress[0]>>SHIFT_HALFWORD1)<< IHOST_DATA2_16_XA_BASE_SHIFT;
+
+ /* Define base 3 thresholds/offset (1MB for each up segment) */
+ regValue |= ((t_uint64)DATA_ADDRESS_BASE[2].startAddress[1]>>SHIFT_HALFWORD1)<< IHOST_DATA3_24_XA_BASE_SHIFT;
+ regValue |= ((t_uint64)DATA_ADDRESS_BASE[2].startAddress[0]>>SHIFT_HALFWORD1)<< IHOST_DATA3_16_XA_BASE_SHIFT;
+
+ /* Define base 4 thresholds/offset (1MB for each up segment) */
+ regValue |= ((t_uint64)DATA_ADDRESS_BASE[3].startAddress[1]>>SHIFT_HALFWORD1)<< IHOST_DATA4_24_XA_BASE_SHIFT;
+ regValue |= ((t_uint64)DATA_ADDRESS_BASE[3].startAddress[0]>>SHIFT_HALFWORD1)<< IHOST_DATA4_16_XA_BASE_SHIFT;
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA2_1624_XA_BASE_REG, regValue);
+
+#else
+ /* Program data24/16 base 1 */
+ regValue = (((t_uint64)pDspDesc->segments[SDRAM_DATA_EE].base.physical) << IHOST_DATA_EXT_BUS_BASE_24_SHIFT) |
+ (((t_uint64)pDspDesc->segments[SDRAM_DATA_EE].base.physical) << IHOST_DATA_EXT_BUS_BASE_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA_EXT_BUS_BASE_REG, regValue);
+
+ /* Program data24/16 top 1 */
+ regValue = (((t_uint64)(pDspDesc->segments[SDRAM_DATA_EE].base.physical + pDspDesc->segments[SDRAM_DATA_EE].size - 1)) << IHOST_DATA_EXT_BUS_TOP_24_SHIFT) |
+ (((t_uint64)(pDspDesc->segments[SDRAM_DATA_EE].base.physical + pDspDesc->segments[SDRAM_DATA_EE].size - 1)) << IHOST_DATA_EXT_BUS_TOP_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA_EXT_BUS_TOP_16_24_REG, regValue);
+
+ /* Program data24/16 base 2 */
+ regValue = (((t_uint64)pDspDesc->segments[ESRAM_DATA_EE].base.physical) << IHOST_DATA_EXT_BUS_BASE2_24_SHIFT) |
+ (((t_uint64)pDspDesc->segments[ESRAM_DATA_EE].base.physical) << IHOST_DATA_EXT_BUS_BASE2_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA_EXT_BUS_BASE2_REG, regValue);
+
+ /* Program data24/16 top 2 */
+ regValue = (((t_uint64)(pDspDesc->segments[ESRAM_DATA_EE].base.physical + pDspDesc->segments[ESRAM_DATA_EE].size - 1)) << IHOST_DATA_EXT_BUS_TOP2_24_SHIFT) |
+ (((t_uint64)(pDspDesc->segments[ESRAM_DATA_EE].base.physical + pDspDesc->segments[ESRAM_DATA_EE].size - 1)) << IHOST_DATA_EXT_BUS_TOP2_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_EXT_BUS_TOP2_16_24_REG, regValue);
+
+ /* Define base 2 thresholds/offset (1MB for each up segment) */
+ regValue = ((t_uint64)(DATA_ADDRESS_BASE[1].startAddress[1]>>SHIFT_HALFWORD1))<< IHOST_DATA2_24_XA_BASE_SHIFT; // Top address minus ONE_MB => 256KW (24/32-bit)
+ regValue |= ((t_uint64)(DATA_ADDRESS_BASE[1].startAddress[0]>>SHIFT_HALFWORD1))<< IHOST_DATA2_16_XA_BASE_SHIFT; // Top address minus ONE_MB => 512KW (16-bit)
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA2_1624_XA_BASE_REG, regValue);
+#endif
+
+ /* Enable top check */
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA_TOP_16_24_CHK_REG, IHOST_DATA_TOP_16_24_CHK_ON);
+
+ /* Enable both bases */
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_DATA_BASE2_ACTIV_REG, IHOST_DATA_BASE2_ACTIV_ON);
+
+ /* MMIO management */
+ regValue = (((t_uint64)STM_BASE_ADDR) << IHOST_EXT_MMIO_BASE_ADDR_SHIFT) |
+ (((t_uint64)DMA_CTRL_END_ADDR) << IHOST_EXT_MMIO_DATA_EXT_BUS_TOP_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_EXT_MMIO_BASE_DATA_EXT_BUS_TOP_REG, regValue);
+
+ /* Configure Icache */
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_INST_BURST_SZ_REG, IHOST_INST_BURST_SZ_AUTO);
+
+ regValue = (t_uint64)(IHOST_ICACHE_MODE_PERFMETER_OFF | IHOST_ICACHE_MODE_L2_CACHE_ON |
+ IHOST_ICACHE_MODE_L1_CACHE_ON | IHOST_ICACHE_MODE_FILL_MODE_OFF);
+ WRITE_INDIRECT_HOST_REG(pRegs, IHOST_ICACHE_MODE_REG, regValue);
+
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_DSP_updateCodeBase(
+ t_nmf_core_id coreId,
+ t_dsp_segment_type hwSegment,
+ t_cm_system_address src,
+ t_cm_system_address dst
+ )
+{
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ t_mmdsp_hw_regs *pRegs = pMmdspRegs[coreId];
+ t_uint32 offset = src.physical - mpcDesc[coreId].segments[hwSegment].base.physical;
+ t_cm_system_address base;
+ t_uint32 altBase = 0;
+ t_uint64 regValue = 0;
+ t_uint8 reg = 0;
+
+ base.physical = dst.physical - offset;
+ base.logical = dst.logical - offset;
+
+ switch(hwSegment) {
+ case SDRAM_CODE_EE:
+ altBase = mpcDesc[coreId].segments[SDRAM_CODE_USER].base.physical;
+ regValue = ((t_uint64)(altBase) << IHOST_PRG_BASE2_ADDR_SHIFT) + (t_uint64)base.physical;
+ reg = IHOST_PRG_BASE_ADDR_REG;
+ break;
+ case SDRAM_CODE_USER:
+ altBase = mpcDesc[coreId].segments[SDRAM_CODE_EE].base.physical;
+ regValue = ((t_uint64)(base.physical) << IHOST_PRG_BASE2_ADDR_SHIFT) + (t_uint64)altBase;
+ reg = IHOST_PRG_BASE_ADDR_REG;
+ break;
+ case ESRAM_CODE_EE:
+ altBase = mpcDesc[coreId].segments[ESRAM_CODE_USER].base.physical;
+ regValue = ((t_uint64)(altBase) << IHOST_PRG_BASE4_ADDR_SHIFT) + (t_uint64)base.physical;
+ reg = IHOST_PRG_BASE_34_ADDR_REG;
+ break;
+ case ESRAM_CODE_USER:
+ altBase = mpcDesc[coreId].segments[ESRAM_CODE_EE].base.physical;
+ regValue = ((t_uint64)(base.physical) << IHOST_PRG_BASE4_ADDR_SHIFT) + (t_uint64)altBase;
+ reg = IHOST_PRG_BASE_34_ADDR_REG;
+ break;
+ default:
+ CM_ASSERT(0);
+ }
+
+ LOG_INTERNAL(1, "##### DSP Code Base Update [%d]: 0x%x -> 0x%x (0x%x)\n",
+ hwSegment, mpcDesc[coreId].segments[hwSegment].base.physical, base.physical, base.logical, 0, 0);
+
+ WRITE_INDIRECT_HOST_REG(pRegs, reg, regValue);
+
+ mpcDesc[coreId].segments[hwSegment].base = base;
+#endif
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_DSP_updateDataBase(
+ t_nmf_core_id coreId,
+ t_dsp_segment_type hwSegment,
+ t_cm_system_address src,
+ t_cm_system_address dst
+ )
+{
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ t_mmdsp_hw_regs *pRegs = pMmdspRegs[coreId];
+ t_uint32 offset = src.physical - mpcDesc[coreId].segments[hwSegment].base.physical;
+ t_cm_system_address base;
+ t_uint32 size = mpcDesc[coreId].segments[hwSegment].size; //in bytes
+ t_uint64 regValue;
+ t_uint8 reg = 0;
+ t_uint8 top = 0;
+
+ base.physical = dst.physical - offset;
+ base.logical = dst.logical - offset;
+
+ switch(hwSegment) {
+ case SDRAM_DATA_EE:
+ reg = IHOST_DATA_EXT_BUS_BASE_REG;
+ top = IHOST_DATA_EXT_BUS_TOP_16_24_REG;
+ break;
+ case SDRAM_DATA_USER:
+ reg = IHOST_DATA_EXT_BUS_BASE2_REG;
+ top = IHOST_EXT_BUS_TOP2_16_24_REG;
+ break;
+ case ESRAM_DATA_EE:
+ reg = IHOST_DATA_EXT_BUS_BASE3_REG;
+ top = IHOST_EXT_BUS_TOP3_16_24_REG;
+ break;
+ case ESRAM_DATA_USER:
+ reg = IHOST_DATA_EXT_BUS_BASE4_REG;
+ top = IHOST_EXT_BUS_TOP4_16_24_REG;
+ break;
+ default:
+ CM_ASSERT(0);
+ }
+
+ LOG_INTERNAL(1, "##### DSP Data Base Update [%d]: 0x%x -> 0x%x (0x%x)\n",
+ hwSegment, mpcDesc[coreId].segments[hwSegment].base.physical, base.physical, base.logical, 0, 0);
+
+ /* Program data24/16 base */
+ regValue = (((t_uint64)(base.physical)) << IHOST_DATA_EXT_BUS_BASE2_24_SHIFT) |
+ (((t_uint64)(base.physical)) << IHOST_DATA_EXT_BUS_BASE2_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, reg, regValue);
+
+ /* Program data24/16 top */
+ regValue = (((t_uint64)(base.physical + size - 1)) << IHOST_DATA_EXT_BUS_TOP2_24_SHIFT) |
+ (((t_uint64)(base.physical + size - 1)) << IHOST_DATA_EXT_BUS_TOP2_16_SHIFT);
+ WRITE_INDIRECT_HOST_REG(pRegs, top, regValue);
+
+ mpcDesc[coreId].segments[hwSegment].base = base;
+#endif
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_DSP_GetDspDataAddressInfo(t_nmf_core_id coreId, t_uint32 addr, t_dsp_address_info *info)
+{
+ t_uint32 i, j;
+
+ for(j = 0; j < 2; j++)
+ {
+ for(i = 0; i < DATA_BASE_NUMBER; i++)
+ {
+ if(DATA_ADDRESS_BASE[i].startAddress[j] <= addr && addr < DATA_ADDRESS_BASE[i + 1].startAddress[j])
+ {
+ info->segmentType = DATA_ADDRESS_BASE[i].segmentType;
+ info->baseOffset = (addr - DATA_ADDRESS_BASE[i].startAddress[j]) * (2 + j * 2);
+
+ return CM_OK;
+ }
+ }
+ }
+
+ CM_ASSERT(0);
+ //return CM_INVALID_PARAMETER;
+}
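The (2 + j * 2) factor above converts a DSP word offset into host bytes: the j == 0 pass walks the 16-bit bases (2 host bytes per word), the j == 1 pass walks the 24-bit bases (4 host bytes per word). A standalone sketch of the lookup with made-up base addresses and an illustrative guard:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical 16-bit (j == 0) and 24-bit (j == 1) base addresses,
     * standing in for DATA_ADDRESS_BASE[]. */
    const uint32_t base16 = 0x20000, base24 = 0x10000;
    const uint32_t span   = 0x1000;     /* illustrative guard range      */
    const uint32_t addr   = 0x10040;    /* DSP data address to translate */

    for (uint32_t j = 0; j < 2; j++) {
        uint32_t base = (j == 0) ? base16 : base24;
        if (base <= addr && addr < base + span) {
            /* 16-bit word = 2 host bytes, 24-bit word = 4 host bytes. */
            uint32_t byte_offset = (addr - base) * (2 + j * 2);
            printf("matched the %s base: byte offset 0x%x\n",
                   j ? "24-bit" : "16-bit", byte_offset);
        }
    }
    return 0;
}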
+
+static t_cm_error mmdsp_ConfigureAfterBoot(t_nmf_core_id coreId, t_uint8 nbXramBlocks, t_uint8 nbYramBlocks)
+{
+ /* Configure all blocks as X only, except the Y ones */
+ pMmdspRegs[coreId]->mmio_16.config_data_mem.value = (t_uint16)(~(((1U << nbYramBlocks) - 1) << (nbXramBlocks-nbYramBlocks)));
+
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ /* enable write posting */
+ MMDSP_ENABLE_WRITE_POSTING(pMmdspRegs[coreId]);
+#endif
+
+ return CM_OK;
+}
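A quick check of the bank-mask expression above with hypothetical counts (8 TCM banks, 2 of them YRAM): the YRAM banks get their bits cleared, the X-only banks stay set.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const unsigned nb_xram_blocks = 8;   /* hypothetical total TCM bank count */
    const unsigned nb_yram_blocks = 2;   /* hypothetical YRAM bank count      */

    /* Same expression as mmdsp_ConfigureAfterBoot(): one bit per bank,
     * YRAM banks cleared, X-only banks left set. */
    uint16_t mask = (uint16_t)~(((1U << nb_yram_blocks) - 1)
                                << (nb_xram_blocks - nb_yram_blocks));

    printf("config_data_mem = 0x%04x\n", mask);   /* prints 0xff3f */
    return 0;
}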
+
+
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/inc/bfd.h b/drivers/staging/nmf-cm/cm/engine/elf/inc/bfd.h
new file mode 100644
index 00000000000..2bccf9c073b
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/inc/bfd.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Elf bfd relocation.
+ *
+ * \defgroup ELFLOADER MMDSP ELF loader.
+ */
+#ifndef __INC_CM_ELF_BFD_H
+#define __INC_CM_ELF_BFD_H
+
+#include <cm/inc/cm_type.h>
+
+/*
+ * Relocation specification
+ */
+enum complain_overflow
+{
+ /* Do not complain on overflow. */
+ complain_overflow_dont,
+
+ /* Complain if the bitfield overflows, whether it is considered
+ as signed or unsigned. */
+ complain_overflow_bitfield,
+
+ /* Complain if the value overflows when considered as signed
+ number. */
+ complain_overflow_signed,
+
+ /* Complain if the value overflows when considered as an
+ unsigned number. */
+ complain_overflow_unsigned
+};
+
+struct reloc_howto_struct
+{
+ /* The type field has mainly a documentary use - the back end can
+ do what it wants with it, though normally the back end's
+ external idea of a reloc number is stored
+ in this field. For example, a PC relative word relocation
+ in a coff environment has the type 023 - because that's
+ what the outside world calls a R_PCRWORD reloc. */
+ unsigned int type;
+
+ /* The value the final relocation is shifted right by. This drops
+ unwanted data from the relocation. */
+ unsigned int rightshift;
+
+ /* The size of the item to be relocated. This is *not* a
+ power-of-two measure. To get the number of bytes operated
+ on by a type of relocation, use bfd_get_reloc_size. */
+ int size;
+
+ /* The number of bits in the item to be relocated. This is used
+ when doing overflow checking. */
+ unsigned int bitsize;
+
+ /* Notes that the relocation is relative to the location in the
+ data section of the addend. The relocation function will
+ subtract from the relocation value the address of the location
+ being relocated. */
+ t_uint64 pc_relative;
+
+ /* The bit position of the reloc value in the destination.
+ The relocated value is left shifted by this amount. */
+ unsigned int bitpos;
+
+ /* What type of overflow error should be checked for when
+ relocating. */
+ enum complain_overflow complain_on_overflow;
+
+ void (*special_function)(void);
+
+ /* The textual name of the relocation type. */
+ char *name;
+
+ /* Some formats record a relocation addend in the section contents
+ rather than with the relocation. For ELF formats this is the
+ distinction between USE_REL and USE_RELA (though the code checks
+ for USE_REL == 1/0). The value of this field is TRUE if the
+ addend is recorded with the section contents; when performing a
+ partial link (ld -r) the section contents (the data) will be
+ modified. The value of this field is FALSE if addends are
+ recorded with the relocation (in arelent.addend); when performing
+ a partial link the relocation will be modified.
+ All relocations for all ELF USE_RELA targets should set this field
+ to FALSE (values of TRUE should be looked on with suspicion).
+ However, the converse is not true: not all relocations of all ELF
+ USE_REL targets set this field to TRUE. Why this is so is peculiar
+ to each particular target. For relocs that aren't used in partial
+ links (e.g. GOT stuff) it doesn't matter what this is set to. */
+ char partial_inplace;
+
+ /* src_mask selects the part of the instruction (or data) to be used
+ in the relocation sum. If the target relocations don't have an
+ addend in the reloc, eg. ELF USE_REL, src_mask will normally equal
+ dst_mask to extract the addend from the section contents. If
+ relocations do have an addend in the reloc, eg. ELF USE_RELA, this
+ field should be zero. Non-zero values for ELF USE_RELA targets are
+ bogus as in those cases the value in the dst_mask part of the
+ section contents should be treated as garbage. */
+ t_uint64 src_mask;
+
+ /* dst_mask selects which parts of the instruction (or data) are
+ replaced with a relocated value. */
+ t_uint64 dst_mask;
+
+ /* When some formats create PC relative instructions, they leave
+ the value of the pc of the place being relocated in the offset
+ slot of the instruction, so that a PC relative relocation can
+ be made just by adding in an ordinary offset (e.g., sun3 a.out).
+ Some formats leave the displacement part of an instruction
+ empty (e.g., m88k bcs); this flag signals the fact. */
+ char pcrel_offset;
+};
+
+#define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \
+ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC }
+
+#endif
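HOWTO() above is just a positional initializer for struct reloc_howto_struct. A hypothetical table entry, with type number, sizes and masks invented for illustration rather than taken from the MMDSP relocation tables:

/* Sketch only: a made-up relocation table entry built with HOWTO(). */
#include <cm/engine/elf/inc/bfd.h>

static const struct reloc_howto_struct example_howto_table[] = {
    /*    type rshift size bits pcrel bitpos overflow                fn name            inplace src_mask    dst_mask    pcrel_off */
    HOWTO(1,   0,     4,   24,  0,    0,     complain_overflow_dont, 0, "R_EXAMPLE_24", 0,      0x00ffffff, 0x00ffffff, 0),
};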
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/inc/common.h b/drivers/staging/nmf-cm/cm/engine/elf/inc/common.h
new file mode 100644
index 00000000000..c51845d5f96
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/inc/common.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Elf common definition.
+ */
+#ifndef __INC_CM_ELF_COMMON_H
+#define __INC_CM_ELF_COMMON_H
+
+#include <cm/engine/component/inc/nmfheaderabi.h>
+#include <cm/engine/elf/inc/elfabi.h>
+#include <cm/engine/elf/inc/reloc.h>
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/component/inc/description.h>
+#include <cm/engine/utils/inc/string.h>
+
+
+#define MAX_SEGMENT 20 // Fixed size, to avoid allocating them dynamically
+
+struct XXElf;
+
+/**
+ * \brief Structure used as database of pushed component.
+ */
+typedef struct {
+ t_instance_property instanceProperty;
+ t_uint32 magicNumber; //!< Magic Number
+ t_dup_char foundedTemplateName;
+ t_uint32 minStackSize; //!< Minimum stack size
+
+ struct XXElf *ELF;
+
+ t_elfSegment segments[NUMBER_OF_MMDSP_MEMORY];
+
+ t_bool temporaryDescription;
+
+ t_memory_reference memoryForConstruct;
+ t_memory_reference memoryForStart;
+ t_memory_reference memoryForStop;
+ t_memory_reference memoryForDestroy;
+
+ t_uint8 requireNumber; //!< Number of interfaces required by this template
+ t_uint8 attributeNumber; //!< Number of attributes in this template
+ t_uint8 propertyNumber; //!< Number of properties in this template
+ t_uint8 provideNumber; //!< Number of interfaces provided by this template
+
+ t_interface_require *requires; //!< Array of interfaces required by this template
+ t_attribute *attributes; //!< Array of attributes in this template
+ t_property *properties; //!< Array of properties in this template
+ t_interface_provide *provides; //!< Array of interfaces provided by this template
+
+} t_elfdescription;
+
+/**
+ * \brief Temporary structure used as database when pushing component.
+ */
+typedef struct
+{
+ const char *elfdata;
+ const char *sectionData[50]; // TODO: this should really be allocated dynamically.
+
+ t_bool isExecutable;
+
+ t_sint32 nmfSectionIndex;
+ const void *relaNmfSegment, *relaNmfSegmentEnd;
+ const void *relaNmfSegmentSymbols;
+ const char *relaNmfSegmentStrings;
+
+ const t_elf_component_header*elfheader;
+
+
+} t_tmp_elfdescription;
+
+
+t_cm_error ELF64_LoadComponent(
+ t_uint16 e_machine,
+ const char *elfdata,
+ t_elfdescription **elfhandlePtr,
+ t_tmp_elfdescription *elftmp);
+t_cm_error ELF64_ComputeSegment(
+ t_elfdescription *elfhandle,
+ t_tmp_elfdescription *elftmp);
+
+void ELF64_UnloadComponent(
+ t_elfdescription *elfhandle);
+
+t_cm_error ELF64_loadSegment(
+ t_elfdescription *elfhandle,
+ t_memory_handle *memory,
+ t_memory_property property);
+t_cm_error ELF64_relocateSegments(
+ t_memory_handle *memories,
+ t_elfdescription *elf,
+ t_memory_property property,
+ void *cbContext);
+t_cm_error ELF64_getRelocationMemory(
+ t_elfdescription *elfhandle,
+ t_tmp_elfdescription *elftmp,
+ t_uint32 offsetInNmf,
+ t_memory_reference *memory);
+
+const t_elfmemory* MMDSP_getMappingById(t_memory_id memId);
+const t_elfmemory* MMDSP_getMappingByName(const char* sectionName, t_instance_property property);
+void MMDSP_serializeMemories(t_instance_property property,
+ const t_elfmemory** codeMemory, const t_elfmemory** thisMemory);
+void MMDSP_copySection(t_uint32 origAddr, t_uint32 remoteAddr, t_uint32 sizeInByte);
+void MMDSP_bzeroSection(t_uint32 remoteAddr, t_uint32 sizeInByte);
+void MMDSP_loadedSection(t_nmf_core_id coreId, t_memory_id memId, t_memory_handle handle);
+void MMDSP_unloadedSection(t_nmf_core_id coreId, t_memory_id memId, t_memory_handle handle);
+
+void MMDSP_copyCode(t_uint64 * remoteAddr64, const char* origAddr, int nb);
+void MMDSP_copyData24(t_uint32 * remoteAddr32, const char* origAddr, int nb);
+void MMDSP_copyData16(t_uint16 * remoteAddr16, const char* origAddr, int nb);
+
+t_uint32 cm_resolvSymbol(
+ void* context,
+ t_uint32 type,
+ t_dup_char symbolName,
+ char* reloc_addr);
+
+
+#endif
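
The prototypes above imply a parse / size / load / relocate / unload life cycle. The sketch below only illustrates the assumed call order for the parsing part; allocation of the target memories and the actual segment loading are left out.

/* Sketch (assumed call order): parse an MMDSP component and size its segments. */
static t_cm_error inspectComponent(const char *elfdata)
{
    t_elfdescription     *elf;
    t_tmp_elfdescription  tmp;
    t_cm_error            error;

    error = ELF64_LoadComponent(EM_MMDSP_PLUS, elfdata, &elf, &tmp);
    if (error != CM_OK)
        return error;

    /* Compute per-memory segment sizes before any allocation takes place. */
    error = ELF64_ComputeSegment(elf, &tmp);

    /* ELF64_loadSegment() and ELF64_relocateSegments() would be called here,
       once the target memories have been allocated. */

    ELF64_UnloadComponent(elf);
    return error;
}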
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/inc/elfabi.h b/drivers/staging/nmf-cm/cm/engine/elf/inc/elfabi.h
new file mode 100644
index 00000000000..cbcc6db3b9b
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/inc/elfabi.h
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#ifndef _CM_ELF_H
+#define _CM_ELF_H 1
+
+typedef t_uint16 Elf32_Half;
+typedef t_uint16 Elf64_Half;
+
+typedef t_uint32 Elf32_Word;
+typedef t_sint32 Elf32_Sword;
+typedef t_uint32 Elf64_Word;
+typedef t_sint32 Elf64_Sword;
+
+typedef t_uint64 Elf32_Xword;
+typedef t_sint64 Elf32_Sxword;
+typedef t_uint64 Elf64_Xword;
+typedef t_sint64 Elf64_Sxword;
+
+typedef t_uint32 Elf32_Addr;
+typedef t_uint64 Elf64_Addr;
+
+typedef t_uint32 Elf32_Off;
+typedef t_uint64 Elf64_Off;
+
+typedef t_uint16 Elf32_Section;
+typedef t_uint16 Elf64_Section;
+
+typedef Elf32_Half Elf32_Versym;
+typedef Elf64_Half Elf64_Versym;
+
+
+/*********************************************
+ * Header
+ *********************************************/
+#define EI_NIDENT (16) //!< Size of e_ident[]
+
+#define EI_MAG0 0 //!< File identification
+#define ELFMAG0 0x7f
+
+#define EI_MAG1 1 //!< File identification
+#define ELFMAG1 'E'
+
+#define EI_MAG2 2 //!< File identification
+#define ELFMAG2 'L'
+
+#define EI_MAG3 3 //!< File identification
+#define ELFMAG3 'F'
+
+#define EI_CLASS 4 //!< File class
+#define ELFCLASSNONE 0 //!< Invalid class
+#define ELFCLASS32 1 //!< 32-bit objects
+#define ELFCLASS64 2 //!< 64-bit objects
+
+#define EI_DATA 5 //!< Data encoding
+#define ELFDATANONE 0 //!< Invalid data encoding
+#define ELFDATA2LSB 1 //!< 2's complement, little endian
+#define ELFDATA2MSB 2 //!< 2's complement, big endian
+
+#define EI_VERSION 6 //!< File version
+
+#define EI_OSABI 7 //!< OS ABI identification
+#define ELFOSABI_NONE 0 //!< No extension
+#define ELFOSABI_HPUX 1 //!< HP-UX
+#define ELFOSABI_NETBSD 2 //!< NetBSD
+#define ELFOSABI_LINUX 3 //!< Linux
+#define ELFOSABI_SOLARIS 6 //!< Sun Solaris
+#define ELFOSABI_AIX 7 //!< AIX
+#define ELFOSABI_IRIX 8 //!< IRIX
+#define ELFOSABI_FREEBSD 9 //!< FreeBSD
+#define ELFOSABI_TRU64 10 //!< Compaq TRU64 UNIX
+#define ELFOSABI_MODESTO 11 //!< Novell Modesto
+#define ELFOSABI_OPENBSD 12 //!< Open BSD
+#define ELFOSABI_OPENVMS 13 //!< Open VMS
+#define ELFOSABI_NSK 14 //!< HP Non-Stop-Kernel
+
+#define EI_ABIVERSION 8 //!< ABI version
+
+#define EI_PAD 9 //!< Start of padding byte
+
+
+typedef struct
+{
+ unsigned char e_ident[EI_NIDENT]; //!< The initial bytes mark the file as an object file and provide machine-independent data with which to decode and interpret the file's contents
+ Elf32_Half e_type; //!< This member identifies the object file type
+ Elf32_Half e_machine; //!< This member's value specifies the required architecture for an individual file
+ Elf32_Word e_version; //!< This member identifies the object file version
+ Elf32_Addr e_entry; //!< This member gives the virtual address to which the system first transfers control, thus starting the process
+ Elf32_Off e_phoff; //!< This member holds the program header table's file offset in bytes
+ Elf32_Off e_shoff; //!< This member holds the section header table's file offset in bytes
+ Elf32_Word e_flags; //!< This member holds processor-specific flags associated with the file
+ Elf32_Half e_ehsize; //!< This member holds the ELF header's size in bytes
+ Elf32_Half e_phentsize; //!< This member holds the size in bytes of one entry in the file's program header table; all entries are the same size
+ Elf32_Half e_phnum; //!< This member holds the number of entries in the program header table
+ Elf32_Half e_shentsize; //!< This member holds a section header's size in bytes
+ Elf32_Half e_shnum; //!< This member holds the number of entries in the section header table
+ Elf32_Half e_shstrndx; //!< This member holds the section header table index of the entry associated with the section name string table
+} Elf32_Ehdr; //!< 32bit ELF Header
+
+typedef struct
+{
+ unsigned char e_ident[EI_NIDENT]; //!< The initial bytes mark the file as an object file and provide machine-independent data with which to decode and interpret the file's contents
+ Elf64_Half e_type; //!< This member identifies the object file type
+ Elf64_Half e_machine; //!< This member's value specifies the required architecture for an individual file
+ Elf64_Word e_version; //!< This member identifies the object file version
+ Elf64_Addr e_entry; //!< This member gives the virtual address to which the system first transfers control, thus starting the process
+ Elf64_Off e_phoff; //!< This member holds the program header table's file offset in bytes
+ Elf64_Off e_shoff; //!< This member holds the section header table's file offset in bytes
+ Elf64_Word e_flags; //!< This member holds processor-specific flags associated with the file
+ Elf64_Half e_ehsize; //!< This member holds the ELF header's size in bytes
+ Elf64_Half e_phentsize; //!< This member holds the size in bytes of one entry in the file's program header table; all entries are the same size
+ Elf64_Half e_phnum; //!< This member holds the number of entries in the program header table
+ Elf64_Half e_shentsize; //!< This member holds a section header's size in bytes
+ Elf64_Half e_shnum; //!< This member holds the number of entries in the section header table
+ Elf64_Half e_shstrndx; //!< This member holds the section header table index of the entry associated with the section name string table
+} Elf64_Ehdr; //!< 64bit ELF Header
+
+/*
+ * e_type
+ */
+#define ET_NONE 0 //!< No file type
+#define ET_REL 1 //!< Relocatable file
+#define ET_EXEC 2 //!< Executable file
+#define ET_DYN 3 //!< Shared object file
+#define ET_CORE 4 //!< Core file
+#define ET_LOOS 0xfe00 //!< Operating system-specific
+#define ET_HIOS 0xfeff //!< Operating system-specific
+#define ET_LOPROC 0xff00 //!< Processor-specific
+#define ET_HIPROC 0xffff //!< Processor-specific
+
+/*
+ * e_machine
+ */
+#define EM_NONE 0 //!< No machine
+#define EM_M32 1 //!< AT&T WE 32100
+#define EM_SPARC 2 //!< SUN SPARC
+#define EM_386 3 //!< Intel 80386
+#define EM_68K 4 //!< Motorola 68000
+#define EM_88K 5 //!< Motorola 88000
+#define EM_860 7 //!< Intel 80860
+#define EM_MIPS 8 //!< MIPS I architecture
+#define EM_S370 9 //!< IBM System/370
+#define EM_MIPS_RS3_LE 10 //!< MIPS R3000 little-endian
+#define EM_PARISC 15 //!< HPPA
+#define EM_VPP500 17 //!< Fujitsu VPP500
+#define EM_SPARC32PLUS 18 //!< Enhanced instruction set SPARC
+#define EM_960 19 //!< Intel 80960
+#define EM_PPC 20 //!< PowerPC
+#define EM_PPC64 21 //!< 64-bit PowerPC
+#define EM_S390 22 //!< IBM System/390 Processor
+#define EM_V800 36 //!< NEC V800
+#define EM_FR20 37 //!< Fujitsu FR20
+#define EM_RH32 38 //!< TRW RH-32
+#define EM_RCE 39 //!< Motorola RCE
+#define EM_ARM 40 //!< Advanced RISC Machines ARM
+#define EM_FAKE_ALPHA 41 //!< Digital Alpha
+#define EM_SH 42 //!< Hitachi SH
+#define EM_SPARCV9 43 //!< SPARC Version 9
+#define EM_TRICORE 44 //!< Siemens TriCore embedded processor
+#define EM_ARC 45 //!< Argonaut RISC Core, Argonaut Technologies Inc
+#define EM_H8_300 46 //!< Hitachi H8/300
+#define EM_H8_300H 47 //!< Hitachi H8/300H
+#define EM_H8S 48 //!< Hitachi H8S
+#define EM_H8_500 49 //!< Hitachi H8/500
+#define EM_IA_64 50 //!< Intel IA-64 processor architecture
+#define EM_MIPS_X 51 //!< Stanford MIPS-X
+#define EM_COLDFIRE 52 //!< Motorola ColdFire
+#define EM_68HC12 53 //!< Motorola M68HC12
+#define EM_MMA 54 //!< Fujitsu MMA Multimedia Accelerator
+#define EM_PCP 55 //!< Siemens PCP
+#define EM_NCPU 56 //!< Sony nCPU embedded RISC processor
+#define EM_NDR1 57 //!< Denso NDR1 microprocessor
+#define EM_STARCORE 58 //!< Motorola Start*Core processor
+#define EM_ME16 59 //!< Toyota ME16 processor
+#define EM_ST100 60 //!< STMicroelectronics ST100 processor
+#define EM_TINYJ 61 //!< Advanced Logic Corp. TinyJ embedded processor family
+#define EM_X86_64 62 //!< AMD x86-64 architecture
+#define EM_PDSP 63 //!< Sony DSP Processor
+#define EM_PDP10 64 //!< Digital Equipment Corp. PDP-10
+#define EM_PDP11 65 //!< Digital Equipment Corp. PDP-11
+#define EM_FX66 66 //!< Siemens FX66 microcontroller
+#define EM_ST9PLUS 67 //!< STMicroelectronics ST9+ 8/16 bit microcontroller
+#define EM_ST7 68 //!< STMicroelectronics ST7 8-bit microcontroller
+#define EM_68HC16 69 //!< Motorola MC68HC16 Microcontroller
+#define EM_68HC11 70 //!< Motorola MC68HC11 Microcontroller
+#define EM_68HC08 71 //!< Motorola MC68HC08 Microcontroller
+#define EM_68HC05 72 //!< Motorola MC68HC05 Microcontroller
+#define EM_SVX 73 //!< Silicon Graphics SVx
+#define EM_ST19 74 //!< STMicroelectronics ST19 8-bit microcontroller
+#define EM_VAX 75 //!< Digital VAX
+#define EM_CRIS 76 //!< Axis Communications 32-bit embedded processor
+#define EM_JAVELIN 77 //!< Infineon Technologies 32-bit embedded processor
+#define EM_FIREPATH 78 //!< Element 14 64-bit DSP Processor
+#define EM_ZSP 79 //!< LSI Logic 16-bit DSP Processor
+#define EM_MMIX 80 //!< Donald Knuth's educational 64-bit processor
+#define EM_HUANY 81 //!< Harvard University machine-independent object files
+#define EM_PRISM 82 //!< SiTera Prism
+#define EM_AVR 83 //!< Atmel AVR 8-bit microcontroller
+#define EM_FR30 84 //!< Fujitsu FR30
+#define EM_D10V 85 //!< Mitsubishi D10V
+#define EM_D30V 86 //!< Mitsubishi D30V
+#define EM_V850 87 //!< NEC v850
+#define EM_M32R 88 //!< Mitsubishi M32R
+#define EM_MN10300 89 //!< Matsushita MN10300
+#define EM_MN10200 90 //!< Matsushita MN10200
+#define EM_PJ 91 //!< picoJava
+#define EM_OPENRISC 92 //!< OpenRISC 32-bit embedded processor
+#define EM_ARC_A5 93 //!< ARC International ARCompact processor (old spelling/synonym: EM_ARC_A5)
+#define EM_XTENSA 94 //!< Tensilica Xtensa Architecture
+#define EM_VIDEOCORE 95 //!< Alphamosaic VideoCore processor
+#define EM_TMM_GPP 96 //!< Thompson Multimedia General Purpose Processor
+#define EM_NS32K 97 //!< National Semiconductor 32000 series
+#define EM_TPC 98 //!< Tenor Network TPC processor
+#define EM_SNP1K 99 //!< Trebia SNP 1000 processor
+#define EM_ST200 100 //!< STMicroelectronics (www.st.com) ST200 microcontroller
+#define EM_IP2K 101 //!< Ubicom IP2xxx microcontroller family
+#define EM_MAX 102 //!< MAX Processor
+#define EM_CR 103 //!< National Semiconductor CompactRISC microprocessor
+#define EM_F2MC16 104 //!< Fujitsu F2MC16
+#define EM_MSP430 105 //!< Texas Instruments embedded microcontroller msp430
+#define EM_BLACKFIN 106 //!< Analog Devices Blackfin (DSP) processor
+#define EM_SE_C33 107 //!< S1C33 Family of Seiko Epson processors
+#define EM_SEP 108 //!< Sharp embedded microprocessor
+#define EM_ARCA 109 //!< Arca RISC Microprocessor
+#define EM_UNICORE 110 //!< Microprocessor series from PKU-Unity Ltd. and MPRC of Peking University
+#define EM_EXCESS 111 //!< eXcess: 16/32/64-bit configurable embedded CPU
+#define EM_DXP 112 //!< Icera Semiconductor Inc. Deep Execution Processor
+#define EM_ALTERA_NIOS2 113 //!< Altera Nios II soft-core processor
+#define EM_CRX 114 //!< National Semiconductor CompactRISC CRX microprocessor
+#define EM_XGATE 115 //!< Motorola XGATE embedded processor
+#define EM_C166 116 //!< Infineon C16x/XC16x processor
+#define EM_M16C 117 //!< Renesas M16C series microprocessors
+#define EM_DSPIC30F 118 //!< Microchip Technology dsPIC30F Digital Signal Controller
+#define EM_CE 119 //!< Freescale Communication Engine RISC core
+#define EM_M32C 120 //!< Renesas M32C series microprocessors
+#define EM_TSK3000 131 //!< Altium TSK3000 core
+#define EM_RS08 132 //!< Freescale RS08 embedded processor
+#define EM_ECOG2 134 //!< Cyan Technology eCOG2 microprocessor
+#define EM_SCORE7 135 //!< Sunplus S+core7 RISC processor
+#define EM_DSP24 136 //!< New Japan Radio (NJR) 24-bit DSP Processor
+#define EM_VIDEOCORE3 137 //!< Broadcom VideoCore III processor
+#define EM_LATTICEMICO32 138 //!< RISC processor for Lattice FPGA architecture
+#define EM_SE_C17 139 //!< Seiko Epson C17 family
+#define EM_TI_C6000 140 //!< The Texas Instruments TMS320C6000 DSP family
+#define EM_TI_C2000 141 //!< The Texas Instruments TMS320C2000 DSP family
+#define EM_TI_C5500 142 //!< The Texas Instruments TMS320C55x DSP family
+#define EM_MMDSP_PLUS 160 //!< STMicroelectronics 64bit VLIW Data Signal Processor
+#define EM_CYPRESS_M8C 161 //!< Cypress M8C microprocessor
+#define EM_R32C 162 //!< Renesas R32C series microprocessors
+#define EM_TRIMEDIA 163 //!< NXP Semiconductors TriMedia architecture family
+#define EM_QDSP6 164 //!< QUALCOMM DSP6 Processor
+#define EM_8051 165 //!< Intel 8051 and variants
+#define EM_STXP7X 166 //!< STMicroelectronics STxP7x family of configurable and extensible RISC processors
+#define EM_NDS32 167 //!< Andes Technology compact code size embedded RISC processor family
+#define EM_ECOG1 168 //!< Cyan Technology eCOG1X family
+#define EM_ECOG1X 168 //!< Cyan Technology eCOG1X family
+#define EM_MAXQ30 169 //!< Dallas Semiconductor MAXQ30 Core Micro-controllers
+#define EM_XIMO16 170 //!< New Japan Radio (NJR) 16-bit DSP Processor
+#define EM_MANIK 171 //!< M2000 Reconfigurable RISC Microprocessor
+#define EM_CRAYNV2 172 //!< Cray Inc. NV2 vector architecture
+#define EM_RX 173 //!< Renesas RX family
+#define EM_METAG 174 //!< Imagination Technologies META processor architecture
+#define EM_MCST_ELBRUS 175 //!< MCST Elbrus general purpose hardware architecture
+#define EM_ECOG16 176 //!< Cyan Technology eCOG16 family
+#define EM_CR16 177 //!< National Semiconductor CompactRISC CR16 16-bit microprocessor
+#define EM_ETPU 178 //!< Freescale Extended Time Processing Unit
+#define EM_SLE9X 179 //!< Infineon Technologies SLE9X core
+#define EM_AVR32 185 //!< Atmel Corporation 32-bit microprocessor family
+#define EM_STM8 186 //!< STMicroelectronics STM8 8-bit microcontroller
+#define EM_TILE64 187 //!< Tilera TILE64 multicore architecture family
+#define EM_TILEPRO 188 //!< Tilera TILEPro multicore architecture family
+#define EM_MICROBLAZE 189 //!< Xilinx MicroBlaze 32-bit RISC soft processor core
+#define EM_CUDA 190 //!< NVIDIA CUDA architecture
+#define EM_TILEGX 191 //!< Tilera TILE-Gx multicore architecture family
+
+/*
+ * e_version (version)
+ */
+#define EV_NONE 0 //!< Invalid version
+#define EV_CURRENT 1 //!< Current version
+
+
+/*********************************************
+ * Section
+ *********************************************/
+typedef struct
+{
+ Elf32_Word sh_name; //!< This member specifies the name of the section
+ Elf32_Word sh_type; //!< This member categorizes the section's contents and semantics
+ Elf32_Word sh_flags; //!< Sections support 1-bit flags that describe miscellaneous attributes
+ Elf32_Addr sh_addr; //!< If the section will appear in the memory image of a process, this member gives the address at which the section's first byte should reside
+ Elf32_Off sh_offset; //!< This member's value gives the byte offset from the beginning of the file to the first byte in the section
+ Elf32_Word sh_size; //!< This member gives the section's size in bytes
+ Elf32_Word sh_link; //!< This member holds a section header table index link, whose interpretation depends on the section type
+ Elf32_Word sh_info; //!< This member holds extra information, whose interpretation depends on the section type
+ Elf32_Word sh_addralign; //!< Some sections have address alignment constraints
+ Elf32_Word sh_entsize; //!< Some sections hold a table of fixed-size entries, such as a symbol table
+} Elf32_Shdr; //!< 32bit Section header
+
+typedef struct
+{
+ Elf64_Word sh_name; //!< This member specifies the name of the section
+ Elf64_Word sh_type; //!< This member categorizes the section's contents and semantics
+ Elf64_Xword sh_flags; //!< Sections support 1-bit flags that describe miscellaneous attributes
+ Elf64_Addr sh_addr; //!< If the section will appear in the memory image of a process, this member gives the address at which the section's first byte should reside
+ Elf64_Off sh_offset; //!< This member's value gives the byte offset from the beginning of the file to the first byte in the section
+ Elf64_Xword sh_size; //!< This member gives the section's size in bytes
+ Elf64_Word sh_link; //!< This member holds a section header table index link, whose interpretation depends on the section type
+ Elf64_Word sh_info; //!< This member holds extra information, whose interpretation depends on the section type
+ Elf64_Xword sh_addralign; //!< Some sections have address alignment constraints
+ Elf64_Xword sh_entsize; //!< Some sections hold a table of fixed-size entries, such as a symbol table
+} Elf64_Shdr; //!< 64bit Section header
+
+/*
+ * Special Section Indexes
+ */
+#define SHN_UNDEF 0 //!< This value marks an undefined, missing, irrelevant, or otherwise meaningless section reference
+#define SHN_LORESERVE 0xff00 //!< This value specifies the lower bound of the range of reserved indexes
+#define SHN_LOPROC 0xff00 //!< Values in this inclusive range are reserved for processor-specific semantics
+#define SHN_HIPROC 0xff1f //!< Values in this inclusive range are reserved for processor-specific semantics
+#define SHN_LOOS 0xff20 //!< Values in this inclusive range are reserved for operating system-specific semantics
+#define SHN_HIOS 0xff3f //!< Values in this inclusive range are reserved for operating system-specific semantics
+#define SHN_ABS 0xfff1 //!< This value specifies absolute values for the corresponding reference
+#define SHN_COMMON 0xfff2 //!< Symbols defined relative to this section are common symbols
+#define SHN_XINDEX 0xffff //!< This value is an escape value
+#define SHN_HIRESERVE 0xffff //!< This value specifies the upper bound of the range of reserved indexes
+
+/*
+ * sh_type
+ */
+#define SHT_NULL 0 //!< This value marks the section header as inactive
+#define SHT_PROGBITS 1 //!< The section holds information defined by the program
+#define SHT_SYMTAB 2 //!< These sections hold a symbol table
+#define SHT_STRTAB 3 //!< The section holds a string table
+#define SHT_RELA 4 //!< The section holds relocation entries with explicit addends, such as type Elf32_Rela for the 32-bit class of object files or type Elf64_Rela for the 64-bit class of object files
+#define SHT_HASH 5 //!< The section holds a symbol hash table
+#define SHT_DYNAMIC 6 //!< The section holds information for dynamic linking
+#define SHT_NOTE 7 //!< The section holds information that marks the file in some way
+#define SHT_NOBITS 8 //!< A section of this type occupies no space in the file but otherwise resembles SHT_PROGBITS
+#define SHT_REL 9 //!< The section holds relocation entries without explicit addends, such as type Elf32_Rel for the 32-bit class of object files or type Elf64_Rel for the 64-bit class of object files
+#define SHT_SHLIB 10 //!< This section type is reserved but has unspecified semantics
+#define SHT_DYNSYM 11 //!< The section holds a minimal set of dynamic linking symbols
+#define SHT_INIT_ARRAY 14 //!< This section contains an array of pointers to initialization functions
+#define SHT_FINI_ARRAY 15 //!< This section contains an array of pointers to termination functions
+#define SHT_PREINIT_ARRAY 16 //!< This section contains an array of pointers to functions that are invoked before all other initialization functions
+#define SHT_GROUP 17 //!< This section defines a section group
+#define SHT_SYMTAB_SHNDX 18 //!< This section is associated with a section of type SHT_SYMTAB and is required if any of the section header indexes referenced by that symbol table contain the escape value SHN_XINDEX
+#define SHT_LOOS 0x60000000 //!< Values in this inclusive range are reserved for operating system-specific semantics
+#define SHT_HIOS 0x6fffffff //!< Values in this inclusive range are reserved for operating system-specific semantics
+#define SHT_LOPROC 0x70000000 //!< Values in this inclusive range are reserved for processor-specific semantics
+#define SHT_HIPROC 0x7fffffff //!< Values in this inclusive range are reserved for processor-specific semantics
+#define SHT_LOUSER 0x80000000 //!< This value specifies the lower bound of the range of indexes reserved for application programs
+#define SHT_HIUSER 0x8fffffff //!< This value specifies the upper bound of the range of indexes reserved for application programs
+
+/*
+ * sh_flags
+ */
+#define SHF_WRITE 0x1 //!< The section contains data that should be writable during process execution
+#define SHF_ALLOC 0x2 //!< The section occupies memory during process execution
+#define SHF_EXECINSTR 0x4 //!< The section contains executable machine instructions
+#define SHF_MERGE 0x10 //!< The data in the section may be merged to eliminate duplication
+#define SHF_STRINGS 0x20 //!< The data elements in the section consist of null-terminated character strings
+#define SHF_INFO_LINK 0x40 //!< The sh_info field of this section header holds a section header table index
+#define SHF_LINK_ORDER 0x80 //!< This flag adds special ordering requirements for link editors
+#define SHF_OS_NONCONFORMING 0x100 //!< This section requires special OS-specific processing (beyond the standard linking rules) to avoid incorrect behavior
+#define SHF_GROUP 0x200 //!< This section is a member (perhaps the only one) of a section group
+#define SHF_TLS 0x400 //!< This section holds Thread-Local Storage, meaning that each separate execution flow has its own distinct instance of this data
+#define SHF_MASKOS 0x0ff00000 //!< All bits included in this mask are reserved for operating system-specific semantics
+#define SHF_MASKPROC 0xf0000000 //!< All bits included in this mask are reserved for processor-specific semantics
+
+
+/*********************************************
+ * Symbol
+ *********************************************/
+typedef struct
+{
+ Elf32_Word st_name; //!< This member holds an index into the object file's symbol string table, which holds the character representations of the symbol names
+ Elf32_Addr st_value; //!< This member gives the value of the associated symbol
+ Elf32_Word st_size; //!< Many symbols have associated sizes
+ unsigned char st_info; //!< This member specifies the symbol's type and binding attributes
+ unsigned char st_other; //!< This member currently specifies a symbol's visibility
+ Elf32_Section st_shndx; //!< Every symbol table entry is defined in relation to some section
+} Elf32_Sym;
+
+typedef struct
+{
+ Elf64_Word st_name; //!< This member holds an index into the object file's symbol string table, which holds the character representations of the symbol names
+ unsigned char st_info; //!< This member specifies the symbol's type and binding attributes
+ unsigned char st_other; //!< This member currently specifies a symbol's visibility
+ Elf64_Section st_shndx; //!< Every symbol table entry is defined in relation to some section
+ Elf64_Addr st_value; //!< This member gives the value of the associated symbol
+ Elf64_Xword st_size; //!< Many symbols have associated sizes
+} Elf64_Sym;
+
+/*
+ * st_info
+ */
+#define ELF32_ST_BIND(i) ((i)>>4)
+#define ELF32_ST_TYPE(i) ((i)&0xf)
+#define ELF32_ST_INFO(b,t) (((b)<<4)+((t)&0xf))
+
+#define ELF64_ST_BIND(i) ((i)>>4)
+#define ELF64_ST_TYPE(i) ((i)&0xf)
+#define ELF64_ST_INFO(b,t) (((b)<<4)+((t)&0xf))
+
+
+/* st_info (symbol binding) */
+#define STB_LOCAL 0 //!< Local symbols are not visible outside the object file containing their definition
+#define STB_GLOBAL 1 //!< Global symbols are visible to all object files being combined
+#define STB_WEAK 2 //!< Weak symbols resemble global symbols, but their definitions have lower precedence
+#define STB_LOOS 10 //!< Values in this inclusive range are reserved for operating system-specific semantics
+#define STB_HIOS 12 //!< Values in this inclusive range are reserved for operating system-specific semantics
+#define STB_LOPROC 13 //!< Values in this inclusive range are reserved for processor-specific semantics
+#define STB_HIPROC 15 //!< Values in this inclusive range are reserved for processor-specific semantics
+
+/* st_info (symbol type) */
+#define STT_NOTYPE 0 //!< The symbol's type is not specified
+#define STT_OBJECT 1 //!< The symbol is associated with a data object, such as a variable, an array, and so on
+#define STT_FUNC 2 //!< The symbol is associated with a function or other executable code
+#define STT_SECTION 3 //!< The symbol is associated with a section
+#define STT_FILE 4 //!< Conventionally, the symbol's name gives the name of the source file associated with the object file
+#define STT_COMMON 5 //!< The symbol labels an uninitialized common block
+#define STT_TLS 6 //!< The symbol specifies a Thread-Local Storage entity
+#define STT_LOOS 10 //!< Values in this inclusive range are reserved for operating system-specific semantics
+#define STT_HIOS 12 //!< Values in this inclusive range are reserved for operating system-specific semantics
+#define STT_LOPROC 13 //!< Values in this inclusive range are reserved for processor-specific semantics
+#define STT_HIPROC 15 //!< Values in this inclusive range are reserved for processor-specific semantics
+
+/*
+ * st_other
+ */
+#define ELF32_ST_VISIBILITY(o) ((o)&0x3)
+#define ELF64_ST_VISIBILITY(o) ((o)&0x3)
+
+
+#define STV_DEFAULT 0 //!< The visibility of symbols with the STV_DEFAULT attribute is as specified by the symbol's binding type
+#define STV_INTERNAL 1 //!< The meaning of this visibility attribute may be defined by processor supplements to further constrain hidden symbols; generic tools can safely treat internal symbols as hidden
+#define STV_HIDDEN 2 //!< A symbol defined in the current component is hidden if its name is not visible to other components
+#define STV_PROTECTED 3 //!< A symbol defined in the current component is protected if it is visible in other components but not preemptable, meaning that any reference to such a symbol from within the defining component must be resolved to the definition in that component, even if there is a definition in another component that would preempt by the default rules
+
+
+/*********************************************
+ * Relocation
+ *********************************************/
+typedef struct
+{
+ Elf32_Addr r_offset; //!< This member gives the location at which to apply the relocation action
+ Elf32_Word r_info; //!< This member gives both the symbol table index with respect to which the relocation must be made, and the type of relocation to apply
+} Elf32_Rel; //!< 32bits Relocation Entries
+
+typedef struct
+{
+ Elf64_Addr r_offset; //!< This member gives the location at which to apply the relocation action
+ Elf64_Xword r_info; //!< This member gives both the symbol table index with respect to which the relocation must be made, and the type of relocation to apply
+} Elf64_Rel; //!< 64bits Relocation Entries
+
+typedef struct
+{
+ Elf32_Addr r_offset; //!< This member gives the location at which to apply the relocation action
+ Elf32_Word r_info; //!< This member gives both the symbol table index with respect to which the relocation must be made, and the type of relocation to apply
+ Elf32_Sword r_addend; //!< This member specifies a constant addend used to compute the value to be stored into the relocatable field
+} Elf32_Rela; //!< 32bits Relocation Addend Entries
+
+typedef struct
+{
+ Elf64_Addr r_offset; //!< This member gives the location at which to apply the relocation action
+ Elf64_Xword r_info; //!< This member gives both the symbol table index with respect to which the relocation must be made, and the type of relocation to apply
+ Elf64_Sxword r_addend; //!< This member specifies a constant addend used to compute the value to be stored into the relocatable field
+} Elf64_Rela; //!< 64bits Relocation Addend Entries
+
+
+/*
+ * r_info
+ */
+#define ELF32_R_SYM(i) ((i)>>8)
+#define ELF32_R_TYPE(i) ((unsigned char)(i))
+#define ELF32_R_INFO(s,t) (((s)<<8)+(unsigned char)(t))
+
+#define ELF64_R_SYM(i) ((i)>>32)
+#define ELF64_R_TYPE(i) ((i)&0xffffffffL)
+#define ELF64_R_INFO(s,t) (((s)<<32)+((t)&0xffffffffL))
+
+
+
+/*********************************************
+ * Program
+ *********************************************/
+typedef struct
+{
+ Elf32_Word p_type; //!< This member tells what kind of segment this array element describes or how to interpret the array element's information
+ Elf32_Off p_offset; //!< This member gives the offset from the beginning of the file at which the first byte of the segment resides
+ Elf32_Addr p_vaddr; //!< This member gives the virtual address at which the first byte of the segment resides in memory
+ Elf32_Addr p_paddr; //!< On systems for which physical addressing is relevant, this member is reserved for the segment's physical address
+ Elf32_Word p_filesz; //!< This member gives the number of bytes in the file image of the segment; it may be zero
+ Elf32_Word p_memsz; //!< This member gives the number of bytes in the memory image of the segment; it may be zero
+ Elf32_Word p_flags; //!< This member gives flags relevant to the segment
+ Elf32_Word p_align; //!< As ``Program Loading'' describes in this chapter of the processor supplement, loadable process segments must have congruent values for p_vaddr and p_offset, modulo the page size
+} Elf32_Phdr; //!< 32bits Program header
+
+typedef struct
+{
+ Elf64_Word p_type; //!< This member tells what kind of segment this array element describes or how to interpret the array element's information
+ Elf64_Word p_flags; //!< This member gives flags relevant to the segment
+ Elf64_Off p_offset; //!< This member gives the offset from the beginning of the file at which the first byte of the segment resides
+ Elf64_Addr p_vaddr; //!< This member gives the virtual address at which the first byte of the segment resides in memory
+ Elf64_Addr p_paddr; //!< On systems for which physical addressing is relevant, this member is reserved for the segment's physical address
+ Elf64_Xword p_filesz; //!< This member gives the number of bytes in the file image of the segment; it may be zero
+ Elf64_Xword p_memsz; //!< This member gives the number of bytes in the memory image of the segment; it may be zero
+ Elf64_Xword p_align; //!< As ``Program Loading'' describes in this chapter of the processor supplement, loadable process segments must have congruent values for p_vaddr and p_offset, modulo the page size
+} Elf64_Phdr; //!< 64bits Program header
+
+/*
+ * p_type
+ */
+#define PT_NULL 0 //!< The array element is unused; other members' values are undefined
+#define PT_LOAD 1 //!< The array element specifies a loadable segment, described by p_filesz and p_memsz
+#define PT_DYNAMIC 2 //!< The array element specifies dynamic linking information
+#define PT_INTERP 3 //!< The array element specifies the location and size of a null-terminated path name to invoke as an interpreter
+#define PT_NOTE 4 //!< The array element specifies the location and size of auxiliary information
+#define PT_SHLIB 5 //!< This segment type is reserved but has unspecified semantics
+#define PT_PHDR 6 //!< The array element, if present, specifies the location and size of the program header table itself, both in the file and in the memory image of the program
+#define PT_TLS 7 //!< The array element specifies the Thread-Local Storage template
+#define PT_LOOS 0x60000000 //!< Values in this inclusive range are reserved for operating system-specific semantics
+#define PT_HIOS 0x6fffffff //!< Values in this inclusive range are reserved for operating system-specific semantics
+#define PT_LOPROC 0x70000000 //!< Values in this inclusive range are reserved for processor-specific semantics
+#define PT_HIPROC 0x7fffffff //!< Values in this inclusive range are reserved for processor-specific semantics
+
+/*
+ * p_flags
+ */
+#define PF_X (1 << 0) //!< Execute
+#define PF_W (1 << 1) //!< Write
+#define PF_R (1 << 2) //!< Read
+#define PF_MASKOS 0x0ff00000 //!< Unspecified
+#define PF_MASKPROC 0xf0000000 //!< Unspecified
+
+#endif
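
As a quick illustration of how the r_info and st_info accessors above combine, the sketch below walks one SHT_RELA section of an already byte-swapped (host-endian) image; the real loader additionally swaps every field, since MMDSP images are big-endian, and validates all bounds.

/* Sketch: decode the relocation entries of one SHT_RELA section. */
static void dumpRelocations(const Elf64_Shdr *relaSh,
                            const Elf64_Rela *rela,
                            const Elf64_Sym  *symtab)
{
    Elf64_Xword i, n;

    if (relaSh->sh_type != SHT_RELA || relaSh->sh_entsize == 0)
        return;

    n = relaSh->sh_size / relaSh->sh_entsize;
    for (i = 0; i < n; i++) {
        Elf64_Xword symIdx  = ELF64_R_SYM(rela[i].r_info);   /* symbol table index */
        Elf64_Xword relType = ELF64_R_TYPE(rela[i].r_info);  /* processor-specific type */
        const Elf64_Sym *sym = &symtab[symIdx];

        /* st_info packs the binding in the high nibble and the type in the low one. */
        if (ELF64_ST_BIND(sym->st_info) == STB_GLOBAL &&
            ELF64_ST_TYPE(sym->st_info) == STT_FUNC) {
            /* A global function symbol is referenced at rela[i].r_offset and would
               be patched using sym->st_value + rela[i].r_addend for type relType. */
        }
        (void)relType;
    }
}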
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/inc/elfapi.h b/drivers/staging/nmf-cm/cm/engine/elf/inc/elfapi.h
new file mode 100644
index 00000000000..cce6d158b4e
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/inc/elfapi.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief ELF loader internal methods.
+ *
+ * \defgroup ELFLOADER MMDSP ELF loader.
+ */
+#ifndef __INC_CM_ELFLOADER_H
+#define __INC_CM_ELFLOADER_H
+
+#include <cm/engine/elf/inc/common.h>
+
+/*!
+ * \internal
+ * \brief ELF Parsing & checking
+ * \ingroup ELFLOADER
+ */
+t_cm_error cm_ELF_CheckFile(
+ const char *elfdata,
+ t_bool temporaryDescription,
+ t_elfdescription **elfhandlePtr);
+
+void cm_ELF_ReleaseDescription(
+ t_uint32 requireNumber, t_interface_require *requires,
+ t_uint32 attributeNumber, t_attribute *attributes,
+ t_uint32 propertyNumber, t_property *properties,
+ t_uint32 provideNumber, t_interface_provide *provides);
+
+/*!
+ * \internal
+ * \brief ELF closing
+ * \ingroup ELFLOADER
+ */
+void cm_ELF_CloseFile(
+ t_bool temporaryDescription,
+ t_elfdescription *elfhandle);
+
+/*!
+ * \internal
+ * \brief Load a component template shared memories.
+ *
+ * \note In case of error, part of the memory may already have been allocated and must be freed by calling cm_DSPABI_FreeTemplate.
+ */
+t_cm_error cm_ELF_LoadTemplate(
+ t_cm_domain_id domainId,
+ t_elfdescription *elfhandle,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY],
+ t_bool isSingleton);
+
+/*!
+ * \internal
+ * \brief Clean cache memory of a component template shared code.
+ */
+void cm_ELF_FlushTemplate(
+ t_nmf_core_id coreId,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY]);
+
+void cm_ELF_FlushInstance(
+ t_nmf_core_id coreId,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY],
+ t_memory_handle privateMemories[NUMBER_OF_MMDSP_MEMORY]);
+
+/*!
+ * \internal
+ * \brief Load a component instance private memories.
+ *
+ * \note In case of error, part of the memory may already have been allocated and must be freed by calling cm_DSPABI_FreeInstance.
+ */
+t_cm_error cm_ELF_LoadInstance(
+ t_cm_domain_id domainId,
+ t_elfdescription *elfhandle,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY],
+ t_memory_handle privateMemories[NUMBER_OF_MMDSP_MEMORY],
+ t_bool isSingleton);
+
+void cm_ELF_FreeInstance(
+ t_nmf_core_id coreId,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY],
+ t_memory_handle privateMemories[NUMBER_OF_MMDSP_MEMORY]);
+void cm_ELF_FreeTemplate(
+ t_nmf_core_id coreId,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY]);
+
+
+t_cm_error cm_ELF_relocateSharedSegments(
+ t_memory_handle *memories,
+ t_elfdescription *elfhandle,
+ void *cbContext);
+t_cm_error cm_ELF_relocatePrivateSegments(
+ t_memory_handle *memories,
+ t_elfdescription *elfhandle,
+ void *cbContext);
+void cm_ELF_performRelocation(
+ t_uint32 type,
+ const char *symbol_name,
+ t_uint32 symbol_addr,
+ char *reloc_addr);
+t_cm_error cm_ELF_GetMemory(
+ t_elfdescription *elf,
+ t_tmp_elfdescription *elftmp,
+ t_uint32 address,
+ t_memory_purpose purpose,
+ t_memory_reference *memory);
+
+
+#include <cm/engine/component/inc/component_type.h>
+
+t_cm_error cm_DSPABI_AddLoadMap(
+ t_cm_domain_id domainId,
+ const char* templateName,
+ const char* localname,
+ t_memory_handle *memories,
+ void *componentHandle);
+t_cm_error cm_DSPABI_RemoveLoadMap(
+ t_cm_domain_id domainId,
+ const char* templateName,
+ t_memory_handle *memories,
+ const char* localname,
+ void *componentHandle);
+
+#endif
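
A minimal sketch of how these entry points are expected to chain together when a template is pushed; the order follows the prototypes above, but the singleton handling, the partial-cleanup path mentioned in the notes (cm_DSPABI_FreeTemplate) and most error handling are omitted.

/* Sketch (assumed call order) for loading one component template. */
static t_cm_error pushTemplate(t_cm_domain_id domainId, t_nmf_core_id coreId,
                               const char *elfdata, void *cbContext)
{
    t_elfdescription *elf;
    t_memory_handle   sharedMemories[NUMBER_OF_MMDSP_MEMORY];
    t_cm_error        error;

    if ((error = cm_ELF_CheckFile(elfdata, /* temporaryDescription */ 0, &elf)) != CM_OK)
        return error;

    if ((error = cm_ELF_LoadTemplate(domainId, elf, sharedMemories, /* isSingleton */ 0)) != CM_OK)
        goto out;   /* partially allocated memories would need freeing here */

    if ((error = cm_ELF_relocateSharedSegments(sharedMemories, elf, cbContext)) != CM_OK)
        goto out;

    /* Make the shared code visible to the DSP. */
    cm_ELF_FlushTemplate(coreId, sharedMemories);

out:
    cm_ELF_CloseFile(/* temporaryDescription */ 0, elf);
    return error;
}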
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/inc/memory.h b/drivers/staging/nmf-cm/cm/engine/elf/inc/memory.h
new file mode 100644
index 00000000000..9eab94f173c
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/inc/memory.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Elf memory.
+ */
+#ifndef __INC_CM_ELF_MEMORY_H
+#define __INC_CM_ELF_MEMORY_H
+
+#include <cm/engine/dsp/inc/dsp.h>
+
+/**
+ * \brief Memory identifier
+ */
+typedef t_uint8 t_memory_id;
+
+/**
+ * \brief Instance property (multi-instance vs. singleton)
+ */
+typedef enum {
+ MEM_FOR_MULTIINSTANCE,
+ MEM_FOR_SINGLETON,
+ MEM_FOR_LAST
+} t_instance_property;
+
+/**
+ * \brief Memory purpose (for processors with a different address space for code and data)
+ */
+typedef enum {
+ MEM_CODE,
+ MEM_DATA
+} t_memory_purpose;
+
+/**
+ * \brief Memory property
+ */
+typedef enum {
+ MEM_PRIVATE,
+ MEM_SHARABLE,
+} t_memory_property;
+
+/**
+ * \brief Elf memory mapping description
+ */
+typedef struct
+{
+ t_memory_id id;
+ t_dsp_memory_type_id dspMemType;
+ t_uint32 startAddr;
+ t_cm_memory_alignment memAlignement;
+ t_memory_property property;
+ t_memory_purpose purpose;
+ t_uint8 fileEntSize;
+ t_uint8 memEntSize;
+ char* memoryName;
+} t_elfmemory;
+
+#define NUMBER_OF_MMDSP_MEMORY 15
+
+/*
+ * \brief Elf segment description
+ */
+typedef struct {
+ // Data in Bytes
+ t_uint32 sumSize;
+ t_bool sumSizeSetted;
+ t_cm_logical_address hostAddr; // Valid only if section Load in memory
+ t_uint32 maxAlign;
+ // Data in word
+ t_uint32 mpcAddr; // Valid only if section Load in memory
+} t_elfSegment;
+
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/inc/mmdsp-loadmap.h b/drivers/staging/nmf-cm/cm/engine/elf/inc/mmdsp-loadmap.h
new file mode 100644
index 00000000000..bb65c0b1244
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/inc/mmdsp-loadmap.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Elf writer internal methods.
+ *
+ * \defgroup LOADMAP MMDSP ELF writer (a linker in fact).
+ */
+#ifndef __INC_CM_LOADMAP_H
+#define __INC_CM_LOADMAP_H
+
+#include <cm/inc/cm_type.h>
+
+/*
+ * Align with loadmap :
+ * https://codex.cro.st.com/wiki/index.php?pagename=Specification%2FLoadmap%2Fv1.2&group_id=310
+ */
+#define LOADMAP_MAGIC_NUMBER 0xFBBF
+
+#define LOADMAP_VERSION_MSB 1
+#define LOADMAP_VERSION_LSB 2
+
+struct LoadMapItem
+{
+ const char* pSolibFilename; // Filename of shared library object
+ void* pAddrProg; // Load address of program section
+ void* pAddrEmbProg; // Load address of embedded program section
+ void* pThis; // Data base address of component instance
+ void* pARMThis; // ARM component debug ID
+ const char* pComponentName; // Pretty name of the component instance, NULL if none.
+ struct LoadMapItem* pNextItem;// Pointer to the next list item, NULL if last one.
+ void* pXROM; // Start address of XROM
+ void* pYROM; // Start address of YROM
+};
+
+struct LoadMapHdr
+{
+ t_uint16 nMagicNumber; // Equal to 0xFBBF.
+ t_uint16 nVersion; // The version of the load map format.
+ t_uint32 nRevision; // A counter incremented at each load map list modification.
+ struct LoadMapItem* pFirstItem;// Pointer to the first item, NULL if no shared library is loaded.
+};
+
+#endif /* __INC_CM_LOADMAP_H */
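
Because pFirstItem/pNextItem form a plain singly linked list, a host-side reader (a debugger, for instance) can walk the load map as sketched below; list locking and revision checking are out of scope here.

/* Sketch: count the shared library objects currently described by a load map. */
static t_uint32 countLoadMapItems(const struct LoadMapHdr *hdr)
{
    const struct LoadMapItem *item;
    t_uint32 n = 0;

    if (hdr == NULL || hdr->nMagicNumber != LOADMAP_MAGIC_NUMBER)
        return 0;

    for (item = hdr->pFirstItem; item != NULL; item = item->pNextItem)
        n++;

    return n;
}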
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/inc/mmdsp.h b/drivers/staging/nmf-cm/cm/engine/elf/inc/mmdsp.h
new file mode 100644
index 00000000000..1662def6c1a
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/inc/mmdsp.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief MMDSP elf.
+ */
+#ifndef __INC_CM_ELF_MMDSP_H
+#define __INC_CM_ELF_MMDSP_H
+
+#include <cm/engine/elf/inc/common.h>
+
+#define CODE_MEMORY_INDEX 0
+#define ECODE_MEMORY_INDEX 7
+
+#define XROM_MEMORY_INDEX 1
+#define YROM_MEMORY_INDEX 2
+#define PRIVATE_DATA_MEMORY_INDEX 8
+#define SHARE_DATA_MEMORY_INDEX 1
+
+/*
+ * Relocation
+ */
+#define R_MMDSP_IMM16 5
+#define R_MMDSP_IMM20_16 6
+#define R_MMDSP_IMM20_4 7
+#define R_MMDSP_24 13
+
+#endif
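
To connect these codes with the generic ELF definitions: the processor-specific relocation type extracted by ELF64_R_TYPE() is one of the R_MMDSP_* values above, as the sketch below shows (the actual patching is done by MMDSP_performRelocation and is not reproduced here).

/* Sketch: name the MMDSP relocation type carried by an Elf64_Rela r_info field. */
static const char *mmdspRelocName(Elf64_Xword r_info)
{
    switch (ELF64_R_TYPE(r_info)) {
    case R_MMDSP_IMM16:    return "R_MMDSP_IMM16";
    case R_MMDSP_IMM20_16: return "R_MMDSP_IMM20_16";
    case R_MMDSP_IMM20_4:  return "R_MMDSP_IMM20_4";
    case R_MMDSP_24:       return "R_MMDSP_24";
    default:               return "unknown";
    }
}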
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/inc/mpcal.h b/drivers/staging/nmf-cm/cm/engine/elf/inc/mpcal.h
new file mode 100644
index 00000000000..718b7f61ceb
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/inc/mpcal.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief MPC Abstraction Layer.
+ *
+ * \defgroup MPCAL MPC Abstraction Layer.
+ */
+#ifndef __INC_CM_DSP_MPCAL_H
+#define __INC_CM_DSP_MPCAL_H
+
+#include <cm/inc/cm_type.h>
+#include <share/inc/nmf.h>
+
+#include <cm/engine/elf/inc/common.h>
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/inc/reloc.h b/drivers/staging/nmf-cm/cm/engine/elf/inc/reloc.h
new file mode 100644
index 00000000000..b38be48d689
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/inc/reloc.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Elf relocation.
+ */
+#ifndef __INC_CM_ELF_RELOC_H
+#define __INC_CM_ELF_RELOC_H
+
+
+void MMDSP_performRelocation(
+ t_uint32 type,
+ const char* symbol_name,
+ t_uint32 symbol_addr,
+ char* reloc_addr,
+ const char* inPlaceAddr,
+ t_uint32 reloc_offset);
+
+/*
+ *
+ * Return:
+ * 0x0 returned if symbol not found
+ * 0xFFFFFFFE returned if out of memory
+ * 0xFFFFFFFF returned if symbol found in static required binding
+ */
+typedef t_uint32 (*CBresolvSymbol)(
+ void* context,
+ t_uint32 type,
+ const char* symbolName,
+ char* reloc_addr);
+
+#endif
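
A skeleton resolver matching the CBresolvSymbol typedef and its documented return codes; lookupMpcSymbol() is a hypothetical helper standing in for whatever symbol table the caller maintains.

/* Hypothetical helper: returns the MPC address of symbolName, or 0 if unknown. */
extern t_uint32 lookupMpcSymbol(void *context, const char *symbolName);

/* Sketch of a CBresolvSymbol implementation honouring the documented return codes. */
static t_uint32 exampleResolvSymbol(void *context, t_uint32 type,
                                    const char *symbolName, char *reloc_addr)
{
    t_uint32 address = lookupMpcSymbol(context, symbolName);

    if (address == 0)
        return 0x0;             /* symbol not found */

    (void)type;                 /* relocation type, e.g. an R_MMDSP_* value      */
    (void)reloc_addr;           /* place to patch, left to the relocation engine */
    return address;             /* resolved value (assumption: returned as-is)   */
}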
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/src/elf64.c b/drivers/staging/nmf-cm/cm/engine/elf/src/elf64.c
new file mode 100644
index 00000000000..2e0f5928ffd
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/src/elf64.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/elf/inc/common.h>
+#include <cm/engine/elf/inc/elfabi.h>
+
+#include <cm/engine/utils/inc/swap.h>
+#include <cm/engine/trace/inc/trace.h>
+
+typedef Elf64_Half ElfXX_Half;
+typedef Elf64_Word ElfXX_Word;
+typedef Elf64_Addr ElfXX_Addr;
+typedef Elf64_Off ElfXX_Off;
+
+typedef Elf64_Xword ElfXX_Xword;
+
+typedef Elf64_Ehdr ElfXX_Ehdr;
+typedef Elf64_Shdr ElfXX_Shdr;
+typedef Elf64_Sym ElfXX_Sym;
+typedef Elf64_Rela ElfXX_Rela;
+
+#undef ELFXX_R_SYM
+#define ELFXX_R_SYM ELF64_R_SYM
+#undef ELFXX_R_TYPE
+#define ELFXX_R_TYPE ELF64_R_TYPE
+#undef ELFXX_R_INFO
+#define ELFXX_R_INFO ELF64_R_INFO
+
+// TODO Here we assume big endian (MMDSP !)
+static Elf64_Half swapHalf(Elf64_Half half)
+{
+ return (Elf64_Half)swap16(half);
+}
+
+static Elf64_Word swapWord(Elf64_Word word)
+{
+ return (Elf64_Word)swap32(word);
+}
+
+static Elf64_Xword swapXword(Elf64_Xword xword)
+{
+ return (Elf64_Xword)swap64(xword);
+}
+
+#include "elfxx.c"
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/src/elfload.c b/drivers/staging/nmf-cm/cm/engine/elf/src/elfload.c
new file mode 100644
index 00000000000..274a1b6b59f
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/src/elfload.c
@@ -0,0 +1,773 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ */
+#include <cm/engine/elf/inc/elfapi.h>
+#include <cm/engine/elf/inc/mpcal.h>
+#include <cm/inc/cm_def.h>
+
+//#include <cm/engine/component/inc/introspection.h>
+
+#include <cm/engine/utils/inc/mem.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/utils/inc/swap.h>
+#include <cm/engine/utils/inc/string.h>
+
+static void* getElfHeaderReference(t_tmp_elfdescription *elftmp, void* hdrref)
+{
+ if(hdrref != NULL)
+ return (void*)((int)swap32((t_uint32)hdrref) + (int)elftmp->elfheader);
+ else
+ return NULL;
+}
+
+static t_dup_char copyElfString(t_tmp_elfdescription *elftmp, char* idx)
+{
+ return cm_StringDuplicate((char*)getElfHeaderReference(elftmp, (void*)idx));
+}
+
+static t_cm_error getMemoryOffset(
+ t_elfdescription *elfhandle,
+ t_tmp_elfdescription *elftmp,
+ t_memory_purpose purpose,
+ const t_uint32 *addressInNmf,
+ t_memory_reference *memory) {
+
+ if(elftmp->isExecutable) {
+ return cm_ELF_GetMemory(elfhandle, elftmp,
+ swap32(*addressInNmf),
+ purpose,
+ memory);
+ } else {
+ return ELF64_getRelocationMemory(elfhandle, elftmp,
+ (t_uint32)addressInNmf - (t_uint32)elftmp->elfheader,
+ memory);
+ }
+}
+
+static t_cm_error getAdressForExecutableOffsetElsewhere(
+ t_elfdescription *elfhandle,
+ t_tmp_elfdescription *elftmp,
+ const t_uint32 *addressInNmf,
+ t_memory_reference *memory) {
+ t_uint32 address;
+
+ address = swap32(*addressInNmf);
+ if(address == 0xFFFFFFFF)
+ {
+ memory->offset = 0x0;
+ memory->memory = NULL;
+ return CM_OK;
+ }
+
+ if(elftmp->isExecutable)
+ {
+ memory->offset = address;
+ memory->memory = NULL;
+ return CM_OK;
+ }
+
+ // Errors logged in elfhandle by the previous call will be checked in loadTemplate
+ return ELF64_getRelocationMemory(elfhandle, elftmp,
+ (t_uint32)addressInNmf - (t_uint32)elftmp->elfheader,
+ memory);
+}
+
+/*
+ * Interface Management
+ */
+static t_interface_description* interfaceList = NULL;
+
+static t_interface_description* getInterfaceDescription(t_tmp_elfdescription *elftmp, t_elf_interface_description* elfitf) {
+ t_dup_char itfType;
+ t_interface_description* itf;
+ int i;
+
+ itfType = copyElfString(elftmp, elfitf->type);
+ if(itfType == NULL)
+ return NULL;
+
+ // Search whether the interface is already loaded
+ for(itf = interfaceList; itf != NULL; itf = itf->next) {
+ if(itf->type == itfType) {
+ if (itf->methodNumber != elfitf->methodNumber) {
+ ERROR("When loading component template %s:\n\tNumber of methods in interface type %s\n\tdiffers from previous declaration: was %d, found %d\n",
+ getElfHeaderReference(elftmp, (void*)elftmp->elfheader->templateName), itfType, itf->methodNumber, elfitf->methodNumber, 0, 0);
+ //Do not fail for now for compatibility reason
+ //goto out_itf_type;
+ }
+ if (cmIntensiveCheckState) {
+ for(i = 0; i < itf->methodNumber; i++) {
+ if (cm_StringCompare(itf->methodNames[i], getElfHeaderReference(elftmp, (void*)elfitf->methodNames[i]), MAX_INTERNAL_STRING_LENGTH) != 0) {
+ ERROR("When loading component template %s:\n"
+ "\tName of method number %d in interface type %s\n"
+ "\tdiffers from previous declaration: previous name was %s, new name found is %s\n",
+ getElfHeaderReference(elftmp, (void*)elftmp->elfheader->templateName), i,
+ itfType, itf->methodNames[i],
+ getElfHeaderReference(elftmp, (void*)elfitf->methodNames[i]), 0);
+ //Do not fail for now for compatibility reason
+ //goto out_itf_type;
+ }
+ }
+ }
+ itf->referenceCounter++;
+ cm_StringRelease(itfType);
+ return itf;
+ }
+ }
+
+ // Create a new interface description if it does not exist yet
+ itf = (t_interface_description*)OSAL_Alloc_Zero(sizeof(t_interface_description) + sizeof(t_dup_char) * (elfitf->methodNumber - 1));
+ if(itf == NULL)
+ goto out_itf_type;
+ itf->referenceCounter = 1;
+ itf->type = itfType;
+ itf->methodNumber = elfitf->methodNumber;
+ for(i = 0; i < itf->methodNumber; i++) {
+ itf->methodNames[i] = copyElfString(elftmp, elfitf->methodNames[i]);
+ if(itf->methodNames[i] == NULL)
+ goto out_method;
+ }
+
+ // Put it at the top of the list
+ itf->next = interfaceList;
+ interfaceList = itf;
+
+ return itf;
+
+out_method:
+ for(i = 0; i < itf->methodNumber; i++)
+ cm_StringRelease(itf->methodNames[i]);
+ OSAL_Free(itf);
+out_itf_type:
+ cm_StringRelease(itfType);
+ return NULL;
+}
+
+static void releaseInterfaceDescription(t_interface_description* itf) {
+ if(itf == NULL)
+ return;
+
+ if(--itf->referenceCounter == 0) {
+ int i;
+
+ // Remove it from list
+ if(interfaceList == itf) {
+ interfaceList = interfaceList->next;
+ } else {
+ t_interface_description* prev = interfaceList;
+ while(prev->next != itf)
+ prev = prev->next;
+ prev->next = itf->next;
+ }
+
+ // Destroy interface description
+ for(i = 0; i < itf->methodNumber; i++) {
+ cm_StringRelease(itf->methodNames[i]);
+ }
+ cm_StringRelease(itf->type);
+ OSAL_Free(itf);
+ }
+}
+
+
+t_cm_error cm_ELF_CheckFile(
+ const char *elfdata,
+ t_bool temporaryDescription,
+ t_elfdescription **elfhandlePtr)
+{
+ t_elfdescription *elfhandle;
+ t_tmp_elfdescription elftmp;
+ t_cm_error error;
+ t_uint32 version;
+ t_uint32 compatibleVersion;
+ int i, j, k;
+
+ /*
+ * Sanity check
+ */
+ if (elfdata[EI_MAG0] != ELFMAG0 ||
+ elfdata[EI_MAG1] != ELFMAG1 ||
+ elfdata[EI_MAG2] != ELFMAG2 ||
+ elfdata[EI_MAG3] != ELFMAG3 ||
+ elfdata[EI_CLASS] != ELFCLASS64)
+ {
+ ERROR("CM_INVALID_ELF_FILE: component file is not a MMDSP ELF file\n", 0, 0, 0, 0, 0, 0);
+ return CM_INVALID_ELF_FILE;
+ }
+
+ /*
+ * Create elf data
+ */
+ if((error = ELF64_LoadComponent(EM_MMDSP_PLUS, elfdata, elfhandlePtr, &elftmp)) != CM_OK)
+ return error;
+
+ elfhandle = *elfhandlePtr;
+
+ elfhandle->temporaryDescription = temporaryDescription;
+
+ version = swap32(elftmp.elfheader->nmfVersion);
+
+ compatibleVersion = (VERSION_MAJOR(version) == VERSION_MAJOR(NMF_VERSION));
+ if(compatibleVersion)
+ {
+ switch(VERSION_MINOR(NMF_VERSION))
+ {
+ case 10: // Compatible with 2.9, 2.10
+ compatibleVersion =
+ (VERSION_MINOR(version) == 9) ||
+ (VERSION_MINOR(version) == 10);
+ break;
+ default: // Strict compatibility 2.x == 2.x
+ compatibleVersion = (VERSION_MINOR(version) == VERSION_MINOR(NMF_VERSION));
+ }
+ }
+
+ if(! compatibleVersion)
+ {
+ ERROR("CM_INVALID_ELF_FILE: incompatible version for Component %d.%d.x != CM:%d.%d.x\n",
+ VERSION_MAJOR(version), VERSION_MINOR(version),
+ VERSION_MAJOR(NMF_VERSION), VERSION_MINOR(NMF_VERSION), 0, 0);
+ error = CM_INVALID_ELF_FILE;
+ goto onerror;
+ }
+
+
+ /*
+ * Commented out since it generates too much noise
+ if(VERSION_PATCH(version) != VERSION_PATCH(NMF_VERSION))
+ {
+ WARNING("CM_INVALID_ELF_FILE: incompatible version, Component:%d.%d.%d != CM:%d.%d.%d\n",
+ VERSION_MAJOR(version), VERSION_MINOR(version), VERSION_PATCH(version),
+ VERSION_MAJOR(NMF_VERSION), VERSION_MINOR(NMF_VERSION), VERSION_PATCH(NMF_VERSION));
+ }
+ */
+
+ if((error = ELF64_ComputeSegment(elfhandle, &elftmp)) != CM_OK)
+ goto onerror;
+
+ //
+ elfhandle->foundedTemplateName = copyElfString(&elftmp, elftmp.elfheader->templateName);
+ if(elfhandle->foundedTemplateName == NULL)
+ goto oom;
+ elfhandle->minStackSize = swap32(elftmp.elfheader->minStackSize);
+
+ // Get Life-cycle memory
+ if((error = getAdressForExecutableOffsetElsewhere(elfhandle, &elftmp, &elftmp.elfheader->LCCConstruct, &elfhandle->memoryForConstruct)) != CM_OK)
+ goto onerror;
+ if((error = getAdressForExecutableOffsetElsewhere(elfhandle, &elftmp, &elftmp.elfheader->LCCStart, &elfhandle->memoryForStart)) != CM_OK)
+ goto onerror;
+ if((error = getAdressForExecutableOffsetElsewhere(elfhandle, &elftmp, &elftmp.elfheader->LCCStop, &elfhandle->memoryForStop)) != CM_OK)
+ goto onerror;
+ if((error = getAdressForExecutableOffsetElsewhere(elfhandle, &elftmp, &elftmp.elfheader->LCCDestroy, &elfhandle->memoryForDestroy)) != CM_OK)
+ goto onerror;
+
+ // Copy attributes information
+ elfhandle->attributeNumber = swap32(elftmp.elfheader->attributeNumber);
+ if(elfhandle->attributeNumber > 0)
+ {
+ elfhandle->attributes =
+ (t_attribute*)OSAL_Alloc_Zero(sizeof(t_attribute) * elfhandle->attributeNumber);
+ if(elfhandle->attributes == NULL)
+ goto oom;
+
+ if(elfhandle->attributeNumber > 0)
+ {
+ t_elf_attribute *attributes = (t_elf_attribute*)getElfHeaderReference(&elftmp, (void*)elftmp.elfheader->attributes);
+
+ for(i = 0; i < elfhandle->attributeNumber; i++)
+ {
+ elfhandle->attributes[i].name = copyElfString(&elftmp, attributes[i].name);
+ if(elfhandle->attributes[i].name == NULL)
+ goto oom;
+
+ if((error = getMemoryOffset(elfhandle, &elftmp,
+ MEM_DATA,
+ &attributes[i].symbols,
+ &elfhandle->attributes[i].memory)) != CM_OK)
+ goto onerror;
+ LOG_INTERNAL(2, " attribute %s mem=%s offset=%x\n",
+ elfhandle->attributes[i].name,
+ elfhandle->attributes[i].memory.memory->memoryName,
+ elfhandle->attributes[i].memory.offset,
+ 0, 0, 0);
+ }
+ }
+ }
+
+ // Copy properties information
+ elfhandle->propertyNumber = swap32(elftmp.elfheader->propertyNumber);
+ if(elfhandle->propertyNumber > 0)
+ {
+ elfhandle->properties =
+ (t_property*)OSAL_Alloc_Zero(sizeof(t_property) * elfhandle->propertyNumber);
+ if(elfhandle->properties == NULL)
+ goto oom;
+
+ if(elfhandle->propertyNumber > 0)
+ {
+ t_elf_property *properties = (t_elf_property*)getElfHeaderReference(&elftmp, (void*)elftmp.elfheader->properties);
+
+ for(i = 0; i < elfhandle->propertyNumber; i++)
+ {
+ elfhandle->properties[i].name = copyElfString(&elftmp, properties[i].name);
+ if(elfhandle->properties[i].name == NULL)
+ goto oom;
+
+ elfhandle->properties[i].value = copyElfString(&elftmp, properties[i].value);
+ if(elfhandle->properties[i].value == NULL)
+ goto oom;
+
+ LOG_INTERNAL(3, " property %s = %s\n",
+ elfhandle->properties[i].name,
+ elfhandle->properties[i].value,
+ 0, 0, 0, 0);
+ }
+ }
+ }
+
+ // Copy requires information
+ elfhandle->requireNumber = swap32(elftmp.elfheader->requireNumber);
+ if(elfhandle->requireNumber > 0)
+ {
+ char *ref = getElfHeaderReference(&elftmp, (void*)elftmp.elfheader->requires);
+
+ elfhandle->requires = (t_interface_require*)OSAL_Alloc_Zero(sizeof(t_interface_require) * elfhandle->requireNumber);
+ if(elfhandle->requires == NULL)
+ goto oom;
+
+ for(i = 0; i < elfhandle->requireNumber; i++)
+ {
+ t_elf_required_interface *require = (t_elf_required_interface*)ref;
+ t_elf_interface_description *interface = (t_elf_interface_description*)getElfHeaderReference(&elftmp, (void*)require->interface);
+
+ elfhandle->requires[i].name = copyElfString(&elftmp, require->name);
+ if(elfhandle->requires[i].name == NULL)
+ goto oom;
+
+ elfhandle->requires[i].requireTypes = require->requireTypes;
+ elfhandle->requires[i].collectionSize = require->collectionSize;
+ elfhandle->requires[i].interface = getInterfaceDescription(&elftmp, interface);
+ if(elfhandle->requires[i].interface == NULL)
+ goto oom;
+
+ LOG_INTERNAL(2, " require %s <%s> %x\n",
+ elfhandle->requires[i].name,
+ elfhandle->requires[i].interface->type,
+ elfhandle->requires[i].requireTypes, 0, 0, 0);
+ CM_ASSERT(elfhandle->requires[i].collectionSize != 0);
+
+ ref = (char*)&require->indexes[0];
+
+ if((elfhandle->requires[i].requireTypes & VIRTUAL_REQUIRE) == 0 &&
+ (elfhandle->requires[i].requireTypes & STATIC_REQUIRE) == 0)
+ {
+ elfhandle->requires[i].indexes =
+ (t_interface_require_index*)OSAL_Alloc_Zero(sizeof(t_interface_require_index) * elfhandle->requires[i].collectionSize);
+ if(elfhandle->requires[i].indexes == NULL)
+ goto oom;
+
+ for(j = 0; j < elfhandle->requires[i].collectionSize; j++)
+ {
+ t_elf_interface_require_index* index = (t_elf_interface_require_index*)ref;
+
+ elfhandle->requires[i].indexes[j].numberOfClient = swap32(index->numberOfClient);
+ if(elfhandle->requires[i].indexes[j].numberOfClient != 0)
+ {
+ elfhandle->requires[i].indexes[j].memories =
+ (t_memory_reference*)OSAL_Alloc(sizeof(t_memory_reference) * elfhandle->requires[i].indexes[j].numberOfClient);
+ if(elfhandle->requires[i].indexes[j].memories == NULL)
+ goto oom;
+
+ for(k = 0; k < elfhandle->requires[i].indexes[j].numberOfClient; k++) {
+ if((error = getMemoryOffset(elfhandle,&elftmp,
+ MEM_DATA,
+ &index->symbols[k],
+ &elfhandle->requires[i].indexes[j].memories[k])) != CM_OK)
+ goto onerror;
+ LOG_INTERNAL(2, " [%d, %d] mem=%s offset=%x\n",
+ j, k,
+ elfhandle->requires[i].indexes[j].memories[k].memory->memoryName,
+ elfhandle->requires[i].indexes[j].memories[k].offset,
+ 0, 0);
+ }
+ }
+
+ ref += sizeof(index->numberOfClient) + elfhandle->requires[i].indexes[j].numberOfClient * sizeof(index->symbols[0]);
+ }
+ }
+ }
+ }
+
+ // Copy provides information
+ elfhandle->provideNumber = swap32(elftmp.elfheader->provideNumber);
+ if(elfhandle->provideNumber != 0)
+ {
+ elfhandle->provides =
+ (t_interface_provide*)OSAL_Alloc_Zero(sizeof(t_interface_provide) * elfhandle->provideNumber);
+ if(elfhandle->provides == NULL)
+ goto oom;
+
+ if(elfhandle->provideNumber > 0)
+ {
+ char *ref = getElfHeaderReference(&elftmp, (void*)elftmp.elfheader->provides);
+
+ for(i = 0; i < elfhandle->provideNumber; i++)
+ {
+ t_elf_provided_interface *provide = (t_elf_provided_interface*)ref;
+ t_elf_interface_description *interface = (t_elf_interface_description*)getElfHeaderReference(&elftmp, (void*)provide->interface);
+
+ elfhandle->provides[i].name = copyElfString(&elftmp, provide->name);
+ if(elfhandle->provides[i].name == NULL)
+ goto oom;
+
+ elfhandle->provides[i].provideTypes = provide->provideTypes;
+ elfhandle->provides[i].interruptLine = provide->interruptLine;
+ elfhandle->provides[i].collectionSize = provide->collectionSize;
+ elfhandle->provides[i].interface = getInterfaceDescription(&elftmp, interface);
+ if(elfhandle->provides[i].interface == NULL)
+ goto oom;
+
+ LOG_INTERNAL(2, " provide %s <%s>\n",
+ elfhandle->provides[i].name,
+ elfhandle->provides[i].interface->type,
+ 0,0, 0, 0);
+ CM_ASSERT(elfhandle->provides[i].collectionSize != 0);
+
+ ref = (char*)&provide->methodSymbols[0];
+
+ {
+ t_uint32 *methodSymbols = (t_uint32*)ref;
+
+ elfhandle->provides[i].indexes = (t_interface_provide_index**)OSAL_Alloc_Zero(
+ sizeof(t_interface_provide_index*) * elfhandle->provides[i].collectionSize);
+ if(elfhandle->provides[i].indexes == NULL)
+ goto oom;
+
+ if(elfhandle->provides[i].interface->methodNumber != 0)
+ {
+ for(j = 0; j < elfhandle->provides[i].collectionSize; j++)
+ {
+ elfhandle->provides[i].indexes[j] = (t_interface_provide_index*)OSAL_Alloc(
+ sizeof(t_interface_provide_index) * elfhandle->provides[i].interface->methodNumber);
+ if(elfhandle->provides[i].indexes[j] == NULL)
+ goto oom;
+
+ for(k = 0; k < elfhandle->provides[i].interface->methodNumber; k++)
+ {
+ if((error = getAdressForExecutableOffsetElsewhere(elfhandle, &elftmp,
+ methodSymbols++,
+ &elfhandle->provides[i].indexes[j][k].memory)) != CM_OK)
+ goto onerror;
+
+ if(elfhandle->provides[i].indexes[j][k].memory.memory != NULL)
+ LOG_INTERNAL(2, " [%d, %d] method '%s' mem=%s offset=%x\n",
+ j, k,
+ elfhandle->provides[i].interface->methodNames[k],
+ elfhandle->provides[i].indexes[j][k].memory.memory->memoryName,
+ elfhandle->provides[i].indexes[j][k].memory.offset,
+ 0);
+ else
+ LOG_INTERNAL(2, " [%d, %d] method '%s' address=%x\n",
+ j, k,
+ elfhandle->provides[i].interface->methodNames[k],
+ elfhandle->provides[i].indexes[j][k].memory.offset,
+ 0, 0);
+ }
+ }
+ }
+
+ ref += elfhandle->provides[i].collectionSize * elfhandle->provides[i].interface->methodNumber * sizeof(methodSymbols[0]);
+ }
+ }
+ }
+ }
+
+ return CM_OK;
+
+oom:
+ error = CM_NO_MORE_MEMORY;
+onerror:
+ cm_ELF_CloseFile(temporaryDescription, elfhandle);
+ *elfhandlePtr = NULL;
+ return error;
+}
+
+void cm_ELF_ReleaseDescription(
+ t_uint32 requireNumber, t_interface_require *requires,
+ t_uint32 attributeNumber, t_attribute *attributes,
+ t_uint32 propertyNumber, t_property *properties,
+ t_uint32 provideNumber, t_interface_provide *provides)
+{
+ int i, j;
+
+ // Free provides (Number set when array allocated)
+ if(provides != NULL)
+ {
+ for(i = 0; i < provideNumber; i++)
+ {
+ if(provides[i].indexes != NULL)
+ {
+ for(j = 0; j < provides[i].collectionSize; j++)
+ {
+ OSAL_Free(provides[i].indexes[j]);
+ }
+ OSAL_Free(provides[i].indexes);
+ }
+ releaseInterfaceDescription(provides[i].interface);
+ cm_StringRelease(provides[i].name);
+ }
+ OSAL_Free(provides);
+ }
+
+ // Free requires (Number set when array allocated)
+ if(requires != NULL)
+ {
+ for(i = 0; i < requireNumber; i++)
+ {
+ if(requires[i].indexes != 0)
+ {
+ for(j = 0; j < requires[i].collectionSize; j++)
+ {
+ OSAL_Free(requires[i].indexes[j].memories);
+ }
+ OSAL_Free(requires[i].indexes);
+ }
+ releaseInterfaceDescription(requires[i].interface);
+ cm_StringRelease(requires[i].name);
+ }
+ OSAL_Free(requires);
+ }
+
+ // Free properties (Number set when array allocated)
+ if(properties != NULL)
+ {
+ for(i = 0; i < propertyNumber; i++)
+ {
+ cm_StringRelease(properties[i].value);
+ cm_StringRelease(properties[i].name);
+ }
+ OSAL_Free(properties);
+ }
+
+ // Free Attributes (Number set when array allocated)
+ if(attributes != NULL)
+ {
+ for(i = 0; i < attributeNumber; i++)
+ {
+ cm_StringRelease(attributes[i].name);
+ }
+ OSAL_Free(attributes);
+ }
+}
+
+void cm_ELF_CloseFile(
+ t_bool temporaryDescription,
+ t_elfdescription *elfhandle)
+{
+ if(elfhandle == NULL)
+ return;
+
+ if(temporaryDescription && ! elfhandle->temporaryDescription)
+ return;
+
+ // Release description if not moved to template
+ cm_ELF_ReleaseDescription(
+ elfhandle->requireNumber, elfhandle->requires,
+ elfhandle->attributeNumber, elfhandle->attributes,
+ elfhandle->propertyNumber, elfhandle->properties,
+ elfhandle->provideNumber, elfhandle->provides);
+
+ cm_StringRelease(elfhandle->foundedTemplateName);
+
+ ELF64_UnloadComponent(elfhandle);
+}
+
+
+static t_cm_error allocSegment(
+ t_cm_domain_id domainId,
+ t_elfdescription *elfhandle,
+ t_memory_handle memories[NUMBER_OF_MMDSP_MEMORY],
+ t_memory_property property,
+ t_bool isSingleton) {
+ t_memory_id memId;
+ const t_elfmemory *thisMemory; //!< Memory hosting the component's 'this' data
+ const t_elfmemory *codeMemory; //!< Memory hosting the component's code
+
+ MMDSP_serializeMemories(elfhandle->instanceProperty, &codeMemory, &thisMemory);
+
+ for(memId = 0; memId < NUMBER_OF_MMDSP_MEMORY; memId++)
+ {
+ const t_elfmemory* mapping;
+
+ if(elfhandle->segments[memId].sumSize == 0x0)
+ continue;
+
+ mapping = MMDSP_getMappingById(memId);
+
+ if(
+ (mapping->property == property && elfhandle->instanceProperty != MEM_FOR_SINGLETON) ||
+ (property == MEM_SHARABLE && elfhandle->instanceProperty == MEM_FOR_SINGLETON) )
+ {
+ // Allocate segment
+ memories[memId] = cm_DM_Alloc(domainId, mapping->dspMemType,
+ elfhandle->segments[memId].sumSize / mapping->fileEntSize,
+ mapping->memAlignement, TRUE);
+
+ if(memories[memId] == INVALID_MEMORY_HANDLE)
+ {
+ ERROR("CM_NO_MORE_MEMORY(%s): %x too big\n", mapping->memoryName, elfhandle->segments[memId].sumSize / mapping->fileEntSize, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ // Get reference in memory
+ elfhandle->segments[memId].hostAddr = cm_DSP_GetHostLogicalAddress(memories[memId]);
+
+ cm_DSP_GetDspAddress(memories[memId], &elfhandle->segments[memId].mpcAddr);
+
+ if (isSingleton)
+ cm_DM_SetDefaultDomain(memories[memId], cm_DM_GetDomainCoreId(domainId));
+
+ // Log it
+ LOG_INTERNAL(1, "\t%s%s: 0x%x..+0x%x (0x%x)\n",
+ mapping->memoryName,
+ (thisMemory == mapping) ? "(THIS)" : "",
+ elfhandle->segments[memId].mpcAddr,
+ elfhandle->segments[memId].sumSize / mapping->fileEntSize,
+ elfhandle->segments[memId].hostAddr, 0);
+ }
+ else if(property == MEM_PRIVATE) // Since we are allocating private segments, anything not allocated here is a shared one
+ {
+ // In order to allow further relocation based on cached addresses like mpcAddr & hostAddr,
+ // initialize them as well!
+
+ // Get reference in memory
+ elfhandle->segments[memId].hostAddr = cm_DSP_GetHostLogicalAddress(memories[memId]);
+
+ cm_DSP_GetDspAddress(memories[memId], &elfhandle->segments[memId].mpcAddr);
+ }
+ }
+
+ return CM_OK;
+}
+
+/*
+ * Note: in case of error, part of the memory may already have been allocated and must be freed by calling cm_DSPABI_FreeTemplate
+ */
+t_cm_error cm_ELF_LoadTemplate(
+ t_cm_domain_id domainId,
+ t_elfdescription *elfhandle,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY],
+ t_bool isSingleton)
+{
+ t_cm_error error;
+
+ if((error = allocSegment(domainId, elfhandle, sharedMemories, MEM_SHARABLE, isSingleton)) != CM_OK)
+ return error;
+
+ // Load each readonly segment
+ if((error = ELF64_loadSegment(elfhandle, sharedMemories, MEM_SHARABLE)) != CM_OK)
+ return error;
+
+ return CM_OK;
+}
+
+t_cm_error cm_ELF_LoadInstance(
+ t_cm_domain_id domainId,
+ t_elfdescription *elfhandle,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY],
+ t_memory_handle privateMemories[NUMBER_OF_MMDSP_MEMORY],
+ t_bool isSingleton)
+{
+ t_memory_id memId;
+ t_cm_error error;
+
+ // Initialize all private handles from the shared ones so they can be freed safely in case of error
+ for(memId = 0; memId < NUMBER_OF_MMDSP_MEMORY; memId++)
+ {
+ privateMemories[memId] = sharedMemories[memId];
+ }
+
+ if((error = allocSegment(domainId, elfhandle, privateMemories, MEM_PRIVATE, isSingleton)) != CM_OK)
+ return error;
+
+ // Load each writable memory
+ if((error = ELF64_loadSegment(elfhandle, privateMemories, MEM_PRIVATE)) != CM_OK)
+ return error;
+
+ return CM_OK;
+}
+
+void cm_ELF_FlushTemplate(
+ t_nmf_core_id coreId,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY])
+{
+ t_memory_id memId;
+
+ for(memId = 0; memId < NUMBER_OF_MMDSP_MEMORY; memId++)
+ {
+ if(sharedMemories[memId] != INVALID_MEMORY_HANDLE)
+ MMDSP_loadedSection(
+ coreId, memId,
+ sharedMemories[memId]);
+ }
+}
+
+void cm_ELF_FlushInstance(
+ t_nmf_core_id coreId,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY],
+ t_memory_handle privateMemories[NUMBER_OF_MMDSP_MEMORY])
+{
+ t_memory_id memId;
+
+ for(memId = 0; memId < NUMBER_OF_MMDSP_MEMORY; memId++)
+ {
+ if(privateMemories[memId] != INVALID_MEMORY_HANDLE && privateMemories[memId] != sharedMemories[memId])
+ MMDSP_loadedSection(
+ coreId, memId,
+ privateMemories[memId]);
+ }
+}
+
+void cm_ELF_FreeInstance(
+ t_nmf_core_id coreId,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY],
+ t_memory_handle privateMemories[NUMBER_OF_MMDSP_MEMORY])
+{
+ t_memory_id memId;
+
+ if(privateMemories == NULL)
+ return;
+
+ for(memId = 0; memId < NUMBER_OF_MMDSP_MEMORY; memId++)
+ {
+ if(privateMemories[memId] != INVALID_MEMORY_HANDLE && privateMemories[memId] != sharedMemories[memId])
+ {
+ MMDSP_unloadedSection(coreId, memId, privateMemories[memId]);
+ cm_DM_Free(privateMemories[memId], TRUE);
+ }
+ }
+}
+
+void cm_ELF_FreeTemplate(
+ t_nmf_core_id coreId,
+ t_memory_handle sharedMemories[NUMBER_OF_MMDSP_MEMORY])
+{
+ t_memory_id memId;
+
+ if(sharedMemories == NULL)
+ return;
+
+ for(memId = 0; memId < NUMBER_OF_MMDSP_MEMORY; memId++)
+ {
+ if(sharedMemories[memId] != INVALID_MEMORY_HANDLE)
+ {
+ MMDSP_unloadedSection(coreId, memId, sharedMemories[memId]);
+ cm_DM_Free(sharedMemories[memId], TRUE);
+ }
+ }
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/src/elfmmdsp.c b/drivers/staging/nmf-cm/cm/engine/elf/src/elfmmdsp.c
new file mode 100644
index 00000000000..5f6641b188d
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/src/elfmmdsp.c
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/elf/inc/mmdsp.h>
+#include <cm/engine/elf/inc/bfd.h>
+#include <cm/engine/elf/inc/mpcal.h>
+
+#include <cm/engine/component/inc/initializer.h>
+
+#include <cm/engine/utils/inc/string.h>
+#include <cm/engine/utils/inc/swap.h>
+#include <cm/engine/trace/inc/trace.h>
+
+#include <cm/engine/dsp/mmdsp/inc/mmdsp_hwp.h>
+
+static const t_elfmemory mmdspMemories[NUMBER_OF_MMDSP_MEMORY] = {
+ {0, SDRAM_CODE, SDRAMTEXT_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_SHARABLE, MEM_CODE, 8, 8, "SDRAM_CODE"}, /* 0: Program memory */
+ {1, INTERNAL_XRAM24, 0, CM_MM_ALIGN_2WORDS, MEM_SHARABLE, MEM_DATA, 3, 4, "XROM"}, /* 1: Internal X memory */
+ {2, INTERNAL_YRAM24, 0, CM_MM_ALIGN_2WORDS, MEM_SHARABLE, MEM_DATA, 3, 4, "YROM"}, /* 2: Y memory */
+ {3, SDRAM_EXT24, SDRAMMEM24_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_SHARABLE, MEM_DATA, 3, 4, "SDROM24"}, /* 5: SDRAM24 */
+ {4, SDRAM_EXT16, SDRAMMEM16_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_SHARABLE, MEM_DATA, 3, 2, "SDROM16"}, /* 6: SDRAM16 */
+ {5, ESRAM_EXT24, ESRAMMEM24_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_SHARABLE, MEM_DATA, 3, 4, "ESROM24"}, /* 8: ESRAM24 */
+ {6, ESRAM_EXT16, ESRAMMEM16_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_SHARABLE, MEM_DATA, 3, 2, "ESROM16"}, /* 9: ESRAM16 */
+ {7, ESRAM_CODE, ESRAMTEXT_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_SHARABLE, MEM_CODE, 8, 8, "ESRAM_CODE"}, /*10: ESRAM code */
+ {8, INTERNAL_XRAM24, 0, CM_MM_ALIGN_2WORDS, MEM_PRIVATE, MEM_DATA, 3, 4, "XRAM"}, /* 1: Internal X memory */
+ {9, INTERNAL_YRAM24, 0, CM_MM_ALIGN_2WORDS, MEM_PRIVATE, MEM_DATA, 3, 4, "YRAM"}, /* 2: Y memory */
+ {10, SDRAM_EXT24, SDRAMMEM24_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_PRIVATE, MEM_DATA, 3, 4, "SDRAM24"}, /* 5: SDRAM24 */
+ {11, SDRAM_EXT16, SDRAMMEM16_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_PRIVATE, MEM_DATA, 3, 2, "SDRAM16"}, /* 6: SDRAM16 */
+ {12, ESRAM_EXT24, ESRAMMEM24_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_PRIVATE, MEM_DATA, 3, 4, "ESRAM24"}, /* 8: ESRAM24 */
+ {13, ESRAM_EXT16, ESRAMMEM16_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_PRIVATE, MEM_DATA, 3, 2, "ESRAM16"}, /* 9: ESRAM16 */
+ {14, LOCKED_CODE, SDRAMTEXT_BASE_ADDR, CM_MM_ALIGN_2WORDS, MEM_SHARABLE, MEM_CODE, 8, 8, "LOCKED_CODE"}, /* : .locked */
+};
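+/*
+ * Each entry above appears to describe one logical MMDSP memory bank; going
+ * by how the fields are used in this file they are: index (id), DSP memory
+ * type passed to cm_DM_Alloc(), base address, allocation alignment, sharable
+ * vs. private property, code/data purpose, entry size in the ELF file
+ * (bytes), entry size in target memory (bytes), and a printable name. The
+ * 24-bit data banks, for instance, use 3 bytes per word in the file but 4 in
+ * memory, which is why sizes are divided by fileEntSize when allocating.
+ */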
+
+#define MAX_ELFSECTIONNAME 10
+struct memoryMapping {
+ char *elfSectionName;
+ t_uint32 memoryIndex[MEM_FOR_LAST]; // memoryIndex[t_instance_property]
+};
+
+static const struct memoryMapping mappingmem0[] = {
+ {"mem0.0", {0, 0}},
+ {"mem0.1", {0, 0}},
+ {"mem0.2", {0, 0}}
+};
+static const struct memoryMapping mappingmem10 =
+ {"mem10", {7, 7}};
+static const struct memoryMapping mappinglocked =
+ {".locked", {14, 14}};
+static const struct memoryMapping mappingmem1[] = {
+ {"", {0xff, 0xff}},
+ {"mem1.1", {1, 1}},
+ {"mem1.2", {8, 1}},
+ {"mem1.3", {1, 1}},
+ {"mem1.4", {8, 1}},
+ {"mem1.stack", {8, 1}}
+};
+static const struct memoryMapping mappingmem2[] = {
+ {"", {0xff, 0xff}},
+ {"mem2.1", {2, 2}},
+ {"mem2.2", {9, 2}},
+ {"mem2.3", {2, 2}},
+ {"mem2.4", {9, 2}}
+};
+static const struct memoryMapping mappingmem5[] = {
+ {"", {0xff, 0xff}},
+ {"mem5.1", {3, 3}},
+ {"mem5.2", {10, 3}},
+ {"mem5.3", {3, 3}},
+ {"mem5.4", {10, 3}}
+};
+static const struct memoryMapping mappingmem6[] = {
+ {"", {0xff, 0xff}},
+ {"mem6.1", {4, 4}},
+ {"mem6.2", {11, 4}},
+ {"mem6.3", {4, 4}},
+ {"mem6.4", {11, 4}}
+};
+static const struct memoryMapping mappingmem8[] = {
+ {"", {0xff, 0xff}},
+ {"mem8.1", {5, 5}},
+ {"mem8.2", {12, 5}},
+ {"mem8.3", {5, 5}},
+ {"mem8.4", {12, 5}}
+};
+static const struct memoryMapping mappingmem9[] = {
+ {"", {0xff, 0xff}},
+ {"mem9.1", {6, 6}},
+ {"mem9.2", {13, 6}},
+ {"mem9.3", {6, 6}},
+ {"mem9.4", {13, 6}}
+};
+
+static const struct {
+ const struct memoryMapping* mapping;
+ unsigned int number;
+} hashMappings[10] = {
+ {mappingmem0, sizeof(mappingmem0) / sizeof(mappingmem0[0])},
+ {mappingmem1, sizeof(mappingmem1) / sizeof(mappingmem1[0])},
+ {mappingmem2, sizeof(mappingmem2) / sizeof(mappingmem2[0])},
+ {0x0, 0},
+ {0x0, 0},
+ {mappingmem5, sizeof(mappingmem5) / sizeof(mappingmem5[0])},
+ {mappingmem6, sizeof(mappingmem6) / sizeof(mappingmem6[0])},
+ {0x0, 0},
+ {mappingmem8, sizeof(mappingmem8) / sizeof(mappingmem8[0])},
+ {mappingmem9, sizeof(mappingmem9) / sizeof(mappingmem9[0])},
+};
+
+const t_elfmemory* MMDSP_getMappingById(t_memory_id memId)
+{
+ return &mmdspMemories[memId];
+}
+
+const t_elfmemory* MMDSP_getMappingByName(const char* sectionName, t_instance_property property)
+{
+ if(sectionName[0] == 'm' && sectionName[1] == 'e' && sectionName[2] == 'm')
+ {
+ if(sectionName[4] == '.')
+ {
+ if(sectionName[5] >= '0' && sectionName[5] <= '9')
+ {
+ if(sectionName[3] >= '0' && sectionName[3] <= '9')
+ {
+ unsigned int m, sm;
+
+ m = sectionName[3] - '0';
+ sm = sectionName[5] - '0';
+ if(sm < hashMappings[m].number)
+ return &mmdspMemories[hashMappings[m].mapping[sm].memoryIndex[property]];
+ }
+ } else if(sectionName[3] == '1' && sectionName[5] == 's')
+ return &mmdspMemories[mappingmem1[5].memoryIndex[property]];
+ }
+ else if(sectionName[3] == '1' && sectionName[4] == '0')
+ return &mmdspMemories[mappingmem10.memoryIndex[property]];
+ }
+ else if(sectionName[0] == '.' && sectionName[1] == 'l' && sectionName[2] == 'o' && sectionName[3] == 'c' &&
+ sectionName[4] == 'k' && sectionName[5] == 'e' && sectionName[6] == 'd')
+ {
+ return &mmdspMemories[mappinglocked.memoryIndex[property]];
+ }
+
+ return NULL;
+}
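+/*
+ * The lookup above relies on the MMDSP toolchain section naming scheme:
+ * "memN.M" selects row N of hashMappings and column M of that row, and the
+ * resulting index picks either the sharable or the private bank depending on
+ * the instance property. As an example derived from the tables above,
+ * "mem5.2" resolves to either the private SDRAM24 bank (index 10) or the
+ * sharable one (index 3), while "mem1.stack" and ".locked" are matched by
+ * the dedicated tests.
+ */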
+
+void MMDSP_serializeMemories(t_instance_property property,
+ const t_elfmemory** codeMemory, const t_elfmemory** thisMemory) {
+ // Return memory references
+ *codeMemory = &mmdspMemories[0];
+ if(property == MEM_FOR_SINGLETON)
+ {
+ *thisMemory = &mmdspMemories[1];
+ }
+ else
+ {
+ *thisMemory = &mmdspMemories[8];
+ }
+}
+
+void MMDSP_copyCode(t_uint64 * remoteAddr64, const char* origAddr, int nb)
+{
+ int m;
+
+ // Linux allows unaligned access
+#ifdef LINUX
+ t_uint64 *origAddr64 = (t_uint64*)origAddr;
+#else
+ __packed t_uint64 *origAddr64 = (__packed t_uint64*)origAddr;
+#endif
+
+ for (m = 0; m < nb; m += 8)
+ {
+ *remoteAddr64++ = swap64(*origAddr64++);
+ }
+}
+
+void MMDSP_copyData24(t_uint32 * remoteAddr32, const char* origAddr, int nb)
+{
+ int m;
+
+ for (m = 0; m < nb; m+=4)
+ {
+ t_uint32 value1;
+
+ value1 = (*origAddr++ << 16);
+ value1 |= (*origAddr++ << 8);
+ value1 |= (*origAddr++ << 0);
+ *remoteAddr32++ = value1;
+ }
+}
+
+void MMDSP_copyData16(t_uint16 * remoteAddr16, const char* origAddr, int nb)
+{
+ int m;
+
+ for (m = 0; m < nb; m+=2)
+ {
+ t_uint16 value1;
+
+ origAddr++; // Skip this byte (which is put in the elf file for historical reasons)
+ value1 = (*origAddr++ << 8);
+ value1 |= (*origAddr++ << 0);
+ *remoteAddr16++ = value1;
+ }
+}
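+/*
+ * The three copy helpers above reflect how MMDSP words are packed in the ELF
+ * file versus in target memory: 64-bit instruction words are only byte
+ * swapped (swap64), 24-bit data words are stored as 3 bytes in the file and
+ * expanded to one 32-bit host word each, and 16-bit data words are stored as
+ * 3 bytes whose leading byte is dropped on copy. The fileEntSize/memEntSize
+ * fields of mmdspMemories encode exactly these ratios (8/8, 3/4 and 3/2).
+ */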
+
+#if 0
+__asm void MMDSP_copyCode(void* dst, const void* src, int nb)
+{
+ PUSH {r4-r8, lr}
+ SUBS r2,r2,#0x20
+ BCC l4
+
+l5
+ SETEND BE
+ LDR r4, [r1], #0x4
+ LDR r3, [r1], #0x4
+ LDR r6, [r1], #0x4
+ LDR r5, [r1], #0x4
+ LDR r8, [r1], #0x4
+ LDR r7, [r1], #0x4
+ LDR lr, [r1], #0x4
+ LDR r12, [r1], #0x4
+
+ SETEND LE
+ STM r0!,{r3-r8,r12, lr}
+ SUBS r2,r2,#0x20
+ BCS l5
+
+l4
+ LSLS r12,r2,#28
+
+ SETEND BE
+ LDRCS r4, [r1], #0x4
+ LDRCS r3, [r1], #0x4
+ LDRCS r6, [r1], #0x4
+ LDRCS r5, [r1], #0x4
+ SETEND LE
+ STMCS r0!,{r3-r6}
+
+ SETEND BE
+ LDRMI r4, [r1], #0x4
+ LDRMI r3, [r1], #0x4
+ SETEND LE
+ STMMI r0!,{r3-r4}
+
+ POP {r4-r8, pc}
+}
+#endif
+
+#ifdef LINUX
+static void PLD5(int r)
+{
+ asm volatile (
+ "PLD [r0, #0x20] \n\t"
+ "PLD [r0, #0x40] \n\t"
+ "PLD [r0, #0x60] \n\t"
+ "PLD [r0, #0x80] \n\t"
+ "PLD [r0, #0xA0]" );
+}
+
+static void PLD1(int r)
+{
+ asm volatile (
+ "PLD [r0, #0xC0]" );
+}
+#else /* Symbian, Think -> We assume ARMCC */
+static __asm void PLD5(int r)
+{
+ PLD [r0, #0x20]
+ PLD [r0, #0x40]
+ PLD [r0, #0x60]
+ PLD [r0, #0x80]
+ PLD [r0, #0xA0]
+
+ bx lr
+}
+
+static __asm void PLD1(int r)
+{
+ PLD [r0, #0xC0]
+
+ bx lr
+}
+#endif
+
+#if 0
+__asm void COPY(void* dst, const void* src, int nb)
+{
+ PUSH {r4-r8, lr}
+ SUBS r2,r2,#0x20
+ BCC l4a
+ PLD [r1, #0x20]
+ PLD [r1, #0x40]
+ PLD [r1, #0x60]
+ PLD [r1, #0x80]
+ PLD [r1, #0xA0]
+
+l5a
+ PLD [r1, #0xC0]
+ LDM r1!,{r3-r8,r12,lr}
+ STM r0!,{r3-r8,r12,lr}
+ SUBS r2,r2,#0x20
+ BCS l5a
+
+l4a
+ LSLS r12,r2,#28
+ LDMCS r1!,{r3,r4,r12,lr}
+ STMCS r0!,{r3,r4,r12,lr}
+ LDMMI r1!,{r3,r4}
+ STMMI r0!,{r3,r4}
+ POP {r4-r8,lr}
+ LSLS r12,r2,#30
+ LDRCS r3,[r1],#4
+ STRCS r3,[r0],#4
+ BXEQ lr
+l6b
+ LSLS r2,r2,#31
+ LDRHCS r3,[r1],#2
+ LDRBMI r2,[r1],#1
+ STRHCS r3,[r0],#2
+ STRBMI r2,[r0],#1
+ BX lr
+}
+#endif
+
+
+void MMDSP_copySection(t_uint32 origAddr, t_uint32 remoteAddr, t_uint32 sizeInByte) {
+ t_uint32 endAddr = remoteAddr + sizeInByte;
+
+ PLD5(origAddr);
+
+ // Align on 32bits
+ if((remoteAddr & 0x3) != 0)
+ {
+ *(t_uint16*)remoteAddr = *(t_uint16*)origAddr;
+ remoteAddr += sizeof(t_uint16);
+ origAddr += sizeof(t_uint16);
+ }
+
+ // Align on 64bits
+ if((remoteAddr & 0x7) != 0 && (remoteAddr <= endAddr - sizeof(t_uint32)))
+ {
+ *(t_uint32*)remoteAddr = *(t_uint32*)origAddr;
+ remoteAddr += sizeof(t_uint32);
+ origAddr += sizeof(t_uint32);
+ }
+
+ // 64bits burst access
+ for(; remoteAddr <= endAddr - sizeof(t_uint64); remoteAddr += sizeof(t_uint64), origAddr += sizeof(t_uint64))
+ {
+ PLD1(origAddr);
+ *(volatile t_uint64*)remoteAddr = *(t_uint64*)origAddr;
+ }
+
+ // Remain 32bits access
+ if(remoteAddr <= endAddr - sizeof(t_uint32))
+ {
+ *(t_uint32*)remoteAddr = *(t_uint32*)origAddr;
+ remoteAddr += sizeof(t_uint32);
+ origAddr += sizeof(t_uint32);
+ }
+
+ // Remain 16bits access
+ if(remoteAddr <= endAddr - sizeof(t_uint16))
+ *(t_uint16*)remoteAddr = *(t_uint16*)origAddr;
+}
+
+
+void MMDSP_bzeroSection(t_uint32 remoteAddr, t_uint32 sizeInByte) {
+ t_uint32 endAddr = remoteAddr + sizeInByte;
+
+ // Align on 32bits
+ if((remoteAddr & 0x3) != 0)
+ {
+ *(t_uint16*)remoteAddr = 0;
+ remoteAddr += sizeof(t_uint16);
+ }
+
+ // Align on 64bits
+ if((remoteAddr & 0x7) != 0 && (remoteAddr <= endAddr - sizeof(t_uint32)))
+ {
+ *(t_uint32*)remoteAddr = 0;
+ remoteAddr += sizeof(t_uint32);
+ }
+
+ // 64bits burst access
+ for(; remoteAddr <= endAddr - sizeof(t_uint64); remoteAddr += sizeof(t_uint64))
+ *(volatile t_uint64*)remoteAddr = 0ULL;
+
+ // Remain 32bits access
+ if(remoteAddr <= endAddr - sizeof(t_uint32))
+ {
+ *(t_uint32*)remoteAddr = 0;
+ remoteAddr += sizeof(t_uint32);
+ }
+
+ // Remain 16bits access
+ if(remoteAddr <= endAddr - sizeof(t_uint16))
+ *(t_uint16*)remoteAddr = 0;
+}
+
+void MMDSP_loadedSection(t_nmf_core_id coreId, t_memory_id memId, t_memory_handle handle)
+{
+ if(mmdspMemories[memId].purpose == MEM_CODE)
+ {
+ OSAL_CleanDCache(cm_DSP_GetHostLogicalAddress(handle), cm_MM_GetSize(handle));
+ }
+
+ if(memId == LOCKED_CODE)
+ {
+ t_uint32 DspAddress, DspSize;
+
+ cm_DSP_GetDspMemoryHandleSize(handle, &DspSize);
+ cm_DSP_GetDspAddress(handle, &DspAddress);
+
+ cm_COMP_InstructionCacheLock(coreId, DspAddress, DspSize);
+ }
+}
+
+void MMDSP_unloadedSection(t_nmf_core_id coreId, t_memory_id memId, t_memory_handle handle)
+{
+ if(memId == LOCKED_CODE)
+ {
+ t_uint32 DspAddress, DspSize;
+
+ cm_DSP_GetDspMemoryHandleSize(handle, &DspSize);
+ cm_DSP_GetDspAddress(handle, &DspAddress);
+
+ cm_COMP_InstructionCacheUnlock(coreId, DspAddress, DspSize);
+ }
+
+}
+
+static struct reloc_howto_struct elf64_mmdsp_howto_table[] =
+{
+ HOWTO (R_MMDSP_IMM20_16, /* type */
+ 0, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ FALSE, /* pc_relative */
+ 8, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ 0x0, /* special_function */
+ "R_MMDSP_IMM20_16", /* name */
+ FALSE, /* partial_inplace */
+ 0x0, /* src_mask */
+ 0x0000000000ffff00, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ /* A 4-bit absolute relocation for a split 20-bit immediate, shifted by 56 */
+
+ HOWTO (R_MMDSP_IMM20_4, /* type */
+ 16, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 4, /* bitsize */
+ FALSE, /* pc_relative */
+ 56, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ 0x0, /* special_function */
+ "R_MMDSP_IMM20_4", /* name */
+ FALSE, /* partial_inplace */
+ 0x0, /* src_mask */
+ 0x0f00000000000000LL, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (R_MMDSP_24, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 24, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ 0x0, /* special_function */
+ "R_MMDSP_24", /* name */
+ FALSE, /* partial_inplace */
+ 0x0, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (R_MMDSP_IMM16, /* type */
+ 0, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ FALSE, /* pc_relative */
+ 8, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ 0x0, /* special_function */
+ "R_MMDSP_IMM16", /* name */
+ FALSE, /* partial_inplace */
+ 0x0, /* src_mask */
+ 0x0000000000ffff00, /* dst_mask */
+ FALSE), /* pcrel_offset */
+};
+
+static const char* lastInPlaceAddr = 0;
+static long long lastInPlaceValue;
+
+void MMDSP_performRelocation(
+ t_uint32 type,
+ const char* symbol_name,
+ t_uint32 symbol_addr,
+ char* reloc_addr,
+ const char* inPlaceAddr,
+ t_uint32 reloc_offset) {
+ int i;
+
+ for(i = 0; i < sizeof(elf64_mmdsp_howto_table) / sizeof(elf64_mmdsp_howto_table[0]); i++)
+ {
+ struct reloc_howto_struct* howto = &elf64_mmdsp_howto_table[i];
+ if(howto->type == type)
+ {
+ t_uint64 relocation;
+
+ LOG_INTERNAL(2, "reloc '%s:0x%x' type %s at 0x%x (0x%x)\n",
+ symbol_name ? symbol_name : "??", symbol_addr,
+ howto->name,
+ reloc_offset, reloc_addr, 0);
+
+ relocation = symbol_addr;
+
+ if (howto->pc_relative) {
+ // Not handled yet
+ }
+
+ if (howto->complain_on_overflow != complain_overflow_dont) {
+ // Not handled yet
+ }
+
+ relocation >>= howto->rightshift;
+
+ relocation <<= howto->bitpos;
+
+#define DOIT(x) \
+ x = ( (x & ~howto->dst_mask) | (((x & howto->src_mask) + relocation) & howto->dst_mask))
+
+ switch (howto->size) {
+ case 2: {
+ long x = *(long*)inPlaceAddr;
+
+ // CM_ASSERT(*(long*)inPlaceAddr == *(long*)reloc_addr);
+
+ DOIT (x);
+ *(long*)reloc_addr = x;
+ }
+ break;
+ case 4: {
+ long long x;
+ if(lastInPlaceAddr == inPlaceAddr)
+ {
+ x = lastInPlaceValue;
+ }
+ else
+ {
+ // CM_ASSERT(*(__packed long long*)inPlaceAddr == *(long long*)reloc_addr);
+ x = *(long long*)inPlaceAddr;
+ lastInPlaceAddr = inPlaceAddr;
+ }
+
+ DOIT (x);
+ *(long long*)reloc_addr = lastInPlaceValue = x;
+ }
+ break;
+ default:
+ CM_ASSERT(0);
+ }
+
+ return;
+ }
+ }
+
+ ERROR("Relocation type %d not supported for '%s'\n", type, symbol_name, 0, 0, 0, 0);
+}
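+/*
+ * For reference, the patching done by the DOIT macro above amounts to:
+ *
+ *   value = (symbol_addr >> howto->rightshift) << howto->bitpos;
+ *   word  = (word & ~howto->dst_mask)
+ *         | (((word & howto->src_mask) + value) & howto->dst_mask);
+ *
+ * applied either to a 32-bit word (size 2) or to a 64-bit MMDSP instruction
+ * word (size 4). The lastInPlaceAddr/lastInPlaceValue pair caches the 64-bit
+ * case so that two relocations targeting the same instruction word (for
+ * instance the split R_MMDSP_IMM20_16/R_MMDSP_IMM20_4 pair) accumulate
+ * instead of each restarting from the original in-place data.
+ */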
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/src/elfrelocate.c b/drivers/staging/nmf-cm/cm/engine/elf/src/elfrelocate.c
new file mode 100644
index 00000000000..b08ac6a361e
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/src/elfrelocate.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ */
+#include <cm/engine/elf/inc/bfd.h>
+#include <cm/engine/elf/inc/mpcal.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/utils/inc/string.h>
+
+t_cm_error cm_ELF_relocateSharedSegments(
+ t_memory_handle *memories,
+ t_elfdescription *elfhandle,
+ void *cbContext)
+{
+ return ELF64_relocateSegments(
+ memories,
+ elfhandle,
+ MEM_SHARABLE,
+ cbContext);
+}
+
+t_cm_error cm_ELF_relocatePrivateSegments(
+ t_memory_handle *memories,
+ t_elfdescription *elfhandle,
+ void *cbContext)
+{
+ return ELF64_relocateSegments(
+ memories,
+ elfhandle,
+ MEM_PRIVATE,
+ cbContext);
+}
+
+void cm_ELF_performRelocation(
+ t_uint32 type,
+ const char* symbol_name,
+ t_uint32 symbol_addr,
+ char* reloc_addr)
+{
+ MMDSP_performRelocation(
+ type,
+ symbol_name,
+ symbol_addr,
+ reloc_addr,
+ reloc_addr,
+ 0xBEEF);
+
+ OSAL_CleanDCache((t_uint32)reloc_addr, 8);
+}
+
+t_cm_error cm_ELF_GetMemory(
+ t_elfdescription *elf,
+ t_tmp_elfdescription *elftmp,
+ t_uint32 address,
+ t_memory_purpose purpose,
+ t_memory_reference *memory) {
+ t_memory_id memId;
+
+ for(memId = 0; memId < NUMBER_OF_MMDSP_MEMORY; memId++)
+ {
+ const t_elfmemory* mem = MMDSP_getMappingById(memId);
+
+ if(mem->purpose == purpose && // Memory correspond
+ elf->segments[mem->id].sumSize != 0 && // Segment allocated
+ (elf->segments[mem->id].mpcAddr <= address) &&
+ (address < elf->segments[mem->id].mpcAddr + elf->segments[mem->id].sumSize / mem->fileEntSize)) {
+ memory->memory = mem;
+ memory->offset = address - elf->segments[mem->id].mpcAddr;
+ return CM_OK;
+ }
+ }
+
+ ERROR("Memory %x,%d not found\n", address, purpose, 0, 0, 0, 0);
+ return CM_INVALID_ELF_FILE;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/src/elfxx.c b/drivers/staging/nmf-cm/cm/engine/elf/src/elfxx.c
new file mode 100644
index 00000000000..4a2976a6bc1
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/src/elfxx.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/elf/inc/mpcal.h>
+
+#include <cm/engine/utils/inc/string.h>
+#include <cm/engine/utils/inc/mem.h>
+
+
+static t_uint32 max(t_uint32 a, t_uint32 b)
+{
+ return (a >= b) ? a : b;
+}
+/*
+static t_uint32 min(t_uint32 a, t_uint32 b)
+{
+ return (a <= b) ? a : b;
+}
+*/
+
+struct XXrelocation
+{
+ t_uint32 st_value;
+ ElfXX_Half st_shndx;
+ Elf64_Sxword r_addend;
+ t_uint32 OffsetInElf;
+ t_uint32 type;
+
+ t_dup_char symbol_name; // Valid only if st_shndx == SHN_UNDEF
+};
+
+struct XXSection {
+ ElfXX_Word sh_type; /* Section type */
+ t_uint32 sh_size; /* Section size in bytes */
+ ElfXX_Word sh_info; /* Additional section information */
+ ElfXX_Word sh_link; /* Link to another section */
+ t_uint32 sh_addralign; /* Some sections have address alignment constraints */
+ t_uint32 sh_addr; /* Section addr */
+ ElfXX_Xword sh_flags; /* Section flags */
+
+ const char *data;
+ t_uint32 trueDataSize; /* Valid if different from sh_size */
+ const char *sectionName;
+
+ t_uint32 offsetInSegment;
+ const t_elfmemory *meminfo;
+
+ t_uint32 relocationNumber;
+ struct XXrelocation *relocations;
+};
+
+struct XXElf {
+ t_uint32 e_shnum;
+ struct XXSection sectionss[1];
+};
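+/*
+ * Note: sectionss[] is used as a variable-length trailing array;
+ * ELF64_LoadComponent allocates sizeof(struct XXElf) plus (e_shnum - 1)
+ * extra struct XXSection entries in a single block, placed right after
+ * the owning t_elfdescription.
+ */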
+
+t_cm_error ELF64_LoadComponent(
+ t_uint16 e_machine,
+ const char *elfdata,
+ t_elfdescription **elfhandlePtr,
+ t_tmp_elfdescription *elftmp)
+{
+ t_elfdescription *elfhandle;
+ const ElfXX_Ehdr *header = (ElfXX_Ehdr*)elfdata;
+ const ElfXX_Shdr *sections;
+ const char *strings;
+ struct XXElf* ELF;
+ int i, nb;
+
+ elftmp->elfdata = elfdata;
+
+ /* Sanity check */
+ if (swapHalf(header->e_machine) != e_machine)
+ {
+ ERROR("This is not a executable for such MPC\n", 0, 0, 0, 0, 0, 0);
+ return CM_INVALID_ELF_FILE;
+ }
+
+ // Cache ELF file information
+ nb = swapHalf(header->e_shnum);
+ elftmp->isExecutable = (swapHalf(header->e_type) == ET_EXEC);
+
+ elfhandle = (t_elfdescription*)OSAL_Alloc_Zero(
+ sizeof(t_elfdescription) + sizeof(struct XXElf) + sizeof(struct XXSection) * (nb - 1));
+ if(elfhandle == NULL)
+ return CM_NO_MORE_MEMORY;
+
+ ELF = elfhandle->ELF = (struct XXElf*)(elfhandle + 1);
+
+ ELF->e_shnum = nb;
+
+ sections = (ElfXX_Shdr*)&elfdata[swapXword(header->e_shoff)];
+ // Compute and swap section information
+ for(i = 0; i < ELF->e_shnum; i++)
+ {
+ ELF->sectionss[i].sh_type = swapWord(sections[i].sh_type);
+ ELF->sectionss[i].sh_info = swapWord(sections[i].sh_info);
+ ELF->sectionss[i].sh_link = swapWord(sections[i].sh_link);
+ ELF->sectionss[i].sh_size = (t_uint32)swapXword(sections[i].sh_size);
+ ELF->sectionss[i].sh_addralign = (t_uint32)swapXword(sections[i].sh_addralign);
+ ELF->sectionss[i].sh_addr = (t_uint32)swapXword(sections[i].sh_addr);
+ ELF->sectionss[i].sh_flags = swapXword(sections[i].sh_flags);
+
+ elftmp->sectionData[i] = &elfdata[(t_uint32)swapXword(sections[i].sh_offset)];
+ }
+
+ /*
+ * search nmf_segment
+ */
+ strings = elftmp->sectionData[swapHalf(header->e_shstrndx)];
+ for(i = 0; i < ELF->e_shnum; i++)
+ {
+ ELF->sectionss[i].sectionName = &strings[swapWord(sections[i].sh_name)];
+
+ // Look for nmf_segment to check whether this is an NMF component
+ if(cm_StringCompare("nmf_segment", ELF->sectionss[i].sectionName, 11) == 0) {
+ elftmp->nmfSectionIndex = i;
+ elftmp->elfheader = (const t_elf_component_header*)elftmp->sectionData[i];
+ }
+ }
+
+ if(elftmp->nmfSectionIndex == 0)
+ {
+ ERROR("This is not a NMF component\n", 0, 0, 0, 0, 0, 0);
+ goto invalid;
+ }
+
+ /*
+ * Determine component type
+ */
+ elfhandle->magicNumber = swap32(elftmp->elfheader->magic);
+ switch(elfhandle->magicNumber) {
+ case MAGIC_COMPONENT:
+ elfhandle->instanceProperty = MEM_FOR_MULTIINSTANCE;
+ break;
+ case MAGIC_SINGLETON:
+ case MAGIC_FIRMWARE:
+ elfhandle->instanceProperty = MEM_FOR_SINGLETON;
+ break;
+ }
+
+ // Copy content
+ for(i = 0; i < ELF->e_shnum; i++)
+ {
+ ELF->sectionss[i].meminfo = MMDSP_getMappingByName(
+ ELF->sectionss[i].sectionName,
+ elfhandle->instanceProperty);
+
+ if(ELF->sectionss[i].meminfo != NULL)
+ ELF->sectionss[i].trueDataSize = (ELF->sectionss[i].sh_size / ELF->sectionss[i].meminfo->fileEntSize) * ELF->sectionss[i].meminfo->memEntSize;
+
+ if(ELF->sectionss[i].sh_size != 0 &&
+ ELF->sectionss[i].sh_type == SHT_PROGBITS &&
+ (ELF->sectionss[i].sh_flags & SHF_ALLOC) != 0)
+ {
+ const char* elfAddr = elftmp->sectionData[i];
+
+ ELF->sectionss[i].data = OSAL_Alloc(ELF->sectionss[i].trueDataSize);
+ if(ELF->sectionss[i].data == NULL)
+ goto oom;
+
+ if(ELF->sectionss[i].meminfo->purpose == MEM_CODE)
+ {
+ MMDSP_copyCode(
+ (t_uint64*)ELF->sectionss[i].data,
+ elfAddr,
+ ELF->sectionss[i].trueDataSize);
+ }
+ else if(ELF->sectionss[i].meminfo->purpose == MEM_DATA &&
+ // fileEntSize == 3 always holds for data, so it is not checked here
+ ELF->sectionss[i].meminfo->memEntSize == 4)
+ {
+ MMDSP_copyData24(
+ (t_uint32*)ELF->sectionss[i].data,
+ elfAddr,
+ ELF->sectionss[i].trueDataSize);
+ }
+ else if(ELF->sectionss[i].meminfo->purpose == MEM_DATA &&
+ // fileEntSize == 3 always holds for data, so it is not checked here
+ ELF->sectionss[i].meminfo->memEntSize == 2)
+ {
+ MMDSP_copyData16(
+ (t_uint16*)ELF->sectionss[i].data,
+ elfAddr,
+ ELF->sectionss[i].trueDataSize);
+ }
+ else
+ CM_ASSERT(0);
+ }
+ }
+
+ // Copy relocation
+ // Loop on all relocation section
+ for(i=0; i < ELF->e_shnum; i++)
+ {
+ int sh_info;
+
+ // Is this section a relocation table? (only RELA supported)
+ if((ELF->sectionss[i].sh_type != SHT_RELA) ||
+ ELF->sectionss[i].sh_size == 0) continue;
+
+ // Copy only relocations for loaded sections
+ sh_info = ELF->sectionss[i].sh_info;
+ if(ELF->sectionss[sh_info].meminfo != NULL)
+ {
+ const ElfXX_Sym* symtab;
+ const char* strtab;
+ ElfXX_Rela* rel_start;
+ int n;
+
+ ELF->sectionss[sh_info].relocationNumber = ELF->sectionss[i].sh_size / sizeof(ElfXX_Rela);
+ ELF->sectionss[sh_info].relocations = (struct XXrelocation*)OSAL_Alloc_Zero(sizeof(struct XXrelocation) * ELF->sectionss[sh_info].relocationNumber);
+ if(ELF->sectionss[sh_info].relocations == NULL)
+ goto oom;
+
+ symtab = (ElfXX_Sym *)elftmp->sectionData[ELF->sectionss[i].sh_link];
+ strtab = elftmp->sectionData[ELF->sectionss[ELF->sectionss[i].sh_link].sh_link];
+ rel_start = (ElfXX_Rela*)elftmp->sectionData[i];
+ for(n = 0; n < ELF->sectionss[sh_info].relocationNumber; n++, rel_start++)
+ {
+ struct XXrelocation* relocation = &ELF->sectionss[sh_info].relocations[n];
+ ElfXX_Xword r_info = swapXword(rel_start->r_info);
+ int strtab_index = ELFXX_R_SYM(r_info);
+ const char* symbol_name = &strtab[swapWord(symtab[strtab_index].st_name)];
+
+ relocation->st_shndx = swapHalf(symtab[strtab_index].st_shndx);
+ relocation->st_value = (t_uint32)swapXword(symtab[strtab_index].st_value);
+ relocation->r_addend = swapXword(rel_start->r_addend);
+ relocation->OffsetInElf = (t_uint32)swapXword(rel_start->r_offset) / ELF->sectionss[sh_info].meminfo->fileEntSize;
+ relocation->type = ELFXX_R_TYPE(r_info);
+
+ switch(relocation->st_shndx) {
+ case SHN_UNDEF:
+ relocation->symbol_name = cm_StringDuplicate(symbol_name + 1); /* Remove '_' prefix */
+ if(relocation->symbol_name == NULL)
+ goto oom;
+ break;
+ case SHN_COMMON:
+ ERROR("SHN_COMMON not handle for %s\n", symbol_name, 0, 0, 0, 0, 0);
+ goto invalid;
+ }
+ }
+ }
+ }
+
+ *elfhandlePtr = elfhandle;
+ return CM_OK;
+invalid:
+ ELF64_UnloadComponent(elfhandle);
+ return CM_INVALID_ELF_FILE;
+oom:
+ ELF64_UnloadComponent(elfhandle);
+ return CM_NO_MORE_MEMORY;
+}
+
+t_cm_error ELF64_ComputeSegment(
+ t_elfdescription *elfhandle,
+ t_tmp_elfdescription *elftmp)
+{
+ struct XXElf* ELF = elfhandle->ELF;
+ int i;
+
+ for(i = 0; i < ELF->e_shnum; i++)
+ {
+ ELF->sectionss[i].offsetInSegment = 0xFFFFFFFF;
+
+ if(ELF->sectionss[i].sh_type == SHT_PROGBITS || ELF->sectionss[i].sh_type == SHT_NOBITS) {
+ // This is loadable memory (its size could be zero since symbols may still refer to it)...
+ const t_elfmemory* meminfo = ELF->sectionss[i].meminfo;
+
+ if(meminfo != NULL) {
+ // ... which corresponds to an MPC memory
+
+ if(elftmp->isExecutable)
+ {
+ if(! elfhandle->segments[meminfo->id].sumSizeSetted)
+ {
+ CM_ASSERT(ELF->sectionss[i].sh_addr >= meminfo->startAddr * meminfo->fileEntSize);
+
+ elfhandle->segments[meminfo->id].sumSizeSetted = TRUE;
+ elfhandle->segments[meminfo->id].sumSize = ELF->sectionss[i].sh_addr - meminfo->startAddr * meminfo->fileEntSize;
+ }
+ else
+ CM_ASSERT(elfhandle->segments[meminfo->id].sumSize == ELF->sectionss[i].sh_addr - meminfo->startAddr * meminfo->fileEntSize);
+ }
+ else
+ {
+ while(elfhandle->segments[meminfo->id].sumSize % ELF->sectionss[i].sh_addralign != 0)
+ elfhandle->segments[meminfo->id].sumSize++;
+ }
+
+ elfhandle->segments[meminfo->id].maxAlign = max(elfhandle->segments[meminfo->id].maxAlign, ELF->sectionss[i].sh_addralign);
+ ELF->sectionss[i].offsetInSegment = elfhandle->segments[meminfo->id].sumSize / meminfo->fileEntSize;
+ elfhandle->segments[meminfo->id].sumSize += ELF->sectionss[i].sh_size;
+ }
+ } else if(ELF->sectionss[i].sh_type == SHT_RELA && ELF->sectionss[i].sh_info == elftmp->nmfSectionIndex) {
+ int secsym = ELF->sectionss[i].sh_link;
+ elftmp->relaNmfSegment = (ElfXX_Rela*)elftmp->sectionData[i];
+ elftmp->relaNmfSegmentEnd = (ElfXX_Rela*)((t_uint32)elftmp->relaNmfSegment + ELF->sectionss[i].sh_size);
+ elftmp->relaNmfSegmentSymbols = (ElfXX_Sym*)elftmp->sectionData[secsym];
+ elftmp->relaNmfSegmentStrings = elftmp->sectionData[ELF->sectionss[secsym].sh_link];
+ }
+ }
+
+ return CM_OK;
+}
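+/*
+ * After this pass, each loadable section knows its offset within its memory
+ * segment (offsetInSegment, counted in file entries) and each segment knows
+ * its accumulated size in file bytes (sumSize). For executables the segment
+ * base is pinned by the section link address relative to the memory start
+ * address; for relocatable objects, sections are simply packed according to
+ * their alignment constraints.
+ */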
+
+void ELF64_UnloadComponent(
+ t_elfdescription *elfhandle)
+{
+ struct XXElf* ELF = elfhandle->ELF;
+ int i, n;
+
+ for(i = 0; i < ELF->e_shnum; i++)
+ {
+ if(ELF->sectionss[i].relocations != NULL)
+ {
+ for(n = 0; n < ELF->sectionss[i].relocationNumber; n++)
+ cm_StringRelease(ELF->sectionss[i].relocations[n].symbol_name);
+ OSAL_Free(ELF->sectionss[i].relocations);
+ }
+
+ OSAL_Free((void*)ELF->sectionss[i].data);
+ }
+ OSAL_Free(elfhandle);
+}
+
+t_cm_error ELF64_loadSegment(
+ t_elfdescription *elfhandle,
+ t_memory_handle *memory,
+ t_memory_property property)
+{
+ struct XXElf* ELF = elfhandle->ELF;
+ int i;
+
+ /*
+ * Copy ELF data in this segment
+ */
+ for(i = 0; i < ELF->e_shnum; i++)
+ {
+ const t_elfmemory* mapping = ELF->sectionss[i].meminfo;
+
+ if(mapping == NULL)
+ continue;
+ if((! (ELF->sectionss[i].sh_flags & SHF_ALLOC)) || (ELF->sectionss[i].sh_size == 0))
+ continue;
+
+ // This is a loadable memory ...
+ if(
+ (mapping->property == property && elfhandle->instanceProperty != MEM_FOR_SINGLETON) ||
+ (property == MEM_SHARABLE && elfhandle->instanceProperty == MEM_FOR_SINGLETON) )
+ {
+ // Memory exists and the expected shared/private property matches
+ t_uint32 remoteData = elfhandle->segments[mapping->id].hostAddr +
+ ELF->sectionss[i].offsetInSegment * mapping->memEntSize;
+
+ if(ELF->sectionss[i].sh_type != SHT_NOBITS)
+ {
+ LOG_INTERNAL(2, "loadSection(%s, 0x%x, 0x%x, 0x%08x)\n",
+ ELF->sectionss[i].sectionName, remoteData, ELF->sectionss[i].trueDataSize,
+ (t_uint32)ELF->sectionss[i].data, 0, 0);
+
+ MMDSP_copySection((t_uint32)ELF->sectionss[i].data, remoteData, ELF->sectionss[i].trueDataSize);
+ }
+ else
+ {
+ LOG_INTERNAL(2, "bzeroSection(%s, 0x%x, 0x%x)\n",
+ ELF->sectionss[i].sectionName, remoteData, ELF->sectionss[i].trueDataSize, 0, 0, 0);
+
+ MMDSP_bzeroSection(remoteData, ELF->sectionss[i].trueDataSize);
+ }
+ }
+ }
+
+ return CM_OK;
+}
+
+
+
+static const t_elfmemory* getSectionAddress(
+ t_memory_handle *memories,
+ t_elfdescription *elfhandle,
+ t_uint32 sectionIdx,
+ t_uint32 *sectionOffset,
+ t_cm_logical_address *sectionAddr) {
+ struct XXElf* ELF = elfhandle->ELF;
+ const t_elfmemory* mapping = ELF->sectionss[sectionIdx].meminfo;
+
+ if(mapping != NULL) {
+ *sectionOffset = (elfhandle->segments[mapping->id].mpcAddr +
+ ELF->sectionss[sectionIdx].offsetInSegment);
+
+ *sectionAddr = (t_cm_logical_address)(elfhandle->segments[mapping->id].hostAddr +
+ ELF->sectionss[sectionIdx].offsetInSegment * mapping->memEntSize);
+ }
+
+ return mapping;
+}
+
+static t_uint32 getSymbolAddress(
+ t_memory_handle *memories,
+ t_elfdescription *elfhandle,
+ t_uint32 symbolSectionIdx,
+ t_uint32 symbolOffet) {
+ struct XXElf* ELF = elfhandle->ELF;
+ const t_elfmemory* mapping = ELF->sectionss[symbolSectionIdx].meminfo;
+
+ if(mapping == NULL)
+ return 0xFFFFFFFF;
+ // CM_ASSERT(elfhandle->segments[mapping->id].sumSize != 0);
+ // CM_ASSERT(elfhandle->sections[symbolSectionIdx].offsetInSegment != 0xFFFFFFFF);
+
+ return elfhandle->segments[mapping->id].mpcAddr +
+ ELF->sectionss[symbolSectionIdx].offsetInSegment +
+ symbolOffet;
+}
+
+#if 0
+t_bool ELFXX_getSymbolLocation(
+ const t_mpcal_memory *mpcalmemory,
+ t_elfdescription *elf,
+ char *symbolName,
+ const t_elfmemory **memory,
+ t_uint32 *offset) {
+ const ElfXX_Ehdr *header = (ElfXX_Ehdr*)elf->elfdata;
+ const ElfXX_Shdr *sections = (ElfXX_Shdr*)&elf->elfdata[swapXword(header->e_shoff)];
+ const char *strings = &elf->elfdata[swapXword(sections[swapHalf(header->e_shstrndx)].sh_offset)];
+ int len = cm_StringLength(symbolName, 256); // TO BE FIXED
+ int i;
+
+ for(i = 0; i < ELF->e_shnum; i++)
+ {
+ ElfXX_Sym* symtab;
+ const char* strtab;
+ unsigned int size, j;
+
+ if(ELF->sectionss[i].sh_type != SHT_SYMTAB && ELF->sectionss[i].sh_type != SHT_DYNSYM) continue;
+
+ // Section is a symbol table
+ symtab = (ElfXX_Sym*)&elf->elfdata[swapXword(sections[i].sh_offset)];
+ strtab = &elf->elfdata[swapXword(sections[swapWord(sections[i].sh_link)].sh_offset)];
+ size = ELF->sectionss[i].sh_size / (unsigned int)swapXword(sections[i].sh_entsize);
+
+ for(j = 0; j < size; j++) {
+ const char* foundName = &strtab[swapWord(symtab[j].st_name)];
+
+ if(cm_StringCompare(symbolName, foundName, len) == 0) {
+ if(swapHalf(symtab[j].st_shndx) != SHN_UNDEF) {
+ int sectionIdx = (int)swapHalf(symtab[j].st_shndx);
+ ElfXX_Xword sh_flags = swapXword(sections[sectionIdx].sh_flags);
+
+ *memory = mpcalmemory->getMappingByName(&strings[swapWord(sections[sectionIdx].sh_name)],
+ sh_flags & SHF_WRITE ? MEM_RW : (sh_flags & SHF_EXECINSTR ? MEM_X : MEM_RO));
+ *offset = (t_uint32)swapXword(symtab[j].st_value);
+
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+#endif
+
+t_cm_error ELF64_relocateSegments(
+ t_memory_handle *memories,
+ t_elfdescription *elfhandle,
+ t_memory_property property,
+ void *cbContext) {
+ struct XXElf* ELF = elfhandle->ELF;
+ int sec, n;
+
+ // Loop on all relocation section
+ for(sec=0; sec < ELF->e_shnum; sec++)
+ {
+ t_cm_logical_address sectionAddr = 0;
+ t_uint32 sectionOffset = 0;
+ const t_elfmemory* mapping;
+
+ if(ELF->sectionss[sec].relocations == NULL)
+ continue;
+
+ // Relocate only section in memory
+ mapping = getSectionAddress(memories,
+ elfhandle,
+ sec,
+ &sectionOffset,
+ &sectionAddr);
+ if(mapping == NULL)
+ continue;
+
+ if(
+ (mapping->property == property && elfhandle->instanceProperty != MEM_FOR_SINGLETON) ||
+ (property == MEM_SHARABLE && elfhandle->instanceProperty == MEM_FOR_SINGLETON) )
+ {
+ LOG_INTERNAL(2, "relocSection(%s)\n", ELF->sectionss[sec].sectionName, 0, 0, 0, 0, 0);
+
+ for(n = 0; n < ELF->sectionss[sec].relocationNumber; n++)
+ {
+ struct XXrelocation* relocation = &ELF->sectionss[sec].relocations[n];
+ t_uint32 symbol_addr;
+ char* relocAddr = (char*)(sectionAddr + relocation->OffsetInElf * mapping->memEntSize);
+
+ switch(relocation->st_shndx) {
+ case SHN_ABS: // Absolute external reference
+ symbol_addr = relocation->st_value;
+ break;
+ case SHN_UNDEF: // External reference
+ // LOG_INTERNAL(0, "cm_resolvSymbol(%d, %s)\n", relocation->type, relocation->symbol_name, 0,0, 0, 0);
+ symbol_addr = cm_resolvSymbol(cbContext,
+ relocation->type,
+ relocation->symbol_name,
+ relocAddr);
+ if(symbol_addr == 0x0) { // Not defined symbol
+ ERROR("Symbol %s not found\n", relocation->symbol_name, 0, 0, 0, 0, 0);
+ return CM_INVALID_ELF_FILE;
+ } else if(symbol_addr == 0xFFFFFFFE) { // OOM
+ return CM_NO_MORE_MEMORY;
+ } else if(symbol_addr == 0xFFFFFFFF) { // Defined inside static binding
+ continue;
+ }
+ break;
+ default: // Internal reference in loaded section
+ symbol_addr = getSymbolAddress(
+ memories,
+ elfhandle,
+ (t_uint32)relocation->st_shndx,
+ relocation->st_value);
+ if(symbol_addr == 0xFFFFFFFF) {
+ ERROR("Symbol in section %s+%d not loaded\n",
+ ELF->sectionss[relocation->st_shndx].sectionName,
+ relocation->st_value, 0, 0, 0, 0);
+ return CM_INVALID_ELF_FILE;
+ }
+ break;
+ }
+
+ symbol_addr += relocation->r_addend;
+
+ MMDSP_performRelocation(
+ relocation->type,
+ relocation->symbol_name,
+ symbol_addr,
+ relocAddr,
+ ELF->sectionss[sec].data + relocation->OffsetInElf * mapping->memEntSize,
+ sectionOffset + relocation->OffsetInElf);
+ }
+ }
+ }
+
+ return CM_OK;
+}
+
+t_cm_error ELF64_getRelocationMemory(
+ t_elfdescription *elfhandle,
+ t_tmp_elfdescription *elftmp,
+ t_uint32 offsetInNmf,
+ t_memory_reference *memory) {
+ struct XXElf* ELF = elfhandle->ELF;
+ const ElfXX_Rela* rel_start;
+ const ElfXX_Sym* relaNmfSegmentSymbols = (ElfXX_Sym*)elftmp->relaNmfSegmentSymbols;
+
+ for(rel_start = (ElfXX_Rela*)elftmp->relaNmfSegment; rel_start < (ElfXX_Rela*)elftmp->relaNmfSegmentEnd; rel_start++)
+ {
+ if((t_uint32)swapXword(rel_start->r_offset) == offsetInNmf)
+ {
+ int strtab_index = ELFXX_R_SYM(swapXword(rel_start->r_info));
+ int sectionIdx = (int)swapHalf(relaNmfSegmentSymbols[strtab_index].st_shndx);
+
+ memory->memory = ELF->sectionss[sectionIdx].meminfo;
+
+ if(memory->memory != NULL) {
+ memory->offset = (
+ ELF->sectionss[sectionIdx].offsetInSegment + // Offset in Segment
+ (t_uint32)swapXword(relaNmfSegmentSymbols[strtab_index].st_value) + // Offset in Elf Section
+ (t_uint32)swapXword(rel_start->r_addend)); // Addend
+
+ return CM_OK;
+ } else {
+ const char* symbol_name = &elftmp->relaNmfSegmentStrings[swapWord(relaNmfSegmentSymbols[strtab_index].st_name)];
+ ERROR("Symbol %s not found\n", symbol_name, 0, 0, 0, 0, 0);
+ return CM_INVALID_ELF_FILE;
+ }
+ }
+ }
+
+ ERROR("Unknown relocation error\n", 0, 0, 0, 0, 0, 0);
+ return CM_INVALID_ELF_FILE;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/src/mmdsp-debug.c b/drivers/staging/nmf-cm/cm/engine/elf/src/mmdsp-debug.c
new file mode 100644
index 00000000000..c6c316046b3
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/src/mmdsp-debug.c
@@ -0,0 +1,435 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ */
+#include <cm/engine/elf/inc/mmdsp-loadmap.h>
+#include <cm/engine/elf/inc/mmdsp.h>
+#include <cm/engine/dsp/inc/semaphores_dsp.h>
+#include <cm/engine/dsp/mmdsp/inc/mmdsp_hwp.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+
+#include <cm/engine/power_mgt/inc/power.h>
+
+#include <cm/engine/utils/inc/string.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/component/inc/instance.h>
+#include <cm/engine/component/inc/component_type.h>
+#include <inc/nmf-limits.h>
+
+#define LOADMAP_SEMAPHORE_USE_NB 7
+
+static t_memory_handle headerHandle[NB_CORE_IDS] = {INVALID_MEMORY_HANDLE, };
+static struct LoadMapHdr *headerAddresses[NB_CORE_IDS] = {0, };
+static t_uint32 headerOffsets[NB_CORE_IDS] = {0, };
+static t_uint32 entryNumber[NB_CORE_IDS] = {0, };
+
+#undef myoffsetof
+#define myoffsetof(TYPE, MEMBER) ((unsigned int) &((TYPE *)0)->MEMBER)
+
+t_cm_error cm_DSPABI_AddLoadMap(
+ t_cm_domain_id domainId,
+ const char* templateName,
+ const char* localname,
+ t_memory_handle *memories,
+ void *componentHandle)
+{
+ t_nmf_core_id coreId = cm_DM_GetDomainCoreId(domainId);
+ int count=0;
+ struct LoadMapItem* curItem = NULL;
+
+ if (headerHandle[coreId] == 0) /* Create loadmap header */
+ {
+ headerHandle[coreId] = cm_DM_Alloc(domainId, SDRAM_EXT16,
+ sizeof(struct LoadMapHdr)/2, CM_MM_ALIGN_2WORDS, TRUE);
+ if (headerHandle[coreId] == INVALID_MEMORY_HANDLE) {
+ ERROR("CM_NO_MORE_MEMORY: Unable to allocate loadmap in cm_DSPABI_AddLoadMap()\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ headerAddresses[coreId] = (struct LoadMapHdr*)cm_DSP_GetHostLogicalAddress(headerHandle[coreId]);
+
+ headerAddresses[coreId]->nMagicNumber = LOADMAP_MAGIC_NUMBER;
+ headerAddresses[coreId]->nVersion = (LOADMAP_VERSION_MSB<<8)|(LOADMAP_VERSION_LSB);
+ headerAddresses[coreId]->nRevision = 0;
+ headerAddresses[coreId]->pFirstItem = 0;
+
+ // Register Header into XRAM:2
+ cm_DSP_GetDspAddress(headerHandle[coreId], &headerOffsets[coreId]);
+ cm_DSP_WriteXRamWord(coreId, 2, headerOffsets[coreId]);
+ }
+
+ // update Header nRevision field
+ headerAddresses[coreId]->nRevision++;
+
+ /*
+ * Build loadmap entry
+ */
+ {
+ t_memory_handle handle;
+ struct LoadMapItem* pItem;
+ t_uint32 dspentry;
+ unsigned char* pos;
+ t_uint32 fnlen, lnlen;
+ t_uint32 fnlenaligned, lnlenaligned;
+ t_uint32 address;
+ t_uint32 postStringLength;
+ int i;
+
+ postStringLength = cm_StringLength(".elf", 16);
+ fnlenaligned = fnlen = cm_StringLength(templateName, MAX_COMPONENT_FILE_PATH_LENGTH) + postStringLength + 2;
+ if((fnlenaligned % 2) != 0) fnlenaligned++;
+ lnlenaligned = lnlen = cm_StringLength(localname, MAX_TEMPLATE_NAME_LENGTH);
+ if((lnlenaligned % 2) != 0) lnlenaligned++;
+
+ // Allocate a new load map entry
+ handle = cm_DM_Alloc(domainId, SDRAM_EXT16,
+ sizeof(struct LoadMapItem)/2 + (1 + fnlenaligned/2) + (1 + lnlenaligned/2),
+ CM_MM_ALIGN_2WORDS, TRUE);
+ if (handle == INVALID_MEMORY_HANDLE) {
+ ERROR("CM_NO_MORE_MEMORY: Unable to allocate loadmap entry in cm_DSPABI_AddLoadMap\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ pItem = (struct LoadMapItem*)cm_DSP_GetHostLogicalAddress(handle);
+ cm_DSP_GetDspAddress(handle, &dspentry);
+ count++;
+ entryNumber[coreId]++;
+
+ // Link this new loadmap with the previous one
+ if(headerAddresses[coreId]->pFirstItem == NULL)
+ headerAddresses[coreId]->pFirstItem = (struct LoadMapItem *)dspentry;
+ else
+ {
+ const t_dsp_desc* pDspDesc = cm_DSP_GetState(coreId);
+ t_uint32 endSegmentAddr = SDRAMMEM16_BASE_ADDR + pDspDesc->segments[SDRAM_DATA_USER].size / 2;
+ struct LoadMapItem* curItem, *prevItem = NULL;
+ t_uint32 curItemDspAdress;
+
+ if(
+ ((t_uint32)headerAddresses[coreId]->pFirstItem < SDRAMMEM16_BASE_ADDR) ||
+ ((t_uint32)headerAddresses[coreId]->pFirstItem > endSegmentAddr))
+ {
+ ERROR("Memory corruption in MMDSP: at data DSP address=%x or ARM address=%x\n",
+ headerOffsets[coreId], &headerAddresses[coreId]->pFirstItem, 0, 0, 0, 0);
+
+ return CM_INVALID_DATA;
+ }
+ curItemDspAdress = (t_uint32)headerAddresses[coreId]->pFirstItem;
+ curItem = (struct LoadMapItem*)((curItemDspAdress - headerOffsets[coreId]) * 2 + (t_uint32)headerAddresses[coreId]); // To ARM address
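+ /*
+ * The load map lives in an SDRAM_EXT16 allocation, where each 16-bit DSP
+ * word occupies two bytes on the host side; hence the conversion above:
+ * ARM address = header host base + (DSP address - header DSP offset) * 2.
+ * The same conversion is used for every pNextItem walk below.
+ */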
+ count++;
+ while(curItem->pNextItem != NULL)
+ {
+ if(((t_uint32)curItem->pNextItem < SDRAMMEM16_BASE_ADDR) || ((t_uint32)curItem->pNextItem > endSegmentAddr))
+ {
+ if (prevItem == NULL)
+ ERROR("AddLoadMap: Memory corruption in MMDSP: at data DSP address=%x or ARM address=%x\n",
+ curItemDspAdress + myoffsetof(struct LoadMapItem, pNextItem), &curItem->pNextItem,
+ 0, 0, 0, 0);
+ else
+ ERROR("AddLoadMap: Memory corruption in MMDSP: at data DSP address=%x or ARM address=%x\n",
+ curItemDspAdress + myoffsetof(struct LoadMapItem, pNextItem), &curItem->pNextItem,
+ 0, 0, 0, 0);
+ return CM_INVALID_DATA;
+ }
+ curItemDspAdress = (t_uint32)curItem->pNextItem;
+ prevItem = curItem;
+ curItem = (struct LoadMapItem*)((curItemDspAdress - headerOffsets[coreId]) * 2 + (t_uint32)headerAddresses[coreId]); // To ARM address
+ count++;
+ }
+ curItem->pNextItem = (struct LoadMapItem *)dspentry;
+ }
+
+ // DSP Address of the string at the end of the load map
+ pos = (unsigned char*)pItem + sizeof(struct LoadMapItem);
+
+ /*
+ * Set SolibFilename address information
+ * -> string = "./origfilename"
+ */
+ pItem->pSolibFilename = (char*)(dspentry + sizeof(struct LoadMapItem) / 2);
+ *(t_uint16*)pos = fnlen;
+ pos += 2;
+ *pos++ = '.';
+ *pos++ = '\\';
+ for(i = 0; i < fnlen - 2 - postStringLength; i++)
+ {
+ *pos++ = (templateName[i] == '.') ? '\\' : templateName[i];
+ }
+ *pos++ = '.';
+ *pos++ = 'e';
+ *pos++ = 'l';
+ *pos++ = 'f';
+ // add padding if needed
+ if ((t_uint32)pos & 1)
+ *pos++ = '\0';
+
+ /*
+ * Set Component Name address information
+ */
+ if (lnlen != 0)
+ {
+ pItem->pComponentName = (char*)(dspentry + sizeof(struct LoadMapItem) / 2 + 1 + fnlenaligned / 2);
+
+ *(t_uint16*)pos = lnlen;
+ pos += 2;
+ for(i = 0; i < lnlenaligned; i++)
+ {
+ // If the length is odd, the terminating NUL is copied as padding
+ *pos++ = localname[i];
+ }
+ }
+ else
+ {
+ pItem->pComponentName = 0;
+ }
+
+ /*
+ * Set PROG information
+ */
+ if(memories[CODE_MEMORY_INDEX] == INVALID_MEMORY_HANDLE)
+ address = 0;
+ else
+ cm_DSP_GetDspAddress(memories[CODE_MEMORY_INDEX], &address);
+ pItem->pAddrProg = (void*)address;
+
+ /*
+ * Set ERAMCODE information
+ */
+ if(memories[ECODE_MEMORY_INDEX] == INVALID_MEMORY_HANDLE)
+ address = 0;
+ else
+ cm_DSP_GetDspAddress(memories[ECODE_MEMORY_INDEX], &address);
+ pItem->pAddrEmbProg = (void*)address;
+
+ /*
+ * Set THIS information
+ */
+ if(memories[PRIVATE_DATA_MEMORY_INDEX] != INVALID_MEMORY_HANDLE) {
+ // Standard component
+ cm_DSP_GetDspAddress(memories[PRIVATE_DATA_MEMORY_INDEX], &address);
+ } else if(memories[SHARE_DATA_MEMORY_INDEX] != INVALID_MEMORY_HANDLE) {
+ // Singleton component where data are shared (simulate THIS with shared memory)
+ cm_DSP_GetDspAddress(memories[SHARE_DATA_MEMORY_INDEX], &address);
+ } else {
+ // Component without data (use the host component handle as an arbitrary unique identifier)
+ address = (t_uint32)componentHandle;
+ }
+ pItem->pThis = (void*)address;
+
+ /*
+ * Set ARM THIS information
+ */
+ pItem->pARMThis = componentHandle;
+
+ /*
+ * Set Link to null (end of list)
+ */
+ pItem->pNextItem = 0;
+
+ /*
+ * Set XROM information
+ */
+ if(memories[XROM_MEMORY_INDEX] == INVALID_MEMORY_HANDLE)
+ address = 0;
+ else
+ cm_DSP_GetDspAddress(memories[XROM_MEMORY_INDEX], &address);
+ pItem->pXROM = (void*)address;
+
+ /*
+ * Set YROM information
+ */
+ if(memories[YROM_MEMORY_INDEX] == INVALID_MEMORY_HANDLE)
+ address = 0;
+ else
+ cm_DSP_GetDspAddress(memories[YROM_MEMORY_INDEX], &address);
+ pItem->pYROM = (void*)address;
+
+ /*
+ * Set memory handle (not used externally)
+ */
+ ((t_component_instance *)componentHandle)->loadMapHandle = handle;
+ }
+
+ OSAL_mb();
+
+ if (count != entryNumber[coreId]) {
+ ERROR("AddLoadMap: corrumption, number of component differs: count=%d, expected %d (last item @ %p)\n",
+ count, entryNumber[coreId], curItem, 0, 0, 0);
+ return CM_INVALID_DATA;
+ }
+ return CM_OK;
+}
+
+t_cm_error cm_DSPABI_RemoveLoadMap(
+ t_cm_domain_id domainId,
+ const char* templateName,
+ t_memory_handle *memories,
+ const char* localname,
+ void *componentHandle)
+{
+ struct LoadMapItem **prevItemReference;
+ t_uint32 prevItemReferenceDspAddress, curItemDspAdress;
+ t_nmf_core_id coreId = cm_DM_GetDomainCoreId(domainId);
+ const t_dsp_desc* pDspDesc = cm_DSP_GetState(coreId);
+ t_uint32 endSegmentAddr = SDRAMMEM16_BASE_ADDR + pDspDesc->segments[SDRAM_DATA_USER].size / 2;
+ struct LoadMapItem* curItem = NULL;
+
+ CM_ASSERT (headerHandle[coreId] != INVALID_MEMORY_HANDLE);
+
+ /* parse list until we find this */
+ prevItemReferenceDspAddress = 0x2; // DSP address of load map head pointer
+ prevItemReference = &headerAddresses[coreId]->pFirstItem;
+ curItemDspAdress = (t_uint32)*prevItemReference;
+ while(curItemDspAdress != 0x0)
+ {
+ if((curItemDspAdress < SDRAMMEM16_BASE_ADDR) || (curItemDspAdress > endSegmentAddr))
+ {
+ ERROR("Memory corruption in MMDSP: at data DSP address=%x or ARM address=%x\n",
+ prevItemReferenceDspAddress, prevItemReference, 0, 0, 0, 0);
+
+ /* free the entry anyway to avoid leakage */
+ cm_DM_Free(((t_component_instance *)componentHandle)->loadMapHandle, TRUE);
+
+ return CM_OK;
+ }
+
+ curItem = (struct LoadMapItem*)((curItemDspAdress - headerOffsets[coreId]) * 2 + (t_uint32)headerAddresses[coreId]); // To ARM address
+
+ if(curItem->pARMThis == componentHandle)
+ {
+ // Remove component from loadmap
+
+ /* take local semaphore */
+ cm_DSP_SEM_Take(coreId,LOADMAP_SEMAPHORE_USE_NB);
+
+ /* remove element from list */
+ *prevItemReference = curItem->pNextItem;
+
+ /* update nRevision field in header */
+ headerAddresses[coreId]->nRevision++;
+
+ /* If the list is now empty, deallocate the header */
+ if(headerAddresses[coreId]->pFirstItem == NULL)
+ {
+ // Deallocate memory
+ cm_DM_Free(headerHandle[coreId], TRUE);
+ headerHandle[coreId] = INVALID_MEMORY_HANDLE;
+
+ //Clear the Header reference registered in XRAM:2
+ cm_DSP_WriteXRamWord(coreId, 2, 0);
+ }
+
+ /* deallocate memory */
+ cm_DM_Free(((t_component_instance *)componentHandle)->loadMapHandle, TRUE);
+
+ /* be sure memory is updated before releasing local semaphore */
+ OSAL_mb();
+
+ /* release local semaphore */
+ cm_DSP_SEM_Give(coreId,LOADMAP_SEMAPHORE_USE_NB);
+
+ entryNumber[coreId]--;
+
+ return CM_OK;
+ }
+
+ prevItemReferenceDspAddress = curItemDspAdress + myoffsetof(struct LoadMapItem, pNextItem);
+ prevItemReference = &curItem->pNextItem;
+ curItemDspAdress = (t_uint32)*prevItemReference;
+ };
+
+ ERROR("Memory corruption in MMDSP: component not in LoadMap %s\n", localname, 0, 0, 0, 0, 0);
+
+ /* free the entry anyway to avoid leakage */
+ cm_DM_Free(((t_component_instance *)componentHandle)->loadMapHandle, TRUE);
+
+ return CM_OK;
+}
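For reference, the list walk above maps a 16-bit-word DSP address back to an ARM byte address with (dspAddr - headerOffset) * 2 + headerArmBase. A minimal standalone sketch of that conversion (names and types here are illustrative, not taken from the driver):

    #include <stdint.h>

    /* Sketch of the DSP-to-ARM mapping used when walking the load map:
     * DSP data addresses count 16-bit words, ARM addresses count bytes. */
    static inline void *dsp_to_arm(uint32_t dsp_addr, uint32_t header_offset,
                                   uintptr_t header_arm_base)
    {
        return (void *)(header_arm_base + (uintptr_t)(dsp_addr - header_offset) * 2);
    }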
+
+#if 0
+t_cm_error cm_DSPABI_CheckLoadMap_nolock(t_nmf_core_id coreId)
+{
+ int count=0;
+ static int dump = 5;
+ struct LoadMapItem* curItem = NULL;
+
+ if (!dump)
+ return CM_OK;
+ if (headerHandle[coreId] == 0) /* No load map yet */
+ return CM_OK;
+
+ {
+ // No entry in loadmap
+ if(headerAddresses[coreId]->pFirstItem == NULL)
+ return CM_OK;
+
+ {
+ const t_dsp_desc* pDspDesc = cm_DSP_GetState(coreId);
+ t_uint32 endSegmentAddr = SDRAMMEM16_BASE_ADDR + pDspDesc->segments[SDRAM_DATA_USER].size / 2;
+ struct LoadMapItem *prevItem=NULL;
+ t_uint32 curItemDspAdress;
+
+ if (((t_uint32)headerAddresses[coreId]->pFirstItem < SDRAMMEM16_BASE_ADDR) ||
+ ((t_uint32)headerAddresses[coreId]->pFirstItem > endSegmentAddr))
+ {
+ ERROR("CheckLoadMap: Memory corruption in MMDSP at first item: at data DSP address=%x or ARM address=%x\n",
+ headerOffsets[coreId], &headerAddresses[coreId]->pFirstItem, 0, 0, 0, 0);
+ dump--;
+ return CM_INVALID_COMPONENT_HANDLE;
+ }
+ curItemDspAdress = (t_uint32)headerAddresses[coreId]->pFirstItem;
+ curItem = (struct LoadMapItem*)((curItemDspAdress - headerOffsets[coreId]) * 2 + (t_uint32)headerAddresses[coreId]);
+ count++;
+ while(curItem->pNextItem != NULL)
+ {
+ if(((t_uint32)curItem->pNextItem < SDRAMMEM16_BASE_ADDR) || ((t_uint32)curItem->pNextItem > endSegmentAddr))
+ {
+ if (!prevItem)
+ ERROR("CheckLoadMap: Memory corruption in MMDSP (count=%d): at data DSP address=%x or ARM address=%x\n"
+ "Previous (first) component name %s<%s>\n",
+ count,
+ curItemDspAdress + myoffsetof(struct LoadMapItem, pNextItem), &curItem->pNextItem,
+ (char*)(((t_component_instance *)&curItem->pARMThis)->pathname),
+ (char*)(((t_component_instance *)&curItem->pARMThis)->Template->name), 0);
+ else
+ ERROR("CheckLoadMap: Memory corruption in MMDSP (count=%d): at data DSP address=%x or ARM address=%x\n"
+ "Previous valid component name %s<%s>",
+ count,
+ curItemDspAdress + myoffsetof(struct LoadMapItem, pNextItem), &curItem->pNextItem,
+ (char*)(((t_component_instance *)&prevItem->pARMThis)->pathname),
+ (char*)(((t_component_instance *)&prevItem->pARMThis)->Template->name), 0);
+ dump--;
+ return CM_INVALID_COMPONENT_HANDLE;
+ }
+ curItemDspAdress = (t_uint32)curItem->pNextItem;
+ prevItem = curItem;
+ curItem = (struct LoadMapItem*)((curItemDspAdress - headerOffsets[coreId]) * 2 + (t_uint32)headerAddresses[coreId]); // To ARM address
+ count++;
+ }
+ }
+
+ }
+
+ if (count != entryNumber[coreId]) {
+ ERROR("CheckLoadMap: number of component differs: count=%d, expected %d (last item @ %p)\n", count, entryNumber[coreId],
+ curItem, 0, 0, 0);
+ dump--;
+ return CM_INVALID_COMPONENT_HANDLE;
+ }
+ return CM_OK;
+}
+
+t_cm_error cm_DSPABI_CheckLoadMap(t_nmf_core_id coreId)
+{
+ t_cm_error error;
+ OSAL_LOCK_API();
+ error = cm_DSPABI_CheckLoadMap_nolock(coreId);
+ OSAL_UNLOCK_API();
+ return error;
+}
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/elf/src/mpcal.c b/drivers/staging/nmf-cm/cm/engine/elf/src/mpcal.c
new file mode 100644
index 00000000000..93d910a5ed6
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/elf/src/mpcal.c
@@ -0,0 +1,6 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/elf/inc/mpcal.h>
diff --git a/drivers/staging/nmf-cm/cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h b/drivers/staging/nmf-cm/cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h
new file mode 100644
index 00000000000..0894410ae0d
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef __INC_EE_MGT_H
+#define __INC_EE_MGT_H
+
+#include <cm/engine/component/inc/instance.h>
+#include <cm/engine/dsp/inc/dsp.h>
+
+typedef struct {
+ t_component_instance *instance;
+ t_nmf_executive_engine_id executiveEngineId;
+ t_uint32 currentStackSize[NMF_SCHED_URGENT + 1];
+ t_uint32 voidAddr;
+ t_uint32 traceState;
+ t_uint32 printLevel;
+ t_uint32 nbOfForceWakeup;
+ struct {
+ t_memory_handle handle;
+ t_cm_logical_address addr;
+ } panicArea;
+
+ // Trace Management
+ t_uint32 readTracePointer;
+ t_uint32 lastReadedTraceRevision;
+ t_memory_handle traceDataHandle;
+ struct t_nmf_trace *traceDataAddr;
+} t_ee_state;
+
+//TODO, juraj, this should be done more properly, e.g. with an accessor method, instead of making this a global variable
+extern t_ee_state eeState[NB_CORE_IDS];
+
+/******************************************************************************/
+/************************ FUNCTIONS PROTOTYPES ********************************/
+/******************************************************************************/
+
+PUBLIC t_cm_error cm_EEM_Init(t_nmf_core_id coreId, const char *eeName, t_nmf_executive_engine_id executiveEngineId);
+PUBLIC void cm_EEM_Close(t_nmf_core_id coreId);
+PUBLIC t_uint32 cm_EEM_isStackUpdateNeed(t_nmf_core_id coreId, t_nmf_ee_priority priority, t_uint32 isInstantiate, t_uint32 needMinStackSize);
+PUBLIC t_cm_error cm_EEM_UpdateStack(t_nmf_core_id coreId, t_nmf_ee_priority priority, t_uint32 needMinStackSize, t_uint32 *pNewStackValue);
+PUBLIC t_ee_state* cm_EEM_getExecutiveEngine(t_nmf_core_id coreId);
+PUBLIC void cm_EEM_setTraceMode(t_nmf_core_id coreId, t_uint32 state);
+PUBLIC void cm_EEM_setPrintLevel(t_nmf_core_id coreId, t_uint32 level);
+t_cm_error cm_EEM_ForceWakeup(t_nmf_core_id coreId);
+void cm_EEM_AllowSleep(t_nmf_core_id coreId);
+
+#endif /* __INC_EE_MGT_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/executive_engine_mgt/src/executive_engine_mgt.c b/drivers/staging/nmf-cm/cm/engine/executive_engine_mgt/src/executive_engine_mgt.c
new file mode 100644
index 00000000000..4df3d7ee0f5
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/executive_engine_mgt/src/executive_engine_mgt.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*----------------------------------------------------------------------------*
+ * This module provides functions to manage the DSPs' firmware. *
+ ******************************************************************************/
+
+
+/******************************************************************* Includes
+ ****************************************************************************/
+
+#include "../inc/executive_engine_mgt.h"
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/utils/inc/convert.h>
+#include <cm/engine/component/inc/initializer.h>
+#include <cm/engine/power_mgt/inc/power.h>
+#include <cm/engine/perfmeter/inc/mpcload.h>
+
+#include <cm/engine/trace/inc/xtitrace.h>
+
+#include <share/communication/inc/nmf_service.h>
+
+t_ee_state eeState[NB_CORE_IDS];
+
+/****************************************************************** Functions
+ ****************************************************************************/
+static t_cm_error cm_EEM_allocPanicArea(t_nmf_core_id coreId, t_cm_domain_id domainId);
+static void cm_EEM_freePanicArea(t_nmf_core_id coreId);
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetExecutiveEngineHandle(
+ t_cm_domain_id domainId,
+ t_cm_instance_handle *executiveEngineHandle)
+{
+ t_nmf_core_id coreId;
+
+ if (cm_DM_CheckDomain(domainId, DOMAIN_NORMAL) != CM_OK) {
+ return CM_INVALID_DOMAIN_HANDLE;
+ }
+
+ coreId = cm_DM_GetDomainCoreId(domainId);
+ //in case someone asks for the EE on the component manager (ARM) core
+ if (coreId == ARM_CORE_ID) {*executiveEngineHandle = 0;}
+ else {*executiveEngineHandle = eeState[coreId].instance->instance;}
+
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_EEM_Init(
+ t_nmf_core_id coreId,
+ const char *eeName,
+ t_nmf_executive_engine_id executiveEngineId)
+{
+ t_rep_component *pRepComponent;
+ t_cm_error error;
+ t_uint32 i;
+
+ eeState[coreId].instance = (t_component_instance *)0;
+ eeState[coreId].executiveEngineId = executiveEngineId;
+ for(i = NMF_SCHED_BACKGROUND; i < NMF_SCHED_URGENT + 1;i++)
+ {
+ eeState[coreId].currentStackSize[i] = MIN_STACK_SIZE;
+ }
+
+ // Try to load component file
+ if((error = cm_REP_lookupComponent(eeName, &pRepComponent)) != CM_OK)
+ {
+ if (error == CM_COMPONENT_NOT_FOUND)
+ ERROR("CM_COMPONENT_NOT_FOUND: Execution Engine %s\n", eeName, 0, 0, 0, 0, 0);
+ return error;
+ }
+
+ // Set to 1 during bootstrap, since the MMDSP forceWakeup is also set to one, so that the core
+ // does not enter idle state while the configuration is not finished
+ eeState[coreId].nbOfForceWakeup = 1;
+
+ if ((error = cm_DSP_Boot(coreId)) != CM_OK)
+ return error;
+
+ if((error = cm_instantiateComponent(
+ eeName,
+ cm_DSP_GetState(coreId)->domainEE,
+ NMF_SCHED_URGENT,
+ eeName,
+ pRepComponent->elfhandle,
+ &eeState[coreId].instance)) != CM_OK)
+ {
+ cm_DSP_Shutdown(coreId);
+ return error;
+ }
+
+ /* Get Void Function */
+ eeState[coreId].voidAddr = cm_getFunction(eeState[coreId].instance, "helper", "Void");
+
+ /* allocate xram space for stack */
+ if (executiveEngineId == SYNCHRONOUS_EXECUTIVE_ENGINE)
+ {
+ error = cm_DSP_setStackSize(coreId, MIN_STACK_SIZE);
+ }
+ else
+ {
+ error = cm_DSP_setStackSize(coreId, (NMF_SCHED_URGENT + 1) * MIN_STACK_SIZE);
+ }
+ if (error != CM_OK)
+ {
+ cm_delayedDestroyComponent(eeState[coreId].instance);
+ eeState[coreId].instance = (t_component_instance *)0;
+ cm_DSP_Shutdown(coreId);
+ return error;
+ }
+
+ /* allocate sdram memory for panic area */
+ error = cm_EEM_allocPanicArea(coreId, cm_DSP_GetState(coreId)->domainEE);
+ if (error != CM_OK) {
+ cm_delayedDestroyComponent(eeState[coreId].instance);
+ eeState[coreId].instance = (t_component_instance *)0;
+ cm_DSP_Shutdown(coreId);
+ return error;
+ }
+
+ /* allocate sdram memory to share perfmeters data */
+ error = cm_PFM_allocatePerfmeterDataMemory(coreId, cm_DSP_GetState(coreId)->domainEE);
+ if (error != CM_OK) {
+ cm_EEM_freePanicArea(coreId);
+ cm_delayedDestroyComponent(eeState[coreId].instance);
+ eeState[coreId].instance = (t_component_instance *)0;
+ cm_DSP_Shutdown(coreId);
+ return error;
+ }
+
+ if((error = cm_SRV_allocateTraceBufferMemory(coreId, cm_DSP_GetState(coreId)->domainEE)) != CM_OK)
+ {
+ cm_PFM_deallocatePerfmeterDataMemory(coreId);
+ cm_EEM_freePanicArea(coreId);
+ cm_delayedDestroyComponent(eeState[coreId].instance);
+ eeState[coreId].instance = (t_component_instance *)0;
+ cm_DSP_Shutdown(coreId);
+ return error;
+ }
+
+ /* set initial stack value */
+ cm_writeAttribute(eeState[coreId].instance, "rtos/scheduler/topOfStack", cm_DSP_getStackAddr(coreId));
+
+ /* set myCoreId for trace */
+ cm_writeAttribute(eeState[coreId].instance, "xti/myCoreId", coreId - 1);
+
+#if defined(__STN_8500) && (__STN_8500 > 10)
+ /* set myCoreId for prcmu if exist */
+ cm_writeAttribute(eeState[coreId].instance, "sleep/prcmu/myCoreId", coreId + 1);
+#endif
+
+ /* go go go ... */
+ cm_DSP_Start(coreId);
+
+ /* Waiting for End Of Boot */
+ //TODO : remove infinite while loop
+ //TODO : to be paranoid, read serviceReasonOffset before starting the core and check that the value is MPC_SERVICE_BOOT as expected
+ {
+ while(cm_readAttributeNoError(eeState[coreId].instance, "rtos/commonpart/serviceReason") == MPC_SERVICE_BOOT)
+ {
+ volatile t_uint32 i;
+ for (i=0; i < 1000; i++);
+ }
+ }
+
+ /* set some attributes after boot to avoid them being erased by the mmdsp boot */
+ cm_writeAttribute(eeState[coreId].instance, "xti/traceActive", eeState[coreId].traceState);
+ cm_writeAttribute(eeState[coreId].instance, "rtos/commonpart/printLevel", eeState[coreId].printLevel);
+
+ cm_DSP_ConfigureAfterBoot(coreId);
+
+ return CM_OK;
+}
+
+/****************************************************************************/
+/* NAME: cm_EEM_Close */
+/*--------------------------------------------------------------------------*/
+/* DESCRIPTION: Called when the EE for coreId is destroyed; frees resources */
+/* */
+/* PARAMETERS: id: dsp identifier */
+/* */
+/* RETURN: none */
+/* */
+/****************************************************************************/
+PUBLIC void cm_EEM_Close(t_nmf_core_id coreId)
+{
+ cm_DSP_setStackSize(coreId, 0);
+ cm_delayedDestroyComponent(eeState[coreId].instance);
+ eeState[coreId].instance = (t_component_instance *)0;
+ cm_SRV_deallocateTraceBufferMemory(coreId);
+ cm_PFM_deallocatePerfmeterDataMemory(coreId);
+ cm_EEM_freePanicArea(coreId);
+ cm_DSP_Shutdown(coreId);
+}
+
+/****************************************************************************/
+/* NAME: cm_EEM_isStackUpdateNeed( */
+/* t_nmf_core_id id, */
+/* t_nmf_ee_priority priority, */
+/* t_uint32 isInstantiate, */
+/* t_uint32 needMinStackSize */
+/* ) */
+/*--------------------------------------------------------------------------*/
+/* DESCRIPTION: Return a boolean telling whether an EE stack size update is */
+/* needed when instantiating or destroying a component */
+/****************************************************************************/
+PUBLIC t_uint32 cm_EEM_isStackUpdateNeed(
+ t_nmf_core_id coreId,
+ t_nmf_ee_priority priority,
+ t_uint32 isInstantiate,
+ t_uint32 needMinStackSize)
+{
+ /* in case of SYNCHRONOUS_EXECUTIVE_ENGINE we only use currentStackSize[NMF_SCHED_BACKGROUND] */
+ if (eeState[coreId].executiveEngineId == SYNCHRONOUS_EXECUTIVE_ENGINE) {priority = NMF_SCHED_BACKGROUND;}
+ if (isInstantiate)
+ {
+ if (needMinStackSize > eeState[coreId].currentStackSize[priority]) {return TRUE;}
+ }
+ else
+ {
+ if (needMinStackSize == eeState[coreId].currentStackSize[priority]) {return TRUE;}
+ }
+
+ return FALSE;
+}
+
+/****************************************************************************/
+/* NAME: cm_EEM_UpdateStack( */
+/* t_nmf_core_id id, */
+/* t_nmf_ee_priority priority, */
+/* t_uint32 needMinStackSize, */
+/* t_uint32 *pNewStackValue */
+/* ) */
+/*--------------------------------------------------------------------------*/
+/* DESCRIPTION: If cm_EEM_isStackUpdateNeed() has returned true then the */
+/* caller must inform EEM of the new stack value for that priority. */
+/* cm_EEM_UpdateStack() returns the new global stack size to */
+/* provide to the EE. */
+/****************************************************************************/
+PUBLIC t_cm_error cm_EEM_UpdateStack(
+ t_nmf_core_id coreId,
+ t_nmf_ee_priority priority,
+ t_uint32 needMinStackSize,
+ t_uint32 *pNewStackValue)
+{
+ t_cm_error error;
+ t_uint32 recoveryStackSize = eeState[coreId].currentStackSize[priority];
+ t_uint32 i;
+
+ /* in case of SYNCHRONOUS_EXECUTIVE_ENGINE we only use currentStackSize[NMF_SCHED_BACKGROUND] */
+ if (eeState[coreId].executiveEngineId == SYNCHRONOUS_EXECUTIVE_ENGINE) {priority = NMF_SCHED_BACKGROUND;}
+ eeState[coreId].currentStackSize[priority] = needMinStackSize;
+ if (eeState[coreId].executiveEngineId == SYNCHRONOUS_EXECUTIVE_ENGINE) {*pNewStackValue = needMinStackSize;}
+ else
+ {
+ *pNewStackValue = 0;
+ for(i = NMF_SCHED_BACKGROUND; i < NMF_SCHED_URGENT + 1;i++)
+ {
+ *pNewStackValue += eeState[coreId].currentStackSize[i];
+ }
+ }
+
+ /* try to increase size of stack by modifying xram allocator size */
+ error = cm_DSP_setStackSize(coreId, *pNewStackValue);
+ if (error != CM_OK) {
+ eeState[coreId].currentStackSize[priority] = recoveryStackSize;
+ } else {
+ LOG_INTERNAL(1, "\n##### Stack update: size=%d, prio=%d on %s #####\n", *pNewStackValue, priority, cm_getDspName(coreId), 0, 0, 0);
+ }
+
+ return error;
+}
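A caller-side sketch of the contract described above; coreId and neededSize are hypothetical inputs, and the final step of programming the new size into the EE is only hinted at since it is not shown in this file:

    t_uint32 newStack;

    if (cm_EEM_isStackUpdateNeed(coreId, NMF_SCHED_URGENT, /* isInstantiate */ 1, neededSize))
    {
        if (cm_EEM_UpdateStack(coreId, NMF_SCHED_URGENT, neededSize, &newStack) == CM_OK)
        {
            /* newStack now holds the new global stack size to provide to the EE
             * (the mechanism used to program it is outside this file). */
        }
    }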
+
+/****************************************************************************/
+/* NAME: cm_EEM_getExecutiveEngine( */
+/* t_nmf_core_id id */
+/* ) */
+/*--------------------------------------------------------------------------*/
+/* DESCRIPTION: return the executive engine state loaded on the given core. */
+/****************************************************************************/
+PUBLIC t_ee_state * cm_EEM_getExecutiveEngine(t_nmf_core_id coreId)
+{
+ return &eeState[coreId];
+}
+
+/****************************************************************************/
+/* NAME: cm_EEM_setTraceMode( */
+/* t_nmf_core_id id, */
+/* t_uint32 state */
+/* ) */
+/*--------------------------------------------------------------------------*/
+/* DESCRIPTION: activate/deactivate trace for the EE running on coreId. If */
+/* the EE is not yet loaded, the information is stored for later. */
+/****************************************************************************/
+PUBLIC void cm_EEM_setTraceMode(t_nmf_core_id coreId, t_uint32 state)
+{
+ eeState[coreId].traceState = state;
+ if (eeState[coreId].instance)
+ {
+ if(cm_EEM_ForceWakeup(coreId) == CM_OK)
+ {
+ cm_writeAttribute(eeState[coreId].instance, "xti/traceActive", eeState[coreId].traceState);
+
+ cm_EEM_AllowSleep(coreId);
+ }
+ }
+}
+
+/****************************************************************************/
+/* NAME: cm_EEM_setPrintLevel( */
+/* t_nmf_core_id id, */
+/* t_uint32 level */
+/* ) */
+/*--------------------------------------------------------------------------*/
+/* DESCRIPTION: set the print level for the EE running on coreId. If */
+/* the EE is not yet loaded, the information is stored for later. */
+/****************************************************************************/
+PUBLIC void cm_EEM_setPrintLevel(t_nmf_core_id coreId, t_uint32 level)
+{
+ eeState[coreId].printLevel = level;
+ if (eeState[coreId].instance)
+ {
+ if(cm_EEM_ForceWakeup(coreId) == CM_OK)
+ {
+ cm_writeAttribute(eeState[coreId].instance, "rtos/commonpart/printLevel", eeState[coreId].printLevel);
+
+ cm_EEM_AllowSleep(coreId);
+ }
+ }
+}
+
+t_cm_error cm_EEM_ForceWakeup(t_nmf_core_id coreId)
+{
+ if(eeState[coreId].nbOfForceWakeup++ == 0)
+ {
+ t_cm_error error;
+
+ LOG_INTERNAL(2, "ARM: Try to wake up\n", 0, 0, 0, 0, 0, 0);
+
+ if (cm_DSP_GetState(coreId)->state != MPC_STATE_BOOTED)
+ {
+ return CM_MPC_NOT_RESPONDING;
+ }
+ else if ((error = cm_COMP_ULPForceWakeup(coreId)) != CM_OK)
+ {
+ if (error == CM_MPC_NOT_RESPONDING) {
+ if(cm_DSP_GetState(coreId)->state == MPC_STATE_PANIC)
+ /* Don't print an error: it has already been reported by the panic handling */;
+ else
+ {
+ ERROR("CM_MPC_NOT_RESPONDING: DSP %s can't be wakeup'ed\n", cm_getDspName(coreId), 0, 0, 0, 0, 0);
+ cm_DSP_SetStatePanic(coreId);
+ }
+ }
+ return error;
+ }
+ }
+ return CM_OK;
+}
+
+void cm_EEM_AllowSleep(t_nmf_core_id coreId)
+{
+ if(--eeState[coreId].nbOfForceWakeup == 0)
+ {
+ LOG_INTERNAL(2, "ARM: Allow sleep\n", 0, 0, 0, 0, 0, 0);
+
+ if (cm_DSP_GetState(coreId)->state != MPC_STATE_BOOTED)
+ {
+ }
+ else if (cm_COMP_ULPAllowSleep(coreId) != CM_OK)
+ {
+ ERROR("CM_MPC_NOT_RESPONDING: DSP %s can't be allow sleep'ed\n", cm_getDspName(coreId), 0, 0, 0, 0, 0);
+ }
+ }
+}
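The two functions above form a reference-counted wake-up bracket; the pattern used elsewhere in this file (e.g. cm_EEM_setTraceMode) boils down to the following sketch:

    if (cm_EEM_ForceWakeup(coreId) == CM_OK)
    {
        /* ... access the MPC while it is guaranteed to stay awake ... */

        cm_EEM_AllowSleep(coreId);
    }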
+
+/* internal api */
+t_cm_error cm_EEM_allocPanicArea(t_nmf_core_id coreId, t_cm_domain_id domainId)
+{
+ t_cm_error error = CM_OK;
+
+ eeState[coreId].panicArea.handle = cm_DM_Alloc(cm_DSP_GetState(coreId)->domainEE, SDRAM_EXT24, 45 /* 42 registers, pc, 2 magic words */,CM_MM_ALIGN_WORD, TRUE);
+ if (eeState[coreId].panicArea.handle == INVALID_MEMORY_HANDLE)
+ error = CM_NO_MORE_MEMORY;
+ else {
+ t_uint32 mmdspAddr;
+
+ eeState[coreId].panicArea.addr = cm_DSP_GetHostLogicalAddress(eeState[coreId].panicArea.handle);
+ cm_DSP_GetDspAddress(eeState[coreId].panicArea.handle, &mmdspAddr);
+
+ cm_writeAttribute(eeState[coreId].instance, "rtos/commonpart/panicDataAddr", mmdspAddr);
+ }
+
+ return error;
+}
+
+void cm_EEM_freePanicArea(t_nmf_core_id coreId)
+{
+ eeState[coreId].panicArea.addr = 0;
+ cm_DM_Free(eeState[coreId].panicArea.handle, TRUE);
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/chunk_mgr.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/chunk_mgr.h
new file mode 100644
index 00000000000..340301a9259
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/chunk_mgr.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef CHUNK_MGR_H_
+#define CHUNK_MGR_H_
+
+#include <cm/engine/memory/inc/remote_allocator.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+
+t_cm_error allocChunkPool(void);
+t_cm_error fillChunkPool(void);
+void freeChunkPool(void);
+
+/***************************************************************************/
+/*
+ * allocChunk
+ * return : Pointer to a chunk taken from the free chunk pool
+ *
+ * Get a chunk from the free chunk pool
+ *
+ */
+/***************************************************************************/
+t_cm_chunk* allocChunk(void);
+
+/***************************************************************************/
+/*
+ * freeChunk
+ * param chunk : Pointer to the chunk to free
+ *
+ * Return a chunk to the free chunk pool
+ *
+ */
+/***************************************************************************/
+void freeChunk(t_cm_chunk *chunk);
+
+#endif /*CHUNK_MGR_H_*/
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/domain.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/domain.h
new file mode 100644
index 00000000000..c9b39956c63
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/domain.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/***************************************************************************/
+/* file : domain.h
+ * author : NMF team
+ * version : 1.0
+ *
+ * brief : NMF domain definitions
+ */
+/***************************************************************************/
+
+#ifndef DOMAIN_H_
+#define DOMAIN_H_
+
+#include <cm/inc/cm_type.h>
+#include <cm/engine/memory/inc/domain_type.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/dsp/inc/dsp.h>
+
+/* These default domains are used for singleton only ! */
+#define DEFAULT_SVA_DOMAIN (t_cm_domain_id)1
+#define DEFAULT_SIA_DOMAIN (t_cm_domain_id)2
+
+/*!
+ * \brief Domain type.
+ * \internal
+ * \ingroup CM_DOMAIN_API
+ */
+typedef enum {
+ DOMAIN_ANY = 0,
+ DOMAIN_NORMAL,
+ DOMAIN_SCRATCH_PARENT,
+ DOMAIN_SCRATCH_CHILD
+} t_cm_domain_type;
+
+/*!
+ * \brief Domain descriptor. Holds offsets for all memory types present in the system.
+ * \internal
+ * \ingroup CM_DOMAIN_API
+ */
+typedef struct {
+ t_cm_domain_memory domain; // the actual memory ranges
+ t_cm_domain_type type; // domain type
+ t_uint32 refcount; // reference counter for scratch domain dependencies
+ t_nmf_client_id client; // client id for cleaning
+
+ union {
+ struct {
+ t_memory_handle handle; // memory handle of the allocated chunk that covers the esram-data scratch region
+ } parent;
+ struct {
+ t_cm_allocator_desc *alloc; //allocator descriptor for the scratch domain
+ t_cm_domain_id parent_ref; //parent domain reference
+ } child;
+ } scratch;
+ void *dbgCooky; //pointer to OS internal data
+} t_cm_domain_desc;
+
+#ifdef DEBUG
+#define DOMAIN_DEBUG(handle) \
+ handle = handle & ~0xc0;
+#else
+#define DOMAIN_DEBUG(handle)
+#endif
+
+/*!
+ * \brief Domain descriptor array.
+ */
+extern t_cm_domain_desc domainDesc[];
+
+typedef struct {
+ t_cm_domain_id parentId;
+ t_cm_domain_id domainId;
+ t_cm_allocator_desc *allocDesc;
+} t_cm_domain_scratch_desc;
+
+extern t_cm_domain_scratch_desc domainScratchDesc[];
+
+typedef struct {
+ t_cm_system_address sdramCode;
+ t_cm_system_address sdramData;
+ t_cm_system_address esramCode;
+ t_cm_system_address esramData;
+} t_cm_domain_info;
+
+/*!
+ * \brief Init of the domain subsystem.
+ */
+PUBLIC t_cm_error cm_DM_Init(void);
+
+/*!
+ * \brief Clean-up of the domain subsystem.
+ */
+PUBLIC void cm_DM_Destroy(void);
+
+/*!
+ * \brief Domain creation.
+ *
+ * Allocates a slot in the domain descriptors array and copies the segment info from the domain
+ * parameter to the descriptor. The resulting handle is returned via @param handle.
+ *
+ * Returns: an error code (e.g. CM_INVALID_DOMAIN_DEFINITION) in case of error, otherwise CM_OK.
+ */
+PUBLIC t_cm_error cm_DM_CreateDomain(const t_nmf_client_id client, const t_cm_domain_memory *domain, t_cm_domain_id *handle);
+
+/*!
+ * \brief Scratch (or overlap) domain creation.
+ *
+ * Create a scratch domain, i.e. a domain where allocations may overlap.
+ */
+PUBLIC t_cm_error cm_DM_CreateDomainScratch(const t_nmf_client_id client, const t_cm_domain_id parentId, const t_cm_domain_memory *domain, t_cm_domain_id *handle);
+
+/*!
+ * \brief Retrieve the coreId from a given domain. Utility.
+ */
+PUBLIC t_nmf_core_id cm_DM_GetDomainCoreId(const t_cm_domain_id domainId);
+
+/*!
+ * \brief Destroy all domains belonging to a given client.
+ */
+PUBLIC t_cm_error cm_DM_DestroyDomains(const t_nmf_client_id client);
+
+/*!
+ * \brief Destroy a given domain.
+ */
+PUBLIC t_cm_error cm_DM_DestroyDomain(t_cm_domain_id handle);
+
+/*!
+ * \brief Check if the handle is valid.
+ */
+PUBLIC t_cm_error cm_DM_CheckDomain(t_cm_domain_id handle, t_cm_domain_type type);
+PUBLIC t_cm_error cm_DM_CheckDomainWithClient(t_cm_domain_id handle, t_cm_domain_type type, t_nmf_client_id client);
+
+/*!
+ * \brief Memory allocation in a given domain, for a given memory type (see CM_AllocMpcMemory).
+ */
+PUBLIC t_memory_handle cm_DM_Alloc(t_cm_domain_id domainId, t_dsp_memory_type_id memType, t_uint32 size, t_cm_mpc_memory_alignment memAlignment, t_bool powerOn);
+
+/*!
+ * \brief Memory free using a given memory handle, also returning the core id and memory type
+ */
+PUBLIC void cm_DM_FreeWithInfo(t_memory_handle memHandle, t_nmf_core_id *coreId, t_dsp_memory_type_id *memType, t_bool powerOff);
+
+/*!
+ * \brief Memory free using a given memory handle
+ */
+PUBLIC void cm_DM_Free(t_memory_handle memHandle, t_bool powerOff);
+
+/*!
+ * \brief Wrapper function for CM_GetMpcMemoryStatus.
+ */
+PUBLIC t_cm_error cm_DM_GetAllocatorStatus(t_cm_domain_id domainId, t_dsp_memory_type_id memType, t_cm_allocator_status *pStatus);
+
+PUBLIC t_cm_error cm_DM_GetDomainAbsAdresses(t_cm_domain_id domainId, t_cm_domain_info *info);
+
+/*!
+ * \brief Change the domain for the given allocated chunk
+ */
+PUBLIC void cm_DM_SetDefaultDomain(t_memory_handle memHandle, t_nmf_core_id coreId);
+#endif /* DOMAIN_H_ */
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/domain_type.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/domain_type.h
new file mode 100644
index 00000000000..354315d4d72
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/domain_type.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/***************************************************************************/
+/* file : domain_type.h
+ * author : NMF team
+ * version : 1.0
+ *
+ * brief : NMF domain definitions
+ */
+/***************************************************************************/
+
+#ifndef DOMAIN_TYPE_H_
+#define DOMAIN_TYPE_H_
+
+#include <cm/inc/cm_type.h>
+#include <cm/engine/memory/inc/memory_type.h>
+
+/*!
+ * \brief Domain identifier
+ * \ingroup CM_DOMAIN_API
+ */
+typedef t_uint8 t_cm_domain_id;
+
+/*!
+ * \brief Client identifier
+ * 0 (zero) is considered as an invalid or 'NO' client identifier
+ * \ingroup CM_DOMAIN_API
+ */
+typedef t_uint32 t_nmf_client_id;
+#define NMF_CORE_CLIENT (t_nmf_client_id)-1
+#define NMF_CURRENT_CLIENT (t_nmf_client_id)0
+
+typedef struct {
+ t_uint32 offset; //!< offset relative to segment start in memory (in bytes)
+ t_uint32 size; //!< size in bytes of the domain segment
+} t_cm_domain_segment;
+
+/*!
+ * \brief Domain memory description structure
+ * \ingroup CM_DOMAIN_API
+ */
+typedef struct {
+ t_nmf_core_id coreId; //!< MMDSP Core Id for this domain (used for TCM-X and TCM-Y at instantiate)
+ t_cm_domain_segment esramCode; //!< ESRAM code segment
+ t_cm_domain_segment esramData; //!< ESRAM data segment
+ t_cm_domain_segment sdramCode; //!< SDRAM code segment
+ t_cm_domain_segment sdramData; //!< SDRAM data segment
+} t_cm_domain_memory;
+
+#define INIT_DOMAIN_SEGMENT {0, 0}
+#define INIT_DOMAIN {MASK_ALL8, INIT_DOMAIN_SEGMENT, INIT_DOMAIN_SEGMENT, INIT_DOMAIN_SEGMENT, INIT_DOMAIN_SEGMENT}
+
+
+#endif /* DOMAIN_TYPE_H_ */
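Combining this header with domain.h above, creating and using a domain roughly looks like the sketch below; the client id, core id, offsets and sizes are made-up values for illustration:

    t_cm_domain_memory dom = INIT_DOMAIN;   /* coreId = MASK_ALL8, empty segments */
    t_cm_domain_id id;

    dom.coreId = SVA_CORE_ID;               /* illustrative core choice */
    dom.sdramData.offset = 0;               /* byte offset inside the SDRAM data segment */
    dom.sdramData.size   = 64 * 1024;       /* made-up size, in bytes */

    if (cm_DM_CreateDomain(clientId, &dom, &id) == CM_OK)
    {
        /* ... cm_DM_Alloc(id, ...) / cm_DM_Free(...) ... */
        cm_DM_DestroyDomain(id);
    }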
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/memory.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/memory.h
new file mode 100644
index 00000000000..c6d1fb2dfff
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/memory.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Internal Memory Management API.
+ *
+ * \defgroup MEMORY_INTERNAL Private Memory API.
+ *
+ */
+#ifndef __INC_MEMORY_H
+#define __INC_MEMORY_H
+
+#include <cm/engine/api/control/configuration_engine.h>
+#include <cm/engine/memory/inc/remote_allocator.h>
+
+#endif /* __INC_MEMORY_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/memory_type.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/memory_type.h
new file mode 100644
index 00000000000..dbdba4c3d9d
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/memory_type.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Public Component Manager Memory API type.
+ *
+ * This file contains the Component Manager API type for manipulating memory.
+ */
+#ifndef __INC_MEMORY_TYPE_H
+#define __INC_MEMORY_TYPE_H
+
+#include <cm/inc/cm_type.h>
+
+/*!
+ * @defgroup t_cm_mpc_memory_type t_cm_mpc_memory_type
+ * \brief Definition of symbols used to reference the various types of Media Processor Core addressable memory
+ * @{
+ * \ingroup MEMORY
+ */
+typedef t_uint8 t_cm_mpc_memory_type; //!< Fake enumeration type
+#define CM_MM_MPC_TCM16_X ((t_cm_mpc_memory_type)0)
+#define CM_MM_MPC_TCM24_X ((t_cm_mpc_memory_type)1)
+#define CM_MM_MPC_ESRAM16 ((t_cm_mpc_memory_type)2)
+#define CM_MM_MPC_ESRAM24 ((t_cm_mpc_memory_type)3)
+#define CM_MM_MPC_SDRAM16 ((t_cm_mpc_memory_type)4)
+#define CM_MM_MPC_SDRAM24 ((t_cm_mpc_memory_type)5)
+#define CM_MM_MPC_TCM16_Y ((t_cm_mpc_memory_type)6)
+#define CM_MM_MPC_TCM24_Y ((t_cm_mpc_memory_type)7)
+#define CM_MM_MPC_TCM16 CM_MM_MPC_TCM16_X
+#define CM_MM_MPC_TCM24 CM_MM_MPC_TCM24_X
+
+/* @} */
+
+/*!
+ * @defgroup t_cm_memory_alignment t_cm_memory_alignment
+ * \brief Definition of symbols used to constrain the alignment of the allocated memory
+ * @{
+ * \ingroup MEMORY
+ */
+typedef t_uint16 t_cm_memory_alignment; //!< Fake enumeration type
+#define CM_MM_ALIGN_NONE ((t_cm_memory_alignment)0x00000000)
+#define CM_MM_ALIGN_BYTE ((t_cm_memory_alignment)CM_MM_ALIGN_NONE)
+#define CM_MM_ALIGN_HALFWORD ((t_cm_memory_alignment)0x00000001)
+#define CM_MM_ALIGN_WORD ((t_cm_memory_alignment)0x00000003)
+#define CM_MM_ALIGN_2WORDS ((t_cm_memory_alignment)0x00000007)
+#define CM_MM_ALIGN_16BYTES ((t_cm_memory_alignment)0x0000000F)
+#define CM_MM_ALIGN_4WORDS ((t_cm_memory_alignment)0x0000000F)
+#define CM_MM_ALIGN_AHB_BURST ((t_cm_memory_alignment)0x0000000F)
+#define CM_MM_ALIGN_32BYTES ((t_cm_memory_alignment)0x0000001F)
+#define CM_MM_ALIGN_8WORDS ((t_cm_memory_alignment)0x0000001F)
+#define CM_MM_ALIGN_64BYTES ((t_cm_memory_alignment)0x0000003F)
+#define CM_MM_ALIGN_16WORDS ((t_cm_memory_alignment)0x0000003F)
+#define CM_MM_ALIGN_128BYTES ((t_cm_memory_alignment)0x0000007F)
+#define CM_MM_ALIGN_32WORDS ((t_cm_memory_alignment)0x0000007F)
+#define CM_MM_ALIGN_256BYTES ((t_cm_memory_alignment)0x000000FF)
+#define CM_MM_ALIGN_64WORDS ((t_cm_memory_alignment)0x000000FF)
+#define CM_MM_ALIGN_512BYTES ((t_cm_memory_alignment)0x000001FF)
+#define CM_MM_ALIGN_128WORDS ((t_cm_memory_alignment)0x000001FF)
+#define CM_MM_ALIGN_1024BYTES ((t_cm_memory_alignment)0x000003FF)
+#define CM_MM_ALIGN_256WORDS ((t_cm_memory_alignment)0x000003FF)
+#define CM_MM_ALIGN_2048BYTES ((t_cm_memory_alignment)0x000007FF)
+#define CM_MM_ALIGN_512WORDS ((t_cm_memory_alignment)0x000007FF)
+#define CM_MM_ALIGN_4096BYTES ((t_cm_memory_alignment)0x00000FFF)
+#define CM_MM_ALIGN_1024WORDS ((t_cm_memory_alignment)0x00000FFF)
+#define CM_MM_ALIGN_65536BYTES ((t_cm_memory_alignment)0x0000FFFF)
+#define CM_MM_ALIGN_16384WORDS ((t_cm_memory_alignment)0x0000FFFF)
+/* @} */
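These alignment constants are masks (alignment minus one); an allocator would typically round an offset up to the requested boundary with something like the sketch below (not code from this driver):

    /* Round 'offset' up to the boundary encoded by a CM_MM_ALIGN_* mask. */
    static inline t_uint32 align_up(t_uint32 offset, t_cm_memory_alignment mask)
    {
        return (offset + mask) & ~(t_uint32)mask;
    }

    /* e.g. align_up(0x1005, CM_MM_ALIGN_WORD) == 0x1008 */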
+
+/*!
+ * @defgroup t_cm_mpc_memory_alignment t_cm_mpc_memory_alignment
+ * \brief Definition of symbols used to constrain the alignment of the allocated MPC memory
+ * @{
+ * \ingroup MEMORY
+ */
+typedef t_uint16 t_cm_mpc_memory_alignment; //!< Fake enumeration type
+#define CM_MM_MPC_ALIGN_NONE ((t_cm_mpc_memory_alignment)0x00000000)
+#define CM_MM_MPC_ALIGN_HALFWORD ((t_cm_mpc_memory_alignment)0x00000001)
+#define CM_MM_MPC_ALIGN_WORD ((t_cm_mpc_memory_alignment)0x00000003)
+#define CM_MM_MPC_ALIGN_2WORDS ((t_cm_mpc_memory_alignment)0x00000007)
+#define CM_MM_MPC_ALIGN_4WORDS ((t_cm_mpc_memory_alignment)0x0000000F)
+#define CM_MM_MPC_ALIGN_8WORDS ((t_cm_mpc_memory_alignment)0x0000001F)
+#define CM_MM_MPC_ALIGN_16WORDS ((t_cm_mpc_memory_alignment)0x0000003F)
+#define CM_MM_MPC_ALIGN_32WORDS ((t_cm_mpc_memory_alignment)0x0000007F)
+#define CM_MM_MPC_ALIGN_64WORDS ((t_cm_mpc_memory_alignment)0x000000FF)
+#define CM_MM_MPC_ALIGN_128WORDS ((t_cm_mpc_memory_alignment)0x000001FF)
+#define CM_MM_MPC_ALIGN_256WORDS ((t_cm_mpc_memory_alignment)0x000003FF)
+#define CM_MM_MPC_ALIGN_512WORDS ((t_cm_mpc_memory_alignment)0x000007FF)
+#define CM_MM_MPC_ALIGN_1024WORDS ((t_cm_mpc_memory_alignment)0x00000FFF)
+#define CM_MM_MPC_ALIGN_65536BYTES ((t_cm_mpc_memory_alignment)0x0000FFFF)
+#define CM_MM_MPC_ALIGN_16384WORDS ((t_cm_mpc_memory_alignment)0x0000FFFF)
+/* @} */
+
+/*!
+ * \brief Identifier of a memory handle
+ * \ingroup MEMORY
+ */
+typedef t_uint32 t_cm_memory_handle;
+
+/*!
+ * \brief Description of a memory segment
+ *
+ * <=> allocable addressable space
+ * \ingroup MEMORY
+ */
+typedef struct {
+ t_cm_system_address systemAddr; //!< Logical AND physical segment start address
+ t_uint32 size; //!< segment size (in bytes)
+} t_nmf_memory_segment;
+#define INIT_MEMORY_SEGMENT {{0, 0}, 0}
+
+/*!
+ * \brief Definition of structure used for an allocator status
+ * \ingroup MEMORY
+ */
+typedef struct
+{
+ struct {
+ t_uint32 size; //!< size of the allocator
+ /* Block counters */
+ t_uint16 used_block_number; //!< used block number
+ t_uint16 free_block_number; //!< free block number
+
+ /* Free memory min/max */
+ t_uint32 maximum_free_size; //!< maximum free size
+ t_uint32 minimum_free_size; //!< minimum free size
+
+ /* Accumulation of free and used memory */
+ t_uint32 accumulate_free_memory; //!< accumulate free memory
+ t_uint32 accumulate_used_memory; //!< accumulate used memory
+ } global;
+
+ struct {
+ t_uint32 size; //!< size of the domain
+ t_uint32 maximum_free_size; //!< maximum free size in the given domain
+ t_uint32 minimum_free_size; //!< minimum free size in the given domain
+ t_uint32 accumulate_free_memory; //!< all free memory of the given domain
+ t_uint32 accumulate_used_memory; //!< all used memory of the given domain
+ } domain;
+
+ struct {
+ t_uint32 sizes[3];
+ } stack[NB_CORE_IDS];
+
+} t_cm_allocator_status;
+
+#endif /* __INC_MEMORY_TYPE_H */
+
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/migration.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/migration.h
new file mode 100644
index 00000000000..824d25374b3
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/migration.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Migration API.
+ *
+ * \defgroup
+ *
+ */
+#ifndef __INC_MIGRATION_H
+#define __INC_MIGRATION_H
+
+#include <cm/engine/memory/inc/domain_type.h>
+#include <cm/engine/dsp/inc/dsp.h>
+
+typedef enum {
+ STATE_MIGRATED = 1,
+ STATE_NORMAL = 0,
+} t_cm_migration_state;
+
+PUBLIC t_cm_error cm_migrate(const t_cm_domain_id srcShared, const t_cm_domain_id src, const t_cm_domain_id dst);
+
+PUBLIC t_cm_error cm_unmigrate(void);
+
+PUBLIC t_uint32 cm_migration_translate(t_dsp_segment_type segmentType, t_uint32 addr);
+
+PUBLIC void cm_migration_check_state(t_nmf_core_id coreId, t_cm_migration_state expected);
+
+#endif /* __INC_MIGRATION_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator.h
new file mode 100644
index 00000000000..36994f37daa
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator.h
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ *
+ * \note: In this module, we assume that parameters have already been checked.
+ */
+#ifndef __REMOTE_ALLOCATOR_H_
+#define __REMOTE_ALLOCATOR_H_
+
+/*
+ * Include
+ */
+#include <cm/inc/cm_type.h>
+#include <cm/engine/memory/inc/memory_type.h>
+
+
+/*
+ * Description of the memory block status
+ */
+typedef enum {
+ MEM_USED = 0, /* Memory block is used */
+ MEM_FREE = 1 /* Memory block is free */
+} t_mem_status;
+
+/*
+ * Chunk structure.
+ */
+struct cm_allocator_desc;
+typedef struct chunk_struct
+{
+ /* Double linked list of chunks */
+ struct chunk_struct *prev;
+ struct chunk_struct *next;
+
+ /* Double linked list of free memory */
+ struct chunk_struct *prev_free_mem;
+ struct chunk_struct *next_free_mem;
+
+ /* Offset of the block memory */
+ t_uint32 offset;
+
+ /* Size of the block memory */
+ t_cm_size size;
+
+ /* Status of the block memory */
+ t_mem_status status;
+
+ /* User data */
+ t_uint16 userData;
+
+ /* Alloc debug info*/
+ t_uint32 domainId;
+
+ /* Alloc desc backlink */
+ struct cm_allocator_desc *alloc;
+} t_cm_chunk;
+
+/*!
+ * \brief Identifier of an internal memory handle
+ * \ingroup MEMORY_INTERNAL
+ */
+typedef t_cm_chunk* t_memory_handle;
+
+#define INVALID_MEMORY_HANDLE ((t_cm_chunk*)NULL)
+
+
+/*
+ * Context structure
+ */
+#define BINS 63
+
+//TODO, juraj, add memType to alloc struct ?
+typedef struct cm_allocator_desc {
+ const char *pAllocName; /* Name of the allocator */
+ t_uint32 maxSize; /* Max size of the allocator -> Potentially increase/decrease by stack management */
+ t_uint32 sbrkSize; /* Current size of allocator */
+ t_uint32 offset; /* Offset of the allocator */
+ t_cm_chunk *chunks; /* Linked list of chunks */
+ t_cm_chunk *lastChunk; /* Last chunk of the list above (list is null terminated) */
+ t_cm_chunk *free_mem_chunks[BINS]; /* List of free memory */
+ struct cm_allocator_desc* next; /* List of allocator */
+} t_cm_allocator_desc;
+
+int bin_index(unsigned int sz);
+
+/*
+ * Functions
+ */
+/*!
+ * \brief Create a new allocator for a piece of memory (hw mapped (xram, yram))
+ * Any further allocation into this piece of memory will return an offset inside it.
+ * (a constant offset value can be added to this offset)
+ *
+ * \retval t_cm_allocator_desc* new memory allocator identifier
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC t_cm_allocator_desc* cm_MM_CreateAllocator(
+ t_cm_size size, //!< [in] Size of the addressable space in bytes
+ t_uint32 offset, //!< [in] Constant offset to add to each allocated block base address
+ const char* name //!< [in] Name of the allocator
+ );
+
+/*!
+ * \brief Free a memory allocator descriptor
+ *
+ * \retval t_cm_error
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC t_cm_error cm_MM_DeleteAllocator(
+ t_cm_allocator_desc* alloc //!< [in] Identifier of the memory allocator to be freed
+ );
+
+
+/*!
+ * \brief Resize an allocator to the size value.
+ *
+ * \retval t_cm_error
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC t_cm_error cm_MM_ResizeAllocator(
+ t_cm_allocator_desc* alloc, //!< [in] Identifier of the memory allocator used to allocate the piece of memory
+ t_cm_size size //!< [in] Size of the addressable space in allocDesc granularity
+ );
+
+/*!
+ * \brief Check validity of a user handle
+ */
+t_cm_error cm_MM_getValidMemoryHandle(t_cm_memory_handle handle, t_memory_handle* validHandle);
+
+/*!
+ * \brief Wrapper routine to allocate some memory into a given allocator
+ *
+ * \retval t_memory_handle handle on the new allocated piece of memory
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC t_memory_handle cm_MM_Alloc(
+ t_cm_allocator_desc* alloc, //!< [in] Identifier of the memory allocator
+ t_cm_size size, //!< [in] Size of the addressable space
+ t_cm_memory_alignment memAlignment, //!< [in] Alignment constraint
+ t_uint32 seg_offset, //!< [in] Offset of range where allocating
+ t_uint32 seg_size, //!< [in] Size of range where allocating
+ t_uint32 domainId
+ );
+
+
+/*!
+ * \brief Routine to reallocate memory for a given handle
+ *
+ * Routine to reallocate memory for a given handle. The chunk can be extended or shrunk in both
+ * directions - top and bottom, depending on the offset and size arguments.
+ *
+ * \retval t_memory_handle handle on the reallocated piece of memory
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC t_cm_error cm_MM_Realloc(
+ t_cm_allocator_desc* alloc,
+ const t_cm_size size,
+ const t_uint32 offset,
+ t_memory_handle *handle);
+/*!
+ * \brief Frees the allocated chunk
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC void cm_MM_Free(
+ t_cm_allocator_desc* alloc, //!< [in] Identifier of the memory allocator
+ t_memory_handle memHandle //!< [in] Memory handle to free
+ );
+
+
+/*!
+ * \brief Get the allocator status
+ *
+ * \param[in] alloc Identifier of the memory allocator
+ * \param[out] pStatus Status of the allocator
+ *
+ * \retval t_cm_error
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC t_cm_error cm_MM_GetAllocatorStatus(t_cm_allocator_desc* alloc, t_uint32 offset, t_uint32 size, t_cm_allocator_status *pStatus);
+
+/*!
+ * \brief Returns the offset into a given memory allocator of an allocated piece of memory
+ *
+ * \param[in] memHandle handle on the given memory
+ *
+ * \retval t_uint32 offset into the given memory allocator
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC t_uint32 cm_MM_GetOffset(t_memory_handle memHandle);
+
+
+/*!
+ * \brief Returns the size, in words, of an allocated piece of memory for a given memory allocator
+ *
+ * \param[in] memHandle handle on the given memory
+ *
+ * \retval t_uint32 size in words for the given memory allocator
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC t_uint32 cm_MM_GetSize(t_memory_handle memHandle);
+
+/*!
+ * \brief Returns the size in bytes for a given memory allocator
+ *
+ * \param[in] allocDesc Identifier of the memory allocator
+ * \retval size
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC t_uint32 cm_MM_GetAllocatorSize(t_cm_allocator_desc* allocDesc);
+
+
+/*!
+ * \brief Set the user data of an allocated piece of memory
+ *
+ * \param[in] memHandle handle on the given memory
+ * \param[in] userData UserData of the given memory piece
+ *
+ * \retval t_cm_error
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC void cm_MM_SetMemoryHandleUserData (t_memory_handle memHandle, t_uint16 userData);
+
+
+/*!
+ * \brief Return the user data of an allocated piece of memory
+ *
+ * \param[in] memHandle handle on the given memory
+ * \param[out] pUserData returned UserData of the given memory piece
+ *
+ * \retval t_cm_error
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC void cm_MM_GetMemoryHandleUserData(t_memory_handle memHandle, t_uint16 *pUserData, t_cm_allocator_desc **alloc);
+
+/*!
+ * \brief Dump chunks in the range [start:end]
+ *
+ * \param[in] alloc Allocator descriptor
+ * \param[in] start Range start
+ * \param[in] end Range end
+ *
+ * \retval void
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC void cm_MM_DumpMemory(t_cm_allocator_desc* alloc, t_uint32 start, t_uint32 end);
+
+/*!
+ * \brief Change the domain for the given chunk of memory
+ *
+ * \param[in] memHandle The given chunk of memory
+ * \param[in] domainId The new domain id to set
+ *
+ * \retval void
+ *
+ * \ingroup MEMORY_INTERNAL
+ */
+PUBLIC void cm_MM_SetDefaultDomain(t_memory_handle memHandle, t_uint32 domainId);
+#endif /* __REMOTE_ALLOCATOR_H_ */
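A minimal lifecycle sketch for the allocator API declared above; the sizes, offsets and the allocator name are arbitrary illustration values:

    t_cm_allocator_desc *alloc;
    t_memory_handle h;

    alloc = cm_MM_CreateAllocator(0x4000, /* offset */ 0, "example");
    if (alloc != NULL)
    {
        h = cm_MM_Alloc(alloc, /* size */ 64, CM_MM_ALIGN_WORD,
                        /* seg_offset */ 0, /* seg_size */ 0x4000, /* domainId */ 0);
        if (h != INVALID_MEMORY_HANDLE)
            cm_MM_Free(alloc, h);

        cm_MM_DeleteAllocator(alloc);
    }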
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator_utils.h b/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator_utils.h
new file mode 100644
index 00000000000..0a7c901187b
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/inc/remote_allocator_utils.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef REMOTE_ALLOCATOR_UTILS_H_
+#define REMOTE_ALLOCATOR_UTILS_H_
+
+#include <cm/engine/memory/inc/remote_allocator.h>
+#include <cm/engine/memory/inc/chunk_mgr.h>
+
+typedef enum {
+ FREE_CHUNK_BEFORE,
+ FREE_CHUNK_AFTER,
+} t_mem_split_position;
+
+
+PUBLIC void updateFreeList(t_cm_allocator_desc* alloc, t_cm_chunk* chunk);
+
+PUBLIC void linkChunk(t_cm_allocator_desc* alloc, t_cm_chunk* prev,t_cm_chunk* add);
+PUBLIC void unlinkChunk(t_cm_allocator_desc* alloc,t_cm_chunk* current);
+
+PUBLIC void unlinkFreeMem(t_cm_allocator_desc* alloc,t_cm_chunk* current);
+PUBLIC void linkFreeMemBefore(t_cm_chunk* add, t_cm_chunk* next);
+PUBLIC void linkFreeMemAfter(t_cm_chunk* prev,t_cm_chunk* add);
+
+PUBLIC t_cm_chunk* splitChunk(t_cm_allocator_desc* alloc, t_cm_chunk *chunk, t_uint32 offset, t_mem_split_position position);
+
+#endif /*REMOTE_ALLOCATOR_UTILS_H_*/
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/chunk_mgr.c b/drivers/staging/nmf-cm/cm/engine/memory/src/chunk_mgr.c
new file mode 100644
index 00000000000..78a549a74ce
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/src/chunk_mgr.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ * Include
+ */
+#include <cm/inc/cm_type.h>
+#include "../inc/chunk_mgr.h"
+#include <cm/engine/trace/inc/trace.h>
+
+#define CHUNKS_PER_PAGE 500
+#define CHUNK_THRESOLD 5
+
+struct t_page_chuncks {
+ struct t_page_chuncks *nextPage;
+ // unsigned int freeChunkInPage;
+ t_cm_chunk chunks[CHUNKS_PER_PAGE];
+};
+
+static struct t_page_chuncks *firstPage = 0;
+
+static unsigned int freeChunks = 0;
+static t_cm_chunk *firstFreeChunk = 0;
+
+t_cm_chunk* allocChunk()
+{
+ t_cm_chunk* chunk = firstFreeChunk;
+
+ firstFreeChunk = chunk->next;
+
+ chunk->next_free_mem = 0;
+ chunk->prev_free_mem = 0;
+ chunk->prev = 0;
+ chunk->next = 0;
+ chunk->status = MEM_FREE;
+ // chunk->offset = 0;
+ // chunk->size = 0;
+ // chunk->alloc = 0;
+ // chunk->userData = 0;
+
+ freeChunks--;
+
+ return chunk;
+}
+
+void freeChunk(t_cm_chunk* chunk)
+{
+ // Link chunk in free list
+ chunk->next = firstFreeChunk;
+ firstFreeChunk = chunk;
+
+ // Increase counter
+ freeChunks++;
+}
+
+t_cm_error allocChunkPool(void)
+{
+ struct t_page_chuncks* newPage;
+ int i;
+
+ newPage = (struct t_page_chuncks*)OSAL_Alloc(sizeof(struct t_page_chuncks));
+ if(newPage == NULL)
+ return CM_NO_MORE_MEMORY;
+
+ // Link page
+ newPage->nextPage = firstPage;
+ firstPage = newPage;
+
+ // Put chunk in free list
+ for(i = 0; i < CHUNKS_PER_PAGE; i++)
+ freeChunk(&newPage->chunks[i]);
+
+ return CM_OK;
+}
+
+t_cm_error fillChunkPool(void)
+{
+ if(freeChunks < CHUNK_THRESOLD)
+ return allocChunkPool();
+
+ return CM_OK;
+}
+
+void freeChunkPool(void)
+{
+ while(firstPage != NULL)
+ {
+ struct t_page_chuncks* tofree = firstPage;
+ firstPage = firstPage->nextPage;
+ OSAL_Free(tofree);
+ }
+
+ firstFreeChunk = 0;
+ freeChunks = 0;
+}
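Based only on the functions defined above, the intended usage is to top up the pool before taking chunks, then pair allocChunk()/freeChunk() calls; a sketch:

    if (fillChunkPool() == CM_OK)           /* refill when the pool runs low */
    {
        t_cm_chunk *c = allocChunk();       /* pop one chunk from the free list */

        /* ... fill in offset/size and link the chunk into an allocator ... */

        freeChunk(c);                       /* push it back when no longer needed */
    }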
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/domain.c b/drivers/staging/nmf-cm/cm/engine/memory/src/domain.c
new file mode 100644
index 00000000000..a605cc475a9
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/src/domain.c
@@ -0,0 +1,608 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/inc/cm_type.h>
+#include <inc/nmf-limits.h>
+
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/memory/inc/migration.h>
+#include <cm/engine/memory/inc/chunk_mgr.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/component/inc/instance.h>
+#include <cm/engine/power_mgt/inc/power.h>
+#include <cm/engine/trace/inc/trace.h>
+
+/*
+ * domain_memory structure is all we need
+ */
+#define MAX_USER_DOMAIN_NB 64
+#define MAX_SCRATCH_DOMAIN_NB 16
+
+t_cm_domain_desc domainDesc[MAX_USER_DOMAIN_NB];
+t_cm_domain_scratch_desc domainScratchDesc[MAX_SCRATCH_DOMAIN_NB];
+
+static t_cm_allocator_desc *cm_DM_getAllocator(t_cm_domain_id domainId, t_dsp_memory_type_id memType);
+static void cm_DM_DomainError(const t_cm_domain_id parentId, const t_nmf_client_id client);
+
+#define INIT_DOMAIN_STRUCT(domainDesc) do { \
+ domainDesc.client = 0; \
+ domainDesc.type = DOMAIN_NORMAL; \
+ domainDesc.refcount = 0; \
+ domainDesc.domain.coreId = MASK_ALL8; \
+ domainDesc.domain.esramCode.offset = 0; \
+ domainDesc.domain.esramCode.size = 0; \
+ domainDesc.domain.esramData.offset = 0; \
+ domainDesc.domain.esramData.size = 0; \
+ domainDesc.domain.sdramCode.offset = 0; \
+ domainDesc.domain.sdramCode.size = 0; \
+ domainDesc.domain.sdramData.offset = 0; \
+ domainDesc.domain.sdramData.size = 0; \
+ domainDesc.scratch.parent.handle = 0; \
+ domainDesc.scratch.child.alloc = 0; \
+ domainDesc.scratch.child.parent_ref = 0; \
+ domainDesc.dbgCooky = NULL; \
+ } while (0)
+
+#define FIND_DOMAIN_ID(domainId) \
+ { \
+ domainId = 0; \
+ while (domainDesc[domainId].client != 0 && domainId < MAX_USER_DOMAIN_NB) { \
+ domainId++; \
+ } \
+ if (domainId >= MAX_USER_DOMAIN_NB) { \
+ return CM_INTERNAL_DOMAIN_OVERFLOW; \
+ } \
+ }
+
+#define FIND_SCRATCH_DOMAIN_ID(domainId) \
+ { \
+ domainId = 0; \
+ while (domainScratchDesc[domainId].allocDesc != 0 && domainId < MAX_SCRATCH_DOMAIN_NB) { \
+ domainId++; \
+ } \
+ if (domainId >= MAX_SCRATCH_DOMAIN_NB) { \
+ return CM_INTERNAL_DOMAIN_OVERFLOW; \
+ } \
+ }
+
+PUBLIC t_cm_error cm_DM_CheckDomain(t_cm_domain_id handle, t_cm_domain_type type)
+{
+ if ((handle <= 3)
+ || (handle >= MAX_USER_DOMAIN_NB)) { //remember, domain[0-3] are reserved
+ return CM_INVALID_DOMAIN_HANDLE;
+ }
+
+ if (domainDesc[handle].client == 0) {
+ return CM_INVALID_DOMAIN_HANDLE;
+ }
+
+ if (type != DOMAIN_ANY) {
+ if (domainDesc[handle].type != type) {
+ return CM_INVALID_DOMAIN_HANDLE;
+ }
+ }
+
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_DM_CheckDomainWithClient(t_cm_domain_id handle, t_cm_domain_type type, t_nmf_client_id client)
+{
+ t_cm_error error;
+
+ if((error = cm_DM_CheckDomain(handle, type)) != CM_OK)
+ return error;
+
+#ifdef CHECK_TO_BE_REACTIVATED_IN_2_11
+ if(domainDesc[handle].client != client)
+ {
+ ERROR("CM_DOMAIN_VIOLATION: domain %d created by client %d not usable by client %d.", handle, domainDesc[handle].client, client, 0, 0, 0);
+ return CM_DOMAIN_VIOLATION;
+ }
+#endif
+
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_DM_Init(void)
+{
+ t_cm_error error;
+
+ int i = 0;
+ for(i = 0; i < MAX_USER_DOMAIN_NB; i++) {
+ INIT_DOMAIN_STRUCT(domainDesc[i]);
+ }
+
+ //domains[0-3] are reserved - this allows catching some cases of incorrect usage,
+ //especially when the user passes a coreId instead of a domainId, i.e. id = 1, 2, 3
+ domainDesc[0].client = NMF_CORE_CLIENT;
+ domainDesc[1].client = NMF_CORE_CLIENT;
+ domainDesc[2].client = NMF_CORE_CLIENT;
+ domainDesc[3].client = NMF_CORE_CLIENT;
+
+ /* We use domain 1 and 2 for the singleton, only used for components structure */
+ domainDesc[DEFAULT_SVA_DOMAIN].type = DOMAIN_NORMAL;
+ domainDesc[DEFAULT_SVA_DOMAIN].domain.coreId= SVA_CORE_ID;
+ domainDesc[DEFAULT_SVA_DOMAIN].domain.esramCode.size = (t_uint32)-1;
+ domainDesc[DEFAULT_SVA_DOMAIN].domain.esramData.size = (t_uint32)-1;
+ domainDesc[DEFAULT_SVA_DOMAIN].domain.sdramCode.size = (t_uint32)-1;
+ domainDesc[DEFAULT_SVA_DOMAIN].domain.sdramData.size = (t_uint32)-1;
+ domainDesc[DEFAULT_SIA_DOMAIN].type = DOMAIN_NORMAL;
+ domainDesc[DEFAULT_SIA_DOMAIN].domain.coreId= SIA_CORE_ID;
+ domainDesc[DEFAULT_SIA_DOMAIN].domain.esramCode.size = (t_uint32)-1;
+ domainDesc[DEFAULT_SIA_DOMAIN].domain.esramData.size = (t_uint32)-1;
+ domainDesc[DEFAULT_SIA_DOMAIN].domain.sdramCode.size = (t_uint32)-1;
+ domainDesc[DEFAULT_SIA_DOMAIN].domain.sdramData.size = (t_uint32)-1;
+
+ for(i = 0; i < MAX_SCRATCH_DOMAIN_NB; i++) {
+ domainScratchDesc[i].domainId = 0;
+ domainScratchDesc[i].parentId = 0;
+ domainScratchDesc[i].allocDesc = 0;
+ }
+
+ // Allocate the pool twice to have a comfortable number of free chunks
+ if((error = allocChunkPool()) != CM_OK)
+ return error;
+ if((error = allocChunkPool()) != CM_OK)
+ {
+ freeChunkPool();
+ return error;
+ }
+
+ return CM_OK;
+}
+
+PUBLIC void cm_DM_Destroy(void)
+{
+ //cm_DM_Init();
+ freeChunkPool();
+}
+
+PUBLIC t_nmf_core_id cm_DM_GetDomainCoreId(const t_cm_domain_id domainId)
+{
+
+ return domainDesc[domainId].domain.coreId;
+}
+
+#if 0
+static t_uint32 cm_DM_isSegmentOverlaping(const t_cm_domain_segment *d0, const t_cm_domain_segment *d1)
+{
+ t_uint32 min0 = d0->offset;
+ t_uint32 max0 = d0->offset + d0->size;
+ t_uint32 min1 = d1->offset;
+ t_uint32 max1 = d1->offset + d1->size;
+
+ if ( (min0 < min1) && (min1 < max0) ){ /* min0 < min1 < max0 OR min1 in [min0:max0] */
+ return 1;
+ }
+ if ( (min1 < min0) && (min0 <= max1) ){ /* min1 < min0 < max0 OR min0 in [min1:max1] */
+ return 1;
+ }
+
+ return 0;
+}
+{
+ ...
+
+ t_uint32 i;
+ //check non-overlapp with other domains
+ for (i = 0; i < MAX_USER_DOMAIN_NB; i++) {
+ if (domainDesc[i].client != 0) {
+ if (cm_DM_isSegmentOverlaping(&domainDesc[i].domain.esramData, &domain->esramData)) {
+ return CM_DOMAIN_OVERLAP;
+ }
+ /*
+ if (cm_DM_isSegmentOverlaping(&domainDesc[i].domain.esramData, &domain->esramData)) {
+ return CM_DOMAIN_OVERLAP;
+ }
+ */
+ }
+ }
+
+ ...
+}
+#endif
+
+PUBLIC t_cm_error cm_DM_CreateDomain(const t_nmf_client_id client, const t_cm_domain_memory *domain, t_cm_domain_id *handle)
+{
+ t_cm_domain_id domainId;
+ FIND_DOMAIN_ID(domainId);
+
+ if (client == 0)
+ return CM_INVALID_PARAMETER;
+
+ if (domain->coreId > LAST_CORE_ID)
+ return CM_INVALID_DOMAIN_DEFINITION;
+
+ //FIXME, juraj, check invalid domain definition
+ domainDesc[domainId].client = client;
+ domainDesc[domainId].domain = *domain;
+
+ if (osal_debug_ops.domain_create)
+ osal_debug_ops.domain_create(domainId);
+
+ *handle = domainId;
+
+ return CM_OK;
+}
+
+//TODO, juraj, add assert to cm_MM_GetOffset(), if domain is scratch parent
+PUBLIC t_cm_error cm_DM_CreateDomainScratch(const t_nmf_client_id client, const t_cm_domain_id parentId, const t_cm_domain_memory *domain, t_cm_domain_id *handle)
+{
+ t_cm_error error;
+ t_memory_handle memhandle;
+ t_cm_allocator_desc *alloc;
+ t_uint32 parentMin, parentMax;
+ t_uint32 scratchMin, scratchMax;
+
+ /* check if the parent domain exists */
+ /* parent could be DOMAIN_NORMAL (1st call) or DOMAIN_SCRATCH_PARENT (other calls) */
+ if ((error = cm_DM_CheckDomain(parentId, DOMAIN_ANY)) != CM_OK) {
+ return error;
+ }
+
+ parentMin = domainDesc[parentId].domain.esramData.offset;
+ parentMax = domainDesc[parentId].domain.esramData.offset + domainDesc[parentId].domain.esramData.size;
+ scratchMin = domain->esramData.offset;
+ scratchMax = domain->esramData.offset + domain->esramData.size;
+ /* check that the scratch domain fits within the parent domain (esram data only) */
+ if ( (parentMin > scratchMin) || (parentMax < scratchMax) ) {
+ return CM_INVALID_DOMAIN_DEFINITION;
+ }
+
+ /* create the scratch domain */
+ if ((error = cm_DM_CreateDomain(client, domain, handle)) != CM_OK) {
+ return error;
+ }
+
+ /* check if this is the first scratch domain */
+ if (domainDesc[parentId].scratch.parent.handle == 0) {
+ /* 1st scratch domain */
+ t_cm_domain_segment tmp;
+
+ /* reserve the zone for the scratch domain */
+ tmp = domainDesc[parentId].domain.esramData;
+ domainDesc[parentId].domain.esramData = domain->esramData;
+ memhandle = cm_DM_Alloc(parentId, ESRAM_EXT16, domain->esramData.size / 2, CM_MM_ALIGN_NONE, FALSE); //note byte to 16bit-word conversion
+ domainDesc[parentId].domain.esramData = tmp;
+ if (memhandle == 0) {
+ cm_DM_DestroyDomain(*handle);
+ cm_DM_DomainError(parentId, client);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ domainDesc[parentId].type = DOMAIN_SCRATCH_PARENT;
+ domainDesc[parentId].refcount = 0; //reinit the refcount
+ domainDesc[parentId].scratch.parent.handle = memhandle;
+
+ } else {
+ /* nth scratch domain */
+ t_uint32 i;
+ /* init oldMin and oldMax to values we are sure will get overwritten below */
+ t_uint32 oldMin = 0xffffffff;
+ t_uint32 oldMax = 0;
+
+ /* compute the new scratch zone size */
+ for(i = 0; i < MAX_USER_DOMAIN_NB; i++) {
+ if ((domainDesc[i].type == DOMAIN_SCRATCH_CHILD) && (domainDesc[i].scratch.child.parent_ref == parentId)) {
+ /* ok, here we have a scratch domain created from the same child domain */
+ t_uint32 min = domainDesc[i].domain.esramData.offset;
+ t_uint32 max = domainDesc[i].domain.esramData.offset + domainDesc[i].domain.esramData.size;
+
+ oldMin = (min < oldMin)?min:oldMin;
+ oldMax = (max > oldMax)?max:oldMax;
+ }
+ }
+
+ /* resize the scratch zone */
+ if ((oldMin > scratchMin) || (oldMax < scratchMax)) {
+ t_uint32 newMin = (oldMin > scratchMin)?scratchMin:oldMin;
+ t_uint32 newMax = (oldMax < scratchMax)?scratchMax:oldMax;
+
+ if(cm_MM_Realloc(cm_DM_getAllocator(parentId, ESRAM_EXT16), newMax - newMin, newMin,
+ &domainDesc[parentId].scratch.parent.handle) != CM_OK)
+ {
+ /* failed to extend the zone */
+ cm_DM_DestroyDomain(*handle);
+ cm_DM_DomainError(parentId, client);
+ return CM_NO_MORE_MEMORY;
+ }
+ }
+ }
+
+ /* create esram-data allocator in the scratch domain */
+ alloc = cm_MM_CreateAllocator(domainDesc[*handle].domain.esramData.size,
+ domainDesc[*handle].domain.esramData.offset,
+ "scratch");
+
+ domainDesc[*handle].type = DOMAIN_SCRATCH_CHILD;
+ domainDesc[*handle].scratch.child.parent_ref = parentId;
+ domainDesc[*handle].scratch.child.alloc = alloc;
+ domainDesc[parentId].refcount++;
+
+ return error;
+}
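+
+/*
+ * Scratch zone sizing example (hypothetical offsets): if a first child covers esramData
+ * [0x1000:0x3000[, that range is reserved in the parent; a second child covering
+ * [0x2000:0x5000[ makes cm_MM_Realloc() grow the reserved zone to [0x1000:0x5000[, the
+ * union of all children. Destroying a child shrinks the zone back to the union of the
+ * remaining children (see cm_DM_DestroyDomain), and destroying the last child frees it.
+ */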
+
+PUBLIC t_cm_error cm_DM_DestroyDomains(const t_nmf_client_id client)
+{
+ t_cm_domain_id handle;
+ t_cm_error error, status=CM_OK;
+
+ for (handle=0; handle<MAX_USER_DOMAIN_NB; handle++) {
+ if ((domainDesc[handle].client == client)
+ && ((error=cm_DM_DestroyDomain(handle)) != CM_OK)) {
+ LOG_INTERNAL(0, "Error (%d) destroying remaining domainId %d for client %u\n", error, handle, client, 0, 0, 0);
+ status = error;
+ }
+ }
+ return status;
+}
+
+PUBLIC t_cm_error cm_DM_DestroyDomain(t_cm_domain_id handle)
+{
+ t_cm_error error = CM_OK;
+ t_uint32 i;
+
+ if ((error = cm_DM_CheckDomain(handle, DOMAIN_ANY)) != CM_OK) {
+ return error;
+ }
+
+ //forbid destruction of cm domains
+ //if (handle == cm_DSP_GetState(domainDesc[handle].domain.coreId)->domainEE)
+ // return CM_INVALID_DOMAIN_HANDLE;
+
+ /* loop all components and check if there are still components instantiated with this handle */
+ //actually this check is redundant with the usage counters as component instantiations allocate memory
+ for (i=0; i<ComponentTable.idxMax; i++)
+ {
+ if (NULL != componentEntry(i) && componentEntry(i)->domainId == handle) {
+ return CM_ILLEGAL_DOMAIN_OPERATION;
+ }
+ }
+
+ //perform check based on usage counters
+ if (domainDesc[handle].refcount != 0) {
+ return CM_ILLEGAL_DOMAIN_OPERATION;
+ }
+
+ if (domainDesc[handle].type == DOMAIN_SCRATCH_PARENT) {
+ return CM_ILLEGAL_DOMAIN_OPERATION; // the parent is destroyed implicitly with the last scratch child
+ } else if (domainDesc[handle].type == DOMAIN_SCRATCH_CHILD) {
+ t_cm_allocator_status status;
+ t_cm_domain_id parentId = domainDesc[handle].scratch.child.parent_ref;
+
+ cm_MM_GetAllocatorStatus(domainDesc[handle].scratch.child.alloc, 0, 0xffff, &status);
+ if (status.global.accumulate_used_memory != 0) {
+ //something is still allocated
+ return CM_ILLEGAL_DOMAIN_OPERATION;
+ }
+
+ domainDesc[parentId].refcount--;
+ cm_MM_DeleteAllocator(domainDesc[handle].scratch.child.alloc); //returns no error
+
+ if (domainDesc[parentId].refcount == 0) {
+ /* last scratch domain */
+ cm_DM_Free(domainDesc[parentId].scratch.parent.handle, FALSE);
+ domainDesc[parentId].scratch.parent.handle = 0;
+ domainDesc[parentId].type = DOMAIN_NORMAL;
+ } else {
+ /* other child scratch domains exist, check if the reserved zone needs to be resized, i.e. reduced */
+
+ t_uint32 i;
+ /* init oldMin and oldMax to values we are sure will get overwritten below */
+ t_uint32 oldMin = 0xffffffff;
+ t_uint32 oldMax = 0x0;
+ t_uint32 scratchMin = domainDesc[handle].domain.esramData.offset;
+ t_uint32 scratchMax = domainDesc[handle].domain.esramData.offset + domainDesc[handle].domain.esramData.size;
+
+ /* compute the remaining reserved zone size */
+ for(i = 0; i < MAX_USER_DOMAIN_NB; i++) {
+ if (i == handle)
+ continue; // do not count the domain being destroyed here; it is removed later in this function
+ if ((domainDesc[i].type == DOMAIN_SCRATCH_CHILD) && (domainDesc[i].scratch.child.parent_ref == parentId)) {
+ /* ok, here we have a scratch domain created from the same child domain */
+ t_uint32 min = domainDesc[i].domain.esramData.offset;
+ t_uint32 max = domainDesc[i].domain.esramData.offset + domainDesc[i].domain.esramData.size;
+
+ oldMin = (min < oldMin)?min:oldMin;
+ oldMax = (max > oldMax)?max:oldMax;
+ }
+ }
+
+ /* resize the scratch zone */
+ if ((oldMin > scratchMin) || (oldMax < scratchMax)) {
+ CM_ASSERT(cm_MM_Realloc(cm_DM_getAllocator(parentId, ESRAM_EXT16), oldMax - oldMin, oldMin,
+ &domainDesc[parentId].scratch.parent.handle) == CM_OK); //the realloc shouldn't fail..
+ }
+ }
+ }
+
+ if (osal_debug_ops.domain_destroy)
+ osal_debug_ops.domain_destroy(handle);
+
+ //reset the domain desc
+ INIT_DOMAIN_STRUCT(domainDesc[handle]);
+
+ return CM_OK;
+}
+
+/*
+ * - if the domainId is scratch parent, all allocations are done as in normal domains
+ * - if the domainId is scratch child
+ * if allocation type is esram, retrieve the allocator from the domainDesc
+ * else allocation is done as for normal domain
+ * - if the domainId is normal, allocator is retrieved from mpcDesc via cm_DSP_GetAllocator()
+ */
+static t_cm_allocator_desc *cm_DM_getAllocator(t_cm_domain_id domainId, t_dsp_memory_type_id memType)
+{
+ t_cm_allocator_desc *alloc = 0;
+
+ if ((domainDesc[domainId].type == DOMAIN_SCRATCH_CHILD)
+ && ((memType == ESRAM_EXT16) || (memType == ESRAM_EXT24))) {
+ alloc = domainDesc[domainId].scratch.child.alloc;
+ } else {
+ alloc = cm_DSP_GetAllocator(domainDesc[domainId].domain.coreId, memType);
+ }
+
+ return alloc;
+}
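+
+/*
+ * For example (following the rules above): an ESRAM_EXT16 allocation in a scratch child
+ * domain goes to the child's private "scratch" allocator, whereas an SDRAM_EXT24
+ * allocation in the same domain falls through to the core allocator returned by
+ * cm_DSP_GetAllocator() for domain.coreId.
+ */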
+
+void START(void);
+void END(const char*);
+
+//TODO, juraj, alloc would need to return finer errors than 0
+PUBLIC t_memory_handle cm_DM_Alloc(t_cm_domain_id domainId, t_dsp_memory_type_id memType, t_uint32 wordSize, t_cm_mpc_memory_alignment memAlignment, t_bool powerOn)
+{
+ t_nmf_core_id coreId = domainDesc[domainId].domain.coreId;
+ t_memory_handle handle;
+ t_cm_allocator_desc *alloc;
+ t_uint32 offset;
+ t_uint32 size;
+
+ cm_DSP_GetInternalMemoriesInfo(domainId, memType, &offset, &size);
+
+ if ((alloc = cm_DM_getAllocator(domainId, memType)) == 0) {
+ return 0;
+ }
+
+ handle = cm_MM_Alloc(alloc,
+ cm_DSP_ConvertSize(memType, wordSize),
+ (t_cm_memory_alignment) memAlignment,
+ offset, size, domainId);
+
+ if(handle != INVALID_MEMORY_HANDLE)
+ {
+ cm_MM_SetMemoryHandleUserData(handle, (coreId << SHIFT_BYTE1) | (memType << SHIFT_BYTE0));
+
+ if (powerOn) {
+ // [Pwr] The associated power domain can be enabled only after the Alloc request.
+ // Associated MPC memory chunk is not accessed (Remote allocator feature)
+ cm_PWR_EnableMemory(
+ coreId,
+ memType,
+ /*
+ * Compute the physical address as cm_DSP_GetHostSystemAddress() does, but in an optimized way
+ * -> see that function for details
+ * -> note: TCM memory is not correctly computed, but it is not used here
+ */
+ cm_DSP_GetState(coreId)->allocator[memType]->baseAddress.physical + cm_MM_GetOffset(handle),
+ cm_MM_GetSize(handle));
+ }
+ } else {
+ LOG_INTERNAL(0, "CM_NO_MORE_MEMORY domainId: %d, memType %d, wordSize %d, alignement %d\n",
+ domainId, memType, wordSize, memAlignment, 0, 0);
+ cm_MM_DumpMemory(alloc, offset, offset + size);
+ }
+
+ return handle;
+}
+
+PUBLIC void cm_DM_FreeWithInfo(t_memory_handle memHandle, t_nmf_core_id *coreId, t_dsp_memory_type_id *memType, t_bool powerOff)
+{
+ t_dsp_chunk_info chunk_info;
+
+ cm_DSP_GetDspChunkInfo(memHandle, &chunk_info);
+
+ if (powerOff) {
+ cm_PWR_DisableMemory(
+ chunk_info.coreId,
+ chunk_info.memType,
+ cm_DSP_GetPhysicalAdress(memHandle),
+ cm_MM_GetSize(memHandle));
+ }
+
+ cm_MM_Free(chunk_info.alloc, memHandle);
+
+ *coreId = chunk_info.coreId;
+ *memType = chunk_info.memType;
+}
+
+PUBLIC void cm_DM_Free(t_memory_handle memHandle, t_bool powerOff)
+{
+ t_nmf_core_id coreId;
+ t_dsp_memory_type_id memType;
+
+ cm_DM_FreeWithInfo(memHandle, &coreId, &memType, powerOff);
+}
+
+PUBLIC t_cm_error cm_DM_GetAllocatorStatus(t_cm_domain_id domainId, t_dsp_memory_type_id memType, t_cm_allocator_status *pStatus)
+{
+ t_cm_error error;
+ t_uint32 dOffset;
+ t_uint32 dSize;
+
+ //TODO, scratch
+ error = cm_DM_CheckDomain(domainId, DOMAIN_ANY);
+ if (error != CM_OK) {
+ return error;
+ }
+
+ cm_DSP_GetInternalMemoriesInfo(domainId, memType, &dOffset, &dSize);
+
+ return cm_DSP_GetAllocatorStatus(domainDesc[domainId].domain.coreId, memType,
+ dOffset, dSize, pStatus);
+}
+
+//WARNING: this function is only correct *before* migration, because the computation of
+//the absolute addresses of a domain is based on the allocator for the given segment
+//(this is hidden in cm_DSP_GetDspBaseAddress()), and this information is not valid
+//after migration (non-contiguous address space seen from the ARM side).
+PUBLIC t_cm_error cm_DM_GetDomainAbsAdresses(t_cm_domain_id domainId, t_cm_domain_info *info)
+{
+ t_cm_error error;
+ t_nmf_core_id coreId = domainDesc[domainId].domain.coreId;
+
+ cm_migration_check_state(coreId, STATE_NORMAL);
+
+ error = cm_DM_CheckDomain(domainId, DOMAIN_NORMAL);
+ if (error != CM_OK) {
+ return error;
+ }
+
+ cm_DSP_GetDspBaseAddress(coreId, SDRAM_CODE, &info->sdramCode);
+ cm_DSP_GetDspBaseAddress(coreId, ESRAM_CODE, &info->esramCode);
+ cm_DSP_GetDspBaseAddress(coreId, SDRAM_EXT24, &info->sdramData);
+ cm_DSP_GetDspBaseAddress(coreId, ESRAM_EXT24, &info->esramData);
+
+ info->sdramCode.physical += domainDesc[domainId].domain.sdramCode.offset;
+ info->sdramCode.logical += domainDesc[domainId].domain.sdramCode.offset;
+ info->esramCode.physical += domainDesc[domainId].domain.esramCode.offset;
+ info->esramCode.logical += domainDesc[domainId].domain.esramCode.offset;
+ info->sdramData.physical += domainDesc[domainId].domain.sdramData.offset;
+ info->sdramData.logical += domainDesc[domainId].domain.sdramData.offset;
+ info->esramData.physical += domainDesc[domainId].domain.esramData.offset;
+ info->esramData.logical += domainDesc[domainId].domain.esramData.offset;
+
+ return CM_OK;
+}
+
+static void cm_DM_DomainError(const t_cm_domain_id parentId, const t_nmf_client_id client)
+{
+ int i;
+ LOG_INTERNAL(0, "NMF_DEBUG_SCRATCH failed to allocate domain (client %u): 0x%08x -> 0x%08x\n",
+ client,
+ domainDesc[parentId].domain.esramData.offset,
+ domainDesc[parentId].domain.esramData.offset + domainDesc[parentId].domain.esramData.size,
+ 0, 0, 0);
+ for(i = 0; i < MAX_USER_DOMAIN_NB; i++) {
+ if (domainDesc[i].type == DOMAIN_SCRATCH_CHILD) {
+ LOG_INTERNAL(0, "NMF_DEBUG_SCRATCH scratch domain %d allocated (client %u): 0x%08x -> 0x%08x\n",
+ i, domainDesc[i].client,
+ domainDesc[i].domain.esramData.offset,
+ domainDesc[i].domain.esramData.offset + domainDesc[i].domain.esramData.size,
+ 0, 0);
+ }
+ }
+ cm_MM_DumpMemory(cm_DM_getAllocator(parentId, ESRAM_EXT16),
+ domainDesc[parentId].domain.esramData.offset,
+ domainDesc[parentId].domain.esramData.offset + domainDesc[parentId].domain.esramData.size);
+}
+
+PUBLIC void cm_DM_SetDefaultDomain(t_memory_handle memHandle, t_nmf_core_id coreId)
+{
+ if (coreId == SVA_CORE_ID)
+ cm_MM_SetDefaultDomain(memHandle, DEFAULT_SVA_DOMAIN);
+ else if (coreId == SIA_CORE_ID)
+ cm_MM_SetDefaultDomain(memHandle, DEFAULT_SIA_DOMAIN);
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/domain_wrapper.c b/drivers/staging/nmf-cm/cm/engine/memory/src/domain_wrapper.c
new file mode 100644
index 00000000000..ec305812f15
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/src/domain_wrapper.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/api/domain_engine.h>
+#include <cm/engine/api/migration_engine.h>
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/memory/inc/migration.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_CreateMemoryDomain(
+ const t_nmf_client_id client,
+ const t_cm_domain_memory *domain,
+ t_cm_domain_id *handle
+ )
+{
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+ error = cm_DM_CreateDomain(client, domain, handle);
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_CreateMemoryDomainScratch(
+ const t_nmf_client_id client,
+ const t_cm_domain_id parentId,
+ const t_cm_domain_memory *domain,
+ t_cm_domain_id *handle
+ )
+{
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+ error = cm_DM_CreateDomainScratch(client, parentId, domain, handle);
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_DestroyMemoryDomain(
+ t_cm_domain_id handle)
+{
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+ error = cm_DM_DestroyDomain(handle);
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_FlushMemoryDomains(
+ t_nmf_client_id client)
+{
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+ error = cm_DM_DestroyDomains(client);
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetDomainCoreId(const t_cm_domain_id domainId, t_nmf_core_id *coreId)
+{
+ t_cm_error error;
+ OSAL_LOCK_API();
+ //TODO, scratch
+ error = cm_DM_CheckDomain(domainId, DOMAIN_NORMAL);
+ if (error != CM_OK) {
+ OSAL_UNLOCK_API();
+ return error;
+ }
+
+ *coreId = cm_DM_GetDomainCoreId(domainId);
+ OSAL_UNLOCK_API();
+ return CM_OK;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_Migrate(const t_cm_domain_id srcShared, const t_cm_domain_id src, const t_cm_domain_id dst)
+{
+ t_cm_error error;
+ OSAL_LOCK_API();
+ error = cm_migrate(srcShared, src, dst);
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_Unmigrate(void)
+{
+ t_cm_error error;
+ OSAL_LOCK_API();
+ error = cm_unmigrate();
+ OSAL_UNLOCK_API();
+ return error;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/memory_wrapper.c b/drivers/staging/nmf-cm/cm/engine/memory/src/memory_wrapper.c
new file mode 100644
index 00000000000..37bea690e48
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/src/memory_wrapper.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/component/inc/instance.h>
+#include <cm/engine/configuration/inc/configuration.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+#include <cm/engine/trace/inc/trace.h>
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_AllocMpcMemory(
+ t_cm_domain_id domainId,
+ t_nmf_client_id clientId,
+ t_cm_mpc_memory_type memType,
+ t_cm_size size,
+ t_cm_mpc_memory_alignment memAlignment,
+ t_cm_memory_handle *pHandle
+ )
+{
+ t_dsp_memory_type_id dspMemType;
+
+ switch(memType)
+ {
+ case CM_MM_MPC_TCM16_X:
+ dspMemType = INTERNAL_XRAM16;
+ break;
+ case CM_MM_MPC_TCM24_X:
+ dspMemType = INTERNAL_XRAM24;
+ break;
+ case CM_MM_MPC_TCM16_Y:
+ dspMemType = INTERNAL_YRAM16;
+ break;
+ case CM_MM_MPC_TCM24_Y:
+ dspMemType = INTERNAL_YRAM24;
+ break;
+#ifndef __STN_8810
+ case CM_MM_MPC_ESRAM16:
+ dspMemType = ESRAM_EXT16;
+ break;
+ case CM_MM_MPC_ESRAM24:
+ dspMemType = ESRAM_EXT24;
+ break;
+#endif /* ndef __STN_8810 */
+ case CM_MM_MPC_SDRAM16:
+ dspMemType = SDRAM_EXT16;
+ break;
+ case CM_MM_MPC_SDRAM24:
+ dspMemType = SDRAM_EXT24;
+ break;
+ default:
+ return CM_INVALID_PARAMETER;
+ }
+
+ OSAL_LOCK_API();
+ {
+ t_cm_error error;
+ error = cm_DM_CheckDomainWithClient(domainId, DOMAIN_ANY, clientId);
+ if (error != CM_OK) {
+ OSAL_UNLOCK_API();
+ return error;
+ }
+ }
+
+ switch(memAlignment) {
+ case CM_MM_MPC_ALIGN_NONE :
+ case CM_MM_MPC_ALIGN_HALFWORD :
+ case CM_MM_MPC_ALIGN_WORD :
+ case CM_MM_MPC_ALIGN_2WORDS :
+ case CM_MM_MPC_ALIGN_4WORDS :
+ case CM_MM_MPC_ALIGN_8WORDS :
+ case CM_MM_MPC_ALIGN_16WORDS :
+ case CM_MM_MPC_ALIGN_32WORDS :
+ case CM_MM_MPC_ALIGN_64WORDS :
+ case CM_MM_MPC_ALIGN_128WORDS :
+ case CM_MM_MPC_ALIGN_256WORDS :
+ case CM_MM_MPC_ALIGN_512WORDS :
+ case CM_MM_MPC_ALIGN_1024WORDS :
+ case CM_MM_MPC_ALIGN_65536BYTES :
+ //case CM_MM_MPC_ALIGN_16384WORDS : maps to the same value as above
+ break;
+ default:
+ OSAL_UNLOCK_API();
+ return CM_INVALID_PARAMETER;
+ }
+
+ /* when allocating in TCM X or Y, make sure the EE is loaded first */
+ if ( memType == CM_MM_MPC_TCM16_X || memType == CM_MM_MPC_TCM24_X ||
+ memType == CM_MM_MPC_TCM16_Y || memType == CM_MM_MPC_TCM24_Y )
+ {
+ t_cm_error error;
+ if ((error = cm_CFG_CheckMpcStatus(cm_DM_GetDomainCoreId(domainId))) != CM_OK)
+ {
+ OSAL_UNLOCK_API();
+ return error;
+ }
+ }
+
+ /* alloc memory */
+ *pHandle = (t_cm_memory_handle)cm_DM_Alloc(domainId, dspMemType, size, memAlignment, TRUE);
+ if(*pHandle == (t_cm_memory_handle)INVALID_MEMORY_HANDLE)
+ {
+ OSAL_UNLOCK_API();
+ ERROR("CM_NO_MORE_MEMORY: CM_AllocMpcMemory() failed\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ OSAL_UNLOCK_API();
+ return CM_OK;
+}
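+
+/*
+ * Usage sketch (illustrative only): a domainId and clientId obtained beforehand are
+ * assumed; error handling is reduced to the minimum.
+ */
+#if 0
+static void example_mpc_memory_usage(t_cm_domain_id domainId, t_nmf_client_id clientId)
+{
+ t_cm_memory_handle handle;
+ t_cm_system_address sysAddr;
+
+ if (CM_ENGINE_AllocMpcMemory(domainId, clientId, CM_MM_MPC_SDRAM16,
+ 128, CM_MM_MPC_ALIGN_WORD, &handle) != CM_OK)
+ return;
+
+ CM_ENGINE_GetMpcMemorySystemAddress(handle, &sysAddr);
+ CM_ENGINE_FreeMpcMemory(handle);
+}
+#endif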
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_FreeMpcMemory(t_cm_memory_handle handle)
+{
+ t_cm_error error;
+ t_memory_handle validHandle;
+ t_nmf_core_id coreId;
+ t_dsp_memory_type_id memType;
+
+ OSAL_LOCK_API();
+
+ if((error = cm_MM_getValidMemoryHandle(handle, &validHandle)) != CM_OK)
+ {
+ OSAL_UNLOCK_API();
+ return error;
+ }
+
+ cm_DM_FreeWithInfo(validHandle, &coreId, &memType, TRUE);
+
+ /* if the memory was allocated in TCM X or Y, release the corresponding MPC (EE) reference */
+ if ( memType == INTERNAL_XRAM16 || memType == INTERNAL_XRAM24 ||
+ memType == INTERNAL_YRAM16 || memType == INTERNAL_YRAM24 )
+ {
+ cm_CFG_ReleaseMpc(coreId);
+ }
+
+ OSAL_UNLOCK_API();
+ return CM_OK;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetMpcMemorySystemAddress(t_cm_memory_handle handle, t_cm_system_address *pSystemAddress)
+{
+ t_cm_error error;
+ t_memory_handle validHandle;
+
+ OSAL_LOCK_API();
+
+ if((error = cm_MM_getValidMemoryHandle(handle, &validHandle)) != CM_OK)
+ {
+ OSAL_UNLOCK_API();
+ return error;
+ }
+
+ cm_DSP_GetHostSystemAddress(validHandle, pSystemAddress);
+
+ OSAL_UNLOCK_API();
+ return CM_OK;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetMpcMemoryMpcAddress(t_cm_memory_handle handle, t_uint32 *pDspAddress)
+{
+ t_cm_error error;
+ t_memory_handle validHandle;
+
+ OSAL_LOCK_API();
+
+ if((error = cm_MM_getValidMemoryHandle(handle, &validHandle)) != CM_OK)
+ {
+ OSAL_UNLOCK_API();
+ return error;
+ }
+
+ cm_DSP_GetDspAddress(validHandle, pDspAddress);
+
+ OSAL_UNLOCK_API();
+ return CM_OK;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetMpcMemoryStatus(t_nmf_core_id coreId, t_cm_mpc_memory_type memType, t_cm_allocator_status *pStatus)
+{
+ t_dsp_memory_type_id dspMemType;
+ t_cm_error error;
+
+ switch(memType)
+ {
+ case CM_MM_MPC_TCM16_X:
+ dspMemType = INTERNAL_XRAM16;
+ break;
+ case CM_MM_MPC_TCM24_X:
+ dspMemType = INTERNAL_XRAM24;
+ break;
+ case CM_MM_MPC_TCM16_Y:
+ dspMemType = INTERNAL_YRAM16;
+ break;
+ case CM_MM_MPC_TCM24_Y:
+ dspMemType = INTERNAL_YRAM24;
+ break;
+#ifndef __STN_8810
+ case CM_MM_MPC_ESRAM16:
+ dspMemType = ESRAM_EXT16;
+ break;
+ case CM_MM_MPC_ESRAM24:
+ dspMemType = ESRAM_EXT24;
+ break;
+#endif /* ndef __STN_8810 */
+ case CM_MM_MPC_SDRAM16:
+ dspMemType = SDRAM_EXT16;
+ break;
+ case CM_MM_MPC_SDRAM24:
+ dspMemType = SDRAM_EXT24;
+ break;
+ default:
+ return CM_INVALID_PARAMETER;
+ }
+
+ OSAL_LOCK_API();
+ error = cm_DSP_GetAllocatorStatus(coreId, dspMemType, 0, 0, pStatus);
+ OSAL_UNLOCK_API();
+
+ return error;
+}
+
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/migration.c b/drivers/staging/nmf-cm/cm/engine/memory/src/migration.c
new file mode 100644
index 00000000000..d68898d830e
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/src/migration.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/inc/cm_type.h>
+#include <inc/type.h>
+#include <inc/nmf-limits.h>
+
+#include <cm/engine/communication/fifo/inc/nmf_fifo_arm.h>
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/memory/inc/domain.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/memory/inc/migration.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/utils/inc/mem.h>
+
+#if defined(__STN_8500) && (__STN_8500 > 10)
+
+typedef enum {
+ CM_MIGRATION_OK = 0,
+ CM_MIGRATION_INVALID_ARGUMENT = 1,
+ CM_MIGRATION_ERROR = 2,
+} t_cm_migration_error;
+
+extern t_nmf_fifo_arm_desc* mpc2mpcComsFifoId[NB_CORE_IDS][NB_CORE_IDS];
+
+/*!
+ * \brief Data structure representing a segment to migrate
+ *
+ * segment:
+ *  - used to determine which MMDSP-HW base is to be updated; index into the mpcDesc->segments[] structure
+ *  - this is hard-coded in cm_migrate(), could be computed (would be nice) - LIMITATION
+ * srcAdr.physical:
+ *  - new base setting
+ *  - computed from the src domain in cm_DM_GetDomainAbsAdresses(), which uses the start of the allocator
+ *    for the memory; this is a LIMITATION, as this information is valid only before migration
+ * srcAdr.logical:
+ *  - used by cm_MemCopy()
+ * dstAdr.physical: see srcAdr.physical
+ * dstAdr.logical: see srcAdr.logical
+ * size:
+ *  - used by cm_MemCopy()
+ *  - used to set the top when the new base is set
+ */
+typedef struct {
+ t_dsp_segment_type segment; //!< the link to the segment type
+ t_cm_system_address srcAdr; //!< source address
+ t_cm_system_address dstAdr; //!< destination address
+ t_uint32 size; //!< size of the segment
+} t_cm_migration_segment;
+
+/*!
+ * \brief Internal data structure 1/ during migration, and 2/ between migration and unmigration calls
+ *
+ * all needed information are computed before calling _cm_migration_move()
+ */
+typedef struct {
+ t_cm_migration_state state; //!< migration state
+ t_nmf_core_id coreId; //!< migration only on one mpc
+ t_cm_migration_segment segments[NB_MIGRATION_SEGMENT]; //!< segments to migrate (selected on migration_move)
+ t_memory_handle handles[NB_MIGRATION_SEGMENT]; //!< memory handles for destination chunks allocated prior migration
+} t_cm_migration_internal_state;
+
+static t_cm_migration_internal_state migrationState = {STATE_NORMAL, };
+
+static t_cm_error _cm_migration_initSegment(
+ t_dsp_segment_type dspSegment,
+ t_cm_system_address *srcAdr,
+ t_uint32 size,
+ t_cm_domain_id dst,
+ t_cm_migration_internal_state *info
+ )
+{
+ t_cm_system_address dstAdr;
+ t_cm_migration_segment *segment = &info->segments[dspSegment];
+ t_memory_handle handle;
+
+ handle = cm_DM_Alloc(dst, ESRAM_EXT16, size >> 1, CM_MM_ALIGN_AHB_BURST, TRUE); //note: byte to half-word conversion
+ if (handle == 0) {
+ ERROR("CM_NO_MORE_MEMORY: Unable to init segment for migration\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ info->handles[dspSegment] = handle;
+
+ cm_DSP_GetHostSystemAddress(handle, &dstAdr);
+
+ segment->segment = dspSegment; //this is redundant and could be avoided by recoding move(), but nice to have for debug
+ segment->size = size;
+ segment->srcAdr = *srcAdr;
+ segment->dstAdr = dstAdr;
+
+ return CM_OK;
+}
+
+static void _cm_migration_releaseSegment(t_cm_migration_internal_state *info, t_dsp_segment_type segId)
+{
+ cm_DM_Free(info->handles[segId], TRUE);
+}
+
+static t_cm_migration_error _cm_migration_release(t_cm_migration_internal_state *info)
+{
+ t_uint32 i = 0;
+ for (i = 0; i < NB_MIGRATION_SEGMENT; i++) {
+ cm_DM_Free(info->handles[i], TRUE);
+ }
+
+ return CM_MIGRATION_OK;
+}
+
+#define SEGMENT_START(seg) \
+ ((seg).offset)
+
+#define SEGMENT_END(seg) \
+ ((seg).offset + (seg).size)
+
+static t_cm_error _cm_migration_check(
+ const t_cm_domain_id srcShared,
+ const t_cm_domain_id src,
+ const t_cm_domain_id dst,
+ t_cm_migration_internal_state *info
+ )
+{
+ t_cm_error error = CM_OK;
+ t_cm_domain_info domainInfoSrc;
+ t_cm_domain_info domainInfoShared;
+ t_cm_domain_desc *domainEE;
+ t_cm_domain_desc *domainShared;
+ t_nmf_core_id coreId = cm_DM_GetDomainCoreId(src);
+
+ // check that the coreIds of src, srcShared and dst match
+ if (!((domainDesc[src].domain.coreId == domainDesc[srcShared].domain.coreId)
+ && (domainDesc[src].domain.coreId == domainDesc[dst].domain.coreId))) {
+ return CM_INVALID_PARAMETER;
+ }
+
+ //check srcShared starts at 0
+ //FIXME, juraj, today EE code is in SDRAM, but this is flexible, so must find out where EE is instantiated
+ if (domainDesc[srcShared].domain.sdramCode.offset != 0x0) {
+ return CM_INVALID_PARAMETER;
+ }
+
+ //check srcShared contains EE domain
+ domainEE = &domainDesc[cm_DSP_GetState(coreId)->domainEE];
+ domainShared = &domainDesc[srcShared];
+ if ((SEGMENT_START(domainEE->domain.esramCode) < SEGMENT_START(domainShared->domain.esramCode))
+ ||(SEGMENT_END(domainEE->domain.esramCode) > SEGMENT_END(domainShared->domain.esramCode))
+ ||(SEGMENT_START(domainEE->domain.esramData) < SEGMENT_START(domainShared->domain.esramData))
+ ||(SEGMENT_END(domainEE->domain.esramData) > SEGMENT_END(domainShared->domain.esramData))
+ ||(SEGMENT_START(domainEE->domain.sdramCode) < SEGMENT_START(domainShared->domain.sdramCode))
+ ||(SEGMENT_END(domainEE->domain.sdramCode) > SEGMENT_END(domainShared->domain.sdramCode))
+ ||(SEGMENT_START(domainEE->domain.sdramData) < SEGMENT_START(domainShared->domain.sdramData))
+ ||(SEGMENT_END(domainEE->domain.sdramData) > SEGMENT_END(domainShared->domain.sdramData))
+ ) {
+ return CM_INVALID_PARAMETER;
+ }
+
+ info->coreId = coreId;
+ cm_DM_GetDomainAbsAdresses(srcShared, &domainInfoShared);
+ cm_DM_GetDomainAbsAdresses(src, &domainInfoSrc);
+
+ if ((error = _cm_migration_initSegment(SDRAM_CODE_EE, &domainInfoShared.sdramCode,
+ domainDesc[srcShared].domain.sdramCode.size, dst, info)) != CM_OK)
+ goto _migration_error1;
+ if ((error = _cm_migration_initSegment(SDRAM_CODE_USER, &domainInfoSrc.sdramCode,
+ domainDesc[src].domain.sdramCode.size, dst, info)) != CM_OK)
+ goto _migration_error2;
+ if ((error = _cm_migration_initSegment(SDRAM_DATA_EE, &domainInfoShared.sdramData,
+ domainDesc[srcShared].domain.sdramData.size, dst, info)) != CM_OK)
+ goto _migration_error3;
+ if ((error = _cm_migration_initSegment(SDRAM_DATA_USER, &domainInfoSrc.sdramData,
+ domainDesc[src].domain.sdramData.size, dst, info)) != CM_OK)
+ goto _migration_error4;
+ return error;
+
+_migration_error4: _cm_migration_releaseSegment(info, SDRAM_DATA_EE);
+_migration_error3: _cm_migration_releaseSegment(info, SDRAM_CODE_USER);
+_migration_error2: _cm_migration_releaseSegment(info, SDRAM_CODE_EE);
+_migration_error1:
+ OSAL_Log("Couldn't allocate memory for migration\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+}
+
+typedef t_cm_error (*updateBase_t)(t_nmf_core_id, t_dsp_segment_type, t_cm_system_address, t_cm_system_address);
+
+static t_cm_migration_error _cm_migration_move(
+ t_nmf_core_id coreId,
+ t_cm_migration_segment *seg,
+ updateBase_t updateBase,
+ char* name
+ )
+{
+ LOG_INTERNAL(1, "##### Migration %s: 0x%x -> 0x%x\n", name, seg->srcAdr.logical, seg->dstAdr.logical, 0, 0, 0);
+ cm_MemCopy((void*)seg->dstAdr.logical, (void*)seg->srcAdr.logical, seg->size);
+ updateBase(coreId, seg->segment, seg->srcAdr, seg->dstAdr);
+ cm_MemSet((void*)seg->srcAdr.logical, 0xdead, seg->size); //for debug, to be sure that we have actually moved the code and bases
+
+ return CM_MIGRATION_OK;
+}
+
+static t_cm_migration_error _cm_migration_update_internal(
+ t_cm_migration_internal_state *info,
+ t_cm_migration_state state
+ )
+{
+ t_nmf_fifo_arm_desc *pArmFifo;
+
+ migrationState.state = state;
+
+ switch(state) {
+ case STATE_MIGRATED:
+ //move fifos
+ pArmFifo = mpc2mpcComsFifoId[ARM_CORE_ID][info->coreId];
+ pArmFifo->fifoDesc = (t_nmf_fifo_desc*)cm_migration_translate(pArmFifo->dspAddressInfo.segmentType, (t_shared_addr)pArmFifo->fifoDescShadow);
+ pArmFifo = mpc2mpcComsFifoId[info->coreId][ARM_CORE_ID];
+ pArmFifo->fifoDesc = (t_nmf_fifo_desc*)cm_migration_translate(pArmFifo->dspAddressInfo.segmentType, (t_shared_addr)pArmFifo->fifoDescShadow);
+ break;
+
+ case STATE_NORMAL:
+ //move fifos
+ pArmFifo = mpc2mpcComsFifoId[ARM_CORE_ID][info->coreId];
+ pArmFifo->fifoDesc = pArmFifo->fifoDescShadow;
+ pArmFifo = mpc2mpcComsFifoId[info->coreId][ARM_CORE_ID];
+ pArmFifo->fifoDesc = pArmFifo->fifoDescShadow;
+ break;
+
+ default:
+ OSAL_Log("unknown state", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+
+ return CM_MIGRATION_OK;
+}
+
+PUBLIC t_cm_error cm_migrate(const t_cm_domain_id srcShared, const t_cm_domain_id src, const t_cm_domain_id dst)
+{
+ t_cm_migration_error mError;
+ t_cm_error error;
+
+ if ((error = _cm_migration_check(srcShared, src, dst, &migrationState)) != CM_OK) {
+ return error;
+ }
+
+ /* stop DSP execution */
+ cm_DSP_Stop(migrationState.coreId);
+
+ /* migrate EE and FX */
+ mError = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_CODE_EE], cm_DSP_updateCodeBase, "code");
+ if (mError) {
+ OSAL_Log("EE code migration failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+ mError = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_DATA_EE], cm_DSP_updateDataBase, "data");
+ if (mError) {
+ OSAL_Log("EE data migration failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+ /* migrate user domain */
+ mError = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_CODE_USER], cm_DSP_updateCodeBase, "code");
+ if (mError) {
+ OSAL_Log("User code migration failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+ mError = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_DATA_USER], cm_DSP_updateDataBase, "data");
+ if (mError) {
+ OSAL_Log("User data migration failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+ /* update CM internal structures */
+ mError = _cm_migration_update_internal(&migrationState, STATE_MIGRATED);
+ if (mError) {
+ OSAL_Log("Update internal data failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+
+ /* Make sure everything has been written before restarting the MMDSP */
+ OSAL_mb();
+
+ /* resume DSP execution */
+ cm_DSP_Start(migrationState.coreId);
+
+ return CM_OK;
+}
+
+static void _cm_migration_swapSegments(
+ t_cm_migration_segment *segment
+ )
+{
+ t_cm_system_address tmp;
+ tmp = segment->dstAdr;
+ segment->dstAdr = segment->srcAdr;
+ segment->srcAdr = tmp;
+}
+
+PUBLIC t_cm_error cm_unmigrate(void)
+{
+ t_cm_migration_error merror;
+
+ if (migrationState.state != STATE_MIGRATED)
+ return CM_INVALID_PARAMETER; //TODO, juraj, define a proper error for this migration case
+
+ cm_DSP_Stop(migrationState.coreId);
+
+ _cm_migration_swapSegments(&migrationState.segments[SDRAM_CODE_EE]);
+ _cm_migration_swapSegments(&migrationState.segments[SDRAM_DATA_EE]);
+ _cm_migration_swapSegments(&migrationState.segments[SDRAM_CODE_USER]);
+ _cm_migration_swapSegments(&migrationState.segments[SDRAM_DATA_USER]);
+
+ merror = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_CODE_EE], cm_DSP_updateCodeBase, "code");
+ if (merror) {
+ OSAL_Log("EE code unmigration failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+ merror = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_DATA_EE], cm_DSP_updateDataBase, "data");
+ if (merror) {
+ OSAL_Log("EE data unmigration failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+ merror = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_CODE_USER], cm_DSP_updateCodeBase, "code");
+ if (merror) {
+ OSAL_Log("User code unmigration failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+ merror = _cm_migration_move(migrationState.coreId, &migrationState.segments[SDRAM_DATA_USER], cm_DSP_updateDataBase, "data");
+ if (merror) {
+ OSAL_Log("User data unmigration failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+
+ /* update CM internal structures */
+ merror = _cm_migration_update_internal(&migrationState, STATE_NORMAL);
+ if (merror) {
+ OSAL_Log("Update internal data failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+
+ /* Make sure everything has been written before restarting the MMDSP */
+ OSAL_mb();
+
+ cm_DSP_Start(migrationState.coreId);
+
+ /* release the memory that was reserved in the destination domain for migration */
+ merror = _cm_migration_release(&migrationState);
+ if (merror) {
+ OSAL_Log("Update internal data failed", 0, 0, 0, 0, 0, 0);
+ CM_ASSERT(0);
+ }
+
+ return CM_OK;
+}
+
+// here we assume that the offset does not depend on the DSP
+PUBLIC t_uint32 cm_migration_translate(t_dsp_segment_type segmentType, t_uint32 addr)
+{
+ //TODO, juraj, save delta instead of recalculating it
+ t_sint32 offset;
+ if (migrationState.state == STATE_MIGRATED) {
+ offset = migrationState.segments[segmentType].dstAdr.logical - migrationState.segments[segmentType].srcAdr.logical;
+ } else {
+ offset = 0;
+ }
+ return addr + offset;
+}
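+
+/*
+ * Example (hypothetical addresses): if the SDRAM_CODE_USER segment was moved from
+ * logical 0x40010000 to 0x48010000, then while in STATE_MIGRATED the translation
+ * adds the delta 0x08000000, so address 0x40012345 becomes 0x48012345; once back
+ * in STATE_NORMAL the offset is 0 and addresses are returned unchanged.
+ */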
+
+PUBLIC void cm_migration_check_state(t_nmf_core_id coreId, t_cm_migration_state expected)
+{
+ CM_ASSERT(migrationState.state == expected);
+}
+
+#else
+PUBLIC t_cm_error cm_migrate(const t_cm_domain_id srcShared, const t_cm_domain_id src, const t_cm_domain_id dst)
+{
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_unmigrate(void)
+{
+ return CM_OK;
+}
+
+PUBLIC t_uint32 cm_migration_translate(t_dsp_segment_type segmentType, t_uint32 addr)
+{
+ return addr;
+}
+
+PUBLIC void cm_migration_check_state(t_nmf_core_id coreId, t_cm_migration_state expected)
+{
+ return;
+}
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator.c b/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator.c
new file mode 100644
index 00000000000..0d000d37371
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator.c
@@ -0,0 +1,656 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ * Include
+ */
+#include "../inc/remote_allocator.h"
+#include "../inc/remote_allocator_utils.h"
+#include "../inc/chunk_mgr.h"
+
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/trace/inc/xtitrace.h>
+
+static void cm_MM_RA_checkAllocator(t_cm_allocator_desc* alloc);
+//static void cm_MM_RA_checkAlloc(t_cm_allocator_desc* alloc, t_uint32 size, t_uint32 align, t_uint32 min, t_uint32 max);
+
+int bin_index(unsigned int sz) {
+ /*
+ * 32 bins of size 2
+ * 16 bins of size 16
+ * 8 bins of size 128
+ * 4 bins of size 1024
+ * 2 bins of size 8192
+ * 1 bin of size what's left
+ *
+ */
+ return (((sz >> 6) == 0) ? (sz >> 1): // 0 -> 0 .. 31
+ ((sz >> 6) <= 4) ? 28 + (sz >> 4): // 64 -> 32 .. 47
+ ((sz >> 6) <= 20) ? 46 + (sz >> 7): // 320 -> 48 .. 55
+ ((sz >> 6) <= 84) ? 55 + (sz >> 10): // 1344 -> 56 .. 59
+ ((sz >> 6) <= 340) ? 59 + (sz >> 13): // 5440 -> 60 .. 61
+ 62); // 21824..
+}
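+
+/*
+ * Worked examples for the binning above (values derived from the expression):
+ *   bin_index(10)    == 5   (sz < 64, bin = sz >> 1)
+ *   bin_index(100)   == 34  (64 <= sz < 320, bin = 28 + (sz >> 4))
+ *   bin_index(1000)  == 53  (320 <= sz < 1344, bin = 46 + (sz >> 7))
+ *   bin_index(30000) == 62  (last bin, sz >= 21824)
+ */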
+
+static t_cm_allocator_desc* ListOfAllocators = NULL;
+
+PUBLIC t_cm_allocator_desc* cm_MM_CreateAllocator(t_cm_size size, t_uint32 offset, const char* name)
+{
+ t_cm_allocator_desc *alloc;
+
+ CM_ASSERT(fillChunkPool() == CM_OK);
+
+ /* Alloc structure */
+ alloc = (t_cm_allocator_desc*)OSAL_Alloc_Zero(sizeof(t_cm_allocator_desc));
+ CM_ASSERT(alloc != NULL);
+
+ // Add allocator in list
+ alloc->next = ListOfAllocators;
+ ListOfAllocators = alloc;
+
+ /* assign name */
+ alloc->pAllocName = name;
+
+ alloc->maxSize = size;
+ alloc->sbrkSize = 0;
+ alloc->offset = offset;
+
+ //TODO, juraj, alloc impacts trace format
+ cm_TRC_traceMemAlloc(TRACE_ALLOCATOR_COMMAND_CREATE, 0, size, name);
+
+ return alloc;
+}
+
+PUBLIC t_cm_error cm_MM_DeleteAllocator(t_cm_allocator_desc *alloc)
+{
+ t_cm_chunk *chunk, *next_cm_chunk;
+
+ cm_TRC_traceMemAlloc(TRACE_ALLOCATOR_COMMAND_DESTROY, 0, 0, alloc->pAllocName);
+
+ /* Parse all chunks and free them */
+ chunk = alloc->chunks;
+ while(chunk != 0)
+ {
+ next_cm_chunk = chunk->next;
+ unlinkChunk(alloc, chunk);
+ freeChunk(chunk);
+
+ chunk = next_cm_chunk;
+ }
+
+ // Remove allocator from the list
+ if(ListOfAllocators == alloc)
+ ListOfAllocators = alloc->next;
+ else {
+ t_cm_allocator_desc *prev = ListOfAllocators;
+ while(prev->next != alloc)
+ prev = prev->next;
+ prev->next = alloc->next;
+ }
+
+
+ /* Free allocator descriptor */
+ OSAL_Free(alloc);
+
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_MM_ResizeAllocator(t_cm_allocator_desc *alloc, t_cm_size size)
+{
+ /* sanity check */
+ if (size == 0)
+ return CM_INVALID_PARAMETER;
+
+ if(alloc->sbrkSize > size)
+ return CM_NO_MORE_MEMORY;
+
+ alloc->maxSize = size;
+
+ if (cmIntensiveCheckState)
+ cm_MM_RA_checkAllocator(alloc);
+
+ return CM_OK;
+}
+
+t_cm_error cm_MM_getValidMemoryHandle(t_cm_memory_handle handle, t_memory_handle* validHandle)
+{
+#ifdef LINUX
+ /* On linux, there is already a check within the linux part
+ * => we don't need to check twice */
+ *validHandle = (t_memory_handle)handle;
+ return CM_OK;
+#else
+ t_cm_allocator_desc *alloc = ListOfAllocators;
+
+ for(; alloc != NULL; alloc = alloc->next)
+ {
+ t_cm_chunk* chunk = alloc->chunks;
+
+ /* Parse all chunks */
+ for(; chunk != NULL; chunk = chunk->next)
+ {
+ if(chunk == (t_memory_handle)handle)
+ {
+ if(chunk->status == MEM_FREE)
+ return CM_MEMORY_HANDLE_FREED;
+
+ *validHandle = (t_memory_handle)handle;
+
+ return CM_OK;
+ }
+ }
+ }
+
+ return CM_UNKNOWN_MEMORY_HANDLE;
+#endif
+}
+
+//TODO, juraj, record the owning allocator in the chunk (allocHandle) and remove setUserData
+PUBLIC t_memory_handle cm_MM_Alloc(
+ t_cm_allocator_desc* alloc,
+ t_cm_size size,
+ t_cm_memory_alignment memAlignment,
+ t_uint32 seg_offset,
+ t_uint32 seg_size,
+ t_uint32 domainId)
+{
+ t_cm_chunk* chunk;
+ t_uint32 aligned_offset;
+ t_uint32 aligned_end;
+ t_uint32 seg_end = seg_offset + seg_size;
+ int i;
+
+ /* Sanity check */
+ if ( (size == 0) || (size > seg_size) )
+ return INVALID_MEMORY_HANDLE;
+
+ if(fillChunkPool() != CM_OK)
+ return INVALID_MEMORY_HANDLE;
+
+ /* Get first chunk available for the specific size */
+ // Search a list with a free chunk
+ for(i = bin_index(size); i < BINS; i++)
+ {
+ chunk = alloc->free_mem_chunks[i];
+ while (chunk != 0)
+ {
+ /* Alignment of the lower boundary */
+ aligned_offset = ALIGN_VALUE(MAX(chunk->offset, seg_offset), (memAlignment + 1));
+
+ aligned_end = aligned_offset + size;
+
+ if ((aligned_end <= seg_end)
+ && aligned_end <= (chunk->offset + chunk->size)
+ && aligned_offset >= seg_offset
+ && aligned_offset >= chunk->offset)
+ goto found;
+
+ chunk = chunk->next_free_mem;
+ }
+ }
+
+ // Try to grow sbrkSize, up to maxSize
+ aligned_offset = ALIGN_VALUE(MAX((alloc->offset + alloc->sbrkSize), seg_offset), (memAlignment + 1));
+
+ aligned_end = aligned_offset + size;
+
+ if ((aligned_end <= seg_end)
+ && aligned_end <= (alloc->offset + alloc->maxSize)
+ && aligned_offset >= seg_offset
+ && aligned_offset >= (alloc->offset + alloc->sbrkSize))
+ {
+ /* If that fits the requirements, create a new chunk at the end of the current allocator */
+ chunk = allocChunk();
+
+ /* Update chunk size */
+ chunk->offset = alloc->offset + alloc->sbrkSize; // offset start at end of current allocator
+ chunk->size = aligned_end - chunk->offset;
+ chunk->alloc = alloc;
+
+ /* Chain it with latest chunk */
+ linkChunk(alloc, alloc->lastChunk, chunk);
+
+ /* Increase sbrkSize to end of this new chunk */
+ alloc->sbrkSize += chunk->size;
+
+ goto foundNew;
+ }
+
+ return INVALID_MEMORY_HANDLE;
+
+found:
+ /* Remove chunk from free list */
+ unlinkFreeMem(alloc, chunk);
+
+foundNew:
+ //create an empty chunk before the allocated one
+ if (chunk->offset < aligned_offset) {
+ chunk = splitChunk(alloc, chunk, aligned_offset, FREE_CHUNK_BEFORE);
+ }
+ //create an empty chunk after the allocated one
+ if (chunk->offset + chunk->size > aligned_end) {
+ splitChunk(alloc, chunk, aligned_end, FREE_CHUNK_AFTER);
+ }
+
+ chunk->status = MEM_USED;
+ chunk->prev_free_mem = 0;
+ chunk->next_free_mem = 0;
+ chunk->domainId = domainId;
+
+ //TODO, juraj, alloc impacts trace format
+ cm_TRC_traceMem(TRACE_ALLOC_COMMAND_ALLOC, 0, chunk->offset, chunk->size);
+
+ if (cmIntensiveCheckState) {
+ cm_MM_RA_checkAllocator(alloc);
+ }
+
+ return (t_memory_handle) chunk;
+}
+
+//caution - if successful, the chunk offset will be aligned with seg_offset
+//caution++ - the offset of the allocated chunk changes implicitly
+PUBLIC t_cm_error cm_MM_Realloc(
+ t_cm_allocator_desc* alloc,
+ const t_cm_size size,
+ const t_uint32 offset,
+ t_memory_handle *handle)
+{
+ t_cm_chunk *chunk = (t_cm_chunk*)*handle;
+ t_uint32 oldOffset = chunk->offset;
+ t_uint32 oldSize = chunk->size;
+ t_uint32 oldDomainId = chunk->domainId;
+ t_uint16 userData = chunk->userData;
+
+ cm_MM_Free(alloc, *handle);
+
+ *handle = cm_MM_Alloc(alloc, size, CM_MM_ALIGN_NONE, offset, size, oldDomainId);
+
+ if(*handle == INVALID_MEMORY_HANDLE)
+ {
+ *handle = cm_MM_Alloc(alloc, oldSize, CM_MM_ALIGN_NONE, oldOffset, oldSize, oldDomainId);
+
+ CM_ASSERT(*handle != INVALID_MEMORY_HANDLE);
+
+ chunk = (t_cm_chunk*)*handle;
+ chunk->userData = userData;
+
+ return CM_NO_MORE_MEMORY;
+ }
+
+ chunk = (t_cm_chunk*)*handle;
+ chunk->userData = userData;
+
+ return CM_OK;
+
+#if 0
+ /* check reallocation is related to this chunk! */
+ CM_ASSERT(chunk->offset <= (offset + size));
+ CM_ASSERT(offset <= (chunk->offset + chunk->size));
+ CM_ASSERT(size);
+
+ /* check if extend low */
+ if (offset < chunk->offset) {
+ /* note: it is enough to check only the previous chunk,
+ * because adjacent chunks of same status are merged
+ */
+ if ((chunk->prev == 0)
+ ||(chunk->prev->status != MEM_FREE)
+ ||(chunk->prev->offset > offset)) {
+ return INVALID_MEMORY_HANDLE;
+ }
+ }
+
+ /* check if extend high, extend sbrk if necessary */
+ if ( (offset + size) > (chunk->offset + chunk->size)) {
+ if(chunk->next == 0)
+ {
+ // check if allocator can be extended to maxSize
+ if((offset + size) > (alloc->offset + alloc->maxSize))
+ return INVALID_MEMORY_HANDLE;
+ }
+ else
+ {
+ if ((chunk->next->status != MEM_FREE)
+ ||( (chunk->next->offset + chunk->next->size) < (offset + size))) {
+ return INVALID_MEMORY_HANDLE;
+ }
+ }
+ }
+
+ if(fillChunkPool() != CM_OK)
+ return INVALID_MEMORY_HANDLE;
+
+
+ /* extend low
+ * all conditions should have been checked
+ * this must not fail
+ */
+ if (offset < chunk->offset) {
+ t_uint32 delta = chunk->prev->offset + chunk->prev->size - offset;
+ t_cm_chunk *prev = chunk->prev;
+
+ chunk->offset -= delta;
+ chunk->size += delta;
+
+ CM_ASSERT(prev->status == MEM_FREE); //TODO, juraj, already checked
+ unlinkFreeMem(alloc, prev);
+ prev->size -= delta;
+ if(prev->size == 0)
+ {
+ unlinkChunk(alloc, prev);
+ freeChunk(prev);
+ } else {
+ updateFreeList(alloc, prev);
+ }
+ }
+
+ /* extend high */
+ if ( (offset + size) > (chunk->offset + chunk->size)) {
+ t_uint32 delta = size - chunk->size;
+ t_cm_chunk *next = chunk->next;
+
+ chunk->size += delta;
+
+ if(next == 0)
+ {
+ alloc->sbrkSize += delta;
+ } else {
+ CM_ASSERT(next->status == MEM_FREE);
+ unlinkFreeMem(alloc, next);
+ next->offset += delta;
+ next->size -= delta;
+ if(next->size == 0)
+ {
+ unlinkChunk(alloc, next);
+ freeChunk(next);
+ } else {
+ updateFreeList(alloc, next);
+ }
+ }
+ }
+
+ /* reduce top */
+ if ((offset + size) < (chunk->offset + chunk->size)) {
+ t_uint32 delta = chunk->size - size;
+
+ if(chunk->next == 0) {
+ alloc->sbrkSize -= delta;
+ chunk->size -= delta;
+
+ } else if (chunk->next->status == MEM_FREE) {
+ unlinkFreeMem(alloc, chunk->next);
+ chunk->size -= delta;
+ chunk->next->offset -= delta;
+ chunk->next->size += delta;
+ updateFreeList(alloc, chunk->next);
+ } else {
+ t_cm_chunk *tmp = splitChunk(alloc, chunk, offset + size, FREE_CHUNK_AFTER); //tmp = chunk, chunk = result
+ tmp->status = MEM_USED;
+ tmp->next->status = MEM_FREE;
+ }
+ }
+
+ /* reduce bottom */
+ if (offset > chunk->offset) {
+ if (chunk->prev->status == MEM_FREE) {
+ t_uint32 delta = offset - chunk->offset;
+ unlinkFreeMem(alloc, chunk->prev);
+ chunk->prev->size += delta;
+ chunk->offset = offset;
+ chunk->size -= delta;
+ updateFreeList(alloc, chunk->prev);
+ } else {
+ t_cm_chunk *tmp = splitChunk(alloc, chunk, offset, FREE_CHUNK_BEFORE); //tmp->next = chunk, tmp = result
+ tmp->status = MEM_USED;
+ tmp->prev->status = MEM_FREE;
+ }
+ }
+
+ cm_MM_RA_checkAllocator(alloc);
+
+ return (t_memory_handle)chunk;
+#endif
+}
+
+PUBLIC void cm_MM_Free(t_cm_allocator_desc* alloc, t_memory_handle memHandle)
+{
+ t_cm_chunk* chunk = (t_cm_chunk*)memHandle;
+
+ //TODO, juraj, alloc impacts trace format
+ cm_TRC_traceMem(TRACE_ALLOC_COMMAND_FREE, 0,
+ chunk->offset, chunk->size);
+
+ /* Update chunk status */
+ chunk->status = MEM_FREE;
+ chunk->domainId = 0x0;
+
+ // Invariant: Current chunk is free but not in free list
+
+ /* Check if the previous chunk is free */
+ if((chunk->prev != 0) && (chunk->prev->status == MEM_FREE))
+ {
+ t_cm_chunk* prev = chunk->prev;
+
+ // Remove chunk to be freed from memory list
+ unlinkChunk(alloc, chunk);
+
+ // Remove previous from free list
+ unlinkFreeMem(alloc, prev);
+
+ // Update previous size
+ prev->size += chunk->size;
+
+ freeChunk(chunk);
+
+ chunk = prev;
+ }
+
+ /* Check if the next chunk is free */
+ if((chunk->next != 0) && (chunk->next->status == MEM_FREE))
+ {
+ t_cm_chunk* next = chunk->next;
+
+ // Remove next from memory list
+ unlinkChunk(alloc, next);
+
+ // Remove next from free list
+ unlinkFreeMem(alloc, next);
+
+ // Update current chunk size
+ chunk->size += next->size;
+
+ freeChunk(next);
+ }
+
+ if(chunk->next == 0)
+ {
+ // If we are the last one, decrease sbrkSize
+ alloc->sbrkSize -= chunk->size;
+
+ unlinkChunk(alloc, chunk);
+ freeChunk(chunk);
+
+ }
+ else
+ {
+ // Add it in free list
+ updateFreeList(alloc, chunk);
+ }
+
+ if (cmIntensiveCheckState) {
+ cm_MM_RA_checkAllocator(alloc);
+ }
+}
+
+PUBLIC t_cm_error cm_MM_GetAllocatorStatus(t_cm_allocator_desc* alloc, t_uint32 offset, t_uint32 size, t_cm_allocator_status *pStatus)
+{
+ t_cm_chunk* chunk = alloc->chunks;
+ t_uint32 sbrkFree = alloc->maxSize - alloc->sbrkSize;
+ t_uint8 min_free_size_updated = FALSE;
+
+ /* Init status */
+ pStatus->global.used_block_number = 0;
+ pStatus->global.free_block_number = 0;
+ pStatus->global.maximum_free_size = 0;
+ pStatus->global.minimum_free_size = 0xFFFFFFFF;
+ pStatus->global.accumulate_free_memory = 0;
+ pStatus->global.accumulate_used_memory = 0;
+ pStatus->global.size = alloc->maxSize;
+ pStatus->domain.maximum_free_size = 0;
+ pStatus->domain.minimum_free_size = 0xFFFFFFFF;
+ pStatus->domain.accumulate_free_memory = 0;
+ pStatus->domain.accumulate_used_memory = 0;
+ pStatus->domain.size= size;
+
+ /* Parse all chunks */
+ while(chunk != 0)
+ {
+
+ /* Chunk is free */
+ if (chunk->status == MEM_FREE) {
+ pStatus->global.free_block_number++;
+ pStatus->global.accumulate_free_memory += chunk->size;
+
+ /* Check max size */
+ if (chunk->size > pStatus->global.maximum_free_size)
+ {
+ pStatus->global.maximum_free_size = chunk->size;
+ }
+
+ /* Check min size */
+ if (chunk->size < pStatus->global.minimum_free_size)
+ {
+ pStatus->global.minimum_free_size = chunk->size;
+ min_free_size_updated = TRUE;
+ }
+ } else {/* Chunk used */
+ pStatus->global.used_block_number++;
+ pStatus->global.accumulate_used_memory += chunk->size;
+ }
+
+ chunk = chunk->next;
+ }
+
+ /* Accumulate free space between sbrkSize and maxSize */
+ pStatus->global.accumulate_free_memory += sbrkFree;
+ if (sbrkFree > 0)
+ pStatus->global.free_block_number++;
+ if (pStatus->global.maximum_free_size < sbrkFree)
+ pStatus->global.maximum_free_size = sbrkFree;
+ if (pStatus->global.minimum_free_size > sbrkFree) {
+ pStatus->global.minimum_free_size = sbrkFree;
+ min_free_size_updated = TRUE;
+ }
+
+ /* If no free chunk was found, set the min free size to the max free size */
+ if (min_free_size_updated == FALSE) {
+ pStatus->global.minimum_free_size = pStatus->global.maximum_free_size;
+ }
+
+ return CM_OK;
+}
+
+PUBLIC t_uint32 cm_MM_GetOffset(t_memory_handle memHandle)
+{
+ /* Provide offset */
+ return ((t_cm_chunk*)memHandle)->offset;
+}
+
+PUBLIC t_uint32 cm_MM_GetSize(t_memory_handle memHandle)
+{
+ return ((t_cm_chunk*)memHandle)->size;
+}
+
+PUBLIC t_uint32 cm_MM_GetAllocatorSize(t_cm_allocator_desc* alloc)
+{
+ return alloc->maxSize;
+}
+
+PUBLIC void cm_MM_SetMemoryHandleUserData(t_memory_handle memHandle, t_uint16 userData)
+{
+ ((t_cm_chunk*)memHandle)->userData = userData;
+}
+
+PUBLIC void cm_MM_GetMemoryHandleUserData(t_memory_handle memHandle, t_uint16 *pUserData, t_cm_allocator_desc **alloc)
+{
+ *pUserData = ((t_cm_chunk*)memHandle)->userData;
+ if (alloc)
+ *alloc = ((t_cm_chunk*)memHandle)->alloc;
+}
+
+/*
+ * check free list is ordered
+ * check all chunks are correctly linked
+ * check adjacent chunks are not FREE
+ */
+static void cm_MM_RA_checkAllocator(t_cm_allocator_desc* alloc)
+{
+ t_cm_chunk *chunk = alloc->chunks;
+ t_uint32 size = 0;
+ int i;
+
+ CM_ASSERT(alloc->sbrkSize <= alloc->maxSize);
+
+ while(chunk != 0) {
+ if(chunk == alloc->chunks)
+ CM_ASSERT(chunk->prev == 0);
+ if(chunk == alloc->lastChunk)
+ CM_ASSERT(chunk->next == 0);
+
+ CM_ASSERT(chunk->alloc == alloc);
+
+ if (chunk->next != 0) {
+ CM_ASSERT(!((chunk->status == MEM_FREE) && (chunk->next->status == MEM_FREE))); //two free adjacent blocks
+ CM_ASSERT(chunk->offset < chunk->next->offset); //offsets reverted
+ CM_ASSERT(chunk->offset + chunk->size == chunk->next->offset); // Not hole in allocator
+ }
+ size += chunk->size;
+ chunk = chunk->next;
+ }
+
+ CM_ASSERT(size == alloc->sbrkSize);
+
+ for(i = 0; i < BINS; i++)
+ {
+ chunk = alloc->free_mem_chunks[i];
+ while(chunk != 0) {
+ if (chunk->next_free_mem != 0) {
+ CM_ASSERT(chunk->size <= chunk->next_free_mem->size); //free list not ordered
+ }
+ chunk = chunk->next_free_mem;
+ }
+ }
+}
+
+PUBLIC void cm_MM_DumpMemory(t_cm_allocator_desc* alloc, t_uint32 start, t_uint32 end)
+{
+ t_cm_chunk *chunk = alloc->chunks;
+
+ LOG_INTERNAL(0, "ALLOCATOR Dumping allocator \"%s\" [0x%08x:0x%08x]\n", alloc->pAllocName, start, end, 0, 0, 0);
+ while(chunk != 0) {
+ if (((chunk->offset < start) && (chunk->offset + chunk->size > start))
+ || ((chunk->offset < end) && (chunk->offset + chunk->size > end))
+ || ((chunk->offset > start) && (chunk->offset + chunk->size < end))
+ || ((chunk->offset < start) && (chunk->offset + chunk->size > end)))
+ {
+ LOG_INTERNAL(0, "ALLOCATOR chunk [0x%08x -> 0x%08x[: status:%s, domainId: 0x%x\n",
+ chunk->offset,
+ chunk->offset + chunk->size,
+ chunk->status?"FREE":"USED",
+ chunk->domainId, 0, 0);
+ }
+ chunk = chunk->next;
+ }
+}
+
+PUBLIC void cm_MM_SetDefaultDomain(t_memory_handle memHandle, t_uint32 domainId)
+{
+ ((t_cm_chunk *) memHandle)->domainId = domainId;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator_utils.c b/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator_utils.c
new file mode 100644
index 00000000000..4e800376dbb
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/memory/src/remote_allocator_utils.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/memory/inc/remote_allocator_utils.h>
+#include <cm/engine/trace/inc/trace.h>
+
+/***************************************************************************/
+/*
+ * linkChunk
+ * param prev : Pointer to the previous chunk after which the new chunk will be added
+ * param add : Pointer to the chunk to add
+ *
+ * Add a chunk to the memory list
+ *
+ */
+/***************************************************************************/
+PUBLIC void linkChunk(t_cm_allocator_desc* alloc, t_cm_chunk* prev, t_cm_chunk* add)
+{
+ // Link previous
+ if(prev == 0)
+ {
+ add->next = alloc->chunks;
+ alloc->chunks = add;
+ }
+ else
+ {
+ add->prev = prev;
+ add->next = prev->next;
+ prev->next = add;
+ }
+
+ // Link next
+ if(add->next == 0)
+ {
+ // Link at the end
+ alloc->lastChunk = add;
+ }
+ else
+ add->next->prev = add;
+}
+
+/***************************************************************************/
+/*
+ * unlinkChunk
+ * param allocHandle : Allocator handle
+ * param current : Pointer to the chunk to remove
+ *
+ * Remove a chunk from the memory list and update the first/last pointers
+ *
+ */
+/***************************************************************************/
+PUBLIC void unlinkChunk(t_cm_allocator_desc* alloc, t_cm_chunk* current)
+{
+ /* Link previous with next */
+ if (current->prev != 0)
+ current->prev->next = current->next;
+ else
+ {
+ CM_ASSERT(alloc->chunks == current);
+
+ // We remove the first, update chunks
+ alloc->chunks = current->next;
+ }
+
+ /* Link next with previous */
+ if(current->next != 0)
+ current->next->prev= current->prev;
+ else
+ {
+ CM_ASSERT(alloc->lastChunk == current);
+
+ // We remove the last, update lastChunk
+ alloc->lastChunk = current->prev;
+ }
+}
+
+
+/***************************************************************************/
+/*
+ * unlinkFreeMem() unlinks a chunk from the free memory double-linked list:
+ * it makes the previous and next chunks in the list point to each other.
+ * param allocHandle : Allocator handle
+ * param current : Pointer to the chunk to remove
+ *
+ * Remove a chunk from the free memory list and update the list head
+ *
+ */
+/***************************************************************************/
+PUBLIC void unlinkFreeMem(t_cm_allocator_desc* alloc ,t_cm_chunk* current)
+{
+ int bin = bin_index(current->size);
+
+ /* unlink previous */
+ if (current->prev_free_mem != 0)
+ {
+ current->prev_free_mem->next_free_mem = current->next_free_mem;
+ }
+
+ /* Unlink next */
+ if (current->next_free_mem !=0 )
+ {
+ current->next_free_mem->prev_free_mem = current->prev_free_mem;
+ }
+
+ /* update first free pointer */
+ if (alloc->free_mem_chunks[bin] == current)
+ {
+ alloc->free_mem_chunks[bin] = current->next_free_mem;
+ }
+
+ current->prev_free_mem = 0;
+ current->next_free_mem = 0;
+}
+
+/***************************************************************************/
+/*
+ * linkFreeMemBefore
+ * param add : Pointer to the chunk to add
+ * param next : Pointer to the chunk before which the new chunk will be added
+ *
+ * Add a chunk to the free memory list
+ *
+ */
+/***************************************************************************/
+PUBLIC void linkFreeMemBefore(t_cm_chunk* add, t_cm_chunk* next)
+{
+ /* Link next */
+ add->prev_free_mem = next->prev_free_mem;
+ add->next_free_mem = next;
+
+ /* Link previous */
+ if (next->prev_free_mem != 0)
+ {
+ next->prev_free_mem->next_free_mem = add;
+ }
+ next->prev_free_mem = add;
+}
+
+/***************************************************************************/
+/*
+ * linkFreeMemAfter
+ * param prev : Pointer to the chunk after which the new chunk will be added
+ * param add : Pointer to the chunk to add
+ *
+ * Add a chunk to the free memory list
+ *
+ */
+/***************************************************************************/
+PUBLIC void linkFreeMemAfter(t_cm_chunk* prev,t_cm_chunk* add)
+{
+ /* Link previous */
+ add->prev_free_mem = prev;
+ add->next_free_mem = prev->next_free_mem;
+
+ /* Link next */
+ if (prev->next_free_mem != 0)
+ {
+ prev->next_free_mem->prev_free_mem = add;
+ }
+ prev->next_free_mem = add;
+}
+
+
+/***************************************************************************/
+/*
+ * updateFreeList
+ * param alloc : Allocator descriptor
+ * param chunk : Pointer to the chunk to insert
+ *
+ * Update the free memory list, which is ordered by increasing size
+ *
+ */
+/***************************************************************************/
+PUBLIC void updateFreeList(t_cm_allocator_desc* alloc , t_cm_chunk* chunk)
+{
+ t_cm_chunk* free_chunk;
+ int bin = bin_index(chunk->size);
+
+ /* check case with no more free block */
+ if (alloc->free_mem_chunks[bin] == 0)
+ {
+ alloc->free_mem_chunks[bin] = chunk;
+ return ;
+ }
+
+ /* order list */
+ free_chunk = alloc->free_mem_chunks[bin];
+ while ((free_chunk->next_free_mem != 0) && (chunk->size > free_chunk->size))
+ {
+ free_chunk = free_chunk->next_free_mem;
+ }
+
+ /* Add after the found free chunk if it is smaller or equal -> we may be the last */
+ if(free_chunk->size <= chunk->size)
+ {
+ linkFreeMemAfter(free_chunk,chunk);
+ }
+ else // This means that we are smaller
+ {
+ linkFreeMemBefore(chunk,free_chunk);
+
+ /* Update first free chunk */
+ if (alloc->free_mem_chunks[bin] == free_chunk)
+ {
+ alloc->free_mem_chunks[bin] = chunk;
+ }
+ }
+}
+
+
+/***************************************************************************/
+/*
+ * splitChunk
+ * param alloc : Allocator descriptor
+ * param chunk : Current chunk (modified in place)
+ * param offset : Offset address where the chunk is split
+ * return : New chunk handle or 0 if an error occurs
+ *
+ * Create a new chunk before or after the current chunk at the given offset
+ */
+/***************************************************************************/
+PUBLIC t_cm_chunk* splitChunk(t_cm_allocator_desc* alloc ,t_cm_chunk *chunk,
+ t_uint32 offset, t_mem_split_position position)
+{
+ t_cm_chunk *free;
+ t_cm_chunk *returned;
+
+ t_cm_chunk* new_chunk = allocChunk();
+
+ if (position == FREE_CHUNK_AFTER) {
+ returned = chunk;
+ free = new_chunk;
+ } else { //FREE_CHUNK_BEFORE
+ returned = new_chunk;
+ free = chunk;
+ }
+
+ new_chunk->offset = offset;
+ new_chunk->size = chunk->offset + chunk->size - offset;
+ new_chunk->alloc = alloc;
+ chunk->size = offset - chunk->offset;
+
+ linkChunk(alloc, chunk, new_chunk);
+ unlinkFreeMem(alloc, free);
+ updateFreeList(alloc, free);
+
+ return returned;
+}
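For illustration, the size-ordered insertion that updateFreeList() performs per bin can be sketched standalone. The reduced chunk type and the bucket() helper below are assumptions made only for this example; they do not match the real t_cm_chunk layout or the bin_index() implementation used by the allocator.

/* Minimal standalone sketch (not driver code): size-ordered singly linked free list per bin. */
#include <stdio.h>
#include <stdlib.h>

struct chunk {                     /* hypothetical, reduced t_cm_chunk */
    unsigned size;
    struct chunk *next_free;
};

#define NBINS 4
static struct chunk *free_bins[NBINS];

static int bucket(unsigned size)   /* stand-in for bin_index() */
{
    return (size > 4096) ? 3 : (size > 1024) ? 2 : (size > 256) ? 1 : 0;
}

/* Insert so each bin list stays ordered by increasing size, as updateFreeList() does. */
static void free_list_insert(struct chunk *c)
{
    int bin = bucket(c->size);
    struct chunk **pp = &free_bins[bin];

    while (*pp && (*pp)->size < c->size)
        pp = &(*pp)->next_free;
    c->next_free = *pp;
    *pp = c;
}

int main(void)
{
    unsigned sizes[] = { 300, 128, 2000, 512 };
    for (unsigned i = 0; i < 4; i++) {
        struct chunk *c = malloc(sizeof(*c));
        if (c == NULL)
            return 1;
        c->size = sizes[i];
        free_list_insert(c);
    }
    for (int b = 0; b < NBINS; b++)
        for (struct chunk *c = free_bins[b]; c; c = c->next_free)
            printf("bin %d: size %u\n", b, c->size);
    return 0;
}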
diff --git a/drivers/staging/nmf-cm/cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h b/drivers/staging/nmf-cm/cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h
new file mode 100644
index 00000000000..c9ec864795f
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h
@@ -0,0 +1,498 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \brief OS Adaptation Layer API
+ *
+ * \defgroup CM_ENGINE_OSAL_API CM Engine OSAL (Operating System Abstraction Layer) API
+ * \ingroup CM_ENGINE_MODULE
+ */
+#ifndef __INC_CM_OSAL_H
+#define __INC_CM_OSAL_H
+
+#include <cm/inc/cm_type.h>
+#include <cm/engine/communication/inc/communication_type.h>
+#include <cm/engine/component/inc/instance.h>
+
+/*!
+ * \brief Identifier of a trace channel (id in [0..255])
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+typedef t_uint8 t_nmf_trace_channel;
+
+/*!
+ * \brief Identifier of a lock created by the OSAL
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+typedef t_uint32 t_nmf_osal_sync_handle;
+
+/*!
+ * \brief Identifier of a semaphore created by the OSAL
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+typedef t_uint32 t_nmf_osal_sem_handle;
+
+/*!
+ * \brief Error type returned by the semaphore wait OSAL API
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+typedef t_uint8 t_nmf_osal_sync_error;
+#define SYNC_ERROR_TIMEOUT ((t_nmf_osal_sync_error)-1)
+#define SYNC_OK ((t_nmf_osal_sync_error)0)
+#define SEM_TIMEOUT_NORMAL 3000
+#define SEM_TIMEOUT_DEBUG 300000
+
+/*!
+ * \brief Operations used to support additional OS-specific debug features
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+struct osal_debug_operations {
+ void (*component_create)(t_component_instance *component);
+ void (*component_destroy)(t_component_instance *component);
+ void (*domain_create)(t_cm_domain_id id);
+ void (*domain_destroy)(t_cm_domain_id id);
+};
+
+extern struct osal_debug_operations osal_debug_ops;
+
+/*!
+ * \brief Description of the Scheduling part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Support of uplink communication path (from Media Processors to Host (ARM))
+ *
+ * Post a function call outside of Host CPU Interrupt mode in order to minimize ISR execution time
+ * \param[in] upLayerTHIS : this one provided by user when calling CM_ENGINE_BindComponentToCMCore() (first field of the interface context) \n
+ * \param[in] methodIndex : index method to be called \n
+ * \param[in] anyPtr : internal NMF marshaled parameters block (to be passed as second parameter when calling the previous pSkeleton method) \n
+ * \param[in] ptrSize : size of anyPtr in bytes \n
+ *
+ * Called by:
+ * - CM_ProcessMpcEvent() call (shall be bound by OS integrator to HSEM IRQ)
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+
+PUBLIC void OSAL_PostDfc(
+ t_nmf_mpc2host_handle upLayerTHIS,
+ t_uint32 methodIndex,
+ t_event_params_handle anyPtr,
+ t_uint32 ptrSize);
+
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Used by the CM to protect global variables against concurrent calls. Interrupt and scheduler functions are used
+ * when taking a hardware/local semaphore. Scheduler lock functions can have an empty implementation, but this may
+ * impact performance (a DSP waiting on a semaphore because the host thread was preempted after taking the
+ * semaphore but before releasing it).
+ *
+ * \return handle of the Mutex created
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC t_nmf_osal_sync_handle OSAL_CreateLock(void);
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Used by the CM to protect global variables against concurrent calls. Interrupt and scheduler functions are used
+ * when taking a hardware/local semaphore. Scheduler lock functions can have an empty implementation, but this may
+ * impact performance (a DSP waiting on a semaphore because the host thread was preempted after taking the
+ * semaphore but before releasing it).
+ *
+ * \param[in] handle handle of the Mutex to be locked
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_Lock(
+ t_nmf_osal_sync_handle handle);
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Used by the CM to protect global variables against concurrent calls. Interrupt and scheduler functions are used
+ * when taking a hardware/local semaphore. Scheduler lock functions can have an empty implementation, but this may
+ * impact performance (a DSP waiting on a semaphore because the host thread was preempted after taking the
+ * semaphore but before releasing it).
+ *
+ * \param[in] handle handle of the Mutex to be unlocked
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_Unlock(
+ t_nmf_osal_sync_handle handle);
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Used by the CM to protect global variables against concurrent calls. Interrupt and scheduler functions are used
+ * when taking a hardware/local semaphore. Scheduler lock functions can have an empty implementation, but this may
+ * impact performance (a DSP waiting on a semaphore because the host thread was preempted after taking the
+ * semaphore but before releasing it).
+ *
+ * \param[in] handle handle of the Mutex to be destroyed
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_DestroyLock(
+ t_nmf_osal_sync_handle handle);
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Used by the CM to synchronize with code running on the MPC side.
+ *
+ * \param[in] value : Initial value of semaphore.
+ *
+ * \return handle of the Semaphore created
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC t_nmf_osal_sem_handle OSAL_CreateSemaphore(
+ t_uint32 value);
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Used by the CM to synchronize with code running on the MPC side. This function can be called under
+ *              IRQ context by the CM.
+ *
+ * \param[in] handle handle of the Semaphore whose value is increased, potentially waking up a waiting thread.
+ *
+ * \param[in] aCtx is a hint indicating to the OS that we are not in a normal context (e.g. under interrupt).
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_SemaphorePost(
+ t_nmf_osal_sem_handle handle,
+ t_uint8 aCtx);
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Used by the CM to synchronize with code running on the MPC side.
+ *
+ * \param[in] handle handle of the Semaphore whose value is decreased, potentially blocking the current thread.
+ *
+ * \param[in] timeOutInMs maximum time in ms after which the blocked thread is woken up. In this case the function returns SYNC_ERROR_TIMEOUT.
+ *
+ * \return error code: SYNC_ERROR_TIMEOUT if the semaphore is not released within timeOutInMs.
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC t_nmf_osal_sync_error OSAL_SemaphoreWaitTimed(
+ t_nmf_osal_sem_handle handle,
+ t_uint32 timeOutInMs);
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Used by the CM to synchronize with code running on the MPC side.
+ *
+ * \param[in] handle handle of the Semaphore to be destroyed
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_DestroySemaphore(
+ t_nmf_osal_sem_handle handle);
+
+/*!
+ * \brief Description of the System Memory Allocator part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Allocate some cacheable and bufferable memory (SDRAM) for CM internal usage \n
+ * This memory will be accessed only by Host CPU (ARM)
+ *
+ * This function provides a simple, general-purpose memory allocation. The
+ * OSAL_Alloc macro returns a pointer to a block of at least size bytes
+ * suitably aligned for any use. If there is no available memory, this
+ * function returns a null pointer.
+ *
+ * \param[in] size size, in bytes, of the memory to be allocated
+ * \return pointer to the beginning of the allocated memory
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void* OSAL_Alloc(
+ t_cm_size size);
+
+/*!
+ * \brief Description of the System Memory Allocator part of the OS Adaptation Layer, with memory set to zero
+ *
+ * Compared to \see OSAL_Alloc, the same allocation is done but the memory is zeroed before returning.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void* OSAL_Alloc_Zero(
+ t_cm_size size);
+
+/*!
+ * \brief Description of the System Memory Allocator part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Free some cacheable and bufferable memory (SDRAM) previously allocated for CM internal usage \n
+ * This memory will be accessed only by Host CPU (ARM)
+ *
+ * \param[in] pHandle pointer to the beginning of the memory previously allocated
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_Free(
+ void *pHandle);
+
+/*!
+ * \brief Clean the data cache range in DDR so that it is accessible from peripherals.
+ *
+ * This method must be synchronized with MMDSP Code cache attribute.
+ * Strongly Ordered -> nothing
+ * Shared device -> dsb + L2 Write buffer drain
+ * Non cacheable, Bufferable -> dsb + L2 Write buffer drain
+ * WT or WB -> Flush cache range + dsb + L2 Write buffer drain
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_CleanDCache(
+ t_uint32 startAddr, //!< [in] Start data address of range to clean
+ t_uint32 Size //!< [in] Size of range to clean
+ );
+
+/*!
+ * \brief Flush write buffer.
+ *
+ * This method must be synchronized with MMDSP Data cache attribute.
+ * Strongly Ordered -> nothing
+ * Shared device -> dsb + L2 Write buffer drain
+ * Non cacheable, Bufferable -> dsb + L2 Write buffer drain
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_mb(void);
+
+/*!
+ * \brief Description of the System Memory part of the OS Adaptation Layer
+ *
+ * <B>Goal:</B> Copy some cacheable and bufferable memory (SDRAM) provided by a client to\n
+ * internal memory.
+ *
+ * \param[in] dst : pointer to the beginning of the internal memory previously allocated
+ * \param[in] src : pointer to the beginning of the client's memory
+ * \param[in] size : size of the data to copy
+ *
+ * Called by:
+ * - CM_ENGINE_PushComponent()
+ *
+ * \note This API is mainly provided for OSes where the client application does not execute in the same
+ *       address space as the CM.
+ *       For example in Linux or Symbian, the client's address space is userland but the CM executes in
+ *       kernel space. Thus, 'dst' is supposed to be a kernel address whereas 'src' is supposed to be a
+ *       user space address.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC t_cm_error OSAL_Copy(
+ void *dst,
+ const void *src,
+ t_cm_size size);
+
+/*!
+ * \brief Description of the internal log traces configuration of the Component Manager
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_Log(
+ const char *format,
+ int param1,
+ int param2,
+ int param3,
+ int param4,
+ int param5,
+ int param6);
+
+/*!
+ * \brief Generate an OS panic. Called from CM_ASSERT().
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_Panic(void);
+
+/*!
+ * \brief Description of the configuration of the trace features
+ *
+ * (the trace output itself is provided by the user through their custom implementation of the generic APIs)
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_Write64(
+ t_nmf_trace_channel channel,
+ t_uint8 isTimestamped,
+ t_uint64 value);
+
+/*!
+ * \brief Power enabling/disabling commands description.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+typedef enum
+{
+ CM_OSAL_POWER_SxA_CLOCK, //!< SxA Power & Clock, firstParam contains Core ID
+ CM_OSAL_POWER_SxA_AUTOIDLE, //!< SxA AutoIdle, firstParam contains Core ID
+ CM_OSAL_POWER_SxA_HARDWARE, //!< SxA Hardware Power, firstParam contains Core ID
+ CM_OSAL_POWER_HSEM, //!< HSEM Power
+ CM_OSAL_POWER_SDRAM, //!< SDRAM memory, firstParam contains physical resource address, secondParam contains size
+ CM_OSAL_POWER_ESRAM //!< ESRAM memory, firstParam contains physical resource address, secondParam contains size
+} t_nmf_power_resource;
+
+/*!
+ * \brief Description of the Power Management part of the OS Adaptation Layer
+ *
+ * Used by the CM engine to disable a logical power domain (see \ref t_nmf_power_resource)
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_DisablePwrRessource(
+ t_nmf_power_resource resource, //!< [in] Describes the domain which must be disabled
+ t_uint32 firstParam, //!< [in] Optional first parameter of the power resource to disable
+ t_uint32 secondParam //!< [in] Optional second parameter of the power resource to disable
+ );
+
+/*!
+ * \brief Description of the Power Management part of the OS Adaptation Layer
+ *
+ * Used by the CM engine to enable a logical power domain (see \ref t_nmf_power_resource)
+ *
+ * \return
+ * - \ref CM_OK
+ * - \ref CM_PWR_NOT_AVAILABLE A specified power domain is not managed (see returned value in aPowerMask)
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC t_cm_error OSAL_EnablePwrRessource(
+ t_nmf_power_resource resource, //!< [in] Describes the domains which must be enabled
+ t_uint32 firstParam, //!< [in] Optional first parameter of the power resource to enable
+ t_uint32 secondParam //!< [in] Optional second parameter of the power resource to enable
+ );
+
+
+/*!
+ * \brief Return the PRCMU timer value.
+ *
+ * This is needed by the perfmeter API (see \ref t_nmf_power_resource)
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC t_uint64 OSAL_GetPrcmuTimer(void);
+
+/*!
+ * \brief Disable the service message handling (panic, etc.)
+ *
+ * It must disable the handling of all service messages.
+ * If a service message is currently being handled, it must wait until the end
+ * of its management before returning.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_DisableServiceMessages(void);
+
+/*!
+ * \brief Enable the service message handling (panic, etc.)
+ *
+ * It enables the handling of all service messages.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_EnableServiceMessages(void);
+
+/*!
+ * \brief Generate a 'software' panic due to a DSP crash
+ *
+ * We request that the OS part generates a panic to notify CM users
+ * that a problem occurred but no DSP panic has been sent (for example
+ * on a DSP crash).
+ *
+ * \param[in] coreId : id of the DSP for which we need to generate a panic.
+ * \param[in] reason : additional information. Today only 0 is valid.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_GeneratePanic(t_nmf_core_id coreId, t_uint32 reason);
+
+extern /*const*/ t_nmf_osal_sync_handle lockHandleApi;
+extern /*const*/ t_nmf_osal_sync_handle lockHandleCom;
+extern /*const*/ t_nmf_osal_sem_handle semHandle;
+
+/*!
+ * \brief Take a lock before entering a critical section. Can suspend the current thread if the lock is already taken. \n
+ *        Use this macro in API functions. For com functions use OSAL_LOCK_COM.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+#define OSAL_LOCK_API() OSAL_Lock(lockHandleApi)
+
+/*!
+ * \brief Release lock before leaving critical section.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+#define OSAL_UNLOCK_API() OSAL_Unlock((lockHandleApi))
+
+/*!
+ * \brief Take a lock before entering a critical section. Can suspend the current thread if the lock is already taken. \n
+ *        Use this macro in com functions. For API functions use OSAL_LOCK_API.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+#define OSAL_LOCK_COM() OSAL_Lock(lockHandleCom)
+
+/*!
+ * \brief Release lock before leaving critical section.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+#define OSAL_UNLOCK_COM() OSAL_Unlock((lockHandleCom))
+
+/*!
+ * \brief Go to sleep until a post is done on the semaphore or the timeout expires. In the latter case SYNC_ERROR_TIMEOUT is returned.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+#define OSAL_SEMAPHORE_WAIT_TIMEOUT(semHandle) OSAL_SemaphoreWaitTimed(semHandle, (cm_PWR_GetMode() == NORMAL_PWR_MODE)?SEM_TIMEOUT_NORMAL:SEM_TIMEOUT_DEBUG)
+
+/****************/
+/* Generic part */
+/****************/
+t_cm_error cm_OSAL_Init(void);
+void cm_OSAL_Destroy(void);
+
+#endif /* __INC_CM_OSAL_H */
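As one possible reading of the lock contract above, a Linux-flavoured OSAL could back the lock calls with kernel mutexes. This is only a hedged sketch under the assumption that the handle type is wide enough to carry a pointer on the target platform; it is not the OSAL integration actually shipped with this driver.

#include <linux/mutex.h>
#include <linux/slab.h>
#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>

/* Sketch only: one plausible Linux backing for the lock half of the OSAL. */
PUBLIC t_nmf_osal_sync_handle OSAL_CreateLock(void)
{
    struct mutex *m = kmalloc(sizeof(*m), GFP_KERNEL);

    if (m == NULL)
        return 0;                      /* 0 is treated as a failure by cm_OSAL_Init() */
    mutex_init(m);
    /* Assumes the handle can carry a pointer on the (32-bit) target platform. */
    return (t_nmf_osal_sync_handle)(unsigned long)m;
}

PUBLIC void OSAL_Lock(t_nmf_osal_sync_handle handle)
{
    mutex_lock((struct mutex *)(unsigned long)handle);
}

PUBLIC void OSAL_Unlock(t_nmf_osal_sync_handle handle)
{
    mutex_unlock((struct mutex *)(unsigned long)handle);
}

PUBLIC void OSAL_DestroyLock(t_nmf_osal_sync_handle handle)
{
    kfree((struct mutex *)(unsigned long)handle);
}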
diff --git a/drivers/staging/nmf-cm/cm/engine/os_adaptation_layer/src/os_adaptation_layer.c b/drivers/staging/nmf-cm/cm/engine/os_adaptation_layer/src/os_adaptation_layer.c
new file mode 100644
index 00000000000..380692e3cd8
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/os_adaptation_layer/src/os_adaptation_layer.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+#include <cm/engine/utils/inc/mem.h>
+
+t_nmf_osal_sync_handle lockHandleApi;
+t_nmf_osal_sync_handle lockHandleCom;
+t_nmf_osal_sem_handle semHandle;
+struct osal_debug_operations osal_debug_ops;
+
+/****************/
+/* Generic part */
+/****************/
+PUBLIC t_cm_error cm_OSAL_Init(void)
+{
+
+ /* create locks */
+ lockHandleApi = OSAL_CreateLock();
+ if (lockHandleApi == 0) {return CM_INVALID_PARAMETER;}
+ lockHandleCom = OSAL_CreateLock();
+ if (lockHandleCom == 0) {return CM_INVALID_PARAMETER;}
+
+ /* create semaphore */
+ semHandle = OSAL_CreateSemaphore(0);
+ if (semHandle == 0) {return CM_INVALID_PARAMETER;}
+
+ /* init to zero */
+ cm_MemSet(&osal_debug_ops, 0, sizeof(osal_debug_ops));
+
+ return CM_OK;
+}
+
+PUBLIC void cm_OSAL_Destroy(void)
+{
+ /* destroy locks */
+ OSAL_DestroyLock(lockHandleApi);
+ OSAL_DestroyLock(lockHandleCom);
+
+ /* destroy semaphore */
+ OSAL_DestroySemaphore(semHandle);
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/perfmeter/inc/mpcload.h b/drivers/staging/nmf-cm/cm/engine/perfmeter/inc/mpcload.h
new file mode 100644
index 00000000000..0831f1940ca
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/perfmeter/inc/mpcload.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ */
+#ifndef MPCLOAD_H_
+#define MPCLOAD_H_
+
+#include <cm/engine/component/inc/instance.h>
+
+/******************************************************************************/
+/************************ FUNCTIONS PROTOTYPES ********************************/
+/******************************************************************************/
+
+PUBLIC t_cm_error cm_PFM_allocatePerfmeterDataMemory(t_nmf_core_id coreId, t_cm_domain_id domainId);
+PUBLIC void cm_PFM_deallocatePerfmeterDataMemory(t_nmf_core_id coreId);
+
+#endif /* MPCLOAD_H_ */
diff --git a/drivers/staging/nmf-cm/cm/engine/perfmeter/inc/perfmeter_type.h b/drivers/staging/nmf-cm/cm/engine/perfmeter/inc/perfmeter_type.h
new file mode 100644
index 00000000000..8733c20b21b
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/perfmeter/inc/perfmeter_type.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Public Component Manager Performance Meter API type.
+ *
+ * This file contains the Component Manager API type for performance meter.
+ *
+ * \defgroup PERFMETER CM Monitoring API
+ * \ingroup CM_USER_API
+ */
+#ifndef CM_COMMON_PERFMETER_TYPE_H_
+#define CM_COMMON_PERFMETER_TYPE_H_
+
+#include <cm/inc/cm_type.h>
+/*!
+ * \brief Description of mpc load structure.
+ *
+ * This contain mpc load value.
+ *
+ * \ingroup PERFMETER
+ */
+typedef struct {
+ t_uint64 totalCounter;
+ t_uint64 loadCounter;
+} t_cm_mpc_load_counter;
+
+
+#endif /* CM_COMMON_PERFMETER_TYPE_H_ */
diff --git a/drivers/staging/nmf-cm/cm/engine/perfmeter/src/mpcload.c b/drivers/staging/nmf-cm/cm/engine/perfmeter/src/mpcload.c
new file mode 100644
index 00000000000..193d155b97b
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/perfmeter/src/mpcload.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/engine/perfmeter/inc/mpcload.h>
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+
+#include <cm/engine/api/perfmeter_engine.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+
+#include <cm/engine/trace/inc/trace.h>
+
+#define PERFMETER_MAX_RETRIES 32
+#define PERFMETER_DATA_WORD_NB 7
+
+/* private type */
+typedef struct {
+ t_memory_handle perfmeterDataHandle;
+ t_cm_logical_address perfmeterDataAddr;
+} t_mpcLoad;
+
+/* private globals */
+t_mpcLoad mpcLoad_i[NB_CORE_IDS];
+
+/* engine api */
+PUBLIC EXPORT_SHARED t_cm_error CM_GetMpcLoadCounter(
+ t_nmf_core_id coreId,
+ t_cm_mpc_load_counter *pMpcLoadCounter
+)
+{
+ t_uint24 data[PERFMETER_DATA_WORD_NB];
+ t_uint32 i;
+ t_uint64 prcmuBeforeAttributes;
+ t_uint32 retryCounter = 0;
+ volatile t_uint32 *pData;
+
+ pMpcLoadCounter->totalCounter = 0;
+ pMpcLoadCounter->loadCounter = 0;
+ /* check core id is an mpc */
+ if (coreId < FIRST_MPC_ID || coreId > LAST_CORE_ID) {return CM_INVALID_PARAMETER;}
+
+ /* check core has been booted */
+ pData = (t_uint32 *) mpcLoad_i[coreId].perfmeterDataAddr;
+ if (pData == NULL) {return CM_OK;}
+
+ do {
+ prcmuBeforeAttributes = OSAL_GetPrcmuTimer();
+ /* get attributes */
+ do
+ {
+ for(i = 0;i < PERFMETER_DATA_WORD_NB;i++)
+ data[i] = pData[i];
+ }
+ while(((data[0] & 0xff0000) != (data[1] & 0xff0000) || (data[0] & 0xff0000) != (data[2] & 0xff0000) ||
+ (data[0] & 0xff0000) != (data[3] & 0xff0000) || (data[0] & 0xff0000) != (data[4] & 0xff0000) ||
+ (data[0] & 0xff0000) != (data[5] & 0xff0000) || (data[0] & 0xff0000) != (data[6] & 0xff0000))
+ && retryCounter++ < PERFMETER_MAX_RETRIES); // check data coherence, bounded by PERFMETER_MAX_RETRIES
+ if (retryCounter >= PERFMETER_MAX_RETRIES)
+ return CM_MPC_NOT_RESPONDING;
+
+ /* read forever counter for totalCounter */
+ pMpcLoadCounter->totalCounter = OSAL_GetPrcmuTimer();
+ } while(pMpcLoadCounter->totalCounter - prcmuBeforeAttributes >= 32); // loop until it seems we have not been preempted for too long (< 1 ms)
+
+ /* we got coherent data, use them */
+ pMpcLoadCounter->loadCounter = ((data[0] & (t_uint64)0xffff) << 32) + ((data[1] & (t_uint64)0xffff) << 16) + ((data[2] & (t_uint64)0xffff) << 0);
+ //fix load counter if needed
+ if ((data[6] & 0xffff) == 1) {
+ t_uint64 lastEvent;
+
+ lastEvent = ((data[3] & (t_uint64)0xffff) << 32) + ((data[4] & (t_uint64)0xffff) << 16) + ((data[5] & (t_uint64)0xffff) << 0);
+ pMpcLoadCounter->loadCounter += pMpcLoadCounter->totalCounter - lastEvent;
+ }
+
+ return CM_OK;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_getMpcLoadCounter(
+ t_nmf_core_id coreId,
+ t_cm_mpc_load_counter *pMpcLoadCounter
+)
+{
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+ error = CM_GetMpcLoadCounter(coreId, pMpcLoadCounter);
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+/* internal api */
+PUBLIC t_cm_error cm_PFM_allocatePerfmeterDataMemory(t_nmf_core_id coreId, t_cm_domain_id domainId)
+{
+ t_cm_error error = CM_OK;
+ t_mpcLoad *pMpcLoad = (t_mpcLoad *) &mpcLoad_i[coreId];
+
+ pMpcLoad->perfmeterDataHandle = cm_DM_Alloc(domainId, SDRAM_EXT24, PERFMETER_DATA_WORD_NB, CM_MM_ALIGN_WORD, TRUE);
+ if (pMpcLoad->perfmeterDataHandle == INVALID_MEMORY_HANDLE) {
+ error = CM_NO_MORE_MEMORY;
+ ERROR("CM_NO_MORE_MEMORY: Unable to allocate perfmeter\n", 0, 0, 0, 0, 0, 0);
+ } else {
+ t_uint32 mmdspAddr;
+
+ pMpcLoad->perfmeterDataAddr = cm_DSP_GetHostLogicalAddress(pMpcLoad->perfmeterDataHandle);
+ cm_DSP_GetDspAddress(pMpcLoad->perfmeterDataHandle, &mmdspAddr);
+ cm_writeAttribute(cm_EEM_getExecutiveEngine(coreId)->instance, "rtos/perfmeter/perfmeterDataAddr", mmdspAddr);
+ }
+
+ return error;
+}
+
+PUBLIC void cm_PFM_deallocatePerfmeterDataMemory(t_nmf_core_id coreId)
+{
+ mpcLoad_i[coreId].perfmeterDataAddr = 0;
+ cm_DM_Free(mpcLoad_i[coreId].perfmeterDataHandle, TRUE);
+}
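Callers typically turn two successive CM_GetMpcLoadCounter() snapshots into a load figure; the delta computation below is an illustrative assumption about how totalCounter/loadCounter are meant to be consumed, not part of the driver, and a kernel build on 32-bit ARM would use do_div() instead of a plain 64-bit division.

#include <cm/engine/perfmeter/inc/perfmeter_type.h>

/* Sketch: turning two CM_GetMpcLoadCounter() snapshots into a percentage.
 * Assumes both counters advance monotonically between the two calls. */
static t_uint32 mpc_load_percent(const t_cm_mpc_load_counter *before,
                                 const t_cm_mpc_load_counter *after)
{
    t_uint64 total = after->totalCounter - before->totalCounter;
    t_uint64 busy  = after->loadCounter  - before->loadCounter;

    if (total == 0)
        return 0;                 /* no time elapsed between the snapshots */
    if (busy > total)
        busy = total;             /* clamp against counter jitter */
    return (t_uint32)((busy * 100) / total);
}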
diff --git a/drivers/staging/nmf-cm/cm/engine/power_mgt/inc/power.h b/drivers/staging/nmf-cm/cm/engine/power_mgt/inc/power.h
new file mode 100644
index 00000000000..942805df2f3
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/power_mgt/inc/power.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Enable a CM power domain by CoreID.
+ *
+ * \ingroup COMPONENT_INTERNAL
+ */
+#ifndef __INC_NMF_POWER
+#define __INC_NMF_POWER
+
+#include <cm/inc/cm_type.h>
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/dsp/inc/dsp.h>
+
+typedef enum
+{
+ DISABLE_PWR_MODE = 0x0, //!< Disable mode - CM Power management is disabled. CM Power domains are always enabled and the EEs are loaded by default
+ NORMAL_PWR_MODE = 0x1 //!< Normal mode
+} t_nmf_power_mode;
+
+extern t_nmf_power_mode powerMode;
+
+PUBLIC t_cm_error cm_PWR_Init(void);
+void cm_PWR_SetMode(t_nmf_power_mode aMode);
+t_nmf_power_mode cm_PWR_GetMode(void);
+t_uint32 cm_PWR_GetMPCMemoryCount(t_nmf_core_id coreId);
+
+typedef enum
+{
+ MPC_PWR_CLOCK,
+ MPC_PWR_AUTOIDLE,
+ MPC_PWR_HWIP
+} t_mpc_power_request;
+
+PUBLIC t_cm_error cm_PWR_EnableMPC(
+ t_mpc_power_request request,
+ t_nmf_core_id coreId);
+PUBLIC void cm_PWR_DisableMPC(
+ t_mpc_power_request request,
+ t_nmf_core_id coreId);
+
+PUBLIC t_cm_error cm_PWR_EnableHSEM(void);
+PUBLIC void cm_PWR_DisableHSEM(void);
+
+PUBLIC t_cm_error cm_PWR_EnableMemory(
+ t_nmf_core_id coreId,
+ t_dsp_memory_type_id dspMemType,
+ t_cm_physical_address address,
+ t_cm_size size);
+PUBLIC void cm_PWR_DisableMemory(
+ t_nmf_core_id coreId,
+ t_dsp_memory_type_id dspMemType,
+ t_cm_physical_address address,
+ t_cm_size size);
+
+
+#endif /* __INC_NMF_POWER */
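The MPC_PWR_HWIP request is reference counted per core, so enable and disable calls are expected to be strictly paired; a hedged usage sketch follows, in which with_hw_ip() is a hypothetical caller and not part of the CM engine.

#include "../inc/power.h"

/* Sketch: expected pairing of HW IP power requests around an access window. */
static t_cm_error with_hw_ip(t_nmf_core_id coreId)
{
    t_cm_error error = cm_PWR_EnableMPC(MPC_PWR_HWIP, coreId);

    if (error != CM_OK)
        return error;                            /* propagate the failure */

    /* ... access the hardware IP owned by this MPC here ... */

    cm_PWR_DisableMPC(MPC_PWR_HWIP, coreId);     /* drop the reference taken above */
    return CM_OK;
}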
diff --git a/drivers/staging/nmf-cm/cm/engine/power_mgt/src/cmpower.c b/drivers/staging/nmf-cm/cm/engine/power_mgt/src/cmpower.c
new file mode 100644
index 00000000000..a104486db6c
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/power_mgt/src/cmpower.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include "../inc/power.h"
+
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/utils/inc/convert.h>
+#include <cm/engine/dsp/inc/dsp.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+
+// -------------------------------------------------------------------------------
+// Compilation flags
+// -------------------------------------------------------------------------------
+#define __PWR_DEBUG_TRACE_LEVEL 2 // Debug trace level for CM power module
+
+// -------------------------------------------------------------------------------
+// Internal counter to store the TCM allocated chunk (by MPC)
+// -------------------------------------------------------------------------------
+static t_uint32 _pwrMPCHWIPCountT[NB_CORE_IDS];
+
+// -------------------------------------------------------------------------------
+// Internal counter to store the TCM allocated chunk (by MPC)
+// -------------------------------------------------------------------------------
+static t_uint32 _pwrMPCMemoryCountT[NB_CORE_IDS];
+
+
+// -------------------------------------------------------------------------------
+// Internal data to store the global Power Manager mode (see cm_PWR_Init fct)
+// -------------------------------------------------------------------------------
+t_nmf_power_mode powerMode = NORMAL_PWR_MODE;
+
+// -------------------------------------------------------------------------------
+// cm_PWR_Init
+// -------------------------------------------------------------------------------
+PUBLIC t_cm_error cm_PWR_Init(void)
+{
+ int i;
+
+ for (i=0; i<NB_CORE_IDS;i++)
+ {
+ _pwrMPCHWIPCountT[i] = 0;
+ _pwrMPCMemoryCountT[i] = 0;
+ }
+
+ return CM_OK;
+}
+
+// -------------------------------------------------------------------------------
+// cm_PWR_SetMode
+// -------------------------------------------------------------------------------
+void cm_PWR_SetMode(t_nmf_power_mode aMode)
+{
+ powerMode = aMode;
+}
+
+t_nmf_power_mode cm_PWR_GetMode()
+{
+ return powerMode;
+}
+
+t_uint32 cm_PWR_GetMPCMemoryCount(t_nmf_core_id coreId)
+{
+ return _pwrMPCMemoryCountT[coreId];
+}
+
+
+PUBLIC t_cm_error cm_PWR_EnableMPC(
+ t_mpc_power_request request,
+ t_nmf_core_id coreId)
+{
+ t_cm_error error;
+
+ switch(request)
+ {
+ case MPC_PWR_CLOCK:
+ LOG_INTERNAL(__PWR_DEBUG_TRACE_LEVEL, "[Pwr] MPC %s enable clock\n", cm_getDspName(coreId), 0, 0, 0, 0, 0);
+ if((error = OSAL_EnablePwrRessource(CM_OSAL_POWER_SxA_CLOCK, coreId, 0)) != CM_OK)
+ {
+ ERROR("[Pwr] MPC %s clock can't be enabled\n", cm_getDspName(coreId), 0, 0, 0, 0, 0);
+ return error;
+ }
+ break;
+ case MPC_PWR_AUTOIDLE:
+ if((error = OSAL_EnablePwrRessource(CM_OSAL_POWER_SxA_AUTOIDLE, coreId, 0)) != CM_OK)
+ {
+ ERROR("[Pwr] MPC %s clock can't be auto-idle\n", cm_getDspName(coreId), 0, 0, 0, 0, 0);
+ return error;
+ }
+ break;
+ case MPC_PWR_HWIP:
+ if(_pwrMPCHWIPCountT[coreId]++ == 0)
+ {
+ LOG_INTERNAL(__PWR_DEBUG_TRACE_LEVEL, "[Pwr] MPC %s HW IP enable clock\n",cm_getDspName(coreId), 0, 0, 0, 0, 0);
+
+ // The PRCMU does not seem to support switching a HW IP on while the DSP is in retention
+ // -> Thus force a wake-up of the MMDSP before requesting the transition
+ if ((error = cm_EEM_ForceWakeup(coreId)) != CM_OK)
+ return error;
+
+ if((error = OSAL_EnablePwrRessource(CM_OSAL_POWER_SxA_HARDWARE, coreId, 0)) != CM_OK)
+ {
+ ERROR("[Pwr] MPC %s HW IP clock can't be enabled\n", cm_getDspName(coreId), 0, 0, 0, 0, 0);
+ cm_EEM_AllowSleep(coreId);
+ return error;
+ }
+
+ cm_EEM_AllowSleep(coreId);
+ }
+ break;
+ }
+
+ return CM_OK;
+}
+
+PUBLIC void cm_PWR_DisableMPC(
+ t_mpc_power_request request,
+ t_nmf_core_id coreId)
+{
+ switch(request)
+ {
+ case MPC_PWR_CLOCK:
+ LOG_INTERNAL(__PWR_DEBUG_TRACE_LEVEL, "[Pwr] MPC %s disable clock\n",cm_getDspName(coreId), 0, 0, 0, 0, 0);
+ OSAL_DisablePwrRessource(CM_OSAL_POWER_SxA_CLOCK, coreId, 0);
+ break;
+ case MPC_PWR_AUTOIDLE:
+ OSAL_DisablePwrRessource(CM_OSAL_POWER_SxA_AUTOIDLE, coreId, 0);
+ break;
+ case MPC_PWR_HWIP:
+ if(--_pwrMPCHWIPCountT[coreId] == 0)
+ {
+ LOG_INTERNAL(__PWR_DEBUG_TRACE_LEVEL, "[Pwr] MPC %s HW IP disable clock\n",cm_getDspName(coreId), 0, 0, 0, 0, 0);
+
+ // The PRCMU does not seem to support switching a HW IP on while the DSP is in retention
+ // -> Thus force a wake-up of the MMDSP before requesting the transition
+ if (cm_EEM_ForceWakeup(coreId) != CM_OK)
+ return;
+
+ OSAL_DisablePwrRessource(CM_OSAL_POWER_SxA_HARDWARE, coreId, 0);
+
+ cm_EEM_AllowSleep(coreId);
+ }
+ break;
+ }
+}
+
+PUBLIC t_cm_error cm_PWR_EnableHSEM(void)
+{
+ t_cm_error error;
+
+ LOG_INTERNAL(__PWR_DEBUG_TRACE_LEVEL, "[Pwr] HSEM enable clock\n",0 , 0, 0, 0, 0, 0);
+ if((error = OSAL_EnablePwrRessource(CM_OSAL_POWER_HSEM, 0, 0)) != CM_OK)
+ {
+ ERROR("[Pwr] HSEM clock can't be enabled\n", 0, 0, 0, 0, 0, 0);
+ return error;
+ }
+
+ return CM_OK;
+}
+
+PUBLIC void cm_PWR_DisableHSEM(void)
+{
+ LOG_INTERNAL(__PWR_DEBUG_TRACE_LEVEL, "[Pwr] HSEM disable clock\n",0 , 0, 0, 0, 0, 0);
+ OSAL_DisablePwrRessource(CM_OSAL_POWER_HSEM, 0, 0);
+}
+
+PUBLIC t_cm_error cm_PWR_EnableMemory(
+ t_nmf_core_id coreId,
+ t_dsp_memory_type_id dspMemType,
+ t_cm_physical_address address,
+ t_cm_size size)
+{
+ switch(dspMemType)
+ {
+ case INTERNAL_XRAM24:
+ case INTERNAL_XRAM16:
+ case INTERNAL_YRAM24:
+ case INTERNAL_YRAM16:
+ _pwrMPCMemoryCountT[coreId]++;
+ break;
+ case SDRAM_EXT24:
+ case SDRAM_EXT16:
+ case SDRAM_CODE:
+ case LOCKED_CODE:
+ return OSAL_EnablePwrRessource(
+ CM_OSAL_POWER_SDRAM,
+ address,
+ size);
+ case ESRAM_EXT24:
+ case ESRAM_EXT16:
+ case ESRAM_CODE:
+ return OSAL_EnablePwrRessource(
+ CM_OSAL_POWER_ESRAM,
+ address,
+ size);
+ default:
+ CM_ASSERT(0);
+ }
+
+ return CM_OK;
+}
+
+PUBLIC void cm_PWR_DisableMemory(
+ t_nmf_core_id coreId,
+ t_dsp_memory_type_id dspMemType,
+ t_cm_physical_address address,
+ t_cm_size size)
+{
+ switch(dspMemType)
+ {
+ case INTERNAL_XRAM24:
+ case INTERNAL_XRAM16:
+ case INTERNAL_YRAM24:
+ case INTERNAL_YRAM16:
+ _pwrMPCMemoryCountT[coreId]--;
+ break;
+ case SDRAM_EXT24:
+ case SDRAM_EXT16:
+ case SDRAM_CODE:
+ case LOCKED_CODE:
+ OSAL_DisablePwrRessource(
+ CM_OSAL_POWER_SDRAM,
+ address,
+ size);
+ break;
+ case ESRAM_EXT24:
+ case ESRAM_EXT16:
+ case ESRAM_CODE:
+ OSAL_DisablePwrRessource(
+ CM_OSAL_POWER_ESRAM,
+ address,
+ size);
+ break;
+ default:
+ CM_ASSERT(0);
+ }
+}
+
+
+
+
+
diff --git a/drivers/staging/nmf-cm/cm/engine/repository_mgt/inc/repository_mgt.h b/drivers/staging/nmf-cm/cm/engine/repository_mgt/inc/repository_mgt.h
new file mode 100644
index 00000000000..d2c7185b24f
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/repository_mgt/inc/repository_mgt.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Component repository internal methods.
+ *
+ * \defgroup REPOSITORY_INTERNAL Component repository.
+ */
+#ifndef __INC_CM_REP_MGT_H
+#define __INC_CM_REP_MGT_H
+
+#include <cm/inc/cm_type.h>
+#include <inc/nmf-limits.h>
+
+/*!
+ * \brief Identification of a component entry.
+ * \ingroup REPOSITORY_INTERNAL
+ */
+typedef struct t_rep_component {
+ t_dup_char name;
+ struct t_rep_component *prev;
+ struct t_rep_component *next;
+ t_elfdescription *elfhandle; //!< Must be last as data will be stored here
+} t_rep_component;
+
+/*!
+ * \brief Search a component entry by name.
+ *
+ * \param[in] name The name of the component to look for.
+ * \param[out] component The corresponding component entry in the repository
+ *
+ * \retval t_cm_error
+ *
+ * \ingroup REPOSITORY_INTERNAL
+ */
+PUBLIC t_cm_error cm_REP_lookupComponent(const char *name, t_rep_component **component);
+
+/*!
+ * \brief Helper method that returns the dataFile passed as parameter or, if none, the one found in the cache
+ */
+t_elfdescription* cm_REP_getComponentFile(t_dup_char templateName, t_elfdescription* elfhandle);
+
+/*!
+ * \brief Destroy the full repository (remove and free all components)
+ *
+ * \retval none
+ *
+ * \ingroup REPOSITORY_INTERNAL
+ */
+PUBLIC void cm_REP_Destroy(void);
+
+#endif /* __INC_CM_REP_MGT_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/repository_mgt/inc/repository_type.h b/drivers/staging/nmf-cm/cm/engine/repository_mgt/inc/repository_type.h
new file mode 100644
index 00000000000..30ef8004c48
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/repository_mgt/inc/repository_type.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Component Manager components API type.
+ *
+ * \defgroup COMPONENT CM Components API
+ * \ingroup CM_USER_API
+ */
+
+#ifndef REPOSITORY_TYPE_H_
+#define REPOSITORY_TYPE_H_
+
+typedef enum
+{
+ BIND_ASYNC,
+ BIND_TRACE,
+ BIND_FROMUSER,
+ BIND_TOUSER
+} t_action_to_do;
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/repository_mgt/src/repository_mgt.c b/drivers/staging/nmf-cm/cm/engine/repository_mgt/src/repository_mgt.c
new file mode 100644
index 00000000000..f6ccb4a9992
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/repository_mgt/src/repository_mgt.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ */
+#include <cm/engine/utils/inc/string.h>
+
+#include <cm/engine/component/inc/component_type.h>
+#include <cm/engine/component/inc/bind.h>
+#include <cm/engine/configuration/inc/configuration.h>
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+#include <cm/engine/repository_mgt/inc/repository_mgt.h>
+#include <cm/engine/api/repository_mgt_engine.h>
+#include <cm/engine/trace/inc/trace.h>
+
+
+#undef NHASH
+#define NHASH 157 //Use a prime number!
+#define MULT 17
+
+static t_rep_component *componentCaches[NHASH];
+
+static unsigned int repcomponentHash(const char *str)
+{
+ unsigned int h = 0;
+ for(; *str; str++)
+ h = MULT * h + *str;
+ return h % NHASH;
+}
+
+static void repcomponentAdd(t_rep_component *component)
+{
+ unsigned int h = repcomponentHash(component->name);
+
+ if(componentCaches[h] != NULL)
+ componentCaches[h]->prev = component;
+ component->next = componentCaches[h];
+ component->prev = NULL;
+ componentCaches[h] = component;
+}
+
+static void repcomponentRemove(t_rep_component *component)
+{
+ unsigned int h = repcomponentHash(component->name);
+
+ if(component->prev != NULL)
+ component->prev->next = component->next;
+ if(component->next != NULL)
+ component->next->prev = component->prev;
+ if(component == componentCaches[h])
+ componentCaches[h] = component->next;
+}
+
+
+PUBLIC t_cm_error cm_REP_lookupComponent(const char *name, t_rep_component **component)
+{
+ t_rep_component *tmp;
+
+ for(tmp = componentCaches[repcomponentHash(name)]; tmp != NULL; tmp = tmp->next)
+ {
+ if(cm_StringCompare(name, tmp->name, MAX_TEMPLATE_NAME_LENGTH) == 0)
+ {
+ if(component != NULL)
+ *component = tmp;
+ return CM_OK;
+ }
+ }
+
+ return CM_COMPONENT_NOT_FOUND;
+}
+
+t_elfdescription* cm_REP_getComponentFile(t_dup_char templateName, t_elfdescription* elfhandle)
+{
+ if(elfhandle == NULL)
+ {
+ t_rep_component *pRepComponent;
+
+ for(pRepComponent = componentCaches[repcomponentHash(templateName)]; pRepComponent != NULL; pRepComponent = pRepComponent->next)
+ {
+ if(pRepComponent->name == templateName)
+ return pRepComponent->elfhandle;
+ }
+
+ return NULL;
+ }
+
+ return elfhandle;
+}
+
+
+PUBLIC void cm_REP_Destroy(void)
+{
+ t_rep_component *component, *next;
+ int i;
+
+ for(i = 0; i < NHASH; i++)
+ {
+ for (component = componentCaches[i]; component != NULL; component = next)
+ {
+ next = component->next;
+ cm_ELF_CloseFile(FALSE, component->elfhandle);
+ cm_StringRelease(component->name);
+ OSAL_Free(component);
+ }
+ componentCaches[i] = NULL;
+ }
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_GetRequiredComponentFiles(
+ // IN
+ t_action_to_do action,
+ const t_cm_instance_handle client,
+ const char *requiredItfClientName,
+ const t_cm_instance_handle server,
+ const char *providedItfServerName,
+ // OUT component to be pushed
+ char fileList[][MAX_INTERFACE_TYPE_NAME_LENGTH],
+ t_uint32 listSize,
+ // OUT interface information
+ char type[MAX_INTERFACE_TYPE_NAME_LENGTH],
+ t_uint32 *methodNumber)
+{
+ t_cm_error error;
+ t_component_instance* compClient, *compServer;
+ int n;
+
+ OSAL_LOCK_API();
+
+ // No component required
+ for(n = 0; n < listSize; n++)
+ fileList[n][0] = 0;
+
+ compClient = cm_lookupComponent(client);
+ compServer = cm_lookupComponent(server);
+ switch(action)
+ {
+ case BIND_FROMUSER:{
+ t_interface_provide_description itfProvide;
+
+ // Check server validity
+ if((error = cm_checkValidServer(compServer, providedItfServerName,
+ &itfProvide)) == CM_OK)
+ {
+ cm_StringCopy(type, itfProvide.server->Template->provides[itfProvide.provideIndex].interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+
+ cm_StringCopy(fileList[0], "_sk.", MAX_INTERFACE_TYPE_NAME_LENGTH);
+ cm_StringConcatenate(fileList[0], itfProvide.server->Template->provides[itfProvide.provideIndex].interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+ }
+ } break;
+
+ case BIND_TOUSER: {
+ /* Get Components names for a BindComponentToCMCore */
+ t_interface_require_description itfRequire;
+ t_bool bindable;
+
+ // Check client validity
+ if((error = cm_checkValidClient(compClient, requiredItfClientName,
+ &itfRequire, &bindable)) == CM_OK)
+ {
+ cm_StringCopy(type, itfRequire.client->Template->requires[itfRequire.requireIndex].interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+ *methodNumber = itfRequire.client->Template->requires[itfRequire.requireIndex].interface->methodNumber;
+
+ cm_StringCopy(fileList[0], "_st.", MAX_INTERFACE_TYPE_NAME_LENGTH);
+ cm_StringConcatenate(fileList[0], itfRequire.client->Template->requires[itfRequire.requireIndex].interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+ }
+ }; break;
+
+ case BIND_ASYNC: {
+ /* Get Components names for an asynchronous binding */
+ t_interface_require_description itfRequire;
+ t_interface_provide_description itfProvide;
+ t_bool bindable;
+
+ // Check invalid binding
+ if((error = cm_checkValidBinding(compClient, requiredItfClientName,
+ compServer, providedItfServerName,
+ &itfRequire, &itfProvide, &bindable)) == CM_OK)
+ {
+ if(compClient->Template->dspId != compServer->Template->dspId)
+ {
+ cm_StringCopy(fileList[0], "_sk.", MAX_INTERFACE_TYPE_NAME_LENGTH);
+ cm_StringConcatenate(fileList[0], itfRequire.client->Template->requires[itfRequire.requireIndex].interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+
+ cm_StringCopy(fileList[1], "_st.", MAX_INTERFACE_TYPE_NAME_LENGTH);
+ cm_StringConcatenate(fileList[1], itfRequire.client->Template->requires[itfRequire.requireIndex].interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+ }
+ else
+ {
+ cm_StringCopy(fileList[0], "_ev.", MAX_INTERFACE_TYPE_NAME_LENGTH);
+ cm_StringConcatenate(fileList[0], itfRequire.client->Template->requires[itfRequire.requireIndex].interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+ }
+ }
+ }; break;
+
+ case BIND_TRACE: {
+ /* Get Components names for an asynchronous binding */
+ t_interface_require_description itfRequire;
+ t_interface_provide_description itfProvide;
+ t_bool bindable;
+
+ // Check invalid binding
+ if((error = cm_checkValidBinding(compClient, requiredItfClientName,
+ compServer, providedItfServerName,
+ &itfRequire, &itfProvide, &bindable)) == CM_OK)
+ {
+ cm_StringCopy(fileList[0], "_tr.", MAX_INTERFACE_TYPE_NAME_LENGTH);
+ cm_StringConcatenate(fileList[0], itfRequire.client->Template->requires[itfRequire.requireIndex].interface->type, MAX_INTERFACE_TYPE_NAME_LENGTH);
+ }
+ }; break;
+
+ default:
+ error = CM_OK;
+ break;
+ }
+
+ if(error == CM_OK)
+ {
+ for(n = 0; n < listSize; n++)
+ {
+ t_rep_component *comp;
+
+ // If already loaded, don't ask to load it: clear the name
+ if (fileList[n][0] != 0 &&
+ cm_REP_lookupComponent(fileList[n], &comp) == CM_OK)
+ fileList[n][0] = 0;
+ }
+ }
+
+
+ OSAL_UNLOCK_API();
+ return error;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_PushComponent(const char *name, const void *data, t_cm_size size)
+{
+ t_rep_component *comp;
+ t_cm_error error;
+
+ OSAL_LOCK_API();
+
+ if (cm_REP_lookupComponent(name, &comp) == CM_OK) {
+ /* Component is already there: silently ignore it */
+ OSAL_UNLOCK_API();
+ return CM_OK;
+ }
+
+ comp = OSAL_Alloc(sizeof(*comp));
+ if (comp == NULL) {
+ OSAL_UNLOCK_API();
+ return CM_NO_MORE_MEMORY;
+ }
+
+ comp->name = cm_StringDuplicate(name);
+ if(comp->name == NULL)
+ {
+ OSAL_Free(comp);
+ OSAL_UNLOCK_API();
+ return CM_NO_MORE_MEMORY;
+ }
+
+ if((error = cm_ELF_CheckFile(
+ data,
+ FALSE,
+ &comp->elfhandle)) != CM_OK) {
+ cm_StringRelease(comp->name);
+ OSAL_Free(comp);
+ OSAL_UNLOCK_API();
+ return error;
+ }
+/*
+ if (OSAL_Copy(comp->data, data, size)) {
+ OSAL_Free(comp);
+ OSAL_UNLOCK_API();
+ return CM_UNKNOWN_MEMORY_HANDLE;
+ }*/
+
+ repcomponentAdd(comp);
+
+ OSAL_UNLOCK_API();
+ return CM_OK;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ENGINE_ReleaseComponent (const char *name)
+{
+ t_rep_component *component;
+ t_cm_error err;
+
+ OSAL_LOCK_API();
+ err = cm_REP_lookupComponent(name , &component);
+
+ if (CM_OK == err)
+ {
+ repcomponentRemove(component);
+
+ cm_ELF_CloseFile(FALSE, component->elfhandle);
+ cm_StringRelease(component->name);
+ OSAL_Free(component);
+ }
+
+ OSAL_UNLOCK_API();
+
+ return err;
+}
+
+PUBLIC EXPORT_SHARED t_bool CM_ENGINE_IsComponentCacheEmpty(void)
+{
+ int i;
+
+ OSAL_LOCK_API();
+ for(i = 0; i < NHASH; i++) {
+ if (componentCaches[i] != NULL) {
+ OSAL_UNLOCK_API();
+ return FALSE;
+ }
+ }
+ OSAL_UNLOCK_API();
+ return TRUE;
+}
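From the OS glue side, the expected flow is to first ask which interface files are missing for a binding and then push only those not already cached; the sketch below uses hypothetical interface names ("output"/"input") and a trimmed error path purely to show the call order, and leaves the actual ELF image loading to the caller.

#include <inc/nmf-limits.h>
#include <cm/engine/api/repository_mgt_engine.h>
#include <cm/engine/repository_mgt/inc/repository_type.h>

/* Hedged sketch of the expected call order, not driver code. */
static void push_missing_files_example(t_cm_instance_handle client,
                                       t_cm_instance_handle server)
{
    char files[2][MAX_INTERFACE_TYPE_NAME_LENGTH];
    char type[MAX_INTERFACE_TYPE_NAME_LENGTH];
    t_uint32 methods;
    int n;

    if (CM_ENGINE_GetRequiredComponentFiles(BIND_ASYNC, client, "output",
                                            server, "input",
                                            files, 2, type, &methods) != CM_OK)
        return;

    for (n = 0; n < 2; n++) {
        if (files[n][0] == 0)
            continue;           /* nothing needed, or already in the component cache */
        /* The caller is expected to load the ELF image named files[n] by its own
         * means (e.g. request_firmware() on Linux) and then call:
         * CM_ENGINE_PushComponent(files[n], data, size); */
    }
}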
diff --git a/drivers/staging/nmf-cm/cm/engine/semaphores/hw_semaphores/inc/hw_semaphores.h b/drivers/staging/nmf-cm/cm/engine/semaphores/hw_semaphores/inc/hw_semaphores.h
new file mode 100644
index 00000000000..bd914195b6d
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/semaphores/hw_semaphores/inc/hw_semaphores.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef __INC_HW_SEMA_H_
+#define __INC_HW_SEMA_H_
+
+#include <cm/inc/cm_type.h>
+#include <cm/engine/semaphores/inc/semaphores.h>
+#include <share/semaphores/inc/hwsem_hwp.h>
+
+
+/******************************************************************************/
+/************************ FUNCTIONS PROTOTYPES ********************************/
+/******************************************************************************/
+
+PUBLIC t_cm_error cm_HSEM_Init(const t_cm_system_address *pSystemAddr);
+PUBLIC t_cm_error cm_HSEM_EnableSemIrq(t_semaphore_id semId, t_nmf_core_id toCoreId);
+PUBLIC void cm_HSEM_Take(t_nmf_core_id coreId, t_semaphore_id semId);
+PUBLIC void cm_HSEM_Give(t_nmf_core_id coreId, t_semaphore_id semId);
+PUBLIC void cm_HSEM_GiveWithInterruptGeneration(t_nmf_core_id coreId, t_semaphore_id semId);
+PUBLIC void cm_HSEM_GenerateIrq(t_nmf_core_id coreId, t_semaphore_id semId);
+PUBLIC t_nmf_core_id cm_HSEM_GetCoreIdFromIrqSrc(void);
+
+PUBLIC t_cm_error cm_HSEM_PowerOn(t_nmf_core_id coreId);
+PUBLIC void cm_HSEM_PowerOff(t_nmf_core_id coreId);
+
+#endif /* __INC_HW_SEMA_H_ */
diff --git a/drivers/staging/nmf-cm/cm/engine/semaphores/hw_semaphores/src/hw_semaphores.c b/drivers/staging/nmf-cm/cm/engine/semaphores/hw_semaphores/src/hw_semaphores.c
new file mode 100644
index 00000000000..932058cd4f2
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/semaphores/hw_semaphores/src/hw_semaphores.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/******************************************************************* Includes
+ ****************************************************************************/
+
+#include "../inc/hw_semaphores.h"
+#include <share/semaphores/inc/hwsem_hwp.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+#include <cm/engine/power_mgt/inc/power.h>
+static t_hw_semaphore_regs *pHwSemRegs = (t_hw_semaphore_regs *)0;
+
+static t_uint32 semaphoreUseCounter = 0;
+static t_uint32 imsc[HSEM_MAX_INTR];
+PRIVATE void restoreMask(void);
+
+/****************************************************************************/
+/* NAME: t_cm_error cm_HSEM_Init(const t_cm_system_address *pSystemAddr) */
+/*--------------------------------------------------------------------------*/
+/* DESCRIPTION: Initialize the HW Semaphores module */
+/* */
+/* PARAMETERS: */
+/* (in) pSystemAddr: system base address of the HW semaphores IP */
+/* */
+/* RETURN: CM_OK always */
+/* */
+/****************************************************************************/
+PUBLIC t_cm_error cm_HSEM_Init(const t_cm_system_address *pSystemAddr)
+{
+ t_uint8 i;
+
+ pHwSemRegs = (t_hw_semaphore_regs *)pSystemAddr->logical;
+
+ for (i=HSEM_FIRST_INTR; i < HSEM_MAX_INTR; i++)
+ {
+ imsc[i] = 0; // Mask all interrupts
+ }
+
+ return CM_OK;
+}
+
+static void cm_HSEM_ReInit(void)
+{
+ t_uint8 i;
+
+ pHwSemRegs->icrall = MASK_ALL16;
+
+ for (i=HSEM_FIRST_INTR; i < HSEM_MAX_INTR; i++)
+ {
+ pHwSemRegs->it[i].imsc = imsc[i];
+ pHwSemRegs->it[i].icr = MASK_ALL16;
+ }
+
+ for (i=0; i < NUM_HW_SEMAPHORES; i++)
+ {
+ pHwSemRegs->sem[i] = 0;
+ }
+}
+
+/****************************************************************************/
+/* NAME: t_cm_error cm_HSEM_EnableSemIrq( */
+/* t_semaphore_id semId, */
+/* t_nmf_core_id toCoreId */
+/* ) */
+/*--------------------------------------------------------------------------*/
+/* DESCRIPTION: Enable Irq for a given coreId (communication receiver) */
+/* */
+/* PARAMETERS: */
+/* (in) semId: identifier of the semaphore */
+/* (in) toCoreId: identifier of coreId destination of the coms */
+/* */
+/* RETURN: CM_OK always */
+/* */
+/****************************************************************************/
+PUBLIC t_cm_error cm_HSEM_EnableSemIrq(t_semaphore_id semId, t_nmf_core_id toCoreId)
+{
+ static t_uint32 CoreIdToIntr[NB_CORE_IDS] = {0, 2, 3};
+ int i = CoreIdToIntr[toCoreId];
+
+ imsc[i] |= (1UL << semId);
+
+ // Allow cm_HSEM_EnableSemIrq to be called before real start in order to save power
+ if(semaphoreUseCounter > 0)
+ {
+ pHwSemRegs->it[i].imsc = imsc[i];
+ }
+
+ return CM_OK;
+}
+
+/****************************************************************************/
+/* NAME: void cm_HSEM_GenerateIrq(t_nmf_core_id coreId, t_semaphore_id semId) */
+/*--------------------------------------------------------------------------*/
+/* DESCRIPTION: Generate an irq toward correct core according to semId */
+/* */
+/* PARAMETERS: */
+/* (in) semId: identifier of the semaphore to handle */
+/* */
+/* RETURN: none */
+/* */
+/****************************************************************************/
+PUBLIC void cm_HSEM_GenerateIrq(t_nmf_core_id coreId, t_semaphore_id semId)
+{
+ // TODO: Move the restore into the OS BSP or the PRCMU in order to do it only at wake-up; for now do it always
+ restoreMask();
+
+ pHwSemRegs->sem[semId] = CORE_ID_2_HW_CORE_ID(ARM_CORE_ID);
+ pHwSemRegs->sem[semId] = (HSEM_INTRA_MASK|HSEM_INTRB_MASK|HSEM_INTRC_MASK|HSEM_INTRD_MASK);
+}
+
+/****************************************************************************/
+/* NAME: t_nmf_core_id cm_HSEM_GetCoreIdFromIrqSrc(void) */
+/*--------------------------------------------------------------------------*/
+/* DESCRIPTION: Check Masked Interrupt Status to know which semaphore(s) */
+/* have pending interrupt and return the identifier of the given dsp */
+/* */
+/* PARAMETERS: none */
+/* */
+/* RETURN: core id of the MPC whose interrupt is pending (> LAST_MPC_ID if none) */
+/* */
+/****************************************************************************/
+PUBLIC t_nmf_core_id cm_HSEM_GetCoreIdFromIrqSrc(void)
+{
+ t_uword misValue = pHwSemRegs->it[ARM_CORE_ID].mis;
+ t_uint32 mask = 1 << FIRST_NEIGHBOR_SEMID(ARM_CORE_ID) /* == 0 here */;
+ t_nmf_core_id coreId = FIRST_MPC_ID;
+
+ while ((misValue & mask) == 0)
+ {
+ mask <<= 1;
+
+ coreId++;
+ if(coreId > LAST_MPC_ID)
+ return coreId;
+ }
+
+ /* Acknowledge Hsem interrupt */
+ pHwSemRegs->it[ARM_CORE_ID].icr = mask;
+
+ return coreId;
+}
+
+PUBLIC t_cm_error cm_HSEM_PowerOn(t_nmf_core_id coreId)
+{
+ if(semaphoreUseCounter++ == 0)
+ {
+ cm_PWR_EnableHSEM();
+
+ cm_HSEM_ReInit(); // Re-initialization is done only when the HSEM is switched ON
+ }
+
+ return CM_OK;
+}
+
+PUBLIC void cm_HSEM_PowerOff(t_nmf_core_id coreId)
+{
+ if(--semaphoreUseCounter == 0)
+ {
+ cm_PWR_DisableHSEM();
+ }
+}
+
+PRIVATE void restoreMask()
+{
+ t_uint8 i;
+
+ for (i=HSEM_FIRST_INTR; i < HSEM_MAX_INTR; i++)
+ pHwSemRegs->it[i].imsc = imsc[i];
+}
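The interrupt routing in cm_HSEM_EnableSemIrq() reduces to setting one bit per semaphore in the mask of the destination core's interrupt line (the line number comes from the CoreIdToIntr[] table above); a small standalone worked example of that mask computation follows, using assumed numeric values for illustration only.

/* Worked example of the mask computed in cm_HSEM_EnableSemIrq():
 * enabling semId 5 towards the core mapped to interrupt line 2
 * sets bit 5 of imsc[2], i.e. 0x20. */
#include <stdio.h>

int main(void)
{
    unsigned int semId = 5, line = 2;
    unsigned int imsc[4] = { 0 };

    imsc[line] |= (1UL << semId);
    printf("imsc[%u] = 0x%x\n", line, imsc[line]);   /* prints imsc[2] = 0x20 */
    return 0;
}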
diff --git a/drivers/staging/nmf-cm/cm/engine/semaphores/inc/semaphores.h b/drivers/staging/nmf-cm/cm/engine/semaphores/inc/semaphores.h
new file mode 100644
index 00000000000..7636d8e7c9d
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/semaphores/inc/semaphores.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/**
+ * \internal
+ */
+#ifndef __INC_NMF_SEMAPHORE_H
+#define __INC_NMF_SEMAPHORE_H
+
+#include <cm/engine/api/control/configuration_engine.h>
+#include <share/semaphores/inc/semaphores.h>
+#include <cm/engine/semaphores/hw_semaphores/inc/hw_semaphores.h>
+
+PUBLIC t_cm_error cm_SEM_Init(const t_cm_system_address *pSystemAddr);
+PUBLIC t_cm_error cm_SEM_InitMpc(t_nmf_core_id coreId, t_nmf_semaphore_type_id semTypeId);
+PUBLIC t_semaphore_id cm_SEM_Alloc(t_nmf_core_id fromCoreId, t_nmf_core_id toCoreId);
+
+/* Semaphores management virtualized functions */
+extern void (*cm_SEM_GenerateIrq[NB_CORE_IDS])(t_nmf_core_id coreId, t_semaphore_id semId);
+extern t_cm_error (*cm_SEM_PowerOn[NB_CORE_IDS])(t_nmf_core_id coreId);
+extern void (*cm_SEM_PowerOff[NB_CORE_IDS])(t_nmf_core_id coreId);
+
+#endif /* __INC_NMF_SEMAPHORE_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/semaphores/src/semaphores.c b/drivers/staging/nmf-cm/cm/engine/semaphores/src/semaphores.c
new file mode 100644
index 00000000000..daf95355a56
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/semaphores/src/semaphores.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/inc/cm_type.h>
+#include <cm/engine/semaphores/inc/semaphores.h>
+#include <cm/engine/semaphores/hw_semaphores/inc/hw_semaphores.h>
+#include <cm/engine/dsp/inc/semaphores_dsp.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <share/inc/nmf.h>
+
+void (*cm_SEM_GenerateIrq[NB_CORE_IDS])(t_nmf_core_id coreId, t_semaphore_id semId);
+t_cm_error (*cm_SEM_PowerOn[NB_CORE_IDS])(t_nmf_core_id coreId);
+void (*cm_SEM_PowerOff[NB_CORE_IDS])(t_nmf_core_id coreId);
+
+#define SEM_TYPE_ID_DEFAULT_VALUE ((t_nmf_semaphore_type_id)MASK_ALL32)
+static t_nmf_semaphore_type_id semaphoreTypePerCoreId[NB_CORE_IDS];
+
+static t_cm_error cm_LSEM_PowerOn(t_nmf_core_id coreId)
+{
+ return CM_OK;
+}
+
+static void cm_LSEM_PowerOff(t_nmf_core_id coreId)
+{
+}
+
+PUBLIC t_cm_error cm_SEM_Init(const t_cm_system_address *pSystemAddr)
+{
+ t_nmf_core_id coreId;
+
+ for (coreId = ARM_CORE_ID; coreId < NB_CORE_IDS; coreId++)
+ {
+ semaphoreTypePerCoreId[coreId] = SEM_TYPE_ID_DEFAULT_VALUE;
+
+ /* By default, we assume a full-featured NMF ;) */
+ cm_SEM_GenerateIrq[coreId] = NULL;
+ cm_SEM_PowerOn[coreId] = NULL;
+ cm_SEM_PowerOff[coreId] = NULL;
+ }
+
+ cm_HSEM_Init(pSystemAddr);
+ /* If needed, local semaphore init will be done core by core */
+
+ return CM_OK;
+}
+
+PUBLIC t_cm_error cm_SEM_InitMpc(t_nmf_core_id coreId, t_nmf_semaphore_type_id semTypeId)
+{
+ if (semaphoreTypePerCoreId[coreId] != SEM_TYPE_ID_DEFAULT_VALUE)
+ return CM_MPC_ALREADY_INITIALIZED;
+
+ if(semTypeId == SYSTEM_SEMAPHORES)
+ {
+ cm_SEM_GenerateIrq[coreId] = cm_HSEM_GenerateIrq;
+ cm_SEM_PowerOn[coreId] = cm_HSEM_PowerOn;
+ cm_SEM_PowerOff[coreId] = cm_HSEM_PowerOff;
+ }
+ else if (semTypeId == LOCAL_SEMAPHORES)
+ {
+ cm_SEM_GenerateIrq[coreId] = cm_DSP_SEM_GenerateIrq;
+ cm_SEM_PowerOn[coreId] = cm_LSEM_PowerOn;
+ cm_SEM_PowerOff[coreId] = cm_LSEM_PowerOff;
+ }
+
+ semaphoreTypePerCoreId[coreId] = semTypeId;
+
+ return CM_OK;
+}
+
+PUBLIC t_semaphore_id cm_SEM_Alloc(t_nmf_core_id fromCoreId, t_nmf_core_id toCoreId)
+{
+ t_semaphore_id semId;
+ t_nmf_core_id corex;
+
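+ /*
+ * The semaphore index starts at the neighbor base for toCoreId and is
+ * offset by the number of cores below fromCoreId, skipping toCoreId itself.
+ */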
+ semId = FIRST_NEIGHBOR_SEMID(toCoreId);
+ for (corex = FIRST_CORE_ID; corex < fromCoreId; corex++)
+ {
+ if (corex == toCoreId)
+ continue;
+ semId++;
+ }
+
+ if (
+ (toCoreId == ARM_CORE_ID && semaphoreTypePerCoreId[fromCoreId] == SYSTEM_SEMAPHORES) ||
+ (semaphoreTypePerCoreId[toCoreId] == SYSTEM_SEMAPHORES)
+ )
+ {
+ cm_HSEM_EnableSemIrq(semId, toCoreId);
+ }
+
+ return semId;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/trace/inc/trace.h b/drivers/staging/nmf-cm/cm/engine/trace/inc/trace.h
new file mode 100644
index 00000000000..111eaf1324e
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/trace/inc/trace.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Trace facilities management API
+ *
+ * \defgroup Trace Facilities
+ */
+#ifndef __INC_CM_TRACE_H
+#define __INC_CM_TRACE_H
+
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+#include <cm/engine/configuration/inc/configuration_status.h>
+
+/*********************/
+/* Log related stuff */
+/*********************/
+#define ERROR(format, param1, param2, param3, param4, param5, param6) \
+do { \
+ if (cm_debug_level != -1) \
+ OSAL_Log("Error: " format, (int)(param1), (int)(param2), (int)(param3), (int)(param4), (int)(param5), (int)(param6)); \
+ while(cm_error_break);\
+} while(0)
+
+#define WARNING(format, param1, param2, param3, param4, param5, param6) \
+do { \
+ if (cm_debug_level != -1) \
+ OSAL_Log("Warning: " format, (int)(param1), (int)(param2), (int)(param3), (int)(param4), (int)(param5), (int)(param6)); \
+} while(0)
+
+#define LOG_INTERNAL(level, format, param1, param2, param3, param4, param5, param6) \
+do { \
+ if (level <= cm_debug_level) \
+ OSAL_Log((const char *)format, (int)(param1), (int)(param2), (int)(param3), (int)(param4), (int)(param5), (int)(param6)); \
+} while(0)
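+/*
+ * Illustrative usage (hypothetical variables; the six parameters are always
+ * passed, padded with zeros):
+ *   LOG_INTERNAL(1, "coreId=%d state=%d\n", coreId, state, 0, 0, 0, 0);
+ */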
+
+/*************************/
+/* Panic related stuff */
+/*************************/
+#define CM_ASSERT(cond) \
+do { \
+ if(!(cond)) { OSAL_Log("CM_ASSERT at %s:%d\n", (int)__FILE__, (int)__LINE__, 0, 0, 0, 0); OSAL_Panic(); while(1); } \
+} while (0)
+
+#endif /* __INC_CM_TRACE_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/trace/inc/xtitrace.h b/drivers/staging/nmf-cm/cm/engine/trace/inc/xtitrace.h
new file mode 100644
index 00000000000..1efd4f1e699
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/trace/inc/xtitrace.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#ifndef __INC_CM_XTITRACE_H
+#define __INC_CM_XTITRACE_H
+
+#include <cm/engine/component/inc/instance.h>
+
+#include <inc/nmf-tracedescription.h>
+
+extern t_bool cm_trace_enabled;
+
+/*************************/
+/* Trace related stuff */
+/*************************/
+void cm_TRC_Dump(void);
+
+void cm_TRC_traceReset(void);
+
+void cm_TRC_traceLoadMap(
+ t_nmfTraceComponentCommandDescription cmd,
+ const t_component_instance* component);
+
+#define ARM_TRACE_COMPONENT ((const t_component_instance*)0xFFFFFFFF)
+
+void cm_TRC_traceBinding(
+ t_nmfTraceBindCommandDescription command,
+ const t_component_instance* clientComponent, const t_component_instance* serverComponent,
+ const char *requiredItfName, const char *providedItfName);
+
+void cm_TRC_traceCommunication(
+ t_nmfTraceCommunicationCommandDescription command,
+ t_nmf_core_id coreId,
+ t_nmf_core_id remoteCoreId);
+
+void cm_TRC_traceMemAlloc(t_nmfTraceAllocatorCommandDescription command, t_uint8 allocId, t_uint32 memorySize, const char *allocname);
+
+void cm_TRC_traceMem(t_nmfTraceAllocCommandDescription command, t_uint8 allocId, t_uint32 startAddress, t_uint32 memorySize);
+
+/*************************/
+/* MMDSP trace buffer */
+/*************************/
+PUBLIC t_cm_error cm_SRV_allocateTraceBufferMemory(t_nmf_core_id coreId, t_cm_domain_id domainId);
+PUBLIC void cm_SRV_deallocateTraceBufferMemory(t_nmf_core_id coreId);
+
+
+
+#endif /* __INC_CM_XTITRACE_H */
diff --git a/drivers/staging/nmf-cm/cm/engine/trace/src/panic.c b/drivers/staging/nmf-cm/cm/engine/trace/src/panic.c
new file mode 100644
index 00000000000..e59d9f8b1ba
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/trace/src/panic.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <cm/inc/cm_type.h>
+#include <cm/engine/component/inc/introspection.h>
+#include <cm/engine/component/inc/bind.h>
+#include <cm/engine/executive_engine_mgt/inc/executive_engine_mgt.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/api/control/irq_engine.h>
+
+#include <cm/engine/utils/inc/convert.h>
+#include <share/communication/inc/nmf_service.h>
+
+PUBLIC t_cm_error cm_SRV_allocateTraceBufferMemory(t_nmf_core_id coreId, t_cm_domain_id domainId)
+{
+ t_ee_state *state = cm_EEM_getExecutiveEngine(coreId);
+
+ state->traceDataHandle = cm_DM_Alloc(domainId, SDRAM_EXT16,
+ TRACE_BUFFER_SIZE * sizeof(struct t_nmf_trace) / 2, CM_MM_ALIGN_WORD, TRUE);
+ if (state->traceDataHandle == INVALID_MEMORY_HANDLE)
+ return CM_NO_MORE_MEMORY;
+ else
+ {
+ t_uint32 mmdspAddr;
+ int i;
+
+ state->traceDataAddr = (struct t_nmf_trace*)cm_DSP_GetHostLogicalAddress(state->traceDataHandle);
+ cm_DSP_GetDspAddress(state->traceDataHandle, &mmdspAddr);
+ cm_writeAttribute(state->instance, "rtos/commonpart/traceDataAddr", mmdspAddr);
+
+ eeState[coreId].readTracePointer = 0;
+ eeState[coreId].lastReadedTraceRevision = 0;
+
+ for(i = 0; i < TRACE_BUFFER_SIZE; i++)
+ state->traceDataAddr[i].revision = 0;
+
+ return CM_OK;
+ }
+}
+
+PUBLIC void cm_SRV_deallocateTraceBufferMemory(t_nmf_core_id coreId)
+{
+ t_ee_state *state = cm_EEM_getExecutiveEngine(coreId);
+
+ state->traceDataAddr = 0;
+ cm_DM_Free(state->traceDataHandle, TRUE);
+}
+
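+/*
+ * Exchange the two 16-bit halves of a 32-bit word: the trace fields written
+ * by the MMDSP are half-word swapped from the host's point of view.
+ */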
+static t_uint32 swapHalfWord(t_uint32 word)
+{
+ return (word >> 16) | (word << 16);
+}
+
+PUBLIC EXPORT_SHARED t_cm_trace_type CM_ENGINE_GetNextTrace(
+ t_nmf_core_id coreId,
+ struct t_nmf_trace *trace)
+{
+ t_ee_state *state = cm_EEM_getExecutiveEngine(coreId);
+ t_uint32 foundRevision;
+ t_cm_trace_type type;
+
+ OSAL_LOCK_API();
+ if (state->traceDataAddr == NULL) {
+ type = CM_MPC_TRACE_NONE;
+ goto out;
+ }
+
+ foundRevision = swapHalfWord(state->traceDataAddr[state->readTracePointer].revision);
+
+ if(foundRevision <= state->lastReadedTraceRevision)
+ {
+ // It's an old trace, forget it
+ type = CM_MPC_TRACE_NONE;
+ }
+ else
+ {
+ struct t_nmf_trace *traceRaw;
+
+ if(foundRevision == state->lastReadedTraceRevision + 1)
+ {
+ type = CM_MPC_TRACE_READ;
+ }
+ else
+ {
+ type = CM_MPC_TRACE_READ_OVERRUN;
+ /*
+ * If the revision is bigger than expected, we are in overrun, so we restart from
+ * writePointer + 1, which corresponds to the oldest trace still in the buffer.
+ * => There is a window where the MMDSP could update writePointer just after this read.
+ */
+ state->readTracePointer = (cm_readAttributeNoError(state->instance, "rtos/commonpart/writePointer") + 1) % TRACE_BUFFER_SIZE;
+ }
+
+ traceRaw = &state->traceDataAddr[state->readTracePointer];
+
+ trace->timeStamp = swapHalfWord(traceRaw->timeStamp);
+ trace->componentId = swapHalfWord(traceRaw->componentId);
+ trace->traceId = swapHalfWord(traceRaw->traceId);
+ trace->paramOpt = swapHalfWord(traceRaw->paramOpt);
+ trace->componentHandle = swapHalfWord(traceRaw->componentHandle);
+ trace->parentHandle = swapHalfWord(traceRaw->parentHandle);
+
+ trace->params[0] = swapHalfWord(traceRaw->params[0]);
+ trace->params[1] = swapHalfWord(traceRaw->params[1]);
+ trace->params[2] = swapHalfWord(traceRaw->params[2]);
+ trace->params[3] = swapHalfWord(traceRaw->params[3]);
+
+ state->readTracePointer = (state->readTracePointer + 1) % TRACE_BUFFER_SIZE;
+ state->lastReadedTraceRevision = swapHalfWord(traceRaw->revision);
+ trace->revision = state->lastReadedTraceRevision;
+ }
+
+out:
+ OSAL_UNLOCK_API();
+
+ return type;
+}
+
+
+/*
+ * Panic
+ */
+const struct {
+ char* name;
+ unsigned int info1:1;
+ unsigned int PC:1;
+ unsigned int SP:1;
+ unsigned int interface:1;
+} reason_descrs[] = {
+ {"NONE_PANIC", 0, 0, 0, 0},
+ {"INTERNAL_PANIC", 1, 0, 0, 0},
+ {"MPC_NOT_RESPONDING_PANIC", 0, 0, 0, 0}, /* Should not be useful since in that case CM_getServiceDescription() not call */
+ {"USER_STACK_OVERFLOW", 0, 1, 1, 0},
+ {"SYSTEM_STACK_OVERFLOW", 0, 1, 1, 0},
+ {"UNALIGNED_LONG_ACCESS", 0, 1, 0, 0},
+ {"EVENT_FIFO_OVERFLOW", 0, 0, 0, 1},
+ {"PARAM_FIFO_OVERFLOW", 0, 0, 0, 1},
+ {"INTERFACE_NOT_BINDED", 0, 1, 0, 0},
+ {"USER_PANIC", 1, 0, 0, 0}
+};
+
+static t_component_instance* getCorrespondingInstance(
+ t_panic_reason panicReason,
+ t_uint32 panicThis,
+ t_dup_char *itfName,
+ t_cm_instance_handle *instHandle) {
+ t_component_instance *instance;
+ t_uint32 k;
+
+ for (k=0; k<ComponentTable.idxMax; k++) {
+ if ((instance = componentEntry(k)) == NULL)
+ continue;
+ if(panicReason == PARAM_FIFO_OVERFLOW ||
+ panicReason == EVENT_FIFO_OVERFLOW) {
+ // Panic has been generated by a binding component; search for the client that has called it
+ // and return the client handle (not the binding component one).
+ int i;
+
+ if(instance->thisAddress == panicThis && panicThis == 0) {
+ *itfName = "Internal NMF service";
+ *instHandle = ENTRY2HANDLE(instance, k);
+ return instance;
+ }
+
+ for(i = 0; i < instance->Template->requireNumber; i++) {
+ int nb = instance->Template->requires[i].collectionSize, j;
+ for(j = 0; j < nb; j++) {
+ if(instance->interfaceReferences[i][j].instance != NULL &&
+ instance->interfaceReferences[i][j].instance != (t_component_instance *)NMF_HOST_COMPONENT &&
+ instance->interfaceReferences[i][j].instance != (t_component_instance *)NMF_VOID_COMPONENT &&
+ instance->interfaceReferences[i][j].instance->thisAddress == panicThis)
+ {
+ *itfName = instance->Template->requires[i].name;
+ *instHandle = ENTRY2HANDLE(instance, k);
+ return instance;
+ }
+ }
+ }
+ } else {
+ // The component which has generated the panic is the right one.
+
+ if(instance->thisAddress == panicThis) {
+ *itfName = "?";
+ *instHandle = ENTRY2HANDLE(instance, k);
+ return instance;
+ }
+ }
+ }
+
+ *itfName = "?";
+ *instHandle = 0;
+ return 0;
+}
+
+PUBLIC EXPORT_SHARED t_cm_error CM_ReadMPCString(
+ t_nmf_core_id coreId,
+ t_uint32 dspAddress,
+ char * buffer,
+ t_uint32 bufferSize) {
+
+ while(--bufferSize > 0)
+ {
+ char ch = cm_DSP_ReadXRamWord(coreId, dspAddress++);
+ if(ch == 0)
+ break;
+
+ *buffer++ = ch;
+ }
+
+ *buffer = 0;
+
+ // Reset panicReason
+ cm_writeAttribute(cm_EEM_getExecutiveEngine(coreId)->instance,
+ "rtos/commonpart/serviceReason", MPC_SERVICE_NONE);
+
+ return CM_OK;
+}
+
+/****************/
+/* Generic part */
+/****************/
+PUBLIC EXPORT_SHARED t_cm_error CM_getServiceDescription(
+ t_nmf_core_id coreId,
+ t_cm_service_type *srcType,
+ t_cm_service_description *srcDescr)
+{
+ t_uint32 serviceReason;
+ t_component_instance *ee;
+
+ // Acknowledge interrupt (do it before resetting panicReason)
+ cm_DSP_AcknowledgeDspIrq(coreId, DSP2ARM_IRQ_1);
+
+ ee = cm_EEM_getExecutiveEngine(coreId)->instance;
+
+ // Read panicReason
+ serviceReason = cm_readAttributeNoError(ee, "rtos/commonpart/serviceReason");
+ if(serviceReason == MPC_SERVICE_PRINT)
+ {
+ *srcType = CM_MPC_SERVICE_PRINT;
+
+ srcDescr->u.print.dspAddress = cm_readAttributeNoError(ee, "rtos/commonpart/serviceInfo0");
+ srcDescr->u.print.value1 = cm_readAttributeNoError(ee, "rtos/commonpart/serviceInfo1");
+ srcDescr->u.print.value2 = cm_readAttributeNoError(ee, "rtos/commonpart/serviceInfo2");
+ }
+ else if(serviceReason == MPC_SERVICE_TRACE)
+ {
+ *srcType = CM_MPC_SERVICE_TRACE;
+ }
+ else if(serviceReason != MPC_SERVICE_NONE)
+ {
+ t_uint32 panicThis;
+ t_dup_char itfName;
+ t_component_instance *instance;
+
+ *srcType = CM_MPC_SERVICE_PANIC;
+ srcDescr->u.panic.panicReason = (t_panic_reason)serviceReason;
+ srcDescr->u.panic.panicSource = MPC_EE;
+ srcDescr->u.panic.info.mpc.coreid = coreId;
+
+ // Read panicThis
+ panicThis = cm_readAttributeNoError(ee, "rtos/commonpart/serviceInfo0");
+
+ instance = getCorrespondingInstance(srcDescr->u.panic.panicReason, panicThis, &itfName, &srcDescr->u.panic.info.mpc.faultingComponent);
+
+ LOG_INTERNAL(0, "Error: Panic(%s, %s), This=%x", cm_getDspName(coreId),
+ reason_descrs[srcDescr->u.panic.panicReason].name, (void*)panicThis, 0, 0, 0);
+
+ if(reason_descrs[srcDescr->u.panic.panicReason].interface != 0)
+ {
+ LOG_INTERNAL(0, ", interface=%s", itfName, 0, 0, 0, 0, 0);
+ }
+
+ if(reason_descrs[srcDescr->u.panic.panicReason].info1 != 0)
+ {
+ // Info 1
+ srcDescr->u.panic.info.mpc.panicInfo1 = cm_readAttributeNoError(ee, "rtos/commonpart/serviceInfo1");
+
+ LOG_INTERNAL(0, ", Info=%x", srcDescr->u.panic.info.mpc.panicInfo1, 0, 0, 0, 0, 0);
+ }
+
+ if(reason_descrs[srcDescr->u.panic.panicReason].PC != 0)
+ {
+ t_uint32 DspAddress = 0xFFFFFFFF;
+ t_uint32 DspSize = 0x0;
+
+ // PC need to be read in rtos/commonpart/serviceInfo1
+ srcDescr->u.panic.info.mpc.panicInfo1 = cm_readAttributeNoError(ee, "rtos/commonpart/serviceInfo1");
+
+ if(instance != 0)
+ {
+ cm_DSP_GetDspAddress(instance->memories[instance->Template->codeMemory->id], &DspAddress);
+ cm_DSP_GetDspMemoryHandleSize(instance->memories[instance->Template->codeMemory->id], &DspSize);
+ }
+
+ if(DspAddress <= srcDescr->u.panic.info.mpc.panicInfo1 &&
+ srcDescr->u.panic.info.mpc.panicInfo1 < (DspAddress + DspSize))
+ LOG_INTERNAL(0, ", PC:off=%x <abs=%x>",
+ srcDescr->u.panic.info.mpc.panicInfo1 - DspAddress,
+ srcDescr->u.panic.info.mpc.panicInfo1, 0, 0, 0, 0);
+ else
+ LOG_INTERNAL(0, ", PC:<abs=%x>", srcDescr->u.panic.info.mpc.panicInfo1, 0, 0, 0, 0, 0);
+ }
+
+ if(reason_descrs[srcDescr->u.panic.panicReason].SP != 0)
+ {
+ srcDescr->u.panic.info.mpc.panicInfo2 = cm_readAttributeNoError(ee, "rtos/commonpart/serviceInfo2");
+
+ LOG_INTERNAL(0, ", SP=%x", srcDescr->u.panic.info.mpc.panicInfo2, 0, 0, 0, 0, 0);
+ }
+
+ LOG_INTERNAL(0, "\n", 0, 0, 0, 0, 0, 0);
+
+ if(instance != 0)
+ {
+ LOG_INTERNAL(0, "Error: Component=%s<%s>\n",
+ instance->pathname, instance->Template->name, 0, 0, 0, 0);
+ }
+
+ // We don't set rtos/commonpart/serviceReason = MPC_SERVICE_NONE, since we don't want the
+ // MMDSP to continue execution, and we put it in the Panic state
+ cm_DSP_SetStatePanic(coreId);
+ }
+ else
+ {
+ *srcType = CM_MPC_SERVICE_NONE;
+ }
+
+ return CM_OK;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/trace/src/trace.c b/drivers/staging/nmf-cm/cm/engine/trace/src/trace.c
new file mode 100644
index 00000000000..e27d3284ed2
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/trace/src/trace.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include "../inc/trace.h"
+#include "../inc/xtitrace.h"
+#include <inc/nmf-tracedescription.h>
+#include <inc/nmf-limits.h>
+#include <cm/engine/utils/inc/string.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+
+t_bool cm_trace_enabled = FALSE;
+
+/*
+ * STM message dump
+ */
+#define HEADER(t, s) ((t) | (s << 16))
+
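+/*
+ * HEADER() packs the trace type in the low 16 bits and the record size in the
+ * high 16 bits. writeN() then dumps the record on the STM channel 64 bits at a
+ * time; the last word is written with the second OSAL_Write64() argument set
+ * to 1 (the others use 0), presumably to mark the end of the message.
+ */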
+static void writeN(struct t_nmfTraceChannelHeader* header)
+{
+ t_uint64* data = (t_uint64*)header;
+ t_uint64 *end = (t_uint64*)(((unsigned int)data) + header->traceSize - sizeof(t_uint64));
+
+ while(data < end)
+ {
+ OSAL_Write64(CM_CHANNEL, 0, *data++);
+ }
+
+ OSAL_Write64(CM_CHANNEL, 1, *data);
+}
+
+void cm_TRC_Dump(void)
+{
+ t_uint32 i;
+
+ cm_TRC_traceReset();
+
+ for (i=0; i<ComponentTable.idxMax; i++)
+ {
+ if (componentEntry(i) != NULL)
+ cm_TRC_traceLoadMap(TRACE_COMPONENT_COMMAND_ADD, componentEntry(i));
+ }
+}
+
+void cm_TRC_traceReset(void)
+{
+ if(cm_trace_enabled)
+ {
+ struct t_nmfTraceReset trace;
+
+ trace.header.v = HEADER(TRACE_TYPE_RESET, sizeof(trace));
+
+ trace.minorVersion = TRACE_MINOR_VERSION;
+ trace.majorVersion = TRACE_MAJOR_VERSION;
+
+ writeN((struct t_nmfTraceChannelHeader*)&trace);
+ }
+}
+
+void cm_TRC_traceLoadMap(
+ t_nmfTraceComponentCommandDescription command,
+ const t_component_instance* component)
+{
+ if(cm_trace_enabled)
+ {
+ struct t_nmfTraceComponent trace;
+
+ /*
+ * Generate instantiate trace
+ */
+ trace.header.v = HEADER(TRACE_TYPE_COMPONENT, sizeof(trace));
+
+ trace.command = (t_uint16)command;
+ trace.domainId = (t_uint16)component->Template->dspId + 1;
+ trace.componentContext = (t_uint32)component->thisAddress;
+ trace.componentUserContext = (t_uint32)component;
+ cm_StringCopy((char*)trace.componentLocalName, component->pathname, MAX_COMPONENT_NAME_LENGTH);
+ cm_StringCopy((char*)trace.componentTemplateName, component->Template->name, MAX_TEMPLATE_NAME_LENGTH);
+
+ writeN((struct t_nmfTraceChannelHeader*)&trace);
+
+ if(command == TRACE_COMPONENT_COMMAND_ADD)
+ {
+ struct t_nmfTraceMethod tracemethod;
+ int i, j, k;
+
+ /*
+ * Generate method trace
+ */
+ tracemethod.header.v = HEADER(TRACE_TYPE_METHOD, sizeof(tracemethod));
+
+ tracemethod.domainId = (t_uint16)component->Template->dspId + 1;
+ tracemethod.componentContext = (t_uint32)component->thisAddress;
+
+ for(i = 0; i < component->Template->provideNumber; i++)
+ {
+ t_interface_provide* provide = &component->Template->provides[i];
+ t_interface_provide_loaded* provideLoaded = &component->Template->providesLoaded[i];
+
+ for(j = 0; j < provide->collectionSize; j++)
+ {
+ for(k = 0; k < provide->interface->methodNumber; k++)
+ {
+ tracemethod.methodId = provideLoaded->indexesLoaded[j][k].methodAddresses;
+
+ cm_StringCopy((char*)tracemethod.methodName, provide->interface->methodNames[k], MAX_INTERFACE_METHOD_NAME_LENGTH);
+
+ writeN((struct t_nmfTraceChannelHeader*)&tracemethod);
+ }
+ }
+ }
+ }
+ }
+}
+
+void cm_TRC_traceBinding(
+ t_nmfTraceBindCommandDescription command,
+ const t_component_instance* clientComponent, const t_component_instance* serverComponent,
+ const char *requiredItfName, const char *providedItfName)
+{
+ if(cm_trace_enabled)
+ {
+ struct t_nmfTraceBind trace;
+
+ trace.header.v = HEADER(TRACE_TYPE_BIND, sizeof(trace));
+
+ trace.command = (t_uint16)command;
+
+ if(clientComponent == ARM_TRACE_COMPONENT) // ARM
+ {
+ trace.clientDomainId = 0x1;
+ trace.clientComponentContext = 0x0;
+ }
+ else
+ {
+ trace.clientDomainId = (t_uint16)clientComponent->Template->dspId + 1;
+ trace.clientComponentContext = (t_uint32)clientComponent->thisAddress;
+ }
+ if(requiredItfName != NULL)
+ cm_StringCopy((char*)trace.requiredItfName, requiredItfName, MAX_INTERFACE_NAME_LENGTH);
+ else
+ trace.requiredItfName[0] = 0;
+
+ if(serverComponent == NULL)
+ { // Unbind or VOID
+ trace.serverDomainId = 0;
+ trace.serverComponentContext = 0x0;
+ }
+ else if(serverComponent == ARM_TRACE_COMPONENT)
+ { // ARM
+ trace.serverDomainId = 0x1;
+ trace.serverComponentContext = 0x0;
+ }
+ else
+ {
+ trace.serverDomainId = (t_uint16)serverComponent->Template->dspId + 1;
+ trace.serverComponentContext = (t_uint32)serverComponent->thisAddress;
+ }
+ if(providedItfName != NULL)
+ cm_StringCopy((char*)trace.providedItfName, providedItfName, MAX_INTERFACE_NAME_LENGTH);
+ else
+ trace.providedItfName[0] = 0;
+
+ writeN((struct t_nmfTraceChannelHeader*)&trace);
+ }
+}
+
+void cm_TRC_traceCommunication(
+ t_nmfTraceCommunicationCommandDescription command,
+ t_nmf_core_id coreId,
+ t_nmf_core_id remoteCoreId)
+{
+ if(cm_trace_enabled)
+ {
+ struct t_nmfTraceCommunication trace;
+
+ trace.header.v = HEADER(TRACE_TYPE_COMMUNICATION, sizeof(trace));
+
+ trace.command = (t_uint16)command;
+ trace.domainId = (t_uint16)coreId + 1;
+ trace.remoteDomainId = (t_uint16)remoteCoreId + 1;
+
+ writeN((struct t_nmfTraceChannelHeader*)&trace);
+ }
+}
+
+void cm_TRC_traceMemAlloc(t_nmfTraceAllocatorCommandDescription command, t_uint8 allocId, t_uint32 memorySize, const char *allocname)
+{
+ if(cm_trace_enabled)
+ {
+ struct t_nmfTraceAllocator trace;
+
+ trace.header.v = HEADER(TRACE_TYPE_ALLOCATOR, sizeof(trace));
+
+ trace.command = (t_uint16)command;
+ trace.allocId = (t_uint16)allocId;
+ trace.size = memorySize;
+ cm_StringCopy((char*)trace.name, allocname, sizeof(trace.name));
+
+ writeN((struct t_nmfTraceChannelHeader*)&trace);
+ }
+}
+
+void cm_TRC_traceMem(t_nmfTraceAllocCommandDescription command, t_uint8 allocId, t_uint32 startAddress, t_uint32 memorySize)
+{
+ if(cm_trace_enabled)
+ {
+ struct t_nmfTraceAlloc trace;
+
+ trace.header.v = HEADER(TRACE_TYPE_ALLOC, sizeof(trace));
+
+ trace.command = (t_uint16)command;
+ trace.allocId = (t_uint16)allocId;
+ trace.offset = startAddress;
+ trace.size = memorySize;
+
+ writeN((struct t_nmfTraceChannelHeader*)&trace);
+ }
+}
+
diff --git a/drivers/staging/nmf-cm/cm/engine/utils/inc/convert.h b/drivers/staging/nmf-cm/cm/engine/utils/inc/convert.h
new file mode 100644
index 00000000000..d6912e58687
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/utils/inc/convert.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Conversion utility methods.
+ */
+#ifndef H_CM_CONVERTS_MEM
+#define H_CM_CONVERTS_MEM
+
+#include <share/inc/nmf.h>
+
+/*
+ * Utils convert methods
+ */
+const char* cm_getDspName(t_nmf_core_id dsp);
+
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/utils/inc/mem.h b/drivers/staging/nmf-cm/cm/engine/utils/inc/mem.h
new file mode 100644
index 00000000000..c950a94023d
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/utils/inc/mem.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Memory manipulation.
+ */
+#ifndef H_CM_UTILS_MEM
+#define H_CM_UTILS_MEM
+
+/*
+ * Utils libc methods
+ */
+void cm_MemCopy(void* dest, const void *src, int count);
+void cm_MemSet(void *str, int c, int count);
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/utils/inc/string.h b/drivers/staging/nmf-cm/cm/engine/utils/inc/string.h
new file mode 100644
index 00000000000..d2b7c0b0823
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/utils/inc/string.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief String manipulation.
+ */
+#ifndef H_CM_UTILS_STRING
+#define H_CM_UTILS_STRING
+
+#include <cm/engine/memory/inc/memory.h>
+
+#define MAX_INTERNAL_STRING_LENGTH 2048
+
+typedef const char *t_dup_char;
+
+t_dup_char cm_StringGet(const char* str);
+t_dup_char cm_StringReference(t_dup_char str);
+t_dup_char cm_StringDuplicate(const char* orig);
+void cm_StringRelease(t_dup_char orig);
+
+/*
+ * Utils libc methods
+ */
+void cm_StringCopy(char* dest, const char* src, int count);
+int cm_StringCompare(const char* str1, const char* str2, int count);
+int cm_StringLength(const char * str, int count);
+void cm_StringConcatenate(char* dest, const char* src, int count);
+char* cm_StringSearch(const char* str, int c);
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/utils/inc/swap.h b/drivers/staging/nmf-cm/cm/engine/utils/inc/swap.h
new file mode 100644
index 00000000000..e4f5acb3010
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/utils/inc/swap.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Swap integer manipulation.
+ */
+#ifndef H_CM_UTILS_SWAP
+#define H_CM_UTILS_SWAP
+
+#include <cm/inc/cm_type.h>
+
+/*
+ * Swap methods
+ */
+t_uint16 swap16(t_uint16 x);
+t_uint32 swap32(t_uint32 x);
+t_uint64 swap64(t_uint64 x);
+t_uint32 noswap32(t_uint32 x);
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/utils/inc/table.h b/drivers/staging/nmf-cm/cm/engine/utils/inc/table.h
new file mode 100644
index 00000000000..9d9828a81f6
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/utils/inc/table.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*!
+ * \internal
+ * \brief Dynamic table manipulation.
+ */
+#ifndef H_CM_UTILS_TABLE
+#define H_CM_UTILS_TABLE
+
+#include <cm/inc/cm_type.h>
+
+/*
+ This implements a (generic) dynamic table (its size grows on demand)
+ used to register pointers to elements of a given kind.
+
+ It also allows computing/converting each kernel pointer registered in the
+ table into a user handle that can be checked.
+
+ The "user" handle is composed of the index in this table
+ (the low INDEX_SHIFT bits) and of the low bits of the "local" pointer,
+ shifted by INDEX_SHIFT and stored in the high bits:
+
+ handle bits: 31 ................................ 12 11 ...... 0
+ | lower bits of the local pointer | index |
+
+ This allows a straight translation from a user handle to a local pointer
+ plus a strong check to validate the value of a user handle.
+ The reverse translation, from a pointer to a user handle, is
+ slower as it requires an explicit search in the table.
+ */
+
+
+/* INDEX_SHIFT determines the index size and thus the max index */
+#define INDEX_SHIFT 12
+#define INDEX_MAX (1UL << INDEX_SHIFT)
+#define INDEX_MASK (INDEX_MAX-1)
+#define ENTRY2HANDLE(pointer, index) (((unsigned int)pointer << INDEX_SHIFT) | index)
+#define TABLE_DEF_SIZE 0x1000
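+
+/*
+ * Illustrative example (hypothetical values): with INDEX_SHIFT = 12, a local
+ * pointer 0xC0123450 registered at index 5 gives the handle
+ *     ENTRY2HANDLE(0xC0123450, 5) = (0xC0123450 << 12) | 5 = 0x23450005
+ * cm_lookupEntry() recovers the index from the low 12 bits (5) and validates
+ * the handle by checking that (entries[5] << 12) equals its high bits
+ * (0x23450000).
+ */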
+
+typedef struct {
+ t_uint32 idxNb; /**< number of entries used */
+ t_uint32 idxCur; /**< current index: point to next supposed
+ free entry: used to look for the next
+ free entry */
+ t_uint32 idxMax; /**< index max currently allowed */
+ void **entries; /**< table itself */
+} t_nmf_table;
+
+t_cm_error cm_initTable(t_nmf_table* table);
+void cm_destroyTable(t_nmf_table* table);
+t_uint32 cm_addEntry(t_nmf_table *table, void *entry);
+void cm_delEntry(t_nmf_table *table, t_uint32 idx);
+void *cm_lookupEntry(const t_nmf_table *table, const t_uint32 hdl);
+t_uint32 cm_lookupHandle(const t_nmf_table *table, const void *entry);
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm/engine/utils/src/convert.c b/drivers/staging/nmf-cm/cm/engine/utils/src/convert.c
new file mode 100644
index 00000000000..ad6e097bfe6
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/utils/src/convert.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ */
+#include <cm/engine/utils/inc/convert.h>
+
+const char* dspNames[NB_CORE_IDS] = {
+ "ARM",
+ "SVA",
+ "SIA"
+};
+
+
+const char* cm_getDspName(t_nmf_core_id dsp) {
+ return dspNames[dsp];
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/utils/src/mem.c b/drivers/staging/nmf-cm/cm/engine/utils/src/mem.c
new file mode 100644
index 00000000000..130a044bbf8
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/utils/src/mem.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ */
+#include <cm/engine/utils/inc/mem.h>
+
+
+/*
+ * Methods
+ */
+void cm_MemCopy(void* dest, const void *src, int count) {
+ char *tmp = (char *) dest, *s = (char *) src;
+
+ while (count--)
+ *tmp++ = *s++;
+}
+
+void cm_MemSet(void *str, int c, int count) {
+ char *tmp = (char *)str;
+
+ while (count--)
+ *tmp++ = c;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/utils/src/string.c b/drivers/staging/nmf-cm/cm/engine/utils/src/string.c
new file mode 100644
index 00000000000..89058d5825a
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/utils/src/string.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ * Shared string manipulation.
+ * TODO This is a list today, must be a hash later !!!!!
+ */
+#include <cm/engine/utils/inc/string.h>
+#include <cm/engine/trace/inc/trace.h>
+
+#include <cm/engine/memory/inc/memory.h>
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+
+#undef NHASH
+#define NHASH 257 //Use a prime number!
+#define MULT 17
+
+/*
+ * Data
+ */
+struct t_linkedstring
+{
+ struct t_linkedstring *next;
+ int referencer;
+ char string[1];
+};
+
+static struct t_linkedstring *list[NHASH];
+
+#undef myoffsetof
+#define myoffsetof(st, m) \
+ ((int) ( (char *)&((st *)(0))->m - (char *)0 ))
+
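+/* Simple multiplicative string hash, reduced modulo the (prime) bucket count NHASH */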
+unsigned int hash(const char *str)
+{
+ unsigned int h = 0;
+ for(; *str; str++)
+ h = MULT * h + *str;
+ return h % NHASH;
+}
+/*
+ * Methods
+ */
+PRIVATE struct t_linkedstring *lookupString(
+ const char* str,
+ struct t_linkedstring *first)
+{
+ while(first != 0)
+ {
+ if(cm_StringCompare(str, first->string, MAX_INTERNAL_STRING_LENGTH) == 0)
+ break;
+ first = first->next;
+ }
+
+ return first;
+}
+
+t_dup_char cm_StringGet(const char* str)
+{
+ struct t_linkedstring *entry;
+
+ entry = lookupString(str, list[hash(str)]);
+ CM_ASSERT(entry != 0);
+
+ return (t_dup_char)entry->string;
+}
+
+t_dup_char cm_StringReference(t_dup_char str)
+{
+ struct t_linkedstring* entry = (struct t_linkedstring*)((t_uint32)str - myoffsetof(struct t_linkedstring, string));
+
+ // One more referencer
+ entry->referencer++;
+
+ return (t_dup_char)entry->string;
+}
+
+t_dup_char cm_StringDuplicate(const char* str)
+{
+ struct t_linkedstring *entry;
+ unsigned int h;
+
+ h = hash(str);
+ entry = lookupString(str, list[h]);
+ if(entry != 0)
+ {
+ // One more referencer
+ entry->referencer++;
+ }
+ else
+ {
+ // Allocate new entry
+ entry = (struct t_linkedstring *)OSAL_Alloc(sizeof(struct t_linkedstring)-1 + cm_StringLength(str, MAX_INTERNAL_STRING_LENGTH)+1);
+ if(entry == NULL)
+ return NULL;
+
+ entry->referencer = 1;
+ cm_StringCopy(entry->string, str, MAX_INTERNAL_STRING_LENGTH);
+
+ // Link it in list
+ entry->next = list[h];
+ list[h] = entry;
+ }
+
+ return (t_dup_char)entry->string;
+}
+
+void cm_StringRelease(t_dup_char str)
+{
+ if(str != NULL)
+ {
+ struct t_linkedstring* entry = (struct t_linkedstring*)((t_uint32)str - myoffsetof(struct t_linkedstring, string));
+
+ // One less referencer
+ entry->referencer--;
+
+ if(entry->referencer == 0)
+ {
+ int h = hash(entry->string);
+
+ if(list[h] == entry) // This is the first one
+ {
+ list[h] = entry->next;
+ }
+ else
+ {
+ struct t_linkedstring *tmp = list[h];
+
+ // Here we assume that entry is in the list
+ while(/*tmp != NULL && */tmp->next != entry)
+ tmp = tmp->next;
+
+ tmp->next = entry->next;
+ }
+ OSAL_Free(entry);
+ }
+ }
+}
+
+#if 0
+void checkString()
+{
+ struct t_linkedstring *tmp = list;
+
+ while(tmp != 0)
+ {
+ printf(" stay %s %d\n", tmp->string, tmp->referencer);
+ tmp = tmp->next;
+ }
+}
+#endif
+
+/*
+ * LibC method
+ */
+void cm_StringCopy(char* dest, const char *src, int count)
+{
+ while (count-- && (*dest++ = *src++) != '\0')
+ /* nothing */
+ ;
+}
+#define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
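+/* DETECTNULL(X) is non-zero iff at least one byte of the 32-bit word X is zero
+ (classic "has a zero byte" bit trick); it lets cm_StringCompare() stop on the
+ terminating '\0' without testing each byte of the word. */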
+
+int cm_StringCompare(const char* str1, const char* str2, int count)
+{
+ /* If s1 and s2 are word-aligned, compare them a word at a time. */
+ if ((((int)str1 & 3) | ((int)str2 & 3)) == 0)
+ {
+ unsigned int *a1 = (unsigned int*)str1;
+ unsigned int *a2 = (unsigned int*)str2;
+
+ while (count >= sizeof (unsigned int) && *a1 == *a2)
+ {
+ count -= sizeof (unsigned int);
+
+ /* If we've run out of bytes or hit a null, return zero since we already know *a1 == *a2. */
+ if (count == 0 || DETECTNULL (*a1))
+ return 0;
+
+ a1++;
+ a2++;
+ }
+
+ /* A difference was detected in last few bytes of s1, so search bytewise */
+ str1 = (char*)a1;
+ str2 = (char*)a2;
+ }
+
+ while (count-- > 0 && *str1 == *str2)
+ {
+ /* If we've run out of bytes or hit a null, return zero
+ since we already know *s1 == *s2. */
+ if (count == 0 || *str1 == '\0')
+ return 0;
+ str1++;
+ str2++;
+ }
+
+ return (*(unsigned char *) str1) - (*(unsigned char *) str2);
+}
+
+int cm_StringLength(const char * str, int count)
+{
+ const char *sc;
+
+ for (sc = str; count-- && *sc != '\0'; ++sc)
+ /* nothing */
+ ;
+ return sc - str;
+}
+
+void cm_StringConcatenate(char* dest, const char* src, int count)
+{
+ while ((*dest) != '\0')
+ {
+ dest++;
+ count--;
+ }
+ cm_StringCopy(dest, src, count);
+}
+
+char* cm_StringSearch(const char* str, int c)
+{
+ for(; *str != (char) c; ++str)
+ if (*str == '\0')
+ return 0;
+ return (char *) str;
+}
diff --git a/drivers/staging/nmf-cm/cm/engine/utils/src/swap.c b/drivers/staging/nmf-cm/cm/engine/utils/src/swap.c
new file mode 100644
index 00000000000..e3e2d536144
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/utils/src/swap.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ */
+#include <cm/engine/utils/inc/swap.h>
+
+
+/*
+ * Methods
+ */
+t_uint16 swap16(t_uint16 x)
+{
+ return ((x >> 8) |
+ ((x << 8) & 0xff00U));
+}
+
+#ifdef LINUX
+
+#if defined(__STN_8815) /* __STN_8815 -> ARMv5*/
+t_uint32 swap32(t_uint32 x)
+{
+ asm volatile (
+ "EOR r1, r0, r0, ROR #16 \n\t"
+ "BIC r1, r1, #0xFF0000 \n\t"
+ "MOV r0, r0, ROR #8 \n\t"
+ "EOR r0, r0, r1, LSR #8"
+ : : : "r3" );
+
+ return x;
+}
+
+t_uint64 swap64(t_uint64 x)
+{
+ asm volatile (
+ "MOV r2, r1 \n\t"
+ " \n\t"
+ "EOR r3, r0, r0, ROR #16 \n\t"
+ "BIC r3, r3, #0xFF0000 \n\t"
+ "MOV r0, r0, ROR #8 \n\t"
+ "EOR r1, r0, r3, LSR #8 \n\t"
+ " \n\t"
+ "EOR r3, r2, r2, ROR #16 \n\t"
+ "BIC r3, r3, #0xFF0000 \n\t"
+ "MOV r2, r2, ROR #8 \n\t"
+ "EOR r0, r2, r3, LSR #8"
+ : : : "r3", "r2" );
+
+ return x;
+}
+#else /* -> ARMv6 or later */
+
+t_uint32 swap32(t_uint32 x)
+{
+ asm volatile (
+ "REV %0, %0"
+ : "+r"(x) : );
+
+ return x;
+}
+
+t_uint64 swap64(t_uint64 x)
+{
+ asm volatile (
+ "REV r2, %Q0 \n\t"
+ "REV %Q0, %R0 \n\t"
+ "MOV %R0, r2"
+ : "+&r" (x) : : "r2" );
+
+ return x;
+}
+
+#endif
+
+#else /* Symbian, Think -> We assume ARMCC */
+
+#if defined(__thumb__)
+
+t_uint32 swap32(t_uint32 x)
+{
+ return ((x >> 24) |
+ ((x >> 8) & 0xff00U) |
+ ((x << 8) & 0xff0000U) |
+ ((x << 24) & 0xff000000U));
+}
+
+t_uint64 swap64(t_uint64 x)
+{
+ return ((x >> 56) |
+ ((x >> 40) & 0xff00UL) |
+ ((x >> 24) & 0xff0000UL) |
+ ((x >> 8) & 0xff000000UL) |
+ ((x << 8) & 0xff00000000ULL) |
+ ((x << 24) & 0xff0000000000ULL) |
+ ((x << 40) & 0xff000000000000ULL) |
+ ((x << 56)));
+}
+
+#elif (__TARGET_ARCH_ARM < 6)
+
+__asm t_uint32 swap32(t_uint32 x)
+{
+ EOR r1, r0, r0, ROR #16
+ BIC r1, r1, #0xFF0000
+ MOV r0, r0, ROR #8
+ EOR r0, r0, r1, LSR #8
+
+ BX lr
+}
+
+__asm t_uint64 swap64(t_uint64 x)
+{
+ MOV r2, r1
+
+ EOR r3, r0, r0, ROR #16 // Swap low (r0) and store it in high (r1)
+ BIC r3, r3, #0xFF0000
+ MOV r0, r0, ROR #8
+ EOR r1, r0, r3, LSR #8
+
+ EOR r3, r2, r2, ROR #16 // Swap high (r2 = ex r1) and store it in low (r0)
+ BIC r3, r3, #0xFF0000
+ MOV r2, r2, ROR #8
+ EOR r0, r2, r3, LSR #8
+
+ BX lr
+}
+
+#else /* -> ARMv6 or later */
+
+__asm t_uint32 swap32(t_uint32 x)
+{
+ REV r0, r0
+
+ BX lr
+}
+
+__asm t_uint64 swap64(t_uint64 x)
+{
+ REV r2, r0
+ REV r0, r1
+ MOV r1, r2
+
+ BX lr
+}
+
+#endif
+
+#endif
+
+t_uint32 noswap32(t_uint32 x) {
+ return x;
+}
+
diff --git a/drivers/staging/nmf-cm/cm/engine/utils/src/table.c b/drivers/staging/nmf-cm/cm/engine/utils/src/table.c
new file mode 100644
index 00000000000..708396a01b2
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/engine/utils/src/table.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+/*
+ *
+ */
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+#include <cm/engine/trace/inc/trace.h>
+#include <cm/engine/utils/inc/mem.h>
+#include <cm/engine/utils/inc/table.h>
+
+/*
+ * Methods
+ */
+t_cm_error cm_initTable(t_nmf_table* table)
+{
+ table->idxMax = TABLE_DEF_SIZE / sizeof(table->entries);
+
+ table->entries = OSAL_Alloc_Zero(table->idxMax*sizeof(table->entries));
+
+ if (table->entries == NULL) {
+ table->idxMax = 0;
+ return CM_NO_MORE_MEMORY;
+ }
+
+ return CM_OK;
+}
+
+void cm_destroyTable(t_nmf_table* table)
+{
+ if (table->idxNb) {
+ ERROR("Attempt to free non-empty table !!!\n", 0, 0, 0, 0, 0, 0);
+ return;
+ }
+ OSAL_Free(table->entries);
+ table->idxMax = 0;
+}
+
+static t_cm_error cm_increaseTable(t_nmf_table* table)
+{
+ t_uint32 new_max;
+ void *mem;
+
+ if (table->idxMax == INDEX_MASK) {
+ ERROR("CM_NO_MORE_MEMORY: Maximum table entries reached\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ new_max = table->idxMax
+ + TABLE_DEF_SIZE / sizeof(table->entries);
+
+ if (new_max > INDEX_MAX)
+ new_max = INDEX_MAX;
+
+ mem = OSAL_Alloc(new_max * sizeof(table->entries));
+
+ if (mem == NULL) {
+ ERROR("CM_NO_MORE_MEMORY: Unable to allocate memory for a table\n", 0, 0, 0, 0, 0, 0);
+ return CM_NO_MORE_MEMORY;
+ }
+
+ cm_MemCopy(mem, table->entries,
+ table->idxMax*sizeof(table->entries));
+ cm_MemSet((void *)((t_uint32) mem + table->idxMax*sizeof(*table->entries)), 0,
+ (new_max-table->idxMax) * sizeof(*table->entries));
+
+ OSAL_Free(table->entries);
+ table->entries = mem;
+ table->idxMax = new_max;
+
+ return CM_OK;
+}
+
+/** cm_addEntry - Add a local pointer to an element to the list
+ *
+ * 1. Increase the size of the list if it's full
+ * 2. Search an empty entry
+ * 3. Add the element to the list
+ * 4. Compute and return the "user handle"
+ */
+t_uint32 cm_addEntry(t_nmf_table *table, void *entry)
+{
+ unsigned int i;
+ t_uint32 hdl = 0;
+
+ if (table->idxNb == table->idxMax)
+ cm_increaseTable(table);
+
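+ /* Scan circularly from idxCur for the first free slot, wrapping at idxMax */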
+ for (i = table->idxCur;
+ table->entries[i] != 0 && i != (table->idxCur-1);
+ i = (i+1)%table->idxMax);
+
+ if (table->entries[i] == 0) {
+ table->entries[i] = entry;
+ table->idxCur = (i+1) % table->idxMax;
+ table->idxNb++;
+ hdl = ENTRY2HANDLE(entry, i);
+ } else
+ ERROR("No free entry found in table\n", 0, 0, 0, 0, 0, 0);
+
+ return hdl;
+}
+
+/** cm_delEntry - remove the given element from the list
+ *
+ * 1. Check if the handle is valid
+ * 2. Search the entry and free it
+ */
+void cm_delEntry(t_nmf_table *table, t_uint32 idx)
+{
+ table->entries[idx] = NULL;
+ table->idxNb--;
+}
+
+/** cm_lookupEntry - search the entry corresponding to
+ * the user handle.
+ *
+ * 1. Check if the handle is valid
+ * 2. Return a pointer to the element
+ */
+void *cm_lookupEntry(const t_nmf_table *table, const t_uint32 hdl)
+{
+ unsigned int idx = hdl & INDEX_MASK;
+
+ if ((idx >= table->idxMax)
+ || (((unsigned int)table->entries[idx] << INDEX_SHIFT) != (hdl & ~INDEX_MASK)))
+ return NULL;
+ else
+ return table->entries[idx];
+}
+
+/** cm_lookupHandle - search the handle corresponding
+ * to the given element
+ *
+ * 1. Check if the handler is valid or is a special handler
+ * 2. Loop in the table to retrieve the entry matching and return its value
+ */
+t_uint32 cm_lookupHandle(const t_nmf_table *table, const void *entry)
+{
+ t_uint32 i;
+
+ /* NULL is an invalid value that must be handled separately,
+ as it would match every free entry */
+ if (entry == NULL)
+ return 0;
+
+ for (i=0; i < table->idxMax; i++) {
+ if (table->entries[i] == entry)
+ return ENTRY2HANDLE(table->entries[i], i);
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/nmf-cm/cm/inc/cm.h b/drivers/staging/nmf-cm/cm/inc/cm.h
new file mode 100644
index 00000000000..37ccb36a5ee
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/inc/cm.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+
+#ifndef __INC_CM_H
+#define __INC_CM_H
+
+#include <cm/inc/cm_def.h>
+
+/********************************************************************************/
+/* Component Manager API prototypes */
+/********************************************************************************/
+
+/*
+ * User level wrapper
+ */
+#include <cm/proxy/api/cm_proxy.h>
+
+#endif /* __INC_CM_H */
diff --git a/drivers/staging/nmf-cm/cm/inc/cm_def.h b/drivers/staging/nmf-cm/cm/inc/cm_def.h
new file mode 100644
index 00000000000..dc7a1fdad66
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/inc/cm_def.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+
+/*!
+ * \brief Component Manager API.
+ *
+ * This file contains the Component Manager API for manipulating components.
+ *
+ */
+
+#ifndef __INC_CM_DEF_H
+#define __INC_CM_DEF_H
+
+#include <cm/inc/cm_type.h>
+#include <inc/nmf-def.h>
+
+/*!
+ * \brief Get the version of the NMF CM engine at runtime
+ *
+ * This method should be used to query the version number of the
+ * NMF Component Manager engine at runtime. This is useful to check
+ * whether the version of the engine linked with the application corresponds
+ * to the engine used for development.
+ *
+ * Such code can be used to check compatibility: \code
+ t_uint32 nmfversion;
+
+ // Print NMF version
+ CM_GetVersion(&nmfversion);
+ LOG("NMF Version %d-%d-%d\n",
+ VERSION_MAJOR(nmfversion),
+ VERSION_MINOR(nmfversion),
+ VERSION_PATCH(nmfversion));
+ if(NMF_VERSION != nmfversion) {
+ LOG("Error: Incompatible API version %d != %d\n", NMF_VERSION, nmfversion);
+ EXIT();
+ }
+ * \endcode
+ *
+ * \param[out] version Internal hardcoded version (use \ref VERSION_MAJOR, \ref VERSION_MINOR, \ref VERSION_PATCH macros to decode it).
+ *
+ * \ingroup CM
+ */
+PUBLIC IMPORT_SHARED void CM_GetVersion(t_uint32 *version);
+
+#endif /* __INC_CM_DEF_H */
diff --git a/drivers/staging/nmf-cm/cm/inc/cm_macros.h b/drivers/staging/nmf-cm/cm/inc/cm_macros.h
new file mode 100644
index 00000000000..2279c204a20
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/inc/cm_macros.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+
+/*!
+ * \brief Component Manager Macros.
+ *
+ * \defgroup CM_MACROS NMF Macros (ANSI C99)
+ * The Component Manager Macros are provided to ease FromHost interface call and ToHost callback definition.
+ * \attention <b>These macros are only ANSI C99 compliant</b> (ARM RVCT 2.x/3.x, GNU gcc 4.x, ...)
+ * \ingroup CM_USER_API
+ */
+
+#ifndef __INC_CM_MACROS_H
+#define __INC_CM_MACROS_H
+
+/*
+ * The next macros are supported only with C Ansi 99, so....
+ */
+
+/*
+ * The Symbian environment dependency, computation which uses an old gnu cpp,
+ * does not accept "..." parameters.
+ * However the actual compiler (armcc) does.
+ * So remove the macro definitions when computing dependencies.
+ */
+#if ( defined(__CC_ARM) && !defined(__STRICT_ANSI__) ) || !defined(__SYMBIAN32__)
+
+/*
+ * Only for skilled eyes ;)
+ * The following macros are used to implement NMFCALL[VOID] and NMFMETH[VOID] macros in an elegant way
+ */
+#define WITH_PARAM(...) __VA_ARGS__)
+#define WITH_NOPARAM(...) )
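+/*
+ * Illustrative expansion: NMFCALL(itf, method)(p1, p2) first expands to
+ *   (itf).method((itf).THIS, WITH_PARAM(p1, p2)
+ * and WITH_PARAM then yields "p1, p2)", giving (itf).method((itf).THIS, p1, p2).
+ * NMFCALLVOID drops the argument list the same way through WITH_NOPARAM.
+ */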
+
+/*!
+ * \brief Macro to ease Host to Dsp interface calling
+ *
+ * \attention <b>This macro is only ANSI C99 compliant</b>
+ *
+ * The <i>NMFCALL</i> macro can be used to call one method of any previously bound FromHost interface.\n
+ * From Host side, today, we have no way to mask the multi-instance handling, so
+ * this macro is provided to ease FromHost interface calling and to avoid any mistake in the THIS parameter passing.
+ *
+ * So, any fromHost interface method call like: \code
+ * itf.method(itf.THIS, param1, param2, ...);
+ * \endcode
+ * can be replaced by: \code
+ * NMFCALL(itf, method)(param1, param2, ...);
+ * \endcode
+ *
+ * \warning Don't forget to use the NMFCALLVOID macro when calling a FromHost interface method that has no application parameter,
+ * else it will lead to erroneous C code expansion
+ * \see NMFCALLVOID
+ * \hideinitializer
+ * \ingroup CM_MACROS
+ */
+#define NMFCALL(itfHandle, itfMethodName) \
+ (itfHandle).itfMethodName((itfHandle).THIS, WITH_PARAM
+
+/*!
+ * \brief Macro to ease Host to Dsp interface calling (method without any user parameter)
+ *
+ * \attention <b>This macro is only ANSI C99 compliant</b>
+ *
+ * The <i>NMFCALLVOID</i> macro can be used to call any method without user parameters of any previously bound FromHost interface.\n
+ * From Host side, today, we have no way to mask the multi-instance handling, so
+ * this macro is provided to ease FromHost interface calling and to avoid any mistake in the THIS parameter passing.
+ *
+ * So, any FromHost interface method call without any application parameter like:\code
+ * itf.method(itf.THIS);
+ * \endcode
+ * can be replaced by: \code
+ * NMFCALLVOID(itf, method)();
+ * \endcode
+ * \see NMFCALL
+ * \hideinitializer
+ * \ingroup CM_MACROS
+ */
+#define NMFCALLVOID(itfHandle, itfMethodName) \
+ (itfHandle).itfMethodName((itfHandle).THIS WITH_NOPARAM
+
+/*!
+ * \brief Macro to ease Dsp to Host interface method declaration
+ *
+ * \attention <b>This macro definition is only ANSI C99 compliant</b>
+ *
+ * The <i>NMFMETH</i> macro can be used to ease the ToHost interface method declaration.\n
+ * From Host side, today, we have no way to mask the multi-instance handling, so the user shall handle it by hand
+ * by passing the "component" context as first parameter of each ToHost interface method through the void *THIS parameter.
+ * This macro avoids any mistake in the THIS parameter declaration when it is never used by the user code.
+ *
+ * So, any ToHost interface method declaration like:\code
+ * void mynotify(void *THIS, mytype1 myparam1, mytype2 myparam2, ...) {
+ * <body of the interface routine>
+ * }
+ * \endcode
+ * can be replaced by: \code
+ * void NMFMETH(mynotify)(mytype1 myparam1, mytype2 myparam2, ...) {
+ * <body of the interface routine>
+ * }
+ * \endcode
+ *
+ * \warning Don't forget to use the NMFMETHVOID macro when declaring a ToHost interface method having no application parameter,
+ * else it will lead to erroneous C code expansion
+ *
+ * \see NMFMETHVOID
+ * \hideinitializer
+ * \ingroup CM_MACROS
+ */
+#define NMFMETH(itfMethodName) \
+ itfMethodName(void *THIS, WITH_PARAM
+
+/*!
+ * \brief Macro to ease Dsp to Host interface method declaration (method without any user parameter)
+ *
+ * \attention <b>This macro is only ANSI C99 compliant</b>
+ *
+ * The <i>NMFMETHVOID</i> macro can be used to ease the declaration of ToHost interface methods without any user parameter.\n
+ * From Host side, today, we have no way to mask the multi-instance handling, so the user shall handle it by hand
+ * by passing the "component" context as first parameter of each ToHost interface method through the void *THIS parameter.
+ * This macro avoids any mistake in the THIS parameter declaration when it is never used by the user code.
+ *
+ * So, any ToHost interface method declaration having no application parameter like:\code
+ * void mynotify(void *THIS) {
+ * <body of the interface routine>
+ * }
+ * \endcode
+ * can be replaced by: \code
+ * void NMFMETHVOID(mynotify)(void) {
+ * <body of the interface routine>
+ * }
+ * \endcode
+ *
+ * \see NMFMETH
+ * \hideinitializer
+ * \ingroup CM_MACROS
+ */
+#define NMFMETHVOID(itfMethodName) \
+ itfMethodName(void *THIS WITH_NOPARAM
+
+#endif /* not Symbian environment or compiling with ARMCC and not in strict ANSI */
+
+#endif /* __INC_CM_MACROS_H */
+
diff --git a/drivers/staging/nmf-cm/cm/inc/cm_type.h b/drivers/staging/nmf-cm/cm/inc/cm_type.h
new file mode 100644
index 00000000000..780e27ca600
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm/inc/cm_type.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Component Manager types.
+ *
+ * This file contains the Component Manager types.
+ *
+ * \defgroup CM CM Type Definitions
+ * \ingroup CM_USER_API
+ */
+#ifndef _CM_TYPE_H_
+#define _CM_TYPE_H_
+
+#include <share/inc/nmf.h>
+#include <share/inc/macros.h>
+
+#include <nmf/inc/channel_type.h>
+
+/*!
+ * @defgroup t_cm_error t_cm_error
+ * \brief Description of the various errors returned by CM API routines
+ * @{
+ * \ingroup CM
+ */
+typedef t_nmf_error t_cm_error; //!< Error type returned by CM API routines
+
+/*********************************************************************************/
+/* WARNING: UPDATE CM_StringError() func each time an error is added/removed !!! */
+/* CM_StringError() is defined twice in: */
+/* nmf_core/host/cm/proxy/common/wrapper/src/wrapper.c */
+/* tests/src/common/nte/src/nte.c */
+/*********************************************************************************/
+#define CM_LAST_ERROR_ID ((t_cm_error)-128)
+#define CM_INTEGRATION_ERROR NMF_INTEGRATION_ERROR0 //!< \ref NMF_INTEGRATION_ERROR0
+
+ /* Communication */
+#define CM_FLUSH_MESSAGE NMF_FLUSH_MESSAGE //!< Message send after call to CM_FlushChannel()
+#define CM_BUFFER_OVERFLOW ((t_cm_error)-105) //!< Buffer overflow (interface binding message bigger than buffer)
+#define CM_USER_NOT_REGISTERED ((t_cm_error)-104) //!< User not registered
+#define CM_NO_MESSAGE NMF_NO_MESSAGE //!< \ref NMF_NO_MESSAGE
+#define CM_PARAM_FIFO_OVERFLOW ((t_cm_error)-102) //!< Param fifo overflow
+#define CM_INTERNAL_FIFO_OVERFLOW ((t_cm_error)-101) //!< Internal services fifo overflow (not returned to user)
+#define CM_MPC_NOT_RESPONDING ((t_cm_error)-100) //!< MPC not responding (either crash, interrupt handler too long, internal NMF fifo coms overflow, ...).
+
+ /* ELF & File system */
+#define CM_FS_ERROR ((t_cm_error)-96) //!< FileSystem error
+#define CM_NO_SUCH_FILE ((t_cm_error)-95) //!< No such file or directory
+#define CM_INVALID_ELF_FILE ((t_cm_error)-94) //!< File isn't a valid MMDSP ELF file
+#define CM_NO_SUCH_BASE ((t_cm_error)-93) //!< The memory base doesn't exist
+
+ /* Introspection */
+#define CM_NO_SUCH_ATTRIBUTE NMF_NO_SUCH_ATTRIBUTE //!< \ref NMF_NO_SUCH_ATTRIBUTE
+#define CM_NO_SUCH_PROPERTY NMF_NO_SUCH_PROPERTY //!< \ref NMF_NO_SUCH_PROPERTY
+
+ /* Component Life Cycle */
+#define CM_COMPONENT_NOT_STOPPED NMF_COMPONENT_NOT_STOPPED //!< \ref NMF_COMPONENT_NOT_STOPPED
+#define CM_COMPONENT_NOT_UNBINDED ((t_cm_error)-79) //!< Component must be fully unbinded before performing the operation
+#define CM_COMPONENT_NOT_STARTED ((t_cm_error)-78) //!< Component must be started to perform the operation
+#define CM_COMPONENT_WAIT_RUNNABLE ((t_cm_error)-76) //!< Component needs acknowledgement of the life cycle start function before performing the operation
+#define CM_REQUIRE_INTERFACE_UNBINDED ((t_cm_error)-75) //!< Required component interfaces must be binded before performing the operation
+#define CM_INVALID_COMPONENT_HANDLE ((t_cm_error)-74) //!< Try to access a component already destroyed
+
+ /* Binder */
+#define CM_NO_SUCH_PROVIDED_INTERFACE NMF_NO_SUCH_PROVIDED_INTERFACE //!< \ref NMF_NO_SUCH_PROVIDED_INTERFACE
+#define CM_NO_SUCH_REQUIRED_INTERFACE NMF_NO_SUCH_REQUIRED_INTERFACE //!< \ref NMF_NO_SUCH_REQUIRED_INTERFACE
+#define CM_ILLEGAL_BINDING ((t_cm_error)-62) //!< Client and server interface type mismatch
+#define CM_ILLEGAL_UNBINDING ((t_cm_error)-61) //!< Try to unbind component with bad binding Factories
+#define CM_INTERFACE_ALREADY_BINDED NMF_INTERFACE_ALREADY_BINDED//!< \ref NMF_INTERFACE_ALREADY_BINDED
+#define CM_INTERFACE_NOT_BINDED NMF_INTERFACE_NOT_BINDED //!< \ref NMF_INTERFACE_NOT_BINDED
+
+ /* Loader */
+#define CM_BINDING_COMPONENT_NOT_FOUND ((t_cm_error)-48) //!< Binding component template name doesn't exist in the components repository (should be generated with the nkitf tool)
+#define CM_COMPONENT_NOT_FOUND ((t_cm_error)-47) //!< Component template name doesn't exist in the components repository
+#define CM_NO_SUCH_SYMBOL ((t_cm_error)-46) //!< Symbol name isn't exported by the underlying component
+#define CM_COMPONENT_EXIST ((t_cm_error)-45) //!< Component name already exists in the component cache
+
+ /* Fifo management related ones */
+#define CM_FIFO_FULL ((t_cm_error)-40) //!< Fifo is full
+#define CM_FIFO_EMPTY ((t_cm_error)-39) //!< Fifo is empty
+#define CM_UNKNOWN_FIFO_ID ((t_cm_error)-38) //!< Fifo handle doesn't exist
+
+ /* Memory management related ones */
+#define CM_DOMAIN_VIOLATION ((t_cm_error)-33) //!< Domain violation
+#define CM_CREATE_ALLOC_ERROR ((t_cm_error)-32) //!< Error during allocator creation
+#define CM_UNKNOWN_MEMORY_HANDLE ((t_cm_error)-31) //!< Handle doesn't exists
+#define CM_NO_MORE_MEMORY NMF_NO_MORE_MEMORY //!< \ref NMF_NO_MORE_MEMORY
+#define CM_BAD_MEMORY_ALIGNMENT ((t_cm_error)-29) //!< Memory alignment wanted is not correct
+#define CM_MEMORY_HANDLE_FREED ((t_cm_error)-28) //!< Handle was already freed
+#define CM_INVALID_DOMAIN_DEFINITION ((t_cm_error)-27) //!< Domain to be created is not correctly defined
+#define CM_INTERNAL_DOMAIN_OVERFLOW ((t_cm_error)-26) //!< Internal domain descriptor overflow (too many domains) //TODO, juraj, remove this error
+#define CM_INVALID_DOMAIN_HANDLE ((t_cm_error)-25) //!< Invalid domain handle
+#define CM_ILLEGAL_DOMAIN_OPERATION ((t_cm_error)-21) //!< Operation on a domain is illegal (like destroy of a domain with referenced components)
+
+ /* Media Processor related ones */
+#define CM_MPC_INVALID_CONFIGURATION ((t_cm_error)-24) //!< Media Processor Core invalid configuration
+#define CM_MPC_NOT_INITIALIZED ((t_cm_error)-23) //!< Media Processor Core not yet initialized
+#define CM_MPC_ALREADY_INITIALIZED ((t_cm_error)-22) //!< Media Processor Core already initialized
+//ERROR 21 is defined above, with the domains
+
+ /* Power Mgt related ones */
+#define CM_PWR_NOT_AVAILABLE ((t_cm_error)-16) //!< No modification of the state of the power input
+
+ /* Common errors */
+#define CM_INVALID_DATA ((t_cm_error)-4) //!< Invalid internal data encountered
+#define CM_OUT_OF_LIMITS ((t_cm_error)-3) //!< User reached an internal NMF limit defined in limits.h
+#define CM_INVALID_PARAMETER NMF_INVALID_PARAMETER //!< \ref NMF_INVALID_PARAMETER
+#define CM_NOT_YET_IMPLEMENTED ((t_cm_error)-1) //!< CM API not yet implemented
+#define CM_OK NMF_OK //!< \ref NMF_OK
+
+/** @} */
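+
+/*
+ * Usage note (illustrative only): the codes above appear to follow the
+ * usual CM convention of CM_OK for success and negative t_cm_error values
+ * for failures, so callers typically compare the return value against
+ * CM_OK directly.  A minimal sketch, assuming a hypothetical
+ * CM_SomeOperation() call:
+ *
+ *	t_cm_error err = CM_SomeOperation();
+ *	if (err != CM_OK) {
+ *		pr_err("CM operation failed: %d\n", (int)err);
+ *		return err;
+ *	}
+ */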
+
+/*!
+ * \brief Definition of a physical memory address
+ * \ingroup MEMORY
+ */
+typedef t_uint32 t_cm_physical_address;
+
+/*!
+ * \brief Definition of a logical memory address
+ * \ingroup MEMORY
+ */
+typedef t_uint32 t_cm_logical_address;
+
+/*!
+ * \brief Definition of a system address into a system with MMU
+ * \ingroup MEMORY
+ */
+typedef struct {
+ t_cm_physical_address physical; //!< Physical memory address
+ t_cm_logical_address logical; //!< Logical memory address
+} t_cm_system_address;
+#define INVALID_SYSTEM_ADDRESS {(t_cm_physical_address)MASK_ALL32, (t_cm_logical_address)MASK_ALL32}
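+/*
+ * Illustrative only: INVALID_SYSTEM_ADDRESS can be used as a brace
+ * initializer for a t_cm_system_address that is not yet mapped, e.g.
+ *
+ *	t_cm_system_address addr = INVALID_SYSTEM_ADDRESS;
+ */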
+
+
+/*!
+ * \brief Definition of a type used to manipulate the size of various buffers
+ * \ingroup MEMORY
+ */
+typedef t_uint32 t_cm_size;
+
+#endif /* _CM_TYPE_H_ */
+
diff --git a/drivers/staging/nmf-cm/cm_debug.c b/drivers/staging/nmf-cm/cm_debug.c
new file mode 100644
index 00000000000..ffb067c65d9
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm_debug.c
@@ -0,0 +1,840 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/proc_fs.h>
+
+#include "osal-kernel.h"
+#include "cm_debug.h"
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/sched.h>
+
+static struct dentry *cm_dir; /* nmf-cm/ */
+static struct dentry *proc_dir; /* nmf-cm/proc/ */
+static struct dentry *core_dir; /* nmf-cm/dsp/ */
+static struct dentry *domain_dir; /* nmf-cm/domains/ */
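+
+/*
+ * Rough overview of the debugfs hierarchy built by this file (derived
+ * from the create functions below; DEBUGFS_ROOT is defined elsewhere):
+ *
+ *	<DEBUGFS_ROOT>/
+ *	    proc/<pid>/{components,domains}/   symlinks per client process
+ *	    dsp/<mpc>/meminfo                  allocator status per memory bank
+ *	    dsp/<mpc>/{running,load,requested_opp}
+ *	    dsp/<mpc>/components/<name>-<inst> one file per component instance
+ *	    dsp/<mpc>/domains/<id>             symlinks to domains/<id>
+ *	    dsp/<mpc>/snapshot/{esram,sdram_code,sdram_data,tcm24}
+ *	    domains/<id>                       memory usage per domain
+ */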
+
+/* component data management */
+struct cm_debug_component_cooky {
+ struct dentry *comp_file; /* entry in nmf-cm/dsp/sxa/components/ */
+ struct dentry *proc_link; /* entry in nmf-cm/proc/ */
+};
+
+static ssize_t component_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos) {
+ t_component_instance *component = file->f_dentry->d_inode->i_private;
+ char buf[640];
+ int ret=0;
+
+ OSAL_LOCK_API();
+ if ((component != NULL) && (component->dbgCooky != NULL)) {
+ char nb_i[16] = "";
+ int i;
+
+ if (component->Template->classe == SINGLETON)
+ snprintf(nb_i, sizeof(nb_i), " (%d)",
+ component->Template->numberOfInstance);
+
+ ret = snprintf(buf, sizeof(buf),
+ "Name:\t\t%s <%s>\n"
+ "Class:\t\t%s%s\n"
+ "State:\t\t%s\n"
+ "Priority:\t%u\n"
+ "Domain:\t\t%u\n\n"
+ "Memory : Physical address Logical address"
+ " DSP address Size\n"
+ "---------------------------------------------"
+ "-----------------------------\n",
+ component->pathname,
+ component->Template->name,
+ component->Template->classe == COMPONENT ?
+ "Component" :
+ (component->Template->classe == SINGLETON ?
+ "Singleton" :
+ (component->Template->classe == FIRMWARE ?
+ "Firmware" :
+ "?")),
+ nb_i,
+ component->state == STATE_RUNNABLE ? "Runnable" :
+ (component->state == STATE_STOPPED ? "Stopped" :
+ "None"),
+ (unsigned)component->priority,
+ component->domainId
+ );
+
+ for (i=0; i<NUMBER_OF_MMDSP_MEMORY && ret<sizeof(buf); i++) {
+ if (component->memories[i]) {
+ t_cm_system_address addr;
+ t_uint32 dspAddr, dspSize;
+ cm_DSP_GetHostSystemAddress(
+ component->memories[i], &addr);
+ cm_DSP_GetDspAddress(
+ component->memories[i], &dspAddr);
+ cm_DSP_GetDspMemoryHandleSize(
+ component->memories[i], &dspSize);
+ ret += snprintf(
+ &buf[ret], sizeof(buf)-ret,
+ "%-10s: %p-%p %p-%p %p-%p %8lu\n",
+ MMDSP_getMappingById(i)->memoryName,
+ (void *)addr.physical,
+ (void *)addr.physical
+ + component->memories[i]->size-1,
+ (void *)addr.logical,
+ (void *)addr.logical
+ + component->memories[i]->size-1,
+ (void *)dspAddr,
+ (void *)dspAddr + dspSize - 1,
+ component->memories[i]->size);
+ }
+ }
+ }
+
+ OSAL_UNLOCK_API();
+ return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+}
+
+static const struct file_operations component_fops = {
+ .read = component_read,
+};
+
+static void cm_debug_component_create(t_component_instance *component)
+{
+ char tmp[12+MAX_COMPONENT_NAME_LENGTH];
+ struct cm_debug_component_cooky *cooky;
+ struct mpcConfig *mpc;
+ mpc = &osalEnv.mpc[COREIDX(component->Template->dspId)];
+
+ cooky = OSAL_Alloc_Zero(sizeof(*cooky));
+ if (cooky == NULL)
+ return;
+
+ component->dbgCooky = cooky;
+ sprintf(tmp, "%s-%08x", component->pathname,
+ (unsigned int)component->instance);
+ cooky->comp_file = debugfs_create_file(tmp, S_IRUSR|S_IRGRP,
+ mpc->comp_dir,
+ component, &component_fops);
+ if (IS_ERR(cooky->comp_file)) {
+ if (PTR_ERR(cooky->comp_file) != -ENODEV)
+ pr_info("CM: Can't create dsp/%s/components/%s"
+ "debugfs file: %ld\n",
+ mpc->name,
+ tmp,
+ PTR_ERR(cooky->comp_file));
+ cooky->comp_file = NULL;
+ } else {
+ char target_lnk[40+MAX_COMPONENT_NAME_LENGTH];
+ sprintf(target_lnk, "../../../dsp/%s/components/%s-%08x",
+ mpc->name,
+ component->pathname,
+ (unsigned int)component->instance);
+
+ /* Some firmware, like the Executive Engine, does not belong
+ to any process */
+ if (domainDesc[component->domainId].client == current->tgid) {
+ struct list_head* head;
+ struct cm_process_priv *entry = NULL;
+ /* Search the entry for the calling process */
+ list_for_each(head, &process_list) {
+ entry = list_entry(head,
+ struct cm_process_priv,
+ entry);
+ if (entry->pid == current->tgid)
+ break;
+ }
+
+ if (entry) {
+ cooky->proc_link = debugfs_create_symlink(
+ tmp,
+ entry->comp_dir,
+ target_lnk);
+ if (IS_ERR(cooky->proc_link)) {
+ long err = PTR_ERR(cooky->proc_link);
+ if (err != -ENODEV)
+ pr_info("CM: Can't create "
+ "proc/%d/%s "
+ "debugfs link: %ld\n",
+ entry->pid, tmp, err);
+ cooky->proc_link = NULL;
+ }
+ }
+ }
+ }
+}
+
+static void cm_debug_component_destroy(t_component_instance *component)
+{
+ struct cm_debug_component_cooky *cooky = component->dbgCooky;
+
+ if (cooky) {
+ component->dbgCooky = NULL;
+ debugfs_remove(cooky->proc_link);
+ debugfs_remove(cooky->comp_file);
+ OSAL_Free(cooky);
+ }
+}
+
+/* domain data management */
+struct cm_debug_domain_cooky {
+ struct dentry *domain_file; /* entry in nmf-cm/components/ */
+ struct dentry *proc_link; /* entry in nmf-cm/proc/ */
+ struct dentry *dsp_link; /* entry in nmf-cm/dsp/sxa/domains */
+};
+
+static ssize_t domain_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ t_cm_domain_id id =
+ (t_cm_domain_id)(long)file->f_dentry->d_inode->i_private;
+ t_cm_domain_desc *domain = &domainDesc[id];
+
+ char buf[640];
+ int ret=0;
+
+ OSAL_LOCK_API();
+ if ((domain->domain.coreId != MASK_ALL8)
+ && (domain->dbgCooky != NULL)) {
+ t_cm_allocator_status status;
+ t_uint32 dOffset;
+ t_uint32 dSize;
+ if (domain->domain.coreId != ARM_CORE_ID) {
+ t_cm_domain_info info;
+
+ cm_DM_GetDomainAbsAdresses(id, &info);
+ cm_DSP_GetInternalMemoriesInfo(id, ESRAM_CODE,
+ &dOffset, &dSize);
+ cm_MM_GetAllocatorStatus(
+ cm_DSP_GetAllocator(domain->domain.coreId,
+ ESRAM_CODE),
+ dOffset, dSize, &status);
+ ret = snprintf(
+ buf, sizeof(buf),
+ "Core:\t%s\n\n"
+ "Memory : Physical address Logical address"
+ " Size Free Used\n"
+ "---------------------------------------------"
+ "-----------------------------\n"
+ "ESRAM Code: %08x-%08lx %08x-%08lx\t%8lu %8lu "
+ "%8lu\n",
+ osalEnv.mpc[COREIDX(domain->domain.coreId)].name,
+ (unsigned int)info.esramCode.physical,
+ domain->domain.esramCode.size ?
+ info.esramCode.physical
+ + domain->domain.esramCode.size - 1 : 0,
+ (unsigned int)info.esramCode.logical,
+ domain->domain.esramCode.size ?
+ info.esramCode.logical
+ + domain->domain.esramCode.size - 1 : 0,
+ domain->domain.esramCode.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+
+ cm_DSP_GetInternalMemoriesInfo(id, ESRAM_EXT24,
+ &dOffset, &dSize);
+ cm_MM_GetAllocatorStatus(
+ cm_DSP_GetAllocator(domain->domain.coreId,
+ ESRAM_EXT24),
+ dOffset, dSize, &status);
+ ret += snprintf(
+ &buf[ret], sizeof(buf)-ret,
+ "ESRAM Data: %08x-%08lx "
+ "%08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)info.esramData.physical,
+ domain->domain.esramData.size ?
+ info.esramData.physical
+ + domain->domain.esramData.size - 1 : 0,
+ (unsigned int)info.esramData.logical,
+ domain->domain.esramData.size ?
+ info.esramData.logical
+ + domain->domain.esramData.size - 1 : 0,
+ domain->domain.esramData.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+
+ cm_DSP_GetInternalMemoriesInfo(id, SDRAM_CODE,
+ &dOffset, &dSize);
+ cm_MM_GetAllocatorStatus(
+ cm_DSP_GetAllocator(domain->domain.coreId,
+ SDRAM_CODE),
+ dOffset, dSize, &status);
+ ret += snprintf(
+ &buf[ret], sizeof(buf)-ret,
+ "SDRAM Code: %08x-%08lx "
+ "%08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)info.sdramCode.physical,
+ domain->domain.sdramCode.size ?
+ info.sdramCode.physical +
+ domain->domain.sdramCode.size - 1 : 0,
+ (unsigned int)info.sdramCode.logical,
+ domain->domain.sdramCode.size ?
+ info.sdramCode.logical +
+ domain->domain.sdramCode.size - 1 : 0,
+ domain->domain.sdramCode.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+
+ cm_DSP_GetInternalMemoriesInfo(id, SDRAM_EXT24,
+ &dOffset, &dSize);
+ cm_MM_GetAllocatorStatus(
+ cm_DSP_GetAllocator(domain->domain.coreId,
+ SDRAM_EXT24),
+ dOffset, dSize, &status);
+ ret += snprintf(
+ &buf[ret], sizeof(buf)-ret,
+ "SDRAM Data: %08x-%08lx "
+ "%08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)info.sdramData.physical,
+ domain->domain.sdramData.size ?
+ info.sdramData.physical +
+ domain->domain.sdramData.size - 1 : 0,
+ (unsigned int)info.sdramData.logical,
+ domain->domain.sdramData.size ?
+ info.sdramData.logical +
+ domain->domain.sdramData.size - 1 : 0,
+ domain->domain.sdramData.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+ } else {
+ t_cm_system_address addr;
+ ret = snprintf(
+ buf, sizeof(buf),
+ "Core:\tarm\n\n"
+ "Memory : Physical address Logical "
+ "address Size Free Used\n"
+ "---------------------------------------"
+ "-----------------------------------\n");
+ if (domain->domain.esramCode.size &&
+ cm_DSP_GetDspBaseAddress(ARM_CORE_ID,
+ ESRAM_CODE,
+ &addr) == CM_OK) {
+ cm_DSP_GetInternalMemoriesInfo(id, ESRAM_CODE,
+ &dOffset,
+ &dSize);
+ cm_MM_GetAllocatorStatus(
+ cm_DSP_GetAllocator(ARM_CORE_ID,
+ ESRAM_CODE),
+ dOffset, dSize, &status);
+ ret += snprintf(
+ &buf[ret], sizeof(buf)-ret,
+ "ESRAM Code: %08x-%08lx "
+ "%08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)addr.physical,
+ addr.physical +
+ domain->domain.esramCode.size - 1,
+ (unsigned int)addr.logical,
+ addr.logical +
+ domain->domain.esramCode.size - 1,
+ domain->domain.esramCode.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+ }
+ if (domain->domain.esramData.size &&
+ cm_DSP_GetDspBaseAddress(ARM_CORE_ID,
+ ESRAM_EXT24,
+ &addr) == CM_OK) {
+ cm_DSP_GetInternalMemoriesInfo(id, ESRAM_EXT24,
+ &dOffset,
+ &dSize);
+ cm_MM_GetAllocatorStatus(
+ cm_DSP_GetAllocator(ARM_CORE_ID,
+ ESRAM_EXT24),
+ dOffset, dSize, &status);
+ ret += snprintf(
+ &buf[ret], sizeof(buf)-ret,
+ "ESRAM Data: %08x-%08lx "
+ "%08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)addr.physical,
+ addr.physical +
+ domain->domain.esramData.size - 1,
+ (unsigned int)addr.logical,
+ addr.logical +
+ domain->domain.esramData.size - 1,
+ domain->domain.esramData.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+ }
+ if (domain->domain.sdramCode.size &&
+ cm_DSP_GetDspBaseAddress(ARM_CORE_ID,
+ SDRAM_CODE,
+ &addr) == CM_OK) {
+ cm_DSP_GetInternalMemoriesInfo(id, SDRAM_CODE,
+ &dOffset,
+ &dSize);
+ cm_MM_GetAllocatorStatus(
+ cm_DSP_GetAllocator(ARM_CORE_ID,
+ SDRAM_CODE),
+ dOffset, dSize, &status);
+ ret += snprintf(
+ &buf[ret], sizeof(buf)-ret,
+ "SDRAM Code: %08x-%08lx %08x-%08lx\t"
+ "%8lu %8lu %8lu\n",
+ (unsigned int)addr.physical,
+ addr.physical +
+ domain->domain.sdramCode.size - 1,
+ (unsigned int)addr.logical,
+ addr.logical +
+ domain->domain.sdramCode.size - 1,
+ domain->domain.sdramCode.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+ }
+ if (domain->domain.sdramData.size &&
+ cm_DSP_GetDspBaseAddress(ARM_CORE_ID,
+ SDRAM_EXT24,
+ &addr) == CM_OK) {
+ cm_DSP_GetInternalMemoriesInfo(id, SDRAM_EXT24,
+ &dOffset,
+ &dSize);
+ cm_MM_GetAllocatorStatus(
+ cm_DSP_GetAllocator(ARM_CORE_ID,
+ SDRAM_EXT24),
+ dOffset, dSize, &status);
+ ret += snprintf(
+ &buf[ret], sizeof(buf)-ret,
+ "SDRAM Data: %08x-%08lx %08x-%08lx\t"
+ "%8lu %8lu %8lu\n",
+ (unsigned int)addr.physical,
+ addr.physical +
+ domain->domain.sdramData.size - 1,
+ (unsigned int)addr.logical,
+ addr.logical +
+ domain->domain.sdramData.size - 1,
+ domain->domain.sdramData.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+ }
+ }
+ }
+ OSAL_UNLOCK_API();
+ return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+}
+
+static const struct file_operations domain_fops = {
+ .read = domain_read,
+};
+
+static void cm_debug_domain_create(t_cm_domain_id id)
+{
+ char tmp[12];
+ struct cm_debug_domain_cooky *cooky;
+
+ cooky = OSAL_Alloc_Zero(sizeof(*cooky));
+ if (cooky == NULL)
+ return;
+
+ domainDesc[id].dbgCooky = cooky;
+ sprintf(tmp, "%u", id);
+ cooky->domain_file = debugfs_create_file(tmp, S_IRUSR|S_IRGRP,
+ domain_dir,
+ (void *)(long)id,
+ &domain_fops);
+ if (IS_ERR(cooky->domain_file)) {
+ if (PTR_ERR(cooky->domain_file) != -ENODEV)
+ pr_err("CM: Can't create domains/%s debugfs "
+ "file: %ld\n", tmp,
+ PTR_ERR(cooky->domain_file));
+ cooky->domain_file = NULL;
+ } else {
+ char target_lnk[40];
+ sprintf(target_lnk, "../../../domains/%u", id);
+
+ if (domainDesc[id].client != NMF_CORE_CLIENT) {
+ struct list_head* head;
+ struct cm_process_priv *entry = NULL;
+
+ /* Search the entry for the target process */
+ list_for_each(head, &process_list) {
+ entry = list_entry(head,
+ struct cm_process_priv,
+ entry);
+ if (entry->pid == domainDesc[id].client)
+ break;
+ }
+
+ if (entry) {
+ cooky->proc_link = debugfs_create_symlink(
+ tmp,
+ entry->domain_dir,
+ target_lnk);
+ if (IS_ERR(cooky->proc_link)) {
+ long err = PTR_ERR(cooky->proc_link);
+ if (err != -ENODEV)
+ pr_err("CM: Can't create "
+ "proc/%d/domains/%s "
+ "debugfs link: %ld\n",
+ entry->pid, tmp, err);
+ cooky->proc_link = NULL;
+ }
+ }
+ }
+ if (domainDesc[id].domain.coreId != ARM_CORE_ID) {
+ cooky->dsp_link =
+ debugfs_create_symlink(
+ tmp,
+ osalEnv.mpc[COREIDX(domainDesc[id].domain.coreId)].domain_dir,
+ target_lnk);
+ if (IS_ERR(cooky->dsp_link)) {
+ if (PTR_ERR(cooky->dsp_link) != -ENODEV)
+ pr_err("CM: Can't create dsp/%s/domains/%s "
+ "debugfs link: %ld\n",
+ osalEnv.mpc[COREIDX(domainDesc[id].domain.coreId)].name,
+ tmp,
+ PTR_ERR(cooky->dsp_link));
+ cooky->dsp_link = NULL;
+ }
+ }
+ }
+}
+
+static void cm_debug_domain_destroy(t_cm_domain_id id)
+{
+ struct cm_debug_domain_cooky *cooky = domainDesc[id].dbgCooky;
+ if (cooky) {
+ domainDesc[id].dbgCooky = NULL;
+ debugfs_remove(cooky->proc_link);
+ debugfs_remove(cooky->dsp_link);
+ debugfs_remove(cooky->domain_file);
+ OSAL_Free(cooky);
+ }
+}
+
+/* proc directory */
+void cm_debug_proc_init(struct cm_process_priv *entry)
+{
+ char tmp[PROC_NUMBUF];
+ sprintf(tmp, "%d", entry->pid);
+ entry->dir = debugfs_create_dir(tmp, proc_dir);
+ if (IS_ERR(entry->dir)) {
+ if (PTR_ERR(entry->dir) != -ENODEV)
+ pr_info("CM: Can't create proc/%d debugfs directory: "
+ "%ld\n", entry->pid, PTR_ERR(entry->dir));
+ entry->dir = NULL;
+ return;
+ }
+ entry->comp_dir = debugfs_create_dir("components", entry->dir);
+ if (IS_ERR(entry->comp_dir)) {
+ if (PTR_ERR(entry->comp_dir) != -ENODEV)
+ pr_info("CM: Can't create proc/%d/components debugfs "
+ "directory: %ld\n", entry->pid,
+ PTR_ERR(entry->comp_dir));
+ entry->comp_dir = NULL;
+ }
+ entry->domain_dir = debugfs_create_dir("domains", entry->dir);
+ if (IS_ERR(entry->domain_dir)) {
+ if (PTR_ERR(entry->domain_dir) != -ENODEV)
+ pr_info("CM: Can't create proc/%d/domains debugfs "
+ "directory: %ld\n", entry->pid,
+ PTR_ERR(entry->domain_dir));
+ entry->domain_dir = NULL;
+ }
+}
+
+/* DSP meminfo */
+static ssize_t meminfo_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ const t_nmf_core_id id =
+ *(t_nmf_core_id *)file->f_dentry->d_inode->i_private;
+ char buf[640];
+ int ret=0;
+ t_cm_allocator_status status;
+ t_cm_system_address addr;
+
+ OSAL_LOCK_API();
+ cm_MM_GetAllocatorStatus(cm_DSP_GetAllocator(id, ESRAM_CODE),
+ 0, 0, &status);
+ cm_DSP_GetDspBaseAddress(id, ESRAM_CODE, &addr);
+ ret = snprintf(buf, sizeof(buf),
+ "Memory : Physical address Logical address Size "
+ " Free Used\n"
+ "-------------------------------------------------------"
+ "-------------------\n"
+ "ESRAM Code: %08x-%08lx %08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)addr.physical,
+ addr.physical + status.global.size - 1,
+ (unsigned int)addr.logical,
+ addr.logical + status.global.size - 1,
+ status.global.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+
+ cm_MM_GetAllocatorStatus(cm_DSP_GetAllocator(id, ESRAM_EXT24),
+ 0, 0, &status);
+ cm_DSP_GetDspBaseAddress(id, ESRAM_EXT24, &addr);
+ ret += snprintf(&buf[ret], sizeof(buf)-ret,
+ "ESRAM Data: %08x-%08lx %08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)addr.physical,
+ addr.physical + status.global.size - 1,
+ (unsigned int)addr.logical,
+ addr.logical + status.global.size - 1,
+ status.global.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+
+ cm_MM_GetAllocatorStatus(cm_DSP_GetAllocator(id, SDRAM_CODE),
+ 0, 0, &status);
+ cm_DSP_GetDspBaseAddress(id, SDRAM_CODE, &addr);
+ ret += snprintf(&buf[ret], sizeof(buf)-ret,
+ "SDRAM Code: %08x-%08lx %08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)addr.physical,
+ addr.physical + status.global.size - 1,
+ (unsigned int)addr.logical,
+ addr.logical + status.global.size - 1,
+ status.global.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+
+ cm_MM_GetAllocatorStatus(cm_DSP_GetAllocator(id, SDRAM_EXT24),
+ 0, 0, &status);
+ cm_DSP_GetDspBaseAddress(id, SDRAM_EXT24, &addr);
+ ret += snprintf(&buf[ret], sizeof(buf)-ret,
+ "SDRAM Data: %08x-%08lx %08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)addr.physical,
+ addr.physical + status.global.size - 1,
+ (unsigned int)addr.logical,
+ addr.logical + status.global.size - 1,
+ status.global.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+
+ cm_MM_GetAllocatorStatus(cm_DSP_GetAllocator(id, INTERNAL_XRAM24),
+ 0, 0, &status);
+ cm_DSP_GetDspBaseAddress(id, INTERNAL_XRAM24, &addr);
+ ret += snprintf(&buf[ret], sizeof(buf)-ret,
+ "TCM XRAM : %08x-%08lx %08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)addr.physical,
+ addr.physical + status.global.size - 1,
+ (unsigned int)addr.logical,
+ addr.logical + status.global.size - 1,
+ status.global.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+
+ cm_MM_GetAllocatorStatus(cm_DSP_GetAllocator(id, INTERNAL_YRAM24),
+ 0, 0, &status);
+ cm_DSP_GetDspBaseAddress(id, INTERNAL_YRAM24, &addr);
+ ret += snprintf(&buf[ret], sizeof(buf)-ret,
+ "TCM YRAM : %08x-%08lx %08x-%08lx\t%8lu %8lu %8lu\n",
+ (unsigned int)addr.physical,
+ addr.physical + status.global.size - 1,
+ (unsigned int)addr.logical,
+ addr.logical + status.global.size - 1,
+ status.global.size,
+ status.global.accumulate_free_memory,
+ status.global.accumulate_used_memory);
+
+ OSAL_UNLOCK_API();
+ return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+}
+
+static const struct file_operations mem_fops = {
+ .read = meminfo_read,
+};
+
+/* ESRAM file operations */
+static int esram_open(struct inode *inode, struct file *file)
+{
+ int i, err=0;
+ for (i=0; i<NB_ESRAM; i++) {
+ if (regulator_enable(osalEnv.esram_regulator[i]) < 0) {
+ pr_err("CM (%s): can't enable regulator"
+ "for esram bank %s\n", __func__,
+ i ? "34" : "12");
+ err = -EIO;
+ break;
+ }
+ }
+
+ if (err) {
+ for (i--; i>=0; i--)
+ regulator_disable(osalEnv.esram_regulator[i]);
+ }
+
+ return err;
+}
+
+static int esram_release(struct inode *inode, struct file *file)
+{
+ int i;
+ for (i=0; i<NB_ESRAM; i++)
+ regulator_disable(osalEnv.esram_regulator[i]);
+ return 0;
+}
+
+static ssize_t esram_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(user_buf, count, ppos,
+ osalEnv.esram_base,
+ cfgESRAMSize*ONE_KB);
+}
+
+static const struct file_operations esram_fops = {
+ .read = esram_read,
+ .open = esram_open,
+ .release = esram_release,
+};
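+
+/*
+ * Snapshot files: reading 'esram' keeps the ESRAM bank regulators enabled
+ * for the duration of the read (see esram_open()/esram_release() above),
+ * while the SDRAM and TCM contents are exposed as plain debugfs blobs
+ * (see cm_debug_init() and cm_debug_create_tcm_file() below).
+ */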
+
+
+/* TCM file */
+void cm_debug_create_tcm_file(unsigned mpc_index)
+{
+ osalEnv.mpc[mpc_index].tcm_file = debugfs_create_blob(
+ "tcm24", S_IRUSR|S_IRGRP|S_IROTH,
+ osalEnv.mpc[mpc_index].snapshot_dir,
+ &osalEnv.mpc[mpc_index].base);
+ if (IS_ERR(osalEnv.mpc[mpc_index].tcm_file)) {
+ if (PTR_ERR(osalEnv.mpc[mpc_index].tcm_file) != -ENODEV)
+ pr_info("CM: Can't create dsp/%s/tcm24 debugfs "
+ "directory: %ld\n", osalEnv.mpc[mpc_index].name,
+ PTR_ERR(osalEnv.mpc[mpc_index].tcm_file));
+ osalEnv.mpc[mpc_index].tcm_file = NULL;
+ }
+}
+
+void cm_debug_destroy_tcm_file(unsigned mpc_index)
+{
+ debugfs_remove(osalEnv.mpc[mpc_index].tcm_file);
+}
+
+/* Global init */
+void cm_debug_init(void)
+{
+ int i;
+
+ cm_dir = debugfs_create_dir(DEBUGFS_ROOT, NULL);
+ if (IS_ERR(cm_dir)) {
+ if (PTR_ERR(cm_dir) != -ENODEV)
+ pr_info("CM: Can't create root debugfs directory: "
+ "%ld\n", PTR_ERR(cm_dir));
+ cm_dir = NULL;
+ return;
+ }
+
+ proc_dir = debugfs_create_dir("proc", cm_dir);
+ if (IS_ERR(proc_dir)) {
+ if (PTR_ERR(proc_dir) != -ENODEV)
+ pr_info("CM: Can't create 'proc' debugfs directory: "
+ "%ld\n", PTR_ERR(proc_dir));
+ proc_dir = NULL;
+ }
+
+ core_dir = debugfs_create_dir("dsp", cm_dir);
+ if (IS_ERR(core_dir)) {
+ if (PTR_ERR(core_dir) != -ENODEV)
+ pr_info("CM: Can't create 'dsp' debugfs directory: %ld\n",
+ PTR_ERR(core_dir));
+ core_dir = NULL;
+ }
+
+ domain_dir = debugfs_create_dir("domains", cm_dir);
+ if (IS_ERR(domain_dir)) {
+ if (PTR_ERR(domain_dir) != -ENODEV)
+ pr_info("CM: Can't create 'domains' debugfs directory: "
+ "%ld\n",
+ PTR_ERR(domain_dir));
+ domain_dir = NULL;
+ } else {
+ osal_debug_ops.domain_create = cm_debug_domain_create;
+ osal_debug_ops.domain_destroy = cm_debug_domain_destroy;
+ }
+
+ for (i=0; i<NB_MPC; i++) {
+ osalEnv.mpc[i].dir = debugfs_create_dir(osalEnv.mpc[i].name,
+ core_dir);
+ if (IS_ERR(osalEnv.mpc[i].dir)) {
+ if (PTR_ERR(osalEnv.mpc[i].dir) != -ENODEV)
+ pr_info("CM: Can't create %s debugfs directory: "
+ "%ld\n",
+ osalEnv.mpc[i].name,
+ PTR_ERR(osalEnv.mpc[i].dir));
+ osalEnv.mpc[i].dir = NULL;
+ } else {
+ osalEnv.mpc[i].mem_file =
+ debugfs_create_file("meminfo", S_IRUSR|S_IRGRP,
+ osalEnv.mpc[i].dir,
+ (void*)&osalEnv.mpc[i].coreId,
+ &mem_fops);
+ if (IS_ERR(osalEnv.mpc[i].mem_file)) {
+ if (PTR_ERR(osalEnv.mpc[i].mem_file) != -ENODEV)
+ pr_err("CM: Can't create dsp/%s/meminfo "
+ "debugfs file: %ld\n",
+ osalEnv.mpc[i].name,
+ PTR_ERR(osalEnv.mpc[i].mem_file));
+ osalEnv.mpc[i].mem_file = NULL;
+ }
+
+ osalEnv.mpc[i].comp_dir = debugfs_create_dir(
+ "components",
+ osalEnv.mpc[i].dir);
+ if (IS_ERR(osalEnv.mpc[i].comp_dir)) {
+ if (PTR_ERR(osalEnv.mpc[i].comp_dir) != -ENODEV)
+ pr_info("CM: Can't create "
+ "'dsp/%s/components' debugfs "
+ "directory: %ld\n",
+ osalEnv.mpc[i].name,
+ PTR_ERR(osalEnv.mpc[i].comp_dir));
+ osalEnv.mpc[i].comp_dir = NULL;
+ }
+
+ osalEnv.mpc[i].domain_dir =
+ debugfs_create_dir("domains",
+ osalEnv.mpc[i].dir);
+ if (IS_ERR(osalEnv.mpc[i].domain_dir)) {
+ if (PTR_ERR(osalEnv.mpc[i].domain_dir) != -ENODEV)
+ pr_info("CM: Can't create "
+ "'dsp/%s/domains' "
+ "debugfs directory: %ld\n",
+ osalEnv.mpc[i].name,
+ PTR_ERR(osalEnv.mpc[i].domain_dir));
+ osalEnv.mpc[i].domain_dir = NULL;
+ }
+
+ osalEnv.mpc[i].snapshot_dir = debugfs_create_dir(
+ "snapshot",
+ osalEnv.mpc[i].dir);
+ if (IS_ERR(osalEnv.mpc[i].snapshot_dir)) {
+ if (PTR_ERR(osalEnv.mpc[i].snapshot_dir) != -ENODEV)
+ pr_info("CM: Can't create "
+ "'dsp/%s/snapshot' debugfs "
+ "directory: %ld\n",
+ osalEnv.mpc[i].name,
+ PTR_ERR(osalEnv.mpc[i].snapshot_dir));
+ osalEnv.mpc[i].snapshot_dir = NULL;
+ } else {
+ debugfs_create_file("esram", S_IRUSR|S_IRGRP|S_IROTH,
+ osalEnv.mpc[i].snapshot_dir,
+ &osalEnv.esram_base,
+ &esram_fops);
+ debugfs_create_blob("sdram_data", S_IRUSR|S_IRGRP|S_IROTH,
+ osalEnv.mpc[i].snapshot_dir,
+ &osalEnv.mpc[i].sdram_data);
+ debugfs_create_blob("sdram_code", S_IRUSR|S_IRGRP|S_IROTH,
+ osalEnv.mpc[i].snapshot_dir,
+ &osalEnv.mpc[i].sdram_code);
+ }
+
+ debugfs_create_bool("running", S_IRUSR|S_IRGRP,
+ osalEnv.mpc[i].dir,
+ (u32 *)&osalEnv.mpc[i].monitor_tsk);
+ debugfs_create_u8("load", S_IRUSR|S_IRGRP,
+ osalEnv.mpc[i].dir,
+ &osalEnv.mpc[i].load);
+ debugfs_create_u8("requested_opp", S_IRUSR|S_IRGRP,
+ osalEnv.mpc[i].dir,
+ &osalEnv.mpc[i].opp_request);
+ }
+ }
+ osal_debug_ops.component_create = cm_debug_component_create;
+ osal_debug_ops.component_destroy = cm_debug_component_destroy;
+}
+
+void cm_debug_exit(void)
+{
+ debugfs_remove_recursive(cm_dir);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/staging/nmf-cm/cm_debug.h b/drivers/staging/nmf-cm/cm_debug.h
new file mode 100644
index 00000000000..26c80682d11
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm_debug.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef CM_DEBUG_H
+#define CM_DEBUG_H
+
+#ifdef CONFIG_DEBUG_FS
+#include "cmld.h"
+
+void cm_debug_init(void);
+void cm_debug_exit(void);
+void cm_debug_proc_init(struct cm_process_priv *entry);
+void cm_debug_create_tcm_file(unsigned mpc_index);
+void cm_debug_destroy_tcm_file(unsigned mpc_index);
+
+#else
+
+#define cm_debug_init()
+#define cm_debug_exit()
+#define cm_debug_proc_init(entry)
+#define cm_debug_create_tcm_file(mpc_index)
+#define cm_debug_destroy_tcm_file(mpc_index)
+
+#endif /* CONFIG_DEBUG_FS */
+#endif /* CM_DEBUG_H */
diff --git a/drivers/staging/nmf-cm/cm_dma.c b/drivers/staging/nmf-cm/cm_dma.c
new file mode 100644
index 00000000000..652b504324c
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm_dma.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <asm/io.h>
+#include <mach/db8500-regs.h>
+
+#include "cm_dma.h"
+
+#define CMDMA_LIDX (2)
+#define CMDMA_REG_LCLA (0x024)
+
+static void __iomem *virtbase = NULL;
+
+static int cmdma_write_cyclic_list_mem2per(
+ unsigned int from_addr,
+ unsigned int to_addr,
+ unsigned int segments,
+ unsigned int segmentsize,
+ unsigned int LOS);
+
+static int cmdma_write_cyclic_list_per2mem(
+ unsigned int from_addr,
+ unsigned int to_addr,
+ unsigned int segments,
+ unsigned int segmentsize,
+ unsigned int LOS);
+
+static bool cmdma_setup_relink_area_called = false;
+
+int cmdma_setup_relink_area( unsigned int mem_addr,
+ unsigned int per_addr,
+ unsigned int segments,
+ unsigned int segmentsize,
+ unsigned int LOS,
+ enum cmdma_type type)
+{
+ if (!cmdma_setup_relink_area_called)
+ cmdma_setup_relink_area_called = true;
+
+ switch (type) {
+
+ case CMDMA_MEM_2_PER:
+ return cmdma_write_cyclic_list_mem2per(
+ mem_addr,
+ per_addr,
+ segments,
+ segmentsize,
+ LOS);
+
+ case CMDMA_PER_2_MEM:
+ return cmdma_write_cyclic_list_per2mem(
+ per_addr,
+ mem_addr,
+ segments,
+ segmentsize,
+ LOS);
+
+ default :
+ return -EINVAL;
+ }
+ }
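+
+/*
+ * Minimal usage sketch (illustrative only, not called from this file):
+ * set up a cyclic memory-to-peripheral relink list of 4 segments of
+ * 256 bytes each, starting at relink index los.  The mem_addr, per_addr
+ * and los values below are hypothetical.
+ *
+ *	int err = cmdma_setup_relink_area(mem_addr, per_addr, 4, 256,
+ *					  los, CMDMA_MEM_2_PER);
+ *	if (err)
+ *		return err;
+ */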
+
+ static unsigned int cmdma_getlcla( void) {
+
+ if(!virtbase)
+ virtbase = ioremap(U8500_DMA_BASE, CMDMA_REG_LCLA + sizeof(int) );
+
+ return readl(virtbase + CMDMA_REG_LCLA);
+ }
+
+ static void cmdma_write_relink_params_mem2per (
+ int * relink,
+ unsigned int LOS,
+ unsigned int nb_element,
+ unsigned int src_addr,
+ unsigned int dst_addr,
+ unsigned int burst_size) {
+
+ relink[0] = (((long)(nb_element & 0xFFFF)) << 16) |
+ (src_addr & 0xFFFF);
+
+ relink[1] = (((src_addr >> 16) & 0xFFFFUL) << 16) |
+ (0x1200UL | (LOS << 1) | (burst_size<<10));
+
+ relink[2] = ((nb_element & 0xFFFF) << 16) |
+ (dst_addr & 0xFFFF);
+
+ relink[3] = (((dst_addr >> 16) & 0xFFFFUL) << 16 ) |
+ 0x8201UL | ((LOS+1) << 1) | (burst_size<<10);
+}
+
+static void cmdma_write_relink_params_per2mem (
+ int * relink,
+ unsigned int LOS,
+ unsigned int nb_element,
+ unsigned int src_addr,
+ unsigned int dst_addr,
+ unsigned int burst_size) {
+
+ relink[0] = (((long)(nb_element & 0xFFFF)) << 16) |
+ (src_addr & 0xFFFF);
+
+ relink[1] = (((src_addr >> 16) & 0xFFFFUL) << 16) |
+ (0x8201UL | (LOS << 1) | (burst_size<<10));
+
+ relink[2] = ((nb_element & 0xFFFF) << 16) |
+ (dst_addr & 0xFFFF);
+
+ relink[3] = (((dst_addr >> 16) & 0xFFFFUL) << 16 ) |
+ 0x1200UL | ((LOS+1) << 1) | (burst_size<<10);
+}
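+
+/*
+ * Note on the two helpers above: each relink entry is packed as four
+ * 32-bit words.  Word 0 holds the element count and the low 16 bits of
+ * the source address, word 1 the high 16 bits of the source address plus
+ * a configuration half-word (burst size and a link to entry LOS or
+ * LOS+1), word 2 the element count and the low 16 bits of the destination
+ * address, and word 3 the high destination bits plus its configuration
+ * half-word.  The exact meaning of the magic configuration constants
+ * (0x1200, 0x8201) is hardware specific and not documented here.
+ */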
+
+static int cmdma_write_cyclic_list_mem2per(
+ unsigned int from_addr,
+ unsigned int to_addr,
+ unsigned int segments,
+ unsigned int segmentsize,
+ unsigned int LOS) {
+
+ unsigned int i,j;
+ int *relink;
+
+ j = LOS;
+
+ for ( i = 0; i < segments; i++) {
+ relink = ioremap_nocache (cmdma_getlcla() + 1024 * CMDMA_LIDX + 8 * j, 4 * sizeof(int));
+
+ if (i == (segments-1))
+ j = LOS;
+ else
+ j += 2;
+
+ cmdma_write_relink_params_mem2per (
+ relink,
+ j,
+ segmentsize / 4,
+ from_addr,
+ to_addr,
+ 0x2);
+
+ iounmap(relink);
+
+ from_addr += segmentsize;
+ }
+
+ return 0;
+}
+
+static int cmdma_write_cyclic_list_per2mem(
+ unsigned int from_addr,
+ unsigned int to_addr,
+ unsigned int segments,
+ unsigned int segmentsize,
+ unsigned int LOS) {
+
+ unsigned int i,j;
+ int *relink;
+ j = LOS;
+
+ for ( i = 0; i < segments; i++) {
+ relink = ioremap_nocache (cmdma_getlcla() + 1024 * CMDMA_LIDX + 8 * j, 4 * sizeof(int));
+
+ if (i == (segments-1))
+ j = LOS;
+ else
+ j += 2;
+
+ cmdma_write_relink_params_per2mem (
+ relink,
+ j,
+ segmentsize / 4,
+ from_addr,
+ to_addr,
+ 0x2);
+
+ iounmap(relink);
+
+ to_addr += segmentsize;
+ }
+
+ return 0;
+}
+
+static void __iomem *dmabase = 0;
+int cmdma_init(void)
+{
+ dmabase = ioremap_nocache(U8500_DMA_BASE, PAGE_SIZE);
+ if (dmabase == NULL)
+ return -ENOMEM;
+ else
+ return 0;
+}
+
+void cmdma_destroy(void)
+{
+ iounmap(dmabase);
+}
+
+#define SSLNK_CHAN_2 (0x40C + 0x20 * 2)
+#define SDLNK_CHAN_2 (0x41C + 0x20 * 2)
+
+void cmdma_stop_dma(void)
+{
+ if(cmdma_setup_relink_area_called) {
+ cmdma_setup_relink_area_called = false;
+ if (readl(dmabase + SSLNK_CHAN_2) & (0x3 << 28)) {
+ printk(KERN_ERR "CM: ERROR - RX DMA was running\n");
+ }
+ if (readl(dmabase + SDLNK_CHAN_2) & (0x3 << 28)) {
+ printk(KERN_ERR "CM: ERROR - TX DMA was running\n");
+ }
+
+ writel(~(1 << 28), dmabase + SSLNK_CHAN_2);
+ while (readl(dmabase + SSLNK_CHAN_2) & (0x3 << 28));
+
+ writel(~(1 << 28), dmabase + SDLNK_CHAN_2);
+ while (readl(dmabase + SDLNK_CHAN_2) & (0x3 << 28));
+ }
+}
diff --git a/drivers/staging/nmf-cm/cm_dma.h b/drivers/staging/nmf-cm/cm_dma.h
new file mode 100644
index 00000000000..4fccef03830
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm_dma.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+
+#ifndef __CMDMA_H
+#define __CMDMA_H
+#include "cmioctl.h"
+
+int cmdma_setup_relink_area(
+ unsigned int mem_addr,
+ unsigned int per_addr,
+ unsigned int segments,
+ unsigned int segmentsize,
+ unsigned int LOS,
+ enum cmdma_type type);
+
+void cmdma_stop_dma(void);
+int cmdma_init(void);
+void cmdma_destroy(void);
+#endif
diff --git a/drivers/staging/nmf-cm/cm_service.c b/drivers/staging/nmf-cm/cm_service.c
new file mode 100644
index 00000000000..a2a6ffa5b57
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm_service.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/** \file cm_service.c
+ *
+ * Nomadik Multiprocessing Framework Linux Driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/plist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+
+#include <cm/engine/api/control/irq_engine.h>
+
+#include "osal-kernel.h"
+#include "cmld.h"
+#include "cm_service.h"
+#include "cm_dma.h"
+
+/* Panic management */
+static void service_tasklet_func(unsigned long);
+unsigned long service_tasklet_data = 0;
+DECLARE_TASKLET(cmld_service_tasklet, service_tasklet_func, 0);
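+
+/*
+ * Each bit of service_tasklet_data corresponds to one MPC index; it is
+ * expected to be set (with the MPC interrupt masked) by the interrupt
+ * handler, which is not part of this file.  service_tasklet_func() below
+ * clears the bit, handles the pending service description and re-enables
+ * the interrupt with enable_irq().
+ */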
+
+void dispatch_service_msg(struct osal_msg *msg)
+{
+ struct list_head *head, *next;
+#ifdef CONFIG_DEBUG_FS
+ bool dump_flag_to_set = true;
+#endif
+ /*
+ * Note: no lock needed to protect the channel_list against list
+ * changes, as the current tasklet is disabled each time we modify
+ * the list
+ */
+ list_for_each_safe(head, next, &channel_list) {
+ struct cm_channel_priv *channelPriv = list_entry(head, struct cm_channel_priv, entry);
+ struct osal_msg *new_msg;
+ size_t msg_size;
+
+ if (channelPriv->state == CHANNEL_CLOSED)
+ continue;
+ msg_size = sizeof(new_msg->hdr) + sizeof(new_msg->d.srv);
+ new_msg = kmalloc(msg_size, GFP_ATOMIC);
+ if (new_msg == NULL) {
+ pr_err("[CM] %s: can't allocate memory, service"
+ " message not dispatched !!\n", __func__);
+ continue;
+ }
+ memcpy(new_msg, msg, msg_size);
+ plist_node_init(&new_msg->msg_entry, 0);
+#ifdef CONFIG_DEBUG_FS
+ if (cmld_user_has_debugfs && dump_flag_to_set
+ && (new_msg->d.srv.srvType == NMF_SERVICE_PANIC)) {
+ /*
+ * The receiver of this message will do the DSP
+ * memory dump
+ */
+ new_msg->d.srv.srvData.panic.panicSource
+ |= DEBUGFS_DUMP_FLAG;
+ dump_flag_to_set = false;
+ cmld_dump_ongoing = true;
+ }
+#endif
+ spin_lock_bh(&channelPriv->bh_lock);
+ plist_add(&new_msg->msg_entry, &channelPriv->messageQueue);
+ spin_unlock_bh(&channelPriv->bh_lock);
+ wake_up(&channelPriv->waitq);
+ }
+}
+
+static void service_tasklet_func(unsigned long unused)
+{
+ t_cm_service_type type;
+ t_cm_service_description desc;
+ int i=0;
+
+ do {
+ if (test_and_clear_bit(i, &service_tasklet_data)) {
+ CM_getServiceDescription(osalEnv.mpc[i].coreId, &type, &desc);
+
+ switch (type) {
+ case CM_MPC_SERVICE_PANIC: {
+ struct osal_msg msg;
+
+ msg.msg_type = MSG_SERVICE;
+ msg.d.srv.srvType = NMF_SERVICE_PANIC;
+ msg.d.srv.srvData.panic = desc.u.panic;
+
+ dispatch_service_msg(&msg);
+ /*
+ * Stop DMA directly before shutdown, to avoid
+ * bad sound. Should be called after DSP has
+ * stopped executing, to avoid the DSP
+ * re-starting DMA
+ */
+ if (osalEnv.mpc[i].coreId == SIA_CORE_ID)
+ cmdma_stop_dma();
+ break;
+ }
+ case CM_MPC_SERVICE_PRINT: {
+ char msg[256];
+ if (CM_ReadMPCString(osalEnv.mpc[i].coreId,
+ desc.u.print.dspAddress, msg,
+ sizeof(msg)) == CM_OK)
+ printk(msg, desc.u.print.value1,
+ desc.u.print.value2);
+ break;
+ }
+ case CM_MPC_SERVICE_TRACE:
+ spin_lock_bh(&osalEnv.mpc[i].trace_reader_lock);
+ if (osalEnv.mpc[i].trace_reader)
+ wake_up_process(osalEnv.mpc[i].trace_reader);
+ spin_unlock_bh(&osalEnv.mpc[i].trace_reader_lock);
+ break;
+ default:
+ pr_err("[CM] %s: MPC Service Type %d not supported\n", __func__, type);
+ }
+ enable_irq(osalEnv.mpc[i].interrupt1);
+ }
+ i = (i+1) % NB_MPC;
+ } while (service_tasklet_data != 0);
+}
diff --git a/drivers/staging/nmf-cm/cm_service.h b/drivers/staging/nmf-cm/cm_service.h
new file mode 100644
index 00000000000..39582eae573
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm_service.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/** \file cm_service.h
+ *
+ * Nomadik Multiprocessing Framework Linux Driver
+ *
+ */
+
+#ifndef CM_SERVICE_H
+#define CM_SERVICE_H
+
+#include <linux/interrupt.h>
+
+extern unsigned long service_tasklet_data;
+extern struct tasklet_struct cmld_service_tasklet;
+void dispatch_service_msg(struct osal_msg *msg);
+
+#endif
diff --git a/drivers/staging/nmf-cm/cm_syscall.c b/drivers/staging/nmf-cm/cm_syscall.c
new file mode 100644
index 00000000000..ca8d664abb4
--- /dev/null
+++ b/drivers/staging/nmf-cm/cm_syscall.c
@@ -0,0 +1,1413 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <cm/engine/api/cm_engine.h>
+#include "cmioctl.h"
+#include "osal-kernel.h"
+#include "cmld.h"
+#include "cm_dma.h"
+
+/** Dequeue and free per-process messages for a specific binding
+ *
+ * \note
+ * This is only safe if the per-process mutex is held when called.
+ */
+static inline void freeMessages(struct cm_channel_priv* cPriv, t_skelwrapper* binding)
+{
+ struct osal_msg *this, *next;
+
+ spin_lock_bh(&cPriv->bh_lock);
+
+ /* free any pending messages */
+ plist_for_each_entry_safe(this, next, &cPriv->messageQueue, msg_entry) {
+ if (this->msg_type == MSG_INTERFACE
+ && this->d.itf.skelwrap == binding) {
+ plist_del(&this->msg_entry, &cPriv->messageQueue);
+ kfree(this);
+ }
+ }
+ spin_unlock_bh(&cPriv->bh_lock);
+}
+
+static t_cm_error copy_string_from_user(char *dst, const char __user *src, int len)
+{
+ int ret;
+
+ ret = strncpy_from_user(dst, src, len);
+ if (ret < 0) /* -EFAULT */
+ return CM_INVALID_PARAMETER;
+
+ if (ret >= len)
+ return CM_OUT_OF_LIMITS;
+
+ return 0;
+}
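+
+/*
+ * copy_string_from_user() returns 0 on success, CM_INVALID_PARAMETER if
+ * the user pointer faults and CM_OUT_OF_LIMITS if the string does not fit
+ * in the destination buffer.  This lets the cmld_* wrappers below assign
+ * the result straight into data.out.error, for example:
+ *
+ *	if ((data.out.error = copy_string_from_user(name, data.in.name,
+ *						     sizeof(name))))
+ *		goto out;
+ */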
+
+inline int cmld_InstantiateComponent(struct cm_process_priv* procPriv,
+ CM_InstantiateComponent_t __user *param)
+{
+ CM_InstantiateComponent_t data;
+ char templateName[MAX_TEMPLATE_NAME_LENGTH];
+ char localName[MAX_COMPONENT_NAME_LENGTH];
+ char *dataFile = NULL;
+
+ /* Copy all user data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if (data.in.dataFile != NULL) {
+ dataFile = vmalloc(data.in.dataFileSize);
+ if (dataFile == NULL) {
+ data.out.error = CM_NO_MORE_MEMORY;
+ goto out;
+ }
+ /* coverity[tainted_data : FALSE] */
+ if (copy_from_user(dataFile, data.in.dataFile, data.in.dataFileSize)) {
+ data.out.error = CM_INVALID_PARAMETER;
+ goto out;
+ }
+ }
+
+ if ((data.out.error = copy_string_from_user(templateName,
+ data.in.templateName,
+ sizeof(templateName))))
+ goto out;
+
+ if ((data.in.localName != NULL) &&
+ (data.out.error = copy_string_from_user(localName,
+ data.in.localName,
+ sizeof(localName))))
+ goto out;
+
+ /* Do appropriate CM Engine call */
+ data.out.error = CM_ENGINE_InstantiateComponent(templateName,
+ data.in.domainId,
+ procPriv->pid,
+ data.in.priority,
+ data.in.localName ? localName : NULL,
+ dataFile,
+ &data.out.component);
+
+out:
+ if (dataFile)
+ vfree(dataFile);
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
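+
+/*
+ * The ioctl helpers below all follow the same marshalling pattern as
+ * cmld_InstantiateComponent() above: copy the 'in' part of the parameter
+ * block from user space, call the corresponding CM_ENGINE_* function
+ * (usually with the calling process' pid), then copy the 'out' part,
+ * including the t_cm_error, back to user space.  -EFAULT is returned only
+ * when the user-space copy itself fails; CM errors are reported through
+ * data.out.error.
+ */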
+
+inline int cmld_BindComponentFromCMCore(struct cm_process_priv* procPriv,
+ CM_BindComponentFromCMCore_t __user *param)
+{
+ CM_BindComponentFromCMCore_t data;
+ char providedItfServerName[MAX_INTERFACE_NAME_LENGTH];
+ char *dataFileSkeleton = NULL;
+
+ /* Copy all user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(providedItfServerName,
+ data.in.providedItfServerName,
+ sizeof(providedItfServerName))))
+ goto out;
+
+ if (data.in.dataFileSkeleton != NULL) {
+ dataFileSkeleton = OSAL_Alloc(data.in.dataFileSkeletonSize);
+ if (dataFileSkeleton == NULL) {
+ data.out.error = CM_NO_MORE_MEMORY;
+ goto out;
+ }
+ /* coverity[tainted_data : FALSE] */
+ if (copy_from_user(dataFileSkeleton, data.in.dataFileSkeleton,
+ data.in.dataFileSkeletonSize)) {
+ data.out.error = CM_INVALID_PARAMETER;
+ goto out;
+ }
+ }
+
+ data.out.error = CM_ENGINE_BindComponentFromCMCore(data.in.server,
+ providedItfServerName,
+ data.in.fifosize,
+ data.in.eventMemType,
+ &data.out.host2mpcId,
+ procPriv->pid,
+ dataFileSkeleton);
+out:
+ if (dataFileSkeleton)
+ OSAL_Free(dataFileSkeleton);
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+
+ return 0;
+}
+
+inline int cmld_UnbindComponentFromCMCore(CM_UnbindComponentFromCMCore_t __user *param)
+{
+ CM_UnbindComponentFromCMCore_t data;
+
+ /* Copy all user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_UnbindComponentFromCMCore(data.in.host2mpcId);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_BindComponentToCMCore(struct cm_channel_priv* channelPriv,
+ CM_BindComponentToCMCore_t __user *param)
+{
+ CM_BindComponentToCMCore_t data;
+ t_skelwrapper *skelwrapper;
+ struct cm_process_priv *procPriv = channelPriv->proc;
+ char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH];
+ char *dataFileStub = NULL;
+
+ /* Copy all user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(requiredItfClientName,
+ data.in.requiredItfClientName,
+ sizeof(requiredItfClientName))))
+ goto out;
+
+ /* Do appropriate CM Engine call */
+ skelwrapper = (t_skelwrapper *)OSAL_Alloc(sizeof(*skelwrapper));
+ if (skelwrapper == NULL) {
+ data.out.error = CM_NO_MORE_MEMORY;
+ goto out;
+ }
+
+ if (data.in.dataFileStub != NULL) {
+ dataFileStub = OSAL_Alloc(data.in.dataFileStubSize);
+ if (dataFileStub == NULL) {
+ data.out.error = CM_NO_MORE_MEMORY;
+ goto out;
+ }
+ /* coverity[tainted_data : FALSE] */
+ if (copy_from_user(dataFileStub, data.in.dataFileStub, data.in.dataFileStubSize)) {
+ data.out.error = CM_INVALID_PARAMETER;
+ goto out;
+ }
+ }
+
+ if ((data.out.error = CM_ENGINE_BindComponentToCMCore(
+ data.in.client,
+ requiredItfClientName,
+ data.in.fifosize,
+ (t_nmf_mpc2host_handle)skelwrapper,
+ dataFileStub,
+ &data.out.mpc2hostId,
+ procPriv->pid)) != CM_OK) {
+ OSAL_Free(skelwrapper);
+ goto out;
+ }
+
+ skelwrapper->upperLayerThis = data.in.upLayerThis;
+ skelwrapper->mpc2hostId = data.out.mpc2hostId;
+ skelwrapper->channelPriv = channelPriv;
+ mutex_lock(&channelPriv->skelListLock);
+ list_add(&skelwrapper->entry, &channelPriv->skelList);
+ mutex_unlock(&channelPriv->skelListLock);
+out:
+ if (dataFileStub != NULL)
+ OSAL_Free(dataFileStub);
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_UnbindComponentToCMCore(struct cm_process_priv* procPriv,
+ CM_UnbindComponentToCMCore_t __user *param)
+{
+ CM_UnbindComponentToCMCore_t data;
+ t_skelwrapper *skelwrapper;
+ char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH];
+
+ /* Copy all user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(requiredItfClientName,
+ data.in.requiredItfClientName,
+ sizeof(requiredItfClientName))))
+ goto out;
+
+ data.out.error = CM_ENGINE_UnbindComponentToCMCore(
+ data.in.client, requiredItfClientName,
+ (t_nmf_mpc2host_handle*)&skelwrapper,
+ procPriv->pid);
+
+ if (data.out.error != CM_OK && data.out.error != CM_MPC_NOT_RESPONDING)
+ goto out;
+
+ data.out.upLayerThis = skelwrapper->upperLayerThis;
+
+ mutex_lock(&skelwrapper->channelPriv->msgQueueLock);
+ freeMessages(skelwrapper->channelPriv, skelwrapper);
+ mutex_lock(&skelwrapper->channelPriv->skelListLock);
+ list_del(&skelwrapper->entry);
+ mutex_unlock(&skelwrapper->channelPriv->skelListLock);
+ mutex_unlock(&skelwrapper->channelPriv->msgQueueLock);
+ OSAL_Free(skelwrapper);
+out:
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_BindComponentAsynchronous(struct cm_process_priv* procPriv,
+ CM_BindComponentAsynchronous_t __user *param)
+{
+ CM_BindComponentAsynchronous_t data;
+ char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH];
+ char providedItfServerName[MAX_INTERFACE_NAME_LENGTH];
+ char *dataFileSkeletonOrEvent = NULL;
+ char *dataFileStub = NULL;
+
+ /* Copy all user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(requiredItfClientName,
+ data.in.requiredItfClientName,
+ sizeof(requiredItfClientName))))
+ goto out;
+
+ if ((data.out.error = copy_string_from_user(providedItfServerName,
+ data.in.providedItfServerName,
+ sizeof(providedItfServerName))))
+ goto out;
+
+ if (data.in.dataFileSkeletonOrEvent != NULL) {
+ dataFileSkeletonOrEvent =
+ OSAL_Alloc(data.in.dataFileSkeletonOrEventSize);
+ if (dataFileSkeletonOrEvent == NULL) {
+ data.out.error = CM_NO_MORE_MEMORY;
+ goto out;
+ }
+ /* coverity[tainted_data : FALSE] */
+ if (copy_from_user(dataFileSkeletonOrEvent, data.in.dataFileSkeletonOrEvent, data.in.dataFileSkeletonOrEventSize)) {
+ data.out.error = CM_INVALID_PARAMETER;
+ goto out;
+ }
+ }
+
+ if (data.in.dataFileStub != NULL) {
+ dataFileStub = OSAL_Alloc(data.in.dataFileStubSize);
+ if (dataFileStub == NULL) {
+ data.out.error = CM_NO_MORE_MEMORY;
+ goto out;
+ }
+ /* coverity[tainted_data : FALSE] */
+ if (copy_from_user(dataFileStub, data.in.dataFileStub, data.in.dataFileStubSize)) {
+ data.out.error = CM_INVALID_PARAMETER;
+ goto out;
+ }
+ }
+
+ /* Do appropriate CM Engine call */
+ data.out.error = CM_ENGINE_BindComponentAsynchronous(data.in.client,
+ requiredItfClientName,
+ data.in.server,
+ providedItfServerName,
+ data.in.fifosize,
+ data.in.eventMemType,
+ procPriv->pid,
+ dataFileSkeletonOrEvent,
+ dataFileStub);
+
+out:
+ if (dataFileSkeletonOrEvent != NULL)
+ OSAL_Free(dataFileSkeletonOrEvent);
+ if (dataFileStub != NULL)
+ OSAL_Free(dataFileStub);
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_UnbindComponentAsynchronous(struct cm_process_priv* procPriv,
+ CM_UnbindComponentAsynchronous_t __user *param)
+{
+ CM_UnbindComponentAsynchronous_t data;
+ char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH];
+
+ /* Copy all user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(requiredItfClientName,
+ data.in.requiredItfClientName,
+ sizeof(requiredItfClientName))))
+ goto out;
+
+ /* Do appropriate CM Engine call */
+ /* coverity[tainted_data : FALSE] */
+ data.out.error = CM_ENGINE_UnbindComponentAsynchronous(data.in.client,
+ requiredItfClientName,
+ procPriv->pid);
+out:
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_BindComponent(struct cm_process_priv* procPriv,
+ CM_BindComponent_t __user *param)
+{
+ CM_BindComponent_t data;
+ char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH];
+ char providedItfServerName[MAX_INTERFACE_NAME_LENGTH];
+ char *dataFileTrace = NULL;
+
+ /* Copy all user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(requiredItfClientName,
+ data.in.requiredItfClientName,
+ sizeof(requiredItfClientName))))
+ goto out;
+
+ if ((data.out.error = copy_string_from_user(providedItfServerName,
+ data.in.providedItfServerName,
+ sizeof(providedItfServerName))))
+ goto out;
+
+ if (data.in.dataFileTrace != NULL) {
+ dataFileTrace = OSAL_Alloc(data.in.dataFileTraceSize);
+ if (dataFileTrace == NULL) {
+ data.out.error = CM_NO_MORE_MEMORY;
+ goto out;
+ }
+ /* coverity[tainted_data : FALSE] */
+ if (copy_from_user(dataFileTrace, data.in.dataFileTrace,
+ data.in.dataFileTraceSize)) {
+ data.out.error = CM_INVALID_PARAMETER;
+ goto out;
+ }
+ }
+
+ /* Do appropriate CM Engine call */
+ data.out.error = CM_ENGINE_BindComponent(data.in.client,
+ requiredItfClientName,
+ data.in.server,
+ providedItfServerName,
+ data.in.traced,
+ procPriv->pid,
+ dataFileTrace);
+out:
+ if (dataFileTrace != NULL)
+ OSAL_Free(dataFileTrace);
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_UnbindComponent(struct cm_process_priv* procPriv,
+ CM_UnbindComponent_t __user *param)
+{
+ CM_UnbindComponent_t data;
+ char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH];
+
+ /* Copy all user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(requiredItfClientName,
+ data.in.requiredItfClientName,
+ sizeof(requiredItfClientName))))
+ goto out;
+
+ /* Do appropriate CM Engine call */
+ data.out.error = CM_ENGINE_UnbindComponent(data.in.client,
+ requiredItfClientName,
+ procPriv->pid);
+
+out:
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_BindComponentToVoid(struct cm_process_priv* procPriv,
+ CM_BindComponentToVoid_t __user *param)
+{
+ CM_BindComponentToVoid_t data;
+ char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH];
+
+ /* Copy all user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(requiredItfClientName,
+ data.in.requiredItfClientName,
+ sizeof(requiredItfClientName))))
+ goto out;
+
+ data.out.error = CM_ENGINE_BindComponentToVoid(data.in.client,
+ requiredItfClientName,
+ procPriv->pid);
+
+out:
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_DestroyComponent(struct cm_process_priv* procPriv,
+ CM_DestroyComponent_t __user *param)
+{
+ CM_DestroyComponent_t data;
+
+ /* Copy all user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_DestroyComponent(data.in.component,
+ procPriv->pid);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_CreateMemoryDomain(struct cm_process_priv *procPriv,
+ CM_CreateMemoryDomain_t __user *param)
+{
+ CM_CreateMemoryDomain_t data;
+ t_cm_domain_memory domain;
+
+ /* Copy all user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if (copy_from_user(&domain, data.in.domain, sizeof(domain)))
+ return -EFAULT;
+
+ if (data.in.client == NMF_CURRENT_CLIENT)
+ data.out.error = CM_ENGINE_CreateMemoryDomain(procPriv->pid,
+ &domain,
+ &data.out.handle);
+ else {
+ /* Check if client is valid (ie already registered) */
+ struct list_head* head;
+ struct cm_process_priv *entry;
+
+ list_for_each(head, &process_list) {
+ entry = list_entry(head, struct cm_process_priv,
+ entry);
+ if (entry->pid == data.in.client)
+ break;
+ }
+ if (head == &process_list)
+ data.out.error = CM_INVALID_PARAMETER;
+ else
+ data.out.error =
+ CM_ENGINE_CreateMemoryDomain(data.in.client,
+ &domain,
+ &data.out.handle);
+ }
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_CreateMemoryDomainScratch(struct cm_process_priv *procPriv,
+ CM_CreateMemoryDomainScratch_t __user *param)
+{
+ CM_CreateMemoryDomainScratch_t data;
+ t_cm_domain_memory domain;
+
+ /* Copy all user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if (copy_from_user(&domain, data.in.domain, sizeof(domain)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_CreateMemoryDomainScratch(procPriv->pid,
+ data.in.parentId,
+ &domain,
+ &data.out.handle);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_DestroyMemoryDomain(CM_DestroyMemoryDomain_t __user *param)
+{
+ CM_DestroyMemoryDomain_t data;
+
+ /* Copy all user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ /* coverity[tainted_data : FALSE] */
+ data.out.error = CM_ENGINE_DestroyMemoryDomain(data.in.domainId);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetDomainCoreId(CM_GetDomainCoreId_t __user *param)
+{
+ CM_GetDomainCoreId_t data;
+
+ /* Copy all user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ /* coverity[tainted_data : FALSE] */
+ data.out.error = CM_ENGINE_GetDomainCoreId(data.in.domainId,
+ &data.out.coreId);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_AllocMpcMemory(struct cm_process_priv *procPriv,
+ CM_AllocMpcMemory_t __user *param)
+{
+ t_cm_error err;
+ CM_AllocMpcMemory_t data;
+ t_cm_memory_handle handle = 0;
+ struct memAreaDesc_t* memAreaDesc;
+ t_cm_system_address systemAddress;
+ t_uint32 mpcAddress;
+
+ /* Copy all user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ /* Disregard alignment information and force 4kB memory alignment,
+ in any case (see devnotes.txt) */
+ /* PP: Disable this 'force' for now, because of the low amount of
+ available MPC Memory */
+ //data.in.memAlignment = CM_MM_MPC_ALIGN_1024WORDS;
+
+ /* coverity[tainted_data : FALSE] */
+ data.out.error = CM_ENGINE_AllocMpcMemory(data.in.domainId,
+ procPriv->pid,
+ data.in.memType,
+ data.in.size,
+ data.in.memAlignment,
+ &handle);
+
+ data.out.pHandle = handle;
+
+ if (data.out.error != CM_OK)
+ goto out;
+
+ /* Get memory area descriptors in advance
+ so as to fill in the list elements right now */
+ err = CM_ENGINE_GetMpcMemorySystemAddress(handle, &systemAddress);
+ if (err != CM_OK) {
+ pr_err("%s: failed CM_ENGINE_GetMpcMemorySystemAddress (%i)\n", __func__, err);
+ /* If we can't manage this allocated memory internally later, it's
+ better to report the error now.
+ Free the handle so as not to leave the driver in an inconsistent state */
+ CM_ENGINE_FreeMpcMemory(handle);
+ return -EFAULT;
+ }
+
+ /* Get MPC address in advance so as to fill in the list elements right now */
+ err = CM_ENGINE_GetMpcMemoryMpcAddress(handle, &mpcAddress);
+ if (err != CM_OK) {
+ pr_err("%s: failed CM_ENGINE_GetMpcMemoryMpcAddress (%i)\n", __func__, err);
+ /* see comments above */
+ CM_ENGINE_FreeMpcMemory(handle);
+ return -EFAULT;
+ }
+
+ /* Allocate and fill a new memory area descriptor. Add it to the list */
+ memAreaDesc = OSAL_Alloc(sizeof(struct memAreaDesc_t));
+ if (memAreaDesc == NULL) {
+ pr_err("%s: failed allocating memAreaDesc\n", __func__);
+ /* see comments above */
+ CM_ENGINE_FreeMpcMemory(handle);
+ return -ENOMEM;
+ }
+
+ memAreaDesc->procPriv = procPriv;
+ memAreaDesc->handle = handle;
+ memAreaDesc->tid = 0;
+ memAreaDesc->physAddr = systemAddress.physical;
+ memAreaDesc->kernelLogicalAddr = systemAddress.logical;
+ memAreaDesc->userLogicalAddr = 0;
+ memAreaDesc->mpcPhysAddr = mpcAddress;
+ memAreaDesc->size = data.in.size * ((data.in.memType % 2) ? 4 : 2); // betzw: set size in bytes for host (ugly version)
+ atomic_set(&memAreaDesc->count, 0);
+
+ if (lock_process(procPriv)) {
+ /* maybe rather call lock_process_uninterruptible() */
+ CM_ENGINE_FreeMpcMemory(handle);
+ OSAL_Free(memAreaDesc);
+ return -ERESTARTSYS;
+ }
+ list_add(&memAreaDesc->list, &procPriv->memAreaDescList);
+ unlock_process(procPriv);
+out:
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_FreeMpcMemory(struct cm_process_priv *procPriv,
+ CM_FreeMpcMemory_t __user *param)
+{
+ CM_FreeMpcMemory_t data;
+ struct list_head *cursor, *next;
+
+ /* Copy user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ /* check that it is actually owned by the process */
+ data.out.error = CM_UNKNOWN_MEMORY_HANDLE;
+
+ if (lock_process(procPriv))
+ return -ERESTARTSYS;
+ list_for_each_safe(cursor, next, &procPriv->memAreaDescList){
+ struct memAreaDesc_t* curr;
+ curr = list_entry(cursor, struct memAreaDesc_t, list);
+ if (curr->handle == data.in.handle){
+ if (atomic_read(&curr->count) != 0) {
+				pr_err("%s: Memory area (physAddr: %x, size: %d) "
+ "still in use (count=%d)!\n", __func__,
+ curr->physAddr, curr->size,
+ atomic_read(&curr->count));
+ data.out.error = CM_INVALID_PARAMETER;
+ } else {
+ data.out.error =
+ CM_ENGINE_FreeMpcMemory(data.in.handle);
+ if (data.out.error == CM_OK) {
+ list_del(cursor);
+ OSAL_Free(curr);
+ }
+ }
+ break;
+ }
+ }
+ unlock_process(procPriv);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetMpcMemoryStatus(CM_GetMpcMemoryStatus_t __user *param)
+{
+ CM_GetMpcMemoryStatus_t data;
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ /* coverity[tainted_data : FALSE] */
+ data.out.error = CM_ENGINE_GetMpcMemoryStatus(data.in.coreId,
+ data.in.memType,
+ &data.out.pStatus);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_StartComponent(struct cm_process_priv *procPriv,
+ CM_StartComponent_t __user *param)
+{
+ CM_StartComponent_t data;
+
+ /* Copy user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_StartComponent(data.in.client,
+ procPriv->pid);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_StopComponent(struct cm_process_priv *procPriv,
+ CM_StopComponent_t __user *param)
+{
+ CM_StopComponent_t data;
+
+ /* Copy user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_StopComponent(data.in.client,
+ procPriv->pid);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetMpcLoadCounter(CM_GetMpcLoadCounter_t __user *param)
+{
+ CM_GetMpcLoadCounter_t data;
+
+ /* Copy user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_getMpcLoadCounter(data.in.coreId,
+ &data.out.pMpcLoadCounter);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentDescription(struct cm_process_priv *procPriv,
+ CM_GetComponentDescription_t __user *param)
+{
+ CM_GetComponentDescription_t data;
+ char templateName[MAX_TEMPLATE_NAME_LENGTH];
+ char localName[MAX_COMPONENT_NAME_LENGTH];
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_GetComponentDescription(data.in.component,
+ templateName,
+ data.in.templateNameLength,
+ &data.out.coreId,
+ localName,
+ data.in.localNameLength,
+ &data.out.priority);
+
+ /* Copy results back to userspace */
+ if (data.out.error == CM_OK) {
+ /* coverity[tainted_data : FALSE] */
+ if (copy_to_user(data.in.templateName, templateName, data.in.templateNameLength))
+ return -EFAULT;
+ /* coverity[tainted_data : FALSE] */
+ if (copy_to_user(data.in.localName, localName, data.in.localNameLength))
+ return -EFAULT;
+ }
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentListHeader(struct cm_process_priv *procPriv,
+ CM_GetComponentListHeader_t __user *param)
+{
+ CM_GetComponentListHeader_t data;
+
+ data.out.error = CM_ENGINE_GetComponentListHeader(procPriv->pid,
+ &data.out.headerComponent);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentListNext(struct cm_process_priv *procPriv,
+ CM_GetComponentListNext_t __user *param)
+{
+ CM_GetComponentListNext_t data;
+
+ /* Copy user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_GetComponentListNext(procPriv->pid,
+ data.in.prevComponent,
+ &data.out.nextComponent);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentRequiredInterfaceNumber(struct cm_process_priv *procPriv,
+ CM_GetComponentRequiredInterfaceNumber_t __user *param)
+{
+ CM_GetComponentRequiredInterfaceNumber_t data;
+
+ /* Copy user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_GetComponentRequiredInterfaceNumber(data.in.component,
+ &data.out.numberRequiredInterfaces);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentRequiredInterface(struct cm_process_priv *procPriv,
+ CM_GetComponentRequiredInterface_t __user *param)
+{
+ CM_GetComponentRequiredInterface_t data;
+ char itfName[MAX_INTERFACE_NAME_LENGTH];
+ char itfType[MAX_INTERFACE_TYPE_NAME_LENGTH];
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_GetComponentRequiredInterface(data.in.component,
+ data.in.index,
+ itfName,
+ data.in.itfNameLength,
+ itfType,
+ data.in.itfTypeLength,
+ &data.out.requireState,
+ &data.out.collectionSize);
+
+ /* Copy results back to userspace */
+ if (data.out.error == CM_OK) {
+ /* coverity[tainted_data : FALSE] */
+ if (copy_to_user(data.in.itfName, itfName, data.in.itfNameLength))
+ return -EFAULT;
+ /* coverity[tainted_data : FALSE] */
+ if (copy_to_user(data.in.itfType, itfType, data.in.itfTypeLength))
+ return -EFAULT;
+ }
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentRequiredInterfaceBinding(struct cm_process_priv *procPriv,
+ CM_GetComponentRequiredInterfaceBinding_t __user *param)
+{
+ CM_GetComponentRequiredInterfaceBinding_t data;
+ char itfName[MAX_INTERFACE_NAME_LENGTH];
+ char serverItfName[MAX_INTERFACE_NAME_LENGTH];
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+ if ((data.out.error = copy_string_from_user(itfName,
+ data.in.itfName,
+ sizeof(itfName))))
+ goto out;
+
+ data.out.error = CM_ENGINE_GetComponentRequiredInterfaceBinding(data.in.component,
+ itfName,
+ &data.out.server,
+ serverItfName,
+ data.in.serverItfNameLength);
+
+ /* Copy results back to userspace */
+ if (data.out.error != CM_OK)
+ goto out;
+
+ /* coverity[tainted_data : FALSE] */
+ if (copy_to_user(data.in.serverItfName, serverItfName, data.in.serverItfNameLength))
+ return -EFAULT;
+out:
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentProvidedInterfaceNumber(struct cm_process_priv *procPriv,
+ CM_GetComponentProvidedInterfaceNumber_t __user *param)
+{
+ CM_GetComponentProvidedInterfaceNumber_t data;
+
+ /* Copy user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_GetComponentProvidedInterfaceNumber(data.in.component,
+ &data.out.numberProvidedInterfaces);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentProvidedInterface(struct cm_process_priv *procPriv,
+ CM_GetComponentProvidedInterface_t __user *param)
+{
+ CM_GetComponentProvidedInterface_t data;
+ char itfName[MAX_INTERFACE_NAME_LENGTH];
+ char itfType[MAX_INTERFACE_TYPE_NAME_LENGTH];
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_GetComponentProvidedInterface(data.in.component,
+ data.in.index,
+ itfName,
+ data.in.itfNameLength,
+ itfType,
+ data.in.itfTypeLength,
+ &data.out.collectionSize);
+
+ /* Copy results back to userspace */
+ if (data.out.error == CM_OK) {
+ /* coverity[tainted_data : FALSE] */
+ if (copy_to_user(data.in.itfName, itfName, data.in.itfNameLength))
+ return -EFAULT;
+ /* coverity[tainted_data : FALSE] */
+ if (copy_to_user(data.in.itfType, itfType, data.in.itfTypeLength))
+ return -EFAULT;
+ }
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentPropertyNumber(struct cm_process_priv *procPriv,
+ CM_GetComponentPropertyNumber_t __user *param)
+{
+ CM_GetComponentPropertyNumber_t data;
+
+ /* Copy user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_GetComponentPropertyNumber(data.in.component,
+ &data.out.numberProperties);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentPropertyName(struct cm_process_priv *procPriv,
+ CM_GetComponentPropertyName_t __user *param)
+{
+ CM_GetComponentPropertyName_t data;
+ char propertyName[MAX_PROPERTY_NAME_LENGTH];
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_GetComponentPropertyName(data.in.component,
+ data.in.index,
+ propertyName,
+ data.in.propertyNameLength);
+
+ /* Copy results back to userspace */
+ /* coverity[tainted_data : FALSE] */
+ if ((data.out.error == CM_OK) &&
+ copy_to_user(data.in.propertyName, propertyName, data.in.propertyNameLength))
+ return -EFAULT;
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetComponentPropertyValue(struct cm_process_priv *procPriv,
+ CM_GetComponentPropertyValue_t __user *param)
+{
+ CM_GetComponentPropertyValue_t data;
+ char propertyName[MAX_PROPERTY_NAME_LENGTH];
+ char propertyValue[MAX_PROPERTY_VALUE_LENGTH];
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(propertyName,
+ data.in.propertyName,
+ sizeof(propertyName))))
+ goto out;
+
+ data.out.error = CM_ENGINE_GetComponentPropertyValue(data.in.component,
+ propertyName,
+ propertyValue,
+ data.in.propertyValueLength);
+ /* Copy results back to userspace */
+ /* coverity[tainted_data : FALSE] */
+ if ((data.out.error == CM_OK) &&
+ copy_to_user(data.in.propertyValue, propertyValue, data.in.propertyValueLength))
+ return -EFAULT;
+out:
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_ReadComponentAttribute(struct cm_process_priv *procPriv,
+ CM_ReadComponentAttribute_t __user *param)
+{
+ CM_ReadComponentAttribute_t data;
+ char attrName[MAX_ATTRIBUTE_NAME_LENGTH];
+
+ /* Copy user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(attrName,
+ data.in.attrName,
+ sizeof(attrName))))
+ goto out;
+
+ data.out.error = CM_ENGINE_ReadComponentAttribute(data.in.component,
+ attrName,
+ &data.out.value);
+out:
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetExecutiveEngineHandle(struct cm_process_priv *procPriv,
+ CM_GetExecutiveEngineHandle_t __user *param)
+{
+ CM_GetExecutiveEngineHandle_t data;
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ /* coverity[tainted_data : FALSE] */
+ data.out.error = CM_ENGINE_GetExecutiveEngineHandle(data.in.domainId,
+ &data.out.executiveEngineHandle);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_SetMode(CM_SetMode_t __user *param)
+{
+ CM_SetMode_t data;
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ /* coverity[tainted_data : FALSE] */
+ data.out.error = CM_ENGINE_SetMode(data.in.aCmdID, data.in.aParam);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_GetRequiredComponentFiles(struct cm_process_priv *procPriv,
+ CM_GetRequiredComponentFiles_t __user *param)
+{
+ CM_GetRequiredComponentFiles_t data;
+ char components[4][MAX_INTERFACE_TYPE_NAME_LENGTH];
+ char requiredItfClientName[MAX_INTERFACE_NAME_LENGTH];
+ char providedItfServerName[MAX_INTERFACE_NAME_LENGTH];
+ char type[MAX_INTERFACE_TYPE_NAME_LENGTH];
+ unsigned int i;
+ int err;
+
+ /* Copy user input data in kernel space */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if (data.in.requiredItfClientName &&
+ (data.out.error = copy_string_from_user(requiredItfClientName,
+ data.in.requiredItfClientName,
+ sizeof(requiredItfClientName))))
+ goto out;
+
+ if (data.in.providedItfServerName &&
+ (data.out.error = copy_string_from_user(providedItfServerName,
+ data.in.providedItfServerName,
+ sizeof(providedItfServerName))))
+ goto out;
+
+ data.out.error = CM_ENGINE_GetRequiredComponentFiles(data.in.action,
+ data.in.client,
+ requiredItfClientName,
+ data.in.server,
+ providedItfServerName,
+ components,
+ data.in.listSize,
+ data.in.type ? type : NULL,
+ &data.out.methodNumber);
+
+ if (data.out.error)
+ goto out;
+
+ if (data.in.fileList) {
+ /* Copy results back to userspace */
+ for (i=0; i<data.in.listSize; i++) {
+ err = copy_to_user(&((char*)data.in.fileList)[i*MAX_INTERFACE_TYPE_NAME_LENGTH], components[i], MAX_INTERFACE_TYPE_NAME_LENGTH);
+ if (err)
+ return -EFAULT;
+ }
+ }
+ if (data.in.type
+ && copy_to_user(data.in.type, type, MAX_INTERFACE_TYPE_NAME_LENGTH))
+ return -EFAULT;
+out:
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_Migrate(CM_Migrate_t __user *param)
+{
+ CM_Migrate_t data;
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ data.out.error = CM_ENGINE_Migrate(data.in.srcShared, data.in.src, data.in.dst);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_Unmigrate(CM_Unmigrate_t __user *param)
+{
+ CM_Unmigrate_t data;
+
+ data.out.error = CM_ENGINE_Unmigrate();
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+int cmld_SetupRelinkArea(struct cm_process_priv *procPriv,
+ CM_SetupRelinkArea_t __user *param)
+{
+ CM_SetupRelinkArea_t data;
+ struct list_head *cursor, *next;
+ struct memAreaDesc_t *entry = NULL;
+
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+
+ /* check that it is actually owned by the process */
+ data.out.error = CM_UNKNOWN_MEMORY_HANDLE;
+
+ if (lock_process(procPriv))
+ return -ERESTARTSYS;
+ list_for_each_safe(cursor, next, &procPriv->memAreaDescList){
+ entry = list_entry(cursor, struct memAreaDesc_t, list);
+ if (entry->handle == data.in.mem_handle)
+ break;
+ }
+ unlock_process(procPriv);
+
+ if ((entry == NULL) || (entry->handle != data.in.mem_handle))
+ goto out;
+
+ if (entry->size < data.in.segments * data.in.segmentsize)
+ {
+ data.out.error = CM_INVALID_PARAMETER;
+ goto out;
+ }
+
+ data.out.error = cmdma_setup_relink_area(
+ entry->physAddr,
+ data.in.peripheral_addr,
+ data.in.segments,
+ data.in.segmentsize,
+ data.in.LOS,
+ data.in.type);
+out:
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+
+ return 0;
+}
+
+
+inline int cmld_PushComponent(CM_PushComponent_t __user *param)
+{
+ CM_PushComponent_t data;
+ char name[MAX_INTERFACE_TYPE_NAME_LENGTH];
+ void *dataFile = NULL;
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(name,
+ data.in.name,
+ sizeof(name))))
+ goto out;
+
+ if (data.in.data != NULL) {
+ dataFile = OSAL_Alloc(data.in.size);
+ if (dataFile == NULL) {
+ data.out.error = CM_NO_MORE_MEMORY;
+ goto out;
+ }
+ /* coverity[tainted_data : FALSE] */
+ if (copy_from_user(dataFile, data.in.data, data.in.size))
+ data.out.error = CM_INVALID_PARAMETER;
+ else
+ data.out.error = CM_ENGINE_PushComponent(name, dataFile,
+ data.in.size);
+ OSAL_Free(dataFile);
+ }
+
+out:
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_ReleaseComponent(CM_ReleaseComponent_t __user *param)
+{
+ CM_ReleaseComponent_t data;
+ char name[MAX_INTERFACE_TYPE_NAME_LENGTH];
+
+ /* Copy user input data in kernel space */
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if ((data.out.error = copy_string_from_user(name,
+ data.in.name,
+ sizeof(name))))
+ goto out;
+
+ /* coverity[tainted_data : FALSE] */
+ data.out.error = CM_ENGINE_ReleaseComponent(name);
+
+out:
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+inline int cmld_PrivGetMPCMemoryDesc(struct cm_process_priv *procPriv, CM_PrivGetMPCMemoryDesc_t __user *param)
+{
+ CM_PrivGetMPCMemoryDesc_t data;
+ struct list_head* cursor;
+
+ if (copy_from_user(&data.in, &param->in, sizeof(data.in)))
+ return -EFAULT;
+
+ if (lock_process(procPriv))
+ return -ERESTARTSYS;
+ /* Scan the memory descriptors list looking for the requested handle */
+ data.out.error = CM_UNKNOWN_MEMORY_HANDLE;
+ list_for_each(cursor, &procPriv->memAreaDescList) {
+ struct memAreaDesc_t* curr;
+ curr = list_entry(cursor, struct memAreaDesc_t, list);
+ if (curr->handle == data.in.handle) {
+ data.out.size = curr->size;
+ data.out.physAddr = curr->physAddr;
+ data.out.kernelLogicalAddr = curr->kernelLogicalAddr;
+ data.out.userLogicalAddr = curr->userLogicalAddr;
+ data.out.mpcPhysAddr = curr->mpcPhysAddr;
+ data.out.error = CM_OK;
+ break;
+ }
+ }
+ unlock_process(procPriv);
+
+ /* Copy results back to userspace */
+ if (copy_to_user(&param->out, &data.out, sizeof(data.out)))
+ return -EFAULT;
+ return 0;
+}
+
+int cmld_PrivReserveMemory(struct cm_process_priv *procPriv, unsigned int physAddr)
+{
+ struct list_head* cursor;
+ struct memAreaDesc_t* curr;
+ int err = -ENXIO;
+
+ if (lock_process(procPriv))
+ return -ERESTARTSYS;
+ list_for_each(cursor, &procPriv->memAreaDescList) {
+ curr = list_entry(cursor, struct memAreaDesc_t, list);
+ if (curr->physAddr == physAddr) {
+			/* Mark this memory area as reserved for a mapping by this thread ID.
+			   It must not already be reserved; such a conflict should not happen */
+ if (curr->tid) {
+				pr_err("%s: thread %d can't reserve memory %x: already "
+				       "reserved for thread %d\n",
+				       __func__, current->pid, physAddr, curr->tid);
+ err = -EBUSY;
+ } else {
+ curr->tid = current->pid;
+ err = 0;
+ }
+ break;
+ }
+ }
+ unlock_process(procPriv);
+ return err;
+}
diff --git a/drivers/staging/nmf-cm/cmioctl.h b/drivers/staging/nmf-cm/cmioctl.h
new file mode 100644
index 00000000000..5f7d5b6a349
--- /dev/null
+++ b/drivers/staging/nmf-cm/cmioctl.h
@@ -0,0 +1,604 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+
+#ifndef __CMIOCTL_H
+#define __CMIOCTL_H
+
+#ifndef __KERNEL__
+#define BITS_PER_BYTE 8
+#endif
+
+#include <cm/engine/component/inc/component_type.h>
+#include <cm/engine/communication/inc/communication_type.h>
+#include <cm/engine/configuration/inc/configuration_type.h>
+#include <cm/engine/memory/inc/domain_type.h>
+#include <cm/engine/memory/inc/memory_type.h>
+#include <cm/engine/perfmeter/inc/perfmeter_type.h>
+#include <cm/engine/repository_mgt/inc/repository_type.h>
+
+#define DEBUGFS_ROOT "nmf-cm"
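+/* Most significant bit of a t_panic_source value; presumably OR-ed into the
+   panic source reported through debugfs to flag a requested dump
+   (assumption based on its definition and name). */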
+#define DEBUGFS_DUMP_FLAG (1 << (sizeof(t_panic_source)*BITS_PER_BYTE - 1))
+
+enum cmdma_type {
+ CMDMA_MEM_2_PER,
+ CMDMA_PER_2_MEM
+};
+
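+/* Character device node names; cmld_devname[] in cmld.c is initialized from
+   this macro and is assumed to use the same ordering as cmld_dev[]. */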
+#define CMLD_DEV_NAME \
+ { "cm_control", \
+ "cm_channel", \
+ "cm_sia_trace", \
+ "cm_sva_trace", \
+ }
+
+/*
+ * The following structures are used to exchange CM_SYSCALL parameters with
+ * the driver. There is one structure per ioctl command, i.e. one per CM_SYSCALL.
+ * Each of them contains:
+ * - One set of fields placed in a structure 'in', holding all input
+ *   parameters of the syscall (parameters that the kernel side must retrieve
+ *   from user space)
+ * - One set of fields placed in a structure 'out', holding all output
+ *   parameters of the syscall plus the error code.
+ *
+ * NOTE: all pointers to (user) buffers are always placed in struct 'in',
+ * including buffers used as output parameters, because the pointer itself is
+ * considered an input parameter: it is directly accessed from kernel space.
+ */
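+/*
+ * Illustrative userspace usage (sketch only, not part of this header): every
+ * wrapped CM_SYSCALL follows the same pattern: fill the 'in' fields, issue
+ * the ioctl on the control device, then check 'out.error'. The device path
+ * and surrounding error handling below are assumptions of the example.
+ *
+ *	CM_GetDomainCoreId_t p = { .in = { .domainId = domainId } };
+ *	int fd = open("/dev/cm_control", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, CM_GETDOMAINCOREID, &p) == 0 &&
+ *	    p.out.error == CM_OK)
+ *		coreId = p.out.coreId;
+ */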
+typedef struct{
+ struct {
+ const char * templateName;
+ t_cm_domain_id domainId;
+ t_nmf_ee_priority priority;
+ const char * localName;
+ const char *dataFile;
+ t_uint32 dataFileSize;
+ } in;
+ struct {
+ t_cm_instance_handle component; /** < Output parameter */
+ t_cm_error error;
+ } out;
+} CM_InstantiateComponent_t;
+
+typedef struct {
+ struct {
+ t_cm_bf_host2mpc_handle host2mpcId;
+ t_event_params_handle h;
+ t_uint32 size;
+ t_uint32 methodIndex;
+ } in;
+ struct {
+ t_cm_error error;
+ } out;
+} CM_PushEventWithSize_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle server;
+ const char * providedItfServerName;
+ t_uint32 fifosize;
+ t_cm_mpc_memory_type eventMemType;
+ const char *dataFileSkeleton;
+ t_uint32 dataFileSkeletonSize;
+ } in;
+ struct {
+ t_cm_bf_host2mpc_handle host2mpcId; /** < Output parameter */
+ t_cm_error error;
+ } out;
+} CM_BindComponentFromCMCore_t;
+
+typedef struct {
+ struct {
+ t_cm_bf_host2mpc_handle host2mpcId;
+ } in;
+ struct {
+ t_cm_error error;
+ } out;
+} CM_UnbindComponentFromCMCore_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle client;
+ const char *requiredItfClientName;
+ t_uint32 fifosize;
+ t_nmf_mpc2host_handle upLayerThis;
+ const char *dataFileStub;
+ t_uint32 dataFileStubSize;
+ } in;
+ struct {
+ t_cm_bf_mpc2host_handle mpc2hostId; /** < Output parameter */
+ t_cm_error error;
+ } out;
+} CM_BindComponentToCMCore_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle client;
+ const char *requiredItfClientName;
+ } in;
+ struct {
+ t_nmf_mpc2host_handle upLayerThis; /** < Output parameter */
+ t_cm_error error;
+ } out;
+} CM_UnbindComponentToCMCore_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ } in;
+ struct {
+ t_cm_error error; /** < Output parameter */
+ } out; /** < Output parameter */
+} CM_DestroyComponent_t;
+
+typedef struct {
+ struct {
+ const t_cm_domain_memory *domain;
+ t_nmf_client_id client;
+ } in;
+ struct {
+ t_cm_domain_id handle; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out; /** < Out parameter */
+} CM_CreateMemoryDomain_t;
+
+typedef struct {
+ struct {
+ t_cm_domain_id parentId;
+ const t_cm_domain_memory *domain;
+ } in;
+ struct {
+ t_cm_domain_id handle; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out; /** < Out parameter */
+} CM_CreateMemoryDomainScratch_t;
+
+typedef struct {
+ struct {
+ t_cm_domain_id domainId;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out; /** < Out parameter */
+} CM_DestroyMemoryDomain_t;
+
+typedef struct {
+ struct {
+ t_cm_domain_id domainId; /** < In parameter */
+ } in;
+ struct {
+ t_nmf_core_id coreId; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out; /** < Out parameter */
+} CM_GetDomainCoreId_t;
+
+typedef struct {
+ struct {
+ t_cm_domain_id domainId;
+ t_cm_mpc_memory_type memType;
+ t_cm_size size;
+ t_cm_memory_alignment memAlignment;
+ } in;
+ struct {
+ t_cm_memory_handle pHandle; /** < Output parameter */
+ t_cm_error error;
+ } out; /** < Output parameter */
+} CM_AllocMpcMemory_t;
+
+typedef struct{
+ struct {
+ t_cm_memory_handle handle;
+ } in;
+ struct {
+ t_cm_error error;
+ } out; /** < Output parameter */
+} CM_FreeMpcMemory_t;
+
+typedef struct {
+ struct {
+ t_cm_memory_handle handle;
+ } in;
+ struct {
+ t_uint32 size; /** < Out parameter */
+ t_uint32 physAddr; /** < Out parameter */
+ t_uint32 kernelLogicalAddr; /** < Out parameter */
+ t_uint32 userLogicalAddr; /** < Out parameter */
+ t_uint32 mpcPhysAddr; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out; /** < Output parameter */
+} CM_PrivGetMPCMemoryDesc_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle client;
+ const char *requiredItfClientName;
+ t_cm_instance_handle server;
+ const char *providedItfServerName;
+ t_uint32 fifosize;
+ t_cm_mpc_memory_type eventMemType;
+ const char *dataFileSkeletonOrEvent;
+ t_uint32 dataFileSkeletonOrEventSize;
+ const char *dataFileStub;
+ t_uint32 dataFileStubSize;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_BindComponentAsynchronous_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle client;
+ const char* requiredItfClientName;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_UnbindComponentAsynchronous_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle client;
+ const char *requiredItfClientName;
+ t_cm_instance_handle server;
+ const char *providedItfServerName;
+ t_bool traced;
+ const char *dataFileTrace;
+ t_uint32 dataFileTraceSize;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_BindComponent_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle client;
+ const char* requiredItfClientName;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_UnbindComponent_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle client;
+ const char* requiredItfClientName;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_BindComponentToVoid_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle client;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_StartComponent_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle client;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_StopComponent_t;
+
+typedef struct {
+ struct {
+ t_nmf_core_id coreId;
+ } in;
+ struct {
+ t_cm_mpc_load_counter pMpcLoadCounter; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetMpcLoadCounter_t;
+
+typedef struct {
+ struct {
+ t_nmf_core_id coreId;
+ t_cm_mpc_memory_type memType;
+ } in;
+ struct {
+ t_cm_allocator_status pStatus; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out; /** < Out parameter */
+} CM_GetMpcMemoryStatus_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ t_uint32 templateNameLength;
+ t_uint32 localNameLength;
+ char *templateName; /** < Out parameter */
+ char *localName; /** < Out parameter */
+ } in;
+ struct {
+ t_nmf_core_id coreId; /** < Out parameter */
+ t_nmf_ee_priority priority; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out; /** < Out parameter */
+} CM_GetComponentDescription_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle headerComponent; /** < Output parameter */
+ t_cm_error error; /** < Out parameter */
+ } out; /** < Out parameter */
+} CM_GetComponentListHeader_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle prevComponent;
+ } in;
+ struct {
+ t_cm_instance_handle nextComponent; /** < Output parameter */
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetComponentListNext_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ } in;
+ struct {
+ t_uint8 numberRequiredInterfaces; /** < Output parameter */
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetComponentRequiredInterfaceNumber_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ t_uint8 index;
+ t_uint32 itfNameLength;
+ t_uint32 itfTypeLength;
+ char *itfName; /** < Out parameter */
+ char *itfType; /** < Out parameter */
+ } in;
+ struct {
+ t_cm_require_state requireState; /** < Out parameter */
+ t_sint16 collectionSize; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetComponentRequiredInterface_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ const char *itfName;
+ t_uint32 serverItfNameLength;
+ char *serverItfName; /** < Out parameter */
+ } in;
+ struct {
+ t_cm_instance_handle server; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetComponentRequiredInterfaceBinding_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ } in;
+ struct {
+ t_uint8 numberProvidedInterfaces; /** < Output parameter */
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetComponentProvidedInterfaceNumber_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ t_uint8 index;
+ t_uint32 itfNameLength;
+ t_uint32 itfTypeLength;
+ char *itfName; /** < Out parameter */
+ char *itfType; /** < Out parameter */
+ } in;
+ struct {
+ t_sint16 collectionSize; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetComponentProvidedInterface_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ } in;
+ struct {
+ t_uint8 numberProperties; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetComponentPropertyNumber_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ const char *attrName;
+ t_uint8 index;
+ t_uint32 propertyNameLength;
+ char *propertyName; /** < Out parameter */
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetComponentPropertyName_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ const char *propertyName;
+ t_uint32 propertyValueLength;
+ char *propertyValue; /** < Out parameter */
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetComponentPropertyValue_t;
+
+typedef struct {
+ struct {
+ t_cm_instance_handle component;
+ const char *attrName;
+ } in;
+ struct {
+ t_uint32 value; /** < Out parameter */
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_ReadComponentAttribute_t;
+
+typedef struct {
+ struct {
+ t_cm_domain_id domainId;
+ } in;
+ struct {
+ t_cm_instance_handle executiveEngineHandle;
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetExecutiveEngineHandle_t;
+
+typedef struct {
+ struct {
+ t_cm_cmd_id aCmdID;
+ t_sint32 aParam;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_SetMode_t;
+
+typedef struct {
+ struct {
+ t_action_to_do action;
+ t_cm_instance_handle client;
+ const char *requiredItfClientName;
+ t_cm_instance_handle server;
+ const char *providedItfServerName;
+ char **fileList;
+ unsigned int listSize;
+ char *type;
+ } in;
+ struct {
+ t_uint32 methodNumber; /** < Output parameter */
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_GetRequiredComponentFiles_t;
+
+typedef struct {
+ struct {
+ const char *name;
+ const void *data;
+ t_cm_size size;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_PushComponent_t;
+
+typedef struct {
+ struct {
+ const char *name;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_ReleaseComponent_t;
+
+typedef struct {
+ struct {
+ t_cm_domain_id srcShared;
+ t_cm_domain_id src;
+ t_cm_domain_id dst;
+ } in;
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_Migrate_t;
+
+typedef struct {
+ struct {
+ t_cm_error error; /** < Out parameter */
+ } out;
+} CM_Unmigrate_t;
+
+typedef struct{
+ struct {
+ t_cm_memory_handle mem_handle;
+ unsigned int peripheral_addr;
+ unsigned int segments;
+ unsigned int segmentsize;
+ unsigned int LOS;
+ enum cmdma_type type;
+ } in;
+ struct {
+ t_cm_error error;
+ } out;
+} CM_SetupRelinkArea_t;
+
+#define CM_PUSHEVENTWITHSIZE _IOWR('c', 0, CM_PushEventWithSize_t)
+#define CM_GETVERSION _IOR('c', 1, t_uint32)
+#define CM_INSTANTIATECOMPONENT _IOWR('c', 2, CM_InstantiateComponent_t)
+#define CM_BINDCOMPONENTFROMCMCORE _IOWR('c', 3, CM_BindComponentFromCMCore_t)
+#define CM_UNBINDCOMPONENTFROMCMCORE _IOWR('c', 4, CM_UnbindComponentFromCMCore_t)
+#define CM_BINDCOMPONENTTOCMCORE _IOWR('c', 5, CM_BindComponentToCMCore_t)
+#define CM_UNBINDCOMPONENTTOCMCORE _IOWR('c', 6, CM_UnbindComponentToCMCore_t)
+#define CM_DESTROYCOMPONENT _IOWR('c', 7, CM_DestroyComponent_t)
+#define CM_CREATEMEMORYDOMAIN _IOWR('c', 8, CM_CreateMemoryDomain_t)
+#define CM_CREATEMEMORYDOMAINSCRATCH _IOWR('c', 9, CM_CreateMemoryDomainScratch_t)
+#define CM_DESTROYMEMORYDOMAIN _IOWR('c', 10, CM_DestroyMemoryDomain_t)
+#define CM_GETDOMAINCOREID _IOWR('c', 11, CM_GetDomainCoreId_t)
+#define CM_ALLOCMPCMEMORY _IOWR('c', 12, CM_AllocMpcMemory_t)
+#define CM_FREEMPCMEMORY _IOWR('c', 13, CM_FreeMpcMemory_t)
+#define CM_BINDCOMPONENTASYNCHRONOUS _IOWR('c', 14, CM_BindComponentAsynchronous_t)
+#define CM_UNBINDCOMPONENTASYNCHRONOUS _IOWR('c', 15, CM_UnbindComponentAsynchronous_t)
+#define CM_BINDCOMPONENT _IOWR('c', 16, CM_BindComponent_t)
+#define CM_UNBINDCOMPONENT _IOWR('c', 17, CM_UnbindComponent_t)
+#define CM_BINDCOMPONENTTOVOID _IOWR('c', 18, CM_BindComponentToVoid_t)
+#define CM_STARTCOMPONENT _IOWR('c', 19, CM_StartComponent_t)
+#define CM_STOPCOMPONENT _IOWR('c', 20, CM_StopComponent_t)
+#define CM_GETMPCLOADCOUNTER _IOWR('c', 21, CM_GetMpcLoadCounter_t)
+#define CM_GETMPCMEMORYSTATUS _IOWR('c', 22, CM_GetMpcMemoryStatus_t)
+#define CM_GETCOMPONENTDESCRIPTION _IOWR('c', 23, CM_GetComponentDescription_t)
+#define CM_GETCOMPONENTLISTHEADER _IOWR('c', 24, CM_GetComponentListHeader_t)
+#define CM_GETCOMPONENTLISTNEXT _IOWR('c', 25, CM_GetComponentListNext_t)
+#define CM_GETCOMPONENTREQUIREDINTERFACENUMBER _IOWR('c', 26, CM_GetComponentRequiredInterfaceNumber_t)
+#define CM_GETCOMPONENTREQUIREDINTERFACE _IOWR('c', 27, CM_GetComponentRequiredInterface_t)
+#define CM_GETCOMPONENTREQUIREDINTERFACEBINDING _IOWR('c', 28, CM_GetComponentRequiredInterfaceBinding_t)
+#define CM_GETCOMPONENTPROVIDEDINTERFACENUMBER _IOWR('c', 29, CM_GetComponentProvidedInterfaceNumber_t)
+#define CM_GETCOMPONENTPROVIDEDINTERFACE _IOWR('c', 30, CM_GetComponentProvidedInterface_t)
+#define CM_GETCOMPONENTPROPERTYNUMBER _IOWR('c', 31, CM_GetComponentPropertyNumber_t)
+#define CM_GETCOMPONENTPROPERTYNAME _IOWR('c', 32, CM_GetComponentPropertyName_t)
+#define CM_GETCOMPONENTPROPERTYVALUE _IOWR('c', 33, CM_GetComponentPropertyValue_t)
+#define CM_READCOMPONENTATTRIBUTE _IOWR('c', 34, CM_ReadComponentAttribute_t)
+#define CM_GETEXECUTIVEENGINEHANDLE _IOWR('c', 35, CM_GetExecutiveEngineHandle_t)
+#define CM_SETMODE _IOWR('c', 36, CM_SetMode_t)
+#define CM_GETREQUIREDCOMPONENTFILES _IOWR('c', 37, CM_GetRequiredComponentFiles_t)
+#define CM_PUSHCOMPONENT _IOWR('c', 38, CM_PushComponent_t)
+#define CM_FLUSHCHANNEL _IO('c', 39)
+#define CM_MIGRATE _IOWR('c', 40, CM_Migrate_t)
+#define CM_UNMIGRATE _IOR('c', 41, CM_Unmigrate_t)
+#define CM_RELEASECOMPONENT _IOWR('c', 42, CM_ReleaseComponent_t)
+#define CM_SETUPRELINKAREA _IOWR('c', 43, CM_SetupRelinkArea_t)
+
+#define CM_PRIVGETMPCMEMORYDESC _IOWR('c', 100, CM_PrivGetMPCMemoryDesc_t)
+#define CM_PRIVRESERVEMEMORY _IOW('c', 101, unsigned int)
+#define CM_PRIV_GETBOARDVERSION _IOR('c', 102, unsigned int)
+#define CM_PRIV_ISCOMPONENTCACHEEMPTY _IO('c', 103)
+#define CM_PRIV_DEBUGFS_READY _IO('c', 104)
+#define CM_PRIV_DEBUGFS_WAIT_DUMP _IO('c', 105)
+#define CM_PRIV_DEBUGFS_DUMP_DONE _IO('c', 106)
+
+enum board_version {
+ U8500_V2
+};
+#endif
diff --git a/drivers/staging/nmf-cm/cmld.c b/drivers/staging/nmf-cm/cmld.c
new file mode 100644
index 00000000000..60c20cadaee
--- /dev/null
+++ b/drivers/staging/nmf-cm/cmld.c
@@ -0,0 +1,1403 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/** \file cmld.c
+ *
+ * Nomadik Multiprocessing Framework Linux Driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <cm/inc/cm_def.h>
+#include <cm/engine/api/cm_engine.h>
+#include <cm/engine/api/control/irq_engine.h>
+
+#include "osal-kernel.h"
+#include "cmld.h"
+#include "cmioctl.h"
+#include "cm_debug.h"
+#include "cm_service.h"
+#include "cm_dma.h"
+
+#define CMDRIVER_PATCH_VERSION 122
+#define O_FLUSH 0x1000000
+
+static int cmld_major;
+static struct cdev cmld_cdev;
+static struct class cmld_class = {
+ .name = "cm",
+ .owner = THIS_MODULE,
+};
+const char *cmld_devname[] = CMLD_DEV_NAME;
+static struct device *cmld_dev[ARRAY_SIZE(cmld_devname)];
+
+/* List of per process structure (struct cm_process_priv list) */
+LIST_HEAD(process_list);
+static DEFINE_MUTEX(process_lock); /* lock used to protect previous list */
+/* List of per channel structure (struct cm_channel_priv list).
+ A channel == One file descriptor */
+LIST_HEAD(channel_list);
+static DEFINE_MUTEX(channel_lock); /* lock used to protect previous list */
+
+#ifdef CONFIG_DEBUG_FS
+/* Debugfs support */
+bool cmld_user_has_debugfs = false;
+bool cmld_dump_ongoing = false;
+module_param(cmld_dump_ongoing, bool, S_IWUSR|S_IRUGO);
+static DECLARE_WAIT_QUEUE_HEAD(dump_waitq);
+#endif
+
+static inline struct cm_process_priv *getProcessPriv(void)
+{
+ struct list_head* head;
+ struct cm_process_priv *entry;
+
+ mutex_lock(&process_lock);
+
+ /* Look for an entry for the calling process */
+ list_for_each(head, &process_list) {
+ entry = list_entry(head, struct cm_process_priv, entry);
+ if (entry->pid == current->tgid) {
+ kref_get(&entry->ref);
+ goto out;
+ }
+ }
+ mutex_unlock(&process_lock);
+
+ /* Allocate, init and register a new one otherwise */
+ entry = OSAL_Alloc(sizeof(*entry));
+ if (entry == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* init host2mpcLock */
+ mutex_init(&entry->host2mpcLock);
+
+ INIT_LIST_HEAD(&entry->memAreaDescList);
+ kref_init(&entry->ref);
+ mutex_init(&entry->mutex);
+
+ entry->pid = current->tgid;
+ mutex_lock(&process_lock);
+ list_add(&entry->entry, &process_list);
+ cm_debug_proc_init(entry);
+out:
+ mutex_unlock(&process_lock);
+ return entry;
+}
+
+/* Free all messages */
+static inline void freeMessages(struct cm_channel_priv* channelPriv)
+{
+ struct osal_msg *this, *next;
+ int warn = 0;
+
+ spin_lock_bh(&channelPriv->bh_lock);
+ plist_for_each_entry_safe(this, next, &channelPriv->messageQueue, msg_entry) {
+ plist_del(&this->msg_entry, &channelPriv->messageQueue);
+ kfree(this);
+ warn = 1;
+ }
+ spin_unlock_bh(&channelPriv->bh_lock);
+ if (warn)
+ pr_err("[CM - PID=%d]: Some remaining"
+ " message(s) freed\n", current->tgid);
+}
+
+/* Free all pending memory areas and their descriptors */
+static inline void freeMemHandles(struct cm_process_priv* processPriv)
+{
+ struct list_head* head, *next;
+ int warn = 0;
+
+ list_for_each_safe(head, next, &processPriv->memAreaDescList) {
+ struct memAreaDesc_t* curr;
+ int err;
+ curr = list_entry(head, struct memAreaDesc_t, list);
+ err=CM_ENGINE_FreeMpcMemory(curr->handle);
+ if (err)
+ pr_err("[CM - PID=%d]: Error (%d) freeing remaining memory area "
+ "handle\n", current->tgid, err);
+ list_del(head);
+ OSAL_Free(curr);
+ warn = 1;
+ }
+ if (warn) {
+ pr_err("[CM - PID=%d]: Some remaining memory area "
+ "handle(s) freed\n", current->tgid);
+ warn = 0;
+ }
+}
+
+/* Free any remaining skeleton wrappers; called when freeing the process entry */
+static inline void freeSkelList(struct list_head* skelList)
+{
+ struct list_head* head, *next;
+ int warn = 0;
+
+ /* No lock held, we know that we are the only and the last user
+ of the list */
+ list_for_each_safe(head, next, skelList) {
+ t_skelwrapper* curr;
+ curr = list_entry(head, t_skelwrapper, entry);
+ list_del(head);
+ OSAL_Free(curr);
+ warn = 1;
+ }
+ if (warn)
+ pr_err("[CM - PID=%d]: Some remaining skeleton "
+ "wrapper(s) freed\n", current->tgid);
+}
+
+/* Free any remaining channels belonging to this process */
+/* Called _only_ when freeing the process entry, once the network constructed by
+ this process has been destroyed.
+   See cmld_release() for why some channels may remain un-freed at this point */
+static inline void freeChannels(struct cm_process_priv* processPriv)
+{
+ struct list_head* head, *next;
+ int warn = 0;
+
+ mutex_lock(&channel_lock);
+ list_for_each_safe(head, next, &channel_list) {
+ struct cm_channel_priv *channelPriv;
+ channelPriv = list_entry(head, struct cm_channel_priv, entry);
+ /* Only channels belonging to this process are concerned */
+ if (channelPriv->proc == processPriv) {
+ tasklet_disable(&cmld_service_tasklet);
+ list_del(&channelPriv->entry);
+ tasklet_enable(&cmld_service_tasklet);
+
+ /* Free all remaining messages if any
+ (normally none, but double check) */
+ freeMessages(channelPriv);
+
+ /* Free any pending skeleton wrapper */
+ /* Here it's safe, we know that all bindings have been undone */
+ freeSkelList(&channelPriv->skelList);
+
+ /* Free the per-channel descriptor */
+ OSAL_Free(channelPriv);
+ }
+ warn = 1;
+ }
+ mutex_unlock(&channel_lock);
+
+ if (warn)
+ pr_err("[CM - PID=%d]: Some remaining channel entries "
+ "freed\n", current->tgid);
+}
+
+/* Free the process priv structure and all related stuff */
+/* Called only when the last ref to this structure is released */
+static void freeProcessPriv(struct kref *ref)
+{
+ struct cm_process_priv *entry = container_of(ref, struct cm_process_priv, ref);
+ t_nmf_error err;
+
+ mutex_lock(&process_lock);
+ list_del(&entry->entry);
+ mutex_unlock(&process_lock);
+
+ /* Destroy all remaining components */
+ err=CM_ENGINE_FlushComponents(entry->pid);
+ if (err != NMF_OK)
+ pr_err("[CM - PID=%d]: Error while flushing some remaining"
+ " components: error=%d\n", current->tgid, err);
+
+ freeChannels(entry);
+
+	/* Free any pending memory areas and their descriptors */
+ freeMemHandles(entry);
+
+ /* Destroy all remaining domains */
+ err=CM_ENGINE_FlushMemoryDomains(entry->pid);
+ if (err != NMF_OK)
+ pr_err("[CM - PID=%d]: Error while flushing some remaining"
+ " domains: error=%d\n", current->tgid, err);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(entry->dir);
+#endif
+
+ /* Free the per-process descriptor */
+ OSAL_Free(entry);
+}
+
+/** Reads Component Manager messages destined for this process.
+ * Each message is composed of three fields:
+ * 1) mpc2host handle (distinguishes interfaces)
+ * 2) methodIndex (distinguishes interface's methods)
+ * 3) Variable length parameters (method's parameters values)
+ *
+ * \note cfr GetEvent()
+ * \return POSIX error code
+ */
+static ssize_t cmld_channel_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+{
+ int err = 0;
+ struct cm_channel_priv* channelPriv = file->private_data;
+ int msgSize = 0;
+ struct plist_head* messageQueue;
+ struct osal_msg* msg;
+ t_os_message *os_msg = (t_os_message *)buf;
+ int block = !(file->f_flags & O_NONBLOCK);
+
+ messageQueue = &channelPriv->messageQueue;
+
+ if (mutex_lock_killable(&channelPriv->msgQueueLock))
+ return -ERESTARTSYS;
+
+wait:
+ while (plist_head_empty(messageQueue)) {
+ mutex_unlock(&channelPriv->msgQueueLock);
+ if (block == 0)
+ return -EAGAIN;
+ /* Wait until there is a message to ferry up */
+ if (wait_event_interruptible(channelPriv->waitq, ((!plist_head_empty(messageQueue)) || (file->f_flags & O_FLUSH))))
+ return -ERESTARTSYS;
+ if (file->f_flags & O_FLUSH) {
+ file->f_flags &= ~O_FLUSH;
+ return 0;
+ }
+ if (mutex_lock_killable(&channelPriv->msgQueueLock))
+ return -ERESTARTSYS;
+ }
+
+ /* Pick up the first message from the queue, making sure that the
+	 * hwsem tasklet does not wreak havoc on the queue in the meantime
+ */
+ spin_lock_bh(&channelPriv->bh_lock);
+ msg = plist_first_entry(messageQueue, struct osal_msg, msg_entry);
+ plist_del(&msg->msg_entry, messageQueue);
+ spin_unlock_bh(&channelPriv->bh_lock);
+
+ switch (msg->msg_type) {
+ case MSG_INTERFACE: {
+
+ /* Check if enough space is available */
+ msgSize = sizeof(msg->msg_type) + msg->d.itf.ptrSize + sizeof(os_msg->data.itf) - sizeof(os_msg->data.itf.params) ;
+ if (msgSize > count) {
+ mutex_unlock(&channelPriv->msgQueueLock);
+			pr_err("CM: message bigger than supplied buffer, message discarded!\n");
+ err = -EMSGSIZE;
+ goto out;
+ }
+
+ /* Copy to user message type */
+ err = put_user(msg->msg_type, &os_msg->type);
+ if (err) goto ack_evt;
+
+ /* Copy to user the t_nmf_mpc2host_handle */
+ err = put_user(msg->d.itf.skelwrap->upperLayerThis, &os_msg->data.itf.THIS);
+ if (err) goto ack_evt;
+
+ /* The methodIndex */
+ err = put_user(msg->d.itf.methodIdx, &os_msg->data.itf.methodIndex);
+ if (err) goto ack_evt;
+
+ /* And the parameters */
+ err = copy_to_user(os_msg->data.itf.params, msg->d.itf.anyPtr, msg->d.itf.ptrSize);
+
+ ack_evt:
+ /* This call is void */
+		/* Note that we cannot release the lock before calling this function,
+		   as acknowledgements MUST be executed in the same order as their
+		   respective messages arrived! */
+ CM_ENGINE_AcknowledgeEvent(msg->d.itf.skelwrap->mpc2hostId);
+
+ mutex_unlock(&channelPriv->msgQueueLock);
+ break;
+ }
+ case MSG_SERVICE: {
+ mutex_unlock(&channelPriv->msgQueueLock);
+ msgSize = sizeof(msg->msg_type) + sizeof(msg->d.srv.srvType)
+ + sizeof(msg->d.srv.srvData);
+		if (count < msgSize) {
+			pr_err("CM: service message bigger than supplied buffer, message discarded!\n");
+			err = -EMSGSIZE;
+			goto out;
+		}
+
+ /* Copy to user message type */
+ err = put_user(msg->msg_type, &os_msg->type);
+ if (err) goto out;
+ err = copy_to_user(&os_msg->data.srv, &msg->d.srv,
+ sizeof(msg->d.srv.srvType) + sizeof(msg->d.srv.srvData));
+ break;
+ }
+ default:
+ mutex_unlock(&channelPriv->msgQueueLock);
+ pr_err("CM: invalid message type %d discarded\n", msg->msg_type);
+ goto wait;
+ }
+out:
+ /* Destroy the message */
+ kfree(msg);
+
+ return err ? err : msgSize;
+}
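+
+/*
+ * Illustrative userspace counterpart of cmld_channel_read() above (sketch
+ * only): a reader interprets the returned buffer as a t_os_message. The
+ * buffer size, channel file descriptor and dispatch() helper below are
+ * assumptions of the example.
+ *
+ *	char buf[1024];
+ *	t_os_message *msg = (t_os_message *)buf;
+ *	ssize_t len = read(chan_fd, buf, sizeof(buf));
+ *
+ *	if (len > 0 && msg->type == MSG_INTERFACE)
+ *		dispatch(msg->data.itf.THIS, msg->data.itf.methodIndex,
+ *			 msg->data.itf.params);
+ */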
+
+/** Part of the driver's release method (i.e. userspace close()).
+ * It wakes up all waiters.
+ *
+ * \return POSIX error code
+ */
+static int cmld_channel_flush(struct file *file, fl_owner_t id)
+{
+ struct cm_channel_priv* channelPriv = file->private_data;
+ file->f_flags |= O_FLUSH;
+ wake_up(&channelPriv->waitq);
+ return 0;
+}
+
+static long cmld_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct cm_channel_priv *channelPriv = file->private_data;
+#ifdef CONFIG_DEBUG_FS
+ if (wait_event_interruptible(dump_waitq, (!cmld_dump_ongoing)))
+ return -ERESTARTSYS;
+#endif
+
+ switch(cmd) {
+ /*
+ * All channel CM SYSCALL
+ */
+ case CM_BINDCOMPONENTTOCMCORE:
+ return cmld_BindComponentToCMCore(channelPriv, (CM_BindComponentToCMCore_t *)arg);
+ case CM_FLUSHCHANNEL:
+ return cmld_channel_flush(file, 0);
+ default:
+ pr_err("CM(%s): unsupported command %i\n", __func__, cmd);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static long cmld_control_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct cm_process_priv* procPriv = file->private_data;
+#ifdef CONFIG_DEBUG_FS
+ if (cmd == CM_PRIV_DEBUGFS_DUMP_DONE) {
+ cmld_dump_ongoing = false;
+ wake_up(&dump_waitq);
+ return 0;
+ } else if (wait_event_interruptible(dump_waitq, (!cmld_dump_ongoing)))
+ return -ERESTARTSYS;
+#endif
+
+ switch(cmd) {
+ /*
+ * All wrapped CM SYSCALL
+ */
+ case CM_INSTANTIATECOMPONENT:
+ return cmld_InstantiateComponent(procPriv,
+ (CM_InstantiateComponent_t *)arg);
+
+ case CM_BINDCOMPONENTFROMCMCORE:
+ return cmld_BindComponentFromCMCore(procPriv,
+ (CM_BindComponentFromCMCore_t *)arg);
+
+ case CM_UNBINDCOMPONENTFROMCMCORE:
+ return cmld_UnbindComponentFromCMCore((CM_UnbindComponentFromCMCore_t *)arg);
+
+ case CM_UNBINDCOMPONENTTOCMCORE:
+ return cmld_UnbindComponentToCMCore(procPriv, (CM_UnbindComponentToCMCore_t *)arg);
+
+ case CM_BINDCOMPONENTASYNCHRONOUS:
+ return cmld_BindComponentAsynchronous(procPriv, (CM_BindComponentAsynchronous_t *)arg);
+
+ case CM_UNBINDCOMPONENTASYNCHRONOUS:
+ return cmld_UnbindComponentAsynchronous(procPriv, (CM_UnbindComponentAsynchronous_t *)arg);
+
+ case CM_BINDCOMPONENT:
+ return cmld_BindComponent(procPriv, (CM_BindComponent_t *)arg);
+
+ case CM_UNBINDCOMPONENT:
+ return cmld_UnbindComponent(procPriv, (CM_UnbindComponent_t *)arg);
+
+ case CM_BINDCOMPONENTTOVOID:
+ return cmld_BindComponentToVoid(procPriv, (CM_BindComponentToVoid_t *)arg);
+
+ case CM_DESTROYCOMPONENT:
+ return cmld_DestroyComponent(procPriv, (CM_DestroyComponent_t *)arg);
+
+ case CM_CREATEMEMORYDOMAIN:
+ return cmld_CreateMemoryDomain(procPriv, (CM_CreateMemoryDomain_t *)arg);
+
+ case CM_CREATEMEMORYDOMAINSCRATCH:
+ return cmld_CreateMemoryDomainScratch(procPriv, (CM_CreateMemoryDomainScratch_t *)arg);
+
+ case CM_DESTROYMEMORYDOMAIN:
+ return cmld_DestroyMemoryDomain((CM_DestroyMemoryDomain_t *)arg);
+
+ case CM_GETDOMAINCOREID:
+ return cmld_GetDomainCoreId((CM_GetDomainCoreId_t *)arg);
+
+ case CM_ALLOCMPCMEMORY:
+ return cmld_AllocMpcMemory(procPriv, (CM_AllocMpcMemory_t *)arg);
+
+ case CM_FREEMPCMEMORY:
+ return cmld_FreeMpcMemory(procPriv, (CM_FreeMpcMemory_t *)arg);
+
+ case CM_GETMPCMEMORYSTATUS:
+ return cmld_GetMpcMemoryStatus((CM_GetMpcMemoryStatus_t *)arg);
+
+ case CM_STARTCOMPONENT:
+ return cmld_StartComponent(procPriv, (CM_StartComponent_t *)arg);
+
+ case CM_STOPCOMPONENT:
+ return cmld_StopComponent(procPriv, (CM_StopComponent_t *)arg);
+
+ case CM_GETMPCLOADCOUNTER:
+ return cmld_GetMpcLoadCounter((CM_GetMpcLoadCounter_t *)arg);
+
+ case CM_GETCOMPONENTDESCRIPTION:
+ return cmld_GetComponentDescription(procPriv, (CM_GetComponentDescription_t *)arg);
+
+ case CM_GETCOMPONENTLISTHEADER:
+ return cmld_GetComponentListHeader(procPriv, (CM_GetComponentListHeader_t *)arg);
+
+ case CM_GETCOMPONENTLISTNEXT:
+ return cmld_GetComponentListNext(procPriv, (CM_GetComponentListNext_t *)arg);
+
+ case CM_GETCOMPONENTREQUIREDINTERFACENUMBER:
+ return cmld_GetComponentRequiredInterfaceNumber(procPriv,
+ (CM_GetComponentRequiredInterfaceNumber_t *)arg);
+
+ case CM_GETCOMPONENTREQUIREDINTERFACE:
+ return cmld_GetComponentRequiredInterface(procPriv,
+ (CM_GetComponentRequiredInterface_t *)arg);
+
+ case CM_GETCOMPONENTREQUIREDINTERFACEBINDING:
+ return cmld_GetComponentRequiredInterfaceBinding(procPriv,
+ (CM_GetComponentRequiredInterfaceBinding_t *)arg);
+
+ case CM_GETCOMPONENTPROVIDEDINTERFACENUMBER:
+ return cmld_GetComponentProvidedInterfaceNumber(procPriv,
+ (CM_GetComponentProvidedInterfaceNumber_t *)arg);
+
+ case CM_GETCOMPONENTPROVIDEDINTERFACE:
+ return cmld_GetComponentProvidedInterface(procPriv,
+ (CM_GetComponentProvidedInterface_t *)arg);
+
+ case CM_GETCOMPONENTPROPERTYNUMBER:
+ return cmld_GetComponentPropertyNumber(procPriv,
+ (CM_GetComponentPropertyNumber_t *)arg);
+
+ case CM_GETCOMPONENTPROPERTYNAME:
+ return cmld_GetComponentPropertyName(procPriv,
+ (CM_GetComponentPropertyName_t *)arg);
+
+ case CM_GETCOMPONENTPROPERTYVALUE:
+ return cmld_GetComponentPropertyValue(procPriv,
+ (CM_GetComponentPropertyValue_t *)arg);
+
+ case CM_READCOMPONENTATTRIBUTE:
+ return cmld_ReadComponentAttribute(procPriv,
+ (CM_ReadComponentAttribute_t *)arg);
+
+ case CM_GETEXECUTIVEENGINEHANDLE:
+ return cmld_GetExecutiveEngineHandle(procPriv,
+ (CM_GetExecutiveEngineHandle_t *)arg);
+
+ case CM_SETMODE:
+ return cmld_SetMode((CM_SetMode_t *)arg);
+
+ case CM_GETREQUIREDCOMPONENTFILES:
+ return cmld_GetRequiredComponentFiles(procPriv,
+ (CM_GetRequiredComponentFiles_t *)arg);
+
+ case CM_MIGRATE:
+ return cmld_Migrate((CM_Migrate_t *)arg);
+
+ case CM_UNMIGRATE:
+ return cmld_Unmigrate((CM_Unmigrate_t *)arg);
+
+ case CM_SETUPRELINKAREA:
+ return cmld_SetupRelinkArea(procPriv,
+ (CM_SetupRelinkArea_t *)arg);
+
+ case CM_PUSHCOMPONENT:
+ return cmld_PushComponent((CM_PushComponent_t *)arg);
+
+ case CM_RELEASECOMPONENT:
+ return cmld_ReleaseComponent((CM_ReleaseComponent_t *)arg);
+
+ /*
+ * NMF CALLS (Host->MPC bindings)
+ */
+ case CM_PUSHEVENTWITHSIZE: {
+ CM_PushEventWithSize_t data;
+ t_event_params_handle event;
+
+ /* coverity[tainted_data_argument : FALSE] */
+ if (copy_from_user(&data.in, (CM_PushEventWithSize_t*)arg, sizeof(data.in)))
+ return -EFAULT;
+
+ /* Take the lock to synchronize CM_ENGINE_AllocEvent()
+ * and CM_ENGINE_PushEvent()
+ */
+ if (mutex_lock_killable(&procPriv->host2mpcLock))
+ return -ERESTARTSYS;
+
+ event = CM_ENGINE_AllocEvent(data.in.host2mpcId);
+ if (event == NULL) {
+ mutex_unlock(&procPriv->host2mpcLock);
+ return put_user(CM_PARAM_FIFO_OVERFLOW,
+ &((CM_PushEventWithSize_t*)arg)->out.error);
+ }
+ if (data.in.size != 0)
+ /* coverity[tainted_data : FALSE] */
+ if (copy_from_user(event, data.in.h, data.in.size)) {
+ mutex_unlock(&procPriv->host2mpcLock);
+ return -EFAULT; // TODO: what about the already allocated and acknowledged event!?!
+ }
+
+ data.out.error = CM_ENGINE_PushEvent(data.in.host2mpcId, event, data.in.methodIndex);
+ mutex_unlock(&procPriv->host2mpcLock);
+
+ /* copy error value back */
+ return put_user(data.out.error, &((CM_PushEventWithSize_t*)arg)->out.error);
+ }
+
+ /*
+ * All private (internal) commands
+ */
+ case CM_PRIVGETMPCMEMORYDESC:
+ return cmld_PrivGetMPCMemoryDesc(procPriv, (CM_PrivGetMPCMemoryDesc_t *)arg);
+
+ case CM_PRIVRESERVEMEMORY:
+ return cmld_PrivReserveMemory(procPriv, arg);
+
+ case CM_GETVERSION: {
+ t_uint32 nmfversion = NMF_VERSION;
+ return copy_to_user((void*)arg, &nmfversion, sizeof(nmfversion));
+ }
+ case CM_PRIV_GETBOARDVERSION: {
+ enum board_version v = U8500_V2;
+ return copy_to_user((void*)arg, &v, sizeof(v));
+ }
+ case CM_PRIV_ISCOMPONENTCACHEEMPTY:
+ if (CM_ENGINE_IsComponentCacheEmpty())
+ return 0;
+ else
+ return -ENOENT;
+ case CM_PRIV_DEBUGFS_READY:
+#ifdef CONFIG_DEBUG_FS
+ cmld_user_has_debugfs = true;
+#endif
+ return 0;
+ case CM_PRIV_DEBUGFS_WAIT_DUMP:
+ return 0;
+ default:
+ pr_err("CM(%s): unsupported command %i\n", __func__, cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** VMA open callback function
+ */
+static void cmld_vma_open(struct vm_area_struct* vma) {
+ struct memAreaDesc_t* curr = (struct memAreaDesc_t*)vma->vm_private_data;
+
+ atomic_inc(&curr->count);
+}
+
+/** VMA close callback function
+ */
+static void cmld_vma_close(struct vm_area_struct* vma) {
+ struct memAreaDesc_t* curr = (struct memAreaDesc_t*)vma->vm_private_data;
+
+ atomic_dec(&curr->count);
+}
+
+static struct vm_operations_struct cmld_remap_vm_ops = {
+ .open = cmld_vma_open,
+ .close = cmld_vma_close,
+};
+
+/** mmap implementation.
+ * Remaps just once.
+ *
+ * \return POSIX error code
+ */
+static int cmld_control_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ struct list_head* listHead;
+ struct list_head* cursor;
+ struct cm_process_priv* procPriv = file->private_data;
+ struct memAreaDesc_t* curr = NULL;
+ unsigned int vma_size = vma->vm_end-vma->vm_start;
+
+ listHead = &procPriv->memAreaDescList;
+
+ if (lock_process(procPriv)) return -ERESTARTSYS;
+ /* Make sure the memory area has not already been remapped */
+ list_for_each(cursor, listHead) {
+ curr = list_entry(cursor, struct memAreaDesc_t, list);
+		/* For now, user space aligns any requested physical address to a page
+		   boundary. This is not safe and must be fixed, but it is the only way to
+		   minimize the amount of TCM memory allocated, needed because TCM memory
+		   is scarce.
+		   Another way would be to add more checks before allowing this mmap().
+		   NOTE: this memory must first be reserved via the CM_PRIVRESERVEMEMORY ioctl()
+		*/
+ if ((curr->physAddr&PAGE_MASK) == offset &&
+ curr->tid == current->pid) {
+ if (curr->userLogicalAddr) {
+ unlock_process(procPriv);
+ return -EINVAL; // already mapped!
+ }
+ /* reset the thread id value, to not confuse any further mmap() */
+ curr->tid = 0;
+ break;
+ }
+ }
+
+ if (cursor == listHead) {
+ unlock_process(procPriv);
+ return -EINVAL; // no matching memory area descriptor found!
+ }
+
+	/* Very important: map non-cached to keep the buffer contents consistent */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTEXPAND | VM_DONTCOPY;
+
+ if (remap_pfn_range(vma, vma->vm_start, offset>>PAGE_SHIFT,
+ vma_size, vma->vm_page_prot)) {
+ unlock_process(procPriv);
+ return -EAGAIN;
+ }
+
+	/* Offset represents the physical address.
+	 * Update the list entry, filling in the logical address assigned to the user.
+	 */
+	/*
+	 * NOTE: here userLogicalAddr is page-aligned, but the physical address is
+	 * not necessarily. We mmap() more than originally requested by the
+	 * user, see the CM User Proxy (file cmsyscallwrapper.c)
+	 */
+ curr->userLogicalAddr = vma->vm_start;
+
+ /* increment reference counter */
+ atomic_inc(&curr->count);
+
+ unlock_process(procPriv);
+
+ /* set private data structure and callbacks */
+ vma->vm_private_data = (void *)curr;
+ vma->vm_ops = &cmld_remap_vm_ops;
+
+ return 0;
+}
+
+/* Driver's release method for /dev/cm_channel */
+static int cmld_channel_release(struct inode *inode, struct file *file)
+{
+ struct cm_channel_priv* channelPriv = file->private_data;
+ struct cm_process_priv* procPriv = channelPriv->proc;
+
+ /*
+	 * The driver must guarantee that all related resources are released.
+	 * The checks below release any remaining resources still linked to
+	 * this 'client', in case of abnormal process exit.
+	 * => These are error cases!
+	 * In the usual case, nothing needs to be done except freeing
+	 * the cmPriv itself.
+ */
+
+ /* We don't need to synchronize here by using the skelListLock:
+ the list is only accessed during ioctl() and we can't be here
+ if an ioctl() is on-going */
+ if (list_empty(&channelPriv->skelList)) {
+ /* There is no pending MPC->HOST binding
+ => we can quietly delete the channel */
+ tasklet_disable(&cmld_service_tasklet);
+ mutex_lock(&channel_lock);
+ list_del(&channelPriv->entry);
+ mutex_unlock(&channel_lock);
+ tasklet_enable(&cmld_service_tasklet);
+
+ /* Free all remaining messages if any */
+ freeMessages(channelPriv);
+
+ /* Free the per-channel descriptor */
+ OSAL_Free(channelPriv);
+ } else {
+ /*
+		 * There are still some MPC->HOST bindings but we don't have
+		 * the required info to unbind them.
+		 * => we must keep all skel structures because they may still be used by
+		 * OSAL_PostDfc (incoming callback msg). We flag the channel as
+		 * closed to discard any new msg, which would never be read anyway.
+ */
+ channelPriv->state = CHANNEL_CLOSED;
+
+		/* Free all remaining messages now, if any;
+		   they will never be read anyway */
+ freeMessages(channelPriv);
+ }
+
+ kref_put(&procPriv->ref, freeProcessPriv);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+/* Driver's release method for /dev/cm_control */
+static int cmld_control_release(struct inode *inode, struct file *file)
+{
+ struct cm_process_priv* procPriv = file->private_data;
+
+ kref_put(&procPriv->ref, freeProcessPriv);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static struct file_operations cmld_control_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = cmld_control_ioctl,
+ .mmap = cmld_control_mmap,
+ .release = cmld_control_release,
+};
+
+static int cmld_control_open(struct file *file)
+{
+ struct cm_process_priv *procPriv = getProcessPriv();
+ if (IS_ERR(procPriv))
+ return PTR_ERR(procPriv);
+ file->private_data = procPriv;
+ file->f_op = &cmld_control_fops;
+ return 0;
+}
+
+static struct file_operations cmld_channel_fops = {
+ .owner = THIS_MODULE,
+ .read = cmld_channel_read,
+ .unlocked_ioctl = cmld_channel_ioctl,
+ .flush = cmld_channel_flush,
+ .release = cmld_channel_release,
+};
+
+static int cmld_channel_open(struct file *file)
+{
+ struct cm_process_priv *procPriv = getProcessPriv();
+ struct cm_channel_priv *channelPriv;
+
+ if (IS_ERR(procPriv))
+ return PTR_ERR(procPriv);
+
+ channelPriv = (struct cm_channel_priv*)OSAL_Alloc(sizeof(*channelPriv));
+ if (channelPriv == NULL) {
+ kref_put(&procPriv->ref, freeProcessPriv);
+ return -ENOMEM;
+ }
+
+ channelPriv->proc = procPriv;
+ channelPriv->state = CHANNEL_OPEN;
+
+ /* Initialize wait_queue, lists and mutexes */
+ init_waitqueue_head(&channelPriv->waitq);
+ plist_head_init(&channelPriv->messageQueue);
+ INIT_LIST_HEAD(&channelPriv->skelList);
+ spin_lock_init(&channelPriv->bh_lock);
+ mutex_init(&channelPriv->msgQueueLock);
+ mutex_init(&channelPriv->skelListLock);
+
+ tasklet_disable(&cmld_service_tasklet);
+ mutex_lock(&channel_lock);
+ list_add(&channelPriv->entry, &channel_list);
+ mutex_unlock(&channel_lock);
+ tasklet_enable(&cmld_service_tasklet);
+
+ file->private_data = channelPriv;
+ file->f_op = &cmld_channel_fops;
+ return 0;
+}
+
+static ssize_t cmld_sxa_trace_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+{
+ struct mpcConfig *mpc = file->private_data;
+ size_t written = 0;
+ struct t_nmf_trace trace;
+ t_cm_trace_type traceType;
+ struct mmdsp_trace mmdsp_tr = {
+ .media = TB_MEDIA_FILE,
+ .receiver_dev = TB_DEV_PC,
+ .sender_dev = TB_DEV_TRACEBOX,
+ .unused = TB_TRACEBOX,
+ .receiver_obj = DEFAULT_RECEIVERR_OBJ,
+ .sender_obj = DEFAULT_SENDER_OBJ,
+ .transaction_id = 0,
+ .message_id = TB_TRACE_MSG,
+ .master_id = mpc->coreId+1,
+ .channel_id = 0,
+ .ost_version = OST_VERSION,
+ .entity = ENTITY,
+ .protocol_id = PROTOCOL_ID,
+ .btrace_hdr_flag = 0,
+ .btrace_hdr_subcategory = 0,
+ };
+
+ while ((count - written) >= sizeof(mmdsp_tr)) {
+ traceType = CM_ENGINE_GetNextTrace(mpc->coreId, &trace);
+
+ switch (traceType) {
+ case CM_MPC_TRACE_READ_OVERRUN:
+ mmdsp_tr.size =
+ cpu_to_be16(offsetof(struct mmdsp_trace,
+ ost_version)
+ -offsetof(struct mmdsp_trace,
+ receiver_obj));
+ mmdsp_tr.message_id = TB_TRACE_EXCEPTION_MSG;
+ mmdsp_tr.ost_master_id = TB_EXCEPTION_LONG_OVRF_PACKET;
+ if (copy_to_user(&buf[written], &mmdsp_tr,
+ offsetof(struct mmdsp_trace,
+ ost_version)))
+ return -EFAULT;
+ written += offsetof(struct mmdsp_trace, ost_version);
+ if ((count - written) < sizeof(mmdsp_tr))
+ break;
+ case CM_MPC_TRACE_READ: {
+ u16 param_nr = (u16)trace.paramOpt;
+ u16 handle_valid = (u16)(trace.paramOpt >> 16);
+ u32 to_write = offsetof(struct mmdsp_trace,
+ parent_handle);
+ mmdsp_tr.transaction_id = trace.revision%256;
+ mmdsp_tr.message_id = TB_TRACE_MSG;
+ mmdsp_tr.ost_master_id = OST_MASTERID;
+ mmdsp_tr.timestamp = cpu_to_be64(trace.timeStamp);
+ mmdsp_tr.timestamp2 = cpu_to_be64(trace.timeStamp);
+ mmdsp_tr.component_id = cpu_to_be32(trace.componentId);
+ mmdsp_tr.trace_id = cpu_to_be32(trace.traceId);
+ mmdsp_tr.btrace_hdr_category = (trace.traceId>>16)&0xFF;
+ mmdsp_tr.btrace_hdr_size = BTRACE_HEADER_SIZE
+ + sizeof(trace.params[0]) * param_nr;
+ if (handle_valid) {
+ mmdsp_tr.parent_handle = trace.parentHandle;
+ mmdsp_tr.component_handle =
+ trace.componentHandle;
+ to_write += sizeof(trace.parentHandle)
+ + sizeof(trace.componentHandle);
+ mmdsp_tr.btrace_hdr_size +=
+ sizeof(trace.parentHandle)
+ + sizeof(trace.componentHandle);
+ }
+ mmdsp_tr.size =
+ cpu_to_be16(to_write
+ + (sizeof(trace.params[0])*param_nr)
+ - offsetof(struct mmdsp_trace,
+ receiver_obj));
+ mmdsp_tr.length = to_write
+ + (sizeof(trace.params[0])*param_nr)
+ - offsetof(struct mmdsp_trace,
+ timestamp2);
+ if (copy_to_user(&buf[written], &mmdsp_tr, to_write))
+ return -EFAULT;
+ written += to_write;
+ /* write param */
+ to_write = sizeof(trace.params[0]) * param_nr;
+ if (copy_to_user(&buf[written], trace.params, to_write))
+ return -EFAULT;
+ written += to_write;
+ break;
+ }
+ case CM_MPC_TRACE_NONE:
+ default:
+ if ((file->f_flags & O_NONBLOCK) || written)
+ return written;
+ spin_lock_bh(&mpc->trace_reader_lock);
+ mpc->trace_reader = current;
+ spin_unlock_bh(&mpc->trace_reader_lock);
+ schedule_timeout_killable(msecs_to_jiffies(200));
+ spin_lock_bh(&mpc->trace_reader_lock);
+ mpc->trace_reader = NULL;
+ spin_unlock_bh(&mpc->trace_reader_lock);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ }
+ }
+ return written;
+}
+
+/* Driver's release method for /dev/cm_sxa_trace */
+static int cmld_sxa_trace_release(struct inode *inode, struct file *file)
+{
+ struct mpcConfig *mpc = file->private_data;
+ atomic_dec(&mpc->trace_read_count);
+ return 0;
+}
+
+static struct file_operations cmld_sxa_trace_fops = {
+ .owner = THIS_MODULE,
+ .read = cmld_sxa_trace_read,
+ .release = cmld_sxa_trace_release,
+};
+
+static int cmld_sxa_trace_open(struct file *file, struct mpcConfig *mpc)
+{
+ if (atomic_add_unless(&mpc->trace_read_count, 1, 1) == 0)
+ return -EBUSY;
+
+ file->private_data = mpc;
+ file->f_op = &cmld_sxa_trace_fops;
+ return 0;
+}
+
+/* Driver's open method: dispatch according to the minor number */
+static int cmld_open(struct inode *inode, struct file *file)
+{
+ switch (iminor(inode)) {
+ case 0:
+ return cmld_control_open(file);
+ case 1:
+ return cmld_channel_open(file);
+ case 2:
+ return cmld_sxa_trace_open(file, &osalEnv.mpc[SIA]);
+ case 3:
+ return cmld_sxa_trace_open(file, &osalEnv.mpc[SVA]);
+ default:
+ return -ENOSYS;
+ }
+}
+
+/** MPC Events tasklet
+ * The parameter is used to know from which interrupt we're coming
+ * and which core to pass to CM_ProcessMpcEvent():
+ * 0 means HSEM => ARM_CORE_ID
+ * otherwise, it gives the index+1 of the MPC within the osalEnv.mpc table
+ */
+static void mpc_events_tasklet_handler(unsigned long core)
+{
+ /* This serves internal events directly. No propagation to user space.
+ * Calls OSAL_PostDfc implementation for user interface events */
+ if (core == 0) {
+ CM_ProcessMpcEvent(ARM_CORE_ID);
+ enable_irq(IRQ_DB8500_HSEM);
+ } else {
+ --core;
+ CM_ProcessMpcEvent(osalEnv.mpc[core].coreId);
+ enable_irq(osalEnv.mpc[core].interrupt0);
+ }
+}
+
+/** Hardware semaphore and MPC interrupt handler
+ * The 'data' param is the one given when registering the IRQ handler;
+ * it contains the source core (ARM or MPC), and follows the same logic
+ * as for mpc_events_tasklet_handler().
+ * This handler is used for all IRQs involved in communication (i.e. HSEM or
+ * all MPC IRQ line0)
+ */
+static irqreturn_t mpc_events_irq_handler(int irq, void *data)
+{
+ unsigned core = (unsigned)data;
+
+ if (core != 0)
+ --core;
+ disable_irq_nosync(irq);
+ tasklet_schedule(&osalEnv.mpc[core].tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/** MPC panic handler
+ * 'idx' contains the index of the core within the osalEnv.mpc table.
+ * This handler is used for all MPC IRQ line1
+ */
+static irqreturn_t panic_handler(int irq, void *idx)
+{
+ set_bit((int)idx, &service_tasklet_data);
+ disable_irq_nosync(irq);
+ tasklet_schedule(&cmld_service_tasklet);
+ return IRQ_HANDLED;
+}
+
+/** Driver's operations
+ */
+static struct file_operations cmld_fops = {
+ .owner = THIS_MODULE,
+ .open = cmld_open,
+};
+
+/**
+ * Configure an MPC, called for each MPC to configure
+ *
+ * \param i index of the MPC to configure (i.e. the index
+ *          of the MPC within the osalEnv.mpc table)
+ * \param dataAllocId allocId of the data segment, passed through each call of
+ *                    this function, and initialized at the first call in case of
+ *                    a shared data segment
+ */
+static int configureMpc(unsigned i, t_cfg_allocator_id *dataAllocId)
+{
+ int err;
+ t_cm_system_address mpcSystemAddress;
+ t_nmf_memory_segment codeSegment, dataSegment;
+ t_cfg_allocator_id codeAllocId;
+ t_cm_domain_id eeDomainId;
+ t_cm_domain_memory eeDomain = INIT_DOMAIN;
+ char regulator_name[14];
+
+ getMpcSystemAddress(i, &mpcSystemAddress);
+ getMpcSdramSegments(i, &codeSegment, &dataSegment);
+
+ /* Create code segment */
+ err = CM_ENGINE_AddMpcSdramSegment(&codeSegment, &codeAllocId, "Code");
+ if (err != CM_OK) {
+ pr_err("CM_ENGINE_AddMpcSdramSegment() error code: %d\n", err);
+ return -EAGAIN;
+ }
+
+	/* Create data segment
+	 * NOTE: in case of a shared data segment, all MPCs point to the same data segment
+	 * (see remapRegions()) and we need to create the segment only on the first call.
+	 * => we reuse the same allocId for the following MPCs
+	 */
+ if ((osalEnv.mpc[i].sdram_data.data != osalEnv.mpc[0].sdram_data.data)
+ || *dataAllocId == -1) {
+ err = CM_ENGINE_AddMpcSdramSegment(&dataSegment, dataAllocId, "Data");
+ if (err != CM_OK) {
+ pr_err("CM_ENGINE_AddMpcSdramSegment() error code: %d\n", err);
+ return -EAGAIN;
+ }
+ }
+
+	/* Create the default domain for the given coreId.
+	 * This serves for instantiating the EE and the LoadMap; only the sdram segment is present.
+	 * This domain will probably overlap with other user domains.
+	 */
+ eeDomain.coreId = osalEnv.mpc[i].coreId;
+ eeDomain.sdramCode.offset = 0x0;
+ eeDomain.sdramData.offset = 0x0;
+ eeDomain.sdramCode.size = 0x8000;
+ eeDomain.sdramData.size = 0x40000;
+ eeDomain.esramCode.size = 0x4000;
+ eeDomain.esramData.size = 0x40000;
+ err = CM_ENGINE_CreateMemoryDomain(NMF_CORE_CLIENT, &eeDomain, &eeDomainId);
+ if (err != CM_OK) {
+ pr_err("Create EE domain on %s failed with error code: %d\n", osalEnv.mpc[i].name, err);
+ return -EAGAIN;
+ }
+
+ err = CM_ENGINE_ConfigureMediaProcessorCore(
+ osalEnv.mpc[i].coreId,
+ osalEnv.mpc[i].eeId,
+ (cfgSemaphoreTypeHSEM ? SYSTEM_SEMAPHORES : LOCAL_SEMAPHORES),
+ osalEnv.mpc[i].nbYramBanks,
+ &mpcSystemAddress,
+ eeDomainId,
+ codeAllocId,
+ *dataAllocId);
+
+ if (err != CM_OK) {
+ pr_err("CM_ConfigureMediaProcessorCore failed with error code: %d\n", err);
+ return -EAGAIN;
+ }
+
+ // Communication channel
+ if (! cfgSemaphoreTypeHSEM) {
+ tasklet_init(&osalEnv.mpc[i].tasklet, mpc_events_tasklet_handler, i+1);
+ err = request_irq(osalEnv.mpc[i].interrupt0, mpc_events_irq_handler, IRQF_DISABLED, osalEnv.mpc[i].name, (void*)(i+1));
+ if (err != 0) {
+ pr_err("CM: request_irq failed to register irq0 %i for %s (%i)\n", osalEnv.mpc[i].interrupt0, osalEnv.mpc[i].name, err);
+ return err;
+ }
+ }
+
+ // Panic channel
+ err = request_irq(osalEnv.mpc[i].interrupt1, panic_handler, IRQF_DISABLED, osalEnv.mpc[i].name, (void*)i);
+ if (err != 0) {
+ pr_err("CM: request_irq failed to register irq1 %i for %s (%i)\n", osalEnv.mpc[i].interrupt1, osalEnv.mpc[i].name, err);
+ free_irq(osalEnv.mpc[i].interrupt0, (void*)(i+1));
+ return err;
+ }
+
+	// Retrieve the regulators used for this MPC
+ sprintf(regulator_name, "%s-mmdsp", osalEnv.mpc[i].name);
+ osalEnv.mpc[i].mmdsp_regulator = regulator_get(cmld_dev[0], regulator_name);
+ if (IS_ERR(osalEnv.mpc[i].mmdsp_regulator)) {
+ long err = PTR_ERR(osalEnv.mpc[i].mmdsp_regulator);
+ pr_err("CM: Error while retrieving the regulator %s: %ld\n", regulator_name, err);
+ osalEnv.mpc[i].mmdsp_regulator = NULL;
+ return err;
+ }
+ sprintf(regulator_name, "%s-pipe", osalEnv.mpc[i].name);
+ osalEnv.mpc[i].pipe_regulator = regulator_get(cmld_dev[0], regulator_name);
+ if (IS_ERR(osalEnv.mpc[i].pipe_regulator)) {
+ long err = PTR_ERR(osalEnv.mpc[i].pipe_regulator);
+ pr_err("CM: Error while retrieving the regulator %s: %ld\n", regulator_name, err);
+ osalEnv.mpc[i].pipe_regulator = NULL;
+ return err;
+ }
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&osalEnv.mpc[i].wakelock, WAKE_LOCK_SUSPEND, osalEnv.mpc[i].name);
+#endif
+ return 0;
+}
+
+/* Free all used MPC irqs and clocks.
+ * max_mpc allows it to be called from init_module and to free
+ * only the already configured irqs.
+ */
+static void free_mpc_irqs(int max_mpc)
+{
+ int i;
+ for (i=0; i<max_mpc; i++) {
+ if (! cfgSemaphoreTypeHSEM)
+ free_irq(osalEnv.mpc[i].interrupt0, (void*)(i+1));
+ free_irq(osalEnv.mpc[i].interrupt1, (void*)i);
+ if (osalEnv.mpc[i].mmdsp_regulator)
+ regulator_put(osalEnv.mpc[i].mmdsp_regulator);
+ if (osalEnv.mpc[i].pipe_regulator)
+ regulator_put(osalEnv.mpc[i].pipe_regulator);
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_destroy(&osalEnv.mpc[i].wakelock);
+#endif
+ }
+}
+
+/** Module entry point
+ * Allocate memory chunks. Register hardware semaphore, SIA and SVA interrupts.
+ * Initialize Component Manager. Register hotplug for components download.
+ *
+ * \return POSIX error code
+ */
+static int __init cmld_init_module(void)
+{
+ int err;
+ unsigned i=0;
+ dev_t dev;
+ t_cfg_allocator_id dataAllocId = -1;
+ void *htim_base=NULL;
+
+ /* Component manager initialization descriptors */
+ t_nmf_hw_mapping_desc nmfHwMappingDesc;
+ t_nmf_config_desc nmfConfigDesc = { cfgCommunicationLocationInSDRAM ? COMS_IN_SDRAM : COMS_IN_ESRAM };
+
+ /* OSAL_*Resources() assumes the following, so check that it is correct */
+ if (SVA != COREIDX((int)SVA_CORE_ID)) {
+ pr_err("SVA and (SVA_CORE_ID-1) differs : code must be fixed !\n");
+ return -EIO;
+ }
+ if (SIA != COREIDX((int)SIA_CORE_ID)) {
+ pr_err("SIA and (SIA_CORE_ID-1) differs : code must be fixed !\n");
+ return -EIO;
+ }
+
+#ifdef CM_DEBUG_ALLOC
+ init_debug_alloc();
+#endif
+
+ err = -EIO;
+ prcmu_base = __io_address(U8500_PRCMU_BASE);
+
+	/* power on the 90 kHz clock/timer used on SVA */
+ htim_base = ioremap_nocache(U8500_CR_BASE /*0xA03C8000*/, SZ_4K);
+ prcmu_tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
+
+ /* Activate SVA 90 KHz timer */
+ if (htim_base == NULL)
+ goto out;
+ iowrite32((1<<26) | ioread32(htim_base), htim_base);
+ iounmap(htim_base);
+
+ /*i = ioread32(PRCM_SVAMMDSPCLK_MGT) & 0xFF;
+ if (i != 0x22)
+ pr_alert("CM: Looks like SVA is not clocked at 200MHz (PRCM_SVAMMDSPCLK_MGT=%x)\n", i);
+ i = ioread32(PRCM_SIAMMDSPCLK_MGT) & 0xFF;
+ if (i != 0x22)
+ pr_alert("CM: Looks like SIA is not clocked at 200MHz (PRCM_SIAMMDSPCLK_MGT=%x)\n", i);
+
+ i = 0;*/
+ err = init_config();
+ if (err)
+ goto out;
+
+ /* Remap all needed regions and store in osalEnv base addresses */
+ err = remapRegions();
+ if (err != 0)
+ goto out;
+
+ /* Initialize linux devices */
+ err = class_register(&cmld_class);
+ if (err) {
+ pr_err("CM: class_register failed (%d)\n", err);
+ goto out;
+ }
+
+ /* Register char device */
+ err = alloc_chrdev_region(&dev, 0, ARRAY_SIZE(cmld_devname), "cm");
+ if (err) {
+ pr_err("CM: alloc_chrdev_region failed (%d)\n", err);
+ goto out_destroy_class;
+ }
+ cmld_major = MAJOR(dev);
+
+ cdev_init(&cmld_cdev, &cmld_fops);
+ cmld_cdev.owner = THIS_MODULE;
+ err = cdev_add (&cmld_cdev, dev, ARRAY_SIZE(cmld_devname));
+ if (err) {
+ pr_err("CM: cdev_add failed (%d)\n", err);
+ goto out_destroy_chrdev;
+ }
+
+ for (i=0; i<ARRAY_SIZE(cmld_devname); i++) {
+ cmld_dev[i] = device_create(&cmld_class, NULL, MKDEV(cmld_major, i), NULL,
+ "%s", cmld_devname[i]);
+ if (IS_ERR(cmld_dev[i])) {
+ err = PTR_ERR(cmld_dev[i]);
+ pr_err("CM: device_create failed (%d)\n", err);
+ goto out_destroy_device;
+ }
+ }
+
+ osalEnv.esram_regulator[ESRAM_12] = regulator_get(cmld_dev[0], "esram12");
+ if (IS_ERR(osalEnv.esram_regulator[ESRAM_12])) {
+ err = PTR_ERR(osalEnv.esram_regulator[ESRAM_12]);
+ pr_err("CM: Error while retrieving the regulator for esram12: %d\n", err);
+ osalEnv.esram_regulator[ESRAM_12] = NULL;
+ goto out_destroy_device;
+ }
+ osalEnv.esram_regulator[ESRAM_34] = regulator_get(cmld_dev[0], "esram34");
+ if (IS_ERR(osalEnv.esram_regulator[ESRAM_34])) {
+ err = PTR_ERR(osalEnv.esram_regulator[ESRAM_34]);
+ pr_err("CM: Error while retrieving the regulator for esram34: %d\n", err);
+ osalEnv.esram_regulator[ESRAM_34] = NULL;
+ goto out_destroy_device;
+ }
+
+ /* Fill in the descriptors needed by CM_ENGINE_Init() */
+ getNmfHwMappingDesc(&nmfHwMappingDesc);
+
+ /* Initialize Component Manager */
+ err = CM_ENGINE_Init(&nmfHwMappingDesc, &nmfConfigDesc);
+ if (err != CM_OK) {
+ pr_err("CM: CM_Init failed with error code: %d\n", err);
+ err = -EAGAIN;
+ goto out_destroy_device;
+ } else {
+ pr_info("Initialize NMF %d.%d.%d Component Manager......\n",
+ VERSION_MAJOR(NMF_VERSION),
+ VERSION_MINOR(NMF_VERSION),
+ VERSION_PATCH(NMF_VERSION));
+ pr_info("[ CM Linux Driver %d.%d.%d ]\n",
+ VERSION_MAJOR(NMF_VERSION),
+ VERSION_MINOR(NMF_VERSION),
+ CMDRIVER_PATCH_VERSION);
+ }
+
+ cm_debug_init();
+ if (osal_debug_ops.domain_create) {
+ osal_debug_ops.domain_create(DEFAULT_SVA_DOMAIN);
+ osal_debug_ops.domain_create(DEFAULT_SIA_DOMAIN);
+ }
+
+ /* Configure MPC Cores */
+ for (i=0; i<NB_MPC; i++) {
+ err = configureMpc(i, &dataAllocId);
+ if (err)
+ goto out_all;
+ }
+ /* End of Component Manager initialization phase */
+
+
+ if (cfgSemaphoreTypeHSEM) {
+		/* We use the tasklet of mpc[0]. See comments above the OsalEnvironment struct */
+ tasklet_init(&osalEnv.mpc[0].tasklet, mpc_events_tasklet_handler, 0);
+ err = request_irq(IRQ_DB8500_HSEM, mpc_events_irq_handler, IRQF_DISABLED,
+ "hwsem", 0);
+ if (err) {
+ pr_err("CM: request_irq failed to register hwsem irq %i (%i)\n",
+ IRQ_DB8500_HSEM, err);
+ goto out_all;
+ }
+ }
+
+ err = cmdma_init();
+ if (err == 0)
+ return 0;
+
+out_all:
+ cm_debug_exit();
+ free_mpc_irqs(i);
+ CM_ENGINE_Destroy();
+ i=ARRAY_SIZE(cmld_devname);
+out_destroy_device:
+ if (osalEnv.esram_regulator[ESRAM_12])
+ regulator_put(osalEnv.esram_regulator[ESRAM_12]);
+ if (osalEnv.esram_regulator[ESRAM_34])
+ regulator_put(osalEnv.esram_regulator[ESRAM_34]);
+ while (i--)
+ device_destroy(&cmld_class, MKDEV(cmld_major, i));
+ cdev_del(&cmld_cdev);
+out_destroy_chrdev:
+ unregister_chrdev_region(dev, ARRAY_SIZE(cmld_devname));
+out_destroy_class:
+ class_unregister(&cmld_class);
+out:
+ unmapRegions();
+#ifdef CM_DEBUG_ALLOC
+ cleanup_debug_alloc();
+#endif
+ return err;
+}
+
+/** Module exit point
+ * Unregister the driver. This will lead to a 'remove' call.
+ */
+static void __exit cmld_cleanup_module(void)
+{
+ unsigned i;
+
+ if (!list_empty(&channel_list))
+ pr_err("CM Driver ending with non empty channel list\n");
+ if (!list_empty(&process_list))
+ pr_err("CM Driver ending with non empty process list\n");
+
+ if (cfgSemaphoreTypeHSEM)
+ free_irq(IRQ_DB8500_HSEM, NULL);
+ free_mpc_irqs(NB_MPC);
+ tasklet_kill(&cmld_service_tasklet);
+
+ if (osalEnv.esram_regulator[ESRAM_12])
+ regulator_put(osalEnv.esram_regulator[ESRAM_12]);
+ if (osalEnv.esram_regulator[ESRAM_34])
+ regulator_put(osalEnv.esram_regulator[ESRAM_34]);
+ for (i=0; i<ARRAY_SIZE(cmld_devname); i++)
+ device_destroy(&cmld_class, MKDEV(cmld_major, i));
+ cdev_del(&cmld_cdev);
+ unregister_chrdev_region(MKDEV(cmld_major, 0), ARRAY_SIZE(cmld_devname));
+ class_unregister(&cmld_class);
+
+ CM_ENGINE_Destroy();
+
+ cmdma_destroy();
+ unmapRegions();
+#ifdef CM_DEBUG_ALLOC
+ cleanup_debug_alloc();
+#endif
+ cm_debug_exit();
+}
+module_init(cmld_init_module);
+module_exit(cmld_cleanup_module);
+
+MODULE_AUTHOR("David Siorpaes");
+MODULE_AUTHOR("Wolfgang Betz");
+MODULE_AUTHOR("Pierre Peiffer");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Nomadik Multiprocessing Framework Component Manager Linux driver");
diff --git a/drivers/staging/nmf-cm/cmld.h b/drivers/staging/nmf-cm/cmld.h
new file mode 100644
index 00000000000..17e6c55ff61
--- /dev/null
+++ b/drivers/staging/nmf-cm/cmld.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef CMLD_H
+#define CMLD_H
+
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/version.h>
+#include <linux/wait.h>
+#include <inc/nmf-limits.h>
+#include "cmioctl.h"
+
+/** Channel state used within the per-channel private structure 'cm_channel_priv'
+ */
+enum channel_state {
+ CHANNEL_CLOSED = 0, /**< Channel already closed */
+ CHANNEL_OPEN, /**< Channel still open */
+};
+
+/** Component Manager per-process private structure
+ * It is created the first time a process opens /dev/cm0 or /dev/cm1
+ */
+struct cm_process_priv
+{
+ struct kref ref; /**< ref count */
+ struct list_head entry; /**< This entry */
+ pid_t pid; /**< pid of process owner */
+ struct mutex mutex; /**< per process mutex: protect memAreaDescList */
+ struct list_head memAreaDescList; /**< memAreaDesc_t list */
+ struct mutex host2mpcLock; /**< used to synchronize each AllocEvent + PushEvent */
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dir; /**< debugfs dir entry under nmf-cm/proc */
+ struct dentry *comp_dir; /**< debugfs dir entry under nmf-cm/proc/..%components */
+ struct dentry *domain_dir; /**< debugfs dir entry under nmf-cm/proc/..%domains */
+#endif
+};
+
+/** Component Manager per-channel private structure
+ * It is created when a user opens /dev/cm1
+ */
+struct cm_channel_priv
+{
+ enum channel_state state; /**< Channel state */
+ struct list_head entry; /**< This entry */
+ struct cm_process_priv *proc; /**< Pointer to the owner process structure */
+ struct list_head skelList; /**< t_skelwrapper list */
+ struct mutex skelListLock; /**< skelList mutex */
+ struct plist_head messageQueue; /**< queueelem_t list */
+ struct mutex msgQueueLock; /**< lock used to synchronize MPC to HOST bindings
+ in case of multiple read (see cmld_read comments) */
+ spinlock_t bh_lock; /**< lock used to synchronize add/removal of element in/from
+ the message queue in both user context and tasklet */
+ wait_queue_head_t waitq; /**< wait queue used to block read() call */
+};
+
+/** Memory area descriptor.
+ */
+struct memAreaDesc_t {
+ struct list_head list; /**< Doubly linked list descriptor */
+ atomic_t count; /**< Reference counter */
+ pid_t tid; /**< tid of the process this area is reserved for */
+ t_cm_memory_handle handle; /**< Component Manager handle */
+ unsigned int size; /**< Size */
+ unsigned int physAddr; /**< Physical address */
+ unsigned int kernelLogicalAddr; /**< Logical address as seen by kernel */
+ unsigned int userLogicalAddr; /**< Logical address as seen by user */
+	unsigned int mpcPhysAddr;         /**< Physical address as seen by MPC */
+ struct cm_process_priv* procPriv; /**< link to per process private structure */
+};
+
+extern struct list_head channel_list; /**< List of all allocated channel structures */
+extern struct list_head process_list; /**< List of all allocated process private structure */
+#ifdef CONFIG_DEBUG_FS
+extern bool cmld_user_has_debugfs; /**< Whether user side has proper support of debugfs to take a dump */
+extern bool cmld_dump_ongoing; /**< Whether a dump is on-going */
+#endif
+
+/* Structure used to embed DSP traces */
+#define TB_MEDIA_FILE 0x1C
+#define TB_DEV_PC 0x10
+#define TB_DEV_TRACEBOX 0x4C
+#define TB_TRACEBOX 0x7C
+#define DEFAULT_RECEIVERR_OBJ 0x0
+#define DEFAULT_SENDER_OBJ 0x0A
+#define TB_TRACE_MSG 0x94
+#define TB_TRACE_EXCEPTION_MSG 0x95
+#define TB_EXCEPTION_LONG_OVRF_PACKET 0x07
+#define OST_MASTERID 0x08
+#define OST_VERSION 0x05
+#define ENTITY 0xAA
+#define PROTOCOL_ID 0x03
+#define BTRACE_HEADER_SIZE 4
+
+struct __attribute__ ((__packed__)) mmdsp_trace {
+ u8 media;
+ u8 receiver_dev;
+ u8 sender_dev;
+ u8 unused;
+ u16 size;
+ u8 receiver_obj;
+ u8 sender_obj;
+ u8 transaction_id;
+ u8 message_id;
+ u8 master_id;
+ u8 channel_id;
+ u64 timestamp;
+ u8 ost_master_id;
+ u8 ost_version;
+ u8 entity;
+ u8 protocol_id;
+ u8 length;
+ u64 timestamp2;
+ u32 component_id;
+ u32 trace_id;
+ u8 btrace_hdr_size;
+ u8 btrace_hdr_flag;
+ u8 btrace_hdr_category;
+ u8 btrace_hdr_subcategory;
+ u32 parent_handle;
+ u32 component_handle;
+ u32 params[4];
+};
+
+/** Lock/unlock per process mutex
+ *
+ * \note Must be taken before tasklet_disable (if necessary)!
+ */
+#define lock_process_uninterruptible(proc) (mutex_lock(&proc->mutex))
+#define lock_process(proc) (mutex_lock_killable(&proc->mutex))
+#define unlock_process(proc) (mutex_unlock(&proc->mutex))
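+
+/*
+ * A minimal sketch of the locking pattern used in cmld.c (e.g. in
+ * cmld_control_mmap()) when walking the per-process memory area list:
+ *
+ *	if (lock_process(procPriv))
+ *		return -ERESTARTSYS;
+ *	... walk or update procPriv->memAreaDescList ...
+ *	unlock_process(procPriv);
+ */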
+
+
+
+int cmld_InstantiateComponent(struct cm_process_priv *, CM_InstantiateComponent_t __user *);
+int cmld_BindComponentFromCMCore(struct cm_process_priv *,
+ CM_BindComponentFromCMCore_t __user *);
+int cmld_UnbindComponentFromCMCore(CM_UnbindComponentFromCMCore_t __user *);
+int cmld_BindComponentToCMCore(struct cm_channel_priv *, CM_BindComponentToCMCore_t __user *);
+int cmld_UnbindComponentToCMCore(struct cm_process_priv*, CM_UnbindComponentToCMCore_t __user *);
+int cmld_BindComponentAsynchronous(struct cm_process_priv*, CM_BindComponentAsynchronous_t __user *);
+int cmld_UnbindComponentAsynchronous(struct cm_process_priv*, CM_UnbindComponentAsynchronous_t __user *);
+int cmld_BindComponent(struct cm_process_priv*, CM_BindComponent_t __user *);
+int cmld_UnbindComponent(struct cm_process_priv*, CM_UnbindComponent_t __user *);
+int cmld_BindComponentToVoid(struct cm_process_priv*, CM_BindComponentToVoid_t __user *);
+int cmld_DestroyComponent(struct cm_process_priv*, CM_DestroyComponent_t __user *);
+int cmld_CreateMemoryDomain(struct cm_process_priv*, CM_CreateMemoryDomain_t __user *);
+int cmld_CreateMemoryDomainScratch(struct cm_process_priv*, CM_CreateMemoryDomainScratch_t __user *);
+int cmld_DestroyMemoryDomain(CM_DestroyMemoryDomain_t __user *);
+int cmld_GetDomainCoreId(CM_GetDomainCoreId_t __user *);
+int cmld_AllocMpcMemory(struct cm_process_priv *, CM_AllocMpcMemory_t __user *);
+int cmld_FreeMpcMemory(struct cm_process_priv *, CM_FreeMpcMemory_t __user *);
+int cmld_GetMpcMemoryStatus(CM_GetMpcMemoryStatus_t __user *);
+int cmld_StartComponent(struct cm_process_priv *, CM_StartComponent_t __user *);
+int cmld_StopComponent(struct cm_process_priv *, CM_StopComponent_t __user *);
+int cmld_GetMpcLoadCounter(CM_GetMpcLoadCounter_t __user *);
+int cmld_GetComponentDescription(struct cm_process_priv *, CM_GetComponentDescription_t __user *);
+int cmld_GetComponentListHeader(struct cm_process_priv *, CM_GetComponentListHeader_t __user *);
+int cmld_GetComponentListNext(struct cm_process_priv *, CM_GetComponentListNext_t __user *);
+int cmld_GetComponentRequiredInterfaceNumber(struct cm_process_priv *,
+ CM_GetComponentRequiredInterfaceNumber_t __user *);
+int cmld_GetComponentRequiredInterface(struct cm_process_priv *,
+ CM_GetComponentRequiredInterface_t __user *);
+int cmld_GetComponentRequiredInterfaceBinding(struct cm_process_priv *,
+ CM_GetComponentRequiredInterfaceBinding_t __user *);
+int cmld_GetComponentProvidedInterfaceNumber(struct cm_process_priv *,
+ CM_GetComponentProvidedInterfaceNumber_t __user *);
+int cmld_GetComponentProvidedInterface(struct cm_process_priv *,
+ CM_GetComponentProvidedInterface_t __user *);
+int cmld_GetComponentPropertyNumber(struct cm_process_priv *,
+ CM_GetComponentPropertyNumber_t __user *);
+int cmld_GetComponentPropertyName(struct cm_process_priv *, CM_GetComponentPropertyName_t __user *);
+int cmld_GetComponentPropertyValue(struct cm_process_priv *, CM_GetComponentPropertyValue_t __user *);
+int cmld_ReadComponentAttribute(struct cm_process_priv *, CM_ReadComponentAttribute_t __user *);
+int cmld_GetExecutiveEngineHandle(struct cm_process_priv *, CM_GetExecutiveEngineHandle_t __user *);
+int cmld_SetMode(CM_SetMode_t __user *);
+int cmld_GetRequiredComponentFiles(struct cm_process_priv *cmPriv,
+ CM_GetRequiredComponentFiles_t __user *);
+int cmld_Migrate(CM_Migrate_t __user *);
+int cmld_Unmigrate(CM_Unmigrate_t __user *);
+int cmld_SetupRelinkArea(struct cm_process_priv *, CM_SetupRelinkArea_t __user *);
+int cmld_PushComponent(CM_PushComponent_t __user *);
+int cmld_ReleaseComponent(CM_ReleaseComponent_t __user *);
+int cmld_PrivGetMPCMemoryDesc(struct cm_process_priv *, CM_PrivGetMPCMemoryDesc_t __user *);
+int cmld_PrivReserveMemory(struct cm_process_priv *, unsigned int);
+#endif
diff --git a/drivers/staging/nmf-cm/configuration.c b/drivers/staging/nmf-cm/configuration.c
new file mode 100644
index 00000000000..523874fc586
--- /dev/null
+++ b/drivers/staging/nmf-cm/configuration.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/** \file configuration.c
+ *
+ * Nomadik Multiprocessing Framework Linux Driver
+ *
+ */
+
+#include <linux/module.h>
+#include <cm/engine/api/configuration_engine.h>
+#include <cm/engine/configuration/inc/configuration_status.h>
+#include <cm/engine/power_mgt/inc/power.h>
+#include "osal-kernel.h"
+
+/* Per-driver environment */
+struct OsalEnvironment osalEnv =
+{
+ .mpc = {
+ {
+ .coreId = SVA_CORE_ID,
+ .name = "sva",
+ .base_phys = (void*)U8500_SVA_BASE,
+ .interrupt0 = IRQ_DB8500_SVA,
+ .interrupt1 = IRQ_DB8500_SVA2,
+ .mmdsp_regulator = NULL,
+ .pipe_regulator = NULL,
+ .monitor_tsk = NULL,
+ .hwmem_code = NULL,
+ .hwmem_data = NULL,
+ .trace_read_count = ATOMIC_INIT(0),
+ },
+ {
+ .coreId = SIA_CORE_ID,
+ .name = "sia",
+ .base_phys = (void*)U8500_SIA_BASE,
+ .interrupt0 = IRQ_DB8500_SIA,
+ .interrupt1 = IRQ_DB8500_SIA2,
+ .mmdsp_regulator = NULL,
+ .pipe_regulator = NULL,
+ .monitor_tsk = NULL,
+ .hwmem_code = NULL,
+ .hwmem_data = NULL,
+ .trace_read_count = ATOMIC_INIT(0),
+ }
+ },
+ .esram_regulator = { NULL, NULL},
+ .dsp_sleep = {
+ .sia_auto_pm_enable = PRCMU_AUTO_PM_OFF,
+ .sia_power_on = 0,
+ .sia_policy = PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF,
+ .sva_auto_pm_enable = PRCMU_AUTO_PM_OFF,
+ .sva_power_on = 0,
+ .sva_policy = PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF,
+ },
+ .dsp_idle = {
+ .sia_auto_pm_enable = PRCMU_AUTO_PM_OFF,
+ .sia_power_on = 0,
+ .sia_policy = PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF,
+ .sva_auto_pm_enable = PRCMU_AUTO_PM_OFF,
+ .sva_power_on = 0,
+ .sva_policy = PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF,
+ },
+};
+
+module_param_call(cm_debug_level, param_set_int, param_get_int,
+ &cm_debug_level, S_IWUSR|S_IRUGO);
+MODULE_PARM_DESC(cm_debug_level, "Debug level of NMF Core");
+
+module_param_call(cm_error_break, param_set_bool, param_get_bool,
+ &cm_error_break, S_IWUSR|S_IRUGO);
+MODULE_PARM_DESC(cm_error_break, "Stop on error (in an infinite loop, for debugging purpose)");
+
+module_param_call(cmIntensiveCheckState, param_set_bool, param_get_bool,
+ &cmIntensiveCheckState, S_IWUSR|S_IRUGO);
+MODULE_PARM_DESC(cmIntensiveCheckState, "Add additional intensive checks");
+
+DECLARE_MPC_PARAM(SVA, SDRAM_DATA_SIZE, "", 1);
+
+DECLARE_MPC_PARAM(SIA, 0, "\n\t\t\t(0 means shared with SVA)", 2);
+
+bool cfgCommunicationLocationInSDRAM = true;
+module_param(cfgCommunicationLocationInSDRAM, bool, S_IRUGO);
+MODULE_PARM_DESC(cfgCommunicationLocationInSDRAM, "Location of communications (SDRAM or ESRAM)");
+
+bool cfgSemaphoreTypeHSEM = true;
+module_param(cfgSemaphoreTypeHSEM, bool, S_IRUGO);
+MODULE_PARM_DESC(cfgSemaphoreTypeHSEM, "Semaphore used (HSEM or LSEM)");
+
+int cfgESRAMSize = ESRAM_SIZE;
+module_param(cfgESRAMSize, uint, S_IRUGO);
+MODULE_PARM_DESC(cfgESRAMSize, "Size of ESRAM used in the CM (in Kb)");
+
+static int set_param_powerMode(const char *val, const struct kernel_param *kp)
+{
+ /* No equals means "set"... */
+ if (!val) val = "1";
+
+ /* One of =[yYnN01] */
+ switch (val[0]) {
+ case 'y': case 'Y': case '1':
+ CM_ENGINE_SetMode(CM_CMD_DBG_MODE, 0);
+ break;
+ case 'n': case 'N': case '0':
+ CM_ENGINE_SetMode(CM_CMD_DBG_MODE, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+module_param_call(powerMode, set_param_powerMode, param_get_bool, &powerMode, S_IWUSR|S_IRUGO);
+MODULE_PARM_DESC(powerMode, "DSP power mode enable");
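+
+/*
+ * Usage sketch, assuming the module ends up loaded under the name "cm":
+ * writing "1"/"y" to /sys/module/cm/parameters/powerMode calls
+ * CM_ENGINE_SetMode(CM_CMD_DBG_MODE, 0), while writing "0"/"n" calls
+ * CM_ENGINE_SetMode(CM_CMD_DBG_MODE, 1), as implemented in
+ * set_param_powerMode() above.
+ */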
+
+int init_config(void)
+{
+ if (cfgMpcSDRAMCodeSize_SVA == 0 || cfgMpcSDRAMCodeSize_SIA == 0) {
+ pr_err("SDRAM code size must be greater than 0\n");
+ return -EINVAL;
+ }
+
+ if (cfgMpcSDRAMDataSize_SVA == 0) {
+ pr_err("SDRAM data size for SVA must be greater than 0\n");
+ return -EINVAL;
+ }
+
+ osalEnv.mpc[SVA].nbYramBanks = cfgMpcYBanks_SVA;
+ osalEnv.mpc[SVA].eeId = cfgSchedulerTypeHybrid_SVA ? HYBRID_EXECUTIVE_ENGINE : SYNCHRONOUS_EXECUTIVE_ENGINE;
+ osalEnv.mpc[SVA].sdram_code.size = cfgMpcSDRAMCodeSize_SVA * ONE_KB;
+ osalEnv.mpc[SVA].sdram_data.size = cfgMpcSDRAMDataSize_SVA * ONE_KB;
+ osalEnv.mpc[SVA].base.size = 128*ONE_KB; //we expose only TCM24
+ spin_lock_init(&osalEnv.mpc[SVA].trace_reader_lock);
+
+ osalEnv.mpc[SIA].nbYramBanks = cfgMpcYBanks_SIA;
+ osalEnv.mpc[SIA].eeId = cfgSchedulerTypeHybrid_SIA ? HYBRID_EXECUTIVE_ENGINE : SYNCHRONOUS_EXECUTIVE_ENGINE;
+ osalEnv.mpc[SIA].sdram_code.size = cfgMpcSDRAMCodeSize_SIA * ONE_KB;
+ osalEnv.mpc[SIA].sdram_data.size = cfgMpcSDRAMDataSize_SIA * ONE_KB;
+ osalEnv.mpc[SIA].base.size = 128*ONE_KB; //we expose only TCM24
+ spin_lock_init(&osalEnv.mpc[SIA].trace_reader_lock);
+
+ return 0;
+}
diff --git a/drivers/staging/nmf-cm/configuration.h b/drivers/staging/nmf-cm/configuration.h
new file mode 100644
index 00000000000..39416cde686
--- /dev/null
+++ b/drivers/staging/nmf-cm/configuration.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef CONFIGURATION_H
+#define CONFIGURATION_H
+
+/** Peripherals description.
+ * Some of these values are taken from the kernel header descriptions (which are the
+ * right place for these definitions); the missing ones are defined here.
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+
+/* Embedded Static RAM base address */
+/* config: 0-64k: secure */
+#define ESRAM_BASE (U8500_ESRAM_BASE + U8500_ESRAM_DMA_LCPA_OFFSET)
+
+/*
+ * Embedded ram size for CM (in Kb)
+ * 5 banks of 128k: skip the first half bank (secure) and the last
+ * one (used for MCDE/B2R2), but include DMA part (4k after the secure part)
+ * to give access from DSP side
+ */
+#define ESRAM_SIZE 448
+enum {
+ ESRAM_12,
+ ESRAM_34,
+ NB_ESRAM,
+};
+
+/** MPCs */
+enum {
+ SVA,
+ SIA,
+ NB_MPC,
+};
+#define COREIDX(id) (id-1)
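+/*
+ * COREIDX() maps a core id onto its index in the osalEnv.mpc[] table; for
+ * example, the driver init code checks that COREIDX(SVA_CORE_ID) == SVA and
+ * COREIDX(SIA_CORE_ID) == SIA.
+ */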
+
+/** Base address of shared SDRAM: use upper SDRAM. Default segment sizes below (in Kb). */
+#define SDRAM_CODE_SIZE_SVA (2*ONE_KB)
+#define SDRAM_CODE_SIZE_SIA (2*ONE_KB)
+#define SDRAM_DATA_SIZE (8*ONE_KB)
+
+extern bool cfgCommunicationLocationInSDRAM;
+extern bool cfgSemaphoreTypeHSEM;
+extern int cfgESRAMSize;
+
+int init_config(void);
+
+#define DECLARE_MPC_PARAM(mpc, sdramDataSize, extension, ybank) \
+ static unsigned int cfgMpcYBanks_##mpc = ybank; \
+ module_param(cfgMpcYBanks_##mpc, uint, S_IRUGO); \
+ MODULE_PARM_DESC(cfgMpcYBanks_##mpc, "Nb of Y-Ram banks used on " #mpc); \
+ \
+ static bool cfgSchedulerTypeHybrid_##mpc = 1; \
+ module_param(cfgSchedulerTypeHybrid_##mpc, bool, S_IRUGO); \
+ MODULE_PARM_DESC(cfgSchedulerTypeHybrid_##mpc, "Scheduler used on " #mpc " (Hybrid or Synchronous)"); \
+ \
+ static unsigned int cfgMpcSDRAMCodeSize_##mpc = SDRAM_CODE_SIZE_##mpc; \
+ module_param(cfgMpcSDRAMCodeSize_##mpc, uint, S_IRUGO); \
+ MODULE_PARM_DESC(cfgMpcSDRAMCodeSize_##mpc, "Size of code segment on " #mpc " (in Kb)"); \
+ \
+ static unsigned int cfgMpcSDRAMDataSize_##mpc = sdramDataSize; \
+ module_param(cfgMpcSDRAMDataSize_##mpc, uint, S_IRUGO); \
+ MODULE_PARM_DESC(cfgMpcSDRAMDataSize_##mpc, "Size of data segment on " #mpc " (in Kb)" extension)
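+
+/*
+ * For example, DECLARE_MPC_PARAM(SVA, SDRAM_DATA_SIZE, "", 1) in
+ * configuration.c declares the module parameters cfgMpcYBanks_SVA (default 1),
+ * cfgSchedulerTypeHybrid_SVA (default 1, i.e. hybrid scheduler),
+ * cfgMpcSDRAMCodeSize_SVA (default SDRAM_CODE_SIZE_SVA) and
+ * cfgMpcSDRAMDataSize_SVA (default SDRAM_DATA_SIZE).
+ */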
+
+#endif
diff --git a/drivers/staging/nmf-cm/ee/api/panic.idt b/drivers/staging/nmf-cm/ee/api/panic.idt
new file mode 100644
index 00000000000..71996b8a55e
--- /dev/null
+++ b/drivers/staging/nmf-cm/ee/api/panic.idt
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \defgroup NMF_EE_TYPE Execution Engine Common Type Definitions
+ * \ingroup COMMON
+ */
+
+#ifndef __INC_PANIC_IDT
+#define __INC_PANIC_IDT
+
+/*!
+ * \brief Panic reason type
+ *
+ * For values, see \ref t_panic_reasonDescription.
+ *
+ * \ingroup NMF_EE_TYPE
+ */
+typedef t_uint8 t_panic_reason;
+
+/*!
+ * \brief The different panic reasons
+ *
+ * \verbatim
+ * Reason | Information | Behavior
+ * -------------------------------------------------------------------
+ * INTERNAL_PANIC | Not interpreted | Fatal panic, stop MPC
+ * MPC_NOT_RESPONDING_PANIC | Not interpreted | Fatal panic, stop MPC
+ * USER_STACK_OVERFLOW | Faulting address & SPu | Fatal panic, stop MPC
+ * SYSTEM_STACK_OVERFLOW | Faulting address & SPu | Fatal panic, stop MPC
+ * UNALIGNED_LONG_ACCESS | Indicative Faulting address & SPu | Fatal panic, stop MPC
+ * EVENT_FIFO_OVERFLOW | 0 | Abort current task, stop MPC
+ * PARAM_FIFO_OVERFLOW | 0 | idem
+ * INTERFACE_NOT_BINDED | 0 | idem
+ * USER_PANIC | Not interpreted | idem
+ * UNBIND_INTERRUPT | Interrupt number | Do nothing, just return from interrupt.
+ *  EVENT_FIFO_IN_USE         | Destroy event FIFO while an event is already scheduled (only for HostEE)
+ * \endverbatim
+ *
+ * \ingroup NMF_EE_TYPE
+ */
+typedef enum {
+ INTERNAL_PANIC = 1,
+ MPC_NOT_RESPONDING_PANIC = 2,
+ USER_STACK_OVERFLOW = 3,
+ SYSTEM_STACK_OVERFLOW = 4,
+ UNALIGNED_LONG_ACCESS = 5,
+ EVENT_FIFO_OVERFLOW = 6,
+ PARAM_FIFO_OVERFLOW = 7,
+ INTERFACE_NOT_BINDED = 8,
+ USER_PANIC = 9,
+ UNBIND_INTERRUPT = 10,
+ EVENT_FIFO_IN_USE = 11,
+ RESERVED_PANIC = 2 //for COMPATIBILITY with previous versions of NMF, to be deprecated
+} t_panic_reasonDescription;
+
+/*!
+ * \brief Define the source of the panic
+ *
+ * It indicates the source core of the panic message.\n
+ * It gives the member to use within \ref t_nmf_panic_data (which is a member of the t_nmf_service_data service data structure).
+ *
+ * \ingroup NMF_EE_TYPE
+ */
+typedef enum {
+ HOST_EE, //!< If the source is the Executive Engine running on the ARM Core
+ MPC_EE //!< If the source is the Executive Engine running on one of the MPC Core
+} t_panic_source;
+
+#endif
diff --git a/drivers/staging/nmf-cm/ee/api/trace.idt b/drivers/staging/nmf-cm/ee/api/trace.idt
new file mode 100644
index 00000000000..f4d4c8615e2
--- /dev/null
+++ b/drivers/staging/nmf-cm/ee/api/trace.idt
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \defgroup NMF_EE_TYPE Execution Engine Common Type Definitions
+ * \ingroup COMMON
+ */
+
+#ifndef __INC_TRACE_IDT
+#define __INC_TRACE_IDT
+
+struct t_nmf_trace
+{
+ t_uint32 revision;
+ t_uint32 timeStamp;
+ t_uint32 componentId;
+ t_uint32 traceId;
+ t_uint32 paramOpt;
+ t_uint32 componentHandle;
+ t_uint32 parentHandle;
+ t_uint32 params[4];
+};
+
+#define TRACE_BUFFER_SIZE 128
+
+#endif
diff --git a/drivers/staging/nmf-cm/inc/nmf-def.h b/drivers/staging/nmf-cm/inc/nmf-def.h
new file mode 100644
index 00000000000..7cdea18996b
--- /dev/null
+++ b/drivers/staging/nmf-cm/inc/nmf-def.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+ /*!
+ * \brief NMF Version.
+ *
+ * This file contains the NMF Version.
+ *
+ * \defgroup NMF_VERSION NMF Version
+ * \ingroup COMMON
+ */
+
+#ifndef __INC_NMF_DEF_H
+#define __INC_NMF_DEF_H
+
+/*!
+ * \brief Current NMF version number
+ *
+ * \ingroup NMF_VERSION
+ */
+#define NMF_VERSION ((2 << 16) | (10 << 8) | (122))
+
+/*!
+ * \brief Get NMF major version corresponding to NMF version number
+ * \ingroup NMF_VERSION
+ */
+#define VERSION_MAJOR(version) (((version) >> 16) & 0xFF)
+/*!
+ * \brief Get NMF minor version corresponding to NMF version number
+ * \ingroup NMF_VERSION
+ */
+#define VERSION_MINOR(version) (((version) >> 8) & 0xFF)
+/*!
+ * \brief Get NMF patch version corresponding to NMF version number
+ * \ingroup NMF_VERSION
+ */
+#define VERSION_PATCH(version) (((version) >> 0) & 0xFF)
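+
+/*
+ * For example, with NMF_VERSION defined above as ((2 << 16) | (10 << 8) | (122)),
+ * VERSION_MAJOR(NMF_VERSION) == 2, VERSION_MINOR(NMF_VERSION) == 10 and
+ * VERSION_PATCH(NMF_VERSION) == 122, i.e. NMF version 2.10.122.
+ */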
+
+#endif /* __INC_NMF_DEF_H */
diff --git a/drivers/staging/nmf-cm/inc/nmf-limits.h b/drivers/staging/nmf-cm/inc/nmf-limits.h
new file mode 100644
index 00000000000..374795f91e0
--- /dev/null
+++ b/drivers/staging/nmf-cm/inc/nmf-limits.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Common Nomadik Multiprocessing Framework limits definition
+ *
+ * This file contains the limit definitions used into NMF.
+ *
+ * \warning Don't modify it since it is also hardcoded in tools
+ *
+ * \defgroup NMF_LIMITS NMF limits definition
+ * \ingroup COMMON
+ */
+#ifndef __INC_NMF_LIMITS_H
+#define __INC_NMF_LIMITS_H
+
+/*!
+ * \brief Maximum interface name length
+ *
+ * Define the maximum interface name length allowed by NMF.
+ *
+ * \ingroup NMF_LIMITS
+ */
+#define MAX_INTERFACE_NAME_LENGTH 32
+
+/*!
+ * \brief Maximum interface method name length
+ *
+ * Define the maximum interface method name length allowed by NMF.
+ *
+ * \ingroup NMF_LIMITS
+ */
+#define MAX_INTERFACE_METHOD_NAME_LENGTH 64
+
+/*!
+ * \brief Maximum interface type name length
+ *
+ * Define the maximum interface type name length allowed by NMF.
+ *
+ * \ingroup NMF_LIMITS
+ */
+#define MAX_INTERFACE_TYPE_NAME_LENGTH 128
+
+
+/*!
+ * \brief Maximum template name length
+ *
+ * Define the maximum template name length allowed by NMF.
+ *
+ * \ingroup NMF_LIMITS
+ */
+#define MAX_TEMPLATE_NAME_LENGTH 128
+
+/*!
+ * \brief Maximum component local name length
+ *
+ * Define the maximum component local name length inside a composite component allowed by NMF.
+ *
+ * \ingroup NMF_LIMITS
+ */
+#define MAX_COMPONENT_NAME_LENGTH 32
+
+/*!
+ * \brief Maximum property name length
+ *
+ * Define the maximum property name length allowed by NMF.
+ *
+ * \ingroup NMF_LIMITS
+ */
+#define MAX_PROPERTY_NAME_LENGTH 32
+
+/*!
+ * \brief Maximum property value length
+ *
+ * Define the maximum property value length allowed by NMF.
+ *
+ * \ingroup NMF_LIMITS
+ */
+#define MAX_PROPERTY_VALUE_LENGTH 128
+
+/*!
+ * \brief Maximum attribute name length
+ *
+ * Define the maximum attribute name length allowed by NMF.
+ *
+ * \ingroup NMF_LIMITS
+ */
+#define MAX_ATTRIBUTE_NAME_LENGTH 32
+
+/*!
+ * \brief Maximum fifo size allowed for binding component
+ *
+ * Define the maximum fifo size allowed for binding component allowed by NMF when calling
+ * CM_BindComponentFromHost and CM_BindComponentAsynchronous.
+ *
+ * \ingroup NMF_LIMITS
+ */
+#define MAX_COMMUNICATION_FIFO_SIZE 256
+
+#define MAX_COMPONENT_FILE_PATH_LENGTH 1024
+
+#endif /* __INC_NMF_LIMITS_H */
diff --git a/drivers/staging/nmf-cm/inc/nmf-tracedescription.h b/drivers/staging/nmf-cm/inc/nmf-tracedescription.h
new file mode 100644
index 00000000000..bce589079b9
--- /dev/null
+++ b/drivers/staging/nmf-cm/inc/nmf-tracedescription.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief NMF xti/stm trace format description
+ *
+ * \defgroup NMF_TRACE_FORMAT NMF xti/stm trace format description
+ *
+ * The NMF trace is output by either xti ip on 8815 or stm ip on 8820 and 8500.
+ * Each type of trace is output on a dedicated channel. Following is a description
+ * of each of these traces.
+ *
+ * Traces generally have a timestamp added by hardware, but it is not described here.
+ * \ingroup NMF_ABI
+ */
+#ifndef TRACE_FORMAT_H_
+#define TRACE_FORMAT_H_
+
+#include <inc/nmf-limits.h>
+
+/*!
+ * \brief XTI/STM Channel where trace are dumped
+ *
+ * \note This type is only for defining constants, please do not reference it.
+ *
+ * \note Even if this format could be generated on the same channel, the Host EE & CM channels are separated
+ * in order to avoid concurrency and to access the STM IP without requiring mutual exclusion.
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+typedef enum {
+ MPC_EE_CHANNEL = 100, //!< MPC EE channel (MPC activity) in 32bits bundle
+ CM_CHANNEL = 101, //!< CM channel (MPC deployment) in 64bits bundle
+ HOST_EE_CHANNEL = 151 //!< Host EE channel (deployment & activity) in 64bits bundle
+} t_nmfTraceChannelDescription;
+
+/*!
+ * \brief Message trace type
+ *
+ * \note This type is only for defining constants, please do not reference it.
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+typedef enum {
+ TRACE_TYPE_RESET = 1, //!< Reset trace type
+ TRACE_TYPE_COMPONENT = 2, //!< Component instantiate trace type
+ TRACE_TYPE_BIND = 3, //!< Component bind trace type
+ TRACE_TYPE_METHOD = 4, //!< Component method trace type
+ TRACE_TYPE_ACTIVITY = 5, //!< Activity trace type
+ TRACE_TYPE_PANIC = 6, //!< Panic trace type
+ TRACE_TYPE_COMMUNICATION = 7, //!< Communication trace type
+ TRACE_TYPE_ALLOCATOR = 8, //!< Allocator trace type
+ TRACE_TYPE_ALLOC = 9, //!< Alloc trace type
+ TRACE_TYPE_USER = 10 //!< User trace type
+} t_nmfTraceTypeDescription;
+
+#define TRACE_MAJOR_VERSION 1 //!< Current major trace version number \ingroup NMF_TRACE_FORMAT
+#define TRACE_MINOR_VERSION 2 //!< Current minor trace version number \ingroup NMF_TRACE_FORMAT
+
+/*!
+ * \brief Trace header description.
+ *
+ * \note XTI will add a 64-bit timestamp as the first field of this structure, but it is not generated by us!
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTraceChannelHeader {
+ // t_uint64 timeStamp;
+ t_uint8 traceType; //!< Trace type
+ t_uint8 reserved;
+ t_uint16 traceSize; //!< Trace size (depending on trace type description)
+};
+
+/*!
+ * \brief Trace header union
+ *
+ * The purpose of this is to optimize header setting in one instruction.
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+typedef union {
+ struct t_nmfTraceChannelHeader s;
+ t_uint32 v;
+} t_nmfTraceChannelHeaderUnion;
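+
+/*
+ * A minimal sketch of how a trace producer might use this union, where 'rst'
+ * stands for some struct t_nmfTraceReset being filled:
+ *
+ *	t_nmfTraceChannelHeaderUnion h;
+ *	h.s.traceType = TRACE_TYPE_RESET;
+ *	h.s.reserved  = 0;
+ *	h.s.traceSize = sizeof(struct t_nmfTraceReset);
+ *	rst.header.v  = h.v;	(single 32-bit write)
+ */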
+
+
+/*!
+ * \brief Trace reset description
+ *
+ * Informs tools to reset their internal state because the network will be dumped again.
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTraceReset {
+ t_nmfTraceChannelHeaderUnion header; //!< Trace header
+
+ t_uint16 minorVersion; //!< NMF trace format minor version
+ t_uint16 majorVersion; //!< NMF trace format major version
+};
+
+/**
+ * \brief Component instantiation trace command description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+typedef enum {
+ TRACE_COMPONENT_COMMAND_ADD = 0x1,
+ TRACE_COMPONENT_COMMAND_REMOVE = 0x2
+} t_nmfTraceComponentCommandDescription;
+
+
+/*!
+ * \brief Component instantiation trace description
+ *
+ * Component instantiation trace is generated each time an instance of a component is added or removed.
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTraceComponent {
+ t_nmfTraceChannelHeaderUnion header; //!< Trace header
+
+ t_uint16 command; //!< See \ref t_nmfTraceComponentCommandDescription
+ t_uint16 domainId; //!< In CM: 0x01:Arm | 0x02:SAA | 0x03:SVA | 0x04:SIA, in SMPEE: 0x1
+    t_uint32                    componentContext;      //!< Component context within the domain (DSP this or ARM class this)
+    t_uint32                    componentUserContext;   //!< User-friendly component Id within the channel (CM handle or ARM class this)
+    t_uint8                     componentLocalName[MAX_COMPONENT_NAME_LENGTH]; //!< Local name of the component as given by the user (null terminated)
+ t_uint8 componentTemplateName[MAX_TEMPLATE_NAME_LENGTH];//!< template name of component (null terminated)
+};
+
+/**
+ * \brief Component binding trace command description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+typedef enum {
+ TRACE_BIND_COMMAND_BIND_SYNCHRONOUS = 0x1,
+ TRACE_BIND_COMMAND_UNBIND_SYNCHRONOUS = 0x2,
+ TRACE_BIND_COMMAND_BIND_ASYNCHRONOUS = 0x3,
+ TRACE_BIND_COMMAND_UNBIND_ASYNCHRONOUS = 0x4
+} t_nmfTraceBindCommandDescription;
+
+/**
+ * \brief Component binding trace description
+ *
+ * \note clientComponentContext & serverComponentContext take the value 0xffffffff when the client or server is the Component Manager.
+ * \note serverComponentContext takes the value 0x00000000 when binding to void.
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTraceBind {
+ t_nmfTraceChannelHeaderUnion header; //!< Trace header
+
+ t_uint16 command; //!< See \ref t_nmfTraceBindCommandDescription
+ t_uint16 reserved;
+ t_uint16 clientDomainId; //!< In CM: 0x01:Arm | 0x02:SAA | 0x03:SVA | 0x04:SIA, in SMPEE: 0x1
+ t_uint16 serverDomainId; //!< In CM: 0x01:Arm | 0x02:SAA | 0x03:SVA | 0x04:SIA, in SMPEE: 0x1
+    t_uint32                    clientComponentContext; //!< Component context within the domain (DSP this or ARM class this)
+    t_uint32                    serverComponentContext; //!< Component context within the domain (DSP this or ARM class this)
+ t_uint8 requiredItfName[MAX_INTERFACE_NAME_LENGTH]; //!< Required interface name
+ t_uint8 providedItfName[MAX_INTERFACE_NAME_LENGTH]; //!< Provided interface name
+};
+
+/*!
+ * \brief Component interface method name trace description
+ *
+ * For each method of each interface provided by a component, one such trace is dumped.
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTraceMethod {
+ t_nmfTraceChannelHeaderUnion header; //!< Trace header
+
+ t_uint16 domainId; //!< In CM: 0x01:Arm | 0x02:SAA | 0x03:SVA | 0x04:SIA, in SMPEE: 0x1
+ t_uint16 reserved;
+    t_uint32                    methodId;               //!< Unique method Id within the component
+    t_uint32                    componentContext;       //!< Component context within the domain (DSP this or ARM class this)
+ t_uint8 methodName[MAX_INTERFACE_METHOD_NAME_LENGTH]; //!< Symbolic method name
+};
+
+/**
+ * \brief Activity trace command description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+typedef enum {
+ TRACE_ACTIVITY_START = 0x1, //!< Start method
+ TRACE_ACTIVITY_END = 0x2, //!< End method
+ TRACE_ACTIVITY_POST = 0x3, //!< Post method
+ TRACE_ACTIVITY_CALL = 0x4, //!< Synchronous call method
+ TRACE_ACTIVITY_RETURN = 0x5 //!< Synchronous return method
+} t_nmfTraceActivityCommandDescription;
+
+/*!
+ * \brief Execution Engine scheduling activity trace description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTraceActivity {
+ t_nmfTraceChannelHeaderUnion header; //!< Trace header
+
+ t_uint16 command; //!< See \ref t_nmfTraceActivityCommandDescription
+ t_uint16 domainId; //!< In CM: 0x01:Arm | 0x02:SAA | 0x03:SVA | 0x04:SIA
+ t_uint32 componentContext; //!< Unique component Id (Component Handle for CM, Component this for EE)
+    t_uint32                    methodId;               //!< Unique method Id within the component
+};
+
+/**
+ * \brief Inter-processor communication trace command description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+typedef enum {
+ TRACE_COMMUNICATION_COMMAND_SEND = 0x1,
+ TRACE_COMMUNICATION_COMMAND_RECEIVE = 0x2
+} t_nmfTraceCommunicationCommandDescription;
+
+/**
+ * \brief Inter-processor communication signaling trace description
+ *
+ * Used when triggering an interrupt through a core.
+ *
+ * \note Not used on SMP EE
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTraceCommunication {
+ t_nmfTraceChannelHeaderUnion header; //!< Trace header
+
+ t_uint16 command; //!< See \ref t_nmfTraceCommunicationCommandDescription
+ t_uint16 reserved_0;
+ t_uint16 domainId; //!< In CM: 0x01:Arm | 0x02:SAA | 0x03:SVA | 0x04:SIA
+ t_uint16 remoteDomainId; //!< In CM: 0x01:Arm | 0x02:SAA | 0x03:SVA | 0x04:SIA
+};
+
+/**
+ * \brief Memory allocator trace command description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+typedef enum {
+ TRACE_ALLOCATOR_COMMAND_CREATE = 0x1,
+ TRACE_ALLOCATOR_COMMAND_DESTROY = 0x2
+} t_nmfTraceAllocatorCommandDescription;
+
+/*!
+ * \brief Memory allocator trace description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTraceAllocator {
+ t_nmfTraceChannelHeaderUnion header; //!< Trace header
+
+ t_uint16 command; //!< See \ref t_nmfTraceAllocatorCommandDescription
+ t_uint16 allocId; //!< Memory allocator ID
+ t_uint32 size; //!< Memory allocator size
+ t_uint8 name[32]; //!< Memory allocator name
+};
+
+/**
+ * \brief Memory alloc/free trace command description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+typedef enum {
+ TRACE_ALLOC_COMMAND_ALLOC = 0x1,
+ TRACE_ALLOC_COMMAND_FREE = 0x2
+} t_nmfTraceAllocCommandDescription;
+
+/*!
+ * \brief Memory alloc/free trace description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTraceAlloc {
+ t_nmfTraceChannelHeaderUnion header; //!< Trace header
+
+    t_uint16                    command;                //!< See \ref t_nmfTraceAllocCommandDescription
+ t_uint16 allocId; //!< Memory allocator ID
+ t_uint32 offset; //!< Memory chunk offset
+ t_uint32 size; //!< Memory chunk size
+};
+
+/*!
+ * \brief Panic trace description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTracePanic {
+ t_nmfTraceChannelHeaderUnion header; //!< Trace header
+
+ t_uint16 reason; //!< See \ref t_panic_reason for description
+ t_uint16 domainId; //!< In CM: 0x01:Arm | 0x02:SAA | 0x03:SVA | 0x04:SIA
+ t_uint32 componentContext; //!< Unique component Id (Component Handle for CM, Component this for EE)
+ t_uint32 information1; //!< Reason dependent information 1st
+ t_uint32 information2; //!< Reason dependent information 2nd
+};
+
+/*!
+ * \brief User trace description
+ *
+ * \ingroup NMF_TRACE_FORMAT
+ */
+struct t_nmfTraceUser {
+ t_nmfTraceChannelHeaderUnion header; //!< Trace header
+
+ t_uint32 key; //!< User key
+ t_uint16 domainId; //!< In CM: 0x01:Arm | 0x02:SAA | 0x03:SVA | 0x04:SIA
+ t_uint16 reserved;
+ t_uint32 componentContext; //!< Unique component Id (Component Handle for CM, Component this for EE)
+ t_uint32 callerAddress; //!< Unique code address belonging to the component
+};
+
+/*
+struct t_nmfTracePower{
+ struct t_nmfTraceChannelHeader header;
+};
+*/
+
+#endif /* TRACE_FORMAT_H_ */
diff --git a/drivers/staging/nmf-cm/inc/nmf_type.idt b/drivers/staging/nmf-cm/inc/nmf_type.idt
new file mode 100644
index 00000000000..dda547a463e
--- /dev/null
+++ b/drivers/staging/nmf-cm/inc/nmf_type.idt
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+#ifndef NMF_TYPE_H_
+#define NMF_TYPE_H_
+
+/*!
+ * \defgroup NMF_COMMON_TYPE NMF Common Type
+ * \ingroup COMMON
+ */
+
+/*!
+ * \brief Error type returned by NMF API routines
+ *
+ * Possible values are described by \ref t_nmf_errorDescription
+ *
+ * \ingroup NMF_COMMON_TYPE
+ */
+typedef t_sint8 t_nmf_error;
+
+/*!
+ * \brief Error type values
+ *
+ * \ingroup NMF_COMMON_TYPE
+ */
+typedef enum {
+ NMF_OK = 0, //!< No error
+ NMF_INVALID_PARAMETER = -2, //!< Invalid parameter
+ NMF_NO_MORE_MEMORY = -30, //!< Out of memory
+ NMF_INTERFACE_NOT_BINDED = -59, //!< Attempt to unbind an interface that is not bound
+ NMF_INTERFACE_ALREADY_BINDED = -60, //!< Attempt to bind an already bound interface
+ NMF_NO_SUCH_REQUIRED_INTERFACE = -63, //!< Interface name not required by a component
+ NMF_NO_SUCH_PROVIDED_INTERFACE = -64, //!< Interface name not provided by a component
+ NMF_COMPONENT_NOT_STOPPED = -80, //!< Component must be stopped to perform operation
+ NMF_INVALID_COMPONENT_STATE_TRANSITION = -81, //!< Invalid component state transition caused by user action
+ NMF_NO_SUCH_PROPERTY = -87, //!< Property name not exported by the underlying component
+ NMF_NO_SUCH_ATTRIBUTE = -88, //!< Attribute name not shared (exported) by a component
+ NMF_NO_MESSAGE = -103, //!< No message available
+ NMF_FLUSH_MESSAGE = -106, //!< Message sent after a call to EE_FlushChannel()
+ NMF_INTEGRATION_ERROR0 = -112, //!< OS dependent integration Error [-112 -> -121]
+ NMF_INTEGRATION_ERROR1 = -113, //!< OS dependent integration Error [-112 -> -121]
+ NMF_INTEGRATION_ERROR2 = -114, //!< OS dependent integration Error [-112 -> -121]
+ NMF_INTEGRATION_ERROR3 = -115, //!< OS dependent integration Error [-112 -> -121]
+ NMF_INTEGRATION_ERROR4 = -116, //!< OS dependent integration Error [-112 -> -121]
+ NMF_INTEGRATION_ERROR5 = -117, //!< OS dependent integration Error [-112 -> -121]
+ NMF_INTEGRATION_ERROR6 = -118, //!< OS dependent integration Error [-112 -> -121]
+ NMF_INTEGRATION_ERROR7 = -119, //!< OS dependent integration Error [-112 -> -121]
+ NMF_INTEGRATION_ERROR8 = -120, //!< OS dependent integration Error [-112 -> -121]
+ NMF_INTEGRATION_ERROR9 = -121 //!< OS dependent integration Error [-112 -> -121]
+} t_nmf_errorDescription;
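+
+/*
+ * Illustrative sketch (an assumption, not an excerpt of existing code):
+ * typical error checking against t_nmf_error values. The called function is
+ * hypothetical; only the constants above are defined here.
+ *
+ *   t_nmf_error err = some_nmf_call();
+ *   if (err != NMF_OK) {
+ *       if (err == NMF_NO_MORE_MEMORY)
+ *           ; // out of memory: release resources and retry or abort
+ *       else
+ *           ; // map other t_nmf_errorDescription values as needed
+ *   }
+ */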
+
+/*!
+ * \brief Define the t_nmf_channel type that identifies a communication channel between NMF and the user.
+ *
+ * \ingroup NMF_COMMON_TYPE
+ */
+typedef t_uint32 t_nmf_channel;
+
+#endif /* NMF_TYPE_H_ */
diff --git a/drivers/staging/nmf-cm/inc/type.h b/drivers/staging/nmf-cm/inc/type.h
new file mode 100644
index 00000000000..3075505aee5
--- /dev/null
+++ b/drivers/staging/nmf-cm/inc/type.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/* inc/type.h - Programming Model.
+ *
+ * Copyright (c) 2006, 2007, 2008 STMicroelectronics.
+ *
+ * Reproduction and Communication of this document is strictly prohibited
+ * unless specifically authorized in writing by STMicroelectronics.
+ *
+ * Written by NMF team.
+ */
+#ifndef _NMF_TYPE_H_
+#define _NMF_TYPE_H_
+
+#include <inc/typedef.h>
+
+PUBLIC IMPORT_SHARED void NMF_LOG(const char* fmt, ...);
+PUBLIC IMPORT_SHARED void NMF_PANIC(const char* fmt, ...);
+
+#define NMF_ASSERT(cond) do { if(!(cond)) NMF_PANIC("NMF_ASSERT at %s:%d\n", (int)__FILE__, (int)__LINE__); } while(0)
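+
+/*
+ * Usage sketch (illustrative only): NMF_ASSERT() panics through NMF_PANIC()
+ * when the condition does not hold, e.g.
+ *
+ *   NMF_ASSERT(handle != NULL);
+ */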
+
+#ifndef EXPORT_NMF_COMPONENT
+ #define EXPORT_NMF_COMPONENT EXPORT_SHARED
+#endif
+
+#ifndef IMPORT_NMF_COMPONENT
+ #define IMPORT_NMF_COMPONENT IMPORT_SHARED
+#endif
+
+#endif /* _NMF_TYPE_H_ */
diff --git a/drivers/staging/nmf-cm/inc/typedef.h b/drivers/staging/nmf-cm/inc/typedef.h
new file mode 100644
index 00000000000..a29e6b88fde
--- /dev/null
+++ b/drivers/staging/nmf-cm/inc/typedef.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \defgroup COMMON Common types and definitions
+ *
+ * \defgroup NMF_COMMON NMF common definition
+ * \ingroup COMMON
+ *
+ * \defgroup NMF_ABI NMF ABI specification
+ * \warning This page is not for multimedia developers !
+ */
+/*!
+ * \brief Primitive Type Definition
+ *
+ * \defgroup NMF_PRIMITIVE_TYPE Primitive type definition
+ * \ingroup COMMON
+ */
+
+#ifndef NMF_TYPEDEF_H_
+#define NMF_TYPEDEF_H_
+
+#undef PRIVATE
+#define PRIVATE static //!< Private macro declaration \ingroup NMF_PRIMITIVE_TYPE
+
+#undef PUBLIC
+#ifdef __cplusplus
+#define PUBLIC extern "C" //!< Public macro declaration \ingroup NMF_PRIMITIVE_TYPE
+#else
+#define PUBLIC extern //!< Public macro declaration \ingroup NMF_PRIMITIVE_TYPE
+#endif
+
+#if defined(__SYMBIAN32__)
+/*!
+ * \brief Declared IMPORT_SHARED to allow dll/shared library creation
+ *
+ * \note Value depends on the OS.
+ *
+ * \ingroup NMF_PRIMITIVE_TYPE
+ */
+ #ifndef IMPORT_SHARED
+ #define IMPORT_SHARED IMPORT_C
+ #endif
+/*!
+ * \brief Declared EXPORT_SHARED to allow dll/shared library creation
+ *
+ * \note Value depends on the OS.
+ *
+ * \ingroup NMF_PRIMITIVE_TYPE
+ */
+ #ifndef EXPORT_SHARED
+ #define EXPORT_SHARED EXPORT_C
+ #endif
+#elif defined(LINUX)
+ #ifndef IMPORT_SHARED
+ #define IMPORT_SHARED
+ #endif
+ #ifndef EXPORT_SHARED
+ #define EXPORT_SHARED __attribute__ ((visibility ("default")))
+ #endif
+#else
+ #ifndef IMPORT_SHARED
+ #define IMPORT_SHARED
+ #endif
+
+ #ifndef EXPORT_SHARED
+ #define EXPORT_SHARED
+ #endif
+#endif
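+
+/*
+ * Illustrative sketch (an assumption, not taken from the NMF sources): how
+ * PUBLIC and the IMPORT_SHARED/EXPORT_SHARED macros are typically combined
+ * when a function must be visible across a shared-library boundary. The
+ * function name is hypothetical.
+ *
+ *   PUBLIC EXPORT_SHARED void my_exported_helper(void);  // exporting side
+ *   PUBLIC IMPORT_SHARED void my_exported_helper(void);  // importing side
+ */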
+
+/*
+ * Definition of type that are used by interface.
+ */
+
+typedef unsigned int t_uword;
+typedef signed int t_sword;
+
+#ifdef __flexcc2__
+
+typedef unsigned char t_bool;
+
+#ifdef __mode16__
+
+typedef signed char t_sint8;
+typedef signed int t_sint16;
+typedef signed long t_sint24;
+typedef signed long t_sint32;
+typedef signed long long t_sint40;
+// bigger types are not handled in this mode
+
+typedef unsigned char t_uint8;
+typedef unsigned int t_uint16;
+typedef unsigned long t_uint24;
+typedef unsigned long t_uint32;
+typedef unsigned long long t_uint40;
+// bigger types are not handled in this mode
+
+// shared addr type definition
+//typedef __SHARED16 t_uint16 * t_shared_addr;
+typedef void * t_shared_field;
+
+#else /* __mode16__ -> __mode24__ */
+
+typedef signed char t_sint8;
+typedef signed short t_sint16;
+typedef signed int t_sint24;
+typedef signed long t_sint32;
+typedef signed long t_sint40;
+typedef signed long t_sint48;
+typedef signed long long t_sint56;
+
+typedef unsigned char t_uint8;
+typedef unsigned short t_uint16;
+typedef unsigned int t_uint24;
+typedef unsigned long t_uint32;
+typedef unsigned long t_uint40;
+typedef unsigned long t_uint48;
+typedef unsigned long long t_uint56;
+
+// shared addr type definition
+//typedef __SHARED16 t_uint16 * t_shared_addr;
+typedef t_uint24 t_shared_field;
+
+#endif /* MMDSP mode24 */
+
+// shared register (ARM world) type definition
+#if 0
+typedef struct {
+ t_uint16 lsb;
+ t_uint16 msb;
+} t_shared_reg;
+#endif
+typedef t_uint32 t_shared_reg;
+
+typedef t_uint32 t_physical_address;
+
+#include <stwdsp.h>
+
+#else /* __flexcc2__ -> RISC 32 Bits */
+
+#ifndef _HCL_DEFS_H
+typedef unsigned char t_bool; //!< Boolean primitive type \ingroup NMF_PRIMITIVE_TYPE
+
+typedef unsigned char t_uint8; //!< Unsigned 8 bits primitive type \ingroup NMF_PRIMITIVE_TYPE
+typedef signed char t_sint8; //!< Signed 8 bits primitive type \ingroup NMF_PRIMITIVE_TYPE
+typedef unsigned short t_uint16; //!< Unsigned 16 bits primitive type \ingroup NMF_PRIMITIVE_TYPE
+typedef signed short t_sint16; //!< Signed 16 bits primitive type \ingroup NMF_PRIMITIVE_TYPE
+typedef unsigned long t_uint32; //!< Unsigned 32 bits primitive type \ingroup NMF_PRIMITIVE_TYPE
+typedef signed long t_sint32; //!< Signed 32 bits primitive type \ingroup NMF_PRIMITIVE_TYPE
+typedef unsigned long long t_uint64; //!< Unsigned 64 bits primitive type \ingroup NMF_PRIMITIVE_TYPE
+typedef signed long long t_sint64; //!< Signed 64 bits primitive type \ingroup NMF_PRIMITIVE_TYPE
+
+typedef t_uint32 t_physical_address;
+#endif /* _HCL_DEFS_H */
+
+typedef unsigned long t_uint24;
+typedef signed long t_sint24;
+typedef unsigned long long t_uint48;
+typedef signed long long t_sint48;
+
+// shared addr type definition
+typedef t_uint32 t_shared_addr;
+
+// shared register (ARM world) type definition
+typedef t_uint32 t_shared_reg;
+typedef t_uint32 t_shared_field;
+
+#endif /* RISC 32 Bits */
+
+/*
+ * Define boolean type
+ */
+#undef FALSE
+#define FALSE 0 //!< Boolean FALSE value
+#undef TRUE
+#define TRUE 1 //!< Boolean TRUE value
+
+#ifndef NULL
+ #if defined __flexcc2__ || defined __SYMBIAN32__
+ #define NULL (0x0) //!< Null type \ingroup NMF_PRIMITIVE_TYPE
+ #else
+ #define NULL ((void*)0x0) //!< Null type \ingroup NMF_PRIMITIVE_TYPE
+ #endif
+#endif
+
+typedef t_uint32 t_nmf_component_handle;
+
+#endif /* NMF_TYPEDEF_H_ */
diff --git a/drivers/staging/nmf-cm/nmf/inc/channel_type.h b/drivers/staging/nmf-cm/nmf/inc/channel_type.h
new file mode 100644
index 00000000000..91a733dbbbf
--- /dev/null
+++ b/drivers/staging/nmf-cm/nmf/inc/channel_type.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Common Nomadik Multiprocessing Framework type definition
+ *
+ * This file contains the type definitions shared between CM and EE, used in NMF for callbacks.
+ */
+/*!
+ * \defgroup _t_nmf_channel_flag t_nmf_channel_flag
+ * \ingroup NMF_COMMON
+ */
+
+#ifndef __INC_CHANNEL_TYPE_H
+#define __INC_CHANNEL_TYPE_H
+
+#include <inc/typedef.h>
+#include <inc/nmf_type.idt>
+
+/*!
+ * \brief Define the t_nmf_channel_flag type that controls if/how a new communication channel is created.
+ * \ingroup _t_nmf_channel_flag
+ */
+typedef t_uint32 t_nmf_channel_flag;
+
+#define NMF_CHANNEL_SHARED ((t_nmf_channel_flag)0) //!< \ingroup _t_nmf_channel_flag
+#define NMF_CHANNEL_PRIVATE ((t_nmf_channel_flag)1) //!< \ingroup _t_nmf_channel_flag
+
+/*!
+ * \brief Define the t_nmf_virtualInterruptHandler function type used to dispatch virtual interrupts
+ * \ingroup VIRTUAL_INTERRUPT
+ */
+typedef void (*t_nmf_virtualInterruptHandler)(void *interruptContext);
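+
+/*
+ * Illustrative sketch (hypothetical handler, not part of this header): the
+ * shape of a function matching t_nmf_virtualInterruptHandler.
+ *
+ *   static void my_virtual_irq(void *interruptContext)
+ *   {
+ *       // interruptContext is the context pointer registered for this
+ *       // virtual interrupt
+ *   }
+ */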
+
+#endif
+
diff --git a/drivers/staging/nmf-cm/nmf/inc/component_type.h b/drivers/staging/nmf-cm/nmf/inc/component_type.h
new file mode 100644
index 00000000000..26217554158
--- /dev/null
+++ b/drivers/staging/nmf-cm/nmf/inc/component_type.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Common Nomadik Multiprocessing Framework type definition
+ *
+ * This file contains the type definitions shared between CM and EE, used in NMF for callbacks.
+ */
+#ifndef __INC_COMPONENT_TYPE_H
+#define __INC_COMPONENT_TYPE_H
+
+#include <inc/typedef.h>
+
+/*!
+ * \brief Identifier of a component instance handle
+ *
+ * \ingroup NMF_COMMON
+ */
+typedef t_nmf_component_handle t_cm_instance_handle;
+
+#endif
+
diff --git a/drivers/staging/nmf-cm/nmf/inc/service_type.h b/drivers/staging/nmf-cm/nmf/inc/service_type.h
new file mode 100644
index 00000000000..06d5c72dce9
--- /dev/null
+++ b/drivers/staging/nmf-cm/nmf/inc/service_type.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Service type and data used through service callback.
+ * \defgroup NMF_SERVICE NMF Service Callback types and data definition
+ * \ingroup NMF_COMMON
+ */
+#ifndef SERVICE_TYPE_H
+#define SERVICE_TYPE_H
+
+#include <ee/api/panic.idt>
+#include <nmf/inc/component_type.h>
+#include <share/inc/nmf.h>
+
+/*!
+ * \brief Define t_nmf_service_type type
+ *
+ * It gives the type of service message passed to service callback.
+ * \ingroup NMF_SERVICE
+ */
+typedef t_uint32 t_nmf_service_type;
+#define NMF_SERVICE_PANIC ((t_nmf_service_type)0) //!< \ingroup NMF_SERVICE
+#define NMF_SERVICE_SHUTDOWN ((t_nmf_service_type)1) //!< \ingroup NMF_SERVICE
+
+/*
+ * The following structured define each data structure used for each service type
+ * and given to each serviceCallback
+ */
+
+/*!
+ * \brief Define t_nmf_panic_data type
+ *
+ * This is the data structure passed to the service callback (inside \ref t_nmf_service_data)
+ * when t_nmf_service_type == NMF_SERVICE_PANIC
+ * \ingroup NMF_SERVICE
+ */
+typedef struct {
+ t_panic_reason panicReason; //!< The reason of the panic
+ t_panic_source panicSource; //!< The source of the panic (one of the MPCs or the ARM-EE)
+ /*!
+ * union of structures containing specific info, depending on the panicSource
+ */
+ union {
+ struct {
+ t_nmf_core_id coreid; //!< The coreId of the MPC on which the panic occurred
+ t_cm_instance_handle faultingComponent; //!< The faulting component handle
+ t_uint32 panicInfo1; //!< First info (depend on \ref panicReason)
+ t_uint32 panicInfo2; //!< Second info (depend on \ref panicReason)
+ } mpc; //!< member to use if panicSource == MPC_EE
+ struct {
+ void * faultingComponent; //!< The faulting component handle
+ t_uint32 panicInfo1; //!< First info (depend on \ref panicReason)
+ t_uint32 panicInfo2; //!< Second info (depend on \ref panicReason)
+ } host; //!< member to use if panicSource == HOST_EE
+ } info; //!< union of structures containing specific info, depending on the panicSource
+} t_nmf_panic_data;
+
+/*!
+ * \brief Define t_nmf_shutdown_data type
+ *
+ * This is the data structure passed to the service callback (inside \ref t_nmf_service_data)
+ * when t_nmf_service_type == NMF_SERVICE_SHUTDOWN
+ * \ingroup NMF_SERVICE
+ */
+typedef struct {
+ t_nmf_core_id coreid; //!< The coreId of the MPC which has been shut down
+} t_nmf_shutdown_data;
+
+/*!
+ * \brief Define t_nmf_service_data type
+ *
+ * It gives the data passed to the service callbacks for each service type.
+ * This is a union whose member to use is determined by the given \ref t_nmf_service_type
+ *
+ * \ingroup NMF_SERVICE
+ */
+typedef union {
+ t_nmf_panic_data panic; //!< if service_type == NMF_SERVICE_PANIC
+ t_nmf_shutdown_data shutdown; //!< if service_type == NMF_SERVICE_SHUTDOWN
+} t_nmf_service_data;
+
+/*!
+ * \brief Define the t_nmf_serviceCallback function type used to dispatch service messages to the user.
+ * \ingroup NMF_SERVICE
+ */
+typedef void (*t_nmf_serviceCallback)(void *contextHandler, t_nmf_service_type serviceType, t_nmf_service_data *serviceData);
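+
+/*
+ * Illustrative sketch (an assumption, not an excerpt of an existing client):
+ * a callback matching t_nmf_serviceCallback that dispatches on the service
+ * type and reads the panic data member selected by panicSource, as described
+ * above.
+ *
+ *   static void my_service_cb(void *ctx, t_nmf_service_type type,
+ *                             t_nmf_service_data *data)
+ *   {
+ *       if (type == NMF_SERVICE_PANIC) {
+ *           if (data->panic.panicSource == MPC_EE)
+ *               ; // MPC panic: use data->panic.info.mpc.coreid, panicInfo1/2
+ *           else
+ *               ; // host panic: use the data->panic.info.host fields
+ *       } else if (type == NMF_SERVICE_SHUTDOWN) {
+ *           ; // data->shutdown.coreid identifies the MPC that was shut down
+ *       }
+ *   }
+ */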
+
+#endif //SERVICE_TYPE_H
diff --git a/drivers/staging/nmf-cm/osal-kernel.c b/drivers/staging/nmf-cm/osal-kernel.c
new file mode 100644
index 00000000000..0dc8328dfc0
--- /dev/null
+++ b/drivers/staging/nmf-cm/osal-kernel.c
@@ -0,0 +1,1223 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/** \file osal-kernel.c
+ *
+ * Implements NMF OSAL for Linux kernel-space environment
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_STM_TRACE
+#include <trace/stm.h>
+#endif
+
+#include <cm/engine/configuration/inc/configuration_status.h>
+
+#include "cmioctl.h"
+#include "osal-kernel.h"
+#include "cm_service.h"
+#include "cmld.h"
+#include "cm_debug.h"
+#include "cm_dma.h"
+
+__iomem void *prcmu_base = NULL;
+__iomem void *prcmu_tcdm_base = NULL;
+
+/* DSP Load Monitoring */
+#define FULL_OPP 100
+#define HALF_OPP 50
+static unsigned long running_dsp = 0;
+static unsigned int dspLoadMonitorPeriod = 1000;
+module_param(dspLoadMonitorPeriod, uint, S_IWUSR|S_IRUGO);
+MODULE_PARM_DESC(dspLoadMonitorPeriod, "Period of the DSP-Load monitoring in ms");
+static unsigned int dspLoadHighThreshold = 85;
+module_param(dspLoadHighThreshold, uint, S_IWUSR|S_IRUGO);
+MODULE_PARM_DESC(dspLoadHighThreshold, "Threshold above which 100 APE OPP is requested");
+static unsigned int dspLoadLowThreshold = 35;
+module_param(dspLoadLowThreshold, uint, S_IWUSR|S_IRUGO);
+MODULE_PARM_DESC(dspLoadLowThreshold, "Threshold below which 100 APE OPP request is removed");
+static bool cm_use_ftrace;
+module_param(cm_use_ftrace, bool, S_IWUSR|S_IRUGO);
+MODULE_PARM_DESC(cm_use_ftrace, "Whether all CM debug traces go through ftrace or normal kernel output");
+
+/** \defgroup ENVIRONMENT_INITIALIZATION Environment initialization
+ * Includes functions that initialize the Linux OSAL itself, plus functions that
+ * build the configuration objects needed to initialize the Component Manager library
+ */
+
+/** \defgroup OSAL_IMPLEMENTATION OSAL implementation
+ * Linux-specific implementation of the Component Manager OSAL interface.
+ */
+
+
+/** \ingroup ENVIRONMENT_INITIALIZATION
+ * Remaps IO, SDRAM and ESRAM regions
+ *
+ * \return POSIX error code
+ */
+int remapRegions(void)
+{
+ unsigned i;
+
+ /* Remap DSP base areas */
+ for (i=0; i<NB_MPC; i++) {
+ osalEnv.mpc[i].base.data = ioremap_nocache((int)osalEnv.mpc[i].base_phys, ONE_MB);
+ if(osalEnv.mpc[i].base.data == NULL){
+ pr_err("%s: could not remap base address for %s\n", __func__, osalEnv.mpc[i].name);
+ return -ENOMEM;
+ }
+ }
+
+ /* Remap hardware semaphores */
+ osalEnv.hwsem_base = ioremap_nocache(U8500_HSEM_BASE, (4*ONE_KB));
+ if(osalEnv.hwsem_base == NULL){
+ pr_err("%s: could not remap HWSEM Base\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Remap _all_ ESRAM banks */
+ osalEnv.esram_base = ioremap_nocache(ESRAM_BASE, cfgESRAMSize*ONE_KB);
+ if(osalEnv.esram_base == NULL){
+ pr_err("%s: could not remap ESRAM Base\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Allocate code and data sections for MPC (SVA, SIA) */
+ for (i=0; i<NB_MPC; i++) {
+ /* Allocate MPC SDRAM code area */
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length;
+ osalEnv.mpc[i].hwmem_code = hwmem_alloc(osalEnv.mpc[i].sdram_code.size,
+ //HWMEM_ALLOC_HINT_CACHE_WB,
+ HWMEM_ALLOC_HINT_WRITE_COMBINE | HWMEM_ALLOC_HINT_UNCACHED,
+ HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
+ HWMEM_MEM_CONTIGUOUS_SYS);
+ if (IS_ERR(osalEnv.mpc[i].hwmem_code)) {
+ int err = PTR_ERR(osalEnv.mpc[i].hwmem_code);
+ osalEnv.mpc[i].hwmem_code = NULL;
+ pr_err("%s: could not allocate SDRAM Code for %s\n",
+ __func__, osalEnv.mpc[i].name);
+ return err;
+ }
+ osalEnv.mpc[i].sdram_code.data = hwmem_kmap(osalEnv.mpc[i].hwmem_code);
+ if (IS_ERR(osalEnv.mpc[i].sdram_code.data)) {
+ int err = PTR_ERR(osalEnv.mpc[i].sdram_code.data);
+ osalEnv.mpc[i].sdram_code.data = NULL;
+ pr_err("%s: could not map SDRAM Code for %s\n", __func__, osalEnv.mpc[i].name);
+ return err;
+ }
+ mem_chunk_length = 1;
+ (void)hwmem_pin(osalEnv.mpc[i].hwmem_code, &mem_chunk, &mem_chunk_length);
+ osalEnv.mpc[i].sdram_code_phys = mem_chunk.paddr;
+ /* Allocate MPC SDRAM data area, taking care of whether the data is shared or not */
+ if (osalEnv.mpc[i].sdram_data.size == 0) {
+ /* size of 0 means shared data segment, reuse the same param as for first MPC */
+ osalEnv.mpc[i].sdram_data_phys = osalEnv.mpc[0].sdram_data_phys;
+ osalEnv.mpc[i].sdram_data.data = osalEnv.mpc[0].sdram_data.data;
+ osalEnv.mpc[i].sdram_data.size = osalEnv.mpc[0].sdram_data.size;
+ } else {
+ /* If we do not share the data segment or if this is the first MPC */
+ osalEnv.mpc[i].hwmem_data = hwmem_alloc(osalEnv.mpc[i].sdram_data.size,
+ HWMEM_ALLOC_HINT_WRITE_COMBINE | HWMEM_ALLOC_HINT_UNCACHED,
+ HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
+ HWMEM_MEM_CONTIGUOUS_SYS);
+ if (IS_ERR(osalEnv.mpc[i].hwmem_data)) {
+ int err = PTR_ERR(osalEnv.mpc[i].hwmem_data);
+ osalEnv.mpc[i].hwmem_data = NULL;
+ pr_err("%s: could not allocate SDRAM Data for %s\n",
+ __func__, osalEnv.mpc[i].name);
+ return err;
+ }
+ mem_chunk_length = 1;
+ (void)hwmem_pin(osalEnv.mpc[i].hwmem_data,
+ &mem_chunk, &mem_chunk_length);
+ osalEnv.mpc[i].sdram_data_phys = mem_chunk.paddr;
+ osalEnv.mpc[i].sdram_data.data = hwmem_kmap(osalEnv.mpc[i].hwmem_data);
+ if (IS_ERR(osalEnv.mpc[i].sdram_data.data)) {
+ int err = PTR_ERR(osalEnv.mpc[i].sdram_data.data);
+ osalEnv.mpc[i].sdram_data.data = NULL;
+ pr_err("%s: could not map SDRAM Data for %s\n",
+ __func__, osalEnv.mpc[i].name);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/** \ingroup ENVIRONMENT_INITIALIZATION
+ * Unmaps IO, SDRAM and ESRAM regions
+ */
+void unmapRegions(void)
+{
+ unsigned i;
+
+ /* Release SVA, SIA, Hardware semaphores and embedded SRAM mappings */
+ for (i=0; i<NB_MPC; i++) {
+ if(osalEnv.mpc[i].base.data != NULL)
+ iounmap(osalEnv.mpc[i].base.data);
+ }
+
+ if(osalEnv.hwsem_base != NULL)
+ iounmap(osalEnv.hwsem_base);
+
+ if(osalEnv.esram_base != NULL)
+ iounmap(osalEnv.esram_base);
+
+ /*
+ * Free SVA and SIA code and data sections or release their mappings,
+ * according to how the memory allocation was done
+ */
+ for (i=0; i<NB_MPC; i++) {
+ if (osalEnv.mpc[i].sdram_code.data != NULL) {
+ hwmem_unpin(osalEnv.mpc[i].hwmem_code);
+ hwmem_kunmap(osalEnv.mpc[i].hwmem_code);
+ if (osalEnv.mpc[i].hwmem_code != NULL)
+ hwmem_release(osalEnv.mpc[i].hwmem_code);
+ }
+
+ /* If data segment is shared, we must free only the first data segment */
+ if (((i == 0) || (osalEnv.mpc[i].sdram_data.data != osalEnv.mpc[0].sdram_data.data))
+ && (osalEnv.mpc[i].sdram_data.data != NULL)) {
+ hwmem_unpin(osalEnv.mpc[i].hwmem_data);
+ hwmem_kunmap(osalEnv.mpc[i].hwmem_data);
+ if (osalEnv.mpc[i].hwmem_data != NULL)
+ hwmem_release(osalEnv.mpc[i].hwmem_data);
+ }
+ }
+}
+
+
+/** \ingroup ENVIRONMENT_INITIALIZATION
+ * Fills a t_nmf_hw_mapping_desc object
+ *
+ * \param nmfHwMappingDesc Pointer to a t_nmf_hw_mapping_desc object
+ * \return POSIX error code
+ */
+int getNmfHwMappingDesc(t_nmf_hw_mapping_desc* nmfHwMappingDesc)
+{
+
+ if (nmfHwMappingDesc == NULL)
+ return -ENXIO;
+
+ nmfHwMappingDesc->esramDesc.systemAddr.physical = ESRAM_BASE;
+ nmfHwMappingDesc->esramDesc.systemAddr.logical = (t_cm_logical_address)osalEnv.esram_base;
+ nmfHwMappingDesc->esramDesc.size = cfgESRAMSize*ONE_KB;
+
+ nmfHwMappingDesc->hwSemaphoresMappingBaseAddr.physical = U8500_HSEM_BASE;
+ nmfHwMappingDesc->hwSemaphoresMappingBaseAddr.logical = (t_cm_logical_address)osalEnv.hwsem_base;
+
+ return 0;
+}
+
+/** \ingroup ENVIRONMENT_INITIALIZATION
+ * Fills a t_cm_system_address object
+ *
+ * \param i Index of the MPC
+ * \param mpcSystemAddress Pointer to a t_cm_system_address object
+ */
+void getMpcSystemAddress(unsigned i, t_cm_system_address* mpcSystemAddress)
+{
+ mpcSystemAddress->physical = (t_cm_physical_address)osalEnv.mpc[i].base_phys;
+ mpcSystemAddress->logical = (t_cm_logical_address)osalEnv.mpc[i].base.data;
+}
+
+
+/** \ingroup ENVIRONMENT_INITIALIZATION
+ * Fills t_nmf_memory_segment objects for MPC code and data segments
+ *
+ * \param i Index of the MPC to initialize
+ * \param codeSegment Pointer to a t_nmf_memory_segment (code segment)
+ * \param dataSegment Pointer to a t_nmf_memory_segment (data segment)
+ */
+void getMpcSdramSegments(unsigned i, t_nmf_memory_segment* codeSegment, t_nmf_memory_segment* dataSegment)
+{
+ codeSegment->systemAddr.logical = (t_cm_logical_address)osalEnv.mpc[i].sdram_code.data;
+ codeSegment->systemAddr.physical = osalEnv.mpc[i].sdram_code_phys;
+ codeSegment->size = osalEnv.mpc[i].sdram_code.size;
+
+ dataSegment->systemAddr.logical = (t_cm_logical_address)osalEnv.mpc[i].sdram_data.data;
+ dataSegment->systemAddr.physical = osalEnv.mpc[i].sdram_data_phys;
+ dataSegment->size = osalEnv.mpc[i].sdram_data.size;
+}
+
+#ifdef CM_DEBUG_ALLOC
+#include <linux/kallsyms.h>
+struct cm_alloc cm_alloc;
+
+/**
+ * These routines initialize the structures used to trace all alloc/free operations.
+ * They are used in debug mode to track memory leaks in the allocations
+ * done through the OSAL.
+ */
+void init_debug_alloc(void)
+{
+ INIT_LIST_HEAD(&cm_alloc.chain);
+ spin_lock_init(&cm_alloc.lock);
+}
+
+void cleanup_debug_alloc(void)
+{
+ struct cm_alloc_elem *entry, *next;
+ char buffer[128];
+
+ list_for_each_entry_safe(entry, next, &cm_alloc.chain, elem) {
+ sprint_symbol(buffer, (int)entry->caller);
+ pr_err("/!\\ ALLOC(size=%d) not freed from: 0x%p (%s)\n",
+ entry->size, entry->caller, buffer);
+ list_del(&entry->elem);
+ if ((void*)entry >= (void*)VMALLOC_START
+ && (void*)entry < (void*)VMALLOC_END)
+ vfree(entry);
+ else
+ kfree(entry);
+ }
+}
+
+void dump_debug_alloc(void)
+{
+ struct cm_alloc_elem *entry, *next;
+ char buffer[128];
+
+ pr_err("Current allocated memory:\n");
+ list_for_each_entry_safe(entry, next, &cm_alloc.chain, elem) {
+ sprint_symbol(buffer, (int)entry->caller);
+ pr_err("=> Alloc of size=%d from: 0x%p (%s)\n",
+ entry->size, entry->caller, buffer);
+ }
+}
+#endif
+
+
+/** \ingroup OSAL_IMPLEMENTATION
+ * Called by CM_ProcessMpcEvent in interrupt/tasklet context. Schedules the DFC.
+ * Enqueues the new event in the process' message queue.
+ *
+ * \note This is _not_ called in response to internal events such as in
+ * response to a CM_InstantiateComponent. It is called when user-defined
+ * functions need to be called in skeletons. This behavior is different
+ * from 0.8.1 version.
+ */
+void OSAL_PostDfc(t_nmf_mpc2host_handle upLayerTHIS, t_uint32 methodIndex, t_event_params_handle ptr, t_uint32 size)
+{
+ /* skelwrapper has been created in CM_SYSCALL_BindComponentToCMCore and conveys per-process private data */
+ t_skelwrapper* skelwrapper = (t_skelwrapper*)upLayerTHIS;
+ struct osal_msg* message;
+
+ /* If the channel has been closed, no more reader exists
+ => discard the message */
+ if (skelwrapper->channelPriv->state == CHANNEL_CLOSED) {
+ pr_warning("%s: message discarded (channel closed)\n",
+ __func__ );
+ return;
+ }
+
+ /* Create a new message */
+ message = kmalloc(sizeof(*message), GFP_ATOMIC);
+ if (!message) {
+ pr_err("%s: message discarded (alloc failed)\n", __func__ );
+ return;
+ }
+
+ /* Stuff it */
+ plist_node_init(&message->msg_entry, 0);
+ message->msg_type = MSG_INTERFACE;
+ message->d.itf.skelwrap = skelwrapper;
+ message->d.itf.methodIdx = methodIndex;
+ message->d.itf.anyPtr = ptr;
+ message->d.itf.ptrSize = size;
+
+ /* Enqueue it */
+ /* This should be protected with the cmPriv->msgQueueLock held,
+ but we know by design that we are safe here: we are alone here, in
+ tasklet (soft-interrupt) context, and when the queue is accessed in
+ process context, soft-irqs are disabled.
+ */
+ spin_lock_bh(&skelwrapper->channelPriv->bh_lock);
+ plist_add(&message->msg_entry, &skelwrapper->channelPriv->messageQueue);
+ spin_unlock_bh(&skelwrapper->channelPriv->bh_lock);
+
+ /* Wake up process' wait queue */
+ wake_up(&skelwrapper->channelPriv->waitq);
+}
+
+
+#define MAX_LOCKS 8 // max number of locks/semaphores creatable
+static unsigned long semused = 0; // bit field for used semaphores
+static unsigned long lockused = 0; // bit field for used mutexes
+static struct mutex cmld_locks[MAX_LOCKS];
+
+/** \ingroup OSAL_IMPLEMENTATION
+ */
+t_nmf_osal_sync_handle OSAL_CreateLock(void)
+{
+ int i;
+
+ for (i=0; i<MAX_LOCKS; i++)
+ if (!test_and_set_bit(i, &lockused)) {
+ struct mutex* mutex = &cmld_locks[i];
+ mutex_init(mutex);
+ return (t_nmf_osal_sync_handle)mutex;
+ }
+
+ return (t_nmf_osal_sync_handle)NULL;
+}
+
+
+/** \ingroup OSAL_IMPLEMENTATION
+ */
+void OSAL_Lock(t_nmf_osal_sync_handle handle)
+{
+ // unfortunately there is no return value to this function
+ // so we cannot use 'mutex_lock_killable()'
+ mutex_lock((struct mutex*)handle);
+}
+
+
+/** \ingroup OSAL_IMPLEMENTATION
+ */
+void OSAL_Unlock(t_nmf_osal_sync_handle handle)
+{
+ mutex_unlock((struct mutex*)handle);
+}
+
+
+/** \ingroup OSAL_IMPLEMENTATION
+ */
+void OSAL_DestroyLock(t_nmf_osal_sync_handle handle)
+{
+ int i;
+
+ // clear the bit in the bits field about used locks
+ i = ((struct mutex*)handle - cmld_locks);
+
+ clear_bit(i, &lockused);
+}
+
+static struct semaphore cmld_semaphores[MAX_LOCKS];
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * Goal: Used by CM to synchronize with code running on the MPC side.
+ *
+ * \param[in] value : Initial value of semaphore.
+ *
+ * \return handle of the Semaphore created
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup OSAL
+ */
+t_nmf_osal_sem_handle OSAL_CreateSemaphore(t_uint32 value)
+{
+ int i;
+
+ for (i=0; i<MAX_LOCKS; i++)
+ if (!test_and_set_bit(i, &semused)) {
+ struct semaphore* sem = &cmld_semaphores[i];
+ sema_init(sem, value);
+ return (t_nmf_osal_sem_handle)sem;
+ }
+
+ return (t_nmf_osal_sem_handle)NULL;
+}
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * Goal: Used by CM to synchronize with code running on the MPC side. This function can be called in
+ * IRQ context by CM.
+ *
+ * param[in] : handle of the Semaphore whose value we increase, potentially waking up a blocked thread.
+ *
+ * param[in] : aCtx is a hint to indicate to the OS that we are in a non-normal context (e.g. under interrupt).
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup OSAL
+ */
+void OSAL_SemaphorePost(t_nmf_osal_sem_handle handle, t_uint8 aCtx)
+{
+ up((struct semaphore*)handle);
+}
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * Goal: Used by CM to synchronize with code running on the MPC side.
+ *
+ * param[in] : handle of the Semaphore whose value we decrease, potentially blocking the current thread.
+ *
+ * param[in] : maximum time in ms after which the blocked thread is woken up. In this case the function returns SYNC_ERROR_TIMEOUT.
+ *
+ * \return error number: SYNC_ERROR_TIMEOUT in case the semaphore is not released within timeOutInMs.
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup OSAL
+ */
+t_nmf_osal_sync_error OSAL_SemaphoreWaitTimed(t_nmf_osal_sem_handle handle,
+ t_uint32 timeOutInMs)
+{
+ if (down_timeout((struct semaphore*)handle, msecs_to_jiffies(timeOutInMs)))
+ return SYNC_ERROR_TIMEOUT;
+ else
+ return SYNC_OK;
+}
+
+/*!
+ * \brief Description of the Synchronization part of the OS Adaptation Layer
+ *
+ * Goal: Used by CM to synchronize with code running on the MPC side.
+ *
+ * param[in] : handle of the Semaphore to be destroyed
+ *
+ * Called by:
+ * - any CM API call
+ *
+ * \ingroup OSAL
+ */
+void OSAL_DestroySemaphore(t_nmf_osal_sem_handle handle)
+{
+ int i;
+
+ // clear the bit in the bits field about used locks
+ i = ((struct semaphore*)handle - cmld_semaphores);
+
+ clear_bit(i, &semused);
+}
+
+/** \ingroup OSAL_IMPLEMENTATION
+ * OSAL alloc implementation
+ *
+ * In both the OSAL_Alloc() and OSAL_Alloc_Zero() functions, the strategy is to use
+ * kmalloc() as it is the most efficient and most common way to allocate memory.
+ * For big allocations, kmalloc() may fail because memory is too fragmented
+ * (kmalloc() allocates contiguous memory). In that case, we fall back to vmalloc()
+ * instead.
+ * In OSAL_Free(), we rely on the virtual address to know which of kfree() or
+ * vfree() to use (vmalloc() uses its own range of virtual addresses)
+ */
+void* OSAL_Alloc(t_cm_size size)
+{
+#ifdef CM_DEBUG_ALLOC
+ struct cm_alloc_elem *entry;
+
+ if (size == 0)
+ return NULL;
+
+ entry = kmalloc(size + sizeof(*entry), GFP_KERNEL);
+
+ if (entry == NULL) {
+ entry = vmalloc(size + sizeof(*entry));
+
+ if (entry == NULL) {
+ pr_alert("%s: kmalloc(%d) and vmalloc(%d) failed\n",
+ __func__, (int)size, (int)size);
+ dump_debug_alloc();
+ return NULL;
+ }
+ }
+ /* return address of the caller */
+ entry->caller = __builtin_return_address(0);
+ entry->size = size;
+
+ spin_lock(&cm_alloc.lock);
+ list_add_tail(&entry->elem, &cm_alloc.chain);
+ spin_unlock(&cm_alloc.lock);
+
+ return entry->addr;
+#else
+ void* mem;
+
+ if (size == 0)
+ return NULL;
+ mem = kmalloc(size, GFP_KERNEL);
+ if (mem == NULL) {
+ mem = vmalloc(size);
+ if (mem == NULL)
+ pr_alert("CM (%s): No more memory (requested "
+ "size=%d) !!!\n", __func__, (int)size);
+ }
+ return mem;
+#endif
+}
+
+
+/** \ingroup OSAL_IMPLEMENTATION
+ * OSAL alloc implementation
+ */
+void* OSAL_Alloc_Zero(t_cm_size size)
+{
+#ifdef CM_DEBUG_ALLOC
+ struct cm_alloc_elem *entry;
+
+ if (size == 0)
+ return NULL;
+
+ entry = kzalloc(size + sizeof(*entry), GFP_KERNEL);
+ if (entry == NULL) {
+ entry = vmalloc(size + sizeof(*entry));
+ if (entry == NULL) {
+ pr_alert("%s: kmalloc(%d) and vmalloc(%d) failed\n",
+ __func__, (int)size, (int)size);
+ dump_debug_alloc();
+ return NULL;
+ } else {
+ memset(entry, 0, size + sizeof(*entry));
+ }
+ }
+
+ /* return address of the caller */
+ entry->caller = __builtin_return_address(0);
+ entry->size = size;
+
+ spin_lock(&cm_alloc.lock);
+ list_add_tail(&entry->elem, &cm_alloc.chain);
+ spin_unlock(&cm_alloc.lock);
+
+ return entry->addr;
+#else
+ void* mem;
+
+ if (size == 0)
+ return NULL;
+ mem = kzalloc(size, GFP_KERNEL);
+ if (mem == NULL) {
+ mem = vmalloc(size);
+ if (mem == NULL)
+ pr_alert("CM (%s): No more memory (requested "
+ "size=%d) !!!\n", __func__, (int)size);
+ else
+ memset(mem, 0, size);
+ }
+
+ return mem;
+#endif
+}
+
+
+/** \ingroup OSAL_IMPLEMENTATION
+ * OSAL free implementation
+ */
+void OSAL_Free(void* mem)
+{
+#ifdef CM_DEBUG_ALLOC
+ struct cm_alloc_elem *entry = container_of(mem, struct cm_alloc_elem, addr);
+ unsigned int i;
+ char pattern[4] = { 0xEF, 0xBE, 0xAD, 0xDE };
+
+ if (mem == NULL)
+ return;
+
+ /* fill with a pattern to detect bad re-use of this area */
+ for (i=0; i<entry->size; i++)
+ entry->addr[i] = pattern[i%4];
+
+ spin_lock(&cm_alloc.lock);
+ list_del(&entry->elem);
+ spin_unlock(&cm_alloc.lock);
+
+ if ((void*)entry >= (void*)VMALLOC_START
+ && (void*)entry < (void*)VMALLOC_END)
+ vfree(entry);
+ else
+ kfree(entry);
+#else
+ if (mem >= (void*)VMALLOC_START && mem < (void*)VMALLOC_END)
+ vfree(mem);
+ else
+ kfree(mem);
+#endif
+}
+
+/** \ingroup OSAL_IMPLEMENTATION
+ * OSAL Copy implementation
+ * This copies some data from user space to kernel space.
+ * The implementation differs on Symbian.
+ */
+t_cm_error OSAL_Copy(void *dst, const void *src, t_cm_size size)
+{
+ if (copy_from_user(dst, src, size))
+ return CM_UNKNOWN_MEMORY_HANDLE;
+ return CM_OK;
+}
+
+/** \ingroup OSAL_IMPLEMENTATION
+ * OSAL write64 function implementation
+ */
+void OSAL_Write64(t_nmf_trace_channel channel, t_uint8 isTimestamped, t_uint64 value)
+{
+#ifdef CONFIG_STM_TRACE
+ if (isTimestamped)
+ stm_tracet_64(channel, value);
+ else
+ stm_trace_64(channel, value);
+#endif
+}
+
+
+/** \ingroup OSAL_IMPLEMENTATION
+ * OSAL log function implementation
+ */
+void OSAL_Log(const char *format, int param1, int param2, int param3, int param4, int param5, int param6)
+{
+ if (cm_use_ftrace)
+ trace_printk(format,
+ param1, param2, param3, param4, param5, param6);
+ else
+ printk(format, param1, param2, param3, param4, param5, param6);
+}
+
+/**
+ * compute the dsp load
+ *
+ * return -1 in case of failure, a value between 0 and 100 otherwise
+ */
+static s8 computeDspLoad(t_cm_mpc_load_counter *oldCounter, t_cm_mpc_load_counter *counter)
+{
+ u32 t, l;
+
+ if ((oldCounter->totalCounter == 0) && (oldCounter->loadCounter == 0))
+ return -1; // Failure or not started ?
+ if ((counter->totalCounter == 0) && (counter->loadCounter == 0))
+ return -1; // Failure or already stopped ?
+
+ if (counter->totalCounter < oldCounter->totalCounter)
+ t = (u32)((((u64)-1) - oldCounter->totalCounter)
+ + counter->totalCounter + 1);
+ else
+ t = (u32)(counter->totalCounter - oldCounter->totalCounter);
+
+ if (counter->loadCounter < oldCounter->loadCounter)
+ l = (u32)((((u64)-1) - oldCounter->loadCounter)
+ + counter->loadCounter + 1);
+ else
+ l = (u32)(counter->loadCounter - oldCounter->loadCounter);
+
+ if (t == 0) // not significant
+ return -1;
+
+ if (l > t) // not significant
+ return -1;
+
+ return (l*100) / t;
+}
+
+static void wakeup_process(unsigned long data)
+{
+ wake_up_process((struct task_struct *)data);
+}
+
+/**
+ * Thread entry function for monitoring the DSP load
+ */
+static int dspload_monitor(void *idx)
+{
+ int i = (int)idx;
+ unsigned char current_opp_request = FULL_OPP;
+ struct mpcConfig *mpc = &osalEnv.mpc[i];
+ struct timer_list timer;
+
+ timer.function = wakeup_process;
+ timer.data = (unsigned long)current;
+ init_timer_deferrable(&timer);
+
+#ifdef CONFIG_DEBUG_FS
+ mpc->opp_request = current_opp_request;
+#endif
+ if (prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
+ (char*)mpc->name,
+ current_opp_request))
+ pr_err("CM Driver: Add QoS failed\n");
+
+ /*
+ * Wait for 500ms before initializing the counter,
+ * to let the DSP boot (init of the counter will fail if the
+ * DSP is not booted).
+ */
+ schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+
+ /* init counter */
+ if (CM_GetMpcLoadCounter(mpc->coreId,
+ &mpc->oldLoadCounter) != CM_OK)
+ pr_warn("CM Driver: Failed to init load counter for %s\n",
+ mpc->name);
+
+ while (!kthread_should_stop()) {
+ t_cm_mpc_load_counter loadCounter;
+ s8 load = -1;
+ unsigned long expire;
+
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+
+ expire = msecs_to_jiffies(dspLoadMonitorPeriod) + jiffies;
+
+ mod_timer(&timer, expire);
+ schedule();
+ /* We can be woken up before the expiration of the timer
+ but we don't need to handle that case as the
+ computation of the DSP load takes that into account */
+
+ if (!test_bit(i, &running_dsp))
+ continue;
+
+ if (CM_GetMpcLoadCounter(mpc->coreId,
+ &loadCounter) != CM_OK)
+ loadCounter = mpc->oldLoadCounter;
+
+#ifdef CONFIG_DEBUG_FS
+ mpc->load =
+#endif
+ load = computeDspLoad(&mpc->oldLoadCounter, &loadCounter);
+ mpc->oldLoadCounter = loadCounter;
+
+ if (load == -1)
+ continue;
+ /* check if we must request more opp */
+ if ((current_opp_request == HALF_OPP)
+ && (load > dspLoadHighThreshold)) {
+#ifdef CONFIG_DEBUG_FS
+ mpc->opp_request =
+#endif
+ current_opp_request = FULL_OPP;
+ if (cm_debug_level)
+ pr_info("CM Driver: Request QoS OPP %d for %s\n",
+ current_opp_request, mpc->name);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
+ (char*)mpc->name,
+ current_opp_request);
+ }
+ /* check if we can request less opp */
+ else if ((current_opp_request == FULL_OPP)
+ && (load < dspLoadLowThreshold)) {
+#ifdef CONFIG_DEBUG_FS
+ mpc->opp_request =
+#endif
+ current_opp_request = HALF_OPP;
+ if (cm_debug_level)
+ pr_info("CM Driver: Request QoS OPP %d for %s\n",
+ current_opp_request, mpc->name);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
+ (char*)mpc->name,
+ current_opp_request);
+ }
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ mpc->opp_request = mpc->load = 0;
+#endif
+ del_singleshot_timer_sync(&timer);
+ if (cm_debug_level)
+ pr_info("CM Driver: Remove QoS OPP for %s\n", mpc->name);
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char*)mpc->name);
+ return 0;
+}
+
+static bool enable_auto_pm = 1;
+module_param(enable_auto_pm, bool, S_IWUSR|S_IRUGO);
+
+/** \ingroup OSAL_IMPLEMENTATION
+ * Used by CM to disable a power resource
+ */
+void OSAL_DisablePwrRessource(t_nmf_power_resource resource, t_uint32 firstParam, t_uint32 secondParam)
+{
+ switch (resource) {
+ case CM_OSAL_POWER_SxA_CLOCK: {
+ unsigned idx = COREIDX(firstParam);
+ struct osal_msg msg;
+
+ if (idx >= NB_MPC) {
+ pr_err("CM Driver(%s(res=%d)): core %u unknown\n",
+ __func__, (int)resource, (unsigned)firstParam);
+ return;
+ }
+
+ cm_debug_destroy_tcm_file(idx);
+
+ /* Stop the DSP load monitoring */
+ clear_bit(idx, &running_dsp);
+ if (osalEnv.mpc[idx].monitor_tsk) {
+ kthread_stop(osalEnv.mpc[idx].monitor_tsk);
+ osalEnv.mpc[idx].monitor_tsk = NULL;
+ }
+
+ /* Stop the DMA (normally done on DSP side, but be safe) */
+ if (firstParam == SIA_CORE_ID)
+ cmdma_stop_dma();
+
+ /* Stop the DSP */
+ if (regulator_disable(osalEnv.mpc[idx].mmdsp_regulator) < 0)
+ pr_err("CM Driver(%s): can't disable regulator %s-mmsdp\n",
+ __func__, osalEnv.mpc[idx].name);
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&osalEnv.mpc[idx].wakelock);
+#endif
+
+ /* Create and dispatch a shutdown service message */
+ msg.msg_type = MSG_SERVICE;
+ msg.d.srv.srvType = NMF_SERVICE_SHUTDOWN;
+ msg.d.srv.srvData.shutdown.coreid = firstParam;
+ dispatch_service_msg(&msg);
+ break;
+ }
+ case CM_OSAL_POWER_SxA_AUTOIDLE:
+ switch (firstParam) {
+ case SVA_CORE_ID:
+ osalEnv.dsp_sleep.sva_auto_pm_enable = PRCMU_AUTO_PM_OFF;
+ osalEnv.dsp_sleep.sva_power_on = 0;
+ osalEnv.dsp_sleep.sva_policy = PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF;
+ break;
+ case SIA_CORE_ID:
+ osalEnv.dsp_sleep.sia_auto_pm_enable = PRCMU_AUTO_PM_OFF;
+ osalEnv.dsp_sleep.sia_power_on = 0;
+ osalEnv.dsp_sleep.sia_policy = PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF;
+ break;
+ default:
+ pr_err("CM Driver(%s(res=%d)): core %u unknown\n", __func__, (int)resource, (unsigned)firstParam);
+ return;
+ }
+ if (enable_auto_pm)
+ prcmu_configure_auto_pm(&osalEnv.dsp_sleep, &osalEnv.dsp_idle);
+ break;
+ case CM_OSAL_POWER_SxA_HARDWARE: {
+ unsigned idx = COREIDX(firstParam);
+ if (idx >= NB_MPC) {
+ pr_err("CM Driver(%s(res=%d)): core %u unknown\n",
+ __func__, (int)resource, (unsigned)firstParam);
+ return;
+ }
+ if (regulator_disable(osalEnv.mpc[idx].pipe_regulator) < 0)
+ pr_err("CM Driver(%s): can't disable regulator %s-pipe\n",
+ __func__, osalEnv.mpc[idx].name);
+ break;
+ }
+ case CM_OSAL_POWER_HSEM:
+ break;
+ case CM_OSAL_POWER_SDRAM:
+ break;
+ case CM_OSAL_POWER_ESRAM: {
+ int i;
+ /* firstParam: base address; secondParam: size
+ U8500_ESRAM_BASE is the start address of BANK 0,
+ BANK size=0x20000 */
+
+ /* Compute the relative end address of the range,
+ relative to base address of BANK1 */
+ secondParam = (firstParam+secondParam-U8500_ESRAM_BANK1-1);
+
+ /* if end is below base address of BANK1, it means that full
+ range of addresses is on Bank0 */
+ if (((int)secondParam) < 0)
+ break;
+ /* Compute the index of the last bank accessed among
+ esram 1+2 and esram 3+4 banks */
+ secondParam /= (2*U8500_ESRAM_BANK_SIZE);
+ WARN_ON(secondParam > 1);
+
+ /* Compute the index of the first bank accessed among esram 1+2
+ and esram 3+4 banks
+ Do not manage Bank 0 (secured, must be always ON) */
+ if (firstParam < U8500_ESRAM_BANK1)
+ firstParam = 0;
+ else
+ firstParam = (firstParam-U8500_ESRAM_BANK1)/(2*U8500_ESRAM_BANK_SIZE);
+
+ /* power off the banks 1+2 and 3+4 if accessed. */
+ for (i=firstParam; i<=secondParam; i++) {
+ if (regulator_disable(osalEnv.esram_regulator[i]) < 0)
+ pr_err("CM Driver(%s): can't disable regulator"
+ "for esram bank %s\n", __func__,
+ i ? "34" : "12");
+ }
+ break;
+ }
+ default:
+ pr_err("CM Driver(%s): resource %d unknown/not supported\n",
+ __func__, (int)resource);
+ }
+}
+
+/** \ingroup OSAL_IMPLEMENTATION
+ * Used by CM to enable a power resource
+ */
+t_cm_error OSAL_EnablePwrRessource(t_nmf_power_resource resource, t_uint32 firstParam, t_uint32 secondParam)
+{
+ switch (resource) {
+ case CM_OSAL_POWER_SxA_CLOCK: {
+ unsigned idx = COREIDX(firstParam);
+
+ if (idx >= NB_MPC) {
+ pr_err("CM Driver(%s(res=%d)): core %u unknown\n", __func__, (int)resource, (unsigned)firstParam);
+ return CM_INVALID_PARAMETER;
+ }
+
+ /* Start the DSP */
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&osalEnv.mpc[idx].wakelock);
+#endif
+ if (regulator_enable(osalEnv.mpc[idx].mmdsp_regulator) < 0)
+ pr_err("CM Driver(%s): can't enable regulator %s-mmsdp\n", __func__, osalEnv.mpc[idx].name);
+
+ /* Start the DSP load monitoring for this dsp */
+ set_bit(idx, &running_dsp);
+ osalEnv.mpc[idx].monitor_tsk = kthread_run(&dspload_monitor,
+ (void*)idx,
+ "%s-loadd",
+ osalEnv.mpc[idx].name);
+ if (IS_ERR(osalEnv.mpc[idx].monitor_tsk)) {
+ pr_err("CM Driver: failed to start dspmonitord "
+ "thread: %ld\n", PTR_ERR(osalEnv.mpc[idx].monitor_tsk));
+ osalEnv.mpc[idx].monitor_tsk = NULL;
+ }
+
+ cm_debug_create_tcm_file(idx);
+ break;
+ }
+ case CM_OSAL_POWER_SxA_AUTOIDLE:
+ switch (firstParam) {
+ case SVA_CORE_ID:
+ osalEnv.dsp_sleep.sva_auto_pm_enable = PRCMU_AUTO_PM_ON;
+ osalEnv.dsp_sleep.sva_power_on = PRCMU_AUTO_PM_POWER_ON_HSEM | PRCMU_AUTO_PM_POWER_ON_ABB_FIFO_IT;
+ osalEnv.dsp_sleep.sva_policy = PRCMU_AUTO_PM_POLICY_DSP_OFF_RAMRET_HWP_OFF;
+ break;
+ case SIA_CORE_ID:
+ osalEnv.dsp_sleep.sia_auto_pm_enable = PRCMU_AUTO_PM_ON;
+ osalEnv.dsp_sleep.sia_power_on = PRCMU_AUTO_PM_POWER_ON_HSEM | PRCMU_AUTO_PM_POWER_ON_ABB_FIFO_IT;
+ osalEnv.dsp_sleep.sia_policy = PRCMU_AUTO_PM_POLICY_DSP_OFF_RAMRET_HWP_OFF;
+ break;
+ default:
+ pr_err("CM Driver(%s(res=%d)): core %u unknown\n", __func__, (int)resource, (unsigned)firstParam);
+ return CM_INVALID_PARAMETER;
+ }
+ if (enable_auto_pm)
+ prcmu_configure_auto_pm(&osalEnv.dsp_sleep, &osalEnv.dsp_idle);
+ break;
+ case CM_OSAL_POWER_SxA_HARDWARE: {
+ unsigned idx = COREIDX(firstParam);
+
+ if (idx >= NB_MPC) {
+ pr_err("CM Driver(%s(res=%d)): core %u unknown\n", __func__, (int)resource, (unsigned)firstParam);
+ return CM_INVALID_PARAMETER;
+ }
+ if (regulator_enable(osalEnv.mpc[idx].pipe_regulator) < 0)
+ pr_err("CM Driver(%s): can't enable regulator %s-pipe\n", __func__, osalEnv.mpc[idx].name);
+ break;
+ }
+ case CM_OSAL_POWER_HSEM:
+ return CM_OK;
+ case CM_OSAL_POWER_SDRAM:
+ break;
+ case CM_OSAL_POWER_ESRAM:
+ {
+ int i;
+ /* firstParam: base address; secondParam: size
+ U8500_ESRAM_BASE is the start address of BANK 0,
+ BANK size=0x20000 */
+
+ /* Compute the relative end address of the range, relative
+ to base address of BANK1 */
+ secondParam = (firstParam+secondParam-U8500_ESRAM_BANK1-1);
+
+ /* if end is below base address of BANK1, it means that full
+ range of addresses is on Bank0 */
+ if (((int)secondParam) < 0)
+ break;
+ /* Compute the index of the last bank accessed among esram 1+2
+ and esram 3+4 banks */
+ secondParam /= (2*U8500_ESRAM_BANK_SIZE);
+ WARN_ON(secondParam > 1);
+
+ /* Compute the index of the first bank accessed among esram 1+2
+ and esram 3+4 banks
+ Do not manage Bank 0 (secured, must be always ON) */
+ if (firstParam < U8500_ESRAM_BANK1)
+ firstParam = 0;
+ else
+ firstParam = (firstParam-U8500_ESRAM_BANK1)/(2*U8500_ESRAM_BANK_SIZE);
+
+ /* power on the banks 1+2 and 3+4 if accessed. */
+ for (i=firstParam; i<=secondParam; i++) {
+ if (regulator_enable(osalEnv.esram_regulator[i]) < 0)
+ pr_err("CM Driver(%s): can't enable regulator "
+ "for esram bank %s\n", __func__,
+ i ? "34" : "12");
+ }
+ break;
+ }
+ default:
+ pr_err("CM Driver(%s): resource %x unknown/not supported\n",
+ __func__, (int)resource);
+ return CM_INVALID_PARAMETER;
+ }
+
+ return CM_OK;
+}
+
+/*!
+ * \brief Generate 'software' panic to notify cm users
+ * that a problem occurs but no dsp panic has been sent yet
+ * (for example a dsp crash)
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+void OSAL_GeneratePanic(t_nmf_core_id coreId, t_uint32 reason)
+{
+ struct osal_msg msg;
+
+ /* Create and dispatch a shutdown service message */
+ msg.msg_type = MSG_SERVICE;
+ msg.d.srv.srvType = NMF_SERVICE_PANIC;
+ msg.d.srv.srvData.panic.panicReason = MPC_NOT_RESPONDING_PANIC;
+ msg.d.srv.srvData.panic.panicSource = MPC_EE;
+ msg.d.srv.srvData.panic.info.mpc.coreid = coreId;
+ msg.d.srv.srvData.panic.info.mpc.faultingComponent = 0;
+ msg.d.srv.srvData.panic.info.mpc.panicInfo1 = reason;
+ msg.d.srv.srvData.panic.info.mpc.panicInfo2 = 0;
+ dispatch_service_msg(&msg);
+}
+
+/*!
+ * \brief Generate an OS-Panic. Called in from CM_ASSERT().
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+void OSAL_Panic(void)
+{
+ panic("FATAL ISSUE IN THE CM DRIVER !!");
+}
+#include <mach/dcache.h>
+/*!
+ * \brief Clean data cache in DDR in order to be accessible from peripheral.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+void OSAL_CleanDCache(t_uint32 startAddr, t_uint32 size)
+{
+#if 0
+ /*
+ * Currently, the code sections are non-cached/buffered,
+ * which normally doesn't required the maintenance done below.
+ * As the cost is low (doesn't do much thing), I keep it in case
+ * of the memory settings are changed later.
+ */
+
+ struct hwmem_region region;
+ struct mpcConfig *mpc;
+ t_uint32 endAddr = startAddr + size;
+
+ if (startAddr >= (u32)osalEnv.mpc[0].sdram_code.data
+ && endAddr <= (u32)(osalEnv.mpc[0].sdram_code.data
+ + osalEnv.mpc[0].sdram_code.size)) {
+ mpc = &osalEnv.mpc[0];
+ } else if (startAddr >= (u32)osalEnv.mpc[1].sdram_code.data
+ && endAddr <= (u32)(osalEnv.mpc[1].sdram_code.data
+ + osalEnv.mpc[1].sdram_code.size)) {
+ mpc = &osalEnv.mpc[1];
+ } else {
+ /* The code may be in esram, in that case, nothing to do */
+ return;
+ }
+
+ region.offset = startAddr - (u32)mpc->sdram_code.data;
+ region.count = 1;
+ region.start = 0;
+ region.end = size;
+ region.size = size;
+ hwmem_set_domain(mpc->hwmem_code, HWMEM_ACCESS_READ,
+ HWMEM_DOMAIN_SYNC, &region);
+ /*
+ * The hwmem keep track of region being sync or not.
+ * Mark the region as being write-accessed here right now
+ * to let following clean being done as expected. Today,
+ * there is no other place to do that in CM Core right now
+ */
+ hwmem_set_domain(mpc->hwmem_code, HWMEM_ACCESS_WRITE,
+ HWMEM_DOMAIN_CPU, &region);
+#else
+ dsb();
+ outer_cache.sync();
+#endif
+}
+
+/*!
+ * \brief Flush write-buffer of L2 cache
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+void OSAL_mb(void)
+{
+ mb();
+}
+
+/*!
+ * \brief return prcmu timer value.
+ *
+ * This is need for perfmeter api (see \ref t_nmf_power_resource)
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+t_uint64 OSAL_GetPrcmuTimer()
+{
+ t_uint64 msbBefore;
+ t_uint32 lsb;
+ t_uint64 msbAfter;
+
+ /* read prcmu timers */
+ msbBefore = ~ioread32(prcmu_tcdm_base+0xDE4);
+ lsb = ~ioread32(prcmu_base+0x454);
+ msbAfter = ~ioread32(prcmu_tcdm_base+0xDE4);
+
+ /* handle rollover test case */
+ // NOTE : there is still a window in prcmu side between counter rollover
+ // and prcmu interrupt handling
+ // to update msb register => this can lead to erroneous value return here
+ if (msbBefore == msbAfter || lsb >= 0x80000000UL)
+ return (((msbBefore & 0xffffffUL) << 32) + lsb);
+ else
+ return (((msbAfter & 0xffffffUL) << 32) + lsb);
+}
+
+/*!
+ * \brief Disable the service message handling (panic, etc)
+ *
+ * It must disable the handling of all service messages
+ * If a service message is currently being handled, it must wait until the end
+ * of its handling before returning.
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_DisableServiceMessages(void) {
+ tasklet_disable(&cmld_service_tasklet);
+}
+
+/*!
+ * \brief Enable the service message handling (panic, etc)
+ *
+ * It enables the handling of all service messages
+ *
+ * \ingroup CM_ENGINE_OSAL_API
+ */
+PUBLIC void OSAL_EnableServiceMessages(void) {
+ tasklet_enable(&cmld_service_tasklet);
+}
diff --git a/drivers/staging/nmf-cm/osal-kernel.h b/drivers/staging/nmf-cm/osal-kernel.h
new file mode 100644
index 00000000000..29b82368d8d
--- /dev/null
+++ b/drivers/staging/nmf-cm/osal-kernel.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef OSAL_KERNEL_H
+#define OSAL_KERNEL_H
+
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/hwmem.h>
+#include <linux/regulator/consumer.h>
+#include <linux/plist.h>
+#include <linux/version.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#include <linux/mfd/dbx500-prcmu.h>
+#include <cm/engine/api/channel_engine.h>
+#include <cm/engine/api/control/configuration_engine.h>
+#include <cm/engine/api/perfmeter_engine.h>
+/*
+ * Do not include ELF definition from cm/engine/elf/inc/elfapi.h file
+ * because it conflicts with definition from linux/elf.h file
+ */
+#define _CM_ELF_H
+#include <cm/engine/os_adaptation_layer/inc/os_adaptation_layer.h>
+
+#include "configuration.h"
+
+/*
+ * Per-MPC configuration structure
+ * Use struct debugfs_blob_wrapper to store pointer and size of section
+ * to allow easy re-use of data through debugfs
+ */
+struct mpcConfig {
+ const t_nmf_core_id coreId; /**< MPC coreId */
+ const char *name; /**< MPC name */
+ t_uint8 nbYramBanks; /**< number of TCM ram banks to reserve for y memory */
+ t_nmf_executive_engine_id eeId; /**< Type of Executive Engine */
+ const void *base_phys; /**< Physical base address of the MPC */
+ struct debugfs_blob_wrapper base;/**< Remapped base address of the MPC and size of TCM24 */
+ struct hwmem_alloc *hwmem_code; /**< hwmem code segment */
+ u32 sdram_code_phys; /**< Physical base address for MPC SDRAM Code region */
+ struct debugfs_blob_wrapper sdram_code; /**< Remapped base address and size for MPC SDRAM Code */
+ struct hwmem_alloc *hwmem_data; /**< hwmem data segment */
+ u32 sdram_data_phys; /**< Physical base address for MPC SDRAM Data region */
+ struct debugfs_blob_wrapper sdram_data; /**< Remapped base address and size for MPC SDRAM Data */
+ const unsigned int interrupt0; /**< interrupt line triggered by the MPC, for MPC events (if HSEM not used) */
+ const unsigned int interrupt1; /**< interrupt line triggered by the MPC, for PANIC events */
+ struct tasklet_struct tasklet; /**< tasklet used to process MPC events */
+ struct regulator *mmdsp_regulator; /**< mmdsp regulator linked to this MPC */
+ struct regulator *pipe_regulator; /**< hardware pipe linked to this MPC */
+#ifdef CONFIG_HAS_WAKELOCK
+ struct wake_lock wakelock; /**< wakelock for this MPC to prevent the ARM from going into APSLEEP state */
+#endif
+ struct task_struct *monitor_tsk; /**< task to monitor the dsp load; */
+ t_cm_mpc_load_counter oldLoadCounter; /**< previous load counter of the DSP */
+ atomic_t trace_read_count; /**< number of trace reader */
+ spinlock_t trace_reader_lock;
+ struct task_struct *trace_reader;/**< current reader task */
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dir; /**< debugfs dir entry */
+ struct dentry *comp_dir; /**< debugfs component dir entry */
+ struct dentry *domain_dir; /**< debugfs domain dir entry */
+ struct dentry *snapshot_dir; /**< debugfs snapshot dir entry */
+ struct dentry *mem_file; /**< debugfs meminfo file entry */
+ struct dentry *tcm_file; /**< debugfs tcm file entry */
+ struct dentry *esram_file; /**< debugfs esram file entry */
+ s8 load; /**< current load of the DSP */
+ s8 opp_request; /**< current requested opp of the DSP */
+#endif
+};
+
+/** Describes the current Kernel OSAL environment
+ *
+ * Note about mpc.tasklet: we declare one tasklet per MPC, but their usage
+ * depends on cfgSemaphoreTypeHSEM.
+ *
+ * The tasklet is scheduled by the interrupt handler to process MPC events.
+ * - If we use the Hardware Semaphore, there is only one interrupt handler
+ *   and thus only one tasklet, the tasklet of MPC 0 (i.e. osalEnv.mpc[0].tasklet)
+ * - If we use local semaphores, there is one interrupt handler and one tasklet per MPC
+ */
+struct OsalEnvironment
+{
+ struct mpcConfig mpc[NB_MPC];
+ void* hwsem_base; /**< Remapped base address of the hardware semaphores */
+ void* esram_base; /**< Remapped base address of the embedded RAM used within the CM */
+ struct regulator *esram_regulator[NB_ESRAM]; /**< regulator for ESRAM bank 1+2 and 3+4 */
+ struct prcmu_auto_pm_config dsp_sleep;
+ struct prcmu_auto_pm_config dsp_idle;
+};
+
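The tasklet note above boils down to a small dispatch rule in the interrupt handler. A minimal sketch, assuming the cfgSemaphoreTypeHSEM flag mentioned in the comment is visible here; the handler name is illustrative and not part of this patch:

/* Hypothetical sketch: picking the tasklet to schedule for an MPC event. */
static irqreturn_t example_mpc_irq_handler(int irq, void *dev_id)
{
	struct mpcConfig *mpc = dev_id;

	if (cfgSemaphoreTypeHSEM)
		/* Single shared handler: always the tasklet of MPC 0 */
		tasklet_schedule(&osalEnv.mpc[0].tasklet);
	else
		/* One handler per MPC: this MPC's own tasklet */
		tasklet_schedule(&mpc->tasklet);

	return IRQ_HANDLED;
}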
+
+/** Structure used to store the skeleton-related data.
+ * It is used for communication from an MPC to a user process (= host)
+ */
+typedef struct {
+ struct list_head entry; /**< Doubly linked list descriptor */
+ t_cm_bf_mpc2host_handle mpc2hostId; /**< mpc2host ID */
+ t_nmf_mpc2host_handle upperLayerThis;/**< upper-layer handle */
+ struct cm_channel_priv* channelPriv; /**< Per-channel private data. The actual message queue is held here */
+} t_skelwrapper;
+
+/** Message description for MPC to HOST communication
+ */
+struct osal_msg {
+ struct {
+ struct plist_node entry; /**< Doubly linked list descriptor */
+ t_message_type type; /**< Type of message (callback, service or interrupt for now) */
+ } hdr; /**< Header of the message */
+#define msg_entry hdr.entry
+#define msg_type hdr.type
+ union {
+ struct {
+ t_skelwrapper *skelwrap; /**< Link to the skelwrapper, to retrieve the channel on which this message has to be forwarded */
+ t_uint32 methodIdx; /**< callback data: method index*/
+ t_event_params_handle anyPtr; /**< callback data: method parameters */
+ t_uint32 ptrSize; /**< size of the parameters */
+ } itf; /**< structure holding callback data */
+ struct {
+ t_nmf_service_type srvType; /**< Type of the service */
+ t_nmf_service_data srvData; /**< Data of the service */
+ } srv; /**< structure holding service data */
+ } d; /**< data */
+};
+
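Since the message header embeds a struct plist_node, messages can be queued by priority with the standard plist helpers. A minimal sketch, assuming the per-channel queue in cm_channel_priv is a plist_head; the queue parameter below is illustrative:

/* Hypothetical sketch: queueing an osal_msg by priority. */
static void example_queue_msg(struct plist_head *queue,
			      struct osal_msg *msg, int prio)
{
	plist_node_init(&msg->msg_entry, prio);	/* msg_entry aliases hdr.entry */
	plist_add(&msg->msg_entry, queue);	/* lowest prio value comes out first */
}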
+extern struct OsalEnvironment osalEnv;
+
+/** Environment initialization/deinitialization */
+int remapRegions(void);
+void unmapRegions(void);
+
+/** Component manager configuration getters for CM_ENGINE_Init() */
+int getNmfHwMappingDesc(t_nmf_hw_mapping_desc* nmfHwMappingDesc);
+
+/** Component manager configuration getters for CM_ConfigureMediaProcessorCore (SVA and SIA) */
+void getMpcSystemAddress(unsigned i, t_cm_system_address* mpcSystemAddress);
+void getMpcSdramSegments(unsigned i, t_nmf_memory_segment* codeSegment, t_nmf_memory_segment* dataSegment);
+
+#ifdef CM_DEBUG_ALLOC
+struct cm_alloc {
+ spinlock_t lock;
+ struct list_head chain;
+};
+
+struct cm_alloc_elem {
+ struct list_head elem;
+ void *caller;
+ size_t size;
+ char addr[0];
+};
+
+void init_debug_alloc(void);
+void cleanup_debug_alloc(void);
+#endif /* CM_DEBUG_ALLOC */
+
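With CM_DEBUG_ALLOC, each allocation can carry a cm_alloc_elem header chained on the tracker list, which is what the flexible addr[0] member enables. A minimal sketch of such a tracking allocator, assuming <linux/slab.h>; the function name is illustrative, not the driver's actual implementation:

/* Hypothetical sketch: tracked allocation built on cm_alloc/cm_alloc_elem. */
static void *example_tracked_alloc(struct cm_alloc *tracker, size_t size)
{
	struct cm_alloc_elem *e = kmalloc(sizeof(*e) + size, GFP_KERNEL);

	if (e == NULL)
		return NULL;
	e->caller = __builtin_return_address(0);
	e->size = size;
	spin_lock(&tracker->lock);
	list_add(&e->elem, &tracker->chain);
	spin_unlock(&tracker->lock);
	return e->addr;	/* payload starts right after the header */
}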
+/* TODO: To remove later */
+extern __iomem void *prcmu_base;
+extern __iomem void *prcmu_tcdm_base;
+extern const char *cmld_devname[];
+
+#define PRCM_SVAMMDSPCLK_MGT (prcmu_base + 0x008)
+#define PRCM_SIAMMDSPCLK_MGT (prcmu_base + 0x00c)
+
+#endif /* OSAL_KERNEL_H */
diff --git a/drivers/staging/nmf-cm/share/communication/inc/communication_fifo.h b/drivers/staging/nmf-cm/share/communication/inc/communication_fifo.h
new file mode 100644
index 00000000000..ea24e82ceae
--- /dev/null
+++ b/drivers/staging/nmf-cm/share/communication/inc/communication_fifo.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+
+#ifndef __INC_NMF_COM_FIFO
+#define __INC_NMF_COM_FIFO
+
+#include <inc/typedef.h>
+
+#define EVENT_ELEM_METHOD_IDX 0
+#define EVENT_ELEM_PARAM_IDX 1
+#define EVENT_ELEM_EXTFIELD_IDX 2
+
+#define EVENT_ELEM_SIZE_IN_BYTE (3 * sizeof(t_shared_field))
+
+#endif /* __INC_NMF_COM_FIFO */
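The three indexes above describe one FIFO event element of EVENT_ELEM_SIZE_IN_BYTE bytes. A minimal illustrative sketch of filling such an element; the helper name is an assumption:

/* Hypothetical sketch: filling one communication FIFO event element. */
static void example_fill_event_elem(t_shared_field *elem,
				    t_shared_field methodIdx,
				    t_shared_field param,
				    t_shared_field extField)
{
	elem[EVENT_ELEM_METHOD_IDX]   = methodIdx;
	elem[EVENT_ELEM_PARAM_IDX]    = param;
	elem[EVENT_ELEM_EXTFIELD_IDX] = extField;
}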
diff --git a/drivers/staging/nmf-cm/share/communication/inc/initializer.h b/drivers/staging/nmf-cm/share/communication/inc/initializer.h
new file mode 100644
index 00000000000..10985c3981a
--- /dev/null
+++ b/drivers/staging/nmf-cm/share/communication/inc/initializer.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+
+#ifndef __INC_SHARE_INITIALIZER
+#define __INC_SHARE_INITIALIZER
+
+#define NMF_CONSTRUCT_INDEX 0
+#define NMF_START_INDEX 1
+#define NMF_STOP_INDEX 2
+#define NMF_DESTROY_INDEX 3
+#define NMF_UPDATE_STACK 4
+#define NMF_LOCK_CACHE 5
+#define NMF_UNLOCK_CACHE 6
+#define NMF_ULP_FORCEWAKEUP 7
+#define NMF_ULP_ALLOWSLEEP 8
+#define NMF_CONSTRUCT_SYNC_INDEX 9
+#define NMF_START_SYNC_INDEX 10
+#define NMF_STOP_SYNC_INDEX 11
+
+/*
+ * Index of data in the command parameter format
+ */
+#define INIT_COMPONENT_CMD_HANDLE_INDEX 0
+#define INIT_COMPONENT_CMD_THIS_INDEX 2
+#define INIT_COMPONENT_CMD_METHOD_INDEX 4
+#define INIT_COMPONENT_CMD_SIZE 6
+
+/*
+ * Index of data in the acknowledge parameter format
+ */
+#define INIT_COMPONENT_ACK_HANDLE_INDEX 0
+#define INIT_COMPONENT_ACK_SIZE 2
+
+#endif /* __INC_SHARE_INITIALIZER */
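The command indexes advance by two, which suggests each 32-bit parameter occupies two 16-bit shared fields. A minimal sketch of packing a handle under that reading; the halfword order is an assumption, not taken from this patch:

/* Hypothetical sketch: packing a 32-bit value into two 16-bit shared fields,
 * low halfword first (assumption).
 */
static void example_pack32(t_uint16 *params, unsigned int idx, t_uint32 value)
{
	params[idx]     = (t_uint16)(value & 0xFFFFU);
	params[idx + 1] = (t_uint16)(value >> 16);
}
/* e.g. example_pack32(params, INIT_COMPONENT_CMD_HANDLE_INDEX, handle); */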
diff --git a/drivers/staging/nmf-cm/share/communication/inc/nmf_fifo_desc.h b/drivers/staging/nmf-cm/share/communication/inc/nmf_fifo_desc.h
new file mode 100644
index 00000000000..99caa48b05c
--- /dev/null
+++ b/drivers/staging/nmf-cm/share/communication/inc/nmf_fifo_desc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+
+#ifndef __INC_NMF_FIFO_DESC
+#define __INC_NMF_FIFO_DESC
+
+#include <inc/typedef.h>
+#include <share/semaphores/inc/semaphores.h>
+
+/*
+ * SHOULD be mapped onto an AHB burst (16 bytes = 8 x 16-bit)
+ */
+typedef struct {
+ t_semaphore_id semId;
+
+ t_uint16 elemSize;
+ t_uint16 fifoFullValue;
+ t_uint16 readIndex;
+ t_uint16 writeIndex;
+ t_uint16 wrappingValue;
+
+ t_uint32 extendedField; /* in DSP24 memory when bound to an MPC, in Logical Host memory when bound to the ARM */
+} t_nmf_fifo_desc;
+
+#define EXTENDED_FIELD_BCTHIS_OR_TOP 0 //!< This field will be used:
+ //!< - as hostBCThis for DSP->HOST binding
+ //!< - as TOP else
+#define EXTENDED_FIELD_BCDESC 1 //!< This field will be used for:
+ //!< - the interface method address for ->MPC binding
+ //!< - the params size for ->Host binding (today only [0] is used as max size)
+
+#endif /* __INC_NMF_FIFO_DESC */
diff --git a/drivers/staging/nmf-cm/share/communication/inc/nmf_service.h b/drivers/staging/nmf-cm/share/communication/inc/nmf_service.h
new file mode 100644
index 00000000000..71dfc534f97
--- /dev/null
+++ b/drivers/staging/nmf-cm/share/communication/inc/nmf_service.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+
+#ifndef __INC_NMF_SERVICE_H
+#define __INC_NMF_SERVICE_H
+
+/* 1 - 0xff Reserved for Panic Reason */
+#define MPC_SERVICE_NONE 0
+#define MPC_SERVICE_BOOT 0xB001
+#define MPC_SERVICE_PRINT 0x1234
+#define MPC_SERVICE_TRACE 0x789
+
+#endif
diff --git a/drivers/staging/nmf-cm/share/inc/macros.h b/drivers/staging/nmf-cm/share/inc/macros.h
new file mode 100644
index 00000000000..7d2c2289cd3
--- /dev/null
+++ b/drivers/staging/nmf-cm/share/inc/macros.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief NMF Macro API.
+ */
+
+#ifndef _COMMON_MACROS_H_
+#define _COMMON_MACROS_H_
+
+#undef ALIGN_VALUE
+#define ALIGN_VALUE(value, alignment) (((value) + ((alignment) - 1)) & ~((alignment) - 1))
+
+#undef MIN
+#define MIN(a,b) (((a)>(b))?(b):(a))
+
+#undef MAX
+#define MAX(a,b) (((a)<(b))?(b):(a))
+
+/*-----------------------------------------------------------------------------
+ * endianness switch macros (32-bit and 16-bit)
+ *---------------------------------------------------------------------------*/
+#define ENDIANESS_32_SWITCH(value) ( \
+ (((value) & MASK_BYTE3) >> SHIFT_BYTE3) | \
+ (((value) & MASK_BYTE2) >> SHIFT_BYTE1) | \
+ (((value) & MASK_BYTE1) << SHIFT_BYTE1) | \
+ (((value) & MASK_BYTE0) << SHIFT_BYTE3) \
+ )
+
+#define ENDIANESS_16_SWITCH(value) ( \
+ (((value) & MASK_BYTE0) << SHIFT_BYTE1) | \
+ (((value) & MASK_BYTE1) >> SHIFT_BYTE1) \
+ )
+
+/*-----------------------------------------------------------------------------
+ * field offset extraction from a structure
+ *---------------------------------------------------------------------------*/
+#undef FIELD_OFFSET
+#define FIELD_OFFSET(typeName, fieldName) ((t_uint32)(&(((typeName *)0)->fieldName)))
+
+#undef MASK_BIT
+#define MASK_BIT(n) (1UL << ((n) - 1))
+
+/*-----------------------------------------------------------------------------
+ * Misc definition
+ *---------------------------------------------------------------------------*/
+
+#undef ONE_KB
+#define ONE_KB (1024)
+#undef ONE_MB
+#define ONE_MB (ONE_KB * ONE_KB)
+
+/*-----------------------------------------------------------------------------
+ * Bit mask definition
+ *---------------------------------------------------------------------------*/
+#undef MASK_NULL8
+#define MASK_NULL8 0x00U
+#undef MASK_NULL16
+#define MASK_NULL16 0x0000U
+#undef MASK_NULL32
+#define MASK_NULL32 0x00000000UL
+#undef MASK_ALL8
+#define MASK_ALL8 0xFFU
+#undef MASK_ALL16
+#define MASK_ALL16 0xFFFFU
+#undef MASK_ALL32
+#define MASK_ALL32 0xFFFFFFFFUL
+
+#undef MASK_BIT0
+#define MASK_BIT0 (1UL<<0)
+#undef MASK_BIT1
+#define MASK_BIT1 (1UL<<1)
+#undef MASK_BIT2
+#define MASK_BIT2 (1UL<<2)
+#undef MASK_BIT3
+#define MASK_BIT3 (1UL<<3)
+#undef MASK_BIT4
+#define MASK_BIT4 (1UL<<4)
+#undef MASK_BIT5
+#define MASK_BIT5 (1UL<<5)
+#undef MASK_BIT6
+#define MASK_BIT6 (1UL<<6)
+#undef MASK_BIT7
+#define MASK_BIT7 (1UL<<7)
+#undef MASK_BIT8
+#define MASK_BIT8 (1UL<<8)
+#undef MASK_BIT9
+#define MASK_BIT9 (1UL<<9)
+#undef MASK_BIT10
+#define MASK_BIT10 (1UL<<10)
+#undef MASK_BIT11
+#define MASK_BIT11 (1UL<<11)
+#undef MASK_BIT12
+#define MASK_BIT12 (1UL<<12)
+#undef MASK_BIT13
+#define MASK_BIT13 (1UL<<13)
+#undef MASK_BIT14
+#define MASK_BIT14 (1UL<<14)
+#undef MASK_BIT15
+#define MASK_BIT15 (1UL<<15)
+#undef MASK_BIT16
+#define MASK_BIT16 (1UL<<16)
+#undef MASK_BIT17
+#define MASK_BIT17 (1UL<<17)
+#undef MASK_BIT18
+#define MASK_BIT18 (1UL<<18)
+#undef MASK_BIT19
+#define MASK_BIT19 (1UL<<19)
+#undef MASK_BIT20
+#define MASK_BIT20 (1UL<<20)
+#undef MASK_BIT21
+#define MASK_BIT21 (1UL<<21)
+#undef MASK_BIT22
+#define MASK_BIT22 (1UL<<22)
+#undef MASK_BIT23
+#define MASK_BIT23 (1UL<<23)
+#undef MASK_BIT24
+#define MASK_BIT24 (1UL<<24)
+#undef MASK_BIT25
+#define MASK_BIT25 (1UL<<25)
+#undef MASK_BIT26
+#define MASK_BIT26 (1UL<<26)
+#undef MASK_BIT27
+#define MASK_BIT27 (1UL<<27)
+#undef MASK_BIT28
+#define MASK_BIT28 (1UL<<28)
+#undef MASK_BIT29
+#define MASK_BIT29 (1UL<<29)
+#undef MASK_BIT30
+#define MASK_BIT30 (1UL<<30)
+#undef MASK_BIT31
+#define MASK_BIT31 (1UL<<31)
+
+/*-----------------------------------------------------------------------------
+ * quartet shift definition
+ *---------------------------------------------------------------------------*/
+#undef MASK_QUARTET
+#define MASK_QUARTET (0xFUL)
+#undef SHIFT_QUARTET0
+#define SHIFT_QUARTET0 0
+#undef SHIFT_QUARTET1
+#define SHIFT_QUARTET1 4
+#undef SHIFT_QUARTET2
+#define SHIFT_QUARTET2 8
+#undef SHIFT_QUARTET3
+#define SHIFT_QUARTET3 12
+#undef SHIFT_QUARTET4
+#define SHIFT_QUARTET4 16
+#undef SHIFT_QUARTET5
+#define SHIFT_QUARTET5 20
+#undef SHIFT_QUARTET6
+#define SHIFT_QUARTET6 24
+#undef SHIFT_QUARTET7
+#define SHIFT_QUARTET7 28
+#undef MASK_QUARTET0
+#define MASK_QUARTET0 (MASK_QUARTET << SHIFT_QUARTET0)
+#undef MASK_QUARTET1
+#define MASK_QUARTET1 (MASK_QUARTET << SHIFT_QUARTET1)
+#undef MASK_QUARTET2
+#define MASK_QUARTET2 (MASK_QUARTET << SHIFT_QUARTET2)
+#undef MASK_QUARTET3
+#define MASK_QUARTET3 (MASK_QUARTET << SHIFT_QUARTET3)
+#undef MASK_QUARTET4
+#define MASK_QUARTET4 (MASK_QUARTET << SHIFT_QUARTET4)
+#undef MASK_QUARTET5
+#define MASK_QUARTET5 (MASK_QUARTET << SHIFT_QUARTET5)
+#undef MASK_QUARTET6
+#define MASK_QUARTET6 (MASK_QUARTET << SHIFT_QUARTET6)
+#undef MASK_QUARTET7
+#define MASK_QUARTET7 (MASK_QUARTET << SHIFT_QUARTET7)
+
+/*-----------------------------------------------------------------------------
+ * Byte shift definition
+ *---------------------------------------------------------------------------*/
+#undef MASK_BYTE
+#define MASK_BYTE (0xFFUL)
+#undef SHIFT_BYTE0
+#define SHIFT_BYTE0 0U
+#undef SHIFT_BYTE1
+#define SHIFT_BYTE1 8U
+#undef SHIFT_BYTE2
+#define SHIFT_BYTE2 16U
+#undef SHIFT_BYTE3
+#define SHIFT_BYTE3 24U
+#undef MASK_BYTE0
+#define MASK_BYTE0 (MASK_BYTE << SHIFT_BYTE0)
+#undef MASK_BYTE1
+#define MASK_BYTE1 (MASK_BYTE << SHIFT_BYTE1)
+#undef MASK_BYTE2
+#define MASK_BYTE2 (MASK_BYTE << SHIFT_BYTE2)
+#undef MASK_BYTE3
+#define MASK_BYTE3 (MASK_BYTE << SHIFT_BYTE3)
+
+/*-----------------------------------------------------------------------------
+ * Halfword shift definition
+ *---------------------------------------------------------------------------*/
+#undef MASK_HALFWORD
+#define MASK_HALFWORD (0xFFFFUL)
+#undef SHIFT_HALFWORD0
+#define SHIFT_HALFWORD0 0U
+#undef SHIFT_HALFWORD1
+#define SHIFT_HALFWORD1 16U
+#undef MASK_HALFWORD0
+#define MASK_HALFWORD0 (MASK_HALFWORD << SHIFT_HALFWORD0)
+#undef MASK_HALFWORD1
+#define MASK_HALFWORD1 (MASK_HALFWORD << SHIFT_HALFWORD1)
+
+#endif /* _COMMON_MACROS_H_ */
+
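The alignment, byte-swap and bit-mask macros are pure arithmetic, so a few compile-time checks make their behaviour explicit. An illustrative sketch, assuming BUILD_BUG_ON from <linux/bug.h>; the values are examples only:

/* Illustrative compile-time checks for the macros above. */
static inline void example_macro_checks(void)
{
	/* ALIGN_VALUE rounds up to a power-of-two multiple: 100 -> 112. */
	BUILD_BUG_ON(ALIGN_VALUE(100, 16) != 112);
	/* ENDIANESS_32_SWITCH reverses the four bytes of a word. */
	BUILD_BUG_ON(ENDIANESS_32_SWITCH(0x12345678UL) != 0x78563412UL);
	/* MASK_BIT() is 1-based: MASK_BIT(1) is the least significant bit. */
	BUILD_BUG_ON(MASK_BIT(1) != 1UL);
}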
diff --git a/drivers/staging/nmf-cm/share/inc/nmf.h b/drivers/staging/nmf-cm/share/inc/nmf.h
new file mode 100644
index 00000000000..2f73311c2f3
--- /dev/null
+++ b/drivers/staging/nmf-cm/share/inc/nmf.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Jean-Philippe FASSINO <jean-philippe.fassino@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2, with
+ * user space exemption described in the top-level COPYING file in
+ * the Linux kernel source tree.
+ */
+/*!
+ * \brief Common Nomadik Multiprocessing Framework type definition
+ *
+ * This file contains the shared type definitions used within NMF.
+ */
+
+#ifndef __INC_NMF_H
+#define __INC_NMF_H
+
+#include <inc/typedef.h>
+
+/*!
+ * \brief Identification of the various cores (host CPU and Media Processors) in the Nomadik Platform
+ * In order to improve performance, these IDs are the ones used to interconnect the HW Semaphores IP with the cores (interrupt lines)
+ * \ingroup NMF_COMMON
+ */
+#if defined(__STN_8500)
+ //#warning "TODO : mapping below is not correct, need to think how to change it"
+#endif
+typedef t_uint8 t_nmf_core_id;
+#define ARM_CORE_ID ((t_nmf_core_id)0) //!< HOST CPU Id
+#define SVA_CORE_ID ((t_nmf_core_id)1) //!< Smart Video Accelerator Media Processor Core Id
+#define SIA_CORE_ID ((t_nmf_core_id)2) //!< Smart Imaging Accelerator Media Processor Core Id
+#define NB_CORE_IDS ((t_nmf_core_id)3)
+
+#define FIRST_CORE_ID ((t_nmf_core_id)ARM_CORE_ID)
+#define FIRST_MPC_ID ((t_nmf_core_id)SVA_CORE_ID)
+#define LAST_CORE_ID ((t_nmf_core_id)SIA_CORE_ID)
+#define LAST_MPC_ID ((t_nmf_core_id)SIA_CORE_ID)
+
+
+/*!
+ * \brief Define the minimal stack size used by the execution engine
+ */
+#define MIN_STACK_SIZE 128
+
+
+
+#endif /* __INC_NMF_H */
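Because the core IDs are contiguous, walking the media processors is a simple range loop. A minimal illustrative sketch; the helper is an assumption, not part of this patch:

/* Illustrative sketch: visiting each MPC core id (SVA, then SIA). */
static void example_for_each_mpc(void (*visit)(t_nmf_core_id coreId))
{
	t_nmf_core_id coreId;

	for (coreId = FIRST_MPC_ID; coreId <= LAST_MPC_ID; coreId++)
		visit(coreId);
}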
diff --git a/drivers/staging/nmf-cm/share/inc/nomadik_mapping.h b/drivers/staging/nmf-cm/share/inc/nomadik_mapping.h
new file mode 100644
index 00000000000..bec221aa111
--- /dev/null
+++ b/drivers/staging/nmf-cm/share/inc/nomadik_mapping.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+
+#ifndef __INC_NOMADIK_MAPPING_H
+#define __INC_NOMADIK_MAPPING_H
+
+/*--------------------------------------------------------------------------*/
+#if defined(__STN_8810)
+
+/* XTI (CPU OSMO/OSMOT address space) */
+#define XTI_CPU_BASE_ADDR 0x10000000
+#define XTI_CPU_END_ADDR 0x100FFFFF
+
+/* XTI configuration registers */
+#define XTI_CFG_REG_BASE_ADDR 0x101A0000
+#define XTI_CFG_REG_END_ADDR 0x101AFFFF
+
+/* Core APB Peripherals */
+#define CORE_APB_BASE_ADDR 0x101E0000
+#define CORE_APB_END_ADDR 0x101EFFFF
+
+/* DMA APB Peripherals */
+#define DMA_APB_BASE_ADDR 0x101F0000
+#define DMA_APB_END_ADDR 0x101FFFFF
+
+/* XTI (DSP OSMO/OSMOT address space) */
+#define XTI_DSP_BASE_ADDR 0x10200000
+#define XTI_DSP_END_ADDR 0x1020FFFF
+
+#endif /* defined(__STN_8810) */
+
+/*--------------------------------------------------------------------------*/
+#if defined(__STN_8815)
+
+/* XTI (CPU OSMO/OSMOT address space) */
+#define XTI_CPU_BASE_ADDR 0x10000000
+#define XTI_CPU_END_ADDR 0x100FFFFF
+
+/* XTI configuration registers */
+#define XTI_CFG_REG_BASE_ADDR 0x101A0000
+#define XTI_CFG_REG_END_ADDR 0x101AFFFF
+
+/* Core APB Peripherals */
+#define CORE_APB_BASE_ADDR 0x101E0000
+#define CORE_APB_END_ADDR 0x101EFFFF
+
+/* DMA APB Peripherals */
+#define DMA_APB_BASE_ADDR 0x101F0000
+#define DMA_APB_END_ADDR 0x101FFFFF
+
+/* XTI (DSP OSMO/OSMOT address space) */
+#define XTI_DSP_BASE_ADDR 0x10220000
+#define XTI_DSP_END_ADDR 0x1022FFFF
+
+#endif /* defined(__STN_8815) */
+
+
+/*--------------------------------------------------------------------------*/
+#if defined(__STN_8820)
+
+/* STM (System Trace Module address space) */
+#define STM_BASE_ADDR 0x700F0000
+#define STM_END_ADDR 0x700FFFFF
+
+/* AHB2 Peripherals */
+#define AHB2_PERIPH_BASE_ADDR 0x70100000
+#define AHB2_PERIPH_END_ADDR 0x7010FFFF
+
+/* APB2 Peripherals */
+#define APB2_PERIPH_BASE_ADDR 0x70110000
+#define APB2_PERIPH_END_ADDR 0x7011FFFF
+
+/* APB1 Peripherals */
+#define APB1_PERIPH_BASE_ADDR 0x70120000
+#define APB1_PERIPH_END_ADDR 0x7012FFFF
+
+#endif /* defined(__STN_8820) */
+
+/*--------------------------------------------------------------------------*/
+#if defined(__STN_8500)
+/* STM (System Trace Module address space) */
+#define STM_BASE_ADDR 0x80100000
+#define STM_END_ADDR 0x8010FFFF
+
+#define HSEM_BASE_ADDR 0x80140000
+#define HSEM_END_ADDR 0x8014FFFF
+
+#define DMA_CTRL_BASE_ADDR 0x801C0000
+#define DMA_CTRL_END_ADDR 0x801C0FFF
+
+
+#endif /* defined(__STN_8500) */
+
+#endif /*__INC_NOMADIK_MAPPING_H */
diff --git a/drivers/staging/nmf-cm/share/semaphores/inc/hwsem_hwp.h b/drivers/staging/nmf-cm/share/semaphores/inc/hwsem_hwp.h
new file mode 100644
index 00000000000..b573627beae
--- /dev/null
+++ b/drivers/staging/nmf-cm/share/semaphores/inc/hwsem_hwp.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+
+#ifndef __INC_HWSEM_HWP_H
+#define __INC_HWSEM_HWP_H
+
+#include <share/semaphores/inc/semaphores.h>
+
+#define CORE_ID_2_HW_CORE_ID(coreId) (1U << (coreId))
+
+/*
+ * Definition of the number of hw semaphores in the Nomadik IP
+ */
+#define NUM_HW_SEMAPHORES 32
+
+
+/*
+ * Definition of how HSEM IP interrupts are interconnected with cores
+ */
+typedef enum {
+ HSEM_FIRST_INTR = 0,
+ HSEM_INTRA = HSEM_FIRST_INTR,
+ HSEM_INTRB = 1,
+ HSEM_INTRC = 2,
+ HSEM_INTRD = 3,
+ HSEM_INTRE = 4,
+ HSEM_MAX_INTR
+} t_hw_semaphore_irq_id;
+
+/*
+ * Description of the registers of the HW Sem IP
+ */
+#define HSEM_INTRA_MASK (1<<(4+HSEM_INTRA))
+#define HSEM_INTRB_MASK (1<<(4+HSEM_INTRB))
+#define HSEM_INTRC_MASK (1<<(4+HSEM_INTRC))
+#define HSEM_INTRD_MASK (1<<(4+HSEM_INTRD))
+#define HSEM_INTRE_MASK (1<<(4+HSEM_INTRE))
+
+typedef struct {
+ t_shared_reg imsc;
+ t_shared_reg ris;
+ t_shared_reg mis;
+ t_shared_reg icr;
+} t_hsem_it_regs;
+
+typedef volatile struct {
+#if defined(__STN_8500)
+ t_shared_reg cr;
+ t_shared_reg dummy;
+#endif
+ t_shared_reg sem[NUM_HW_SEMAPHORES];
+#if defined(__STN_8820)
+ t_shared_reg RESERVED1[(0x90 - 0x80)>>2];
+#elif defined(__STN_8500)
+ t_shared_reg RESERVED1[(0x90 - 0x88)>>2];
+#else /* __STN_8820 or __STN_8500 -> _STN_8815 */
+ t_shared_reg RESERVED1[(0x90 - 0x40)>>2];
+#endif /* __STN_8820 or __STN_8500 -> _STN_8815 */
+ t_shared_reg icrall;
+ t_shared_reg RESERVED2[(0xa0 - 0x94)>>2];
+ t_hsem_it_regs it[HSEM_MAX_INTR];
+#if defined(__STN_8820) || defined(__STN_8500)
+ t_shared_reg RESERVED3[(0x100 - 0xf0)>>2];
+#else /* __STN_8820 or __STN_8500 -> _STN_8815 */
+ t_shared_reg RESERVED3[(0x100 - 0xe0)>>2];
+#endif /* __STN_8820 or __STN_8500 -> _STN_8815 */
+ t_shared_reg itcr;
+ t_shared_reg RESERVED4;
+ t_shared_reg itop;
+ t_shared_reg RESERVED5[(0xfe0 - 0x10c)>>2];
+ t_shared_reg pid0;
+ t_shared_reg pid1;
+ t_shared_reg pid2;
+ t_shared_reg pid3;
+ t_shared_reg pcid0;
+ t_shared_reg pcid1;
+ t_shared_reg pcid2;
+ t_shared_reg pcid3;
+} t_hw_semaphore_regs, *tp_hw_semaphore_regs;
+
+#endif /* __INC_HWSEM_HWP_H */
diff --git a/drivers/staging/nmf-cm/share/semaphores/inc/semaphores.h b/drivers/staging/nmf-cm/share/semaphores/inc/semaphores.h
new file mode 100644
index 00000000000..c72b64cd709
--- /dev/null
+++ b/drivers/staging/nmf-cm/share/semaphores/inc/semaphores.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ */
+
+#ifndef __INC_SHARED_SEMAPHORE_H
+#define __INC_SHARED_SEMAPHORE_H
+
+#include <share/inc/nmf.h>
+
+typedef t_uint16 t_semaphore_id;
+
+/*
+ * HW semaphore allocation
+ * -----------------------
+ * We want to optimize interrupt demultiplexing at the DSP interrupt handler level,
+ * so a good solution is to lay out the semaphores for each neighbor sequentially
+ *
+ * STn8500 :
+ * ---------
+ * ARM <- SVA COMS => 0
+ * ARM <- SIA COMS => 1
+ * SVA <- ARM COMS => 2
+ * SVA <- SIA COMS => 3
+ * SIA <- ARM COMS => 4
+ * SIA <- SVA COMS => 5
+ *
+ * The first neighbor is always the ARM, then the other ones (SVA, SIA)
+ */
+
+/*
+ * Local semaphore allocation
+ * -----------------------
+ * 0 : ARM <- DSP
+ * 1 : DSP <- ARM
+ */
+
+#define NB_USED_HSEM_PER_CORE (NB_CORE_IDS - 1)
+#define FIRST_NEIGHBOR_SEMID(coreId) ((coreId)*NB_USED_HSEM_PER_CORE)
+
+#endif /* __INC_SHARED_SEMAPHORE_H */
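FIRST_NEIGHBOR_SEMID reproduces the table above: with NB_CORE_IDS = 3, each core owns NB_USED_HSEM_PER_CORE = 2 semaphores starting at coreId * 2. A minimal illustrative sketch of the (receiver, sender) to semaphore mapping under the stated ordering (ARM first, then the remaining cores); the helper is an assumption, not part of this patch:

/* Hypothetical sketch: semId used for messages received by 'to', sent by 'from'.
 * Matches the STn8500 table: ARM<-SVA=0, ARM<-SIA=1, SVA<-ARM=2, SVA<-SIA=3,
 * SIA<-ARM=4, SIA<-SVA=5.
 */
static t_semaphore_id example_coms_semid(t_nmf_core_id to, t_nmf_core_id from)
{
	t_semaphore_id semId = FIRST_NEIGHBOR_SEMID(to);

	/* ARM is always the first neighbor; other cores follow in id order. */
	if (from > ARM_CORE_ID)
		semId += from - ((from > to) ? 1 : 0);

	return semId;
}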
diff --git a/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c b/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c
index a272e488e5b..6f9029d81ab 100644
--- a/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c
+++ b/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c
@@ -22,6 +22,7 @@ static struct synaptics_rmi4_platform_data rmi4_i2c_dev_platformdata = {
.irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
.x_flip = false,
.y_flip = true,
+ .regulator_en = true,
};
struct i2c_board_info __initdata mop500_i2c3_devices_u8500[] = {
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 11728a03f8a..fd7fed743f7 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -1,11 +1,10 @@
-/**
- *
+/*
* Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
* Copyright (c) 2007-2010, Synaptics Incorporated
*
* Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * Copyright 2010 (c) ST-Ericsson AB
+ * Copyright 2010 (c) ST-Ericsson SA
*/
/*
* This file is licensed under the GPL2 license.
@@ -27,6 +26,7 @@
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
@@ -36,8 +36,10 @@
/* TODO: for multiple device support will need a per-device mutex */
#define DRIVER_NAME "synaptics_rmi4_i2c"
+#define DELTA 8
#define MAX_ERROR_REPORT 6
-#define MAX_TOUCH_MAJOR 15
+#define TIMEOUT_PERIOD 1
+#define MAX_WIDTH_MAJOR 255
#define MAX_RETRY_COUNT 5
#define STD_QUERY_LEN 21
#define PAGE_LEN 2
@@ -45,6 +47,7 @@
#define BUF_LEN 37
#define QUERY_LEN 9
#define DATA_LEN 12
+#define RESUME_DELAY 100 /* msecs */
#define HAS_TAP 0x01
#define HAS_PALMDETECT 0x01
#define HAS_ROTATE 0x02
@@ -164,6 +167,8 @@ struct synaptics_rmi4_device_info {
* @regulator: pointer to the regulator structure
* @wait: wait queue structure variable
* @touch_stopped: flag to stop the thread function
+ * @enable: flag to enable/disable the driver event.
+ * @resume_wq_handler: work queue used to resume the device
*
* This structure gives the device data information.
*/
@@ -184,6 +189,8 @@ struct synaptics_rmi4_data {
struct regulator *regulator;
wait_queue_head_t wait;
bool touch_stopped;
+ bool enable;
+ struct work_struct resume_wq_handler;
};
/**
@@ -291,6 +298,133 @@ exit:
}
/**
+ * synaptics_rmi4_enable() - enable the touchpad driver event
+ * @pdata: pointer to synaptics_rmi4_data structure
+ *
+ * This function enables the touchpad driver events and returns an integer.
+ */
+static int synaptics_rmi4_enable(struct synaptics_rmi4_data *pdata)
+{
+ int retval;
+ unsigned char intr_status;
+
+ if (pdata->board->regulator_en)
+ regulator_enable(pdata->regulator);
+ enable_irq(pdata->board->irq_number);
+ pdata->touch_stopped = false;
+
+ msleep(RESUME_DELAY);
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ pdata->fn01_data_base_addr + 1,
+ &intr_status,
+ pdata->number_of_interrupt_register);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_i2c_byte_write(pdata,
+ pdata->fn01_ctrl_base_addr + 1,
+ (intr_status | TOUCHPAD_CTRL_INTR));
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+/**
+ * synaptics_rmi4_disable() - disable the touchpad driver event
+ * @pdata: pointer to synaptics_rmi4_data structure
+ *
+ * This function disables the touchpad driver events and returns an integer.
+ */
+
+static int synaptics_rmi4_disable(struct synaptics_rmi4_data *pdata)
+{
+ int retval;
+ unsigned char intr_status;
+
+ pdata->touch_stopped = true;
+ disable_irq(pdata->board->irq_number);
+
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ pdata->fn01_data_base_addr + 1,
+ &intr_status,
+ pdata->number_of_interrupt_register);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_i2c_byte_write(pdata,
+ pdata->fn01_ctrl_base_addr + 1,
+ (intr_status & ~TOUCHPAD_CTRL_INTR));
+ if (retval < 0)
+ return retval;
+ if (pdata->board->regulator_en)
+ regulator_disable(pdata->regulator);
+
+ return 0;
+}
+
+/**
+ * synaptics_rmi4_show_attr_enable() - show the touchpad enable value
+ * @dev: pointer to device data structure
+ * @attr: pointer to attribute structure
+ * @buf: pointer to character buffer
+ *
+ * This function shows the touchpad enable value and returns ssize_t.
+ */
+static ssize_t synaptics_rmi4_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *pdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", pdata->enable);
+}
+
+/**
+ * synaptics_rmi4_store_attr_enable() - store the touchpad enable value
+ * @dev: pointer to device data structure
+ * @attr: pointer to attribute structure
+ * @buf: pointer to character buffer
+ * @count: number of bytes in the buffer
+ *
+ * This function stores the touchpad enable value and returns ssize_t.
+ */
+static ssize_t synaptics_rmi4_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct synaptics_rmi4_data *pdata = dev_get_drvdata(dev);
+ unsigned long val;
+ int retval = 0;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (pdata->enable != val) {
+ pdata->enable = val ? true : false;
+ if (pdata->enable)
+ retval = synaptics_rmi4_enable(pdata);
+ else
+ retval = synaptics_rmi4_disable(pdata);
+
+ }
+ return ((retval < 0) ? retval : count);
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ synaptics_rmi4_show_attr_enable, synaptics_rmi4_store_attr_enable);
+
+static struct attribute *synaptics_rmi4_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group synaptics_rmi4_attr_group = {
+ .attrs = synaptics_rmi4_attrs,
+};
+
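The new enable attribute lets user space turn touch reporting on and off at run time. A hypothetical user-space sketch; the sysfs path depends on the i2c bus and address, so the one shown is illustrative:

/* Hypothetical user-space sketch: toggling the touchpad via the enable attribute. */
#include <fcntl.h>
#include <unistd.h>

static int example_set_touch_enable(const char *attr_path, int on)
{
	/* attr_path e.g. "/sys/bus/i2c/devices/3-004b/enable" (illustrative) */
	int fd = open(attr_path, O_WRONLY);
	char c = on ? '1' : '0';
	int ret = 0;

	if (fd < 0)
		return -1;
	if (write(fd, &c, 1) != 1)
		ret = -1;
	close(fd);
	return ret;
}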
+/**
* synpatics_rmi4_touchpad_report() - reports for the rmi4 touchpad device
* @pdata: pointer to synaptics_rmi4_data structure
* @rfi: pointer to synaptics_rmi4_fn structure
@@ -316,8 +450,9 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
unsigned char data[DATA_LEN];
int x[RMI4_NUMBER_OF_MAX_FINGERS];
int y[RMI4_NUMBER_OF_MAX_FINGERS];
- int wx[RMI4_NUMBER_OF_MAX_FINGERS];
- int wy[RMI4_NUMBER_OF_MAX_FINGERS];
+ int w[RMI4_NUMBER_OF_MAX_FINGERS];
+ static int prv_x[RMI4_NUMBER_OF_MAX_FINGERS];
+ static int prv_y[RMI4_NUMBER_OF_MAX_FINGERS];
struct i2c_client *client = pdata->i2c_client;
/* get 2D sensor finger data */
@@ -376,11 +511,7 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
y[touch_count] =
(data[1] << 4) |
((data[2] >> 4) & MASK_4BIT);
- wy[touch_count] =
- (data[3] >> 4) & MASK_4BIT;
- wx[touch_count] =
- (data[3] & MASK_4BIT);
-
+ w[touch_count] = data[3];
if (pdata->board->x_flip)
x[touch_count] =
pdata->sensor_max_x -
@@ -389,6 +520,25 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
y[touch_count] =
pdata->sensor_max_y -
y[touch_count];
+ if (x[touch_count] < 0)
+ x[touch_count] = 0;
+ else if (x[touch_count] >= pdata->sensor_max_x)
+ x[touch_count] =
+ pdata->sensor_max_x - 1;
+
+ if (y[touch_count] < 0)
+ y[touch_count] = 0;
+ else if (y[touch_count] >= pdata->sensor_max_y)
+ y[touch_count] =
+ pdata->sensor_max_y - 1;
+ }
+ if ((abs(x[finger] - prv_x[finger]) < DELTA) &&
+ (abs(y[finger] - prv_y[finger]) < DELTA)) {
+ x[finger] = prv_x[finger];
+ y[finger] = prv_y[finger];
+ } else {
+ prv_x[finger] = x[finger];
+ prv_y[finger] = y[finger];
}
/* number of active touch points */
touch_count++;
@@ -399,7 +549,9 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
if (touch_count) {
for (finger = 0; finger < touch_count; finger++) {
input_report_abs(pdata->input_dev, ABS_MT_TOUCH_MAJOR,
- max(wx[finger] , wy[finger]));
+ max(x[finger] , y[finger]));
+ input_report_abs(pdata->input_dev, ABS_MT_WIDTH_MAJOR,
+ w[finger]);
input_report_abs(pdata->input_dev, ABS_MT_POSITION_X,
x[finger]);
input_report_abs(pdata->input_dev, ABS_MT_POSITION_Y,
@@ -502,7 +654,7 @@ static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
touch_count = synaptics_rmi4_sensor_report(pdata);
if (touch_count)
wait_event_timeout(pdata->wait, pdata->touch_stopped,
- msecs_to_jiffies(1));
+ msecs_to_jiffies(TIMEOUT_PERIOD));
else
break;
} while (!pdata->touch_stopped);
@@ -881,9 +1033,27 @@ static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata)
}
/**
+ * synaptics_rmi4_resume_handler() - work queue handler for resuming the device
+ * @work: work_struct structure pointer
+ *
+ * This work queue handler is used to resume the device and returns none
+ */
+static void synaptics_rmi4_resume_handler(struct work_struct *work)
+{
+ struct synaptics_rmi4_data *prmi4_data = container_of(work,
+ struct synaptics_rmi4_data, resume_wq_handler);
+ struct i2c_client *client = prmi4_data->i2c_client;
+ int retval;
+
+ retval = synaptics_rmi4_enable(prmi4_data);
+ if (retval < 0)
+ dev_err(&client->dev, "%s: resume failed\n", __func__);
+}
+
+/**
+ * synaptics_rmi4_probe() - Initialize the i2c-client touchscreen driver
- * @i2c: i2c client structure pointer
- * @id:i2c device id pointer
+ * @client: i2c client structure pointer
+ * @dev_id: i2c device id pointer
*
* This function will allocate and initialize the instance
* data and request the irq and set the instance data as the clients
@@ -927,19 +1097,17 @@ static int __devinit synaptics_rmi4_probe
goto err_input;
}
- rmi4_data->regulator = regulator_get(&client->dev, "vdd");
- if (IS_ERR(rmi4_data->regulator)) {
- dev_err(&client->dev, "%s:get regulator failed\n",
- __func__);
- retval = PTR_ERR(rmi4_data->regulator);
- goto err_get_regulator;
- }
- retval = regulator_enable(rmi4_data->regulator);
- if (retval < 0) {
- dev_err(&client->dev, "%s:regulator enable failed\n",
- __func__);
- goto err_regulator_enable;
+ if (platformdata->regulator_en) {
+ rmi4_data->regulator = regulator_get(&client->dev, "vdd");
+ if (IS_ERR(rmi4_data->regulator)) {
+ dev_err(&client->dev, "%s:get regulator failed\n",
+ __func__);
+ retval = PTR_ERR(rmi4_data->regulator);
+ goto err_regulator;
+ }
+ regulator_enable(rmi4_data->regulator);
}
+
init_waitqueue_head(&rmi4_data->wait);
/*
* Copy i2c_client pointer into RTID's i2c_client pointer for
@@ -987,7 +1155,16 @@ static int __devinit synaptics_rmi4_probe
input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, 0,
rmi4_data->sensor_max_y, 0, 0);
input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
- MAX_TOUCH_MAJOR, 0, 0);
+ max(rmi4_data->sensor_max_x, rmi4_data->sensor_max_y),
+ 0, 0);
+ input_set_abs_params(rmi4_data->input_dev, ABS_MT_WIDTH_MAJOR, 0,
+ MAX_WIDTH_MAJOR, 0, 0);
+
+ retval = input_register_device(rmi4_data->input_dev);
+ if (retval) {
+ dev_err(&client->dev, "%s:input register failed\n", __func__);
+ goto err_input_register;
+ }
/* Clear interrupts */
synaptics_rmi4_i2c_block_read(rmi4_data,
@@ -1000,24 +1177,34 @@ static int __devinit synaptics_rmi4_probe
if (retval) {
dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
__func__, platformdata->irq_number);
- goto err_query_dev;
+ goto err_request_irq;
}
- retval = input_register_device(rmi4_data->input_dev);
+ INIT_WORK(&rmi4_data->resume_wq_handler, synaptics_rmi4_resume_handler);
+
+ /* sysfs implementation for dynamically enabling/disabling the input event */
+ retval = sysfs_create_group(&client->dev.kobj,
+ &synaptics_rmi4_attr_group);
if (retval) {
- dev_err(&client->dev, "%s:input register failed\n", __func__);
- goto err_free_irq;
+ dev_err(&client->dev, "failed to create sysfs entries\n");
+ goto err_sysfs;
}
-
+ rmi4_data->enable = true;
return retval;
-err_free_irq:
+err_sysfs:
+ cancel_work_sync(&rmi4_data->resume_wq_handler);
+err_request_irq:
free_irq(platformdata->irq_number, rmi4_data);
+ input_unregister_device(rmi4_data->input_dev);
+err_input_register:
+ i2c_set_clientdata(client, NULL);
err_query_dev:
- regulator_disable(rmi4_data->regulator);
-err_regulator_enable:
- regulator_put(rmi4_data->regulator);
-err_get_regulator:
+ if (platformdata->regulator_en) {
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
+ }
+err_regulator:
input_free_device(rmi4_data->input_dev);
rmi4_data->input_dev = NULL;
err_input:
@@ -1037,12 +1224,16 @@ static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
+ sysfs_remove_group(&client->dev.kobj, &synaptics_rmi4_attr_group);
rmi4_data->touch_stopped = true;
wake_up(&rmi4_data->wait);
+ cancel_work_sync(&rmi4_data->resume_wq_handler);
free_irq(pdata->irq_number, rmi4_data);
input_unregister_device(rmi4_data->input_dev);
- regulator_disable(rmi4_data->regulator);
- regulator_put(rmi4_data->regulator);
+ if (pdata->regulator_en) {
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
+ }
kfree(rmi4_data);
return 0;
@@ -1059,31 +1250,11 @@ static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
static int synaptics_rmi4_suspend(struct device *dev)
{
/* Touch sleep mode */
- int retval;
- unsigned char intr_status;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
- rmi4_data->touch_stopped = true;
- disable_irq(pdata->irq_number);
-
- retval = synaptics_rmi4_i2c_block_read(rmi4_data,
- rmi4_data->fn01_data_base_addr + 1,
- &intr_status,
- rmi4_data->number_of_interrupt_register);
- if (retval < 0)
- return retval;
-
- retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
- rmi4_data->fn01_ctrl_base_addr + 1,
- (intr_status & ~TOUCHPAD_CTRL_INTR));
- if (retval < 0)
- return retval;
-
- regulator_disable(rmi4_data->regulator);
-
- return 0;
+ return synaptics_rmi4_disable(rmi4_data);
}
+
/**
* synaptics_rmi4_resume() - resume the touch screen controller
* @dev: pointer to device structure
@@ -1093,28 +1264,9 @@ static int synaptics_rmi4_suspend(struct device *dev)
*/
static int synaptics_rmi4_resume(struct device *dev)
{
- int retval;
- unsigned char intr_status;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
-
- regulator_enable(rmi4_data->regulator);
- enable_irq(pdata->irq_number);
- rmi4_data->touch_stopped = false;
-
- retval = synaptics_rmi4_i2c_block_read(rmi4_data,
- rmi4_data->fn01_data_base_addr + 1,
- &intr_status,
- rmi4_data->number_of_interrupt_register);
- if (retval < 0)
- return retval;
-
- retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
- rmi4_data->fn01_ctrl_base_addr + 1,
- (intr_status | TOUCHPAD_CTRL_INTR));
- if (retval < 0)
- return retval;
+ schedule_work(&rmi4_data->resume_wq_handler);
return 0;
}
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
index 384436ef806..973abc97374 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
@@ -42,6 +42,7 @@ struct synaptics_rmi4_platform_data {
int irq_type;
bool x_flip;
bool y_flip;
+ bool regulator_en;
};
#endif
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
new file mode 100644
index 00000000000..a452e888d77
--- /dev/null
+++ b/drivers/tee/Kconfig
@@ -0,0 +1,13 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Martin Hovang (martin.xm.hovang@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+# Trusted Execution Environment Configuration
+config TEE_SUPPORT
+ bool "Trusted Execution Environment Support"
+ default y
+ ---help---
+ This implements the Trusted Execution Environment (TEE) Client
+ API Specification from GlobalPlatform Device Technology.
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
new file mode 100644
index 00000000000..b937eb19d72
--- /dev/null
+++ b/drivers/tee/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Martin Hovang (martin.xm.hovang@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+obj-$(CONFIG_TEE_SUPPORT) += tee_service.o
+obj-$(CONFIG_TEE_SUPPORT) += tee_driver.o
diff --git a/drivers/tee/tee_driver.c b/drivers/tee/tee_driver.c
new file mode 100644
index 00000000000..442dec5fe06
--- /dev/null
+++ b/drivers/tee/tee_driver.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Hovang <martin.xm.hovang@stericsson.com>
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/tee.h>
+#include <linux/slab.h>
+#include <linux/hwmem.h>
+
+#define TEED_NAME "tee"
+#define TEED_PFX "TEE: "
+
+#define TEED_STATE_OPEN_DEV 0
+#define TEED_STATE_OPEN_SESSION 1
+
+static struct mutex sync;
+
+static int tee_open(struct inode *inode, struct file *file);
+static int tee_release(struct inode *inode, struct file *file);
+static int tee_read(struct file *filp, char __user *buffer,
+ size_t length, loff_t *offset);
+static int tee_write(struct file *filp, const char __user *buffer,
+ size_t length, loff_t *offset);
+
+static inline void set_emsg(struct tee_session *ts, u32 msg, int line)
+{
+ pr_err(TEED_PFX "msg: 0x%08x at line: %d\n", msg, line);
+ ts->err = msg;
+ ts->origin = TEED_ORIGIN_DRIVER;
+}
+
+static void reset_session(struct tee_session *ts)
+{
+ int i;
+
+ ts->state = TEED_STATE_OPEN_DEV;
+ ts->err = TEED_SUCCESS;
+ ts->origin = TEED_ORIGIN_DRIVER;
+ ts->id = 0;
+ for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; i++)
+ ts->vaddr[i] = NULL;
+ ts->ta = NULL;
+ ts->uuid = NULL;
+ ts->cmd = 0;
+ ts->driver_cmd = TEED_OPEN_SESSION;
+ ts->ta_size = 0;
+ ts->op = NULL;
+}
+
+static int copy_ta(struct tee_session *ts,
+ struct tee_session *ku_buffer)
+{
+ ts->ta = kmalloc(ku_buffer->ta_size, GFP_KERNEL);
+ if (ts->ta == NULL) {
+ pr_err(TEED_PFX "[%s] error, out of memory (ta)\n",
+ __func__);
+ set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY, __LINE__);
+ return -ENOMEM;
+ }
+
+ ts->ta_size = ku_buffer->ta_size;
+
+ memcpy(ts->ta, ku_buffer->ta, ku_buffer->ta_size);
+ return 0;
+}
+
+static int copy_uuid(struct tee_session *ts,
+ struct tee_session *ku_buffer)
+{
+ ts->uuid = kmalloc(sizeof(struct tee_uuid), GFP_KERNEL);
+
+ if (ts->uuid == NULL) {
+ pr_err(TEED_PFX "[%s] error, out of memory (uuid)\n",
+ __func__);
+ set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY, __LINE__);
+ return -ENOMEM;
+ }
+
+ memcpy(ts->uuid, ku_buffer->uuid, sizeof(struct tee_uuid));
+
+ return 0;
+}
+
+static inline void free_operation(struct tee_session *ts,
+ struct hwmem_alloc **alloc,
+ int memrefs_allocated)
+{
+ int i;
+
+ for (i = 0; i < memrefs_allocated; ++i) {
+ if (ts->op->shm[i].buffer) {
+ hwmem_kunmap(alloc[i]);
+ hwmem_unpin(alloc[i]);
+ hwmem_release(alloc[i]);
+ ts->op->shm[i].buffer = NULL;
+ }
+
+ if (ts->vaddr[i])
+ ts->vaddr[i] = NULL;
+ }
+
+ kfree(ts->op);
+ ts->op = NULL;
+}
+
+static inline void memrefs_phys_to_virt(struct tee_session *ts)
+{
+ int i;
+
+ for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) {
+ if (ts->op->flags & (1 << i)) {
+ ts->op->shm[i].buffer =
+ phys_to_virt((unsigned long)
+ ts->op->shm[i].buffer);
+ }
+ }
+}
+
+static inline void memrefs_virt_to_phys(struct tee_session *ts)
+{
+ int i;
+
+ for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) {
+ if (ts->op->flags & (1 << i)) {
+ ts->op->shm[i].buffer =
+ (void *)virt_to_phys(ts->op->shm[i].buffer);
+ }
+ }
+}
+
+static int copy_memref_to_user(struct tee_session *ts,
+ struct tee_operation __user *ubuf_op,
+ int memref)
+{
+ unsigned long bytes_left;
+
+ bytes_left = copy_to_user(ubuf_op->shm[memref].buffer,
+ ts->vaddr[memref],
+ ts->op->shm[memref].size);
+
+ if (bytes_left != 0) {
+ pr_err(TEED_PFX "[%s] failed to copy result to user space (%lu "
+ "bytes left of buffer).\n", __func__, bytes_left);
+ return bytes_left;
+ }
+
+ bytes_left = put_user(ts->op->shm[memref].size,
+ &ubuf_op->shm[memref].size);
+
+ if (bytes_left != 0) {
+ pr_err(TEED_PFX "[%s] failed to copy result to user space (%lu "
+ "bytes left of size).\n", __func__, bytes_left);
+ return -EINVAL;
+ }
+
+ bytes_left = put_user(ts->op->shm[memref].flags,
+ &ubuf_op->shm[memref].flags);
+ if (bytes_left != 0) {
+ pr_err(TEED_PFX "[%s] failed to copy result to user space (%lu "
+ "bytes left of flags).\n", __func__, bytes_left);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int copy_memref_to_kernel(struct tee_session *ts,
+ struct tee_session *ku_buffer,
+ struct hwmem_alloc **alloc,
+ int memref)
+{
+ int ret = -EINVAL;
+ size_t mem_chunks_length = 1;
+ struct hwmem_mem_chunk mem_chunks;
+
+ if (ku_buffer->op->shm[memref].size == 0) {
+ pr_err(TEED_PFX "[%s] error, size of memref is zero "
+ "(memref: %d)\n", __func__, memref);
+ return ret;
+ }
+
+ alloc[memref] = hwmem_alloc(ku_buffer->op->shm[memref].size,
+ (HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_WB |
+ HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE),
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE |
+ HWMEM_ACCESS_IMPORT),
+ HWMEM_MEM_CONTIGUOUS_SYS);
+
+ if (IS_ERR(alloc[memref])) {
+ pr_err(TEED_PFX "[%s] couldn't alloc hwmem_alloc (memref: %d)"
+ "\n", __func__, memref);
+ return PTR_ERR(alloc[memref]);
+ }
+
+ ret = hwmem_pin(alloc[memref], &mem_chunks, &mem_chunks_length);
+ if (ret) {
+ pr_err(TEED_PFX "[%s] couldn't pin buffer (memref: %d)\n",
+ __func__, memref);
+ return ret;
+ }
+
+ /*
+ * Since phys_to_virt does not work for hwmem memory, we store the
+ * virtual addresses in a separate array in tee_session and keep the
+ * physical addresses in the memref buffer.
+ */
+ ts->op->shm[memref].buffer = (void *)mem_chunks.paddr;
+ ts->vaddr[memref] = hwmem_kmap(alloc[memref]);
+
+ /* Buffer unmapped/freed in invoke_command if this function fails. */
+ if (!ts->op->shm[memref].buffer || !ts->vaddr[memref]) {
+ pr_err(TEED_PFX "[%s] out of memory (memref: %d)\n",
+ __func__, memref);
+ return -ENOMEM;
+ }
+
+ if (ku_buffer->op->shm[memref].flags & TEEC_MEM_INPUT)
+ memcpy(ts->vaddr[memref],
+ ku_buffer->op->shm[memref].buffer,
+ ku_buffer->op->shm[memref].size);
+
+ ts->op->shm[memref].size = ku_buffer->op->shm[memref].size;
+ ts->op->shm[memref].flags = ku_buffer->op->shm[memref].flags;
+
+ return 0;
+}
+
+static int open_tee_device(struct tee_session *ts,
+ struct tee_session *ku_buffer)
+{
+ int ret;
+
+ if (ku_buffer->driver_cmd != TEED_OPEN_SESSION) {
+ set_emsg(ts, TEED_ERROR_BAD_STATE, __LINE__);
+ return -EINVAL;
+ }
+
+ if (ku_buffer->ta) {
+ ret = copy_ta(ts, ku_buffer);
+ } else if (ku_buffer->uuid) {
+ ret = copy_uuid(ts, ku_buffer);
+ } else {
+ set_emsg(ts, TEED_ERROR_COMMUNICATION, __LINE__);
+ return -EINVAL;
+ }
+
+ ts->id = 0;
+ ts->state = TEED_STATE_OPEN_SESSION;
+ return ret;
+}
+
+static int invoke_command(struct tee_session *ts,
+ struct tee_session *ku_buffer,
+ struct tee_session __user *u_buffer)
+{
+ int i;
+ int ret = 0;
+ /* To keep track of which memrefs to free when failure occurs. */
+ int memrefs_allocated = 0;
+ struct hwmem_alloc *alloc[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+
+ ts->op = kmalloc(sizeof(struct tee_operation), GFP_KERNEL);
+
+ if (ts->op == NULL) {
+ pr_err(TEED_PFX "[%s] error, out of memory "
+ "(op)\n", __func__);
+ set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY, __LINE__);
+ return -ENOMEM;
+ }
+
+ ts->op->flags = ku_buffer->op->flags;
+ ts->cmd = ku_buffer->cmd;
+
+ for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) {
+ ts->op->shm[i].buffer = NULL;
+ memrefs_allocated++;
+
+ /* We only want to copy memrefs in use to kernel space. */
+ if (ku_buffer->op->flags & (1 << i)) {
+ ret = copy_memref_to_kernel(ts, ku_buffer, alloc, i);
+ if (ret) {
+ pr_err(TEED_PFX "[%s] failed copy memref[%d] "
+ "to kernel", __func__, i);
+ goto err;
+ }
+ } else {
+ ts->op->shm[i].size = 0;
+ ts->op->shm[i].flags = 0;
+ }
+ }
+
+ if (call_sec_world(ts, TEED_INVOKE)) {
+ set_emsg(ts, TEED_ERROR_COMMUNICATION, __LINE__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) {
+ if ((ku_buffer->op->flags & (1 << i)) &&
+ (ku_buffer->op->shm[i].flags & TEEC_MEM_OUTPUT)) {
+ ret = copy_memref_to_user(ts, u_buffer->op, i);
+ if (ret) {
+ pr_err(TEED_PFX "[%s] failed copy memref[%d] "
+ "to user", __func__, i);
+ goto err;
+ }
+ }
+ }
+err:
+ free_operation(ts, alloc, memrefs_allocated);
+
+ return ret;
+}
+
+static int tee_open(struct inode *inode, struct file *filp)
+{
+ struct tee_session *ts;
+ filp->private_data = kmalloc(sizeof(struct tee_session),
+ GFP_KERNEL);
+
+ if (filp->private_data == NULL) {
+ pr_err(TEED_PFX "[%s] allocation failed", __func__);
+ return -ENOMEM;
+ }
+
+ ts = (struct tee_session *)(filp->private_data);
+ reset_session(ts);
+
+ return 0;
+}
+
+static int tee_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+/*
+ * Called when a process, which already opened the dev file, attempts
+ * to read from it. This function gets the current status of the session.
+ */
+static int tee_read(struct file *filp, char __user *buffer,
+ size_t length, loff_t *offset)
+{
+ struct tee_read buf;
+ struct tee_session *ts;
+
+ if (length != sizeof(struct tee_read)) {
+ pr_err(TEED_PFX "[%s] error, incorrect input length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ts = (struct tee_session *)(filp->private_data);
+
+ if (ts == NULL) {
+ pr_err(TEED_PFX "[%s] error, private_data not "
+ "initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&sync);
+
+ buf.err = ts->err;
+ buf.origin = ts->origin;
+
+ mutex_unlock(&sync);
+
+ if (copy_to_user(buffer, &buf, length)) {
+ pr_err(TEED_PFX "[%s] error, copy_to_user failed!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return length;
+}
+
+/*
+ * Called when a process writes to a dev file.
+ */
+static int tee_write(struct file *filp, const char __user *buffer,
+ size_t length, loff_t *offset)
+{
+ struct tee_session ku_buffer;
+ struct tee_session *ts;
+ int ret = 0;
+
+ if (length != sizeof(struct tee_session)) {
+ pr_err(TEED_PFX "[%s] error, incorrect input length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ku_buffer, buffer, length)) {
+ pr_err(TEED_PFX "[%s] error, tee_session "
+ "copy_from_user failed\n", __func__);
+ return -EINVAL;
+ }
+
+ ts = (struct tee_session *)(filp->private_data);
+
+ if (ts == NULL) {
+ pr_err(TEED_PFX "[%s] error, private_data not "
+ "initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&sync);
+
+ switch (ts->state) {
+ case TEED_STATE_OPEN_DEV:
+ ret = open_tee_device(ts, &ku_buffer);
+ break;
+
+ case TEED_STATE_OPEN_SESSION:
+ switch (ku_buffer.driver_cmd) {
+ case TEED_INVOKE:
+ ret = invoke_command(ts, &ku_buffer,
+ (struct tee_session *)buffer);
+ break;
+
+ case TEED_CLOSE_SESSION:
+ /* no caching implemented yet... */
+ if (call_sec_world(ts, TEED_CLOSE_SESSION)) {
+ set_emsg(ts, TEED_ERROR_COMMUNICATION,
+ __LINE__);
+ ret = -EINVAL;
+ }
+
+ kfree(ts->ta);
+ ts->ta = NULL;
+
+ reset_session(ts);
+ break;
+
+ default:
+ set_emsg(ts, TEED_ERROR_BAD_PARAMETERS, __LINE__);
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ pr_err(TEED_PFX "[%s] unknown state\n", __func__);
+ set_emsg(ts, TEED_ERROR_BAD_STATE, __LINE__);
+ ret = -EINVAL;
+ }
+
+ /*
+ * We expect that ret has value zero when reaching the end here.
+ * If it has any other value, some error must have occurred.
+ */
+ if (!ret) {
+ ret = length;
+ } else {
+ pr_err(TEED_PFX "[%s], forcing error to -EINVAL\n", __func__);
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&sync);
+
+ return ret;
+}
+
+int teec_initialize_context(const char *name, struct tee_context *context)
+{
+ return TEED_SUCCESS;
+}
+EXPORT_SYMBOL(teec_initialize_context);
+
+int teec_finalize_context(struct tee_context *context)
+{
+ return TEED_SUCCESS;
+}
+EXPORT_SYMBOL(teec_finalize_context);
+
+int teec_open_session(struct tee_context *context,
+ struct tee_session *session,
+ const struct tee_uuid *destination,
+ unsigned int connection_method,
+ void *connection_data, struct tee_operation *operation,
+ unsigned int *error_origin)
+{
+ int res = TEED_SUCCESS;
+
+ if (session == NULL || destination == NULL) {
+ pr_err(TEED_PFX "[%s] session or destination == NULL\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ reset_session(session);
+
+ /*
+ * Open a session towards an application already loaded inside
+ * the TEE.
+ */
+ session->uuid = kmalloc(sizeof(struct tee_uuid), GFP_KERNEL);
+
+ if (session->uuid == NULL) {
+ pr_err(TEED_PFX "[%s] error, out of memory (uuid)\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ memcpy(session->uuid, destination, sizeof(struct tee_uuid));
+
+ session->ta = NULL;
+ session->id = 0;
+
+exit:
+ return res;
+}
+EXPORT_SYMBOL(teec_open_session);
+
+int teec_close_session(struct tee_session *session)
+{
+ int res = TEED_SUCCESS;
+
+ mutex_lock(&sync);
+
+ if (session == NULL) {
+ pr_err(TEED_PFX "[%s] error, session == NULL\n", __func__);
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ if (call_sec_world(session, TEED_CLOSE_SESSION)) {
+ pr_err(TEED_PFX "[%s] error, call_sec_world failed\n",
+ __func__);
+ res = TEED_ERROR_GENERIC;
+ goto exit;
+ }
+
+exit:
+ if (session != NULL) {
+ kfree(session->uuid);
+ session->uuid = NULL;
+ }
+
+ mutex_unlock(&sync);
+ return res;
+}
+EXPORT_SYMBOL(teec_close_session);
+
+int teec_invoke_command(
+ struct tee_session *session, unsigned int command_id,
+ struct tee_operation *operation,
+ unsigned int *error_origin)
+{
+ int res = TEED_SUCCESS;
+ int i;
+
+ mutex_lock(&sync);
+
+ if (session == NULL || operation == NULL || error_origin == NULL) {
+ pr_err(TEED_PFX "[%s] error, input parameters == NULL\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ for (i = 0; i < 4; ++i) {
+ /* We only want to translate memrefs in use. */
+ if (operation->flags & (1 << i)) {
+ operation->shm[i].buffer =
+ (void *)virt_to_phys(
+ operation->shm[i].buffer);
+ }
+ }
+ session->op = operation;
+ session->cmd = command_id;
+
+ /*
+ * Call secure world
+ */
+ if (call_sec_world(session, TEED_INVOKE)) {
+ pr_err(TEED_PFX "[%s] error, call_sec_world failed\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_GENERIC;
+ }
+ if (session->err != TEED_SUCCESS) {
+ pr_err(TEED_PFX "[%s] error, call_sec_world failed\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = session->origin;
+ res = session->err;
+ }
+
+ memrefs_phys_to_virt(session);
+ session->op = NULL;
+
+exit:
+ mutex_unlock(&sync);
+ return res;
+}
+EXPORT_SYMBOL(teec_invoke_command);
+
+int teec_allocate_shared_memory(struct tee_context *context,
+ struct tee_sharedmemory *shared_memory)
+{
+ int res = TEED_SUCCESS;
+
+ if (shared_memory == NULL) {
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ shared_memory->buffer = kmalloc(shared_memory->size,
+ GFP_KERNEL);
+
+ if (shared_memory->buffer == NULL) {
+ res = TEED_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+exit:
+ return res;
+}
+EXPORT_SYMBOL(teec_allocate_shared_memory);
+
+void teec_release_shared_memory(struct tee_sharedmemory *shared_memory)
+{
+ kfree(shared_memory->buffer);
+}
+EXPORT_SYMBOL(teec_release_shared_memory);
+
+static const struct file_operations tee_fops = {
+ .owner = THIS_MODULE,
+ .read = tee_read,
+ .write = tee_write,
+ .open = tee_open,
+ .release = tee_release,
+};
+
+static struct miscdevice tee_dev = {
+ MISC_DYNAMIC_MINOR,
+ TEED_NAME,
+ &tee_fops
+};
+
+static int __init tee_init(void)
+{
+ int err = 0;
+
+ err = misc_register(&tee_dev);
+
+ if (err) {
+ pr_err(TEED_PFX "[%s] error %d adding character device "
+ "TEE\n", __func__, err);
+ }
+
+ mutex_init(&sync);
+
+ return err;
+}
+
+static void __exit tee_exit(void)
+{
+ misc_deregister(&tee_dev);
+}
+
+subsys_initcall(tee_init);
+module_exit(tee_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Trusted Execution Environment driver");
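From user space the driver is exercised through /dev/tee with fixed-size transfers: a write of sizeof(struct tee_session) either opens a session or drives it (depending on driver_cmd and the current state), and a read of sizeof(struct tee_read) fetches the last error and its origin. A hypothetical user-space sketch, assuming the structures from <linux/tee.h> are visible to the application:

/* Hypothetical user-space sketch of the /dev/tee protocol. */
#include <fcntl.h>
#include <unistd.h>
#include <linux/tee.h>

static int example_tee_send(int fd, struct tee_session *session)
{
	/* Any other length is rejected with -EINVAL by tee_write(). */
	return write(fd, session, sizeof(*session)) == sizeof(*session) ? 0 : -1;
}

static int example_tee_status(int fd, struct tee_read *status)
{
	/* tee_read() fills in the last error code and its origin. */
	return read(fd, status, sizeof(*status)) == sizeof(*status) ? 0 : -1;
}
/* usage: int fd = open("/dev/tee", O_RDWR); */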
diff --git a/drivers/tee/tee_service.c b/drivers/tee/tee_service.c
new file mode 100644
index 00000000000..b01e9d0ac39
--- /dev/null
+++ b/drivers/tee/tee_service.c
@@ -0,0 +1,17 @@
+/*
+ * TEE service to handle the calls to trusted applications.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/kernel.h>
+#include <linux/tee.h>
+#include <linux/device.h>
+
+int __weak call_sec_world(struct tee_session *ts, int sec_cmd)
+{
+ pr_info("[%s] Generic call_sec_world called!\n", __func__);
+
+ return 0;
+}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 2de99248dfa..fb18d37687b 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -63,6 +63,14 @@ config SERIAL_AMBA_PL011_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
+config SERIAL_AMBA_PL011_CLOCK_CONTROL
+ bool "Support for clock control on AMBA serial port"
+ depends on SERIAL_AMBA_PL011
+ select CONSOLE_POLL
+ ---help---
+ Say Y here if you wish to use the amba set_termios function to control
+ the pl011 clock. Any positive baud rate passed enables the clock.
+
config SERIAL_SB1250_DUART
tristate "BCM1xxx on-chip DUART serial support"
depends on SIBYTE_SB1xxx_SOC=y
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 6800f5f2624..f36663af790 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -47,12 +47,14 @@
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
-#include <asm/io.h>
#include <asm/sizes.h>
#define UART_NR 14
@@ -63,9 +65,41 @@
#define AMBA_ISR_PASS_LIMIT 256
-#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
+#define UART_DR_ERROR (UART011_DR_OE | UART011_DR_BE | \
+ UART011_DR_PE | UART011_DR_FE)
#define UART_DUMMY_DR_RX (1 << 16)
+/*
+ * The console UART is handled differently for power management (it doesn't
+ * take the regulator, in order to allow the system to go to sleep even if the
+ * console is open). This should be removed once cable detect is in place.
+ */
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#define uart_console(port) ((port)->cons \
+ && (port)->cons->index == (port)->line)
+#else
+#define uart_console(port) (0)
+#endif
+
+/* Available amba pl011 port clock states */
+enum pl011_clk_states {
+ PL011_CLK_OFF = 0, /* clock disabled */
+ PL011_CLK_REQUEST_OFF, /* disable after TX flushed */
+ PL011_CLK_ON, /* clock enabled */
+ PL011_PORT_OFF, /* port disabled */
+};
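+
+/*
+ * Typical clock state flow (see the functions further down):
+ * pl011_power_startup() takes PL011_PORT_OFF to PL011_CLK_ON; a zero-baud
+ * (B0) termios moves PL011_CLK_ON to PL011_CLK_REQUEST_OFF via
+ * pl011_clock_request_off(); the delayed work in pl011_clock_off() drops to
+ * PL011_CLK_OFF once TX is drained and the port is idle; a non-zero baudrate
+ * re-enables the clock through pl011_clock_on(), while new TX during an off
+ * request only reschedules the work (pl011_clock_check()).
+ */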
+
+/*
+ * Backup registers to be used during regulator startup/shutdown
+ */
+static const u32 backup_regs[] = {
+ UART011_IBRD,
+ UART011_FBRD,
+ ST_UART011_LCRH_RX,
+ ST_UART011_LCRH_TX,
+ UART011_CR,
+ UART011_IMSC,
+};
#define UART_WA_SAVE_NR 14
@@ -88,7 +122,9 @@ static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
};
static u32 uart_wa_regdata[UART_WA_SAVE_NR];
-static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
+static unsigned int uart_wa_tlet_line;
+static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa,
+ (unsigned long) &uart_wa_tlet_line);
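+
+/*
+ * uart_wa_tlet_line records which port triggered the lockup workaround; it
+ * is set in pl011_int() right before the tasklet is scheduled, so
+ * pl011_lockup_wa() no longer assumes port 0.
+ */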
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
@@ -157,10 +193,18 @@ struct uart_amba_port {
unsigned int im; /* interrupt mask */
unsigned int old_status;
unsigned int fifosize; /* vendor-specific */
+ unsigned int ifls; /* vendor-specific */
unsigned int lcrh_tx; /* vendor-specific */
unsigned int lcrh_rx; /* vendor-specific */
unsigned int old_cr; /* state during shutdown */
bool autorts;
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+ enum pl011_clk_states clk_state; /* actual clock state */
+ struct delayed_work clk_off_work; /* work used for clock off */
+ unsigned int clk_off_delay; /* clock off delay */
+#endif
+ struct regulator *regulator;
+ u32 backup[ARRAY_SIZE(backup_regs)];
char type[12];
bool interrupt_may_hang; /* vendor-specific */
#ifdef CONFIG_DMA_ENGINE
@@ -306,7 +350,8 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
.src_maxburst = uap->fifosize >> 1,
};
- chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+ chan = dma_request_channel(mask,
+ plat->dma_filter, plat->dma_rx_param);
if (!chan) {
dev_err(uap->port.dev, "no RX DMA channel!\n");
return;
@@ -750,8 +795,9 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
*/
if (dma_count == pending && readfifo) {
/* Clear any error flags */
- writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
- uap->port.membase + UART011_ICR);
+ writew(UART011_OEIS | UART011_BEIS |
+ UART011_PEIS | UART011_FEIS,
+ uap->port.membase + UART011_ICR);
/*
* If we read all the DMA'd characters, and we had an
@@ -948,11 +994,13 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
spin_unlock_irq(&uap->port.lock);
if (uap->using_tx_dma) {
- /* In theory, this should already be done by pl011_dma_flush_buffer */
+ /* In theory, this should already be done by
+ * pl011_dma_flush_buffer
+ */
dmaengine_terminate_all(uap->dmatx.chan);
if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_sg(uap->dmatx.chan->device->dev,
+ &uap->dmatx.sg, 1, DMA_TO_DEVICE);
uap->dmatx.queued = false;
}
@@ -963,8 +1011,10 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
if (uap->using_rx_dma) {
dmaengine_terminate_all(uap->dmarx.chan);
/* Clean up the RX DMA */
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ DMA_FROM_DEVICE);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+ DMA_FROM_DEVICE);
uap->using_rx_dma = false;
}
}
@@ -1054,13 +1104,17 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
*/
static void pl011_lockup_wa(unsigned long data)
{
- struct uart_amba_port *uap = amba_ports[0];
+ struct uart_amba_port *uap = amba_ports[*(unsigned int *)data];
void __iomem *base = uap->port.membase;
struct circ_buf *xmit = &uap->port.state->xmit;
struct tty_struct *tty = uap->port.state->port.tty;
int buf_empty_retries = 200;
int loop;
+ /* Exit early if there is no tty */
+ if (!tty)
+ return;
+
/* Stop HCI layer from submitting data for tx */
tty->hw_stopped = 1;
while (!uart_circ_empty(xmit)) {
@@ -1101,6 +1155,260 @@ static void pl011_lockup_wa(unsigned long data)
tty->hw_stopped = 0;
}
+static void __pl011_startup(struct uart_amba_port *uap)
+{
+ unsigned int cr;
+
+ writew(uap->ifls, uap->port.membase + UART011_IFLS);
+
+ /*
+ * Provoke TX FIFO interrupt into asserting.
+ */
+ cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
+ writew(cr, uap->port.membase + UART011_CR);
+ writew(0, uap->port.membase + UART011_FBRD);
+ writew(1, uap->port.membase + UART011_IBRD);
+ writew(0, uap->port.membase + uap->lcrh_rx);
+ if (uap->lcrh_tx != uap->lcrh_rx) {
+ int i;
+ /*
+		 * Wait 10 PCLKs before writing the LCRH_TX register;
+		 * to get this delay, write a read-only register 10 times.
+ */
+ for (i = 0; i < 10; ++i)
+ writew(0xff, uap->port.membase + UART011_MIS);
+ writew(0, uap->port.membase + uap->lcrh_tx);
+ }
+ writew(0, uap->port.membase + UART01x_DR);
+ while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
+ barrier();
+}
+
+/* Backup the registers during regulator startup/shutdown */
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+static int pl011_backup(struct uart_amba_port *uap, bool suspend)
+{
+ int i, cnt;
+
+ if (!suspend) {
+ __pl011_startup(uap);
+ writew(0, uap->port.membase + UART011_CR);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(backup_regs); i++) {
+ if (suspend)
+ uap->backup[i] = readw(uap->port.membase +
+ backup_regs[i]);
+ else {
+ if (backup_regs[i] == ST_UART011_LCRH_TX) {
+ /*
+				 * Wait 10 PCLKs before writing the LCRH_TX
+				 * register; to get this delay, write a
+				 * read-only register 10 times.
+ */
+ for (cnt = 0; cnt < 10; ++cnt)
+ writew(0xff, uap->port.membase +
+ UART011_MIS);
+ }
+
+ writew(uap->backup[i],
+ uap->port.membase + backup_regs[i]);
+ }
+ }
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+/* Turn clock off if TX buffer is empty, otherwise reschedule */
+static void pl011_clock_off(struct work_struct *work)
+{
+ struct uart_amba_port *uap = container_of(work, struct uart_amba_port,
+ clk_off_work.work);
+ struct uart_port *port = &uap->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long flags;
+ bool disable_regulator = false;
+ bool runtime_put = false;
+ unsigned int busy, interrupt_status;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ interrupt_status = readw(uap->port.membase + UART011_MIS);
+ busy = readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY;
+
+ if (uap->clk_state == PL011_CLK_REQUEST_OFF) {
+ if (uart_circ_empty(xmit) && !interrupt_status && !busy) {
+ if (!uart_console(&uap->port) && uap->regulator) {
+ pl011_backup(uap, true);
+ disable_regulator = true;
+ }
+ runtime_put = true;
+ uap->clk_state = PL011_CLK_OFF;
+ clk_disable(uap->clk);
+ } else
+ schedule_delayed_work(&uap->clk_off_work,
+ uap->clk_off_delay);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (disable_regulator)
+ regulator_disable(uap->regulator);
+ if (runtime_put)
+ pm_runtime_put_sync(uap->port.dev);
+}
+
+/* Request to turn off uart clock once pending TX is flushed */
+static void pl011_clock_request_off(struct uart_port *port)
+{
+ unsigned long flags;
+ struct uart_amba_port *uap = (struct uart_amba_port *)(port);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (uap->clk_state == PL011_CLK_ON) {
+ uap->clk_state = PL011_CLK_REQUEST_OFF;
+ /* Turn off later */
+ schedule_delayed_work(&uap->clk_off_work,
+ uap->clk_off_delay);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* Request to immediately turn on uart clock */
+static void pl011_clock_on(struct uart_port *port)
+{
+ unsigned long flags;
+ struct uart_amba_port *uap = (struct uart_amba_port *)(port);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ switch (uap->clk_state) {
+ case PL011_CLK_OFF:
+ pm_runtime_get_sync(uap->port.dev);
+ clk_enable(uap->clk);
+ if (!uart_console(&uap->port) && uap->regulator) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ regulator_enable(uap->regulator);
+ spin_lock_irqsave(&port->lock, flags);
+ pl011_backup(uap, false);
+ }
+ /* fallthrough */
+ case PL011_CLK_REQUEST_OFF:
+ cancel_delayed_work(&uap->clk_off_work);
+ uap->clk_state = PL011_CLK_ON;
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void pl011_clock_check(struct uart_amba_port *uap)
+{
+	/* Reschedule work during off request */
+ if (uap->clk_state == PL011_CLK_REQUEST_OFF)
+ /* New TX - restart work */
+ if (cancel_delayed_work(&uap->clk_off_work))
+ schedule_delayed_work(&uap->clk_off_work,
+ uap->clk_off_delay);
+}
+
+static int pl011_power_startup(struct uart_amba_port *uap)
+{
+ int retval = 0;
+
+ if (uap->clk_state == PL011_PORT_OFF) {
+ pm_runtime_get_sync(uap->port.dev);
+ if (!uart_console(&uap->port) && uap->regulator)
+ regulator_enable(uap->regulator);
+ retval = clk_enable(uap->clk);
+ if (!retval) {
+ uap->clk_state = PL011_CLK_ON;
+ } else {
+ uap->clk_state = PL011_PORT_OFF;
+ pm_runtime_put_sync(uap->port.dev);
+ }
+ }
+
+ return retval;
+}
+
+static void pl011_power_shutdown(struct uart_amba_port *uap)
+{
+ bool disable_regulator = false;
+ bool runtime_put = false;
+
+ cancel_delayed_work_sync(&uap->clk_off_work);
+
+ spin_lock_irq(&uap->port.lock);
+ if (uap->clk_state == PL011_CLK_ON ||
+ uap->clk_state == PL011_CLK_REQUEST_OFF) {
+ clk_disable(uap->clk);
+ runtime_put = true;
+ if (!uart_console(&uap->port) && uap->regulator)
+ disable_regulator = true;
+ }
+ uap->clk_state = PL011_PORT_OFF;
+ spin_unlock_irq(&uap->port.lock);
+
+ if (disable_regulator)
+ regulator_disable(uap->regulator);
+ if (runtime_put)
+ pm_runtime_put_sync(uap->port.dev);
+}
+
+static void
+pl011_clock_control(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ speed_t new_baud = tty_termios_baud_rate(termios);
+
+ if (new_baud == 0)
+ pl011_clock_request_off(port);
+ else
+ pl011_clock_on(port);
+}
+
+static void pl011_clock_control_init(struct uart_amba_port *uap)
+{
+ uap->clk_state = PL011_PORT_OFF;
+ INIT_DELAYED_WORK(&uap->clk_off_work, pl011_clock_off);
+ uap->clk_off_delay = HZ / 10; /* 100 ms */
+}
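+
+/*
+ * From user space the clock simply follows the termios baud rate: B0
+ * requests the clock off, any other rate turns it back on. A minimal
+ * sketch (assuming an open file descriptor "fd" on a ttyAMA port):
+ *
+ *	struct termios tio;
+ *
+ *	tcgetattr(fd, &tio);
+ *	cfsetospeed(&tio, B0);		/- ask for the UART clock to go off -/
+ *	tcsetattr(fd, TCSADRAIN, &tio);
+ */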
+
+#else
+/* Blank functions for clock control */
+static inline void pl011_clock_check(struct uart_amba_port *uap)
+{
+}
+
+static inline int pl011_power_startup(struct uart_amba_port *uap)
+{
+ pm_runtime_get_sync(uap->port.dev);
+ return clk_enable(uap->clk);
+}
+
+static inline void pl011_power_shutdown(struct uart_amba_port *uap)
+{
+ clk_disable(uap->clk);
+ pm_runtime_put_sync(uap->port.dev);
+}
+
+static inline void
+pl011_clock_control(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+}
+
+static inline void pl011_clock_control_init(struct uart_amba_port *uap)
+{
+}
+#endif
+
static void pl011_stop_tx(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1192,6 +1500,9 @@ static void pl011_tx_chars(struct uart_amba_port *uap)
break;
} while (--count > 0);
+ if (count)
+ pl011_clock_check(uap);
+
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
@@ -1237,7 +1548,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
do {
writew(status & ~(UART011_TXIS|UART011_RTIS|
UART011_RXIS),
- uap->port.membase + UART011_ICR);
+ uap->port.membase + UART011_ICR);
if (status & (UART011_RTIS|UART011_RXIS)) {
if (pl011_dma_rx_running(uap))
@@ -1252,8 +1563,10 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
pl011_tx_chars(uap);
if (pass_counter-- == 0) {
- if (uap->interrupt_may_hang)
+ if (uap->interrupt_may_hang) {
+ uart_wa_tlet_line = uap->port.line;
tasklet_schedule(&pl011_lockup_tlet);
+ }
break;
}
@@ -1282,7 +1595,7 @@ static unsigned int pl01x_get_mctrl(struct uart_port *port)
#define TIOCMBIT(uartbit, tiocmbit) \
if (status & uartbit) \
- result |= tiocmbit
+ (result |= tiocmbit)
TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
@@ -1300,10 +1613,12 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
cr = readw(uap->port.membase + UART011_CR);
#define TIOCMBIT(tiocmbit, uartbit) \
- if (mctrl & tiocmbit) \
- cr |= uartbit; \
- else \
- cr &= ~uartbit
+ do {\
+ if (mctrl & tiocmbit) \
+ cr |= uartbit; \
+ else \
+ cr &= ~uartbit; \
+ } while (0)
TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
@@ -1373,9 +1688,9 @@ static int pl011_startup(struct uart_port *port)
goto out;
/*
- * Try to enable the clock producer.
+ * Try to enable the clock producer and the regulator.
*/
- retval = clk_enable(uap->clk);
+ retval = pl011_power_startup(uap);
if (retval)
goto clk_unprep;
@@ -1388,29 +1703,7 @@ static int pl011_startup(struct uart_port *port)
if (retval)
goto clk_dis;
- writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
-
- /*
- * Provoke TX FIFO interrupt into asserting.
- */
- cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
- writew(cr, uap->port.membase + UART011_CR);
- writew(0, uap->port.membase + UART011_FBRD);
- writew(1, uap->port.membase + UART011_IBRD);
- writew(0, uap->port.membase + uap->lcrh_rx);
- if (uap->lcrh_tx != uap->lcrh_rx) {
- int i;
- /*
- * Wait 10 PCLKs before writing LCRH_TX register,
- * to get this delay write read only register 10 times
- */
- for (i = 0; i < 10; ++i)
- writew(0xff, uap->port.membase + UART011_MIS);
- writew(0, uap->port.membase + uap->lcrh_tx);
- }
- writew(0, uap->port.membase + UART01x_DR);
- while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
- barrier();
+ __pl011_startup(uap);
/* restore RTS and DTR */
cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
@@ -1424,7 +1717,8 @@ static int pl011_startup(struct uart_port *port)
/*
* initialise the old status of the modem signals
*/
- uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
+ uap->old_status = readw(uap->port.membase + UART01x_FR) &
+ UART01x_FR_MODEM_ANY;
/* Startup DMA */
pl011_dma_startup(uap);
@@ -1452,7 +1746,7 @@ static int pl011_startup(struct uart_port *port)
return 0;
clk_dis:
- clk_disable(uap->clk);
+ pl011_power_shutdown(uap);
clk_unprep:
clk_unprepare(uap->clk);
out:
@@ -1462,11 +1756,11 @@ static int pl011_startup(struct uart_port *port)
static void pl011_shutdown_channel(struct uart_amba_port *uap,
unsigned int lcrh)
{
- unsigned long val;
+ unsigned long val;
- val = readw(uap->port.membase + lcrh);
- val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
- writew(val, uap->port.membase + lcrh);
+ val = readw(uap->port.membase + lcrh);
+ val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
+ writew(val, uap->port.membase + lcrh);
}
static void pl011_shutdown(struct uart_port *port)
@@ -1510,10 +1804,18 @@ static void pl011_shutdown(struct uart_port *port)
if (uap->lcrh_rx != uap->lcrh_tx)
pl011_shutdown_channel(uap, uap->lcrh_tx);
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->exit)
+ plat->exit();
+ }
+
/*
- * Shut down the clock producer
+	 * Shut down the clock producer and the regulator
*/
- clk_disable(uap->clk);
+ pl011_power_shutdown(uap);
clk_unprepare(uap->clk);
if (uap->port.dev->platform_data) {
@@ -1526,6 +1828,32 @@ static void pl011_shutdown(struct uart_port *port)
}
+/* Power/Clock management. */
+static void pl011_serial_pm(struct uart_port *port, unsigned int state,
+unsigned int oldstate)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ switch (state) {
+	case 0: /* fully on */
+ /*
+ * Enable the peripheral clock for this serial port.
+ * This is called on uart_open() or a resume event.
+ */
+ pl011_power_startup(uap);
+ break;
+ case 3: /* powered down */
+ /*
+ * Disable the peripheral clock for this serial port.
+ * This is called on uart_close() or a suspend event.
+ */
+ pl011_power_shutdown(uap);
+ break;
+ default:
+ printk(KERN_ERR "pl011_serial: unknown pm %d\n", state);
+ }
+}
+
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@@ -1539,7 +1867,12 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
clkdiv = 8;
else
clkdiv = 16;
-
+ /*
+	 * Must be called before uart_get_baud_rate(), because that function
+	 * replaces a zero baudrate (B0 hangup) with the default rate.
+ */
+ pl011_clock_control(port, termios, old);
/*
* Ask the core to calculate the divisor for us.
*/
@@ -1561,7 +1894,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
case CS7:
lcr_h = UART01x_LCRH_WLEN_7;
break;
- default: // CS8
+ default: /* CS8 */
lcr_h = UART01x_LCRH_WLEN_8;
break;
}
@@ -1727,14 +2060,13 @@ static struct uart_ops amba_pl011_pops = {
.request_port = pl010_request_port,
.config_port = pl010_config_port,
.verify_port = pl010_verify_port,
+ .pm = pl011_serial_pm,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = pl010_get_poll_char,
.poll_put_char = pl010_put_poll_char,
#endif
};
-static struct uart_amba_port *amba_ports[UART_NR];
-
#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
static void pl011_console_putchar(struct uart_port *port, int ch)
@@ -1878,6 +2210,13 @@ static struct console amba_console = {
.data = &amba_reg,
};
+static int __init pl011_console_init(void)
+{
+ register_console(&amba_console);
+ return 0;
+}
+console_initcall(pl011_console_init);
+
#define AMBA_CONSOLE (&amba_console)
#else
#define AMBA_CONSOLE NULL
@@ -1892,7 +2231,6 @@ static struct uart_driver amba_reg = {
.nr = UART_NR,
.cons = AMBA_CONSOLE,
};
-
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
@@ -1921,6 +2259,12 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
goto free;
}
+ uap->regulator = regulator_get(&dev->dev, "v-uart");
+ if (IS_ERR(uap->regulator)) {
+ dev_warn(&dev->dev, "could not get uart regulator\n");
+ uap->regulator = NULL;
+ }
+
uap->clk = clk_get(&dev->dev, NULL);
if (IS_ERR(uap->clk)) {
ret = PTR_ERR(uap->clk);
@@ -1928,6 +2272,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
}
uap->vendor = vendor;
+ uap->ifls = vendor->ifls;
uap->lcrh_rx = vendor->lcrh_rx;
uap->lcrh_tx = vendor->lcrh_tx;
uap->old_cr = 0;
@@ -1949,18 +2294,28 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
amba_ports[i] = uap;
amba_set_drvdata(dev, uap);
+
+ pl011_clock_control_init(uap);
+
ret = uart_add_one_port(&amba_reg, &uap->port);
+
+ if (!ret)
+ pm_runtime_put(&dev->dev);
+
if (ret) {
amba_set_drvdata(dev, NULL);
amba_ports[i] = NULL;
pl011_dma_remove(uap);
clk_put(uap->clk);
unmap:
+ if (uap->regulator)
+ regulator_put(uap->regulator);
iounmap(base);
free:
kfree(uap);
}
out:
+
return ret;
}
@@ -1971,6 +2326,8 @@ static int pl011_remove(struct amba_device *dev)
amba_set_drvdata(dev, NULL);
+ pm_runtime_get_sync(uap->port.dev);
+
uart_remove_one_port(&amba_reg, &uap->port);
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
@@ -1979,6 +2336,8 @@ static int pl011_remove(struct amba_device *dev)
pl011_dma_remove(uap);
iounmap(uap->port.membase);
+ if (uap->regulator)
+ regulator_put(uap->regulator);
clk_put(uap->clk);
kfree(uap);
return 0;
@@ -1991,7 +2350,12 @@ static int pl011_suspend(struct amba_device *dev, pm_message_t state)
if (!uap)
return -EINVAL;
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+ cancel_delayed_work_sync(&uap->clk_off_work);
+ if (uap->clk_state == PL011_CLK_OFF)
+ return 0;
+#endif
return uart_suspend_port(&amba_reg, &uap->port);
}
@@ -2001,6 +2365,10 @@ static int pl011_resume(struct amba_device *dev)
if (!uap)
return -EINVAL;
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+ if (uap->clk_state == PL011_CLK_OFF)
+ return 0;
+#endif
return uart_resume_port(&amba_reg, &uap->port);
}
@@ -2059,7 +2427,7 @@ static void __exit pl011_exit(void)
* While this can be a module, if builtin it's most likely the console
* So let's leave module_exit but move module_init to an earlier place
*/
-arch_initcall(pl011_init);
+subsys_initcall(pl011_init);
module_exit(pl011_exit);
MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 265c2f675d0..0c2bc00cc12 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -30,6 +30,12 @@
#include "usb.h"
+#ifdef CONFIG_ARCH_U8500
+#define MAX_TOPO_LEVEL_U8500 2
+#define MAX_USB_DEVICE_U8500 8
+int usb_device_count;
+#endif
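+
+/*
+ * usb_device_count is simple bookkeeping for the U8500 port: it is reset to
+ * 1 when a device is connected directly to the root hub, incremented and
+ * decremented in usb_notify_add_device()/usb_notify_remove_device() (see
+ * notify.c) and checked against MAX_USB_DEVICE_U8500 in
+ * hub_port_connect_change() below.
+ */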
+
/* if we are in debug mode, always announce new devices */
#ifdef DEBUG
#ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
@@ -1302,11 +1308,20 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (!hub_is_superspeed(hdev) || !hdev->parent)
usb_enable_autosuspend(hdev);
+#ifdef CONFIG_ARCH_U8500
+ if (hdev->level > MAX_TOPO_LEVEL_U8500) {
+ dev_err(&intf->dev,
+ "Unsupported bus topology: > %d "
+			"hub nesting\n", MAX_TOPO_LEVEL_U8500);
+ return -E2BIG;
+ }
+#else
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
return -E2BIG;
}
+#endif
#ifdef CONFIG_USB_OTG_BLACKLIST_HUB
if (hdev->parent) {
@@ -1588,12 +1603,14 @@ static void choose_devnum(struct usb_device *udev)
* bus->devnum_next. */
devnum = find_next_zero_bit(bus->devmap.devicemap, 128,
bus->devnum_next);
- if (devnum >= 128)
+		/* Due to hardware bugs we need to reserve a device address
+		 * (127) for flushing of endpoints; the MUSB host TX FIFO
+		 * flush workaround below temporarily uses that address. */
+ if (devnum >= 127)
devnum = find_next_zero_bit(bus->devmap.devicemap,
128, 1);
- bus->devnum_next = ( devnum >= 127 ? 1 : devnum + 1);
+ bus->devnum_next = devnum >= 126 ? 1 : devnum + 1;
}
- if (devnum < 128) {
+ if (devnum < 127) {
set_bit(devnum, bus->devmap.devicemap);
udev->devnum = devnum;
}
@@ -3320,6 +3337,22 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
goto loop;
}
+#ifdef CONFIG_ARCH_U8500
+ if (hdev->parent == NULL)
+ usb_device_count = 1;
+
+ if (usb_device_count > MAX_USB_DEVICE_U8500) {
+
+ dev_err(&udev->dev,
+			"more than %d USB devices connected\n",
+ MAX_USB_DEVICE_U8500);
+
+ status = -ENOTCONN; /* Don't retry */
+ goto loop;
+ }
+#endif
+
+
/* reset (non-USB 3.0 devices) and get descriptor */
status = hub_port_init(hub, udev, port1, i);
if (status < 0)
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91dfa2..a5fdc3ac0d7 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -46,11 +46,18 @@ EXPORT_SYMBOL_GPL(usb_unregister_notify);
void usb_notify_add_device(struct usb_device *udev)
{
+#ifdef CONFIG_ARCH_U8500
+ usb_device_count++;
+#endif
+
blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev);
}
void usb_notify_remove_device(struct usb_device *udev)
{
+#ifdef CONFIG_ARCH_U8500
+ usb_device_count--;
+#endif
/* Protect against simultaneous usbfs open */
mutex_lock(&usbfs_mutex);
blocking_notifier_call_chain(&usb_notifier_list,
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 45e8479c377..72c96c376e1 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,5 +1,9 @@
#include <linux/pm.h>
+#ifdef CONFIG_ARCH_U8500
+extern int usb_device_count;
+#endif
+
/* Functions local to drivers/usb/core/ */
extern int usb_create_sysfs_dev_files(struct usb_device *dev);
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index e0e6375ef5d..0bf715a68b1 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -315,6 +315,12 @@ struct usb_ep *usb_ep_autoconfig_ss(
#endif
}
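+	/*
+	 * Give the platform glue first pick of an endpoint: on ux500 this
+	 * routes through musb_gadget_configure_ep() ->
+	 * musb_platform_configure_ep() -> ux500_musb_configure_endpoints()
+	 * (see the musb changes further down in this patch).
+	 */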
+ if (gadget->ops->configure_ep) {
+ ep = gadget->ops->configure_ep(gadget, type, desc);
+ if (ep && ep_matches(gadget, ep, desc, ep_comp))
+ return ep;
+ }
+
/* Second, look at endpoints until an unclaimed one looks usable */
list_for_each_entry (ep, &gadget->ep_list, ep_list) {
if (ep_matches(gadget, ep, desc, ep_comp))
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 11c07cb7d33..1cd2c1cf1cb 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -97,6 +97,19 @@ static inline unsigned ecm_bitrate(struct usb_gadget *g)
/* interface descriptor: */
+static struct usb_interface_assoc_descriptor
+ecm_iad_descriptor = {
+ .bLength = sizeof ecm_iad_descriptor,
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+
+ /* .bFirstInterface = DYNAMIC, */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = USB_CLASS_COMM,
+ .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ /* .iFunction = DYNAMIC */
+};
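+
+/*
+ * The interface association descriptor groups the ECM control and data
+ * interfaces into a single function so that composite-aware hosts bind one
+ * driver to both; bFirstInterface and iFunction are filled in at bind time
+ * by ecm_bind() and ecm_bind_config() below.
+ */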
+
static struct usb_interface_descriptor ecm_control_intf = {
.bLength = sizeof ecm_control_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -199,6 +212,7 @@ static struct usb_endpoint_descriptor fs_ecm_out_desc = {
static struct usb_descriptor_header *ecm_fs_function[] = {
/* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_iad_descriptor,
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
(struct usb_descriptor_header *) &ecm_union_desc,
@@ -247,6 +261,7 @@ static struct usb_endpoint_descriptor hs_ecm_out_desc = {
static struct usb_descriptor_header *ecm_hs_function[] = {
/* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_iad_descriptor,
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
(struct usb_descriptor_header *) &ecm_union_desc,
@@ -339,6 +354,7 @@ static struct usb_string ecm_string_defs[] = {
[0].s = "CDC Ethernet Control Model (ECM)",
[1].s = NULL /* DYNAMIC */,
[2].s = "CDC Ethernet Data",
+ [3].s = "CDC ECM",
{ } /* end of list */
};
@@ -674,6 +690,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
if (status < 0)
goto fail;
ecm->ctrl_id = status;
+ ecm_iad_descriptor.bFirstInterface = status;
ecm_control_intf.bInterfaceNumber = status;
ecm_union_desc.bMasterInterface0 = status;
@@ -864,6 +881,13 @@ ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
return status;
ecm_string_defs[1].id = status;
ecm_desc.iMACAddress = status;
+
+ /* IAD label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ ecm_string_defs[3].id = status;
+ ecm_iad_descriptor.iFunction = status;
}
/* allocate and initialize one new instance */
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 704d1d94f72..f381a604114 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -174,8 +174,8 @@ rndis_iad_descriptor = {
.bFirstInterface = 0, /* XXX, hardcoded */
.bInterfaceCount = 2, // control + data
.bFunctionClass = USB_CLASS_COMM,
- .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ .bFunctionSubClass = USB_CDC_SUBCLASS_ACM,
+ .bFunctionProtocol = USB_CDC_ACM_PROTO_VENDOR,
/* .iFunction = DYNAMIC */
};
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index f70cab3beee..f9e42041b5f 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -34,6 +34,8 @@ if USB_MUSB_HDRC
choice
prompt "Platform Glue Layer"
+ bool
+ default USB_MUSB_UX500 if ARCH_U8500 || ARCH_U5500
config USB_MUSB_DAVINCI
tristate "DaVinci"
@@ -60,7 +62,7 @@ config USB_MUSB_BLACKFIN
config USB_MUSB_UX500
tristate "U8500 and U5500"
- depends on (ARCH_U8500 && AB8500_USB)
+ depends on (ARCH_U8500) || (ARCH_U5500)
endchoice
@@ -114,4 +116,13 @@ config MUSB_PIO_ONLY
endchoice
+config USB_MUSB_DEBUG
+ depends on USB_MUSB_HDRC
+ bool "Enable debugging messages"
+ default n
+ help
+ This enables musb debugging. To set the logging level use the debug
+ module parameter. Starting at level 3, per-transfer (urb, usb_request,
+ packet, or dma transfer) tracing may kick in.
+
endif # USB_MUSB_HDRC
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 3d11cf64ebd..37d53511531 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1018,9 +1018,6 @@ static void musb_shutdown(struct platform_device *pdev)
|| defined(CONFIG_USB_MUSB_AM35X) \
|| defined(CONFIG_USB_MUSB_AM35X_MODULE)
static ushort __initdata fifo_mode = 4;
-#elif defined(CONFIG_USB_MUSB_UX500) \
- || defined(CONFIG_USB_MUSB_UX500_MODULE)
-static ushort __initdata fifo_mode = 5;
#else
static ushort __initdata fifo_mode = 2;
#endif
@@ -1105,8 +1102,8 @@ static struct musb_fifo_cfg __initdata mode_4_cfg[] = {
/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg __initdata mode_5_cfg[] = {
-{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
@@ -1868,7 +1865,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
status = -ENODEV;
goto fail0;
}
-
/* allocate */
musb = allocate_instance(dev, plat->config, ctrl);
if (!musb) {
@@ -2306,7 +2302,7 @@ static int musb_suspend(struct device *dev)
return 0;
}
-static int musb_resume_noirq(struct device *dev)
+static int musb_resume(struct device *dev)
{
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
@@ -2347,12 +2343,12 @@ static int musb_runtime_resume(struct device *dev)
static const struct dev_pm_ops musb_dev_pm_ops = {
.suspend = musb_suspend,
- .resume_noirq = musb_resume_noirq,
+ .resume = musb_resume,
.runtime_suspend = musb_runtime_suspend,
.runtime_resume = musb_runtime_resume,
};
-#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
+#define MUSB_DEV_PM_OPS NULL
#else
#define MUSB_DEV_PM_OPS NULL
#endif
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 3d28fb8a2dc..73fca530aac 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -229,6 +229,8 @@ struct musb_platform_ops {
int (*adjust_channel_params)(struct dma_channel *channel,
u16 packet_sz, u8 *mode,
dma_addr_t *dma_addr, u32 *len);
+ struct usb_ep* (*configure_endpoints)(struct musb *musb, u8 type,
+ struct usb_endpoint_descriptor *desc);
};
/*
@@ -603,4 +605,13 @@ static inline int musb_platform_exit(struct musb *musb)
return musb->ops->exit(musb);
}
+static inline struct usb_ep *musb_platform_configure_ep(struct musb *musb,
+ u8 type, struct usb_endpoint_descriptor *desc)
+{
+ struct usb_ep *ep = NULL;
+
+ if (musb->ops->configure_endpoints)
+ ep = musb->ops->configure_endpoints(musb, type, desc);
+ return ep;
+}
#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ac3d2eec20f..270cf819a65 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -401,7 +401,21 @@ static void txstate(struct musb *musb, struct musb_request *req)
csr |= (MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_DMAMODE
| MUSB_TXCSR_MODE);
- if (!musb_ep->hb_mult)
+ /*
+ * Enable Autoset according to table
+ * below
+ * ************************************
+ * bulk_split hb_mult Autoset_Enable
+ * ************************************
+ * 0 0 Yes(Normal)
+ * 0 >0 No(High BW ISO)
+ * 1 0 Yes(HS bulk)
+ * 1 >0 Yes(FS bulk)
+ */
+ if (!musb_ep->hb_mult ||
+ (musb_ep->hb_mult &&
+ can_bulk_split(musb,
+ musb_ep->type)))
csr |= MUSB_TXCSR_AUTOSET;
}
csr &= ~MUSB_TXCSR_P_UNDERRUN;
@@ -1079,6 +1093,12 @@ static int musb_gadget_enable(struct usb_ep *ep,
/* REVISIT if can_bulk_split(), use by updating "tmp";
* likewise high bandwidth periodic tx
*/
+ /* Set the TXMAXP register correctly for Bulk IN
+ * endpoints in device mode
+ */
+ if (can_bulk_split(musb, musb_ep->type))
+ musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
+ musb_ep->packet_sz) - 1;
/* Set TXMAXP with the FIFO size of the endpoint
* to disable double buffering mode.
*/
@@ -1735,6 +1755,14 @@ static int musb_gadget_start(struct usb_gadget *g,
static int musb_gadget_stop(struct usb_gadget *g,
struct usb_gadget_driver *driver);
+static struct usb_ep *musb_gadget_configure_ep(struct usb_gadget *gadget,
+ u8 type, struct usb_endpoint_descriptor *desc)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ return musb_platform_configure_ep(musb, type, desc);
+}
+
static const struct usb_gadget_ops musb_gadget_operations = {
.get_frame = musb_gadget_get_frame,
.wakeup = musb_gadget_wakeup,
@@ -1744,6 +1772,7 @@ static const struct usb_gadget_ops musb_gadget_operations = {
.pullup = musb_gadget_pullup,
.udc_start = musb_gadget_start,
.udc_stop = musb_gadget_stop,
+ .configure_ep = musb_gadget_configure_ep,
};
/* ----------------------------------------------------------------------- */
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 79cb0af779f..938377a50a9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -46,7 +46,6 @@
#include "musb_core.h"
#include "musb_host.h"
-
/* MUSB HOST status 22-mar-2006
*
* - There's still lots of partial code duplication for fault paths, so
@@ -108,24 +107,41 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
+ void __iomem *regs = ep->musb->mregs;
u16 csr;
- u16 lastcsr = 0;
- int retries = 1000;
+ u8 addr;
+ int retries = 3000; /* 3ms */
+ /*
+ * NOTE: We are using a hack here because the FIFO-FLUSH
+ * bit is broken in hardware! The hack consists of changing
+ * the TXFUNCADDR to an unused device address and waiting
+	 * for any pending USB packets to hit the 3-strikes and you're
+ * gone rule.
+ */
+ addr = musb_readb(regs, MUSB_BUSCTL_OFFSET(ep->epnum, MUSB_TXFUNCADDR));
csr = musb_readw(epio, MUSB_TXCSR);
while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
- if (csr != lastcsr)
- dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
- lastcsr = csr;
- csr |= MUSB_TXCSR_FLUSHFIFO;
- musb_writew(epio, MUSB_TXCSR, csr);
+ musb_writeb(regs, MUSB_BUSCTL_OFFSET(ep->epnum,
+ MUSB_TXFUNCADDR), 127);
csr = musb_readw(epio, MUSB_TXCSR);
- if (WARN(retries-- < 1,
- "Could not flush host TX%d fifo: csr: %04x\n",
- ep->epnum, csr))
- return;
- mdelay(1);
+ retries--;
+ if (retries == 0) {
+ /* can happen if the USB clocks are OFF */
+ dev_dbg(musb->controller, "Could not flush host TX%d "
+ "fifo: csr=0x%04x\n", ep->epnum, csr);
+ break;
+ }
+ udelay(1);
}
+ /* clear any errors */
+ csr &= ~(MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT);
+ musb_writew(epio, MUSB_TXCSR, csr);
+
+ /* restore endpoint address */
+ musb_writeb(regs, MUSB_BUSCTL_OFFSET(ep->epnum, MUSB_TXFUNCADDR), addr);
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
@@ -615,16 +631,26 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
u16 csr;
u8 mode;
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
if (length > channel->max_len)
length = channel->max_len;
csr = musb_readw(epio, MUSB_TXCSR);
- if (length > pkt_size) {
+ if (length >= pkt_size) {
mode = 1;
csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
/* autoset shouldn't be set in high bandwidth */
- if (qh->hb_mult == 1)
+ /*
+ * Enable Autoset according to table
+ * below
+ * bulk_split hb_mult Autoset_Enable
+ * 0 1 Yes(Normal)
+ * 0 >1 No(High BW ISO)
+ * 1 1 Yes(HS bulk)
+ * 1 >1 Yes(FS bulk)
+ */
+ if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
+ can_bulk_split(hw_ep->musb, qh->type)))
csr |= MUSB_TXCSR_AUTOSET;
} else {
mode = 0;
@@ -771,6 +797,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* protocol/endpoint/interval/NAKlimit */
if (epnum) {
musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
+ /*
+ * Set the TXMAXP register correctly for Bulk OUT
+ * endpoints in host mode
+ */
+ if (can_bulk_split(musb, qh->type))
+ qh->hb_mult = hw_ep->max_packet_sz_tx
+ / packet_sz;
if (musb->double_buffer_not_ok)
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
@@ -802,6 +835,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
if (load_count) {
/* PIO to load FIFO */
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
qh->segsize = load_count;
musb_write_fifo(hw_ep, load_count, buf);
}
@@ -894,6 +929,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
if (fifo_count < len)
urb->status = -EOVERFLOW;
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
musb_read_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
@@ -933,6 +970,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
fifo_count,
(fifo_count == 1) ? "" : "s",
fifo_dest);
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
musb_write_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
@@ -1134,6 +1173,22 @@ void musb_host_tx(struct musb *musb, u8 epnum)
dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
status = -ETIMEDOUT;
+ } else if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
+ /* BUSY - can happen during USB transfer cancel */
+
+		/* MUSB_TXCSR_TXPKTRDY indicates that data written to the
+		 * FIFO by DMA has not yet gone out on the USB bus. The DMA
+		 * completion callback does not guarantee that the data has
+		 * left the controller, so if we reach this case we need to
+		 * wait for MUSB_TXCSR_TXPKTRDY to be cleared before
+		 * proceeding.
+ */
+ dev_dbg(musb->controller, "TXPKTRDY set. Data transfer ongoing. Wait...\n");
+
+ do {
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+ } while ((tx_csr & MUSB_TXCSR_TXPKTRDY) != 0);
+ dev_dbg(musb->controller, "TXPKTRDY Cleared. Continue...\n");
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
@@ -1427,7 +1482,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
size_t xfer_len;
void __iomem *mbase = musb->mregs;
int pipe;
- u16 rx_csr, val;
+ u16 rx_csr, val, restore_csr;
bool iso_err = false;
bool done = false;
u32 status;
@@ -1537,7 +1592,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* FIXME this is _way_ too much in-line logic for Mentor DMA */
-#ifndef CONFIG_USB_INVENTRA_DMA
+#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA)
if (rx_csr & MUSB_RXCSR_H_REQPKT) {
/* REVISIT this happened for a while on some short reads...
* the cleanup still needs investigation... looks bad...
@@ -1569,7 +1624,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
@@ -1625,7 +1680,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
}
/* we are expecting IN packets */
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
if (dma) {
struct dma_controller *c;
u16 rx_count;
@@ -1709,6 +1764,11 @@ void musb_host_rx(struct musb *musb, u8 epnum)
*/
val = musb_readw(epio, MUSB_RXCSR);
+
+ /* retain the original value,
+ * which will be used to reset CSR
+ */
+ restore_csr = val;
val &= ~MUSB_RXCSR_H_REQPKT;
if (dma->desired_mode == 0)
@@ -1736,7 +1796,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
c->channel_release(dma);
hw_ep->rx_channel = NULL;
dma = NULL;
- /* REVISIT reset CSR */
+ musb_writew(epio, MUSB_RXCSR, restore_csr);
}
}
#endif /* Mentor DMA */
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index f7e04bf34a1..ce169460148 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -25,9 +25,13 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <mach/usb.h>
#include "musb_core.h"
+#define DEFAULT_DEVCTL 0x81
+static void ux500_musb_set_vbus(struct musb *musb, int is_on);
+
struct ux500_glue {
struct device *dev;
struct platform_device *musb;
@@ -35,17 +39,394 @@ struct ux500_glue {
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
+static struct timer_list notify_timer;
+struct musb_context_registers context;
+struct musb *_musb;
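+
+/**
+ * ux500_store_context() - save the MUSB register context
+ * @musb: struct musb pointer
+ *
+ * Saves the common and per-endpoint MUSB registers into the file-local
+ * context so that ux500_restore_context() can reprogram the controller
+ * after it has been powered down (CONFIG_PM only).
+ */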
+void ux500_store_context(struct musb *musb)
+{
+#ifdef CONFIG_PM
+ int i;
+ void __iomem *musb_base;
+ void __iomem *epio;
+
+ if (musb != NULL)
+ _musb = musb;
+ else
+ return;
+
+ musb_base = musb->mregs;
+
+ if (is_host_enabled(musb)) {
+ context.frame = musb_readw(musb_base, MUSB_FRAME);
+ context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
+ context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
+ }
+ context.power = musb_readb(musb_base, MUSB_POWER);
+ context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
+ context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
+ context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
+ context.index = musb_readb(musb_base, MUSB_INDEX);
+ context.devctl = DEFAULT_DEVCTL;
+
+ for (i = 0; i < musb->config->num_eps; ++i) {
+ struct musb_hw_ep *hw_ep;
+
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ hw_ep = &musb->endpoints[i];
+ if (!hw_ep)
+ continue;
+
+ epio = hw_ep->regs;
+ if (!epio)
+ continue;
+
+ context.index_regs[i].txmaxp =
+ musb_readw(epio, MUSB_TXMAXP);
+ context.index_regs[i].txcsr =
+ musb_readw(epio, MUSB_TXCSR);
+ context.index_regs[i].rxmaxp =
+ musb_readw(epio, MUSB_RXMAXP);
+ context.index_regs[i].rxcsr =
+ musb_readw(epio, MUSB_RXCSR);
+
+ if (musb->dyn_fifo) {
+ context.index_regs[i].txfifoadd =
+ musb_read_txfifoadd(musb_base);
+ context.index_regs[i].rxfifoadd =
+ musb_read_rxfifoadd(musb_base);
+ context.index_regs[i].txfifosz =
+ musb_read_txfifosz(musb_base);
+ context.index_regs[i].rxfifosz =
+ musb_read_rxfifosz(musb_base);
+ }
+ if (is_host_enabled(musb)) {
+ context.index_regs[i].txtype =
+ musb_readb(epio, MUSB_TXTYPE);
+ context.index_regs[i].txinterval =
+ musb_readb(epio, MUSB_TXINTERVAL);
+ context.index_regs[i].rxtype =
+ musb_readb(epio, MUSB_RXTYPE);
+ context.index_regs[i].rxinterval =
+ musb_readb(epio, MUSB_RXINTERVAL);
+
+ context.index_regs[i].txfunaddr =
+ musb_read_txfunaddr(musb_base, i);
+ context.index_regs[i].txhubaddr =
+ musb_read_txhubaddr(musb_base, i);
+ context.index_regs[i].txhubport =
+ musb_read_txhubport(musb_base, i);
+
+ context.index_regs[i].rxfunaddr =
+ musb_read_rxfunaddr(musb_base, i);
+ context.index_regs[i].rxhubaddr =
+ musb_read_rxhubaddr(musb_base, i);
+ context.index_regs[i].rxhubport =
+ musb_read_rxhubport(musb_base, i);
+ }
+ }
+#endif
+}
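+
+/**
+ * ux500_restore_context() - restore the saved MUSB register context
+ *
+ * Writes back the register values captured by ux500_store_context(); does
+ * nothing if no context has been stored (CONFIG_PM only).
+ */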
+void ux500_restore_context(void)
+{
+#ifdef CONFIG_PM
+ int i;
+ struct musb *musb;
+ void __iomem *musb_base;
+ void __iomem *ep_target_regs;
+ void __iomem *epio;
+
+ if (_musb != NULL)
+ musb = _musb;
+ else
+ return;
+
+ musb_base = musb->mregs;
+ if (is_host_enabled(musb)) {
+ musb_writew(musb_base, MUSB_FRAME, context.frame);
+ musb_writeb(musb_base, MUSB_TESTMODE, context.testmode);
+ musb_write_ulpi_buscontrol(musb->mregs, context.busctl);
+ }
+ musb_writeb(musb_base, MUSB_POWER, context.power);
+ musb_writew(musb_base, MUSB_INTRTXE, context.intrtxe);
+ musb_writew(musb_base, MUSB_INTRRXE, context.intrrxe);
+ musb_writeb(musb_base, MUSB_INTRUSBE, context.intrusbe);
+ musb_writeb(musb_base, MUSB_DEVCTL, context.devctl);
+
+ for (i = 0; i < musb->config->num_eps; ++i) {
+ struct musb_hw_ep *hw_ep;
+
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ hw_ep = &musb->endpoints[i];
+ if (!hw_ep)
+ continue;
+
+ epio = hw_ep->regs;
+ if (!epio)
+ continue;
+
+ musb_writew(epio, MUSB_TXMAXP,
+ context.index_regs[i].txmaxp);
+ musb_writew(epio, MUSB_TXCSR,
+ context.index_regs[i].txcsr);
+ musb_writew(epio, MUSB_RXMAXP,
+ context.index_regs[i].rxmaxp);
+ musb_writew(epio, MUSB_RXCSR,
+ context.index_regs[i].rxcsr);
+
+ if (musb->dyn_fifo) {
+ musb_write_txfifosz(musb_base,
+ context.index_regs[i].txfifosz);
+ musb_write_rxfifosz(musb_base,
+ context.index_regs[i].rxfifosz);
+ musb_write_txfifoadd(musb_base,
+ context.index_regs[i].txfifoadd);
+ musb_write_rxfifoadd(musb_base,
+ context.index_regs[i].rxfifoadd);
+ }
+
+ if (is_host_enabled(musb)) {
+ musb_writeb(epio, MUSB_TXTYPE,
+ context.index_regs[i].txtype);
+ musb_writeb(epio, MUSB_TXINTERVAL,
+ context.index_regs[i].txinterval);
+ musb_writeb(epio, MUSB_RXTYPE,
+ context.index_regs[i].rxtype);
+			musb_writeb(epio, MUSB_RXINTERVAL,
+				context.index_regs[i].rxinterval);
+ musb_write_txfunaddr(musb_base, i,
+ context.index_regs[i].txfunaddr);
+ musb_write_txhubaddr(musb_base, i,
+ context.index_regs[i].txhubaddr);
+ musb_write_txhubport(musb_base, i,
+ context.index_regs[i].txhubport);
+
+ ep_target_regs =
+ musb_read_target_reg_base(i, musb_base);
+
+ musb_write_rxfunaddr(ep_target_regs,
+ context.index_regs[i].rxfunaddr);
+ musb_write_rxhubaddr(ep_target_regs,
+ context.index_regs[i].rxhubaddr);
+ musb_write_rxhubport(ep_target_regs,
+ context.index_regs[i].rxhubport);
+ }
+ }
+ musb_writeb(musb_base, MUSB_INDEX, context.index);
+#endif
+}
+
+static void musb_notify_idle(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ unsigned long flags;
+
+ u8 devctl;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+
+ case OTG_STATE_A_SUSPEND:
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/* blocking notifier support */
+static int musb_otg_notifications(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ struct musb *musb = container_of(nb, struct musb, nb);
+
+ switch (event) {
+ case USB_EVENT_ID:
+ case USB_EVENT_RIDA:
+ dev_dbg(musb->controller, "ID GND\n");
+ if (is_otg_enabled(musb)) {
+ ux500_musb_set_vbus(musb, 1);
+ }
+ break;
+
+ case USB_EVENT_VBUS:
+ dev_dbg(musb->controller, "VBUS Connect\n");
+
+ break;
+
+ case USB_EVENT_NONE:
+ dev_dbg(musb->controller, "VBUS Disconnect\n");
+ if (is_otg_enabled(musb))
+ ux500_musb_set_vbus(musb, 0);
+
+ break;
+ default:
+ dev_dbg(musb->controller, "ID float\n");
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static void ux500_musb_set_vbus(struct musb *musb, int is_on)
+{
+ u8 devctl;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ int ret = 1;
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ if (musb->xceiv->state == OTG_STATE_A_IDLE) {
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ /*
+ * Wait for the musb to set as A device to enable the
+ * VBUS
+ */
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(musb->controller,
+						"timed out waiting to be configured as A device\n");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ } else {
+ musb->is_active = 1;
+ musb->xceiv->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+ MUSB_HST_MODE(musb);
+ }
+ } else {
+ musb->is_active = 0;
+
+ /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
+ * jumping right to B_IDLE...
+ */
+
+ musb->xceiv->default_a = 0;
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ MUSB_DEV_MODE(musb);
+ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x "
+ /* otg %3x conf %08x prcm %08x */ "\n",
+ otg_state_string(musb->xceiv->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+
+static void ux500_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || ((musb->a_wait_bcon == 0)
+ && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ otg_state_string(musb->xceiv->state));
+ del_timer(&notify_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout)) {
+ if (!timer_pending(&notify_timer))
+ last_timer = timeout;
+ else {
+ dev_dbg(musb->controller, "Longer idle timer "
+ "already pending, ignoring\n");
+ return;
+ }
+ }
+ last_timer = timeout;
+
+	dev_dbg(musb->controller, "%s inactive, starting idle timer for %lu ms\n",
+ otg_state_string(musb->xceiv->state),
+ (unsigned long)jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&notify_timer, timeout);
+}
+
+static void ux500_musb_enable(struct musb *musb)
+{
+ ux500_store_context(musb);
+}
+
+static struct usb_ep *ux500_musb_configure_endpoints(struct musb *musb,
+ u8 type, struct usb_endpoint_descriptor *desc)
+{
+ struct usb_ep *ep = NULL;
+ struct usb_gadget *gadget = &musb->g;
+ char name[4];
+
+ if (USB_ENDPOINT_XFER_INT == type) {
+ list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+ if (ep->maxpacket == 512)
+ continue;
+ if (NULL == ep->driver_data) {
+ strncpy(name, (ep->name + 3), 4);
+ if (USB_DIR_IN & desc->bEndpointAddress)
+ if (strcmp("in", name) == 0)
+ return ep;
+ }
+ }
+ }
+ return ep;
+}
+
static int ux500_musb_init(struct musb *musb)
{
+ int status;
musb->xceiv = otg_get_transceiver();
if (!musb->xceiv) {
pr_err("HS USB OTG: no transceiver configured\n");
return -ENODEV;
}
+ musb->nb.notifier_call = musb_otg_notifications;
+ status = otg_register_notifier(musb->xceiv, &musb->nb);
+
+ if (status < 0) {
+ dev_dbg(musb->controller, "notification register failed\n");
+ goto err1;
+ }
+
+ setup_timer(&notify_timer, musb_notify_idle, (unsigned long) musb);
+
return 0;
+err1:
+ return status;
}
+/**
+ * ux500_musb_exit() - release the OTG transceiver.
+ * @musb: struct musb pointer.
+ *
+ * This function puts back the OTG transceiver taken in ux500_musb_init().
+ */
static int ux500_musb_exit(struct musb *musb)
{
otg_put_transceiver(musb->xceiv);
@@ -56,8 +437,21 @@ static int ux500_musb_exit(struct musb *musb)
static const struct musb_platform_ops ux500_ops = {
.init = ux500_musb_init,
.exit = ux500_musb_exit,
+
+ .set_vbus = ux500_musb_set_vbus,
+ .try_idle = ux500_musb_try_idle,
+
+ .enable = ux500_musb_enable,
+ .configure_endpoints = ux500_musb_configure_endpoints,
};
+/**
+ * ux500_probe() - Allocate the resources.
+ * @pdev: struct platform_device.
+ *
+ * This function allocates the required memory for the
+ * structures and initializes the interrupts.
+ */
static int __init ux500_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
@@ -155,6 +549,13 @@ static int __exit ux500_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
+/**
+ * ux500_suspend() - Handles the platform suspend.
+ * @dev: struct device
+ *
+ * This function gets triggered when the platform
+ * is going to suspend
+ * is about to suspend.
static int ux500_suspend(struct device *dev)
{
struct ux500_glue *glue = dev_get_drvdata(dev);
@@ -166,6 +567,13 @@ static int ux500_suspend(struct device *dev)
return 0;
}
+/**
+ * ux500_resume() - Handles the platform resume.
+ * @dev: struct device
+ *
+ * This function gets triggered when the platform
+ * is about to resume.
+ */
static int ux500_resume(struct device *dev)
{
struct ux500_glue *glue = dev_get_drvdata(dev);
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index 97cb45916c4..09f3c5470f8 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -32,6 +32,11 @@
#include <linux/pfn.h>
#include <mach/usb.h>
#include "musb_core.h"
+#undef DBG
+#undef WARNING
+#undef INFO
+#include <linux/usb/composite.h>
+#define Ux500_USB_DMA_MIN_TRANSFER_SIZE 512
struct ux500_dma_channel {
struct dma_channel channel;
@@ -64,14 +69,14 @@ void ux500_dma_callback(void *private_data)
struct musb *musb = hw_ep->musb;
unsigned long flags;
- dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n",
+ dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n",
hw_ep->epnum);
spin_lock_irqsave(&musb->lock, flags);
ux500_channel->channel.actual_len = ux500_channel->cur_len;
ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
musb_dma_completion(musb, hw_ep->epnum,
- ux500_channel->is_tx);
+ ux500_channel->is_tx);
spin_unlock_irqrestore(&musb->lock, flags);
}
@@ -134,6 +139,15 @@ static bool ux500_configure_channel(struct dma_channel *channel,
return true;
}
+/**
+ * ux500_dma_channel_allocate() - allocates a DMA channel
+ * @c: pointer to DMA controller
+ * @hw_ep: pointer to endpoint
+ * @is_tx: transmit or receive direction
+ *
+ * This function allocates the DMA channel and initializes
+ * the channel
+*/
static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
struct musb_hw_ep *hw_ep, u8 is_tx)
{
@@ -172,7 +186,13 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
return &(ux500_channel->channel);
}
-
+/**
+ * ux500_dma_channel_release() - releases the DMA channel
+ * @channel: channel to be released
+ *
+ * This function releases the DMA channel
+ *
+*/
static void ux500_dma_channel_release(struct dma_channel *channel)
{
struct ux500_dma_channel *ux500_channel = channel->private_data;
@@ -190,26 +210,71 @@ static void ux500_dma_channel_release(struct dma_channel *channel)
static int ux500_dma_is_compatible(struct dma_channel *channel,
u16 maxpacket, void *buf, u32 length)
{
- if ((maxpacket & 0x3) ||
- ((int)buf & 0x3) ||
- (length < 512) ||
- (length & 0x3))
- return false;
- else
- return true;
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+ struct usb_descriptor_header **descriptors;
+ struct usb_function *f;
+ struct usb_gadget *gadget = &musb->g;
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+
+ if (length < Ux500_USB_DMA_MIN_TRANSFER_SIZE)
+ return 0;
+
+ list_for_each_entry(f, &cdev->config->functions, list) {
+ if (!strcmp(f->name, "cdc_ethernet") ||
+ !strcmp(f->name, "rndis") ||
+ !strcmp(f->name, "mtp") ||
+ !strcmp(f->name, "phonet") ||
+ !strcmp(f->name, "adb")) {
+ if (gadget->speed == USB_SPEED_HIGH)
+ descriptors = f->hs_descriptors;
+ else
+ descriptors = f->descriptors;
+
+ for (; *descriptors; ++descriptors) {
+ struct usb_endpoint_descriptor *ep;
+
+ if ((*descriptors)->bDescriptorType !=
+ USB_DT_ENDPOINT)
+ continue;
+
+ ep = (struct usb_endpoint_descriptor *)
+ *descriptors;
+ if (ep->bEndpointAddress ==
+ ux500_channel->hw_ep->epnum)
+ return 0;
+ }
+ }
+ }
+
+ return 1;
}
+/**
+ * ux500_dma_channel_program() - Configures the channel and initiates transfer
+ * @channel: pointer to DMA channel
+ * @packet_sz: packet size
+ * @mode: mode
+ * @dma_addr: physical address of memory
+ * @len: length
+ *
+ * This function configures the channel and initiates the DMA transfer
+*/
static int ux500_dma_channel_program(struct dma_channel *channel,
u16 packet_sz, u8 mode,
dma_addr_t dma_addr, u32 len)
{
int ret;
+ struct ux500_dma_channel *ux500_dma_channel = channel->private_data;
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
- if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len))
- return false;
+ if (len < Ux500_USB_DMA_MIN_TRANSFER_SIZE)
+ return 0;
+ if (!ux500_dma_channel->is_tx && len < packet_sz)
+ return 0;
channel->status = MUSB_DMA_STATUS_BUSY;
channel->actual_len = 0;
@@ -220,6 +285,12 @@ static int ux500_dma_channel_program(struct dma_channel *channel,
return ret;
}
+/**
+ * ux500_dma_channel_abort() - aborts the DMA transfer
+ * @channel: pointer to DMA channel.
+ *
+ * This function aborts the DMA transfer.
+*/
static int ux500_dma_channel_abort(struct dma_channel *channel)
{
struct ux500_dma_channel *ux500_channel = channel->private_data;
@@ -254,6 +325,12 @@ static int ux500_dma_channel_abort(struct dma_channel *channel)
return 0;
}
+/**
+ * ux500_dma_controller_stop() - releases all the channels and frees the DMA pipes
+ * @c: pointer to DMA controller
+ *
+ * This function releases all of the logical channels and frees the DMA pipes.
+ */
static int ux500_dma_controller_stop(struct dma_controller *c)
{
struct ux500_dma_controller *controller = container_of(c,
@@ -285,6 +362,15 @@ static int ux500_dma_controller_stop(struct dma_controller *c)
return 0;
}
+
+/**
+ * ux500_dma_controller_start() - creates the logical channel pool and registers callbacks
+ * @c: pointer to DMA controller
+ *
+ * This function requests logical channels from the DMA driver, creates the
+ * channels based on event lines, and registers the callbacks that are invoked
+ * after data transfer completes in the transmit or receive direction.
+ */
static int ux500_dma_controller_start(struct dma_controller *c)
{
struct ux500_dma_controller *controller = container_of(c,
@@ -356,6 +442,12 @@ static int ux500_dma_controller_start(struct dma_controller *c)
return 0;
}
+/**
+ * dma_controller_destroy() - deallocates the DMA controller
+ * @c: pointer to DMA controller
+ *
+ * This function deallocates the DMA controller.
+ */
void dma_controller_destroy(struct dma_controller *c)
{
struct ux500_dma_controller *controller = container_of(c,
@@ -364,6 +456,15 @@ void dma_controller_destroy(struct dma_controller *c)
kfree(controller);
}
+/**
+ * dma_controller_create() - creates the DMA controller and initializes callbacks
+ * @musb: pointer to the Mentor core driver data instance
+ * @base: base address of the MUSB registers
+ *
+ * This function creates the DMA controller and initializes the callbacks
+ * that are invoked from the Mentor IP core.
+ */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *base)
{
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 735ef4c2339..0e112b1223f 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -108,6 +108,15 @@ config AB8500_USB
This transceiver supports high and full speed devices plus,
in host mode, low speed.
+config AB5500_USB
+ tristate "AB5500 USB Transceiver Driver"
+ depends on AB5500_CORE
+ select USB_OTG_UTILS
+ help
+ Enable this to support the USB OTG transceiver in AB5500 chip.
+ This transceiver supports high and full speed devices plus,
+ in host mode, low speed.
+
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 && USB_SUSPEND
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 41aa5098b13..e227d9add96 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_USB_ULPI) += ulpi.o
obj-$(CONFIG_USB_ULPI_VIEWPORT) += ulpi_viewport.o
obj-$(CONFIG_USB_MSM_OTG) += msm_otg.o
obj-$(CONFIG_AB8500_USB) += ab8500-usb.o
+obj-$(CONFIG_AB5500_USB) += ab5500-usb.o
fsl_usb2_otg-objs := fsl_otg.o otg_fsm.o
obj-$(CONFIG_FSL_USB2_OTG) += fsl_usb2_otg.o
obj-$(CONFIG_USB_MV_OTG) += mv_otg.o
diff --git a/drivers/usb/otg/ab5500-usb.c b/drivers/usb/otg/ab5500-usb.c
new file mode 100644
index 00000000000..c57234d92be
--- /dev/null
+++ b/drivers/usb/otg/ab5500-usb.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Avinash Kumar <avinash.kumar@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/usb.h>
+#include <linux/kernel_stat.h>
+#include <mach/gpio.h>
+#include <mach/reboot_reasons.h>
+
+/* AB5500 USB macros */
+#define AB5500_USB_HOST_ENABLE 0x1
+#define AB5500_USB_DEVICE_ENABLE 0x2
+#define AB5500_MAIN_WATCHDOG_ENABLE 0x1
+#define AB5500_MAIN_WATCHDOG_KICK 0x2
+#define AB5500_MAIN_WATCHDOG_DISABLE 0x0
+#define AB5500_USB_ADP_ENABLE 0x1
+#define AB5500_WATCHDOG_DELAY 10
+#define AB5500_WATCHDOG_DELAY_US 100
+#define AB5500_PHY_DELAY_US 100
+#define AB5500_MAIN_WDOG_CTRL_REG 0x01
+#define AB5500_USB_LINE_STAT_REG 0x80
+#define AB5500_USB_PHY_CTRL_REG 0x8A
+#define AB5500_MAIN_WATCHDOG_ENABLE 0x1
+#define AB5500_MAIN_WATCHDOG_KICK 0x2
+#define AB5500_MAIN_WATCHDOG_DISABLE 0x0
+#define AB5500_SYS_CTRL2_BLOCK 0x2
+
+/* UsbLineStatus register bit masks */
+#define AB5500_USB_LINK_STATUS_MASK_V1 0x78
+#define AB5500_USB_LINK_STATUS_MASK_V2 0xF8
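+/*
+ * The link status field starts at bit 3: bits [6:3] on AB5500 V1 and
+ * bits [7:3] on V2, which is why readers of AB5500_USB_LINE_STAT_REG
+ * shift the masked value right by three.
+ */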
+
+#define USB_PROBE_DELAY 1000 /* 1 second */
+
+#define PUBLIC_ID_BACKUPRAM1 (U5500_BACKUPRAM1_BASE + 0x0FC0)
+#define MAX_USB_SERIAL_NUMBER_LEN 31
+
+/* UsbLineStatus register - usb types */
+enum ab8500_usb_link_status {
+ USB_LINK_NOT_CONFIGURED,
+ USB_LINK_STD_HOST_NC,
+ USB_LINK_STD_HOST_C_NS,
+ USB_LINK_STD_HOST_C_S,
+ USB_LINK_HOST_CHG_NM,
+ USB_LINK_HOST_CHG_HS,
+ USB_LINK_HOST_CHG_HS_CHIRP,
+ USB_LINK_DEDICATED_CHG,
+ USB_LINK_ACA_RID_A,
+ USB_LINK_ACA_RID_B,
+ USB_LINK_ACA_RID_C_NM,
+ USB_LINK_ACA_RID_C_HS,
+ USB_LINK_ACA_RID_C_HS_CHIRP,
+ USB_LINK_HM_IDGND,
+ USB_LINK_OTG_HOST_NO_CURRENT,
+ USB_LINK_NOT_VALID_LINK,
+ USB_LINK_HM_IDGND_V2 = 18,
+};
+
+/**
+ * ab5500_usb_mode - Different states of ab usb_chip
+ *
+ * Used for USB cable plug-in state machine
+ */
+enum ab5500_usb_mode {
+ USB_IDLE,
+ USB_DEVICE,
+ USB_HOST,
+ USB_DEDICATED_CHG,
+};
+struct ab5500_usb {
+ struct otg_transceiver otg;
+ struct device *dev;
+ int irq_num_id_fall;
+ int irq_num_vbus_rise;
+ int irq_num_vbus_fall;
+ int irq_num_link_status;
+ unsigned vbus_draw;
+ struct delayed_work dwork;
+ struct work_struct phy_dis_work;
+ unsigned long link_status_wait;
+ int rev;
+ int usb_cs_gpio;
+ enum ab5500_usb_mode mode;
+ struct clk *sysclk;
+ struct regulator *v_ape;
+ struct abx500_usbgpio_platform_data *usb_gpio;
+ struct delayed_work work_usb_workaround;
+ bool phy_enabled;
+};
+
+static int ab5500_usb_irq_setup(struct platform_device *pdev,
+ struct ab5500_usb *ab);
+static int ab5500_usb_boot_detect(struct ab5500_usb *ab);
+static int ab5500_usb_link_status_update(struct ab5500_usb *ab);
+
+static void ab5500_usb_phy_enable(struct ab5500_usb *ab, bool sel_host);
+
+static inline struct ab5500_usb *xceiv_to_ab(struct otg_transceiver *x)
+{
+ return container_of(x, struct ab5500_usb, otg);
+}
+
+/**
+ * ab5500_usb_wd_workaround() - kick the watchdog timer
+ * @ab: pointer to the ab5500_usb structure
+ *
+ * This function kicks the AB5500 main watchdog timer.
+ */
+static void ab5500_usb_wd_workaround(struct ab5500_usb *ab)
+{
+ abx500_set_register_interruptible(ab->dev,
+ AB5500_SYS_CTRL2_BLOCK,
+ AB5500_MAIN_WDOG_CTRL_REG,
+ AB5500_MAIN_WATCHDOG_ENABLE);
+
+ udelay(AB5500_WATCHDOG_DELAY_US);
+
+ abx500_set_register_interruptible(ab->dev,
+ AB5500_SYS_CTRL2_BLOCK,
+ AB5500_MAIN_WDOG_CTRL_REG,
+ (AB5500_MAIN_WATCHDOG_ENABLE
+ | AB5500_MAIN_WATCHDOG_KICK));
+
+ udelay(AB5500_WATCHDOG_DELAY_US);
+
+ abx500_set_register_interruptible(ab->dev,
+ AB5500_SYS_CTRL2_BLOCK,
+ AB5500_MAIN_WDOG_CTRL_REG,
+ AB5500_MAIN_WATCHDOG_DISABLE);
+
+ udelay(AB5500_WATCHDOG_DELAY_US);
+}
+
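+/*
+ * Enable the USB PHY in host or device mode: the USB GPIOs, sysclk and the
+ * v-ape supply are turned on, the saved context is restored and the
+ * host/device enable bit is set.  The phy_enabled flag guards against
+ * spurious interrupts enabling the PHY twice.
+ */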
+static void ab5500_usb_phy_enable(struct ab5500_usb *ab, bool sel_host)
+{
+ u8 bit;
+ /* Workaround for spurious interrupts; to be checked with the hardware team */
+ if (ab->phy_enabled == true)
+ return;
+ ab->phy_enabled = true;
+ bit = sel_host ? AB5500_USB_HOST_ENABLE :
+ AB5500_USB_DEVICE_ENABLE;
+
+ ab->usb_gpio->enable();
+ clk_enable(ab->sysclk);
+ regulator_enable(ab->v_ape);
+
+ if (!sel_host) {
+ schedule_delayed_work_on(0,
+ &ab->work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+ }
+
+ ux500_restore_context();
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ bit, bit);
+}
+
+static void ab5500_usb_phy_disable(struct ab5500_usb *ab, bool sel_host)
+{
+ u8 bit;
+ /* Workaround for spurious interrupts; to be checked with the hardware team */
+ if (ab->phy_enabled == false)
+ return;
+ ab->phy_enabled = false;
+ bit = sel_host ? AB5500_USB_HOST_ENABLE :
+ AB5500_USB_DEVICE_ENABLE;
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ bit, 0);
+ /* Needed to disable the phy.*/
+ ab5500_usb_wd_workaround(ab);
+ clk_disable(ab->sysclk);
+ regulator_disable(ab->v_ape);
+ ab->usb_gpio->disable();
+}
+
+#define ab5500_usb_peri_phy_en(ab) ab5500_usb_phy_enable(ab, false)
+#define ab5500_usb_peri_phy_dis(ab) ab5500_usb_phy_disable(ab, false)
+#define ab5500_usb_host_phy_en(ab) ab5500_usb_phy_enable(ab, true)
+#define ab5500_usb_host_phy_dis(ab) ab5500_usb_phy_disable(ab, true)
+
+/* Updates the USB state machine after a link status change */
+static int ab5500_usb_link_status_update(struct ab5500_usb *ab)
+{
+ u8 val = 0;
+ int ret = 0;
+ int gpioval = 0;
+ enum ab8500_usb_link_status lsts;
+ enum usb_xceiv_events event = USB_EVENT_NONE;
+
+ (void)abx500_get_register_interruptible(ab->dev,
+ AB5500_BANK_USB, AB5500_USB_LINE_STAT_REG, &val);
+
+ if (ab->rev >= AB5500_2_0)
+ lsts = (val & AB5500_USB_LINK_STATUS_MASK_V2) >> 3;
+ else
+ lsts = (val & AB5500_USB_LINK_STATUS_MASK_V1) >> 3;
+
+ switch (lsts) {
+
+ case USB_LINK_STD_HOST_NC:
+ case USB_LINK_STD_HOST_C_NS:
+ case USB_LINK_STD_HOST_C_S:
+ case USB_LINK_HOST_CHG_NM:
+ case USB_LINK_HOST_CHG_HS:
+ case USB_LINK_HOST_CHG_HS_CHIRP:
+
+ event = USB_EVENT_VBUS;
+ ab5500_usb_peri_phy_en(ab);
+
+ break;
+
+ case USB_LINK_HM_IDGND:
+ if (ab->rev >= AB5500_2_0)
+ return -1;
+
+ /* enable usb chip Select */
+ ret = gpio_direction_output(ab->usb_cs_gpio, gpioval);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return ret;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+
+ ab5500_usb_host_phy_en(ab);
+
+ ab->otg.default_a = true;
+ event = USB_EVENT_ID;
+
+ break;
+
+ case USB_LINK_HM_IDGND_V2:
+ if (!(ab->rev >= AB5500_2_0))
+ return -1;
+
+ /* enable usb chip Select */
+ ret = gpio_direction_output(ab->usb_cs_gpio, gpioval);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return ret;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+
+ ab5500_usb_host_phy_en(ab);
+
+ ab->otg.default_a = true;
+ event = USB_EVENT_ID;
+
+ break;
+ case USB_LINK_DEDICATED_CHG:
+ /* TODO: vbus_draw */
+ event = USB_EVENT_CHARGER;
+ break;
+ default:
+ break;
+ }
+
+ atomic_notifier_call_chain(&ab->otg.notifier, event, &ab->vbus_draw);
+
+ return 0;
+}
+
+static void ab5500_usb_delayed_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ab5500_usb *ab = container_of(dwork, struct ab5500_usb, dwork);
+
+ ab5500_usb_link_status_update(ab);
+}
+
+/**
+ * ab5500_usb_link_status_irq() - handles the USB link status update interrupt
+ *
+ * This handler updates the USB state machine once the link status
+ * register has been updated.
+ */
+static irqreturn_t ab5500_usb_link_status_irq(int irq, void *data)
+{
+ struct ab5500_usb *ab = (struct ab5500_usb *) data;
+ ab5500_usb_link_status_update(ab);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ab5500_usb_device_insert_irq(int irq, void *data)
+{
+ int ret = 0, val = 1;
+ struct ab5500_usb *ab = (struct ab5500_usb *) data;
+
+ enum usb_xceiv_events event;
+
+ ab->mode = USB_DEVICE;
+
+ /* enable usb chip Select */
+ event = USB_EVENT_VBUS;
+ ret = gpio_direction_output(ab->usb_cs_gpio, val);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return ret;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_usb_device_disconnect_irq() - handles device-mode cable removal
+ *
+ * This handler disables the USB chip select and the peripheral PHY when
+ * the device-mode cable is unplugged.
+ */
+static irqreturn_t ab5500_usb_device_disconnect_irq(int irq, void *data)
+{
+ struct ab5500_usb *ab = (struct ab5500_usb *) data;
+ /* disable usb chip Select */
+ gpio_set_value(ab->usb_cs_gpio, 0);
+ ab5500_usb_peri_phy_dis(ab);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_usb_host_disconnect_irq() - handles host cable removal
+ * @irq: interrupt number
+ * @data: pointer to the ab5500_usb structure
+ *
+ * This handler disables the USB chip select and the host PHY when the
+ * host cable is unplugged.
+ */
+static irqreturn_t ab5500_usb_host_disconnect_irq(int irq, void *data)
+{
+ struct ab5500_usb *ab = (struct ab5500_usb *) data;
+ /* disable usb chip Select */
+ gpio_set_value(ab->usb_cs_gpio, 0);
+ ab5500_usb_host_phy_dis(ab);
+ return IRQ_HANDLED;
+}
+
+static void ab5500_usb_irq_free(struct ab5500_usb *ab)
+{
+ if (ab->irq_num_id_fall)
+ free_irq(ab->irq_num_id_fall, ab);
+
+ if (ab->irq_num_vbus_rise)
+ free_irq(ab->irq_num_vbus_rise, ab);
+
+ if (ab->irq_num_vbus_fall)
+ free_irq(ab->irq_num_vbus_fall, ab);
+
+ if (ab->irq_num_link_status)
+ free_irq(ab->irq_num_link_status, ab);
+}
+
+/**
+ * ab5500_usb_irq_setup() - register the USB interrupt handlers for ab5500
+ * @pdev: pointer to the platform device
+ * @ab: pointer to the ab5500_usb structure
+ *
+ * This function looks up the USB interrupts and registers their handlers.
+ */
+static int ab5500_usb_irq_setup(struct platform_device *pdev,
+ struct ab5500_usb *ab)
+{
+ int ret = 0;
+ int irq, err;
+
+ if (!ab->dev)
+ return -EINVAL;
+
+ irq = platform_get_irq_byname(pdev, "usb_idgnd_f");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "ID fall irq not found\n");
+ err = irq;
+ goto irq_fail;
+ }
+ ab->irq_num_id_fall = irq;
+
+ irq = platform_get_irq_byname(pdev, "VBUS_F");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "VBUS fall irq not found\n");
+ err = irq;
+ goto irq_fail;
+
+ }
+ ab->irq_num_vbus_fall = irq;
+
+ irq = platform_get_irq_byname(pdev, "VBUS_R");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "VBUS raise irq not found\n");
+ err = irq;
+ goto irq_fail;
+
+ }
+ ab->irq_num_vbus_rise = irq;
+
+ irq = platform_get_irq_byname(pdev, "Link_Update");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Link Update irq not found\n");
+ err = irq;
+ goto irq_fail;
+ }
+ ab->irq_num_link_status = irq;
+
+ ret = request_threaded_irq(ab->irq_num_link_status,
+ NULL, ab5500_usb_link_status_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-link-status-update", ab);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to set the callback"
+ " handler for usb charge"
+ " detect done\n");
+ err = ret;
+ goto irq_fail;
+ }
+
+ ret = request_threaded_irq(ab->irq_num_vbus_rise, NULL,
+ ab5500_usb_device_insert_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-vbus-rise", ab);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to set the callback"
+ " handler for usb ab->dev"
+ " insertion\n");
+ err = ret;
+ goto irq_fail;
+ }
+
+ ret = request_threaded_irq(ab->irq_num_vbus_fall, NULL,
+ ab5500_usb_device_disconnect_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-vbus-fall", ab);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to set the callback"
+ " handler for usb ab->dev"
+ " removal\n");
+ err = ret;
+ goto irq_fail;
+ }
+
+ ret = request_threaded_irq((ab->irq_num_id_fall), NULL,
+ ab5500_usb_host_disconnect_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-id-fall", ab);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to set the callback"
+ " handler for usb host"
+ " removal\n");
+ err = ret;
+ goto irq_fail;
+ }
+
+ ab5500_usb_wd_workaround(ab);
+ return 0;
+
+irq_fail:
+ ab5500_usb_irq_free(ab);
+ return err;
+}
+
+/* Sys interfaces */
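+/*
+ * serial_number_show() - exports the device serial number through sysfs.
+ * Five 32-bit words are read from backup RAM at PUBLIC_ID_BACKUPRAM1 and
+ * formatted as a 40-character hexadecimal string.
+ */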
+static ssize_t
+serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 bufer[5];
+ void __iomem *backup_ram = NULL;
+ backup_ram = ioremap(PUBLIC_ID_BACKUPRAM1, 0x14);
+
+ if (backup_ram) {
+ bufer[0] = readl(backup_ram);
+ bufer[1] = readl(backup_ram + 4);
+ bufer[2] = readl(backup_ram + 8);
+ bufer[3] = readl(backup_ram + 0x0c);
+ bufer[4] = readl(backup_ram + 0x10);
+
+ snprintf(buf, MAX_USB_SERIAL_NUMBER_LEN+1,
+ "%.8X%.8X%.8X%.8X%.8X",
+ bufer[0], bufer[1], bufer[2], bufer[3], bufer[4]);
+
+ iounmap(backup_ram);
+ } else
+ dev_err(dev, "$$\n");
+
+ return strlen(buf);
+}
+
+
+static DEVICE_ATTR(serial_number, 0644, serial_number_show, NULL);
+
+static struct attribute *ab5500_usb_attributes[] = {
+ &dev_attr_serial_number.attr,
+ NULL
+};
+static const struct attribute_group ab5500_attr_group = {
+ .attrs = ab5500_usb_attributes,
+};
+
+static int ab5500_create_sysfsentries(struct ab5500_usb *ab)
+{
+ int err;
+
+ err = sysfs_create_group(&ab->dev->kobj, &ab5500_attr_group);
+ if (err)
+ sysfs_remove_group(&ab->dev->kobj, &ab5500_attr_group);
+
+ return err;
+}
+
+/**
+ * ab5500_usb_boot_detect() - detect the USB cable at boot time
+ * @ab: pointer to the ab5500_usb structure
+ *
+ * This function detects the USB cable during boot and enables the
+ * corresponding PHY.
+ */
+static int ab5500_usb_boot_detect(struct ab5500_usb *ab)
+{
+ int ret;
+ int val = 1;
+ int usb_status = 0;
+ int gpioval = 0;
+ enum ab8500_usb_link_status lsts;
+ if (!ab->dev)
+ return -EINVAL;
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ AB5500_USB_DEVICE_ENABLE,
+ AB5500_USB_DEVICE_ENABLE);
+
+ udelay(AB5500_PHY_DELAY_US);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ AB5500_USB_DEVICE_ENABLE, 0);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ AB5500_USB_HOST_ENABLE,
+ AB5500_USB_HOST_ENABLE);
+
+ udelay(AB5500_PHY_DELAY_US);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB5500_BANK_USB,
+ AB5500_USB_PHY_CTRL_REG,
+ AB5500_USB_HOST_ENABLE, 0);
+
+ (void)abx500_get_register_interruptible(ab->dev,
+ AB5500_BANK_USB, AB5500_USB_LINE_STAT_REG, &usb_status);
+
+ if (ab->rev >= AB5500_2_0)
+ lsts = (usb_status & AB5500_USB_LINK_STATUS_MASK_V2) >> 3;
+ else
+ lsts = (usb_status & AB5500_USB_LINK_STATUS_MASK_V1) >> 3;
+
+ switch (lsts) {
+
+ case USB_LINK_STD_HOST_NC:
+ case USB_LINK_STD_HOST_C_NS:
+ case USB_LINK_STD_HOST_C_S:
+ case USB_LINK_HOST_CHG_NM:
+ case USB_LINK_HOST_CHG_HS:
+ case USB_LINK_HOST_CHG_HS_CHIRP:
+ /*
+ * If the power-on key was not pressed, enter charge-only
+ * mode and don't enumerate.
+ */
+ if ((!(ab5500_get_turn_on_status() &
+ (P_ON_KEY1_EVENT | P_ON_KEY2_EVENT))) &&
+ (prcmu_get_reset_code() ==
+ SW_RESET_COLDSTART)) {
+ dev_dbg(ab->dev, "USB entered charge only mode");
+ return 0;
+ }
+ ab5500_usb_peri_phy_en(ab);
+
+ /* enable usb chip Select */
+ ret = gpio_direction_output(ab->usb_cs_gpio, val);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return ret;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+
+ break;
+
+ case USB_LINK_HM_IDGND:
+ case USB_LINK_HM_IDGND_V2:
+ /* enable usb chip Select */
+ ret = gpio_direction_output(ab->usb_cs_gpio, gpioval);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return ret;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+ ab5500_usb_host_phy_en(ab);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ab5500_usb_set_power(struct otg_transceiver *otg, unsigned mA)
+{
+ struct ab5500_usb *ab;
+
+ if (!otg)
+ return -ENODEV;
+
+ ab = xceiv_to_ab(otg);
+
+ ab->vbus_draw = mA;
+
+ atomic_notifier_call_chain(&ab->otg.notifier,
+ USB_EVENT_VBUS, &ab->vbus_draw);
+ return 0;
+}
+
+static int ab5500_usb_set_suspend(struct otg_transceiver *x, int suspend)
+{
+ /* TODO */
+ return 0;
+}
+
+static int ab5500_usb_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host)
+{
+ struct ab5500_usb *ab;
+
+ if (!otg)
+ return -ENODEV;
+
+ ab = xceiv_to_ab(otg);
+
+ /* Some drivers call this function in atomic context.
+ * Do not update ab5500 registers directly till this
+ * is fixed.
+ */
+
+ if (!host) {
+ ab->otg.host = NULL;
+ schedule_work(&ab->phy_dis_work);
+ } else {
+ ab->otg.host = host;
+ }
+
+ return 0;
+}
+
+static int ab5500_usb_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ struct ab5500_usb *ab;
+
+ if (!otg)
+ return -ENODEV;
+
+ ab = xceiv_to_ab(otg);
+
+ /* Some drivers call this function in atomic context.
+ * Do not update ab5500 registers directly till this
+ * is fixed.
+ */
+
+ if (!gadget) {
+ ab->otg.gadget = NULL;
+ schedule_work(&ab->phy_dis_work);
+ } else {
+ ab->otg.gadget = gadget;
+ }
+
+ return 0;
+}
+
+static int __devinit ab5500_usb_probe(struct platform_device *pdev)
+{
+ struct ab5500_usb *ab;
+ struct abx500_usbgpio_platform_data *usb_pdata =
+ pdev->dev.platform_data;
+ int err;
+ int ret = -1;
+ ab = kzalloc(sizeof *ab, GFP_KERNEL);
+ if (!ab)
+ return -ENOMEM;
+
+ ab->dev = &pdev->dev;
+ ab->otg.dev = ab->dev;
+ ab->otg.label = "ab5500";
+ ab->otg.state = OTG_STATE_B_IDLE;
+ ab->otg.set_host = ab5500_usb_set_host;
+ ab->otg.set_peripheral = ab5500_usb_set_peripheral;
+ ab->otg.set_suspend = ab5500_usb_set_suspend;
+ ab->otg.set_power = ab5500_usb_set_power;
+ ab->usb_gpio = usb_pdata;
+ ab->mode = USB_IDLE;
+
+ platform_set_drvdata(pdev, ab);
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&ab->otg.notifier);
+
+ /* v1: Wait for link status to become stable.
+ * all: Updates from set_host and set_peripheral as they are atomic.
+ */
+ INIT_DELAYED_WORK(&ab->dwork, ab5500_usb_delayed_work);
+
+ err = otg_set_transceiver(&ab->otg);
+ if (err)
+ dev_err(&pdev->dev, "Can't register transceiver\n");
+
+ ab->usb_cs_gpio = ab->usb_gpio->usb_cs;
+
+ ab->rev = abx500_get_chip_id(ab->dev);
+
+ ab->sysclk = clk_get(ab->dev, "sysclk");
+ if (IS_ERR(ab->sysclk)) {
+ ret = PTR_ERR(ab->sysclk);
+ ab->sysclk = NULL;
+ return ret;
+ }
+
+ ab->v_ape = regulator_get(ab->dev, "v-ape");
+ if (!ab->v_ape) {
+ dev_err(ab->dev, "Could not get v-ape supply\n");
+
+ return -EINVAL;
+ }
+
+ ab5500_usb_irq_setup(pdev, ab);
+
+ ret = gpio_request(ab->usb_cs_gpio, "usb-cs");
+ if (ret < 0)
+ dev_err(&pdev->dev, "usb gpio request fail\n");
+
+ /* Acquire GPIO alternate config struct for USB */
+ err = ab->usb_gpio->get(ab->dev);
+ if (err < 0)
+ goto fail1;
+
+ err = ab5500_usb_boot_detect(ab);
+ if (err < 0)
+ goto fail1;
+
+ err = ab5500_create_sysfsentries(ab);
+ if (err < 0)
+ dev_err(ab->dev, "usb create sysfs entries failed\n");
+
+ return 0;
+
+fail1:
+ ab5500_usb_irq_free(ab);
+ kfree(ab);
+ return err;
+}
+
+static int __devexit ab5500_usb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver ab5500_usb_driver = {
+ .driver = {
+ .name = "ab5500-usb",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_usb_probe,
+ .remove = __devexit_p(ab5500_usb_remove),
+};
+
+static int __init ab5500_usb_init(void)
+{
+ return platform_driver_register(&ab5500_usb_driver);
+}
+subsys_initcall(ab5500_usb_init);
+
+static void __exit ab5500_usb_exit(void)
+{
+ platform_driver_unregister(&ab5500_usb_driver);
+}
+module_exit(ab5500_usb_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/otg/ab8500-usb.c b/drivers/usb/otg/ab8500-usb.c
index 74fe6e62e0f..d48c5cf8c71 100644
--- a/drivers/usb/otg/ab8500-usb.c
+++ b/drivers/usb/otg/ab8500-usb.c
@@ -29,24 +29,58 @@
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/kernel_stat.h>
+#include <linux/pm_qos.h>
+
+#include <asm/io.h>
+
+#include <mach/usb.h>
#define AB8500_MAIN_WD_CTRL_REG 0x01
#define AB8500_USB_LINE_STAT_REG 0x80
#define AB8500_USB_PHY_CTRL_REG 0x8A
+#define AB8500_VBUS_CTRL_REG 0x82
+#define AB8500_IT_SOURCE2_REG 0x01
+#define AB8500_IT_SOURCE20_REG 0x13
+#define AB8500_SRC_INT_USB_HOST 0x04
+#define AB8500_SRC_INT_USB_DEVICE 0x80
#define AB8500_BIT_OTG_STAT_ID (1 << 0)
#define AB8500_BIT_PHY_CTRL_HOST_EN (1 << 0)
#define AB8500_BIT_PHY_CTRL_DEVICE_EN (1 << 1)
#define AB8500_BIT_WD_CTRL_ENABLE (1 << 0)
#define AB8500_BIT_WD_CTRL_KICK (1 << 1)
+#define AB8500_BIT_VBUS_ENABLE (1 << 0)
#define AB8500_V1x_LINK_STAT_WAIT (HZ/10)
#define AB8500_WD_KICK_DELAY_US 100 /* usec */
#define AB8500_WD_V11_DISABLE_DELAY_US 100 /* usec */
+#define AB8500_V20_31952_DISABLE_DELAY_US 100 /* usec */
#define AB8500_WD_V10_DISABLE_DELAY_MS 100 /* ms */
+/* Registers in bank 0x11 */
+#define AB8500_BANK12_ACCESS 0x00
+
+/* Registers in bank 0x12 */
+#define AB8500_USB_PHY_TUNE1 0x05
+#define AB8500_USB_PHY_TUNE2 0x06
+#define AB8500_USB_PHY_TUNE3 0x07
+
+static struct pm_qos_request usb_pm_qos_latency;
+static bool usb_pm_qos_is_latency_0;
+
+#define USB_PROBE_DELAY 1000 /* 1 second */
+#define USB_LIMIT (200) /* If we have more than 200 irqs per second */
+
+#define PUBLIC_ID_BACKUPRAM1 (U8500_BACKUPRAM1_BASE + 0x0FC0)
+#define MAX_USB_SERIAL_NUMBER_LEN 31
+
/* Usb line status register */
enum ab8500_usb_link_status {
USB_LINK_NOT_CONFIGURED = 0,
@@ -67,6 +101,13 @@ enum ab8500_usb_link_status {
USB_LINK_NOT_VALID_LINK
};
+enum ab8500_usb_mode {
+ USB_IDLE = 0,
+ USB_PERIPHERAL,
+ USB_HOST,
+ USB_DEDICATED_CHG
+};
+
struct ab8500_usb {
struct otg_transceiver otg;
struct device *dev;
@@ -80,6 +121,14 @@ struct ab8500_usb {
struct work_struct phy_dis_work;
unsigned long link_status_wait;
int rev;
+ enum ab8500_usb_mode mode;
+ struct clk *sysclk;
+ struct regulator *v_ape;
+ struct regulator *v_musb;
+ struct regulator *v_ulpi;
+ struct abx500_usbgpio_platform_data *usb_gpio;
+ struct delayed_work work_usb_workaround;
+ struct kobject *serial_number_kobj;
};
static inline struct ab8500_usb *xceiv_to_ab(struct otg_transceiver *x)
@@ -102,10 +151,8 @@ static void ab8500_usb_wd_workaround(struct ab8500_usb *ab)
(AB8500_BIT_WD_CTRL_ENABLE
| AB8500_BIT_WD_CTRL_KICK));
- if (ab->rev > 0x10) /* v1.1 v2.0 */
+ if (ab->rev > 0x10) /* v2.0 v3.0 */
udelay(AB8500_WD_V11_DISABLE_DELAY_US);
- else /* v1.0 */
- msleep(AB8500_WD_V10_DISABLE_DELAY_MS);
abx500_set_register_interruptible(ab->dev,
AB8500_SYS_CTRL2_BLOCK,
@@ -113,46 +160,167 @@ static void ab8500_usb_wd_workaround(struct ab8500_usb *ab)
0);
}
-static void ab8500_usb_phy_ctrl(struct ab8500_usb *ab, bool sel_host,
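+/*
+ * Periodic work that samples the number of USB OTG interrupts seen since
+ * the previous run.  If more than USB_LIMIT new interrupts arrived within
+ * one USB_PROBE_DELAY interval, the ARM OPP requirement for "usb" is raised
+ * to 125 and a zero CPU-DMA-latency PM QoS request is added; otherwise both
+ * are relaxed again.
+ */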
+static void ab8500_usb_load(struct work_struct *work)
+{
+ int cpu;
+ unsigned int num_irqs = 0;
+ static unsigned int old_num_irqs = UINT_MAX;
+ struct delayed_work *work_usb_workaround = to_delayed_work(work);
+ struct ab8500_usb *ab = container_of(work_usb_workaround,
+ struct ab8500_usb, work_usb_workaround);
+
+ for_each_online_cpu(cpu)
+ num_irqs += kstat_irqs_cpu(IRQ_DB8500_USBOTG, cpu);
+
+ if ((num_irqs > old_num_irqs) &&
+ (num_irqs - old_num_irqs) > USB_LIMIT) {
+
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usb", 125);
+ if (!usb_pm_qos_is_latency_0) {
+
+ pm_qos_add_request(&usb_pm_qos_latency,
+ PM_QOS_CPU_DMA_LATENCY, 0);
+ usb_pm_qos_is_latency_0 = true;
+ }
+ } else {
+
+ if (usb_pm_qos_is_latency_0) {
+
+ pm_qos_remove_request(&usb_pm_qos_latency);
+ usb_pm_qos_is_latency_0 = false;
+ }
+
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usb", 25);
+ }
+ old_num_irqs = num_irqs;
+
+ schedule_delayed_work_on(0,
+ &ab->work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+}
+
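+/*
+ * Enable or disable the regulators needed by the USB PHY: v-ape,
+ * vddulpivio18 (set to 1.3 V on AB8500 v3.0 and later) and musb_1v8.
+ */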
+static void ab8500_usb_regulator_ctrl(struct ab8500_usb *ab, bool sel_host,
bool enable)
{
- u8 ctrl_reg;
- abx500_get_register_interruptible(ab->dev,
+ int ret = 0, volt = 0;
+
+ if (enable) {
+ regulator_enable(ab->v_ape);
+ if (ab->rev >= 0x30) {
+ ret = regulator_set_voltage(ab->v_ulpi,
+ 1300000, 1350000);
+ if (ret < 0)
+ dev_err(ab->dev, "Failed to set the Vintcore"
+ " to 1.3V, ret=%d\n", ret);
+ ret = regulator_set_optimum_mode(ab->v_ulpi,
+ 28000);
+ if (ret < 0)
+ dev_err(ab->dev, "Failed to set optimum mode"
+ " (ret=%d)\n", ret);
+
+ }
+ regulator_enable(ab->v_ulpi);
+ if (ab->rev >= 0x30) {
+ volt = regulator_get_voltage(ab->v_ulpi);
+ if ((volt != 1300000) && (volt != 1350000))
+ dev_err(ab->dev, "Vintcore is not"
+ " set to 1.3V"
+ " volt=%d\n", volt);
+ }
+ regulator_enable(ab->v_musb);
+
+ } else {
+ regulator_disable(ab->v_musb);
+ regulator_disable(ab->v_ulpi);
+ regulator_disable(ab->v_ape);
+ }
+}
+
+
+static void ab8500_usb_phy_enable(struct ab8500_usb *ab, bool sel_host)
+{
+ u8 bit;
+ bit = sel_host ? AB8500_BIT_PHY_CTRL_HOST_EN :
+ AB8500_BIT_PHY_CTRL_DEVICE_EN;
+
+ ab->usb_gpio->enable();
+ clk_enable(ab->sysclk);
+
+ ab8500_usb_regulator_ctrl(ab, sel_host, true);
+
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
+ (char *)dev_name(ab->dev), 100);
+
+ schedule_delayed_work_on(0,
+ &ab->work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB,
AB8500_USB_PHY_CTRL_REG,
- &ctrl_reg);
- if (sel_host) {
- if (enable)
- ctrl_reg |= AB8500_BIT_PHY_CTRL_HOST_EN;
- else
- ctrl_reg &= ~AB8500_BIT_PHY_CTRL_HOST_EN;
- } else {
- if (enable)
- ctrl_reg |= AB8500_BIT_PHY_CTRL_DEVICE_EN;
- else
- ctrl_reg &= ~AB8500_BIT_PHY_CTRL_DEVICE_EN;
+ bit,
+ bit);
+
+}
+
+static void ab8500_usb_wd_linkstatus(struct ab8500_usb *ab,u8 bit)
+{
+ /* Workaround for v2.0 bug #31952 */
+ if (ab->rev == 0x20) {
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ bit,
+ bit);
+ udelay(AB8500_V20_31952_DISABLE_DELAY_US);
}
+}
- abx500_set_register_interruptible(ab->dev,
+static void ab8500_usb_phy_disable(struct ab8500_usb *ab, bool sel_host)
+{
+ u8 bit;
+ bit = sel_host ? AB8500_BIT_PHY_CTRL_HOST_EN :
+ AB8500_BIT_PHY_CTRL_DEVICE_EN;
+
+ ab8500_usb_wd_linkstatus(ab,bit);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB,
AB8500_USB_PHY_CTRL_REG,
- ctrl_reg);
+ bit,
+ 0);
+
+ /* Needed to disable the phy.*/
+ ab8500_usb_wd_workaround(ab);
+
+ clk_disable(ab->sysclk);
+
+ ab8500_usb_regulator_ctrl(ab, sel_host, false);
- /* Needed to enable the phy.*/
- if (enable)
- ab8500_usb_wd_workaround(ab);
+ ab->usb_gpio->disable();
+
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
+ (char *)dev_name(ab->dev), 50);
+
+ if (!sel_host) {
+
+ cancel_delayed_work_sync(&ab->work_usb_workaround);
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usb", 25);
+ }
}
-#define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_ctrl(ab, true, true)
-#define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_ctrl(ab, true, false)
-#define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_ctrl(ab, false, true)
-#define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_ctrl(ab, false, false)
+#define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_enable(ab, true)
+#define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_disable(ab, true)
+#define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_enable(ab, false)
+#define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_disable(ab, false)
static int ab8500_usb_link_status_update(struct ab8500_usb *ab)
{
u8 reg;
enum ab8500_usb_link_status lsts;
- void *v = NULL;
enum usb_xceiv_events event;
abx500_get_register_interruptible(ab->dev,
@@ -166,10 +334,12 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab)
case USB_LINK_NOT_CONFIGURED:
case USB_LINK_RESERVED:
case USB_LINK_NOT_VALID_LINK:
- /* TODO: Disable regulators. */
- ab8500_usb_host_phy_dis(ab);
- ab8500_usb_peri_phy_dis(ab);
- ab->otg.state = OTG_STATE_B_IDLE;
+ case USB_LINK_ACA_RID_B:
+ if (ab->mode == USB_HOST)
+ ab8500_usb_host_phy_dis(ab);
+ else if (ab->mode == USB_PERIPHERAL)
+ ab8500_usb_peri_phy_dis(ab);
+ ab->mode = USB_IDLE;
ab->otg.default_a = false;
ab->vbus_draw = 0;
event = USB_EVENT_NONE;
@@ -181,74 +351,90 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab)
case USB_LINK_HOST_CHG_NM:
case USB_LINK_HOST_CHG_HS:
case USB_LINK_HOST_CHG_HS_CHIRP:
- if (ab->otg.gadget) {
- /* TODO: Enable regulators. */
+ case USB_LINK_ACA_RID_C_NM:
+ case USB_LINK_ACA_RID_C_HS:
+ case USB_LINK_ACA_RID_C_HS_CHIRP:
+ if (ab->mode == USB_HOST) {
+ ab->mode = USB_PERIPHERAL;
+ ab8500_usb_host_phy_dis(ab);
+ ux500_restore_context();
+ ab8500_usb_peri_phy_en(ab);
+ }
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_PERIPHERAL;
+ ux500_restore_context();
ab8500_usb_peri_phy_en(ab);
- v = ab->otg.gadget;
}
event = USB_EVENT_VBUS;
break;
case USB_LINK_HM_IDGND:
- if (ab->otg.host) {
- /* TODO: Enable regulators. */
+ case USB_LINK_ACA_RID_A:
+ if (ab->mode == USB_PERIPHERAL) {
+ ab->mode = USB_HOST;
+ ab8500_usb_peri_phy_dis(ab);
+ ux500_restore_context();
+ ab8500_usb_host_phy_en(ab);
+ }
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_HOST;
+ ux500_restore_context();
ab8500_usb_host_phy_en(ab);
- v = ab->otg.host;
}
- ab->otg.state = OTG_STATE_A_IDLE;
ab->otg.default_a = true;
event = USB_EVENT_ID;
break;
- case USB_LINK_ACA_RID_A:
- case USB_LINK_ACA_RID_B:
- /* TODO */
- case USB_LINK_ACA_RID_C_NM:
- case USB_LINK_ACA_RID_C_HS:
- case USB_LINK_ACA_RID_C_HS_CHIRP:
case USB_LINK_DEDICATED_CHG:
/* TODO: vbus_draw */
+ ab->mode = USB_DEDICATED_CHG;
event = USB_EVENT_CHARGER;
break;
}
- atomic_notifier_call_chain(&ab->otg.notifier, event, v);
+ atomic_notifier_call_chain(&ab->otg.notifier, event, &ab->vbus_draw);
return 0;
}
static void ab8500_usb_delayed_work(struct work_struct *work)
{
- struct ab8500_usb *ab = container_of(work, struct ab8500_usb,
- dwork.work);
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ab8500_usb *ab = container_of(dwork, struct ab8500_usb, dwork);
ab8500_usb_link_status_update(ab);
}
-static irqreturn_t ab8500_usb_v1x_common_irq(int irq, void *data)
-{
- struct ab8500_usb *ab = (struct ab8500_usb *) data;
-
- /* Wait for link status to become stable. */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ab8500_usb_v1x_vbus_fall_irq(int irq, void *data)
+static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data)
{
struct ab8500_usb *ab = (struct ab8500_usb *) data;
+ enum usb_xceiv_events event;
/* Link status will not be updated till phy is disabled. */
- ab8500_usb_peri_phy_dis(ab);
-
- /* Wait for link status to become stable. */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+ if (ab->mode == USB_HOST) {
+ event = USB_EVENT_NONE;
+ ab->otg.default_a = false;
+ ab->vbus_draw = 0;
+ atomic_notifier_call_chain(&ab->otg.notifier,
+ event, &ab->vbus_draw);
+ ab8500_usb_host_phy_dis(ab);
+ }
+ else if (ab->mode == USB_PERIPHERAL)
+ ab8500_usb_peri_phy_dis(ab);
+ else if (ab->mode == USB_DEDICATED_CHG && ab->rev == 0x20) {
+ ab8500_usb_wd_linkstatus(ab,AB8500_BIT_PHY_CTRL_DEVICE_EN);
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN,
+ 0);
+ }
+ ab->mode = USB_IDLE;
return IRQ_HANDLED;
}
-static irqreturn_t ab8500_usb_v20_irq(int irq, void *data)
+static irqreturn_t ab8500_usb_v20_link_status_irq(int irq, void *data)
{
struct ab8500_usb *ab = (struct ab8500_usb *) data;
@@ -267,6 +453,19 @@ static void ab8500_usb_phy_disable_work(struct work_struct *work)
if (!ab->otg.gadget)
ab8500_usb_peri_phy_dis(ab);
+
+}
+
+static unsigned ab8500_eyediagram_workaroud(struct ab8500_usb *ab, unsigned mA)
+{
+ /* AB8500 v2 has eye diagram issues when drawing more
+ * than 100 mA from VBUS, so limit the charging current
+ * to 100 mA for a standard host.
+ */
+ if ((ab->rev < 0x30) && (mA > 100))
+ mA = 100;
+
+ return mA;
}
static int ab8500_usb_set_power(struct otg_transceiver *otg, unsigned mA)
@@ -278,18 +477,15 @@ static int ab8500_usb_set_power(struct otg_transceiver *otg, unsigned mA)
ab = xceiv_to_ab(otg);
+ mA = ab8500_eyediagram_workaroud(ab, mA);
+
ab->vbus_draw = mA;
- if (mA)
- atomic_notifier_call_chain(&ab->otg.notifier,
- USB_EVENT_ENUMERATED, ab->otg.gadget);
+ atomic_notifier_call_chain(&ab->otg.notifier,
+ USB_EVENT_VBUS, &ab->vbus_draw);
return 0;
}
-/* TODO: Implement some way for charging or other drivers to read
- * ab->vbus_draw.
- */
-
static int ab8500_usb_set_suspend(struct otg_transceiver *x, int suspend)
{
/* TODO */
@@ -306,25 +502,13 @@ static int ab8500_usb_set_peripheral(struct otg_transceiver *otg,
ab = xceiv_to_ab(otg);
+ ab->otg.gadget = gadget;
/* Some drivers call this function in atomic context.
* Do not update ab8500 registers directly till this
* is fixed.
*/
-
- if (!gadget) {
- /* TODO: Disable regulators. */
- ab->otg.gadget = NULL;
+ if (!gadget)
schedule_work(&ab->phy_dis_work);
- } else {
- ab->otg.gadget = gadget;
- ab->otg.state = OTG_STATE_B_IDLE;
-
- /* Phy will not be enabled if cable is already
- * plugged-in. Schedule to enable phy.
- * Use same delay to avoid any race condition.
- */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
- }
return 0;
}
@@ -339,148 +523,246 @@ static int ab8500_usb_set_host(struct otg_transceiver *otg,
ab = xceiv_to_ab(otg);
+ ab->otg.host = host;
+
/* Some drivers call this function in atomic context.
* Do not update ab8500 registers directly till this
* is fixed.
*/
-
- if (!host) {
- /* TODO: Disable regulators. */
- ab->otg.host = NULL;
+ if (!host)
schedule_work(&ab->phy_dis_work);
- } else {
- ab->otg.host = host;
- /* Phy will not be enabled if cable is already
- * plugged-in. Schedule to enable phy.
- * Use same delay to avoid any race condition.
- */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+
+ return 0;
+}
+/**
+ * ab8500_usb_boot_detect() - detect the USB cable at boot time
+ * @ab: pointer to the ab8500_usb structure
+ *
+ * This function detects the USB cable during boot and updates the link
+ * status accordingly.
+ */
+static int ab8500_usb_boot_detect(struct ab8500_usb *ab)
+{
+ /* Disabling PHY before selective enable or disable */
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN);
+
+ udelay(100);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN,
+ 0);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_HOST_EN,
+ AB8500_BIT_PHY_CTRL_HOST_EN);
+
+ udelay(100);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_HOST_EN,
+ 0);
+
+ ab8500_usb_link_status_update(ab);
+
+ return 0;
+}
+
+static void ab8500_usb_regulator_put(struct ab8500_usb *ab)
+{
+
+ if (ab->v_ape)
+ regulator_put(ab->v_ape);
+
+ if (ab->v_ulpi)
+ regulator_put(ab->v_ulpi);
+
+ if (ab->v_musb)
+ regulator_put(ab->v_musb);
+}
+
+static int ab8500_usb_regulator_get(struct ab8500_usb *ab)
+{
+ int err;
+
+ ab->v_ape = regulator_get(ab->dev, "v-ape");
+ if (IS_ERR(ab->v_ape)) {
+ dev_err(ab->dev, "Could not get v-ape supply\n");
+ err = PTR_ERR(ab->v_ape);
+ goto reg_error;
+ }
+
+ ab->v_ulpi = regulator_get(ab->dev, "vddulpivio18");
+ if (IS_ERR(ab->v_ulpi)) {
+ dev_err(ab->dev, "Could not get vddulpivio18 supply\n");
+ err = PTR_ERR(ab->v_ulpi);
+ goto reg_error;
+ }
+
+ ab->v_musb = regulator_get(ab->dev, "musb_1v8");
+ if (IS_ERR(ab->v_musb)) {
+ dev_err(ab->dev, "Could not get musb_1v8 supply\n");
+ err = PTR_ERR(ab->v_musb);
+ goto reg_error;
}
return 0;
+
+reg_error:
+ ab8500_usb_regulator_put(ab);
+ return err;
}
static void ab8500_usb_irq_free(struct ab8500_usb *ab)
{
- if (ab->rev < 0x20) {
+ if (ab->irq_num_id_rise)
free_irq(ab->irq_num_id_rise, ab);
+
+ if (ab->irq_num_id_fall)
free_irq(ab->irq_num_id_fall, ab);
+
+ if (ab->irq_num_vbus_rise)
free_irq(ab->irq_num_vbus_rise, ab);
+
+ if (ab->irq_num_vbus_fall)
free_irq(ab->irq_num_vbus_fall, ab);
- } else {
+
+ if (ab->irq_num_link_status)
free_irq(ab->irq_num_link_status, ab);
- }
}
-static int ab8500_usb_v1x_res_setup(struct platform_device *pdev,
+static int ab8500_usb_irq_setup(struct platform_device *pdev,
struct ab8500_usb *ab)
{
int err;
+ int irq;
+
+ if (ab->rev > 0x10) { /* 0x20 0x30 */
+ irq = platform_get_irq_byname(pdev, "USB_LINK_STATUS");
+ if (irq < 0) {
+ err = irq;
+ dev_err(&pdev->dev, "Link status irq not found\n");
+ goto irq_fail;
+ }
- ab->irq_num_id_rise = platform_get_irq_byname(pdev, "ID_WAKEUP_R");
- if (ab->irq_num_id_rise < 0) {
- dev_err(&pdev->dev, "ID rise irq not found\n");
- return ab->irq_num_id_rise;
- }
- err = request_threaded_irq(ab->irq_num_id_rise, NULL,
- ab8500_usb_v1x_common_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
- "usb-id-rise", ab);
- if (err < 0) {
- dev_err(ab->dev, "request_irq failed for ID rise irq\n");
- goto fail0;
+ err = request_threaded_irq(irq, NULL,
+ ab8500_usb_v20_link_status_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-link-status", ab);
+ if (err < 0) {
+ dev_err(ab->dev,
+ "request_irq failed for link status irq\n");
+ return err;
+ }
+ ab->irq_num_link_status = irq;
}
- ab->irq_num_id_fall = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
- if (ab->irq_num_id_fall < 0) {
+ irq = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
+ if (irq < 0) {
+ err = irq;
dev_err(&pdev->dev, "ID fall irq not found\n");
return ab->irq_num_id_fall;
}
- err = request_threaded_irq(ab->irq_num_id_fall, NULL,
- ab8500_usb_v1x_common_irq,
+ err = request_threaded_irq(irq, NULL,
+ ab8500_usb_disconnect_irq,
IRQF_NO_SUSPEND | IRQF_SHARED,
"usb-id-fall", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for ID fall irq\n");
- goto fail1;
- }
-
- ab->irq_num_vbus_rise = platform_get_irq_byname(pdev, "VBUS_DET_R");
- if (ab->irq_num_vbus_rise < 0) {
- dev_err(&pdev->dev, "VBUS rise irq not found\n");
- return ab->irq_num_vbus_rise;
- }
- err = request_threaded_irq(ab->irq_num_vbus_rise, NULL,
- ab8500_usb_v1x_common_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
- "usb-vbus-rise", ab);
- if (err < 0) {
- dev_err(ab->dev, "request_irq failed for Vbus rise irq\n");
- goto fail2;
+ goto irq_fail;
}
+ ab->irq_num_id_fall = irq;
- ab->irq_num_vbus_fall = platform_get_irq_byname(pdev, "VBUS_DET_F");
- if (ab->irq_num_vbus_fall < 0) {
+ irq = platform_get_irq_byname(pdev, "VBUS_DET_F");
+ if (irq < 0) {
+ err = irq;
dev_err(&pdev->dev, "VBUS fall irq not found\n");
- return ab->irq_num_vbus_fall;
+ goto irq_fail;
}
- err = request_threaded_irq(ab->irq_num_vbus_fall, NULL,
- ab8500_usb_v1x_vbus_fall_irq,
+ err = request_threaded_irq(irq, NULL,
+ ab8500_usb_disconnect_irq,
IRQF_NO_SUSPEND | IRQF_SHARED,
"usb-vbus-fall", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
- goto fail3;
+ goto irq_fail;
}
+ ab->irq_num_vbus_fall = irq;
return 0;
-fail3:
- free_irq(ab->irq_num_vbus_rise, ab);
-fail2:
- free_irq(ab->irq_num_id_fall, ab);
-fail1:
- free_irq(ab->irq_num_id_rise, ab);
-fail0:
+
+irq_fail:
+ ab8500_usb_irq_free(ab);
return err;
}
-static int ab8500_usb_v2_res_setup(struct platform_device *pdev,
- struct ab8500_usb *ab)
+/* Sys interfaces */
+static ssize_t usb_serial_number
+ (struct kobject *kobj, struct attribute *attr, char *buf)
{
- int err;
+ u32 bufer[5];
+ void __iomem *backup_ram = NULL;
- ab->irq_num_link_status = platform_get_irq_byname(pdev,
- "USB_LINK_STATUS");
- if (ab->irq_num_link_status < 0) {
- dev_err(&pdev->dev, "Link status irq not found\n");
- return ab->irq_num_link_status;
- }
+ backup_ram = ioremap(PUBLIC_ID_BACKUPRAM1, 0x14);
- err = request_threaded_irq(ab->irq_num_link_status, NULL,
- ab8500_usb_v20_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
- "usb-link-status", ab);
- if (err < 0) {
- dev_err(ab->dev,
- "request_irq failed for link status irq\n");
- return err;
- }
+ if (backup_ram) {
+ bufer[0] = readl(backup_ram);
+ bufer[1] = readl(backup_ram + 4);
+ bufer[2] = readl(backup_ram + 8);
+ bufer[3] = readl(backup_ram + 0x0c);
+ bufer[4] = readl(backup_ram + 0x10);
- return 0;
+ snprintf(buf, MAX_USB_SERIAL_NUMBER_LEN+1,
+ "%.8X%.8X%.8X%.8X%.8X",
+ bufer[0], bufer[1], bufer[2], bufer[3], bufer[4]);
+
+ iounmap(backup_ram);
+ } else
+ printk(KERN_ERR "$$ ioremap failed\n");
+
+ return strlen(buf);
}
+static struct attribute usb_serial_number_attribute = \
+ {.name = "serial_number", .mode = S_IRUGO};
+
+static struct attribute *serial_number[] = {
+ &usb_serial_number_attribute,
+ NULL
+};
+
+const struct sysfs_ops usb_sysfs_ops = {
+ .show = usb_serial_number,
+};
+
+static struct kobj_type ktype_serial_number = {
+ .sysfs_ops = &usb_sysfs_ops,
+ .default_attrs = serial_number,
+};
+
static int __devinit ab8500_usb_probe(struct platform_device *pdev)
{
struct ab8500_usb *ab;
+ struct ab8500_platform_data *ab8500_pdata =
+ dev_get_platdata(pdev->dev.parent);
int err;
int rev;
+ int ret = -1;
rev = abx500_get_chip_id(&pdev->dev);
if (rev < 0) {
dev_err(&pdev->dev, "Chip id read failed\n");
return rev;
- } else if (rev < 0x10) {
- dev_err(&pdev->dev, "Unsupported AB8500 chip\n");
+ } else if (rev < 0x20) {
+ dev_err(&pdev->dev, "Unsupported AB8500 chip rev=%d\n", rev);
return -ENODEV;
}
@@ -492,11 +774,12 @@ static int __devinit ab8500_usb_probe(struct platform_device *pdev)
ab->rev = rev;
ab->otg.dev = ab->dev;
ab->otg.label = "ab8500";
- ab->otg.state = OTG_STATE_UNDEFINED;
+ ab->otg.state = OTG_STATE_B_IDLE;
ab->otg.set_host = ab8500_usb_set_host;
ab->otg.set_peripheral = ab8500_usb_set_peripheral;
ab->otg.set_suspend = ab8500_usb_set_suspend;
ab->otg.set_power = ab8500_usb_set_power;
+ ab->usb_gpio = ab8500_pdata->usb;
platform_set_drvdata(pdev, ab);
@@ -510,27 +793,114 @@ static int __devinit ab8500_usb_probe(struct platform_device *pdev)
/* all: Disable phy when called from set_host and set_peripheral */
INIT_WORK(&ab->phy_dis_work, ab8500_usb_phy_disable_work);
- if (ab->rev < 0x20) {
- err = ab8500_usb_v1x_res_setup(pdev, ab);
- ab->link_status_wait = AB8500_V1x_LINK_STAT_WAIT;
- } else {
- err = ab8500_usb_v2_res_setup(pdev, ab);
+ INIT_DELAYED_WORK_DEFERRABLE(&ab->work_usb_workaround,
+ ab8500_usb_load);
+ err = ab8500_usb_regulator_get(ab);
+ if (err)
+ goto fail0;
+
+ ab->sysclk = clk_get(ab->dev, "sysclk");
+ if (IS_ERR(ab->sysclk)) {
+ err = PTR_ERR(ab->sysclk);
+ goto fail1;
}
+ err = ab8500_usb_irq_setup(pdev, ab);
if (err < 0)
- goto fail0;
+ goto fail2;
err = otg_set_transceiver(&ab->otg);
if (err) {
dev_err(&pdev->dev, "Can't register transceiver\n");
- goto fail1;
+ goto fail3;
}
- dev_info(&pdev->dev, "AB8500 usb driver initialized\n");
+ /* Write Phy tuning values */
+ if (ab->rev >= 0x30) {
+ /* Enable the PBT/Bank 0x12 access */
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEVELOPMENT,
+ AB8500_BANK12_ACCESS,
+ 0x01);
+ if (ret < 0)
+ printk(KERN_ERR "Failed to enable bank12"
+ " access ret=%d\n", ret);
+
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEBUG,
+ AB8500_USB_PHY_TUNE1,
+ 0xC8);
+ if (ret < 0)
+ printk(KERN_ERR "Failed to set PHY_TUNE1"
+ " register ret=%d\n", ret);
+
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEBUG,
+ AB8500_USB_PHY_TUNE2,
+ 0x00);
+ if (ret < 0)
+ printk(KERN_ERR "Failed to set PHY_TUNE2"
+ " register ret=%d\n", ret);
+
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEBUG,
+ AB8500_USB_PHY_TUNE3,
+ 0x78);
+
+ if (ret < 0)
+ printk(KERN_ERR "Failed to set PHY_TUNE3"
+ " regester ret=%d\n", ret);
+
+ /* Switch to normal mode/disable Bank 0x12 access */
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEVELOPMENT,
+ AB8500_BANK12_ACCESS,
+ 0x00);
+
+ if (ret < 0)
+ printk(KERN_ERR "Failed to switch bank12"
+ " access ret=%d\n", ret);
+ }
+ /* Needed to enable ID detection. */
+ ab8500_usb_wd_workaround(ab);
+
+ ab->serial_number_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+
+ if (ab->serial_number_kobj == NULL)
+ ret = -ENOMEM;
+ ab->serial_number_kobj->ktype = &ktype_serial_number;
+ kobject_init(ab->serial_number_kobj, ab->serial_number_kobj->ktype);
+
+ ret = kobject_set_name(ab->serial_number_kobj, "usb_serial_number");
+ if (ret)
+ kfree(ab->serial_number_kobj);
+
+ ret = kobject_add(ab->serial_number_kobj, NULL, "usb_serial_number");
+ if (ret)
+ kfree(ab->serial_number_kobj);
+
+
+ err = ab->usb_gpio->get(ab->dev);
+ if (err < 0)
+ goto fail3;
+
+ prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
+ (char *)dev_name(ab->dev), 50);
+ dev_info(&pdev->dev, "revision 0x%2x driver initialized\n", ab->rev);
+
+ prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "usb", 25);
+
+ err = ab8500_usb_boot_detect(ab);
+ if (err < 0)
+ goto fail3;
return 0;
-fail1:
+fail3:
ab8500_usb_irq_free(ab);
+fail2:
+ clk_put(ab->sysclk);
+fail1:
+ ab8500_usb_regulator_put(ab);
fail0:
kfree(ab);
return err;
@@ -548,8 +918,16 @@ static int __devexit ab8500_usb_remove(struct platform_device *pdev)
otg_set_transceiver(NULL);
- ab8500_usb_host_phy_dis(ab);
- ab8500_usb_peri_phy_dis(ab);
+ if (ab->mode == USB_HOST)
+ ab8500_usb_host_phy_dis(ab);
+ else if (ab->mode == USB_PERIPHERAL)
+ ab8500_usb_peri_phy_dis(ab);
+
+ clk_put(ab->sysclk);
+
+ ab8500_usb_regulator_put(ab);
+
+ ab->usb_gpio->put();
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6ca0c407c14..a9a2c30b6aa 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -23,6 +23,8 @@ source "drivers/gpu/drm/Kconfig"
source "drivers/gpu/stub/Kconfig"
+source "drivers/gpu/mali/Kconfig"
+
config VGASTATE
tristate
default n
@@ -286,6 +288,8 @@ config FB_CIRRUS
Say N unless you have such a graphics board or plan to get one
before you next recompile the kernel.
+source "drivers/video/mcde/Kconfig"
+
config FB_PM2
tristate "Permedia2 support"
depends on FB && ((AMIGA && BROKEN) || PCI)
@@ -2413,6 +2417,8 @@ source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
source "drivers/video/backlight/Kconfig"
+source "drivers/video/av8100/Kconfig"
+source "drivers/video/b2r2/Kconfig"
if VT
source "drivers/video/console/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 142606814d9..950183044f7 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -136,6 +136,9 @@ obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o
obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
obj-$(CONFIG_FB_OMAP) += omap/
obj-y += omap2/
+obj-$(CONFIG_FB_MCDE) += mcde/
+obj-$(CONFIG_AV8100) += av8100/
+obj-y += b2r2/
obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
diff --git a/drivers/video/av8100/Kconfig b/drivers/video/av8100/Kconfig
new file mode 100644
index 00000000000..40b9943aaa9
--- /dev/null
+++ b/drivers/video/av8100/Kconfig
@@ -0,0 +1,48 @@
+config AV8100
+ tristate "AV8100 driver support(HDMI/CVBS)"
+ default n
+ help
+ Enable this if HDMI/TV-out driver support is required.
+
+config HDMI_AV8100_DEBUG
+ bool "HDMI and AV8100 debug messages"
+ default n
+ depends on AV8100
+ ---help---
+ Say Y here if you want the HDMI and AV8100 driver to
+ output debug messages.
+
+choice
+ prompt "AV8100 HW trig method"
+ default AV8100_HWTRIG_DSI_TE
+
+config AV8100_HWTRIG_INT
+ bool "AV8100 HW trig on INT"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use HW triggering
+ from AV8100 INT to MCDE sync0.
+
+config AV8100_HWTRIG_I2SDAT3
+ bool "AV8100 HW trig on I2SDAT3"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use HW triggering
+ from AV8100 I2SDAT3 to MCDE sync1.
+
+config AV8100_HWTRIG_DSI_TE
+ bool "AV8100 HW trig on DSI"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use HW triggering
+ using DSI TE polling between AV8100 and MCDE.
+
+config AV8100_HWTRIG_NONE
+ bool "AV8100 SW trig"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use SW triggering
+ between AV8100 and MCDE.
+
+endchoice
+
diff --git a/drivers/video/av8100/Makefile b/drivers/video/av8100/Makefile
new file mode 100644
index 00000000000..2d3028b18ca
--- /dev/null
+++ b/drivers/video/av8100/Makefile
@@ -0,0 +1,10 @@
+# Makefile for the AV8100 HDMI driver, buildable as a loadable module
+
+obj-$(CONFIG_AV8100) += av8100.o hdmi.o
+
+ifdef CONFIG_HDMI_AV8100_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+clean-files := av8100.o hdmi.o built-in.o modules.order
+
diff --git a/drivers/video/av8100/av8100.c b/drivers/video/av8100/av8100.c
new file mode 100644
index 00000000000..d5d159079c3
--- /dev/null
+++ b/drivers/video/av8100/av8100.c
@@ -0,0 +1,4166 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * AV8100 driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/timer.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include "av8100_regs.h"
+#include <video/av8100.h>
+#include <video/hdmi.h>
+#include <linux/firmware.h>
+
+#define AV8100_FW_FILENAME "av8100.fw"
+#define CUT_STR_0 "2.1"
+#define CUT_STR_1 "2.2"
+#define CUT_STR_3 "2.3"
+#define CUT_STR_30 "3.0"
+#define CUT_STR_UNKNOWN ""
+#define AV8100_DEVNR_DEFAULT 0
+
+/* Interrupts */
+#define AV8100_INT_EVENT 0x1
+#define AV8100_PLUGSTARTUP_EVENT 0x4
+
+#define AV8100_PLUGSTARTUP_TIME 100
+
+/* Standby search time */
+#define AV8100_ON_TIME 1 /* 9 ms step */
+#define AV8100_DENC_OFF_TIME 3 /* 275 ms step if > V1. Not used if V1 */
+#define AV8100_HDMI_OFF_TIME 2 /* 140 ms step if V2. 80 ms step if V1 */
+
+/* Command offsets */
+#define AV8100_COMMAND_OFFSET 0x10
+#define AV8100_CUTVER_OFFSET 0x11
+#define AV8100_COMMAND_MAX_LENGTH 0x81
+#define AV8100_CMD_BUF_OFFSET (AV8100_COMMAND_OFFSET + 1)
+#define AV8100_2ND_RET_BYTE_OFFSET (AV8100_COMMAND_OFFSET + 1)
+#define AV8100_CEC_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 4)
+#define AV8100_HDCP_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 2)
+#define AV8100_EDID_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 1)
+#define AV8100_FUSE_CRC_OFFSET (AV8100_COMMAND_OFFSET + 2)
+#define AV8100_FUSE_PRGD_OFFSET (AV8100_COMMAND_OFFSET + 3)
+#define AV8100_CRC32_OFFSET (AV8100_COMMAND_OFFSET + 2)
+#define AV8100_CEC_ADDR_OFFSET (AV8100_COMMAND_OFFSET + 3)
+
+/* Tearing effect line numbers */
+#define AV8100_TE_LINE_NB_14 14
+#define AV8100_TE_LINE_NB_17 17
+#define AV8100_TE_LINE_NB_18 18
+#define AV8100_TE_LINE_NB_21 21
+#define AV8100_TE_LINE_NB_22 22
+#define AV8100_TE_LINE_NB_24 24
+#define AV8100_TE_LINE_NB_25 25
+#define AV8100_TE_LINE_NB_26 26
+#define AV8100_TE_LINE_NB_29 29
+#define AV8100_TE_LINE_NB_30 30
+#define AV8100_TE_LINE_NB_32 32
+#define AV8100_TE_LINE_NB_38 38
+#define AV8100_TE_LINE_NB_40 40
+#define AV8100_UI_X4_DEFAULT 6
+
+#define HDMI_REQUEST_FOR_REVOCATION_LIST_INPUT 2
+#define HDMI_CEC_MESSAGE_WRITE_BUFFER_SIZE 16
+#define HDMI_HDCP_SEND_KEY_SIZE 7
+#define HDMI_INFOFRAME_DATA_SIZE 28
+#define HDMI_FUSE_AES_KEY_SIZE 16
+#define HDMI_FUSE_AES_KEY_RET_SIZE 2
+#define HDMI_LOADAES_END_BLK_NR 145
+#define HDMI_CRC32_SIZE 4
+#define HDMI_HDCP_MGMT_BKSV_SIZE 5
+#define HDMI_HDCP_MGMT_SHA_SIZE 20
+#define HDMI_HDCP_MGMT_MAX_DEVICES_SIZE 20
+#define HDMI_HDCP_MGMT_DEVICE_MASK 0x7F
+#define HDMI_EDIDREAD_SIZE 0x7F
+
+#define HPDS_INVALID 0xF
+#define CPDS_INVALID 0xF
+#define CECRX_INVALID 0xF
+
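+/*
+ * Register field helpers: for p = 0x1234, REG_16_8_MSB(p) yields 0x12 and
+ * REG_16_8_LSB(p) yields 0x34; the 10- and 12-bit variants keep only the
+ * bits that fit the corresponding AV8100 register fields.
+ */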
+#define REG_16_8_LSB(p) ((u8)(p & 0xFF))
+#define REG_16_8_MSB(p) ((u8)((p & 0xFF00)>>8))
+#define REG_32_8_MSB(p) ((u8)((p & 0xFF000000)>>24))
+#define REG_32_8_MMSB(p) ((u8)((p & 0x00FF0000)>>16))
+#define REG_32_8_MLSB(p) ((u8)((p & 0x0000FF00)>>8))
+#define REG_32_8_LSB(p) ((u8)(p & 0x000000FF))
+#define REG_10_8_MSB(p) ((u8)((p & 0x300)>>8))
+#define REG_12_8_MSB(p) ((u8)((p & 0xf00)>>8))
+
+#define AV8100_WAITTIME_1MS 1
+#define AV8100_WAITTIME_5MS 5
+#define AV8100_WAITTIME_10MS 10
+#define AV8100_WAITTIME_50MS 50
+#define AV8100_WATTIME_100US 100
+
+static DEFINE_MUTEX(av8100_hw_mutex);
+#define LOCK_AV8100_HW mutex_lock(&av8100_hw_mutex)
+#define UNLOCK_AV8100_HW mutex_unlock(&av8100_hw_mutex)
+static DEFINE_MUTEX(av8100_fwdl_mutex);
+#define LOCK_AV8100_FWDL mutex_lock(&av8100_fwdl_mutex)
+#define UNLOCK_AV8100_FWDL mutex_unlock(&av8100_fwdl_mutex)
+
+struct color_conversion_cmd {
+ unsigned short c0;
+ unsigned short c1;
+ unsigned short c2;
+ unsigned short c3;
+ unsigned short c4;
+ unsigned short c5;
+ unsigned short c6;
+ unsigned short c7;
+ unsigned short c8;
+ unsigned short aoffset;
+ unsigned short boffset;
+ unsigned short coffset;
+ unsigned char lmax;
+ unsigned char lmin;
+ unsigned char cmax;
+ unsigned char cmin;
+};
+
+struct av8100_config {
+ struct i2c_client *client;
+ struct i2c_device_id *id;
+ struct av8100_video_input_format_cmd hdmi_video_input_cmd;
+ struct av8100_audio_input_format_cmd hdmi_audio_input_cmd;
+ struct av8100_video_output_format_cmd hdmi_video_output_cmd;
+ struct av8100_video_scaling_format_cmd hdmi_video_scaling_cmd;
+ enum av8100_color_transform color_transform;
+ struct av8100_cec_message_write_format_cmd
+ hdmi_cec_message_write_cmd;
+ struct av8100_cec_message_read_back_format_cmd
+ hdmi_cec_message_read_back_cmd;
+ struct av8100_denc_format_cmd hdmi_denc_cmd;
+ struct av8100_hdmi_cmd hdmi_cmd;
+ struct av8100_hdcp_send_key_format_cmd hdmi_hdcp_send_key_cmd;
+ struct av8100_hdcp_management_format_cmd
+ hdmi_hdcp_management_format_cmd;
+ struct av8100_infoframes_format_cmd hdmi_infoframes_cmd;
+ struct av8100_edid_section_readback_format_cmd
+ hdmi_edid_section_readback_cmd;
+ struct av8100_pattern_generator_format_cmd hdmi_pattern_generator_cmd;
+ struct av8100_fuse_aes_key_format_cmd hdmi_fuse_aes_key_cmd;
+};
+
+enum av8100_plug_state {
+ AV8100_UNPLUGGED,
+ AV8100_PLUGGED_STARTUP,
+ AV8100_PLUGGED
+};
+
+struct av8100_params {
+ int denc_off_time; /* 5 volt time */
+ int hdmi_off_time; /* 5 volt time */
+ int on_time; /* 5 volt time */
+ u8 hpdm; /* stby_int_mask */
+ u8 cpdm; /* stby_int_mask */
+ u8 cecm; /* gen_int_mask */
+ u8 hdcpm; /* gen_int_mask */
+ u8 uovbm; /* gen_int_mask */
+ void (*hdmi_ev_cb)(enum av8100_hdmi_event);
+ enum av8100_plug_state plug_state;
+ struct clk *inputclk;
+ bool inputclk_requested;
+ bool opp_requested;
+ struct regulator *regulator_pwr;
+ bool regulator_requested;
+ bool pre_suspend_power;
+ bool ints_enabled;
+ bool irq_requested;
+};
+
+/**
+ * struct av8100_cea - CEA/VESA (Consumer Electronics Association) video format entry
+ * @cea_id: human readable name of the video format
+ * @cea_nb: CEA or VESA format number
+ * @vtotale: total number of vertical lines
+ **/
+struct av8100_cea {
+ char cea_id[40];
+ int cea_nb;
+ int vtotale;
+ int vactive;
+ int vsbp;
+ int vslen;
+ int vsfp;
+ char vpol[5];
+ int htotale;
+ int hactive;
+ int hbp;
+ int hslen;
+ int hfp;
+ int frequence;
+ char hpol[5];
+ int reg_line_duration;
+ int blkoel_duration;
+ int uix4;
+ int pll_mult;
+ int pll_div;
+};
+
+enum av8100_command_size {
+ AV8100_COMMAND_VIDEO_INPUT_FORMAT_SIZE = 0x17,
+ AV8100_COMMAND_AUDIO_INPUT_FORMAT_SIZE = 0x8,
+ AV8100_COMMAND_VIDEO_OUTPUT_FORMAT_SIZE = 0x1E,
+ AV8100_COMMAND_VIDEO_SCALING_FORMAT_SIZE = 0x11,
+ AV8100_COMMAND_COLORSPACECONVERSION_SIZE = 0x1D,
+ AV8100_COMMAND_CEC_MESSAGE_WRITE_SIZE = 0x12,
+ AV8100_COMMAND_CEC_MESSAGE_READ_BACK_SIZE = 0x1,
+ AV8100_COMMAND_DENC_SIZE = 0x6,
+ AV8100_COMMAND_HDMI_SIZE = 0x4,
+ AV8100_COMMAND_HDCP_SENDKEY_SIZE = 0xA,
+ AV8100_COMMAND_HDCP_MANAGEMENT_SIZE = 0x3,
+ AV8100_COMMAND_INFOFRAMES_SIZE = 0x21,
+ AV8100_COMMAND_EDID_SECTION_READBACK_SIZE = 0x3,
+ AV8100_COMMAND_PATTERNGENERATOR_SIZE = 0x4,
+ AV8100_COMMAND_FUSE_AES_KEY_SIZE = 0x12,
+ AV8100_COMMAND_FUSE_AES_CHK_SIZE = 0x2,
+};
+
+struct av8100_device {
+ struct list_head list;
+ struct miscdevice miscdev;
+ struct device *dev;
+ struct av8100_config config;
+ struct av8100_status status;
+ struct timer_list timer;
+ wait_queue_head_t event;
+ int flag;
+ struct av8100_params params;
+ u8 chip_version;
+};
+
+static const unsigned int waittime_retry[10] = {
+ 1, 2, 4, 6, 8, 10, 10, 10, 10, 10};
+
+static int av8100_5V_w(u8 denc_off, u8 hdmi_off, u8 on);
+static void clr_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status);
+static void set_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status);
+static void cec_rx(struct av8100_device *adev);
+static void cec_tx(struct av8100_device *adev);
+static void cec_txerr(struct av8100_device *adev);
+static void hdcp_changed(struct av8100_device *adev);
+static const struct color_conversion_cmd *get_color_transform_cmd(
+ struct av8100_device *adev,
+ enum av8100_color_transform transform);
+static int av8100_open(struct inode *inode, struct file *filp);
+static int av8100_release(struct inode *inode, struct file *filp);
+static long av8100_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+static int __devinit av8100_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id);
+static int __devexit av8100_remove(struct i2c_client *i2c_client);
+
+static const struct file_operations av8100_fops = {
+ .owner = THIS_MODULE,
+ .open = av8100_open,
+ .release = av8100_release,
+ .unlocked_ioctl = av8100_ioctl
+};
+
+/* List of devices */
+static LIST_HEAD(av8100_device_list);
+
+static const struct av8100_cea av8100_all_cea[29] = {
+/* cea id
+ * cea_nb vtot vact vsbp vslen
+ * vsfp vpol htot hact hbp hslen hfp freq
+ * hpol rld bd uix4 pm pd */
+{ "0 CUSTOM ",
+ 0, 0, 0, 0, 0,
+ 0, "-", 800, 640, 16, 96, 10, 25200000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "1 CEA 1 VESA 4 640x480p @ 60 Hz ",
+ 1, 525, 480, 33, 2,
+ 10, "-", 800, 640, 49, 290, 146, 25200000,
+ "-", 2438, 1270, 6, 32, 1},/*RGB888*/
+{ "2 CEA 2 - 3 720x480p @ 60 Hz 4:3 ",
+ 2, 525, 480, 30, 6,
+ 9, "-", 858, 720, 34, 130, 128, 27027000,
+ "-", 1828, 0x3C0, 8, 24, 1},/*RGB565*/
+{ "3 CEA 4 1280x720p @ 60 Hz ",
+ 4, 750, 720, 20, 5,
+ 5, "+", 1650, 1280, 114, 39, 228, 74250000,
+ "+", 1706, 164, 6, 32, 1},/*RGB565*/
+{ "4 CEA 5 1920x1080i @ 60 Hz ",
+ 5, 1125, 540, 20, 5,
+ 0, "+", 2200, 1920, 88, 44, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "5 CEA 6-7 480i (NTSC) ",
+ 6, 525, 240, 44, 5,
+ 0, "-", 858, 720, 12, 64, 10, 13513513,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "6 CEA 14-15 480p @ 60 Hz ",
+ 14, 525, 480, 44, 5,
+ 0, "-", 858, 720, 12, 64, 10, 27027000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "7 CEA 16 1920x1080p @ 60 Hz ",
+ 16, 1125, 1080, 36, 5,
+ 0, "+", 1980, 1280, 440, 40, 10, 133650000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "8 CEA 17-18 720x576p @ 50 Hz ",
+ 17, 625, 576, 44, 5,
+ 0, "-", 864, 720, 12, 64, 10, 27000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "9 CEA 19 1280x720p @ 50 Hz ",
+ 19, 750, 720, 25, 5,
+ 0, "+", 1980, 1280, 440, 40, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "10 CEA 20 1920 x 1080i @ 50 Hz ",
+ 20, 1125, 540, 20, 5,
+ 0, "+", 2640, 1920, 528, 44, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "11 CEA 21-22 576i (PAL) ",
+ 21, 625, 288, 44, 5,
+ 0, "-", 1728, 1440, 12, 64, 10, 27000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "12 CEA 29/30 576p ",
+ 29, 625, 576, 44, 5,
+ 0, "-", 864, 720, 12, 64, 10, 27000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "13 CEA 31 1080p 50Hz ",
+ 31, 1125, 1080, 44, 5,
+ 0, "-", 2640, 1920, 12, 64, 10, 148500000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "14 CEA 32 1920x1080p @ 24 Hz ",
+ 32, 1125, 1080, 36, 5,
+ 4, "+", 2750, 1920, 660, 44, 153, 74250000,
+ "+", 2844, 0x530, 6, 32, 1},/*RGB565*/
+{ "15 CEA 33 1920x1080p @ 25 Hz ",
+ 33, 1125, 1080, 36, 5,
+ 4, "+", 2640, 1920, 528, 44, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "16 CEA 34 1920x1080p @ 30Hz ",
+ 34, 1125, 1080, 36, 5,
+ 4, "+", 2200, 1920, 91, 44, 153, 74250000,
+ "+", 2275, 0xAB, 6, 32, 1},/*RGB565*/
+{ "17 CEA 60 1280x720p @ 24 Hz ",
+ 60, 750, 720, 20, 5,
+ 5, "+", 3300, 1280, 284, 50, 2276, 59400000,
+ "+", 4266, 0xAD0, 5, 32, 1},/*RGB565*/
+{ "18 CEA 61 1280x720p @ 25 Hz ",
+ 61, 750, 720, 20, 5,
+ 5, "+", 3960, 1280, 228, 39, 2503, 74250000,
+ "+", 4096, 0x500, 5, 32, 1},/*RGB565*/
+{ "19 CEA 62 1280x720p @ 30 Hz ",
+ 62, 750, 720, 20, 5,
+ 5, "+", 3300, 1280, 228, 39, 1820, 74250000,
+ "+", 3413, 0x770, 5, 32, 1},/*RGB565*/
+{ "20 VESA 9 800x600 @ 60 Hz ",
+ 109, 628, 600, 28, 4,
+ 0, "+", 1056, 800, 40, 128, 10, 40000000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "21 VESA 14 848x480 @ 60 Hz ",
+ 114, 517, 480, 20, 5,
+ 0, "+", 1088, 848, 24, 80, 10, 33750000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "22 VESA 16 1024x768 @ 60 Hz ",
+ 116, 806, 768, 38, 6,
+ 0, "-", 1344, 1024, 24, 135, 10, 65000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "23 VESA 22 1280x768 @ 60 Hz ",
+ 122, 790, 768, 34, 4,
+ 0, "+", 1440, 1280, 48, 160, 10, 68250000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "24 VESA 23 1280x768 @ 60 Hz ",
+ 123, 798, 768, 30, 7,
+ 0, "+", 1664, 1280, 64, 128, 10, 79500000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "25 VESA 27 1280x800 @ 60 Hz ",
+ 127, 823, 800, 23, 6,
+ 0, "+", 1440, 1280, 48, 32, 10, 71000000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "26 VESA 28 1280x800 @ 60 Hz ",
+ 128, 831, 800, 31, 6,
+ 0, "+", 1680, 1280, 72, 128, 10, 83500000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "27 VESA 39 1360x768 @ 60 Hz ",
+ 139, 795, 768, 22, 5,
+ 0, "-", 1792, 1360, 48, 32, 10, 85500000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "28 VESA 81 1366x768 @ 60 Hz ",
+ 181, 798, 768, 30, 5,
+ 0, "+", 1792, 1366, 72, 136, 10, 85750000,
+ "-", 0, 0, 0, 0, 0} /*Settings to be define*/
+};
+
+const struct color_conversion_cmd col_trans_identity = {
+ .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000,
+ .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000,
+ .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100,
+ .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+const struct color_conversion_cmd col_trans_identity_clamp_yuv = {
+ .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000,
+ .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000,
+ .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100,
+ .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000,
+ .lmax = 0xeb,
+ .lmin = 0x10,
+ .cmax = 0xf0,
+ .cmin = 0x10,
+};
+
+const struct color_conversion_cmd col_trans_yuv_to_rgb_v1 = {
+ .c0 = 0x0087, .c1 = 0x0000, .c2 = 0x00ba,
+ .c3 = 0x0087, .c4 = 0xffd3, .c5 = 0xffa1,
+ .c6 = 0x0087, .c7 = 0x00eb, .c8 = 0x0000,
+ .aoffset = 0xffab, .boffset = 0x004e, .coffset = 0xff92,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+const struct color_conversion_cmd col_trans_yuv_to_rgb_v2 = {
+ .c0 = 0x0198, .c1 = 0x012a, .c2 = 0x0000,
+ .c3 = 0xff30, .c4 = 0x012a, .c5 = 0xff9c,
+ .c6 = 0x0000, .c7 = 0x012a, .c8 = 0x0204,
+ .aoffset = 0xff21, .boffset = 0x0088, .coffset = 0xfeeb,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+const struct color_conversion_cmd col_trans_yuv_to_denc = {
+ .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000,
+ .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000,
+ .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100,
+ .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000,
+ .lmax = 0xeb,
+ .lmin = 0x10,
+ .cmax = 0xf0,
+ .cmin = 0x10,
+};
+
+const struct color_conversion_cmd col_trans_rgb_to_denc = {
+ .c0 = 0x0070, .c1 = 0xffb6, .c2 = 0xffda,
+ .c3 = 0x0042, .c4 = 0x0081, .c5 = 0x0019,
+ .c6 = 0xffee, .c7 = 0xffa2, .c8 = 0x0070,
+ .aoffset = 0x007f, .boffset = 0x0010, .coffset = 0x007f,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+static const struct i2c_device_id av8100_id[] = {
+ { "av8100", 0 },
+ { }
+};
+
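+/**
+ * devnr_to_adev() - Get the av8100 device with a given device number
+ * @devnr: index of the device in the global av8100 device list
+ *
+ * Walks the list of registered av8100 devices and returns the entry at
+ * position devnr, or NULL if no such entry exists.
+ **/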
+static struct av8100_device *devnr_to_adev(int devnr)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct av8100_device *av8100_dev;
+ int cnt = 0;
+
+ list_for_each(element, &av8100_device_list) {
+ av8100_dev = list_entry(element, struct av8100_device, list);
+ if (cnt == devnr)
+ return av8100_dev;
+ cnt++;
+ }
+
+ return NULL;
+}
+
+static struct av8100_device *dev_to_adev(struct device *dev)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct av8100_device *av8100_dev;
+
+ list_for_each(element, &av8100_device_list) {
+ av8100_dev = list_entry(element, struct av8100_device, list);
+ if (av8100_dev->dev == dev)
+ return av8100_dev;
+ }
+
+ return NULL;
+}
+
+static int adev_to_devnr(struct av8100_device *adev)
+{
+ /* Get devnr from list of devices */
+ struct list_head *element;
+ struct av8100_device *av8100_dev;
+ int cnt = 0;
+
+ list_for_each(element, &av8100_device_list) {
+ av8100_dev = list_entry(element, struct av8100_device, list);
+ if (av8100_dev == adev)
+ return cnt;
+ cnt++;
+ }
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_PM
+static int av8100_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct av8100_device *adev;
+
+ adev = dev_to_adev(dev);
+ if (!adev)
+ return -EFAULT;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ adev->params.pre_suspend_power =
+ (av8100_status_get().av8100_state > AV8100_OPMODE_SHUTDOWN);
+
+ if (adev->params.pre_suspend_power) {
+ ret = av8100_powerdown();
+ if (ret)
+ dev_err(dev, "av8100_powerdown failed\n");
+ }
+
+ return ret;
+}
+
+static int av8100_resume(struct device *dev)
+{
+ int ret;
+ u8 hpds = 0;
+ struct av8100_device *adev;
+
+ adev = dev_to_adev(dev);
+ if (!adev)
+ return -EFAULT;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (adev->params.pre_suspend_power) {
+ ret = av8100_powerup();
+ if (ret) {
+ dev_err(dev, "av8100_powerup failed\n");
+ return ret;
+ }
+
+ /* Check HDMI plug status */
+ if (av8100_reg_stby_r(NULL, NULL, &hpds, NULL, NULL)) {
+ dev_warn(dev, "av8100_reg_stby_r failed\n");
+ goto av8100_resume_end;
+ }
+
+ if (hpds)
+ set_plug_status(adev, AV8100_HDMI_PLUGIN); /* Plugged*/
+ else
+ clr_plug_status(adev,
+ AV8100_HDMI_PLUGIN); /* Unplugged*/
+
+ adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH;
+ av8100_enable_interrupt();
+ }
+
+av8100_resume_end:
+ return 0;
+}
+
+static const struct dev_pm_ops av8100_dev_pm_ops = {
+ .suspend = av8100_suspend,
+ .resume = av8100_resume,
+};
+#endif
+
+static struct i2c_driver av8100_driver = {
+ .probe = av8100_probe,
+ .remove = av8100_remove,
+ .driver = {
+ .name = "av8100",
+#ifdef CONFIG_PM
+ .pm = &av8100_dev_pm_ops,
+#endif
+ },
+ .id_table = av8100_id,
+};
+
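+/**
+ * av8100_plugtimer_int() - Plug startup timer callback
+ * @value: device number of the av8100 device that armed the timer
+ *
+ * Flags a plug startup event for the device and wakes up the av8100 event
+ * thread, which performs the actual handling in process context.
+ **/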
+static void av8100_plugtimer_int(unsigned long value)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev((int)value);
+ adev->flag |= AV8100_PLUGSTARTUP_EVENT;
+ wake_up_interruptible(&adev->event);
+ del_timer(&adev->timer);
+}
+
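+/**
+ * av8100_int_event_handle() - Handle pending av8100 interrupts
+ * @adev: av8100 device
+ *
+ * Reads and clears the pending standby and general interrupts, updates the
+ * HDMI and CVBS plug status, logs buffer under/overflow and reports CEC
+ * receive, CEC transmit, CEC transmit error and HDCP status change events
+ * through the registered HDMI event callback.
+ **/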
+static int av8100_int_event_handle(struct av8100_device *adev)
+{
+ u8 hpdi = 0;
+ u8 cpdi = 0;
+ u8 uovbi = 0;
+ u8 hdcpi = 0;
+ u8 ceci = 0;
+ u8 hpds = 0;
+ u8 cpds = 0;
+ u8 hdcps = 0;
+ u8 onuvb = 0;
+ u8 cectxerr = 0;
+ u8 cecrx = 0;
+ u8 cectx = 0;
+
+ /* STANDBY_PENDING_INTERRUPT reg */
+ if (av8100_reg_stby_pend_int_r(&hpdi, &cpdi, NULL, NULL)) {
+ dev_dbg(adev->dev, "av8100_reg_stby_pend_int_r failed\n");
+ goto av8100_int_event_handle_1;
+ }
+
+ /* Plug event */
+ if (hpdi | cpdi) {
+ /* Clear pending interrupts */
+ (void)av8100_reg_stby_pend_int_w(1, 1, 1, 0);
+
+ /* STANDBY reg */
+ if (av8100_reg_stby_r(NULL, NULL, &hpds, &cpds, NULL)) {
+ dev_dbg(adev->dev, "av8100_reg_stby_r failed\n");
+ goto av8100_int_event_handle_1;
+ }
+ }
+
+ if (cpdi & adev->params.cpdm) {
+ /* TVout plugin change */
+ if (cpds) {
+ dev_dbg(adev->dev, "cpds 1\n");
+ set_plug_status(adev, AV8100_CVBS_PLUGIN);
+ } else {
+ dev_dbg(adev->dev, "cpds 0\n");
+ clr_plug_status(adev, AV8100_CVBS_PLUGIN);
+ }
+ }
+
+ if (hpdi & adev->params.hpdm) {
+ /* HDMI plugin change */
+ if (hpds) {
+ /* Plugged */
+ /* Set 5V always on */
+ av8100_5V_w(adev->params.denc_off_time,
+ 0,
+ adev->params.on_time);
+ dev_dbg(adev->dev, "hpds 1\n");
+ set_plug_status(adev, AV8100_HDMI_PLUGIN);
+ } else {
+ /* Unplugged */
+ av8100_5V_w(adev->params.denc_off_time,
+ adev->params.hdmi_off_time,
+ adev->params.on_time);
+ dev_dbg(adev->dev, "hpds 0\n");
+ clr_plug_status(adev, AV8100_HDMI_PLUGIN);
+ }
+ }
+
+av8100_int_event_handle_1:
+ /* GENERAL_INTERRUPT reg */
+ if (av8100_reg_gen_int_r(NULL, NULL, NULL, &ceci,
+ &hdcpi, &uovbi, NULL)) {
+ dev_dbg(adev->dev, "av8100_reg_gen_int_r failed\n");
+ return -EINVAL;
+ }
+
+ /* CEC or HDCP event */
+ if (ceci | hdcpi | uovbi) {
+ /* Clear pending interrupts */
+ av8100_reg_gen_int_w(1, 1, 1, 1, 1, 1);
+
+ /* GENERAL_STATUS reg */
+ if (av8100_reg_gen_status_r(&cectxerr, &cecrx, &cectx, NULL,
+ &onuvb, &hdcps) != 0) {
+ dev_dbg(adev->dev, "av8100_reg_gen_status_r fail\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Underflow or overflow */
+ if (uovbi)
+ dev_dbg(adev->dev, "uovbi %d\n", onuvb);
+
+ /* CEC received */
+ if (ceci && cecrx) {
+ u8 val;
+
+ dev_dbg(adev->dev, "cecrx\n");
+
+ /* Clear cecrx in status reg*/
+ if (av8100_reg_r(AV8100_GENERAL_STATUS, &val) == 0) {
+ if (av8100_reg_w(AV8100_GENERAL_STATUS,
+ val & ~AV8100_GENERAL_STATUS_CECREC_MASK))
+ dev_info(adev->dev, "gen_stat write error\n");
+ } else {
+ dev_info(adev->dev, "gen_stat read error\n");
+ }
+
+ /* Report CEC event */
+ cec_rx(adev);
+ }
+
+ /* CEC tx error */
+ if (ceci && cectx && cectxerr) {
+ dev_dbg(adev->dev, "cectxerr\n");
+ /* Report CEC tx error event */
+ cec_txerr(adev);
+ } else if (ceci && cectx) {
+ dev_dbg(adev->dev, "cectx\n");
+ /* Report CEC tx event */
+ cec_tx(adev);
+ }
+
+ /* HDCP event */
+ if (hdcpi) {
+ dev_dbg(adev->dev, "hdcpch:%0x\n", hdcps);
+ /* Report HDCP status change event */
+ hdcp_changed(adev);
+ }
+
+ return 0;
+}
+
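+/**
+ * av8100_plugstartup_event_handle() - Finish the HDMI plug startup phase
+ * @adev: av8100 device
+ *
+ * Called when the plug startup timer expires. Unmasks the HPD interrupt,
+ * reads the actual plug status and moves the plug state machine to
+ * AV8100_PLUGGED or AV8100_UNPLUGGED accordingly.
+ **/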
+static int av8100_plugstartup_event_handle(struct av8100_device *adev)
+{
+ u8 hpds = 0;
+ u8 cpds = 0;
+
+ switch (adev->params.plug_state) {
+ case AV8100_UNPLUGGED:
+ case AV8100_PLUGGED:
+ default:
+ break;
+
+ case AV8100_PLUGGED_STARTUP:
+ /* Unmask interrupt */
+ adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH;
+ if (av8100_reg_stby_int_mask_w(adev->params.hpdm,
+ adev->params.cpdm,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW)) {
+ dev_dbg(adev->dev,
+ "av8100_reg_stby_int_mask_w fail\n");
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ /* Get actual plug status */
+ if (av8100_reg_stby_r(NULL, NULL, &hpds, &cpds, NULL))
+ dev_dbg(adev->dev, "av8100_reg_stby_r fail\n");
+
+ /* Set plugstate */
+ if (hpds) {
+ adev->params.plug_state = AV8100_PLUGGED;
+ dev_dbg(adev->dev, "plug_state:2\n");
+ } else {
+ adev->params.plug_state = AV8100_UNPLUGGED;
+ dev_dbg(adev->dev, "plug_state:0\n");
+
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(
+ AV8100_HDMI_EVENT_HDMI_PLUGOUT);
+ }
+ break;
+ }
+
+ return 0;
+}
+
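+/**
+ * av8100_thread() - av8100 event handling thread
+ * @p: av8100 device
+ *
+ * Waits for events flagged by the interrupt handler and the plug startup
+ * timer and dispatches them to the corresponding handlers, as long as the
+ * chip is at least in standby mode.
+ **/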
+static int av8100_thread(void *p)
+{
+ u8 flags;
+ struct av8100_device *adev = p;
+
+ while (1) {
+ wait_event_interruptible(adev->event, (adev->flag != 0));
+ flags = adev->flag;
+ adev->flag = 0;
+
+ if (adev->status.av8100_state < AV8100_OPMODE_STANDBY)
+ continue;
+
+ if (flags & AV8100_INT_EVENT)
+ (void)av8100_int_event_handle(adev);
+
+ if (flags & AV8100_PLUGSTARTUP_EVENT)
+ (void)av8100_plugstartup_event_handle(adev);
+ }
+
+ return 0;
+}
+
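+/**
+ * av8100_intr_handler() - av8100 interrupt handler
+ * @irq: interrupt number
+ * @p: av8100 device
+ *
+ * Flags an interrupt event and wakes up the event thread; the interrupt
+ * sources are read and cleared in thread context.
+ **/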
+static irqreturn_t av8100_intr_handler(int irq, void *p)
+{
+ struct av8100_device *adev;
+
+ adev = (struct av8100_device *) p;
+ adev->flag |= AV8100_INT_EVENT;
+ wake_up_interruptible(&adev->event);
+
+ return IRQ_HANDLED;
+}
+
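+/**
+ * av8100_get_te_line_nb() - Get the tearing effect line number
+ * @output_video_format: selected CEA/VESA output video format
+ *
+ * Returns the tearing effect line number to be used in the video input
+ * configuration for the given output video format.
+ **/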
+static u16 av8100_get_te_line_nb(
+ enum av8100_output_CEA_VESA output_video_format)
+{
+ u16 retval;
+
+ switch (output_video_format) {
+ case AV8100_CEA1_640X480P_59_94HZ:
+ case AV8100_CEA2_3_720X480P_59_94HZ:
+ case AV8100_VESA16_1024X768P_60HZ:
+ retval = AV8100_TE_LINE_NB_30;
+ break;
+
+ case AV8100_CEA4_1280X720P_60HZ:
+ case AV8100_CEA60_1280X720P_24HZ:
+ case AV8100_CEA61_1280X720P_25HZ:
+ case AV8100_CEA62_1280X720P_30HZ:
+ retval = AV8100_TE_LINE_NB_21;
+ break;
+
+ case AV8100_CEA5_1920X1080I_60HZ:
+ case AV8100_CEA6_7_NTSC_60HZ:
+ case AV8100_CEA20_1920X1080I_50HZ:
+ case AV8100_CEA21_22_576I_PAL_50HZ:
+ case AV8100_VESA27_1280X800P_59_91HZ:
+ retval = AV8100_TE_LINE_NB_18;
+ break;
+
+ case AV8100_CEA14_15_480p_60HZ:
+ retval = AV8100_TE_LINE_NB_32;
+ break;
+
+ case AV8100_CEA17_18_720X576P_50HZ:
+ case AV8100_CEA29_30_576P_50HZ:
+ retval = AV8100_TE_LINE_NB_40;
+ break;
+
+ case AV8100_CEA19_1280X720P_50HZ:
+ case AV8100_VESA39_1360X768P_60_02HZ:
+ retval = AV8100_TE_LINE_NB_22;
+ break;
+
+ case AV8100_CEA32_1920X1080P_24HZ:
+ case AV8100_CEA33_1920X1080P_25HZ:
+ case AV8100_CEA34_1920X1080P_30HZ:
+ retval = AV8100_TE_LINE_NB_38;
+ break;
+
+ case AV8100_VESA9_800X600P_60_32HZ:
+ retval = AV8100_TE_LINE_NB_24;
+ break;
+
+ case AV8100_VESA14_848X480P_60HZ:
+ retval = AV8100_TE_LINE_NB_29;
+ break;
+
+ case AV8100_VESA22_1280X768P_59_99HZ:
+ retval = AV8100_TE_LINE_NB_17;
+ break;
+
+ case AV8100_VESA23_1280X768P_59_87HZ:
+ case AV8100_VESA81_1366X768P_59_79HZ:
+ retval = AV8100_TE_LINE_NB_25;
+ break;
+
+ case AV8100_VESA28_1280X800P_59_81HZ:
+ retval = AV8100_TE_LINE_NB_26;
+ break;
+
+ case AV8100_CEA16_1920X1080P_60HZ:
+ case AV8100_CEA31_1920x1080P_50Hz:
+ default:
+ /* TODO */
+ retval = AV8100_TE_LINE_NB_38;
+ break;
+ }
+
+ return retval;
+}
+
+static u16 av8100_get_ui_x4(
+ enum av8100_output_CEA_VESA output_video_format)
+{
+ return AV8100_UI_X4_DEFAULT;
+}
+
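+/**
+ * av8100_config_video_output_dep() - Prepare output format dependent config
+ * @output_format: selected CEA/VESA output video format
+ *
+ * Prepares the video input configuration (DSI command mode, RGB565, timing
+ * taken from the av8100_all_cea table, TE line number) and the DENC
+ * standard that depend on the selected output format.
+ **/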
+static int av8100_config_video_output_dep(
+ enum av8100_output_CEA_VESA output_format)
+{
+ int retval;
+ union av8100_configuration config;
+
+ /* video input */
+ config.video_input_format.dsi_input_mode =
+ AV8100_HDMI_DSI_COMMAND_MODE;
+ config.video_input_format.input_pixel_format = AV8100_INPUT_PIX_RGB565;
+ config.video_input_format.total_horizontal_pixel =
+ av8100_all_cea[output_format].htotale;
+ config.video_input_format.total_horizontal_active_pixel =
+ av8100_all_cea[output_format].hactive;
+ config.video_input_format.total_vertical_lines =
+ av8100_all_cea[output_format].vtotale;
+ config.video_input_format.total_vertical_active_lines =
+ av8100_all_cea[output_format].vactive;
+
+ switch (output_format) {
+ case AV8100_CEA5_1920X1080I_60HZ:
+ case AV8100_CEA20_1920X1080I_50HZ:
+ case AV8100_CEA21_22_576I_PAL_50HZ:
+ case AV8100_CEA6_7_NTSC_60HZ:
+ config.video_input_format.video_mode =
+ AV8100_VIDEO_INTERLACE;
+ break;
+
+ default:
+ config.video_input_format.video_mode =
+ AV8100_VIDEO_PROGRESSIVE;
+ break;
+ }
+
+ config.video_input_format.nb_data_lane =
+ AV8100_DATA_LANES_USED_2;
+ config.video_input_format.nb_virtual_ch_command_mode = 0;
+ config.video_input_format.nb_virtual_ch_video_mode = 0;
+ config.video_input_format.ui_x4 = av8100_get_ui_x4(output_format);
+ config.video_input_format.TE_line_nb = av8100_get_te_line_nb(
+ output_format);
+ config.video_input_format.TE_config = AV8100_TE_OFF;
+ config.video_input_format.master_clock_freq = 0;
+
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_VIDEO_INPUT_FORMAT, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* DENC */
+ switch (output_format) {
+ case AV8100_CEA21_22_576I_PAL_50HZ:
+ config.denc_format.cvbs_video_format = AV8100_CVBS_625;
+ config.denc_format.standard_selection = AV8100_PAL_BDGHI;
+ break;
+
+ case AV8100_CEA6_7_NTSC_60HZ:
+ config.denc_format.cvbs_video_format = AV8100_CVBS_525;
+ config.denc_format.standard_selection = AV8100_NTSC_M;
+ break;
+
+ default:
+ /* Not supported */
+ break;
+ }
+
+ return 0;
+}
+
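+/**
+ * av8100_config_init() - Prepare the default av8100 configuration
+ * @adev: av8100 device
+ *
+ * Prepares default settings for color conversion, DENC, video output
+ * (1280x720p @ 60 Hz), video input, pattern generator, audio input,
+ * HDMI mode and EDID section readback.
+ **/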
+static int av8100_config_init(struct av8100_device *adev)
+{
+ int retval;
+ union av8100_configuration config;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ memset(&config, 0, sizeof(union av8100_configuration));
+ memset(&adev->config, 0, sizeof(struct av8100_config));
+
+ /* Color conversion */
+ config.color_transform = AV8100_COLOR_TRANSFORM_INDENTITY;
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_COLORSPACECONVERSION, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* DENC */
+ config.denc_format.cvbs_video_format = AV8100_CVBS_625;
+ config.denc_format.standard_selection = AV8100_PAL_BDGHI;
+ config.denc_format.enable = 0;
+ config.denc_format.macrovision_enable = 0;
+ config.denc_format.internal_generator = 0;
+ retval = av8100_conf_prep(AV8100_COMMAND_DENC, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* Video output */
+ config.video_output_format.video_output_cea_vesa =
+ AV8100_CEA4_1280X720P_60HZ;
+
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_VIDEO_OUTPUT_FORMAT, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* Video input */
+ av8100_config_video_output_dep(
+ config.video_output_format.video_output_cea_vesa);
+
+ /* Pattern generator */
+ config.pattern_generator_format.pattern_audio_mode =
+ AV8100_PATTERN_AUDIO_OFF;
+ config.pattern_generator_format.pattern_type =
+ AV8100_PATTERN_GENERATOR;
+ config.pattern_generator_format.pattern_video_format =
+ AV8100_PATTERN_720P;
+ retval = av8100_conf_prep(AV8100_COMMAND_PATTERNGENERATOR,
+ &config);
+ if (retval)
+ return -EFAULT;
+
+ /* Audio input */
+ config.audio_input_format.audio_input_if_format =
+ AV8100_AUDIO_I2SDELAYED_MODE;
+ config.audio_input_format.i2s_input_nb = 1;
+ config.audio_input_format.sample_audio_freq = AV8100_AUDIO_FREQ_48KHZ;
+ config.audio_input_format.audio_word_lg = AV8100_AUDIO_16BITS;
+ config.audio_input_format.audio_format = AV8100_AUDIO_LPCM_MODE;
+ config.audio_input_format.audio_if_mode = AV8100_AUDIO_MASTER;
+ config.audio_input_format.audio_mute = AV8100_AUDIO_MUTE_DISABLE;
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_AUDIO_INPUT_FORMAT, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* HDMI mode */
+ config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ config.hdmi_format.hdmi_format = AV8100_HDMI;
+ config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ retval = av8100_conf_prep(AV8100_COMMAND_HDMI, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* EDID section readback */
+ config.edid_section_readback_format.address = 0xA0;
+ config.edid_section_readback_format.block_number = 0;
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_EDID_SECTION_READBACK, &config);
+ if (retval)
+ return -EFAULT;
+
+ return 0;
+}
+
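+/**
+ * av8100_params_init() - Set the default driver parameters
+ * @adev: av8100 device
+ *
+ * Initializes the 5 V on/off times and the default standby and general
+ * interrupt mask settings.
+ **/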
+static int av8100_params_init(struct av8100_device *adev)
+{
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ memset(&adev->params, 0, sizeof(struct av8100_params));
+
+ adev->params.denc_off_time = AV8100_DENC_OFF_TIME;
+ adev->params.hdmi_off_time = AV8100_HDMI_OFF_TIME;
+ adev->params.on_time = AV8100_ON_TIME;
+
+ adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH;
+ adev->params.cpdm = AV8100_STANDBY_INTERRUPT_MASK_CPDM_HIGH;
+ adev->params.hdcpm = AV8100_GENERAL_INTERRUPT_MASK_HDCPM_HIGH;
+ adev->params.cecm = AV8100_GENERAL_INTERRUPT_MASK_CECM_HIGH;
+ adev->params.uovbm = AV8100_GENERAL_INTERRUPT_MASK_UOVBM_HIGH;
+
+ return 0;
+}
+
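+/**
+ * clr_plug_status() - Handle an unplug event
+ * @adev: av8100 device
+ * @status: plugin status bit to clear (HDMI or CVBS)
+ *
+ * Clears the plugin status bit and, for HDMI, moves the plug state machine
+ * to AV8100_UNPLUGGED and reports the plug-out event through the registered
+ * callback.
+ **/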
+static void clr_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status)
+{
+ adev->status.av8100_plugin_status &= ~status;
+
+ switch (status) {
+ case AV8100_HDMI_PLUGIN:
+ switch (adev->params.plug_state) {
+ case AV8100_UNPLUGGED:
+ case AV8100_PLUGGED_STARTUP:
+ default:
+ break;
+
+ case AV8100_PLUGGED:
+ adev->params.plug_state =
+ AV8100_UNPLUGGED;
+ dev_dbg(adev->dev, "plug_state:0\n");
+
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(
+ AV8100_HDMI_EVENT_HDMI_PLUGOUT);
+ break;
+ }
+ break;
+
+ case AV8100_CVBS_PLUGIN:
+ /* TODO */
+ break;
+
+ default:
+ break;
+ }
+}
+
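+/**
+ * set_plug_status() - Handle a plug-in event
+ * @adev: av8100 device
+ * @status: plugin status bit to set (HDMI or CVBS)
+ *
+ * Sets the plugin status bit. On HDMI plug-in from the unplugged state the
+ * HPD interrupt is masked, the plug startup timer is started and the
+ * plug-in event is reported through the registered callback.
+ **/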
+static void set_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status)
+{
+ adev->status.av8100_plugin_status |= status;
+
+ switch (status) {
+ case AV8100_HDMI_PLUGIN:
+ switch (adev->params.plug_state) {
+ case AV8100_UNPLUGGED:
+ adev->params.plug_state =
+ AV8100_PLUGGED_STARTUP;
+
+ dev_dbg(adev->dev, "plug_state:1\n");
+
+ /*
+ * Mask interrupts to avoid plug detect during
+ * startup
+ */
+ adev->params.hpdm =
+ AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW;
+ if (av8100_reg_stby_int_mask_w(
+ adev->params.hpdm,
+ adev->params.cpdm,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW)) {
+ dev_dbg(adev->dev,
+ "av8100_reg_stby_int_mask_w fail\n");
+ }
+
+ /* Set plug startup timer */
+ init_timer(&adev->timer);
+ adev->timer.expires = jiffies +
+ AV8100_PLUGSTARTUP_TIME;
+ adev->timer.function =
+ av8100_plugtimer_int;
+ adev->timer.data = adev_to_devnr(adev);
+ mod_timer(&adev->timer, adev->timer.expires);
+
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(
+ AV8100_HDMI_EVENT_HDMI_PLUGIN);
+ break;
+
+ case AV8100_PLUGGED_STARTUP:
+ case AV8100_PLUGGED:
+ default:
+ break;
+ }
+ break;
+
+ case AV8100_CVBS_PLUGIN:
+ /* TODO */
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void cec_rx(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CEC);
+}
+
+static void cec_tx(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CECTX);
+}
+
+static void cec_txerr(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CECTXERR);
+}
+
+static void hdcp_changed(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_HDCP);
+}
+
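+/**
+ * av8100_set_state() - Update the av8100 operating mode
+ * @adev: av8100 device
+ * @state: new operating mode
+ *
+ * Stores the new operating mode; when dropping to standby or below, the
+ * plug status and the hdmi_on flag are cleared as well.
+ **/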
+static void av8100_set_state(struct av8100_device *adev,
+ enum av8100_operating_mode state)
+{
+ adev->status.av8100_state = state;
+
+ if (state <= AV8100_OPMODE_STANDBY) {
+ clr_plug_status(adev, AV8100_HDMI_PLUGIN);
+ clr_plug_status(adev, AV8100_CVBS_PLUGIN);
+ adev->status.hdmi_on = false;
+ }
+}
+
+/**
+ * write_single_byte() - Write a single byte to av8100
+ * through i2c interface.
+ * @client: i2c client structure
+ * @reg: register offset
+ * @data: data byte to be written
+ *
+ * This function uses the SMBus byte write API to write a single byte to av8100
+ **/
+static int write_single_byte(struct i2c_client *client, u8 reg,
+ u8 data)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ if (ret < 0)
+ dev_dbg(dev, "i2c smbus write byte failed\n");
+
+ return ret;
+}
+
+/**
+ * read_single_byte() - read single byte from av8100
+ * through i2c interface
+ * @client: i2c client structure
+ * @reg: register offset
+ * @val: register value
+ *
+ * This function uses the SMBus byte read API to read a single byte from the
+ * given register offset.
+ **/
+static int read_single_byte(struct i2c_client *client, u8 reg, u8 *val)
+{
+ int value;
+ struct device *dev = &client->dev;
+
+ value = i2c_smbus_read_byte_data(client, reg);
+ if (value < 0) {
+ dev_dbg(dev, "i2c smbus read byte failed,read data = %x "
+ "from offset:%x\n" , value, reg);
+ return -EFAULT;
+ }
+
+ *val = (u8) value;
+ return 0;
+}
+
+/**
+ * write_multi_byte() - Write multiple bytes to av8100 through the
+ * i2c interface.
+ * @client: i2c client structure
+ * @reg: register offset
+ * @buf: buffer to be written
+ * @nbytes: number of bytes to be written
+ *
+ * This function uses the SMBus I2C block write API to write nbytes bytes to
+ * the av8100
+ **/
+static int write_multi_byte(struct i2c_client *client, u8 reg,
+ u8 *buf, u8 nbytes)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ ret = i2c_smbus_write_i2c_block_data(client, reg, nbytes, buf);
+ if (ret < 0)
+ dev_dbg(dev, "i2c smbus write multi byte error\n");
+
+ return ret;
+}
+
+static int configuration_video_input_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_video_input_cmd.dsi_input_mode;
+ buffer[1] = adev->config.hdmi_video_input_cmd.input_pixel_format;
+ buffer[2] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_pixel);
+ buffer[3] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_pixel);
+ buffer[4] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_active_pixel);
+ buffer[5] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_active_pixel);
+ buffer[6] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_lines);
+ buffer[7] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_lines);
+ buffer[8] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_active_lines);
+ buffer[9] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_active_lines);
+ buffer[10] = adev->config.hdmi_video_input_cmd.video_mode;
+ buffer[11] = adev->config.hdmi_video_input_cmd.nb_data_lane;
+ buffer[12] = adev->config.hdmi_video_input_cmd.
+ nb_virtual_ch_command_mode;
+ buffer[13] = adev->config.hdmi_video_input_cmd.
+ nb_virtual_ch_video_mode;
+ buffer[14] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ TE_line_nb);
+ buffer[15] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ TE_line_nb);
+ buffer[16] = adev->config.hdmi_video_input_cmd.TE_config;
+ buffer[17] = REG_32_8_MSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[18] = REG_32_8_MMSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[19] = REG_32_8_MLSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[20] = REG_32_8_LSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[21] = adev->config.hdmi_video_input_cmd.ui_x4;
+
+ *length = AV8100_COMMAND_VIDEO_INPUT_FORMAT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_audio_input_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_audio_input_cmd.audio_input_if_format;
+ buffer[1] = adev->config.hdmi_audio_input_cmd.i2s_input_nb;
+ buffer[2] = adev->config.hdmi_audio_input_cmd.sample_audio_freq;
+ buffer[3] = adev->config.hdmi_audio_input_cmd.audio_word_lg;
+ buffer[4] = adev->config.hdmi_audio_input_cmd.audio_format;
+ buffer[5] = adev->config.hdmi_audio_input_cmd.audio_if_mode;
+ buffer[6] = adev->config.hdmi_audio_input_cmd.audio_mute;
+
+ *length = AV8100_COMMAND_AUDIO_INPUT_FORMAT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_video_output_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_video_output_cmd.
+ video_output_cea_vesa;
+
+ if (buffer[0] == AV8100_CUSTOM) {
+ buffer[1] = adev->config.hdmi_video_output_cmd.
+ vsync_polarity;
+ buffer[2] = adev->config.hdmi_video_output_cmd.
+ hsync_polarity;
+ buffer[3] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_pixel);
+ buffer[4] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_pixel);
+ buffer[5] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_active_pixel);
+ buffer[6] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_active_pixel);
+ buffer[7] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.total_vertical_in_half_lines);
+ buffer[8] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.total_vertical_in_half_lines);
+ buffer[9] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.
+ total_vertical_active_in_half_lines);
+ buffer[10] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.
+ total_vertical_active_in_half_lines);
+ buffer[11] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.hsync_start_in_pixel);
+ buffer[12] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.hsync_start_in_pixel);
+ buffer[13] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.hsync_length_in_pixel);
+ buffer[14] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.hsync_length_in_pixel);
+ buffer[15] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.vsync_start_in_half_line);
+ buffer[16] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.vsync_start_in_half_line);
+ buffer[17] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.vsync_length_in_half_line);
+ buffer[18] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.vsync_length_in_half_line);
+ buffer[19] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.hor_video_start_pixel);
+ buffer[20] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.hor_video_start_pixel);
+ buffer[21] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.vert_video_start_pixel);
+ buffer[22] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.vert_video_start_pixel);
+ buffer[23] = adev->config.hdmi_video_output_cmd.video_type;
+ buffer[24] = adev->config.hdmi_video_output_cmd.pixel_repeat;
+ buffer[25] = REG_32_8_MSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+ buffer[26] = REG_32_8_MMSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+ buffer[27] = REG_32_8_MLSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+ buffer[28] = REG_32_8_LSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+
+ *length = AV8100_COMMAND_VIDEO_OUTPUT_FORMAT_SIZE - 1;
+ } else {
+ *length = 1;
+ }
+
+ return 0;
+}
+
+static int configuration_video_scaling_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_start_in_pixel);
+ buffer[1] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ h_start_in_pixel);
+ buffer[2] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_in_pixel);
+ buffer[3] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_in_pixel);
+ buffer[4] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_in_line);
+ buffer[5] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_in_line);
+ buffer[6] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_in_line);
+ buffer[7] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_in_line);
+ buffer[8] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_start_out_pixel);
+ buffer[9] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd
+ .h_start_out_pixel);
+ buffer[10] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_out_pixel);
+ buffer[11] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_out_pixel);
+ buffer[12] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_out_line);
+ buffer[13] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_out_line);
+ buffer[14] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_out_line);
+ buffer[15] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_out_line);
+
+ *length = AV8100_COMMAND_VIDEO_SCALING_FORMAT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_colorspace_conversion_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ const struct color_conversion_cmd *hdmi_color_space_conversion_cmd;
+
+ hdmi_color_space_conversion_cmd =
+ get_color_transform_cmd(adev, adev->config.color_transform);
+
+ buffer[0] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c0);
+ buffer[1] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c0);
+ buffer[2] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c1);
+ buffer[3] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c1);
+ buffer[4] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c2);
+ buffer[5] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c2);
+ buffer[6] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c3);
+ buffer[7] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c3);
+ buffer[8] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c4);
+ buffer[9] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c4);
+ buffer[10] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c5);
+ buffer[11] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c5);
+ buffer[12] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c6);
+ buffer[13] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c6);
+ buffer[14] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c7);
+ buffer[15] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c7);
+ buffer[16] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c8);
+ buffer[17] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c8);
+ buffer[18] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->aoffset);
+ buffer[19] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->aoffset);
+ buffer[20] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->boffset);
+ buffer[21] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->boffset);
+ buffer[22] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->coffset);
+ buffer[23] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->coffset);
+ buffer[24] = hdmi_color_space_conversion_cmd->lmax;
+ buffer[25] = hdmi_color_space_conversion_cmd->lmin;
+ buffer[26] = hdmi_color_space_conversion_cmd->cmax;
+ buffer[27] = hdmi_color_space_conversion_cmd->cmin;
+
+ *length = AV8100_COMMAND_COLORSPACECONVERSION_SIZE - 1;
+ return 0;
+}
+
+static int configuration_cec_message_write_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_cec_message_write_cmd.buffer_length;
+ memcpy(&buffer[1], adev->config.hdmi_cec_message_write_cmd.buffer,
+ adev->config.hdmi_cec_message_write_cmd.buffer_length);
+
+ *length = adev->config.hdmi_cec_message_write_cmd.buffer_length + 1;
+
+ return 0;
+}
+
+static int configuration_cec_message_read_get(char *buffer,
+ unsigned int *length)
+{
+ /* No buffer data */
+ *length = AV8100_COMMAND_CEC_MESSAGE_READ_BACK_SIZE - 1;
+ return 0;
+}
+
+static int configuration_denc_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_denc_cmd.cvbs_video_format;
+ buffer[1] = adev->config.hdmi_denc_cmd.standard_selection;
+ buffer[2] = adev->config.hdmi_denc_cmd.enable;
+ buffer[3] = adev->config.hdmi_denc_cmd.macrovision_enable;
+ buffer[4] = adev->config.hdmi_denc_cmd.internal_generator;
+
+ *length = AV8100_COMMAND_DENC_SIZE - 1;
+ return 0;
+}
+
+static int configuration_hdmi_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_cmd.hdmi_mode;
+ buffer[1] = adev->config.hdmi_cmd.hdmi_format;
+ buffer[2] = adev->config.hdmi_cmd.dvi_format;
+
+ *length = AV8100_COMMAND_HDMI_SIZE - 1;
+ return 0;
+}
+
+static int configuration_hdcp_sendkey_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_hdcp_send_key_cmd.key_number;
+ memcpy(&buffer[1], adev->config.hdmi_hdcp_send_key_cmd.data,
+ adev->config.hdmi_hdcp_send_key_cmd.data_len);
+
+ *length = adev->config.hdmi_hdcp_send_key_cmd.data_len + 1;
+ return 0;
+}
+
+static int configuration_hdcp_management_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_hdcp_management_format_cmd.req_type;
+ buffer[1] = adev->config.hdmi_hdcp_management_format_cmd.encr_use;
+
+ *length = AV8100_COMMAND_HDCP_MANAGEMENT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_infoframe_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_infoframes_cmd.type;
+ buffer[1] = adev->config.hdmi_infoframes_cmd.version;
+ buffer[2] = adev->config.hdmi_infoframes_cmd.length;
+ buffer[3] = adev->config.hdmi_infoframes_cmd.crc;
+ memcpy(&buffer[4], adev->config.hdmi_infoframes_cmd.data,
+ HDMI_INFOFRAME_DATA_SIZE);
+
+ *length = adev->config.hdmi_infoframes_cmd.length + 4;
+ return 0;
+}
+
+static int av8100_edid_section_readback_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_edid_section_readback_cmd.address;
+ buffer[1] = adev->config.hdmi_edid_section_readback_cmd.
+ block_number;
+
+ *length = AV8100_COMMAND_EDID_SECTION_READBACK_SIZE - 1;
+ return 0;
+}
+
+static int configuration_pattern_generator_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_pattern_generator_cmd.pattern_type;
+ buffer[1] = adev->config.hdmi_pattern_generator_cmd.
+ pattern_video_format;
+ buffer[2] = adev->config.hdmi_pattern_generator_cmd.
+ pattern_audio_mode;
+
+ *length = AV8100_COMMAND_PATTERNGENERATOR_SIZE - 1;
+ return 0;
+}
+
+static int configuration_fuse_aes_key_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_fuse_aes_key_cmd.fuse_operation;
+ if (adev->config.hdmi_fuse_aes_key_cmd.fuse_operation) {
+ /* Write key command */
+ memcpy(&buffer[1], adev->config.hdmi_fuse_aes_key_cmd.key,
+ HDMI_FUSE_AES_KEY_SIZE);
+
+ *length = AV8100_COMMAND_FUSE_AES_KEY_SIZE - 1;
+ } else {
+ /* Check key command */
+ *length = AV8100_COMMAND_FUSE_AES_CHK_SIZE - 1;
+ }
+ return 0;
+}
+
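+/**
+ * get_command_return_first() - Check the first command return byte
+ * @i2c: i2c client structure
+ * @command_type: command that was issued
+ *
+ * Reads the first return byte of a command and verifies that it equals the
+ * command type with bit 7 set.
+ **/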
+static int get_command_return_first(struct i2c_client *i2c,
+ enum av8100_command_type command_type)
+{
+ int retval = 0;
+ char val;
+ struct device *dev = &i2c->dev;
+
+ retval = read_single_byte(i2c, AV8100_COMMAND_OFFSET, &val);
+ if (retval) {
+ dev_dbg(dev, "%s 1st ret failed\n", __func__);
+ return retval;
+ }
+
+ if (val != (0x80 | command_type)) {
+ dev_dbg(dev, "%s 1st ret wrong:%x\n", __func__, val);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
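+/**
+ * get_command_return_data() - Read command specific return data
+ * @i2c: i2c client structure
+ * @command_type: command that was issued
+ * @command_buffer: command payload that was sent
+ * @buffer_length: out: number of return bytes copied to buffer
+ * @buffer: out: command return data
+ *
+ * Checks the command status byte and, depending on the command type, copies
+ * CEC messages, HDCP revocation data, EDID sections, fuse AES key status or
+ * the AES CRC32 from the chip into the supplied buffer.
+ **/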
+static int get_command_return_data(struct i2c_client *i2c,
+ enum av8100_command_type command_type,
+ u8 *command_buffer,
+ u8 *buffer_length,
+ u8 *buffer)
+{
+ int retval = 0;
+ char val;
+ int index = 0;
+ struct device *dev = &i2c->dev;
+
+ if (buffer_length)
+ *buffer_length = 0;
+
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ case AV8100_COMMAND_DENC:
+ case AV8100_COMMAND_HDMI:
+ case AV8100_COMMAND_INFOFRAMES:
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail2r;
+
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ if ((buffer == NULL) || (buffer_length == NULL)) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Get the return buffer length */
+ retval = read_single_byte(i2c, AV8100_CEC_ADDR_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail;
+
+ dev_dbg(dev, "cec buflen:%d\n", val);
+ *buffer_length = val;
+
+ if (*buffer_length >
+ HDMI_CEC_READ_MAXSIZE) {
+ dev_dbg(dev, "CEC size too large %d\n",
+ *buffer_length);
+ *buffer_length = HDMI_CEC_READ_MAXSIZE;
+ }
+
+ dev_dbg(dev, "return data: ");
+
+ /* Get the return buffer */
+ for (index = 0; index < *buffer_length; ++index) {
+ retval = read_single_byte(i2c,
+ AV8100_CEC_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ }
+
+ dev_dbg(dev, "\n");
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ {
+ u8 nrdev;
+ u8 devcnt;
+ int cnt;
+
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+ if (retval) {
+ goto get_command_return_data_fail2r;
+ } else {
+ /* Check the second return byte */
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+ }
+
+ if ((buffer == NULL) || (buffer_length == NULL))
+ /* Ignore return data */
+ break;
+
+ dev_dbg(dev, "req_type:%02x ", command_buffer[0]);
+
+ /* Check if revoc list data is requested */
+ if (command_buffer[0] !=
+ HDMI_REQUEST_FOR_REVOCATION_LIST_INPUT) {
+ *buffer_length = 0;
+ break;
+ }
+
+ dev_dbg(dev, "return data: ");
+
+ /* Get the return buffer */
+ for (cnt = 0; cnt < HDMI_HDCP_MGMT_BKSV_SIZE; cnt++) {
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ index++;
+ }
+
+ /* Get Device count */
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index, &nrdev);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = nrdev;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ index++;
+
+ /* Determine number of devices */
+ nrdev &= HDMI_HDCP_MGMT_DEVICE_MASK;
+ if (nrdev > HDMI_HDCP_MGMT_MAX_DEVICES_SIZE)
+ nrdev = HDMI_HDCP_MGMT_MAX_DEVICES_SIZE;
+
+ /* Get Bksv for each connected equipment */
+ for (devcnt = 0; devcnt < nrdev; devcnt++)
+ for (cnt = 0; cnt < HDMI_HDCP_MGMT_BKSV_SIZE; cnt++) {
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index,
+ &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ",
+ *(buffer + index));
+ }
+ index++;
+ }
+
+ if (nrdev == 0)
+ goto hdcp_management_end;
+
+ /* Get SHA signature */
+ for (cnt = 0; cnt < HDMI_HDCP_MGMT_SHA_SIZE - 1; cnt++) {
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ index++;
+ }
+
+hdcp_management_end:
+ *buffer_length = index;
+
+ dev_dbg(dev, "\n");
+ }
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ if ((buffer == NULL) || (buffer_length == NULL)) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Return buffer length is fixed */
+ *buffer_length = HDMI_EDIDREAD_SIZE;
+
+ dev_dbg(dev, "return data: ");
+
+ /* Get the return buffer */
+ for (index = 0; index < *buffer_length; ++index) {
+ retval = read_single_byte(i2c,
+ AV8100_EDID_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ }
+
+ dev_dbg(dev, "\n");
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ if ((buffer == NULL) || (buffer_length == NULL)) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+
+ if (retval)
+ goto get_command_return_data_fail2r;
+
+ /* Check the second return byte */
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+
+ /* Return buffer length is fixed */
+ *buffer_length = HDMI_FUSE_AES_KEY_RET_SIZE;
+
+ /* Get CRC */
+ retval = read_single_byte(i2c,
+ AV8100_FUSE_CRC_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail;
+
+ *buffer = val;
+ dev_dbg(dev, "CRC:%02x ", val);
+
+ /* Get programmed status */
+ retval = read_single_byte(i2c,
+ AV8100_FUSE_PRGD_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail;
+
+ *(buffer + 1) = val;
+
+ dev_dbg(dev, "programmed:%02x ", val);
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ if ((command_buffer[0] == HDMI_LOADAES_END_BLK_NR) &&
+ ((buffer == NULL) || (buffer_length == NULL))) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail2r;
+
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+
+ if (command_buffer[0] == HDMI_LOADAES_END_BLK_NR) {
+ /* Return CRC32 if last AES block */
+ int cnt;
+
+ dev_dbg(dev, "CRC32:");
+ for (cnt = 0; cnt < HDMI_CRC32_SIZE; cnt++) {
+ retval = read_single_byte(i2c,
+ AV8100_CRC32_OFFSET + cnt, &val);
+ if (retval)
+ goto get_command_return_data_fail;
+ *(buffer + cnt) = val;
+ dev_dbg(dev, "%02x", val);
+ }
+
+ *buffer_length = HDMI_CRC32_SIZE;
+ }
+ break;
+
+ default:
+ retval = -EFAULT;
+ break;
+ }
+
+ return retval;
+get_command_return_data_fail2r:
+ dev_dbg(dev, "%s Reading 2nd return byte failed\n", __func__);
+ return retval;
+get_command_return_data_fail2v:
+ dev_dbg(dev, "%s 2nd return byte is wrong:%x\n", __func__, val);
+ return retval;
+get_command_return_data_fail:
+ dev_dbg(dev, "%s FAIL\n", __func__);
+ return retval;
+}
+
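+/**
+ * av8100_powerup1() - First phase of av8100 power up
+ * @adev: av8100 device
+ *
+ * Enables the power regulator, releases the chip reset, optionally runs the
+ * alternative power-up register sequence and requests the av8100 interrupt.
+ **/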
+static int av8100_powerup1(struct av8100_device *adev)
+{
+ int retval;
+ struct av8100_platform_data *pdata = adev->dev->platform_data;
+
+ /* Regulator enable */
+ if ((adev->params.regulator_pwr) &&
+ (adev->params.regulator_requested == false)) {
+ retval = regulator_enable(adev->params.regulator_pwr);
+ if (retval < 0) {
+ dev_warn(adev->dev, "%s: regulator_enable failed\n",
+ __func__);
+ return retval;
+ }
+ dev_dbg(adev->dev, "regulator_enable ok\n");
+ adev->params.regulator_requested = true;
+ }
+
+ /* Reset av8100 */
+ gpio_set_value_cansleep(pdata->reset, 1);
+
+ /* Need to wait before proceeding */
+ mdelay(AV8100_WAITTIME_1MS);
+
+ av8100_set_state(adev, AV8100_OPMODE_STANDBY);
+
+ if (pdata->alt_powerupseq) {
+ dev_dbg(adev->dev, "powerup seq alt\n");
+ retval = av8100_5V_w(0, 0, AV8100_ON_TIME);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 1\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ udelay(AV8100_WATTIME_100US);
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 2\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ udelay(AV8100_WATTIME_100US);
+
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_HIGH, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 3\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_LOW, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 4\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 5\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+ }
+
+ retval = request_irq(pdata->irq, av8100_intr_handler,
+ IRQF_TRIGGER_RISING, "av8100", adev);
+ if (retval == 0)
+ adev->params.irq_requested = true;
+ else
+ dev_err(adev->dev, "request_irq %d failed %d\n",
+ pdata->irq, retval);
+
+ return retval;
+
+av8100_powerup1_err:
+ av8100_powerdown();
+ return -EFAULT;
+}
+
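+/**
+ * av8100_powerup2() - Second phase of av8100 power up
+ * @adev: av8100 device
+ *
+ * Programs the 5 V on/off times used for HDMI plug detect and puts the
+ * driver in scan mode.
+ **/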
+static int av8100_powerup2(struct av8100_device *adev)
+{
+ int retval;
+
+ /* ON time & OFF time on 5v HDMI plug detect */
+ retval = av8100_5V_w(adev->params.denc_off_time,
+ adev->params.hdmi_off_time,
+ adev->params.on_time);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return retval;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ av8100_set_state(adev, AV8100_OPMODE_SCAN);
+
+ return 0;
+}
+
+static int register_read_internal(u8 offset, u8 *value)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ i2c = adev->config.client;
+
+ /* Read from register */
+ retval = read_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from av8100 register\n");
+ return -EFAULT;
+ }
+
+ return retval;
+}
+
+static int register_write_internal(u8 offset, u8 value)
+{
+ int retval;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ i2c = adev->config.client;
+
+ /* Write to register */
+ retval = write_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
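+/**
+ * av8100_powerscan() - Put the av8100 back into scan mode
+ *
+ * If the chip is beyond scan mode it is put in standby mode, the APE and
+ * DDR OPP requirements are removed, the input clock is disabled and the
+ * driver state is set to AV8100_OPMODE_SCAN.
+ **/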
+int av8100_powerscan(void)
+{
+ int retval;
+ struct av8100_device *adev;
+ struct av8100_platform_data *pdata;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ pdata = adev->dev->platform_data;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ if (av8100_status_get().av8100_state > AV8100_OPMODE_SCAN) {
+ dev_dbg(adev->dev, "set to scan mode\n");
+
+ av8100_disable_interrupt();
+
+ /* Stby mode */
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_LOW, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write to av8100 register\n");
+ return retval;
+ }
+
+ /* Remove APE OPP requirement */
+ if (adev->params.opp_requested) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name);
+ adev->params.opp_requested = false;
+ }
+
+ /* Clock disable */
+ if (adev->params.inputclk &&
+ adev->params.inputclk_requested) {
+ clk_disable(adev->params.inputclk);
+ adev->params.inputclk_requested = false;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ av8100_enable_interrupt();
+
+ av8100_set_state(adev, AV8100_OPMODE_SCAN);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_powerscan);
+
+int av8100_powerup(void)
+{
+ int ret = 0;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ if (av8100_status_get().av8100_state == AV8100_OPMODE_UNDEFINED)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state < AV8100_OPMODE_STANDBY) {
+ ret = av8100_powerup1(adev);
+ if (ret) {
+ dev_err(adev->dev, "av8100_powerup1 fail\n");
+ return -EFAULT;
+ }
+ }
+
+ if (av8100_status_get().av8100_state < AV8100_OPMODE_SCAN)
+ ret = av8100_powerup2(adev);
+
+ av8100_enable_interrupt();
+
+ return ret;
+}
+EXPORT_SYMBOL(av8100_powerup);
+
+int av8100_powerdown(void)
+{
+ int retval = 0;
+ struct av8100_device *adev;
+ struct av8100_platform_data *pdata;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ pdata = adev->dev->platform_data;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ goto av8100_powerdown_end;
+
+ av8100_disable_interrupt();
+
+ if (adev->params.irq_requested)
+ free_irq(pdata->irq, adev);
+ adev->params.irq_requested = false;
+
+ if (pdata->alt_powerupseq) {
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH);
+
+ if (retval)
+ dev_err(adev->dev, "%s reg_wr err\n", __func__);
+ msleep(AV8100_WAITTIME_50MS);
+ }
+
+ /* Remove APE OPP requirement */
+ if (adev->params.opp_requested) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name);
+ adev->params.opp_requested = false;
+ }
+
+ /* Clock disable */
+ if (adev->params.inputclk && adev->params.inputclk_requested) {
+ clk_disable(adev->params.inputclk);
+ adev->params.inputclk_requested = false;
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_SHUTDOWN);
+
+ gpio_set_value_cansleep(pdata->reset, 0);
+
+ /* Regulator disable */
+ if ((adev->params.regulator_pwr) &&
+ (adev->params.regulator_requested)) {
+ dev_dbg(adev->dev, "regulator_disable\n");
+ regulator_disable(adev->params.regulator_pwr);
+ adev->params.regulator_requested = false;
+ }
+
+ if (pdata->alt_powerupseq)
+ mdelay(AV8100_WAITTIME_5MS);
+
+av8100_powerdown_end:
+ return retval;
+}
+EXPORT_SYMBOL(av8100_powerdown);
+
+int av8100_download_firmware(enum interface_type if_type)
+{
+ int retval;
+ int temp = 0x0;
+ int increment = 15;
+ int index = 0;
+ int size = 0x0;
+ char val = 0x0;
+ char checksum = 0;
+ int cnt;
+ int cnt_max;
+ struct i2c_client *i2c;
+ u8 uc;
+ u8 fdl;
+ u8 hld;
+ u8 wa;
+ u8 ra;
+ struct av8100_platform_data *pdata;
+ const struct firmware *fw_file;
+ u8 *fw_buff;
+ int fw_bytes;
+ struct av8100_device *adev;
+ struct av8100_status status;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ LOCK_AV8100_FWDL;
+
+ status = av8100_status_get();
+ if (status.av8100_state <= AV8100_OPMODE_SHUTDOWN) {
+ retval = -EINVAL;
+ goto av8100_download_firmware_err2;
+ }
+
+ if (status.av8100_state >= AV8100_OPMODE_INIT) {
+ dev_dbg(adev->dev, "FW already ok\n");
+ retval = 0;
+ goto av8100_download_firmware_err2;
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_INIT);
+
+ pdata = adev->dev->platform_data;
+
+ /* Request firmware */
+ if (request_firmware(&fw_file,
+ AV8100_FW_FILENAME,
+ adev->dev)) {
+ dev_err(adev->dev, "fw request failed\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err2;
+ }
+
+ /* Master clock timing, running */
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_HIGH, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ goto av8100_download_firmware_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ /* Clock enable */
+ if (adev->params.inputclk &&
+ adev->params.inputclk_requested == false) {
+ if (clk_enable(adev->params.inputclk)) {
+ dev_err(adev->dev, "inputclk en failed\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ adev->params.inputclk_requested = true;
+ }
+
+ /* Request 100% APE OPP */
+ if (adev->params.opp_requested == false) {
+ if (prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name, 100)) {
+ dev_err(adev->dev, "APE OPP 100 failed\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+ if (prcmu_qos_add_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name, 100)) {
+ dev_err(adev->dev, "DDR OPP 100 failed\n");
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ adev->params.opp_requested = true;
+ }
+
+ msleep(AV8100_WAITTIME_10MS);
+
+ /* Prepare firmware data */
+ fw_bytes = fw_file->size;
+ fw_buff = (u8 *)fw_file->data;
+ dev_dbg(adev->dev, "fw size:%d\n", fw_bytes);
+
+ i2c = adev->config.client;
+
+ /* Enable firmware download */
+ retval = av8100_reg_gen_ctrl_w(
+ AV8100_GENERAL_CONTROL_FDL_HIGH,
+ AV8100_GENERAL_CONTROL_HLD_HIGH,
+ AV8100_GENERAL_CONTROL_WA_LOW,
+ AV8100_GENERAL_CONTROL_RA_LOW);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ retval = av8100_reg_gen_ctrl_r(&fdl, &hld, &wa, &ra);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to read the value from av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ } else {
+ dev_dbg(adev->dev, "GENERAL_CONTROL_REG register fdl:%d "
+ "hld:%d wa:%d ra:%d\n", fdl, hld, wa, ra);
+ }
+
+ LOCK_AV8100_HW;
+
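+ /* Transfer the firmware in 'increment'-byte chunks; the remaining
+ * (fw_bytes % increment) tail bytes are written separately below. */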
+ temp = fw_bytes % increment;
+ for (size = 0; size < (fw_bytes-temp); size = size + increment,
+ index += increment) {
+ if (if_type == I2C_INTERFACE) {
+ retval = write_multi_byte(i2c,
+ AV8100_FIRMWARE_DOWNLOAD_ENTRY, fw_buff + size,
+ increment);
+ if (retval) {
+ dev_dbg(adev->dev, "Failed to download the "
+ "av8100 firmware\n");
+ UNLOCK_AV8100_HW;
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+ } else if (if_type == DSI_INTERFACE) {
+ dev_dbg(adev->dev,
+ "DSI_INTERFACE is currently not supported\n");
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ } else {
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ }
+ }
+
+ /* Transfer last firmware bytes */
+ if (if_type == I2C_INTERFACE) {
+ retval = write_multi_byte(i2c,
+ AV8100_FIRMWARE_DOWNLOAD_ENTRY, fw_buff + size, temp);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to download the av8100 firmware\n");
+ UNLOCK_AV8100_HW;
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+ } else if (if_type == DSI_INTERFACE) {
+ /* TODO: Add support for DSI firmware download */
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ } else {
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ }
+
+ /* Verify the transfer: XOR all firmware bytes and compare with the
+ * checksum read back from the chip */
+ for (size = 0; size < fw_bytes; size++)
+ checksum = checksum ^ fw_buff[size];
+
+ UNLOCK_AV8100_HW;
+
+ retval = av8100_reg_fw_dl_entry_r(&val);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from the av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ dev_dbg(adev->dev, "checksum:%x,val:%x\n", checksum, val);
+
+ if (checksum != val) {
+ dev_dbg(adev->dev,
+ ">Fw downloading.... FAIL checksum issue\n");
+ dev_dbg(adev->dev, "checksum = %d\n", checksum);
+ dev_dbg(adev->dev, "checksum read: %d\n", val);
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ } else {
+ dev_dbg(adev->dev, ">Fw downloading.... success\n");
+ }
+
+ /* Set to idle mode */
+ retval = av8100_reg_gen_ctrl_w(AV8100_GENERAL_CONTROL_FDL_LOW,
+ AV8100_GENERAL_CONTROL_HLD_LOW, AV8100_GENERAL_CONTROL_WA_LOW,
+ AV8100_GENERAL_CONTROL_RA_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to the av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ /* Wait for the internal microcontroller to become ready,
+ * retrying with increasing delays */
+ cnt = 0;
+ cnt_max = sizeof(waittime_retry) / sizeof(waittime_retry[0]);
+ retval = av8100_reg_gen_status_r(NULL, NULL, NULL, &uc,
+ NULL, NULL);
+ while ((retval == 0) && (uc != 0x1) && (cnt < cnt_max)) {
+ mdelay(waittime_retry[cnt]);
+ retval = av8100_reg_gen_status_r(NULL, NULL, NULL,
+ &uc, NULL, NULL);
+ cnt++;
+ }
+ dev_dbg(adev->dev, "av8100 fwdl cnt:%d\n", cnt);
+
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from the av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ if (uc != 0x1)
+ dev_dbg(adev->dev, "UC is not ready\n");
+
+ release_firmware(fw_file);
+
+ if (adev->chip_version != 1) {
+ char *cut_str;
+
+ /* Get cut version */
+ retval = read_single_byte(i2c, AV8100_CUTVER_OFFSET, &val);
+ if (retval) {
+ dev_err(adev->dev, "Read cut ver failed\n");
+ goto av8100_download_firmware_err2;
+ }
+
+ switch (val) {
+ case 0x00:
+ cut_str = CUT_STR_0;
+ break;
+ case 0x01:
+ cut_str = CUT_STR_1;
+ break;
+ case 0x03:
+ cut_str = CUT_STR_3;
+ break;
+ case 0x30:
+ cut_str = CUT_STR_30;
+ break;
+ default:
+ cut_str = CUT_STR_UNKNOWN;
+ break;
+ }
+ dev_dbg(adev->dev, "Cut ver %d %s\n", val, cut_str);
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_IDLE);
+
+ UNLOCK_AV8100_FWDL;
+ return 0;
+
+av8100_download_firmware_err:
+ release_firmware(fw_file);
+
+ /* Remove APE OPP requirement */
+ if (adev->params.opp_requested) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name);
+ adev->params.opp_requested = false;
+ }
+
+ /* Clock disable */
+ if (adev->params.inputclk && adev->params.inputclk_requested) {
+ clk_disable(adev->params.inputclk);
+ adev->params.inputclk_requested = false;
+ }
+
+av8100_download_firmware_err2:
+ UNLOCK_AV8100_FWDL;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_download_firmware);
+
+int av8100_disable_interrupt(void)
+{
+ int retval;
+ u8 hpdm = 0;
+ u8 cpdm = 0;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ if (!adev->params.ints_enabled)
+ return 0;
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ retval = av8100_reg_gen_int_mask_w(
+ AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_CECM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_HDCPM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_UOVBM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
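+ /* Save the configured HPD/CPD plug-detect masks;
+ * av8100_reg_stby_int_mask_w() below overwrites params.hpdm/cpdm,
+ * so they are restored afterwards for av8100_enable_interrupt(). */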
+ hpdm = adev->params.hpdm;
+ cpdm = adev->params.cpdm;
+
+ retval = av8100_reg_stby_int_mask_w(
+ AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW,
+ AV8100_STANDBY_INTERRUPT_MASK_CPDM_LOW,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ adev->params.hpdm = hpdm;
+ adev->params.cpdm = cpdm;
+ adev->params.ints_enabled = false;
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_disable_interrupt);
+
+int av8100_enable_interrupt(void)
+{
+ int retval;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ if (adev->params.ints_enabled)
+ return 0;
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
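+ /* Unmask the general interrupts, using the stored CEC/HDCP/UOVB
+ * mask settings */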
+ retval = av8100_reg_gen_int_mask_w(
+ AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW,
+ adev->params.cecm,
+ adev->params.hdcpm,
+ adev->params.uovbm,
+ AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
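+ /* Restore the HPD/CPD plug-detect masks preserved by
+ * av8100_disable_interrupt() */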
+ retval = av8100_reg_stby_int_mask_w(
+ adev->params.hpdm,
+ adev->params.cpdm,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ adev->params.ints_enabled = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_enable_interrupt);
+
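+ /* Register access helpers: each _w variant packs its arguments with
+ * the AV8100_<REG>_<FIELD>() macros and writes the result through
+ * register_write_internal() under the HW lock; each _r variant reads
+ * the register and unpacks the requested fields. */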
+int av8100_reg_stby_w(
+ u8 cpd, u8 stby, u8 mclkrng)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_STANDBY_CPD(cpd) | AV8100_STANDBY_STBY(stby) |
+ AV8100_STANDBY_MCLKRNG(mclkrng);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_STANDBY, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_w);
+
+static int av8100_5V_w(u8 denc_off, u8 hdmi_off, u8 on)
+{
+ u8 val;
+ int retval;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value.
+ * chip_version == 1 has one common off time;
+ * chip_version > 1 supports different off times for hdmi and tvout. */
+ if (adev->chip_version == 1)
+ val = AV8100_HDMI_5_VOLT_TIME_OFF_TIME(hdmi_off) |
+ AV8100_HDMI_5_VOLT_TIME_ON_TIME(on);
+ else
+ val = AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME(denc_off) |
+ AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME(hdmi_off) |
+ AV8100_HDMI_5_VOLT_TIME_ON_TIME(on);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_HDMI_5_VOLT_TIME, val);
+
+ UNLOCK_AV8100_HW;
+
+ return retval;
+}
+
+int av8100_reg_hdmi_5_volt_time_w(u8 denc_off, u8 hdmi_off, u8 on)
+{
+ int retval;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ retval = av8100_5V_w(denc_off, hdmi_off, on);
+
+ /* Set vars */
+ if (adev->chip_version > 1)
+ adev->params.denc_off_time = denc_off;
+
+ adev->params.hdmi_off_time = hdmi_off;
+ if (on)
+ adev->params.on_time = on;
+
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_hdmi_5_volt_time_w);
+
+int av8100_reg_stby_int_mask_w(
+ u8 hpdm, u8 cpdm, u8 stbygpiocfg, u8 ipol)
+{
+ int retval;
+ u8 val;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_STANDBY_INTERRUPT_MASK_HPDM(hpdm) |
+ AV8100_STANDBY_INTERRUPT_MASK_CPDM(cpdm) |
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG(stbygpiocfg) |
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL(ipol);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_STANDBY_INTERRUPT_MASK, val);
+
+ adev->params.hpdm = hpdm;
+ adev->params.cpdm = cpdm;
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_int_mask_w);
+
+int av8100_reg_stby_pend_int_w(
+ u8 hpdi, u8 cpdi, u8 oni, u8 bpdig)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_STANDBY_PENDING_INTERRUPT_HPDI(hpdi) |
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI(cpdi) |
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI(oni) |
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG(bpdig);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_STANDBY_PENDING_INTERRUPT, val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_pend_int_w);
+
+int av8100_reg_gen_int_mask_w(
+ u8 eocm, u8 vsim, u8 vsom, u8 cecm, u8 hdcpm, u8 uovbm, u8 tem)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GENERAL_INTERRUPT_MASK_EOCM(eocm) |
+ AV8100_GENERAL_INTERRUPT_MASK_VSIM(vsim) |
+ AV8100_GENERAL_INTERRUPT_MASK_VSOM(vsom) |
+ AV8100_GENERAL_INTERRUPT_MASK_CECM(cecm) |
+ AV8100_GENERAL_INTERRUPT_MASK_HDCPM(hdcpm) |
+ AV8100_GENERAL_INTERRUPT_MASK_UOVBM(uovbm) |
+ AV8100_GENERAL_INTERRUPT_MASK_TEM(tem);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GENERAL_INTERRUPT_MASK, val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_mask_w);
+
+int av8100_reg_gen_int_w(
+ u8 eoci, u8 vsii, u8 vsoi, u8 ceci, u8 hdcpi, u8 uovbi)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GENERAL_INTERRUPT_EOCI(eoci) |
+ AV8100_GENERAL_INTERRUPT_VSII(vsii) |
+ AV8100_GENERAL_INTERRUPT_VSOI(vsoi) |
+ AV8100_GENERAL_INTERRUPT_CECI(ceci) |
+ AV8100_GENERAL_INTERRUPT_HDCPI(hdcpi) |
+ AV8100_GENERAL_INTERRUPT_UOVBI(uovbi);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GENERAL_INTERRUPT, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_w);
+
+int av8100_reg_gpio_conf_w(
+ u8 dat3dir, u8 dat3val, u8 dat2dir, u8 dat2val, u8 dat1dir,
+ u8 dat1val, u8 ucdbg)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GPIO_CONFIGURATION_DAT3DIR(dat3dir) |
+ AV8100_GPIO_CONFIGURATION_DAT3VAL(dat3val) |
+ AV8100_GPIO_CONFIGURATION_DAT2DIR(dat2dir) |
+ AV8100_GPIO_CONFIGURATION_DAT2VAL(dat2val) |
+ AV8100_GPIO_CONFIGURATION_DAT1DIR(dat1dir) |
+ AV8100_GPIO_CONFIGURATION_DAT1VAL(dat1val) |
+ AV8100_GPIO_CONFIGURATION_UCDBG(ucdbg);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GPIO_CONFIGURATION, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gpio_conf_w);
+
+int av8100_reg_gen_ctrl_w(
+ u8 fdl, u8 hld, u8 wa, u8 ra)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GENERAL_CONTROL_FDL(fdl) |
+ AV8100_GENERAL_CONTROL_HLD(hld) |
+ AV8100_GENERAL_CONTROL_WA(wa) |
+ AV8100_GENERAL_CONTROL_RA(ra);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GENERAL_CONTROL, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_ctrl_w);
+
+int av8100_reg_fw_dl_entry_w(
+ u8 mbyte_code_entry)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY(
+ mbyte_code_entry);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_FIRMWARE_DOWNLOAD_ENTRY, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_fw_dl_entry_w);
+
+int av8100_reg_w(
+ u8 offset, u8 value)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ i2c = adev->config.client;
+
+ /* Write to register */
+ retval = write_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ UNLOCK_AV8100_HW;
+ return -EFAULT;
+ }
+
+ UNLOCK_AV8100_HW;
+ return 0;
+}
+EXPORT_SYMBOL(av8100_reg_w);
+
+int av8100_reg_stby_r(
+ u8 *cpd, u8 *stby, u8 *hpds, u8 *cpds, u8 *mclkrng)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_STANDBY, &val);
+
+ /* Set return params */
+ if (cpd)
+ *cpd = AV8100_STANDBY_CPD_GET(val);
+ if (stby)
+ *stby = AV8100_STANDBY_STBY_GET(val);
+ if (hpds)
+ *hpds = AV8100_STANDBY_HPDS_GET(val);
+ if (cpds)
+ *cpds = AV8100_STANDBY_CPDS_GET(val);
+ if (mclkrng)
+ *mclkrng = AV8100_STANDBY_MCLKRNG_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_r);
+
+int av8100_reg_hdmi_5_volt_time_r(
+ u8 *denc_off_time, u8 *hdmi_off_time, u8 *on_time)
+{
+ int retval;
+ u8 val;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_HDMI_5_VOLT_TIME, &val);
+
+ /* Set return params */
+ if (adev->chip_version == 1) {
+ if (denc_off_time)
+ *denc_off_time = 0;
+ if (hdmi_off_time)
+ *hdmi_off_time =
+ AV8100_HDMI_5_VOLT_TIME_OFF_TIME_GET(val);
+ } else {
+ if (denc_off_time)
+ *denc_off_time =
+ AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_GET(val);
+ if (hdmi_off_time)
+ *hdmi_off_time =
+ AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_GET(val);
+ }
+
+ if (on_time)
+ *on_time = AV8100_HDMI_5_VOLT_TIME_ON_TIME_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_hdmi_5_volt_time_r);
+
+int av8100_reg_stby_int_mask_r(
+ u8 *hpdm, u8 *cpdm, u8 *stbygpiocfg, u8 *ipol)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_STANDBY_INTERRUPT_MASK, &val);
+
+ /* Set return params */
+ if (hpdm)
+ *hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_GET(val);
+ if (cpdm)
+ *cpdm = AV8100_STANDBY_INTERRUPT_MASK_CPDM_GET(val);
+ if (stbygpiocfg)
+ *stbygpiocfg =
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_GET(val);
+ if (ipol)
+ *ipol = AV8100_STANDBY_INTERRUPT_MASK_IPOL_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_int_mask_r);
+
+int av8100_reg_stby_pend_int_r(
+ u8 *hpdi, u8 *cpdi, u8 *oni, u8 *sid)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_STANDBY_PENDING_INTERRUPT,
+ &val);
+
+ /* Set return params */
+ if (hpdi)
+ *hpdi = AV8100_STANDBY_PENDING_INTERRUPT_HPDI_GET(val);
+ if (cpdi)
+ *cpdi = AV8100_STANDBY_PENDING_INTERRUPT_CPDI_GET(val);
+ if (oni)
+ *oni = AV8100_STANDBY_PENDING_INTERRUPT_ONI_GET(val);
+ if (sid)
+ *sid = AV8100_STANDBY_PENDING_INTERRUPT_SID_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_pend_int_r);
+
+int av8100_reg_gen_int_mask_r(
+ u8 *eocm,
+ u8 *vsim,
+ u8 *vsom,
+ u8 *cecm,
+ u8 *hdcpm,
+ u8 *uovbm,
+ u8 *tem)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_INTERRUPT_MASK, &val);
+
+ /* Set return params */
+ if (eocm)
+ *eocm = AV8100_GENERAL_INTERRUPT_MASK_EOCM_GET(val);
+ if (vsim)
+ *vsim = AV8100_GENERAL_INTERRUPT_MASK_VSIM_GET(val);
+ if (vsom)
+ *vsom = AV8100_GENERAL_INTERRUPT_MASK_VSOM_GET(val);
+ if (cecm)
+ *cecm = AV8100_GENERAL_INTERRUPT_MASK_CECM_GET(val);
+ if (hdcpm)
+ *hdcpm = AV8100_GENERAL_INTERRUPT_MASK_HDCPM_GET(val);
+ if (uovbm)
+ *uovbm = AV8100_GENERAL_INTERRUPT_MASK_UOVBM_GET(val);
+ if (tem)
+ *tem = AV8100_GENERAL_INTERRUPT_MASK_TEM_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_mask_r);
+
+int av8100_reg_gen_int_r(
+ u8 *eoci,
+ u8 *vsii,
+ u8 *vsoi,
+ u8 *ceci,
+ u8 *hdcpi,
+ u8 *uovbi,
+ u8 *tei)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_INTERRUPT, &val);
+
+ /* Set return params */
+ if (eoci)
+ *eoci = AV8100_GENERAL_INTERRUPT_EOCI_GET(val);
+ if (vsii)
+ *vsii = AV8100_GENERAL_INTERRUPT_VSII_GET(val);
+ if (vsoi)
+ *vsoi = AV8100_GENERAL_INTERRUPT_VSOI_GET(val);
+ if (ceci)
+ *ceci = AV8100_GENERAL_INTERRUPT_CECI_GET(val);
+ if (hdcpi)
+ *hdcpi = AV8100_GENERAL_INTERRUPT_HDCPI_GET(val);
+ if (uovbi)
+ *uovbi = AV8100_GENERAL_INTERRUPT_UOVBI_GET(val);
+ if (tei)
+ *tei = AV8100_GENERAL_INTERRUPT_TEI_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_r);
+
+int av8100_reg_gen_status_r(
+ u8 *cectxerr,
+ u8 *cecrec,
+ u8 *cectrx,
+ u8 *uc,
+ u8 *onuvb,
+ u8 *hdcps)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_STATUS, &val);
+
+ /* Set return params */
+ if (cectxerr)
+ *cectxerr = AV8100_GENERAL_STATUS_CECTXERR_GET(val);
+ if (cecrec)
+ *cecrec = AV8100_GENERAL_STATUS_CECREC_GET(val);
+ if (cectrx)
+ *cectrx = AV8100_GENERAL_STATUS_CECTRX_GET(val);
+ if (uc)
+ *uc = AV8100_GENERAL_STATUS_UC_GET(val);
+ if (onuvb)
+ *onuvb = AV8100_GENERAL_STATUS_ONUVB_GET(val);
+ if (hdcps)
+ *hdcps = AV8100_GENERAL_STATUS_HDCPS_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_status_r);
+
+int av8100_reg_gpio_conf_r(
+ u8 *dat3dir,
+ u8 *dat3val,
+ u8 *dat2dir,
+ u8 *dat2val,
+ u8 *dat1dir,
+ u8 *dat1val,
+ u8 *ucdbg)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GPIO_CONFIGURATION, &val);
+
+ /* Set return params */
+ if (dat3dir)
+ *dat3dir = AV8100_GPIO_CONFIGURATION_DAT3DIR_GET(val);
+ if (dat3val)
+ *dat3val = AV8100_GPIO_CONFIGURATION_DAT3VAL_GET(val);
+ if (dat2dir)
+ *dat2dir = AV8100_GPIO_CONFIGURATION_DAT2DIR_GET(val);
+ if (dat2val)
+ *dat2val = AV8100_GPIO_CONFIGURATION_DAT2VAL_GET(val);
+ if (dat1dir)
+ *dat1dir = AV8100_GPIO_CONFIGURATION_DAT1DIR_GET(val);
+ if (dat1val)
+ *dat1val = AV8100_GPIO_CONFIGURATION_DAT1VAL_GET(val);
+ if (ucdbg)
+ *ucdbg = AV8100_GPIO_CONFIGURATION_UCDBG_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gpio_conf_r);
+
+int av8100_reg_gen_ctrl_r(
+ u8 *fdl,
+ u8 *hld,
+ u8 *wa,
+ u8 *ra)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_CONTROL, &val);
+ /* Set return params */
+ if (fdl)
+ *fdl = AV8100_GENERAL_CONTROL_FDL_GET(val);
+ if (hld)
+ *hld = AV8100_GENERAL_CONTROL_HLD_GET(val);
+ if (wa)
+ *wa = AV8100_GENERAL_CONTROL_WA_GET(val);
+ if (ra)
+ *ra = AV8100_GENERAL_CONTROL_RA_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_ctrl_r);
+
+int av8100_reg_fw_dl_entry_r(
+ u8 *mbyte_code_entry)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_FIRMWARE_DOWNLOAD_ENTRY, &val);
+
+ /* Set return params */
+ if (mbyte_code_entry)
+ *mbyte_code_entry =
+ AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_fw_dl_entry_r);
+
+int av8100_reg_r(
+ u8 offset,
+ u8 *value)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ i2c = adev->config.client;
+
+ /* Read from register */
+ retval = read_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_register_read_out;
+ }
+
+av8100_register_read_out:
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_r);
+
+int av8100_conf_get(enum av8100_command_type command_type,
+ union av8100_configuration *config)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state == AV8100_OPMODE_UNDEFINED)
+ return -EINVAL;
+
+ /* Copy the requested configuration from the corresponding data struct
+ * depending on command type */
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ memcpy(&config->video_input_format,
+ &adev->config.hdmi_video_input_cmd,
+ sizeof(struct av8100_video_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ memcpy(&config->audio_input_format,
+ &adev->config.hdmi_audio_input_cmd,
+ sizeof(struct av8100_audio_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ memcpy(&config->video_output_format,
+ &adev->config.hdmi_video_output_cmd,
+ sizeof(struct av8100_video_output_format_cmd));
+ break;
+
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ memcpy(&config->video_scaling_format,
+ &adev->config.hdmi_video_scaling_cmd,
+ sizeof(struct av8100_video_scaling_format_cmd));
+ break;
+
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ config->color_transform = adev->config.color_transform;
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ memcpy(&config->cec_message_write_format,
+ &adev->config.hdmi_cec_message_write_cmd,
+ sizeof(struct av8100_cec_message_write_format_cmd));
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ memcpy(&config->cec_message_read_back_format,
+ &adev->config.hdmi_cec_message_read_back_cmd,
+ sizeof(struct av8100_cec_message_read_back_format_cmd));
+ break;
+
+ case AV8100_COMMAND_DENC:
+ memcpy(&config->denc_format, &adev->config.hdmi_denc_cmd,
+ sizeof(struct av8100_denc_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDMI:
+ memcpy(&config->hdmi_format, &adev->config.hdmi_cmd,
+ sizeof(struct av8100_hdmi_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ memcpy(&config->hdcp_send_key_format,
+ &adev->config.hdmi_hdcp_send_key_cmd,
+ sizeof(struct av8100_hdcp_send_key_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ memcpy(&config->hdcp_management_format,
+ &adev->config.hdmi_hdcp_management_format_cmd,
+ sizeof(struct av8100_hdcp_management_format_cmd));
+ break;
+
+ case AV8100_COMMAND_INFOFRAMES:
+ memcpy(&config->infoframes_format,
+ &adev->config.hdmi_infoframes_cmd,
+ sizeof(struct av8100_infoframes_format_cmd));
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ memcpy(&config->edid_section_readback_format,
+ &adev->config.hdmi_edid_section_readback_cmd,
+ sizeof(struct
+ av8100_edid_section_readback_format_cmd));
+ break;
+
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ memcpy(&config->pattern_generator_format,
+ &adev->config.hdmi_pattern_generator_cmd,
+ sizeof(struct av8100_pattern_generator_format_cmd));
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ memcpy(&config->fuse_aes_key_format,
+ &adev->config.hdmi_fuse_aes_key_cmd,
+ sizeof(struct av8100_fuse_aes_key_format_cmd));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_conf_get);
+
+int av8100_conf_prep(enum av8100_command_type command_type,
+ union av8100_configuration *config)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!config || !adev)
+ return -EINVAL;
+
+ /* Put configuration data to the corresponding data struct depending
+ * on command type */
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ memcpy(&adev->config.hdmi_video_input_cmd,
+ &config->video_input_format,
+ sizeof(struct av8100_video_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ memcpy(&adev->config.hdmi_audio_input_cmd,
+ &config->audio_input_format,
+ sizeof(struct av8100_audio_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ memcpy(&adev->config.hdmi_video_output_cmd,
+ &config->video_output_format,
+ sizeof(struct av8100_video_output_format_cmd));
+
+ /* Set params that depend on video output */
+ av8100_config_video_output_dep(adev->config.
+ hdmi_video_output_cmd.video_output_cea_vesa);
+ break;
+
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ memcpy(&adev->config.hdmi_video_scaling_cmd,
+ &config->video_scaling_format,
+ sizeof(struct av8100_video_scaling_format_cmd));
+ break;
+
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ adev->config.color_transform = config->color_transform;
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ memcpy(&adev->config.hdmi_cec_message_write_cmd,
+ &config->cec_message_write_format,
+ sizeof(struct av8100_cec_message_write_format_cmd));
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ memcpy(&adev->config.hdmi_cec_message_read_back_cmd,
+ &config->cec_message_read_back_format,
+ sizeof(struct av8100_cec_message_read_back_format_cmd));
+ break;
+
+ case AV8100_COMMAND_DENC:
+ memcpy(&adev->config.hdmi_denc_cmd, &config->denc_format,
+ sizeof(struct av8100_denc_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDMI:
+ memcpy(&adev->config.hdmi_cmd, &config->hdmi_format,
+ sizeof(struct av8100_hdmi_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ memcpy(&adev->config.hdmi_hdcp_send_key_cmd,
+ &config->hdcp_send_key_format,
+ sizeof(struct av8100_hdcp_send_key_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ memcpy(&adev->config.hdmi_hdcp_management_format_cmd,
+ &config->hdcp_management_format,
+ sizeof(struct av8100_hdcp_management_format_cmd));
+ break;
+
+ case AV8100_COMMAND_INFOFRAMES:
+ memcpy(&adev->config.hdmi_infoframes_cmd,
+ &config->infoframes_format,
+ sizeof(struct av8100_infoframes_format_cmd));
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ memcpy(&adev->config.hdmi_edid_section_readback_cmd,
+ &config->edid_section_readback_format,
+ sizeof(struct
+ av8100_edid_section_readback_format_cmd));
+ break;
+
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ memcpy(&adev->config.hdmi_pattern_generator_cmd,
+ &config->pattern_generator_format,
+ sizeof(struct av8100_pattern_generator_format_cmd));
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ memcpy(&adev->config.hdmi_fuse_aes_key_cmd,
+ &config->fuse_aes_key_format,
+ sizeof(struct av8100_fuse_aes_key_format_cmd));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_conf_prep);
+
+int av8100_conf_w(enum av8100_command_type command_type,
+ u8 *return_buffer_length,
+ u8 *return_buffer, enum interface_type if_type)
+{
+ int retval = 0;
+ u8 cmd_buffer[AV8100_COMMAND_MAX_LENGTH];
+ u32 cmd_length = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ if (return_buffer_length)
+ *return_buffer_length = 0;
+
+ i2c = adev->config.client;
+
+ memset(&cmd_buffer, 0x00, AV8100_COMMAND_MAX_LENGTH);
+
+#define PRNK_MODE(_m) dev_dbg(adev->dev, "cmd: " #_m "\n");
+
+ /* Fill the command buffer with configuration data */
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_VIDEO_INPUT_FORMAT);
+ configuration_video_input_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_AUDIO_INPUT_FORMAT);
+ configuration_audio_input_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT);
+ configuration_video_output_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_VIDEO_SCALING_FORMAT);
+ configuration_video_scaling_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ PRNK_MODE(AV8100_COMMAND_COLORSPACECONVERSION);
+ configuration_colorspace_conversion_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ PRNK_MODE(AV8100_COMMAND_CEC_MESSAGE_WRITE);
+ configuration_cec_message_write_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ PRNK_MODE(AV8100_COMMAND_CEC_MESSAGE_READ_BACK);
+ configuration_cec_message_read_get(cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_DENC:
+ PRNK_MODE(AV8100_COMMAND_DENC);
+ configuration_denc_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_HDMI:
+ PRNK_MODE(AV8100_COMMAND_HDMI);
+ configuration_hdmi_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ PRNK_MODE(AV8100_COMMAND_HDCP_SENDKEY);
+ configuration_hdcp_sendkey_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ PRNK_MODE(AV8100_COMMAND_HDCP_MANAGEMENT);
+ configuration_hdcp_management_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_INFOFRAMES:
+ PRNK_MODE(AV8100_COMMAND_INFOFRAMES);
+ configuration_infoframe_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ PRNK_MODE(AV8100_COMMAND_EDID_SECTION_READBACK);
+ av8100_edid_section_readback_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ PRNK_MODE(AV8100_COMMAND_PATTERNGENERATOR);
+ configuration_pattern_generator_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ PRNK_MODE(AV8100_COMMAND_FUSE_AES_KEY);
+ configuration_fuse_aes_key_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ default:
+ dev_dbg(adev->dev, "Invalid command type\n");
+ retval = -EFAULT;
+ break;
+ }
+
+ LOCK_AV8100_HW;
+
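+ /* I2C command sequence: write the configuration to the command
+ * buffer, trigger execution by writing the command id, poll for
+ * the first return byte, then read back any return data. */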
+ if (if_type == I2C_INTERFACE) {
+ int cnt = 0;
+ int cnt_max;
+
+ dev_dbg(adev->dev, "av8100_conf_w cmd_type:%02x length:%02x ",
+ command_type, cmd_length);
+ dev_dbg(adev->dev, "buffer: ");
+ while (cnt < cmd_length) {
+ dev_dbg(adev->dev, "%02x ", cmd_buffer[cnt]);
+ cnt++;
+ }
+
+ /* Write the command buffer */
+ retval = write_multi_byte(i2c,
+ AV8100_CMD_BUF_OFFSET, cmd_buffer, cmd_length);
+ if (retval) {
+ UNLOCK_AV8100_HW;
+ return retval;
+ }
+
+ /* Write the command */
+ retval = write_single_byte(i2c, AV8100_COMMAND_OFFSET,
+ command_type);
+ if (retval) {
+ UNLOCK_AV8100_HW;
+ return retval;
+ }
+
+
+ /* Get the first return byte */
+ mdelay(AV8100_WAITTIME_1MS);
+ cnt = 0;
+ cnt_max = sizeof(waittime_retry) / sizeof(waittime_retry[0]);
+ retval = get_command_return_first(i2c, command_type);
+ while (retval && (cnt < cnt_max)) {
+ mdelay(waittime_retry[cnt]);
+ retval = get_command_return_first(i2c, command_type);
+ cnt++;
+ }
+ dev_dbg(adev->dev, "first return cnt:%d\n", cnt);
+
+ if (retval) {
+ UNLOCK_AV8100_HW;
+ return retval;
+ }
+
+ retval = get_command_return_data(i2c, command_type, cmd_buffer,
+ return_buffer_length, return_buffer);
+ } else if (if_type == DSI_INTERFACE) {
+ /* TODO */
+ } else {
+ retval = -EINVAL;
+ dev_dbg(adev->dev, "Invalid command type\n");
+ }
+
+ if (command_type == AV8100_COMMAND_HDMI) {
+ adev->status.hdmi_on = ((adev->config.hdmi_cmd.
+ hdmi_mode == AV8100_HDMI_ON) &&
+ (adev->config.hdmi_cmd.hdmi_format == AV8100_HDMI));
+ }
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_conf_w);
+
+int av8100_conf_w_raw(enum av8100_command_type command_type,
+ u8 buffer_length,
+ u8 *buffer,
+ u8 *return_buffer_length,
+ u8 *return_buffer)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ int cnt;
+ int cnt_max;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ if (return_buffer_length)
+ *return_buffer_length = 0;
+
+ i2c = adev->config.client;
+
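+ /* Same command sequence as av8100_conf_w(), but the caller supplies
+ * a raw, pre-built command buffer */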
+ /* Write the command buffer */
+ retval = write_multi_byte(i2c,
+ AV8100_CMD_BUF_OFFSET, buffer, buffer_length);
+ if (retval)
+ goto av8100_conf_w_raw_out;
+
+ /* Write the command */
+ retval = write_single_byte(i2c, AV8100_COMMAND_OFFSET,
+ command_type);
+ if (retval)
+ goto av8100_conf_w_raw_out;
+
+
+ /* Get the first return byte */
+ mdelay(AV8100_WAITTIME_1MS);
+ cnt = 0;
+ cnt_max = sizeof(waittime_retry) / sizeof(waittime_retry[0]);
+ retval = get_command_return_first(i2c, command_type);
+ while (retval && (cnt < cnt_max)) {
+ mdelay(waittime_retry[cnt]);
+ retval = get_command_return_first(i2c, command_type);
+ cnt++;
+ }
+ dev_dbg(adev->dev, "first return cnt:%d\n", cnt);
+ if (retval)
+ goto av8100_conf_w_raw_out;
+
+ retval = get_command_return_data(i2c, command_type, buffer,
+ return_buffer_length, return_buffer);
+
+av8100_conf_w_raw_out:
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_conf_w_raw);
+
+struct av8100_status av8100_status_get(void)
+{
+ struct av8100_status status = {0};
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (adev)
+ return adev->status;
+ else
+ return status;
+}
+EXPORT_SYMBOL(av8100_status_get);
+
+enum av8100_output_CEA_VESA av8100_video_output_format_get(int xres,
+ int yres,
+ int htot,
+ int vtot,
+ int pixelclk,
+ bool interlaced)
+{
+ enum av8100_output_CEA_VESA index = 1;
+ int yres_div = !interlaced ? 1 : 2;
+ int hres_div = 1;
+ long freq1;
+ long freq2;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ /*
+ * 720_576_I needs a divider for hact and htot since
+ * these params need to be twice as large as expected in av8100_all_cea,
+ * which is used as input parameter to video input config.
+ */
+ if ((xres == 720) && (yres == 576) && (interlaced == true))
+ hres_div = 2;
+
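+ /* Find the first entry in av8100_all_cea whose active/total timings
+ * and approximate refresh rate match the requested mode */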
+ freq1 = 1000000 / htot * 1000000 / vtot / pixelclk + 1;
+ while (index < sizeof(av8100_all_cea)/sizeof(struct av8100_cea)) {
+ freq2 = av8100_all_cea[index].frequence /
+ av8100_all_cea[index].htotale /
+ av8100_all_cea[index].vtotale;
+
+ dev_dbg(adev->dev, "freq1:%ld freq2:%ld\n", freq1, freq2);
+ if ((xres == av8100_all_cea[index].hactive / hres_div) &&
+ (yres == av8100_all_cea[index].vactive * yres_div) &&
+ (htot == av8100_all_cea[index].htotale / hres_div) &&
+ (vtot == av8100_all_cea[index].vtotale) &&
+ (abs(freq1 - freq2) < 2)) {
+ goto av8100_video_output_format_get_out;
+ }
+ index++;
+ }
+
+av8100_video_output_format_get_out:
+ dev_dbg(adev->dev, "av8100_video_output_format_get %d %d %d %d %d\n",
+ xres, yres, htot, vtot, index);
+ return index;
+}
+EXPORT_SYMBOL(av8100_video_output_format_get);
+
+void av8100_hdmi_event_cb_set(void (*hdmi_ev_cb)(enum av8100_hdmi_event))
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (adev)
+ adev->params.hdmi_ev_cb = hdmi_ev_cb;
+}
+EXPORT_SYMBOL(av8100_hdmi_event_cb_set);
+
+u8 av8100_ver_get(void)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ return adev->chip_version;
+}
+EXPORT_SYMBOL(av8100_ver_get);
+
+static const struct color_conversion_cmd *get_color_transform_cmd(
+ struct av8100_device *adev,
+ enum av8100_color_transform transform)
+{
+ const struct color_conversion_cmd *result;
+
+ switch (transform) {
+ case AV8100_COLOR_TRANSFORM_INDENTITY:
+ result = &col_trans_identity;
+ break;
+ case AV8100_COLOR_TRANSFORM_INDENTITY_CLAMP_YUV:
+ result = &col_trans_identity_clamp_yuv;
+ break;
+ case AV8100_COLOR_TRANSFORM_YUV_TO_RGB:
+ if (adev->chip_version == AV8100_CHIPVER_1)
+ result = &col_trans_yuv_to_rgb_v1;
+ else
+ result = &col_trans_yuv_to_rgb_v2;
+ break;
+ case AV8100_COLOR_TRANSFORM_YUV_TO_DENC:
+ result = &col_trans_yuv_to_denc;
+ break;
+ case AV8100_COLOR_TRANSFORM_RGB_TO_DENC:
+ result = &col_trans_rgb_to_denc;
+ break;
+ default:
+ dev_warn(adev->dev, "Unknown color space transform\n");
+ result = &col_trans_identity;
+ break;
+ }
+ return result;
+}
+
+static int av8100_open(struct inode *inode, struct file *filp)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static int av8100_release(struct inode *inode, struct file *filp)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static long av8100_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return 0;
+}
+
+int av8100_device_register(struct av8100_device *adev)
+{
+ adev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ adev->miscdev.name = "av8100";
+ adev->miscdev.fops = &av8100_fops;
+
+ if (misc_register(&adev->miscdev)) {
+ pr_err("av8100 misc_register failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int av8100_init_device(struct av8100_device *adev, struct device *dev)
+{
+ adev->dev = dev;
+
+ if (av8100_config_init(adev)) {
+ dev_info(dev, "av8100_config_init failed\n");
+ return -EFAULT;
+ }
+
+ if (av8100_params_init(adev)) {
+ dev_info(dev, "av8100_params_init failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int __devinit av8100_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct av8100_platform_data *pdata = i2c_client->dev.platform_data;
+ struct device *dev;
+ struct av8100_device *adev;
+
+ dev = &i2c_client->dev;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* Allocate device data */
+ adev = kzalloc(sizeof(struct av8100_device), GFP_KERNEL);
+ if (!adev) {
+ dev_info(dev, "%s: Alloc failure\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Add to list */
+ list_add_tail(&adev->list, &av8100_device_list);
+
+ av8100_device_register(adev);
+
+ av8100_init_device(adev, dev);
+
+ av8100_set_state(adev, AV8100_OPMODE_UNDEFINED);
+
+ if (!i2c_check_functionality(i2c_client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ ret = -ENODEV;
+ dev_info(dev, "av8100 i2c_check_functionality failed\n");
+ goto err1;
+ }
+
+ init_waitqueue_head(&adev->event);
+
+ adev->config.client = i2c_client;
+ adev->config.id = (struct i2c_device_id *) id;
+ i2c_set_clientdata(i2c_client, &adev->config);
+
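+ /* Start the av8100 worker thread */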
+ kthread_run(av8100_thread, adev, "av8100_thread");
+
+ /* Get regulator resource */
+ if (pdata->regulator_pwr_id) {
+ adev->params.regulator_pwr = regulator_get(dev,
+ pdata->regulator_pwr_id);
+ if (IS_ERR(adev->params.regulator_pwr)) {
+ ret = PTR_ERR(adev->params.regulator_pwr);
+ dev_warn(dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_pwr_id);
+ adev->params.regulator_pwr = NULL;
+ goto err1;
+ }
+ }
+
+ /* Get clock resource */
+ if (pdata->inputclk_id) {
+ adev->params.inputclk = clk_get(NULL, pdata->inputclk_id);
+ if (IS_ERR(adev->params.inputclk)) {
+ adev->params.inputclk = NULL;
+ dev_warn(dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->inputclk_id);
+ }
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_SHUTDOWN);
+
+
+ if (av8100_powerup1(adev)) {
+ dev_err(adev->dev, "av8100_powerup1 fail\n");
+ ret = -EFAULT;
+ goto err1;
+ }
+
+ /* Obtain the chip version */
+ ret = av8100_reg_stby_pend_int_r(NULL, NULL, NULL,
+ &adev->chip_version);
+ if (ret) {
+ dev_err(adev->dev, "Failed to read chip version\n");
+ goto err2;
+ }
+
+ dev_info(adev->dev, "chip version:%d\n", adev->chip_version);
+
+ switch (adev->chip_version) {
+ case AV8100_CHIPVER_1:
+ case AV8100_CHIPVER_2:
+ break;
+
+ default:
+ dev_err(adev->dev, "Unsupported chip version:%d\n",
+ adev->chip_version);
+ ret = -EINVAL;
+ goto err2;
+ }
+err2:
+ (void) av8100_powerdown();
+err1:
+ return ret;
+}
+
+static int __devexit av8100_remove(struct i2c_client *i2c_client)
+{
+ struct av8100_device *adev;
+
+ adev = dev_to_adev(&i2c_client->dev);
+ if (!adev)
+ return -EFAULT;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ if (adev->params.inputclk)
+ clk_put(adev->params.inputclk);
+
+ /* Release regulator resource */
+ if (adev->params.regulator_pwr)
+ regulator_put(adev->params.regulator_pwr);
+
+ misc_deregister(&adev->miscdev);
+
+ /* Remove from list */
+ list_del(&adev->list);
+
+ /* Free device data */
+ kfree(adev);
+
+ return 0;
+}
+
+int av8100_init(void)
+{
+ pr_debug("%s\n", __func__);
+
+ if (i2c_add_driver(&av8100_driver)) {
+ pr_err("av8100 i2c_add_driver failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+module_init(av8100_init);
+
+void av8100_exit(void)
+{
+ pr_debug("%s\n", __func__);
+
+ i2c_del_driver(&av8100_driver);
+}
+module_exit(av8100_exit);
+
+MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson hdmi display driver");
diff --git a/drivers/video/av8100/av8100_regs.h b/drivers/video/av8100/av8100_regs.h
new file mode 100644
index 00000000000..6ed9000987a
--- /dev/null
+++ b/drivers/video/av8100/av8100_regs.h
@@ -0,0 +1,346 @@
+
+#define AV8100_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define AV8100_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
+
+#define AV8100_STANDBY 0x00000000
+#define AV8100_STANDBY_CPD_SHIFT 0
+#define AV8100_STANDBY_CPD_MASK 0x00000001
+#define AV8100_STANDBY_CPD_HIGH 1
+#define AV8100_STANDBY_CPD_LOW 0
+#define AV8100_STANDBY_CPD(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, CPD, __x)
+#define AV8100_STANDBY_CPD_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, CPD, __x)
+#define AV8100_STANDBY_STBY_SHIFT 1
+#define AV8100_STANDBY_STBY_MASK 0x00000002
+#define AV8100_STANDBY_STBY_HIGH 1
+#define AV8100_STANDBY_STBY_LOW 0
+#define AV8100_STANDBY_STBY(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, STBY, __x)
+#define AV8100_STANDBY_STBY_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, STBY, __x)
+#define AV8100_STANDBY_HPDS_SHIFT 2
+#define AV8100_STANDBY_HPDS_MASK 0x00000004
+#define AV8100_STANDBY_HPDS(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, HPDS, __x)
+#define AV8100_STANDBY_HPDS_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, HPDS, __x)
+#define AV8100_STANDBY_CPDS_SHIFT 3
+#define AV8100_STANDBY_CPDS_MASK 0x00000008
+#define AV8100_STANDBY_CPDS(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, CPDS, __x)
+#define AV8100_STANDBY_CPDS_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, CPDS, __x)
+#define AV8100_STANDBY_MCLKRNG_SHIFT 4
+#define AV8100_STANDBY_MCLKRNG_MASK 0x000000F0
+#define AV8100_STANDBY_MCLKRNG(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, MCLKRNG, __x)
+#define AV8100_STANDBY_MCLKRNG_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, MCLKRNG, __x)
+#define AV8100_HDMI_5_VOLT_TIME 0x00000001
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_SHIFT 0
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_MASK 0x0000001F
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_SHIFT 0
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_MASK 0x00000003
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, DAC_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, DAC_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_SHIFT 2
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_MASK 0x0000001C
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, SU_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, SU_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_SHIFT 5
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_MASK 0x000000E0
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, ON_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, ON_TIME, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK 0x00000002
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_SHIFT 0
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_MASK 0x00000001
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH 1
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW 0
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, HPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, HPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_SHIFT 1
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_MASK 0x00000002
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_HIGH 1
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_LOW 0
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, CPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, CPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_SHIFT 2
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_MASK 0x0000000C
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT 0x00
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_ALT 0x01
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_OUTPUT0 0x02
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_OUTPUT1 0x03
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, STBYGPIOCFG, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, STBYGPIOCFG, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_SHIFT 7
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_MASK 0x00000080
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_HIGH 1
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW 0
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, IPOL, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, IPOL, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT 0x00000003
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_SHIFT 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_MASK 0x00000001
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, HPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, HPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_SHIFT 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_MASK 0x00000002
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, CPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, CPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_SHIFT 2
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_MASK 0x00000004
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, ONI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, ONI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID_SHIFT 4
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID_MASK 0x000000F0
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, SID, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, SID, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_SHIFT 6
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_MASK 0x00000040
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, BPDIG, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK 0x00000004
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_SHIFT 0
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_MASK 0x00000001
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, EOCM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, EOCM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_SHIFT 1
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_MASK 0x00000002
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, VSIM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, VSIM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_SHIFT 2
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_MASK 0x00000004
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, VSOM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, VSOM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_SHIFT 3
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_MASK 0x00000008
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, CECM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, CECM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_SHIFT 4
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_MASK 0x00000010
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, HDCPM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, HDCPM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_SHIFT 5
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_MASK 0x00000020
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, UOVBM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, UOVBM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_SHIFT 6
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_MASK 0x00000040
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, TEM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, TEM, __x)
+#define AV8100_GENERAL_INTERRUPT 0x00000005
+#define AV8100_GENERAL_INTERRUPT_EOCI_SHIFT 0
+#define AV8100_GENERAL_INTERRUPT_EOCI_MASK 0x00000001
+#define AV8100_GENERAL_INTERRUPT_EOCI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, EOCI, __x)
+#define AV8100_GENERAL_INTERRUPT_EOCI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, EOCI, __x)
+#define AV8100_GENERAL_INTERRUPT_VSII_SHIFT 1
+#define AV8100_GENERAL_INTERRUPT_VSII_MASK 0x00000002
+#define AV8100_GENERAL_INTERRUPT_VSII(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, VSII, __x)
+#define AV8100_GENERAL_INTERRUPT_VSII_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, VSII, __x)
+#define AV8100_GENERAL_INTERRUPT_VSOI_SHIFT 2
+#define AV8100_GENERAL_INTERRUPT_VSOI_MASK 0x00000004
+#define AV8100_GENERAL_INTERRUPT_VSOI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, VSOI, __x)
+#define AV8100_GENERAL_INTERRUPT_VSOI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, VSOI, __x)
+#define AV8100_GENERAL_INTERRUPT_CECI_SHIFT 3
+#define AV8100_GENERAL_INTERRUPT_CECI_MASK 0x00000008
+#define AV8100_GENERAL_INTERRUPT_CECI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, CECI, __x)
+#define AV8100_GENERAL_INTERRUPT_CECI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, CECI, __x)
+#define AV8100_GENERAL_INTERRUPT_HDCPI_SHIFT 4
+#define AV8100_GENERAL_INTERRUPT_HDCPI_MASK 0x00000010
+#define AV8100_GENERAL_INTERRUPT_HDCPI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, HDCPI, __x)
+#define AV8100_GENERAL_INTERRUPT_HDCPI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, HDCPI, __x)
+#define AV8100_GENERAL_INTERRUPT_UOVBI_SHIFT 5
+#define AV8100_GENERAL_INTERRUPT_UOVBI_MASK 0x00000020
+#define AV8100_GENERAL_INTERRUPT_UOVBI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, UOVBI, __x)
+#define AV8100_GENERAL_INTERRUPT_UOVBI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, UOVBI, __x)
+#define AV8100_GENERAL_INTERRUPT_TEI_SHIFT 6
+#define AV8100_GENERAL_INTERRUPT_TEI_MASK 0x00000040
+#define AV8100_GENERAL_INTERRUPT_TEI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, TEI, __x)
+#define AV8100_GENERAL_INTERRUPT_TEI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, TEI, __x)
+#define AV8100_GENERAL_STATUS 0x00000006
+#define AV8100_GENERAL_STATUS_CECTXERR_SHIFT 0
+#define AV8100_GENERAL_STATUS_CECTXERR_MASK 0x00000001
+#define AV8100_GENERAL_STATUS_CECTXERR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECTXERR, __x)
+#define AV8100_GENERAL_STATUS_CECREC_SHIFT 1
+#define AV8100_GENERAL_STATUS_CECREC_MASK 0x00000002
+#define AV8100_GENERAL_STATUS_CECREC_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECREC, __x)
+#define AV8100_GENERAL_STATUS_CECTRX_SHIFT 2
+#define AV8100_GENERAL_STATUS_CECTRX_MASK 0x00000004
+#define AV8100_GENERAL_STATUS_CECTRX_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECTRX, __x)
+#define AV8100_GENERAL_STATUS_UC_SHIFT 3
+#define AV8100_GENERAL_STATUS_UC_MASK 0x00000008
+#define AV8100_GENERAL_STATUS_UC_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, UC, __x)
+#define AV8100_GENERAL_STATUS_ONUVB_SHIFT 4
+#define AV8100_GENERAL_STATUS_ONUVB_MASK 0x00000010
+#define AV8100_GENERAL_STATUS_ONUVB_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, ONUVB, __x)
+#define AV8100_GENERAL_STATUS_HDCPS_SHIFT 5
+#define AV8100_GENERAL_STATUS_HDCPS_MASK 0x000000E0
+#define AV8100_GENERAL_STATUS_HDCPS_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, HDCPS, __x)
+#define AV8100_GPIO_CONFIGURATION 0x00000007
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR_SHIFT 0
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR_MASK 0x00000001
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT3DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT3DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL_SHIFT 1
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL_MASK 0x00000002
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT3VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT3VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR_SHIFT 2
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR_MASK 0x00000004
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT2DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT2DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL_SHIFT 3
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL_MASK 0x00000008
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT2VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT2VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR_SHIFT 4
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR_MASK 0x00000010
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT1DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT1DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL_SHIFT 5
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL_MASK 0x00000020
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT1VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT1VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_UCDBG_SHIFT 6
+#define AV8100_GPIO_CONFIGURATION_UCDBG_MASK 0x00000040
+#define AV8100_GPIO_CONFIGURATION_UCDBG(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, UCDBG, __x)
+#define AV8100_GPIO_CONFIGURATION_UCDBG_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, UCDBG, __x)
+#define AV8100_GENERAL_CONTROL 0x00000008
+#define AV8100_GENERAL_CONTROL_FDL_SHIFT 4
+#define AV8100_GENERAL_CONTROL_FDL_MASK 0x00000010
+#define AV8100_GENERAL_CONTROL_FDL_HIGH 1
+#define AV8100_GENERAL_CONTROL_FDL_LOW 0
+#define AV8100_GENERAL_CONTROL_FDL(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, FDL, __x)
+#define AV8100_GENERAL_CONTROL_FDL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, FDL, __x)
+#define AV8100_GENERAL_CONTROL_HLD_SHIFT 5
+#define AV8100_GENERAL_CONTROL_HLD_MASK 0x00000020
+#define AV8100_GENERAL_CONTROL_HLD_HIGH 1
+#define AV8100_GENERAL_CONTROL_HLD_LOW 0
+#define AV8100_GENERAL_CONTROL_HLD(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, HLD, __x)
+#define AV8100_GENERAL_CONTROL_HLD_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, HLD, __x)
+#define AV8100_GENERAL_CONTROL_WA_SHIFT 6
+#define AV8100_GENERAL_CONTROL_WA_MASK 0x00000040
+#define AV8100_GENERAL_CONTROL_WA_HIGH 1
+#define AV8100_GENERAL_CONTROL_WA_LOW 0
+#define AV8100_GENERAL_CONTROL_WA(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, WA, __x)
+#define AV8100_GENERAL_CONTROL_WA_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, WA, __x)
+#define AV8100_GENERAL_CONTROL_RA_SHIFT 7
+#define AV8100_GENERAL_CONTROL_RA_MASK 0x00000080
+#define AV8100_GENERAL_CONTROL_RA_HIGH 1
+#define AV8100_GENERAL_CONTROL_RA_LOW 0
+#define AV8100_GENERAL_CONTROL_RA(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, RA, __x)
+#define AV8100_GENERAL_CONTROL_RA_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, RA, __x)
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY 0x0000000F
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_SHIFT 0
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_MASK 0x000000FF
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY(__x) \
+ AV8100_VAL2REG(AV8100_FIRMWARE_DOWNLOAD_ENTRY, MBYTE_CODE_ENTRY, __x)
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_GET(__x) \
+ AV8100_REG2VAL(AV8100_FIRMWARE_DOWNLOAD_ENTRY, MBYTE_CODE_ENTRY, __x)
diff --git a/drivers/video/av8100/hdmi.c b/drivers/video/av8100/hdmi.c
new file mode 100644
index 00000000000..3159c4446f1
--- /dev/null
+++ b/drivers/video/av8100/hdmi.c
@@ -0,0 +1,2479 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson HDMI driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <video/av8100.h>
+#include <video/hdmi.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/ctype.h>
+#include "hdmi_loc.h"
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+
+#define SYSFS_EVENT_FILENAME "evread"
+#define HDMI_DEVNR_DEFAULT 0
+
+DEFINE_MUTEX(hdmi_events_mutex);
+#define LOCK_HDMI_EVENTS mutex_lock(&hdmi_events_mutex)
+#define UNLOCK_HDMI_EVENTS mutex_unlock(&hdmi_events_mutex)
+#define EVENTS_MASK 0xFF
+
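+/* Per-device state: misc device, sysfs data, event bits and poll() wait queue */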
+struct hdmi_device {
+ struct list_head list;
+ struct miscdevice miscdev;
+ struct device *dev;
+ struct hdmi_sysfs_data sysfs_data;
+ int events;
+ int events_mask;
+ wait_queue_head_t event_wq;
+ bool events_received;
+ int devnr;
+};
+
+/* List of devices */
+static LIST_HEAD(hdmi_device_list);
+
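+/* sysfs show/store handlers, implemented further below */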
+static ssize_t store_storeastext(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_plugdeten(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_edidread(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_edidread(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t store_ceceven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_cecread(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t store_cecsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_infofrsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_hdcpeven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcpchkaesotp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t show_hdcpstateget(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t show_evread(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t store_evclr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_audiocfg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_plugstatus(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_poweronoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_poweronoff(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_evwakeup(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
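+/* sysfs attributes exported by the HDMI device */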
+static const struct device_attribute hdmi_sysfs_attrs[] = {
+ __ATTR(storeastext, S_IWUSR, NULL, store_storeastext),
+ __ATTR(plugdeten, S_IWUSR, NULL, store_plugdeten),
+ __ATTR(edidread, S_IRUGO | S_IWUSR, show_edidread, store_edidread),
+ __ATTR(ceceven, S_IWUSR, NULL, store_ceceven),
+ __ATTR(cecread, S_IRUGO, show_cecread, NULL),
+ __ATTR(cecsend, S_IWUSR, NULL, store_cecsend),
+ __ATTR(infofrsend, S_IWUSR, NULL, store_infofrsend),
+ __ATTR(hdcpeven, S_IWUSR, NULL, store_hdcpeven),
+ __ATTR(hdcpchkaesotp, S_IRUGO, show_hdcpchkaesotp, NULL),
+ __ATTR(hdcpfuseaes, S_IRUGO | S_IWUSR, show_hdcpfuseaes,
+ store_hdcpfuseaes),
+ __ATTR(hdcploadaes, S_IRUGO | S_IWUSR, show_hdcploadaes,
+ store_hdcploadaes),
+ __ATTR(hdcpauthencr, S_IRUGO | S_IWUSR, show_hdcpauthencr,
+ store_hdcpauthencr),
+ __ATTR(hdcpstateget, S_IRUGO, show_hdcpstateget, NULL),
+ __ATTR(evread, S_IRUGO, show_evread, NULL),
+ __ATTR(evclr, S_IWUSR, NULL, store_evclr),
+ __ATTR(audiocfg, S_IWUSR, NULL, store_audiocfg),
+ __ATTR(plugstatus, S_IRUGO, show_plugstatus, NULL),
+ __ATTR(poweronoff, S_IRUGO | S_IWUSR, show_poweronoff,
+ store_poweronoff),
+ __ATTR(evwakeup, S_IWUSR, NULL, store_evwakeup),
+ __ATTR_NULL
+};
+
+/* Convert the first two hex characters in ptr to an unsigned int */
+static unsigned int htoi(const char *ptr)
+{
+ unsigned int value = 0;
+ char ch;
+
+ if (!ptr)
+ return 0;
+
+ ch = *ptr;
+ if (isdigit(ch))
+ value = ch - '0';
+ else
+ value = toupper(ch) - 'A' + 10;
+
+ value <<= 4;
+ ch = *(++ptr);
+
+ if (isdigit(ch))
+ value += ch - '0';
+ else
+ value += toupper(ch) - 'A' + 10;
+
+ return value;
+}
+
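+/* Look up the hdmi_device that wraps the given struct device */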
+static struct hdmi_device *dev_to_hdev(struct device *dev)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct hdmi_device *hdmi_dev;
+ int cnt = 0;
+
+ list_for_each(element, &hdmi_device_list) {
+ hdmi_dev = list_entry(element, struct hdmi_device, list);
+ if (hdmi_dev->dev == dev)
+ return hdmi_dev;
+ cnt++;
+ }
+
+ return NULL;
+}
+
+static struct hdmi_device *devnr_to_hdev(int devnr)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct hdmi_device *hdmi_dev;
+ int cnt = 0;
+
+ list_for_each(element, &hdmi_device_list) {
+ hdmi_dev = list_entry(element, struct hdmi_device, list);
+ if (cnt == devnr)
+ return hdmi_dev;
+ cnt++;
+ }
+
+ return NULL;
+}
+
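+/*
+ * Enable or disable reporting of the given event bits. If any of the bits
+ * are already pending, sysfs readers are notified and pollers are woken up.
+ */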
+static int event_enable(struct hdmi_device *hdev, bool enable,
+ enum hdmi_event ev)
+{
+ struct kobject *kobj = &hdev->dev->kobj;
+
+ dev_dbg(hdev->dev, "enable_event %d %02x\n", enable, ev);
+ if (enable)
+ hdev->events_mask |= ev;
+ else
+ hdev->events_mask &= ~ev;
+
+ if (hdev->events & ev) {
+ /* Report pending event */
+ /* Wake up application waiting for event via call to poll() */
+ sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events_received = true;
+ UNLOCK_HDMI_EVENTS;
+
+ wake_up_interruptible(&hdev->event_wq);
+ }
+
+ return 0;
+}
+
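+/* Enable/disable plug detection events and set the AV8100 5V on/off timing */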
+static int plugdeten(struct hdmi_device *hdev, struct plug_detect *pldet)
+{
+ struct av8100_status status;
+ u8 denc_off_time = 0;
+ int retval;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ event_enable(hdev, pldet->hdmi_detect_enable != 0,
+ HDMI_EVENT_HDMI_PLUGIN);
+ event_enable(hdev, pldet->hdmi_detect_enable != 0,
+ HDMI_EVENT_HDMI_PLUGOUT);
+
+ av8100_reg_hdmi_5_volt_time_r(&denc_off_time, NULL, NULL);
+
+ retval = av8100_reg_hdmi_5_volt_time_w(
+ denc_off_time,
+ pldet->hdmi_off_time,
+ pldet->on_time);
+
+ if (retval) {
+ dev_err(hdev->dev, "Failed to write the value to av8100 "
+ "register\n");
+ return -EFAULT;
+ }
+
+ return retval;
+}
+
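+/* Read one EDID block from the sink using the AV8100 EDID readback command */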
+static int edidread(struct hdmi_device *hdev, struct edid_read *edidread,
+ u8 *len, u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.edid_section_readback_format.address = edidread->address;
+ config.edid_section_readback_format.block_number = edidread->block_nr;
+
+ dev_dbg(hdev->dev, "addr:%0x blnr:%0x",
+ config.edid_section_readback_format.address,
+ config.edid_section_readback_format.block_number);
+
+ if (av8100_conf_prep(AV8100_COMMAND_EDID_SECTION_READBACK,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_EDID_SECTION_READBACK,
+ len, data, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(hdev->dev, "len:%0x\n", *len);
+
+ return 0;
+}
+
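+/* Read back a received CEC message: source, destination and payload */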
+static int cecread(struct hdmi_device *hdev, u8 *src, u8 *dest, u8 *data_len,
+ u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buff[HDMI_CEC_READ_MAXSIZE];
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ if (av8100_conf_prep(AV8100_COMMAND_CEC_MESSAGE_READ_BACK,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_CEC_MESSAGE_READ_BACK,
+ &buf_len, buff, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if (buf_len > 0) {
+ *src = (buff[0] & 0xF0) >> 4;
+ *dest = buff[0] & 0x0F;
+ *data_len = buf_len - 1;
+ memcpy(data, &buff[1], buf_len - 1);
+ } else
+ *data_len = 0;
+
+ return 0;
+}
+
+/* CEC tx status can be set or read */
+static bool cec_tx_status(struct hdmi_device *hdev,
+ enum cec_tx_status_action action)
+{
+ static bool cec_tx_busy;
+
+ switch (action) {
+ case CEC_TX_SET_FREE:
+ cec_tx_busy = false;
+ dev_dbg(hdev->dev, "cec_tx_busy set:%d\n", cec_tx_busy);
+ break;
+
+ case CEC_TX_SET_BUSY:
+ cec_tx_busy = true;
+ dev_dbg(hdev->dev, "cec_tx_busy set:%d\n", cec_tx_busy);
+ break;
+
+ case CEC_TX_CHECK:
+ default:
+ dev_dbg(hdev->dev, "cec_tx_busy chk:%d\n", cec_tx_busy);
+ break;
+ }
+
+ return cec_tx_busy;
+}
+
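+/* Send a CEC message, waiting for any ongoing CEC transmission to finish */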
+static int cecsend(struct hdmi_device *hdev, u8 src, u8 dest, u8 data_len,
+ u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ int cnt;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.cec_message_write_format.buffer[0] = ((src & 0x0F) << 4) +
+ (dest & 0x0F);
+ config.cec_message_write_format.buffer_length = data_len + 1;
+ memcpy(&config.cec_message_write_format.buffer[1], data, data_len);
+
+ if (av8100_conf_prep(AV8100_COMMAND_CEC_MESSAGE_WRITE,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_enable_interrupt() != 0) {
+ dev_err(hdev->dev, "av8100_ei FAIL\n");
+ return -EINVAL;
+ }
+
+ cnt = 0;
+ while ((cnt < CECTX_TRY) && cec_tx_status(hdev, CEC_TX_CHECK)) {
+ /* Wait for pending CEC to be finished */
+ msleep(CECTX_WAITTIME);
+ cnt++;
+ }
+ dev_dbg(hdev->dev, "cectxcnt:%d\n", cnt);
+
+ if (av8100_conf_w(AV8100_COMMAND_CEC_MESSAGE_WRITE,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+ cec_tx_status(hdev, CEC_TX_SET_BUSY);
+
+ return 0;
+}
+
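+/* Send an HDMI infoframe (type, version, CRC and payload) via the AV8100 */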
+static int infofrsend(struct hdmi_device *hdev, u8 type, u8 version, u8 crc,
+ u8 data_len, u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((data_len < 1) || (data_len > HDMI_INFOFRAME_MAX_SIZE))
+ return -EINVAL;
+
+ config.infoframes_format.type = type;
+ config.infoframes_format.version = version;
+ config.infoframes_format.crc = crc;
+ config.infoframes_format.length = data_len;
+ memcpy(&config.infoframes_format.data, data, data_len);
+ if (av8100_conf_prep(AV8100_COMMAND_INFOFRAMES,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_INFOFRAMES,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
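+/* Check whether the HDCP AES key has been programmed (fused) in OTP */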
+static int hdcpchkaesotp(struct hdmi_device *hdev, u8 *crc, u8 *progged)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buf[2];
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.fuse_aes_key_format.fuse_operation = AV8100_FUSE_READ;
+ memset(config.fuse_aes_key_format.key, 0, AV8100_FUSE_KEY_SIZE);
+ if (av8100_conf_prep(AV8100_COMMAND_FUSE_AES_KEY,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_FUSE_AES_KEY,
+ &buf_len, buf, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if (buf_len == 2) {
+ *crc = buf[0];
+ *progged = buf[1];
+ } else {
+ /* Unexpected response length; do not leave outputs uninitialized */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
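+/* Fuse the HDCP AES key into OTP and check the returned CRC */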
+static int hdcpfuseaes(struct hdmi_device *hdev, u8 *key, u8 crc, u8 *result)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buf[2];
+
+ /* Default not OK */
+ *result = HDMI_RESULT_NOT_OK;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.fuse_aes_key_format.fuse_operation = AV8100_FUSE_WRITE;
+ memcpy(config.fuse_aes_key_format.key, key, AV8100_FUSE_KEY_SIZE);
+ if (av8100_conf_prep(AV8100_COMMAND_FUSE_AES_KEY,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_FUSE_AES_KEY,
+ &buf_len, buf, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if (buf_len == 2) {
+ dev_dbg(hdev->dev, "buf[0]:%02x buf[1]:%02x\n", buf[0], buf[1]);
+ if ((crc == buf[0]) && (buf[1] == 1))
+ /* OK */
+ *result = HDMI_RESULT_OK;
+ else
+ *result = HDMI_RESULT_CRC_MISMATCH;
+ }
+
+ return 0;
+}
+
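+/* Load one HDCP AES or KSV key block and return the CRC32 reported back */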
+static int hdcploadaes(struct hdmi_device *hdev, u8 block, u8 key_len, u8 *key,
+ u8 *result, u8 *crc32)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buf[CRC32_SIZE];
+
+ /* Default not OK */
+ *result = HDMI_RESULT_NOT_OK;
+
+ dev_dbg(hdev->dev, "%s block:%d\n", __func__, block);
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.hdcp_send_key_format.key_number = block;
+ config.hdcp_send_key_format.data_len = key_len;
+ memcpy(config.hdcp_send_key_format.data, key, key_len);
+ if (av8100_conf_prep(AV8100_COMMAND_HDCP_SENDKEY, &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_HDCP_SENDKEY,
+ &buf_len, buf, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if ((buf_len == CRC32_SIZE) && (crc32)) {
+ memcpy(crc32, buf, CRC32_SIZE);
+ dev_dbg(hdev->dev, "crc32:%02x%02x%02x%02x\n",
+ crc32[0], crc32[1], crc32[2], crc32[3]);
+ }
+
+ *result = HDMI_RESULT_OK;
+
+ return 0;
+}
+
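+/*
+ * Set the HDCP authentication request (off/start/revocation list/continue)
+ * and the encryption mode (OESS/EESS).
+ */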
+static int hdcpauthencr(struct hdmi_device *hdev, u8 auth_type, u8 encr_type,
+ u8 *len, u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ switch (auth_type) {
+ case HDMI_HDCP_AUTH_OFF:
+ default:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_AUTH_REQ_OFF;
+ break;
+
+ case HDMI_HDCP_AUTH_START:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_AUTH_REQ_ON;
+ break;
+
+ case HDMI_HDCP_AUTH_REV_LIST_REQ:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_REV_LIST_REQ;
+ break;
+ case HDMI_HDCP_AUTH_CONT:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_AUTH_CONT;
+ break;
+ }
+
+ switch (encr_type) {
+ case HDMI_HDCP_ENCR_OESS:
+ default:
+ config.hdcp_management_format.encr_use =
+ AV8100_HDCP_ENCR_USE_OESS;
+ break;
+
+ case HDMI_HDCP_ENCR_EESS:
+ config.hdcp_management_format.encr_use =
+ AV8100_HDCP_ENCR_USE_EESS;
+ break;
+ }
+
+ if (av8100_conf_prep(AV8100_COMMAND_HDCP_MANAGEMENT,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_HDCP_MANAGEMENT,
+ len, data, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 events_read(struct hdmi_device *hdev)
+{
+ int ret;
+
+ LOCK_HDMI_EVENTS;
+ ret = hdev->events;
+ dev_dbg(hdev->dev, "%s %02x\n", __func__, hdev->events);
+ UNLOCK_HDMI_EVENTS;
+
+ return ret;
+}
+
+static int events_clear(struct hdmi_device *hdev, u8 ev)
+{
+ dev_dbg(hdev->dev, "%s %02x\n", __func__, ev);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events &= ~ev & EVENTS_MASK;
+ UNLOCK_HDMI_EVENTS;
+
+ return 0;
+}
+
+static int event_wakeup(struct hdmi_device *hdev)
+{
+ struct kobject *kobj = &hdev->dev->kobj;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events |= HDMI_EVENT_WAKEUP;
+ hdev->events_received = true;
+ UNLOCK_HDMI_EVENTS;
+
+ /* Wake up application waiting for event via call to poll() */
+ sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME);
+ wake_up_interruptible(&hdev->event_wq);
+
+ return 0;
+}
+
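+/* Configure the AV8100 audio input: interface format, frequency, word length, mute */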
+static int audiocfg(struct hdmi_device *hdev, struct audio_cfg *cfg)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.audio_input_format.audio_input_if_format = cfg->if_format;
+ config.audio_input_format.i2s_input_nb = cfg->i2s_entries;
+ config.audio_input_format.sample_audio_freq = cfg->freq;
+ config.audio_input_format.audio_word_lg = cfg->word_length;
+ config.audio_input_format.audio_format = cfg->format;
+ config.audio_input_format.audio_if_mode = cfg->if_mode;
+ config.audio_input_format.audio_mute = cfg->mute;
+
+ if (av8100_conf_prep(AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* sysfs */
+static ssize_t store_storeastext(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if ((count != HDMI_STOREASTEXT_BIN_SIZE) &&
+ (count != HDMI_STOREASTEXT_TEXT_SIZE) &&
+ (count != HDMI_STOREASTEXT_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ if ((count == HDMI_STOREASTEXT_BIN_SIZE) && (*buf == 0x1))
+ hdev->sysfs_data.store_as_hextext = true;
+ else if (((count == HDMI_STOREASTEXT_TEXT_SIZE) ||
+ (count == HDMI_STOREASTEXT_TEXT_SIZE + 1)) && (*buf == '0') &&
+ (*(buf + 1) == '1')) {
+ hdev->sysfs_data.store_as_hextext = true;
+ } else {
+ hdev->sysfs_data.store_as_hextext = false;
+ }
+
+ dev_dbg(hdev->dev, "store_as_hextext:%0d\n",
+ hdev->sysfs_data.store_as_hextext);
+
+ return count;
+}
+
+static ssize_t store_plugdeten(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct plug_detect plug_detect;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_PLUGDETEN_TEXT_SIZE) &&
+ (count != HDMI_PLUGDETEN_TEXT_SIZE + 1))
+ return -EINVAL;
+ plug_detect.hdmi_detect_enable = htoi(buf + index);
+ index += 2;
+ plug_detect.on_time = htoi(buf + index);
+ index += 2;
+ plug_detect.hdmi_off_time = htoi(buf + index);
+ index += 2;
+ } else {
+ if (count != HDMI_PLUGDETEN_BIN_SIZE)
+ return -EINVAL;
+ plug_detect.hdmi_detect_enable = *(buf + index++);
+ plug_detect.on_time = *(buf + index++);
+ plug_detect.hdmi_off_time = *(buf + index++);
+ }
+
+ if (plugdeten(hdev, &plug_detect))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t store_edidread(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct edid_read edid_read;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+ dev_dbg(hdev->dev, "count:%zu\n", count);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_EDIDREAD_TEXT_SIZE) &&
+ (count != HDMI_EDIDREAD_TEXT_SIZE + 1))
+ return -EINVAL;
+ edid_read.address = htoi(buf + index);
+ index += 2;
+ edid_read.block_nr = htoi(buf + index);
+ index += 2;
+ } else {
+ if (count != HDMI_EDIDREAD_BIN_SIZE)
+ return -EINVAL;
+ edid_read.address = *(buf + index++);
+ edid_read.block_nr = *(buf + index++);
+ }
+
+ if (edidread(hdev, &edid_read, &hdev->sysfs_data.edid_data.buf_len,
+ hdev->sysfs_data.edid_data.buf))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t show_edidread(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int len;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ len = hdev->sysfs_data.edid_data.buf_len;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", len);
+ index += 2;
+ } else
+ *(buf + index++) = len;
+
+ dev_dbg(hdev->dev, "len:%02x\n", len);
+
+ cnt = 0;
+ while (cnt < len) {
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.edid_data.buf[cnt]);
+ index += 2;
+ } else
+ *(buf + index++) =
+ hdev->sysfs_data.edid_data.buf[cnt];
+
+ dev_dbg(hdev->dev, "%02x ",
+ hdev->sysfs_data.edid_data.buf[cnt]);
+
+ cnt++;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_ceceven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ bool enable = false;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_CECEVEN_TEXT_SIZE) &&
+ (count != HDMI_CECEVEN_TEXT_SIZE + 1))
+ return -EINVAL;
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ } else {
+ if (count != HDMI_CECEVEN_BIN_SIZE)
+ return -EINVAL;
+ if (*buf == 0x01)
+ enable = true;
+ }
+
+ event_enable(hdev, enable, HDMI_EVENT_CEC | HDMI_EVENT_CECTXERR |
+ HDMI_EVENT_CECTX);
+
+ return count;
+}
+
+static ssize_t show_cecread(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct cec_rw cec_read;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (cecread(hdev, &cec_read.src, &cec_read.dest, &cec_read.length,
+ cec_read.data))
+ return -EINVAL;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", cec_read.src);
+ index += 2;
+ snprintf(buf + index, 3, "%02x", cec_read.dest);
+ index += 2;
+ snprintf(buf + index, 3, "%02x", cec_read.length);
+ index += 2;
+ } else {
+ *(buf + index++) = cec_read.src;
+ *(buf + index++) = cec_read.dest;
+ *(buf + index++) = cec_read.length;
+ }
+
+ dev_dbg(hdev->dev, "len:%02x\n", cec_read.length);
+
+ cnt = 0;
+ while (cnt < cec_read.length) {
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", cec_read.data[cnt]);
+ index += 2;
+ } else
+ *(buf + index++) = cec_read.data[cnt];
+
+ dev_dbg(hdev->dev, "%02x ", cec_read.data[cnt]);
+
+ cnt++;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_cecsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct cec_rw cec_w;
+ int index = 0;
+ int cnt;
+ int store_as_text;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if ((*buf == 'F') || (*buf == 'f'))
+ /* To be able to override bin format for test purpose */
+ store_as_text = 1;
+ else
+ store_as_text = hdev->sysfs_data.store_as_hextext;
+
+ if (store_as_text) {
+ if ((count < HDMI_CECSEND_TEXT_SIZE_MIN) ||
+ (count > HDMI_CECSEND_TEXT_SIZE_MAX))
+ return -EINVAL;
+
+ cec_w.src = htoi(buf + index) & 0x0F;
+ index += 2;
+ cec_w.dest = htoi(buf + index);
+ index += 2;
+ cec_w.length = htoi(buf + index);
+ index += 2;
+ if (cec_w.length > HDMI_CEC_WRITE_MAXSIZE)
+ return -EINVAL;
+ cnt = 0;
+ while (cnt < cec_w.length) {
+ cec_w.data[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", cec_w.data[cnt]);
+ cnt++;
+ }
+ } else {
+ if ((count < HDMI_CECSEND_BIN_SIZE_MIN) ||
+ (count > HDMI_CECSEND_BIN_SIZE_MAX))
+ return -EINVAL;
+
+ cec_w.src = *(buf + index++);
+ cec_w.dest = *(buf + index++);
+ cec_w.length = *(buf + index++);
+ if (cec_w.length > HDMI_CEC_WRITE_MAXSIZE)
+ return -EINVAL;
+ memcpy(cec_w.data, buf + index, cec_w.length);
+ }
+
+ if (cecsend(hdev, cec_w.src, cec_w.dest, cec_w.length, cec_w.data))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t store_infofrsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct info_fr info_fr;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count < HDMI_INFOFRSEND_TEXT_SIZE_MIN) ||
+ (count > HDMI_INFOFRSEND_TEXT_SIZE_MAX))
+ return -EINVAL;
+
+ info_fr.type = htoi(&buf[index]);
+ index += 2;
+ info_fr.ver = htoi(&buf[index]);
+ index += 2;
+ info_fr.crc = htoi(&buf[index]);
+ index += 2;
+ info_fr.length = htoi(&buf[index]);
+ index += 2;
+
+ if (info_fr.length > HDMI_INFOFRAME_MAX_SIZE)
+ return -EINVAL;
+ cnt = 0;
+ while (cnt < info_fr.length) {
+ info_fr.data[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", info_fr.data[cnt]);
+ cnt++;
+ }
+ } else {
+ if ((count < HDMI_INFOFRSEND_BIN_SIZE_MIN) ||
+ (count > HDMI_INFOFRSEND_BIN_SIZE_MAX))
+ return -EINVAL;
+
+ info_fr.type = *(buf + index++);
+ info_fr.ver = *(buf + index++);
+ info_fr.crc = *(buf + index++);
+ info_fr.length = *(buf + index++);
+
+ if (info_fr.length > HDMI_INFOFRAME_MAX_SIZE)
+ return -EINVAL;
+ memcpy(info_fr.data, buf + index, info_fr.length);
+ }
+
+ if (infofrsend(hdev, info_fr.type, info_fr.ver, info_fr.crc,
+ info_fr.length, info_fr.data))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t store_hdcpeven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ bool enable = false;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCPEVEN_TEXT_SIZE) &&
+ (count != HDMI_HDCPEVEN_TEXT_SIZE + 1))
+ return -EINVAL;
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ } else {
+ if (count != HDMI_HDCPEVEN_BIN_SIZE)
+ return -EINVAL;
+ if (*buf == 0x01)
+ enable = true;
+ }
+
+ event_enable(hdev, enable, HDMI_EVENT_HDCP);
+
+ return count;
+}
+
+static ssize_t show_hdcpchkaesotp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ u8 crc;
+ u8 progged;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdcpchkaesotp(hdev, &crc, &progged))
+ return -EINVAL;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", progged);
+ index += 2;
+ } else {
+ *(buf + index++) = progged;
+ }
+
+ dev_dbg(hdev->dev, "progged:%02x\n", progged);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct hdcp_fuseaes hdcp_fuseaes;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* Default not OK */
+ hdev->sysfs_data.fuse_result = HDMI_RESULT_NOT_OK;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCP_FUSEAES_TEXT_SIZE) &&
+ (count != HDMI_HDCP_FUSEAES_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ cnt = 0;
+ while (cnt < HDMI_HDCP_FUSEAES_KEYSIZE) {
+ hdcp_fuseaes.key[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", hdcp_fuseaes.key[cnt]);
+ cnt++;
+ }
+ hdcp_fuseaes.crc = htoi(&buf[index]);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", hdcp_fuseaes.crc);
+ } else {
+ if (count != HDMI_HDCP_FUSEAES_BIN_SIZE)
+ return -EINVAL;
+
+ memcpy(hdcp_fuseaes.key, buf + index,
+ HDMI_HDCP_FUSEAES_KEYSIZE);
+ index += HDMI_HDCP_FUSEAES_KEYSIZE;
+ hdcp_fuseaes.crc = *(buf + index++);
+ }
+
+ if (hdcpfuseaes(hdev, hdcp_fuseaes.key, hdcp_fuseaes.crc,
+ &hdcp_fuseaes.result))
+ return -EINVAL;
+
+ dev_dbg(hdev->dev, "fuseresult:%02x ", hdcp_fuseaes.result);
+
+ hdev->sysfs_data.fuse_result = hdcp_fuseaes.result;
+
+ return count;
+}
+
+static ssize_t show_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", hdev->sysfs_data.fuse_result);
+ index += 2;
+ } else
+ *(buf + index++) = hdev->sysfs_data.fuse_result;
+
+ dev_dbg(hdev->dev, "status:%02x\n", hdev->sysfs_data.fuse_result);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct hdcp_loadaesone hdcp_loadaes;
+ int index = 0;
+ int block_cnt;
+ int cnt;
+ u8 crc32_rcvd[CRC32_SIZE];
+ u8 crc;
+ u8 progged;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* Default not OK */
+ hdev->sysfs_data.loadaes_result = HDMI_RESULT_NOT_OK;
+
+ if (hdcpchkaesotp(hdev, &crc, &progged))
+ return -EINVAL;
+
+ if (!progged) {
+ /* AES is not fused */
+ hdcp_loadaes.result = HDMI_AES_NOT_FUSED;
+ goto store_hdcploadaes_err;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCP_LOADAES_TEXT_SIZE) &&
+ (count != HDMI_HDCP_LOADAES_TEXT_SIZE + 1)) {
+ dev_err(hdev->dev, "%s", "count mismatch\n");
+ return -EINVAL;
+ }
+
+ /* AES */
+ block_cnt = 0;
+ while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) {
+ cnt = 0;
+ while (cnt < HDMI_HDCP_AES_KEYSIZE) {
+ hdcp_loadaes.key[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ",
+ hdcp_loadaes.key[cnt]);
+ cnt++;
+ }
+
+ if (hdcploadaes(hdev,
+ block_cnt + HDMI_HDCP_AES_BLOCK_START,
+ HDMI_HDCP_AES_KEYSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ crc32_rcvd)) {
+ dev_err(hdev->dev, "%s %d\n",
+ "hdcploadaes err aes block",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ if (hdcp_loadaes.result)
+ goto store_hdcploadaes_err;
+
+ block_cnt++;
+ }
+
+ /* KSV */
+ memset(hdcp_loadaes.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE);
+ cnt = HDMI_HDCP_AES_KSVZEROESSIZE;
+ while (cnt < HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE) {
+ hdcp_loadaes.key[cnt] =
+ htoi(&buf[index]);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", hdcp_loadaes.key[cnt]);
+ cnt++;
+ }
+
+ if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK,
+ HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ NULL)) {
+ dev_err(hdev->dev,
+ "%s %d\n", "hdcploadaes err in ksv",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ if (hdcp_loadaes.result)
+ goto store_hdcploadaes_err;
+
+ /* CRC32 */
+ for (cnt = 0; cnt < CRC32_SIZE; cnt++) {
+ hdcp_loadaes.crc32[cnt] = htoi(buf + index);
+ index += 2;
+ }
+
+ if (memcmp(hdcp_loadaes.crc32, crc32_rcvd, CRC32_SIZE)) {
+ dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n",
+ hdcp_loadaes.crc32[0],
+ hdcp_loadaes.crc32[1],
+ hdcp_loadaes.crc32[2],
+ hdcp_loadaes.crc32[3]);
+ hdcp_loadaes.result = HDMI_RESULT_CRC_MISMATCH;
+ goto store_hdcploadaes_err;
+ }
+ } else {
+ if (count != HDMI_HDCP_LOADAES_BIN_SIZE) {
+ dev_err(hdev->dev, "%s", "count mismatch\n");
+ return -EINVAL;
+ }
+
+ /* AES */
+ block_cnt = 0;
+ while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) {
+ memcpy(hdcp_loadaes.key, buf + index,
+ HDMI_HDCP_AES_KEYSIZE);
+ index += HDMI_HDCP_AES_KEYSIZE;
+
+ if (hdcploadaes(hdev,
+ block_cnt + HDMI_HDCP_AES_BLOCK_START,
+ HDMI_HDCP_AES_KEYSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ crc32_rcvd)) {
+ dev_err(hdev->dev, "%s %d\n",
+ "hdcploadaes err aes block",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ if (hdcp_loadaes.result)
+ goto store_hdcploadaes_err;
+
+ block_cnt++;
+ }
+
+ /* KSV */
+ memset(hdcp_loadaes.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE);
+ memcpy(hdcp_loadaes.key + HDMI_HDCP_AES_KSVZEROESSIZE,
+ buf + index,
+ HDMI_HDCP_AES_KSVSIZE);
+ index += HDMI_HDCP_AES_KSVSIZE;
+
+ if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK,
+ HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ NULL)) {
+ dev_err(hdev->dev, "%s %d\n",
+ "hdcploadaes err in ksv",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ memcpy(hdcp_loadaes.crc32, buf + index, CRC32_SIZE);
+ index += CRC32_SIZE;
+
+ /* CRC32 */
+ if (memcmp(hdcp_loadaes.crc32, crc32_rcvd, CRC32_SIZE)) {
+ dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n",
+ hdcp_loadaes.crc32[0],
+ hdcp_loadaes.crc32[1],
+ hdcp_loadaes.crc32[2],
+ hdcp_loadaes.crc32[3]);
+ hdcp_loadaes.result = HDMI_RESULT_CRC_MISMATCH;
+ }
+ }
+
+store_hdcploadaes_err:
+ hdev->sysfs_data.loadaes_result = hdcp_loadaes.result;
+ return count;
+}
+
+static ssize_t show_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.loadaes_result);
+ index += 2;
+ } else
+ *(buf + index++) = hdev->sysfs_data.loadaes_result;
+
+ dev_dbg(hdev->dev, "result:%02x\n", hdev->sysfs_data.loadaes_result);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct hdcp_authencr hdcp_authencr;
+ int index = 0;
+ u8 crc;
+ u8 progged;
+ int result = HDMI_RESULT_NOT_OK;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* Default */
+ hdev->sysfs_data.authencr.buf_len = 0;
+
+ if (hdcpchkaesotp(hdev, &crc, &progged)) {
+ result = HDMI_AES_NOT_FUSED;
+ goto store_hdcpauthencr_end;
+ }
+
+ if (!progged) {
+ /* AES is not fused */
+ result = HDMI_AES_NOT_FUSED;
+ goto store_hdcpauthencr_end;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCPAUTHENCR_TEXT_SIZE) &&
+ (count != HDMI_HDCPAUTHENCR_TEXT_SIZE + 1))
+ goto store_hdcpauthencr_end;
+
+ hdcp_authencr.auth_type = htoi(buf + index);
+ index += 2;
+ hdcp_authencr.encr_type = htoi(buf + index);
+ index += 2;
+ } else {
+ if (count != HDMI_HDCPAUTHENCR_BIN_SIZE)
+ goto store_hdcpauthencr_end;
+
+ hdcp_authencr.auth_type = *(buf + index++);
+ hdcp_authencr.encr_type = *(buf + index++);
+ }
+
+ if (hdcpauthencr(hdev, hdcp_authencr.auth_type, hdcp_authencr.encr_type,
+ &hdev->sysfs_data.authencr.buf_len,
+ hdev->sysfs_data.authencr.buf))
+ goto store_hdcpauthencr_end;
+
+ result = HDMI_RESULT_OK;
+
+store_hdcpauthencr_end:
+ hdev->sysfs_data.authencr.result = result;
+ return count;
+}
+
+static ssize_t show_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int len;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* result */
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.authencr.result);
+ index += 2;
+ } else
+ *(buf + index++) = hdev->sysfs_data.authencr.result;
+
+ dev_dbg(hdev->dev, "result:%02x\n", hdev->sysfs_data.authencr.result);
+
+ /* resp_size */
+ len = hdev->sysfs_data.authencr.buf_len;
+ if (len > AUTH_BUF_LEN)
+ len = AUTH_BUF_LEN;
+ dev_dbg(hdev->dev, "resp_size:%d\n", len);
+
+ /* resp */
+ cnt = 0;
+ while (cnt < len) {
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.authencr.buf[cnt]);
+ index += 2;
+
+ dev_dbg(hdev->dev, "%02x ",
+ hdev->sysfs_data.authencr.buf[cnt]);
+
+ } else
+ *(buf + index++) = hdev->sysfs_data.authencr.buf[cnt];
+
+ cnt++;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t show_hdcpstateget(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ u8 hdcp_state;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (av8100_reg_gen_status_r(NULL, NULL, NULL, NULL, NULL, &hdcp_state))
+ return -EINVAL;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", hdcp_state);
+ index += 2;
+ } else
+ *(buf + index++) = hdcp_state;
+
+ dev_dbg(hdev->dev, "status:%02x\n", hdcp_state);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t show_evread(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+ u8 ev;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ ev = events_read(hdev);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", ev);
+ index += 2;
+ } else
+ *(buf + index++) = ev;
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ /* Events are read: clear events */
+ events_clear(hdev, EVENTS_MASK);
+
+ return index;
+}
+
+static ssize_t store_evclr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ u8 ev;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_EVCLR_TEXT_SIZE) &&
+ (count != HDMI_EVCLR_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ ev = htoi(&buf[index]);
+ index += 2;
+ } else {
+ if (count != HDMI_EVCLR_BIN_SIZE)
+ return -EINVAL;
+
+ ev = *(buf + index++);
+ }
+
+ events_clear(hdev, ev);
+
+ return count;
+}
+
+static ssize_t store_audiocfg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct audio_cfg audio_cfg;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_AUDIOCFG_TEXT_SIZE) &&
+ (count != HDMI_AUDIOCFG_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ audio_cfg.if_format = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.i2s_entries = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.freq = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.word_length = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.format = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.if_mode = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.mute = htoi(&buf[index]);
+ index += 2;
+ } else {
+ if (count != HDMI_AUDIOCFG_BIN_SIZE)
+ return -EINVAL;
+
+ audio_cfg.if_format = *(buf + index++);
+ audio_cfg.i2s_entries = *(buf + index++);
+ audio_cfg.freq = *(buf + index++);
+ audio_cfg.word_length = *(buf + index++);
+ audio_cfg.format = *(buf + index++);
+ audio_cfg.if_mode = *(buf + index++);
+ audio_cfg.mute = *(buf + index++);
+ }
+
+ audiocfg(hdev, &audio_cfg);
+
+ return count;
+}
+
+static ssize_t show_plugstatus(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+ struct av8100_status av8100_status;
+ u8 plstat;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ av8100_status = av8100_status_get();
+ plstat = av8100_status.av8100_plugin_status == AV8100_HDMI_PLUGIN;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", plstat);
+ index += 2;
+ } else
+ *(buf + index++) = plstat;
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_poweronoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ bool enable = false;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_POWERONOFF_TEXT_SIZE) &&
+ (count != HDMI_POWERONOFF_TEXT_SIZE + 1))
+ return -EINVAL;
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ } else {
+ if (count != HDMI_POWERONOFF_BIN_SIZE)
+ return -EINVAL;
+ if (*buf == 0x01)
+ enable = true;
+ }
+
+ if (enable == 0) {
+ if (av8100_powerdown() != 0) {
+ dev_err(hdev->dev, "av8100_powerdown FAIL\n");
+ return -EINVAL;
+ }
+ } else {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ return count;
+}
+
+static ssize_t show_poweronoff(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+ struct av8100_status status;
+ u8 power_state;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_SCAN)
+ power_state = 0;
+ else
+ power_state = 1;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", power_state);
+ index += 3;
+ } else {
+ *(buf + index++) = power_state;
+ }
+
+ return index;
+}
+
+static ssize_t store_evwakeup(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ event_wakeup(hdev);
+
+ return count;
+}
+
+static int hdmi_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int hdmi_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/* ioctl */
+static long hdmi_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ u8 value = 0;
+ struct hdmi_register reg;
+ struct av8100_status status;
+ u8 aes_status;
+ struct hdmi_device *hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT);
+
+ switch (cmd) {
+ case IOC_PLUG_DETECT_ENABLE:
+ {
+ struct plug_detect plug_detect;
+
+ if (copy_from_user(&plug_detect, (void *)arg,
+ sizeof(struct plug_detect)))
+ return -EINVAL;
+
+ if (plugdeten(hdev, &plug_detect))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_EDID_READ:
+ {
+ struct edid_read edid_read;
+
+ if (copy_from_user(&edid_read, (void *)arg,
+ sizeof(struct edid_read)))
+ return -EINVAL;
+
+ if (edidread(hdev, &edid_read, &edid_read.data_length,
+ edid_read.data))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&edid_read,
+ sizeof(struct edid_read))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_CEC_EVENT_ENABLE:
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ event_enable(hdev, value != 0,
+ HDMI_EVENT_CEC | HDMI_EVENT_CECTXERR |
+ HDMI_EVENT_CECTX);
+ break;
+
+ case IOC_CEC_READ:
+ {
+ struct cec_rw cec_read;
+
+ if (cecread(hdev, &cec_read.src, &cec_read.dest,
+ &cec_read.length, cec_read.data))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&cec_read,
+ sizeof(struct cec_rw))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_CEC_SEND:
+ {
+ struct cec_rw cec_send;
+
+ if (copy_from_user(&cec_send, (void *)arg,
+ sizeof(struct cec_rw)))
+ return -EINVAL;
+
+ if (cecsend(hdev, cec_send.src, cec_send.dest, cec_send.length,
+ cec_send.data))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_INFOFRAME_SEND:
+ {
+ struct info_fr info_fr;
+
+ if (copy_from_user(&info_fr, (void *)arg,
+ sizeof(struct info_fr)))
+ return -EINVAL;
+
+ if (infofrsend(hdev, info_fr.type, info_fr.ver, info_fr.crc,
+ info_fr.length, info_fr.data))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDCP_EVENT_ENABLE:
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ event_enable(hdev, value != 0, HDMI_EVENT_HDCP);
+ break;
+
+ case IOC_HDCP_CHKAESOTP:
+ if (hdcpchkaesotp(hdev, &value, &aes_status))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&aes_status,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDCP_FUSEAES:
+ {
+ struct hdcp_fuseaes hdcp_fuseaes;
+
+ if (copy_from_user(&hdcp_fuseaes, (void *)arg,
+ sizeof(struct hdcp_fuseaes)))
+ return -EINVAL;
+
+ if (hdcpfuseaes(hdev, hdcp_fuseaes.key, hdcp_fuseaes.crc,
+ &hdcp_fuseaes.result))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&hdcp_fuseaes,
+ sizeof(struct hdcp_fuseaes))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_HDCP_LOADAES:
+ {
+ int block_cnt;
+ struct hdcp_loadaesone hdcp_loadaesone;
+ struct hdcp_loadaesall hdcp_loadaesall;
+
+ if (copy_from_user(&hdcp_loadaesall, (void *)arg,
+ sizeof(struct hdcp_loadaesall)))
+ return -EINVAL;
+
+ if (hdcpchkaesotp(hdev, &value, &aes_status))
+ return -EINVAL;
+
+ if (!aes_status) {
+ /* AES is not fused */
+ hdcp_loadaesone.result = HDMI_AES_NOT_FUSED;
+ goto ioc_hdcploadaes_err;
+ }
+
+ /* AES */
+ block_cnt = 0;
+ while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) {
+ memcpy(hdcp_loadaesone.key, hdcp_loadaesall.key +
+ block_cnt * HDMI_HDCP_AES_KEYSIZE,
+ HDMI_HDCP_AES_KEYSIZE);
+
+ if (hdcploadaes(hdev,
+ block_cnt + HDMI_HDCP_AES_BLOCK_START,
+ HDMI_HDCP_AES_KEYSIZE,
+ hdcp_loadaesone.key,
+ &hdcp_loadaesone.result,
+ hdcp_loadaesone.crc32))
+ return -EINVAL;
+
+ if (hdcp_loadaesone.result)
+ return -EINVAL;
+
+ block_cnt++;
+ }
+
+ /* KSV */
+ memset(hdcp_loadaesone.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE);
+ memcpy(hdcp_loadaesone.key + HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaesall.ksv, HDMI_HDCP_AES_KSVSIZE);
+
+ if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK,
+ HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaesone.key,
+ &hdcp_loadaesone.result,
+ NULL))
+ return -EINVAL;
+
+ if (hdcp_loadaesone.result)
+ return -EINVAL;
+
+ /* CRC32 */
+ if (memcmp(hdcp_loadaesall.crc32, hdcp_loadaesone.crc32,
+ CRC32_SIZE)) {
+ dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n",
+ hdcp_loadaesall.crc32[0],
+ hdcp_loadaesall.crc32[1],
+ hdcp_loadaesall.crc32[2],
+ hdcp_loadaesall.crc32[3]);
+ hdcp_loadaesone.result = HDMI_RESULT_CRC_MISMATCH;
+ goto ioc_hdcploadaes_err;
+ }
+
+ioc_hdcploadaes_err:
+ hdcp_loadaesall.result = hdcp_loadaesone.result;
+
+ if (copy_to_user((void *)arg, (void *)&hdcp_loadaesall,
+ sizeof(struct hdcp_loadaesall))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_HDCP_AUTHENCR_REQ:
+ {
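+ /*
+ * Request HDCP authentication/encryption; requires fused AES
+ * keys. The response from the chip is copied back to user space
+ * together with a result code.
+ */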
+ struct hdcp_authencr hdcp_authencr;
+ int result = HDMI_RESULT_NOT_OK;
+
+ u8 buf[AUTH_BUF_LEN];
+
+ if (copy_from_user(&hdcp_authencr, (void *)arg,
+ sizeof(struct hdcp_authencr)))
+ return -EINVAL;
+
+ /* Default not OK */
+ hdcp_authencr.resp_size = 0;
+
+ if (hdcpchkaesotp(hdev, &value, &aes_status)) {
+ result = HDMI_AES_NOT_FUSED;
+ goto hdcp_authencr_end;
+ }
+
+ if (!aes_status) {
+ /* AES is not fused */
+ result = HDMI_AES_NOT_FUSED;
+ goto hdcp_authencr_end;
+ }
+
+ if (hdcpauthencr(hdev, hdcp_authencr.auth_type,
+ hdcp_authencr.encr_type,
+ &value,
+ buf)) {
+ result = HDMI_RESULT_NOT_OK;
+ goto hdcp_authencr_end;
+ }
+
+ if (value > AUTH_BUF_LEN)
+ value = AUTH_BUF_LEN;
+
+ result = HDMI_RESULT_OK;
+ hdcp_authencr.resp_size = value;
+ memcpy(hdcp_authencr.resp, buf, value);
+
+hdcp_authencr_end:
+ hdcp_authencr.result = result;
+ if (copy_to_user((void *)arg, (void *)&hdcp_authencr,
+ sizeof(struct hdcp_authencr)))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDCP_STATE_GET:
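+ /* The HDCP state is read from the AV8100 general status register */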
+ if (av8100_reg_gen_status_r(NULL, NULL, NULL, NULL, NULL,
+ &value))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_EVENTS_READ:
+ value = events_read(hdev);
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+
+ /* Events are read: clear events */
+ events_clear(hdev, EVENTS_MASK);
+ break;
+
+ case IOC_EVENTS_CLEAR:
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ events_clear(hdev, value);
+ break;
+
+ case IOC_AUDIO_CFG:
+ {
+ struct audio_cfg audio_cfg;
+
+ if (copy_from_user(&audio_cfg, (void *)arg,
+ sizeof(struct audio_cfg)))
+ return -EINVAL;
+
+ audiocfg(hdev, &audio_cfg);
+ }
+ break;
+
+ case IOC_PLUG_STATUS:
+ status = av8100_status_get();
+ value = status.av8100_plugin_status == AV8100_HDMI_PLUGIN;
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_POWERONOFF:
+ /* Get desired power state on or off */
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ if (value == 0) {
+ if (av8100_powerdown() != 0) {
+ dev_err(hdev->dev, "av8100_powerdown FAIL\n");
+ return -EINVAL;
+ }
+ } else {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup FAIL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_EVENT_WAKEUP:
+ /* Trigger event */
+ event_wakeup(hdev);
+ break;
+
+ case IOC_POWERSTATE:
+ status = av8100_status_get();
+ value = status.av8100_state >= AV8100_OPMODE_SCAN;
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ /* Internal */
+ case IOC_HDMI_ENABLE_INTERRUPTS:
+ av8100_disable_interrupt();
+ if (av8100_enable_interrupt() != 0) {
+ dev_err(hdev->dev, "av8100_ei FAIL\n");
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_DOWNLOAD_FW:
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_ONOFF:
+ {
+ union av8100_configuration config;
+
+ /* Get desired HDMI mode on or off */
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EFAULT;
+
+ if (av8100_conf_get(AV8100_COMMAND_HDMI, &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_get FAIL\n");
+ return -EINVAL;
+ }
+ if (value == 0)
+ config.hdmi_format.hdmi_mode = AV8100_HDMI_OFF;
+ else
+ config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+
+ if (av8100_conf_prep(AV8100_COMMAND_HDMI, &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+ if (av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL,
+ I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_HDMI_REGISTER_WRITE:
+ if (copy_from_user(&reg, (void *)arg,
+ sizeof(struct hdmi_register))) {
+ return -EINVAL;
+ }
+
+ if (av8100_reg_w(reg.offset, reg.value) != 0) {
+ dev_err(hdev->dev, "hdmi_register_write FAIL\n");
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_REGISTER_READ:
+ if (copy_from_user(&reg, (void *)arg,
+ sizeof(struct hdmi_register))) {
+ return -EINVAL;
+ }
+
+ if (av8100_reg_r(reg.offset, &reg.value) != 0) {
+ dev_err(hdev->dev, "hdmi_register_write FAIL\n");
+ return -EINVAL;
+ }
+
+ if (copy_to_user((void *)arg, (void *)&reg,
+ sizeof(struct hdmi_register))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_STATUS_GET:
+ status = av8100_status_get();
+
+ if (copy_to_user((void *)arg, (void *)&status,
+ sizeof(struct av8100_status))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_CONFIGURATION_WRITE:
+ {
+ struct hdmi_command_register command_reg;
+
+ if (copy_from_user(&command_reg, (void *)arg,
+ sizeof(struct hdmi_command_register)) != 0) {
+ dev_err(hdev->dev, "IOC_HDMI_CONFIGURATION_WRITE "
+ "fail 1\n");
+ command_reg.return_status = EINVAL;
+ } else {
+ command_reg.return_status = 0;
+ if (av8100_conf_w_raw(command_reg.cmd_id,
+ command_reg.buf_len,
+ command_reg.buf,
+ &(command_reg.buf_len),
+ command_reg.buf) != 0) {
+ dev_err(hdev->dev,
+ "IOC_HDMI_CONFIGURATION_WRITE "
+ "fail 2\n");
+ command_reg.return_status = EINVAL;
+ }
+ }
+
+ if (copy_to_user((void *)arg, (void *)&command_reg,
+ sizeof(struct hdmi_command_register)) != 0) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
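+/* Poll support: reports POLLIN when an enabled HDMI event has occurred */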
+static unsigned int
+hdmi_poll(struct file *filp, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct hdmi_device *hdev;
+
+ hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT);
+ if (!hdev)
+ return 0;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ poll_wait(filp, &hdev->event_wq , wait);
+
+ LOCK_HDMI_EVENTS;
+ if (hdev->events_received == true) {
+ hdev->events_received = false;
+ mask = POLLIN | POLLRDNORM;
+ }
+ UNLOCK_HDMI_EVENTS;
+
+ return mask;
+}
+
+static const struct file_operations hdmi_fops = {
+ .owner = THIS_MODULE,
+ .open = hdmi_open,
+ .release = hdmi_release,
+ .unlocked_ioctl = hdmi_ioctl,
+ .poll = hdmi_poll
+};
+
+/* Event callback function called by hw driver */
+void hdmi_event(enum av8100_hdmi_event ev)
+{
+ int events_old;
+ int events_new;
+ struct hdmi_device *hdev;
+ struct kobject *kobj;
+
+ hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT);
+ if (!hdev)
+ return;
+
+ dev_dbg(hdev->dev, "hdmi_event %02x\n", ev);
+
+ kobj = &(hdev->dev->kobj);
+
+ LOCK_HDMI_EVENTS;
+
+ events_old = hdev->events;
+
+ /* Set event */
+ switch (ev) {
+ case AV8100_HDMI_EVENT_HDMI_PLUGIN:
+ hdev->events &= ~HDMI_EVENT_HDMI_PLUGOUT;
+ hdev->events |= HDMI_EVENT_HDMI_PLUGIN;
+ break;
+
+ case AV8100_HDMI_EVENT_HDMI_PLUGOUT:
+ hdev->events &= ~HDMI_EVENT_HDMI_PLUGIN;
+ hdev->events |= HDMI_EVENT_HDMI_PLUGOUT;
+ cec_tx_status(hdev, CEC_TX_SET_FREE);
+ break;
+
+ case AV8100_HDMI_EVENT_CEC:
+ hdev->events |= HDMI_EVENT_CEC;
+ break;
+
+ case AV8100_HDMI_EVENT_HDCP:
+ hdev->events |= HDMI_EVENT_HDCP;
+ break;
+
+ case AV8100_HDMI_EVENT_CECTXERR:
+ hdev->events |= HDMI_EVENT_CECTXERR;
+ cec_tx_status(hdev, CEC_TX_SET_FREE);
+ break;
+
+ case AV8100_HDMI_EVENT_CECTX:
+ hdev->events |= HDMI_EVENT_CECTX;
+ cec_tx_status(hdev, CEC_TX_SET_FREE);
+ break;
+
+ default:
+ break;
+ }
+
+ events_new = hdev->events_mask & hdev->events;
+
+ UNLOCK_HDMI_EVENTS;
+
+ dev_dbg(hdev->dev, "hdmi events:%02x, events_old:%02x mask:%02x\n",
+ events_new, events_old, hdev->events_mask);
+
+ if (events_new != events_old) {
+ /* Wake up application waiting for event via call to poll() */
+ sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events_received = true;
+ UNLOCK_HDMI_EVENTS;
+
+ wake_up_interruptible(&hdev->event_wq);
+ }
+}
+EXPORT_SYMBOL(hdmi_event);
+
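+/* Register the HDMI misc character device (typically /dev/hdmi) */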
+int hdmi_device_register(struct hdmi_device *hdev)
+{
+ hdev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ hdev->miscdev.name = "hdmi";
+ hdev->miscdev.fops = &hdmi_fops;
+
+ if (misc_register(&hdev->miscdev)) {
+ pr_err("hdmi misc_register failed\n");
+ return -EFAULT;
+ }
+
+ hdev->dev = hdev->miscdev.this_device;
+
+ return 0;
+}
+
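+/*
+ * Driver init: allocates the device data, registers the misc device,
+ * creates the sysfs attributes and hooks up the av8100 event callback.
+ */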
+int __init hdmi_init(void)
+{
+ struct hdmi_device *hdev;
+ int i;
+ int ret;
+
+ /* Allocate device data */
+ hdev = kzalloc(sizeof(struct hdmi_device), GFP_KERNEL);
+ if (!hdev) {
+ pr_err("%s: Alloc failure\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Add to list */
+ list_add_tail(&hdev->list, &hdmi_device_list);
+
+ if (hdmi_device_register(hdev)) {
+ pr_err("%s: hdmi_device_register failed\n", __func__);
+ list_del(&hdev->list);
+ kfree(hdev);
+ return -EFAULT;
+ }
+
+ hdev->devnr = HDMI_DEVNR_DEFAULT;
+
+ /* Default sysfs file format is hextext */
+ hdev->sysfs_data.store_as_hextext = true;
+
+ init_waitqueue_head(&hdev->event_wq);
+
+ /* Create sysfs attrs */
+ for (i = 0; attr_name(hdmi_sysfs_attrs[i]); i++) {
+ ret = device_create_file(hdev->dev, &hdmi_sysfs_attrs[i]);
+ if (ret)
+ dev_err(hdev->dev,
+ "Unable to create sysfs attr %s (%d)\n",
+ hdmi_sysfs_attrs[i].attr.name, ret);
+ }
+
+ /* Register event callback */
+ av8100_hdmi_event_cb_set(hdmi_event);
+
+ return 0;
+}
+late_initcall(hdmi_init);
+
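+/* Driver exit: reverses hdmi_init and frees the device data */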
+void hdmi_exit(void)
+{
+ struct hdmi_device *hdev = NULL;
+ int i;
+
+ if (list_empty(&hdmi_device_list))
+ return;
+ else
+ hdev = list_entry(hdmi_device_list.next,
+ struct hdmi_device, list);
+
+ /* Deregister event callback */
+ av8100_hdmi_event_cb_set(NULL);
+
+ /* Remove sysfs attrs */
+ for (i = 0; attr_name(hdmi_sysfs_attrs[i]); i++)
+ device_remove_file(hdev->dev, &hdmi_sysfs_attrs[i]);
+
+ misc_deregister(&hdev->miscdev);
+
+ /* Remove from list */
+ list_del(&hdev->list);
+
+ /* Free device data */
+ kfree(hdev);
+}
diff --git a/drivers/video/av8100/hdmi_loc.h b/drivers/video/av8100/hdmi_loc.h
new file mode 100644
index 00000000000..20314910db9
--- /dev/null
+++ b/drivers/video/av8100/hdmi_loc.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __HDMI_LOC__H__
+#define __HDMI_LOC__H__
+
+#define EDID_BUF_LEN 128
+#define COMMAND_BUF_LEN 128
+#define AES_KEY_SIZE 16
+#define CRC32_SIZE 4
+#define AUTH_BUF_LEN 126
+#define CECTX_TRY 20
+#define CECTX_WAITTIME 25
+
+struct edid_data {
+ u8 buf_len;
+ u8 buf[EDID_BUF_LEN];
+};
+
+struct authencr {
+ int result;
+ u8 buf_len;
+ u8 buf[AUTH_BUF_LEN];
+};
+
+struct hdmi_register {
+ unsigned char value;
+ unsigned char offset;
+};
+
+struct hdcp_loadaesone {
+ u8 key[AES_KEY_SIZE];
+ u8 result;
+ u8 crc32[CRC32_SIZE];
+};
+
+struct hdmi_sysfs_data {
+ bool store_as_hextext;
+ struct plug_detect plug_detect;
+ bool enable_cec_event;
+ struct edid_data edid_data;
+ struct cec_rw cec_read;
+ bool fuse_result;
+ int loadaes_result;
+ struct authencr authencr;
+};
+
+struct hdmi_command_register {
+ unsigned char cmd_id; /* input */
+ unsigned char buf_len; /* input, output */
+ unsigned char buf[COMMAND_BUF_LEN]; /* input, output */
+ unsigned char return_status; /* output */
+};
+
+enum cec_tx_status_action {
+ CEC_TX_SET_FREE,
+ CEC_TX_SET_BUSY,
+ CEC_TX_CHECK
+};
+
+/* Internal */
+#define IOC_HDMI_ENABLE_INTERRUPTS _IOWR(HDMI_IOC_MAGIC, 32, int)
+#define IOC_HDMI_DOWNLOAD_FW _IOWR(HDMI_IOC_MAGIC, 33, int)
+#define IOC_HDMI_ONOFF _IOWR(HDMI_IOC_MAGIC, 34, int)
+#define IOC_HDMI_REGISTER_WRITE _IOWR(HDMI_IOC_MAGIC, 35, int)
+#define IOC_HDMI_REGISTER_READ _IOWR(HDMI_IOC_MAGIC, 36, int)
+#define IOC_HDMI_STATUS_GET _IOWR(HDMI_IOC_MAGIC, 37, int)
+#define IOC_HDMI_CONFIGURATION_WRITE _IOWR(HDMI_IOC_MAGIC, 38, int)
+
+#endif /* __HDMI_LOC__H__ */
diff --git a/drivers/video/b2r2/Kconfig b/drivers/video/b2r2/Kconfig
new file mode 100644
index 00000000000..8cc81876de7
--- /dev/null
+++ b/drivers/video/b2r2/Kconfig
@@ -0,0 +1,134 @@
+config FB_B2R2
+ tristate "B2R2 engine support"
+ default n
+ help
+ The B2R2 engine performs bit-blitting, post-processing and
+ composition operations.
+
+config B2R2_PLUG_CONF
+ bool "B2R2 bus plug configuration"
+ depends on FB_B2R2
+ default n
+ help
+ Configures how B2R2 accesses the memory bus. Enabling this will increase
+ the performance of B2R2 at the cost of using the bus more heavily.
+
+ If this is set to 'n', the hardware defaults will be used.
+
+choice
+ prompt "Opcode size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_OPSIZE_64
+
+ config B2R2_OPSIZE_8
+ bool "8 bytes"
+ config B2R2_OPSIZE_16
+ bool "16 bytes"
+ config B2R2_OPSIZE_32
+ bool "32 bytes"
+ config B2R2_OPSIZE_64
+ bool "64 bytes"
+
+endchoice
+
+choice
+ prompt "Chunk size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_CHSIZE_128
+
+ config B2R2_CHSIZE_1
+ bool "1 op"
+ config B2R2_CHSIZE_2
+ bool "2 ops"
+ config B2R2_CHSIZE_4
+ bool "4 ops"
+ config B2R2_CHSIZE_8
+ bool "8 ops"
+ config B2R2_CHSIZE_16
+ bool "16 ops"
+ config B2R2_CHSIZE_32
+ bool "32 ops"
+ config B2R2_CHSIZE_64
+ bool "64 ops"
+ config B2R2_CHSIZE_128
+ bool "128 ops"
+endchoice
+
+choice
+ prompt "Message size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_MGSIZE_128
+
+ config B2R2_MGSIZE_1
+ bool "1 chunk"
+ config B2R2_MGSIZE_2
+ bool "2 chunks"
+ config B2R2_MGSIZE_4
+ bool "4 chunks"
+ config B2R2_MGSIZE_8
+ bool "8 s"
+ config B2R2_MGSIZE_16
+ bool "16 chunks"
+ config B2R2_MGSIZE_32
+ bool "32 chunks"
+ config B2R2_MGSIZE_64
+ bool "64 chunks"
+ config B2R2_MGSIZE_128
+ bool "128 chunks"
+endchoice
+
+choice
+ prompt "Page size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_PGSIZE_256
+
+ config B2R2_PGSIZE_64
+ bool "64 bytes"
+ config B2R2_PGSIZE_128
+ bool "128 bytes"
+ config B2R2_PGSIZE_256
+ bool "256 bytes"
+endchoice
+
+config B2R2_DEBUG
+ bool "B2R2 debugging"
+ default n
+ depends on FB_B2R2
+ help
+ Enable debugging features for the B2R2 driver.
+
+config B2R2_PROFILER
+ tristate "B2R2 profiler"
+ default n
+ depends on FB_B2R2
+ help
+ Enables the profiler for the B2R2 driver.
+
+ It is recommended to build this as a module, since the configuration
+ of filters etc. is done at load time.
+
+config B2R2_GENERIC
+ bool "B2R2 generic path"
+ default y
+ depends on FB_B2R2
+ help
+ Enables support for the generic path in the B2R2 driver. This path should
+ be used when there is no optimized implementation for a request.
+
+choice
+ prompt "Generic usage mode"
+ depends on B2R2_GENERIC
+ default B2R2_GENERIC_FALLBACK
+
+ config B2R2_GENERIC_FALLBACK
+ bool "Fallback"
+ help
+ The optimized path will be used for all supported operations, and the
+ generic path will be used as a fallback for the ones not implemented.
+
+ config B2R2_GENERIC_ONLY
+ bool "Always"
+ help
+ The generic path will be used for all operations.
+
+endchoice
diff --git a/drivers/video/b2r2/Makefile b/drivers/video/b2r2/Makefile
new file mode 100644
index 00000000000..0150ad6f761
--- /dev/null
+++ b/drivers/video/b2r2/Makefile
@@ -0,0 +1,15 @@
+# Makefile for the B2R2 loadable module
+
+obj-$(CONFIG_FB_B2R2) += b2r2.o
+
+b2r2-objs = b2r2_blt_main.o b2r2_core.o b2r2_mem_alloc.o b2r2_generic.o \
+ b2r2_node_gen.o b2r2_node_split.o b2r2_profiler_socket.o b2r2_timing.o \
+ b2r2_filters.o b2r2_utils.o b2r2_input_validation.o
+
+ifdef CONFIG_B2R2_DEBUG
+b2r2-objs += b2r2_debug.o
+endif
+
+ifeq ($(CONFIG_FB_B2R2),m)
+obj-y += b2r2_kernel_if.o
+endif
+
+obj-$(CONFIG_B2R2_PROFILER) += b2r2_profiler/
diff --git a/drivers/video/b2r2/b2r2_blt_main.c b/drivers/video/b2r2/b2r2_blt_main.c
new file mode 100644
index 00000000000..f79bfaee9ab
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_blt_main.c
@@ -0,0 +1,3363 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 Blitter module
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+#include <linux/fb.h>
+#include <linux/uaccess.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+#include <asm/cacheflush.h>
+#include <linux/smp.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/hwmem.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_node_split.h"
+#include "b2r2_generic.h"
+#include "b2r2_mem_alloc.h"
+#include "b2r2_profiler_socket.h"
+#include "b2r2_timing.h"
+#include "b2r2_debug.h"
+#include "b2r2_utils.h"
+#include "b2r2_input_validation.h"
+#include "b2r2_core.h"
+#include "b2r2_filters.h"
+
+#define B2R2_HEAP_SIZE (4 * PAGE_SIZE)
+#define MAX_TMP_BUF_SIZE (128 * PAGE_SIZE)
+
+/*
+ * TODO:
+ * Implementation of query cap
+ * Support for user space virtual pointer to physically contiguous memory
+ * Support for user space virtual pointer to physically scattered memory
+ * Callback reads lagging behind in blt_api_stress app
+ * Store smaller items in the report list instead of the whole request
+ * Support read of many report records at once.
+ */
+
+/**
+ * b2r2_ctl - Our device(s), /dev/b2r2_blt
+ */
+static struct b2r2_control *b2r2_ctl[B2R2_MAX_NBR_DEVICES];
+
+/* Debug file system support */
+#ifdef CONFIG_DEBUG_FS
+static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size);
+#endif
+
+/* Local functions */
+static void inc_stat(struct b2r2_control *cont, unsigned long *stat);
+static void dec_stat(struct b2r2_control *cont, unsigned long *stat);
+static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
+ int request_id);
+static int b2r2_blt_query_cap(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_query_cap *query_cap);
+
+#ifndef CONFIG_B2R2_GENERIC_ONLY
+static int b2r2_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request);
+
+static void job_callback(struct b2r2_core_job *job);
+static void job_release(struct b2r2_core_job *job);
+static int job_acquire_resources(struct b2r2_core_job *job, bool atomic);
+static void job_release_resources(struct b2r2_core_job *job, bool atomic);
+#endif
+
+#ifdef CONFIG_B2R2_GENERIC
+static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request);
+
+static void job_callback_gen(struct b2r2_core_job *job);
+static void job_release_gen(struct b2r2_core_job *job);
+static int job_acquire_resources_gen(struct b2r2_core_job *job, bool atomic);
+static void job_release_resources_gen(struct b2r2_core_job *job, bool atomic);
+static void tile_job_callback_gen(struct b2r2_core_job *job);
+static void tile_job_release_gen(struct b2r2_core_job *job);
+#endif
+
+
+static int resolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst, struct b2r2_resolved_buf *resolved);
+static void unresolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_buf *buf, struct b2r2_resolved_buf *resolved);
+static void sync_buf(struct b2r2_control *cont, struct b2r2_blt_img *img,
+ struct b2r2_resolved_buf *resolved, bool is_dst,
+ struct b2r2_blt_rect *rect);
+static bool is_report_list_empty(struct b2r2_blt_instance *instance);
+static bool is_synching(struct b2r2_blt_instance *instance);
+static void get_actual_dst_rect(struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *actual_dst_rect);
+static void set_up_hwmem_region(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect,
+ struct hwmem_region *region);
+static int resolve_hwmem(struct b2r2_control *cont, struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used, bool is_dst,
+ struct b2r2_resolved_buf *resolved_buf);
+static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf);
+
+/**
+ * struct sync_args - Data for clean/flush
+ *
+ * @start: Virtual start address
+ * @end: Virtual end address
+ */
+struct sync_args {
+ unsigned long start;
+ unsigned long end;
+};
+/**
+ * flush_l1_cache_range_curr_cpu() - Cleans and invalidates L1 cache on the
+ * current CPU
+ *
+ * @arg: Pointer to sync_args structure
+ */
+static inline void flush_l1_cache_range_curr_cpu(void *arg)
+{
+ struct sync_args *sa = (struct sync_args *)arg;
+
+ dmac_flush_range((void *)sa->start, (void *)sa->end);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * flush_l1_cache_range_all_cpus() - Cleans and invalidates L1 cache on all CPUs
+ *
+ * @sa: Pointer to sync_args structure
+ */
+static void flush_l1_cache_range_all_cpus(struct sync_args *sa)
+{
+ on_each_cpu(flush_l1_cache_range_curr_cpu, sa, 1);
+}
+#endif
+
+/**
+ * clean_l1_cache_range_curr_cpu() - Cleans L1 cache on current CPU
+ *
+ * Ensures that data is written out from the CPU's L1 cache;
+ * it will still be in the cache.
+ *
+ * @arg: Pointer to sync_args structure
+ */
+static inline void clean_l1_cache_range_curr_cpu(void *arg)
+{
+ struct sync_args *sa = (struct sync_args *)arg;
+
+ dmac_map_area((void *)sa->start,
+ (void *)sa->end - (void *)sa->start,
+ DMA_TO_DEVICE);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * clean_l1_cache_range_all_cpus() - Cleans L1 cache on all CPUs
+ *
+ * Ensures that data is written out from all CPUs' L1 caches;
+ * it will still be in the cache.
+ *
+ * @sa: Pointer to sync_args structure
+ */
+static void clean_l1_cache_range_all_cpus(struct sync_args *sa)
+{
+ on_each_cpu(clean_l1_cache_range_curr_cpu, sa, 1);
+}
+#endif
+
+/**
+ * b2r2_blt_open - Implements file open on the b2r2_blt device
+ *
+ * @inode: File system inode
+ * @filp: File pointer
+ *
+ * A B2R2 BLT instance is created and stored in the file structure.
+ */
+static int b2r2_blt_open(struct inode *inode, struct file *filp)
+{
+ int ret = 0;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_control *cont = filp->private_data;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_open);
+
+ /* Allocate and initialize the instance */
+ instance = (struct b2r2_blt_instance *)
+ kmalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance) {
+ b2r2_log_err(cont->dev, "%s: Failed to alloc\n", __func__);
+ ret = -ENOMEM;
+ goto instance_alloc_failed;
+ }
+ memset(instance, 0, sizeof(*instance));
+ INIT_LIST_HEAD(&instance->report_list);
+ mutex_init(&instance->lock);
+ init_waitqueue_head(&instance->report_list_waitq);
+ init_waitqueue_head(&instance->synch_done_waitq);
+ instance->control = cont;
+
+ /*
+ * Remember the instance so that we can retrieve it in
+ * other functions
+ */
+ filp->private_data = instance;
+ goto out;
+
+instance_alloc_failed:
+out:
+ dec_stat(cont, &cont->stat_n_in_open);
+
+ return ret;
+}
+
+/**
+ * b2r2_blt_release - Implements last close on an instance of
+ * the b2r2_blt device
+ *
+ * @inode: File system inode
+ * @filp: File pointer
+ *
+ * All active jobs are finished or cancelled and allocated data
+ * is released.
+ */
+static int b2r2_blt_release(struct inode *inode, struct file *filp)
+{
+ int ret;
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) filp->private_data;
+ struct b2r2_control *cont = instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_release);
+
+ /* Finish all outstanding requests */
+ ret = b2r2_blt_synch(instance, 0);
+ if (ret < 0)
+ b2r2_log_warn(cont->dev, "%s: b2r2_blt_sync failed with %d\n",
+ __func__, ret);
+
+ /* Now cancel any remaining outstanding request */
+ if (instance->no_of_active_requests) {
+ struct b2r2_core_job *job;
+
+ b2r2_log_warn(cont->dev, "%s: %d active requests\n", __func__,
+ instance->no_of_active_requests);
+
+ /* Find and cancel all jobs belonging to us */
+ job = b2r2_core_job_find_first_with_tag(cont,
+ (int) instance);
+ while (job) {
+ b2r2_core_job_cancel(job);
+ /* Matches addref in b2r2_core_job_find... */
+ b2r2_core_job_release(job, __func__);
+ job = b2r2_core_job_find_first_with_tag(cont,
+ (int) instance);
+ }
+
+ b2r2_log_warn(cont->dev, "%s: %d active requests after "
+ "cancel\n", __func__, instance->no_of_active_requests);
+ }
+
+ /* Release jobs in report list */
+ mutex_lock(&instance->lock);
+ while (!list_empty(&instance->report_list)) {
+ struct b2r2_blt_request *request = list_first_entry(
+ &instance->report_list,
+ struct b2r2_blt_request,
+ list);
+ list_del_init(&request->list);
+ mutex_unlock(&instance->lock);
+ /*
+ * This release matches the addref when the job was put into
+ * the report list
+ */
+ b2r2_core_job_release(&request->job, __func__);
+ mutex_lock(&instance->lock);
+ }
+ mutex_unlock(&instance->lock);
+
+ /* Release our instance */
+ kfree(instance);
+
+ dec_stat(cont, &cont->stat_n_in_release);
+
+ return 0;
+}
+
+/**
+ * b2r2_blt_ioctl - This routine implements b2r2_blt ioctl interface
+ *
+ * @file: file pointer.
+ * @cmd: ioctl command.
+ * @arg: input argument for ioctl.
+ *
+ * Returns 0 if OK else negative error code
+ */
+static long b2r2_blt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) file->private_data;
+ struct b2r2_control *cont = instance->control;
+
+ /* Process actual ioctl */
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Get the instance from the file structure */
+ switch (cmd) {
+ case B2R2_BLT_IOC: {
+ /* This is the "blit" command */
+
+ /* arg is user pointer to struct b2r2_blt_request */
+ struct b2r2_blt_request *request =
+ kmalloc(sizeof(*request), GFP_KERNEL);
+ if (!request) {
+ b2r2_log_err(cont->dev, "%s: Failed to alloc mem\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Initialize the structure */
+ memset(request, 0, sizeof(*request));
+ INIT_LIST_HEAD(&request->list);
+ request->instance = instance;
+
+ /*
+ * The user request is a sub structure of the
+ * kernel request structure.
+ */
+
+ /* Get the user data */
+ if (copy_from_user(&request->user_req, (void *)arg,
+ sizeof(request->user_req))) {
+ b2r2_log_err(cont->dev, "%s: copy_from_user failed\n",
+ __func__);
+ kfree(request);
+ return -EFAULT;
+ }
+
+ if (!b2r2_validate_user_req(cont, &request->user_req)) {
+ kfree(request);
+ return -EINVAL;
+ }
+
+ request->profile = is_profiler_registered_approx();
+
+ /*
+ * If the user specified a color look-up table,
+ * make a copy that the HW can use.
+ */
+ if ((request->user_req.flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ request->clut = dma_alloc_coherent(cont->dev,
+ CLUT_SIZE, &(request->clut_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (request->clut == NULL) {
+ b2r2_log_err(cont->dev, "%s CLUT allocation "
+ "failed.\n", __func__);
+ kfree(request);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(request->clut,
+ request->user_req.clut, CLUT_SIZE)) {
+ b2r2_log_err(cont->dev, "%s: CLUT "
+ "copy_from_user failed\n",
+ __func__);
+ dma_free_coherent(cont->dev, CLUT_SIZE,
+ request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ kfree(request);
+ return -EFAULT;
+ }
+ }
+
+ /* Perform the blit */
+
+#ifdef CONFIG_B2R2_GENERIC_ONLY
+ /* Use the generic path for all operations */
+ ret = b2r2_generic_blt(instance, request);
+#else
+ /* Use the optimized path */
+ ret = b2r2_blt(instance, request);
+#endif
+
+#ifdef CONFIG_B2R2_GENERIC_FALLBACK
+ /* Fall back to generic path if operation was not supported */
+ if (ret == -ENOSYS) {
+ struct b2r2_blt_request *request_gen;
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
+ /* No support for BG BLEND in generic
+ * implementation yet */
+ b2r2_log_warn(cont->dev, "%s: Unsupported: "
+ "Background blend in b2r2_generic_blt\n",
+ __func__);
+ return ret;
+ }
+
+ b2r2_log_info(cont->dev,
+ "b2r2_blt=%d Going generic.\n", ret);
+ request_gen = kmalloc(sizeof(*request_gen), GFP_KERNEL);
+ if (!request_gen) {
+ b2r2_log_err(cont->dev,
+ "%s: Failed to alloc mem for "
+ "request_gen\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Initialize the structure */
+ memset(request_gen, 0, sizeof(*request_gen));
+ INIT_LIST_HEAD(&request_gen->list);
+ request_gen->instance = instance;
+
+ /*
+ * The user request is a sub structure of the
+ * kernel request structure.
+ */
+
+ /* Get the user data */
+ if (copy_from_user(&request_gen->user_req, (void *)arg,
+ sizeof(request_gen->user_req))) {
+ b2r2_log_err(cont->dev, "%s: copy_from_user "
+ "failed\n", __func__);
+ kfree(request_gen);
+ return -EFAULT;
+ }
+
+ /*
+ * If the user specified a color look-up table,
+ * make a copy that the HW can use.
+ */
+ if ((request_gen->user_req.flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION)
+ != 0) {
+ request_gen->clut = dma_alloc_coherent(
+ cont->dev, CLUT_SIZE,
+ &(request_gen->clut_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (request_gen->clut == NULL) {
+ b2r2_log_err(cont->dev, "%s CLUT "
+ "allocation failed.\n",
+ __func__);
+ kfree(request_gen);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(request_gen->clut,
+ request_gen->user_req.clut,
+ CLUT_SIZE)) {
+ b2r2_log_err(cont->dev, "%s: CLUT"
+ " copy_from_user failed\n",
+ __func__);
+ dma_free_coherent(cont->dev, CLUT_SIZE,
+ request_gen->clut,
+ request_gen->clut_phys_addr);
+ request_gen->clut = NULL;
+ request_gen->clut_phys_addr = 0;
+ kfree(request_gen);
+ return -EFAULT;
+ }
+ }
+
+ request_gen->profile = is_profiler_registered_approx();
+
+ ret = b2r2_generic_blt(instance, request_gen);
+ b2r2_log_info(cont->dev, "\nb2r2_generic_blt=%d "
+ "Generic done.\n", ret);
+ }
+#endif /* CONFIG_B2R2_GENERIC_FALLBACK */
+
+ break;
+ }
+
+ case B2R2_BLT_SYNCH_IOC:
+ /* arg is request_id */
+ ret = b2r2_blt_synch(instance, (int) arg);
+ break;
+
+ case B2R2_BLT_QUERY_CAP_IOC:
+ {
+ /* Arg is struct b2r2_blt_query_cap */
+ struct b2r2_blt_query_cap query_cap;
+
+ /* Get the user data */
+ if (copy_from_user(&query_cap, (void *)arg,
+ sizeof(query_cap))) {
+ b2r2_log_err(cont->dev, "%s: copy_from_user failed\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ /* Fill in our capabilities */
+ ret = b2r2_blt_query_cap(instance, &query_cap);
+
+ /* Return data to user */
+ if (copy_to_user((void *)arg, &query_cap,
+ sizeof(query_cap))) {
+ b2r2_log_err(cont->dev, "%s: copy_to_user failed\n",
+ __func__);
+ return -EFAULT;
+ }
+ break;
+ }
+
+ default:
+ /* Unknown command */
+ b2r2_log_err(cont->dev, "%s: Unknown cmd %d\n", __func__, cmd);
+ ret = -EINVAL;
+ break;
+
+ }
+
+ if (ret < 0)
+ b2r2_log_err(cont->dev, "EC %d OK!\n", -ret);
+
+ return ret;
+}
+
+/**
+ * b2r2_blt_poll - Support for user-space poll, select & epoll.
+ * Used for user-space callback
+ *
+ * @filp: File to poll on
+ * @wait: Poll table to wait on
+ *
+ * This function checks if there is anything to read.
+ */
+static unsigned b2r2_blt_poll(struct file *filp, poll_table *wait)
+{
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) filp->private_data;
+ unsigned int mask = 0;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ poll_wait(filp, &instance->report_list_waitq, wait);
+ mutex_lock(&instance->lock);
+ if (!list_empty(&instance->report_list))
+ mask |= POLLIN | POLLRDNORM;
+ mutex_unlock(&instance->lock);
+
+ return mask;
+}
+
+/**
+ * b2r2_blt_read - Read report data, used for user-space callback
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static ssize_t b2r2_blt_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ int ret = 0;
+ struct b2r2_blt_request *request = NULL;
+ struct b2r2_blt_report report;
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) filp->private_data;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /*
+ * We return only complete report records, one at a time.
+ * Might be more efficient to support read of many.
+ */
+ count = (count / sizeof(struct b2r2_blt_report)) *
+ sizeof(struct b2r2_blt_report);
+ if (count > sizeof(struct b2r2_blt_report))
+ count = sizeof(struct b2r2_blt_report);
+ if (count == 0)
+ return count;
+
+ /*
+ * Loop and wait here until we have anything to return or
+ * until interrupted
+ */
+ mutex_lock(&instance->lock);
+ while (list_empty(&instance->report_list)) {
+ mutex_unlock(&instance->lock);
+
+ /* Return if non blocking read */
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ b2r2_log_info(cont->dev, "%s - Going to sleep\n", __func__);
+ if (wait_event_interruptible(
+ instance->report_list_waitq,
+ !is_report_list_empty(instance)))
+ /* signal: tell the fs layer to handle it */
+ return -ERESTARTSYS;
+
+ /* Otherwise loop, but first reacquire the lock */
+ mutex_lock(&instance->lock);
+ }
+
+ if (!list_empty(&instance->report_list))
+ request = list_first_entry(
+ &instance->report_list, struct b2r2_blt_request, list);
+
+ if (request) {
+ /* Remove from list to avoid reading twice */
+ list_del_init(&request->list);
+
+ report.request_id = request->request_id;
+ report.report1 = request->user_req.report1;
+ report.report2 = request->user_req.report2;
+ report.usec_elapsed = 0; /* TBD */
+
+ mutex_unlock(&instance->lock);
+ if (copy_to_user(buf, &report, sizeof(report)))
+ ret = -EFAULT;
+ mutex_lock(&instance->lock);
+
+ if (ret < 0) {
+ /* copy to user failed, re-insert into list */
+ list_add(&request->list,
+ &request->instance->report_list);
+ request = NULL;
+ }
+ }
+ mutex_unlock(&instance->lock);
+
+ if (request)
+ /*
+ * Release matching the addref when the job was put into
+ * the report list
+ */
+ b2r2_core_job_release(&request->job, __func__);
+
+ return ret < 0 ? ret : count;
+}
+
+/**
+ * b2r2_blt_fops - File operations for b2r2_blt
+ */
+static const struct file_operations b2r2_blt_fops = {
+ .owner = THIS_MODULE,
+ .open = b2r2_blt_open,
+ .release = b2r2_blt_release,
+ .unlocked_ioctl = b2r2_blt_ioctl,
+ .poll = b2r2_blt_poll,
+ .read = b2r2_blt_read,
+};
+
+#ifndef CONFIG_B2R2_GENERIC_ONLY
+/**
+ * b2r2_blt - Implementation of the B2R2 blit request
+ *
+ * @instance: The B2R2 BLT instance
+ * @request: The request to perform
+ */
+static int b2r2_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request)
+{
+ int ret = 0;
+ struct b2r2_blt_rect actual_dst_rect;
+ int request_id = 0;
+ struct b2r2_node *last_node = request->first_node;
+ int node_count;
+ struct b2r2_control *cont = instance->control;
+
+ u32 thread_runtime_at_start = 0;
+
+ if (request->profile) {
+ request->start_time_nsec = b2r2_get_curr_nsec();
+ thread_runtime_at_start = (u32)task_sched_runtime(current);
+ }
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt);
+
+ /* Debug prints of incoming request */
+ b2r2_log_info(cont->dev,
+ "src.fmt=%#010x src.buf={%d,%d,%d} "
+ "src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
+ request->user_req.src_img.fmt,
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ b2r2_log_info(cont->dev,
+ "bg.fmt=%#010x bg.buf={%d,%d,%d} "
+ "bg.w,h={%d,%d} bg.rect={%d,%d,%d,%d}\n",
+ request->user_req.bg_img.fmt,
+ request->user_req.bg_img.buf.type,
+ request->user_req.bg_img.buf.fd,
+ request->user_req.bg_img.buf.offset,
+ request->user_req.bg_img.width,
+ request->user_req.bg_img.height,
+ request->user_req.bg_rect.x,
+ request->user_req.bg_rect.y,
+ request->user_req.bg_rect.width,
+ request->user_req.bg_rect.height);
+
+ b2r2_log_info(cont->dev,
+ "dst.fmt=%#010x dst.buf={%d,%d,%d} "
+ "dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n",
+ request->user_req.dst_img.fmt,
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height);
+
+ inc_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Wait here if synch is ongoing */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ if (ret) {
+ b2r2_log_warn(cont->dev, "%s: Sync wait interrupted, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+ goto synch_interrupted;
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Resolve the buffers */
+
+ /* Source buffer */
+ ret = resolve_buf(cont, &request->user_req.src_img,
+ &request->user_req.src_rect,
+ false, &request->src_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve src buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_buf_failed;
+ }
+
+ /* Background buffer */
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
+ ret = resolve_buf(cont, &request->user_req.bg_img,
+ &request->user_req.bg_rect,
+ false, &request->bg_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve bg buf failed,"
+ " %d\n", __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_bg_buf_failed;
+ }
+ }
+
+ /* Source mask buffer */
+ ret = resolve_buf(cont, &request->user_req.src_mask,
+ &request->user_req.src_rect, false,
+ &request->src_mask_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve src mask buf failed,"
+ " %d\n", __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_mask_buf_failed;
+ }
+
+ /* Destination buffer */
+ get_actual_dst_rect(&request->user_req, &actual_dst_rect);
+ ret = resolve_buf(cont, &request->user_req.dst_img, &actual_dst_rect,
+ true, &request->dst_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve dst buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_dst_buf_failed;
+ }
+
+ /* Debug prints of resolved buffers */
+ b2r2_log_info(cont->dev, "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->src_resolved.physical_address,
+ request->src_resolved.virtual_address,
+ request->src_resolved.is_pmem,
+ request->src_resolved.filep,
+ request->src_resolved.file_physical_start,
+ request->src_resolved.file_virtual_start,
+ request->src_resolved.file_len);
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ b2r2_log_info(cont->dev, "bg.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->bg_resolved.physical_address,
+ request->bg_resolved.virtual_address,
+ request->bg_resolved.is_pmem,
+ request->bg_resolved.filep,
+ request->bg_resolved.file_physical_start,
+ request->bg_resolved.file_virtual_start,
+ request->bg_resolved.file_len);
+
+ b2r2_log_info(cont->dev, "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->dst_resolved.physical_address,
+ request->dst_resolved.virtual_address,
+ request->dst_resolved.is_pmem,
+ request->dst_resolved.filep,
+ request->dst_resolved.file_physical_start,
+ request->dst_resolved.file_virtual_start,
+ request->dst_resolved.file_len);
+
+ /* Calculate the number of nodes (and resources) needed for this job */
+ ret = b2r2_node_split_analyze(request, MAX_TMP_BUF_SIZE, &node_count,
+ &request->bufs, &request->buf_count,
+ &request->node_split_job);
+ if (ret == -ENOSYS) {
+ /* There was no optimized path for this request */
+ b2r2_log_info(cont->dev, "%s: No optimized path for request\n",
+ __func__);
+ goto no_optimized_path;
+
+ } else if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Failed to analyze request,"
+ " ret = %d\n", __func__, ret);
+#ifdef CONFIG_DEBUG_FS
+ {
+ /* Failed, dump job to dmesg */
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ b2r2_log_info(cont->dev, "%s: Analyze failed for:\n",
+ __func__);
+ if (Buf != NULL) {
+ sprintf_req(request, Buf, sizeof(char) * 4096);
+ b2r2_log_info(cont->dev, "%s", Buf);
+ kfree(Buf);
+ } else {
+ b2r2_log_info(cont->dev, "Unable to print the"
+ " request. Message buffer"
+ " allocation failed.\n");
+ }
+ }
+#endif
+ goto generate_nodes_failed;
+ }
+
+ /* Allocate the nodes needed */
+#ifdef B2R2_USE_NODE_GEN
+ request->first_node = b2r2_blt_alloc_nodes(cont,
+ node_count);
+ if (request->first_node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Failed to allocate nodes,"
+ " ret = %d\n", __func__, ret);
+ goto generate_nodes_failed;
+ }
+#else
+ ret = b2r2_node_alloc(cont, node_count, &(request->first_node));
+ if (ret < 0 || request->first_node == NULL) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#endif
+
+ /* Build the B2R2 node list */
+ ret = b2r2_node_split_configure(cont, &request->node_split_job,
+ request->first_node);
+
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s:"
+ " Failed to perform node split, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+
+ /* Exit here if dry run */
+ if (request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN)
+ goto exit_dry_run;
+
+ /* Configure the request */
+ last_node = request->first_node;
+ while (last_node && last_node->next)
+ last_node = last_node->next;
+
+ request->job.tag = (int) instance;
+ request->job.prio = request->user_req.prio;
+ request->job.first_node_address =
+ request->first_node->physical_address;
+ request->job.last_node_address =
+ last_node->physical_address;
+ request->job.callback = job_callback;
+ request->job.release = job_release;
+ request->job.acquire_resources = job_acquire_resources;
+ request->job.release_resources = job_release_resources;
+
+ /* Synchronize memory occupied by the buffers */
+
+ /* Source buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH) &&
+ (request->user_req.src_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_img,
+ &request->src_resolved, false,
+ &request->user_req.src_rect);
+
+ /* Background buffer */
+ if ((request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) &&
+ !(request->user_req.flags &
+ B2R2_BLT_FLAG_BG_NO_CACHE_FLUSH) &&
+ (request->user_req.bg_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.bg_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.bg_img,
+ &request->bg_resolved, false,
+ &request->user_req.bg_rect);
+
+ /* Source mask buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH) &&
+ (request->user_req.src_mask.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_mask.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_mask,
+ &request->src_mask_resolved, false, NULL);
+
+ /* Destination buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH) &&
+ (request->user_req.dst_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.dst_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.dst_img,
+ &request->dst_resolved, true,
+ &request->user_req.dst_rect);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Remember latest request for debugfs */
+ cont->debugfs_latest_request = *request;
+#endif
+
+ /* Submit the job */
+ b2r2_log_info(cont->dev, "%s: Submitting job\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request->profile)
+ request->nsec_active_in_cpu =
+ (s32)((u32)task_sched_runtime(current) -
+ thread_runtime_at_start);
+
+ mutex_lock(&instance->lock);
+
+ /* Add the job to b2r2_core */
+ request_id = b2r2_core_job_add(cont, &request->job);
+ request->request_id = request_id;
+
+ dec_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn(cont->dev, "%s: Failed to add job, ret = %d\n",
+ __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ goto job_add_failed;
+ }
+
+ inc_stat(cont, &cont->stat_n_jobs_added);
+
+ instance->no_of_active_requests++;
+ mutex_unlock(&instance->lock);
+
+ /* Wait for the job to be done if synchronous */
+ if ((request->user_req.flags & B2R2_BLT_FLAG_ASYNCH) == 0) {
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
+
+ ret = b2r2_core_job_wait(&request->job);
+
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(cont->dev, "%s: Failed to wait job,"
+ " ret = %d\n", __func__, ret);
+ else
+ b2r2_log_info(cont->dev, "%s: Synchronous wait done\n",
+ __func__);
+ ret = 0;
+ }
+
+ /*
+ * Release matching the addref in b2r2_core_job_add,
+ * the request must not be accessed after this call
+ */
+ b2r2_core_job_release(&request->job, __func__);
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ return ret >= 0 ? request_id : ret;
+
+job_add_failed:
+exit_dry_run:
+no_optimized_path:
+generate_nodes_failed:
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+resolve_dst_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+resolve_src_mask_buf_failed:
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ unresolve_buf(cont, &request->user_req.bg_img.buf,
+ &request->bg_resolved);
+resolve_bg_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+resolve_src_buf_failed:
+synch_interrupted:
+ job_release(&request->job);
+ dec_stat(cont, &cont->stat_n_jobs_released);
+ if ((request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN) == 0 || ret)
+ b2r2_log_warn(cont->dev, "%s returns with error %d\n",
+ __func__, ret);
+
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ return ret;
+}
+
+/**
+ * Called when job is done or cancelled
+ *
+ * @job: The job
+ */
+static void job_callback(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+
+ if (cont->dev)
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+ /* Unresolve the buffers */
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ unresolve_buf(cont, &request->user_req.bg_img.buf,
+ &request->bg_resolved);
+
+ /* Move to report list if the job shall be reported */
+ /* FIXME: Use a smaller struct? */
+ mutex_lock(&request->instance->lock);
+ if (request->user_req.flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) {
+ /* Move job to report list */
+ list_add_tail(&request->list,
+ &request->instance->report_list);
+ inc_stat(cont, &cont->stat_n_jobs_in_report_list);
+
+ /* Wake up poll */
+ wake_up_interruptible(
+ &request->instance->report_list_waitq);
+
+ /* Add a reference because we put the job in the report list */
+ b2r2_core_job_addref(job, __func__);
+ }
+
+ /*
+ * Decrease number of active requests and wake up
+ * synching threads if active requests reaches zero
+ */
+ BUG_ON(request->instance->no_of_active_requests == 0);
+ request->instance->no_of_active_requests--;
+ if (request->instance->synching &&
+ request->instance->no_of_active_requests == 0) {
+ request->instance->synching = false;
+ /* Wake up all syncing */
+
+ wake_up_interruptible_all(
+ &request->instance->synch_done_waitq);
+ }
+ mutex_unlock(&request->instance->lock);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Dump job if cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED) {
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ b2r2_log_info(cont->dev, "%s: Job cancelled:\n", __func__);
+ if (Buf != NULL) {
+ sprintf_req(request, Buf, sizeof(char) * 4096);
+ b2r2_log_info(cont->dev, "%s", Buf);
+ kfree(Buf);
+ } else {
+ b2r2_log_info(cont->dev, "Unable to print the request."
+ " Message buffer allocation failed.\n");
+ }
+ }
+#endif
+
+ if (request->profile) {
+ request->total_time_nsec =
+ (s32)(b2r2_get_curr_nsec() - request->start_time_nsec);
+ b2r2_call_profiler_blt_done(request);
+ }
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when job should be released (free memory etc.)
+ *
+ * @job: The job
+ */
+static void job_release(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+
+ inc_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "%s, first_node=%p, ref_count=%d\n",
+ __func__, request->first_node, request->job.ref_count);
+
+ b2r2_node_split_cancel(cont, &request->node_split_job);
+
+ if (request->first_node) {
+ b2r2_debug_job_done(cont, request->first_node);
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(cont, request->first_node);
+#else
+ b2r2_node_free(cont, request->first_node);
+#endif
+ }
+
+ /* Release memory for the request */
+ if (request->clut != NULL) {
+ dma_free_coherent(cont->dev, CLUT_SIZE, request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ }
+ kfree(request);
+}
+
+/**
+ * Tells the job to try to allocate the resources needed to execute the job.
+ * Called just before execution of a job.
+ *
+ * @job: The job
+ * @atomic: true if called from atomic (i.e. interrupt) context. If function
+ * can't allocate in atomic context it should return error, it
+ * will then be called later from non-atomic context.
+ */
+static int job_acquire_resources(struct b2r2_core_job *job, bool atomic)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+ int ret;
+ int i;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ if (request->buf_count == 0)
+ return 0;
+
+ if (request->buf_count > MAX_TMP_BUFS_NEEDED) {
+ b2r2_log_err(cont->dev,
+ "%s: request->buf_count > MAX_TMP_BUFS_NEEDED\n",
+ __func__);
+ return -ENOMSG;
+ }
+
+ /*
+ * 1 to 1 mapping between request temp buffers and temp buffers
+ * (request temp buf 0 is always temp buf 0, request temp buf 1 is
+ * always temp buf 1 and so on) to avoid starvation of jobs that
+ * require multiple temp buffers. Not optimal in terms of memory
+ * usage but we avoid getting into a situation where lower prio jobs can
+ * delay higher prio jobs that require more temp buffers.
+ */
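+ /* Temp buffer 0 is always claimed first, so checking it is enough */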
+ if (cont->tmp_bufs[0].in_use)
+ return -EAGAIN;
+
+ for (i = 0; i < request->buf_count; i++) {
+ if (cont->tmp_bufs[i].buf.size < request->bufs[i].size) {
+ b2r2_log_err(cont->dev, "%s: "
+ "cont->tmp_bufs[i].buf.size < "
+ "request->bufs[i].size\n", __func__);
+ ret = -ENOMSG;
+ goto error;
+ }
+
+ cont->tmp_bufs[i].in_use = true;
+ request->bufs[i].phys_addr = cont->tmp_bufs[i].buf.phys_addr;
+ request->bufs[i].virt_addr = cont->tmp_bufs[i].buf.virt_addr;
+
+ b2r2_log_info(cont->dev, "%s: phys=%p, virt=%p\n",
+ __func__, (void *)request->bufs[i].phys_addr,
+ request->bufs[i].virt_addr);
+
+ ret = b2r2_node_split_assign_buffers(cont,
+ &request->node_split_job,
+ request->first_node, request->bufs,
+ request->buf_count);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < request->buf_count; i++)
+ cont->tmp_bufs[i].in_use = false;
+
+ return ret;
+}
+
+/**
+ * Tells the job to free the resources needed to execute the job.
+ * Called after execution of a job.
+ *
+ * @job: The job
+ * @atomic: true if called from atomic (i.e. interrupt) context
+ */
+static void job_release_resources(struct b2r2_core_job *job, bool atomic)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+ int i;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Free any temporary buffers */
+ for (i = 0; i < request->buf_count; i++) {
+
+ b2r2_log_info(cont->dev, "%s: freeing %d bytes\n",
+ __func__, request->bufs[i].size);
+ cont->tmp_bufs[i].in_use = false;
+ memset(&request->bufs[i], 0, sizeof(request->bufs[i]));
+ }
+ request->buf_count = 0;
+
+ /*
+ * Early release of nodes
+ * FIXME: If nodes are to be reused we don't want to release here
+ */
+ if (!atomic && request->first_node) {
+ b2r2_debug_job_done(cont, request->first_node);
+
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(cont, request->first_node);
+#else
+ b2r2_node_free(cont, request->first_node);
+#endif
+ request->first_node = NULL;
+ }
+}
+
+#endif /* !CONFIG_B2R2_GENERIC_ONLY */
+
+#ifdef CONFIG_B2R2_GENERIC
+/**
+ * Called when job for one tile is done or cancelled
+ * in the generic path.
+ *
+ * @job: The job
+ */
+static void tile_job_callback_gen(struct b2r2_core_job *job)
+{
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) job->tag;
+ struct b2r2_control *cont = instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Notify if a tile job is cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED)
+ b2r2_log_info(cont->dev, "%s: Tile job cancelled:\n",
+ __func__);
+#endif
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when job is done or cancelled.
+ * Used for the last tile in the generic path
+ * to notify waiting clients.
+ *
+ * @job: The job
+ */
+static void job_callback_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+ /* Unresolve the buffers */
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+
+ /* Move to report list if the job shall be reported */
+ /* FIXME: Use a smaller struct? */
+ mutex_lock(&request->instance->lock);
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) {
+ /* Move job to report list */
+ list_add_tail(&request->list,
+ &request->instance->report_list);
+ inc_stat(cont, &cont->stat_n_jobs_in_report_list);
+
+ /* Wake up poll */
+ wake_up_interruptible(
+ &request->instance->report_list_waitq);
+
+ /*
+ * Add a reference because we put the
+ * job in the report list
+ */
+ b2r2_core_job_addref(job, __func__);
+ }
+
+ /*
+ * Decrease number of active requests and wake up
+ * synching threads if active requests reaches zero
+ */
+ BUG_ON(request->instance->no_of_active_requests == 0);
+ request->instance->no_of_active_requests--;
+ if (request->instance->synching &&
+ request->instance->no_of_active_requests == 0) {
+ request->instance->synching = false;
+ /* Wake up all syncing */
+
+ wake_up_interruptible_all(
+ &request->instance->synch_done_waitq);
+ }
+ mutex_unlock(&request->instance->lock);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Dump job if cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED) {
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ b2r2_log_info(cont->dev, "%s: Job cancelled:\n", __func__);
+ if (Buf != NULL) {
+ sprintf_req(request, Buf, sizeof(char) * 4096);
+ b2r2_log_info(cont->dev, "%s", Buf);
+ kfree(Buf);
+ } else {
+ b2r2_log_info(cont->dev, "Unable to print the request."
+ " Message buffer allocation failed.\n");
+ }
+ }
+#endif
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when tile job should be released (free memory etc.)
+ * Should be used only for tile jobs. Tile jobs should only be used
+ * by b2r2_core, thus making ref_count trigger their release.
+ *
+ * @job: The job
+ */
+
+static void tile_job_release_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) job->tag;
+ struct b2r2_control *cont = instance->control;
+
+ inc_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "%s, first_node_address=0x%.8x, ref_count="
+ "%d\n", __func__, job->first_node_address,
+ job->ref_count);
+
+ /* Release memory for the job */
+ kfree(job);
+}
+
+/**
+ * Called when job should be released (free memory etc.)
+ *
+ * @job: The job
+ */
+static void job_release_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+
+ inc_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "%s, first_node=%p, ref_count=%d\n",
+ __func__, request->first_node, request->job.ref_count);
+
+ if (request->first_node) {
+ b2r2_debug_job_done(cont, request->first_node);
+
+ /* Free nodes */
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(cont, request->first_node);
+#else
+ b2r2_node_free(cont, request->first_node);
+#endif
+ }
+
+ /* Release memory for the request */
+ if (request->clut != NULL) {
+ dma_free_coherent(cont->dev, CLUT_SIZE, request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ }
+ kfree(request);
+}
+
+static int job_acquire_resources_gen(struct b2r2_core_job *job, bool atomic)
+{
+ /* Nothing so far. Temporary buffers are pre-allocated */
+ return 0;
+}
+
+static void job_release_resources_gen(struct b2r2_core_job *job, bool atomic)
+{
+ /* Nothing so far. Temporary buffers are pre-allocated */
+}
+
+/**
+ * b2r2_generic_blt - Generic implementation of the B2R2 blit request
+ *
+ * @instance: The B2R2 BLT instance
+ * @request: The request to perform
+ */
+static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request)
+{
+ int ret = 0;
+ struct b2r2_blt_rect actual_dst_rect;
+ int request_id = 0;
+ struct b2r2_node *last_node = request->first_node;
+ int node_count;
+ s32 tmp_buf_width = 0;
+ s32 tmp_buf_height = 0;
+ u32 tmp_buf_count = 0;
+ s32 x;
+ s32 y;
+ const struct b2r2_blt_rect *dst_rect = &(request->user_req.dst_rect);
+ const s32 dst_img_width = request->user_req.dst_img.width;
+ const s32 dst_img_height = request->user_req.dst_img.height;
+ const enum b2r2_blt_flag flags = request->user_req.flags;
+ /* Descriptors for the temporary buffers */
+ struct b2r2_work_buf work_bufs[4];
+ struct b2r2_blt_rect dst_rect_tile;
+ int i;
+ struct b2r2_control *cont = instance->control;
+
+ u32 thread_runtime_at_start = 0;
+ s32 nsec_active_in_b2r2 = 0;
+
+ /*
+ * Early exit if zero blt.
+ * dst_rect outside of dst_img or
+ * dst_clip_rect outside of dst_img.
+ */
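+ /*
+ * E.g. (hypothetical values) dst_rect = {-100, -100, 50, 50} ends at
+ * (-50, -50) and thus never intersects the destination image, so the
+ * request is treated as a zero blit and finished without touching
+ * the hardware.
+ */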
+ if (dst_rect->x + dst_rect->width <= 0 ||
+ dst_rect->y + dst_rect->height <= 0 ||
+ dst_img_width <= dst_rect->x ||
+ dst_img_height <= dst_rect->y ||
+ ((flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0 &&
+ (dst_img_width <= request->user_req.dst_clip_rect.x ||
+ dst_img_height <= request->user_req.dst_clip_rect.y ||
+ request->user_req.dst_clip_rect.x +
+ request->user_req.dst_clip_rect.width <= 0 ||
+ request->user_req.dst_clip_rect.y +
+ request->user_req.dst_clip_rect.height <= 0))) {
+ goto zero_blt;
+ }
+
+ if (request->profile) {
+ request->start_time_nsec = b2r2_get_curr_nsec();
+ thread_runtime_at_start = (u32)task_sched_runtime(current);
+ }
+
+ memset(work_bufs, 0, sizeof(work_bufs));
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt);
+
+ /* Debug prints of incoming request */
+ b2r2_log_info(cont->dev,
+ "src.fmt=%#010x flags=0x%.8x src.buf={%d,%d,0x%.8x}\n"
+ "src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
+ request->user_req.src_img.fmt,
+ request->user_req.flags,
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+ b2r2_log_info(cont->dev,
+ "dst.fmt=%#010x dst.buf={%d,%d,0x%.8x}\n"
+ "dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n"
+ "dst_clip_rect={%d,%d,%d,%d}\n",
+ request->user_req.dst_img.fmt,
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height,
+ request->user_req.dst_clip_rect.x,
+ request->user_req.dst_clip_rect.y,
+ request->user_req.dst_clip_rect.width,
+ request->user_req.dst_clip_rect.height);
+
+ inc_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Wait here if synch is ongoing */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ if (ret) {
+ b2r2_log_warn(cont->dev, "%s: Sync wait interrupted, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+ goto synch_interrupted;
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Resolve the buffers */
+
+ /* Source buffer */
+ ret = resolve_buf(cont, &request->user_req.src_img,
+ &request->user_req.src_rect, false, &request->src_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve src buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_buf_failed;
+ }
+
+ /* Source mask buffer */
+ ret = resolve_buf(cont, &request->user_req.src_mask,
+ &request->user_req.src_rect, false,
+ &request->src_mask_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Resolve src mask buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_mask_buf_failed;
+ }
+
+ /* Destination buffer */
+ get_actual_dst_rect(&request->user_req, &actual_dst_rect);
+ ret = resolve_buf(cont, &request->user_req.dst_img, &actual_dst_rect,
+ true, &request->dst_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve dst buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_dst_buf_failed;
+ }
+
+ /* Debug prints of resolved buffers */
+ b2r2_log_info(cont->dev, "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->src_resolved.physical_address,
+ request->src_resolved.virtual_address,
+ request->src_resolved.is_pmem,
+ request->src_resolved.filep,
+ request->src_resolved.file_physical_start,
+ request->src_resolved.file_virtual_start,
+ request->src_resolved.file_len);
+
+ b2r2_log_info(cont->dev, "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->dst_resolved.physical_address,
+ request->dst_resolved.virtual_address,
+ request->dst_resolved.is_pmem,
+ request->dst_resolved.filep,
+ request->dst_resolved.file_physical_start,
+ request->dst_resolved.file_virtual_start,
+ request->dst_resolved.file_len);
+
+ /* Calculate the number of nodes (and resources) needed for this job */
+ ret = b2r2_generic_analyze(request, &tmp_buf_width,
+ &tmp_buf_height, &tmp_buf_count, &node_count);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to analyze request, ret = %d\n",
+ __func__, ret);
+#ifdef CONFIG_DEBUG_FS
+ {
+ /* Failed, dump job to dmesg */
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ b2r2_log_info(cont->dev,
+ "%s: Analyze failed for:\n", __func__);
+ if (Buf != NULL) {
+ sprintf_req(request, Buf, sizeof(char) * 4096);
+ b2r2_log_info(cont->dev, "%s", Buf);
+ kfree(Buf);
+ } else {
+ b2r2_log_info(cont->dev,
+ "Unable to print the request. "
+ "Message buffer allocation failed.\n");
+ }
+ }
+#endif
+ goto generate_nodes_failed;
+ }
+
+ /* Allocate the nodes needed */
+#ifdef B2R2_USE_NODE_GEN
+ request->first_node = b2r2_blt_alloc_nodes(cont, node_count);
+ if (request->first_node == NULL) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#else
+ ret = b2r2_node_alloc(cont, node_count, &(request->first_node));
+ if (ret < 0 || request->first_node == NULL) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#endif
+
+ /* Allocate the temporary buffers */
+ for (i = 0; i < tmp_buf_count; i++) {
+ void *virt;
+ work_bufs[i].size = tmp_buf_width * tmp_buf_height * 4;
+
+ virt = dma_alloc_coherent(cont->dev,
+ work_bufs[i].size,
+ &(work_bufs[i].phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (virt == NULL) {
+ ret = -ENOMEM;
+ goto alloc_work_bufs_failed;
+ }
+
+ work_bufs[i].virt_addr = virt;
+ memset(work_bufs[i].virt_addr, 0xff, work_bufs[i].size);
+ }
+ ret = b2r2_generic_configure(request,
+ request->first_node, &work_bufs[0], tmp_buf_count);
+
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to perform generic configure, ret = %d\n",
+ __func__, ret);
+ goto generic_conf_failed;
+ }
+
+ /* Exit here if dry run */
+ if (flags & B2R2_BLT_FLAG_DRY_RUN)
+ goto exit_dry_run;
+
+ /*
+ * Configure the request and make sure
+ * that its job is run only for the LAST tile.
+ * This is when the request is complete
+ * and waiting clients should be notified.
+ */
+ last_node = request->first_node;
+ while (last_node && last_node->next)
+ last_node = last_node->next;
+
+ request->job.tag = (int) instance;
+ request->job.prio = request->user_req.prio;
+ request->job.first_node_address =
+ request->first_node->physical_address;
+ request->job.last_node_address =
+ last_node->physical_address;
+ request->job.callback = job_callback_gen;
+ request->job.release = job_release_gen;
+ /* Work buffers and nodes are pre-allocated */
+ request->job.acquire_resources = job_acquire_resources_gen;
+ request->job.release_resources = job_release_resources_gen;
+
+ /* Flush the L1/L2 cache for the buffers */
+
+ /* Source buffer */
+ if (!(flags & B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH) &&
+ (request->user_req.src_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_img,
+ &request->src_resolved,
+ false, /*is_dst*/
+ &request->user_req.src_rect);
+
+ /* Source mask buffer */
+ if (!(flags & B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH) &&
+ (request->user_req.src_mask.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_mask.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_mask,
+ &request->src_mask_resolved,
+ false, /*is_dst*/
+ NULL);
+
+ /* Destination buffer */
+ if (!(flags & B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH) &&
+ (request->user_req.dst_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.dst_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.dst_img,
+ &request->dst_resolved,
+ true, /*is_dst*/
+ &request->user_req.dst_rect);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Remember latest request */
+ cont->debugfs_latest_request = *request;
+#endif
+
+ /*
+ * Same nodes are reused for all the jobs needed to complete the blit.
+ * Nodes are NOT released together with associated job,
+ * as is the case with optimized b2r2_blt() path.
+ */
+ mutex_lock(&instance->lock);
+ instance->no_of_active_requests++;
+ mutex_unlock(&instance->lock);
+ /*
+ * Process all but the last row in the destination rectangle.
+ * Consider only the tiles that will actually end up inside
+ * the destination image.
+ * dst_rect->height - tmp_buf_height being <= 0 is all right.
+ * The loop will not be entered since y will always be equal to or
+ * greater than zero.
+ * Early exit check at the beginning handles the cases when nothing
+ * at all should be processed.
+ */
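+ /*
+ * Illustration (assumed numbers, not taken from the code): with
+ * tmp_buf_width = tmp_buf_height = 128, dst_rect = {0, 0, 300, 300}
+ * and a 300x300 destination image, this loop handles the tile rows
+ * at y = 0 and y = 128, while the remaining 44-pixel-high row at
+ * y = 256 is handled by the last-row loop below, together with the
+ * request's own job for the very last tile.
+ */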
+ y = 0;
+ if (dst_rect->y < 0)
+ y = -dst_rect->y;
+
+ for (; y < dst_rect->height - tmp_buf_height &&
+ y + dst_rect->y < dst_img_height - tmp_buf_height;
+ y += tmp_buf_height) {
+ /* Tile in the destination rectangle being processed */
+ struct b2r2_blt_rect dst_rect_tile;
+ dst_rect_tile.y = y;
+ dst_rect_tile.width = tmp_buf_width;
+ dst_rect_tile.height = tmp_buf_height;
+
+ x = 0;
+ if (dst_rect->x < 0)
+ x = -dst_rect->x;
+
+ for (; x < dst_rect->width && x + dst_rect->x < dst_img_width;
+ x += tmp_buf_width) {
+ /*
+ * Tile jobs are freed by the supplied release function
+ * when ref_count on a tile_job reaches zero.
+ */
+ struct b2r2_core_job *tile_job =
+ kmalloc(sizeof(*tile_job), GFP_KERNEL);
+ if (tile_job == NULL) {
+ /*
+ * Skip this tile. Do not abort,
+ * just hope for better luck
+ * with rest of the tiles.
+ * Memory might become available.
+ */
+ b2r2_log_info(cont->dev, "%s: Failed to alloc "
+ "job. Skipping tile at (x, y)="
+ "(%d, %d)\n", __func__, x, y);
+ continue;
+ }
+ tile_job->tag = request->job.tag;
+ tile_job->prio = request->job.prio;
+ tile_job->first_node_address =
+ request->job.first_node_address;
+ tile_job->last_node_address =
+ request->job.last_node_address;
+ tile_job->callback = tile_job_callback_gen;
+ tile_job->release = tile_job_release_gen;
+ /* Work buffers and nodes are pre-allocated */
+ tile_job->acquire_resources =
+ job_acquire_resources_gen;
+ tile_job->release_resources =
+ job_release_resources_gen;
+
+ dst_rect_tile.x = x;
+ if (x + dst_rect->x + tmp_buf_width > dst_img_width) {
+ /*
+ * Only a part of the tile can be written.
+ * Limit imposed by buffer size.
+ */
+ dst_rect_tile.width =
+ dst_img_width - (x + dst_rect->x);
+ } else if (x + tmp_buf_width > dst_rect->width) {
+ /*
+ * Only a part of the tile can be written.
+ * In this case limit imposed by dst_rect size.
+ */
+ dst_rect_tile.width = dst_rect->width - x;
+ } else {
+ /* Whole tile can be written. */
+ dst_rect_tile.width = tmp_buf_width;
+ }
+ /*
+ * Where applicable, calculate area in src buffer
+ * that is needed to generate the specified part
+ * of destination rectangle.
+ */
+ b2r2_generic_set_areas(request,
+ request->first_node, &dst_rect_tile);
+ /* Submit the job */
+ b2r2_log_info(cont->dev,
+ "%s: Submitting job\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_add);
+
+ mutex_lock(&instance->lock);
+
+ request_id = b2r2_core_job_add(cont, tile_job);
+
+ dec_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Failed to add tile job, ret = %d\n",
+ __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ goto job_add_failed;
+ }
+
+ inc_stat(cont, &cont->stat_n_jobs_added);
+
+ mutex_unlock(&instance->lock);
+
+ /* Wait for the job to be done */
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
+
+ ret = b2r2_core_job_wait(tile_job);
+
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to wait job, ret = %d\n",
+ __func__, ret);
+ else {
+ b2r2_log_info(cont->dev,
+ "%s: Synchronous wait done\n",
+ __func__);
+
+ nsec_active_in_b2r2 +=
+ tile_job->nsec_active_in_hw;
+ }
+ /* Release matching the addref in b2r2_core_job_add */
+ b2r2_core_job_release(tile_job, __func__);
+ }
+ }
+
+ x = 0;
+ if (dst_rect->x < 0)
+ x = -dst_rect->x;
+
+ for (; x < dst_rect->width &&
+ x + dst_rect->x < dst_img_width; x += tmp_buf_width) {
+ struct b2r2_core_job *tile_job = NULL;
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ /*
+ * Tile jobs are freed by the supplied release function
+ * when ref_count on a tile_job reaches zero.
+ * Do NOT allocate a tile_job for the last tile.
+ * Send the job from the request. This way clients
+ * will be notified when the whole blit is complete
+ * and not just part of it.
+ */
+ tile_job = kmalloc(sizeof(*tile_job), GFP_KERNEL);
+ if (tile_job == NULL) {
+ b2r2_log_info(cont->dev, "%s: Failed to alloc "
+ "job. Skipping tile at (x, y)="
+ "(%d, %d)\n", __func__, x, y);
+ continue;
+ }
+ tile_job->tag = request->job.tag;
+ tile_job->prio = request->job.prio;
+ tile_job->first_node_address =
+ request->job.first_node_address;
+ tile_job->last_node_address =
+ request->job.last_node_address;
+ tile_job->callback = tile_job_callback_gen;
+ tile_job->release = tile_job_release_gen;
+ tile_job->acquire_resources =
+ job_acquire_resources_gen;
+ tile_job->release_resources =
+ job_release_resources_gen;
+ }
+
+ dst_rect_tile.x = x;
+ if (x + dst_rect->x + tmp_buf_width > dst_img_width) {
+ /*
+ * Only a part of the tile can be written.
+ * Limit imposed by buffer size.
+ */
+ dst_rect_tile.width = dst_img_width - (x + dst_rect->x);
+ } else if (x + tmp_buf_width > dst_rect->width) {
+ /*
+ * Only a part of the tile can be written.
+ * In this case limit imposed by dst_rect size.
+ */
+ dst_rect_tile.width = dst_rect->width - x;
+ } else {
+ /* Whole tile can be written. */
+ dst_rect_tile.width = tmp_buf_width;
+ }
+ /*
+ * y is now the last row. Either because the whole dst_rect
+ * has been processed, or because the last row that will be
+ * written to dst_img has been reached. Limits imposed in
+ * the same way as for width.
+ */
+ dst_rect_tile.y = y;
+ if (y + dst_rect->y + tmp_buf_height > dst_img_height)
+ dst_rect_tile.height =
+ dst_img_height - (y + dst_rect->y);
+ else if (y + tmp_buf_height > dst_rect->height)
+ dst_rect_tile.height = dst_rect->height - y;
+ else
+ dst_rect_tile.height = tmp_buf_height;
+
+ b2r2_generic_set_areas(request,
+ request->first_node, &dst_rect_tile);
+
+ b2r2_log_info(cont->dev, "%s: Submitting job\n", __func__);
+ inc_stat(cont, &cont->stat_n_in_blt_add);
+
+ mutex_lock(&instance->lock);
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ request_id = b2r2_core_job_add(cont, tile_job);
+ } else {
+ /*
+ * Last tile. Send the job-struct from the request.
+ * Clients will be notified once it completes.
+ */
+ request_id = b2r2_core_job_add(cont, &request->job);
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn(cont->dev, "%s: Failed to add tile job, "
+ "ret = %d\n", __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ if (tile_job != NULL)
+ kfree(tile_job);
+ goto job_add_failed;
+ }
+
+ inc_stat(cont, &cont->stat_n_jobs_added);
+ mutex_unlock(&instance->lock);
+
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ ret = b2r2_core_job_wait(tile_job);
+ } else {
+ /*
+ * This is the last tile. Wait for the job-struct from
+ * the request.
+ */
+ ret = b2r2_core_job_wait(&request->job);
+ }
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to wait job, ret = %d\n",
+ __func__, ret);
+ else {
+ b2r2_log_info(cont->dev,
+ "%s: Synchronous wait done\n", __func__);
+
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width)
+ nsec_active_in_b2r2 +=
+ tile_job->nsec_active_in_hw;
+ else
+ nsec_active_in_b2r2 +=
+ request->job.nsec_active_in_hw;
+ }
+
+ /*
+ * Release matching the addref in b2r2_core_job_add.
+ * Make sure that the correct job-struct is released
+ * when the last tile is processed.
+ */
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ b2r2_core_job_release(tile_job, __func__);
+ } else {
+ /*
+ * Update profiling information before
+ * the request is released together with
+ * its core_job.
+ */
+ if (request->profile) {
+ request->nsec_active_in_cpu =
+ (s32)((u32)task_sched_runtime(current) -
+ thread_runtime_at_start);
+ request->total_time_nsec =
+ (s32)(b2r2_get_curr_nsec() -
+ request->start_time_nsec);
+ request->job.nsec_active_in_hw =
+ nsec_active_in_b2r2;
+
+ b2r2_call_profiler_blt_done(request);
+ }
+
+ b2r2_core_job_release(&request->job, __func__);
+ }
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ for (i = 0; i < tmp_buf_count; i++) {
+ dma_free_coherent(cont->dev,
+ work_bufs[i].size,
+ work_bufs[i].virt_addr,
+ work_bufs[i].phys_addr);
+ memset(&(work_bufs[i]), 0, sizeof(work_bufs[i]));
+ }
+
+ return request_id;
+
+job_add_failed:
+exit_dry_run:
+generic_conf_failed:
+alloc_work_bufs_failed:
+ for (i = 0; i < 4; i++) {
+ if (work_bufs[i].virt_addr != 0) {
+ dma_free_coherent(cont->dev,
+ work_bufs[i].size,
+ work_bufs[i].virt_addr,
+ work_bufs[i].phys_addr);
+ memset(&(work_bufs[i]), 0, sizeof(work_bufs[i]));
+ }
+ }
+
+generate_nodes_failed:
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+resolve_dst_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+resolve_src_mask_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+resolve_src_buf_failed:
+synch_interrupted:
+	dec_stat(cont, &cont->stat_n_in_blt);
+zero_blt:
+	job_release_gen(&request->job);
+	dec_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "b2r2:%s ret=%d", __func__, ret);
+ return ret;
+}
+#endif /* CONFIG_B2R2_GENERIC */
+
+/**
+ * b2r2_blt_synch - Implements wait for all or a specified job
+ *
+ * @instance: The B2R2 BLT instance
+ * @request_id: If 0, wait for all requests on this instance to finish.
+ * Else wait for request with given request id to finish.
+ */
+static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
+ int request_id)
+{
+ int ret = 0;
+ struct b2r2_control *cont = instance->control;
+
+ b2r2_log_info(cont->dev, "%s, request_id=%d\n", __func__, request_id);
+
+ if (request_id == 0) {
+ /* Wait for all requests */
+ inc_stat(cont, &cont->stat_n_in_synch_0);
+
+ /* Enter state "synching" if we have any active request */
+ mutex_lock(&instance->lock);
+ if (instance->no_of_active_requests)
+ instance->synching = true;
+ mutex_unlock(&instance->lock);
+
+ /* Wait until no longer in state synching */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ dec_stat(cont, &cont->stat_n_in_synch_0);
+ } else {
+ struct b2r2_core_job *job;
+
+ inc_stat(cont, &cont->stat_n_in_synch_job);
+
+ /* Wait for specific job */
+ job = b2r2_core_job_find(cont, request_id);
+ if (job) {
+ /* Wait for the found job */
+ ret = b2r2_core_job_wait(job);
+ /* Release matching the addref in b2r2_core_job_find */
+ b2r2_core_job_release(job, __func__);
+ }
+
+ /* If the job is not found we assume that it has already been run */
+ dec_stat(cont, &cont->stat_n_in_synch_job);
+ }
+
+ b2r2_log_info(cont->dev,
+ "%s, request_id=%d, returns %d\n", __func__, request_id, ret);
+
+ return ret;
+}
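+/*
+ * Note: a request_id as returned by a successful blit (see b2r2_generic_blt
+ * above) can be passed here to wait for that particular job only, while
+ * request_id == 0 blocks until no_of_active_requests has dropped to zero for
+ * the instance. Hypothetical example: issue three asynchronous blits, then
+ * call synch with 0 to wait for all of them at once.
+ */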
+
+/**
+ * Query B2R2 capabilities
+ *
+ * @instance: The B2R2 BLT instance
+ * @query_cap: The structure receiving the capabilities
+ */
+static int b2r2_blt_query_cap(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_query_cap *query_cap)
+{
+ /* FIXME: Not implemented yet */
+ return -ENOSYS;
+}
+
+static void get_actual_dst_rect(struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *actual_dst_rect)
+{
+ struct b2r2_blt_rect dst_img_bounds;
+
+ b2r2_get_img_bounding_rect(&req->dst_img, &dst_img_bounds);
+
+ b2r2_intersect_rects(&req->dst_rect, &dst_img_bounds, actual_dst_rect);
+
+ if (req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP)
+ b2r2_intersect_rects(actual_dst_rect, &req->dst_clip_rect,
+ actual_dst_rect);
+}
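+/*
+ * Example (illustrative values only): for dst_rect = {10, 10, 100, 100}, a
+ * 64x64 destination image and, with B2R2_BLT_FLAG_DESTINATION_CLIP set,
+ * dst_clip_rect = {0, 0, 50, 50}, the actual destination rectangle becomes
+ * {10, 10, 54, 54} after clamping to the image bounds and {10, 10, 40, 40}
+ * after the clip rectangle has been applied.
+ */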
+
+static void set_up_hwmem_region(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect,
+ struct hwmem_region *region)
+{
+ s32 img_size;
+
+ memset(region, 0, sizeof(*region));
+
+ if (b2r2_is_zero_area_rect(rect))
+ return;
+
+ img_size = b2r2_get_img_size(cont, img);
+
+ if (b2r2_is_single_plane_fmt(img->fmt) &&
+ b2r2_is_independent_pixel_fmt(img->fmt)) {
+ int img_fmt_bpp = b2r2_get_fmt_bpp(cont, img->fmt);
+ u32 img_pitch = b2r2_get_img_pitch(cont, img);
+
+ region->offset = (u32)(img->buf.offset + (rect->y *
+ img_pitch));
+ region->count = (u32)rect->height;
+ region->start = (u32)((rect->x * img_fmt_bpp) / 8);
+ region->end = (u32)b2r2_div_round_up(
+ (rect->x + rect->width) * img_fmt_bpp, 8);
+ region->size = img_pitch;
+ } else {
+ /*
+ * TODO: Locking entire buffer as a quick safe solution. In the
+ * future we should lock less to avoid unnecessary cache
+ * synching. Pixel interleaved YCbCr formats should be quite
+ * easy, just align start and stop points on 2.
+ */
+ region->offset = (u32)img->buf.offset;
+ region->count = 1;
+ region->start = 0;
+ region->end = (u32)img_size;
+ region->size = (u32)img_size;
+ }
+}
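+/*
+ * Example (illustrative values only): a single-plane 16 bpp format (e.g.
+ * RGB565) with a 1280 byte pitch, buf.offset = 0 and rect = {10, 2, 100, 50}
+ * gives region = {offset = 2560, count = 50, start = 20, end = 220,
+ * size = 1280}, i.e. 50 rows with only the byte range covering the 100
+ * requested pixels considered in each row.
+ */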
+
+static int resolve_hwmem(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst,
+ struct b2r2_resolved_buf *resolved_buf)
+{
+ int return_value = 0;
+ enum hwmem_mem_type mem_type;
+ enum hwmem_access access;
+ enum hwmem_access required_access;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region region;
+
+ resolved_buf->hwmem_alloc =
+ hwmem_resolve_by_name(img->buf.hwmem_buf_name);
+ if (IS_ERR(resolved_buf->hwmem_alloc)) {
+ return_value = PTR_ERR(resolved_buf->hwmem_alloc);
+ b2r2_log_info(cont->dev, "%s: hwmem_resolve_by_name failed, "
+ "error code: %i\n", __func__, return_value);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(resolved_buf->hwmem_alloc, &resolved_buf->file_len,
+ &mem_type, &access);
+
+ required_access = (is_dst ? HWMEM_ACCESS_WRITE : HWMEM_ACCESS_READ) |
+ HWMEM_ACCESS_IMPORT;
+ if ((required_access & access) != required_access) {
+ b2r2_log_info(cont->dev, "%s: Insufficient access to hwmem "
+ "buffer.\n", __func__);
+ return_value = -EACCES;
+ goto access_check_failed;
+ }
+
+ if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS) {
+ b2r2_log_info(cont->dev, "%s: Hwmem buffer is scattered.\n",
+ __func__);
+ return_value = -EINVAL;
+ goto buf_scattered;
+ }
+
+ if (resolved_buf->file_len <
+ img->buf.offset + (__u32)b2r2_get_img_size(cont, img)) {
+ b2r2_log_info(cont->dev, "%s: Hwmem buffer too small. (%d < "
+ "%d)\n", __func__, resolved_buf->file_len,
+ img->buf.offset +
+ (__u32)b2r2_get_img_size(cont, img));
+ return_value = -EINVAL;
+ goto size_check_failed;
+ }
+
+ return_value = hwmem_pin(resolved_buf->hwmem_alloc, &mem_chunk,
+ &mem_chunk_length);
+ if (return_value < 0) {
+ b2r2_log_info(cont->dev, "%s: hwmem_pin failed, "
+ "error code: %i\n", __func__, return_value);
+ goto pin_failed;
+ }
+ resolved_buf->file_physical_start = mem_chunk.paddr;
+
+ set_up_hwmem_region(cont, img, rect_2b_used, &region);
+ return_value = hwmem_set_domain(resolved_buf->hwmem_alloc,
+ required_access, HWMEM_DOMAIN_SYNC, &region);
+ if (return_value < 0) {
+ b2r2_log_info(cont->dev, "%s: hwmem_set_domain failed, "
+ "error code: %i\n", __func__, return_value);
+ goto set_domain_failed;
+ }
+
+ resolved_buf->physical_address =
+ resolved_buf->file_physical_start + img->buf.offset;
+
+ goto out;
+
+set_domain_failed:
+ hwmem_unpin(resolved_buf->hwmem_alloc);
+pin_failed:
+size_check_failed:
+buf_scattered:
+access_check_failed:
+ hwmem_release(resolved_buf->hwmem_alloc);
+resolve_failed:
+
+out:
+ return return_value;
+}
+
+static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf)
+{
+ hwmem_unpin(resolved_buf->hwmem_alloc);
+ hwmem_release(resolved_buf->hwmem_alloc);
+}
+
+/**
+ * unresolve_buf() - Must be called after resolve_buf
+ *
+ * @buf: The buffer specification as supplied from user space
+ * @resolved: Gathered information about the buffer
+ */
+static void unresolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_buf *buf,
+ struct b2r2_resolved_buf *resolved)
+{
+#ifdef CONFIG_ANDROID_PMEM
+ if (resolved->is_pmem && resolved->filep)
+ put_pmem_file(resolved->filep);
+#endif
+ if (resolved->hwmem_alloc != NULL)
+ unresolve_hwmem(resolved);
+}
+
+/**
+ * get_fb_info() - Fill buf with framebuffer info
+ *
+ * @file: The framebuffer file
+ * @buf: Gathered information about the buffer
+ * @img_offset: Image offset into the frame buffer
+ *
+ * Returns 0 if OK else negative error code
+ */
+static int get_fb_info(struct file *file,
+ struct b2r2_resolved_buf *buf,
+ __u32 img_offset)
+{
+#ifdef CONFIG_FB
+ if (file && buf &&
+ MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+ int i;
+ /*
+ * FIXME: registered_fb is traversed here without any locking.
+ */
+ for (i = 0; i < num_registered_fb; i++) {
+ struct fb_info *info = registered_fb[i];
+
+ if (info && info->dev &&
+ MINOR(info->dev->devt) ==
+ MINOR(file->f_dentry->d_inode->i_rdev)) {
+ buf->file_physical_start = info->fix.smem_start;
+ buf->file_virtual_start = (u32)info->screen_base;
+ buf->file_len = info->fix.smem_len;
+ buf->physical_address = buf->file_physical_start +
+ img_offset;
+ buf->virtual_address =
+ (void *) (buf->file_virtual_start +
+ img_offset);
+ return 0;
+ }
+ }
+ }
+#endif
+ return -EINVAL;
+}
+
+/**
+ * resolve_buf() - Returns the physical & virtual addresses of a B2R2 blt buffer
+ *
+ * @img: The image specification as supplied from user space
+ * @rect_2b_used: The part of the image b2r2 will use.
+ * @is_dst: true if the buffer is a destination buffer, false if it is a source buffer.
+ * @resolved: Gathered information about the buffer
+ *
+ * Returns 0 if OK else negative error code
+ */
+static int resolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst,
+ struct b2r2_resolved_buf *resolved)
+{
+ int ret = 0;
+
+ memset(resolved, 0, sizeof(*resolved));
+
+ switch (img->buf.type) {
+ case B2R2_BLT_PTR_NONE:
+ break;
+
+ case B2R2_BLT_PTR_PHYSICAL:
+ resolved->physical_address = img->buf.offset;
+ resolved->file_len = img->buf.len;
+ break;
+
+ /* FD + OFFSET type */
+ case B2R2_BLT_PTR_FD_OFFSET: {
+ /*
+ * TODO: Do we need to check if the process is allowed to
+ * read/write (depending on if it's dst or src) to the file?
+ */
+#ifdef CONFIG_ANDROID_PMEM
+ if (!get_pmem_file(
+ img->buf.fd,
+ (unsigned long *) &resolved->file_physical_start,
+ (unsigned long *) &resolved->file_virtual_start,
+ (unsigned long *) &resolved->file_len,
+ &resolved->filep)) {
+ resolved->physical_address =
+ resolved->file_physical_start +
+ img->buf.offset;
+ resolved->virtual_address = (void *)
+ (resolved->file_virtual_start +
+ img->buf.offset);
+ resolved->is_pmem = true;
+ } else
+#endif
+ {
+ int fput_needed;
+ struct file *file;
+
+ file = fget_light(img->buf.fd, &fput_needed);
+ if (file == NULL)
+ return -EINVAL;
+
+ ret = get_fb_info(file, resolved,
+ img->buf.offset);
+ fput_light(file, fput_needed);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Check bounds */
+ if (img->buf.offset + img->buf.len >
+ resolved->file_len) {
+ ret = -ESPIPE;
+ unresolve_buf(cont, &img->buf, resolved);
+ }
+
+ break;
+ }
+
+ case B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET:
+ ret = resolve_hwmem(cont, img, rect_2b_used, is_dst, resolved);
+ break;
+
+ default:
+ b2r2_log_warn(cont->dev, "%s: Failed to resolve buf type %d\n",
+ __func__, img->buf.type);
+
+ ret = -EINVAL;
+ break;
+
+ }
+
+ return ret;
+}
+
+/**
+ * sync_buf - Synchronizes the memory occupied by an image buffer.
+ *
+ * @img: User image specification
+ * @resolved: Gathered info (physical address etc.) about the buffer
+ * @is_dst: true if the buffer is a destination buffer, false if the buffer is a
+ * source buffer.
+ * @rect: rectangle in the image buffer that should be synced.
+ * NULL if the buffer is a source mask.
+ */
+static void sync_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
+ struct b2r2_resolved_buf *resolved,
+ bool is_dst,
+ struct b2r2_blt_rect *rect)
+{
+ struct sync_args sa;
+ u32 start_phys, end_phys;
+
+ if (B2R2_BLT_PTR_NONE == img->buf.type ||
+ B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type)
+ return;
+
+ start_phys = resolved->physical_address;
+ end_phys = resolved->physical_address + img->buf.len;
+
+ /*
+ * TODO: Very ugly. We should find out whether the memory is coherent in
+ * some generic way but cache handling will be rewritten soon so there
+ * is no use spending time on it. In the new design this will probably
+ * not be a problem.
+ */
+ /* Frame buffer is coherent, at least now. */
+ if (!resolved->is_pmem) {
+ /*
+ * Drain the write buffers as they are not always part of the
+ * coherent concept.
+ */
+ wmb();
+
+ return;
+ }
+
+ /*
+ * src_mask does not have rect.
+ * Also flush full buffer for planar and semiplanar YUV formats
+ */
+ if (rect == NULL ||
+ (img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR) ||
+ (img->fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE) ||
+ (img->fmt ==
+ B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE)) {
+ sa.start = (unsigned long)resolved->virtual_address;
+ sa.end = (unsigned long)resolved->virtual_address +
+ img->buf.len;
+ start_phys = resolved->physical_address;
+ end_phys = resolved->physical_address + img->buf.len;
+ } else {
+ /*
+ * buffer is not a src_mask so make use of rect when
+ * clean & flush caches
+ */
+ u32 bpp; /* Bits per pixel */
+ u32 pitch;
+
+ switch (img->fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* Fall through */
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ bpp = 16;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ bpp = 24;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ bpp = 32;
+ break;
+ default:
+ bpp = 12;
+ }
+ if (img->pitch == 0)
+ pitch = (img->width * bpp) / 8;
+ else
+ pitch = img->pitch;
+
+ /*
+ * For 422I formats 2 horizontal pixels share color data.
+ * Thus, the x position must be aligned down to closest even
+ * number and width must be aligned up.
+ */
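+ /*
+ * For example (hypothetical rect): x = 3, width = 5 in a
+ * B2R2_BLT_FMT_CB_Y_CR_Y buffer is widened below to x = 2,
+ * width = 6 so that both pixels of each chroma pair are synced.
+ */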
+ {
+ s32 x;
+ s32 width;
+
+ switch (img->fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ x = (rect->x / 2) * 2;
+ width = ((rect->width + 1) / 2) * 2;
+ break;
+ default:
+ x = rect->x;
+ width = rect->width;
+ break;
+ }
+
+ sa.start = (unsigned long)resolved->virtual_address +
+ rect->y * pitch + (x * bpp) / 8;
+ sa.end = (unsigned long)sa.start +
+ (rect->height - 1) * pitch +
+ (width * bpp) / 8;
+
+ start_phys = resolved->physical_address +
+ rect->y * pitch + (x * bpp) / 8;
+ end_phys = start_phys +
+ (rect->height - 1) * pitch +
+ (width * bpp) / 8;
+ }
+ }
+
+ /*
+ * The virtual address to a pmem buffer is retrieved from ioremap, not
+ * sure if it's ok to use such an address as a kernel virtual address.
+ * When doing it at a higher level such as dma_map_single it triggers an
+ * error but at lower levels such as dmac_clean_range it seems to work,
+ * hence the low level stuff.
+ */
+
+ if (is_dst) {
+ /*
+ * According to ARM's docs you must clean before invalidating
+ * (i.e. flush) to avoid losing data.
+ */
+
+ /* Flush L1 cache */
+#ifdef CONFIG_SMP
+ flush_l1_cache_range_all_cpus(&sa);
+#else
+ flush_l1_cache_range_curr_cpu(&sa);
+#endif
+
+ /* Flush L2 cache */
+ outer_flush_range(start_phys, end_phys);
+ } else {
+ /* Clean L1 cache */
+#ifdef CONFIG_SMP
+ clean_l1_cache_range_all_cpus(&sa);
+#else
+ clean_l1_cache_range_curr_cpu(&sa);
+#endif
+
+ /* Clean L2 cache */
+ outer_clean_range(start_phys, end_phys);
+ }
+}
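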
+
+/**
+ * is_report_list_empty() - Mutex protected check of report list
+ *
+ * @instance: The B2R2 BLT instance
+ */
+static bool is_report_list_empty(struct b2r2_blt_instance *instance)
+{
+ bool is_empty;
+
+ mutex_lock(&instance->lock);
+ is_empty = list_empty(&instance->report_list);
+ mutex_unlock(&instance->lock);
+
+ return is_empty;
+}
+
+/**
+ * is_synching() - Mutex protected check if synching
+ *
+ * @instance: The B2R2 BLT instance
+ */
+static bool is_synching(struct b2r2_blt_instance *instance)
+{
+ bool is_synching;
+
+ mutex_lock(&instance->lock);
+ is_synching = instance->synching;
+ mutex_unlock(&instance->lock);
+
+ return is_synching;
+}
+
+/**
+ * inc_stat() - Mutex protected increment of statistics variable
+ *
+ * @stat: Pointer to statistics variable that should be incremented
+ */
+static void inc_stat(struct b2r2_control *cont, unsigned long *stat)
+{
+ mutex_lock(&cont->stat_lock);
+ (*stat)++;
+ mutex_unlock(&cont->stat_lock);
+}
+
+/**
+ * dec_stat() - Mutex protected decrement of statistics variable
+ *
+ * @stat: Pointer to statistics variable that should be decremented
+ */
+static void dec_stat(struct b2r2_control *cont, unsigned long *stat)
+{
+ mutex_lock(&cont->stat_lock);
+ (*stat)--;
+ mutex_unlock(&cont->stat_lock);
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * sprintf_req() - Builds a string representing the request, for debug
+ *
+ * @request: Request that should be encoded into a string
+ * @buf: Receiving buffer
+ * @size: Size of receiving buffer
+ *
+ * Returns number of characters in string, excluding null terminator
+ */
+static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size)
+{
+ size_t dev_size = 0;
+
+ /* generic request info */
+ dev_size += sprintf(buf + dev_size,
+ "instance : 0x%08lX\n",
+ (unsigned long) request->instance);
+ dev_size += sprintf(buf + dev_size,
+ "size : %d bytes\n", request->user_req.size);
+ dev_size += sprintf(buf + dev_size,
+ "flags : 0x%08lX\n",
+ (unsigned long) request->user_req.flags);
+ dev_size += sprintf(buf + dev_size,
+ "transform : %d\n",
+ (int) request->user_req.transform);
+ dev_size += sprintf(buf + dev_size,
+ "prio : %d\n", request->user_req.transform);
+ dev_size += sprintf(buf + dev_size,
+ "global_alpha : %d\n",
+ (int) request->user_req.global_alpha);
+ dev_size += sprintf(buf + dev_size,
+ "report1 : 0x%08lX\n",
+ (unsigned long) request->user_req.report1);
+ dev_size += sprintf(buf + dev_size,
+ "report2 : 0x%08lX\n",
+ (unsigned long) request->user_req.report2);
+ dev_size += sprintf(buf + dev_size,
+ "request_id : 0x%08lX\n\n",
+ (unsigned long) request->request_id);
+
+ /* src info */
+ dev_size += sprintf(buf + dev_size,
+ "src_img.fmt : %#010x\n",
+ request->user_req.src_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "src_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d, "
+ "offset=%d, len=%d}\n",
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.hwmem_buf_name,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "src_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask.fmt : %#010x\n",
+ request->user_req.src_mask.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.src_mask.buf.type,
+ request->user_req.src_mask.buf.hwmem_buf_name,
+ request->user_req.src_mask.buf.fd,
+ request->user_req.src_mask.buf.offset,
+ request->user_req.src_mask.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.src_mask.width,
+ request->user_req.src_mask.height,
+ request->user_req.src_mask.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "src_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "src_color : 0x%08lX\n\n",
+ (unsigned long) request->user_req.src_color);
+
+ /* bg info */
+ dev_size += sprintf(buf + dev_size,
+ "bg_img.fmt : %#010x\n",
+ request->user_req.bg_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "bg_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.bg_img.buf.type,
+ request->user_req.bg_img.buf.hwmem_buf_name,
+ request->user_req.bg_img.buf.fd,
+ request->user_req.bg_img.buf.offset,
+ request->user_req.bg_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "bg_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.bg_img.width,
+ request->user_req.bg_img.height,
+ request->user_req.bg_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "bg_rect : {x=%d, y=%d, width=%d, height=%d}\n\n",
+ request->user_req.bg_rect.x,
+ request->user_req.bg_rect.y,
+ request->user_req.bg_rect.width,
+ request->user_req.bg_rect.height);
+
+ /* dst info */
+ dev_size += sprintf(buf + dev_size,
+ "dst_img.fmt : %#010x\n",
+ request->user_req.dst_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "dst_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.hwmem_buf_name,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "dst_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "dst_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "dst_clip_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.dst_clip_rect.x,
+ request->user_req.dst_clip_rect.y,
+ request->user_req.dst_clip_rect.width,
+ request->user_req.dst_clip_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "dst_color : 0x%08lX\n\n",
+ (unsigned long) request->user_req.dst_color);
+
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->src_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->src_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->src_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->src_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->src_resolved.file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.file_len : %d\n\n",
+ request->src_resolved.file_len);
+
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.file_len : %d\n\n",
+ request->src_mask_resolved.file_len);
+
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.file_len : %d\n\n",
+ request->dst_resolved.file_len);
+
+ return dev_size;
+}
+
+/**
+ * debugfs_b2r2_blt_request_read() - Implements debugfs read for the latest B2R2 BLT request
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_blt_request_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_size = sprintf_req(&cont->debugfs_latest_request, Buf,
+ sizeof(char) * 4096);
+
+ /* Nothing more to read if we are already past the end */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count))
+ ret = -EINVAL;
+ *f_pos += count;
+ ret = count;
+
+out:
+ if (Buf != NULL)
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_blt_request_fops - File operations for B2R2 request debugfs
+ */
+static const struct file_operations debugfs_b2r2_blt_request_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_blt_request_read,
+};
+
+/**
+ * struct debugfs_reg - Represents a B2R2 node "register"
+ *
+ * @name: Register name
+ * @offset: Offset within the node
+ */
+struct debugfs_reg {
+ const char name[30];
+ u32 offset;
+};
+
+/**
+ * debugfs_node_regs - Array with all the registers in a B2R2 node, for debug
+ */
+static const struct debugfs_reg debugfs_node_regs[] = {
+ {"GROUP0.B2R2_NIP", offsetof(struct b2r2_link_list, GROUP0.B2R2_NIP)},
+ {"GROUP0.B2R2_CIC", offsetof(struct b2r2_link_list, GROUP0.B2R2_CIC)},
+ {"GROUP0.B2R2_INS", offsetof(struct b2r2_link_list, GROUP0.B2R2_INS)},
+ {"GROUP0.B2R2_ACK", offsetof(struct b2r2_link_list, GROUP0.B2R2_ACK)},
+
+ {"GROUP1.B2R2_TBA", offsetof(struct b2r2_link_list, GROUP1.B2R2_TBA)},
+ {"GROUP1.B2R2_TTY", offsetof(struct b2r2_link_list, GROUP1.B2R2_TTY)},
+ {"GROUP1.B2R2_TXY", offsetof(struct b2r2_link_list, GROUP1.B2R2_TXY)},
+ {"GROUP1.B2R2_TSZ", offsetof(struct b2r2_link_list, GROUP1.B2R2_TSZ)},
+
+ {"GROUP2.B2R2_S1CF", offsetof(struct b2r2_link_list, GROUP2.B2R2_S1CF)},
+ {"GROUP2.B2R2_S2CF", offsetof(struct b2r2_link_list, GROUP2.B2R2_S2CF)},
+
+ {"GROUP3.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP3.B2R2_SBA)},
+ {"GROUP3.B2R2_STY", offsetof(struct b2r2_link_list, GROUP3.B2R2_STY)},
+ {"GROUP3.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP3.B2R2_SXY)},
+ {"GROUP3.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP3.B2R2_SSZ)},
+
+ {"GROUP4.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP4.B2R2_SBA)},
+ {"GROUP4.B2R2_STY", offsetof(struct b2r2_link_list, GROUP4.B2R2_STY)},
+ {"GROUP4.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP4.B2R2_SXY)},
+ {"GROUP4.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP4.B2R2_SSZ)},
+
+ {"GROUP5.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP5.B2R2_SBA)},
+ {"GROUP5.B2R2_STY", offsetof(struct b2r2_link_list, GROUP5.B2R2_STY)},
+ {"GROUP5.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP5.B2R2_SXY)},
+ {"GROUP5.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP5.B2R2_SSZ)},
+
+ {"GROUP6.B2R2_CWO", offsetof(struct b2r2_link_list, GROUP6.B2R2_CWO)},
+ {"GROUP6.B2R2_CWS", offsetof(struct b2r2_link_list, GROUP6.B2R2_CWS)},
+
+ {"GROUP7.B2R2_CCO", offsetof(struct b2r2_link_list, GROUP7.B2R2_CCO)},
+ {"GROUP7.B2R2_CML", offsetof(struct b2r2_link_list, GROUP7.B2R2_CML)},
+
+ {"GROUP8.B2R2_FCTL", offsetof(struct b2r2_link_list, GROUP8.B2R2_FCTL)},
+ {"GROUP8.B2R2_PMK", offsetof(struct b2r2_link_list, GROUP8.B2R2_PMK)},
+
+ {"GROUP9.B2R2_RSF", offsetof(struct b2r2_link_list, GROUP9.B2R2_RSF)},
+ {"GROUP9.B2R2_RZI", offsetof(struct b2r2_link_list, GROUP9.B2R2_RZI)},
+ {"GROUP9.B2R2_HFP", offsetof(struct b2r2_link_list, GROUP9.B2R2_HFP)},
+ {"GROUP9.B2R2_VFP", offsetof(struct b2r2_link_list, GROUP9.B2R2_VFP)},
+
+ {"GROUP10.B2R2_RSF", offsetof(struct b2r2_link_list, GROUP10.B2R2_RSF)},
+ {"GROUP10.B2R2_RZI", offsetof(struct b2r2_link_list, GROUP10.B2R2_RZI)},
+ {"GROUP10.B2R2_HFP", offsetof(struct b2r2_link_list, GROUP10.B2R2_HFP)},
+ {"GROUP10.B2R2_VFP", offsetof(struct b2r2_link_list, GROUP10.B2R2_VFP)},
+
+ {"GROUP11.B2R2_FF0", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF0)},
+ {"GROUP11.B2R2_FF1", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF1)},
+ {"GROUP11.B2R2_FF2", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF2)},
+ {"GROUP11.B2R2_FF3", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF3)},
+
+ {"GROUP12.B2R2_KEY1", offsetof(struct b2r2_link_list,
+ GROUP12.B2R2_KEY1)},
+ {"GROUP12.B2R2_KEY2", offsetof(struct b2r2_link_list,
+ GROUP12.B2R2_KEY2)},
+
+ {"GROUP13.B2R2_XYL", offsetof(struct b2r2_link_list, GROUP13.B2R2_XYL)},
+ {"GROUP13.B2R2_XYP", offsetof(struct b2r2_link_list, GROUP13.B2R2_XYP)},
+
+ {"GROUP14.B2R2_SAR", offsetof(struct b2r2_link_list, GROUP14.B2R2_SAR)},
+ {"GROUP14.B2R2_USR", offsetof(struct b2r2_link_list, GROUP14.B2R2_USR)},
+
+ {"GROUP15.B2R2_VMX0", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX0)},
+ {"GROUP15.B2R2_VMX1", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX1)},
+ {"GROUP15.B2R2_VMX2", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX2)},
+ {"GROUP15.B2R2_VMX3", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX3)},
+
+ {"GROUP16.B2R2_VMX0", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX0)},
+ {"GROUP16.B2R2_VMX1", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX1)},
+ {"GROUP16.B2R2_VMX2", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX2)},
+ {"GROUP16.B2R2_VMX3", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX3)},
+};
+
+/**
+ * debugfs_b2r2_blt_stat_read() - Implements debugfs read for B2R2 BLT
+ * statistics
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_blt_stat_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mutex_lock(&cont->stat_lock);
+ dev_size += sprintf(Buf + dev_size, "Added jobs : %lu\n",
+ cont->stat_n_jobs_added);
+ dev_size += sprintf(Buf + dev_size, "Released jobs : %lu\n",
+ cont->stat_n_jobs_released);
+ dev_size += sprintf(Buf + dev_size, "Jobs in report list : %lu\n",
+ cont->stat_n_jobs_in_report_list);
+ dev_size += sprintf(Buf + dev_size, "Clients in open : %lu\n",
+ cont->stat_n_in_open);
+ dev_size += sprintf(Buf + dev_size, "Clients in release : %lu\n",
+ cont->stat_n_in_release);
+ dev_size += sprintf(Buf + dev_size, "Clients in blt : %lu\n",
+ cont->stat_n_in_blt);
+ dev_size += sprintf(Buf + dev_size, " synch : %lu\n",
+ cont->stat_n_in_blt_synch);
+ dev_size += sprintf(Buf + dev_size, " add : %lu\n",
+ cont->stat_n_in_blt_add);
+ dev_size += sprintf(Buf + dev_size, " wait : %lu\n",
+ cont->stat_n_in_blt_wait);
+ dev_size += sprintf(Buf + dev_size, "Clients in synch 0 : %lu\n",
+ cont->stat_n_in_synch_0);
+ dev_size += sprintf(Buf + dev_size, "Clients in synch job : %lu\n",
+ cont->stat_n_in_synch_job);
+ dev_size += sprintf(Buf + dev_size, "Clients in query_cap : %lu\n",
+ cont->stat_n_in_query_cap);
+ mutex_unlock(&cont->stat_lock);
+
+ /* Nothing more to read if we are already past the end */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count))
+ ret = -EINVAL;
+ *f_pos += count;
+ ret = count;
+
+out:
+ if (Buf != NULL)
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_blt_stat_fops - File operations for B2R2 BLT
+ * statistics debugfs
+ */
+static const struct file_operations debugfs_b2r2_blt_stat_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_blt_stat_read,
+};
+#endif
+
+static void init_tmp_bufs(struct b2r2_control *cont)
+{
+ int i = 0;
+
+ for (i = 0; i < (sizeof(cont->tmp_bufs) / sizeof(struct tmp_buf));
+ i++) {
+ cont->tmp_bufs[i].buf.virt_addr = dma_alloc_coherent(
+ cont->dev, MAX_TMP_BUF_SIZE,
+ &cont->tmp_bufs[i].buf.phys_addr, GFP_DMA);
+ if (cont->tmp_bufs[i].buf.virt_addr != NULL)
+ cont->tmp_bufs[i].buf.size = MAX_TMP_BUF_SIZE;
+ else {
+ b2r2_log_err(cont->dev, "%s: Failed to allocate temp "
+ "buffer %i\n", __func__, i);
+ cont->tmp_bufs[i].buf.size = 0;
+ }
+ }
+}
+
+static void destroy_tmp_bufs(struct b2r2_control *cont)
+{
+ int i = 0;
+
+ for (i = 0; i < MAX_TMP_BUFS_NEEDED; i++) {
+ if (cont->tmp_bufs[i].buf.size != 0) {
+ dma_free_coherent(cont->dev,
+ cont->tmp_bufs[i].buf.size,
+ cont->tmp_bufs[i].buf.virt_addr,
+ cont->tmp_bufs[i].buf.phys_addr);
+
+ cont->tmp_bufs[i].buf.size = 0;
+ }
+ }
+}
+
+/**
+ * b2r2_blt_module_init() - Module init function
+ *
+ * Returns 0 if OK else negative error code
+ */
+int b2r2_blt_module_init(struct b2r2_control *cont)
+{
+ int ret;
+
+ mutex_init(&cont->stat_lock);
+
+ /* Register b2r2 driver */
+ cont->miscdev.minor = MISC_DYNAMIC_MINOR;
+ cont->miscdev.name = cont->name;
+ cont->miscdev.fops = &b2r2_blt_fops;
+
+ ret = misc_register(&cont->miscdev);
+ if (ret) {
+ printk(KERN_WARNING "%s: registering misc device fails\n",
+ __func__);
+ goto b2r2_misc_register_fail;
+ }
+
+ cont->dev = cont->miscdev.this_device;
+ dev_set_drvdata(cont->dev, cont);
+
+#ifdef CONFIG_B2R2_GENERIC
+ /* Initialize generic path */
+ b2r2_generic_init(cont);
+#endif
+ /* Initialize node splitter */
+ ret = b2r2_node_split_init(cont);
+ if (ret) {
+ printk(KERN_WARNING "%s: node split init fails\n", __func__);
+ goto b2r2_node_split_init_fail;
+ }
+
+ b2r2_log_info(cont->dev, "%s: device registered\n", __func__);
+
+ /*
+ * FIXME: This should be done before the first request can arrive,
+ * i.e. before misc_register, but it needs the device, which is not
+ * available until after misc_register.
+ */
+ cont->dev->coherent_dma_mask = 0xFFFFFFFF;
+ init_tmp_bufs(cont);
+ ret = b2r2_filters_init(cont);
+ if (ret) {
+ b2r2_log_warn(cont->dev, "%s: failed to init filters\n",
+ __func__);
+ goto b2r2_filter_init_fail;
+ }
+
+ /* Initialize memory allocator */
+ ret = b2r2_mem_init(cont, B2R2_HEAP_SIZE,
+ 4, sizeof(struct b2r2_node));
+ if (ret) {
+ printk(KERN_WARNING "%s: initializing B2R2 memhandler fails\n",
+ __func__);
+ goto b2r2_mem_init_fail;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debug fs */
+ if (cont->debugfs_root_dir) {
+ debugfs_create_file("last_request", 0666,
+ cont->debugfs_root_dir,
+ cont, &debugfs_b2r2_blt_request_fops);
+ debugfs_create_file("stats", 0666,
+ cont->debugfs_root_dir,
+ cont, &debugfs_b2r2_blt_stat_fops);
+ }
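+	/*
+	 * With debugfs mounted (typically at /sys/kernel/debug) these
+	 * entries appear as "last_request" and "stats" under the directory
+	 * that debugfs_root_dir refers to; the exact path depends on where
+	 * that directory was created.
+	 */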
+#endif
+
+ b2r2_ctl[cont->id] = cont;
+ b2r2_log_info(cont->dev, "%s: done\n", __func__);
+
+ return ret;
+
+b2r2_mem_init_fail:
+ b2r2_filters_exit(cont);
+b2r2_filter_init_fail:
+ b2r2_node_split_exit(cont);
+b2r2_node_split_init_fail:
+#ifdef CONFIG_B2R2_GENERIC
+ b2r2_generic_exit(cont);
+#endif
+ misc_deregister(&cont->miscdev);
+b2r2_misc_register_fail:
+ return ret;
+}
+
+/**
+ * b2r2_blt_module_exit() - Module exit function
+ */
+void b2r2_blt_module_exit(struct b2r2_control *cont)
+{
+ if (cont) {
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+#ifdef CONFIG_DEBUG_FS
+ if (cont->debugfs_root_dir) {
+ debugfs_remove_recursive(cont->debugfs_root_dir);
+ cont->debugfs_root_dir = NULL;
+ }
+#endif
+ b2r2_mem_exit(cont);
+ destroy_tmp_bufs(cont);
+ b2r2_ctl[cont->id] = NULL;
+ misc_deregister(&cont->miscdev);
+ b2r2_node_split_exit(cont);
+#if defined(CONFIG_B2R2_GENERIC)
+ b2r2_generic_exit(cont);
+#endif
+ b2r2_filters_exit(cont);
+ }
+}
+
+MODULE_AUTHOR("Robert Fekete <robert.fekete@stericsson.com>");
+MODULE_DESCRIPTION("ST-Ericsson B2R2 Blitter module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/b2r2/b2r2_core.c b/drivers/video/b2r2/b2r2_core.c
new file mode 100644
index 00000000000..629633a7888
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_core.c
@@ -0,0 +1,2819 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 core driver
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/*
+ * TODO: Clock address from platform data
+ * Platform data should have string id instead of numbers
+ * b2r2_remove, some type of runtime problem when kernel hacking
+ * debug features on
+ *
+ * Is there already a priority list in kernel?
+ * Is it possible to handle clock using clock framework?
+ * uTimeOut, use mdelay instead?
+ * Measure performance
+ *
+ * Exchange our home-cooked ref count with kernel kref? See
+ * http://lwn.net/Articles/336224/
+ *
+ * B2R2:
+ * Source fill 2 bug
+ * Check with Symbian?
+ */
+
+/* include file */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_core.h"
+#include "b2r2_global.h"
+#include "b2r2_structures.h"
+#include "b2r2_profiler_api.h"
+#include "b2r2_timing.h"
+#include "b2r2_debug.h"
+
+/**
+ * B2R2_DRIVER_TIMEOUT_VALUE - Busy loop timeout after soft reset
+ */
+#define B2R2_DRIVER_TIMEOUT_VALUE (1500)
+
+/**
+ * B2R2_CLK_FLAG - Value to write into clock reg to turn clock on
+ */
+#define B2R2_CLK_FLAG (0x125)
+
+/**
+ * DEBUG_CHECK_ADDREF_RELEASE - Define this to enable addref / release debug
+ */
+#define DEBUG_CHECK_ADDREF_RELEASE 1
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * HANDLE_TIMEOUTED_JOBS - Define this to check jobs for timeout and cancel them
+ */
+#define HANDLE_TIMEOUTED_JOBS 1
+#endif
+
+/**
+ * B2R2_CLOCK_ALWAYS_ON - Define this to disable power save clock turn off
+ */
+/* #define B2R2_CLOCK_ALWAYS_ON 1 */
+
+/**
+ * START_SENTINEL - Guard value placed at the start of a job to detect overwrites
+ */
+#define START_SENTINEL 0xBABEDEEA
+
+/**
+ * END_SENTINEL - Guard value placed at the end of a job to detect overwrites
+ */
+#define END_SENTINEL 0xDADBDCDD
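+
+/*
+ * The sentinels are written by init_job(), verified on every
+ * internal_job_addref() / internal_job_release(), and overwritten with
+ * 0xFFFFFFFF when the last reference is dropped, so that any later use
+ * of a released job trips the sentinel checks.
+ */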
+
+/**
+ * B2R2_CORE_LOWEST_PRIO - Lowest prio allowed
+ */
+#define B2R2_CORE_LOWEST_PRIO -19
+/**
+ * B2R2_CORE_HIGHEST_PRIO - Highest prio allowed
+ */
+#define B2R2_CORE_HIGHEST_PRIO 20
+
+/**
+ * B2R2_DOMAIN_DISABLE_TIMEOUT - Delay before the B2R2 power domain is turned off after the last disable request
+ */
+#define B2R2_DOMAIN_DISABLE_TIMEOUT (HZ/100)
+
+/**
+ * B2R2_REGULATOR_RETRY_COUNT - Number of times regulator_enable() is retried on -EAGAIN
+ */
+#define B2R2_REGULATOR_RETRY_COUNT 10
+
+/**
+ * B2R2 Hardware defines below
+ */
+
+/* - BLT_AQ_CTL */
+#define B2R2_AQ_Enab (0x80000000)
+#define B2R2_AQ_PRIOR_0 (0x0)
+#define B2R2_AQ_PRIOR_1 (0x1)
+#define B2R2_AQ_PRIOR_2 (0x2)
+#define B2R2_AQ_PRIOR_3 (0x3)
+#define B2R2_AQ_NODE_REPEAT_INT (0x100000)
+#define B2R2_AQ_STOP_INT (0x200000)
+#define B2R2_AQ_LNA_REACH_INT (0x400000)
+#define B2R2_AQ_COMPLETED_INT (0x800000)
+
+/* - BLT_CTL */
+#define B2R2BLT_CTLGLOBAL_soft_reset (0x80000000)
+#define B2R2BLT_CTLStep_By_Step (0x20000000)
+#define B2R2BLT_CTLBig_not_little (0x10000000)
+#define B2R2BLT_CTLMask (0xb0000000)
+#define B2R2BLT_CTLTestMask (0xb0000000)
+#define B2R2BLT_CTLInitialValue (0x0)
+#define B2R2BLT_CTLAccessType (INITIAL_TEST)
+#define B2R2BLT_CTL (0xa00)
+
+/* - BLT_ITS */
+#define B2R2BLT_ITSRLD_ERROR (0x80000000)
+#define B2R2BLT_ITSAQ4_Node_Notif (0x8000000)
+#define B2R2BLT_ITSAQ4_Node_repeat (0x4000000)
+#define B2R2BLT_ITSAQ4_Stopped (0x2000000)
+#define B2R2BLT_ITSAQ4_LNA_Reached (0x1000000)
+#define B2R2BLT_ITSAQ3_Node_Notif (0x800000)
+#define B2R2BLT_ITSAQ3_Node_repeat (0x400000)
+#define B2R2BLT_ITSAQ3_Stopped (0x200000)
+#define B2R2BLT_ITSAQ3_LNA_Reached (0x100000)
+#define B2R2BLT_ITSAQ2_Node_Notif (0x80000)
+#define B2R2BLT_ITSAQ2_Node_repeat (0x40000)
+#define B2R2BLT_ITSAQ2_Stopped (0x20000)
+#define B2R2BLT_ITSAQ2_LNA_Reached (0x10000)
+#define B2R2BLT_ITSAQ1_Node_Notif (0x8000)
+#define B2R2BLT_ITSAQ1_Node_repeat (0x4000)
+#define B2R2BLT_ITSAQ1_Stopped (0x2000)
+#define B2R2BLT_ITSAQ1_LNA_Reached (0x1000)
+#define B2R2BLT_ITSCQ2_Repaced (0x80)
+#define B2R2BLT_ITSCQ2_Node_Notif (0x40)
+#define B2R2BLT_ITSCQ2_retriggered (0x20)
+#define B2R2BLT_ITSCQ2_completed (0x10)
+#define B2R2BLT_ITSCQ1_Repaced (0x8)
+#define B2R2BLT_ITSCQ1_Node_Notif (0x4)
+#define B2R2BLT_ITSCQ1_retriggered (0x2)
+#define B2R2BLT_ITSCQ1_completed (0x1)
+#define B2R2BLT_ITSMask (0x8ffff0ff)
+#define B2R2BLT_ITSTestMask (0x8ffff0ff)
+#define B2R2BLT_ITSInitialValue (0x0)
+#define B2R2BLT_ITSAccessType (INITIAL_TEST)
+#define B2R2BLT_ITS (0xa04)
+
+/* - BLT_STA1 */
+#define B2R2BLT_STA1BDISP_IDLE (0x1)
+#define B2R2BLT_STA1Mask (0x1)
+#define B2R2BLT_STA1TestMask (0x1)
+#define B2R2BLT_STA1InitialValue (0x1)
+#define B2R2BLT_STA1AccessType (INITIAL_TEST)
+#define B2R2BLT_STA1 (0xa08)
+
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+
+/**
+ * struct addref_release - Represents one addref or release. Used
+ * to debug addref / release problems
+ *
+ * @addref: true if this represents an addref else it represents
+ * a release.
+ * @job: The job that was referenced
+ * @caller: The caller of the addref or release
+ * @ref_count: The job reference count after addref / release
+ */
+struct addref_release {
+ bool addref;
+ struct b2r2_core_job *job;
+ const char *caller;
+ int ref_count;
+};
+
+#endif
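+
+/*
+ * When DEBUG_CHECK_ADDREF_RELEASE is defined, the most recent addref /
+ * release events (up to the size of core->ar) are kept in a circular
+ * buffer (see ar_add() below) and dumped via printk_ar() whenever the
+ * sentinel or reference count checks in internal_job_addref() /
+ * internal_job_release() fail.
+ */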
+
+/**
+ * struct b2r2_core - Administration data for B2R2 core
+ *
+ * @lock: Spin lock protecting the b2r2_core structure and the B2R2 HW
+ * @hw: B2R2 registers memory mapped
+ * @b2r2_clock: Control of B2R2 clock
+ * @dev: Device used for logging via dev_... functions
+ *
+ * @prio_queue: Queue of jobs sorted in priority order
+ * @active_jobs: Array containing pointer to zero or one job per queue
+ * @n_active_jobs: Number of active jobs
+ * @jiffies_last_active: jiffies value when the last active job was added
+ * @jiffies_last_irq: jiffies value when the last irq occurred
+ * @timeout_work: Work structure for timeout work
+ *
+ * @next_job_id: Contains the job id that will be assigned to the next
+ * added job.
+ *
+ * @clock_request_count: When non-zero, clock is on
+ * @clock_off_timer: Kernel timer to handle delayed turn off of clock
+ *
+ * @work_queue: Work queue to handle done jobs (callbacks) and timeouts in
+ * non-interrupt context.
+ *
+ * @stat_n_irq: Number of interrupts (statistics)
+ * @stat_n_jobs_added: Number of jobs added (statistics)
+ * @stat_n_jobs_removed: Number of jobs removed (statistics)
+ * @stat_n_jobs_in_prio_list: Number of jobs in prio list (statistics)
+ *
+ * @debugfs_root_dir: Root directory for B2R2 debugfs
+ *
+ * @ar: Circular array of addref / release debug structs
+ * @ar_write: Where next write will occur
+ * @ar_read: First valid place to read. When ar_read == ar_write then
+ * the array is empty.
+ */
+struct b2r2_core {
+ spinlock_t lock;
+
+ struct b2r2_memory_map *hw;
+
+ u8 op_size;
+ u8 ch_size;
+ u8 pg_size;
+ u8 mg_size;
+ u16 min_req_time;
+ int irq;
+
+ char name[16];
+ struct device *dev;
+
+ struct list_head prio_queue;
+
+ struct b2r2_core_job *active_jobs[B2R2_CORE_QUEUE_NO_OF];
+ unsigned long n_active_jobs;
+
+ unsigned long jiffies_last_active;
+ unsigned long jiffies_last_irq;
+#ifdef HANDLE_TIMEOUTED_JOBS
+ struct delayed_work timeout_work;
+#endif
+ int next_job_id;
+
+ unsigned long clock_request_count;
+ struct timer_list clock_off_timer;
+
+ struct workqueue_struct *work_queue;
+
+ /* Statistics */
+ unsigned long stat_n_irq;
+ unsigned long stat_n_jobs_added;
+ unsigned long stat_n_jobs_removed;
+
+ unsigned long stat_n_jobs_in_prio_list;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_core_root_dir;
+ struct dentry *debugfs_regs_dir;
+#endif
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ /* Tracking release bug...*/
+ struct addref_release ar[100];
+ int ar_write;
+ int ar_read;
+#endif
+
+ /* Power management variables */
+ struct mutex domain_lock;
+ struct delayed_work domain_disable_work;
+
+ /*
+ * We need to keep track of both the number of domain_enable/disable()
+ * calls and whether the power was actually turned off, since the
+ * power off is done in a delayed job.
+ */
+ bool domain_enabled;
+ int domain_request_count;
+
+ struct clk *b2r2_clock;
+ struct regulator *b2r2_reg;
+
+ struct b2r2_control *control;
+};
+
+/**
+ * b2r2_core - Quick link to administration data for B2R2
+ */
+static struct b2r2_core *b2r2_core[B2R2_MAX_NBR_DEVICES];
+
+/* Local functions */
+static void check_prio_list(struct b2r2_core *core, bool atomic);
+static void clear_interrupts(struct b2r2_core *core);
+static void trigger_job(struct b2r2_core *core, struct b2r2_core_job *job);
+static void exit_job_list(struct b2r2_core *core,
+ struct list_head *job_list);
+static int get_next_job_id(struct b2r2_core *core);
+static void job_work_function(struct work_struct *ptr);
+static void init_job(struct b2r2_core_job *job);
+static void insert_into_prio_list(struct b2r2_core *core,
+ struct b2r2_core_job *job);
+static struct b2r2_core_job *find_job_in_list(int job_id,
+ struct list_head *list);
+static struct b2r2_core_job *find_job_in_active_jobs(struct b2r2_core *core,
+ int job_id);
+static struct b2r2_core_job *find_tag_in_list(struct b2r2_core *core,
+ int tag, struct list_head *list);
+static struct b2r2_core_job *find_tag_in_active_jobs(struct b2r2_core *core,
+ int tag);
+
+static int domain_enable(struct b2r2_core *core);
+static void domain_disable(struct b2r2_core *core);
+
+static void stop_queue(enum b2r2_core_queue queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+static void printk_regs(struct b2r2_core *core);
+static int hw_reset(struct b2r2_core *core);
+static void timeout_work_function(struct work_struct *ptr);
+#endif
+
+static void reset_hw_timer(struct b2r2_core_job *job);
+static void start_hw_timer(struct b2r2_core_job *job);
+static void stop_hw_timer(struct b2r2_core *core,
+ struct b2r2_core_job *job);
+
+static int init_hw(struct b2r2_core *core);
+static void exit_hw(struct b2r2_core *core);
+
+/* Tracking release bug... */
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+/**
+ * ar_add() - Adds an addref or a release to the array
+ *
+ * @core: The b2r2 core entity
+ * @job: The job that has been referenced
+ * @caller: The caller of addref / release
+ * @addref: true if it is an addref else false for release
+ */
+static void ar_add(struct b2r2_core *core, struct b2r2_core_job *job,
+ const char *caller, bool addref)
+{
+ core->ar[core->ar_write].addref = addref;
+ core->ar[core->ar_write].job = job;
+ core->ar[core->ar_write].caller = caller;
+ core->ar[core->ar_write].ref_count = job->ref_count;
+ core->ar_write = (core->ar_write + 1) %
+ ARRAY_SIZE(core->ar);
+ if (core->ar_write == core->ar_read)
+ core->ar_read = (core->ar_read + 1) %
+ ARRAY_SIZE(core->ar);
+}
+
+/**
+ * sprintf_ar() - Writes all addref / release to a string buffer
+ *
+ * @core: The b2r2 core entity
+ * @buf: Receiving character buffer
+ * @job: Which job to write or NULL for all
+ *
+ * NOTE! No buffer size check!!
+ */
+static char *sprintf_ar(struct b2r2_core *core, char *buf,
+ struct b2r2_core_job *job)
+{
+ int i;
+ int size = 0;
+
+ for (i = core->ar_read; i != core->ar_write;
+ i = (i + 1) % ARRAY_SIZE(core->ar)) {
+ struct addref_release *ar = &core->ar[i];
+ if (!job || job == ar->job)
+ size += sprintf(buf + size,
+ "%s on %p from %s, ref = %d\n",
+ ar->addref ? "addref" : "release",
+ ar->job, ar->caller, ar->ref_count);
+ }
+
+ return buf;
+}
+
+/**
+ * printk_ar() - Writes all addref / release using dev_info
+ *
+ * @core: The b2r2 core entity
+ * @job: Which job to write or NULL for all
+ */
+static void printk_ar(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ int i;
+
+ for (i = core->ar_read; i != core->ar_write;
+ i = (i + 1) % ARRAY_SIZE(core->ar)) {
+ struct addref_release *ar = &core->ar[i];
+ if (!job || job == ar->job)
+ b2r2_log_info(core->dev, "%s on %p from %s,"
+ " ref = %d\n",
+ ar->addref ? "addref" : "release",
+ ar->job, ar->caller, ar->ref_count);
+ }
+}
+#endif
+
+/**
+ * internal_job_addref() - Increments the reference count for a job
+ *
+ * @core: The b2r2 core entity
+ * @job: Which job to increment reference count for
+ * @caller: Name of function calling addref (for debug)
+ *
+ * Note that core->lock _must_ be held
+ */
+static void internal_job_addref(struct b2r2_core *core,
+ struct b2r2_core_job *job, const char *caller)
+{
+ u32 ref_count;
+
+ b2r2_log_info(core->dev, "%s (%p, %p) (from %s)\n",
+ __func__, core, job, caller);
+
+ /* Sanity checks */
+ BUG_ON(job == NULL);
+
+ if (job->start_sentinel != START_SENTINEL ||
+ job->end_sentinel != END_SENTINEL ||
+ job->ref_count == 0 || job->ref_count > 10) {
+ b2r2_log_info(core->dev, "%s: (%p, %p) start=%X end=%X "
+ "ref_count=%d\n", __func__, core, job,
+ job->start_sentinel, job->end_sentinel,
+ job->ref_count);
+
+ /* Something is wrong, print the addref / release array */
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ printk_ar(core, NULL);
+#endif
+ }
+
+
+ BUG_ON(job->start_sentinel != START_SENTINEL);
+ BUG_ON(job->end_sentinel != END_SENTINEL);
+
+ /* Do the actual reference count increment */
+ ref_count = ++job->ref_count;
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ /* Keep track of addref / release */
+ ar_add(core, job, caller, true);
+#endif
+
+ b2r2_log_info(core->dev, "%s called from %s (%p, %p): Ref Count is "
+ "%d\n", __func__, caller, core, job, job->ref_count);
+}
+
+/**
+ * internal_job_release() - Decrements the reference count for a job
+ *
+ * @core: The b2r2 core entity
+ * @job: Which job to decrement reference count for
+ * @caller: Name of function calling release (for debug)
+ *
+ * Returns true if job_release should be called by caller
+ * (reference count reached zero).
+ *
+ * Note that core->lock _must_ be held
+ */
+static bool internal_job_release(struct b2r2_core *core,
+ struct b2r2_core_job *job, const char *caller)
+{
+ u32 ref_count;
+ bool call_release = false;
+
+ /* Sanity checks */
+ BUG_ON(job == NULL);
+
+ b2r2_log_info(core->dev, "%s (%p, %p) (from %s)\n",
+ __func__, core, job, caller);
+
+ if (job->start_sentinel != START_SENTINEL ||
+ job->end_sentinel != END_SENTINEL ||
+ job->ref_count == 0 || job->ref_count > 10) {
+ b2r2_log_info(core->dev, "%s: (%p, %p) start=%X end=%X "
+ "ref_count=%d\n", __func__, core, job,
+ job->start_sentinel, job->end_sentinel,
+ job->ref_count);
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ printk_ar(core, NULL);
+#endif
+ }
+
+ BUG_ON(job->start_sentinel != START_SENTINEL);
+ BUG_ON(job->end_sentinel != END_SENTINEL);
+ BUG_ON(job->ref_count == 0 || job->ref_count > 10);
+
+ /* Do the actual decrement */
+ ref_count = --job->ref_count;
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ ar_add(core, job, caller, false);
+#endif
+ b2r2_log_info(core->dev, "%s called from %s (%p, %p) Ref Count is "
+ "%d\n", __func__, caller, core, job, ref_count);
+
+ if (!ref_count && job->release) {
+ call_release = true;
+ /* Job will now cease to exist */
+ job->start_sentinel = 0xFFFFFFFF;
+ job->end_sentinel = 0xFFFFFFFF;
+ }
+ return call_release;
+}
+
+
+
+/* Exported functions */
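+
+/*
+ * The exported job functions below recover their core instance from the
+ * job itself: job->tag is assumed to hold the owning struct
+ * b2r2_blt_instance, whose control->data points at the struct b2r2_core
+ * the job was submitted to.
+ */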
+
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller)
+{
+ unsigned long flags;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ internal_job_addref(core, job, caller);
+ spin_unlock_irqrestore(&core->lock, flags);
+}
+
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller)
+{
+ unsigned long flags;
+ bool call_release = false;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ call_release = internal_job_release(core, job, caller);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ if (call_release)
+ job->release(job);
+}
+
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+int b2r2_core_job_add(struct b2r2_control *control,
+ struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ struct b2r2_core *core = control->data;
+
+ b2r2_log_info(core->dev, "%s (%p, %p)\n", __func__, control, job);
+
+ /* Enable B2R2 */
+ domain_enable(core);
+
+ spin_lock_irqsave(&core->lock, flags);
+ core->stat_n_jobs_added++;
+
+ /* Initialise internal job data */
+ init_job(job);
+
+ /* Initial reference, should be released by caller of this function */
+ job->ref_count = 1;
+
+ /* Insert job into prio list */
+ insert_into_prio_list(core, job);
+
+ /* Check if we can dispatch job */
+ check_prio_list(core, false);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return 0;
+}
+
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+struct b2r2_core_job *b2r2_core_job_find(struct b2r2_control *control,
+ int job_id)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job;
+ struct b2r2_core *core = control->data;
+
+ b2r2_log_info(core->dev, "%s (%p, %d)\n", __func__, control, job_id);
+
+ spin_lock_irqsave(&core->lock, flags);
+ /* Look through prio queue */
+ job = find_job_in_list(job_id, &core->prio_queue);
+
+ if (!job)
+ job = find_job_in_active_jobs(core, job_id);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return job;
+}
+
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+struct b2r2_core_job *b2r2_core_job_find_first_with_tag(
+ struct b2r2_control *control, int tag)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job;
+ struct b2r2_core *core = control->data;
+
+ b2r2_log_info(core->dev, "%s (%p, %d)\n", __func__, control, tag);
+
+ spin_lock_irqsave(&core->lock, flags);
+ /* Look through prio queue */
+ job = find_tag_in_list(core, tag, &core->prio_queue);
+
+ if (!job)
+ job = find_tag_in_active_jobs(core, tag);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return job;
+}
+
+/**
+ * is_job_done() - Spin lock protected check if job is done
+ *
+ * @job: Job to check
+ *
+ * Returns true if job is done or cancelled
+ *
+ * core->lock must _NOT_ be held when calling this function
+ */
+static bool is_job_done(struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ bool job_is_done;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ job_is_done =
+ job->job_state != B2R2_CORE_JOB_QUEUED &&
+ job->job_state != B2R2_CORE_JOB_RUNNING;
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return job_is_done;
+}
+
+/**
+ * b2r2_core_job_wait() - Waits until a job is done or cancelled
+ *
+ * @job: Job to wait for
+ *
+ * Returns 0 when the job has finished, -ENOENT if the job was never
+ * queued, or a negative error code if the wait was interrupted.
+ *
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+int b2r2_core_job_wait(struct b2r2_core_job *job)
+{
+ int ret = 0;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ b2r2_log_info(core->dev, "%s (%p)\n", __func__, job);
+ /* Check that we have the job */
+ if (job->job_state == B2R2_CORE_JOB_IDLE) {
+ /* Never or not queued */
+ b2r2_log_info(core->dev, "%s: Job not queued\n", __func__);
+ return -ENOENT;
+ }
+
+ /* Wait for the job to be done */
+ ret = wait_event_interruptible(
+ job->event,
+ is_job_done(job));
+
+ if (ret)
+ b2r2_log_warn(core->dev,
+ "%s: wait_event_interruptible returns %d state is %d",
+ __func__, ret, job->job_state);
+ return ret;
+}
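+
+/*
+ * A typical caller sequence, as implied by the reference count comments in
+ * this file: b2r2_core_job_add() (the caller holds the initial reference),
+ * b2r2_core_job_wait() to block until the job is done or cancelled, and
+ * finally b2r2_core_job_release() to drop the caller's reference.
+ */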
+
+/**
+ * cancel_job() - Cancels a job (removes it from prio list or active jobs) and
+ * calls the job callback
+ *
+ * @job: Job to cancel
+ *
+ * Returns true if the job was found and cancelled
+ *
+ * core->lock must be held when calling this function
+ */
+static bool cancel_job(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ bool found_job = false;
+ bool job_was_active = false;
+
+ /* Remove from prio list */
+ if (job->job_state == B2R2_CORE_JOB_QUEUED) {
+ list_del_init(&job->list);
+ found_job = true;
+ }
+
+ /* Remove from active jobs */
+ if (!found_job && core->n_active_jobs > 0) {
+ int i;
+
+		/* Look for the job among the active jobs */
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ if (core->active_jobs[i] == job) {
+ stop_queue((enum b2r2_core_queue)i);
+ stop_hw_timer(core, job);
+ core->active_jobs[i] = NULL;
+ core->n_active_jobs--;
+ found_job = true;
+ job_was_active = true;
+ }
+ }
+ }
+
+ /* Handle done list & callback */
+ if (found_job) {
+ /* Job is canceled */
+ job->job_state = B2R2_CORE_JOB_CANCELED;
+
+ queue_work(core->work_queue, &job->work);
+
+ /* Statistics */
+ if (!job_was_active)
+ core->stat_n_jobs_in_prio_list--;
+
+ }
+
+ return found_job;
+}
+
+/* core->lock _must_ _NOT_ be held when calling this function */
+int b2r2_core_job_cancel(struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ b2r2_log_info(core->dev, "%s (%p) (%d)\n",
+ __func__, job, job->job_state);
+ /* Check that we have the job */
+ if (job->job_state == B2R2_CORE_JOB_IDLE) {
+ /* Never or not queued */
+ b2r2_log_info(core->dev, "%s: Job not queued\n", __func__);
+ return -ENOENT;
+ }
+
+ /* Remove from prio list */
+ spin_lock_irqsave(&core->lock, flags);
+ cancel_job(core, job);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return ret;
+}
+
+/* LOCAL FUNCTIONS BELOW */
+
+/**
+ * domain_disable_work_function() - Delayed work that powers down B2R2
+ *
+ * @work: Work struct (embedded in struct b2r2_core)
+ */
+static void domain_disable_work_function(struct work_struct *work)
+{
+ struct delayed_work *twork = to_delayed_work(work);
+ struct b2r2_core *core = container_of(
+ twork, struct b2r2_core, domain_disable_work);
+
+ if (!mutex_trylock(&core->domain_lock))
+ return;
+
+ if (core->domain_request_count == 0) {
+ exit_hw(core);
+ clk_disable(core->b2r2_clock);
+ regulator_disable(core->b2r2_reg);
+ core->domain_enabled = false;
+ }
+
+ mutex_unlock(&core->domain_lock);
+}
+
+/**
+ * domain_enable()
+ *
+ * @core: The b2r2 core entity
+ */
+static int domain_enable(struct b2r2_core *core)
+{
+ mutex_lock(&core->domain_lock);
+ core->domain_request_count++;
+
+ if (!core->domain_enabled) {
+ int retry = 0;
+ int ret;
+again:
+		/*
+		 * regulator_enable() may sleep and the sleep can be
+		 * interrupted; retry a limited number of times on -EAGAIN.
+		 */
+ ret = regulator_enable(core->b2r2_reg);
+ if ((ret == -EAGAIN) &&
+ ((retry++) < B2R2_REGULATOR_RETRY_COUNT))
+ goto again;
+ else if (ret < 0)
+ goto regulator_enable_failed;
+
+ clk_enable(core->b2r2_clock);
+ if (init_hw(core) < 0)
+ goto init_hw_failed;
+ core->domain_enabled = true;
+ }
+
+ mutex_unlock(&core->domain_lock);
+
+ return 0;
+
+init_hw_failed:
+ b2r2_log_err(core->dev,
+ "%s: Could not initialize hardware!\n", __func__);
+
+ clk_disable(core->b2r2_clock);
+
+ if (regulator_disable(core->b2r2_reg) < 0)
+ b2r2_log_err(core->dev, "%s: regulator_disable failed!\n",
+ __func__);
+
+regulator_enable_failed:
+ core->domain_request_count--;
+ mutex_unlock(&core->domain_lock);
+
+ return -EFAULT;
+}
+
+/**
+ * domain_disable()
+ *
+ * @core: The b2r2 core entity
+ */
+static void domain_disable(struct b2r2_core *core)
+{
+ mutex_lock(&core->domain_lock);
+
+ if (core->domain_request_count == 0) {
+ b2r2_log_err(core->dev,
+ "%s: Unbalanced domain_disable()\n", __func__);
+ } else {
+ core->domain_request_count--;
+
+ /* Cancel any existing work */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+ /* Add a work to disable the power and clock after a delay */
+ queue_delayed_work(core->work_queue, &core->domain_disable_work,
+ B2R2_DOMAIN_DISABLE_TIMEOUT);
+ }
+
+ mutex_unlock(&core->domain_lock);
+}
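+
+/*
+ * Typical power-domain life cycle, as used in this file: b2r2_core_job_add()
+ * calls domain_enable() before queueing a job and job_work_function() calls
+ * domain_disable() when the job is done. The actual power/clock off is
+ * deferred to domain_disable_work_function(), which only turns the domain
+ * off if no new enable request has arrived within
+ * B2R2_DOMAIN_DISABLE_TIMEOUT.
+ */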
+
+/**
+ * stop_queue() - Stops the specified queue.
+ */
+static void stop_queue(enum b2r2_core_queue queue)
+{
+	/*
+	 * TODO: Implement! If this function is not implemented, cancelled jobs
+	 * will still use B2R2, which is a waste of resources. Not stopping jobs
+	 * also skews the hardware timing: the job that the cancelled job
+	 * interrupted (if any) will be billed for the time between the point
+	 * where the job is cancelled and when it actually stops.
+	 */
+}
+
+/**
+ * exit_job_list() - Empties a job queue by canceling the jobs
+ *
+ * @core: The b2r2 core entity
+ * @job_queue: The list of jobs to empty
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void exit_job_list(struct b2r2_core *core,
+ struct list_head *job_queue)
+{
+ while (!list_empty(job_queue)) {
+ struct b2r2_core_job *job =
+ list_entry(job_queue->next,
+ struct b2r2_core_job,
+ list);
+ /* Add reference to prevent job from disappearing
+ in the middle of our work, released below */
+ internal_job_addref(core, job, __func__);
+
+ cancel_job(core, job);
+
+ /* Matching release to addref above */
+ internal_job_release(core, job, __func__);
+
+ }
+}
+
+/**
+ * get_next_job_id() - Return a new job id.
+ *
+ * @core: The b2r2 core entity
+ */
+static int get_next_job_id(struct b2r2_core *core)
+{
+ int job_id;
+
+ if (core->next_job_id < 1)
+ core->next_job_id = 1;
+ job_id = core->next_job_id++;
+
+ return job_id;
+}
+
+/**
+ * job_work_function() - Work queue function that calls callback(s) and
+ * checks if B2R2 can accept a new job
+ *
+ * @ptr: Pointer to work struct (embedded in struct b2r2_core_job)
+ */
+static void job_work_function(struct work_struct *ptr)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job =
+ container_of(ptr, struct b2r2_core_job, work);
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ /* Disable B2R2 */
+ domain_disable(core);
+
+ /* Release resources */
+ if (job->release_resources)
+ job->release_resources(job, false);
+
+ spin_lock_irqsave(&core->lock, flags);
+
+ /* Dispatch a new job if possible */
+ check_prio_list(core, false);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ /* Tell the client */
+ if (job->callback)
+ job->callback(job);
+
+ /* Drop our reference, matches the
+ addref in handle_queue_event or b2r2_core_job_cancel */
+ b2r2_core_job_release(job, __func__);
+}
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * timeout_work_function() - Work queue function that checks for
+ *                           timed-out jobs. B2R2 might silently refuse
+ *                           to execute some jobs, e.g. SRC2 fill
+ *
+ * @ptr: Pointer to work struct (embedded in struct b2r2_core)
+ *
+ */
+static void timeout_work_function(struct work_struct *ptr)
+{
+ unsigned long flags;
+ struct list_head job_list;
+ struct delayed_work *twork = to_delayed_work(ptr);
+ struct b2r2_core *core = container_of(twork, struct b2r2_core,
+ timeout_work);
+
+ INIT_LIST_HEAD(&job_list);
+
+ /* Cancel all jobs if too long time since last irq */
+ spin_lock_irqsave(&core->lock, flags);
+ if (core->n_active_jobs > 0) {
+ unsigned long diff =
+ (long) jiffies - (long) core->jiffies_last_irq;
+ if (diff > HZ/2) {
+			/* Active jobs and more than half a second since last irq! */
+ int i;
+
+			/* Look for timed-out jobs and put them in a tmp list.
+			 * It's important that the application queues are
+			 * killed in order of decreasing priority */
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job =
+ core->active_jobs[i];
+
+ if (job) {
+ stop_hw_timer(core, job);
+ core->active_jobs[i] = NULL;
+ core->n_active_jobs--;
+ list_add_tail(&job->list, &job_list);
+ }
+ }
+
+ /* Print the B2R2 register and reset B2R2 */
+ printk_regs(core);
+ hw_reset(core);
+ }
+ }
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ /* Handle timeout:ed jobs */
+ spin_lock_irqsave(&core->lock, flags);
+ while (!list_empty(&job_list)) {
+ struct b2r2_core_job *job =
+ list_entry(job_list.next,
+ struct b2r2_core_job,
+ list);
+
+ b2r2_log_warn(core->dev, "%s: Job timeout\n", __func__);
+
+ list_del_init(&job->list);
+
+ /* Job is cancelled */
+ job->job_state = B2R2_CORE_JOB_CANCELED;
+
+ /* Handle done */
+ wake_up_interruptible(&job->event);
+
+ /* Job callbacks handled via work queue */
+ queue_work(core->work_queue, &job->work);
+ }
+
+ /* Requeue delayed work */
+ if (core->n_active_jobs)
+ queue_delayed_work(
+ core->work_queue,
+ &core->timeout_work, HZ/2);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+}
+#endif
+
+/**
+ * reset_hw_timer() - Resets a job's hardware timer. Must be called before
+ * the timer is used.
+ *
+ * @job: Pointer to job struct
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void reset_hw_timer(struct b2r2_core_job *job)
+{
+ job->nsec_active_in_hw = 0;
+}
+
+/**
+ * start_hw_timer() - Times how long a job spends in hardware (active).
+ *                    Should be called immediately before starting the
+ * hardware.
+ *
+ * @job: Pointer to job struct
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void start_hw_timer(struct b2r2_core_job *job)
+{
+ job->hw_start_time = b2r2_get_curr_nsec();
+}
+
+/**
+ * stop_hw_timer() - Times how long a job spends in hardware (active).
+ *                    Should be called immediately after the hardware has
+ * finished.
+ *
+ * @core: The b2r2 core entity
+ * @job: Pointer to job struct
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void stop_hw_timer(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+	/*
+	 * Assumes only app queues are used, which is the case right now.
+	 *
+	 * Not 100% accurate. When a higher prio job interrupts a lower prio
+	 * job it does so after the current node of the low prio job has
+	 * finished. Currently we cannot sense when the actual switch takes
+	 * place, so the time reported for a job that interrupts a lower prio
+	 * job will on average include the time it takes to process half a
+	 * node of the lower prio job in addition to the time it takes to
+	 * process the job's own nodes. This could possibly be solved by
+	 * adding node notifications, but that would involve a significant
+	 * amount of work and consume system resources due to the extra
+	 * interrupts.
+	 *
+	 * If a job takes more than ~2s (absolute time, including idling in
+	 * the hardware) the state of the hardware timer will be corrupted
+	 * and it will not report valid values until B2R2 becomes idle (no
+	 * active jobs on any queues). The maximum length could possibly be
+	 * increased by using 64 bit integers.
+	 */
+
+ int i;
+
+ u32 stop_time_raw = b2r2_get_curr_nsec();
+	/*
+	 * We'll add an offset to all positions in time to make the current
+	 * time equal to 0xFFFFFFFF. This way we can compare positions in time
+	 * to each other without having to worry about wrapping (as long as
+	 * all positions in time are in the past).
+	 */
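+	/*
+	 * Illustration (hypothetical numbers): with stop_time_raw = 0x10 and
+	 * hw_start_time = 0xFFFFFFF0 (the counter wrapped in between),
+	 * time_pos_offset = 0xFFFFFFEF, so the offset start time becomes
+	 * 0xFFFFFFDF and nsec_in_hw = 0xFFFFFFFF - 0xFFFFFFDF = 0x20, i.e.
+	 * the 32 ns that really elapsed despite the wrap.
+	 */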
+ u32 stop_time = 0xFFFFFFFF;
+ u32 time_pos_offset = 0xFFFFFFFF - stop_time_raw;
+ u32 nsec_in_hw = stop_time - (job->hw_start_time + time_pos_offset);
+ job->nsec_active_in_hw += (s32)nsec_in_hw;
+
+	/*
+	 * Check if we have delayed the start of higher prio jobs. Can happen
+	 * since queue switching can only be done between nodes.
+	 */
+ for (i = (int)job->queue - 1; i >= (int)B2R2_CORE_QUEUE_AQ1; i--) {
+ struct b2r2_core_job *queue_active_job = core->active_jobs[i];
+ if (NULL == queue_active_job)
+ continue;
+
+ queue_active_job->hw_start_time = stop_time_raw;
+ }
+
+ /* Check if the job has stolen time from lower prio jobs */
+ for (i = (int)job->queue + 1; i < B2R2_NUM_APPLICATIONS_QUEUES; i++) {
+ struct b2r2_core_job *queue_active_job = core->active_jobs[i];
+ u32 queue_active_job_hw_start_time;
+
+ if (NULL == queue_active_job)
+ continue;
+
+ queue_active_job_hw_start_time =
+ queue_active_job->hw_start_time +
+ time_pos_offset;
+
+ if (queue_active_job_hw_start_time < stop_time) {
+ u32 queue_active_job_nsec_in_hw = stop_time -
+ queue_active_job_hw_start_time;
+ u32 num_stolen_nsec = min(queue_active_job_nsec_in_hw,
+ nsec_in_hw);
+
+ queue_active_job->nsec_active_in_hw -= (s32)num_stolen_nsec;
+
+ nsec_in_hw -= num_stolen_nsec;
+ stop_time -= num_stolen_nsec;
+ }
+
+ if (0 == nsec_in_hw)
+ break;
+ }
+}
+
+/**
+ * init_job() - Initializes a job structure from filled in client data.
+ * Reference count will be set to 1
+ *
+ * @job: Job to initialize
+ */
+static void init_job(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ job->start_sentinel = START_SENTINEL;
+ job->end_sentinel = END_SENTINEL;
+
+ /* Get a job id*/
+ job->job_id = get_next_job_id(core);
+
+ /* Job is idle, never queued */
+ job->job_state = B2R2_CORE_JOB_IDLE;
+
+ /* Initialize internal data */
+ INIT_LIST_HEAD(&job->list);
+ init_waitqueue_head(&job->event);
+ INIT_WORK(&job->work, job_work_function);
+
+ /* Map given prio to B2R2 queues */
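+	/*
+	 * After clamping to [B2R2_CORE_LOWEST_PRIO, B2R2_CORE_HIGHEST_PRIO],
+	 * the mapping below is: prio 11..20 -> AQ1 (HW prio 3),
+	 * 1..10 -> AQ2 (prio 2), -9..0 -> AQ3 (prio 1) and
+	 * -19..-10 -> AQ4 (prio 0).
+	 */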
+ if (job->prio < B2R2_CORE_LOWEST_PRIO)
+ job->prio = B2R2_CORE_LOWEST_PRIO;
+ else if (job->prio > B2R2_CORE_HIGHEST_PRIO)
+ job->prio = B2R2_CORE_HIGHEST_PRIO;
+
+ if (job->prio > 10) {
+ job->queue = B2R2_CORE_QUEUE_AQ1;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ1_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_3);
+ } else if (job->prio > 0) {
+ job->queue = B2R2_CORE_QUEUE_AQ2;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ2_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_2);
+ } else if (job->prio > -10) {
+ job->queue = B2R2_CORE_QUEUE_AQ3;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ3_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_1);
+ } else {
+ job->queue = B2R2_CORE_QUEUE_AQ4;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ4_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_0);
+ }
+}
+
+/**
+ * clear_interrupts() - Disables all interrupts
+ *
+ * core->lock _must_ be held
+ */
+static void clear_interrupts(struct b2r2_core *core)
+{
+ writel(0x0, &core->hw->BLT_ITM0);
+ writel(0x0, &core->hw->BLT_ITM1);
+ writel(0x0, &core->hw->BLT_ITM2);
+ writel(0x0, &core->hw->BLT_ITM3);
+}
+
+/**
+ * insert_into_prio_list() - Inserts the job into the sorted list of jobs.
+ * The list is sorted by priority.
+ *
+ * @core: The b2r2 core entity
+ * @job: Job to insert
+ *
+ * core->lock _must_ be held
+ */
+static void insert_into_prio_list(struct b2r2_core *core,
+ struct b2r2_core_job *job)
+{
+ /* Ref count is increased when job put in list,
+ should be released when job is removed from list */
+ internal_job_addref(core, job, __func__);
+
+ core->stat_n_jobs_in_prio_list++;
+
+ /* Sort in the job */
+ if (list_empty(&core->prio_queue))
+ list_add_tail(&job->list, &core->prio_queue);
+ else {
+ struct b2r2_core_job *first_job = list_entry(
+ core->prio_queue.next,
+ struct b2r2_core_job, list);
+ struct b2r2_core_job *last_job = list_entry(
+ core->prio_queue.prev,
+ struct b2r2_core_job, list);
+
+ if (job->prio > first_job->prio)
+ list_add(&job->list, &core->prio_queue);
+ else if (job->prio <= last_job->prio)
+ list_add_tail(&job->list, &core->prio_queue);
+ else {
+ /* We need to find where to put it */
+ struct list_head *ptr;
+
+ list_for_each(ptr, &core->prio_queue) {
+ struct b2r2_core_job *list_job =
+ list_entry(ptr, struct b2r2_core_job,
+ list);
+ if (job->prio > list_job->prio) {
+ list_add_tail(&job->list,
+ &list_job->list);
+ break;
+ }
+ }
+ }
+ }
+ /* The job is now queued */
+ job->job_state = B2R2_CORE_JOB_QUEUED;
+}
+
+/**
+ * check_prio_list() - Checks if the first job(s) in the prio list can
+ * be dispatched to B2R2
+ *
+ * @core: The b2r2 core entity
+ * @atomic: true if in atomic context (i.e. interrupt context)
+ *
+ * core->lock _must_ be held
+ */
+static void check_prio_list(struct b2r2_core *core, bool atomic)
+{
+ bool dispatched_job;
+ int n_dispatched = 0;
+ struct b2r2_core_job *job;
+
+ do {
+ dispatched_job = false;
+
+ /* Do we have anything in our prio list? */
+ if (list_empty(&core->prio_queue))
+ break;
+
+ /* The first job waiting */
+ job = list_first_entry(&core->prio_queue,
+ struct b2r2_core_job, list);
+
+ /* Is the B2R2 queue available? */
+ if (core->active_jobs[job->queue] != NULL)
+ break;
+
+ /* Can we acquire resources? */
+ if (!job->acquire_resources ||
+ job->acquire_resources(job, atomic) == 0) {
+ /* Ok to dispatch job */
+
+ /* Remove from list */
+ list_del_init(&job->list);
+
+ /* The job is now active */
+ core->active_jobs[job->queue] = job;
+ core->n_active_jobs++;
+ job->jiffies = jiffies;
+ core->jiffies_last_active = jiffies;
+
+ /* Kick off B2R2 */
+ trigger_job(core, job);
+ dispatched_job = true;
+ n_dispatched++;
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ /* Check in one half second if it hangs */
+ queue_delayed_work(core->work_queue,
+ &core->timeout_work, HZ/2);
+#endif
+ } else {
+ /* No resources */
+ if (!atomic && core->n_active_jobs == 0) {
+ b2r2_log_warn(core->dev,
+ "%s: No resource", __func__);
+ cancel_job(core, job);
+ }
+ }
+ } while (dispatched_job);
+
+ core->stat_n_jobs_in_prio_list -= n_dispatched;
+}
+
+/**
+ * find_job_in_list() - Finds job with job_id in list
+ *
+ * @jobid: Job id to find
+ * @list: List to find job id in
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock _must_ be held
+ */
+static struct b2r2_core_job *find_job_in_list(int job_id,
+ struct list_head *list)
+{
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ struct b2r2_core_job *job = list_entry(
+ ptr, struct b2r2_core_job, list);
+ if (job->job_id == job_id) {
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) job->tag;
+ struct b2r2_core *core = instance->control->data;
+ /* Increase reference count, should be released by
+ the caller of b2r2_core_job_find */
+ internal_job_addref(core, job, __func__);
+ return job;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * find_job_in_active_jobs() - Finds job in active job queues
+ *
+ * @core: The b2r2 core entity
+ * @job_id: Job id to find
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock _must_ be held
+ */
+static struct b2r2_core_job *find_job_in_active_jobs(struct b2r2_core *core,
+ int job_id)
+{
+ int i;
+ struct b2r2_core_job *found_job = NULL;
+
+ if (core->n_active_jobs) {
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job = core->active_jobs[i];
+
+ if (job && job->job_id == job_id) {
+ internal_job_addref(core, job, __func__);
+ found_job = job;
+ break;
+ }
+ }
+ }
+ return found_job;
+}
+
+/**
+ * find_tag_in_list() - Finds first job with tag in list
+ *
+ * @tag: Tag to find
+ * @list: List to find job id in
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock must be held
+ */
+static struct b2r2_core_job *find_tag_in_list(struct b2r2_core *core,
+ int tag, struct list_head *list)
+{
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ struct b2r2_core_job *job =
+ list_entry(ptr, struct b2r2_core_job, list);
+ if (job->tag == tag) {
+ /* Increase reference count, should be released by
+ the caller of b2r2_core_job_find */
+ internal_job_addref(core, job, __func__);
+ return job;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * find_tag_in_active_jobs() - Finds job with tag in active job queues
+ *
+ * @tag: Tag to find
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock must be held
+ */
+static struct b2r2_core_job *find_tag_in_active_jobs(struct b2r2_core *core,
+ int tag)
+{
+ int i;
+ struct b2r2_core_job *found_job = NULL;
+
+ if (core->n_active_jobs) {
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job = core->active_jobs[i];
+
+ if (job && job->tag == tag) {
+ internal_job_addref(core, job, __func__);
+ found_job = job;
+ break;
+ }
+ }
+ }
+ return found_job;
+}
+
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * hw_reset() - Resets B2R2 hardware
+ *
+ * core->lock must be held
+ */
+static int hw_reset(struct b2r2_core *core)
+{
+ u32 uTimeOut = B2R2_DRIVER_TIMEOUT_VALUE;
+
+ /* Tell B2R2 to reset */
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
+ writel(0x00000000, &core->hw->BLT_CTL);
+
+ b2r2_log_info(core->dev, "wait for B2R2 to be idle..\n");
+
+	/* Busy-wait for B2R2 to become idle, bounded by a loop counter */
+ while ((uTimeOut > 0) &&
+ ((readl(&core->hw->BLT_STA1) &
+ B2R2BLT_STA1BDISP_IDLE) == 0x0))
+ uTimeOut--;
+
+ if (uTimeOut == 0) {
+ b2r2_log_warn(core->dev,
+ "error-> after software reset B2R2 is not idle\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+
+}
+#endif
+
+/**
+ * trigger_job() - Put job in B2R2 HW queue
+ *
+ * @job: Job to trigger
+ *
+ * core->lock must be held
+ */
+static void trigger_job(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ /* Debug prints */
+ b2r2_log_info(core->dev, "queue 0x%x\n", job->queue);
+ b2r2_log_info(core->dev, "BLT TRIG_IP 0x%x (first node)\n",
+ job->first_node_address);
+ b2r2_log_info(core->dev, "BLT LNA_CTL 0x%x (last node)\n",
+ job->last_node_address);
+ b2r2_log_info(core->dev, "BLT TRIG_CTL 0x%x\n", job->control);
+ b2r2_log_info(core->dev, "BLT PACE_CTL 0x%x\n", job->pace_control);
+
+ reset_hw_timer(job);
+ job->job_state = B2R2_CORE_JOB_RUNNING;
+
+ /* Enable interrupt */
+ writel(readl(&core->hw->BLT_ITM0) | job->interrupt_context,
+ &core->hw->BLT_ITM0);
+
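+	/*
+	 * Configure the source (S1-S3) and target (T) memory plugs. The
+	 * opcode, chunk, message and page sizes are taken from struct
+	 * b2r2_core (op_size, ch_size, mg_size, pg_size, min_req_time) and
+	 * clamped with min_t()/max_t() to values the hardware accepts
+	 * before being written.
+	 */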
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS1_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS1_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS1_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS1_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS2_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS2_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS2_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS2_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS3_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS3_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS3_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS3_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGT_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGT_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGT_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGT_PGZ);
+
+ /* B2R2 kicks off when LNA is written, LNA write must be last! */
+ switch (job->queue) {
+ case B2R2_CORE_QUEUE_CQ1:
+ writel(job->first_node_address, &core->hw->BLT_CQ1_TRIG_IP);
+ writel(job->control, &core->hw->BLT_CQ1_TRIG_CTL);
+ writel(job->pace_control, &core->hw->BLT_CQ1_PACE_CTL);
+ break;
+
+ case B2R2_CORE_QUEUE_CQ2:
+ writel(job->first_node_address, &core->hw->BLT_CQ2_TRIG_IP);
+ writel(job->control, &core->hw->BLT_CQ2_TRIG_CTL);
+ writel(job->pace_control, &core->hw->BLT_CQ2_PACE_CTL);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ1:
+ writel(job->control, &core->hw->BLT_AQ1_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ1_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ1_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ2:
+ writel(job->control, &core->hw->BLT_AQ2_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ2_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ2_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ3:
+ writel(job->control, &core->hw->BLT_AQ3_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ3_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ3_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ4:
+ writel(job->control, &core->hw->BLT_AQ4_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ4_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ4_LNA);
+ break;
+
+	/* Handle the default case: unknown queue, nothing to trigger */
+ default:
+ break;
+
+ } /* end switch */
+
+}
+
+/**
+ * handle_queue_event() - Handles interrupt event for specified B2R2 queue
+ *
+ * @queue: Queue to handle event for
+ *
+ * core->lock must be held
+ */
+static void handle_queue_event(struct b2r2_core *core,
+ enum b2r2_core_queue queue)
+{
+ struct b2r2_core_job *job;
+
+ job = core->active_jobs[queue];
+ if (job) {
+ if (job->job_state != B2R2_CORE_JOB_RUNNING)
+			/* The job should be running here; anything else
+			   is a severe error. TBD */
+ b2r2_log_warn(core->dev,
+ "%s: Job is not running", __func__);
+
+ stop_hw_timer(core, job);
+
+ /* Remove from queue */
+ BUG_ON(core->n_active_jobs == 0);
+ core->active_jobs[queue] = NULL;
+ core->n_active_jobs--;
+ }
+
+ if (!job) {
+ /* No job, error? */
+ b2r2_log_warn(core->dev, "%s: No job", __func__);
+ return;
+ }
+
+
+ /* Atomic context release resources, release resources will
+ be called again later from process context (work queue) */
+ if (job->release_resources)
+ job->release_resources(job, true);
+
+ /* Job is done */
+ job->job_state = B2R2_CORE_JOB_DONE;
+
+ /* Handle done */
+ wake_up_interruptible(&job->event);
+
+ /* Dispatch to work queue to handle callbacks */
+ queue_work(core->work_queue, &job->work);
+}
+
+/**
+ * process_events() - Handles interrupt events
+ *
+ * @status: Contents of the B2R2 ITS register
+ */
+static void process_events(struct b2r2_core *core, u32 status)
+{
+ u32 mask = 0xF;
+ u32 disable_itm_mask = 0;
+
+ b2r2_log_info(core->dev, "Enters process_events\n");
+ b2r2_log_info(core->dev, "status 0x%x\n", status);
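+	/*
+	 * BLT_ITS bit layout (see the B2R2BLT_ITS* defines above): bits 0-3
+	 * are CQ1 events, 4-7 CQ2, 12-15 AQ1, 16-19 AQ2, 20-23 AQ3 and
+	 * 24-27 AQ4; the shifts below walk the mask through these groups.
+	 */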
+
+ /* Composition queue 1 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_CQ1);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Composition queue 2 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_CQ2);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 8;
+
+ /* Application queue 1 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ1);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 2 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ2);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 3 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ3);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 4 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ4);
+ disable_itm_mask |= mask;
+ }
+
+ /* Clear received interrupt flags */
+ writel(status, &core->hw->BLT_ITS);
+ /* Disable handled interrupts */
+ writel(readl(&core->hw->BLT_ITM0) & ~disable_itm_mask,
+ &core->hw->BLT_ITM0);
+
+ b2r2_log_info(core->dev, "Returns process_events\n");
+}
+
+/**
+ * b2r2_irq_handler() - B2R2 interrupt handler
+ *
+ * @irq: Interrupt number (not used)
+ * @x: The b2r2 core entity (passed as dev_id)
+ */
+static irqreturn_t b2r2_irq_handler(int irq, void *x)
+{
+ unsigned long flags;
+	struct b2r2_core *core = (struct b2r2_core *) x;
+
+	/* Spin lock is needed in the irq handler (SMP) */
+ spin_lock_irqsave(&core->lock, flags);
+
+ /* Make sure that we have a clock */
+
+ /* Remember time for last irq (for timeout mgmt) */
+ core->jiffies_last_irq = jiffies;
+ core->stat_n_irq++;
+
+ /* Handle the interrupt(s) */
+ process_events(core, readl(&core->hw->BLT_ITS));
+
+ /* Check if we can dispatch new jobs */
+ check_prio_list(core, true);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * struct debugfs_reg - Represents one B2R2 register in debugfs
+ *
+ * @name: Register name
+ * @offset: Byte offset in B2R2 for register
+ */
+struct debugfs_reg {
+ const char name[30];
+ u32 offset;
+};
+
+/**
+ * debugfs_regs - Array of B2R2 debugfs registers
+ */
+static const struct debugfs_reg debugfs_regs[] = {
+ {"BLT_SSBA17", offsetof(struct b2r2_memory_map, BLT_SSBA17)},
+ {"BLT_SSBA18", offsetof(struct b2r2_memory_map, BLT_SSBA18)},
+ {"BLT_SSBA19", offsetof(struct b2r2_memory_map, BLT_SSBA19)},
+ {"BLT_SSBA20", offsetof(struct b2r2_memory_map, BLT_SSBA20)},
+ {"BLT_SSBA21", offsetof(struct b2r2_memory_map, BLT_SSBA21)},
+ {"BLT_SSBA22", offsetof(struct b2r2_memory_map, BLT_SSBA22)},
+ {"BLT_SSBA23", offsetof(struct b2r2_memory_map, BLT_SSBA23)},
+ {"BLT_SSBA24", offsetof(struct b2r2_memory_map, BLT_SSBA24)},
+ {"BLT_STBA5", offsetof(struct b2r2_memory_map, BLT_STBA5)},
+ {"BLT_STBA6", offsetof(struct b2r2_memory_map, BLT_STBA6)},
+ {"BLT_STBA7", offsetof(struct b2r2_memory_map, BLT_STBA7)},
+ {"BLT_STBA8", offsetof(struct b2r2_memory_map, BLT_STBA8)},
+ {"BLT_CTL", offsetof(struct b2r2_memory_map, BLT_CTL)},
+ {"BLT_ITS", offsetof(struct b2r2_memory_map, BLT_ITS)},
+ {"BLT_STA1", offsetof(struct b2r2_memory_map, BLT_STA1)},
+ {"BLT_SSBA1", offsetof(struct b2r2_memory_map, BLT_SSBA1)},
+ {"BLT_SSBA2", offsetof(struct b2r2_memory_map, BLT_SSBA2)},
+ {"BLT_SSBA3", offsetof(struct b2r2_memory_map, BLT_SSBA3)},
+ {"BLT_SSBA4", offsetof(struct b2r2_memory_map, BLT_SSBA4)},
+ {"BLT_SSBA5", offsetof(struct b2r2_memory_map, BLT_SSBA5)},
+ {"BLT_SSBA6", offsetof(struct b2r2_memory_map, BLT_SSBA6)},
+ {"BLT_SSBA7", offsetof(struct b2r2_memory_map, BLT_SSBA7)},
+ {"BLT_SSBA8", offsetof(struct b2r2_memory_map, BLT_SSBA8)},
+ {"BLT_STBA1", offsetof(struct b2r2_memory_map, BLT_STBA1)},
+ {"BLT_STBA2", offsetof(struct b2r2_memory_map, BLT_STBA2)},
+ {"BLT_STBA3", offsetof(struct b2r2_memory_map, BLT_STBA3)},
+ {"BLT_STBA4", offsetof(struct b2r2_memory_map, BLT_STBA4)},
+ {"BLT_CQ1_TRIG_IP", offsetof(struct b2r2_memory_map, BLT_CQ1_TRIG_IP)},
+ {"BLT_CQ1_TRIG_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ1_TRIG_CTL)},
+ {"BLT_CQ1_PACE_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ1_PACE_CTL)},
+ {"BLT_CQ1_IP", offsetof(struct b2r2_memory_map, BLT_CQ1_IP)},
+ {"BLT_CQ2_TRIG_IP", offsetof(struct b2r2_memory_map, BLT_CQ2_TRIG_IP)},
+ {"BLT_CQ2_TRIG_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ2_TRIG_CTL)},
+ {"BLT_CQ2_PACE_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ2_PACE_CTL)},
+ {"BLT_CQ2_IP", offsetof(struct b2r2_memory_map, BLT_CQ2_IP)},
+ {"BLT_AQ1_CTL", offsetof(struct b2r2_memory_map, BLT_AQ1_CTL)},
+ {"BLT_AQ1_IP", offsetof(struct b2r2_memory_map, BLT_AQ1_IP)},
+ {"BLT_AQ1_LNA", offsetof(struct b2r2_memory_map, BLT_AQ1_LNA)},
+ {"BLT_AQ1_STA", offsetof(struct b2r2_memory_map, BLT_AQ1_STA)},
+ {"BLT_AQ2_CTL", offsetof(struct b2r2_memory_map, BLT_AQ2_CTL)},
+ {"BLT_AQ2_IP", offsetof(struct b2r2_memory_map, BLT_AQ2_IP)},
+ {"BLT_AQ2_LNA", offsetof(struct b2r2_memory_map, BLT_AQ2_LNA)},
+ {"BLT_AQ2_STA", offsetof(struct b2r2_memory_map, BLT_AQ2_STA)},
+ {"BLT_AQ3_CTL", offsetof(struct b2r2_memory_map, BLT_AQ3_CTL)},
+ {"BLT_AQ3_IP", offsetof(struct b2r2_memory_map, BLT_AQ3_IP)},
+ {"BLT_AQ3_LNA", offsetof(struct b2r2_memory_map, BLT_AQ3_LNA)},
+ {"BLT_AQ3_STA", offsetof(struct b2r2_memory_map, BLT_AQ3_STA)},
+ {"BLT_AQ4_CTL", offsetof(struct b2r2_memory_map, BLT_AQ4_CTL)},
+ {"BLT_AQ4_IP", offsetof(struct b2r2_memory_map, BLT_AQ4_IP)},
+ {"BLT_AQ4_LNA", offsetof(struct b2r2_memory_map, BLT_AQ4_LNA)},
+ {"BLT_AQ4_STA", offsetof(struct b2r2_memory_map, BLT_AQ4_STA)},
+ {"BLT_SSBA9", offsetof(struct b2r2_memory_map, BLT_SSBA9)},
+ {"BLT_SSBA10", offsetof(struct b2r2_memory_map, BLT_SSBA10)},
+ {"BLT_SSBA11", offsetof(struct b2r2_memory_map, BLT_SSBA11)},
+ {"BLT_SSBA12", offsetof(struct b2r2_memory_map, BLT_SSBA12)},
+ {"BLT_SSBA13", offsetof(struct b2r2_memory_map, BLT_SSBA13)},
+ {"BLT_SSBA14", offsetof(struct b2r2_memory_map, BLT_SSBA14)},
+ {"BLT_SSBA15", offsetof(struct b2r2_memory_map, BLT_SSBA15)},
+ {"BLT_SSBA16", offsetof(struct b2r2_memory_map, BLT_SSBA16)},
+ {"BLT_SGA1", offsetof(struct b2r2_memory_map, BLT_SGA1)},
+ {"BLT_SGA2", offsetof(struct b2r2_memory_map, BLT_SGA2)},
+ {"BLT_ITM0", offsetof(struct b2r2_memory_map, BLT_ITM0)},
+ {"BLT_ITM1", offsetof(struct b2r2_memory_map, BLT_ITM1)},
+ {"BLT_ITM2", offsetof(struct b2r2_memory_map, BLT_ITM2)},
+ {"BLT_ITM3", offsetof(struct b2r2_memory_map, BLT_ITM3)},
+ {"BLT_DFV2", offsetof(struct b2r2_memory_map, BLT_DFV2)},
+ {"BLT_DFV1", offsetof(struct b2r2_memory_map, BLT_DFV1)},
+ {"BLT_PRI", offsetof(struct b2r2_memory_map, BLT_PRI)},
+ {"PLUGS1_OP2", offsetof(struct b2r2_memory_map, PLUGS1_OP2)},
+ {"PLUGS1_CHZ", offsetof(struct b2r2_memory_map, PLUGS1_CHZ)},
+ {"PLUGS1_MSZ", offsetof(struct b2r2_memory_map, PLUGS1_MSZ)},
+ {"PLUGS1_PGZ", offsetof(struct b2r2_memory_map, PLUGS1_PGZ)},
+ {"PLUGS2_OP2", offsetof(struct b2r2_memory_map, PLUGS2_OP2)},
+ {"PLUGS2_CHZ", offsetof(struct b2r2_memory_map, PLUGS2_CHZ)},
+ {"PLUGS2_MSZ", offsetof(struct b2r2_memory_map, PLUGS2_MSZ)},
+ {"PLUGS2_PGZ", offsetof(struct b2r2_memory_map, PLUGS2_PGZ)},
+ {"PLUGS3_OP2", offsetof(struct b2r2_memory_map, PLUGS3_OP2)},
+ {"PLUGS3_CHZ", offsetof(struct b2r2_memory_map, PLUGS3_CHZ)},
+ {"PLUGS3_MSZ", offsetof(struct b2r2_memory_map, PLUGS3_MSZ)},
+ {"PLUGS3_PGZ", offsetof(struct b2r2_memory_map, PLUGS3_PGZ)},
+ {"PLUGT_OP2", offsetof(struct b2r2_memory_map, PLUGT_OP2)},
+ {"PLUGT_CHZ", offsetof(struct b2r2_memory_map, PLUGT_CHZ)},
+ {"PLUGT_MSZ", offsetof(struct b2r2_memory_map, PLUGT_MSZ)},
+ {"PLUGT_PGZ", offsetof(struct b2r2_memory_map, PLUGT_PGZ)},
+ {"BLT_NIP", offsetof(struct b2r2_memory_map, BLT_NIP)},
+ {"BLT_CIC", offsetof(struct b2r2_memory_map, BLT_CIC)},
+ {"BLT_INS", offsetof(struct b2r2_memory_map, BLT_INS)},
+ {"BLT_ACK", offsetof(struct b2r2_memory_map, BLT_ACK)},
+ {"BLT_TBA", offsetof(struct b2r2_memory_map, BLT_TBA)},
+ {"BLT_TTY", offsetof(struct b2r2_memory_map, BLT_TTY)},
+ {"BLT_TXY", offsetof(struct b2r2_memory_map, BLT_TXY)},
+ {"BLT_TSZ", offsetof(struct b2r2_memory_map, BLT_TSZ)},
+ {"BLT_S1CF", offsetof(struct b2r2_memory_map, BLT_S1CF)},
+ {"BLT_S2CF", offsetof(struct b2r2_memory_map, BLT_S2CF)},
+ {"BLT_S1BA", offsetof(struct b2r2_memory_map, BLT_S1BA)},
+ {"BLT_S1TY", offsetof(struct b2r2_memory_map, BLT_S1TY)},
+ {"BLT_S1XY", offsetof(struct b2r2_memory_map, BLT_S1XY)},
+ {"BLT_S2BA", offsetof(struct b2r2_memory_map, BLT_S2BA)},
+ {"BLT_S2TY", offsetof(struct b2r2_memory_map, BLT_S2TY)},
+ {"BLT_S2XY", offsetof(struct b2r2_memory_map, BLT_S2XY)},
+ {"BLT_S2SZ", offsetof(struct b2r2_memory_map, BLT_S2SZ)},
+ {"BLT_S3BA", offsetof(struct b2r2_memory_map, BLT_S3BA)},
+ {"BLT_S3TY", offsetof(struct b2r2_memory_map, BLT_S3TY)},
+ {"BLT_S3XY", offsetof(struct b2r2_memory_map, BLT_S3XY)},
+ {"BLT_S3SZ", offsetof(struct b2r2_memory_map, BLT_S3SZ)},
+ {"BLT_CWO", offsetof(struct b2r2_memory_map, BLT_CWO)},
+ {"BLT_CWS", offsetof(struct b2r2_memory_map, BLT_CWS)},
+ {"BLT_CCO", offsetof(struct b2r2_memory_map, BLT_CCO)},
+ {"BLT_CML", offsetof(struct b2r2_memory_map, BLT_CML)},
+ {"BLT_FCTL", offsetof(struct b2r2_memory_map, BLT_FCTL)},
+ {"BLT_PMK", offsetof(struct b2r2_memory_map, BLT_PMK)},
+ {"BLT_RSF", offsetof(struct b2r2_memory_map, BLT_RSF)},
+ {"BLT_RZI", offsetof(struct b2r2_memory_map, BLT_RZI)},
+ {"BLT_HFP", offsetof(struct b2r2_memory_map, BLT_HFP)},
+ {"BLT_VFP", offsetof(struct b2r2_memory_map, BLT_VFP)},
+ {"BLT_Y_RSF", offsetof(struct b2r2_memory_map, BLT_Y_RSF)},
+ {"BLT_Y_RZI", offsetof(struct b2r2_memory_map, BLT_Y_RZI)},
+ {"BLT_Y_HFP", offsetof(struct b2r2_memory_map, BLT_Y_HFP)},
+ {"BLT_Y_VFP", offsetof(struct b2r2_memory_map, BLT_Y_VFP)},
+ {"BLT_KEY1", offsetof(struct b2r2_memory_map, BLT_KEY1)},
+ {"BLT_KEY2", offsetof(struct b2r2_memory_map, BLT_KEY2)},
+ {"BLT_SAR", offsetof(struct b2r2_memory_map, BLT_SAR)},
+ {"BLT_USR", offsetof(struct b2r2_memory_map, BLT_USR)},
+ {"BLT_IVMX0", offsetof(struct b2r2_memory_map, BLT_IVMX0)},
+ {"BLT_IVMX1", offsetof(struct b2r2_memory_map, BLT_IVMX1)},
+ {"BLT_IVMX2", offsetof(struct b2r2_memory_map, BLT_IVMX2)},
+ {"BLT_IVMX3", offsetof(struct b2r2_memory_map, BLT_IVMX3)},
+ {"BLT_OVMX0", offsetof(struct b2r2_memory_map, BLT_OVMX0)},
+ {"BLT_OVMX1", offsetof(struct b2r2_memory_map, BLT_OVMX1)},
+ {"BLT_OVMX2", offsetof(struct b2r2_memory_map, BLT_OVMX2)},
+ {"BLT_OVMX3", offsetof(struct b2r2_memory_map, BLT_OVMX3)},
+ {"BLT_VC1R", offsetof(struct b2r2_memory_map, BLT_VC1R)},
+ {"BLT_Y_HFC0", offsetof(struct b2r2_memory_map, BLT_Y_HFC0)},
+ {"BLT_Y_HFC1", offsetof(struct b2r2_memory_map, BLT_Y_HFC1)},
+ {"BLT_Y_HFC2", offsetof(struct b2r2_memory_map, BLT_Y_HFC2)},
+ {"BLT_Y_HFC3", offsetof(struct b2r2_memory_map, BLT_Y_HFC3)},
+ {"BLT_Y_HFC4", offsetof(struct b2r2_memory_map, BLT_Y_HFC4)},
+ {"BLT_Y_HFC5", offsetof(struct b2r2_memory_map, BLT_Y_HFC5)},
+ {"BLT_Y_HFC6", offsetof(struct b2r2_memory_map, BLT_Y_HFC6)},
+ {"BLT_Y_HFC7", offsetof(struct b2r2_memory_map, BLT_Y_HFC7)},
+ {"BLT_Y_HFC8", offsetof(struct b2r2_memory_map, BLT_Y_HFC8)},
+ {"BLT_Y_HFC9", offsetof(struct b2r2_memory_map, BLT_Y_HFC9)},
+ {"BLT_Y_HFC10", offsetof(struct b2r2_memory_map, BLT_Y_HFC10)},
+ {"BLT_Y_HFC11", offsetof(struct b2r2_memory_map, BLT_Y_HFC11)},
+ {"BLT_Y_HFC12", offsetof(struct b2r2_memory_map, BLT_Y_HFC12)},
+ {"BLT_Y_HFC13", offsetof(struct b2r2_memory_map, BLT_Y_HFC13)},
+ {"BLT_Y_HFC14", offsetof(struct b2r2_memory_map, BLT_Y_HFC14)},
+ {"BLT_Y_HFC15", offsetof(struct b2r2_memory_map, BLT_Y_HFC15)},
+ {"BLT_Y_VFC0", offsetof(struct b2r2_memory_map, BLT_Y_VFC0)},
+ {"BLT_Y_VFC1", offsetof(struct b2r2_memory_map, BLT_Y_VFC1)},
+ {"BLT_Y_VFC2", offsetof(struct b2r2_memory_map, BLT_Y_VFC2)},
+ {"BLT_Y_VFC3", offsetof(struct b2r2_memory_map, BLT_Y_VFC3)},
+ {"BLT_Y_VFC4", offsetof(struct b2r2_memory_map, BLT_Y_VFC4)},
+ {"BLT_Y_VFC5", offsetof(struct b2r2_memory_map, BLT_Y_VFC5)},
+ {"BLT_Y_VFC6", offsetof(struct b2r2_memory_map, BLT_Y_VFC6)},
+ {"BLT_Y_VFC7", offsetof(struct b2r2_memory_map, BLT_Y_VFC7)},
+ {"BLT_Y_VFC8", offsetof(struct b2r2_memory_map, BLT_Y_VFC8)},
+ {"BLT_Y_VFC9", offsetof(struct b2r2_memory_map, BLT_Y_VFC9)},
+ {"BLT_HFC0", offsetof(struct b2r2_memory_map, BLT_HFC0)},
+ {"BLT_HFC1", offsetof(struct b2r2_memory_map, BLT_HFC1)},
+ {"BLT_HFC2", offsetof(struct b2r2_memory_map, BLT_HFC2)},
+ {"BLT_HFC3", offsetof(struct b2r2_memory_map, BLT_HFC3)},
+ {"BLT_HFC4", offsetof(struct b2r2_memory_map, BLT_HFC4)},
+ {"BLT_HFC5", offsetof(struct b2r2_memory_map, BLT_HFC5)},
+ {"BLT_HFC6", offsetof(struct b2r2_memory_map, BLT_HFC6)},
+ {"BLT_HFC7", offsetof(struct b2r2_memory_map, BLT_HFC7)},
+ {"BLT_HFC8", offsetof(struct b2r2_memory_map, BLT_HFC8)},
+ {"BLT_HFC9", offsetof(struct b2r2_memory_map, BLT_HFC9)},
+ {"BLT_HFC10", offsetof(struct b2r2_memory_map, BLT_HFC10)},
+ {"BLT_HFC11", offsetof(struct b2r2_memory_map, BLT_HFC11)},
+ {"BLT_HFC12", offsetof(struct b2r2_memory_map, BLT_HFC12)},
+ {"BLT_HFC13", offsetof(struct b2r2_memory_map, BLT_HFC13)},
+ {"BLT_HFC14", offsetof(struct b2r2_memory_map, BLT_HFC14)},
+ {"BLT_HFC15", offsetof(struct b2r2_memory_map, BLT_HFC15)},
+ {"BLT_VFC0", offsetof(struct b2r2_memory_map, BLT_VFC0)},
+ {"BLT_VFC1", offsetof(struct b2r2_memory_map, BLT_VFC1)},
+ {"BLT_VFC2", offsetof(struct b2r2_memory_map, BLT_VFC2)},
+ {"BLT_VFC3", offsetof(struct b2r2_memory_map, BLT_VFC3)},
+ {"BLT_VFC4", offsetof(struct b2r2_memory_map, BLT_VFC4)},
+ {"BLT_VFC5", offsetof(struct b2r2_memory_map, BLT_VFC5)},
+ {"BLT_VFC6", offsetof(struct b2r2_memory_map, BLT_VFC6)},
+ {"BLT_VFC7", offsetof(struct b2r2_memory_map, BLT_VFC7)},
+ {"BLT_VFC8", offsetof(struct b2r2_memory_map, BLT_VFC8)},
+ {"BLT_VFC9", offsetof(struct b2r2_memory_map, BLT_VFC9)},
+};
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * printk_regs() - Print B2R2 registers to printk
+ */
+static void printk_regs(struct b2r2_core *core)
+{
+#ifdef CONFIG_B2R2_DEBUG
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ unsigned long value = readl(
+ (unsigned long *) (((u8 *) core->hw) +
+ debugfs_regs[i].offset));
+ b2r2_log_regdump(core->dev, "%s: %08lX\n",
+ debugfs_regs[i].name,
+ value);
+ }
+#endif
+}
+#endif
+
+/**
+ * debugfs_b2r2_reg_read() - Implements debugfs read for B2R2 register
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_reg_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size;
+ int ret = 0;
+ unsigned long value;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Read from B2R2 */
+ value = readl((unsigned long *)
+ filp->f_dentry->d_inode->i_private);
+
+ /* Build the string */
+ dev_size = sprintf(Buf, "%8lX\n", value);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ /* Return it to user space */
+	if (copy_to_user(buf, Buf, count)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+ if (Buf != NULL)
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_reg_write() - Implements debugfs write for B2R2 register
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_reg_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ char Buf[80];
+ u32 reg_value;
+ int ret = 0;
+
+ /* Adjust count */
+ if (count >= sizeof(Buf))
+ count = sizeof(Buf) - 1;
+ /* Get it from user space */
+ if (copy_from_user(Buf, buf, count))
+ return -EINVAL;
+ Buf[count] = 0;
+ /* Convert from hex string */
+ if (sscanf(Buf, "%8lX", (unsigned long *) &reg_value) != 1)
+ return -EINVAL;
+
+ writel(reg_value, (u32 *)
+ filp->f_dentry->d_inode->i_private);
+
+ *f_pos += count;
+ ret = count;
+
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_reg_fops() - File operations for B2R2 register debugfs
+ */
+static const struct file_operations debugfs_b2r2_reg_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_reg_read,
+ .write = debugfs_b2r2_reg_write,
+};
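
For reference, the offset/count handling in the read helper above (clamp count against the length of the generated string, copy_to_user(), advance *f_pos) is the same pattern that simple_read_from_buffer() from <linux/fs.h> implements. A minimal sketch of an equivalent single-register read, assuming the register address is kept in i_private exactly as in debugfs_b2r2_reg_read(); the helper name is hypothetical and not part of this patch:

	/* Sketch only: roughly equivalent to debugfs_b2r2_reg_read() */
	static ssize_t b2r2_reg_read_sketch(struct file *filp, char __user *buf,
			size_t count, loff_t *f_pos)
	{
		char tmp[16];
		unsigned long value = readl((unsigned long *)
				filp->f_dentry->d_inode->i_private);
		int len = scnprintf(tmp, sizeof(tmp), "%8lX\n", value);

		/* Clamps count, copies to user space and updates *f_pos */
		return simple_read_from_buffer(buf, count, f_pos, tmp, len);
	}
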
+
+/**
+ * debugfs_b2r2_regs_read() - Implements debugfs read for B2R2 register dump
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_regs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ int i;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Build a giant string containing all registers */
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ unsigned long value =
+ readl((u32 *) (((u8 *)
+ filp->f_dentry->d_inode->i_private) +
+ debugfs_regs[i].offset));
+ dev_size += sprintf(Buf + dev_size, "%s: %08lX\n",
+ debugfs_regs[i].name,
+ value);
+ }
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, Buf, count)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+ if (Buf != NULL)
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_regs_fops() - File operations for B2R2 register dump debugfs
+ */
+static const struct file_operations debugfs_b2r2_regs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_regs_read,
+};
+
+/**
+ * debugfs_b2r2_stat_read() - Implements debugfs read for B2R2 statistics
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_stat_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ int i = 0;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Build a string containing all statistics */
+ dev_size += sprintf(Buf + dev_size, "Interrupts : %lu\n",
+ core->stat_n_irq);
+ dev_size += sprintf(Buf + dev_size, "Added jobs : %lu\n",
+ core->stat_n_jobs_added);
+ dev_size += sprintf(Buf + dev_size, "Removed jobs : %lu\n",
+ core->stat_n_jobs_removed);
+ dev_size += sprintf(Buf + dev_size, "Jobs in prio list : %lu\n",
+ core->stat_n_jobs_in_prio_list);
+ dev_size += sprintf(Buf + dev_size, "Active jobs : %lu\n",
+ core->n_active_jobs);
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++)
+ dev_size += sprintf(Buf + dev_size,
+ " Job in queue %d : 0x%08lx\n",
+ i, (unsigned long) core->active_jobs[i]);
+ dev_size += sprintf(Buf + dev_size, "Clock requests : %lu\n",
+ core->clock_request_count);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, Buf, count)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+ if (Buf != NULL)
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_stat_fops() - File operations for B2R2 statistics debugfs
+ */
+static const struct file_operations debugfs_b2r2_stat_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_stat_read,
+};
+
+
+/**
+ * debugfs_b2r2_clock_read() - Implements debugfs read for
+ * PMU B2R2 clock register
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_clock_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+	/* 10-character hex number + newline + string terminator */
+ char Buf[10+2];
+ size_t dev_size;
+ int ret = 0;
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
+
+ unsigned long value = clk_get_rate(core->b2r2_clock);
+
+ dev_size = sprintf(Buf, "%#010lx\n", value);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, Buf, count)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_clock_write() - Implements debugfs write for
+ * PMU B2R2 clock register
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_clock_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ char Buf[80];
+ u32 reg_value;
+ int ret = 0;
+
+ if (count >= sizeof(Buf))
+ count = sizeof(Buf) - 1;
+ if (copy_from_user(Buf, buf, count))
+ return -EINVAL;
+ Buf[count] = 0;
+ if (sscanf(Buf, "%8lX", (unsigned long *) &reg_value) != 1)
+ return -EINVAL;
+
+	/* not working yet */
+	/* clk_set_rate(b2r2_core.b2r2_clock, (unsigned long) reg_value); */
+
+ *f_pos += count;
+ ret = count;
+
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_clock_fops() - File operations for PMU B2R2 clock debugfs
+ */
+static const struct file_operations debugfs_b2r2_clock_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_clock_read,
+ .write = debugfs_b2r2_clock_write,
+};
+
+#endif
+
+/**
+ * init_hw() - B2R2 hardware reset and initialization
+ *
+ * @core: B2R2 core entity
+ *
+ * 1) Register the interrupt handler.
+ *
+ * 2) Map the B2R2 registers.
+ *
+ * 3) To reset the B2R2 hardware, write B2R2BLT_CTLGLOBAL_soft_reset to the
+ *    B2R2 control register and then poll the B2R2 status register for the
+ *    B2R2BLT_STA1BDISP_IDLE flag.
+ *
+ * 4) Wait for the B2R2 hardware to become idle (bounded by a timeout rather
+ *    than an endless while loop).
+ *
+ * 5) Reset the driver status.
+ *
+ * 6) Recover from any error without any leaks.
+ */
+static int init_hw(struct b2r2_core *core)
+{
+ int result = 0;
+ u32 uTimeOut = B2R2_DRIVER_TIMEOUT_VALUE;
+
+ /* Put B2R2 into reset */
+ clear_interrupts(core);
+
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
+
+ /* Set up interrupt handler */
+ result = request_irq(core->irq, b2r2_irq_handler, 0,
+ "b2r2-interrupt", core);
+ if (result) {
+ b2r2_log_err(core->dev,
+ "%s: failed to register IRQ for B2R2\n", __func__);
+ goto b2r2_init_request_irq_failed;
+ }
+
+ b2r2_log_info(core->dev, "do a global reset..\n");
+
+ /* Release reset */
+ writel(0x00000000, &core->hw->BLT_CTL);
+
+ b2r2_log_info(core->dev, "wait for B2R2 to be idle..\n");
+
+ /** Wait for B2R2 to be idle (on a timeout rather than while loop) */
+ while ((uTimeOut > 0) &&
+ ((readl(&core->hw->BLT_STA1) &
+ B2R2BLT_STA1BDISP_IDLE) == 0x0))
+ uTimeOut--;
+ if (uTimeOut == 0) {
+ b2r2_log_err(core->dev,
+ "%s: B2R2 not idle after SW reset\n", __func__);
+ result = -EAGAIN;
+ goto b2r2_core_init_hw_timeout;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debug fs files for register access */
+ if (core->debugfs_core_root_dir && !core->debugfs_regs_dir) {
+ int i;
+ core->debugfs_regs_dir = debugfs_create_dir("regs",
+ core->debugfs_core_root_dir);
+ debugfs_create_file("all", 0666, core->debugfs_regs_dir,
+ (void *)core->hw, &debugfs_b2r2_regs_fops);
+ /* Create debugfs entries for all static registers */
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++)
+ debugfs_create_file(debugfs_regs[i].name, 0666,
+ core->debugfs_regs_dir,
+ (void *)(((u8 *) core->hw) +
+ debugfs_regs[i].offset),
+ &debugfs_b2r2_reg_fops);
+ }
+#endif
+
+ b2r2_log_info(core->dev, "%s ended..\n", __func__);
+ return result;
+
+/** Recover from any error without any leaks */
+b2r2_core_init_hw_timeout:
+ /** Free B2R2 interrupt handler */
+ free_irq(core->irq, core);
+
+b2r2_init_request_irq_failed:
+ if (core->hw)
+ iounmap(core->hw);
+ core->hw = NULL;
+
+ return result;
+}
+
+
+/**
+ * exit_hw() - B2R2 Hardware exit
+ *
+ * core->lock _must_ NOT be held
+ */
+static void exit_hw(struct b2r2_core *core)
+{
+ unsigned long flags;
+
+ b2r2_log_info(core->dev, "%s started..\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Unregister our debugfs entries */
+ if (core->debugfs_regs_dir) {
+ debugfs_remove_recursive(core->debugfs_regs_dir);
+ core->debugfs_regs_dir = NULL;
+ }
+#endif
+ b2r2_log_debug(core->dev, "%s: locking core->lock\n", __func__);
+ spin_lock_irqsave(&core->lock, flags);
+
+ /* Cancel all pending jobs */
+ b2r2_log_debug(core->dev, "%s: canceling pending jobs\n", __func__);
+ exit_job_list(core, &core->prio_queue);
+
+ /* Soft reset B2R2 (Close all DMA,
+ reset all state to idle, reset regs)*/
+ b2r2_log_debug(core->dev, "%s: putting b2r2 in reset\n", __func__);
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
+
+ b2r2_log_debug(core->dev, "%s: clearing interrupts\n", __func__);
+ clear_interrupts(core);
+
+ /** Free B2R2 interrupt handler */
+ b2r2_log_debug(core->dev, "%s: freeing interrupt handler\n", __func__);
+ free_irq(core->irq, core);
+
+ b2r2_log_debug(core->dev, "%s: unlocking core->lock\n", __func__);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ b2r2_log_info(core->dev, "%s ended...\n", __func__);
+}
+
+/**
+ * b2r2_probe() - This routine loads the B2R2 core driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ struct b2r2_core *core;
+ struct b2r2_control *control;
+
+ BUG_ON(pdev == NULL);
+ BUG_ON(pdev->id < 0 || pdev->id >= B2R2_MAX_NBR_DEVICES);
+
+ core = kzalloc(sizeof(*core), GFP_KERNEL);
+ if (!core) {
+ dev_err(&pdev->dev, "b2r2 core alloc failed\n");
+ ret = -EINVAL;
+ goto b2r2_probe_core_alloc_fail;
+ }
+
+ core->dev = &pdev->dev;
+ dev_set_drvdata(core->dev, core);
+ if (pdev->id)
+ snprintf(core->name, sizeof(core->name), "b2r2_%d", pdev->id);
+ else
+ snprintf(core->name, sizeof(core->name), "b2r2");
+
+ dev_info(&pdev->dev, "init started.\n");
+
+ /* Init spin locks */
+ spin_lock_init(&core->lock);
+
+ /* Init job queues */
+ INIT_LIST_HEAD(&core->prio_queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ /* Create work queue for callbacks & timeout */
+ INIT_DELAYED_WORK(&core->timeout_work, timeout_work_function);
+#endif
+
+ /* Work queue for callbacks and timeout management */
+ core->work_queue = create_workqueue("B2R2");
+ if (!core->work_queue) {
+ ret = -ENOMEM;
+ goto b2r2_probe_no_work_queue;
+ }
+
+ /* Get the clock for B2R2 */
+ core->b2r2_clock = clk_get(core->dev, "b2r2");
+ if (IS_ERR(core->b2r2_clock)) {
+ ret = PTR_ERR(core->b2r2_clock);
+ dev_err(&pdev->dev, "clk_get b2r2 failed\n");
+ goto b2r2_probe_no_clk;
+ }
+
+ /* Get the B2R2 regulator */
+ core->b2r2_reg = regulator_get(core->dev, "vsupply");
+ if (IS_ERR(core->b2r2_reg)) {
+ ret = PTR_ERR(core->b2r2_reg);
+ dev_err(&pdev->dev, "regulator_get vsupply failed "
+ "(dev_name=%s)\n", dev_name(core->dev));
+ goto b2r2_probe_no_reg;
+ }
+
+ /* Init power management */
+ mutex_init(&core->domain_lock);
+ INIT_DELAYED_WORK_DEFERRABLE(&core->domain_disable_work,
+ domain_disable_work_function);
+ core->domain_enabled = false;
+
+ /* Map B2R2 into kernel virtual memory space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		ret = -EINVAL;
+		goto b2r2_probe_no_res;
+	}
+
+ /* Hook up irq */
+ core->irq = platform_get_irq(pdev, 0);
+ if (core->irq <= 0) {
+ dev_err(&pdev->dev, "%s: Failed to request irq (irq=%d)\n",
+ __func__, core->irq);
+		ret = -EINVAL;
+		goto b2r2_failed_irq_get;
+ }
+
+ core->hw = (struct b2r2_memory_map *) ioremap(res->start,
+ res->end - res->start + 1);
+ if (core->hw == NULL) {
+ dev_err(&pdev->dev, "%s: ioremap failed\n", __func__);
+ ret = -ENOMEM;
+ goto b2r2_probe_ioremap_failed;
+ }
+
+ dev_dbg(core->dev, "b2r2 structure address %p\n", core->hw);
+
+ control = kzalloc(sizeof(*control), GFP_KERNEL);
+ if (!control) {
+ dev_err(&pdev->dev, "b2r2 control alloc failed\n");
+ ret = -EINVAL;
+ goto b2r2_probe_control_alloc_fail;
+ }
+
+ control->miscdev.parent = core->dev;
+ control->data = (void *)core;
+ control->id = pdev->id;
+ control->dev = &pdev->dev; /* Temporary device */
+ snprintf(control->name, sizeof(control->name), "%s_blt", core->name);
+
+ core->op_size = B2R2_PLUG_OPCODE_SIZE_DEFAULT;
+ core->ch_size = B2R2_PLUG_CHUNK_SIZE_DEFAULT;
+ core->pg_size = B2R2_PLUG_PAGE_SIZE_DEFAULT;
+ core->mg_size = B2R2_PLUG_MESSAGE_SIZE_DEFAULT;
+ core->min_req_time = 0;
+
+#ifdef CONFIG_DEBUG_FS
+ core->debugfs_root_dir = debugfs_create_dir(core->name, NULL);
+ core->debugfs_core_root_dir = debugfs_create_dir("core",
+ core->debugfs_root_dir);
+ debugfs_create_file("stats", 0666, core->debugfs_core_root_dir,
+ core, &debugfs_b2r2_stat_fops);
+ debugfs_create_file("clock", 0666, core->debugfs_core_root_dir,
+ core, &debugfs_b2r2_clock_fops);
+ debugfs_create_u8("op_size", 0666, core->debugfs_core_root_dir,
+ &core->op_size);
+ debugfs_create_u8("ch_size", 0666, core->debugfs_core_root_dir,
+ &core->ch_size);
+ debugfs_create_u8("pg_size", 0666, core->debugfs_core_root_dir,
+ &core->pg_size);
+ debugfs_create_u8("mg_size", 0666, core->debugfs_core_root_dir,
+ &core->mg_size);
+ debugfs_create_u16("min_req_time", 0666, core->debugfs_core_root_dir,
+ &core->min_req_time);
+
+ control->debugfs_debug_root_dir = debugfs_create_dir("debug",
+ core->debugfs_root_dir);
+ control->mem_heap.debugfs_root_dir = debugfs_create_dir("mem",
+ core->debugfs_root_dir);
+ control->debugfs_root_dir = debugfs_create_dir("blt",
+ core->debugfs_root_dir);
+#endif
+
+ ret = b2r2_debug_init(control);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "b2r2_debug_init failed\n");
+ goto b2r2_probe_debug_init_failed;
+ }
+
+	/* Initialize b2r2_blt module. FIXME: Module of its own
+	   or perhaps a dedicated module init c file? */
+ ret = b2r2_blt_module_init(control);
+ if (ret < 0) {
+ b2r2_log_err(&pdev->dev, "b2r2_blt_module_init failed\n");
+ goto b2r2_probe_blt_init_fail;
+ }
+
+ core->control = control;
+ b2r2_core[pdev->id] = core;
+ dev_info(&pdev->dev, "init done.\n");
+
+ return ret;
+
+/** Recover from any error if something fails */
+b2r2_probe_blt_init_fail:
+ kfree(control);
+b2r2_probe_control_alloc_fail:
+b2r2_probe_ioremap_failed:
+b2r2_failed_irq_get:
+b2r2_probe_no_res:
+ regulator_put(core->b2r2_reg);
+b2r2_probe_no_reg:
+ clk_put(core->b2r2_clock);
+b2r2_probe_no_clk:
+ destroy_workqueue(core->work_queue);
+ core->work_queue = NULL;
+b2r2_probe_no_work_queue:
+ b2r2_debug_exit();
+b2r2_probe_debug_init_failed:
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(core->debugfs_root_dir);
+#endif
+ kfree(core);
+b2r2_probe_core_alloc_fail:
+ dev_info(&pdev->dev, "init done with errors.\n");
+
+ return ret;
+}
+
+
+
+/**
+ * b2r2_remove - This routine unloads b2r2 driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_remove(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(&pdev->dev, "%s: Started\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(core->debugfs_root_dir);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks) */
+ flush_workqueue(core->work_queue);
+
+ /* Exit b2r2 blt module */
+ b2r2_blt_module_exit(core->control);
+
+ kfree(core->control);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ cancel_delayed_work(&core->timeout_work);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks for
+ cancelled jobs) */
+ flush_workqueue(core->work_queue);
+
+ /* Make sure the power is turned off */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+ /** Unmap B2R2 registers */
+ b2r2_log_info(&pdev->dev, "unmap b2r2 registers..\n");
+ if (core->hw) {
+ iounmap(core->hw);
+ core->hw = NULL;
+ }
+
+ destroy_workqueue(core->work_queue);
+
+ spin_lock_irqsave(&core->lock, flags);
+ core->work_queue = NULL;
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ /* Return the clock */
+ clk_put(core->b2r2_clock);
+ regulator_put(core->b2r2_reg);
+
+ core->dev = NULL;
+ kfree(core);
+ b2r2_core[pdev->id] = NULL;
+
+ b2r2_debug_exit();
+
+ b2r2_log_info(&pdev->dev, "%s: Ended\n", __func__);
+
+ return 0;
+}
+/**
+ * b2r2_suspend() - This routine puts the B2R2 device into suspend state.
+ * @pdev: platform device.
+ * @state: power management state
+ *
+ * This routine stores the current state of the b2r2 device and puts it into
+ * suspend state.
+ *
+ */
+int b2r2_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(core->dev, "%s\n", __func__);
+
+ /* Flush B2R2 work queue (call all callbacks) */
+ flush_workqueue(core->work_queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ cancel_delayed_work(&core->timeout_work);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks for
+ cancelled jobs) */
+ flush_workqueue(core->work_queue);
+
+ /* Make sure power is turned off */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+ return 0;
+}
+
+
+/**
+ * b2r2_resume() - This routine resumes the B2R2 device from suspend state.
+ * @pdev: platform device.
+ *
+ * This routine restores the state of the b2r2 device when it resumes.
+ *
+ */
+int b2r2_resume(struct platform_device *pdev)
+{
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(core->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+/**
+ * struct platform_b2r2_driver - Platform driver configuration for the
+ * B2R2 core driver
+ */
+static struct platform_driver platform_b2r2_driver = {
+ .remove = b2r2_remove,
+ .driver = {
+ .name = "b2r2",
+ },
+ /** TODO implement power mgmt functions */
+ .suspend = b2r2_suspend,
+ .resume = b2r2_resume,
+};
+
+
+/**
+ * b2r2_init() - Module init function for the B2R2 core module
+ */
+static int __init b2r2_init(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ return platform_driver_probe(&platform_b2r2_driver, b2r2_probe);
+}
+module_init(b2r2_init);
+
+/**
+ * b2r2_exit() - Module exit function for the B2R2 core module
+ */
+static void __exit b2r2_exit(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ platform_driver_unregister(&platform_b2r2_driver);
+ return;
+}
+module_exit(b2r2_exit);
+
+
+/* The module is licensed under GPL */
+
+MODULE_LICENSE("GPL");
+
+/* Module author & description */
+
+MODULE_AUTHOR("Robert Fekete (robert.fekete@stericsson.com)");
+MODULE_DESCRIPTION("B2R2 Core driver");
diff --git a/drivers/video/b2r2/b2r2_core.h b/drivers/video/b2r2/b2r2_core.h
new file mode 100644
index 00000000000..991dd9d9d1b
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_core.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 core driver
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_CORE_H__
+#define __B2R2_CORE_H__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+
+/**
+ * b2r2_core_job_add() - Adds a job to B2R2 job queues
+ *
+ * The job reference count will be increased after this function
+ * has been called and b2r2_core_job_release() must be called to
+ * release the reference. The job callback function will always be
+ * called after the job is done or cancelled.
+ *
+ * @control: The b2r2 control entity
+ * @job: Job to be added
+ *
+ * Returns 0 if OK else negative error code
+ *
+ */
+int b2r2_core_job_add(struct b2r2_control *control,
+ struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_wait() - Waits for an added job to be done.
+ *
+ * @job: Job to wait for
+ *
+ * Returns 0 if job done else negative error code
+ *
+ */
+int b2r2_core_job_wait(struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_cancel() - Cancel an already added job.
+ *
+ * @job: Job to cancel
+ *
+ * Returns 0 if job cancelled or done else negative error code
+ *
+ */
+int b2r2_core_job_cancel(struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_find() - Finds job with given job id
+ *
+ * Reference count will be increased for the found job
+ *
+ * @control: The b2r2 control entity
+ * @job_id: Job id to find
+ *
+ * Returns job if found, else NULL
+ *
+ */
+struct b2r2_core_job *b2r2_core_job_find(struct b2r2_control *control,
+ int job_id);
+
+/**
+ * b2r2_core_job_find_first_with_tag() - Finds first job with given tag
+ *
+ * Reference count will be increased for the found job.
+ * This function can be used to find all jobs for a client, i.e.
+ * when cancelling all jobs for a client.
+ *
+ * @control: The b2r2 control entity
+ * @tag: Tag to find
+ *
+ * Returns job if found, else NULL
+ *
+ */
+struct b2r2_core_job *b2r2_core_job_find_first_with_tag(
+ struct b2r2_control *control, int tag);
+
+/**
+ * b2r2_core_job_addref() - Increase the job reference count.
+ *
+ * @job: Job to increase reference count for.
+ * @caller: The function calling this function (for debug)
+ */
+void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller);
+
+/**
+ * b2r2_core_job_release() - Decrease the job reference count. The
+ * job will be released (the release() function
+ * will be called) when the reference count
+ * reaches zero.
+ *
+ * @job: Job to decrease reference count for.
+ * @caller: The function calling this function (for debug)
+ */
+void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller);
+
+#endif /* !defined(__B2R2_CORE_H__) */
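
A minimal usage sketch of the job interface declared above; the helper name is hypothetical, and allocation and setup of the job are assumed to be done by the caller. Only the calls and the reference-count contract come from this header:

	/* Sketch only: add a job, wait for it to finish, then drop the
	 * reference taken when the job was added. */
	static int b2r2_run_job_sketch(struct b2r2_control *control,
			struct b2r2_core_job *job)
	{
		int ret = b2r2_core_job_add(control, job);
		if (ret < 0)
			return ret;

		ret = b2r2_core_job_wait(job);

		/* The job's release() runs when the reference count hits zero */
		b2r2_core_job_release(job, __func__);
		return ret;
	}
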
diff --git a/drivers/video/b2r2/b2r2_debug.c b/drivers/video/b2r2/b2r2_debug.c
new file mode 100644
index 00000000000..23a0b1aa9ac
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 dynamic debug
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_debug.h"
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
+static struct dentry *log_lvl_dir;
+static int module_init;
+
+#define CHARS_IN_NODE_DUMP 1544
+#define DUMPED_NODE_SIZE (CHARS_IN_NODE_DUMP * sizeof(char) + 1)
+
+static void dump_node(char *dst, struct b2r2_node *node)
+{
+ dst += sprintf(dst, "node 0x%08x ------------------\n",
+ (unsigned int)node);
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_NIP:", node->node.GROUP0.B2R2_NIP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CIC:", node->node.GROUP0.B2R2_CIC);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_INS:", node->node.GROUP0.B2R2_INS);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_ACK:", node->node.GROUP0.B2R2_ACK);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TBA:", node->node.GROUP1.B2R2_TBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TTY:", node->node.GROUP1.B2R2_TTY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TXY:", node->node.GROUP1.B2R2_TXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TSZ:", node->node.GROUP1.B2R2_TSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1CF:", node->node.GROUP2.B2R2_S1CF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2CF:", node->node.GROUP2.B2R2_S2CF);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1BA:", node->node.GROUP3.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1TY:", node->node.GROUP3.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1XY:", node->node.GROUP3.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1SZ:", node->node.GROUP3.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2BA:", node->node.GROUP4.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2TY:", node->node.GROUP4.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2XY:", node->node.GROUP4.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2SZ:", node->node.GROUP4.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3BA:", node->node.GROUP5.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3TY:", node->node.GROUP5.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3XY:", node->node.GROUP5.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3SZ:", node->node.GROUP5.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CWO:", node->node.GROUP6.B2R2_CWO);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CWS:", node->node.GROUP6.B2R2_CWS);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CCO:", node->node.GROUP7.B2R2_CCO);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CML:", node->node.GROUP7.B2R2_CML);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_PMK:", node->node.GROUP8.B2R2_PMK);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FCTL:", node->node.GROUP8.B2R2_FCTL);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_RSF:", node->node.GROUP9.B2R2_RSF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_RZI:", node->node.GROUP9.B2R2_RZI);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_HFP:", node->node.GROUP9.B2R2_HFP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_VFP:", node->node.GROUP9.B2R2_VFP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_RSF:", node->node.GROUP10.B2R2_RSF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_RZI:", node->node.GROUP10.B2R2_RZI);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_HFP:", node->node.GROUP10.B2R2_HFP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_VFP:", node->node.GROUP10.B2R2_VFP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF0:", node->node.GROUP11.B2R2_FF0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF1:", node->node.GROUP11.B2R2_FF1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF2:", node->node.GROUP11.B2R2_FF2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF3:", node->node.GROUP11.B2R2_FF3);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_KEY1:", node->node.GROUP12.B2R2_KEY1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_KEY2:", node->node.GROUP12.B2R2_KEY2);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_XYL:", node->node.GROUP13.B2R2_XYL);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_XYP:", node->node.GROUP13.B2R2_XYP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_SAR:", node->node.GROUP14.B2R2_SAR);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_USR:", node->node.GROUP14.B2R2_USR);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX0:", node->node.GROUP15.B2R2_VMX0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX1:", node->node.GROUP15.B2R2_VMX1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX2:", node->node.GROUP15.B2R2_VMX2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX3:", node->node.GROUP15.B2R2_VMX3);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX0:", node->node.GROUP16.B2R2_VMX0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX1:", node->node.GROUP16.B2R2_VMX1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX2:", node->node.GROUP16.B2R2_VMX2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX3:", node->node.GROUP16.B2R2_VMX3);
+ dst += sprintf(dst, "--\n");
+
+}
+
+void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
+{
+ struct b2r2_node *node = first_node;
+ struct b2r2_node **dst_node;
+ unsigned int node_count = 0;
+
+ while (node != NULL) {
+ node_count++;
+ node = node->next;
+ }
+
+ mutex_lock(&cont->last_job_lock);
+
+ if (cont->last_job) {
+ node = cont->last_job;
+ while (node != NULL) {
+ struct b2r2_node *tmp = node->next;
+ kfree(node);
+ node = tmp;
+ }
+ cont->last_job = NULL;
+ }
+
+ node = first_node;
+ dst_node = &cont->last_job;
+ while (node != NULL) {
+ *dst_node = kzalloc(sizeof(**dst_node), GFP_KERNEL);
+ if (!(*dst_node))
+ goto last_job_alloc_failed;
+
+ memcpy(*dst_node, node, sizeof(**dst_node));
+
+ dst_node = &((*dst_node)->next);
+ node = node->next;
+ }
+
+ mutex_unlock(&cont->last_job_lock);
+
+ return;
+
+last_job_alloc_failed:
+ mutex_unlock(&cont->last_job_lock);
+
+ while (cont->last_job != NULL) {
+ struct b2r2_node *tmp = cont->last_job->next;
+ kfree(cont->last_job);
+ cont->last_job = tmp;
+ }
+
+ return;
+}
+
+static ssize_t last_job_read(struct file *filp, char __user *buf,
+ size_t bytes, loff_t *off)
+{
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
+ struct b2r2_node *node = cont->last_job;
+ int node_count = 0;
+ int i;
+
+ size_t size;
+ size_t count;
+ loff_t offs = *off;
+
+ for (; node != NULL; node = node->next)
+ node_count++;
+
+ size = node_count * DUMPED_NODE_SIZE;
+
+ if (node_count != cont->prev_node_count) {
+ kfree(cont->last_job_chars);
+
+ cont->last_job_chars = kzalloc(size, GFP_KERNEL);
+ if (!cont->last_job_chars)
+ return 0;
+ cont->prev_node_count = node_count;
+ }
+
+ mutex_lock(&cont->last_job_lock);
+ node = cont->last_job;
+ for (i = 0; i < node_count; i++) {
+ BUG_ON(node == NULL);
+ dump_node(cont->last_job_chars +
+ i * DUMPED_NODE_SIZE/sizeof(char),
+ node);
+ node = node->next;
+ }
+ mutex_unlock(&cont->last_job_lock);
+
+ if (offs > size)
+ return 0;
+
+ if (offs + bytes > size)
+ count = size - offs;
+ else
+ count = bytes;
+
+ if (copy_to_user(buf, cont->last_job_chars + offs, count))
+ return -EFAULT;
+
+ *off = offs + count;
+ return count;
+}
+
+static const struct file_operations last_job_fops = {
+ .read = last_job_read,
+};
+
+int b2r2_debug_init(struct b2r2_control *cont)
+{
+ int i;
+
+ if (!module_init) {
+ for (i = 0; i < B2R2_LOG_LEVEL_COUNT; i++)
+ b2r2_log_levels[i] = 0;
+
+#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
+ /*
+ * If dynamic debug is disabled we need some other way to
+ * control the log prints
+ */
+ log_lvl_dir = debugfs_create_dir("b2r2_log", NULL);
+
+ /* No need to save the files,
+ * they will be removed recursively */
+ (void)debugfs_create_bool("warnings", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_WARN]);
+ (void)debugfs_create_bool("info", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_INFO]);
+ (void)debugfs_create_bool("debug", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]);
+ (void)debugfs_create_bool("regdumps", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]);
+
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+ /* log_lvl_dir is never used */
+ (void)log_lvl_dir;
+#endif
+ module_init++;
+ }
+
+ if (cont->debugfs_debug_root_dir) {
+ /* No need to save the file,
+ * it will be removed recursively */
+ (void)debugfs_create_file("last_job", 0444,
+ cont->debugfs_debug_root_dir, cont,
+ &last_job_fops);
+ }
+
+ mutex_init(&cont->last_job_lock);
+
+ return 0;
+}
+
+void b2r2_debug_exit(void)
+{
+#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
+ module_init--;
+ if (!module_init && log_lvl_dir) {
+ debugfs_remove_recursive(log_lvl_dir);
+ log_lvl_dir = NULL;
+ }
+#endif
+}
diff --git a/drivers/video/b2r2/b2r2_debug.h b/drivers/video/b2r2/b2r2_debug.h
new file mode 100644
index 00000000000..1b1ac83f6cb
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 dynamic debug
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_
+
+#include <linux/device.h>
+
+#include "b2r2_internal.h"
+
+#ifdef CONFIG_B2R2_DEBUG
+
+/* Log macros */
+enum b2r2_log_levels {
+ B2R2_LOG_LEVEL_WARN,
+ B2R2_LOG_LEVEL_INFO,
+ B2R2_LOG_LEVEL_DEBUG,
+ B2R2_LOG_LEVEL_REGDUMP,
+ B2R2_LOG_LEVEL_COUNT,
+};
+
+/*
+ * Booleans controlling the different log levels. The different log levels are
+ * enabled separately (i.e. you can have info prints without the warn prints).
+ */
+extern int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
+
+#define b2r2_log_err(b2r2_log_dev, ...) do { \
+ dev_err(b2r2_log_dev, __VA_ARGS__); \
+ } while (0)
+
+/* If dynamic debug is enabled it should be used instead of loglevels */
+#ifdef CONFIG_DYNAMIC_DEBUG
+# define b2r2_log_warn(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "WARN " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_info(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "INFO " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_debug(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_regdump(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
+ } while (0)
+#else
+# define b2r2_log_warn(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_WARN]) \
+ dev_warn(b2r2_log_dev, "WARN " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_info(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_INFO]) \
+ dev_info(b2r2_log_dev, "INFO " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_debug(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]) \
+ dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_regdump(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]) \
+ dev_vdbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
+ } while (0)
+#endif
+
+int b2r2_debug_init(struct b2r2_control *cont);
+void b2r2_debug_exit(void);
+void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *node);
+
+#else
+
+#define b2r2_log_err(...)
+#define b2r2_log_warn(...)
+#define b2r2_log_info(...)
+#define b2r2_log_debug(...)
+#define b2r2_log_regdump(...)
+
+static inline int b2r2_debug_init(struct b2r2_control *cont)
+{
+ return 0;
+}
+static inline void b2r2_debug_exit(void)
+{
+ return;
+}
+static inline void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *node)
+{
+ return;
+}
+
+#endif
+
+#endif
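
A minimal usage sketch of the log macros declared above, assuming a struct device pointer such as core->dev or cont->dev (the helper name is hypothetical). With CONFIG_DYNAMIC_DEBUG the prints go through dev_dbg(); otherwise they are gated by the b2r2_log_levels booleans:

	/* Sketch only: the macros take a struct device * followed by a
	 * printf-style format string, like the dev_* helpers. */
	static void b2r2_log_sketch(struct device *dev)
	{
		b2r2_log_err(dev, "%s: hardware not idle\n", __func__);
		b2r2_log_warn(dev, "unsupported format: %d\n", 42);
		b2r2_log_info(dev, "%s: job queued\n", __func__);
		b2r2_log_debug(dev, "node count: %d\n", 3);
	}
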
diff --git a/drivers/video/b2r2/b2r2_filters.c b/drivers/video/b2r2/b2r2_filters.c
new file mode 100644
index 00000000000..a969816a9e7
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_filters.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 filters.
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/dma-mapping.h>
+
+#include "b2r2_filters.h"
+#include "b2r2_internal.h"
+
+/**
+ * struct b2r2_filter_spec filters[] - Filter lookup table
+ *
+ * Lookup table for filters for different scale factors. A filter
+ * will be selected according to "min < scale_factor <= max".
+ */
+static struct b2r2_filter_spec filters[] = {
+ {
+ .min = 1024,
+ .max = 1433,
+ .h_coeffs = {
+ 0xfc, 0x06, 0xf9, 0x09, 0x34, 0x09, 0xf9, 0x06,
+ 0xfd, 0x07, 0xf7, 0x10, 0x32, 0x02, 0xfc, 0x05,
+ 0xfe, 0x07, 0xf6, 0x17, 0x2f, 0xfc, 0xff, 0x04,
+ 0xff, 0x06, 0xf5, 0x20, 0x2a, 0xf9, 0x01, 0x02,
+ 0x00, 0x04, 0xf6, 0x27, 0x25, 0xf6, 0x04, 0x00,
+ 0x02, 0x01, 0xf9, 0x2d, 0x1d, 0xf5, 0x06, 0xff,
+ 0x04, 0xff, 0xfd, 0x31, 0x15, 0xf5, 0x07, 0xfe,
+ 0x05, 0xfc, 0x02, 0x35, 0x0d, 0xf7, 0x07, 0xfd
+ },
+ .v_coeffs = {
+ 0xf8, 0x0a, 0x3c, 0x0a, 0xf8,
+ 0xf6, 0x12, 0x3b, 0x02, 0xfb,
+ 0xf4, 0x1b, 0x35, 0xfd, 0xff,
+ 0xf4, 0x23, 0x30, 0xf8, 0x01,
+ 0xf6, 0x29, 0x27, 0xf6, 0x04,
+ 0xf9, 0x2e, 0x1e, 0xf5, 0x06,
+ 0xfd, 0x31, 0x16, 0xf6, 0x06,
+ 0x02, 0x32, 0x0d, 0xf8, 0x07
+ },
+ },
+ {
+ .min = 1433,
+ .max = 1536,
+ .h_coeffs = {
+ 0xfe, 0x06, 0xf8, 0x0b, 0x30, 0x0b, 0xf8, 0x06,
+ 0xff, 0x06, 0xf7, 0x12, 0x2d, 0x05, 0xfa, 0x06,
+ 0x00, 0x04, 0xf6, 0x18, 0x2c, 0x00, 0xfc, 0x06,
+ 0x01, 0x02, 0xf7, 0x1f, 0x27, 0xfd, 0xff, 0x04,
+ 0x03, 0x00, 0xf9, 0x24, 0x24, 0xf9, 0x00, 0x03,
+ 0x04, 0xff, 0xfd, 0x29, 0x1d, 0xf7, 0x02, 0x01,
+ 0x06, 0xfc, 0x00, 0x2d, 0x17, 0xf6, 0x04, 0x00,
+ 0x06, 0xfa, 0x05, 0x30, 0x0f, 0xf7, 0x06, 0xff
+ },
+ .v_coeffs = {
+ 0xf6, 0x0e, 0x38, 0x0e, 0xf6,
+ 0xf5, 0x15, 0x38, 0x06, 0xf8,
+ 0xf5, 0x1d, 0x33, 0x00, 0xfb,
+ 0xf6, 0x23, 0x2d, 0xfc, 0xfe,
+ 0xf9, 0x28, 0x26, 0xf9, 0x00,
+ 0xfc, 0x2c, 0x1e, 0xf7, 0x03,
+ 0x00, 0x2e, 0x18, 0xf6, 0x04,
+ 0x05, 0x2e, 0x11, 0xf7, 0x05
+ },
+ },
+ {
+ .min = 1536,
+ .max = 3072,
+ .h_coeffs = {
+ 0xfc, 0xfd, 0x06, 0x13, 0x18, 0x13, 0x06, 0xfd,
+ 0xfc, 0xfe, 0x08, 0x15, 0x17, 0x12, 0x04, 0xfc,
+ 0xfb, 0xfe, 0x0a, 0x16, 0x18, 0x10, 0x03, 0xfc,
+ 0xfb, 0x00, 0x0b, 0x18, 0x17, 0x0f, 0x01, 0xfb,
+ 0xfb, 0x00, 0x0d, 0x19, 0x17, 0x0d, 0x00, 0xfb,
+ 0xfb, 0x01, 0x0f, 0x19, 0x16, 0x0b, 0x00, 0xfb,
+ 0xfc, 0x03, 0x11, 0x19, 0x15, 0x09, 0xfe, 0xfb,
+ 0xfc, 0x04, 0x12, 0x1a, 0x12, 0x08, 0xfe, 0xfc
+ },
+ .v_coeffs = {
+ 0x05, 0x10, 0x16, 0x10, 0x05,
+ 0x06, 0x11, 0x16, 0x0f, 0x04,
+ 0x08, 0x13, 0x15, 0x0e, 0x02,
+ 0x09, 0x14, 0x16, 0x0c, 0x01,
+ 0x0b, 0x15, 0x15, 0x0b, 0x00,
+ 0x0d, 0x16, 0x13, 0x0a, 0x00,
+ 0x0f, 0x17, 0x13, 0x08, 0xff,
+ 0x11, 0x18, 0x12, 0x07, 0xfe
+ },
+ },
+ {
+ .min = 3072,
+ .max = 4096,
+ .h_coeffs = {
+ 0xfe, 0x02, 0x09, 0x0f, 0x0e, 0x0f, 0x09, 0x02,
+ 0xff, 0x02, 0x09, 0x0f, 0x10, 0x0e, 0x08, 0x01,
+ 0xff, 0x03, 0x0a, 0x10, 0x10, 0x0d, 0x07, 0x00,
+ 0x00, 0x04, 0x0b, 0x10, 0x0f, 0x0c, 0x06, 0x00,
+ 0x00, 0x05, 0x0c, 0x10, 0x0e, 0x0c, 0x05, 0x00,
+ 0x00, 0x06, 0x0c, 0x11, 0x0e, 0x0b, 0x04, 0x00,
+ 0x00, 0x07, 0x0d, 0x11, 0x0f, 0x0a, 0x03, 0xff,
+ 0x01, 0x08, 0x0e, 0x11, 0x0e, 0x09, 0x02, 0xff
+ },
+ .v_coeffs = {
+ 0x09, 0x0f, 0x10, 0x0f, 0x09,
+ 0x09, 0x0f, 0x12, 0x0e, 0x08,
+ 0x0a, 0x10, 0x11, 0x0e, 0x07,
+ 0x0b, 0x11, 0x11, 0x0d, 0x06,
+ 0x0c, 0x11, 0x12, 0x0c, 0x05,
+ 0x0d, 0x12, 0x11, 0x0c, 0x04,
+ 0x0e, 0x12, 0x11, 0x0b, 0x04,
+ 0x0f, 0x13, 0x11, 0x0a, 0x03
+ },
+ },
+ {
+ .min = 4096,
+ .max = 5120,
+ .h_coeffs = {
+ 0x00, 0x04, 0x09, 0x0c, 0x0e, 0x0c, 0x09, 0x04,
+ 0x01, 0x05, 0x09, 0x0c, 0x0d, 0x0c, 0x08, 0x04,
+ 0x01, 0x05, 0x0a, 0x0c, 0x0e, 0x0b, 0x08, 0x03,
+ 0x02, 0x06, 0x0a, 0x0d, 0x0c, 0x0b, 0x07, 0x03,
+ 0x02, 0x07, 0x0a, 0x0d, 0x0d, 0x0a, 0x07, 0x02,
+ 0x03, 0x07, 0x0b, 0x0d, 0x0c, 0x0a, 0x06, 0x02,
+ 0x03, 0x08, 0x0b, 0x0d, 0x0d, 0x0a, 0x05, 0x01,
+ 0x04, 0x08, 0x0c, 0x0d, 0x0c, 0x09, 0x05, 0x01
+ },
+ .v_coeffs = {
+ 0x0a, 0x0e, 0x10, 0x0e, 0x0a,
+ 0x0b, 0x0e, 0x0f, 0x0e, 0x0a,
+ 0x0b, 0x0f, 0x10, 0x0d, 0x09,
+ 0x0c, 0x0f, 0x10, 0x0d, 0x08,
+ 0x0d, 0x0f, 0x0f, 0x0d, 0x08,
+ 0x0d, 0x10, 0x10, 0x0c, 0x07,
+ 0x0e, 0x10, 0x0f, 0x0c, 0x07,
+ 0x0f, 0x10, 0x10, 0x0b, 0x06
+ },
+ },
+};
+static const size_t filters_size = sizeof(filters)/sizeof(filters[0]);
+
+/**
+ * struct b2r2_filter_spec bilinear_filter - A bilinear filter
+ *
+ * The bilinear filter will be used if no custom filters are specified, or
+ * for upscales not matching any filter in the lookup table.
+ */
+static struct b2r2_filter_spec bilinear_filter = {
+ .min = 0,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0x08, 0x3e, 0xfb, 0x00, 0x00,
+ 0x00, 0x00, 0xfb, 0x13, 0x3b, 0xf7, 0x00, 0x00,
+ 0x00, 0x00, 0xf8, 0x1f, 0x34, 0xf5, 0x00, 0x00,
+ 0x00, 0x00, 0xf6, 0x2b, 0x2a, 0xf5, 0x00, 0x00,
+ 0x00, 0x00, 0xf6, 0x35, 0x1e, 0xf7, 0x00, 0x00,
+ 0x00, 0x00, 0xf9, 0x3c, 0x12, 0xf9, 0x00, 0x00,
+ 0x00, 0x00, 0xfd, 0x3f, 0x07, 0xfd, 0x00, 0x00
+ },
+ .v_coeffs = {
+ 0x00, 0x00, 0x40, 0x00, 0x00,
+ 0x00, 0x09, 0x3d, 0xfa, 0x00,
+ 0x00, 0x13, 0x39, 0xf4, 0x00,
+ 0x00, 0x1e, 0x31, 0xf1, 0x00,
+ 0x00, 0x27, 0x28, 0xf1, 0x00,
+ 0x00, 0x31, 0x1d, 0xf2, 0x00,
+ 0x00, 0x38, 0x12, 0xf6, 0x00,
+ 0x00, 0x3d, 0x07, 0xfc, 0x00
+ },
+};
+
+/**
+ * struct b2r2_filter_spec default_downscale_filter - Default filter for downscale
+ *
+ * The default downscale filter will be used for downscales not matching any
+ * filter in the lookup table.
+ */
+static struct b2r2_filter_spec default_downscale_filter = {
+ .min = 1 << 10,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x03, 0x06, 0x09, 0x0b, 0x09, 0x0b, 0x09, 0x06,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x04, 0x07, 0x09, 0x0b, 0x0b, 0x0a, 0x08, 0x04,
+ 0x04, 0x07, 0x0a, 0x0b, 0x0b, 0x0a, 0x07, 0x04,
+ 0x04, 0x08, 0x0a, 0x0b, 0x0b, 0x09, 0x07, 0x04,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03
+ },
+ .v_coeffs = {
+ 0x0b, 0x0e, 0x0e, 0x0e, 0x0b,
+ 0x0b, 0x0e, 0x0f, 0x0d, 0x0b,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0d, 0x0f, 0x0e, 0x0d, 0x09,
+ 0x0d, 0x0f, 0x0f, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0e, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0f, 0x0c, 0x08
+ },
+};
+
+/**
+ * struct b2r2_filter_spec blur_filter - Blur filter
+ *
+ * Filter for blurring an image.
+ */
+static struct b2r2_filter_spec blur_filter = {
+ .min = 0,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08
+ },
+ .v_coeffs = {
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c
+ },
+};
+
+/* Private function declarations */
+static int alloc_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter);
+static void free_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter);
+
+/* Public functions */
+
+int b2r2_filters_init(struct b2r2_control *cont)
+{
+ int i;
+
+ if (cont->filters_initialized)
+ return 0;
+
+ for (i = 0; i < filters_size; i++) {
+ alloc_filter_coeffs(cont->dev, &filters[i]);
+ }
+
+ alloc_filter_coeffs(cont->dev, &bilinear_filter);
+ alloc_filter_coeffs(cont->dev, &default_downscale_filter);
+ alloc_filter_coeffs(cont->dev, &blur_filter);
+
+ cont->filters_initialized = 1;
+
+ return 0;
+}
+
+void b2r2_filters_exit(struct b2r2_control *cont)
+{
+ int i;
+
+ if (!cont->filters_initialized)
+ return;
+
+ for (i = 0; i < filters_size; i++) {
+ free_filter_coeffs(cont->dev, &filters[i]);
+ }
+
+ free_filter_coeffs(cont->dev, &bilinear_filter);
+ free_filter_coeffs(cont->dev, &default_downscale_filter);
+ free_filter_coeffs(cont->dev, &blur_filter);
+
+ cont->filters_initialized = 0;
+}
+
+struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor)
+{
+ int i;
+ struct b2r2_filter_spec *filter = NULL;
+
+ for (i = 0; i < filters_size; i++) {
+ if ((filters[i].min < scale_factor) &&
+ (scale_factor <= filters[i].max) &&
+ filters[i].h_coeffs_dma_addr &&
+ filters[i].v_coeffs_dma_addr) {
+ filter = &filters[i];
+ break;
+ }
+ }
+
+ if (filter == NULL) {
+ /*
+ * No suitable filter has been found. Use default filters,
+ * bilinear for any upscale.
+ */
+ if (scale_factor < (1 << 10))
+ filter = &bilinear_filter;
+ else
+ filter = &default_downscale_filter;
+ }
+
+ /*
+	 * Check that the coefficients were successfully allocated for this
+ * filter.
+ */
+ if (!filter->h_coeffs_dma_addr || !filter->v_coeffs_dma_addr)
+ return NULL;
+ else
+ return filter;
+}
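
The scale factors used as table keys are in 6.10 fixed point (see b2r2_filters.h), so 1 << 10 represents a 1:1 scale and a filter is selected when min < scale_factor <= max. A minimal sketch of how a caller could derive a scale factor and select a filter; the exact computation used elsewhere in the driver is an assumption, and only b2r2_filter_find() comes from this file:

	/* Sketch only: scaling 1280 -> 640 pixels gives (1280 << 10) / 640 = 2048,
	 * i.e. 2.0 in 6.10 fixed point, which satisfies 1536 < 2048 <= 3072 and
	 * therefore selects the third entry in the filters[] table. */
	static struct b2r2_filter_spec *pick_filter_sketch(u32 src_w, u32 dst_w)
	{
		u16 scale_factor = (u16)((src_w << 10) / dst_w);

		return b2r2_filter_find(scale_factor);
	}
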
+
+struct b2r2_filter_spec *b2r2_filter_blur(void)
+{
+ return &blur_filter;
+}
+
+/* Private functions */
+static int alloc_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter)
+{
+ int ret;
+
+ filter->h_coeffs_dma_addr = dma_alloc_coherent(dev,
+ B2R2_HF_TABLE_SIZE, &(filter->h_coeffs_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (filter->h_coeffs_dma_addr == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ filter->v_coeffs_dma_addr = dma_alloc_coherent(dev,
+ B2R2_VF_TABLE_SIZE, &(filter->v_coeffs_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (filter->v_coeffs_dma_addr == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy(filter->h_coeffs_dma_addr, filter->h_coeffs,
+ B2R2_HF_TABLE_SIZE);
+ memcpy(filter->v_coeffs_dma_addr, filter->v_coeffs,
+ B2R2_VF_TABLE_SIZE);
+
+ return 0;
+
+error:
+ free_filter_coeffs(dev, filter);
+ return ret;
+
+}
+
+static void free_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter)
+{
+ if (filter->h_coeffs_dma_addr != NULL)
+ dma_free_coherent(dev, B2R2_HF_TABLE_SIZE,
+ filter->h_coeffs_dma_addr,
+ filter->h_coeffs_phys_addr);
+ if (filter->v_coeffs_dma_addr != NULL)
+ dma_free_coherent(dev, B2R2_VF_TABLE_SIZE,
+ filter->v_coeffs_dma_addr,
+ filter->v_coeffs_phys_addr);
+
+ filter->h_coeffs_dma_addr = NULL;
+ filter->h_coeffs_phys_addr = 0;
+ filter->v_coeffs_dma_addr = NULL;
+ filter->v_coeffs_phys_addr = 0;
+}
diff --git a/drivers/video/b2r2/b2r2_filters.h b/drivers/video/b2r2/b2r2_filters.h
new file mode 100644
index 00000000000..790c9ec8ee9
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_filters.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 filters.
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_FILTERS_H
+#define _LINUX_VIDEO_B2R2_FILTERS_H
+
+#include <linux/kernel.h>
+
+#include "b2r2_internal.h"
+
+#define B2R2_HF_TABLE_SIZE 64
+#define B2R2_VF_TABLE_SIZE 40
+
+/**
+ * @struct b2r2_filter_spec - Filter specification structure
+ *
+ * @param min - Minimum scale factor for this filter (in 6.10 fixed point)
+ * @param max - Maximum scale factor for this filter (in 6.10 fixed point)
+ * @param h_coeffs - Horizontal filter coefficients
+ * @param v_coeffs - Vertical filter coefficients
+ * @param h_coeffs_dma_addr - Virtual DMA address for horizontal coefficients
+ * @param v_coeffs_dma_addr - Virtual DMA address for vertical coefficients
+ * @param h_coeffs_phys_addr - Physical address for horizontal coefficients
+ * @param v_coeffs_phys_addr - Physical address for vertical coefficients
+ */
+struct b2r2_filter_spec {
+ const u16 min;
+ const u16 max;
+
+ const u8 h_coeffs[B2R2_HF_TABLE_SIZE];
+ const u8 v_coeffs[B2R2_VF_TABLE_SIZE];
+
+ void *h_coeffs_dma_addr;
+ u32 h_coeffs_phys_addr;
+
+ void *v_coeffs_dma_addr;
+ u32 v_coeffs_phys_addr;
+};
+
+/**
+ * b2r2_filters_init() - Initializes the B2R2 filters
+ */
+int b2r2_filters_init(struct b2r2_control *control);
+
+/**
+ * b2r2_filters_exit() - De-initializes the B2R2 filters
+ */
+void b2r2_filters_exit(struct b2r2_control *control);
+
+/**
+ * b2r2_filter_find() - Find a filter matching the given scale factor
+ *
+ * @param scale_factor - Scale factor to find a filter for
+ *
+ * Returns NULL if no filter could be found.
+ */
+struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor);
+
+/**
+ * b2r2_filter_blur() - Returns the blur filter
+ *
+ * Returns NULL if no blur filter is available.
+ */
+struct b2r2_filter_spec *b2r2_filter_blur(void);
+
+#endif /* _LINUX_VIDEO_B2R2_FILTERS_H */
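
A minimal sketch of the expected pairing of the filter API declared above, assuming a struct b2r2_control that has already been set up (the helper name is hypothetical):

	/* Sketch only: allocate the coefficient tables once, use the lookup
	 * helpers while the control is active, then free the tables. */
	static int b2r2_filters_lifecycle_sketch(struct b2r2_control *cont)
	{
		int ret = b2r2_filters_init(cont);
		if (ret < 0)
			return ret;

		/* ... b2r2_filter_find() / b2r2_filter_blur() may be used here ... */

		b2r2_filters_exit(cont);
		return 0;
	}
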
diff --git a/drivers/video/b2r2/b2r2_generic.c b/drivers/video/b2r2/b2r2_generic.c
new file mode 100644
index 00000000000..1a27adbaadf
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_generic.c
@@ -0,0 +1,3334 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 generic. Full coverage of the user interface, but a
+ * non-optimized implementation. For fallback purposes.
+ *
+ * Author: Maciej Socha <maciej.socha@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include "b2r2_generic.h"
+#include "b2r2_internal.h"
+#include "b2r2_global.h"
+#include "b2r2_debug.h"
+#include "b2r2_filters.h"
+
+/*
+ * Debug printing
+ */
+#define B2R2_GENERIC_DEBUG_AREAS 0
+#define B2R2_GENERIC_DEBUG
+
+#define B2R2_GENERIC_WORK_BUF_WIDTH 16
+#define B2R2_GENERIC_WORK_BUF_HEIGHT 16
+#define B2R2_GENERIC_WORK_BUF_PITCH (16 * 4)
+#define B2R2_GENERIC_WORK_BUF_FMT B2R2_NATIVE_ARGB8888
+
+/*
+ * Private functions
+ */
+
+/**
+ * reset_nodes() - clears the node list
+ */
+static void reset_nodes(struct b2r2_control *cont,
+ struct b2r2_node *node)
+{
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ while (node != NULL) {
+ memset(&(node->node), 0, sizeof(node->node));
+
+ /* TODO: Implement support for short linked lists */
+ node->node.GROUP0.B2R2_CIC = 0x7fffc;
+
+ if (node->next == NULL)
+ break;
+
+ node->node.GROUP0.B2R2_NIP = node->next->physical_address;
+
+ node = node->next;
+ }
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/**
+ * dump_nodes() - prints the node list
+ */
+static void dump_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first, bool dump_all)
+{
+ struct b2r2_node *node = first;
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+ do {
+ b2r2_log_debug(cont->dev, "\nNODE START:\n=============\n");
+ b2r2_log_debug(cont->dev, "B2R2_ACK: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_ACK);
+ b2r2_log_debug(cont->dev, "B2R2_INS: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_INS);
+ b2r2_log_debug(cont->dev, "B2R2_CIC: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_CIC);
+ b2r2_log_debug(cont->dev, "B2R2_NIP: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_NIP);
+
+ b2r2_log_debug(cont->dev, "B2R2_TSZ: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TSZ);
+ b2r2_log_debug(cont->dev, "B2R2_TXY: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TXY);
+ b2r2_log_debug(cont->dev, "B2R2_TTY: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TTY);
+ b2r2_log_debug(cont->dev, "B2R2_TBA: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_S2CF: \t0x%.8x\n",
+ node->node.GROUP2.B2R2_S2CF);
+ b2r2_log_debug(cont->dev, "B2R2_S1CF: \t0x%.8x\n",
+ node->node.GROUP2.B2R2_S1CF);
+
+ b2r2_log_debug(cont->dev, "B2R2_S1SZ: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SSZ);
+ b2r2_log_debug(cont->dev, "B2R2_S1XY: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SXY);
+ b2r2_log_debug(cont->dev, "B2R2_S1TY: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_STY);
+ b2r2_log_debug(cont->dev, "B2R2_S1BA: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_S2SZ: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SSZ);
+ b2r2_log_debug(cont->dev, "B2R2_S2XY: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SXY);
+ b2r2_log_debug(cont->dev, "B2R2_S2TY: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_STY);
+ b2r2_log_debug(cont->dev, "B2R2_S2BA: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_S3SZ: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SSZ);
+ b2r2_log_debug(cont->dev, "B2R2_S3XY: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SXY);
+ b2r2_log_debug(cont->dev, "B2R2_S3TY: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_STY);
+ b2r2_log_debug(cont->dev, "B2R2_S3BA: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_CWS: \t0x%.8x\n",
+ node->node.GROUP6.B2R2_CWS);
+ b2r2_log_debug(cont->dev, "B2R2_CWO: \t0x%.8x\n",
+ node->node.GROUP6.B2R2_CWO);
+
+ b2r2_log_debug(cont->dev, "B2R2_FCTL: \t0x%.8x\n",
+ node->node.GROUP8.B2R2_FCTL);
+ b2r2_log_debug(cont->dev, "B2R2_RSF: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_RSF);
+ b2r2_log_debug(cont->dev, "B2R2_RZI: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_RZI);
+ b2r2_log_debug(cont->dev, "B2R2_HFP: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_HFP);
+ b2r2_log_debug(cont->dev, "B2R2_VFP: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_VFP);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_RSF: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_RSF);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_RZI: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_RZI);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_HFP: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_HFP);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_VFP: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_VFP);
+
+
+ b2r2_log_debug(cont->dev, "B2R2_IVMX0: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX0);
+ b2r2_log_debug(cont->dev, "B2R2_IVMX1: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX1);
+ b2r2_log_debug(cont->dev, "B2R2_IVMX2: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX2);
+ b2r2_log_debug(cont->dev, "B2R2_IVMX3: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX3);
+ b2r2_log_debug(cont->dev, "\n=============\nNODE END\n");
+
+ node = node->next;
+ } while (node != NULL && dump_all);
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/**
+ * to_native_fmt() - returns the native B2R2 format
+ */
+static inline enum b2r2_native_fmt to_native_fmt(struct b2r2_control *cont,
+ enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_UNUSED:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return B2R2_NATIVE_A1;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_NATIVE_A8;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return B2R2_NATIVE_ARGB4444;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return B2R2_NATIVE_ARGB1555;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ return B2R2_NATIVE_ARGB8565;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ return B2R2_NATIVE_RGB888;
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ return B2R2_NATIVE_YCBCR888;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Not actually supported by HW */
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ return B2R2_NATIVE_ARGB8888;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return B2R2_NATIVE_AYCBCR8888;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return B2R2_NATIVE_YCBCR42X_R2B;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_NATIVE_YCBCR42X_MBN;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_NATIVE_YUV;
+ default:
+ /* Should never ever happen */
+ return B2R2_NATIVE_BYTE;
+ }
+}
+
+/**
+ * get_alpha_range() - returns the alpha range of the given format
+ */
+static inline enum b2r2_ty get_alpha_range(struct b2r2_control *cont,
+ enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ return B2R2_TY_ALPHA_RANGE_255; /* 0 - 255 */
+ default:
+ break;
+ }
+
+ return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */
+}
+
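+/**
+ * get_pitch() - returns the pitch in bytes for the given format and width
+ *
+ * Returns 0 if the width is not valid for the format. For multi-buffer
+ * YUV formats the pitch of the Y-buffer is returned. E.g. a 720 pixel
+ * wide B2R2_BLT_FMT_16_BIT_RGB565 buffer gives a pitch of 1440 bytes.
+ */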
+static unsigned int get_pitch(struct b2r2_control *cont,
+ enum b2r2_blt_fmt format, u32 width)
+{
+ switch (format) {
+ case B2R2_BLT_FMT_1_BIT_A1: {
+ int pitch = width >> 3;
+ /* Check for remainder */
+ if (width & 7)
+ pitch++;
+ return pitch;
+ }
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return width;
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* all 16 bits/pixel RGB formats */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return width * 2;
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* all 24 bits/pixel raster formats */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return width * 3;
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* all 32 bits/pixel formats */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return width * 4;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ /* width of the buffer must be a multiple of 4 */
+ if (width & 3) {
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ return width * 2;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return width;
+ /* The formats below share the same pitch and pointer handling */
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ /* width of the buffer must be a multiple of 2 */
+ if (width & 1) {
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ /*
+ * return pitch of the Y-buffer.
+ * U and V pitch can be derived from it.
+ */
+ return width;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /* width of the buffer must be a multiple of 16. */
+ if (width & 15) {
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ /*
+ * return pitch of the Y-buffer.
+ * U and V pitch can be derived from it.
+ */
+ return width;
+ default:
+ b2r2_log_warn(cont->dev, "%s: Unable to determine pitch "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+}
+
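+/**
+ * validate_buf() - checks that the buffer is large enough for the image
+ *
+ * Returns 0 if the resolved buffer can hold the image with the given
+ * format, dimensions and pitch, otherwise -EINVAL.
+ */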
+static s32 validate_buf(struct b2r2_control *cont,
+ const struct b2r2_blt_img *image,
+ const struct b2r2_resolved_buf *buf)
+{
+ u32 expect_buf_size;
+ u32 pitch;
+
+ if (image->width <= 0 || image->height <= 0) {
+ b2r2_log_warn(cont->dev, "%s: width=%d or height=%d "
+ "not positive.\n", __func__, image->width, image->height);
+ return -EINVAL;
+ }
+
+ if (image->pitch == 0) {
+ /* autodetect pitch based on format and width */
+ pitch = get_pitch(cont, image->fmt, image->width);
+ } else
+ pitch = image->pitch;
+
+ expect_buf_size = pitch * image->height;
+
+ if (pitch == 0) {
+ b2r2_log_warn(cont->dev, "%s: Unable to detect pitch. "
+ "fmt=%#010x, width=%d\n",
+ __func__,
+ image->fmt, image->width);
+ return -EINVAL;
+ }
+
+ /* format specific adjustments */
+ switch (image->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ /*
+ * Use ceil(height/2) in case buffer height
+ * is not divisible by 2.
+ */
+ expect_buf_size +=
+ (pitch >> 1) * ((image->height + 1) >> 1) * 2;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ expect_buf_size += (pitch >> 1) * image->height * 2;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ expect_buf_size += pitch * image->height * 2;
+ break;
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ /*
+ * include space occupied by U and V data.
+ * U and V interleaved, half resolution, which makes
+ * the UV pitch equal to luma pitch.
+ * Use ceil(height/2) in case buffer height
+ * is not divisible by 2.
+ */
+ expect_buf_size += pitch * ((image->height + 1) >> 1);
+ break;
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ /*
+ * include space occupied by U and V data.
+ * U and V interleaved, half resolution, which makes
+ * the UV pitch equal to luma pitch.
+ */
+ expect_buf_size += pitch * image->height;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /* Height must be a multiple of 16 for macro-block format.*/
+ if (image->height & 15) {
+ b2r2_log_warn(cont->dev, "%s: Illegal height "
+ "for fmt=%#010x height=%d\n", __func__,
+ image->fmt, image->height);
+ return -EINVAL;
+ }
+ expect_buf_size += pitch * (image->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /* Height must be a multiple of 16 for macro-block format.*/
+ if (image->height & 15) {
+ b2r2_log_warn(cont->dev, "%s: Illegal height "
+ "for fmt=%#010x height=%d\n", __func__,
+ image->fmt, image->height);
+ return -EINVAL;
+ }
+ expect_buf_size += pitch * image->height;
+ break;
+ default:
+ break;
+ }
+
+ if (buf->file_len < expect_buf_size) {
+ b2r2_log_warn(cont->dev, "%s: Invalid buffer size:\n"
+ "fmt=%#010x w=%d h=%d buf.len=%d expect_buf_size=%d\n",
+ __func__,
+ image->fmt, image->width, image->height, buf->file_len,
+ expect_buf_size);
+ return -EINVAL;
+ }
+
+ if (image->buf.type == B2R2_BLT_PTR_VIRTUAL) {
+ b2r2_log_warn(cont->dev, "%s: Virtual pointers not supported"
+ " yet.\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Bit-expand the color from fmt to RGB888 with blue at LSB.
+ * Copy MSBs into missing LSBs.
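+ * E.g. pure red in RGB565 (0xf800) expands to 0x00ff0000.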
+ */
+static u32 to_RGB888(struct b2r2_control *cont, u32 color,
+ const enum b2r2_blt_fmt fmt)
+{
+ u32 out_color = 0;
+ u32 r = 0;
+ u32 g = 0;
+ u32 b = 0;
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ r = ((color & 0xf00) << 12) | ((color & 0xf00) << 8);
+ g = ((color & 0xf0) << 8) | ((color & 0xf0) << 4);
+ b = ((color & 0xf) << 4) | (color & 0xf);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ r = ((color & 0x7c00) << 9) | ((color & 0x7000) << 4);
+ g = ((color & 0x3e0) << 6) | ((color & 0x380) << 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ out_color = color & 0xffffff;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ r = (color & 0xff) << 16;
+ g = color & 0xff00;
+ b = (color & 0xff0000) >> 16;
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ default:
+ break;
+ }
+
+ return out_color;
+}
+
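+/**
+ * setup_fill_input_stage() - sets up a node that color fills the work buffer
+ *
+ * The fill color from the request is applied through a SRC2 register
+ * color fill and converted as required by the destination format.
+ */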
+static void setup_fill_input_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ enum b2r2_native_fmt fill_fmt = 0;
+ u32 src_color = req->user_req.src_color;
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ /* Determine format in src_color */
+ switch (dst_img->fmt) {
+ /* ARGB formats */
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL) != 0) {
+ fill_fmt = B2R2_NATIVE_ARGB8888;
+ } else {
+ /* SOURCE_FILL_RAW */
+ fill_fmt = to_native_fmt(cont, dst_img->fmt);
+ if (dst_img->fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) {
+ /*
+ * Color is read from a register,
+ * where it is stored in ABGR format.
+ * Set up IVMX.
+ */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_BGR;
+ }
+ }
+ break;
+ /* YUV formats */
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL) != 0) {
+ fill_fmt = B2R2_NATIVE_AYCBCR8888;
+ /*
+ * Set up IVMX
+ * The destination format is in fact YUV,
+ * but the input stage stores the data in
+ * an intermediate buffer which is RGB.
+ * Hence the conversion from YUV to RGB.
+ * Format of the supplied src_color is
+ * B2R2_BLT_FMT_32_BIT_AYUV8888.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+ } else {
+ /* SOURCE_FILL_RAW */
+ bool dst_yuv_planar =
+ B2R2_BLT_FMT_YUV420_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV444_PACKED_PLANAR ==
+ dst_img->fmt;
+
+ bool dst_yuv_semi_planar =
+ B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE ==
+ dst_img->fmt;
+
+ if (dst_yuv_planar || dst_yuv_semi_planar) {
+ /*
+ * SOURCE_FILL_RAW cannot be supported
+ * with multi-buffer formats.
+ * Force a legal format to prevent B2R2
+ * from misbehaving.
+ */
+ fill_fmt = B2R2_NATIVE_AYCBCR8888;
+ } else {
+ fill_fmt = to_native_fmt(cont, dst_img->fmt);
+ }
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+ /*
+ * Re-arrange the color components from
+ * VUY(A) to (A)YUV
+ */
+ if (dst_img->fmt ==
+ B2R2_BLT_FMT_24_BIT_VUY888) {
+ u32 Y = src_color & 0xff;
+ u32 U = src_color & 0xff00;
+ u32 V = src_color & 0xff0000;
+ src_color = (Y << 16) | U | (V >> 16);
+ } else if (dst_img->fmt ==
+ B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ u32 A = src_color & 0xff;
+ u32 Y = src_color & 0xff00;
+ u32 U = src_color & 0xff0000;
+ u32 V = src_color & 0xff000000;
+ src_color = (A << 24) |
+ (Y << 8) |
+ (U >> 8) |
+ (V >> 24);
+ }
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /*
+ * Setup input VMX to convert YVU to
+ * RGB 601 VIDEO
+ * Chroma components are swapped so
+ * it is YVU and not YUV.
+ */
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ break;
+ default:
+ /*
+ * Set up IVMX
+ * The destination format is in fact YUV,
+ * but the input stage stores the data in
+ * an intermediate buffer which is RGB.
+ * Hence the conversion from YUV to RGB.
+ */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ break;
+ }
+ }
+ break;
+ default:
+ src_color = 0;
+ fill_fmt = B2R2_NATIVE_ARGB8888;
+ break;
+ }
+
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+ /* Set color fill on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = 0;
+ node->node.GROUP4.B2R2_STY =
+ (0 << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ fill_fmt |
+ get_alpha_range(cont, dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL;
+ node->node.GROUP2.B2R2_S2CF = src_color;
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
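+/**
+ * setup_input_stage() - reads the source image into the work buffer
+ *
+ * Sets up color conversion, rescaling, optional CLUT color correction
+ * and flipping (through scan order). Color fill requests are delegated
+ * to setup_fill_input_stage().
+ */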
+static void setup_input_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ /* Horizontal and vertical scaling factors in 6.10 fixed point format */
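+ /* (1 << 10 represents 1.0, so e.g. a 2x downscale gives 2 << 10) */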
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ const struct b2r2_blt_rect *src_rect = &(req->user_req.src_rect);
+ const struct b2r2_blt_rect *dst_rect = &(req->user_req.dst_rect);
+ const struct b2r2_blt_img *src_img = &(req->user_req.src_img);
+ u32 src_pitch = 0;
+ /* horizontal and vertical scan order for out_buf */
+ enum b2r2_ty dst_hso = B2R2_TY_HSO_LEFT_TO_RIGHT;
+ enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+ u32 endianness = 0;
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 rzi = 0;
+ bool yuv_semi_planar =
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv_planar =
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ struct b2r2_filter_spec *hf;
+ struct b2r2_filter_spec *vf;
+
+ bool use_h_filter = false;
+ bool use_v_filter = false;
+
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (((B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW) &
+ req->user_req.flags) != 0) {
+ setup_fill_input_stage(req, node, out_buf);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+ return;
+ }
+
+ if (src_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ src_pitch = get_pitch(cont, src_img->fmt, src_img->width);
+ } else {
+ src_pitch = src_img->pitch;
+ }
+
+ b2r2_log_info(cont->dev, "%s transform=%#010x\n",
+ __func__, req->user_req.transform);
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect->width << 10) / dst_rect->height;
+ v_scf = (src_rect->height << 10) / dst_rect->width;
+ } else {
+ h_scf = (src_rect->width << 10) / dst_rect->width;
+ v_scf = (src_rect->height << 10) / dst_rect->height;
+ }
+
+ hf = b2r2_filter_find(h_scf);
+ vf = b2r2_filter_find(v_scf);
+
+ use_h_filter = h_scf != (1 << 10);
+ use_v_filter = v_scf != (1 << 10);
+
+ /* B2R2_BLT_FLAG_BLUR overrides any scaling filter. */
+ if (req->user_req.flags & B2R2_BLT_FLAG_BLUR) {
+ use_h_filter = true;
+ use_v_filter = true;
+ hf = b2r2_filter_blur();
+ vf = b2r2_filter_blur();
+ }
+
+ /* Configure horizontal rescale */
+ if (h_scf != (1 << 10)) {
+ b2r2_log_info(cont->dev, "%s: Scaling horizontally by 0x%.8x"
+ "\ns(%d, %d)->d(%d, %d)\n", __func__,
+ h_scf, src_rect->width, src_rect->height,
+ dst_rect->width, dst_rect->height);
+ }
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rzi |= B2R2_RZI_DEFAULT_HNB_REPEAT;
+
+ /* Configure vertical rescale */
+ if (v_scf != (1 << 10)) {
+ b2r2_log_info(cont->dev, "%s: Scaling vertically by 0x%.8x"
+ "\ns(%d, %d)->d(%d, %d)\n", __func__,
+ v_scf, src_rect->width, src_rect->height,
+ dst_rect->width, dst_rect->height);
+ }
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ rzi |= 2 << B2R2_RZI_VNB_REPEAT_SHIFT;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA;
+
+ /* Adjustments that depend on the source format */
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_BGR;
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /*
+ * Setup input VMX to convert YVU to RGB 601 VIDEO
+ * Chroma components are swapped so
+ * it is YVU and not YUV.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ /*
+ * Set up IVMX.
+ * For B2R2_BLT_FMT_24_BIT_YUV888 and
+ * B2R2_BLT_FMT_32_BIT_AYUV8888
+ * the color components are laid out in memory as V, U, Y, (A)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them to be as U, Y, V, (A)
+ * with U at the first byte.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+
+ /*
+ * Re-arrange color components from VUY(A) to (A)YUV
+ * for input VMX to work on them further.
+ */
+ if (src_img->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ src_img->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: {
+ /*
+ * Luma handled in the same way
+ * for all YUV multi-buffer formats.
+ * Set luma rescale registers.
+ */
+ u32 rsf_luma = 0;
+ u32 rzi_luma = 0;
+
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED | B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_IVMX | B2R2_CIC_RESIZE_LUMA;
+
+ if (src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_img->fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ }
+
+ fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+
+ if (use_h_filter && hf) {
+ fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER;
+ node->node.GROUP10.B2R2_HFP = hf->h_coeffs_phys_addr;
+ }
+
+ if (use_v_filter && vf) {
+ fctl |= B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER;
+ node->node.GROUP10.B2R2_VFP = vf->v_coeffs_phys_addr;
+ }
+
+ rsf_luma |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rzi_luma |= B2R2_RZI_DEFAULT_HNB_REPEAT;
+
+ rsf_luma |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ rzi_luma |= 2 << B2R2_RZI_VNB_REPEAT_SHIFT;
+
+ node->node.GROUP10.B2R2_RSF = rsf_luma;
+ node->node.GROUP10.B2R2_RZI = rzi_luma;
+
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (h_scf >> 1) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (v_scf >> 1) << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf >> 1);
+ vf = b2r2_filter_find(v_scf >> 1);
+ use_h_filter = true;
+ use_v_filter = true;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size,
+ * but only in the horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (h_scf >> 1) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf >> 1);
+ use_h_filter = true;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /* Chrominance is the same size as luminance.*/
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf);
+ vf = b2r2_filter_find(v_scf);
+ use_h_filter = true;
+ use_v_filter = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ /*
+ * Set the filter control and rescale registers.
+ * GROUP9 registers are used for all single-buffer formats
+ * or for chroma in case of multi-buffer YUV formats.
+ * h/v_filter is now appropriately selected for chroma scaling,
+ * be it YUV multi-buffer, or single-buffer raster format.
+ * B2R2_BLT_FLAG_BLUR overrides any scaling filter.
+ */
+ if (req->user_req.flags & B2R2_BLT_FLAG_BLUR) {
+ use_h_filter = true;
+ use_v_filter = true;
+ hf = b2r2_filter_blur();
+ vf = b2r2_filter_blur();
+ }
+
+ if (use_h_filter && hf) {
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ node->node.GROUP9.B2R2_HFP = hf->h_coeffs_phys_addr;
+ }
+
+ if (use_v_filter && vf) {
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ node->node.GROUP9.B2R2_VFP = vf->v_coeffs_phys_addr;
+ }
+
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ node->node.GROUP9.B2R2_RSF |= rsf;
+ node->node.GROUP9.B2R2_RZI |= rzi;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL;
+
+ /*
+ * Flip transform is done before potential rotation.
+ * This can be achieved with appropriate scan order.
+ * Transform stage will only do rotation.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ dst_hso = B2R2_TY_HSO_RIGHT_TO_LEFT;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ dst_vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ dst_hso | dst_vso;
+
+ if (yuv_planar) {
+ /*
+ * Set up chrominance buffers on source 1 and 2,
+ * luminance on source 3.
+ * src_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ */
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt src_fmt =
+ to_native_fmt(cont, src_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+ else
+ cb_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ chroma_pitch = src_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (src_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (src_img->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ chroma_pitch = src_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ src_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ src_img->height;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /* Chrominance has full resolution, same as luminance.*/
+ chroma_pitch = src_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * src_img->height;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP3.B2R2_SBA = cr_addr;
+ node->node.GROUP3.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP4.B2R2_SBA = cb_addr;
+ node->node.GROUP4.B2R2_STY = node->node.GROUP3.B2R2_STY;
+
+ node->node.GROUP5.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_SOURCE_3;
+ } else if (yuv_semi_planar) {
+ /*
+ * Set up chrominance buffer on source 2, luminance on source 3.
+ * src_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ * U and V are interleaved at half the luminance resolution,
+ * which makes the pitch of the UV plane equal
+ * to luminance pitch.
+ */
+ u32 chroma_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+ u32 chroma_pitch = src_pitch;
+
+ enum b2r2_native_fmt src_fmt =
+ to_native_fmt(cont, src_img->fmt);
+
+ node->node.GROUP4.B2R2_SBA = chroma_addr;
+ node->node.GROUP4.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP5.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_SOURCE_3;
+ } else {
+ /* single buffer format */
+ node->node.GROUP4.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP4.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(cont, src_img->fmt) |
+ get_alpha_range(cont, src_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ endianness;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ }
+
+ if ((req->user_req.flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CLUTOP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLUT;
+ node->node.GROUP7.B2R2_CCO = B2R2_CCO_CLUT_COLOR_CORRECTION |
+ B2R2_CCO_CLUT_UPDATE;
+ node->node.GROUP7.B2R2_CML = req->clut_phys_addr;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
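+/**
+ * setup_transform_stage() - rotates the contents of the work buffer
+ *
+ * Only 90 degree counter-clockwise rotation is handled here; flipping
+ * is already done through the scan order chosen in the input stage.
+ */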
+static void setup_transform_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf,
+ struct b2r2_work_buf *in_buf)
+{
+ /* vertical scan order for out_buf */
+ enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+ enum b2r2_blt_transform transform = req->user_req.transform;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * Scan order must be flipped otherwise contents will
+ * be mirrored vertically. Leftmost column of in_buf
+ * would become top instead of bottom row of out_buf.
+ */
+ dst_vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED;
+ }
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT | dst_vso;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/*
+static void setup_mask_stage(const struct b2r2_blt_request req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf,
+ struct b2r2_work_buf *in_buf);
+*/
+
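+/**
+ * setup_dst_read_stage() - reads the destination into the work buffer
+ *
+ * Converts the destination image to the intermediate RGB work buffer
+ * format, upscaling any subsampled chroma to full resolution.
+ */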
+static void setup_dst_read_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 endianness = 0;
+ bool yuv_semi_planar =
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv_planar =
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ u32 dst_pitch = 0;
+ struct b2r2_control *cont = req->instance->control;
+
+ if (dst_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ dst_pitch = get_pitch(cont, dst_img->fmt, dst_img->width);
+ } else {
+ dst_pitch = dst_img->pitch;
+ }
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ /* Adjustments that depend on the destination format */
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_BGR;
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ /*
+ * Setup input VMX to convert YVU to RGB 601 VIDEO
+ * Chroma components are swapped
+ * so it is YVU and not YUV.
+ */
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ /*
+ * Set up IVMX.
+ * For B2R2_BLT_FMT_24_BIT_YUV888 and
+ * B2R2_BLT_FMT_32_BIT_AYUV8888
+ * the color components are laid out in memory as V, U, Y, (A)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them to be as U, Y, V, (A)
+ * with U at the first byte.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+
+ /*
+ * Re-arrange color components from VUY(A) to (A)YUV
+ * for input VMX to work on them further.
+ */
+ if (dst_img->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_img->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: {
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+
+ if (dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ }
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size,
+ * but only in the horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /* Chrominance is the same size as luminance.*/
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ default:
+ break;
+ }
+ /* Set the filter control and rescale registers for chroma */
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ node->node.GROUP9.B2R2_RSF |= rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL | B2R2_CIC_RESIZE_CHROMA;
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ if (yuv_planar) {
+ /*
+ * Set up chrominance buffers on source 1 and 2,
+ * luminance on source 3.
+ * dst_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ */
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ else
+ cb_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ dst_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ dst_img->height;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ chroma_pitch = dst_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * dst_img->height;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP3.B2R2_SBA = cr_addr;
+ node->node.GROUP3.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP4.B2R2_SBA = cb_addr;
+ node->node.GROUP4.B2R2_STY = node->node.GROUP3.B2R2_STY;
+
+ node->node.GROUP5.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_SOURCE_3;
+ } else if (yuv_semi_planar) {
+ /*
+ * Set up chrominance buffer on source 2, luminance on source 3.
+ * dst_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ * U and V are interleaved at half the luminance resolution,
+ * which makes the pitch of the UV plane equal
+ * to luminance pitch.
+ */
+ u32 chroma_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ u32 chroma_pitch = dst_pitch;
+
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+
+ node->node.GROUP4.B2R2_SBA = chroma_addr;
+ node->node.GROUP4.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP5.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_SOURCE_3;
+ } else {
+ /* single buffer format */
+ node->node.GROUP4.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP4.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(cont, dst_img->fmt) |
+ get_alpha_range(cont, dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ endianness;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
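+/**
+ * setup_blend_stage() - blends the foreground and background work buffers
+ *
+ * The result is written to the background buffer. When no blend flags
+ * are set, the foreground is simply copied to the background buffer.
+ */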
+static void setup_blend_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *bg_buf,
+ struct b2r2_work_buf *fg_buf)
+{
+ u32 global_alpha = req->user_req.global_alpha;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ node->node.GROUP0.B2R2_ACK = 0;
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND |
+ B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)) {
+ /* Some kind of blending needs to be done. */
+ if (req->user_req.flags & B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT)
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_MODE_BLEND_NOT_PREMULT;
+ else
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_MODE_BLEND_PREMULT;
+
+ /*
+ * global_alpha register accepts 0..128 range,
+ * global_alpha in the request is 0..255, remap needed.
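+ * (255 maps to 128, all other values are simply halved)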
+ */
+ if (req->user_req.flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) {
+ if (global_alpha == 255)
+ global_alpha = 128;
+ else
+ global_alpha >>= 1;
+ } else {
+ /*
+ * Use solid global_alpha
+ * if global alpha blending is not set.
+ */
+ global_alpha = 128;
+ }
+
+ node->node.GROUP0.B2R2_ACK |=
+ global_alpha << (B2R2_ACK_GALPHA_ROPID_SHIFT);
+
+ /* Set background on SRC1 channel */
+ node->node.GROUP3.B2R2_SBA = bg_buf->phys_addr;
+ node->node.GROUP3.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /* Set foreground on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = fg_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = bg_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2;
+ } else {
+ /*
+ * No blending, foreground goes on SRC2. No global alpha.
+ * EMACSOC TODO: The blending stage should be skipped altogether
+ * if no blending is to be done. Probably could go directly from
+ * transform to writeback.
+ */
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+
+ node->node.GROUP4.B2R2_SBA = fg_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP1.B2R2_TBA = bg_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+ }
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
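+/**
+ * setup_writeback_stage() - writes the work buffer to the destination
+ *
+ * Multi-buffer YUV destinations require two or three nodes, one per
+ * plane, with chroma downscaled as required by the format.
+ */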
+static void setup_writeback_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *in_buf)
+{
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ const enum b2r2_blt_fmt dst_fmt = dst_img->fmt;
+ const bool yuv_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ const bool yuv_semi_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ const u32 group4_b2r2_sty =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ u32 dst_dither = 0;
+ u32 dst_pitch = 0;
+ u32 endianness = 0;
+
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (dst_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ dst_pitch = get_pitch(cont, dst_img->fmt, dst_img->width);
+ } else
+ dst_pitch = dst_img->pitch;
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_DITHER) != 0)
+ dst_dither = B2R2_TTY_RGB_ROUND_DITHER;
+
+ /* Set target buffer(s) */
+ if (yuv_planar_dst) {
+ /*
+ * three nodes required to write the output.
+ * Luma, blue chroma and red chroma.
+ */
+ u32 fctl = 0;
+ u32 rsf = 0;
+ const u32 group0_b2r2_ins =
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED |
+ B2R2_INS_IVMX_ENABLED;
+ const u32 group0_b2r2_cic =
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_CLIP_WINDOW |
+ B2R2_CIC_IVMX;
+
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+ enum b2r2_ty alpha_range = get_alpha_range(cont, dst_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ else
+ cb_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ dst_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ dst_img->height;
+ /*
+ * YUV422 or YVU422
+ * Chrominance is always half the luminance size,
+ * but only in the horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ chroma_pitch = dst_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * dst_img->height;
+ /*
+ * No scaling required since
+ * chrominance is not subsampled.
+ */
+ default:
+ break;
+ }
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+
+ /* bypass ALU, no blending here. Handled in its own stage. */
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+ /* Blue chroma (U-component)*/
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = cb_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+ if (dst_fmt != B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+ /*
+ * Red chroma (V-component)
+ * The flag B2R2_TTY_CB_NOT_CR actually works
+ * the other way around, i.e. as if it was
+ * CR_NOT_CB.
+ */
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = cr_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TTY_CB_NOT_CR |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+ if (dst_fmt != B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ } else if (yuv_semi_planar_dst) {
+ /*
+ * Two nodes are required to write the output: one for luma
+ * and one for the interleaved chroma components.
+ */
+ u32 fctl = 0;
+ u32 rsf = 0;
+ const u32 group0_b2r2_ins =
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED |
+ B2R2_INS_IVMX_ENABLED;
+ const u32 group0_b2r2_cic =
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_CLIP_WINDOW |
+ B2R2_CIC_IVMX;
+
+ u32 chroma_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ u32 chroma_pitch = dst_pitch;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+ enum b2r2_ty alpha_range = get_alpha_range(cont, dst_img->fmt);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR) {
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ } else {
+ /*
+ * YUV422
+ * Chrominance is always half the luminance size
+ * only in horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ }
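+ /*
+ * (2 << 10) is 2.0 and (1 << 10) is 1.0 in the 6.10 fixed point
+ * format used for the resize factors, i.e. the chroma resizer
+ * steps two (or one) luma-resolution pixels per output chroma
+ * pixel.
+ */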
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither;
+
+ if (dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+ }
+
+ /* bypass ALU, no blending here. Handled in its own stage. */
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+ /* Chroma (UV-components)*/
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = chroma_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+ }
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS =
+ group0_b2r2_ins | B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic |
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ } else {
+ /* single buffer target */
+
+ /* Set up OVMX */
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX;
+ node->node.GROUP16.B2R2_VMX0 = B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP16.B2R2_VMX1 = B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP16.B2R2_VMX2 = B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP16.B2R2_VMX3 = B2R2_VMX3_RGB_TO_BGR;
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX;
+ node->node.GROUP16.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888: /* fall through */
+ case B2R2_BLT_FMT_24_BIT_VUY888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX;
+ node->node.GROUP16.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_BLT_YUV888_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_BLT_YUV888_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_BLT_YUV888_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_BLT_YUV888_601_VIDEO;
+
+ /*
+ * Re-arrange color components from (A)YUV to VUY(A)
+ * when bytes are stored in memory.
+ */
+ if (dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(cont, dst_img->fmt) |
+ get_alpha_range(cont, dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ endianness;
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_CLIP_WINDOW;
+
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) {
+ u32 key_color = 0;
+
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT |
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY;
+
+ key_color = to_RGB888(cont, req->user_req.src_color,
+ req->user_req.src_img.fmt);
+ node->node.GROUP12.B2R2_KEY1 = key_color;
+ node->node.GROUP12.B2R2_KEY2 = key_color;
+ }
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ }
+ /*
+ * Writeback is the last stage. Terminate the program chain
+ * to prevent out-of-control B2R2 execution.
+ */
+ node->node.GROUP0.B2R2_NIP = 0;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/*
+ * Public functions
+ */
+void b2r2_generic_init(struct b2r2_control *cont)
+{
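+ /* Nothing to initialize for the generic implementation at present. */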
+
+}
+
+void b2r2_generic_exit(struct b2r2_control *cont)
+{
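+ /* Nothing to clean up for the generic implementation at present. */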
+
+}
+
+int b2r2_generic_analyze(const struct b2r2_blt_request *req,
+ s32 *work_buf_width,
+ s32 *work_buf_height,
+ u32 *work_buf_count,
+ u32 *node_count)
+{
+ /*
+ * Need at least 4 nodes: read or fill input, read dst, blend
+ * and write back the result.
+ */
+ u32 n_nodes = 4;
+ /* Need at least 2 bufs, 1 for blend output and 1 for input */
+ u32 n_work_bufs = 2;
+ /* Horizontal and vertical scaling factors in 6.10 fixed point format */
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ enum b2r2_blt_fmt dst_fmt = 0;
+ bool is_src_fill = false;
+ bool yuv_planar_dst;
+ bool yuv_semi_planar_dst;
+ struct b2r2_blt_rect src_rect;
+ struct b2r2_blt_rect dst_rect;
+ struct b2r2_control *cont = req->instance->control;
+
+ if (req == NULL || work_buf_width == NULL || work_buf_height == NULL ||
+ work_buf_count == NULL || node_count == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Invalid in or out pointers:\n"
+ "req=0x%p\n"
+ "work_buf_width=0x%p work_buf_height=0x%p "
+ "work_buf_count=0x%p\n"
+ "node_count=0x%p.\n",
+ __func__,
+ req,
+ work_buf_width, work_buf_height,
+ work_buf_count,
+ node_count);
+ return -EINVAL;
+ }
+
+ dst_fmt = req->user_req.dst_img.fmt;
+
+ is_src_fill = (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0;
+
+ yuv_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+ yuv_semi_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ *node_count = 0;
+ *work_buf_width = 0;
+ *work_buf_height = 0;
+ *work_buf_count = 0;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ n_nodes++;
+ n_work_bufs++;
+ }
+
+ if ((yuv_planar_dst || yuv_semi_planar_dst) &&
+ (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW)) {
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid combination: source_fill_raw"
+ " and multi-buffer destination.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) != 0 &&
+ (req->user_req.flags & B2R2_BLT_FLAG_DEST_COLOR_KEY)) {
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid combination: source and "
+ "destination color keying.\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_COLOR_KEY |
+ B2R2_BLT_FLAG_DEST_COLOR_KEY))) {
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: "
+ "source_fill and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags &
+ (B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND |
+ B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND)) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: "
+ "blending and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: source mask "
+ "and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_MASK)) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source mask, "
+ "destination color keying.\n",
+ __func__);
+ return -ENOSYS;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK)) {
+ enum b2r2_blt_fmt src_fmt = req->user_req.src_img.fmt;
+ bool yuv_src =
+ src_fmt == B2R2_BLT_FMT_Y_CB_Y_CR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ if (yuv_src || src_fmt == B2R2_BLT_FMT_1_BIT_A1 ||
+ src_fmt == B2R2_BLT_FMT_8_BIT_A8) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source "
+ "mask with YUV or pure alpha "
+ "formats.\n", __func__);
+ return -ENOSYS;
+ }
+ }
+
+ /* Check for invalid dimensions that would hinder scale calculations */
+ src_rect = req->user_req.src_rect;
+ dst_rect = req->user_req.dst_rect;
+ /* Check for invalid src_rect unless src_fill is enabled */
+ if (!is_src_fill && (src_rect.x < 0 || src_rect.y < 0 ||
+ src_rect.x + src_rect.width > req->user_req.src_img.width ||
+ src_rect.y + src_rect.height > req->user_req.src_img.height)) {
+ b2r2_log_warn(cont->dev, "%s: src_rect outside src_img:\n"
+ "src(x,y,w,h)=(%d, %d, %d, %d) "
+ "src_img(w,h)=(%d, %d).\n",
+ __func__,
+ src_rect.x, src_rect.y, src_rect.width, src_rect.height,
+ req->user_req.src_img.width,
+ req->user_req.src_img.height);
+ return -EINVAL;
+ }
+
+ if (!is_src_fill && (src_rect.width <= 0 || src_rect.height <= 0)) {
+ b2r2_log_warn(cont->dev, "%s: Invalid source dimensions:\n"
+ "src(w,h)=(%d, %d).\n",
+ __func__,
+ src_rect.width, src_rect.height);
+ return -EINVAL;
+ }
+
+ if (dst_rect.width <= 0 || dst_rect.height <= 0) {
+ b2r2_log_warn(cont->dev, "%s: Invalid dest dimensions:\n"
+ "dst(w,h)=(%d, %d).\n",
+ __func__,
+ dst_rect.width, dst_rect.height);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ req->user_req.clut == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Invalid request: no table "
+ "specified for CLUT color correction.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check for invalid image params */
+ if (!is_src_fill && validate_buf(cont, &(req->user_req.src_img),
+ &(req->src_resolved)))
+ return -EINVAL;
+
+ if (validate_buf(cont, &(req->user_req.dst_img), &(req->dst_resolved)))
+ return -EINVAL;
+
+ if (is_src_fill) {
+ /*
+ * Params correct for a source fill operation.
+ * No need for further checking.
+ */
+ if (yuv_planar_dst)
+ n_nodes += 2;
+ else if (yuv_semi_planar_dst)
+ n_nodes++;
+
+ *work_buf_width = B2R2_GENERIC_WORK_BUF_WIDTH;
+ *work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT;
+ *work_buf_count = n_work_bufs;
+ *node_count = n_nodes;
+ b2r2_log_info(cont->dev, "%s DONE buf_w=%d buf_h=%d "
+ "buf_count=%d node_count=%d\n", __func__,
+ *work_buf_width, *work_buf_height,
+ *work_buf_count, *node_count);
+ return 0;
+ }
+
+ /*
+ * Calculate scaling factors, all transform enum values
+ * that include rotation have the CCW_ROT_90 bit set.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect.width << 10) / dst_rect.height;
+ v_scf = (src_rect.height << 10) / dst_rect.width;
+ } else {
+ h_scf = (src_rect.width << 10) / dst_rect.width;
+ v_scf = (src_rect.height << 10) / dst_rect.height;
+ }
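+ /*
+ * Example: src_rect.width == 64 and dst_rect.width == 128 gives
+ * h_scf == (64 << 10) / 128 == 0x200, i.e. 0.5 in 6.10 fixed
+ * point, which corresponds to a 2x horizontal upscale.
+ */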
+
+ /* Check for degenerate/out_of_range scaling factors. */
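+ /*
+ * 0x7C00 is 31.0 in 6.10 fixed point, so scale steps above a
+ * factor of 31 (or non-positive steps) are rejected here.
+ */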
+ if (h_scf <= 0 || v_scf <= 0 || h_scf > 0x7C00 || v_scf > 0x7C00) {
+ b2r2_log_warn(cont->dev,
+ "%s: Dimensions result in degenerate or "
+ "out of range scaling:\n"
+ "src(w,h)=(%d, %d) "
+ "dst(w,h)=(%d,%d).\n"
+ "h_scf=0x%.8x, v_scf=0x%.8x\n",
+ __func__,
+ src_rect.width, src_rect.height,
+ dst_rect.width, dst_rect.height,
+ h_scf, v_scf);
+ return -EINVAL;
+ }
+
+ if (yuv_planar_dst)
+ n_nodes += 2;
+ else if (yuv_semi_planar_dst)
+ n_nodes++;
+
+ *work_buf_width = B2R2_GENERIC_WORK_BUF_WIDTH;
+ *work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT;
+ *work_buf_count = n_work_bufs;
+ *node_count = n_nodes;
+ b2r2_log_info(cont->dev, "%s DONE buf_w=%d buf_h=%d buf_count=%d "
+ "node_count=%d\n", __func__, *work_buf_width,
+ *work_buf_height, *work_buf_count, *node_count);
+ return 0;
+}
+
+/*
+ * b2r2_generic_configure() - configure the node list for the given request.
+ */
+int b2r2_generic_configure(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_work_buf *tmp_bufs,
+ u32 buf_count)
+{
+ struct b2r2_node *node = NULL;
+ struct b2r2_work_buf *in_buf = NULL;
+ struct b2r2_work_buf *out_buf = NULL;
+ struct b2r2_work_buf *empty_buf = NULL;
+ struct b2r2_control *cont = req->instance->control;
+
+#ifdef B2R2_GENERIC_DEBUG
+ u32 needed_bufs = 0;
+ u32 needed_nodes = 0;
+ s32 work_buf_width = 0;
+ s32 work_buf_height = 0;
+ u32 n_nodes = 0;
+ int invalid_req = b2r2_generic_analyze(req, &work_buf_width,
+ &work_buf_height, &needed_bufs,
+ &needed_nodes);
+ if (invalid_req < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid request supplied, ec=%d\n",
+ __func__, invalid_req);
+ return -EINVAL;
+ }
+
+ node = first;
+
+ while (node != NULL) {
+ n_nodes++;
+ node = node->next;
+ }
+ if (n_nodes < needed_nodes) {
+ b2r2_log_warn(cont->dev, "%s: Not enough nodes %d < %d.\n",
+ __func__, n_nodes, needed_nodes);
+ return -EINVAL;
+ }
+
+ if (buf_count < needed_bufs) {
+ b2r2_log_warn(cont->dev, "%s: Not enough buffers %d < %d.\n",
+ __func__, buf_count, needed_bufs);
+ return -EINVAL;
+ }
+
+#endif
+
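+ /*
+ * Stages are set up in the order the nodes are consumed:
+ * <input>, [transform], [src_mask, currently disabled],
+ * <dst_read>, <blend>, <writeback>.
+ * See also b2r2_generic_set_areas().
+ */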
+ reset_nodes(cont, first);
+ node = first;
+ empty_buf = tmp_bufs;
+ out_buf = empty_buf;
+ empty_buf++;
+ /* Prepare input tile. Color_fill or read from src */
+ setup_input_stage(req, node, out_buf);
+ in_buf = out_buf;
+ out_buf = empty_buf;
+ empty_buf++;
+ node = node->next;
+
+ if ((req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0) {
+ setup_transform_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ out_buf = empty_buf++;
+ }
+ /* EMACSOC TODO: mask */
+ /*
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) {
+ setup_mask_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ out_buf = empty_buf++;
+ }
+ */
+ /* Read the part of destination that will be updated */
+ setup_dst_read_stage(req, node, out_buf);
+ node = node->next;
+ setup_blend_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ setup_writeback_stage(req, node, in_buf);
+ return 0;
+}
+
+void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_blt_rect *dst_rect_area)
+{
+ /*
+ * Nodes come in the following order: <input stage>, [transform],
+ * [src_mask], <dst_read>, <blend>, <writeback>
+ */
+ struct b2r2_node *node = first;
+ const struct b2r2_blt_rect *dst_rect = &(req->user_req.dst_rect);
+ const struct b2r2_blt_rect *src_rect = &(req->user_req.src_rect);
+ const enum b2r2_blt_fmt src_fmt = req->user_req.src_img.fmt;
+ bool yuv_multi_buffer_src =
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ const enum b2r2_blt_fmt dst_fmt = req->user_req.dst_img.fmt;
+ const bool yuv_multi_buffer_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ s32 src_x = 0;
+ s32 src_y = 0;
+ s32 src_w = 0;
+ s32 src_h = 0;
+ u32 b2r2_rzi = 0;
+ s32 clip_top = 0;
+ s32 clip_left = 0;
+ s32 clip_bottom = req->user_req.dst_img.height - 1;
+ s32 clip_right = req->user_req.dst_img.width - 1;
+ /* Dst coords inside the dst_rect, not the buffer */
+ s32 dst_x = dst_rect_area->x;
+ s32 dst_y = dst_rect_area->y;
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect->width << 10) / dst_rect->height;
+ v_scf = (src_rect->height << 10) / dst_rect->width;
+ } else {
+ h_scf = (src_rect->width << 10) / dst_rect->width;
+ v_scf = (src_rect->height << 10) / dst_rect->height;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * Normally the inverse transform for 90 degree rotation
+ * is given by:
+ * | 0 1| |x| | y|
+ * | | X | | = | |
+ * |-1 0| |y| |-x|
+ * but screen coordinates are flipped in y direction
+ * (compared to usual Cartesian coordinates), hence the offsets.
+ */
+ src_x = (dst_rect->height - dst_y - dst_rect_area->height) *
+ h_scf;
+ src_y = dst_x * v_scf;
+ src_w = dst_rect_area->height * h_scf;
+ src_h = dst_rect_area->width * v_scf;
+ } else {
+ src_x = dst_x * h_scf;
+ src_y = dst_y * v_scf;
+ src_w = dst_rect_area->width * h_scf;
+ src_h = dst_rect_area->height * v_scf;
+ }
+
+ b2r2_rzi |= ((src_x & 0x3ff) << B2R2_RZI_HSRC_INIT_SHIFT) |
+ ((src_y & 0x3ff) << B2R2_RZI_VSRC_INIT_SHIFT);
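+ /*
+ * The low 10 bits of src_x/src_y are the 6.10 fractional pixel
+ * offsets; they seed the resizer via B2R2_RZI as described in
+ * the comment below.
+ */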
+
+ /*
+ * src_w must contain all the pixels that contribute
+ * to a particular tile.
+ * ((x + 0x3ff) >> 10) is equivalent to ceiling(x),
+ * expressed in 6.10 fixed point format.
+ * Every destination tile, maps to a certain area in the source
+ * rectangle. The area in source will most likely not be a rectangle
+ * with exact integer dimensions whenever arbitrary scaling is involved.
+ * Consider the following example.
+ * Suppose, that width of the current destination tile maps
+ * to 1.7 pixels in source, starting at x == 5.4, as calculated
+ * using the scaling factor.
+ * This means that while the destination tile is written,
+ * the source should be read from x == 5.4 up to x == 5.4 + 1.7 == 7.1
+ * Consequently, color from 3 pixels (x == 5, 6 and 7)
+ * needs to be read from source.
+ * The formula below the comment yields:
+ * ceil(0.4 + 1.7) == ceil(2.1) == 3
+ * (src_x & 0x3ff) is the fractional part of src_x,
+ * which is expressed in 6.10 fixed point format.
+ * Thus, width of the source area should be 3 pixels wide,
+ * starting at x == 5.
+ * However, the reading should not start at x == 5.0
+ * but a bit inside, namely x == 5.4
+ * The B2R2_RZI register is used to instruct the HW to do so.
+ * It contains the fractional part that will be added to
+ * the first pixel coordinate, before incrementing the current source
+ * coordinate with the step specified in B2R2_RSF register.
+ * The same applies to scaling in vertical direction.
+ */
+ src_w = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src_h = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
+
+ /*
+ * EMACSOC TODO: Remove this debug clamp, once tile size
+ * is taken into account in generic_analyze()
+ */
+ if (src_w > 128)
+ src_w = 128;
+
+ src_x >>= 10;
+ src_y >>= 10;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ src_x = src_rect->width - src_x - src_w;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ src_y = src_rect->height - src_y - src_h;
+
+ /*
+ * Translate the src/dst_rect coordinates into true
+ * src/dst_buffer coordinates
+ */
+ src_x += src_rect->x;
+ src_y += src_rect->y;
+
+ dst_x += dst_rect->x;
+ dst_y += dst_rect->y;
+
+ /*
+ * Clamp the src coords to buffer dimensions
+ * to prevent illegal reads.
+ */
+ if (src_x < 0)
+ src_x = 0;
+
+ if (src_y < 0)
+ src_y = 0;
+
+ if ((src_x + src_w) > req->user_req.src_img.width)
+ src_w = req->user_req.src_img.width - src_x;
+
+ if ((src_y + src_h) > req->user_req.src_img.height)
+ src_h = req->user_req.src_img.height - src_y;
+
+
+ /* The input node */
+ if (yuv_multi_buffer_src) {
+ /* Luma on SRC3 */
+ node->node.GROUP5.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP5.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
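+ /*
+ * The XY registers pack x in the low half-word and y in the
+ * high half-word; the SZ registers pack 12-bit width and
+ * height fields the same way.
+ */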
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP10.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP10.B2R2_RZI |= b2r2_rzi;
+
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ switch (src_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Chroma is half the size of luma. Must round up
+ * the chroma size to handle cases when luma size is not
+ * divisible by 2.
+ * E.g. luma width==7 requires chroma width==4.
+ * Chroma width==7/2==3 is only enough
+ * for luma width==6.
+ */
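+ /* ((w + 1) >> 1) rounds up, i.e. equals ceil(w / 2). */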
+ node->node.GROUP4.B2R2_SXY =
+ (((src_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ (((src_y & 0xffff) >> 1) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((src_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((src_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ if (src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ node->node.GROUP9.B2R2_RZI |= (b2r2_rzi >> 1) &
+ ((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Now chroma is half the size of luma
+ * only in horizontal direction.
+ * Same rounding applies as for 420 formats above,
+ * except it is only done horizontally.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((src_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((src_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ if (src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ node->node.GROUP9.B2R2_RZI |=
+ (((src_x & 0x3ff) >> 1) <<
+ B2R2_RZI_HSRC_INIT_SHIFT) |
+ ((src_y & 0x3ff) << B2R2_RZI_VSRC_INIT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /*
+ * Chroma goes on SRC2 and SRC1.
+ * It is the same size as luma.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP3.B2R2_SXY = node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ = node->node.GROUP4.B2R2_SSZ;
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP9.B2R2_RZI |= b2r2_rzi;
+ break;
+ default:
+ break;
+ }
+ } else {
+ node->node.GROUP4.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP9.B2R2_RZI |= b2r2_rzi;
+ }
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * dst_rect_area coordinates are specified
+ * after potential rotation.
+ * Input is read before rotation, hence the width and height
+ * need to be swapped.
+ * Horizontal and vertical flips are accomplished with
+ * suitable scanning order while writing
+ * to the temporary buffer.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_X_SHIFT;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->width - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+ }
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ } else {
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->width - 1) & 0xffff) <<
+ B2R2_XY_X_SHIFT;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+ }
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ }
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW)) {
+ /*
+ * Scan order for source fill should always be left-to-right
+ * and top-to-bottom. Fill the input tile from top left.
+ */
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP4.B2R2_SSZ = node->node.GROUP1.B2R2_TSZ;
+ }
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Input node done.\n", __func__);
+ }
+
+ /* Transform */
+ if ((req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0) {
+ /*
+ * Transform node operates on temporary buffers.
+ * Content always at top left, but scanning order
+ * has to be flipped during rotation.
+ * Width and height need to be considered as well, since
+ * a tile may not necessarily be filled completely.
+ * dst_rect_area dimensions are specified
+ * after potential rotation.
+ * Input is read before rotation, hence the width and height
+ * need to be swapped on src.
+ */
+ node = node->next;
+
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ /* Bottom line written first */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Transform node done.\n", __func__);
+ }
+ }
+
+ /* Source mask */
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) {
+ node = node->next;
+ /*
+ * Same coords for mask as for the input stage.
+ * Should the mask be transformed together with source?
+ * EMACSOC TODO: Apply mask before any
+ * transform/scaling is done.
+ * Otherwise it will be dst_ not src_mask.
+ */
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Source mask node done.\n", __func__);
+ }
+ }
+
+ /* dst_read */
+ if (yuv_multi_buffer_dst) {
+ s32 dst_w = dst_rect_area->width;
+ s32 dst_h = dst_rect_area->height;
+ bool yuv420_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv422_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ node = node->next;
+ /* Luma on SRC3 */
+ node->node.GROUP5.B2R2_SXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP5.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (yuv420_dst) {
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Chroma is half the size of luma. Must round up
+ * the chroma size to handle cases when luma size is not
+ * divisible by 2.
+ * E.g. luma width==7 requires chroma width==4.
+ * Chroma width==7/2==3 is only enough
+ * for luma width==6.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((dst_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ (((dst_y & 0xffff) >> 1) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((dst_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ } else if (yuv422_dst) {
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Now chroma is half the size of luma
+ * only in horizontal direction.
+ * Same rounding applies as for 420 formats above,
+ * except it is only done horizontally.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((dst_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ } else if (dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ /*
+ * Chroma goes on SRC2 and SRC1.
+ * It is the same size as luma.
+ */
+ node->node.GROUP4.B2R2_SXY = node->node.GROUP5.B2R2_SXY;
+ node->node.GROUP4.B2R2_SSZ = node->node.GROUP5.B2R2_SSZ;
+ node->node.GROUP3.B2R2_SXY = node->node.GROUP5.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ = node->node.GROUP5.B2R2_SSZ;
+ }
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ } else {
+ node = node->next;
+ node->node.GROUP4.B2R2_SXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ }
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s dst_read node done.\n", __func__);
+ }
+
+ /* blend */
+ node = node->next;
+ node->node.GROUP3.B2R2_SXY = 0;
+ node->node.GROUP3.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ /* contents of the foreground temporary buffer always at top left */
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Blend node done.\n", __func__);
+ }
+
+ /* writeback */
+ node = node->next;
+ if ((req->user_req.flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0) {
+ clip_left = req->user_req.dst_clip_rect.x;
+ clip_top = req->user_req.dst_clip_rect.y;
+ clip_right = clip_left + req->user_req.dst_clip_rect.width - 1;
+ clip_bottom = clip_top + req->user_req.dst_clip_rect.height - 1;
+ }
+ /*
+ * Clamp the dst clip rectangle to the buffer dimensions to
+ * prevent illegal writes. An invalid clip rectangle, e.g. one
+ * lying outside the buffer, is ignored, so nothing is clipped.
+ */
+ if (clip_left < 0 || req->user_req.dst_img.width <= clip_left)
+ clip_left = 0;
+
+ if (clip_top < 0 || req->user_req.dst_img.height <= clip_top)
+ clip_top = 0;
+
+ if (clip_right < 0 || req->user_req.dst_img.width <= clip_right)
+ clip_right = req->user_req.dst_img.width - 1;
+
+ if (clip_bottom < 0 || req->user_req.dst_img.height <= clip_bottom)
+ clip_bottom = req->user_req.dst_img.height - 1;
+
+ /*
+ * Only allow writing inside the clip rect.
+ * INTNL bit in B2R2_CWO should be zero.
+ */
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (yuv_multi_buffer_dst) {
+ const s32 dst_w = dst_rect_area->width;
+ const s32 dst_h = dst_rect_area->height;
+ int i = 0;
+ /* Number of nodes required to write chroma output */
+ int n_nodes = 1;
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR)
+ n_nodes = 2;
+
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Writeback luma node done.\n", __func__);
+ }
+
+ node = node->next;
+
+ /*
+ * Chroma components. 1 or 2 nodes
+ * for semi-planar or planar buffer respectively.
+ */
+ for (i = 0; i < n_nodes && node != NULL; ++i) {
+
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma is half the size of luma.
+ * Must round up the chroma size to handle
+ * cases when luma size is not divisible by 2.
+ * E.g. luma_width==7 requires chroma_width==4.
+ * Chroma_width==7/2==3 is only enough
+ * for luma_width==6.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ (((dst_x & 0xffff) >> 1) <<
+ B2R2_XY_X_SHIFT) |
+ (((dst_y & 0xffff) >> 1) <<
+ B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((dst_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Now chroma is half the size of luma only
+ * in horizontal direction.
+ * Same rounding applies as
+ * for 420 formats above, except it is only
+ * done horizontally.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ (((dst_x & 0xffff) >> 1) <<
+ B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /*
+ * Chroma has the same resolution as luma.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Writeback chroma "
+ "node %d of %d done.\n",
+ __func__, i + 1, n_nodes);
+ }
+
+ node = node->next;
+ }
+ } else {
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Writeback node done.\n",
+ __func__);
+ }
+ }
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
diff --git a/drivers/video/b2r2/b2r2_generic.h b/drivers/video/b2r2/b2r2_generic.h
new file mode 100644
index 00000000000..3b22f654deb
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_generic.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 generic path. Full coverage of the user interface,
+ * but a non-optimized implementation intended as a fallback.
+ *
+ * Author: Maciej Socha <maciej.socha@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_GENERIC_H
+#define _LINUX_VIDEO_B2R2_GENERIC_H
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_internal.h"
+
+/**
+ * b2r2_generic_init()
+ */
+void b2r2_generic_init(struct b2r2_control *cont);
+
+/**
+ * b2r2_generic_exit()
+ */
+void b2r2_generic_exit(struct b2r2_control *cont);
+
+/**
+ * b2r2_generic_analyze()
+ */
+int b2r2_generic_analyze(const struct b2r2_blt_request *req,
+ s32 *work_buf_width,
+ s32 *work_buf_height,
+ u32 *work_buf_count,
+ u32 *node_count);
+/**
+ * b2r2_generic_configure()
+ */
+int b2r2_generic_configure(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_work_buf *tmp_bufs,
+ u32 buf_count);
+/**
+ * b2r2_generic_set_areas()
+ */
+void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_blt_rect *dst_rect_area);
+#endif
diff --git a/drivers/video/b2r2/b2r2_global.h b/drivers/video/b2r2/b2r2_global.h
new file mode 100644
index 00000000000..38cf74bb753
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_global.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 global definitions
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_GLOBAL_H
+#define __B2R2_GLOBAL_H
+
+/** Sources involved */
+
+struct b2r2_system {
+ unsigned int B2R2_NIP;
+ unsigned int B2R2_CIC;
+ unsigned int B2R2_INS;
+ unsigned int B2R2_ACK;
+};
+
+struct b2r2_target {
+ unsigned int B2R2_TBA;
+ unsigned int B2R2_TTY;
+ unsigned int B2R2_TXY;
+ unsigned int B2R2_TSZ;
+};
+
+struct b2r2_color_fill {
+ unsigned int B2R2_S1CF;
+ unsigned int B2R2_S2CF;
+};
+
+struct b2r2_src_config {
+ unsigned int B2R2_SBA;
+ unsigned int B2R2_STY;
+ unsigned int B2R2_SXY;
+ unsigned int B2R2_SSZ;
+};
+
+struct b2r2_clip {
+ unsigned int B2R2_CWO;
+ unsigned int B2R2_CWS;
+};
+
+struct b2r2_color_key {
+ unsigned int B2R2_KEY1;
+ unsigned int B2R2_KEY2;
+};
+
+struct b2r2_clut {
+ unsigned int B2R2_CCO;
+ unsigned int B2R2_CML;
+};
+
+struct b2r2_rsz_pl_mask {
+ unsigned int B2R2_FCTL;
+ unsigned int B2R2_PMK;
+};
+
+struct b2r2_Cr_luma_rsz {
+ unsigned int B2R2_RSF;
+ unsigned int B2R2_RZI;
+ unsigned int B2R2_HFP;
+ unsigned int B2R2_VFP;
+};
+
+struct b2r2_flikr_filter {
+ unsigned int B2R2_FF0;
+ unsigned int B2R2_FF1;
+ unsigned int B2R2_FF2;
+ unsigned int B2R2_FF3;
+};
+
+struct b2r2_xyl {
+ unsigned int B2R2_XYL;
+ unsigned int B2R2_XYP;
+};
+
+struct b2r2_sau {
+ unsigned int B2R2_SAR;
+ unsigned int B2R2_USR;
+};
+
+struct b2r2_vm {
+ unsigned int B2R2_VMX0;
+ unsigned int B2R2_VMX1;
+ unsigned int B2R2_VMX2;
+ unsigned int B2R2_VMX3;
+};
+
+struct b2r2_link_list {
+
+ struct b2r2_system GROUP0;
+ struct b2r2_target GROUP1;
+ struct b2r2_color_fill GROUP2;
+ struct b2r2_src_config GROUP3;
+ struct b2r2_src_config GROUP4;
+ struct b2r2_src_config GROUP5;
+ struct b2r2_clip GROUP6;
+ struct b2r2_clut GROUP7;
+ struct b2r2_rsz_pl_mask GROUP8;
+ struct b2r2_Cr_luma_rsz GROUP9;
+ struct b2r2_Cr_luma_rsz GROUP10;
+ struct b2r2_flikr_filter GROUP11;
+ struct b2r2_color_key GROUP12;
+ struct b2r2_xyl GROUP13;
+ struct b2r2_sau GROUP14;
+ struct b2r2_vm GROUP15;
+ struct b2r2_vm GROUP16;
+
+ unsigned int B2R2_RESERVED[2];
+};
+
+
+#endif /* !defined(__B2R2_GLOBAL_H) */
diff --git a/drivers/video/b2r2/b2r2_hw.h b/drivers/video/b2r2/b2r2_hw.h
new file mode 100644
index 00000000000..d492168913a
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_hw.h
@@ -0,0 +1,707 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 hw definitions
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef B2R2_HW_H__
+#define B2R2_HW_H__
+
+#include <linux/bitops.h>
+
+/* Scaling works in strips 128 pixels wide */
+#define B2R2_RESCALE_MAX_WIDTH 128
+
+/* Rotation works in strips 16 pixels wide */
+#define B2R2_ROTATE_MAX_WIDTH 16
+
+/* B2R2 color formats */
+#define B2R2_COLOR_FORMAT_SHIFT 16
+enum b2r2_native_fmt {
+ /* RGB formats */
+ B2R2_NATIVE_RGB565 = 0x00 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_RGB888 = 0x01 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB8565 = 0x04 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB8888 = 0x05 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB1555 = 0x06 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB4444 = 0x07 << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* YCbCr formats */
+ B2R2_NATIVE_YCBCR888 = 0x10 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR422R = 0x12 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_AYCBCR8888 = 0x15 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_MB = 0x14 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_R2B = 0x16 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_MBN = 0x0e << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* CLUT formats */
+ B2R2_NATIVE_CLUT2 = 0x09 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_CLUT8 = 0x0b << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ACLUT44 = 0x0c << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ACLUT88 = 0x0d << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* Misc. formats */
+ B2R2_NATIVE_A1 = 0x18 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_A8 = 0x19 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YUV = 0x1e << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_BYTE = 0x1f << B2R2_COLOR_FORMAT_SHIFT,
+};
+
+/* B2R2_CIC register values */
+enum b2r2_cic {
+ B2R2_CIC_COLOR_FILL = BIT(1),/*0x00000002*/
+ B2R2_CIC_SOURCE_1 = BIT(2),/*0x00000004*/
+ B2R2_CIC_SOURCE_2 = BIT(3),/*0x00000008*/
+ B2R2_CIC_SOURCE_3 = BIT(4),/*0x00000010*/
+ B2R2_CIC_CLIP_WINDOW = BIT(5),/*0x00000020*/
+ B2R2_CIC_CLUT = BIT(6),/*0x00000040*/
+ B2R2_CIC_FILTER_CONTROL = BIT(7),/*0x00000080*/
+ B2R2_CIC_RESIZE_CHROMA = BIT(8),/*0x00000100*/
+ B2R2_CIC_RESIZE_LUMA = BIT(9),/*0x00000200*/
+ B2R2_CIC_FLICKER_COEFF = BIT(10),/*0x00000400*/
+ B2R2_CIC_COLOR_KEY = BIT(11),/*0x00000800*/
+ B2R2_CIC_XYL = BIT(12),/*0x00001000*/
+ B2R2_CIC_SAU = BIT(13),/*0x00002000*/
+ B2R2_CIC_IVMX = BIT(14),/*0x00004000*/
+ B2R2_CIC_OVMX = BIT(15),/*0x00008000*/
+ B2R2_CIC_PACEDOT = BIT(16),/*0x00010000*/
+ B2R2_CIC_VC1 = BIT(17)/*0x00020000*/
+};
+
+/* B2R2_INS register values */
+#define B2R2_INS_SOURCE_1_SHIFT 0
+#define B2R2_INS_SOURCE_2_SHIFT 3
+#define B2R2_INS_SOURCE_3_SHIFT 5
+#define B2R2_INS_IVMX_SHIFT 6
+#define B2R2_INS_CLUTOP_SHIFT 7
+#define B2R2_INS_RESCALE2D_SHIFT 8
+#define B2R2_INS_FLICK_FILT_SHIFT 9
+#define B2R2_INS_RECT_CLIP_SHIFT 10
+#define B2R2_INS_CKEY_SHIFT 11
+#define B2R2_INS_OVMX_SHIFT 12
+#define B2R2_INS_DEI_SHIFT 13
+#define B2R2_INS_PLANE_MASK_SHIFT 14
+#define B2R2_INS_XYL_SHIFT 15
+#define B2R2_INS_DOT_SHIFT 16
+#define B2R2_INS_VC1R_SHIFT 17
+#define B2R2_INS_ROTATION_SHIFT 18
+#define B2R2_INS_PACE_DOWN_SHIFT 30
+#define B2R2_INS_BLITCOMPIRQ_SHIFT 31
+enum b2r2_ins {
+ /* Source 1 config */
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_COLOR_FILL_REGISTER = 0x3 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_DIRECT_COPY = 0x4 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_DIRECT_FILL = 0x7 << B2R2_INS_SOURCE_1_SHIFT,
+
+ /* Source 2 config */
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_2_SHIFT,
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER = 0x3 << B2R2_INS_SOURCE_2_SHIFT,
+
+ /* Source 3 config */
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_3_SHIFT,
+
+ /* Other configs */
+ B2R2_INS_IVMX_ENABLED = 0x1 << B2R2_INS_IVMX_SHIFT,
+ B2R2_INS_CLUTOP_ENABLED = 0x1 << B2R2_INS_CLUTOP_SHIFT,
+ B2R2_INS_RESCALE2D_ENABLED = 0x1 << B2R2_INS_RESCALE2D_SHIFT,
+ B2R2_INS_FLICK_FILT_ENABLED = 0x1 << B2R2_INS_FLICK_FILT_SHIFT,
+ B2R2_INS_RECT_CLIP_ENABLED = 0x1 << B2R2_INS_RECT_CLIP_SHIFT,
+ B2R2_INS_CKEY_ENABLED = 0x1 << B2R2_INS_CKEY_SHIFT,
+ B2R2_INS_OVMX_ENABLED = 0x1 << B2R2_INS_OVMX_SHIFT,
+ B2R2_INS_DEI_ENABLED = 0x1 << B2R2_INS_DEI_SHIFT,
+ B2R2_INS_PLANE_MASK_ENABLED = 0x1 << B2R2_INS_PLANE_MASK_SHIFT,
+ B2R2_INS_XYL_ENABLED = 0x1 << B2R2_INS_XYL_SHIFT,
+ B2R2_INS_DOT_ENABLED = 0x1 << B2R2_INS_DOT_SHIFT,
+ B2R2_INS_VC1R_ENABLED = 0x1 << B2R2_INS_VC1R_SHIFT,
+ B2R2_INS_ROTATION_ENABLED = 0x1 << B2R2_INS_ROTATION_SHIFT,
+ B2R2_INS_PACE_DOWN_ENABLED = 0x1 << B2R2_INS_PACE_DOWN_SHIFT,
+ B2R2_INS_BLITCOMPIRQ_ENABLED = 0x1 << B2R2_INS_BLITCOMPIRQ_SHIFT,
+
+};
+
+/* B2R2_ACK register values */
+#define B2R2_ACK_MODE_SHIFT 0
+#define B2R2_ACK_SWAP_FG_BG_SHIFT 4
+#define B2R2_ACK_GALPHA_ROPID_SHIFT 8
+#define B2R2_ACK_CKEY_BLUE_SHIFT 16
+#define B2R2_ACK_CKEY_GREEN_SHIFT 18
+#define B2R2_ACK_CKEY_RED_SHIFT 20
+#define B2R2_ACK_CKEY_SEL_SHIFT 22
+enum b2r2_ack {
+ /* ALU operation modes */
+ B2R2_ACK_MODE_LOGICAL_OPERATION = 0x1 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BLEND_NOT_PREMULT = 0x2 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BLEND_PREMULT = 0x3 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_LOGICAL_FIRST_PASS = 0x4 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_BLEND = 0x5 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BYPASS_S2_S3 = 0x7 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_LOGICAL_SECOND_PASS = 0x8 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_LOGICAL = 0x9 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_BLEND_NOT_PREMULT =
+ 0xa << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_BLEND_PREMULT = 0xb << B2R2_ACK_MODE_SHIFT,
+
+ /* ALU channel selection */
+ B2R2_ACK_SWAP_FG_BG = 0x1 << B2R2_ACK_SWAP_FG_BG_SHIFT,
+
+ /* Global alpha and ROP IDs */
+ B2R2_ACK_ROP_CLEAR = 0x0 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND = 0x1 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND_REV = 0x2 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_COPY = 0x3 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND_INV = 0x4 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NOOP = 0x5 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_XOR = 0x6 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR = 0x7 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NOR = 0x8 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_EQUIV = 0x9 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_INVERT = 0xa << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR_REV = 0xb << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_COPY_INV = 0xc << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR_INV = 0xd << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NAND = 0xe << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_SET = 0xf << B2R2_ACK_GALPHA_ROPID_SHIFT,
+
+ /* Color key configuration bits */
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_BLUE_SHIFT,
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_BLUE_SHIFT,
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_GREEN_SHIFT,
+ B2R2_ACK_CKEY_RED_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_GREEN_SHIFT,
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_RED_SHIFT,
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_RED_SHIFT,
+
+ /* Color key input selection */
+ B2R2_ACK_CKEY_SEL_DEST = 0x0 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_SRC_BEFORE_CLUT = 0x1 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT = 0x2 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_BLANKING_S2_ALPHA = 0x3 << B2R2_ACK_CKEY_SEL_SHIFT,
+};
+
+/* Common <S/T>TY defines */
+#define B2R2_TY_BITMAP_PITCH_SHIFT 0
+#define B2R2_TY_COLOR_FORM_SHIFT 16
+#define B2R2_TY_ALPHA_RANGE_SHIFT 21
+#define B2R2_TY_MB_ACCESS_MODE_SHIFT 23
+#define B2R2_TY_HSO_SHIFT 24
+#define B2R2_TY_VSO_SHIFT 25
+#define B2R2_TY_SUBBYTE_SHIFT 28
+#define B2R2_TY_ENDIAN_SHIFT 30
+#define B2R2_TY_SECURE_SHIFT 31
+
+/* Dummy enum for generalization of <S/T>TY registers */
+enum b2r2_ty {
+ /* Alpha range */
+ B2R2_TY_ALPHA_RANGE_128 = 0x0 << B2R2_TY_ALPHA_RANGE_SHIFT,
+ B2R2_TY_ALPHA_RANGE_255 = 0x1 << B2R2_TY_ALPHA_RANGE_SHIFT,
+
+ /* Access mode in macro-block organized frame buffers */
+ B2R2_TY_MB_ACCESS_MODE_FRAME = 0x0 << B2R2_TY_MB_ACCESS_MODE_SHIFT,
+ B2R2_TY_MB_ACCESS_MODE_FIELD = 0x1 << B2R2_TY_MB_ACCESS_MODE_SHIFT,
+
+ /* Horizontal scan order */
+ B2R2_TY_HSO_LEFT_TO_RIGHT = 0x0 << B2R2_TY_HSO_SHIFT,
+ B2R2_TY_HSO_RIGHT_TO_LEFT = 0x1 << B2R2_TY_HSO_SHIFT,
+
+ /* Vertical scan order */
+ B2R2_TY_VSO_TOP_TO_BOTTOM = 0x0 << B2R2_TY_VSO_SHIFT,
+ B2R2_TY_VSO_BOTTOM_TO_TOP = 0x1 << B2R2_TY_VSO_SHIFT,
+
+ /* Pixel ordering for sub-byte formats (position of right-most pixel) */
+ B2R2_TY_SUBBYTE_MSB = 0x0 << B2R2_TY_SUBBYTE_SHIFT,
+ B2R2_TY_SUBBYTE_LSB = 0x1 << B2R2_TY_SUBBYTE_SHIFT,
+
+ /* Bitmap endianness */
+ B2R2_TY_ENDIAN_BIG_NOT_LITTLE = 0x1 << B2R2_TY_ENDIAN_SHIFT,
+
+ /* Secureness of the target memory region */
+ B2R2_TY_SECURE_UNSECURE = 0x0 << B2R2_TY_SECURE_SHIFT,
+ B2R2_TY_SECURE_SECURE = 0x1 << B2R2_TY_SECURE_SHIFT,
+
+ /* Dummy to make sure the data type is large enough */
+ B2R2_TY_DUMMY = 0xffffffff,
+};
+
+/* B2R2_TTY register values */
+#define B2R2_TTY_CB_NOT_CR_SHIFT 22
+#define B2R2_TTY_RGB_ROUND_SHIFT 26
+#define B2R2_TTY_CHROMA_NOT_LUMA_SHIFT 27
+enum b2r2_tty {
+
+ /* Chroma component selection */
+ B2R2_TTY_CB_NOT_CR = 0x1 << B2R2_TTY_CB_NOT_CR_SHIFT,
+
+ /* RGB rounding mode */
+ B2R2_TTY_RGB_ROUND_NORMAL = 0x0 << B2R2_TTY_RGB_ROUND_SHIFT,
+ B2R2_TTY_RGB_ROUND_DITHER = 0x1 << B2R2_TTY_RGB_ROUND_SHIFT,
+
+ /* Component selection for split frame buffer formats */
+ B2R2_TTY_CHROMA_NOT_LUMA = 0x1 << B2R2_TTY_CHROMA_NOT_LUMA_SHIFT,
+};
+
+/* B2R2_S1TY register values */
+#define B2R2_S1TY_A1_SUBST_SHIFT 22
+#define B2R2_S1TY_ROTATION_SHIFT 27
+#define B2R2_S1TY_RGB_EXPANSION_SHIFT 29
+enum b2r2_s1ty {
+
+ /* Alpha bit substitution mode for ARGB1555 */
+ B2R2_S1TY_A1_SUBST_KEY_MODE = 0x1 << B2R2_S1TY_A1_SUBST_SHIFT,
+
+ /* Input rectangle rotation (NOT YET IMPLEMENTED) */
+ B2R2_S1TY_ENABLE_ROTATION = 0x1 << B2R2_S1TY_ROTATION_SHIFT,
+
+ /* RGB expansion mode */
+ B2R2_S1TY_RGB_EXPANSION_MSB_DUP = 0x0 << B2R2_S1TY_RGB_EXPANSION_SHIFT,
+ B2R2_S1TY_RGB_EXPANSION_LSP_ZERO = 0x1 << B2R2_S1TY_RGB_EXPANSION_SHIFT,
+};
+
+/* B2R2_S2TY register values */
+#define B2R2_S2TY_A1_SUBST_SHIFT 22
+#define B2R2_S2TY_CHROMA_LEFT_SHIFT 26
+#define B2R2_S2TY_RGB_EXPANSION_SHIFT 29
+enum b2r2_s2ty {
+
+ /* Alpha bit substitution mode for ARGB1555 */
+ B2R2_S2TY_A1_SUBST_KEY_MODE = 0x1 << B2R2_S2TY_A1_SUBST_SHIFT,
+
+ /* Chroma left extension */
+ B2R2_S2TY_CHROMA_LEFT_EXT_FOLLOWING_PIXEL = 0x0
+ << B2R2_S2TY_CHROMA_LEFT_SHIFT,
+ B2R2_S2TY_CHROMA_LEFT_EXT_AVERAGE = 0x1 << B2R2_S2TY_CHROMA_LEFT_SHIFT,
+
+ /* RGB expansion mode */
+ B2R2_S2TY_RGB_EXPANSION_MSB_DUP = 0x0 << B2R2_S2TY_RGB_EXPANSION_SHIFT,
+ B2R2_S2TY_RGB_EXPANSION_LSP_ZERO = 0x1 << B2R2_S2TY_RGB_EXPANSION_SHIFT,
+};
+
+/* B2R2_S3TY register values */
+#define B2R2_S3TY_BLANK_ACC_SHIFT 26
+enum b2r2_s3ty {
+ /*
+ * Enables "blank" access on this source (nothing will be
+ * fetched from memory)
+ */
+ B2R2_S3TY_ENABLE_BLANK_ACCESS = 0x1 << B2R2_S3TY_BLANK_ACC_SHIFT,
+};
+
+/* B2R2_<S or T>XY register values */
+#define B2R2_XY_X_SHIFT 0
+#define B2R2_XY_Y_SHIFT 16
+
+/* B2R2_<S or T>SZ register values */
+#define B2R2_SZ_WIDTH_SHIFT 0
+#define B2R2_SZ_HEIGHT_SHIFT 16
+
+/* Clip window offset (top left coordinates) */
+#define B2R2_CWO_X_SHIFT 0
+#define B2R2_CWO_Y_SHIFT 16
+
+/* Clip window stop (bottom right coordinates) */
+#define B2R2_CWS_X_SHIFT 0
+#define B2R2_CWS_Y_SHIFT 16
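+
+/*
+ * Illustrative sketch (not part of the original driver): the XY, SZ and
+ * clip-window registers all pack two 16-bit fields, with x/width in the
+ * low half-word and y/height in the high half-word.
+ */
+static inline u32 b2r2_example_pack_xy(u16 x, u16 y)
+{
+ return ((u32)x << B2R2_XY_X_SHIFT) | ((u32)y << B2R2_XY_Y_SHIFT);
+}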
+
+/* Color look-up table */
+enum b2r2_cco {
+ B2R2_CCO_CLUT_COLOR_CORRECTION = (1 << 16),
+ B2R2_CCO_CLUT_UPDATE = (1 << 18),
+ B2R2_CCO_CLUT_ON_S1 = (1 << 15)
+};
+
+/* Filter control (2D resize control) */
+enum b2r2_fctl {
+ /* Horizontal 2D filter mode */
+ B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER = BIT(0),
+ B2R2_FCTL_HF2D_MODE_ENABLE_ALPHA_CHANNEL_FILTER = BIT(1),
+ B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER = BIT(2),
+
+ /* Vertical 2D filter mode */
+ B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER = BIT(4),
+ B2R2_FCTL_VF2D_MODE_ENABLE_ALPHA_CHANNEL_FILTER = BIT(5),
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER = BIT(6),
+
+ /* Alpha borders */
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_RIGHT = BIT(12),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_LEFT = BIT(13),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_BOTTOM = BIT(14),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_TOP = BIT(15),
+
+ /* Luma path horizontal 2D filter mode */
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER = BIT(24),
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER = BIT(25),
+
+ /* Luma path vertical 2D filter mode */
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER = BIT(28),
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER = BIT(29),
+};
+
+/* Resize scaling factor */
+#define B2R2_RSF_HSRC_INC_SHIFT 0
+#define B2R2_RSF_VSRC_INC_SHIFT 16
+
+/* Resizer initialization */
+#define B2R2_RZI_HSRC_INIT_SHIFT 0
+#define B2R2_RZI_HNB_REPEAT_SHIFT 12
+#define B2R2_RZI_VSRC_INIT_SHIFT 16
+#define B2R2_RZI_VNB_REPEAT_SHIFT 28
+
+/* Default values for the resizer */
+#define B2R2_RZI_DEFAULT_HNB_REPEAT (3 << B2R2_RZI_HNB_REPEAT_SHIFT)
+#define B2R2_RZI_DEFAULT_VNB_REPEAT (3 << B2R2_RZI_VNB_REPEAT_SHIFT)
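+
+/*
+ * Sketch, based on the assumption (not stated in this header) that the
+ * RSF source increments are 6.10 fixed point, i.e. 0x400 means a 1:1
+ * scale and a 2:1 downscale would be 0x800. The helper name is
+ * hypothetical.
+ */
+static inline u32 b2r2_example_rsf(u32 src_w, u32 dst_w, u32 src_h, u32 dst_h)
+{
+ u32 hsrc_inc = (src_w << 10) / dst_w;
+ u32 vsrc_inc = (src_h << 10) / dst_h;
+
+ return (hsrc_inc << B2R2_RSF_HSRC_INC_SHIFT) |
+ (vsrc_inc << B2R2_RSF_VSRC_INC_SHIFT);
+}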
+
+
+/* Bus plug configuration registers */
+enum b2r2_plug_opcode_size {
+ B2R2_PLUG_OPCODE_SIZE_8 = 0x3,
+ B2R2_PLUG_OPCODE_SIZE_16 = 0x4,
+ B2R2_PLUG_OPCODE_SIZE_32 = 0x5,
+ B2R2_PLUG_OPCODE_SIZE_64 = 0x6,
+};
+
+enum b2r2_plug_chunk_size {
+ B2R2_PLUG_CHUNK_SIZE_1 = 0x0,
+ B2R2_PLUG_CHUNK_SIZE_2 = 0x1,
+ B2R2_PLUG_CHUNK_SIZE_4 = 0x2,
+ B2R2_PLUG_CHUNK_SIZE_8 = 0x3,
+ B2R2_PLUG_CHUNK_SIZE_16 = 0x4,
+ B2R2_PLUG_CHUNK_SIZE_32 = 0x5,
+ B2R2_PLUG_CHUNK_SIZE_64 = 0x6,
+ B2R2_PLUG_CHUNK_SIZE_128 = 0x7,
+};
+
+enum b2r2_plug_message_size {
+ B2R2_PLUG_MESSAGE_SIZE_1 = 0x0,
+ B2R2_PLUG_MESSAGE_SIZE_2 = 0x1,
+ B2R2_PLUG_MESSAGE_SIZE_4 = 0x2,
+ B2R2_PLUG_MESSAGE_SIZE_8 = 0x3,
+ B2R2_PLUG_MESSAGE_SIZE_16 = 0x4,
+ B2R2_PLUG_MESSAGE_SIZE_32 = 0x5,
+ B2R2_PLUG_MESSAGE_SIZE_64 = 0x6,
+ B2R2_PLUG_MESSAGE_SIZE_128 = 0x7,
+};
+
+enum b2r2_plug_page_size {
+ B2R2_PLUG_PAGE_SIZE_64 = 0x0,
+ B2R2_PLUG_PAGE_SIZE_128 = 0x1,
+ B2R2_PLUG_PAGE_SIZE_256 = 0x2,
+};
+
+/* Default opcode size */
+#if defined(CONFIG_B2R2_OPSIZE_8)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_8
+#elif defined(CONFIG_B2R2_OPSIZE_16)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_16
+#elif defined(CONFIG_B2R2_OPSIZE_32)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_32
+#elif defined(CONFIG_B2R2_OPSIZE_64)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_64
+#else
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT 0
+#endif
+
+/* Default chunk size */
+#if defined(CONFIG_B2R2_CHSIZE_1)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_1
+#elif defined(CONFIG_B2R2_CHSIZE_2)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_2
+#elif defined(CONFIG_B2R2_CHSIZE_4)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_4
+#elif defined(CONFIG_B2R2_CHSIZE_8)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_8
+#elif defined(CONFIG_B2R2_CHSIZE_16)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_16
+#elif defined(CONFIG_B2R2_CHSIZE_32)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_32
+#elif defined(CONFIG_B2R2_CHSIZE_64)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_64
+#elif defined(CONFIG_B2R2_CHSIZE_128)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_128
+#else
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT 0
+#endif
+
+/* Default message size */
+#if defined(CONFIG_B2R2_MGSIZE_1)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_1
+#elif defined(CONFIG_B2R2_MGSIZE_2)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_2
+#elif defined(CONFIG_B2R2_MGSIZE_4)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_4
+#elif defined(CONFIG_B2R2_MGSIZE_8)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_8
+#elif defined(CONFIG_B2R2_MGSIZE_16)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_16
+#elif defined(CONFIG_B2R2_MGSIZE_32)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_32
+#elif defined(CONFIG_B2R2_MGSIZE_64)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_64
+#elif defined(CONFIG_B2R2_MGSIZE_128)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_128
+#else
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT 0
+#endif
+
+/* Default page size */
+#if defined(CONFIG_B2R2_PGSIZE_64)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_64
+#elif defined(CONFIG_B2R2_PGSIZE_128)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_128
+#elif defined(CONFIG_B2R2_PGSIZE_256)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_256
+#else
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT 0
+#endif
+
+/* VMX register values for RGB to YUV color conversion */
+/* Magic numbers from 27.11 in DB8500_DesignSpecification_v2.5.pdf */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_601_VIDEO 0x107e4beb
+#define B2R2_VMX1_RGB_TO_YUV_601_VIDEO 0x0982581d
+#define B2R2_VMX2_RGB_TO_YUV_601_VIDEO 0xfa9ea483
+#define B2R2_VMX3_RGB_TO_YUV_601_VIDEO 0x08000080
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_601_GFX 0x0e1e8bee
+#define B2R2_VMX1_RGB_TO_YUV_601_GFX 0x08420419
+#define B2R2_VMX2_RGB_TO_YUV_601_GFX 0xfb5ed471
+#define B2R2_VMX3_RGB_TO_YUV_601_GFX 0x08004080
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_709_VIDEO 0x107e27f4
+#define B2R2_VMX1_RGB_TO_YUV_709_VIDEO 0x06e2dc13
+#define B2R2_VMX2_RGB_TO_YUV_709_VIDEO 0xfc5e6c83
+#define B2R2_VMX3_RGB_TO_YUV_709_VIDEO 0x08000080
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_709_GFX 0x0e3e6bf5
+#define B2R2_VMX1_RGB_TO_YUV_709_GFX 0x05e27410
+#define B2R2_VMX2_RGB_TO_YUV_709_GFX 0xfcdea471
+#define B2R2_VMX3_RGB_TO_YUV_709_GFX 0x08004080
+
+/* VMX register values for YUV to RGB color conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_601_VIDEO 0x2c440000
+#define B2R2_VMX1_YUV_TO_RGB_601_VIDEO 0xe9a403aa
+#define B2R2_VMX2_YUV_TO_RGB_601_VIDEO 0x0004013f
+#define B2R2_VMX3_YUV_TO_RGB_601_VIDEO 0x34f21322
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_601_GFX 0x3324a800
+#define B2R2_VMX1_YUV_TO_RGB_601_GFX 0xe604ab9c
+#define B2R2_VMX2_YUV_TO_RGB_601_GFX 0x0004a957
+#define B2R2_VMX3_YUV_TO_RGB_601_GFX 0x32121eeb
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_709_VIDEO 0x31440000
+#define B2R2_VMX1_YUV_TO_RGB_709_VIDEO 0xf16403d1
+#define B2R2_VMX2_YUV_TO_RGB_709_VIDEO 0x00040145
+#define B2R2_VMX3_YUV_TO_RGB_709_VIDEO 0x33b14b18
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_709_GFX 0x3964a800
+#define B2R2_VMX1_YUV_TO_RGB_709_GFX 0xef04abc9
+#define B2R2_VMX2_YUV_TO_RGB_709_GFX 0x0004a95f
+#define B2R2_VMX3_YUV_TO_RGB_709_GFX 0x307132df
+
+/* VMX register values for RGB to BGR conversion */
+#define B2R2_VMX0_RGB_TO_BGR 0x00000100
+#define B2R2_VMX1_RGB_TO_BGR 0x00040000
+#define B2R2_VMX2_RGB_TO_BGR 0x20000000
+#define B2R2_VMX3_RGB_TO_BGR 0x00000000
+
+/* VMX register values for BGR to YUV color conversion */
+/* Note: All BGR -> YUV values are calculated by multiplying
+ * the RGB -> YUV matrices [A], with [S] to form [A]x[S] where
+ * |0 0 1|
+ * S = |0 1 0|
+ * |1 0 0|
+ * Essentially swapping first and third columns in
+ * the matrices (VMX0, VMX1 and VMX2 values).
+ * The offset vector VMX3 remains untouched.
+ * Put another way, the value of bits 0 through 9
+ * is swapped with the value of
+ * bits 20 through 31 in VMX0, VMX1 and VMX2,
+ * taking into consideration the compression
+ * that is used on bits 0 through 9. Bit 0 being LSB.
+ */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_601_VIDEO 0xfd7e4883
+#define B2R2_VMX1_BGR_TO_YUV_601_VIDEO 0x03a2584c
+#define B2R2_VMX2_BGR_TO_YUV_601_VIDEO 0x107ea7d4
+#define B2R2_VMX3_BGR_TO_YUV_601_VIDEO 0x08000080
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_601_GFX 0xfdde8870
+#define B2R2_VMX1_BGR_TO_YUV_601_GFX 0x03220442
+#define B2R2_VMX2_BGR_TO_YUV_601_GFX 0x0e3ed7da
+#define B2R2_VMX3_BGR_TO_YUV_601_GFX 0x08004080
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_709_VIDEO 0xfe9e2483
+#define B2R2_VMX1_BGR_TO_YUV_709_VIDEO 0x0262dc37
+#define B2R2_VMX2_BGR_TO_YUV_709_VIDEO 0x107e6fe2
+#define B2R2_VMX3_BGR_TO_YUV_709_VIDEO 0x08000080
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_709_GFX 0xfebe6871
+#define B2R2_VMX1_BGR_TO_YUV_709_GFX 0x0202742f
+#define B2R2_VMX2_BGR_TO_YUV_709_GFX 0x0e3ea7e6
+#define B2R2_VMX3_BGR_TO_YUV_709_GFX 0x08004080
+
+
+/* VMX register values for YUV to BGR conversion */
+/* Note: All YUV -> BGR values are constructed
+ * from the YUV -> RGB ones, by swapping
+ * first and third rows in the matrix
+ * (VMX0 and VMX2 values). Further, the first and
+ * third values in the offset vector need to be
+ * swapped as well, i.e. bits 0 through 9 are swapped
+ * with bits 20 through 29 in the VMX3 value.
+ * Bit 0 being LSB.
+ */
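+
+/*
+ * Worked sketch of the VMX3 swap described above (illustrative only,
+ * not used by the driver): bits 0..9 and 20..29 of the offset vector
+ * trade places while the remaining bits stay put. Applying this to
+ * 0x34f21322 (YUV->RGB 601 video) gives 0x3222134f (YUV->BGR 601
+ * video).
+ */
+static inline u32 b2r2_example_swap_vmx3(u32 vmx3)
+{
+ u32 lo = vmx3 & 0x3ff; /* bits 0..9 */
+ u32 hi = (vmx3 >> 20) & 0x3ff; /* bits 20..29 */
+
+ return (vmx3 & 0xc00ffc00) | (lo << 20) | hi;
+}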
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_601_VIDEO 0x0004013f
+#define B2R2_VMX1_YUV_TO_BGR_601_VIDEO 0xe9a403aa
+#define B2R2_VMX2_YUV_TO_BGR_601_VIDEO 0x2c440000
+#define B2R2_VMX3_YUV_TO_BGR_601_VIDEO 0x3222134f
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_601_GFX 0x0004a957
+#define B2R2_VMX1_YUV_TO_BGR_601_GFX 0xe604ab9c
+#define B2R2_VMX2_YUV_TO_BGR_601_GFX 0x3324a800
+#define B2R2_VMX3_YUV_TO_BGR_601_GFX 0x2eb21f21
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_709_VIDEO 0x00040145
+#define B2R2_VMX1_YUV_TO_BGR_709_VIDEO 0xf16403d1
+#define B2R2_VMX2_YUV_TO_BGR_709_VIDEO 0x31440000
+#define B2R2_VMX3_YUV_TO_BGR_709_VIDEO 0x31814b3b
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_709_GFX 0x0004a95f
+#define B2R2_VMX1_YUV_TO_BGR_709_GFX 0xef04abc9
+#define B2R2_VMX2_YUV_TO_BGR_709_GFX 0x3964a800
+#define B2R2_VMX3_YUV_TO_BGR_709_GFX 0x2df13307
+
+
+/* VMX register values for YVU to RGB conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YVU_TO_RGB_601_VIDEO 0x00040120
+#define B2R2_VMX1_YVU_TO_RGB_601_VIDEO 0xF544034D
+#define B2R2_VMX2_YVU_TO_RGB_601_VIDEO 0x37840000
+#define B2R2_VMX3_YVU_TO_RGB_601_VIDEO 0x34f21322
+
+/* VMX register values for RGB to YVU conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_RGB_TO_YVU_601_VIDEO 0xfa9ea483
+#define B2R2_VMX1_RGB_TO_YVU_601_VIDEO 0x0982581d
+#define B2R2_VMX2_RGB_TO_YVU_601_VIDEO 0x107e4beb
+#define B2R2_VMX3_RGB_TO_YVU_601_VIDEO 0x08000080
+
+/* VMX register values for YVU to BGR conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YVU_TO_BGR_601_VIDEO 0x37840000
+#define B2R2_VMX1_YVU_TO_BGR_601_VIDEO 0xF544034D
+#define B2R2_VMX2_YVU_TO_BGR_601_VIDEO 0x00040120
+#define B2R2_VMX3_YVU_TO_BGR_601_VIDEO 0x3222134F
+
+/* VMX register values for BGR to YVU conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_BGR_TO_YVU_601_VIDEO 0x107ea7d4
+#define B2R2_VMX1_BGR_TO_YVU_601_VIDEO 0x03a2584c
+#define B2R2_VMX2_BGR_TO_YVU_601_VIDEO 0xfd7e4883
+#define B2R2_VMX3_BGR_TO_YVU_601_VIDEO 0x08000080
+
+/* VMX register values for YVU to YUV conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+/* Internally, the components are in fact stored
+ * with luma in the middle, i.e. UYV, which is why
+ * the values are just like for RGB->BGR conversion.
+ */
+#define B2R2_VMX0_YVU_TO_YUV_601_VIDEO 0x00000100
+#define B2R2_VMX1_YVU_TO_YUV_601_VIDEO 0x00040000
+#define B2R2_VMX2_YVU_TO_YUV_601_VIDEO 0x20000000
+#define B2R2_VMX3_YVU_TO_YUV_601_VIDEO 0x00000000
+
+/* VMX register values for RGB to BLT_YUV888 conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+/*
+ * BLT_YUV888 has color components laid out in memory as V, U, Y, (Alpha)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them to be as U, Y, V, (A)
+ * with U at the first byte.
+ * Note: RGB -> BLT_YUV888 values are calculated by multiplying
+ * the RGB -> YUV matrix [A], with [S] to form [S]x[A] where
+ * |0 1 0|
+ * S = |0 0 1|
+ * |1 0 0|
+ * Essentially changing the order of rows in the original
+ * matrix [A].
+ * row1 -> row3
+ * row2 -> row1
+ * row3 -> row2
+ * Values in the offset vector are swapped in the same manner.
+ */
+#define B2R2_VMX0_RGB_TO_BLT_YUV888_601_VIDEO 0x0982581d
+#define B2R2_VMX1_RGB_TO_BLT_YUV888_601_VIDEO 0xfa9ea483
+#define B2R2_VMX2_RGB_TO_BLT_YUV888_601_VIDEO 0x107e4beb
+#define B2R2_VMX3_RGB_TO_BLT_YUV888_601_VIDEO 0x00020080
+
+/* VMX register values for BLT_YUV888 to RGB conversion */
+
+/*
+ * Note: BLT_YUV888 -> RGB values are calculated by multiplying
+ * the YUV -> RGB matrix [A], with [S] to form [A]x[S] where
+ * |0 0 1|
+ * S = |1 0 0|
+ * |0 1 0|
+ * Essentially changing the order of columns in the original
+ * matrix [A].
+ * col1 -> col3
+ * col2 -> col1
+ * col3 -> col2
+ * Values in the offset vector remain unchanged.
+ */
+#define B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO 0x20000121
+#define B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO 0x201ea74c
+#define B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO 0x2006f000
+#define B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO 0x34f21322
+
+/* VMX register values for YUV to BLT_YUV888 conversion */
+#define B2R2_VMX0_YUV_TO_BLT_YUV888 0x00040000
+#define B2R2_VMX1_YUV_TO_BLT_YUV888 0x00000100
+#define B2R2_VMX2_YUV_TO_BLT_YUV888 0x20000000
+#define B2R2_VMX3_YUV_TO_BLT_YUV888 0x00000000
+
+/* VMX register values for BLT_YUV888 to YUV conversion */
+#define B2R2_VMX0_BLT_YUV888_TO_YUV 0x00000100
+#define B2R2_VMX1_BLT_YUV888_TO_YUV 0x20000000
+#define B2R2_VMX2_BLT_YUV888_TO_YUV 0x00040000
+#define B2R2_VMX3_BLT_YUV888_TO_YUV 0x00000000
+
+/* VMX register values for YVU to BLT_YUV888 conversion */
+#define B2R2_VMX0_YVU_TO_BLT_YUV888 0x00040000
+#define B2R2_VMX1_YVU_TO_BLT_YUV888 0x20000000
+#define B2R2_VMX2_YVU_TO_BLT_YUV888 0x00000100
+#define B2R2_VMX3_YVU_TO_BLT_YUV888 0x00000000
+
+/* VMX register values for BLT_YUV888 to YVU conversion */
+#define B2R2_VMX0_BLT_YUV888_TO_YVU 0x00040000
+#define B2R2_VMX1_BLT_YUV888_TO_YVU 0x20000000
+#define B2R2_VMX2_BLT_YUV888_TO_YVU 0x00000100
+#define B2R2_VMX3_BLT_YUV888_TO_YVU 0x00000000
+
+#endif /* B2R2_HW_H__ */
diff --git a/drivers/video/b2r2/b2r2_input_validation.c b/drivers/video/b2r2/b2r2_input_validation.c
new file mode 100644
index 00000000000..ac8b5728847
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_input_validation.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include "b2r2_internal.h"
+#include "b2r2_input_validation.h"
+#include "b2r2_debug.h"
+#include "b2r2_utils.h"
+
+#include <video/b2r2_blt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+
+static bool is_valid_format(enum b2r2_blt_fmt fmt);
+static bool is_valid_bg_format(enum b2r2_blt_fmt fmt);
+
+static bool is_valid_pitch_for_fmt(struct b2r2_control *cont,
+ u32 pitch, s32 width, enum b2r2_blt_fmt fmt);
+
+static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt);
+static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt);
+static bool is_complete_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt);
+static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt);
+
+static bool validate_img(struct b2r2_control *cont,
+ struct b2r2_blt_img *img);
+static bool validate_rect(struct b2r2_control *cont,
+ struct b2r2_blt_rect *rect);
+
+
+static bool is_valid_format(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool is_valid_bg_format(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return false;
+ default:
+ return true;
+ }
+}
+
+
+static bool is_valid_pitch_for_fmt(struct b2r2_control *cont,
+ u32 pitch, s32 width, enum b2r2_blt_fmt fmt)
+{
+ s32 complete_width;
+ u32 pitch_derived_from_width;
+
+ complete_width = width_2_complete_width(width, fmt);
+
+ pitch_derived_from_width = b2r2_calc_pitch_from_width(cont,
+ complete_width, fmt);
+
+ if (pitch < pitch_derived_from_width)
+ return false;
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ if (!b2r2_is_aligned(pitch, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (!b2r2_is_aligned(pitch, 4))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+
+static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ if (!b2r2_is_aligned(width, 4))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_1_BIT_A1:
+ if (!b2r2_is_aligned(width, 8))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ if (!b2r2_is_aligned(width, 2))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return b2r2_align_up(width, 2);
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return b2r2_align_up(width, 16);
+
+ default:
+ return width;
+ }
+}
+
+static bool is_complete_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ if (!b2r2_is_aligned(width, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if (!b2r2_is_aligned(width, 16))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ if (!b2r2_is_aligned(height, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if (!b2r2_is_aligned(height, 16))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool validate_img(struct b2r2_control *cont,
+ struct b2r2_blt_img *img)
+{
+ /*
+ * Chosen so that width * height * bpp can never overflow a
+ * 32 bit signed integer. isqrt(s32_max / max_bpp) was used to
+ * calculate the value.
+ */
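+ /*
+ * Illustrative check of that bound: with max_bpp = 32,
+ * isqrt(2147483647 / 32) = isqrt(67108863) ~= 8191, and
+ * 8191 * 8191 * 32 = 2146959392, which still fits in an s32.
+ */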
+ static const s32 max_img_width_height = 8191;
+
+ s32 img_size;
+
+ if (!is_valid_format(img->fmt)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!is_valid_format(img->fmt)\n");
+ return false;
+ }
+
+ if (img->width < 0 || img->width > max_img_width_height ||
+ img->height < 0 || img->height > max_img_width_height) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "img->width < 0 || "
+ "img->width > max_img_width_height || "
+ "img->height < 0 || "
+ "img->height > max_img_width_height\n");
+ return false;
+ }
+
+ if (b2r2_is_mb_fmt(img->fmt)) {
+ if (!is_complete_width_for_fmt(img->width, img->fmt)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!is_complete_width_for_fmt(img->width,"
+ " img->fmt)\n");
+ return false;
+ }
+ } else {
+ if (0 == img->pitch &&
+ (!is_aligned_width_for_fmt(img->width, img->fmt) ||
+ !is_complete_width_for_fmt(img->width, img->fmt))) {
+ b2r2_log_info(cont->dev,
+ "Validation Error: "
+ "0 == img->pitch && "
+ "(!is_aligned_width_for_fmt(img->width,"
+ " img->fmt) || "
+ "!is_complete_width_for_fmt(img->width,"
+ " img->fmt))\n");
+ return false;
+ }
+
+ if (img->pitch != 0 &&
+ !is_valid_pitch_for_fmt(cont, img->pitch, img->width,
+ img->fmt)) {
+ b2r2_log_info(cont->dev,
+ "Validation Error: "
+ "img->pitch != 0 && "
+ "!is_valid_pitch_for_fmt(cont, "
+ "img->pitch, img->width, img->fmt)\n");
+ return false;
+ }
+ }
+
+ if (!is_valid_height_for_fmt(img->height, img->fmt)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!is_valid_height_for_fmt(img->height, img->fmt)\n");
+ return false;
+ }
+
+ img_size = b2r2_get_img_size(cont, img);
+
+ /*
+ * To keep the entire image inside s32 range.
+ */
+ if ((B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type ||
+ B2R2_BLT_PTR_FD_OFFSET == img->buf.type) &&
+ img->buf.offset > (u32)b2r2_s32_max - (u32)img_size) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "(B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == "
+ "img->buf.type || B2R2_BLT_PTR_FD_OFFSET == "
+ "img->buf.type) && img->buf.offset > "
+ "(u32)b2r2_s32_max - (u32)img_size\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool validate_rect(struct b2r2_control *cont,
+ struct b2r2_blt_rect *rect)
+{
+ if (rect->width < 0 || rect->height < 0) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "rect->width < 0 || rect->height < 0\n");
+ return false;
+ }
+
+ return true;
+}
+
+bool b2r2_validate_user_req(struct b2r2_control *cont,
+ struct b2r2_blt_req *req)
+{
+ bool is_src_img_used;
+ bool is_bg_img_used;
+ bool is_src_mask_used;
+ bool is_dst_clip_rect_used;
+
+ if (req->size != sizeof(struct b2r2_blt_req)) {
+ b2r2_log_err(cont->dev, "Validation Error: "
+ "req->size != sizeof(struct b2r2_blt_req)\n");
+ return false;
+ }
+
+ is_src_img_used = !(req->flags & B2R2_BLT_FLAG_SOURCE_FILL ||
+ req->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW);
+ is_bg_img_used = (req->flags & B2R2_BLT_FLAG_BG_BLEND);
+ is_src_mask_used = req->flags & B2R2_BLT_FLAG_SOURCE_MASK;
+ is_dst_clip_rect_used = req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP;
+
+ if (is_src_img_used || is_src_mask_used) {
+ if (!validate_rect(cont, &req->src_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->src_rect)\n");
+ return false;
+ }
+ }
+
+ if (!validate_rect(cont, &req->dst_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->dst_rect)\n");
+ return false;
+ }
+
+ if (is_bg_img_used) {
+ if (!validate_rect(cont, &req->bg_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->bg_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_dst_clip_rect_used) {
+ if (!validate_rect(cont, &req->dst_clip_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->dst_clip_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_src_img_used) {
+ struct b2r2_blt_rect src_img_bounding_rect;
+
+ if (!validate_img(cont, &req->src_img)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->src_img)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->src_img,
+ &src_img_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->src_rect,
+ &src_img_bounding_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->src_rect, "
+ "&src_img_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_bg_img_used) {
+ struct b2r2_blt_rect bg_img_bounding_rect;
+
+ if (!validate_img(cont, &req->bg_img)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->bg_img)\n");
+ return false;
+ }
+
+ if (!is_valid_bg_format(req->bg_img.fmt)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!is_valid_bg_format(req->bg_img.fmt)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->bg_img,
+ &bg_img_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->bg_rect,
+ &bg_img_bounding_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->bg_rect, "
+ "&bg_img_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_src_mask_used) {
+ struct b2r2_blt_rect src_mask_bounding_rect;
+
+ if (!validate_img(cont, &req->src_mask)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->src_mask)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->src_mask,
+ &src_mask_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->src_rect,
+ &src_mask_bounding_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->src_rect, "
+ "&src_mask_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (!validate_img(cont, &req->dst_img)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->dst_img)\n");
+ return false;
+ }
+
+ if (is_bg_img_used) {
+ if (!b2r2_is_rect_gte_rect(&req->bg_rect, &req->dst_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!b2r2_is_rect_gte_rect(&req->bg_rect, "
+ "&req->dst_rect)\n");
+ return false;
+ }
+ }
+
+ return true;
+}
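+
+/*
+ * Usage sketch (not part of this file, surrounding names are
+ * hypothetical): the blt entry point is expected to reject a request
+ * up front, roughly as follows:
+ *
+ * if (!b2r2_validate_user_req(cont, &request->user_req))
+ * return -EINVAL;
+ */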
diff --git a/drivers/video/b2r2/b2r2_input_validation.h b/drivers/video/b2r2/b2r2_input_validation.h
new file mode 100644
index 00000000000..d3c6ae1b296
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_input_validation.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INPUT_VALIDATION_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_INPUT_VALIDATION_H_
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_internal.h"
+
+bool b2r2_validate_user_req(struct b2r2_control *cont,
+ struct b2r2_blt_req *req);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_internal.h b/drivers/video/b2r2/b2r2_internal.h
new file mode 100644
index 00000000000..7a46bbda19e
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_internal.h
@@ -0,0 +1,590 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal definitions
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <video/b2r2_blt.h>
+
+#include "b2r2_global.h"
+#include "b2r2_hw.h"
+
+/**
+ * B2R2_MAX_NBR_DEVICES - The maximum number of B2R2s handled
+ */
+#define B2R2_MAX_NBR_DEVICES 1
+
+/* The maximum possible number of temporary buffers needed */
+#define MAX_TMP_BUFS_NEEDED 2
+
+/* Size of the color look-up table */
+#define CLUT_SIZE 1024
+
+/**
+ * b2r2_op_type - the type of B2R2 operation to configure
+ */
+enum b2r2_op_type {
+ B2R2_DIRECT_COPY,
+ B2R2_DIRECT_FILL,
+ B2R2_COPY,
+ B2R2_FILL,
+ B2R2_SCALE,
+ B2R2_ROTATE,
+ B2R2_SCALE_AND_ROTATE,
+ B2R2_FLIP,
+};
+
+/**
+ * b2r2_fmt_type - the type of buffer for a given format
+ */
+enum b2r2_fmt_type {
+ B2R2_FMT_TYPE_RASTER,
+ B2R2_FMT_TYPE_SEMI_PLANAR,
+ B2R2_FMT_TYPE_PLANAR,
+};
+
+/**
+ * b2r2_fmt_conv - the type of format conversion to do
+ */
+enum b2r2_fmt_conv {
+ B2R2_FMT_CONV_NONE,
+ B2R2_FMT_CONV_RGB_TO_YUV,
+ B2R2_FMT_CONV_YUV_TO_RGB,
+ B2R2_FMT_CONV_YUV_TO_YUV,
+ B2R2_FMT_CONV_RGB_TO_BGR,
+ B2R2_FMT_CONV_BGR_TO_RGB,
+ B2R2_FMT_CONV_YUV_TO_BGR,
+ B2R2_FMT_CONV_BGR_TO_YUV,
+};
+
+/**
+ * enum b2r2_core_queue - Indicates the B2R2 queue that the job belongs to
+ *
+ * @B2R2_CORE_QUEUE_AQ1: Application queue 1
+ * @B2R2_CORE_QUEUE_AQ2: Application queue 2
+ * @B2R2_CORE_QUEUE_AQ3: Application queue 3
+ * @B2R2_CORE_QUEUE_AQ4: Application queue 4
+ * @B2R2_CORE_QUEUE_CQ1: Composition queue 1
+ * @B2R2_CORE_QUEUE_CQ2: Composition queue 2
+ * @B2R2_CORE_QUEUE_NO_OF: Number of queues
+ */
+enum b2r2_core_queue {
+ B2R2_CORE_QUEUE_AQ1 = 0,
+ B2R2_CORE_QUEUE_AQ2,
+ B2R2_CORE_QUEUE_AQ3,
+ B2R2_CORE_QUEUE_AQ4,
+ B2R2_CORE_QUEUE_CQ1,
+ B2R2_CORE_QUEUE_CQ2,
+ B2R2_CORE_QUEUE_NO_OF,
+};
+
+#define B2R2_NUM_APPLICATIONS_QUEUES 4
+
+/**
+ * enum b2r2_core_job_state - Indicates the current state of the job
+ *
+ * @B2R2_CORE_JOB_IDLE: Never queued
+ * @B2R2_CORE_JOB_QUEUED: In queue but not started yet
+ * @B2R2_CORE_JOB_RUNNING: Running, executed by B2R2
+ * @B2R2_CORE_JOB_DONE: Completed
+ * @B2R2_CORE_JOB_CANCELED: Canceled
+ */
+enum b2r2_core_job_state {
+ B2R2_CORE_JOB_IDLE = 0,
+ B2R2_CORE_JOB_QUEUED,
+ B2R2_CORE_JOB_RUNNING,
+ B2R2_CORE_JOB_DONE,
+ B2R2_CORE_JOB_CANCELED,
+};
+
+/**
+ * b2r2_work_buf - specification for a temporary work buffer
+ *
+ * @size - the size of the buffer (set by b2r2_node_split)
+ * @phys_addr - the physical address of the buffer (set by b2r2_blt_main)
+ */
+struct b2r2_work_buf {
+ u32 size;
+ u32 phys_addr;
+ void *virt_addr;
+ u32 mem_handle;
+};
+
+struct tmp_buf {
+ struct b2r2_work_buf buf;
+ bool in_use;
+};
+
+/**
+ * struct b2r2_blt_instance - Represents the B2R2 instance (one per open)
+ *
+ * @lock: Lock to protect the instance
+ *
+ * @report_list: Ready requests that should be reported,
+ * @report_list_waitq: Wait queue for report list
+ * @no_of_active_requests: Number of requests added but not reported
+ * in callback.
+ * @synching: true if any client is waiting for b2r2_blt_synch(0)
+ * @synch_done_waitq: Wait queue to handle synching on request_id 0
+ * @control: The b2r2 control entity
+ */
+struct b2r2_blt_instance {
+ struct mutex lock;
+
+ /* Requests to be reported */
+ struct list_head report_list;
+ wait_queue_head_t report_list_waitq;
+
+ /* Below for synching */
+ u32 no_of_active_requests;
+ bool synching;
+ wait_queue_head_t synch_done_waitq;
+
+ struct b2r2_control *control;
+};
+
+/**
+ * struct b2r2_node - Represents a B2R2 node with register values, executed
+ * by B2R2. Should be allocated non-cached.
+ *
+ * @next: Next node
+ * @physical_address: Physical address to be given to B2R2
+ * (physical address of "node" member below)
+ * @node: The B2R2 node with register settings. This is the data
+ * that B2R2 will use.
+ *
+ */
+struct b2r2_node {
+ struct b2r2_node *next;
+ u32 physical_address;
+
+ int src_tmp_index;
+ int dst_tmp_index;
+
+ int src_index;
+
+ /* B2R2 regs comes here */
+ struct b2r2_link_list node;
+};
+
+/**
+ * struct b2r2_resolved_buf - Contains calculated information about
+ * image buffers.
+ *
+ * @physical_address: Physical address of the buffer
+ * @virtual_address: Virtual address of the buffer
+ * @is_pmem: true if buffer is from pmem
+ * @hwmem_alloc: Hwmem alloc
+ * @filep: File pointer of mapped file (like pmem device, frame buffer device)
+ * @file_physical_start: Physical address of file start
+ * @file_virtual_start: Virtual address of file start
+ * @file_len: File len
+ *
+ */
+struct b2r2_resolved_buf {
+ u32 physical_address;
+ void *virtual_address;
+ bool is_pmem;
+ struct hwmem_alloc *hwmem_alloc;
+ /* Data for validation below */
+ struct file *filep;
+ u32 file_physical_start;
+ u32 file_virtual_start;
+ u32 file_len;
+};
+
+/**
+ * b2r2_node_split_buf - information about a source or destination buffer
+ *
+ * @addr - the physical base address
+ * @chroma_addr - the physical address of the chroma plane
+ * @chroma_cr_addr - the physical address of the Cr chroma plane
+ * @fmt - the buffer format
+ * @type - the buffer format type
+ * @rect - the rectangle of the buffer to use
+ * @color - the color value to use in case of a fill operation
+ * @pitch - the pixmap byte pitch
+ * @height - the pixmap height
+ * @alpha_range - the alpha range of the buffer (0-128 or 0-255)
+ * @hso - the horizontal scan order
+ * @vso - the vertical scan order
+ * @endian - the endianess of the buffer
+ * @plane_selection - the plane to write if buffer is planar or semi-planar
+ */
+struct b2r2_node_split_buf {
+ u32 addr;
+ u32 chroma_addr;
+ u32 chroma_cr_addr;
+
+ enum b2r2_blt_fmt fmt;
+ enum b2r2_fmt_type type;
+
+ struct b2r2_blt_rect rect;
+ struct b2r2_blt_rect win;
+
+ s32 dx;
+ s32 dy;
+
+ u32 color;
+ u16 pitch;
+ u16 width;
+ u16 height;
+
+ enum b2r2_ty alpha_range;
+ enum b2r2_ty hso;
+ enum b2r2_ty vso;
+ enum b2r2_ty endian;
+ enum b2r2_tty dither;
+
+ /* Plane selection (used when writing to a multibuffer format) */
+ enum b2r2_tty plane_selection;
+
+ /* Chroma plane selection (used when writing planar formats) */
+ enum b2r2_tty chroma_selection;
+
+ int tmp_buf_index;
+};
+
+/**
+ * b2r2_node_split_job - an instance of a node split job
+ *
+ * @type - the type of operation
+ * @ivmx - the ivmx matrix to use for color conversion
+ * @blend - determines if blending is enabled
+ * @clip - determines if destination clipping is enabled
+ * @swap_fg_bg - determines if FG and BG should be swapped when blending
+ * @flags - the flags passed in the blt request
+ * @flag_param - parameter required by certain flags,
+ * e.g. color for source color keying.
+ * @transform - the transforms passed in the blt request
+ * @global_alpha - the global alpha
+ * @clip_rect - the clipping rectangle to use
+ * @h_rescale - determines if horizontal rescaling is enabled
+ * @h_rsf - the horizontal scale factor
+ * @v_rescale - determines if vertical rescaling is enabled
+ * @v_rsf - the vertical scale factor
+ * @src - the incoming source buffer
+ * @bg - the incoming background buffer
+ * @dst - the outgoing destination buffer
+ * @work_bufs - work buffer specifications
+ * @tmp_bufs - temporary buffers
+ * @buf_count - the number of temporary buffers used for the job
+ * @node_count - the number of nodes used for the job
+ * @max_buf_size - the maximum size of temporary buffers
+ * @nbr_rows - the number of tile rows in the blit operation
+ * @nbr_cols - the number of tile columns in the blit operation
+ */
+struct b2r2_node_split_job {
+ enum b2r2_op_type type;
+
+ const u32 *ivmx;
+
+ bool blend;
+ bool clip;
+ bool rotation;
+
+ bool swap_fg_bg;
+
+ u32 flags;
+ u32 flag_param;
+ u32 transform;
+ u32 global_alpha;
+
+ struct b2r2_blt_rect clip_rect;
+
+ bool h_rescale;
+ u16 h_rsf;
+
+ bool v_rescale;
+ u16 v_rsf;
+
+ struct b2r2_node_split_buf src;
+ struct b2r2_node_split_buf bg;
+ struct b2r2_node_split_buf dst;
+
+ struct b2r2_work_buf work_bufs[MAX_TMP_BUFS_NEEDED];
+ struct b2r2_node_split_buf tmp_bufs[MAX_TMP_BUFS_NEEDED];
+
+ u32 buf_count;
+ u32 node_count;
+ u32 max_buf_size;
+};
+
+/**
+ * struct b2r2_core_job - Represents a B2R2 core job
+ *
+ * @start_sentinel: Memory overwrite guard
+ *
+ * @tag: Client value. Used by b2r2_core_job_find_first_with_tag().
+ * @prio: Job priority, from -19 up to 20. Mapped to the
+ * B2R2 application queues. Filled in by the client.
+ * @first_node_address: Physical address of the first node. Filled
+ * in by the client.
+ * @last_node_address: Physical address of the last node. Filled
+ * in by the client.
+ *
+ * @callback: Function that will be called when the job is done.
+ * @acquire_resources: Function that allocates the resources needed
+ * to execute the job (i.e. SRAM alloc). Must not
+ * sleep if atomic, should fail with negative error code
+ * if resources not available.
+ * @release_resources: Function that releases the resources previously
+ * allocated by acquire_resources (i.e. SRAM alloc).
+ * @release: Function that will be called when the reference count reaches
+ * zero.
+ *
+ * @job_id: Unique id for this job, assigned by B2R2 core
+ * @job_state: The current state of the job
+ * @jiffies: Number of jiffies needed for this request
+ *
+ * @list: List entry element for internal list management
+ * @event: Wait queue event to wait for job done
+ * @work: Work queue structure, for callback implementation
+ *
+ * @queue: The queue that this job shall be submitted to
+ * @control: B2R2 Queue control
+ * @pace_control: For composition queue only
+ * @interrupt_context: Context for interrupt
+ * @hw_start_time: The point when the b2r2 HW queue is activated for this job
+ * @nsec_active_in_hw: Time spent on the b2r2 HW queue for this job
+ *
+ * @end_sentinel: Memory overwrite guard
+ */
+struct b2r2_core_job {
+ u32 start_sentinel;
+
+ /* Data to be filled in by client */
+ int tag;
+ int prio;
+ u32 first_node_address;
+ u32 last_node_address;
+ void (*callback)(struct b2r2_core_job *);
+ int (*acquire_resources)(struct b2r2_core_job *,
+ bool atomic);
+ void (*release_resources)(struct b2r2_core_job *,
+ bool atomic);
+ void (*release)(struct b2r2_core_job *);
+
+ /* Output data, do not modify */
+ int job_id;
+ enum b2r2_core_job_state job_state;
+ unsigned long jiffies;
+
+ /* Data below is internal to b2r2_core, do not modify */
+
+ /* Reference counting */
+ u32 ref_count;
+
+ /* Internal data */
+ struct list_head list;
+ wait_queue_head_t event;
+ struct work_struct work;
+
+ /* B2R2 HW data */
+ enum b2r2_core_queue queue;
+ u32 control;
+ u32 pace_control;
+ u32 interrupt_context;
+
+ /* Timing data */
+ u32 hw_start_time;
+ s32 nsec_active_in_hw;
+
+ u32 end_sentinel;
+};
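+
+/*
+ * Illustrative sketch (an assumption, not taken from this driver) of
+ * how the prio range could be folded onto the four application queues;
+ * the helper name is hypothetical.
+ */
+static inline enum b2r2_core_queue b2r2_example_prio_to_queue(int prio)
+{
+ int idx = (20 - prio) * B2R2_NUM_APPLICATIONS_QUEUES / 40;
+
+ if (idx < 0)
+ idx = 0;
+ else if (idx >= B2R2_NUM_APPLICATIONS_QUEUES)
+ idx = B2R2_NUM_APPLICATIONS_QUEUES - 1;
+ return (enum b2r2_core_queue)(B2R2_CORE_QUEUE_AQ1 + idx);
+}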
+
+/**
+ * struct b2r2_blt_request - Represents one B2R2 blit request
+ *
+ * @instance: Back pointer to the instance structure
+ * @list: List item to keep track of requests per instance
+ * @user_req: The request received from userspace
+ * @job: The administration structure for the B2R2 job,
+ * consisting of one or more nodes
+ * @node_split_job: The administration structure for the B2R2 node split job
+ * @first_node: Pointer to the first B2R2 node
+ * @request_id: Request id for this job
+ * @node_split_handle: Handle of the node split
+ * @src_resolved: Calculated info about the source buffer
+ * @src_mask_resolved: Calculated info about the source mask buffer
+ * @bg_resolved: Calculated info about the background buffer
+ * @dst_resolved: Calculated info about the destination buffer
+ * @profile: True if the blit shall be profiled, false otherwise
+ */
+struct b2r2_blt_request {
+ struct b2r2_blt_instance *instance;
+ struct list_head list;
+ struct b2r2_blt_req user_req;
+ struct b2r2_core_job job;
+ struct b2r2_node_split_job node_split_job;
+ struct b2r2_node *first_node;
+ int request_id;
+
+ /* Resolved buffer addresses */
+ struct b2r2_resolved_buf src_resolved;
+ struct b2r2_resolved_buf src_mask_resolved;
+ struct b2r2_resolved_buf bg_resolved;
+ struct b2r2_resolved_buf dst_resolved;
+
+ /* TBD: Info about SRAM usage & needs */
+ struct b2r2_work_buf *bufs;
+ u32 buf_count;
+
+ /* color look-up table */
+ void *clut;
+ u32 clut_phys_addr;
+
+ /* Profiling stuff */
+ bool profile;
+
+ s32 nsec_active_in_cpu;
+
+ u32 start_time_nsec;
+ s32 total_time_nsec;
+};
+
+/**
+ * struct b2r2_mem_heap - The memory heap
+ *
+ * @start_phys_addr: Physical memory start address
+ * @start_virt_ptr: Virtual pointer to start
+ * @size: Memory size
+ * @align: Alignment
+ * @blocks: List of all blocks
+ * @heap_lock: Protection for the heap
+ * @node_size: Size of each B2R2 node
+ * @node_heap: Heap for B2R2 node allocations
+ * @debugfs_root_dir: Debugfs B2R2 mem root dir
+ * @debugfs_heap_stats: Debugfs B2R2 memory status
+ * @debugfs_dir_blocks: Debugfs B2R2 free blocks dir
+ */
+struct b2r2_mem_heap {
+ dma_addr_t start_phys_addr;
+ void *start_virt_ptr;
+ u32 size;
+ u32 align;
+ struct list_head blocks;
+ spinlock_t heap_lock;
+ u32 node_size;
+ struct dma_pool *node_heap;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_heap_stats;
+ struct dentry *debugfs_dir_blocks;
+#endif
+};
+
+/**
+ * struct b2r2_control - Administrative state for one B2R2 instance
+ *
+ * @miscdev: The miscdev presenting b2r2 to the system
+ * @dev: The device handle of the b2r2 instance
+ * @id: The id of the b2r2 instance
+ * @name: The name of the b2r2 instance
+ * @data: Used to store a reference to b2r2_core
+ * @tmp_bufs: Temporary buffers needed in the node splitter
+ * @filters_initialized: Indicates whether filters have been
+ * initialized for this b2r2 instance
+ * @mem_heap: The b2r2 heap, e.g. used to allocate nodes
+ * @debugfs_latest_request: Copy of the latest request issued
+ * @debugfs_root_dir: The debugfs root directory, e.g. /debugfs/b2r2
+ * @debugfs_debug_root_dir: The b2r2 debug root directory,
+ * e.g. /debugfs/b2r2/debug
+ * @stat_lock: Spin lock protecting the statistics
+ * @stat_n_jobs_added: Number of jobs added to b2r2_core
+ * @stat_n_jobs_released: Number of jobs released (job_release called)
+ * @stat_n_jobs_in_report_list: Number of jobs currently in the report list
+ * @stat_n_in_blt: Number of client threads currently executing inside b2r2_blt()
+ * @stat_n_in_blt_synch: Number of client threads currently waiting for synch
+ * @stat_n_in_blt_add: Number of client threads currently adding in b2r2_blt
+ * @stat_n_in_blt_wait: Number of client threads currently waiting in b2r2_blt
+ * @stat_n_in_synch_0: Number of client threads currently in b2r2_blt_sync
+ * waiting for all client jobs to finish
+ * @stat_n_in_synch_job: Number of client threads currently in b2r2_blt_sync
+ * waiting specific job to finish
+ * @stat_n_in_query_cap: Number of clients currently in query cap
+ * @stat_n_in_open: Number of clients currently in b2r2_blt_open
+ * @stat_n_in_release: Number of clients currently in b2r2_blt_release
+ * @last_job_lock: Mutex protecting last_job
+ * @last_job: The last running job on this b2r2 instance
+ * @last_job_chars: Temporary buffer used in printing last_job
+ * @prev_node_count: Node count of last_job
+ */
+struct b2r2_control {
+ struct miscdevice miscdev;
+ struct device *dev;
+ int id;
+ char name[16];
+ void *data;
+ struct tmp_buf tmp_bufs[MAX_TMP_BUFS_NEEDED];
+ int filters_initialized;
+ struct b2r2_mem_heap mem_heap;
+#ifdef CONFIG_DEBUG_FS
+ struct b2r2_blt_request debugfs_latest_request;
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_debug_root_dir;
+#endif
+ struct mutex stat_lock;
+ unsigned long stat_n_jobs_added;
+ unsigned long stat_n_jobs_released;
+ unsigned long stat_n_jobs_in_report_list;
+ unsigned long stat_n_in_blt;
+ unsigned long stat_n_in_blt_synch;
+ unsigned long stat_n_in_blt_add;
+ unsigned long stat_n_in_blt_wait;
+ unsigned long stat_n_in_synch_0;
+ unsigned long stat_n_in_synch_job;
+ unsigned long stat_n_in_query_cap;
+ unsigned long stat_n_in_open;
+ unsigned long stat_n_in_release;
+ struct mutex last_job_lock;
+ struct b2r2_node *last_job;
+ char *last_job_chars;
+ int prev_node_count;
+};
+
+/* FIXME: The functions below should be removed when we are
+ switching to the new Robert Lind allocator */
+
+/**
+ * b2r2_blt_alloc_nodes() - Allocate nodes
+ *
+ * @node_count: Number of nodes to allocate
+ *
+ * Return:
+ * Returns a pointer to the first node in the node list.
+ */
+struct b2r2_node *b2r2_blt_alloc_nodes(struct b2r2_control *cont,
+ int node_count);
+
+/**
+ * b2r2_blt_free_nodes() - Release nodes previously allocated via
+ * b2r2_blt_alloc_nodes()
+ *
+ * @first_node: First node in linked list of nodes
+ */
+void b2r2_blt_free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node);
+
+/**
+ * b2r2_blt_module_init() - Initialize the B2R2 blt module
+ */
+int b2r2_blt_module_init(struct b2r2_control *cont);
+
+/**
+ * b2r2_blt_module_exit() - Un-initialize the B2R2 blt module
+ */
+void b2r2_blt_module_exit(struct b2r2_control *cont);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_kernel_if.c b/drivers/video/b2r2/b2r2_kernel_if.c
new file mode 100644
index 00000000000..373311ccca5
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_kernel_if.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 kernel interface for being built as a separate module
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+#include <linux/fb.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+
+EXPORT_SYMBOL(fget_light);
+EXPORT_SYMBOL(fput_light);
+EXPORT_SYMBOL(flush_cache_range);
+EXPORT_SYMBOL(task_sched_runtime);
+#ifdef CONFIG_ANDROID_PMEM
+EXPORT_SYMBOL(get_pmem_file);
+EXPORT_SYMBOL(put_pmem_file);
+EXPORT_SYMBOL(flush_pmem_file);
+#endif
diff --git a/drivers/video/b2r2/b2r2_mem_alloc.c b/drivers/video/b2r2/b2r2_mem_alloc.c
new file mode 100644
index 00000000000..e5235d2c97f
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_mem_alloc.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal Memory allocator
+ *
+ * Author: Robert Lind <robert.lind@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_mem_alloc.h"
+
+/* Forward declarations */
+static struct b2r2_mem_block *b2r2_mem_block_alloc(
+ struct b2r2_control *cont, u32 offset, u32 size, bool free);
+static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block);
+static int b2r2_mem_heap_status(struct b2r2_mem_heap *mem_heap,
+ struct b2r2_mem_heap_status *mem_heap_status);
+
+/* Align value down to specified alignment */
+static inline u32 align_down(u32 align, u32 value)
+{
+ return value & ~(align - 1);
+}
+
+/* Align value up to specified alignment */
+static inline u32 align_up(u32 align, u32 value)
+{
+ return (value + align - 1) & ~(align - 1);
+}
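+
+/*
+ * Example (illustrative): with an alignment of 16, align_down(16, 100)
+ * == 96 and align_up(16, 100) == 112. Both helpers assume that align is
+ * a power of two.
+ */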
+
+
+#ifdef CONFIG_DEBUG_FS
+/* About debugfs:
+ * debugfs is a mountable debug file system.
+ *
+ * Mount like this:
+ * mkdir /debug
+ * mount -t debugfs none /debug
+ * ls /debug/b2r2/mem
+ *
+ * ls -al /debug/b2r2/mem/blocks
+ * cat /debug/b2r2/mem/stats
+ */
+
+
+/* Create string containing memory heap status */
+static char *get_b2r2_mem_stats(struct b2r2_mem_heap *mem_heap, char *buf)
+{
+ struct b2r2_mem_heap_status mem_heap_status;
+
+ if (b2r2_mem_heap_status(mem_heap, &mem_heap_status) != 0) {
+ strcpy(buf, "Error, failed to get status\n");
+ return buf;
+ }
+
+ sprintf(buf,
+ "Handle : 0x%lX\n"
+ "Physical start address : 0x%lX\n"
+ "Size : %lu\n"
+ "Align : %lu\n"
+ "No of blocks allocated : %lu\n"
+ "Allocated size : %lu\n"
+ "No of free blocks : %lu\n"
+ "Free size : %lu\n"
+ "No of locks : %lu\n"
+ "No of locked : %lu\n"
+ "No of nodes : %lu\n",
+ (unsigned long) mem_heap,
+ (unsigned long) mem_heap_status.start_phys_addr,
+ (unsigned long) mem_heap_status.size,
+ (unsigned long) mem_heap_status.align,
+ (unsigned long) mem_heap_status.num_alloc,
+ (unsigned long) mem_heap_status.allocated_size,
+ (unsigned long) mem_heap_status.num_free,
+ (unsigned long) mem_heap_status.free_size,
+ (unsigned long) mem_heap_status.num_locks,
+ (unsigned long) mem_heap_status.num_locked,
+ (unsigned long) mem_heap_status.num_nodes);
+
+ return buf;
+}
+
+/*
+ * Print memory heap status on file
+ * (Use like "cat /debug/b2r2/mem/stats")
+ */
+static int debugfs_b2r2_mem_stats_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct b2r2_mem_heap *mem_heap = filp->f_dentry->d_inode->i_private;
+ char Buf[400];
+ size_t dev_size;
+ int ret = 0;
+
+ get_b2r2_mem_stats(mem_heap, Buf);
+ dev_size = strlen(Buf);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf + *f_pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ return ret;
+}
+
+/* debugfs file operations for the "stats" file */
+static const struct file_operations debugfs_b2r2_mem_stats_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_mem_stats_read,
+};
+
+/* read function for file in the "blocks" sub directory */
+static int debugfs_b2r2_mem_block_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct b2r2_mem_block *mem_block = filp->f_dentry->d_inode->i_private;
+ char Buf[200];
+ size_t dev_size;
+ int ret = 0;
+
+ dev_size = sprintf(Buf, "offset: %08lX %s size: %8d "
+ "lock_count: %2d\n",
+ (unsigned long) mem_block->offset,
+ mem_block->free ? "free" : "allc",
+ mem_block->size,
+ mem_block->lock_count);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf + *f_pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ return ret;
+}
+
+/* debugfs file operations for files in the "blocks" directory */
+static const struct file_operations debugfs_b2r2_mem_block_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_mem_block_read,
+};
+
+/*
+ * Create or update the debugfs directory entry for a file in the
+ * "blocks" directory (a memory allocation)
+ */
+void debugfs_create_mem_block_entry(struct b2r2_mem_block *mem_block,
+ struct dentry *parent)
+{
+ struct timespec tm = current_kernel_time();
+ struct timespec atime = tm;
+ struct timespec mtime = tm;
+ struct timespec ctime = tm;
+
+ if (mem_block->debugfs_block) {
+ atime = mem_block->debugfs_block->d_inode->i_atime;
+ ctime = mem_block->debugfs_block->d_inode->i_ctime;
+ debugfs_remove(mem_block->debugfs_block);
+ }
+
+ /* Add the block in debugfs */
+ if (mem_block->free)
+ sprintf(mem_block->debugfs_fname, "%08lX free",
+ (unsigned long) mem_block->offset);
+ else {
+ sprintf(mem_block->debugfs_fname, "%08lX allc h:%08lX "
+ "lck:%d ",
+ (unsigned long) mem_block->offset,
+ (unsigned long) mem_block,
+ mem_block->lock_count);
+ }
+
+ mem_block->debugfs_block = debugfs_create_file(
+ mem_block->debugfs_fname,
+ 0444, parent, mem_block,
+ &debugfs_b2r2_mem_block_fops);
+ if (mem_block->debugfs_block) {
+ mem_block->debugfs_block->d_inode->i_size = mem_block->size;
+ mem_block->debugfs_block->d_inode->i_atime = atime;
+ mem_block->debugfs_block->d_inode->i_mtime = mtime;
+ mem_block->debugfs_block->d_inode->i_ctime = ctime;
+ }
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/* Module initialization function */
+int b2r2_mem_init(struct b2r2_control *cont,
+ u32 heap_size, u32 align, u32 node_size)
+{
+ struct b2r2_mem_block *mem_block;
+ u32 aligned_size;
+
+ dev_info(cont->dev, "%s: Creating heap for size %d bytes\n",
+ __func__, (int) heap_size);
+
+ /* Align size */
+ aligned_size = align_down(align, heap_size);
+ if (aligned_size == 0)
+ return -EINVAL;
+
+ cont->mem_heap.start_virt_ptr = dma_alloc_coherent(cont->dev,
+ aligned_size, &(cont->mem_heap.start_phys_addr), GFP_KERNEL);
+ if (!cont->mem_heap.start_phys_addr || !cont->mem_heap.start_virt_ptr) {
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize the heap */
+ cont->mem_heap.size = aligned_size;
+ cont->mem_heap.align = align;
+
+ INIT_LIST_HEAD(&cont->mem_heap.blocks);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debugfs */
+ if (cont->mem_heap.debugfs_root_dir) {
+ cont->mem_heap.debugfs_heap_stats = debugfs_create_file(
+ "stats", 0444, cont->mem_heap.debugfs_root_dir,
+ &cont->mem_heap, &debugfs_b2r2_mem_stats_fops);
+ cont->mem_heap.debugfs_dir_blocks = debugfs_create_dir(
+ "blocks", cont->mem_heap.debugfs_root_dir);
+ }
+#endif
+
+ /* Create the first _free_ memory block */
+ mem_block = b2r2_mem_block_alloc(cont, 0, aligned_size, true);
+ if (!mem_block) {
+ dma_free_coherent(cont->dev, aligned_size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Add the free block to the blocks list */
+ list_add(&mem_block->list, &cont->mem_heap.blocks);
+
+ /* Allocate separate heap for B2R2 nodes */
+ cont->mem_heap.node_size = node_size;
+ cont->mem_heap.node_heap = dma_pool_create("b2r2_node_cache",
+ cont->dev, node_size, align, 4096);
+ if (!cont->mem_heap.node_heap) {
+ b2r2_mem_block_free(mem_block);
+ dma_free_coherent(cont->dev, aligned_size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_init);
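+
+/*
+ * Sizing example (illustrative numbers, assuming align_down() rounds down
+ * to a multiple of align): heap_size = 100000 with align = 4096 gives
+ * aligned_size = 24 * 4096 = 98304 bytes, which is what is requested from
+ * dma_alloc_coherent() above.
+ */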
+
+/* Clean up the B2R2 memory manager */
+void b2r2_mem_exit(struct b2r2_control *cont)
+{
+ struct list_head *ptr;
+
+ /* Free B2R2 node heap */
+ dma_pool_destroy(cont->mem_heap.node_heap);
+
+ list_for_each(ptr, &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ b2r2_mem_block_free(mem_block);
+ }
+
+ dma_free_coherent(cont->dev, cont->mem_heap.size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
+}
+EXPORT_SYMBOL(b2r2_mem_exit);
+
+/* Return status of the heap */
+static int b2r2_mem_heap_status(struct b2r2_mem_heap *mheap,
+ struct b2r2_mem_heap_status *mem_heap_status)
+{
+ struct list_head *ptr;
+
+ if (!mheap || !mem_heap_status)
+ return -EINVAL;
+ memset(mem_heap_status, 0, sizeof(*mem_heap_status));
+
+ /* Lock the heap */
+ spin_lock(&mheap->heap_lock);
+
+ /* Fill in static info */
+ mem_heap_status->start_phys_addr = mheap->start_phys_addr;
+ mem_heap_status->size = mheap->size;
+ mem_heap_status->align = mheap->align;
+
+ list_for_each(ptr, &mheap->blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ if (mem_block->free) {
+ mem_heap_status->num_free++;
+ mem_heap_status->free_size += mem_block->size;
+ } else {
+ if (mem_block->lock_count) {
+ mem_heap_status->num_locked++;
+ mem_heap_status->num_locks +=
+ mem_block->lock_count;
+ }
+ mem_heap_status->num_alloc++;
+ mem_heap_status->allocated_size += mem_block->size;
+ }
+ }
+
+ spin_unlock(&mheap->heap_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_heap_status);
+
+/* Internal: Allocate a housekeeping structure
+ * for an allocated or free memory block
+ */
+static struct b2r2_mem_block *b2r2_mem_block_alloc(
+ struct b2r2_control *cont, u32 offset, u32 size, bool free)
+{
+ struct b2r2_mem_block *mem_block = kmalloc(
+ sizeof(struct b2r2_mem_block), GFP_KERNEL);
+
+ if (mem_block) {
+ mem_block->offset = offset;
+ mem_block->size = size;
+ mem_block->free = free;
+ mem_block->lock_count = 0;
+
+ INIT_LIST_HEAD(&mem_block->list);
+
+#ifdef CONFIG_DEBUG_FS
+ mem_block->debugfs_block = NULL;
+ /* Add the block in debugfs */
+ debugfs_create_mem_block_entry(mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+ }
+
+ return mem_block;
+}
+
+/* Internal: Release housekeeping structure */
+static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block)
+{
+ if (mem_block) {
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(mem_block->debugfs_block);
+#endif
+ kfree(mem_block);
+ }
+}
+
+/* Allocate a block from the heap */
+int b2r2_mem_alloc(struct b2r2_control *cont, u32 requested_size,
+ u32 *returned_size, u32 *mem_handle)
+{
+ int ret = 0;
+ struct list_head *ptr;
+ struct b2r2_mem_block *found_mem_block = NULL;
+ u32 aligned_size;
+
+ if (!mem_handle)
+ return -EINVAL;
+
+ printk(KERN_INFO "%s: size=%d\n", __func__, requested_size);
+
+ *mem_handle = 0;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ aligned_size = align_up(cont->mem_heap.align, requested_size);
+ /* Try to find the best matching free block of suitable size */
+ list_for_each(ptr, &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ if (mem_block->free && mem_block->size >= aligned_size &&
+ (!found_mem_block ||
+ mem_block->size < found_mem_block->size)) {
+ found_mem_block = mem_block;
+ if (found_mem_block->size == aligned_size)
+ break;
+ }
+ }
+
+ if (found_mem_block) {
+ struct b2r2_mem_block *new_block
+ = b2r2_mem_block_alloc(cont,
+ found_mem_block->offset,
+ requested_size, false);
+
+ if (new_block) {
+ /* Insert the new block before the found block */
+ list_add_tail(&new_block->list,
+ &found_mem_block->list);
+
+ /* Split the free block */
+ found_mem_block->offset += aligned_size;
+ found_mem_block->size -= aligned_size;
+
+ if (found_mem_block->size == 0)
+ b2r2_mem_block_free(found_mem_block);
+ else {
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(
+ found_mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+ }
+
+ *mem_handle = (u32) new_block;
+ *returned_size = aligned_size;
+ } else {
+ ret = -ENOMEM;
+ }
+ } else
+ ret = -ENOMEM;
+
+ if (ret != 0) {
+ *returned_size = 0;
+ *mem_handle = (u32) 0;
+ }
+
+ /* Unlock */
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_mem_alloc);
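+
+/*
+ * Worked example for the best-fit split above (illustrative values,
+ * assuming align_up() rounds up to a multiple of align): with align = 512
+ * and one free block of 4096 bytes at offset 0, a request for 1000 bytes
+ * becomes aligned_size = 1024. A busy block recording the requested 1000
+ * bytes is inserted at offset 0, the free block shrinks to 3072 bytes at
+ * offset 1024, and *returned_size is reported as 1024.
+ */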
+
+/* Free the allocated block */
+int b2r2_mem_free(struct b2r2_control *cont, u32 mem_handle)
+{
+ int ret = 0;
+ struct b2r2_mem_block *mem_block = (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ if (!ret && mem_block->free)
+ ret = -EINVAL;
+
+ if (!ret) {
+ printk(KERN_INFO "%s: freeing block 0x%p\n", __func__, mem_block);
+ /* Release the block */
+
+ mem_block->free = true;
+ mem_block->size = align_up(cont->mem_heap.align,
+ mem_block->size);
+
+ /* Join with previous block if possible */
+ if (mem_block->list.prev != &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *prev_block =
+ list_entry(mem_block->list.prev,
+ struct b2r2_mem_block, list);
+
+ if (prev_block->free &&
+ (prev_block->offset + prev_block->size) ==
+ mem_block->offset) {
+ mem_block->offset = prev_block->offset;
+ mem_block->size += prev_block->size;
+
+ b2r2_mem_block_free(prev_block);
+ }
+ }
+
+ /* Join with next block if possible */
+ if (mem_block->list.next != &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *next_block
+ = list_entry(mem_block->list.next,
+ struct b2r2_mem_block,
+ list);
+
+ if (next_block->free &&
+ (mem_block->offset + mem_block->size) ==
+ next_block->offset) {
+ mem_block->size += next_block->size;
+
+ b2r2_mem_block_free(next_block);
+ }
+ }
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+ }
+
+ /* Unlock */
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_mem_free);
+
+/* Lock the allocated block in memory */
+int b2r2_mem_lock(struct b2r2_control *cont, u32 mem_handle,
+ u32 *phys_addr, void **virt_ptr, u32 *size)
+{
+ struct b2r2_mem_block *mem_block =
+ (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ mem_block->lock_count++;
+
+ if (phys_addr)
+ *phys_addr = cont->mem_heap.start_phys_addr + mem_block->offset;
+ if (virt_ptr)
+ *virt_ptr = (char *) cont->mem_heap.start_virt_ptr +
+ mem_block->offset;
+ if (size)
+ *size = align_up(cont->mem_heap.align, mem_block->size);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_lock);
+
+/* Unlock the allocated block in memory */
+int b2r2_mem_unlock(struct b2r2_control *cont, u32 mem_handle)
+{
+ struct b2r2_mem_block *mem_block =
+ (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ mem_block->lock_count--;
+
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ /* debugfs will be updated in release */
+ return 0;
+/* return b2r2_mem_free(mem_handle);*/
+}
+EXPORT_SYMBOL(b2r2_mem_unlock);
+
+/* Allocate one or more b2r2 nodes from DMA pool */
+int b2r2_node_alloc(struct b2r2_control *cont, u32 num_nodes,
+ struct b2r2_node **first_node)
+{
+ int i;
+ int ret = 0;
+ u32 physical_address;
+ struct b2r2_node *first_node_ptr;
+ struct b2r2_node *node_ptr;
+
+ /* Check input parameters */
+ if ((num_nodes <= 0) || !first_node) {
+ dev_err(cont->dev,
+ "B2R2_MEM: Invalid parameter for b2r2_node_alloc, "
+ "num_nodes=%d, first_node=%ld\n",
+ (int) num_nodes, (long) first_node);
+ return -EINVAL;
+ }
+
+ /* Allocate the first node */
+ first_node_ptr = dma_pool_alloc(cont->mem_heap.node_heap,
+ GFP_DMA | GFP_KERNEL, &physical_address);
+ if (!first_node_ptr) {
+ dev_err(cont->dev,
+ "B2R2_MEM: Failed to allocate memory for node\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize first node */
+ first_node_ptr->next = NULL;
+ first_node_ptr->physical_address = physical_address +
+ offsetof(struct b2r2_node, node);
+
+ /* Allocate and initialize remaining nodes, */
+ /* and link them into a list */
+ for (i = 1, node_ptr = first_node_ptr; i < num_nodes; i++) {
+ node_ptr->next = dma_pool_alloc(cont->mem_heap.node_heap,
+ GFP_DMA | GFP_KERNEL, &physical_address);
+ if (node_ptr->next) {
+ node_ptr = node_ptr->next;
+ node_ptr->next = NULL;
+ node_ptr->physical_address = physical_address +
+ offsetof(struct b2r2_node, node);
+ } else {
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory for node\n");
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ /* If all nodes were allocated successfully, */
+ /* return the first node */
+ if (!ret)
+ *first_node = first_node_ptr;
+ else
+ b2r2_node_free(cont, first_node_ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_node_alloc);
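+
+/*
+ * Note on the address bookkeeping above: each node's physical_address points
+ * at the embedded hardware descriptor (the "node" member), which is why
+ * offsetof(struct b2r2_node, node) is added after dma_pool_alloc() and
+ * subtracted again before dma_pool_free() below.
+ */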
+
+/* Free a linked list of b2r2 nodes */
+void b2r2_node_free(struct b2r2_control *cont, struct b2r2_node *first_node)
+{
+ struct b2r2_node *current_node = first_node;
+ struct b2r2_node *next_node = NULL;
+
+ /* Traverse the linked list and free the nodes */
+ while (current_node != NULL) {
+ next_node = current_node->next;
+ dma_pool_free(cont->mem_heap.node_heap, current_node,
+ current_node->physical_address -
+ offsetof(struct b2r2_node, node));
+ current_node = next_node;
+ }
+}
+EXPORT_SYMBOL(b2r2_node_free);
+
+MODULE_AUTHOR("Robert Lind <robert.lind@ericsson.com>");
+MODULE_DESCRIPTION("Ericsson AB B2R2 physical memory driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/b2r2/b2r2_mem_alloc.h b/drivers/video/b2r2/b2r2_mem_alloc.h
new file mode 100644
index 00000000000..4fd1e66abca
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_mem_alloc.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal Memory allocator
+ *
+ * Author: Robert Lind <robert.lind@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_MEM_ALLOC_H
+#define __B2R2_MEM_ALLOC_H
+
+#include "b2r2_internal.h"
+
+
+/**
+ * struct b2r2_mem_heap_status - Information about current state of the heap
+ *
+ * @start_phys_addr: Physical address of the memory area
+ * @size: Size of the memory area
+ * @align: Alignment of start and allocation sizes (in bytes).
+ * @num_alloc: Number of memory allocations
+ * @allocated_size: Size allocated (sum of requested sizes)
+ * @num_free: Number of free blocks (fragments)
+ * @free_size: Free size available for allocation
+ * @num_locks: Sum of the number of locks on all memory allocations
+ * @num_locked: Number of locked memory allocations
+ * @num_nodes: Number of node allocations
+ *
+ **/
+struct b2r2_mem_heap_status {
+ u32 start_phys_addr;
+ u32 size;
+ u32 align;
+ u32 num_alloc;
+ u32 allocated_size;
+ u32 num_free;
+ u32 free_size;
+ u32 num_locks;
+ u32 num_locked;
+ u32 num_nodes;
+};
+
+/**
+ * struct b2r2_mem_block - Represents one block of b2r2
+ * physical memory, free or allocated
+ *
+ * @list: For membership in list
+ * @offset: Offset in b2r2 physical memory area (aligned)
+ * @size: Size of the object (requested size if busy, else actual)
+ * @free: True if the block is free
+ * @lock_count: Lock count
+ * @debugfs_fname: Debugfs file name
+ * @debugfs_block: Debugfs dir entry for the block
+ */
+struct b2r2_mem_block {
+ struct list_head list;
+ u32 offset;
+ u32 size;
+ bool free;
+ u32 lock_count;
+#ifdef CONFIG_DEBUG_FS
+ char debugfs_fname[80];
+ struct dentry *debugfs_block;
+#endif
+};
+
+
+/* B2R2 memory API (kernel) */
+
+/**
+ * b2r2_mem_init() - Initializes the B2R2 memory manager
+ * @cont: Pointer to the b2r2 control structure; its device is used for
+ *        allocating the memory heap
+ * @heap_size: Size of the heap (in bytes)
+ * @align: Alignment to use for memory allocations on heap (in bytes)
+ * @node_size: Size of each B2R2 node (in bytes)
+ *
+ * Returns 0 if success, else negative error code
+ **/
+int b2r2_mem_init(struct b2r2_control *cont,
+ u32 heap_size, u32 align, u32 node_size);
+
+/**
+ * b2r2_mem_exit() - Cleans up the B2R2 memory manager
+ * @cont: Pointer to the b2r2 control structure
+ *
+ **/
+void b2r2_mem_exit(struct b2r2_control *cont);
+
+/**
+ * b2r2_mem_alloc() - Allocates memory block from physical memory heap
+ * @requested_size: Requested size
+ * @returned_size: Actual size of memory block. Might be adjusted due to
+ * alignment but is always >= requested size if function
+ * succeeds
+ * @mem_handle: Returned memory handle
+ *
+ * All memory allocations are movable when not locked.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_alloc(struct b2r2_control *cont, u32 requested_size,
+ u32 *returned_size, u32 *mem_handle);
+
+/**
+ * b2r2_mem_free() - Frees an allocation
+ * @mem_handle: Memory handle
+ *
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_free(struct b2r2_control *cont, u32 mem_handle);
+
+/**
+ * b2r2_mem_lock() - Lock an allocation and return its physical address
+ * @mem_handle: Memory handle
+ * @phys_addr: Returned physical address to start of memory allocation.
+ * May be NULL.
+ * @virt_ptr: Returned virtual address pointer to start of memory allocation.
+ * May be NULL.
+ * @size: Returned size of memory allocation. May be NULL.
+ *
+ * The memory allocation is locked in memory and its physical address
+ * is returned.
+ * The lock count is incremented by one.
+ * You need to call b2r2_mem_unlock once for each call to
+ * b2r2_mem_lock.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_lock(struct b2r2_control *cont, u32 mem_handle,
+ u32 *phys_addr, void **virt_ptr, u32 *size);
+
+/**
+ * b2r2_mem_unlock() - Unlock previously locked memory
+ * @mem_handle: Memory handle
+ *
+ * Decrements lock count. When lock count reaches 0 the
+ * memory area is movable again.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_unlock(struct b2r2_control *cont, u32 mem_handle);
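+
+/*
+ * Typical call sequence (illustrative sketch only, error handling omitted):
+ *
+ *	u32 handle, size, phys;
+ *	void *virt;
+ *
+ *	if (!b2r2_mem_alloc(cont, 1024, &size, &handle)) {
+ *		b2r2_mem_lock(cont, handle, &phys, &virt, &size);
+ *		... let B2R2 access the area at phys ...
+ *		b2r2_mem_unlock(cont, handle);
+ *		b2r2_mem_free(cont, handle);
+ *	}
+ */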
+
+/**
+ * b2r2_node_alloc() - Allocates B2R2 node from physical memory heap
+ * @num_nodes: Number of linked nodes to allocate
+ * @first_node: Returned pointer to first node in linked list
+ *
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_node_alloc(struct b2r2_control *cont, u32 num_nodes,
+ struct b2r2_node **first_node);
+
+/**
+ * b2r2_node_free() - Frees a linked list of allocated B2R2 nodes
+ * @first_node: Pointer to first node in linked list
+ **/
+void b2r2_node_free(struct b2r2_control *cont, struct b2r2_node *first_node);
+
+
+#endif /* __B2R2_MEM_ALLOC_H */
diff --git a/drivers/video/b2r2/b2r2_node_gen.c b/drivers/video/b2r2/b2r2_node_gen.c
new file mode 100644
index 00000000000..1f48bac6fe7
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_gen.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node generator
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma-mapping.h>
+#include "b2r2_internal.h"
+
+static void free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
+{
+ struct b2r2_node *node = first_node;
+ int no_of_nodes = 0;
+
+ while (node) {
+ no_of_nodes++;
+ node = node->next;
+ }
+
+ dma_free_coherent(cont->dev,
+ no_of_nodes * sizeof(struct b2r2_node),
+ first_node,
+ first_node->physical_address -
+ offsetof(struct b2r2_node, node));
+}
+
+struct b2r2_node *b2r2_blt_alloc_nodes(struct b2r2_control *cont,
+ int no_of_nodes)
+{
+ u32 physical_address;
+ struct b2r2_node *nodes;
+ struct b2r2_node *tmpnode;
+
+ if (no_of_nodes <= 0) {
+ dev_err(cont->dev, "%s: Wrong number of nodes (%d)",
+ __func__, no_of_nodes);
+ return NULL;
+ }
+
+ /* Allocate the memory */
+ nodes = (struct b2r2_node *) dma_alloc_coherent(cont->dev,
+ no_of_nodes * sizeof(struct b2r2_node),
+ &physical_address, GFP_DMA | GFP_KERNEL);
+
+ if (nodes == NULL) {
+ dev_err(cont->dev,
+ "%s: Failed to alloc memory for nodes",
+ __func__);
+ return NULL;
+ }
+
+ /* Build the linked list */
+ tmpnode = nodes;
+ physical_address += offsetof(struct b2r2_node, node);
+ while (no_of_nodes--) {
+ tmpnode->physical_address = physical_address;
+ if (no_of_nodes)
+ tmpnode->next = tmpnode + 1;
+ else
+ tmpnode->next = NULL;
+
+ tmpnode++;
+ physical_address += sizeof(struct b2r2_node);
+ }
+
+ return nodes;
+}
+
+void b2r2_blt_free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
+{
+ free_nodes(cont, first_node);
+}
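+
+/*
+ * Usage sketch (illustrative only): a caller needing n linked nodes would
+ * pair the helpers above as
+ *
+ *	struct b2r2_node *nodes = b2r2_blt_alloc_nodes(cont, n);
+ *	if (nodes) {
+ *		... program the chain starting at nodes, following ->next ...
+ *		b2r2_blt_free_nodes(cont, nodes);
+ *	}
+ */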
+
diff --git a/drivers/video/b2r2/b2r2_node_split.c b/drivers/video/b2r2/b2r2_node_split.c
new file mode 100644
index 00000000000..6587ef0c343
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_split.c
@@ -0,0 +1,3734 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node splitter
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_debug.h"
+#include "b2r2_node_split.h"
+#include "b2r2_internal.h"
+#include "b2r2_hw.h"
+#include "b2r2_filters.h"
+#include "b2r2_utils.h"
+
+#include <linux/kernel.h>
+
+/*
+ * Macros and constants
+ */
+#define ABS(x) ((x) < 0 ? -(x) : (x))
+#define MAX(x, y) ((x) > (y) ? (x) : (y))
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
+#define INSTANCES_DEFAULT_SIZE 10
+#define INSTANCES_GROW_SIZE 5
+
+/*
+ * Internal types
+ */
+
+
+/*
+ * Global variables
+ */
+
+/**
+ * VMX values for different color space conversions
+ */
+static const u32 vmx_rgb_to_yuv[] = {
+ B2R2_VMX0_RGB_TO_YUV_601_VIDEO,
+ B2R2_VMX1_RGB_TO_YUV_601_VIDEO,
+ B2R2_VMX2_RGB_TO_YUV_601_VIDEO,
+ B2R2_VMX3_RGB_TO_YUV_601_VIDEO,
+};
+
+static const u32 vmx_rgb_to_blt_yuv888[] = {
+ B2R2_VMX0_RGB_TO_BLT_YUV888_601_VIDEO,
+ B2R2_VMX1_RGB_TO_BLT_YUV888_601_VIDEO,
+ B2R2_VMX2_RGB_TO_BLT_YUV888_601_VIDEO,
+ B2R2_VMX3_RGB_TO_BLT_YUV888_601_VIDEO,
+};
+
+static const u32 vmx_yuv_to_rgb[] = {
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO,
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO,
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO,
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO,
+};
+
+static const u32 vmx_blt_yuv888_to_rgb[] = {
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO,
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO,
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO,
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO,
+};
+
+static const u32 vmx_yuv_to_blt_yuv888[] = {
+ B2R2_VMX0_YUV_TO_BLT_YUV888,
+ B2R2_VMX1_YUV_TO_BLT_YUV888,
+ B2R2_VMX2_YUV_TO_BLT_YUV888,
+ B2R2_VMX3_YUV_TO_BLT_YUV888,
+};
+
+static const u32 vmx_blt_yuv888_to_yuv[] = {
+ B2R2_VMX0_BLT_YUV888_TO_YUV,
+ B2R2_VMX1_BLT_YUV888_TO_YUV,
+ B2R2_VMX2_BLT_YUV888_TO_YUV,
+ B2R2_VMX3_BLT_YUV888_TO_YUV,
+};
+
+static const u32 vmx_yvu_to_blt_yuv888[] = {
+ B2R2_VMX0_YVU_TO_BLT_YUV888,
+ B2R2_VMX1_YVU_TO_BLT_YUV888,
+ B2R2_VMX2_YVU_TO_BLT_YUV888,
+ B2R2_VMX3_YVU_TO_BLT_YUV888,
+};
+
+static const u32 vmx_blt_yuv888_to_yvu[] = {
+ B2R2_VMX0_BLT_YUV888_TO_YVU,
+ B2R2_VMX1_BLT_YUV888_TO_YVU,
+ B2R2_VMX2_BLT_YUV888_TO_YVU,
+ B2R2_VMX3_BLT_YUV888_TO_YVU,
+};
+
+static const u32 vmx_yvu_to_rgb[] = {
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO,
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO,
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO,
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO,
+};
+
+static const u32 vmx_rgb_to_yvu[] = {
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO,
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO,
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO,
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO,
+};
+
+static const u32 vmx_rgb_to_bgr[] = {
+ B2R2_VMX0_RGB_TO_BGR,
+ B2R2_VMX1_RGB_TO_BGR,
+ B2R2_VMX2_RGB_TO_BGR,
+ B2R2_VMX3_RGB_TO_BGR,
+};
+
+static const u32 vmx_bgr_to_yuv[] = {
+ B2R2_VMX0_BGR_TO_YUV_601_VIDEO,
+ B2R2_VMX1_BGR_TO_YUV_601_VIDEO,
+ B2R2_VMX2_BGR_TO_YUV_601_VIDEO,
+ B2R2_VMX3_BGR_TO_YUV_601_VIDEO,
+};
+
+static const u32 vmx_yuv_to_bgr[] = {
+ B2R2_VMX0_YUV_TO_BGR_601_VIDEO,
+ B2R2_VMX1_YUV_TO_BGR_601_VIDEO,
+ B2R2_VMX2_YUV_TO_BGR_601_VIDEO,
+ B2R2_VMX3_YUV_TO_BGR_601_VIDEO,
+};
+
+static const u32 vmx_bgr_to_yvu[] = {
+ B2R2_VMX0_BGR_TO_YVU_601_VIDEO,
+ B2R2_VMX1_BGR_TO_YVU_601_VIDEO,
+ B2R2_VMX2_BGR_TO_YVU_601_VIDEO,
+ B2R2_VMX3_BGR_TO_YVU_601_VIDEO,
+};
+
+static const u32 vmx_yvu_to_bgr[] = {
+ B2R2_VMX0_YVU_TO_BGR_601_VIDEO,
+ B2R2_VMX1_YVU_TO_BGR_601_VIDEO,
+ B2R2_VMX2_YVU_TO_BGR_601_VIDEO,
+ B2R2_VMX3_YVU_TO_BGR_601_VIDEO,
+};
+
+static const u32 vmx_yvu_to_yuv[] = {
+ B2R2_VMX0_YVU_TO_YUV_601_VIDEO,
+ B2R2_VMX1_YVU_TO_YUV_601_VIDEO,
+ B2R2_VMX2_YVU_TO_YUV_601_VIDEO,
+ B2R2_VMX3_YVU_TO_YUV_601_VIDEO,
+};
+
+/*
+ * Forward declaration of private functions
+ */
+static int analyze_fmt_conv(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 **vmx, u32 *node_count);
+static int analyze_color_fill(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count);
+static int analyze_copy(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_scaling(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_rotate(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_transform(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_rot_scale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_scale_factors(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this);
+
+static void configure_src(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *src, const u32 *ivmx);
+static void configure_bg(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *bg, bool swap_fg_bg);
+static int configure_dst(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next);
+static void configure_blend(struct b2r2_control *cont, struct b2r2_node *node,
+ u32 flags, u32 global_alpha);
+static void configure_clip(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_blt_rect *clip_rect);
+
+static int configure_tile(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next);
+static void configure_direct_fill(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 color,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next);
+static int configure_fill(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 color, enum b2r2_blt_fmt fmt,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next);
+static void configure_direct_copy(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, struct b2r2_node **next);
+static int configure_copy(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_rotate(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_scale(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, u16 h_rsf, u16 v_rsf,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_rot_scale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next);
+
+static void recalculate_rects(struct b2r2_control *cont,
+ struct b2r2_blt_req *req);
+
+static int check_rect(struct b2r2_control *cont,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ const struct b2r2_blt_rect *clip);
+static void set_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *buf,
+ u32 addr, const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect, bool color_fill, u32 color);
+static int setup_tmp_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *this, u32 max_size,
+ enum b2r2_blt_fmt pref_fmt, u32 pref_width, u32 pref_height);
+
+static enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt);
+static u32 set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color);
+static u8 get_alpha(enum b2r2_blt_fmt fmt, u32 pixel);
+static bool fmt_has_alpha(enum b2r2_blt_fmt fmt);
+
+static bool is_rgb_fmt(enum b2r2_blt_fmt fmt);
+static bool is_bgr_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yvu_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv420_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv422_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv444_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yvu420_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yvu422_fmt(enum b2r2_blt_fmt fmt);
+
+static int fmt_byte_pitch(enum b2r2_blt_fmt fmt, u32 width);
+static enum b2r2_native_fmt to_native_fmt(enum b2r2_blt_fmt fmt);
+static u32 to_RGB888(u32 color, const enum b2r2_blt_fmt fmt);
+static enum b2r2_fmt_type get_fmt_type(enum b2r2_blt_fmt fmt);
+
+static bool is_transform(const struct b2r2_blt_request *req);
+static s32 rescale(struct b2r2_control *cont, s32 dim, u16 sf);
+static s32 inv_rescale(s32 dim, u16 sf);
+
+static void set_target(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src(struct b2r2_src_config *src, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_1(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_2(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_3(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values);
+
+static void reset_nodes(struct b2r2_node *node);
+
+static bool bg_format_require_ivmx(enum b2r2_blt_fmt bg_fmt,
+ enum b2r2_blt_fmt dst_fmt);
+
+/*
+ * Public functions
+ */
+
+/**
+ * b2r2_node_split_analyze() - analyzes the request
+ */
+int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
+ u32 max_buf_size, u32 *node_count, struct b2r2_work_buf **bufs,
+ u32 *buf_count, struct b2r2_node_split_job *this)
+{
+ int ret;
+ bool color_fill;
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ memset(this, 0, sizeof(*this));
+
+ /* Copy parameters */
+ this->flags = req->user_req.flags;
+ this->transform = req->user_req.transform;
+ this->max_buf_size = max_buf_size;
+ this->global_alpha = req->user_req.global_alpha;
+ this->buf_count = 0;
+ this->node_count = 0;
+
+ if (this->flags & B2R2_BLT_FLAG_BLUR) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Unsupported formats on src */
+ switch (req->user_req.src_img.fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (is_bgr_fmt(req->user_req.dst_img.fmt)) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Unsupported formats on dst */
+ switch (req->user_req.dst_img.fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (is_bgr_fmt(req->user_req.src_img.fmt)) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Unsupported formats on bg */
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND)
+ /*
+ * There are no ivmx on source 1, so check that there is no
+ * such requirement on the background to destination format
+ * conversion. This check is sufficient since the node splitter
+ * currently does not support destination ivmx. That fact also
+ * removes the source format as a parameter when checking the
+ * background format.
+ */
+ if (bg_format_require_ivmx(req->user_req.bg_img.fmt,
+ req->user_req.dst_img.fmt)) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ if ((this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) &&
+ (is_yuv_fmt(req->user_req.src_img.fmt) ||
+ req->user_req.src_img.fmt == B2R2_BLT_FMT_1_BIT_A1 ||
+ req->user_req.src_img.fmt == B2R2_BLT_FMT_8_BIT_A8)) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source color keying "
+ "with YUV or pure alpha formats.\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ if (this->flags & (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_MASK)) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source mask, "
+ "destination color keying.\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ req->user_req.clut == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Invalid request: no table "
+ "specified for CLUT color correction.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check for color fill */
+ color_fill = (this->flags & (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0;
+
+ /*
+ * B2R2 cannot handle destination clipping on buffers
+ * allocated close to 64MiB bank boundaries.
+	 * Recalculate src_rect and dst_rect to avoid clipping.
+ */
+ recalculate_rects(cont, (struct b2r2_blt_req *) &req->user_req);
+
+ /* Configure the source and destination buffers */
+ set_buf(cont, &this->src, req->src_resolved.physical_address,
+ &req->user_req.src_img, &req->user_req.src_rect,
+ color_fill, req->user_req.src_color);
+
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
+ set_buf(cont, &this->bg, req->bg_resolved.physical_address,
+ &req->user_req.bg_img, &req->user_req.bg_rect,
+ false, 0);
+ }
+
+ set_buf(cont, &this->dst, req->dst_resolved.physical_address,
+ &req->user_req.dst_img, &req->user_req.dst_rect, false,
+ 0);
+
+ b2r2_log_info(cont->dev, "%s:\n"
+ "\t\tsrc.rect=(%4d, %4d, %4d, %4d)\t"
+ "bg.rect=(%4d, %4d, %4d, %4d)\t"
+ "dst.rect=(%4d, %4d, %4d, %4d)\n", __func__, this->src.rect.x,
+ this->src.rect.y, this->src.rect.width, this->src.rect.height,
+ this->bg.rect.x, this->bg.rect.y, this->bg.rect.width,
+ this->bg.rect.height, this->dst.rect.x, this->dst.rect.y,
+ this->dst.rect.width, this->dst.rect.height);
+
+ if (this->flags & B2R2_BLT_FLAG_DITHER)
+ this->dst.dither = B2R2_TTY_RGB_ROUND_DITHER;
+
+ if (this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY)
+ this->flag_param = req->user_req.src_color;
+
+ /* Check for blending */
+ if ((this->flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) &&
+ (this->global_alpha != 255))
+ this->blend = true;
+ else if (this->flags & B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)
+ this->blend = (color_fill && fmt_has_alpha(this->dst.fmt)) ||
+ fmt_has_alpha(this->src.fmt);
+ else if (this->flags & B2R2_BLT_FLAG_BG_BLEND)
+ this->blend = true;
+
+ if (this->blend && this->src.type == B2R2_FMT_TYPE_PLANAR) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: blend with planar"
+ " source\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Check for clipping */
+ this->clip = (this->flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0;
+ if (this->clip) {
+ s32 l = req->user_req.dst_clip_rect.x;
+ s32 r = l + req->user_req.dst_clip_rect.width;
+ s32 t = req->user_req.dst_clip_rect.y;
+ s32 b = t + req->user_req.dst_clip_rect.height;
+
+ /* Intersect the clip and buffer rects */
+ if (l < 0)
+ l = 0;
+ if (r > req->user_req.dst_img.width)
+ r = req->user_req.dst_img.width;
+ if (t < 0)
+ t = 0;
+ if (b > req->user_req.dst_img.height)
+ b = req->user_req.dst_img.height;
+
+ this->clip_rect.x = l;
+ this->clip_rect.y = t;
+ this->clip_rect.width = r - l;
+ this->clip_rect.height = b - t;
+ } else {
+ /* Set the clip rectangle to the buffer bounds */
+ this->clip_rect.x = 0;
+ this->clip_rect.y = 0;
+ this->clip_rect.width = req->user_req.dst_img.width;
+ this->clip_rect.height = req->user_req.dst_img.height;
+ }
+
+ /* Validate the destination */
+ ret = check_rect(cont, &req->user_req.dst_img, &req->user_req.dst_rect,
+ &this->clip_rect);
+ if (ret < 0)
+ goto error;
+
+ /* Validate the source (if not color fill) */
+ if (!color_fill) {
+ ret = check_rect(cont, &req->user_req.src_img,
+ &req->user_req.src_rect, NULL);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Validate the background source */
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
+ ret = check_rect(cont, &req->user_req.bg_img,
+ &req->user_req.bg_rect, NULL);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Do the analysis depending on the type of operation */
+ if (color_fill) {
+ ret = analyze_color_fill(this, req, &this->node_count);
+ } else {
+
+ bool upsample;
+ bool downsample;
+
+ /*
+ * YUV formats that are non-raster, non-yuv444 needs to be
+ * up (or down) sampled using the resizer.
+ *
+ * NOTE: The resizer needs to be enabled for YUV444 as well,
+ * even though there is no upsampling. This is most
+ * likely a bug in the hardware.
+ */
+ upsample = this->src.type != B2R2_FMT_TYPE_RASTER &&
+ is_yuv_fmt(this->src.fmt);
+ downsample = this->dst.type != B2R2_FMT_TYPE_RASTER &&
+ is_yuv_fmt(this->dst.fmt);
+
+ if (is_transform(req) || upsample || downsample)
+ ret = analyze_transform(this, req, &this->node_count,
+ &this->buf_count);
+ else
+ ret = analyze_copy(this, req, &this->node_count,
+ &this->buf_count);
+ }
+
+ if (ret == -ENOSYS) {
+ goto unsupported;
+ } else if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Analysis failed!\n", __func__);
+ goto error;
+ }
+
+ /* Setup the origin and movement of the destination window */
+ if (this->dst.hso == B2R2_TY_HSO_RIGHT_TO_LEFT) {
+ this->dst.dx = -this->dst.win.width;
+ this->dst.win.x = this->dst.rect.x + this->dst.rect.width - 1;
+ } else {
+ this->dst.dx = this->dst.win.width;
+ this->dst.win.x = this->dst.rect.x;
+ }
+ if (this->dst.vso == B2R2_TY_VSO_BOTTOM_TO_TOP) {
+ this->dst.dy = -this->dst.win.height;
+ this->dst.win.y = this->dst.rect.y + this->dst.rect.height - 1;
+ } else {
+ this->dst.dy = this->dst.win.height;
+ this->dst.win.y = this->dst.rect.y;
+ }
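+
+	/*
+	 * Example with illustrative numbers: for a right-to-left scan with
+	 * dst.rect = (x=10, width=100) and dst.win.width = 32, the window
+	 * starts at x = 10 + 100 - 1 = 109 with dx = -32, so each tile steps
+	 * leftwards across the destination rectangle.
+	 */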
+
+ *buf_count = this->buf_count;
+ *node_count = this->node_count;
+
+ if (this->buf_count > 0)
+ *bufs = &this->work_bufs[0];
+
+ b2r2_log_info(cont->dev, "%s: dst.win=(%d, %d, %d, %d), "
+ "dst.dx=%d, dst.dy=%d\n", __func__, this->dst.win.x,
+ this->dst.win.y, this->dst.win.width, this->dst.win.height,
+ this->dst.dx, this->dst.dy);
+ if (this->buf_count > 0)
+ b2r2_log_info(cont->dev, "%s: buf_count=%d, buf_size=%d, "
+ "node_count=%d\n", __func__, *buf_count,
+ bufs[0]->size, *node_count);
+ else
+ b2r2_log_info(cont->dev, "%s: buf_count=%d, node_count=%d\n",
+ __func__, *buf_count, *node_count);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+unsupported:
+ return ret;
+}
+
+/**
+ * b2r2_node_split_configure() - configures the node list
+ */
+int b2r2_node_split_configure(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first)
+{
+ int ret;
+
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node *node = first;
+
+ u32 x_pixels = 0;
+ u32 y_pixels = 0;
+
+ reset_nodes(node);
+
+ while (y_pixels < dst->rect.height) {
+ s32 dst_x = dst->win.x;
+ s32 dst_w = dst->win.width;
+
+ /* Clamp window height */
+ if (dst->win.height > dst->rect.height - y_pixels)
+ dst->win.height = dst->rect.height - y_pixels;
+
+ while (x_pixels < dst->rect.width) {
+
+ /* Clamp window width */
+ if (dst_w > dst->rect.width - x_pixels)
+ dst->win.width = dst->rect.width - x_pixels;
+
+ ret = configure_tile(cont, this, node, &node);
+ if (ret < 0)
+ goto error;
+
+ dst->win.x += dst->dx;
+ x_pixels += max(dst->dx, -dst->dx);
+ b2r2_log_info(cont->dev, "%s: x_pixels=%d\n",
+ __func__, x_pixels);
+ }
+
+ dst->win.y += dst->dy;
+ y_pixels += max(dst->dy, -dst->dy);
+
+ dst->win.x = dst_x;
+ dst->win.width = dst_w;
+ x_pixels = 0;
+
+ b2r2_log_info(cont->dev, "%s: y_pixels=%d\n",
+ __func__, y_pixels);
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * b2r2_node_split_assign_buffers() - assigns temporary buffers to the node list
+ */
+int b2r2_node_split_assign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first,
+ struct b2r2_work_buf *bufs, u32 buf_count)
+{
+ struct b2r2_node *node = first;
+
+ while (node != NULL) {
+ /* The indices are offset by one */
+ if (node->dst_tmp_index) {
+ BUG_ON(node->dst_tmp_index > buf_count);
+
+ b2r2_log_info(cont->dev, "%s: assigning buf %d as "
+ "dst\n", __func__, node->dst_tmp_index);
+
+ node->node.GROUP1.B2R2_TBA =
+ bufs[node->dst_tmp_index - 1].phys_addr;
+ }
+ if (node->src_tmp_index) {
+ u32 addr = bufs[node->src_tmp_index - 1].phys_addr;
+
+ b2r2_log_info(cont->dev, "%s: assigning buf %d as src "
+ "%d ", __func__, node->src_tmp_index,
+ node->src_index);
+
+ BUG_ON(node->src_tmp_index > buf_count);
+
+ switch (node->src_index) {
+ case 1:
+ b2r2_log_info(cont->dev, "1\n");
+ node->node.GROUP3.B2R2_SBA = addr;
+ break;
+ case 2:
+ b2r2_log_info(cont->dev, "2\n");
+ node->node.GROUP4.B2R2_SBA = addr;
+ break;
+ case 3:
+ b2r2_log_info(cont->dev, "3\n");
+ node->node.GROUP5.B2R2_SBA = addr;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+ }
+
+ b2r2_log_info(cont->dev, "%s: tba=%p\tsba=%p\n", __func__,
+ (void *)node->node.GROUP1.B2R2_TBA,
+ (void *)node->node.GROUP4.B2R2_SBA);
+
+ node = node->next;
+ }
+
+ return 0;
+}
+
+/**
+ * b2r2_node_split_unassign_buffers() - releases temporary buffers
+ */
+void b2r2_node_split_unassign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first)
+{
+ return;
+}
+
+/**
+ * b2r2_node_split_cancel() - cancels and releases a job instance
+ */
+void b2r2_node_split_cancel(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this)
+{
+ memset(this, 0, sizeof(*this));
+
+ return;
+}
+
+/*
+ * Private functions
+ */
+
+static void recalculate_rects(struct b2r2_control *cont,
+ struct b2r2_blt_req *req)
+{
+ struct b2r2_blt_rect new_dst_rect;
+ struct b2r2_blt_rect new_src_rect;
+ struct b2r2_blt_rect new_bg_rect;
+
+ b2r2_trim_rects(cont,
+ req, &new_bg_rect, &new_dst_rect, &new_src_rect);
+
+ req->dst_rect = new_dst_rect;
+ req->src_rect = new_src_rect;
+ req->bg_rect = new_bg_rect;
+}
+
+static int check_rect(struct b2r2_control *cont,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ const struct b2r2_blt_rect *clip)
+{
+ int ret;
+
+ s32 l, r, b, t;
+
+	/* Check the rectangle dimensions */
+ if ((rect->width <= 0) || (rect->height <= 0)) {
+ b2r2_log_warn(cont->dev, "%s: Illegal rect (%d, %d, %d, %d)\n",
+ __func__, rect->x, rect->y, rect->width,
+ rect->height);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* If we are using clip we should only look at the intersection of the
+ rects */
+ if (clip) {
+ l = MAX(rect->x, clip->x);
+ t = MAX(rect->y, clip->y);
+ r = MIN(rect->x + rect->width, clip->x + clip->width);
+ b = MIN(rect->y + rect->height, clip->y + clip->height);
+ } else {
+ l = rect->x;
+ t = rect->y;
+ r = rect->x + rect->width;
+ b = rect->y + rect->height;
+ }
+
+	/* Check that the rect origin isn't outside the buffer */
+ if ((l < 0) || (t < 0) || (l >= img->width) || (t >= img->height)) {
+ b2r2_log_warn(cont->dev, "%s: rect origin outside buffer\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if ((r > img->width) || (b > img->height)) {
+ b2r2_log_warn(cont->dev, "%s: rect ends outside buffer\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+	/* Check that the intersected rectangle isn't empty */
+ if ((l == r) || (t == b)) {
+ b2r2_log_warn(cont->dev,
+ "%s: rect is empty (width or height zero)\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * bg_format_require_ivmx()
+ *
+ * Check whether a color space conversion (IVMX) is needed when writing
+ * the background format to the destination format.
+ */
+static bool bg_format_require_ivmx(enum b2r2_blt_fmt bg_fmt,
+ enum b2r2_blt_fmt dst_fmt)
+{
+ if (is_rgb_fmt(bg_fmt)) {
+ if (is_yvu_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (is_yuv_fmt(dst_fmt))
+ return true;
+ else if (is_bgr_fmt(dst_fmt))
+ return true;
+ } else if (is_yvu_fmt(bg_fmt)) {
+ if (is_rgb_fmt(dst_fmt))
+ return true;
+ else if (is_bgr_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (is_yuv_fmt(dst_fmt) &&
+ !is_yvu_fmt(dst_fmt))
+ return true;
+ } else if (bg_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ bg_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ bg_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ bg_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ if (is_rgb_fmt(dst_fmt)) {
+ return true;
+ } else if (is_yvu_fmt(dst_fmt)) {
+ return true;
+ } else if (is_yuv_fmt(dst_fmt)) {
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ break;
+ default:
+ return true;
+ }
+ }
+ } else if (is_yuv_fmt(bg_fmt)) {
+ if (is_rgb_fmt(dst_fmt))
+ return true;
+ else if (is_bgr_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (is_yvu_fmt(dst_fmt))
+ return true;
+ } else if (is_bgr_fmt(bg_fmt)) {
+ if (is_rgb_fmt(dst_fmt))
+ return true;
+ else if (is_yvu_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (is_yuv_fmt(dst_fmt))
+ return true;
+ }
+
+ return false;
+}
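+
+/*
+ * For example (illustrative): an RGB background blended onto a YUV or BGR
+ * destination needs an IVMX and is rejected by the caller above, while an
+ * RGB background onto an RGB destination falls through and returns false.
+ */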
+
+/**
+ * analyze_fmt_conv() - analyze the format conversions needed for a job
+ */
+static int analyze_fmt_conv(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 **vmx, u32 *node_count)
+{
+ if (is_rgb_fmt(src->fmt)) {
+ if (is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_yvu[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ /*
+ * (A)YUV/VUY(A) formats differ only in component
+ * order. This is handled by the endianness bit
+ * in B2R2_STY/TTY registers when src/target are set.
+ */
+ *vmx = &vmx_rgb_to_blt_yuv888[0];
+ else if (is_yuv_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_yuv[0];
+ else if (is_bgr_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_bgr[0];
+ } else if (is_yvu_fmt(src->fmt)) {
+ if (is_rgb_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_rgb[0];
+ else if (is_bgr_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_bgr[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ *vmx = &vmx_yvu_to_blt_yuv888[0];
+ else if (is_yuv_fmt(dst->fmt) &&
+ !is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_yuv[0];
+ } else if (src->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ src->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ src->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ src->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ /*
+ * (A)YUV/VUY(A) formats differ only in component
+ * order. This is handled by the endianness bit
+ * in B2R2_STY/TTY registers when src/target are set.
+ */
+ if (is_rgb_fmt(dst->fmt)) {
+ *vmx = &vmx_blt_yuv888_to_rgb[0];
+ } else if (is_yvu_fmt(dst->fmt)) {
+ *vmx = &vmx_blt_yuv888_to_yvu[0];
+ } else if (is_yuv_fmt(dst->fmt)) {
+ switch (dst->fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* do nothing */
+ break;
+ default:
+ *vmx = &vmx_blt_yuv888_to_yuv[0];
+ break;
+ }
+ }
+ } else if (is_yuv_fmt(src->fmt)) {
+ if (is_rgb_fmt(dst->fmt))
+ *vmx = &vmx_yuv_to_rgb[0];
+ else if (is_bgr_fmt(dst->fmt))
+ *vmx = &vmx_yuv_to_bgr[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ *vmx = &vmx_yuv_to_blt_yuv888[0];
+ else if (is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_yuv[0];
+ } else if (is_bgr_fmt(src->fmt)) {
+ if (is_rgb_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_bgr[0];
+ else if (is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_bgr_to_yvu[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ BUG_ON(1);
+ else if (is_yuv_fmt(dst->fmt))
+ *vmx = &vmx_bgr_to_yuv[0];
+ }
+
+ if (dst->type == B2R2_FMT_TYPE_RASTER) {
+ *node_count = 1;
+ } else if (dst->type == B2R2_FMT_TYPE_SEMI_PLANAR) {
+ *node_count = 2;
+ } else if (dst->type == B2R2_FMT_TYPE_PLANAR) {
+ *node_count = 3;
+ } else {
+ /* That's strange... */
+ BUG_ON(1);
+ }
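+
+	/*
+	 * In other words (illustrative): a raster destination such as RGB565
+	 * needs one node, a semi-planar destination needs two (luma plus
+	 * interleaved chroma), and a fully planar destination needs three
+	 * (one node per Y, Cb and Cr plane).
+	 */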
+
+ return 0;
+}
+
+/**
+ * analyze_color_fill() - analyze a color fill operation
+ */
+static int analyze_color_fill(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count)
+{
+ int ret;
+ struct b2r2_control *cont = req->instance->control;
+
+ /* Destination must be raster for raw fill to work */
+ if (this->dst.type != B2R2_FMT_TYPE_RASTER) {
+ b2r2_log_warn(cont->dev,
+ "%s: fill requires raster destination\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* We will try to fill the entire rectangle in one go */
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ /* Check if this is a direct fill */
+ if ((!this->blend) && ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_ARGB8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_AYUV8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_VUYA8888))) {
+ this->type = B2R2_DIRECT_FILL;
+
+ /* The color format will be the same as the dst fmt */
+ this->src.fmt = this->dst.fmt;
+
+		/* The entire destination rectangle will be filled in one go */
+ memcpy(&this->dst.win, &this->dst.rect,
+ sizeof(this->dst.win));
+ *node_count = 1;
+ } else {
+ this->type = B2R2_FILL;
+
+ /* Determine the fill color format */
+ if (this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) {
+ /* The color format will be the same as the dst fmt */
+ this->src.fmt = this->dst.fmt;
+ } else {
+ /* If the dst fmt is YUV the fill fmt will be as well */
+ if (is_yuv_fmt(this->dst.fmt)) {
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ } else if (is_rgb_fmt(this->dst.fmt)) {
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else if (is_bgr_fmt(this->dst.fmt)) {
+ /* Color will still be ARGB, we will translate
+ using IVMX (configured later) */
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else {
+ /* Wait, what? */
+ b2r2_log_warn(cont->dev, "%s: "
+ "Illegal destination format for fill",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+
+ /* Also, B2R2 seems to ignore the pixel alpha value */
+ if (((this->flags & B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)
+ != 0) &&
+ ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW)
+ == 0) && fmt_has_alpha(this->src.fmt)) {
+ u8 pixel_alpha = get_alpha(this->src.fmt,
+ this->src.color);
+ u32 new_global = pixel_alpha * this->global_alpha / 255;
+
+ this->global_alpha = (u8)new_global;
+
+ /* Set the pixel alpha to full opaque so we don't get
+ any nasty surprises */
+ this->src.color = set_alpha(this->src.fmt, 0xFF,
+ this->src.color);
+ }
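+
+		/*
+		 * Worked example (illustrative values): pixel_alpha = 128 and
+		 * global_alpha = 192 fold into 128 * 192 / 255 = 96, which
+		 * becomes the new global alpha while the fill color itself is
+		 * forced fully opaque.
+		 */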
+
+ ret = analyze_fmt_conv(
+ cont, &this->src, &this->dst, &this->ivmx,
+ node_count);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * analyze_transform() - analyze a transform operation (rescale, rotate, etc.)
+ */
+static int analyze_transform(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ bool is_scaling;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /*
+ * The transform enum is defined so that all rotation transforms are
+ * masked with the rotation flag
+ */
+ this->rotation = (this->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0;
+
+	/* B2R2 cannot rotate when the destination is non-raster or a 422R format */
+ if (this->rotation && (this->dst.type != B2R2_FMT_TYPE_RASTER ||
+ this->dst.fmt == B2R2_BLT_FMT_Y_CB_Y_CR ||
+ this->dst.fmt == B2R2_BLT_FMT_CB_Y_CR_Y)) {
+ b2r2_log_warn(cont->dev,
+ "%s: Unsupported operation "
+ "(rot && (!dst_raster || dst==422R))",
+ __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Flip the image by changing the scan order of the destination */
+ if (this->transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ this->dst.hso = B2R2_TY_HSO_RIGHT_TO_LEFT;
+ if (this->transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ /* Check for scaling */
+ if (this->rotation) {
+ is_scaling = (this->src.rect.width != this->dst.rect.height) ||
+ (this->src.rect.height != this->dst.rect.width);
+ } else {
+ is_scaling = (this->src.rect.width != this->dst.rect.width) ||
+ (this->src.rect.height != this->dst.rect.height);
+ }
+
+ /* Plane separated formats must be treated as scaling */
+ is_scaling = is_scaling ||
+ (this->src.type == B2R2_FMT_TYPE_SEMI_PLANAR) ||
+ (this->src.type == B2R2_FMT_TYPE_PLANAR) ||
+ (this->dst.type == B2R2_FMT_TYPE_SEMI_PLANAR) ||
+ (this->dst.type == B2R2_FMT_TYPE_PLANAR);
+
+ if (is_scaling && this->rotation && this->blend) {
+ /* TODO: This is unsupported. Fix it! */
+ b2r2_log_info(cont->dev, "%s: Unsupported operation "
+ "(rot+rescale+blend)\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Check which type of transform */
+ if (is_scaling && this->rotation) {
+ ret = analyze_rot_scale(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else if (is_scaling) {
+ ret = analyze_scaling(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else if (this->rotation) {
+ ret = analyze_rotate(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else {
+ /* No additional nodes needed for a flip */
+ ret = analyze_copy(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ this->type = B2R2_FLIP;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+unsupported:
+ return ret;
+}
+
+/**
+ * analyze_copy() - analyze a copy operation
+ */
+static int analyze_copy(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ struct b2r2_control *cont = req->instance->control;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ if (!this->blend &&
+ !(this->flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ (this->src.fmt == this->dst.fmt) &&
+ (this->src.type == B2R2_FMT_TYPE_RASTER) &&
+ (this->dst.rect.x >= this->clip_rect.x) &&
+ (this->dst.rect.y >= this->clip_rect.y) &&
+ (this->dst.rect.x + this->dst.rect.width <=
+ this->clip_rect.x + this->clip_rect.width) &&
+ (this->dst.rect.y + this->dst.rect.height <=
+ this->clip_rect.y + this->clip_rect.height)) {
+ this->type = B2R2_DIRECT_COPY;
+ *node_count = 1;
+ } else {
+ u32 copy_count;
+
+ this->type = B2R2_COPY;
+
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst,
+ &this->ivmx, &copy_count);
+ if (ret < 0)
+ goto error;
+
+ *node_count = copy_count;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+static int calc_rot_count(u32 width, u32 height)
+{
+ int count;
+
+ count = width / B2R2_ROTATE_MAX_WIDTH;
+ if (width % B2R2_ROTATE_MAX_WIDTH)
+ count++;
+ if (height > B2R2_ROTATE_MAX_WIDTH &&
+ height % B2R2_ROTATE_MAX_WIDTH)
+ count *= 2;
+
+ return count;
+}
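+
+/*
+ * Example (symbolic, as it only depends on B2R2_ROTATE_MAX_WIDTH): for
+ * width = 3 * B2R2_ROTATE_MAX_WIDTH + 5 the computation above gives
+ * count = 4, and if height also exceeds B2R2_ROTATE_MAX_WIDTH without being
+ * a multiple of it, the count is doubled to 8.
+ */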
+
+static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ struct b2r2_control *cont = req->instance->control;
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ u32 num_rows;
+ u32 num_cols;
+ u32 rot_count;
+ u32 rescale_count;
+ u32 nodes_per_rot;
+ u32 nodes_per_rescale;
+ u32 right_width;
+ u32 bottom_height;
+ const u32 *dummy_vmx;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Calculate the desired tmp buffer size */
+ tmp->win.width = rescale(cont, B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
+ tmp->win.width >>= 10;
+ tmp->win.width = min(tmp->win.width, dst->rect.height);
+ tmp->win.height = dst->rect.width;
+
+ setup_tmp_buf(cont, tmp, this->max_buf_size, dst->fmt, tmp->win.width,
+ tmp->win.height);
+ tmp->tmp_buf_index = 1;
+ this->work_bufs[0].size = tmp->pitch * tmp->height;
+
+ tmp->win.width = tmp->rect.width;
+ tmp->win.height = tmp->rect.height;
+
+ tmp->dither = dst->dither;
+ dst->dither = 0;
+
+ /* Update the dst window with the actual tmp buffer dimensions */
+ dst->win.width = tmp->win.height;
+ dst->win.height = tmp->win.width;
+
+ /* The rotated stripes are written to the destination bottom-up */
+ if (this->dst.vso == B2R2_TY_VSO_TOP_TO_BOTTOM)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ else
+ this->dst.vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /*
+ * Calculate how many nodes are required to copy to and from the tmp
+ * buffer
+ */
+ ret = analyze_fmt_conv(cont, src, tmp, &this->ivmx, &nodes_per_rescale);
+ if (ret < 0)
+ goto error;
+
+ /* We will not do any format conversion in the rotation stage */
+ ret = analyze_fmt_conv(cont, tmp, dst, &dummy_vmx, &nodes_per_rot);
+ if (ret < 0)
+ goto error;
+
+ /* Calculate node count for the inner tiles */
+ num_cols = dst->rect.width / dst->win.width;
+ num_rows = dst->rect.height / dst->win.height;
+
+ rescale_count = num_cols * num_rows;
+ rot_count = calc_rot_count(dst->win.height, dst->win.width) *
+ num_cols * num_rows;
+
+ right_width = dst->rect.width % dst->win.width;
+ bottom_height = dst->rect.height % dst->win.height;
+
+ /* Calculate node count for the rightmost tiles */
+ if (right_width) {
+ u32 count = calc_rot_count(dst->win.height, right_width);
+
+ rot_count += count * num_rows;
+ rescale_count += num_rows;
+ b2r2_log_info(cont->dev, "%s: rightmost: %d nodes\n", __func__,
+ count*num_rows);
+ }
+
+ /* Calculate node count for the bottom tiles */
+ if (bottom_height) {
+ u32 count = calc_rot_count(bottom_height, dst->win.width);
+
+ rot_count += count * num_cols;
+ rescale_count += num_cols;
+ b2r2_log_info(cont->dev, "%s: bottom: %d nodes\n", __func__,
+ count * num_cols);
+
+ }
+
+ /* And finally for the bottom right corner */
+ if (right_width && bottom_height) {
+ u32 count = calc_rot_count(bottom_height, right_width);
+
+ rot_count += count;
+ rescale_count++;
+ b2r2_log_info(cont->dev, "%s: bottom right: %d nodes\n",
+ __func__, count);
+
+ }
+
+ *node_count = rot_count * nodes_per_rot;
+ *node_count += rescale_count * nodes_per_rescale;
+ *buf_count = 1;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+static int analyze_rot_scale_upscale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ /* TODO: When upscaling we should optimally do the rotation first... */
+ return analyze_rot_scale_downscale(this, req, node_count, buf_count);
+}
+
+/**
+ * analyze_rot_scale() - analyzes a combined rotation and scaling op
+ */
+static int analyze_rot_scale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ bool upscale;
+ struct b2r2_control *cont = req->instance->control;
+
+ ret = analyze_scale_factors(cont, this);
+ if (ret < 0)
+ goto error;
+
+ upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 20);
+
+ if (upscale)
+ ret = analyze_rot_scale_upscale(this, req, node_count,
+ buf_count);
+ else
+ ret = analyze_rot_scale_downscale(this, req, node_count,
+ buf_count);
+
+ if (ret < 0)
+ goto error;
+
+ this->type = B2R2_SCALE_AND_ROTATE;
+
+ return 0;
+
+error:
+ return ret;
+}
+
+/**
+ * analyze_scaling() - analyze a rescale operation
+ */
+static int analyze_scaling(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ u32 copy_count;
+ u32 nbr_cols;
+ s32 dst_w;
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ ret = analyze_scale_factors(cont, this);
+ if (ret < 0)
+ goto error;
+
+ /* Find out how many nodes a simple copy would require */
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst, &this->ivmx,
+ &copy_count);
+ if (ret < 0)
+ goto error;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ /*
+ * We need to subtract from the actual maximum rescale width since the
+ * start of the stripe will be floored and the end ceiled. This could in
+ * some cases cause the stripe to be one pixel more than the maximum
+ * width.
+ *
+ * Example:
+ * x = 127.8, w = 127.8
+ *
+ * The stripe will touch pixels 127.8 through 255.6, i.e. 129 pixels.
+ */
+ dst_w = rescale(cont, B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
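+ /* Round the 22.10 result down to whole pixels, but use at least one */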
+ if (dst_w < (1 << 10))
+ dst_w = 1;
+ else
+ dst_w >>= 10;
+
+ b2r2_log_info(cont->dev, "%s: dst_w=%d dst.rect.width=%d\n",
+ __func__, dst_w, this->dst.rect.width);
+
+ this->dst.win.width = min(dst_w, this->dst.rect.width);
+
+ b2r2_log_info(cont->dev, "%s: dst.win.width=%d\n",
+ __func__, this->dst.win.width);
+
+ nbr_cols = this->dst.rect.width / this->dst.win.width;
+ if (this->dst.rect.width % this->dst.win.width)
+ nbr_cols++;
+
+ *node_count = copy_count * nbr_cols;
+
+ this->type = B2R2_SCALE;
+
+ b2r2_log_info(cont->dev, "%s exit\n", __func__);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * analyze_rotate() - analyze a rotate operation
+ */
+static int analyze_rotate(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ u32 nodes_per_tile;
+ struct b2r2_control *cont = req->instance->control;
+
+ /* Find out how many nodes a simple copy would require */
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst, &this->ivmx,
+ &nodes_per_tile);
+ if (ret < 0)
+ goto error;
+
+ this->type = B2R2_ROTATE;
+
+ /* The rotated stripes are written to the destination bottom-up */
+ if (this->dst.vso == B2R2_TY_VSO_TOP_TO_BOTTOM)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ else
+ this->dst.vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ this->dst.win.height = min(this->dst.win.height, B2R2_ROTATE_MAX_WIDTH);
+
+ /*
+ * B2R2 cannot do rotations on stripes that are not a multiple of 16
+ * pixels high (if larger than 16 pixels).
+ */
+ if (this->dst.win.width > 16)
+ this->dst.win.width -= (this->dst.win.width % 16);
+
+ /* Blending cannot be combined with rotation */
+ if (this->blend) {
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+ enum b2r2_blt_fmt tmp_fmt;
+
+ if (is_yuv_fmt(this->dst.fmt))
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ else if (is_bgr_fmt(this->dst.fmt))
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_ABGR8888;
+ else
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+
+ setup_tmp_buf(cont, tmp, this->max_buf_size, tmp_fmt,
+ this->dst.win.width, this->dst.win.height);
+
+ tmp->tmp_buf_index = 1;
+
+ tmp->vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ this->dst.win.width = tmp->rect.width;
+ this->dst.win.height = tmp->rect.height;
+
+ memcpy(&tmp->win, &tmp->rect, sizeof(tmp->win));
+
+ *buf_count = 1;
+ this->work_bufs[0].size = tmp->pitch * tmp->height;
+
+ /*
+ * One more node per tile is required to rotate to the temp
+ * buffer.
+ */
+ nodes_per_tile++;
+ }
+
+ /* Finally, calculate the node count */
+ *node_count = nodes_per_tile *
+ calc_rot_count(this->src.rect.width, this->src.rect.height);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * analyze_scale_factors() - determines the scale factors for the op
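+ *
+ * The factors are in 6.10 fixed point, where 1 << 10 means no scaling.
+ * E.g. downscaling a 200 pixel wide source to a 100 pixel wide destination
+ * should give h_rsf == 2 << 10 (a source/destination ratio of 2.0).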
+ */
+static int analyze_scale_factors(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ u16 hsf;
+ u16 vsf;
+
+ if (this->rotation) {
+ ret = calculate_scale_factor(cont, this->src.rect.width,
+ this->dst.rect.height, &hsf);
+ if (ret < 0)
+ goto error;
+
+ ret = calculate_scale_factor(cont, this->src.rect.height,
+ this->dst.rect.width, &vsf);
+ if (ret < 0)
+ goto error;
+ } else {
+ ret = calculate_scale_factor(cont, this->src.rect.width,
+ this->dst.rect.width, &hsf);
+ if (ret < 0)
+ goto error;
+
+ ret = calculate_scale_factor(cont, this->src.rect.height,
+ this->dst.rect.height, &vsf);
+ if (ret < 0)
+ goto error;
+ }
+
+ this->h_rescale = hsf != (1 << 10);
+ this->v_rescale = vsf != (1 << 10);
+
+ this->h_rsf = hsf;
+ this->v_rsf = vsf;
+
+ b2r2_log_info(cont->dev, "%s: h_rsf=%.4x\n", __func__, this->h_rsf);
+ b2r2_log_info(cont->dev, "%s: v_rsf=%.4x\n", __func__, this->v_rsf);
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_tile() - configures one tile of a blit operation
+ */
+static int configure_tile(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
+{
+ int ret = 0;
+
+ struct b2r2_node *last;
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *bg = &this->bg;
+
+ struct b2r2_blt_rect dst_norm;
+ struct b2r2_blt_rect src_norm;
+ struct b2r2_blt_rect bg_norm;
+
+ /* Normalize the dest coords to the dest rect coordinate space */
+ dst_norm.x = dst->win.x - dst->rect.x;
+ dst_norm.y = dst->win.y - dst->rect.y;
+ dst_norm.width = dst->win.width;
+ dst_norm.height = dst->win.height;
+
+ if (dst->vso == B2R2_TY_VSO_BOTTOM_TO_TOP) {
+ /* The y coord should be counted from the bottom */
+ dst_norm.y = dst->rect.height - (dst_norm.y + 1);
+ }
+ if (dst->hso == B2R2_TY_HSO_RIGHT_TO_LEFT) {
+ /* The x coord should be counted from the right */
+ dst_norm.x = dst->rect.width - (dst_norm.x + 1);
+ }
+
+ /* If the destination is rotated we should swap x, y */
+ if (this->rotation) {
+ src_norm.x = dst_norm.y;
+ src_norm.y = dst_norm.x;
+ src_norm.width = dst_norm.height;
+ src_norm.height = dst_norm.width;
+ } else {
+ src_norm.x = dst_norm.x;
+ src_norm.y = dst_norm.y;
+ src_norm.width = dst_norm.width;
+ src_norm.height = dst_norm.height;
+ }
+
+ /* Convert to src coordinate space */
+ src->win.x = src_norm.x + src->rect.x;
+ src->win.y = src_norm.y + src->rect.y;
+ src->win.width = src_norm.width;
+ src->win.height = src_norm.height;
+
+ /* Set bg norm */
+ bg_norm.x = dst->win.x - dst->rect.x;
+ bg_norm.y = dst->win.y - dst->rect.y;
+ bg_norm.width = dst->win.width;
+ bg_norm.height = dst->win.height;
+
+ /* Convert to bg coordinate space */
+ bg->win.x = bg_norm.x + bg->rect.x;
+ bg->win.y = bg_norm.y + bg->rect.y;
+ bg->win.width = bg_norm.width;
+ bg->win.height = bg_norm.height;
+ bg->vso = dst->vso;
+ bg->hso = dst->hso;
+
+ /* Do the configuration depending on operation type */
+ switch (this->type) {
+ case B2R2_DIRECT_FILL:
+ configure_direct_fill(cont, node, this->src.color, dst, &last);
+ break;
+
+ case B2R2_DIRECT_COPY:
+ configure_direct_copy(cont, node, src, dst, &last);
+ break;
+
+ case B2R2_FILL:
+ ret = configure_fill(cont, node, src->color, src->fmt,
+ dst, this->ivmx, &last);
+ break;
+
+ case B2R2_FLIP: /* FLIP is just a copy with different VSO/HSO */
+ case B2R2_COPY:
+ ret = configure_copy(
+ cont, node, src, dst, this->ivmx, &last, this);
+ break;
+
+ case B2R2_ROTATE:
+ {
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ if (this->blend) {
+ b2r2_log_info(cont->dev, "%s: rotation + "
+ "blend\n", __func__);
+
+ tmp->win.x = 0;
+ tmp->win.y = tmp->win.height - 1;
+ tmp->win.width = dst->win.width;
+ tmp->win.height = dst->win.height;
+
+ /* Rotate to the temp buf */
+ ret = configure_rotate(cont, node, src, tmp,
+ this->ivmx, &node, NULL);
+ if (ret < 0)
+ goto error;
+
+ /* Then do a copy to the destination */
+ ret = configure_copy(cont, node, tmp, dst, NULL,
+ &last, this);
+ } else {
+ /* Just do a rotation */
+ ret = configure_rotate(cont, node, src, dst,
+ this->ivmx, &last, this);
+ }
+ }
+ break;
+
+ case B2R2_SCALE:
+ ret = configure_scale(cont, node, src, dst, this->h_rsf,
+ this->v_rsf, this->ivmx, &last, this);
+ break;
+
+ case B2R2_SCALE_AND_ROTATE:
+ ret = configure_rot_scale(cont, this, node, &last);
+ break;
+
+ default:
+ b2r2_log_warn(cont->dev, "%s: Unsupported request\n", __func__);
+ ret = -ENOSYS;
+ goto error;
+ break;
+
+ }
+
+ if (ret < 0)
+ goto error;
+
+ /* Scale and rotate will configure its own blending and clipping */
+ if (this->type != B2R2_SCALE_AND_ROTATE) {
+
+ /* Configure blending and clipping */
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (this->blend) {
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND)
+ configure_bg(cont, node, bg,
+ this->swap_fg_bg);
+ else
+ configure_bg(cont, node, dst,
+ this->swap_fg_bg);
+ configure_blend(cont, node, this->flags,
+ this->global_alpha);
+ }
+ if (this->clip)
+ configure_clip(cont, node, &this->clip_rect);
+
+ node = node->next;
+
+ } while (node != last);
+ }
+
+ /* Consume the nodes */
+ *next = last;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_sub_rot() - configures a sub-rotation
+ *
+ * This function configures a set of nodes for rotation using the destination
+ * window instead of the rectangle for calculating tiles.
+ */
+static int configure_sub_rot(struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *job)
+{
+ int ret;
+
+ struct b2r2_blt_rect src_win;
+ struct b2r2_blt_rect dst_win;
+
+ u32 y_pixels = 0;
+ u32 x_pixels = 0;
+
+ memcpy(&src_win, &src->win, sizeof(src_win));
+ memcpy(&dst_win, &dst->win, sizeof(dst_win));
+
+ b2r2_log_info(cont->dev, "%s: src_win=(%d, %d, %d, %d) "
+ "dst_win=(%d, %d, %d, %d)\n", __func__,
+ src_win.x, src_win.y, src_win.width, src_win.height,
+ dst_win.x, dst_win.y, dst_win.width, dst_win.height);
+
+ dst->win.height = B2R2_ROTATE_MAX_WIDTH;
+ if (dst->win.width % B2R2_ROTATE_MAX_WIDTH)
+ dst->win.width -= dst->win.width % B2R2_ROTATE_MAX_WIDTH;
+
+ while (x_pixels < dst_win.width) {
+ u32 src_x = src->win.x;
+ u32 src_w = src->win.width;
+ u32 dst_y = dst->win.y;
+ u32 dst_h = dst->win.height;
+
+ dst->win.width = min(dst->win.width, dst_win.width -
+ (int)x_pixels);
+ src->win.height = dst->win.width;
+
+ b2r2_log_info(cont->dev, "%s: x_pixels=%d\n",
+ __func__, x_pixels);
+
+ while (y_pixels < dst_win.height) {
+ dst->win.height = min(dst->win.height,
+ dst_win.height - (int)y_pixels);
+ src->win.width = dst->win.height;
+
+ b2r2_log_info(cont->dev, "%s: y_pixels=%d\n",
+ __func__, y_pixels);
+
+ ret = configure_rotate(cont, node, src, dst,
+ ivmx, &node, job);
+ if (ret < 0)
+ goto error;
+
+ src->win.x += (src->hso == B2R2_TY_HSO_LEFT_TO_RIGHT) ?
+ src->win.width : -src->win.width;
+ dst->win.y += (dst->vso == B2R2_TY_VSO_TOP_TO_BOTTOM) ?
+ dst->win.height : -dst->win.height;
+
+ y_pixels += dst->win.height;
+ }
+
+ src->win.x = src_x;
+ src->win.y += (src->vso == B2R2_TY_VSO_TOP_TO_BOTTOM) ?
+ src->win.height : -src->win.height;
+ src->win.width = src_w;
+
+ dst->win.x += (dst->hso == B2R2_TY_HSO_LEFT_TO_RIGHT) ?
+ dst->win.width : -dst->win.width;
+ dst->win.y = dst_y;
+ dst->win.height = dst_h;
+
+ x_pixels += dst->win.width;
+ y_pixels = 0;
+
+ }
+
+ memcpy(&src->win, &src_win, sizeof(src->win));
+ memcpy(&dst->win, &dst_win, sizeof(dst->win));
+
+ *next = node;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rot_downscale() - configures a combined rotate and downscale
+ *
+ * When doing a downscale it is better to do the rotation last.
+ */
+static int configure_rot_downscale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this,
+ struct b2r2_node *node, struct b2r2_node **next)
+{
+ int ret;
+
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ tmp->win.x = 0;
+ tmp->win.y = 0;
+ tmp->win.width = dst->win.height;
+ tmp->win.height = dst->win.width;
+
+ ret = configure_scale(cont, node, src, tmp, this->h_rsf, this->v_rsf,
+ this->ivmx, &node, this);
+ if (ret < 0)
+ goto error;
+
+ ret = configure_sub_rot(cont, node, tmp, dst, NULL, &node, this);
+ if (ret < 0)
+ goto error;
+
+ *next = node;
+
+ return 0;
+
+error:
+ b2r2_log_info(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rot_upscale() - configures a combined rotate and upscale
+ *
+ * When doing an upscale it is better to do the rotation first.
+ */
+static int configure_rot_upscale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
+{
+ /* TODO: Implement an optimal upscale (rotation first) */
+ return configure_rot_downscale(cont, this, node, next);
+}
+
+/**
+ * configure_rot_scale() - configures a combined rotation and scaling op
+ */
+static int configure_rot_scale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
+{
+ int ret;
+
+ bool upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 20);
+
+ if (upscale)
+ ret = configure_rot_upscale(cont, this, node, next);
+ else
+ ret = configure_rot_downscale(cont, this, node, next);
+
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_direct_fill() - configures the given node for direct fill
+ *
+ * @node - the node to configure
+ * @color - the fill color
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * This operation will always consume one node only.
+ */
+static void configure_direct_fill(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ u32 color,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL | B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_FILL;
+
+ /* Target setup */
+ set_target(node, dst->addr, dst);
+
+ /* Source setup */
+
+ /* It seems B2R2 checks that source and dest have the same format */
+ node->node.GROUP3.B2R2_STY = to_native_fmt(dst->fmt);
+ node->node.GROUP2.B2R2_S1CF = color;
+ node->node.GROUP2.B2R2_S2CF = 0;
+
+ /* Consume the node */
+ *next = node->next;
+}
+
+/**
+ * configure_direct_copy() - configures the node for direct copy
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * This operation will always consume one node only.
+ */
+static void configure_direct_copy(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_COPY;
+
+ /* Source setup, use the base function to avoid altering the INS */
+ set_src(&node->node.GROUP3, src->addr, src);
+
+ /* Target setup */
+ set_target(node, dst->addr, dst);
+
+ /* Consume the node */
+ *next = node->next;
+}
+
+/**
+ * configure_fill() - configures the given node for color fill
+ *
+ * @node - the node to configure
+ * @color - the fill color
+ * @fmt - the source color format
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * A normal fill operation can be combined with any other per-pixel operation,
+ * such as blending.
+ *
+ * This operation will consume as many nodes as are required to write to the
+ * destination format.
+ */
+static int configure_fill(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ u32 color,
+ enum b2r2_blt_fmt fmt,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
+ struct b2r2_node **next)
+{
+ int ret;
+ struct b2r2_node *last;
+
+ /* Configure the destination */
+ ret = configure_dst(cont, node, dst, ivmx, &last);
+ if (ret < 0)
+ goto error;
+
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_COLOR_FILL;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ /*
+ * B2R2 has a bug that disables color fill from S2. As a
+ * workaround we use S1 for the color.
+ */
+ node->node.GROUP2.B2R2_S1CF = 0;
+ node->node.GROUP2.B2R2_S2CF = color;
+
+ /* TO BE REMOVED: */
+ set_src_2(node, dst->addr, dst);
+ node->node.GROUP4.B2R2_STY = to_native_fmt(fmt);
+
+ /* Setup the iVMX for color conversion */
+ if (ivmx != NULL)
+ set_ivmx(node, ivmx);
+
+ if ((dst->type == B2R2_FMT_TYPE_PLANAR) ||
+ (dst->type == B2R2_FMT_TYPE_SEMI_PLANAR)) {
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP8.B2R2_FCTL =
+ B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
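+ /*
+ * Unity resize factors, i.e. 1.0 in 6.10 fixed point in both
+ * directions, for the color and luma resizers.
+ */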
+ node->node.GROUP9.B2R2_RSF =
+ (1 << (B2R2_RSF_HSRC_INC_SHIFT + 10)) |
+ (1 << (B2R2_RSF_VSRC_INC_SHIFT + 10));
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+
+ node->node.GROUP10.B2R2_RSF =
+ (1 << (B2R2_RSF_HSRC_INC_SHIFT + 10)) |
+ (1 << (B2R2_RSF_VSRC_INC_SHIFT + 10));
+ node->node.GROUP10.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_copy() - configures the given node for a copy operation
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ *
+ * This operation will consume as many nodes as are required to write to the
+ * destination format.
+ */
+static int configure_copy(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ ret = configure_dst(cont, node, dst, ivmx, &last);
+ if (ret < 0)
+ goto error;
+
+ /* Configure the source for each node */
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ " Internal error! Out of nodes!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ if (this != NULL &&
+ (this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY)
+ != 0) {
+ u32 key_color = 0;
+
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT |
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY;
+
+ key_color = to_RGB888(this->flag_param, src->fmt);
+ node->node.GROUP12.B2R2_KEY1 = key_color;
+ node->node.GROUP12.B2R2_KEY2 = key_color;
+ }
+
+ if (this != NULL &&
+ (this->flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ struct b2r2_blt_request *request =
+ container_of(this, struct b2r2_blt_request,
+ node_split_job);
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CLUTOP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLUT;
+ node->node.GROUP7.B2R2_CCO =
+ B2R2_CCO_CLUT_COLOR_CORRECTION |
+ B2R2_CCO_CLUT_UPDATE;
+ node->node.GROUP7.B2R2_CML = request->clut_phys_addr;
+ }
+ /* Configure the source(s) */
+ configure_src(cont, node, src, ivmx);
+
+ node = node->next;
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rotate() - configures the given node for rotation
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ *
+ * This operation will consume as many nodes as are required by the combination
+ * of rotating and writing the destination format.
+ */
+static int configure_rotate(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ ret = configure_copy(cont, node, src, dst, ivmx, &last, this);
+ if (ret < 0)
+ goto error;
+
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED;
+
+ b2r2_log_debug(cont->dev, "%s:\n"
+ "\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n"
+ "\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n"
+ "\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n"
+ "\tB2R2_S3XY: %.8x\tB2R2_S3SZ: %.8x\n"
+ "-----------------------------------\n",
+ __func__, node->node.GROUP1.B2R2_TXY,
+ node->node.GROUP1.B2R2_TSZ,
+ node->node.GROUP3.B2R2_SXY,
+ node->node.GROUP3.B2R2_SSZ,
+ node->node.GROUP4.B2R2_SXY,
+ node->node.GROUP4.B2R2_SSZ,
+ node->node.GROUP5.B2R2_SXY,
+ node->node.GROUP5.B2R2_SSZ);
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_scale() - configures the given node for scaling
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @h_rsf - the horizontal rescale factor
+ * @v_rsf - the vertical rescale factor
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ */
+static int configure_scale(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ u16 h_rsf, u16 v_rsf,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ struct b2r2_filter_spec *hf = NULL;
+ struct b2r2_filter_spec *vf = NULL;
+
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 rzi = 0;
+ u32 hsrc_init = 0;
+ u32 vsrc_init = 0;
+ u32 hfp = 0;
+ u32 vfp = 0;
+
+ u16 luma_h_rsf = h_rsf;
+ u16 luma_v_rsf = v_rsf;
+
+ struct b2r2_filter_spec *luma_hf = NULL;
+ struct b2r2_filter_spec *luma_vf = NULL;
+
+ u32 luma_fctl = 0;
+ u32 luma_rsf = 0;
+ u32 luma_rzi = 0;
+ u32 luma_hsrc_init = 0;
+ u32 luma_vsrc_init = 0;
+ u32 luma_hfp = 0;
+ u32 luma_vfp = 0;
+
+ s32 src_x;
+ s32 src_y;
+ s32 src_w;
+ s32 src_h;
+
+ bool upsample;
+ bool downsample;
+
+ struct b2r2_blt_rect tmp_win = src->win;
+ bool src_raster = src->type == B2R2_FMT_TYPE_RASTER;
+ bool dst_raster = dst->type == B2R2_FMT_TYPE_RASTER;
+
+ /* Rescale the normalized source window */
+ src_x = inv_rescale(src->win.x - src->rect.x, luma_h_rsf);
+ src_y = inv_rescale(src->win.y - src->rect.y, luma_v_rsf);
+ src_w = inv_rescale(src->win.width, luma_h_rsf);
+ src_h = inv_rescale(src->win.height, luma_v_rsf);
+
+ /* Convert to src coordinate space */
+ src->win.x = (src_x >> 10) + src->rect.x;
+ src->win.y = (src_y >> 10) + src->rect.y;
+
+ /*
+ * Since the stripe might start and end on a fractional pixel
+ * we need to count all the touched pixels in the width.
+ *
+ * Example:
+ * src_x = 1.8, src_w = 2.8
+ *
+ * The stripe touches pixels 1.8 through 4.6, i.e. 4 pixels
+ */
+ src->win.width = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src->win.height = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
+
+ luma_hsrc_init = src_x & 0x3ff;
+ luma_vsrc_init = src_y & 0x3ff;
+
+ /* Check for upsampling of chroma */
+ upsample = !src_raster && !is_yuv444_fmt(src->fmt);
+ if (upsample) {
+ h_rsf /= 2;
+
+ if (is_yuv420_fmt(src->fmt))
+ v_rsf /= 2;
+ }
+
+ /* Check for downsampling of chroma */
+ downsample = !dst_raster && !is_yuv444_fmt(dst->fmt);
+ if (downsample) {
+ h_rsf *= 2;
+
+ if (is_yuv420_fmt(dst->fmt))
+ v_rsf *= 2;
+ }
+
+ src_x = inv_rescale(tmp_win.x - src->rect.x, h_rsf);
+ src_y = inv_rescale(tmp_win.y - src->rect.y, v_rsf);
+ hsrc_init = src_x & 0x3ff;
+ vsrc_init = src_y & 0x3ff;
+
+ /* Configure resize and filters */
+ fctl = B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+ luma_fctl = B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf = (h_rsf << B2R2_RSF_HSRC_INC_SHIFT) |
+ (v_rsf << B2R2_RSF_VSRC_INC_SHIFT);
+ luma_rsf = (luma_h_rsf << B2R2_RSF_HSRC_INC_SHIFT) |
+ (luma_v_rsf << B2R2_RSF_VSRC_INC_SHIFT);
+
+ rzi = B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
+ (hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
+ luma_rzi = B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
+ (luma_hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (luma_vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
+
+ /*
+ * We should only filter if there is an actual rescale (i.e. not when
+ * up or downsampling).
+ */
+ if (luma_h_rsf != (1 << 10)) {
+ hf = b2r2_filter_find(h_rsf);
+ luma_hf = b2r2_filter_find(luma_h_rsf);
+ }
+ if (luma_v_rsf != (1 << 10)) {
+ vf = b2r2_filter_find(v_rsf);
+ luma_vf = b2r2_filter_find(luma_v_rsf);
+ }
+
+ if (hf) {
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ hfp = hf->h_coeffs_phys_addr;
+ }
+
+ if (vf) {
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ vfp = vf->v_coeffs_phys_addr;
+ }
+
+ if (luma_hf) {
+ luma_fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER;
+ luma_hfp = luma_hf->h_coeffs_phys_addr;
+ }
+
+ if (luma_vf) {
+ luma_fctl |= B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER;
+ luma_vfp = luma_vf->v_coeffs_phys_addr;
+ }
+
+ ret = configure_copy(cont, node, src, dst, ivmx, &last, this);
+ if (ret < 0)
+ goto error;
+
+ do {
+ bool chroma_rescale =
+ (h_rsf != (1 << 10)) || (v_rsf != (1 << 10));
+ bool luma_rescale =
+ (luma_h_rsf != (1 << 10)) ||
+ (luma_v_rsf != (1 << 10));
+ bool dst_chroma = node->node.GROUP1.B2R2_TTY &
+ B2R2_TTY_CHROMA_NOT_LUMA;
+ bool dst_luma = !dst_chroma;
+
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Internal error! Out "
+ "of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL;
+
+ /*
+ * If the source format is anything other than raster, we
+ * always have to enable both chroma and luma resizers. This
+ * could be a bug in the hardware, since it is not mentioned in
+ * the specification.
+ *
+ * Otherwise, we will only enable the chroma resizer when
+ * writing chroma and the luma resizer when writing luma
+ * (or both when writing raster). Also, if there is no rescale
+ * to be done there's no point in using the resizers.
+ */
+
+ if (!src_raster || (chroma_rescale &&
+ (dst_raster || dst_chroma))) {
+ /* Enable chroma resize */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA;
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI = rzi;
+ node->node.GROUP9.B2R2_HFP = hfp;
+ node->node.GROUP9.B2R2_VFP = vfp;
+ }
+
+ if (!src_raster || (luma_rescale &&
+ (dst_raster || dst_luma))) {
+ /* Enable luma resize */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_LUMA;
+ node->node.GROUP8.B2R2_FCTL |= luma_fctl;
+
+ node->node.GROUP10.B2R2_RSF = luma_rsf;
+ node->node.GROUP10.B2R2_RZI = luma_rzi;
+ node->node.GROUP10.B2R2_HFP = luma_hfp;
+ node->node.GROUP10.B2R2_VFP = luma_vfp;
+ /*
+ * Scaling operation from raster to a multi-buffer
+ * format requires the raster input to be scaled
+ * before luminance information can be extracted.
+ * Raster input is scaled by the chroma resizer.
+ * Luma resizer only handles luminance data which
+ * exists in a separate buffer in source image,
+ * as is the case with YUV planar/semi-planar formats.
+ */
+ if (src_raster) {
+ /* Activate chroma scaling */
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_RESIZE_CHROMA;
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ /*
+ * Color data must be scaled
+ * to the same size as luma.
+ * Use luma scaling parameters.
+ */
+ node->node.GROUP9.B2R2_RSF = luma_rsf;
+ node->node.GROUP9.B2R2_RZI = luma_rzi;
+ node->node.GROUP9.B2R2_HFP = luma_hfp;
+ node->node.GROUP9.B2R2_VFP = luma_vfp;
+ }
+ }
+
+ b2r2_log_info(cont->dev, "%s:\n"
+ "\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n"
+ "\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n"
+ "\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n"
+ "\tB2R2_S3XY: %.8x\tB2R2_S3SZ: %.8x\n"
+ "----------------------------------\n",
+ __func__, node->node.GROUP1.B2R2_TXY,
+ node->node.GROUP1.B2R2_TSZ,
+ node->node.GROUP3.B2R2_SXY,
+ node->node.GROUP3.B2R2_SSZ,
+ node->node.GROUP4.B2R2_SXY,
+ node->node.GROUP4.B2R2_SSZ,
+ node->node.GROUP5.B2R2_SXY,
+ node->node.GROUP5.B2R2_SSZ);
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_src() - configures the source registers and the iVMX
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @ivmx - the iVMX to use for color conversion
+ *
+ * This operation will not consume any nodes
+ */
+static void configure_src(struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src, const u32 *ivmx)
+{
+ struct b2r2_node_split_buf tmp_buf;
+
+ b2r2_log_info(cont->dev,
+ "%s: src.win=(%d, %d, %d, %d)\n", __func__,
+ src->win.x, src->win.y, src->win.width,
+ src->win.height);
+
+ /* Configure S1 - S3 */
+ switch (src->type) {
+ case B2R2_FMT_TYPE_RASTER:
+ set_src_2(node, src->addr, src);
+ break;
+ case B2R2_FMT_TYPE_SEMI_PLANAR:
+ memcpy(&tmp_buf, src, sizeof(tmp_buf));
+
+ /*
+ * For 420 and 422 the chroma has lower resolution than the
+ * luma
+ */
+ if (!is_yuv444_fmt(src->fmt)) {
+ tmp_buf.win.x >>= 1;
+ tmp_buf.win.width = (tmp_buf.win.width + 1) / 2;
+
+ if (is_yuv420_fmt(src->fmt)) {
+ tmp_buf.win.height =
+ (tmp_buf.win.height + 1) / 2;
+ tmp_buf.win.y >>= 1;
+ }
+ }
+
+ set_src_3(node, src->addr, src);
+ set_src_2(node, tmp_buf.chroma_addr, &tmp_buf);
+ break;
+ case B2R2_FMT_TYPE_PLANAR:
+ memcpy(&tmp_buf, src, sizeof(tmp_buf));
+
+ if (!is_yuv444_fmt(src->fmt)) {
+ /*
+ * Each chroma buffer will have half as many values
+ * per line as the luma buffer
+ */
+ tmp_buf.pitch = (tmp_buf.pitch + 1) / 2;
+
+ /* Horizontal resolution is half */
+ tmp_buf.win.x >>= 1;
+ tmp_buf.win.width = (tmp_buf.win.width + 1) / 2;
+
+ /*
+ * If the buffer is in YUV420 format, the vertical
+ * resolution is half as well
+ */
+ if (is_yuv420_fmt(src->fmt)) {
+ tmp_buf.win.height =
+ (tmp_buf.win.height + 1) / 2;
+ tmp_buf.win.y >>= 1;
+ }
+ }
+
+ set_src_3(node, src->addr, src); /* Y */
+ set_src_2(node, tmp_buf.chroma_addr, &tmp_buf); /* U */
+ set_src_1(node, tmp_buf.chroma_cr_addr, &tmp_buf); /* V */
+
+ break;
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ break;
+ }
+
+ /* Configure the iVMX for color space conversions */
+ if (ivmx != NULL)
+ set_ivmx(node, ivmx);
+}
+
+/**
+ * configure_bg() - configures a background for the given node
+ *
+ * @node - the node to configure
+ * @bg - the background buffer
+ * @swap_fg_bg - if true, fg will be on s1 instead of s2
+ *
+ * This operation will not consume any nodes.
+ *
+ * NOTE: This method should be called _AFTER_ the destination has been
+ * configured.
+ *
+ * WARNING: Take care when using this with semi-planar or planar sources since
+ * either S1 or S2 will be overwritten!
+ */
+static void configure_bg(struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *bg, bool swap_fg_bg)
+{
+ b2r2_log_info(cont->dev,
+ "%s: bg.win=(%d, %d, %d, %d)\n", __func__,
+ bg->win.x, bg->win.y, bg->win.width,
+ bg->win.height);
+
+ /* Configure S1 */
+ switch (bg->type) {
+ case B2R2_FMT_TYPE_RASTER:
+ if (swap_fg_bg) {
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_SWAP_FG_BG;
+
+ set_src(&node->node.GROUP4, bg->addr, bg);
+ } else {
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP3, bg->addr, bg);
+ }
+ break;
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ break;
+ }
+}
+
+/**
+ * configure_dst() - configures the destination registers of the given node
+ *
+ * @node - the node to configure
+ * @ivmx - the iVMX to use for color conversion
+ * @dst - the destination buffer
+ *
+ * This operation will consume as many nodes as are required to write the
+ * destination format.
+ */
+static int configure_dst(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next)
+{
+ int ret;
+ int nbr_planes = 1;
+ int i;
+
+ struct b2r2_node_split_buf dst_planes[3];
+
+ b2r2_log_info(cont->dev,
+ "%s: dst.win=(%d, %d, %d, %d)\n", __func__,
+ dst->win.x, dst->win.y, dst->win.width,
+ dst->win.height);
+
+ memcpy(&dst_planes[0], dst, sizeof(dst_planes[0]));
+
+ if (dst->type != B2R2_FMT_TYPE_RASTER) {
+ /* There will be at least 2 planes */
+ nbr_planes = 2;
+
+ memcpy(&dst_planes[1], dst, sizeof(dst_planes[1]));
+
+ dst_planes[1].addr = dst->chroma_addr;
+ dst_planes[1].plane_selection = B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (!is_yuv444_fmt(dst->fmt)) {
+ /* Horizontal resolution is half */
+ dst_planes[1].win.x /= 2;
+ /*
+ * Must round up the chroma size to handle cases when
+ * luma size is not divisible by 2. E.g. luma width==7
+ * requires chroma width==4. Chroma width==7/2==3 is only
+ * enough for luma width==6.
+ */
+ dst_planes[1].win.width =
+ (dst_planes[1].win.width + 1) / 2;
+
+ /*
+ * If the buffer is in YUV420 format, the vertical
+ * resolution is half as well. Height must be rounded in
+ * the same way as is done for width.
+ */
+ if (is_yuv420_fmt(dst->fmt)) {
+ dst_planes[1].win.y /= 2;
+ dst_planes[1].win.height =
+ (dst_planes[1].win.height + 1) / 2;
+ }
+ }
+
+ if (dst->type == B2R2_FMT_TYPE_PLANAR) {
+ /* There will be a third plane as well */
+ nbr_planes = 3;
+
+ if (!is_yuv444_fmt(dst->fmt)) {
+ /* The chroma planes have half the luma pitch */
+ dst_planes[1].pitch /= 2;
+ }
+
+ memcpy(&dst_planes[2], &dst_planes[1],
+ sizeof(dst_planes[2]));
+ dst_planes[2].addr = dst->chroma_cr_addr;
+
+ /*
+ * The third plane will be Cr.
+ * The flag B2R2_TTY_CB_NOT_CR actually works
+ * the other way around, i.e. as if it was
+ * B2R2_TTY_CR_NOT_CB.
+ */
+ dst_planes[2].chroma_selection = B2R2_TTY_CB_NOT_CR;
+ }
+
+ }
+
+ /* Configure one node for each plane */
+ for (i = 0; i < nbr_planes; i++) {
+
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * When writing chroma, there's no need to read the luma and
+ * vice versa.
+ */
+ if ((node->node.GROUP3.B2R2_STY & B2R2_NATIVE_YUV) &&
+ (nbr_planes > 1)) {
+ if (i != 0) {
+ node->node.GROUP4.B2R2_STY |=
+ B2R2_S3TY_ENABLE_BLANK_ACCESS;
+ }
+ if (i != 1) {
+ node->node.GROUP0.B2R2_INS &=
+ ~B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ }
+ if (i != 2) {
+ node->node.GROUP0.B2R2_INS &=
+ ~B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_COLOR_FILL_REGISTER;
+ }
+ } else if ((node->node.GROUP3.B2R2_STY &
+ (B2R2_NATIVE_YCBCR42X_MBN |
+ B2R2_NATIVE_YCBCR42X_R2B)) &&
+ (nbr_planes > 1)) {
+ if (i != 0) {
+ node->node.GROUP4.B2R2_STY |=
+ B2R2_S3TY_ENABLE_BLANK_ACCESS;
+ }
+ }
+
+ set_target(node, dst_planes[i].addr, &dst_planes[i]);
+
+ node = node->next;
+ }
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * configure_blend() - configures the given node for alpha blending
+ *
+ * @node - the node to configure
+ * @flags - the flags passed in the blt_request
+ * @global_alpha - the global alpha to use (if enabled in flags)
+ *
+ * This operation will not consume any nodes.
+ *
+ * NOTE: This method should be called _AFTER_ the destination has been
+ * configured.
+ *
+ * WARNING: Take care when using this with semi-planar or planar sources since
+ * either S1 or S2 will be overwritten!
+ */
+static void configure_blend(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 flags, u32 global_alpha)
+{
+ node->node.GROUP0.B2R2_ACK &= ~(B2R2_ACK_MODE_BYPASS_S2_S3);
+
+ /* Check if the foreground is premultiplied */
+ if ((flags & B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT) != 0)
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BLEND_NOT_PREMULT;
+ else
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BLEND_PREMULT;
+
+ /* Check if global alpha blend should be enabled */
+ if (flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) {
+
+ /* B2R2 expects the global alpha to be in 0...128 range */
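+ /* e.g. 255 maps to 128 and 64 maps to 32 */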
+ global_alpha = (global_alpha*128)/255;
+
+ node->node.GROUP0.B2R2_ACK |=
+ global_alpha << B2R2_ACK_GALPHA_ROPID_SHIFT;
+ } else {
+ node->node.GROUP0.B2R2_ACK |=
+ (128 << B2R2_ACK_GALPHA_ROPID_SHIFT);
+ }
+}
+
+/**
+ * configure_clip() - configures destination clipping for the given node
+ *
+ * @node - the node to configure
+ * @clip_rect - the clip rectangle
+ *
+ * This operation does not consume any nodes.
+ */
+static void configure_clip(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_blt_rect *clip_rect)
+{
+ s32 l = clip_rect->x;
+ s32 r = clip_rect->x + clip_rect->width - 1;
+ s32 t = clip_rect->y;
+ s32 b = clip_rect->y + clip_rect->height - 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLIP_WINDOW;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RECT_CLIP_ENABLED;
+
+ /* Clip window setup */
+ node->node.GROUP6.B2R2_CWO =
+ ((t & 0x7FFF) << B2R2_CWO_Y_SHIFT) |
+ ((l & 0x7FFF) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((b & 0x7FFF) << B2R2_CWO_Y_SHIFT) |
+ ((r & 0x7FFF) << B2R2_CWO_X_SHIFT);
+}
+
+/**
+ * set_buf() - configures the given buffer with the provided values
+ *
+ * @buf - the buffer to configure
+ * @addr - the physical base address
+ * @img - the blt image to base the buffer on
+ * @rect - the rectangle to use
+ * @color_fill - determines whether the buffer should be used for color fill
+ * @color - the color to use in case of color fill
+ */
+static void set_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *buf,
+ u32 addr,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ bool color_fill,
+ u32 color)
+{
+ memset(buf, 0, sizeof(*buf));
+
+ buf->fmt = img->fmt;
+ buf->type = get_fmt_type(img->fmt);
+
+ if (color_fill) {
+ buf->type = B2R2_FMT_TYPE_RASTER;
+ buf->color = color;
+ } else {
+ buf->addr = addr;
+
+ buf->alpha_range = get_alpha_range(img->fmt);
+
+ if (img->pitch == 0)
+ buf->pitch = fmt_byte_pitch(img->fmt, img->width);
+ else
+ buf->pitch = img->pitch;
+
+ buf->height = img->height;
+ buf->width = img->width;
+
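+ /*
+ * Derive the chroma plane addresses from the end of the luma
+ * plane. E.g. a YUV420 planar buffer with pitch 64 and height 48
+ * gets its first chroma (Cb) plane at addr + 64 * 48 and its Cr
+ * plane (64 / 2) * (48 / 2) bytes after that.
+ */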
+ switch (buf->type) {
+ case B2R2_FMT_TYPE_SEMI_PLANAR:
+ buf->chroma_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ break;
+ case B2R2_FMT_TYPE_PLANAR:
+ if (is_yuv422_fmt(buf->fmt) ||
+ is_yuv420_fmt(buf->fmt)) {
+ buf->chroma_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ } else {
+ buf->chroma_cr_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ }
+ if (is_yuv420_fmt(buf->fmt)) {
+ /*
+ * Use ceil(height/2) in case
+ * buffer height is not divisible by 2.
+ */
+ buf->chroma_cr_addr =
+ (u32)(((u8 *)buf->chroma_addr) +
+ (buf->pitch >> 1) *
+ ((buf->height + 1) >> 1));
+ } else if (is_yuv422_fmt(buf->fmt)) {
+ buf->chroma_cr_addr =
+ (u32)(((u8 *)buf->chroma_addr) +
+ (buf->pitch >> 1) * buf->height);
+ } else if (is_yvu420_fmt(buf->fmt)) {
+ buf->chroma_addr =
+ (u32)(((u8 *)buf->chroma_cr_addr) +
+ (buf->pitch >> 1) *
+ ((buf->height + 1) >> 1));
+ } else if (is_yvu422_fmt(buf->fmt)) {
+ buf->chroma_addr =
+ (u32)(((u8 *)buf->chroma_cr_addr) +
+ (buf->pitch >> 1) * buf->height);
+ }
+ break;
+ default:
+ break;
+ }
+
+ memcpy(&buf->rect, rect, sizeof(buf->rect));
+ }
+}
+
+/**
+ * setup_tmp_buf() - configure a temporary buffer
+ */
+static int setup_tmp_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *tmp,
+ u32 max_size,
+ enum b2r2_blt_fmt pref_fmt,
+ u32 pref_width,
+ u32 pref_height)
+{
+ int ret;
+
+ enum b2r2_blt_fmt fmt;
+
+ u32 width;
+ u32 height;
+ u32 pitch;
+ u32 size;
+
+ /* Determine what format we should use for the tmp buf */
+ if (is_rgb_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else if (is_bgr_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_ABGR8888;
+ } else if (is_yvu_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_CB_Y_CR_Y;
+ } else if (is_yuv_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ } else {
+ /* Wait, what? */
+ b2r2_log_warn(cont->dev, "%s: "
+ "Cannot create tmp buf from this fmt (%d)\n",
+ __func__, pref_fmt);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* See if we can fit the entire preferred rectangle */
+ width = pref_width;
+ height = pref_height;
+ pitch = fmt_byte_pitch(fmt, width);
+ size = pitch * height;
+
+ if (size > max_size) {
+ /* We need to limit the size, so we choose a different width */
+ width = MIN(width, B2R2_RESCALE_MAX_WIDTH);
+ pitch = fmt_byte_pitch(fmt, width);
+ height = MIN(height, max_size / pitch);
+ size = pitch * height;
+ }
+
+ /* We should at least have enough room for one scanline */
+ if (height == 0) {
+ b2r2_log_warn(cont->dev, "%s: Not enough tmp mem!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memset(tmp, 0, sizeof(*tmp));
+
+ tmp->fmt = fmt;
+ tmp->type = B2R2_FMT_TYPE_RASTER;
+ tmp->height = height;
+ tmp->width = width;
+ tmp->pitch = pitch;
+
+ tmp->rect.width = width;
+ tmp->rect.height = tmp->height;
+ tmp->alpha_range = B2R2_TY_ALPHA_RANGE_255;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * get_alpha_range() - returns the alpha range of the given format
+ */
+static enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ return B2R2_TY_ALPHA_RANGE_255; /* 0 - 255 */
+ default:
+ return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */
+ }
+}
+
+/**
+ * get_alpha() - returns the pixel alpha in 0...255 range
+ */
+static u8 get_alpha(enum b2r2_blt_fmt fmt, u32 pixel)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return (pixel >> 24) & 0xff;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return pixel & 0xff;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ return (pixel >> 16) & 0xff;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return (((pixel >> 12) & 0xf) * 255) / 15;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return (pixel >> 15) * 255;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return pixel * 255;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return pixel;
+ default:
+ return 255;
+ }
+}
+
+/**
+ * set_alpha() - returns a color value with the alpha component set
+ */
+static u32 set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color)
+{
+ u32 alpha_mask;
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ color &= 0x00ffffff;
+ alpha_mask = alpha << 24;
+ break;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ color &= 0xffffff00;
+ alpha_mask = alpha;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ color &= 0x00ffff;
+ alpha_mask = alpha << 16;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ color &= 0x0fff;
+ alpha_mask = (alpha << 8) & 0xF000;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ color &= 0x7fff;
+ alpha_mask = (alpha / 255) << 15;
+ break;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ color = 0;
+ alpha_mask = (alpha / 255);
+ break;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ color = 0;
+ alpha_mask = alpha;
+ break;
+ default:
+ alpha_mask = 0;
+ }
+
+ return color | alpha_mask;
+}
+
+/**
+ * fmt_has_alpha() - returns whether the given format carries an alpha value
+ */
+static bool fmt_has_alpha(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_rgb_fmt() - returns whether the given format is a rgb format
+ */
+static bool is_rgb_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_bgr_fmt() - returns whether the given format is a bgr format
+ */
+static bool is_bgr_fmt(enum b2r2_blt_fmt fmt)
+{
+ return (fmt == B2R2_BLT_FMT_32_BIT_ABGR8888);
+}
+
+/**
+ * is_yuv_fmt() - returns whether the given format is a yuv format
+ */
+static bool is_yuv_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yvu_fmt() - returns whether the given format is a yvu format
+ */
+static bool is_yvu_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yuv420_fmt() - returns whether the given format is a yuv420 format
+ */
+static bool is_yuv420_fmt(enum b2r2_blt_fmt fmt)
+{
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_yuv422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yvu420_fmt() - returns whether the given format is a yvu420 format
+ */
+static bool is_yvu420_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_yvu422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yuv444_fmt() - returns whether the given format is a yuv444 format
+ */
+static bool is_yuv444_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * fmt_byte_pitch() - returns the byte pitch of a pixmap with the given width
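+ *
+ * E.g. a 100 pixel wide line occupies 200 bytes in RGB565 and 300 bytes
+ * in RGB888.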
+ */
+static int fmt_byte_pitch(enum b2r2_blt_fmt fmt, u32 width)
+{
+ int pitch;
+
+ switch (fmt) {
+
+ case B2R2_BLT_FMT_1_BIT_A1:
+ pitch = width >> 3; /* Shift is faster than division */
+ if ((width & 0x7) != 0) /* Check for remainder */
+ pitch++;
+ return pitch;
+
+ case B2R2_BLT_FMT_8_BIT_A8: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return width;
+
+ case B2R2_BLT_FMT_16_BIT_ARGB4444: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* Fall through */
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return width << 1;
+
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_YUV888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return width * 3;
+
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return width << 2;
+
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ return 0;
+ }
+}
+
+/**
+ * to_native_fmt() - returns the native B2R2 format
+ */
+static enum b2r2_native_fmt to_native_fmt(enum b2r2_blt_fmt fmt)
+{
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_UNUSED:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return B2R2_NATIVE_A1;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_NATIVE_A8;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return B2R2_NATIVE_ARGB4444;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return B2R2_NATIVE_ARGB1555;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ return B2R2_NATIVE_ARGB8565;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ return B2R2_NATIVE_RGB888;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888: /* Not actually supported by HW */
+ return B2R2_NATIVE_YCBCR888;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Not actually supported by HW */
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ return B2R2_NATIVE_ARGB8888;
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* Not actually supported by HW */
+ return B2R2_NATIVE_AYCBCR8888;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return B2R2_NATIVE_YCBCR42X_R2B;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_NATIVE_YCBCR42X_MBN;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_NATIVE_YUV;
+ default:
+ /* Should never ever happen */
+ return B2R2_NATIVE_BYTE;
+ }
+}
+
+/**
+ * Bit-expand the color from fmt to RGB888 with blue at LSB.
+ * Copy MSBs into missing LSBs.
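+ *
+ * E.g. the RGB565 color 0xf800 (pure red) expands to 0x00ff0000.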
+ */
+static u32 to_RGB888(u32 color, const enum b2r2_blt_fmt fmt)
+{
+ u32 out_color = 0;
+ u32 r = 0;
+ u32 g = 0;
+ u32 b = 0;
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ r = ((color & 0xf00) << 12) | ((color & 0xf00) << 8);
+ g = ((color & 0xf0) << 8) | ((color & 0xf0) << 4);
+ b = ((color & 0xf) << 4) | (color & 0xf);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ r = ((color & 0x7c00) << 9) | ((color & 0x7000) << 4);
+ g = ((color & 0x3e0) << 6) | ((color & 0x380) << 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ out_color = color & 0xffffff;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ r = (color & 0xff) << 16;
+ g = color & 0xff00;
+ b = (color & 0xff0000) >> 16;
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ default:
+ break;
+ }
+
+ return out_color;
+}
+
+/**
+ * get_fmt_type() - returns the type of the given format (raster, planar, etc.)
+ */
+static enum b2r2_fmt_type get_fmt_type(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_FMT_TYPE_RASTER;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_FMT_TYPE_PLANAR;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_FMT_TYPE_SEMI_PLANAR;
+ default:
+ return B2R2_FMT_TYPE_RASTER;
+ }
+}
+
+/**
+ * is_transform() - returns whether the given request is a transform operation
+ */
+static bool is_transform(const struct b2r2_blt_request *req)
+{
+ return (req->user_req.transform != B2R2_BLT_TRANSFORM_NONE) ||
+ (req->user_req.src_rect.width !=
+ req->user_req.dst_rect.width) ||
+ (req->user_req.src_rect.height !=
+ req->user_req.dst_rect.height);
+}
+
+/**
+ * rescale() - rescales the given dimension
+ *
+ * Returns the rescaled dimension in 22.10 fixed point format.
+ */
+static s32 rescale(struct b2r2_control *cont, s32 dim, u16 sf)
+{
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ if (sf == 0) {
+ b2r2_log_err(cont->dev, "%s: Scale factor is 0!\n", __func__);
+		BUG();
+ }
+
+ /*
+ * This is normally not safe to do, since it drastically decreases the
+ * precision of the integer part of the dimension. But since the B2R2
+ * hardware only has 12-bit registers for these values, we are safe.
+ */
+ return (dim << 20) / sf;
+}
+
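+/*
+ * Illustrative example (not part of the original driver): with a 6.10 scale
+ * factor of 2048 (2.0, i.e. downscaling by two), rescale() maps a dimension
+ * of 100 pixels to (100 << 20) / 2048 = 51200, which is 50.0 in 22.10 fixed
+ * point format.
+ */
+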
+/**
+ * inv_rescale() - does an inverted rescale of the given dimension
+ *
+ * Returns the rescaled dimension in 22.10 fixed point format.
+ */
+static s32 inv_rescale(s32 dim, u16 sf)
+{
+ if (sf == 0)
+ return dim;
+
+ return dim * sf;
+}
+
+/**
+ * set_target() - sets the target registers of the given node
+ */
+static void set_target(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ s32 l;
+ s32 r;
+ s32 t;
+ s32 b;
+
+ if (buf->tmp_buf_index)
+ node->dst_tmp_index = buf->tmp_buf_index;
+
+ node->node.GROUP1.B2R2_TBA = addr;
+ node->node.GROUP1.B2R2_TTY = buf->pitch | to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->chroma_selection | buf->hso |
+ buf->vso | buf->dither | buf->plane_selection;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ node->node.GROUP1.B2R2_TTY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((buf->win.width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((buf->win.height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY =
+ ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+ /* Check if the rectangle is outside the buffer */
+ if (buf->vso == B2R2_TY_VSO_BOTTOM_TO_TOP)
+ t = buf->win.y - (buf->win.height - 1);
+ else
+ t = buf->win.y;
+
+ if (buf->hso == B2R2_TY_HSO_RIGHT_TO_LEFT)
+ l = buf->win.x - (buf->win.width - 1);
+ else
+ l = buf->win.x;
+
+ r = l + buf->win.width;
+ b = t + buf->win.height;
+
+ /* Clip to the destination buffer to prevent memory overwrites */
+ if ((l < 0) || (r > buf->width) || (t < 0) || (b > buf->height)) {
+ /* The clip rectangle is including the borders */
+ l = MAX(l, 0);
+ r = MIN(r, buf->width) - 1;
+ t = MAX(t, 0);
+ b = MIN(b, buf->height) - 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLIP_WINDOW;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RECT_CLIP_ENABLED;
+ node->node.GROUP6.B2R2_CWO =
+ ((l & 0x7FFF) << B2R2_CWS_X_SHIFT) |
+ ((t & 0x7FFF) << B2R2_CWS_Y_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((r & 0x7FFF) << B2R2_CWO_X_SHIFT) |
+ ((b & 0x7FFF) << B2R2_CWO_Y_SHIFT);
+ }
+
+}
+
+/**
+ * set_src() - configures the given source register with the given values
+ */
+static void set_src(struct b2r2_src_config *src, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ src->B2R2_SBA = addr;
+ src->B2R2_STY = buf->pitch | to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->hso | buf->vso;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ src->B2R2_STY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ src->B2R2_SSZ = ((buf->win.width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((buf->win.height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ src->B2R2_SXY = ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+}
+
+/**
+ * set_src_1() - sets the source 1 registers of the given node
+ */
+static void set_src_1(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+
+ node->node.GROUP3.B2R2_SBA = addr;
+ node->node.GROUP3.B2R2_STY = buf->pitch | to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->hso | buf->vso;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ node->node.GROUP3.B2R2_STY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ node->node.GROUP3.B2R2_SXY =
+ ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+ /* Source 1 has no size register */
+}
+
+/**
+ * set_src_2() - sets the source 2 registers of the given node
+ */
+static void set_src_2(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 2;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP4, addr, buf);
+}
+
+/**
+ * set_src_3() - sets the source 3 registers of the given node
+ */
+static void set_src_3(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 3;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_3;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP5, addr, buf);
+}
+
+/**
+ * set_ivmx() - configures the iVMX registers with the given values
+ */
+static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+
+ node->node.GROUP15.B2R2_VMX0 = vmx_values[0];
+ node->node.GROUP15.B2R2_VMX1 = vmx_values[1];
+ node->node.GROUP15.B2R2_VMX2 = vmx_values[2];
+ node->node.GROUP15.B2R2_VMX3 = vmx_values[3];
+}
+
+/**
+ * reset_nodes() - clears the node list
+ */
+static void reset_nodes(struct b2r2_node *node)
+{
+ while (node != NULL) {
+ memset(&node->node, 0, sizeof(node->node));
+
+ node->src_tmp_index = 0;
+ node->dst_tmp_index = 0;
+
+ /* TODO: Implement support for short linked lists */
+ node->node.GROUP0.B2R2_CIC = 0x7ffff;
+
+ if (node->next != NULL)
+ node->node.GROUP0.B2R2_NIP =
+ node->next->physical_address;
+ node = node->next;
+ }
+}
+
+int b2r2_node_split_init(struct b2r2_control *cont)
+{
+ return 0;
+}
+
+void b2r2_node_split_exit(struct b2r2_control *cont)
+{
+
+}
diff --git a/drivers/video/b2r2/b2r2_node_split.h b/drivers/video/b2r2/b2r2_node_split.h
new file mode 100644
index 00000000000..a577241c31b
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_split.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node splitter
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_NODE_SPLIT_H_
+#define __B2R2_NODE_SPLIT_H_
+
+#include "b2r2_internal.h"
+#include "b2r2_hw.h"
+
+/**
+ * b2r2_node_split_analyze() - Analyzes a B2R2 request
+ *
+ * @req - The request to analyze
+ * @max_buf_size - The largest size allowed for intermediate buffers
+ * @node_count - Number of nodes required for the job
+ * @buf_count - Number of intermediate buffers required for the job
+ * @bufs - An array describing the required intermediate buffers
+ * @job - The job descriptor that will be initialized for the request
+ *
+ * Analyzes the request and determines how many nodes and intermediate buffers
+ * are required.
+ *
+ * It is the responsibility of the caller to allocate memory and assign the
+ * physical addresses. After that b2r2_node_split_assign_buffers should be
+ * called to assign the buffers to the right nodes.
+ *
+ * Returns:
+ * A handle identifying the analyzed request if successful, a negative
+ * value otherwise.
+ */
+int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
+		u32 max_buf_size, u32 *node_count, struct b2r2_work_buf **bufs,
+		u32 *buf_count, struct b2r2_node_split_job *job);
+
+/**
+ * b2r2_node_split_configure() - Performs a node split
+ *
+ * @job - The job produced by b2r2_node_split_analyze
+ * @first - The first node in the list of nodes to use
+ *
+ * Fills the supplied list of nodes with the parameters acquired by analyzing
+ * the request.
+ *
+ * All intermediate buffers are referenced by integer indices into the array
+ * returned by b2r2_node_split_analyze.
+ *
+ * Returns:
+ * A negative value if an error occurred, 0 otherwise.
+ */
+int b2r2_node_split_configure(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job, struct b2r2_node *first);
+
+/**
+ * b2r2_node_split_assign_buffers() - Assigns physical addresses
+ *
+ * @job - The job produced by b2r2_node_split_analyze
+ * @first - The first node in the node list
+ * @bufs - Buffers with assigned physical addresses
+ * @buf_count - Number of physical addresses
+ *
+ * Assigns the physical addresses where intermediate buffers are required in
+ * the node list.
+ *
+ * The order of the elements of 'bufs' must be maintained from the call to
+ * b2r2_node_split_analyze.
+ *
+ * Returns:
+ * A negative value if an error occurred, 0 otherwise.
+ */
+int b2r2_node_split_assign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job,
+ struct b2r2_node *first, struct b2r2_work_buf *bufs,
+ u32 buf_count);
+
+/**
+ * b2r2_node_split_unassign_buffers() - Removes all physical addresses
+ *
+ * @job - The job associated with the node list
+ * @first - The first node in the node list
+ *
+ * Removes all references to intermediate buffers from the node list.
+ *
+ * This makes it possible to reuse the node list with new buffers by calling
+ * b2r2_node_split_assign_buffers again. Useful for caching node lists.
+ */
+void b2r2_node_split_unassign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job,
+ struct b2r2_node *first);
+
+/**
+ * b2r2_node_split_cancel() - Releases all resources for a job
+ *
+ * @job - The job whose resources should be released
+ *
+ * Releases all resources associated with a job.
+ *
+ * This should always be called once b2r2_node_split_analyze has been called
+ * in order to release any resources allocated while analyzing.
+ */
+void b2r2_node_split_cancel(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job);
+
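+/*
+ * Illustrative call sequence (not part of the original driver). Allocation of
+ * the node list ('first') and of the memory behind the returned work buffers
+ * is assumed to be done by the caller; the ordering below is inferred from
+ * the descriptions above:
+ *
+ *	struct b2r2_node_split_job job;
+ *	struct b2r2_work_buf *bufs;
+ *	u32 node_count, buf_count;
+ *	int ret;
+ *
+ *	ret = b2r2_node_split_analyze(req, max_buf_size, &node_count,
+ *			&bufs, &buf_count, &job);
+ *	if (ret >= 0) {
+ *		(allocate node_count nodes and the physical memory for bufs)
+ *		ret = b2r2_node_split_configure(cont, &job, first);
+ *		if (ret == 0)
+ *			ret = b2r2_node_split_assign_buffers(cont, &job,
+ *					first, bufs, buf_count);
+ *		b2r2_node_split_cancel(cont, &job);
+ *	}
+ */
+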
+/**
+ * b2r2_node_split_init() - Initializes the node split module
+ *
+ * Initializes the node split module and creates debugfs files.
+ */
+int b2r2_node_split_init(struct b2r2_control *cont);
+
+/**
+ * b2r2_node_split_exit() - Deinitializes the node split module
+ *
+ * Releases all resources for the node split module.
+ */
+void b2r2_node_split_exit(struct b2r2_control *cont);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_profiler/Makefile b/drivers/video/b2r2/b2r2_profiler/Makefile
new file mode 100644
index 00000000000..69a85524fd7
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler/Makefile
@@ -0,0 +1,3 @@
+# Makefile for the B2R2 Profiler loadable module
+
+obj-$(CONFIG_B2R2_PROFILER) += b2r2_profiler.o
diff --git a/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
new file mode 100644
index 00000000000..e038941b4e8
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson B2R2 profiler implementation
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+
+#include <video/b2r2_blt.h>
+#include "../b2r2_profiler_api.h"
+
+
+#define S32_MAX 2147483647
+
+
+static bool src_format_filter_on = false;
+module_param(src_format_filter_on, bool, S_IRUGO | S_IWUSR);
+static unsigned int src_format_filter;
+module_param(src_format_filter, uint, S_IRUGO | S_IWUSR);
+
+static bool print_blts_on = false;
+module_param(print_blts_on, bool, S_IRUGO | S_IWUSR);
+static bool use_mpix_per_second_in_print_blts = true;
+module_param(use_mpix_per_second_in_print_blts, bool, S_IRUGO | S_IWUSR);
+
+static bool profiler_stats_on = true;
+module_param(profiler_stats_on, bool, S_IRUGO | S_IWUSR);
+
+static const unsigned int profiler_stats_blts_used = 400;
+static struct {
+ unsigned long sampling_start_time_jiffies;
+
+ s32 min_mpix_per_second;
+ struct b2r2_blt_req min_blt_request;
+ struct b2r2_blt_profiling_info min_blt_profiling_info;
+
+ s32 max_mpix_per_second;
+ struct b2r2_blt_req max_blt_request;
+ struct b2r2_blt_profiling_info max_blt_profiling_info;
+
+ s32 accumulated_num_pixels;
+ s32 accumulated_num_usecs;
+
+ u32 num_blts_done;
+} profiler_stats;
+
+
+static s32 nsec_2_usec(const s32 nsec);
+
+static int is_scale_blt(const struct b2r2_blt_req * const request);
+static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+static void print_blt(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request);
+static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs);
+static void print_profiler_stats(void);
+static void reset_profiler_stats(void);
+static void do_profiler_stats(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+static void blt_done(const struct b2r2_blt_req * const blt,
+ const s32 request_id,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+
+static struct b2r2_profiler this = {
+ .blt_done = blt_done,
+};
+
+
+static s32 nsec_2_usec(const s32 nsec)
+{
+ return nsec / 1000;
+}
+
+
+static int is_scale_blt(const struct b2r2_blt_req * const request)
+{
+ if ((request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90 &&
+ (request->src_rect.width !=
+ request->dst_rect.height ||
+ request->src_rect.height !=
+ request->dst_rect.width)) ||
+ (!(request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) &&
+ (request->src_rect.width !=
+ request->dst_rect.width ||
+ request->src_rect.height !=
+ request->dst_rect.height)))
+ return 1;
+ else
+ return 0;
+}
+
+static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ return get_mpix_per_second(get_num_pixels_in_blt(request),
+ nsec_2_usec(blt_profiling_info->nsec_active_in_cpu +
+ blt_profiling_info->nsec_active_in_b2r2));
+}
+
+static void print_blt(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+	char tmp_str[128];
+	snprintf(tmp_str, sizeof(tmp_str),
+		"SF: %#10x, DF: %#10x, F: %#10x, T: %#3x, S: %1i, P: %7i",
+ request->src_img.fmt,
+ request->dst_img.fmt,
+ request->flags,
+ request->transform,
+ is_scale_blt(request),
+ get_num_pixels_in_blt(request));
+ if (use_mpix_per_second_in_print_blts)
+ printk(KERN_ALERT "%s, MPix/s: %3i\n", tmp_str,
+ get_blt_mpix_per_second(request, blt_profiling_info));
+ else
+ printk(KERN_ALERT "%s, CPU: %10i, B2R2: %10i, Tot: %10i ns\n",
+ tmp_str, blt_profiling_info->nsec_active_in_cpu,
+ blt_profiling_info->nsec_active_in_b2r2,
+ blt_profiling_info->total_time_nsec);
+}
+
+
+static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request)
+{
+ s32 num_pixels_in_src = request->src_rect.width * request->src_rect.height;
+ s32 num_pixels_in_dst = request->dst_rect.width * request->dst_rect.height;
+ if (request->flags & (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW))
+ return num_pixels_in_dst;
+ else
+ return (num_pixels_in_src + num_pixels_in_dst) / 2;
+}
+
+static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs)
+{
+ s32 num_pixels_scale_factor = num_pixels != 0 ?
+ S32_MAX / num_pixels : S32_MAX;
+ s32 num_usecs_scale_factor = num_usecs != 0 ?
+ S32_MAX / num_usecs : S32_MAX;
+ s32 scale_factor = min(num_pixels_scale_factor, num_usecs_scale_factor);
+
+ s32 num_pixels_scaled = num_pixels * scale_factor;
+ s32 num_usecs_scaled = num_usecs * scale_factor;
+
+ if (num_usecs_scaled < 1000000)
+ return 0;
+
+ return (num_pixels_scaled / 1000000) / (num_usecs_scaled / 1000000);
+}
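+
+/*
+ * Illustrative example (not part of the original driver): for 1000000 pixels
+ * processed in 10000 us, get_mpix_per_second() picks scale_factor =
+ * S32_MAX / 1000000 = 2147, giving (2147000000 / 1000000) / (21470000 /
+ * 1000000) = 2147 / 21 = 102 MPix/s (the exact value is 100). The scaling
+ * keeps the intermediate products within s32 range at the cost of some
+ * precision.
+ */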
+
+static void print_profiler_stats(void)
+{
+ printk(KERN_ALERT "Min: %3i, Avg: %3i, Max: %3i MPix/s\n",
+ profiler_stats.min_mpix_per_second,
+ get_mpix_per_second(
+ profiler_stats.accumulated_num_pixels,
+ profiler_stats.accumulated_num_usecs),
+ profiler_stats.max_mpix_per_second);
+ printk(KERN_ALERT "Min blit:\n");
+ print_blt(&profiler_stats.min_blt_request,
+ &profiler_stats.min_blt_profiling_info);
+ printk(KERN_ALERT "Max blit:\n");
+ print_blt(&profiler_stats.max_blt_request,
+ &profiler_stats.max_blt_profiling_info);
+}
+
+static void reset_profiler_stats(void)
+{
+ profiler_stats.sampling_start_time_jiffies = jiffies;
+ profiler_stats.min_mpix_per_second = S32_MAX;
+ profiler_stats.max_mpix_per_second = 0;
+ profiler_stats.accumulated_num_pixels = 0;
+ profiler_stats.accumulated_num_usecs = 0;
+ profiler_stats.num_blts_done = 0;
+}
+
+static void do_profiler_stats(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ s32 num_pixels_in_blt;
+ s32 num_usec_blt_took;
+ s32 blt_mpix_per_second;
+
+ if (time_before(jiffies, profiler_stats.sampling_start_time_jiffies))
+ return;
+
+ num_pixels_in_blt = get_num_pixels_in_blt(request);
+ num_usec_blt_took = nsec_2_usec(blt_profiling_info->nsec_active_in_cpu +
+ blt_profiling_info->nsec_active_in_b2r2);
+ blt_mpix_per_second = get_mpix_per_second(num_pixels_in_blt,
+ num_usec_blt_took);
+
+ if (blt_mpix_per_second <=
+ profiler_stats.min_mpix_per_second) {
+ profiler_stats.min_mpix_per_second = blt_mpix_per_second;
+ memcpy(&profiler_stats.min_blt_request,
+ request, sizeof(struct b2r2_blt_req));
+ memcpy(&profiler_stats.min_blt_profiling_info,
+ blt_profiling_info,
+ sizeof(struct b2r2_blt_profiling_info));
+ }
+
+ if (blt_mpix_per_second >= profiler_stats.max_mpix_per_second) {
+ profiler_stats.max_mpix_per_second = blt_mpix_per_second;
+ memcpy(&profiler_stats.max_blt_request, request,
+ sizeof(struct b2r2_blt_req));
+ memcpy(&profiler_stats.max_blt_profiling_info,
+ blt_profiling_info, sizeof(struct b2r2_blt_profiling_info));
+ }
+
+ profiler_stats.accumulated_num_pixels += num_pixels_in_blt;
+ profiler_stats.accumulated_num_usecs += num_usec_blt_took;
+ profiler_stats.num_blts_done++;
+
+ if (profiler_stats.num_blts_done >= profiler_stats_blts_used) {
+ print_profiler_stats();
+ reset_profiler_stats();
+		/*
+		 * The printouts initiated above can disturb the next
+		 * measurement, so we delay it two seconds to give the
+		 * printouts a chance to finish.
+		 */
+ profiler_stats.sampling_start_time_jiffies = jiffies + (2 * HZ);
+ }
+}
+
+static void blt_done(const struct b2r2_blt_req * const request,
+ const s32 request_id,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ /* Filters */
+ if (src_format_filter_on && request->src_img.fmt != src_format_filter)
+ return;
+
+ /* Processors */
+ if (print_blts_on)
+ print_blt(request, blt_profiling_info);
+
+ if (profiler_stats_on)
+ do_profiler_stats(request, blt_profiling_info);
+}
+
+
+static int __init b2r2_profiler_init(void)
+{
+ reset_profiler_stats();
+
+ return b2r2_register_profiler(&this);
+}
+module_init(b2r2_profiler_init);
+
+static void __exit b2r2_profiler_exit(void)
+{
+ b2r2_unregister_profiler(&this);
+}
+module_exit(b2r2_profiler_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Johan Mossberg (johan.xx.mossberg@stericsson.com)");
+MODULE_DESCRIPTION("B2R2 Profiler");
diff --git a/drivers/video/b2r2/b2r2_profiler_api.h b/drivers/video/b2r2/b2r2_profiler_api.h
new file mode 100644
index 00000000000..5f1f9abbe49
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_api.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiling API
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#ifndef _LINUX_VIDEO_B2R2_PROFILER_API_H
+#define _LINUX_VIDEO_B2R2_PROFILER_API_H
+
+#include <video/b2r2_blt.h>
+
+/**
+ * struct b2r2_blt_profiling_info - Profiling information for a blit
+ *
+ * @nsec_active_in_cpu: The number of nanoseconds the job was active in the
+ *                      CPU. This is an approximate value, check out the code
+ *                      for more info.
+ * @nsec_active_in_b2r2: The number of nanoseconds the job was active in B2R2.
+ *                       This is an approximate value, check out the code for
+ *                       more info.
+ * @total_time_nsec: The total time the job took in nanoseconds, including
+ *                   idling.
+ */
+struct b2r2_blt_profiling_info {
+ s32 nsec_active_in_cpu;
+ s32 nsec_active_in_b2r2;
+ s32 total_time_nsec;
+};
+
+/**
+ * struct b2r2_profiler - B2R2 profiler.
+ *
+ * The callbacks are never run concurrently. They must not perform heavy work,
+ * as this might adversely affect the B2R2 driver, and they must not call the
+ * B2R2 profiler API, as doing so will cause a deadlock. If a callback calls
+ * into the B2R2 driver, care must be taken, as deadlock situations can arise.
+ *
+ * @blt_done: Called when a blit has finished, timed out or been canceled.
+ */
+struct b2r2_profiler {
+	void (*blt_done)(const struct b2r2_blt_req * const request,
+		const s32 request_id,
+		const struct b2r2_blt_profiling_info * const blt_profiling_info);
+};
+
+/**
+ * b2r2_register_profiler() - Registers a profiler.
+ *
+ * Currently only one profiler can be registered at any given time.
+ *
+ * @profiler: The profiler
+ *
+ * Returns 0 on success, negative error code on failure
+ */
+int b2r2_register_profiler(const struct b2r2_profiler * const profiler);
+
+/**
+ * b2r2_unregister_profiler() - Unregisters a profiler.
+ *
+ * @profiler: The profiler
+ */
+void b2r2_unregister_profiler(const struct b2r2_profiler * const profiler);
+
+#endif /* _LINUX_VIDEO_B2R2_PROFILER_API_H */
diff --git a/drivers/video/b2r2/b2r2_profiler_socket.c b/drivers/video/b2r2/b2r2_profiler_socket.c
new file mode 100644
index 00000000000..ffa7f2870c8
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_socket.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiler socket communication
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+#include <asm/errno.h>
+
+#include "b2r2_profiler_api.h"
+#include "b2r2_internal.h"
+
+
+/*
+ * TODO: Call the profiler in a separate thread and have a circular buffer
+ * between the B2R2 driver and that thread. That way the profiler cannot slow
+ * down or kill the B2R2 driver. This seems a bit overkill right now, as there
+ * is only one B2R2 profiler and we have full control over it, but the
+ * situation may be different in the future.
+ */
+
+
+static const struct b2r2_profiler *b2r2_profiler;
+static DEFINE_SEMAPHORE(b2r2_profiler_lock);
+
+
+int b2r2_register_profiler(const struct b2r2_profiler * const profiler)
+{
+ int return_value;
+
+ return_value = down_interruptible(&b2r2_profiler_lock);
+ if (return_value != 0)
+ return return_value;
+
+ if (b2r2_profiler != NULL) {
+ return_value = -EUSERS;
+
+ goto cleanup;
+ }
+
+ b2r2_profiler = profiler;
+
+ return_value = 0;
+
+cleanup:
+ up(&b2r2_profiler_lock);
+
+ return return_value;
+}
+EXPORT_SYMBOL(b2r2_register_profiler);
+
+void b2r2_unregister_profiler(const struct b2r2_profiler * const profiler)
+{
+ down(&b2r2_profiler_lock);
+
+ if (profiler == b2r2_profiler)
+ b2r2_profiler = NULL;
+
+ up(&b2r2_profiler_lock);
+}
+EXPORT_SYMBOL(b2r2_unregister_profiler);
+
+
+bool is_profiler_registered_approx(void)
+{
+ /* No locking by design, to make it fast, hence the approx */
+	return b2r2_profiler != NULL;
+}
+
+void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request)
+{
+ int return_value;
+ struct b2r2_blt_profiling_info blt_profiling_info;
+ struct b2r2_control *cont = request->instance->control;
+
+ return_value = down_interruptible(&b2r2_profiler_lock);
+ if (return_value != 0) {
+ dev_err(cont->dev,
+ "%s: Failed to acquire semaphore, ret=%i. "
+ "Lost profiler call!\n", __func__, return_value);
+
+ return;
+ }
+
+	if (b2r2_profiler == NULL)
+ goto cleanup;
+
+ blt_profiling_info.nsec_active_in_cpu = request->nsec_active_in_cpu;
+ blt_profiling_info.nsec_active_in_b2r2 = request->job.nsec_active_in_hw;
+ blt_profiling_info.total_time_nsec = request->total_time_nsec;
+
+	b2r2_profiler->blt_done(&request->user_req, request->request_id,
+			&blt_profiling_info);
+
+cleanup:
+ up(&b2r2_profiler_lock);
+}
diff --git a/drivers/video/b2r2/b2r2_profiler_socket.h b/drivers/video/b2r2/b2r2_profiler_socket.h
new file mode 100644
index 00000000000..80b2c20293f
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_socket.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiler socket communication
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H
+#define _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H
+
+#include "b2r2_internal.h"
+
+/* Will give a correct result most of the time but can be wrong */
+bool is_profiler_registered_approx(void);
+
+void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request);
+
+#endif /* _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H */
diff --git a/drivers/video/b2r2/b2r2_structures.h b/drivers/video/b2r2/b2r2_structures.h
new file mode 100644
index 00000000000..99fa7f047d3
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_structures.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 register struct
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#ifndef __B2R2_STRUCTURES_H
+#define __B2R2_STRUCTURES_H
+
+/* C struct view */
+struct b2r2_memory_map {
+ unsigned char fill0[2304];
+ unsigned int BLT_SSBA17; /* @2304 */
+ unsigned int BLT_SSBA18; /* @2308 */
+ unsigned int BLT_SSBA19; /* @2312 */
+ unsigned int BLT_SSBA20; /* @2316 */
+ unsigned int BLT_SSBA21; /* @2320 */
+ unsigned int BLT_SSBA22; /* @2324 */
+ unsigned int BLT_SSBA23; /* @2328 */
+ unsigned int BLT_SSBA24; /* @2332 */
+ unsigned char fill1[32];
+ unsigned int BLT_STBA5; /* @2368 */
+ unsigned int BLT_STBA6; /* @2372 */
+ unsigned int BLT_STBA7; /* @2376 */
+ unsigned int BLT_STBA8; /* @2380 */
+ unsigned char fill2[176];
+ unsigned int BLT_CTL; /* @2560 */
+ unsigned int BLT_ITS; /* @2564 */
+ unsigned int BLT_STA1; /* @2568 */
+ unsigned char fill3[4];
+ unsigned int BLT_SSBA1; /* @2576 */
+ unsigned int BLT_SSBA2; /* @2580 */
+ unsigned int BLT_SSBA3; /* @2584 */
+ unsigned int BLT_SSBA4; /* @2588 */
+ unsigned int BLT_SSBA5; /* @2592 */
+ unsigned int BLT_SSBA6; /* @2596 */
+ unsigned int BLT_SSBA7; /* @2600 */
+ unsigned int BLT_SSBA8; /* @2604 */
+ unsigned int BLT_STBA1; /* @2608 */
+ unsigned int BLT_STBA2; /* @2612 */
+ unsigned int BLT_STBA3; /* @2616 */
+ unsigned int BLT_STBA4; /* @2620 */
+ unsigned int BLT_CQ1_TRIG_IP; /* @2624 */
+ unsigned int BLT_CQ1_TRIG_CTL; /* @2628 */
+ unsigned int BLT_CQ1_PACE_CTL; /* @2632 */
+ unsigned int BLT_CQ1_IP; /* @2636 */
+ unsigned int BLT_CQ2_TRIG_IP; /* @2640 */
+ unsigned int BLT_CQ2_TRIG_CTL; /* @2644 */
+ unsigned int BLT_CQ2_PACE_CTL; /* @2648 */
+ unsigned int BLT_CQ2_IP; /* @2652 */
+ unsigned int BLT_AQ1_CTL; /* @2656 */
+ unsigned int BLT_AQ1_IP; /* @2660 */
+ unsigned int BLT_AQ1_LNA; /* @2664 */
+ unsigned int BLT_AQ1_STA; /* @2668 */
+ unsigned int BLT_AQ2_CTL; /* @2672 */
+ unsigned int BLT_AQ2_IP; /* @2676 */
+ unsigned int BLT_AQ2_LNA; /* @2680 */
+ unsigned int BLT_AQ2_STA; /* @2684 */
+ unsigned int BLT_AQ3_CTL; /* @2688 */
+ unsigned int BLT_AQ3_IP; /* @2692 */
+ unsigned int BLT_AQ3_LNA; /* @2696 */
+ unsigned int BLT_AQ3_STA; /* @2700 */
+ unsigned int BLT_AQ4_CTL; /* @2704 */
+ unsigned int BLT_AQ4_IP; /* @2708 */
+ unsigned int BLT_AQ4_LNA; /* @2712 */
+ unsigned int BLT_AQ4_STA; /* @2716 */
+ unsigned int BLT_SSBA9; /* @2720 */
+ unsigned int BLT_SSBA10; /* @2724 */
+ unsigned int BLT_SSBA11; /* @2728 */
+ unsigned int BLT_SSBA12; /* @2732 */
+ unsigned int BLT_SSBA13; /* @2736 */
+ unsigned int BLT_SSBA14; /* @2740 */
+ unsigned int BLT_SSBA15; /* @2744 */
+ unsigned int BLT_SSBA16; /* @2748 */
+ unsigned int BLT_SGA1; /* @2752 */
+ unsigned int BLT_SGA2; /* @2756 */
+ unsigned char fill4[8];
+ unsigned int BLT_ITM0; /* @2768 */
+ unsigned int BLT_ITM1; /* @2772 */
+ unsigned int BLT_ITM2; /* @2776 */
+ unsigned int BLT_ITM3; /* @2780 */
+ unsigned char fill5[16];
+ unsigned int BLT_DFV2; /* @2800 */
+ unsigned int BLT_DFV1; /* @2804 */
+ unsigned int BLT_PRI; /* @2808 */
+ unsigned char fill6[8];
+ unsigned int PLUGS1_OP2; /* @2820 */
+ unsigned int PLUGS1_CHZ; /* @2824 */
+ unsigned int PLUGS1_MSZ; /* @2828 */
+ unsigned int PLUGS1_PGZ; /* @2832 */
+ unsigned char fill7[16];
+ unsigned int PLUGS2_OP2; /* @2852 */
+ unsigned int PLUGS2_CHZ; /* @2856 */
+ unsigned int PLUGS2_MSZ; /* @2860 */
+ unsigned int PLUGS2_PGZ; /* @2864 */
+ unsigned char fill8[16];
+ unsigned int PLUGS3_OP2; /* @2884 */
+ unsigned int PLUGS3_CHZ; /* @2888 */
+ unsigned int PLUGS3_MSZ; /* @2892 */
+ unsigned int PLUGS3_PGZ; /* @2896 */
+ unsigned char fill9[48];
+ unsigned int PLUGT_OP2; /* @2948 */
+ unsigned int PLUGT_CHZ; /* @2952 */
+ unsigned int PLUGT_MSZ; /* @2956 */
+ unsigned int PLUGT_PGZ; /* @2960 */
+ unsigned char fill10[108];
+ unsigned int BLT_NIP; /* @3072 */
+ unsigned int BLT_CIC; /* @3076 */
+ unsigned int BLT_INS; /* @3080 */
+ unsigned int BLT_ACK; /* @3084 */
+ unsigned int BLT_TBA; /* @3088 */
+ unsigned int BLT_TTY; /* @3092 */
+ unsigned int BLT_TXY; /* @3096 */
+ unsigned int BLT_TSZ; /* @3100 */
+ unsigned int BLT_S1CF; /* @3104 */
+ unsigned int BLT_S2CF; /* @3108 */
+ unsigned int BLT_S1BA; /* @3112 */
+ unsigned int BLT_S1TY; /* @3116 */
+ unsigned int BLT_S1XY; /* @3120 */
+ unsigned char fill11[4];
+ unsigned int BLT_S2BA; /* @3128 */
+ unsigned int BLT_S2TY; /* @3132 */
+ unsigned int BLT_S2XY; /* @3136 */
+ unsigned int BLT_S2SZ; /* @3140 */
+ unsigned int BLT_S3BA; /* @3144 */
+ unsigned int BLT_S3TY; /* @3148 */
+ unsigned int BLT_S3XY; /* @3152 */
+ unsigned int BLT_S3SZ; /* @3156 */
+ unsigned int BLT_CWO; /* @3160 */
+ unsigned int BLT_CWS; /* @3164 */
+ unsigned int BLT_CCO; /* @3168 */
+ unsigned int BLT_CML; /* @3172 */
+ unsigned int BLT_FCTL; /* @3176 */
+ unsigned int BLT_PMK; /* @3180 */
+ unsigned int BLT_RSF; /* @3184 */
+ unsigned int BLT_RZI; /* @3188 */
+ unsigned int BLT_HFP; /* @3192 */
+ unsigned int BLT_VFP; /* @3196 */
+ unsigned int BLT_Y_RSF; /* @3200 */
+ unsigned int BLT_Y_RZI; /* @3204 */
+ unsigned int BLT_Y_HFP; /* @3208 */
+ unsigned int BLT_Y_VFP; /* @3212 */
+ unsigned char fill12[16];
+ unsigned int BLT_KEY1; /* @3232 */
+ unsigned int BLT_KEY2; /* @3236 */
+ unsigned char fill13[8];
+ unsigned int BLT_SAR; /* @3248 */
+ unsigned int BLT_USR; /* @3252 */
+ unsigned char fill14[8];
+ unsigned int BLT_IVMX0; /* @3264 */
+ unsigned int BLT_IVMX1; /* @3268 */
+ unsigned int BLT_IVMX2; /* @3272 */
+ unsigned int BLT_IVMX3; /* @3276 */
+ unsigned int BLT_OVMX0; /* @3280 */
+ unsigned int BLT_OVMX1; /* @3284 */
+ unsigned int BLT_OVMX2; /* @3288 */
+ unsigned int BLT_OVMX3; /* @3292 */
+ unsigned char fill15[8];
+ unsigned int BLT_VC1R; /* @3304 */
+ unsigned char fill16[20];
+ unsigned int BLT_Y_HFC0; /* @3328 */
+ unsigned int BLT_Y_HFC1; /* @3332 */
+ unsigned int BLT_Y_HFC2; /* @3336 */
+ unsigned int BLT_Y_HFC3; /* @3340 */
+ unsigned int BLT_Y_HFC4; /* @3344 */
+ unsigned int BLT_Y_HFC5; /* @3348 */
+ unsigned int BLT_Y_HFC6; /* @3352 */
+ unsigned int BLT_Y_HFC7; /* @3356 */
+ unsigned int BLT_Y_HFC8; /* @3360 */
+ unsigned int BLT_Y_HFC9; /* @3364 */
+ unsigned int BLT_Y_HFC10; /* @3368 */
+ unsigned int BLT_Y_HFC11; /* @3372 */
+ unsigned int BLT_Y_HFC12; /* @3376 */
+ unsigned int BLT_Y_HFC13; /* @3380 */
+ unsigned int BLT_Y_HFC14; /* @3384 */
+ unsigned int BLT_Y_HFC15; /* @3388 */
+ unsigned char fill17[80];
+ unsigned int BLT_Y_VFC0; /* @3472 */
+ unsigned int BLT_Y_VFC1; /* @3476 */
+ unsigned int BLT_Y_VFC2; /* @3480 */
+ unsigned int BLT_Y_VFC3; /* @3484 */
+ unsigned int BLT_Y_VFC4; /* @3488 */
+ unsigned int BLT_Y_VFC5; /* @3492 */
+ unsigned int BLT_Y_VFC6; /* @3496 */
+ unsigned int BLT_Y_VFC7; /* @3500 */
+ unsigned int BLT_Y_VFC8; /* @3504 */
+ unsigned int BLT_Y_VFC9; /* @3508 */
+ unsigned char fill18[72];
+ unsigned int BLT_HFC0; /* @3584 */
+ unsigned int BLT_HFC1; /* @3588 */
+ unsigned int BLT_HFC2; /* @3592 */
+ unsigned int BLT_HFC3; /* @3596 */
+ unsigned int BLT_HFC4; /* @3600 */
+ unsigned int BLT_HFC5; /* @3604 */
+ unsigned int BLT_HFC6; /* @3608 */
+ unsigned int BLT_HFC7; /* @3612 */
+ unsigned int BLT_HFC8; /* @3616 */
+ unsigned int BLT_HFC9; /* @3620 */
+ unsigned int BLT_HFC10; /* @3624 */
+ unsigned int BLT_HFC11; /* @3628 */
+ unsigned int BLT_HFC12; /* @3632 */
+ unsigned int BLT_HFC13; /* @3636 */
+ unsigned int BLT_HFC14; /* @3640 */
+ unsigned int BLT_HFC15; /* @3644 */
+ unsigned char fill19[80];
+ unsigned int BLT_VFC0; /* @3728 */
+ unsigned int BLT_VFC1; /* @3732 */
+ unsigned int BLT_VFC2; /* @3736 */
+ unsigned int BLT_VFC3; /* @3740 */
+ unsigned int BLT_VFC4; /* @3744 */
+ unsigned int BLT_VFC5; /* @3748 */
+ unsigned int BLT_VFC6; /* @3752 */
+ unsigned int BLT_VFC7; /* @3756 */
+ unsigned int BLT_VFC8; /* @3760 */
+ unsigned int BLT_VFC9; /* @3764 */
+};
+
+#endif /* !defined(__B2R2_STRUCTURES_H) */
+
diff --git a/drivers/video/b2r2/b2r2_timing.c b/drivers/video/b2r2/b2r2_timing.c
new file mode 100644
index 00000000000..4f3e2b8b042
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_timing.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 timing
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/time.h>
+
+
+u32 b2r2_get_curr_nsec(void)
+{
+ struct timespec ts;
+
+ getrawmonotonic(&ts);
+
+ return (u32)timespec_to_ns(&ts);
+}
diff --git a/drivers/video/b2r2/b2r2_timing.h b/drivers/video/b2r2/b2r2_timing.h
new file mode 100644
index 00000000000..e87113c0ec9
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_timing.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 timing
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_
+
+/**
+ * b2r2_get_curr_nsec() - Returns the current time in nanoseconds. Note that
+ *                        the value wraps when the u32 limit is reached.
+ */
+u32 b2r2_get_curr_nsec(void);
+
+#endif /* _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_ */
diff --git a/drivers/video/b2r2/b2r2_utils.c b/drivers/video/b2r2/b2r2_utils.c
new file mode 100644
index 00000000000..3df7a272211
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_utils.c
@@ -0,0 +1,633 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 utils
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_utils.h"
+#include "b2r2_debug.h"
+#include "b2r2_internal.h"
+
+const s32 b2r2_s32_max = 2147483647;
+
+
+/**
+ * calculate_scale_factor() - calculates the scale factor from 'from' to 'to'
+ *
+ * The scale factor is returned in 6.10 fixed point format through @sf_out.
+ */
+int calculate_scale_factor(struct b2r2_control *cont,
+ u32 from, u32 to, u16 *sf_out)
+{
+ int ret;
+ u32 sf;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ if (to == from) {
+ *sf_out = 1 << 10;
+ return 0;
+ } else if (to == 0) {
+ b2r2_log_err(cont->dev, "%s: To is 0!\n", __func__);
+		BUG();
+ }
+
+ sf = (from << 10) / to;
+
+ if ((sf & 0xffff0000) != 0) {
+ /* Overflow error */
+ b2r2_log_warn(cont->dev, "%s: "
+ "Scale factor too large\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ } else if (sf == 0) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Scale factor too small\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ *sf_out = (u16)sf;
+
+ b2r2_log_info(cont->dev, "%s exit\n", __func__);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
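+
+/*
+ * Illustrative example (not part of the original driver): downscaling a 640
+ * pixel wide source to a 320 pixel wide destination gives
+ * sf = (640 << 10) / 320 = 2048, which is 2.0 in 6.10 fixed point format.
+ */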
+
+void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *bounding_rect)
+{
+ bounding_rect->x = 0;
+ bounding_rect->y = 0;
+ bounding_rect->width = img->width;
+ bounding_rect->height = img->height;
+}
+
+
+bool b2r2_is_zero_area_rect(struct b2r2_blt_rect *rect)
+{
+ return rect->width == 0 || rect->height == 0;
+}
+
+bool b2r2_is_rect_inside_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2)
+{
+ return rect1->x >= rect2->x &&
+ rect1->y >= rect2->y &&
+ rect1->x + rect1->width <= rect2->x + rect2->width &&
+ rect1->y + rect1->height <= rect2->y + rect2->height;
+}
+
+bool b2r2_is_rect_gte_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2)
+{
+ return rect1->width >= rect2->width &&
+ rect1->height >= rect2->height;
+}
+
+void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2, struct b2r2_blt_rect *intersection)
+{
+ struct b2r2_blt_rect tmp_rect;
+
+ tmp_rect.x = max(rect1->x, rect2->x);
+ tmp_rect.y = max(rect1->y, rect2->y);
+ tmp_rect.width = min(rect1->x + rect1->width, rect2->x + rect2->width)
+ - tmp_rect.x;
+ if (tmp_rect.width < 0)
+ tmp_rect.width = 0;
+ tmp_rect.height =
+ min(rect1->y + rect1->height, rect2->y + rect2->height) -
+ tmp_rect.y;
+ if (tmp_rect.height < 0)
+ tmp_rect.height = 0;
+
+ *intersection = tmp_rect;
+}
+
+/*
+ * Calculate new rectangles for the supplied request, so that clipping to the
+ * destination image can be avoided.
+ * Essentially, the new destination rectangle is defined inside the old one.
+ * Given the transform and scaling, one has to calculate which part of the old
+ * source rectangle corresponds to the new part of the old destination
+ * rectangle.
+ */
+void b2r2_trim_rects(struct b2r2_control *cont,
+ const struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *new_bg_rect,
+ struct b2r2_blt_rect *new_dst_rect,
+ struct b2r2_blt_rect *new_src_rect)
+{
+ enum b2r2_blt_transform transform = req->transform;
+ struct b2r2_blt_rect *old_src_rect =
+ (struct b2r2_blt_rect *) &req->src_rect;
+ struct b2r2_blt_rect *old_dst_rect =
+ (struct b2r2_blt_rect *) &req->dst_rect;
+ struct b2r2_blt_rect *old_bg_rect =
+ (struct b2r2_blt_rect *) &req->bg_rect;
+ struct b2r2_blt_rect dst_img_bounds;
+ s32 src_x = 0;
+ s32 src_y = 0;
+ s32 src_w = 0;
+ s32 src_h = 0;
+ s32 dx = 0;
+ s32 dy = 0;
+	u16 hsf;
+	u16 vsf;
+
+ b2r2_log_info(cont->dev,
+ "%s\nold_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ old_dst_rect->x, old_dst_rect->y,
+ old_dst_rect->width, old_dst_rect->height);
+ b2r2_log_info(cont->dev,
+ "%s\nold_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ old_src_rect->x, old_src_rect->y,
+ old_src_rect->width, old_src_rect->height);
+
+ b2r2_get_img_bounding_rect((struct b2r2_blt_img *) &req->dst_img,
+ &dst_img_bounds);
+
+ /* dst_rect inside dst_img, no clipping necessary */
+ if (b2r2_is_rect_inside_rect(old_dst_rect, &dst_img_bounds))
+ goto keep_rects;
+
+ b2r2_intersect_rects(old_dst_rect, &dst_img_bounds, new_dst_rect);
+ b2r2_log_info(cont->dev,
+ "%s\nnew_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ new_dst_rect->x, new_dst_rect->y,
+ new_dst_rect->width, new_dst_rect->height);
+
+ /* dst_rect completely outside, leave it to validation */
+ if (new_dst_rect->width == 0 || new_dst_rect->height == 0)
+ goto keep_rects;
+
+ dx = new_dst_rect->x - old_dst_rect->x;
+ dy = new_dst_rect->y - old_dst_rect->y;
+
+ if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ int res = 0;
+ res = calculate_scale_factor(cont, old_src_rect->width,
+ old_dst_rect->height, &hsf);
+ /* invalid dimensions, leave them to validation */
+ if (res < 0)
+ goto keep_rects;
+
+ res = calculate_scale_factor(cont, old_src_rect->height,
+ old_dst_rect->width, &vsf);
+ if (res < 0)
+ goto keep_rects;
+
+ /*
+ * After applying the inverse transform
+ * for 90 degree rotation, the top-left corner
+ * becomes top-right.
+ * src_rect origin is defined as top-left,
+ * so a translation between dst and src
+ * coordinate spaces is necessary.
+ */
+ src_x = (old_src_rect->width << 10) -
+ hsf * (dy + new_dst_rect->height);
+ src_y = dx * vsf;
+ src_w = new_dst_rect->height * hsf;
+ src_h = new_dst_rect->width * vsf;
+ } else {
+ int res = 0;
+ res = calculate_scale_factor(cont, old_src_rect->width,
+ old_dst_rect->width, &hsf);
+ if (res < 0)
+ goto keep_rects;
+
+ res = calculate_scale_factor(cont, old_src_rect->height,
+ old_dst_rect->height, &vsf);
+ if (res < 0)
+ goto keep_rects;
+
+ src_x = dx * hsf;
+ src_y = dy * vsf;
+ src_w = new_dst_rect->width * hsf;
+ src_h = new_dst_rect->height * vsf;
+ }
+
+ /*
+ * src_w must contain all the pixels that contribute
+ * to a particular destination rectangle.
+ * ((x + 0x3ff) >> 10) is equivalent to ceiling(x),
+ * expressed in 6.10 fixed point format.
+	 * Every destination rectangle maps to a certain area in the source
+ * rectangle. The area in source will most likely not be a rectangle
+ * with exact integer dimensions whenever arbitrary scaling is involved.
+ * Consider the following example.
+ * Suppose, that width of the current destination rectangle maps
+ * to 1.7 pixels in source, starting at x == 5.4, as calculated
+ * using the scaling factor.
+ * This means that while the destination rectangle is written,
+ * the source should be read from x == 5.4 up to x == 5.4 + 1.7 == 7.1
+ * Consequently, color from 3 pixels (x == 5, 6 and 7)
+ * needs to be read from source.
+ * The formula below the comment yields:
+ * ceil(0.4 + 1.7) == ceil(2.1) == 3
+ * (src_x & 0x3ff) is the fractional part of src_x,
+ * which is expressed in 6.10 fixed point format.
+ * Thus, width of the source area should be 3 pixels wide,
+ * starting at x == 5.
+ */
+ src_w = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src_h = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
+
+ src_x >>= 10;
+ src_y >>= 10;
+
+ if (transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ src_x = old_src_rect->width - src_x - src_w;
+
+ if (transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ src_y = old_src_rect->height - src_y - src_h;
+
+ /*
+ * Translate the src_rect coordinates into true
+ * src_buffer coordinates.
+ */
+ src_x += old_src_rect->x;
+ src_y += old_src_rect->y;
+
+ new_src_rect->x = src_x;
+ new_src_rect->y = src_y;
+ new_src_rect->width = src_w;
+ new_src_rect->height = src_h;
+
+ b2r2_log_info(cont->dev,
+ "%s\nnew_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ new_src_rect->x, new_src_rect->y,
+ new_src_rect->width, new_src_rect->height);
+
+ if (req->flags & B2R2_BLT_FLAG_BG_BLEND) {
+ /* Modify bg_rect in the same way as dst_rect */
+ s32 dw = new_dst_rect->width - old_dst_rect->width;
+ s32 dh = new_dst_rect->height - old_dst_rect->height;
+ b2r2_log_info(cont->dev,
+ "%s\nold bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n",
+ __func__, old_bg_rect->x, old_bg_rect->y,
+ old_bg_rect->width, old_bg_rect->height);
+ new_bg_rect->x = old_bg_rect->x + dx;
+ new_bg_rect->y = old_bg_rect->y + dy;
+ new_bg_rect->width = old_bg_rect->width + dw;
+ new_bg_rect->height = old_bg_rect->height + dh;
+ b2r2_log_info(cont->dev,
+ "%s\nnew bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n",
+ __func__, new_bg_rect->x, new_bg_rect->y,
+ new_bg_rect->width, new_bg_rect->height);
+ }
+ return;
+keep_rects:
+ /*
+ * Recalculation was not possible, or not necessary.
+ * Do not change anything, leave it to validation.
+ */
+ *new_src_rect = *old_src_rect;
+ *new_dst_rect = *old_dst_rect;
+ *new_bg_rect = *old_bg_rect;
+	b2r2_log_info(cont->dev, "%s original rectangles preserved.\n",
+		__func__);
+ return;
+}
+
+int b2r2_get_fmt_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt)
+{
+ /*
+ * Currently this function is not used that often but if that changes a
+ * lookup table could make it a lot faster.
+ */
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return 1;
+
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return 8;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return 12;
+
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return 16;
+
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return 24;
+
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return 32;
+
+ default:
+ b2r2_log_err(cont->dev,
+ "%s: Internal error! Format %#x not recognized.\n",
+ __func__, fmt);
+ return 32;
+ }
+}
+
+int b2r2_get_fmt_y_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return 8;
+
+ default:
+ b2r2_log_err(cont->dev,
+ "%s: Internal error! Non YCbCr format supplied.\n",
+ __func__);
+ return 8;
+ }
+}
+
+
+bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcri_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcrsp_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcrp_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr420_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr444_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+u32 b2r2_calc_pitch_from_width(struct b2r2_control *cont,
+ s32 width, enum b2r2_blt_fmt fmt)
+{
+ if (b2r2_is_single_plane_fmt(fmt)) {
+ return (u32)b2r2_div_round_up(width *
+ b2r2_get_fmt_bpp(cont, fmt), 8);
+ } else if (b2r2_is_ycbcrsp_fmt(fmt) || b2r2_is_ycbcrp_fmt(fmt)) {
+ return (u32)b2r2_div_round_up(width *
+ b2r2_get_fmt_y_bpp(cont, fmt), 8);
+ } else {
+ b2r2_log_err(cont->dev, "%s: Internal error! "
+ "Pitchless format supplied.\n",
+ __func__);
+ return 0;
+ }
+}
+
+u32 b2r2_get_img_pitch(struct b2r2_control *cont, struct b2r2_blt_img *img)
+{
+ if (img->pitch != 0)
+ return img->pitch;
+ else
+ return b2r2_calc_pitch_from_width(cont, img->width, img->fmt);
+}
+
+s32 b2r2_get_img_size(struct b2r2_control *cont, struct b2r2_blt_img *img)
+{
+ if (b2r2_is_single_plane_fmt(img->fmt)) {
+ return (s32)b2r2_get_img_pitch(cont, img) * img->height;
+ } else if (b2r2_is_ycbcrsp_fmt(img->fmt) ||
+ b2r2_is_ycbcrp_fmt(img->fmt)) {
+ s32 y_plane_size;
+
+ y_plane_size = (s32)b2r2_get_img_pitch(cont, img) * img->height;
+
+ if (b2r2_is_ycbcr420_fmt(img->fmt)) {
+ return y_plane_size + y_plane_size / 2;
+ } else if (b2r2_is_ycbcr422_fmt(img->fmt)) {
+ return y_plane_size * 2;
+ } else if (b2r2_is_ycbcr444_fmt(img->fmt)) {
+ return y_plane_size * 3;
+ } else {
+ b2r2_log_err(cont->dev, "%s: Internal error!"
+ " Format %#x not recognized.\n",
+ __func__, img->fmt);
+ return 0;
+ }
+ } else if (b2r2_is_mb_fmt(img->fmt)) {
+ return (img->width * img->height *
+ b2r2_get_fmt_bpp(cont, img->fmt)) / 8;
+ } else {
+ b2r2_log_err(cont->dev, "%s: Internal error! "
+ "Format %#x not recognized.\n",
+ __func__, img->fmt);
+ return 0;
+ }
+}
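+
+/*
+ * Illustrative example (not part of the original driver): a 640x480
+ * B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR image with pitch 0 gets a Y plane of
+ * 640 * 480 = 307200 bytes plus a half-size chroma plane, so
+ * b2r2_get_img_size() returns 460800 bytes, consistent with the 12 bpp of the
+ * format.
+ */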
+
+
+s32 b2r2_div_round_up(s32 dividend, s32 divisor)
+{
+ s32 quotient = dividend / divisor;
+ if (dividend % divisor != 0)
+ quotient++;
+
+ return quotient;
+}
+
+bool b2r2_is_aligned(s32 value, s32 alignment)
+{
+ return value % alignment == 0;
+}
+
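+/*
+ * Illustrative example (not part of the original driver): b2r2_align_up(5, 4)
+ * returns 8 and b2r2_align_up(-5, 4) returns -4; the value is always rounded
+ * towards positive infinity to the nearest multiple of the alignment.
+ */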
+s32 b2r2_align_up(s32 value, s32 alignment)
+{
+ s32 remainder = abs(value) % abs(alignment);
+ s32 value_to_add;
+
+ if (remainder > 0) {
+ if (value >= 0)
+ value_to_add = alignment - remainder;
+ else
+ value_to_add = remainder;
+ } else {
+ value_to_add = 0;
+ }
+
+ return value + value_to_add;
+}
diff --git a/drivers/video/b2r2/b2r2_utils.h b/drivers/video/b2r2/b2r2_utils.h
new file mode 100644
index 00000000000..0516447b42f
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_utils.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 utils
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_UTILS_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_UTILS_H_
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_internal.h"
+
+extern const s32 b2r2_s32_max;
+
+int calculate_scale_factor(struct b2r2_control *cont,
+ u32 from, u32 to, u16 *sf_out);
+void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *bounding_rect);
+
+bool b2r2_is_zero_area_rect(struct b2r2_blt_rect *rect);
+bool b2r2_is_rect_inside_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2);
+bool b2r2_is_rect_gte_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2);
+void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2,
+ struct b2r2_blt_rect *intersection);
+void b2r2_trim_rects(struct b2r2_control *cont,
+ const struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *new_bg_rect,
+ struct b2r2_blt_rect *new_dst_rect,
+ struct b2r2_blt_rect *new_src_rect);
+
+int b2r2_get_fmt_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt);
+int b2r2_get_fmt_y_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt);
+
+bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcri_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcrsp_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcrp_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr420_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr422_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr444_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt);
+
+/*
+ * Rounds up if an invalid width causes the pitch to be non-byte-aligned.
+ */
+u32 b2r2_calc_pitch_from_width(struct b2r2_control *cont,
+ s32 width, enum b2r2_blt_fmt fmt);
+u32 b2r2_get_img_pitch(struct b2r2_control *cont,
+ struct b2r2_blt_img *img);
+s32 b2r2_get_img_size(struct b2r2_control *cont,
+ struct b2r2_blt_img *img);
+
+s32 b2r2_div_round_up(s32 dividend, s32 divisor);
+bool b2r2_is_aligned(s32 value, s32 alignment);
+s32 b2r2_align_up(s32 value, s32 alignment);
+
+#endif
diff --git a/drivers/video/mcde/Kconfig b/drivers/video/mcde/Kconfig
new file mode 100644
index 00000000000..cb88a66d370
--- /dev/null
+++ b/drivers/video/mcde/Kconfig
@@ -0,0 +1,96 @@
+config FB_MCDE
+ tristate "MCDE support"
+ depends on FB
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select HWMEM
+ ---help---
+	  This enables support for the MCDE based frame buffer driver.
+
+ Please read the file <file:Documentation/fb/mcde.txt>
+
+config FB_MCDE_DEBUG
+ bool "MCDE debug messages"
+ depends on FB_MCDE
+ ---help---
+ Say Y here if you want the MCDE driver to output debug messages
+
+config FB_MCDE_VDEBUG
+ bool "MCDE verbose debug messages"
+ depends on FB_MCDE_DEBUG
+ ---help---
+ Say Y here if you want the MCDE driver to output more debug messages
+
+config MCDE_FB_AVOID_REALLOC
+ bool "MCDE early allocate framebuffer"
+ default n
+ depends on FB_MCDE
+ ---help---
+	  If you say Y here, the maximum frame buffer size is allocated and
+	  used for all resolutions. If you say N here, the frame buffer is
+	  reallocated when the resolution is changed. This reallocation might
+	  fail because of fragmented memory. Note that this memory is never
+	  deallocated while the MCDE framebuffer is in use.
+
+config MCDE_DISPLAY_DSI
+ bool "Support for DSI displays within MCDE"
+ depends on FB_MCDE
+ default y
+
+menu "MCDE DSI displays"
+ depends on MCDE_DISPLAY_DSI
+
+config MCDE_DISPLAY_GENERIC_DSI
+ tristate "Generic DSI display driver"
+
+config MCDE_DISPLAY_SAMSUNG_S6D16D0
+ bool "Samsung S6D16D0 DSI display driver"
+ ---help---
+ Say Y if you have a TPO Taal or Blackpearl display panel.
+
+config MCDE_DISPLAY_SONY_ACX424AKP_DSI
+ tristate "Sony acx424akp DSI display driver"
+
+config MCDE_DISPLAY_AV8100
+ tristate "AV8100 HDMI/CVBS display driver"
+ select AV8100
+
+config MCDE_DISPLAY_HDMI_FB_AUTO_CREATE
+ bool "HDMI_FB_AUTO_CREATE"
+ default y
+ depends on MCDE_DISPLAY_AV8100
+ ---help---
+ Say Y if you want the HDMI frame buffer to be created at startup.
+ Say N if you want the HDMI frame buffer to be created when the HDMI
+ cable is plugged in (requires the user-space HDMI service).
+
+endmenu
+
+config MCDE_DISPLAY_DPI
+ bool "Support for DPI displays within MCDE"
+ depends on FB_MCDE
+ default n
+ ---help---
+ Say Y here to enable support for DPI displays in MCDE and to
+ choose which DPI display drivers to include.
+
+ DPI (Display Pixel Interface) is a MIPI Alliance standard used for
+ active-matrix LCDs. The DPI uses parallel data lines.
+
+menu "MCDE DPI displays"
+ depends on MCDE_DISPLAY_DPI
+
+config MCDE_DISPLAY_VUIB500_DPI
+ tristate "DPI display driver for the VUIB500 board"
+ ---help---
+ The VUIB500 is an ST-Ericsson user interface board.
+
+endmenu
+
+config MCDE_DISPLAY_AB8500_DENC
+ tristate "AB8500 CVBS display driver"
+ depends on FB_MCDE
+ select AB8500_DENC
+
+
diff --git a/drivers/video/mcde/Makefile b/drivers/video/mcde/Makefile
new file mode 100644
index 00000000000..82a78c2542a
--- /dev/null
+++ b/drivers/video/mcde/Makefile
@@ -0,0 +1,23 @@
+mcde-objs += mcde_mod.o
+mcde-objs += mcde_hw.o
+mcde-objs += mcde_dss.o
+mcde-objs += mcde_display.o
+mcde-objs += mcde_bus.o
+mcde-objs += mcde_fb.o
+mcde-objs += mcde_debugfs.o
+obj-$(CONFIG_FB_MCDE) += mcde.o
+
+obj-$(CONFIG_MCDE_DISPLAY_GENERIC_DSI) += display-generic_dsi.o
+obj-$(CONFIG_MCDE_DISPLAY_SAMSUNG_S6D16D0) += display-samsung_s6d16d0.o
+obj-$(CONFIG_MCDE_DISPLAY_SONY_ACX424AKP_DSI) += display-sony_acx424akp_dsi.o
+obj-$(CONFIG_MCDE_DISPLAY_VUIB500_DPI) += display-vuib500-dpi.o
+obj-$(CONFIG_MCDE_DISPLAY_AB8500_DENC) += display-ab8500.o
+obj-$(CONFIG_MCDE_DISPLAY_AV8100) += display-av8100.o
+obj-$(CONFIG_DISPLAY_FICTIVE) += display-fictive.o
+
+ifdef CONFIG_FB_MCDE_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
+ifdef CONFIG_FB_MCDE_VDEBUG
+EXTRA_CFLAGS += -DVERBOSE_DEBUG
+endif
diff --git a/drivers/video/mcde/display-ab8500.c b/drivers/video/mcde/display-ab8500.c
new file mode 100644
index 00000000000..a761a5eec80
--- /dev/null
+++ b/drivers/video/mcde/display-ab8500.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * AB8500 display driver
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/mfd/ab8500/denc.h>
+#include <video/mcde_display.h>
+#include <video/mcde_display-ab8500.h>
+
+#define AB8500_DISP_TRACE dev_dbg(&ddev->dev, "%s\n", __func__)
+
+#define SDTV_PIXCLOCK 37037
+
+/*
+ * PAL:
+ * Total nr of active lines: 576
+ * Total nr of blanking lines: 49
+ * total: 625
+ */
+#define PAL_HBP 132
+#define PAL_HFP 12
+#define PAL_VBP_FIELD_1 22
+#define PAL_VBP_FIELD_2 23
+#define PAL_VFP_FIELD_1 2
+#define PAL_VFP_FIELD_2 2
+
+/*
+ * NTSC (ITU-R BT.470-5):
+ * Total nr of active lines: 486
+ * Total nr of blanking lines: 39
+ * total: 525
+ */
+#define NTSC_ORG_HBP 122
+#define NTSC_ORG_HFP 16
+#define NTSC_ORG_VBP_FIELD_1 16
+#define NTSC_ORG_VBP_FIELD_2 17
+#define NTSC_ORG_VFP_FIELD_1 3
+#define NTSC_ORG_VFP_FIELD_2 3
+
+/*
+ * NTSC (DV variant):
+ * Total nr of active lines: 480
+ * Total nr of blanking lines: 45
+ * total: 525
+ */
+#define NTSC_HBP 122
+#define NTSC_HFP 16
+#define NTSC_VBP_FIELD_1 19
+#define NTSC_VBP_FIELD_2 20
+#define NTSC_VFP_FIELD_1 3
+#define NTSC_VFP_FIELD_2 3
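
As a cross-check of the three timing blocks above, the active line counts plus the per-field porches add up to the frame totals quoted in the comments (625 lines for PAL, 525 for both NTSC variants). The assertions below are illustrative only, written as standalone C11 static_assert checks; they are not part of the driver.

        #include <assert.h>

        /* Active lines + both fields' back/front porches = total lines per frame */
        static_assert(576 + 22 + 23 + 2 + 2 == 625, "PAL: 576 active + 49 blanking");
        static_assert(486 + 16 + 17 + 3 + 3 == 525, "NTSC BT.470: 486 active + 39 blanking");
        static_assert(480 + 19 + 20 + 3 + 3 == 525, "NTSC DV: 480 active + 45 blanking");
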
+
+struct display_driver_data {
+ struct ab8500_denc_conf denc_conf;
+ struct platform_device *denc_dev;
+ int nr_regulators;
+ struct regulator **regulator;
+};
+
+static int try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+static int set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+static int set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode);
+static int on_first_update(struct mcde_display_device *ddev);
+static int display_update(struct mcde_display_device *ddev,
+ bool tripple_buffer);
+
+static int __devinit ab8500_probe(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ int i;
+ struct ab8500_display_platform_data *pdata = ddev->dev.platform_data;
+ struct display_driver_data *driver_data;
+
+ AB8500_DISP_TRACE;
+
+ if (pdata == NULL) {
+ dev_err(&ddev->dev, "%s:Platform data missing\n", __func__);
+ return -EINVAL;
+ }
+ if (ddev->port->type != MCDE_PORTTYPE_DPI) {
+ dev_err(&ddev->dev, "%s:Invalid port type %d\n", __func__,
+ ddev->port->type);
+ return -EINVAL;
+ }
+
+ driver_data = (struct display_driver_data *)
+ kzalloc(sizeof(struct display_driver_data), GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(&ddev->dev, "Failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+ driver_data->denc_dev = ab8500_denc_get_device();
+ if (!driver_data->denc_dev) {
+ dev_err(&ddev->dev, "Failed to get DENC device\n");
+ ret = -ENODEV;
+ goto dev_get_failed;
+ }
+
+ driver_data->regulator = kzalloc(pdata->nr_regulators *
+ sizeof(struct regulator *), GFP_KERNEL);
+ if (!driver_data->regulator) {
+ dev_err(&ddev->dev, "Failed to allocate regulator list\n");
+ ret = -ENOMEM;
+ goto reg_alloc_failed;
+ }
+ for (i = 0; i < pdata->nr_regulators; i++) {
+ driver_data->regulator[i] = regulator_get(&ddev->dev,
+ pdata->regulator_id[i]);
+ if (IS_ERR(driver_data->regulator[i])) {
+ ret = PTR_ERR(driver_data->regulator[i]);
+ dev_warn(&ddev->dev, "%s:Failed to get regulator %s\n",
+ __func__, pdata->regulator_id[i]);
+ goto regulator_get_failed;
+ }
+ }
+ driver_data->nr_regulators = pdata->nr_regulators;
+
+ dev_set_drvdata(&ddev->dev, driver_data);
+
+ ddev->try_video_mode = try_video_mode;
+ ddev->set_video_mode = set_video_mode;
+ ddev->set_power_mode = set_power_mode;
+ ddev->on_first_update = on_first_update;
+ ddev->update = display_update;
+
+ return 0;
+
+regulator_get_failed:
+ for (i--; i >= 0; i--)
+ regulator_put(driver_data->regulator[i]);
+ kfree(driver_data->regulator);
+ driver_data->regulator = NULL;
+reg_alloc_failed:
+ ab8500_denc_put_device(driver_data->denc_dev);
+dev_get_failed:
+ kfree(driver_data);
+ return ret;
+}
+
+static int __devexit ab8500_remove(struct mcde_display_device *ddev)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ AB8500_DISP_TRACE;
+
+ ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+
+ if (driver_data->regulator) {
+ int i;
+ for (i = driver_data->nr_regulators - 1; i >= 0; i--)
+ regulator_put(driver_data->regulator[i]);
+ kfree(driver_data->regulator);
+ driver_data->regulator = NULL;
+ driver_data->nr_regulators = 0;
+ }
+ ab8500_denc_put_device(driver_data->denc_dev);
+ kfree(driver_data);
+ return 0;
+}
+
+static int ab8500_resume(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ AB8500_DISP_TRACE;
+
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s: Failed to resume display\n",
+ __func__);
+
+ return ret;
+}
+
+static int ab8500_suspend(struct mcde_display_device *ddev, pm_message_t state)
+{
+ int ret = 0;
+ AB8500_DISP_TRACE;
+
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s: Failed to suspend display\n",
+ __func__);
+
+ return ret;
+}
+
+
+static struct mcde_display_driver ab8500_driver = {
+ .probe = ab8500_probe,
+ .remove = ab8500_remove,
+ .suspend = ab8500_suspend,
+ .resume = ab8500_resume,
+ .driver = {
+ .name = "mcde_tv_ab8500",
+ },
+};
+
+static void print_vmode(struct mcde_video_mode *vmode)
+{
+ pr_debug("resolution: %dx%d\n", vmode->xres, vmode->yres);
+ pr_debug(" pixclock: %d\n", vmode->pixclock);
+ pr_debug(" hbp: %d\n", vmode->hbp);
+ pr_debug(" hfp: %d\n", vmode->hfp);
+ pr_debug(" vbp: %d\n", vmode->vbp);
+ pr_debug(" vfp: %d\n", vmode->vfp);
+ pr_debug("interlaced: %s\n", vmode->interlaced ? "true" : "false");
+}
+
+static int try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ if (ddev == NULL || video_mode == NULL) {
+ pr_warning("%s:ddev = NULL or video_mode = NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ AB8500_DISP_TRACE;
+
+ if (video_mode->xres != 720) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to find video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+ /* TODO: move this part to MCDE: mcde_dss_try_video_mode? */
+ /* check for PAL */
+ switch (video_mode->yres) {
+ case 576:
+ /* set including SAV/EAV: */
+ video_mode->hbp = PAL_HBP;
+ video_mode->hfp = PAL_HFP;
+ video_mode->vbp = PAL_VBP_FIELD_1 + PAL_VBP_FIELD_2;
+ video_mode->vfp = PAL_VFP_FIELD_1 + PAL_VFP_FIELD_2;
+ video_mode->interlaced = true;
+ video_mode->pixclock = SDTV_PIXCLOCK;
+ break;
+ case 480:
+ /* set including SAV/EAV */
+ video_mode->hbp = NTSC_HBP;
+ video_mode->hfp = NTSC_HFP;
+ video_mode->vbp = NTSC_VBP_FIELD_1 + NTSC_VBP_FIELD_2;
+ video_mode->vfp = NTSC_VFP_FIELD_1 + NTSC_VFP_FIELD_2;
+ video_mode->interlaced = true;
+ video_mode->pixclock = SDTV_PIXCLOCK;
+ break;
+ case 486:
+ /* set including SAV/EAV */
+ video_mode->hbp = NTSC_ORG_HBP;
+ video_mode->hfp = NTSC_ORG_HFP;
+ video_mode->vbp = NTSC_ORG_VBP_FIELD_1 + NTSC_ORG_VBP_FIELD_2;
+ video_mode->vfp = NTSC_ORG_VFP_FIELD_1 + NTSC_ORG_VFP_FIELD_2;
+ video_mode->interlaced = true;
+ video_mode->pixclock = SDTV_PIXCLOCK;
+ break;
+ default:
+ dev_warn(&ddev->dev,
+ "%s:Failed to find video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+ print_vmode(video_mode);
+
+ return 0;
+
+}
+
+static int set_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int res;
+ struct ab8500_display_platform_data *pdata;
+ struct display_driver_data *driver_data;
+
+ if (ddev == NULL || video_mode == NULL) {
+ pr_warning("%s:ddev = NULL or video_mode = NULL\n", __func__);
+ return -EINVAL;
+ }
+ pdata = ddev->dev.platform_data;
+ driver_data = dev_get_drvdata(&ddev->dev);
+ AB8500_DISP_TRACE;
+
+ ddev->video_mode = *video_mode;
+
+ if (video_mode->xres != 720) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+ /* check for PAL BDGHI and N */
+ switch (video_mode->yres) {
+ case 576:
+ driver_data->denc_conf.TV_std = TV_STD_PAL_BDGHI;
+ /* TODO: how to choose LOW DEF FILTER */
+ driver_data->denc_conf.cr_filter = TV_CR_PAL_HIGH_DEF_FILTER;
+ /* TODO: PAL N (e.g. uses a setup of 7.5 IRE) */
+ driver_data->denc_conf.black_level_setup = false;
+ break;
+ case 480: /* NTSC, PAL M DV variant */
+ case 486: /* NTSC, PAL M original */
+ /* TODO: PAL M */
+ driver_data->denc_conf.TV_std = TV_STD_NTSC_M;
+ /* TODO: how to choose LOW DEF FILTER */
+ driver_data->denc_conf.cr_filter = TV_CR_NTSC_HIGH_DEF_FILTER;
+ driver_data->denc_conf.black_level_setup = true;
+ break;
+ default:
+ dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+
+ driver_data->denc_conf.progressive = !video_mode->interlaced;
+ driver_data->denc_conf.act_output = true;
+ driver_data->denc_conf.test_pattern = false;
+ driver_data->denc_conf.partial_blanking = true;
+ driver_data->denc_conf.blank_all = false;
+ driver_data->denc_conf.suppress_col = false;
+ driver_data->denc_conf.phase_reset_mode = TV_PHASE_RST_MOD_DISABLE;
+ driver_data->denc_conf.dac_enable = false;
+ driver_data->denc_conf.act_dc_output = true;
+
+ set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (pdata->rgb_2_yCbCr_transform)
+ mcde_chnl_set_col_convert(ddev->chnl_state,
+ pdata->rgb_2_yCbCr_transform,
+ MCDE_CONVERT_RGB_2_YCBCR);
+ mcde_chnl_stop_flow(ddev->chnl_state);
+ res = mcde_chnl_set_video_mode(ddev->chnl_state, &ddev->video_mode);
+ if (res < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode on channel\n",
+ __func__);
+
+ return res;
+ }
+ ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+
+ return 0;
+}
+
+static int set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+ int i;
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ AB8500_DISP_TRACE;
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ dev_dbg(&ddev->dev, "off -> standby\n");
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ goto error;
+ }
+ if (driver_data->regulator) {
+ for (i = 0; i < driver_data->nr_regulators; i++) {
+ ret = regulator_enable(
+ driver_data->regulator[i]);
+ if (ret)
+ goto off_to_standby_failed;
+ dev_dbg(&ddev->dev, "regulator %d on\n", i);
+ }
+ }
+ ab8500_denc_power_up(driver_data->denc_dev);
+ ab8500_denc_reset(driver_data->denc_dev, true);
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+ dev_dbg(&ddev->dev, "standby -> on\n");
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ }
+ /* ON -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ dev_dbg(&ddev->dev, "on -> standby\n");
+ ab8500_denc_reset(driver_data->denc_dev, false);
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ bool error = false;
+ dev_dbg(&ddev->dev, "standby -> off\n");
+ if (driver_data->regulator) {
+ for (i = 0; i < driver_data->nr_regulators; i++) {
+ ret = regulator_disable(
+ driver_data->regulator[i]);
+ /* continue in case of an error */
+ error |= (ret != 0);
+ dev_dbg(&ddev->dev, "regulator %d off\n", i);
+ }
+ }
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ error |= (ret != 0);
+ }
+ if (error) {
+ /* the latest error code is returned */
+ goto error;
+ }
+ memset(&(ddev->video_mode), 0, sizeof(struct mcde_video_mode));
+ ab8500_denc_power_down(driver_data->denc_dev);
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ return 0;
+
+ /* In case of an error, try to leave in off-state */
+off_to_standby_failed:
+ for (i--; i >= 0; i--)
+ regulator_disable(driver_data->regulator[i]);
+ ddev->platform_disable(ddev);
+
+error:
+ dev_err(&ddev->dev, "Failed to set power mode");
+ return ret;
+}
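
Because the transitions in set_power_mode() above are written as consecutive if-blocks rather than an if/else chain, a single call can cross several states, for example OFF -> STANDBY -> ON when PM_ON is requested from PM_OFF. The sketch below only illustrates that behaviour, under the assumption that ddev is a valid, probed display device; it is not part of the driver.

        /* Illustrative only: typical transitions driven through set_power_mode() */
        static void example_power_cycle(struct mcde_display_device *ddev)
        {
                set_power_mode(ddev, MCDE_DISPLAY_PM_ON);      /* OFF -> STANDBY -> ON */
                set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY); /* ON -> STANDBY */
                set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);     /* STANDBY -> OFF */
        }
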
+
+static int on_first_update(struct mcde_display_device *ddev)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ ab8500_denc_conf(driver_data->denc_dev, &driver_data->denc_conf);
+ ab8500_denc_conf_plug_detect(driver_data->denc_dev, true, false,
+ TV_PLUG_TIME_2S);
+ ab8500_denc_mask_int_plug_det(driver_data->denc_dev, false, false);
+ ddev->first_update = false;
+ return 0;
+}
+
+static int display_update(struct mcde_display_device *ddev, bool tripple_buffer)
+{
+ int ret;
+
+ if (ddev->first_update)
+ on_first_update(ddev);
+ if (ddev->power_mode != MCDE_DISPLAY_PM_ON && ddev->set_power_mode) {
+ ret = set_power_mode(ddev, MCDE_DISPLAY_PM_ON);
+ if (ret < 0)
+ goto error;
+ }
+ ret = mcde_chnl_update(ddev->chnl_state, &ddev->update_area,
+ tripple_buffer);
+ if (ret < 0)
+ goto error;
+out:
+ return ret;
+error:
+ dev_warn(&ddev->dev, "%s:Failed to set power mode to on\n", __func__);
+ goto out;
+}
+
+/* Module init */
+static int __init mcde_display_tvout_ab8500_init(void)
+{
+ pr_debug("%s\n", __func__);
+
+ return mcde_display_driver_register(&ab8500_driver);
+}
+late_initcall(mcde_display_tvout_ab8500_init);
+
+static void __exit mcde_display_tvout_ab8500_exit(void)
+{
+ pr_debug("%s\n", __func__);
+
+ mcde_display_driver_unregister(&ab8500_driver);
+}
+module_exit(mcde_display_tvout_ab8500_exit);
+
+MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE TVout through AB8500 display driver");
diff --git a/drivers/video/mcde/display-av8100.c b/drivers/video/mcde/display-av8100.c
new file mode 100644
index 00000000000..b5116621e16
--- /dev/null
+++ b/drivers/video/mcde/display-av8100.c
@@ -0,0 +1,1610 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson HDMI display driver
+ *
+ * Author: Per Persson <per-xb-persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <video/mcde_fb.h>
+#include <video/mcde_display.h>
+#include <video/mcde_display-av8100.h>
+#include <video/av8100.h>
+#include <video/hdmi.h>
+
+#define SWITCH_HELPSTR ", 0=HDMI, 1=SDTV, 2=DVI\n"
+
+/* AVI Infoframe */
+#define AVI_INFOFRAME_DATA_SIZE 13
+#define AVI_INFOFRAME_TYPE 0x82
+#define AVI_INFOFRAME_VERSION 0x02
+#define AVI_INFOFRAME_DB1 0x10 /* Active Information present */
+#define AVI_INFOFRAME_DB2 0x08 /* Active Portion Aspect ratio */
+
+#ifdef CONFIG_DISPLAY_AV8100_TRIPPLE_BUFFER
+#define NUM_FB_BUFFERS 3
+#else
+#define NUM_FB_BUFFERS 2
+#endif
+
+#define DSI_HS_FREQ_HZ 840320000
+#define DSI_LP_FREQ_HZ 19200000
+
+struct cea_vesa_video_mode {
+ u32 cea;
+ u32 vesa_cea_nr;
+ struct mcde_video_mode *video_mode;
+};
+
+static int hdmi_try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode);
+static int hdmi_set_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode);
+static int hdmi_set_pixel_format(
+ struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format);
+static struct mcde_video_mode *video_mode_get(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr);
+static int ceanr_convert(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr, u16 *w, u16 *h);
+
+static ssize_t show_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_disponoff(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_disponoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_vesacea(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t show_timing(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_timing(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_stayalive(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static DEVICE_ATTR(disponoff, S_IRUGO | S_IWUSR, show_disponoff,
+ store_disponoff);
+static DEVICE_ATTR(vesacea, S_IRUGO, show_vesacea, NULL);
+static DEVICE_ATTR(timing, S_IRUGO | S_IWUSR, show_timing, store_timing);
+static DEVICE_ATTR(stayalive, S_IWUSR, NULL, store_stayalive);
+
+static DEVICE_ATTR(hdmisdtvswitch, S_IRUGO | S_IWUSR, show_hdmisdtvswitch,
+ store_hdmisdtvswitch);
+static DEVICE_ATTR(input_pixel_format, S_IRUGO | S_IWUSR,
+ show_input_pixel_format, store_input_pixel_format);
+
+static ssize_t show_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *mdev = to_mcde_display_device(dev);
+ int index;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ sprintf(buf, "%1x%s", mdev->port->hdmi_sdtv_switch, SWITCH_HELPSTR);
+ index = 1 + strlen(SWITCH_HELPSTR) + 1;
+
+ return index;
+}
+
+static ssize_t store_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *mdev = to_mcde_display_device(dev);
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (count > 0) {
+ if ((*buf == 0) || (*buf == '0')) {
+ dev_dbg(dev, "hdmi/sdtv switch = hdmi\n");
+ mdev->port->hdmi_sdtv_switch = HDMI_SWITCH;
+ mdev->native_x_res = NATIVE_XRES_HDMI;
+ mdev->native_y_res = NATIVE_YRES_HDMI;
+ } else if ((*buf == 1) || (*buf == '1')) {
+ dev_dbg(dev, "hdmi/sdtv switch = sdtv\n");
+ mdev->port->hdmi_sdtv_switch = SDTV_SWITCH;
+ mdev->native_x_res = NATIVE_XRES_SDTV;
+ mdev->native_y_res = NATIVE_YRES_SDTV;
+ } else if ((*buf == 2) || (*buf == '2')) {
+ dev_dbg(dev, "hdmi/sdtv switch = dvi\n");
+ mdev->port->hdmi_sdtv_switch = DVI_SWITCH;
+ mdev->native_x_res = NATIVE_XRES_HDMI;
+ mdev->native_y_res = NATIVE_YRES_HDMI;
+ }
+ /* implicitly read by a memcmp in dss */
+ mdev->video_mode.force_update = true;
+ }
+
+ return count;
+}
+
+static ssize_t show_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+
+ return sprintf(buf, "%d\n", ddev->port->pixel_format);
+}
+
+static ssize_t store_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+ if (count > 0) {
+ unsigned long input;
+ if (strict_strtoul(buf, 10, &input) != 0)
+ return -EINVAL;
+ switch (input) {
+ /* intentional fall through */
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ ddev->port->pixel_format = input;
+ break;
+ default:
+ dev_warn(&ddev->dev, "invalid format (%ld)\n",
+ input);
+ return -EINVAL;
+ break;
+ }
+ /* implicitly read by a memcmp in dss */
+ ddev->video_mode.force_update = true;
+ driver_data->update_port_pixel_format = true;
+ }
+
+ return count;
+}
+
+static ssize_t show_disponoff(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (ddev->fbi && driver_data->fbdevname) {
+ dev_dbg(dev, "name:%s\n", driver_data->fbdevname);
+ strcpy(buf, driver_data->fbdevname);
+ return strlen(driver_data->fbdevname) + 1;
+ }
+ return 0;
+}
+
+static ssize_t store_disponoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *mdev = to_mcde_display_device(dev);
+ bool enable = false;
+ u8 cea = 0;
+ u8 vesa_cea_nr = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if ((count != DISPONOFF_SIZE) && (count != DISPONOFF_SIZE + 1))
+ return -EINVAL;
+
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ cea = (hex_to_bin(buf[2]) << 4) + hex_to_bin(buf[3]);
+ vesa_cea_nr = (hex_to_bin(buf[4]) << 4) + hex_to_bin(buf[5]);
+ dev_dbg(dev, "enable:%d cea:%d nr:%d\n", enable, cea, vesa_cea_nr);
+
+ if (enable && !mdev->enabled && mdev->fbi == NULL) {
+ struct display_driver_data *driver_data = dev_get_drvdata(dev);
+ u16 w = mdev->native_x_res;
+ u16 h = mdev->native_y_res, vh;
+ int buffering = NUM_FB_BUFFERS;
+ struct fb_info *fbi;
+
+ ceanr_convert(mdev, cea, vesa_cea_nr, &w, &h);
+ vh = h * buffering;
+ fbi = mcde_fb_create(mdev, w, h, w, vh,
+ mdev->default_pixel_format, FB_ROTATE_UR);
+ if (IS_ERR(fbi))
+ dev_warn(dev, "fb create failed\n");
+ else
+ driver_data->fbdevname = dev_name(fbi->dev);
+ } else if (!enable && mdev->enabled) {
+ mcde_fb_destroy(mdev);
+ }
+
+ return count;
+}
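
store_disponoff() above parses a fixed-format command: two characters selecting enable ("01") or disable, then the CEA flag and the VESA/CEA mode number as two hex digits each. The snippet below is a hedged sketch of how such a command could be built in user space; the helper name and the chosen mode (VESA #16, 1024x768, listed in the mode table later in this file) are assumptions for the example.

        #include <stdio.h>

        /* Illustrative only: build the string parsed by store_disponoff() above.
         * "01" enables the display, then cea=0 and vesa_cea_nr=16 (1024x768).
         */
        static int example_disponoff_cmd(char *buf, size_t len)
        {
                return snprintf(buf, len, "01%02x%02x", 0, 16); /* yields "010010" */
        }
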
+
+static ssize_t show_timing(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ struct mcde_video_mode *video_mode;
+ int index;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ index = 0;
+ if (driver_data->video_mode) {
+ video_mode = driver_data->video_mode;
+ memcpy(buf + index, &video_mode->xres, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->yres, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->pixclock, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->hbp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->hfp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->vbp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->vfp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->interlaced, sizeof(u32));
+ index += sizeof(u32);
+ }
+ return index;
+}
+
+static ssize_t store_timing(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (count != TIMING_SIZE)
+ return -EINVAL;
+
+ driver_data->video_mode = video_mode_get(ddev, *buf, *(buf + 1));
+
+ return count;
+}
+
+static ssize_t store_stayalive(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+
+ if (count != STAYALIVE_SIZE)
+ return -EINVAL;
+
+ if ((*buf == 1) || (*buf == '1'))
+ ddev->stay_alive = true;
+ else
+ ddev->stay_alive = false;
+
+ dev_dbg(dev, "%s %d\n", __func__, ddev->stay_alive);
+
+ return count;
+}
+
+static int ceanr_convert(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr, u16 *w, u16 *h)
+{
+ struct mcde_video_mode *video_mode;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ video_mode = video_mode_get(ddev, cea, vesa_cea_nr);
+ if (video_mode) {
+ *w = video_mode->xres;
+ *h = video_mode->yres;
+ dev_dbg(&ddev->dev, "cea:%d nr:%d found\n",
+ cea, vesa_cea_nr);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Supported HDMI modes */
+static struct mcde_video_mode video_modes_supp_hdmi[] = {
+ /* 0 CEA #1 640_480_60_P */
+ {
+ .xres = 640, .yres = 480,
+ .pixclock = 39682,
+ .hbp = 112, .hfp = 48,
+ .vbp = 33, .vfp = 12
+ },
+ /* 1 720_480_60_P */
+ {
+ .xres = 720, .yres = 480,
+ .pixclock = 37000,
+ .hbp = 104, .hfp = 34,
+ .vbp = 30, .vfp = 15
+ },
+ /* 2 720_576_50_P */
+ {
+ .xres = 720, .yres = 576,
+ .pixclock = 37037,
+ .hbp = 132, .hfp = 12,
+ .vbp = 44, .vfp = 5
+ },
+ /* 3 1280_720_60_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 256, .hfp = 114,
+ .vbp = 20, .vfp = 10
+ },
+ /* 4 1280_720_50_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 260, .hfp = 440,
+ .vbp = 25, .vfp = 5
+ },
+ /* 5 1280_720_30_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 260, .hfp = 1760,
+ .vbp = 20, .vfp = 10
+ },
+ /* 6 1280_720_24_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 16835,
+ .hbp = 260, .hfp = 1760,
+ .vbp = 20, .vfp = 10
+ },
+ /* 7 1280_720_25_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 260, .hfp = 2420,
+ .vbp = 20, .vfp = 10
+ },
+ /* 8 1920_1080_30_P */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 189, .hfp = 91,
+ .vbp = 36, .vfp = 9
+ },
+ /* 9 1920_1080_24_P */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 170, .hfp = 660,
+ .vbp = 36, .vfp = 9
+ },
+ /* 10 1920_1080_25_P */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 192, .hfp = 528,
+ .vbp = 36, .vfp = 9
+ },
+ /* 11 720_480_60_I */
+ {
+ .xres = 720, .yres = 480,
+ .pixclock = 74074,
+ .hbp = 126, .hfp = 12,
+ .vbp = 44, .vfp = 1,
+ .interlaced = true,
+ },
+ /* 12 720_576_50_I */
+ {
+ .xres = 720, .yres = 576,
+ .pixclock = 74074,
+ .hbp = 132, .hfp = 12,
+ .vbp = 44, .vfp = 5,
+ .interlaced = true,
+ },
+ /* 13 1920_1080_50_I */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 192, .hfp = 528,
+ .vbp = 20, .vfp = 25,
+ .interlaced = true,
+ },
+ /* 14 1920_1080_60_I */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 192, .hfp = 88,
+ .vbp = 20, .vfp = 25,
+ .interlaced = true,
+ },
+ /* 15 VESA #9 800_600_60_P */
+ {
+ .xres = 800, .yres = 600,
+ .pixclock = 25000,
+ .hbp = 168, .hfp = 88,
+ .vbp = 23, .vfp = 5,
+ .interlaced = false,
+ },
+ /* 16 VESA #14 848_480_60_P */
+ {
+ .xres = 848, .yres = 480,
+ .pixclock = 29630,
+ .hbp = 128, .hfp = 112,
+ .vbp = 23, .vfp = 14,
+ .interlaced = false,
+ },
+ /* 17 VESA #16 1024_768_60_P */
+ {
+ .xres = 1024, .yres = 768,
+ .pixclock = 15385,
+ .hbp = 160, .hfp = 160,
+ .vbp = 29, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 18 VESA #22 1280_768_60_P */
+ {
+ .xres = 1280, .yres = 768,
+ .pixclock = 14652,
+ .hbp = 80, .hfp = 80,
+ .vbp = 12, .vfp = 10,
+ .interlaced = false,
+ },
+ /* 19 VESA #23 1280_768_60_P */
+ {
+ .xres = 1280, .yres = 768,
+ .pixclock = 12579,
+ .hbp = 192, .hfp = 192,
+ .vbp = 20, .vfp = 10,
+ .interlaced = false,
+ },
+ /* 20 VESA #27 1280_800_60_P */
+ {
+ .xres = 1280, .yres = 800,
+ .pixclock = 14085,
+ .hbp = 80, .hfp = 80,
+ .vbp = 14, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 21 VESA #28 1280_800_60_P */
+ {
+ .xres = 1280, .yres = 800,
+ .pixclock = 11976,
+ .hbp = 200, .hfp = 200,
+ .vbp = 22, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 22 VESA #39 1360_768_60_P */
+ {
+ .xres = 1360, .yres = 768,
+ .pixclock = 11696,
+ .hbp = 176, .hfp = 256,
+ .vbp = 18, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 23 VESA #81 1366_768_60_P */
+ {
+ .xres = 1366, .yres = 768,
+ .pixclock = 11662,
+ .hbp = 213, .hfp = 213,
+ .vbp = 24, .vfp = 6,
+ .interlaced = false,
+ },
+};
+
+/* Supported TVout modes */
+static struct mcde_video_mode video_modes_supp_sdtv[] = {
+ /* 720_480_60_I) */
+ {
+ .xres = 720, .yres = 480,
+ .pixclock = 74074,
+ .hbp = 126, .hfp = 12,
+ .vbp = 44, .vfp = 1,
+ .interlaced = true,
+ },
+ /* 720_576_50_I) */
+ {
+ .xres = 720, .yres = 576,
+ .pixclock = 74074,
+ .hbp = 132, .hfp = 12,
+ .vbp = 44, .vfp = 5,
+ .interlaced = true,
+ },
+};
+
+static struct cea_vesa_video_mode cea_vesa_video_mode[] = {
+ /* 640_480_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 1,
+ .video_mode = &video_modes_supp_hdmi[0],
+ },
+ /* 720_480_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 2,
+ .video_mode = &video_modes_supp_hdmi[1],
+ },
+ /* 720_480_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 3,
+ .video_mode = &video_modes_supp_hdmi[1],
+ },
+ /* 720_576_50_P */
+ {
+ .cea = 1, .vesa_cea_nr = 17,
+ .video_mode = &video_modes_supp_hdmi[2],
+ },
+ /* 720_576_50_P */
+ {
+ .cea = 1, .vesa_cea_nr = 18,
+ .video_mode = &video_modes_supp_hdmi[2],
+ },
+ /* 1280_720_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 4,
+ .video_mode = &video_modes_supp_hdmi[3],
+ },
+ /* 1280_720_50_P */
+ {
+ .cea = 1, .vesa_cea_nr = 19,
+ .video_mode = &video_modes_supp_hdmi[4],
+ },
+ /* 1280_720_30_P */
+ {
+ .cea = 1, .vesa_cea_nr = 62,
+ .video_mode = &video_modes_supp_hdmi[5],
+ },
+ /* 1280_720_24_P */
+ {
+ .cea = 1, .vesa_cea_nr = 60,
+ .video_mode = &video_modes_supp_hdmi[6],
+ },
+ /* 1280_720_25_P */
+ {
+ .cea = 1, .vesa_cea_nr = 61,
+ .video_mode = &video_modes_supp_hdmi[7],
+ },
+ /* 1920_1080_30_P */
+ {
+ .cea = 1, .vesa_cea_nr = 34,
+ .video_mode = &video_modes_supp_hdmi[8],
+ },
+ /* 1920_1080_24_P */
+ {
+ .cea = 1, .vesa_cea_nr = 32,
+ .video_mode = &video_modes_supp_hdmi[9],
+ },
+ /* 1920_1080_25_P */
+ {
+ .cea = 1, .vesa_cea_nr = 33,
+ .video_mode = &video_modes_supp_hdmi[10],
+ },
+ /* 720_480_60_I) */
+ {
+ .cea = 1, .vesa_cea_nr = 6,
+ .video_mode = &video_modes_supp_hdmi[11],
+ },
+ /* 720_480_60_I) */
+ {
+ .cea = 1, .vesa_cea_nr = 7,
+ .video_mode = &video_modes_supp_hdmi[11],
+ },
+ /* 720_576_50_I) */
+ {
+ .cea = 1, .vesa_cea_nr = 21,
+ .video_mode = &video_modes_supp_hdmi[12],
+ },
+ /* 720_576_50_I) */
+ {
+ .cea = 1, .vesa_cea_nr = 22,
+ .video_mode = &video_modes_supp_hdmi[12],
+ },
+ /* 1920_1080_50_I) */
+ {
+ .cea = 1, .vesa_cea_nr = 20,
+ .video_mode = &video_modes_supp_hdmi[13],
+ },
+ /* 1920_1080_60_I) */
+ {
+ .cea = 1, .vesa_cea_nr = 5,
+ .video_mode = &video_modes_supp_hdmi[14],
+ },
+ /* VESA #4 640_480_60_P) */
+ {
+ .cea = 0, .vesa_cea_nr = 4,
+ .video_mode = &video_modes_supp_hdmi[0],
+ },
+ /* VESA #9 800_600_60_P) */
+ {
+ .cea = 0, .vesa_cea_nr = 9,
+ .video_mode = &video_modes_supp_hdmi[15],
+ },
+ /* VESA #14 848_480_60_P) */
+ {
+ .cea = 0, .vesa_cea_nr = 14,
+ .video_mode = &video_modes_supp_hdmi[16],
+ },
+ /* VESA #16 1024_768_60_P) */
+ {
+ .cea = 0, .vesa_cea_nr = 16,
+ .video_mode = &video_modes_supp_hdmi[17],
+ },
+ /* VESA #22 1280_768_60_P) */
+ {
+ .cea = 0, .vesa_cea_nr = 22,
+ .video_mode = &video_modes_supp_hdmi[18],
+ },
+ /* VESA #23 1280_768_60_P) */
+ {
+ .cea = 0, .vesa_cea_nr = 23,
+ .video_mode = &video_modes_supp_hdmi[19],
+ },
+ /* VESA #27 1280_800_60_P) */
+ {
+ .cea = 0, .vesa_cea_nr = 27,
+ .video_mode = &video_modes_supp_hdmi[20],
+ },
+ /* VESA #28 1280_800_60_P) */
+ {
+ .cea = 0, .vesa_cea_nr = 28,
+ .video_mode = &video_modes_supp_hdmi[21],
+ },
+ /* VESA #39 1360_768_60_P) */
+ {
+ .cea = 0, .vesa_cea_nr = 39,
+ .video_mode = &video_modes_supp_hdmi[22],
+ },
+ /* VESA #81 1366_768_60_P) */
+ {
+ .cea = 0, .vesa_cea_nr = 81,
+ .video_mode = &video_modes_supp_hdmi[23],
+ },
+};
+
+static ssize_t show_vesacea(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int findex;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ for (findex = 0; findex < ARRAY_SIZE(cea_vesa_video_mode); findex++) {
+ *(buf + findex * 2) = cea_vesa_video_mode[findex].cea;
+ *(buf + findex * 2 + 1) =
+ cea_vesa_video_mode[findex].vesa_cea_nr;
+ }
+ *(buf + findex * 2) = '\0';
+
+ return findex * 2 + 1;
+}
+
+static struct mcde_video_mode *video_mode_get(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr)
+{
+ int findex;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+
+ for (findex = 0; findex < ARRAY_SIZE(cea_vesa_video_mode); findex++)
+ if ((cea == cea_vesa_video_mode[findex].cea) &&
+ (vesa_cea_nr ==
+ cea_vesa_video_mode[findex].vesa_cea_nr)) {
+ dev_dbg(&ddev->dev, "cea:%d nr:%d\n", cea, vesa_cea_nr);
+ return cea_vesa_video_mode[findex].video_mode;
+ }
+
+ return NULL;
+}
+
+static u8 ceanr_get(struct mcde_display_device *ddev)
+{
+ int cnt;
+ int cea;
+ int vesa_cea_nr;
+ struct mcde_video_mode *vmode = &ddev->video_mode;
+ struct mcde_video_mode *vmode_try;
+
+ if (!vmode)
+ return 0;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+
+ for (cnt = 0; cnt < ARRAY_SIZE(cea_vesa_video_mode); cnt++) {
+ vmode_try = cea_vesa_video_mode[cnt].video_mode;
+ cea = cea_vesa_video_mode[cnt].cea;
+ vesa_cea_nr = cea_vesa_video_mode[cnt].vesa_cea_nr;
+
+ if (cea && vmode_try->xres == vmode->xres &&
+ vmode_try->yres == vmode->yres &&
+ vmode_try->pixclock == vmode->pixclock &&
+ vmode_try->hbp == vmode->hbp &&
+ vmode_try->hfp == vmode->hfp &&
+ vmode_try->vbp == vmode->vbp &&
+ vmode_try->vfp == vmode->vfp &&
+ vmode_try->interlaced == vmode->interlaced) {
+ dev_dbg(&ddev->dev, "ceanr:%d\n", vesa_cea_nr);
+ return vesa_cea_nr;
+ }
+ }
+
+ return 0;
+}
+
+#define AV8100_MAX_LEVEL 255
+
+static int hdmi_try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int index = 0;
+ int match_level = AV8100_MAX_LEVEL;
+ int found_index = -1;
+ struct mcde_video_mode *video_modes_supp;
+ int array_size;
+
+ if (ddev == NULL || video_mode == NULL) {
+ pr_warning("%s:ddev = NULL or video_mode = NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_vdbg(&ddev->dev, "%s\n", __func__);
+
+ if (ddev->port->hdmi_sdtv_switch == SDTV_SWITCH) {
+ video_mode->interlaced = true;
+ video_modes_supp = video_modes_supp_sdtv;
+ array_size = ARRAY_SIZE(video_modes_supp_sdtv);
+ } else {
+ video_modes_supp = video_modes_supp_hdmi;
+ array_size = ARRAY_SIZE(video_modes_supp_hdmi);
+ }
+
+ while (index < array_size) {
+ /* 1. Check if all parameters match */
+ if ((video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ ((video_mode->xres + video_mode->hbp +
+ video_mode->hfp) ==
+ (video_modes_supp[index].xres +
+ video_modes_supp[index].hbp +
+ video_modes_supp[index].hfp)) &&
+ ((video_mode->yres + video_mode->vbp + video_mode->vfp)
+ ==
+ (video_modes_supp[index].yres +
+ video_modes_supp[index].vbp +
+ video_modes_supp[index].vfp)) &&
+ (video_mode->pixclock ==
+ video_modes_supp[index].pixclock) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced)) {
+ match_level = 1;
+ found_index = index;
+ break;
+ }
+
+ /* 2. Check if xres,yres,htot,vtot,interlaced match */
+ if ((match_level > 2) &&
+ (video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ ((video_mode->xres + video_mode->hbp +
+ video_mode->hfp) ==
+ (video_modes_supp[index].xres +
+ video_modes_supp[index].hbp +
+ video_modes_supp[index].hfp)) &&
+ ((video_mode->yres + video_mode->vbp + video_mode->vfp)
+ ==
+ (video_modes_supp[index].yres +
+ video_modes_supp[index].vbp +
+ video_modes_supp[index].vfp)) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced)) {
+ match_level = 2;
+ found_index = index;
+ }
+
+ /* 3. Check if xres,yres,pixelclock,interlaced match */
+ if ((match_level > 3) &&
+ (video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced) &&
+ (video_mode->pixclock ==
+ video_modes_supp[index].pixclock)) {
+ match_level = 3;
+ found_index = index;
+ }
+
+ /* 4. Check if xres,yres,interlaced match */
+ if ((match_level > 4) &&
+ (video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced)) {
+ match_level = 4;
+ found_index = index;
+ }
+
+ index++;
+ }
+
+ if (found_index == -1) {
+ dev_dbg(&ddev->dev, "video_mode not accepted\n");
+ dev_dbg(&ddev->dev, "xres:%d yres:%d pixclock:%d hbp:%d hfp:%d "
+ "vfp:%d vbp:%d intlcd:%d\n",
+ video_mode->xres, video_mode->yres,
+ video_mode->pixclock,
+ video_mode->hbp, video_mode->hfp,
+ video_mode->vfp, video_mode->vbp,
+ video_mode->interlaced);
+ return -EINVAL;
+ }
+
+ memset(video_mode, 0, sizeof(struct mcde_video_mode));
+ memcpy(video_mode, &video_modes_supp[found_index],
+ sizeof(struct mcde_video_mode));
+
+ dev_dbg(&ddev->dev, "%s:HDMI video_mode %d chosen. Level:%d\n",
+ __func__, found_index, match_level);
+
+ return 0;
+}
+
+static int hdmi_set_video_mode(
+ struct mcde_display_device *dev, struct mcde_video_mode *video_mode)
+{
+ int ret;
+ union av8100_configuration av8100_config;
+ struct mcde_display_hdmi_platform_data *pdata;
+ struct display_driver_data *driver_data;
+ struct av8100_status status;
+
+ /* TODO check video_mode_params */
+ if (dev == NULL || video_mode == NULL) {
+ pr_warning("%s:ddev = NULL or video_mode = NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ pdata = dev->dev.platform_data;
+ driver_data = dev_get_drvdata(&dev->dev);
+
+ dev_dbg(&dev->dev, "%s:\n", __func__);
+ dev_vdbg(&dev->dev, "%s:xres:%d yres:%d hbp:%d hfp:%d vbp:%d vfp:%d "
+ "interlaced:%d\n", __func__,
+ video_mode->xres,
+ video_mode->yres,
+ video_mode->hbp,
+ video_mode->hfp,
+ video_mode->vbp,
+ video_mode->vfp,
+ video_mode->interlaced);
+
+ if (driver_data->update_port_pixel_format) {
+ hdmi_set_pixel_format(dev, dev->pixel_format);
+ driver_data->update_port_pixel_format = false;
+ }
+
+ memset(&(dev->video_mode), 0, sizeof(struct mcde_video_mode));
+ memcpy(&(dev->video_mode), video_mode, sizeof(struct mcde_video_mode));
+
+ if (dev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422 &&
+ pdata->rgb_2_yCbCr_transform)
+ mcde_chnl_set_col_convert(dev->chnl_state,
+ pdata->rgb_2_yCbCr_transform,
+ MCDE_CONVERT_RGB_2_YCBCR);
+ mcde_chnl_stop_flow(dev->chnl_state);
+
+ ret = mcde_chnl_set_video_mode(dev->chnl_state, &dev->video_mode);
+ if (ret < 0) {
+ dev_warn(&dev->dev, "Failed to set video mode\n");
+ return ret;
+ }
+
+ status = av8100_status_get();
+ if (status.av8100_state == AV8100_OPMODE_UNDEFINED)
+ return -EINVAL;
+
+ if (av8100_ver_get() == AV8100_CHIPVER_1) {
+ if (status.av8100_state >= AV8100_OPMODE_STANDBY) {
+ /* Disable interrupts */
+ ret = av8100_disable_interrupt();
+ if (ret) {
+ dev_err(&dev->dev,
+ "%s:av8100_disable_interrupt failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = av8100_powerdown();
+ if (ret) {
+ dev_err(&dev->dev,
+ "av8100_powerdown failed\n");
+ return ret;
+ }
+
+ msleep(10);
+ }
+ }
+
+ /* Set to powerup with interrupts disabled */
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ ret = av8100_powerup();
+ if (ret) {
+ dev_err(&dev->dev, "av8100_powerup failed\n");
+ return ret;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_IDLE) {
+ ret = av8100_download_firmware(I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "av8100_download_firmware failed\n");
+ return ret;
+ }
+ }
+
+ if (av8100_disable_interrupt())
+ return -EFAULT;
+
+ /*
+ * Don't look at dev->port->hdmi_sdtv_switch; it states only which
+ * one should be started, not which one is currently working
+ */
+ if (av8100_conf_get(AV8100_COMMAND_HDMI, &av8100_config))
+ return -EFAULT;
+ if (av8100_config.hdmi_format.hdmi_mode == AV8100_HDMI_ON) {
+ /* Set HDMI mode to OFF */
+ av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_OFF;
+ av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ av8100_config.hdmi_format.hdmi_format = AV8100_HDMI;
+ if (av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config))
+ return -EFAULT;
+
+ if (av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL,
+ I2C_INTERFACE))
+ return -EFAULT;
+ }
+ if (av8100_conf_get(AV8100_COMMAND_DENC, &av8100_config))
+ return -EFAULT;
+ if (av8100_config.denc_format.enable) {
+ /* Turn off DENC */
+ av8100_config.denc_format.enable = 0;
+ if (av8100_conf_prep(AV8100_COMMAND_DENC, &av8100_config))
+ return -EFAULT;
+ if (av8100_conf_w(AV8100_COMMAND_DENC, NULL, NULL,
+ I2C_INTERFACE))
+ return -EFAULT;
+ }
+
+ /* Get current av8100 video output format */
+ ret = av8100_conf_get(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_get "
+ "AV8100_COMMAND_VIDEO_OUTPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH)
+ av8100_config.video_output_format.video_output_cea_vesa =
+ dev->video_mode.yres == NATIVE_YRES_SDTV ?
+ AV8100_CEA21_22_576I_PAL_50HZ :
+ AV8100_CEA6_7_NTSC_60HZ;
+ else
+ av8100_config.video_output_format.video_output_cea_vesa =
+ av8100_video_output_format_get(
+ dev->video_mode.xres,
+ dev->video_mode.yres,
+ dev->video_mode.xres +
+ dev->video_mode.hbp + dev->video_mode.hfp,
+ dev->video_mode.yres +
+ dev->video_mode.vbp + dev->video_mode.vfp,
+ dev->video_mode.pixclock,
+ dev->video_mode.interlaced);
+
+ if (AV8100_VIDEO_OUTPUT_CEA_VESA_MAX ==
+ av8100_config.video_output_format.video_output_cea_vesa) {
+ dev_err(&dev->dev, "%s:video output format not found "
+ "\n", __func__);
+ return ret;
+ }
+
+ ret = av8100_conf_prep(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_prep "
+ "AV8100_COMMAND_VIDEO_OUTPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Get current av8100 video input format */
+ ret = av8100_conf_get(AV8100_COMMAND_VIDEO_INPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_get "
+ "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Set correct av8100 video input pixel format */
+ switch (dev->port->pixel_format) {
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ default:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB565;
+ break;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB666;
+ break;
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB666P;
+ break;
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB888;
+ break;
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_YCBCR422;
+ break;
+ }
+
+ /* Set ui_x4 */
+ av8100_config.video_input_format.ui_x4 = dev->port->phy.dsi.ui;
+
+ /* Set TE_config */
+ switch (dev->port->sync_src) {
+ case MCDE_SYNCSRC_TE0:
+ av8100_config.video_input_format.TE_config = AV8100_TE_IT_LINE;
+ break;
+ case MCDE_SYNCSRC_TE1:
+ av8100_config.video_input_format.TE_config = AV8100_TE_GPIO_IT;
+ break;
+ case MCDE_SYNCSRC_TE_POLLING:
+ av8100_config.video_input_format.TE_config =
+ AV8100_TE_DSI_LANE; /* Only on DSI, no interrupts */
+ break;
+ case MCDE_SYNCSRC_OFF:
+ default:
+ av8100_config.video_input_format.TE_config = AV8100_TE_OFF;
+ break;
+ }
+
+ ret = av8100_conf_prep(AV8100_COMMAND_VIDEO_INPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_prep "
+ "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = av8100_conf_w(AV8100_COMMAND_VIDEO_INPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH) {
+ if (dev->port->pixel_format != MCDE_PORTPIXFMT_DSI_YCBCR422)
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_RGB_TO_DENC;
+ else
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_YUV_TO_DENC;
+ } else if (dev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422) {
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_YUV_TO_RGB;
+ } else {
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_INDENTITY;
+ }
+
+ ret = av8100_conf_prep(
+ AV8100_COMMAND_COLORSPACECONVERSION,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_configuration_prepare "
+ "AV8100_COMMAND_COLORSPACECONVERSION failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = av8100_conf_w(
+ AV8100_COMMAND_COLORSPACECONVERSION,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_COLORSPACECONVERSION failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Set video output format */
+ ret = av8100_conf_w(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "av8100_conf_w failed\n");
+ return ret;
+ }
+
+ /* Set audio input format */
+ ret = av8100_conf_w(AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_AUDIO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ dev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+ dev->first_update = true;
+
+ return 0;
+}
+
+static u16 rotate_byte_left(u8 c, int nr)
+{
+ return (0xff & (c << nr)) | (0xff & (c >> (8 - nr)));
+}
+
+static u16 map_yv(u8 in)
+{
+ return rotate_byte_left(in, 3) << 4;
+}
+
+static u16 map_u(u8 in)
+{
+ return rotate_byte_left(in, 5) << 4;
+}
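
rotate_byte_left() above is an 8-bit rotate, and map_yv()/map_u() shift the rotated byte four bits up into the palette entry. The check below is purely illustrative (it is not called anywhere) and just records two worked values.

        /* Illustrative check: 0x12 rotated left by 3 is 0x90, so
         * map_yv(0x12) == 0x900; rotated left by 5 it is 0x42, so
         * map_u(0x12) == 0x420.
         */
        static void example_map_check(void)
        {
                BUG_ON(map_yv(0x12) != 0x900);
                BUG_ON(map_u(0x12) != 0x420);
        }
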
+
+static int hdmi_set_pixel_format(
+ struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format)
+{
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ ddev->pixel_format = format;
+
+ return 0;
+}
+
+static int hdmi_set_port_pixel_format(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ mcde_chnl_stop_flow(ddev->chnl_state);
+ ret = mcde_chnl_set_pixel_format(ddev->chnl_state,
+ ddev->port->pixel_format);
+
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s: Failed to set pixel format = %d\n",
+ __func__, ddev->port->pixel_format);
+ return ret;
+ }
+
+ if (ddev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422 &&
+ av8100_ver_get() == 2) {
+ /* The V2 chip version has a bug when unpacking YUV422 */
+ struct mcde_palette_table palette = {
+ .map_col_ch0 = *map_yv,
+ .map_col_ch1 = *map_u,
+ .map_col_ch2 = *map_yv,
+ };
+ ret = mcde_chnl_set_palette(ddev->chnl_state, &palette);
+ } else {
+ ret = mcde_chnl_set_palette(ddev->chnl_state, NULL);
+ }
+
+ return ret;
+}
+
+static int hdmi_apply_config(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ if (!ddev->update_flags)
+ return 0;
+
+ ret = mcde_chnl_apply(ddev->chnl_state);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to apply to channel\n",
+ __func__);
+ return ret;
+ }
+ ddev->update_flags = 0;
+
+ return 0;
+}
+
+static int hdmi_on_first_update(struct mcde_display_device *dev)
+{
+ int ret;
+ union av8100_configuration av8100_config;
+ u8 *infofr_data;
+ int infofr_crc;
+ int cnt;
+
+ dev->first_update = false;
+
+ /*
+ * Prepare HDMI configuration
+ * Avoid simultaneous output of DENC and HDMI/DVI.
+ * Only one of them should be enabled.
+ * Note HDMI/DVI and DENC are always turned off in set_video_mode.
+ */
+ switch (dev->port->hdmi_sdtv_switch) {
+ case SDTV_SWITCH:
+ if (av8100_conf_get(AV8100_COMMAND_DENC, &av8100_config))
+ return -EFAULT;
+ av8100_config.denc_format.enable = 1;
+ if (dev->video_mode.yres == NATIVE_YRES_SDTV) {
+ av8100_config.denc_format.standard_selection =
+ AV8100_PAL_BDGHI;
+ av8100_config.denc_format.cvbs_video_format =
+ AV8100_CVBS_625;
+ } else {
+ av8100_config.denc_format.standard_selection =
+ AV8100_NTSC_M;
+ av8100_config.denc_format.cvbs_video_format =
+ AV8100_CVBS_525;
+ }
+ ret = av8100_conf_prep(AV8100_COMMAND_DENC, &av8100_config);
+ break;
+ case DVI_SWITCH:
+ av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ av8100_config.hdmi_format.hdmi_format = AV8100_DVI;
+ av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ ret = av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config);
+ break;
+ case HDMI_SWITCH:
+ default:
+ av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ av8100_config.hdmi_format.hdmi_format = AV8100_HDMI;
+ av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ ret = av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config);
+ break;
+ }
+
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_prep "
+ "AV8100_COMMAND_HDMI/DENC failed\n", __func__);
+ return ret;
+ }
+
+ /* Enable interrupts */
+ ret = av8100_enable_interrupt();
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_enable_interrupt failed\n",
+ __func__);
+ return ret;
+ }
+
+ if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH)
+ ret = av8100_conf_w(AV8100_COMMAND_DENC, NULL, NULL,
+ I2C_INTERFACE);
+ else
+ ret = av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL,
+ I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_HDMI/DENC failed\n", __func__);
+ return ret;
+ }
+
+ /* AVI Infoframe only if HDMI */
+ if (dev->port->hdmi_sdtv_switch != HDMI_SWITCH)
+ goto hdmi_on_first_update_end;
+
+ /* Create AVI Infoframe */
+ av8100_config.infoframes_format.type = AVI_INFOFRAME_TYPE;
+ av8100_config.infoframes_format.version = AVI_INFOFRAME_VERSION;
+ av8100_config.infoframes_format.length = AVI_INFOFRAME_DATA_SIZE;
+
+ /* AVI Infoframe data */
+ infofr_data = &av8100_config.infoframes_format.data[0];
+ memset(infofr_data, 0, AVI_INFOFRAME_DATA_SIZE);
+ infofr_data[0] = AVI_INFOFRAME_DB1;
+ infofr_data[1] = AVI_INFOFRAME_DB2;
+ infofr_data[3] = ceanr_get(dev);
+
+ /* Calculate AVI Infoframe checksum */
+ infofr_crc = av8100_config.infoframes_format.type +
+ av8100_config.infoframes_format.version +
+ av8100_config.infoframes_format.length;
+ for (cnt = 0; cnt < AVI_INFOFRAME_DATA_SIZE; cnt++)
+ infofr_crc += infofr_data[cnt];
+ infofr_crc &= 0xFF;
+ av8100_config.infoframes_format.crc = 0x100 - infofr_crc;
+
+ /* Send AVI Infoframe */
+ if (av8100_conf_prep(AV8100_COMMAND_INFOFRAMES,
+ &av8100_config) != 0) {
+ dev_err(&dev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_INFOFRAMES,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(&dev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+hdmi_on_first_update_end:
+ return ret;
+}
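
The checksum loop above follows the usual InfoFrame rule: the checksum byte is chosen so that the type, version, length, payload and checksum together sum to zero modulo 256. The helper below merely restates that computation as a self-contained sketch; its name and signature are assumptions, not part of the driver.

        /* Illustrative restatement of the AVI InfoFrame checksum computed above */
        static u8 example_infoframe_csum(u8 type, u8 version, u8 length,
                        const u8 *data, int len)
        {
                unsigned int sum = type + version + length;
                int i;

                for (i = 0; i < len; i++)
                        sum += data[i];

                /* choose the checksum so that the total sum is 0 modulo 256 */
                return (u8)(0x100 - (sum & 0xff));
        }
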
+
+static int hdmi_set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ int ret = 0;
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * The regulator for analog TV-out is only enabled here, which
+ * means the display must pass through the OFF state in order to
+ * switch from HDMI to CVBS.
+ */
+ if (ddev->port->hdmi_sdtv_switch == SDTV_SWITCH) {
+ ret = regulator_enable(driver_data->cvbs_regulator);
+ if (ret)
+ return ret;
+ driver_data->cvbs_regulator_enabled = true;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+
+ hdmi_set_port_pixel_format(ddev);
+ }
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ goto set_power_and_exit;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ memset(&(ddev->video_mode), 0, sizeof(struct mcde_video_mode));
+ ret = av8100_powerscan();
+ if (ret)
+ dev_err(&ddev->dev, "%s:av8100_powerscan failed\n"
+ , __func__);
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ if (ret)
+ return ret;
+ }
+ if (driver_data->cvbs_regulator_enabled) {
+ ret = regulator_disable(driver_data->cvbs_regulator);
+ if (ret)
+ return ret;
+ driver_data->cvbs_regulator_enabled = false;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+set_power_and_exit:
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+
+ return ret;
+}
+
+static int __devinit hdmi_probe(struct mcde_display_device *dev)
+{
+ int ret = 0;
+ struct mcde_port *port;
+ struct display_driver_data *driver_data;
+ struct mcde_display_hdmi_platform_data *pdata =
+ dev->dev.platform_data;
+
+ if (pdata == NULL) {
+ dev_err(&dev->dev, "%s:Platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dev->port->type != MCDE_PORTTYPE_DSI) {
+ dev_err(&dev->dev, "%s:Invalid port type %d\n",
+ __func__, dev->port->type);
+ return -EINVAL;
+ }
+
+ driver_data = (struct display_driver_data *)
+ kzalloc(sizeof(struct display_driver_data), GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(&dev->dev, "Failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ /* Use DSI clock continuous mode if the chip version is newer than AV8100_CHIPVER_1 */
+ if (av8100_ver_get() > AV8100_CHIPVER_1)
+ dev->port->phy.dsi.clk_cont = true;
+
+ dev->on_first_update = hdmi_on_first_update;
+ dev->try_video_mode = hdmi_try_video_mode;
+ dev->set_video_mode = hdmi_set_video_mode;
+ dev->apply_config = hdmi_apply_config;
+ dev->set_pixel_format = hdmi_set_pixel_format;
+ dev->set_power_mode = hdmi_set_power_mode;
+
+ port = dev->port;
+
+ port->phy.dsi.host_eot_gen = true;
+ port->phy.dsi.num_data_lanes = 2;
+ port->phy.dsi.hs_freq = DSI_HS_FREQ_HZ;
+ port->phy.dsi.lp_freq = DSI_LP_FREQ_HZ;
+
+ /* Create sysfs files */
+ if (device_create_file(&dev->dev, &dev_attr_hdmisdtvswitch))
+ dev_info(&dev->dev,
+ "Unable to create hdmisdtvswitch attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_input_pixel_format))
+ dev_info(&dev->dev,
+ "Unable to create input_pixel_format attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_disponoff))
+ dev_info(&dev->dev,
+ "Unable to create disponoff attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_vesacea))
+ dev_info(&dev->dev,
+ "Unable to create ceavesa attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_timing))
+ dev_info(&dev->dev,
+ "Unable to create timing attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_stayalive))
+ dev_info(&dev->dev,
+ "Unable to create stayalive attr\n");
+
+ if (pdata->cvbs_regulator_id) {
+ driver_data->cvbs_regulator = regulator_get(&dev->dev,
+ pdata->cvbs_regulator_id);
+ if (IS_ERR(driver_data->cvbs_regulator)) {
+ ret = PTR_ERR(driver_data->cvbs_regulator);
+ dev_warn(&dev->dev, "%s:Failed to get regulator %s\n",
+ __func__, pdata->cvbs_regulator_id);
+ driver_data->cvbs_regulator = NULL;
+ goto av_regulator_get_failed;
+ }
+ }
+
+ dev_set_drvdata(&dev->dev, driver_data);
+ dev_info(&dev->dev, "HDMI display probed\n");
+
+ return 0;
+
+av_regulator_get_failed:
+ kfree(driver_data);
+ return ret;
+}
+
+static int __devexit hdmi_remove(struct mcde_display_device *dev)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&dev->dev);
+ struct mcde_display_hdmi_platform_data *pdata =
+ dev->dev.platform_data;
+
+ /* Remove sysfs files */
+ device_remove_file(&dev->dev, &dev_attr_input_pixel_format);
+ device_remove_file(&dev->dev, &dev_attr_hdmisdtvswitch);
+ device_remove_file(&dev->dev, &dev_attr_disponoff);
+ device_remove_file(&dev->dev, &dev_attr_vesacea);
+ device_remove_file(&dev->dev, &dev_attr_timing);
+ device_remove_file(&dev->dev, &dev_attr_stayalive);
+
+ dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+
+ if (driver_data->cvbs_regulator)
+ regulator_put(driver_data->cvbs_regulator);
+ kfree(driver_data);
+ if (pdata->hdmi_platform_enable) {
+ if (pdata->regulator)
+ regulator_put(pdata->regulator);
+ if (pdata->reset_gpio) {
+ gpio_direction_input(pdata->reset_gpio);
+ gpio_free(pdata->reset_gpio);
+ }
+ }
+
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int hdmi_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ if (ddev->chnl_state == NULL)
+ return 0;
+
+	/* set_power_mode handles the call to platform_enable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+
+ return ret;
+}
+
+static int hdmi_suspend(struct mcde_display_device *ddev, pm_message_t state)
+{
+ int ret;
+
+ if (ddev->chnl_state == NULL)
+ return 0;
+
+	/* set_power_mode handles the call to platform_disable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+
+ return ret;
+}
+#endif
+
+static struct mcde_display_driver hdmi_driver = {
+ .probe = hdmi_probe,
+ .remove = hdmi_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = hdmi_suspend,
+ .resume = hdmi_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "av8100_hdmi",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_hdmi_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&hdmi_driver);
+}
+late_initcall(mcde_display_hdmi_init);
+
+static void __exit mcde_display_hdmi_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&hdmi_driver);
+}
+module_exit(mcde_display_hdmi_exit);
+
+MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson hdmi display driver");
diff --git a/drivers/video/mcde/display-fictive.c b/drivers/video/mcde/display-fictive.c
new file mode 100644
index 00000000000..c7ea1429b9f
--- /dev/null
+++ b/drivers/video/mcde/display-fictive.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * ST-Ericsson MCDE fictive display driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+
+#include <video/mcde_display.h>
+
+static int __devinit fictive_probe(struct mcde_display_device *dev)
+{
+	dev->platform_enable = NULL;
+	dev->platform_disable = NULL;
+	dev->set_power_mode = NULL;
+
+ dev_info(&dev->dev, "Fictive display probed\n");
+
+ return 0;
+}
+
+static int __devexit fictive_remove(struct mcde_display_device *dev)
+{
+ return 0;
+}
+
+static struct mcde_display_driver fictive_driver = {
+ .probe = fictive_probe,
+ .remove = fictive_remove,
+ .driver = {
+ .name = "mcde_disp_fictive",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_fictive_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&fictive_driver);
+}
+module_init(mcde_display_fictive_init);
+
+static void __exit mcde_display_fictive_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&fictive_driver);
+}
+module_exit(mcde_display_fictive_exit);
+
+MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE fictive display driver");
diff --git a/drivers/video/mcde/display-generic_dsi.c b/drivers/video/mcde/display-generic_dsi.c
new file mode 100644
index 00000000000..6c63f4cd750
--- /dev/null
+++ b/drivers/video/mcde/display-generic_dsi.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE generic DCS display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_display-generic_dsi.h>
+
+static int generic_platform_enable(struct mcde_display_device *dev)
+{
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ dev_dbg(&dev->dev, "%s: Reset & power on generic display\n", __func__);
+
+ if (pdata->regulator) {
+ if (regulator_enable(pdata->regulator) < 0) {
+ dev_err(&dev->dev, "%s:Failed to enable regulator\n"
+ , __func__);
+ return -EINVAL;
+ }
+ }
+ if (pdata->reset_gpio)
+ gpio_set_value_cansleep(pdata->reset_gpio, pdata->reset_high);
+ mdelay(pdata->reset_delay);
+ if (pdata->reset_gpio)
+ gpio_set_value_cansleep(pdata->reset_gpio, !pdata->reset_high);
+
+ return 0;
+}
+
+static int generic_platform_disable(struct mcde_display_device *dev)
+{
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ dev_dbg(&dev->dev, "%s:Reset & power off generic display\n", __func__);
+
+ if (pdata->regulator) {
+ if (regulator_disable(pdata->regulator) < 0) {
+ dev_err(&dev->dev, "%s:Failed to disable regulator\n"
+ , __func__);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int generic_set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+ struct mcde_display_generic_platform_data *pdata =
+ ddev->dev.platform_data;
+
+ dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__);
+
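+	/*
+	 * The state transitions below are chained, so a single call can take
+	 * the panel through OFF -> STANDBY -> ON (or back down) in one step.
+	 */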
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ return ret;
+ }
+
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_EXIT_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ msleep(pdata->sleep_out_delay);
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_ON, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ goto set_power_and_exit;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_OFF, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_ENTER_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+	/* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ if (ret)
+ return ret;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+set_power_and_exit:
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+
+ return ret;
+}
+
+static int __devinit generic_probe(struct mcde_display_device *dev)
+{
+ int ret = 0;
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ if (pdata == NULL) {
+ dev_err(&dev->dev, "%s:Platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dev->port->type != MCDE_PORTTYPE_DSI) {
+ dev_err(&dev->dev,
+ "%s:Invalid port type %d\n",
+ __func__, dev->port->type);
+ return -EINVAL;
+ }
+
+ if (!dev->platform_enable && !dev->platform_disable) {
+ pdata->generic_platform_enable = true;
+ if (pdata->reset_gpio) {
+ ret = gpio_request(pdata->reset_gpio, NULL);
+ if (ret) {
+ dev_warn(&dev->dev,
+ "%s:Failed to request gpio %d\n",
+ __func__, pdata->reset_gpio);
+ goto gpio_request_failed;
+ }
+ gpio_direction_output(pdata->reset_gpio,
+ !pdata->reset_high);
+ }
+ if (pdata->regulator_id) {
+ pdata->regulator = regulator_get(&dev->dev,
+ pdata->regulator_id);
+ if (IS_ERR(pdata->regulator)) {
+ ret = PTR_ERR(pdata->regulator);
+ dev_warn(&dev->dev,
+ "%s:Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_id);
+ pdata->regulator = NULL;
+ goto regulator_get_failed;
+ }
+
+ if (regulator_set_voltage(pdata->regulator,
+ pdata->min_supply_voltage,
+ pdata->max_supply_voltage) < 0) {
+ int volt;
+
+ dev_warn(&dev->dev,
+ "%s:Failed to set voltage '%s'\n",
+ __func__, pdata->regulator_id);
+ volt = regulator_get_voltage(pdata->regulator);
+ dev_warn(&dev->dev,
+ "Voltage:%d\n", volt);
+ }
+
+			/*
+			 * When U-Boot has displayed a startup screen, it has
+			 * already turned on the display power, but the
+			 * regulator framework does not know about that. In
+			 * that case the display driver has to enable the
+			 * regulator for the display itself.
+			 */
+ if (dev->power_mode == MCDE_DISPLAY_PM_STANDBY) {
+ ret = regulator_enable(pdata->regulator);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "%s:Failed to enable regulator\n"
+ , __func__);
+ goto regulator_enable_failed;
+ }
+ }
+ }
+ }
+
+	dev->platform_enable = generic_platform_enable;
+	dev->platform_disable = generic_platform_disable;
+	dev->set_power_mode = generic_set_power_mode;
+
+ dev_info(&dev->dev, "Generic display probed\n");
+
+ goto out;
+regulator_enable_failed:
+regulator_get_failed:
+ if (pdata->generic_platform_enable && pdata->reset_gpio)
+ gpio_free(pdata->reset_gpio);
+gpio_request_failed:
+out:
+ return ret;
+}
+
+static int __devexit generic_remove(struct mcde_display_device *dev)
+{
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+
+ if (!pdata->generic_platform_enable)
+ return 0;
+
+ if (pdata->regulator)
+ regulator_put(pdata->regulator);
+ if (pdata->reset_gpio) {
+ gpio_direction_input(pdata->reset_gpio);
+ gpio_free(pdata->reset_gpio);
+ }
+
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int generic_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+
+	/* set_power_mode handles the call to platform_enable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+ ddev->set_synchronized_update(ddev,
+ ddev->get_synchronized_update(ddev));
+ return ret;
+}
+
+static int generic_suspend(struct mcde_display_device *ddev, pm_message_t state)
+{
+ int ret;
+
+	/* set_power_mode handles the call to platform_disable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+ return ret;
+}
+#endif
+
+static struct mcde_display_driver generic_driver = {
+ .probe = generic_probe,
+ .remove = generic_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = generic_suspend,
+ .resume = generic_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "mcde_disp_generic",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_generic_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&generic_driver);
+}
+module_init(mcde_display_generic_init);
+
+static void __exit mcde_display_generic_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&generic_driver);
+}
+module_exit(mcde_display_generic_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE generic DCS display driver");
diff --git a/drivers/video/mcde/display-samsung_s6d16d0.c b/drivers/video/mcde/display-samsung_s6d16d0.c
new file mode 100644
index 00000000000..62db3db6556
--- /dev/null
+++ b/drivers/video/mcde/display-samsung_s6d16d0.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE Samsung S6D16D0 display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+
+#include <video/mcde_display.h>
+
+#define RESET_DURATION_US 10
+#define RESET_DELAY_MS 120
+#define SLEEP_OUT_DELAY_MS 120
+#define IO_REGU "vdd1"
+#define IO_REGU_MIN 1650000
+#define IO_REGU_MAX 3300000
+
+#define DSI_HS_FREQ_HZ 420160000
+#define DSI_LP_FREQ_HZ 19200000
+
+struct device_info {
+ int reset_gpio;
+ struct mcde_port port;
+ struct regulator *regulator;
+};
+
+static inline struct device_info *get_drvdata(struct mcde_display_device *ddev)
+{
+ return (struct device_info *)dev_get_drvdata(&ddev->dev);
+}
+
+static int power_on(struct mcde_display_device *ddev)
+{
+ struct device_info *di = get_drvdata(ddev);
+
+ dev_dbg(&ddev->dev, "Reset & power on s6d16d0 display\n");
+
+ regulator_enable(di->regulator);
+ gpio_set_value_cansleep(di->reset_gpio, 0);
+ udelay(RESET_DURATION_US);
+ gpio_set_value_cansleep(di->reset_gpio, 1);
+ msleep(RESET_DELAY_MS);
+
+ return 0;
+}
+
+static int power_off(struct mcde_display_device *ddev)
+{
+ struct device_info *di = get_drvdata(ddev);
+
+ dev_dbg(&ddev->dev, "Power off s6d16d0 display\n");
+
+ regulator_disable(di->regulator);
+
+ return 0;
+}
+
+static int display_on(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "Display on s6d16d0\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_EXIT_SLEEP_MODE,
+ NULL, 0);
+ if (ret)
+ return ret;
+ msleep(SLEEP_OUT_DELAY_MS);
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_ON,
+ NULL, 0);
+}
+
+static int display_off(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "Display off s6d16d0\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_OFF,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_ENTER_SLEEP_MODE,
+ NULL, 0);
+}
+
+static int set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+
+ dev_dbg(&ddev->dev, "Set power mode %d\n", power_mode);
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ ret = power_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ret = display_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+
+ ret = display_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ ret = power_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ return mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+}
+
+static int __devinit samsung_s6d16d0_probe(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ struct mcde_display_dsi_platform_data *pdata = ddev->dev.platform_data;
+ struct device_info *di;
+
+ if (pdata == NULL || !pdata->reset_gpio) {
+ dev_err(&ddev->dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+ di->reset_gpio = pdata->reset_gpio;
+ di->port.link = pdata->link;
+ di->port.type = MCDE_PORTTYPE_DSI;
+ di->port.mode = MCDE_PORTMODE_CMD;
+ di->port.pixel_format = MCDE_PORTPIXFMT_DSI_24BPP;
+ di->port.sync_src = MCDE_SYNCSRC_BTA;
+ di->port.phy.dsi.num_data_lanes = 2;
+ di->port.phy.dsi.host_eot_gen = true;
+ /* TODO: Move UI to mcde_hw.c when clk_get_rate(dsi) is done */
+ di->port.phy.dsi.ui = 9;
+ di->port.phy.dsi.hs_freq = DSI_HS_FREQ_HZ;
+ di->port.phy.dsi.lp_freq = DSI_LP_FREQ_HZ;
+
+ ret = gpio_request(di->reset_gpio, NULL);
+ if (ret)
+ goto gpio_request_failed;
+ gpio_direction_output(di->reset_gpio, 1);
+ di->regulator = regulator_get(&ddev->dev, IO_REGU);
+	if (IS_ERR(di->regulator)) {
+		ret = PTR_ERR(di->regulator);
+		di->regulator = NULL;
+		goto regulator_get_failed;
+	}
+ ret = regulator_set_voltage(di->regulator, IO_REGU_MIN, IO_REGU_MAX);
+ if (WARN_ON(ret))
+ goto regulator_voltage_failed;
+
+ /* Get in sync with u-boot */
+ if (ddev->power_mode != MCDE_DISPLAY_PM_OFF)
+ (void)regulator_enable(di->regulator);
+
+ ddev->set_power_mode = set_power_mode;
+ ddev->port = &di->port;
+ ddev->native_x_res = 864;
+ ddev->native_y_res = 480;
+ dev_set_drvdata(&ddev->dev, di);
+
+ dev_info(&ddev->dev, "Samsung s6d16d0 display probed\n");
+
+ return 0;
+regulator_voltage_failed:
+ regulator_put(di->regulator);
+regulator_get_failed:
+ gpio_free(di->reset_gpio);
+gpio_request_failed:
+ kfree(di);
+ return ret;
+}
+
+static struct mcde_display_driver samsung_s6d16d0_driver = {
+ .probe = samsung_s6d16d0_probe,
+ .driver = {
+ .name = "samsung_s6d16d0",
+ },
+};
+
+static int __init samsung_s6d16d0_init(void)
+{
+ return mcde_display_driver_register(&samsung_s6d16d0_driver);
+}
+module_init(samsung_s6d16d0_init);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE Samsung S6D16D0 display driver");
diff --git a/drivers/video/mcde/display-sony_acx424akp_dsi.c b/drivers/video/mcde/display-sony_acx424akp_dsi.c
new file mode 100644
index 00000000000..44302de8f1b
--- /dev/null
+++ b/drivers/video/mcde/display-sony_acx424akp_dsi.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE Sony acx424akp DCS display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <linux/regulator/consumer.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_display-sony_acx424akp_dsi.h>
+
+#define RESET_DELAY_MS 11
+#define RESET_LOW_DELAY_US 20
+#define SLEEP_OUT_DELAY_MS 140
+#define IO_REGU "vddi"
+#define IO_REGU_MIN 1600000
+#define IO_REGU_MAX 3300000
+
+#define DSI_HS_FREQ_HZ 420160000
+#define DSI_LP_FREQ_HZ 19200000
+
+struct device_info {
+ int reset_gpio;
+ struct mcde_port port;
+ struct regulator *regulator;
+};
+
+static inline struct device_info *get_drvdata(struct mcde_display_device *ddev)
+{
+ return (struct device_info *)dev_get_drvdata(&ddev->dev);
+}
+
+static int display_read_deviceid(struct mcde_display_device *dev, u16 *id)
+{
+ struct mcde_chnl_state *chnl;
+
+ u8 id1, id2, id3;
+ int len = 1;
+ int ret = 0;
+ int readret = 0;
+
+ dev_dbg(&dev->dev, "%s: Read device id of the display\n", __func__);
+
+ /* Acquire MCDE resources */
+ chnl = mcde_chnl_get(dev->chnl_id, dev->fifo, dev->port);
+ if (IS_ERR(chnl)) {
+ ret = PTR_ERR(chnl);
+ dev_warn(&dev->dev, "Failed to acquire MCDE channel\n");
+ goto out;
+ }
+
+	/* plugnplay: use registers DAh, DBh and DCh to detect the display */
+ readret = mcde_dsi_dcs_read(chnl, 0xDA, (u32 *)&id1, &len);
+ if (!readret)
+ readret = mcde_dsi_dcs_read(chnl, 0xDB, (u32 *)&id2, &len);
+ if (!readret)
+ readret = mcde_dsi_dcs_read(chnl, 0xDC, (u32 *)&id3, &len);
+
+	if (readret) {
+		ret = readret;
+		dev_info(&dev->dev,
+			"mcde_dsi_dcs_read failed to read display ID\n");
+		goto read_fail;
+	}
+
+ *id = (id3 << 8) | id2;
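+	/* e.g. (illustrative values) id2 = 0x4A and id3 = 0x01 give *id = 0x014A */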
+read_fail:
+ /* close MCDE channel */
+ mcde_chnl_put(chnl);
+out:
+	return ret;
+}
+
+static int power_on(struct mcde_display_device *dev)
+{
+ struct device_info *di = get_drvdata(dev);
+
+ dev_dbg(&dev->dev, "%s: Reset & power on sony display\n", __func__);
+
+ regulator_enable(di->regulator);
+ gpio_set_value_cansleep(di->reset_gpio, 1);
+ msleep(RESET_DELAY_MS);
+ gpio_set_value_cansleep(di->reset_gpio, 0);
+ udelay(RESET_LOW_DELAY_US);
+ gpio_set_value_cansleep(di->reset_gpio, 1);
+ msleep(RESET_DELAY_MS);
+
+ return 0;
+}
+
+static int power_off(struct mcde_display_device *dev)
+{
+ struct device_info *di = get_drvdata(dev);
+
+ dev_dbg(&dev->dev, "%s:Reset & power off sony display\n", __func__);
+
+ gpio_set_value_cansleep(di->reset_gpio, 0);
+ msleep(RESET_DELAY_MS);
+ regulator_disable(di->regulator);
+
+ return 0;
+}
+
+static int display_on(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "Display on sony display\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_EXIT_SLEEP_MODE,
+ NULL, 0);
+ if (ret)
+ return ret;
+ msleep(SLEEP_OUT_DELAY_MS);
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_ON,
+ NULL, 0);
+}
+
+static int display_off(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "Display off sony display\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_OFF,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_ENTER_SLEEP_MODE,
+ NULL, 0);
+}
+
+static int sony_acx424akp_set_scan_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+ u8 param[MCDE_MAX_DSI_DIRECT_CMD_WRITE];
+
+ dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__);
+
+	/* 180 degree rotation for the Sony ACX424AKP display */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY) {
+ param[0] = 0xAA;
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, 0xf3, param, 1);
+ if (ret)
+ return ret;
+
+ param[0] = 0x00;
+ param[1] = 0x00;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+
+ param[0] = 0xC9;
+ param[1] = 0x01;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+
+ param[0] = 0xA2;
+ param[1] = 0x00;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+
+ param[0] = 0xFF;
+ param[1] = 0xAA;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static int sony_acx424akp_set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+
+ dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__);
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ ret = power_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ret = display_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+
+ ret = display_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ ret = power_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+ return sony_acx424akp_set_scan_mode(ddev, power_mode);
+}
+
+static int __devinit sony_acx424akp_probe(struct mcde_display_device *dev)
+{
+ int ret = 0;
+ u16 id = 0;
+ struct device_info *di;
+ struct mcde_port *port;
+ struct mcde_display_sony_acx424akp_platform_data *pdata =
+ dev->dev.platform_data;
+
+ if (pdata == NULL || !pdata->reset_gpio) {
+ dev_err(&dev->dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ port = dev->port;
+ di->reset_gpio = pdata->reset_gpio;
+ di->port.type = MCDE_PORTTYPE_DSI;
+ di->port.mode = MCDE_PORTMODE_CMD;
+ di->port.pixel_format = MCDE_PORTPIXFMT_DSI_24BPP;
+ di->port.sync_src = MCDE_SYNCSRC_BTA;
+ di->port.phy.dsi.num_data_lanes = 2;
+ di->port.link = port->link;
+ di->port.phy.dsi.host_eot_gen = true;
+ /* TODO: Move UI to mcde_hw.c when clk_get_rate(dsi) is done */
+ di->port.phy.dsi.ui = 9;
+ di->port.phy.dsi.hs_freq = DSI_HS_FREQ_HZ;
+ di->port.phy.dsi.lp_freq = DSI_LP_FREQ_HZ;
+
+ ret = gpio_request(di->reset_gpio, NULL);
+ if (WARN_ON(ret))
+ goto gpio_request_failed;
+
+ gpio_direction_output(di->reset_gpio, 1);
+ di->regulator = regulator_get(&dev->dev, IO_REGU);
+ if (IS_ERR(di->regulator)) {
+ ret = PTR_ERR(di->regulator);
+ di->regulator = NULL;
+ goto regulator_get_failed;
+ }
+ ret = regulator_set_voltage(di->regulator, IO_REGU_MIN, IO_REGU_MAX);
+ if (WARN_ON(ret))
+ goto regulator_voltage_failed;
+
+ dev->set_power_mode = sony_acx424akp_set_power_mode;
+
+ dev->port = &di->port;
+ dev->native_x_res = 480;
+ dev->native_y_res = 854;
+ dev_set_drvdata(&dev->dev, di);
+
+	/*
+	 * When U-Boot has displayed a startup screen, it has already
+	 * turned on the display power, but the regulator framework
+	 * does not know about that. In that case the display driver
+	 * has to enable the regulator for the display itself.
+	 */
+ if (dev->power_mode != MCDE_DISPLAY_PM_OFF) {
+ (void) regulator_enable(di->regulator);
+ } else {
+ power_on(dev);
+ dev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ ret = display_read_deviceid(dev, &id);
+ if (ret)
+ goto read_id_failed;
+
+ switch (id) {
+ case DISPLAY_SONY_ACX424AKP:
+ case DISPLAY_SONY_ACX424AKP_ID2:
+ pdata->disp_panel = id;
+ dev_info(&dev->dev,
+ "Sony ACX424AKP display (ID 0x%.4X) probed\n", id);
+ break;
+	default:
+		pdata->disp_panel = DISPLAY_NONE;
+		dev_info(&dev->dev,
+			"Display not recognized (ID 0x%.4X)\n", id);
+		ret = -ENODEV;
+		goto read_id_failed;
+ }
+
+ return 0;
+
+read_id_failed:
+regulator_voltage_failed:
+ regulator_put(di->regulator);
+regulator_get_failed:
+ gpio_free(di->reset_gpio);
+gpio_request_failed:
+ kfree(di);
+ return ret;
+}
+
+static int __devexit sony_acx424akp_remove(struct mcde_display_device *dev)
+{
+ struct device_info *di = get_drvdata(dev);
+
+ dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+
+ regulator_put(di->regulator);
+ gpio_direction_input(di->reset_gpio);
+ gpio_free(di->reset_gpio);
+
+ kfree(di);
+
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int sony_acx424akp_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+
+	/* set_power_mode handles the call to platform_enable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+ ddev->set_synchronized_update(ddev,
+ ddev->get_synchronized_update(ddev));
+ return ret;
+}
+
+static int sony_acx424akp_suspend(struct mcde_display_device *ddev,
+ pm_message_t state)
+{
+ int ret;
+
+	/* set_power_mode handles the call to platform_disable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+ return ret;
+}
+#endif
+
+static struct mcde_display_driver sony_acx424akp_driver = {
+ .probe = sony_acx424akp_probe,
+ .remove = sony_acx424akp_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = sony_acx424akp_suspend,
+ .resume = sony_acx424akp_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "mcde_disp_sony_acx424akp",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_sony_acx424akp_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&sony_acx424akp_driver);
+}
+module_init(mcde_display_sony_acx424akp_init);
+
+static void __exit mcde_display_sony_acx424akp_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&sony_acx424akp_driver);
+}
+module_exit(mcde_display_sony_acx424akp_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE Sony ACX424AKP DCS display driver");
diff --git a/drivers/video/mcde/display-vuib500-dpi.c b/drivers/video/mcde/display-vuib500-dpi.c
new file mode 100644
index 00000000000..2bd5b990608
--- /dev/null
+++ b/drivers/video/mcde/display-vuib500-dpi.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE DPI display driver
+ * The VUIB500 is a user interface board that can be attached to an HREF. It
+ * supports the DPI pixel interface and converts this to an analog VGA signal,
+ * which can be connected to a monitor using a DSUB connector. The VUIB board
+ * uses an external power supply of 5V.
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_display-vuib500-dpi.h>
+
+#define DPI_DISP_TRACE dev_dbg(&ddev->dev, "%s\n", __func__)
+
+static int try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+static int set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+
+static int __devinit dpi_display_probe(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ struct mcde_display_dpi_platform_data *pdata = ddev->dev.platform_data;
+ DPI_DISP_TRACE;
+
+ if (pdata == NULL) {
+ dev_err(&ddev->dev, "%s:Platform data missing\n", __func__);
+ ret = -EINVAL;
+ goto no_pdata;
+ }
+
+ if (ddev->port->type != MCDE_PORTTYPE_DPI) {
+ dev_err(&ddev->dev,
+ "%s:Invalid port type %d\n",
+ __func__, ddev->port->type);
+ ret = -EINVAL;
+ goto invalid_port_type;
+ }
+
+ ddev->try_video_mode = try_video_mode;
+ ddev->set_video_mode = set_video_mode;
+ dev_info(&ddev->dev, "DPI display probed\n");
+
+ goto out;
+invalid_port_type:
+no_pdata:
+out:
+ return ret;
+}
+
+static int __devexit dpi_display_remove(struct mcde_display_device *ddev)
+{
+ DPI_DISP_TRACE;
+
+ ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+
+ return 0;
+}
+
+static int dpi_display_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+ DPI_DISP_TRACE;
+
+	/* set_power_mode handles the call to platform_enable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+ return ret;
+}
+
+static int dpi_display_suspend(struct mcde_display_device *ddev,
+ pm_message_t state)
+{
+ int ret;
+ DPI_DISP_TRACE;
+
+	/* set_power_mode handles the call to platform_disable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+ return ret;
+}
+
+static void print_vmode(struct mcde_video_mode *vmode)
+{
+ pr_debug("resolution: %dx%d\n", vmode->xres, vmode->yres);
+ pr_debug(" pixclock: %d\n", vmode->pixclock);
+ pr_debug(" hbp: %d\n", vmode->hbp);
+ pr_debug(" hfp: %d\n", vmode->hfp);
+ pr_debug(" hsw: %d\n", vmode->hsw);
+ pr_debug(" vbp: %d\n", vmode->vbp);
+ pr_debug(" vfp: %d\n", vmode->vfp);
+ pr_debug(" vsw: %d\n", vmode->vsw);
+ pr_debug("interlaced: %s\n", vmode->interlaced ? "true" : "false");
+}
+
+/* Taken from the programmed value of the LCD clock in PRCMU */
+#define PIX_CLK_FREQ 25000000
+#define VMODE_XRES 640
+#define VMODE_YRES 480
+
+static int try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int res = -EINVAL;
+ DPI_DISP_TRACE;
+
+	if (ddev == NULL || video_mode == NULL) {
+		pr_warn("%s:ddev = NULL or video_mode = NULL\n", __func__);
+		return res;
+ }
+
+ if (video_mode->xres == VMODE_XRES && video_mode->yres == VMODE_YRES) {
+ video_mode->hbp = 40;
+ video_mode->hfp = 8;
+ video_mode->hsw = 96;
+ video_mode->vbp = 25;
+ video_mode->vfp = 2;
+ video_mode->vsw = 2;
+ /*
+		 * The pixclock setting is not used within MCDE; the clock is
+		 * set up elsewhere. The pixclock value is, however, visible
+		 * in user space.
+ */
+ video_mode->pixclock = (int) (1e+12 * (1.0 / PIX_CLK_FREQ));
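+		/* With PIX_CLK_FREQ at 25 MHz this evaluates to 40000 ps per pixel. */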
+ res = 0;
+ } /* TODO: add more supported resolutions here */
+ video_mode->interlaced = false;
+
+ if (res == 0)
+ print_vmode(video_mode);
+ else
+ dev_warn(&ddev->dev,
+ "%s:Failed to find video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+
+ return res;
+}
+
+static int set_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int res;
+ DPI_DISP_TRACE;
+
+	if (ddev == NULL || video_mode == NULL) {
+		pr_warn("%s:ddev = NULL or video_mode = NULL\n", __func__);
+		return -EINVAL;
+ }
+ if (video_mode->xres != VMODE_XRES || video_mode->yres != VMODE_YRES) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+ ddev->video_mode = *video_mode;
+ print_vmode(video_mode);
+
+ res = mcde_chnl_set_video_mode(ddev->chnl_state, &ddev->video_mode);
+ if (res < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode on channel\n",
+ __func__);
+	}
+ /* notify mcde display driver about updated video mode */
+ ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+ return res;
+}
+
+static struct mcde_display_driver dpi_display_driver = {
+ .probe = dpi_display_probe,
+ .remove = dpi_display_remove,
+ .suspend = dpi_display_suspend,
+ .resume = dpi_display_resume,
+ .driver = {
+ .name = "mcde_display_dpi",
+ },
+};
+
+/* Module init */
+static int __init mcde_dpi_display_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&dpi_display_driver);
+}
+module_init(mcde_dpi_display_init);
+
+static void __exit mcde_dpi_display_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&dpi_display_driver);
+}
+module_exit(mcde_dpi_display_exit);
+
+MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE DPI display driver fro VUIB500 display");
diff --git a/drivers/video/mcde/dsilink_regs.h b/drivers/video/mcde/dsilink_regs.h
new file mode 100644
index 00000000000..29fa75a1752
--- /dev/null
+++ b/drivers/video/mcde/dsilink_regs.h
@@ -0,0 +1,2037 @@
+
+#define DSI_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define DSI_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
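+
+/*
+ * Example (illustrative): with DSI_MCTL_MAIN_DATA_CTL_VID_EN_SHIFT == 2 and
+ * DSI_MCTL_MAIN_DATA_CTL_VID_EN_MASK == 0x00000004,
+ * DSI_MCTL_MAIN_DATA_CTL_VID_EN(1) evaluates to 0x00000004 and
+ * DSI_REG2VAL(DSI_MCTL_MAIN_DATA_CTL, VID_EN, 0x00000004) evaluates to 1.
+ */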
+
+#define DSI_MCTL_INTEGRATION_MODE 0x00000000
+#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN_SHIFT 0
+#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN_MASK 0x00000001
+#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_INTEGRATION_MODE, INT_MODE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL 0x00000004
+#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN_SHIFT 0
+#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN_MASK 0x00000001
+#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, LINK_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_SHIFT 1
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_MASK 0x00000002
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_CMD 0
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_VID 1
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, \
+ DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_##__x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_VID_EN_SHIFT 2
+#define DSI_MCTL_MAIN_DATA_CTL_VID_EN_MASK 0x00000004
+#define DSI_MCTL_MAIN_DATA_CTL_VID_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, VID_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL_SHIFT 3
+#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL_MASK 0x00000008
+#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TVG_SEL, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL_SHIFT 4
+#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL_MASK 0x00000010
+#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TBG_SEL, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN_SHIFT 5
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_TE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN_SHIFT 6
+#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN_MASK 0x00000040
+#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF2_TE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN_SHIFT 7
+#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN_MASK 0x00000080
+#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_READ_EN_SHIFT 8
+#define DSI_MCTL_MAIN_DATA_CTL_READ_EN_MASK 0x00000100
+#define DSI_MCTL_MAIN_DATA_CTL_READ_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, READ_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN_SHIFT 9
+#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN_MASK 0x00000200
+#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, BTA_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC_SHIFT 10
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC_MASK 0x00000400
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_GEN_ECC, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM_SHIFT 11
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM_MASK 0x00000800
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_GEN_CHECKSUM, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN_SHIFT 12
+#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN_MASK 0x00001000
+#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, HOST_EOT_GEN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN_SHIFT 13
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN_MASK 0x00002000
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_EOT_GEN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN_SHIFT 14
+#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN_MASK 0x00004000
+#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DLX_REMAP_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN_SHIFT 15
+#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN_MASK 0x00008000
+#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TE_POLLING_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL 0x00000008
+#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN_SHIFT 0
+#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN_MASK 0x00000001
+#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, LANE2_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE_SHIFT 1
+#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE_MASK 0x00000002
+#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS_SHIFT 2
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS_MASK 0x00000004
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN_SHIFT 3
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN_MASK 0x00000008
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, CLK_ULPM_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN_SHIFT 4
+#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN_MASK 0x00000010
+#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, DAT1_ULPM_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN_SHIFT 5
+#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, DAT2_ULPM_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_SHIFT 6
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_MASK 0x000003C0
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, WAIT_BURST_TIME, __x)
+#define DSI_MCTL_PLL_CTL 0x0000000C
+#define DSI_MCTL_PLL_CTL_PLL_MULT_SHIFT 0
+#define DSI_MCTL_PLL_CTL_PLL_MULT_MASK 0x000000FF
+#define DSI_MCTL_PLL_CTL_PLL_MULT(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_MULT, __x)
+#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV_SHIFT 8
+#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV_MASK 0x00003F00
+#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_DIV, __x)
+#define DSI_MCTL_PLL_CTL_PLL_IN_DIV_SHIFT 14
+#define DSI_MCTL_PLL_CTL_PLL_IN_DIV_MASK 0x0001C000
+#define DSI_MCTL_PLL_CTL_PLL_IN_DIV(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_IN_DIV, __x)
+#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2_SHIFT 17
+#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2_MASK 0x00020000
+#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_SEL_DIV2, __x)
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_SHIFT 18
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_MASK 0x00040000
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_INT_PLL 0
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_SYS_PLL 1
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_SEL, \
+ DSI_MCTL_PLL_CTL_PLL_OUT_SEL_##__x)
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_SEL, __x)
+#define DSI_MCTL_PLL_CTL_PLL_MASTER_SHIFT 31
+#define DSI_MCTL_PLL_CTL_PLL_MASTER_MASK 0x80000000
+#define DSI_MCTL_PLL_CTL_PLL_MASTER(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_MASTER, __x)
+#define DSI_MCTL_LANE_STS 0x00000010
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_SHIFT 0
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_MASK 0x00000003
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_START 0
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_IDLE 1
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_HS 2
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_ULPM 3
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, CLKLANE_STATE, \
+ DSI_MCTL_LANE_STS_CLKLANE_STATE_##__x)
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, CLKLANE_STATE, __x)
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_SHIFT 2
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_MASK 0x0000001C
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_START 0
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_IDLE 1
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_WRITE 2
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_ULPM 3
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_READ 4
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE1_STATE, \
+ DSI_MCTL_LANE_STS_DATLANE1_STATE_##__x)
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE1_STATE, __x)
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_SHIFT 5
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_MASK 0x00000060
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_START 0
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_IDLE 1
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_WRITE 2
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_ULPM 3
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE2_STATE, \
+ DSI_MCTL_LANE_STS_DATLANE2_STATE_##__x)
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE2_STATE, __x)
+#define DSI_MCTL_DPHY_TIMEOUT 0x00000014
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_SHIFT 0
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_MASK 0x0000000F
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, CLK_DIV, __x)
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_SHIFT 4
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_MASK 0x0003FFF0
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, HSTX_TO_VAL, __x)
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_SHIFT 18
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_MASK 0xFFFC0000
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, LPRX_TO_VAL, __x)
+#define DSI_MCTL_ULPOUT_TIME 0x00000018
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_SHIFT 0
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_MASK 0x000001FF
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME(__x) \
+ DSI_VAL2REG(DSI_MCTL_ULPOUT_TIME, CKLANE_ULPOUT_TIME, __x)
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_SHIFT 9
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_MASK 0x0003FE00
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME(__x) \
+ DSI_VAL2REG(DSI_MCTL_ULPOUT_TIME, DATA_ULPOUT_TIME, __x)
+#define DSI_MCTL_DPHY_STATIC 0x0000001C
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK_SHIFT 0
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK_MASK 0x00000001
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_CLK, __x)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK_SHIFT 1
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK_MASK 0x00000002
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_CLK, __x)
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1_SHIFT 2
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1_MASK 0x00000004
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_DAT1, __x)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1_SHIFT 3
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1_MASK 0x00000008
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_DAT1, __x)
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2_SHIFT 4
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2_MASK 0x00000010
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_DAT2, __x)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2_SHIFT 5
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2_MASK 0x00000020
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_DAT2, __x)
+#define DSI_MCTL_DPHY_STATIC_UI_X4_SHIFT 6
+#define DSI_MCTL_DPHY_STATIC_UI_X4_MASK 0x00000FC0
+#define DSI_MCTL_DPHY_STATIC_UI_X4(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, UI_X4, __x)
+#define DSI_MCTL_MAIN_EN 0x00000020
+#define DSI_MCTL_MAIN_EN_PLL_START_SHIFT 0
+#define DSI_MCTL_MAIN_EN_PLL_START_MASK 0x00000001
+#define DSI_MCTL_MAIN_EN_PLL_START(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, PLL_START, __x)
+#define DSI_MCTL_MAIN_EN_CKLANE_EN_SHIFT 3
+#define DSI_MCTL_MAIN_EN_CKLANE_EN_MASK 0x00000008
+#define DSI_MCTL_MAIN_EN_CKLANE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, CKLANE_EN, __x)
+#define DSI_MCTL_MAIN_EN_DAT1_EN_SHIFT 4
+#define DSI_MCTL_MAIN_EN_DAT1_EN_MASK 0x00000010
+#define DSI_MCTL_MAIN_EN_DAT1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT1_EN, __x)
+#define DSI_MCTL_MAIN_EN_DAT2_EN_SHIFT 5
+#define DSI_MCTL_MAIN_EN_DAT2_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_EN_DAT2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT2_EN, __x)
+#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ_SHIFT 6
+#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ_MASK 0x00000040
+#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, CLKLANE_ULPM_REQ, __x)
+#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_SHIFT 7
+#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_MASK 0x00000080
+#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT1_ULPM_REQ, __x)
+#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ_SHIFT 8
+#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ_MASK 0x00000100
+#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT2_ULPM_REQ, __x)
+#define DSI_MCTL_MAIN_EN_IF1_EN_SHIFT 9
+#define DSI_MCTL_MAIN_EN_IF1_EN_MASK 0x00000200
+#define DSI_MCTL_MAIN_EN_IF1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, IF1_EN, __x)
+#define DSI_MCTL_MAIN_EN_IF2_EN_SHIFT 10
+#define DSI_MCTL_MAIN_EN_IF2_EN_MASK 0x00000400
+#define DSI_MCTL_MAIN_EN_IF2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, IF2_EN, __x)
+#define DSI_MCTL_MAIN_STS 0x00000024
+#define DSI_MCTL_MAIN_STS_PLL_LOCK_SHIFT 0
+#define DSI_MCTL_MAIN_STS_PLL_LOCK_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_PLL_LOCK(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, PLL_LOCK, __x)
+#define DSI_MCTL_MAIN_STS_CLKLANE_READY_SHIFT 1
+#define DSI_MCTL_MAIN_STS_CLKLANE_READY_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_CLKLANE_READY(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, CLKLANE_READY, __x)
+#define DSI_MCTL_MAIN_STS_DAT1_READY_SHIFT 2
+#define DSI_MCTL_MAIN_STS_DAT1_READY_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_DAT1_READY(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, DAT1_READY, __x)
+#define DSI_MCTL_MAIN_STS_DAT2_READY_SHIFT 3
+#define DSI_MCTL_MAIN_STS_DAT2_READY_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_DAT2_READY(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, DAT2_READY, __x)
+#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR_SHIFT 4
+#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, HSTX_TO_ERR, __x)
+#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR_SHIFT 5
+#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, LPRX_TO_ERR, __x)
+#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK_SHIFT 6
+#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, CRS_UNTERM_PCK, __x)
+#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK_SHIFT 7
+#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, VRS_UNTERM_PCK, __x)
+#define DSI_MCTL_DPHY_ERR 0x00000028
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_1_SHIFT 6
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_1_MASK 0x00000040
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_ESC_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_2_SHIFT 7
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_2_MASK 0x00000080
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_ESC_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1_SHIFT 8
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1_MASK 0x00000100
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_SYNCESC_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2_SHIFT 9
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2_MASK 0x00000200
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_SYNCESC_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1_SHIFT 10
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1_MASK 0x00000400
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONTROL_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2_SHIFT 11
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2_MASK 0x00000800
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONTROL_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1_SHIFT 12
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1_MASK 0x00001000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP0_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2_SHIFT 13
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2_MASK 0x00002000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP0_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1_SHIFT 14
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1_MASK 0x00004000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP1_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2_SHIFT 15
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2_MASK 0x00008000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP1_2, __x)
+#define DSI_INT_VID_RDDATA 0x00000030
+#define DSI_INT_VID_RDDATA_IF_DATA_SHIFT 0
+#define DSI_INT_VID_RDDATA_IF_DATA_MASK 0x0000FFFF
+#define DSI_INT_VID_RDDATA_IF_DATA(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_DATA, __x)
+#define DSI_INT_VID_RDDATA_IF_VALID_SHIFT 16
+#define DSI_INT_VID_RDDATA_IF_VALID_MASK 0x00010000
+#define DSI_INT_VID_RDDATA_IF_VALID(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_VALID, __x)
+#define DSI_INT_VID_RDDATA_IF_START_SHIFT 17
+#define DSI_INT_VID_RDDATA_IF_START_MASK 0x00020000
+#define DSI_INT_VID_RDDATA_IF_START(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_START, __x)
+#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC_SHIFT 18
+#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC_MASK 0x00040000
+#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_FRAME_SYNC, __x)
+#define DSI_INT_VID_GNT 0x00000034
+#define DSI_INT_VID_GNT_IF_STALL_SHIFT 0
+#define DSI_INT_VID_GNT_IF_STALL_MASK 0x00000001
+#define DSI_INT_VID_GNT_IF_STALL(__x) \
+ DSI_VAL2REG(DSI_INT_VID_GNT, IF_STALL, __x)
+#define DSI_INT_CMD_RDDATA 0x00000038
+#define DSI_INT_CMD_RDDATA_IF_DATA_SHIFT 0
+#define DSI_INT_CMD_RDDATA_IF_DATA_MASK 0x0000FFFF
+#define DSI_INT_CMD_RDDATA_IF_DATA(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_DATA, __x)
+#define DSI_INT_CMD_RDDATA_IF_VALID_SHIFT 16
+#define DSI_INT_CMD_RDDATA_IF_VALID_MASK 0x00010000
+#define DSI_INT_CMD_RDDATA_IF_VALID(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_VALID, __x)
+#define DSI_INT_CMD_RDDATA_IF_START_SHIFT 17
+#define DSI_INT_CMD_RDDATA_IF_START_MASK 0x00020000
+#define DSI_INT_CMD_RDDATA_IF_START(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_START, __x)
+#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC_SHIFT 18
+#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC_MASK 0x00040000
+#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_FRAME_SYNC, __x)
+#define DSI_INT_CMD_GNT 0x0000003C
+#define DSI_INT_CMD_GNT_IF_STALL_SHIFT 0
+#define DSI_INT_CMD_GNT_IF_STALL_MASK 0x00000001
+#define DSI_INT_CMD_GNT_IF_STALL(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_GNT, IF_STALL, __x)
+#define DSI_INT_INTERRUPT_CTL 0x00000040
+#define DSI_INT_INTERRUPT_CTL_INT_VAL_SHIFT 0
+#define DSI_INT_INTERRUPT_CTL_INT_VAL_MASK 0x00000001
+#define DSI_INT_INTERRUPT_CTL_INT_VAL(__x) \
+ DSI_VAL2REG(DSI_INT_INTERRUPT_CTL, INT_VAL, __x)
+#define DSI_CMD_MODE_CTL 0x00000050
+#define DSI_CMD_MODE_CTL_IF1_ID_SHIFT 0
+#define DSI_CMD_MODE_CTL_IF1_ID_MASK 0x00000003
+#define DSI_CMD_MODE_CTL_IF1_ID(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF1_ID, __x)
+#define DSI_CMD_MODE_CTL_IF2_ID_SHIFT 2
+#define DSI_CMD_MODE_CTL_IF2_ID_MASK 0x0000000C
+#define DSI_CMD_MODE_CTL_IF2_ID(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF2_ID, __x)
+#define DSI_CMD_MODE_CTL_IF1_LP_EN_SHIFT 4
+#define DSI_CMD_MODE_CTL_IF1_LP_EN_MASK 0x00000010
+#define DSI_CMD_MODE_CTL_IF1_LP_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF1_LP_EN, __x)
+#define DSI_CMD_MODE_CTL_IF2_LP_EN_SHIFT 5
+#define DSI_CMD_MODE_CTL_IF2_LP_EN_MASK 0x00000020
+#define DSI_CMD_MODE_CTL_IF2_LP_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF2_LP_EN, __x)
+#define DSI_CMD_MODE_CTL_ARB_MODE_SHIFT 6
+#define DSI_CMD_MODE_CTL_ARB_MODE_MASK 0x00000040
+#define DSI_CMD_MODE_CTL_ARB_MODE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, ARB_MODE, __x)
+#define DSI_CMD_MODE_CTL_ARB_PRI_SHIFT 7
+#define DSI_CMD_MODE_CTL_ARB_PRI_MASK 0x00000080
+#define DSI_CMD_MODE_CTL_ARB_PRI(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, ARB_PRI, __x)
+#define DSI_CMD_MODE_CTL_FIL_VALUE_SHIFT 8
+#define DSI_CMD_MODE_CTL_FIL_VALUE_MASK 0x0000FF00
+#define DSI_CMD_MODE_CTL_FIL_VALUE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, FIL_VALUE, __x)
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT_SHIFT 16
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT_MASK 0x03FF0000
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, TE_TIMEOUT, __x)
+#define DSI_CMD_MODE_STS 0x00000054
+#define DSI_CMD_MODE_STS_ERR_NO_TE_SHIFT 0
+#define DSI_CMD_MODE_STS_ERR_NO_TE_MASK 0x00000001
+#define DSI_CMD_MODE_STS_ERR_NO_TE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_NO_TE, __x)
+#define DSI_CMD_MODE_STS_ERR_TE_MISS_SHIFT 1
+#define DSI_CMD_MODE_STS_ERR_TE_MISS_MASK 0x00000002
+#define DSI_CMD_MODE_STS_ERR_TE_MISS(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_TE_MISS, __x)
+#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN_SHIFT 2
+#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN_MASK 0x00000004
+#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_SDI1_UNDERRUN, __x)
+#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN_SHIFT 3
+#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN_MASK 0x00000008
+#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_SDI2_UNDERRUN, __x)
+#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD_SHIFT 4
+#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD_MASK 0x00000010
+#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_UNWANTED_RD, __x)
+#define DSI_CMD_MODE_STS_CSM_RUNNING_SHIFT 5
+#define DSI_CMD_MODE_STS_CSM_RUNNING_MASK 0x00000020
+#define DSI_CMD_MODE_STS_CSM_RUNNING(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, CSM_RUNNING, __x)
+#define DSI_DIRECT_CMD_SEND 0x00000060
+#define DSI_DIRECT_CMD_SEND_START_SHIFT 0
+#define DSI_DIRECT_CMD_SEND_START_MASK 0xFFFFFFFF
+#define DSI_DIRECT_CMD_SEND_START(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_SEND, START, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS 0x00000064
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_SHIFT 0
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_MASK 0x00000007
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_WRITE 0
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_READ 1
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TE_REQ 4
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TRIG_REQ 5
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_BTA_REQ 6
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_NAT, \
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_##__x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_NAT, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT_SHIFT 3
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT_MASK 0x00000008
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_LONGNOTSHORT, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT 8
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_MASK 0x00003F00
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_0 3
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_1 19
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_2 35
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_LONG_WRITE 41
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_0 5
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 21
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_LONG_WRITE 57
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_READ 6
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SET_MAX_PKT_SIZE 55
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_HEAD, \
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_##__x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_HEAD, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT 14
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_MASK 0x0000C000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_ID, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT 16
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_MASK 0x001F0000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_SIZE, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN_SHIFT 21
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN_MASK 0x00200000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_LP_EN, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL_SHIFT 24
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL_MASK 0x0F000000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, TRIGGER_VAL, __x)
+#define DSI_DIRECT_CMD_STS 0x00000068
+#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION_SHIFT 0
+#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, CMD_TRANSMISSION, __x)
+#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED_SHIFT 1
+#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, WRITE_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED_SHIFT 2
+#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_SHIFT 3
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, READ_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED_SHIFT 4
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACKNOWLEDGE_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED_SHIFT 5
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACKNOWLEDGE_WITH_ERR_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED_SHIFT 6
+#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_TE_RECEIVED_SHIFT 7
+#define DSI_DIRECT_CMD_STS_TE_RECEIVED_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_TE_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TE_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_BTA_COMPLETED_SHIFT 8
+#define DSI_DIRECT_CMD_STS_BTA_COMPLETED_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_BTA_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, BTA_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_BTA_FINISHED_SHIFT 9
+#define DSI_DIRECT_CMD_STS_BTA_FINISHED_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_BTA_FINISHED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, BTA_FINISHED, __x)
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR_SHIFT 10
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, READ_COMPLETED_WITH_ERR, __x)
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL_SHIFT 11
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL_MASK 0x00007800
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_VAL, __x)
+#define DSI_DIRECT_CMD_STS_ACK_VAL_SHIFT 16
+#define DSI_DIRECT_CMD_STS_ACK_VAL_MASK 0xFFFF0000
+#define DSI_DIRECT_CMD_STS_ACK_VAL(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACK_VAL, __x)
+#define DSI_DIRECT_CMD_RD_INIT 0x0000006C
+#define DSI_DIRECT_CMD_RD_INIT_RESET_SHIFT 0
+#define DSI_DIRECT_CMD_RD_INIT_RESET_MASK 0xFFFFFFFF
+#define DSI_DIRECT_CMD_RD_INIT_RESET(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_INIT, RESET, __x)
+#define DSI_DIRECT_CMD_WRDAT0 0x00000070
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT0_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT0_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT0(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT0, __x)
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT1_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT1_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT1(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT1, __x)
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT2_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT2_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT2(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT2, __x)
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT3_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT3_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT3(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT3, __x)
+#define DSI_DIRECT_CMD_WRDAT1 0x00000074
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT4_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT4_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT4(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT4, __x)
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT5_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT5_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT5(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT5, __x)
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT6_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT6_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT6(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT6, __x)
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT7_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT7_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT7(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT7, __x)
+#define DSI_DIRECT_CMD_WRDAT2 0x00000078
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT8_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT8_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT8(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT8, __x)
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT9_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT9_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT9(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT9, __x)
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT10_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT10_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT10(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT10, __x)
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT11_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT11_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT11(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT11, __x)
+#define DSI_DIRECT_CMD_WRDAT3 0x0000007C
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT12_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT12_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT12(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT12, __x)
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT13_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT13_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT13(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT13, __x)
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT14_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT14_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT14(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT14, __x)
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT15_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT15_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT15(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT15, __x)
+#define DSI_DIRECT_CMD_RDDAT 0x00000080
+#define DSI_DIRECT_CMD_RDDAT_RDDAT0_SHIFT 0
+#define DSI_DIRECT_CMD_RDDAT_RDDAT0_MASK 0x000000FF
+#define DSI_DIRECT_CMD_RDDAT_RDDAT0(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT0, __x)
+#define DSI_DIRECT_CMD_RDDAT_RDDAT1_SHIFT 8
+#define DSI_DIRECT_CMD_RDDAT_RDDAT1_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_RDDAT_RDDAT1(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT1, __x)
+#define DSI_DIRECT_CMD_RDDAT_RDDAT2_SHIFT 16
+#define DSI_DIRECT_CMD_RDDAT_RDDAT2_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_RDDAT_RDDAT2(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT2, __x)
+#define DSI_DIRECT_CMD_RDDAT_RDDAT3_SHIFT 24
+#define DSI_DIRECT_CMD_RDDAT_RDDAT3_MASK 0xFF000000
+#define DSI_DIRECT_CMD_RDDAT_RDDAT3(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT3, __x)
+#define DSI_DIRECT_CMD_RD_PROPERTY 0x00000084
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_SHIFT 0
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_MASK 0x0000FFFF
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_SIZE, __x)
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID_SHIFT 16
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID_MASK 0x00030000
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_ID, __x)
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC_SHIFT 18
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC_MASK 0x00040000
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_DCSNOTGENERIC, __x)
+#define DSI_DIRECT_CMD_RD_STS 0x00000088
+#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_FIXED, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_UNCORRECTABLE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_CHECKSUM, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_UNDECODABLE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_RECEIVE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_OVERSIZE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_WRONG_LENGTH, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_MISSING_EOT, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_EOT_WITH_ERR, __x)
+#define DSI_VID_MAIN_CTL 0x00000090
+#define DSI_VID_MAIN_CTL_START_MODE_SHIFT 0
+#define DSI_VID_MAIN_CTL_START_MODE_MASK 0x00000003
+#define DSI_VID_MAIN_CTL_START_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, START_MODE, __x)
+#define DSI_VID_MAIN_CTL_STOP_MODE_SHIFT 2
+#define DSI_VID_MAIN_CTL_STOP_MODE_MASK 0x0000000C
+#define DSI_VID_MAIN_CTL_STOP_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, STOP_MODE, __x)
+#define DSI_VID_MAIN_CTL_VID_ID_SHIFT 4
+#define DSI_VID_MAIN_CTL_VID_ID_MASK 0x00000030
+#define DSI_VID_MAIN_CTL_VID_ID(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_ID, __x)
+#define DSI_VID_MAIN_CTL_HEADER_SHIFT 6
+#define DSI_VID_MAIN_CTL_HEADER_MASK 0x00000FC0
+#define DSI_VID_MAIN_CTL_HEADER(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, HEADER, __x)
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_SHIFT 12
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_MASK 0x00003000
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_16BITS 0
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS 1
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS_LOOSE 2
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_24BITS 3
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_PIXEL_MODE, \
+ DSI_VID_MAIN_CTL_VID_PIXEL_MODE_##__x)
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_PIXEL_MODE, __x)
+#define DSI_VID_MAIN_CTL_BURST_MODE_SHIFT 14
+#define DSI_VID_MAIN_CTL_BURST_MODE_MASK 0x00004000
+#define DSI_VID_MAIN_CTL_BURST_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, BURST_MODE, __x)
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE_SHIFT 15
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE_MASK 0x00008000
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, SYNC_PULSE_ACTIVE, __x)
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL_SHIFT 16
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL_MASK 0x00010000
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, SYNC_PULSE_HORIZONTAL, __x)
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_SHIFT 17
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_MASK 0x00060000
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_NULL 0
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_BLANKING 1
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_0 2
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_1 3
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, \
+ DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_##__x)
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, __x)
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_SHIFT 19
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_MASK 0x00180000
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_NULL 0
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_BLANKING 1
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 2
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_1 3
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKEOL_MODE, \
+ DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_##__x)
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKEOL_MODE, __x)
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE_SHIFT 21
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE_MASK 0x00600000
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, RECOVERY_MODE, __x)
+#define DSI_VID_VSIZE 0x00000094
+#define DSI_VID_VSIZE_VSA_LENGTH_SHIFT 0
+#define DSI_VID_VSIZE_VSA_LENGTH_MASK 0x0000003F
+#define DSI_VID_VSIZE_VSA_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VSA_LENGTH, __x)
+#define DSI_VID_VSIZE_VBP_LENGTH_SHIFT 6
+#define DSI_VID_VSIZE_VBP_LENGTH_MASK 0x00000FC0
+#define DSI_VID_VSIZE_VBP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VBP_LENGTH, __x)
+#define DSI_VID_VSIZE_VFP_LENGTH_SHIFT 12
+#define DSI_VID_VSIZE_VFP_LENGTH_MASK 0x000FF000
+#define DSI_VID_VSIZE_VFP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VFP_LENGTH, __x)
+#define DSI_VID_VSIZE_VACT_LENGTH_SHIFT 20
+#define DSI_VID_VSIZE_VACT_LENGTH_MASK 0x7FF00000
+#define DSI_VID_VSIZE_VACT_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VACT_LENGTH, __x)
+#define DSI_VID_HSIZE1 0x00000098
+#define DSI_VID_HSIZE1_HSA_LENGTH_SHIFT 0
+#define DSI_VID_HSIZE1_HSA_LENGTH_MASK 0x000003FF
+#define DSI_VID_HSIZE1_HSA_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE1, HSA_LENGTH, __x)
+#define DSI_VID_HSIZE1_HBP_LENGTH_SHIFT 10
+#define DSI_VID_HSIZE1_HBP_LENGTH_MASK 0x000FFC00
+#define DSI_VID_HSIZE1_HBP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE1, HBP_LENGTH, __x)
+#define DSI_VID_HSIZE1_HFP_LENGTH_SHIFT 20
+#define DSI_VID_HSIZE1_HFP_LENGTH_MASK 0x7FF00000
+#define DSI_VID_HSIZE1_HFP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE1, HFP_LENGTH, __x)
+#define DSI_VID_HSIZE2 0x0000009C
+#define DSI_VID_HSIZE2_RGB_SIZE_SHIFT 0
+#define DSI_VID_HSIZE2_RGB_SIZE_MASK 0x00001FFF
+#define DSI_VID_HSIZE2_RGB_SIZE(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE2, RGB_SIZE, __x)
+#define DSI_VID_BLKSIZE1 0x000000A0
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_SHIFT 0
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_MASK 0x00001FFF
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK(__x) \
+ DSI_VAL2REG(DSI_VID_BLKSIZE1, BLKLINE_EVENT_PCK, __x)
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK_SHIFT 13
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK_MASK 0x03FFE000
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK(__x) \
+ DSI_VAL2REG(DSI_VID_BLKSIZE1, BLKEOL_PCK, __x)
+#define DSI_VID_BLKSIZE2 0x000000A4
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_SHIFT 0
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_MASK 0x00001FFF
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK(__x) \
+ DSI_VAL2REG(DSI_VID_BLKSIZE2, BLKLINE_PULSE_PCK, __x)
+#define DSI_VID_PCK_TIME 0x000000A8
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT 0
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK 0x00001FFF
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION(__x) \
+ DSI_VAL2REG(DSI_VID_PCK_TIME, BLKEOL_DURATION, __x)
+#define DSI_VID_DPHY_TIME 0x000000AC
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT 0
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_MASK 0x00001FFF
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION(__x) \
+ DSI_VAL2REG(DSI_VID_DPHY_TIME, REG_LINE_DURATION, __x)
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT 13
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_MASK 0x00FFE000
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME(__x) \
+ DSI_VAL2REG(DSI_VID_DPHY_TIME, REG_WAKEUP_TIME, __x)
+#define DSI_VID_ERR_COLOR 0x000000B0
+#define DSI_VID_ERR_COLOR_COL_RED_SHIFT 0
+#define DSI_VID_ERR_COLOR_COL_RED_MASK 0x000000FF
+#define DSI_VID_ERR_COLOR_COL_RED(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_RED, __x)
+#define DSI_VID_ERR_COLOR_COL_GREEN_SHIFT 8
+#define DSI_VID_ERR_COLOR_COL_GREEN_MASK 0x0000FF00
+#define DSI_VID_ERR_COLOR_COL_GREEN(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_GREEN, __x)
+#define DSI_VID_ERR_COLOR_COL_BLUE_SHIFT 16
+#define DSI_VID_ERR_COLOR_COL_BLUE_MASK 0x00FF0000
+#define DSI_VID_ERR_COLOR_COL_BLUE(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_BLUE, __x)
+#define DSI_VID_ERR_COLOR_PAD_VAL_SHIFT 24
+#define DSI_VID_ERR_COLOR_PAD_VAL_MASK 0xFF000000
+#define DSI_VID_ERR_COLOR_PAD_VAL(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, PAD_VAL, __x)
+#define DSI_VID_VPOS 0x000000B4
+#define DSI_VID_VPOS_LINE_POS_SHIFT 0
+#define DSI_VID_VPOS_LINE_POS_MASK 0x00000003
+#define DSI_VID_VPOS_LINE_POS(__x) \
+ DSI_VAL2REG(DSI_VID_VPOS, LINE_POS, __x)
+#define DSI_VID_VPOS_LINE_VAL_SHIFT 2
+#define DSI_VID_VPOS_LINE_VAL_MASK 0x00001FFC
+#define DSI_VID_VPOS_LINE_VAL(__x) \
+ DSI_VAL2REG(DSI_VID_VPOS, LINE_VAL, __x)
+#define DSI_VID_HPOS 0x000000B8
+#define DSI_VID_HPOS_HORIZONTAL_POS_SHIFT 0
+#define DSI_VID_HPOS_HORIZONTAL_POS_MASK 0x00000007
+#define DSI_VID_HPOS_HORIZONTAL_POS(__x) \
+ DSI_VAL2REG(DSI_VID_HPOS, HORIZONTAL_POS, __x)
+#define DSI_VID_HPOS_HORIZONTAL_VAL_SHIFT 3
+#define DSI_VID_HPOS_HORIZONTAL_VAL_MASK 0x0000FFF8
+#define DSI_VID_HPOS_HORIZONTAL_VAL(__x) \
+ DSI_VAL2REG(DSI_VID_HPOS, HORIZONTAL_VAL, __x)
+#define DSI_VID_MODE_STS 0x000000BC
+#define DSI_VID_MODE_STS_VSG_RUNNING_SHIFT 0
+#define DSI_VID_MODE_STS_VSG_RUNNING_MASK 0x00000001
+#define DSI_VID_MODE_STS_VSG_RUNNING(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, VSG_RUNNING, __x)
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA_SHIFT 1
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA_MASK 0x00000002
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_DATA, __x)
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC_SHIFT 2
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC_MASK 0x00000004
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_HSYNC, __x)
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC_SHIFT 3
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC_MASK 0x00000008
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_VSYNC, __x)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH_SHIFT 4
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH_MASK 0x00000010
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, REG_ERR_SMALL_LENGTH, __x)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT_SHIFT 5
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT_MASK 0x00000020
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, REG_ERR_SMALL_HEIGHT, __x)
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE_SHIFT 6
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE_MASK 0x00000040
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_BURSTWRITE, __x)
+#define DSI_VID_MODE_STS_ERR_LONGWRITE_SHIFT 7
+#define DSI_VID_MODE_STS_ERR_LONGWRITE_MASK 0x00000080
+#define DSI_VID_MODE_STS_ERR_LONGWRITE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_LONGWRITE, __x)
+#define DSI_VID_MODE_STS_ERR_LONGREAD_SHIFT 8
+#define DSI_VID_MODE_STS_ERR_LONGREAD_MASK 0x00000100
+#define DSI_VID_MODE_STS_ERR_LONGREAD(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_LONGREAD, __x)
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH_SHIFT 9
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH_MASK 0x00000200
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_VRS_WRONG_LENGTH, __x)
+#define DSI_VID_MODE_STS_VSG_RECOVERY_SHIFT 10
+#define DSI_VID_MODE_STS_VSG_RECOVERY_MASK 0x00000400
+#define DSI_VID_MODE_STS_VSG_RECOVERY(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, VSG_RECOVERY, __x)
+#define DSI_VID_VCA_SETTING1 0x000000C0
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT 0
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_MASK 0x0000FFFF
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING1, MAX_BURST_LIMIT, __x)
+#define DSI_VID_VCA_SETTING1_BURST_LP_SHIFT 16
+#define DSI_VID_VCA_SETTING1_BURST_LP_MASK 0x00010000
+#define DSI_VID_VCA_SETTING1_BURST_LP(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING1, BURST_LP, __x)
+#define DSI_VID_VCA_SETTING2 0x000000C4
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT 0
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_MASK 0x0000FFFF
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING2, EXACT_BURST_LIMIT, __x)
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_SHIFT 16
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_MASK 0xFFFF0000
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING2, MAX_LINE_LIMIT, __x)
+#define DSI_TVG_CTL 0x000000C8
+#define DSI_TVG_CTL_TVG_RUN_SHIFT 0
+#define DSI_TVG_CTL_TVG_RUN_MASK 0x00000001
+#define DSI_TVG_CTL_TVG_RUN(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_RUN, __x)
+#define DSI_TVG_CTL_TVG_STOPMODE_SHIFT 1
+#define DSI_TVG_CTL_TVG_STOPMODE_MASK 0x00000006
+#define DSI_TVG_CTL_TVG_STOPMODE(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_STOPMODE, __x)
+#define DSI_TVG_CTL_TVG_MODE_SHIFT 3
+#define DSI_TVG_CTL_TVG_MODE_MASK 0x00000018
+#define DSI_TVG_CTL_TVG_MODE(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_MODE, __x)
+#define DSI_TVG_CTL_TVG_STRIPE_SIZE_SHIFT 5
+#define DSI_TVG_CTL_TVG_STRIPE_SIZE_MASK 0x000000E0
+#define DSI_TVG_CTL_TVG_STRIPE_SIZE(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_STRIPE_SIZE, __x)
+#define DSI_TVG_IMG_SIZE 0x000000CC
+#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE_SHIFT 0
+#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE_MASK 0x00001FFF
+#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE(__x) \
+ DSI_VAL2REG(DSI_TVG_IMG_SIZE, TVG_LINE_SIZE, __x)
+#define DSI_TVG_IMG_SIZE_TVG_NBLINE_SHIFT 16
+#define DSI_TVG_IMG_SIZE_TVG_NBLINE_MASK 0x07FF0000
+#define DSI_TVG_IMG_SIZE_TVG_NBLINE(__x) \
+ DSI_VAL2REG(DSI_TVG_IMG_SIZE, TVG_NBLINE, __x)
+#define DSI_TVG_COLOR1 0x000000D0
+#define DSI_TVG_COLOR1_COL1_RED_SHIFT 0
+#define DSI_TVG_COLOR1_COL1_RED_MASK 0x000000FF
+#define DSI_TVG_COLOR1_COL1_RED(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR1, COL1_RED, __x)
+#define DSI_TVG_COLOR1_COL1_GREEN_SHIFT 8
+#define DSI_TVG_COLOR1_COL1_GREEN_MASK 0x0000FF00
+#define DSI_TVG_COLOR1_COL1_GREEN(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR1, COL1_GREEN, __x)
+#define DSI_TVG_COLOR1_COL1_BLUE_SHIFT 16
+#define DSI_TVG_COLOR1_COL1_BLUE_MASK 0x00FF0000
+#define DSI_TVG_COLOR1_COL1_BLUE(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR1, COL1_BLUE, __x)
+#define DSI_TVG_COLOR2 0x000000D4
+#define DSI_TVG_COLOR2_COL2_RED_SHIFT 0
+#define DSI_TVG_COLOR2_COL2_RED_MASK 0x000000FF
+#define DSI_TVG_COLOR2_COL2_RED(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR2, COL2_RED, __x)
+#define DSI_TVG_COLOR2_COL2_GREEN_SHIFT 8
+#define DSI_TVG_COLOR2_COL2_GREEN_MASK 0x0000FF00
+#define DSI_TVG_COLOR2_COL2_GREEN(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR2, COL2_GREEN, __x)
+#define DSI_TVG_COLOR2_COL2_BLUE_SHIFT 16
+#define DSI_TVG_COLOR2_COL2_BLUE_MASK 0x00FF0000
+#define DSI_TVG_COLOR2_COL2_BLUE(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR2, COL2_BLUE, __x)
+#define DSI_TVG_STS 0x000000D8
+#define DSI_TVG_STS_TVG_RUNNING_SHIFT 0
+#define DSI_TVG_STS_TVG_RUNNING_MASK 0x00000001
+#define DSI_TVG_STS_TVG_RUNNING(__x) \
+ DSI_VAL2REG(DSI_TVG_STS, TVG_RUNNING, __x)
+#define DSI_TBG_CTL 0x000000E0
+#define DSI_TBG_CTL_TBG_START_SHIFT 0
+#define DSI_TBG_CTL_TBG_START_MASK 0x00000001
+#define DSI_TBG_CTL_TBG_START(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_START, __x)
+#define DSI_TBG_CTL_TBG_HS_REQ_SHIFT 1
+#define DSI_TBG_CTL_TBG_HS_REQ_MASK 0x00000002
+#define DSI_TBG_CTL_TBG_HS_REQ(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_HS_REQ, __x)
+#define DSI_TBG_CTL_TBG_DATA_SEL_SHIFT 2
+#define DSI_TBG_CTL_TBG_DATA_SEL_MASK 0x00000004
+#define DSI_TBG_CTL_TBG_DATA_SEL(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_DATA_SEL, __x)
+#define DSI_TBG_CTL_TBG_MODE_SHIFT 3
+#define DSI_TBG_CTL_TBG_MODE_MASK 0x00000018
+#define DSI_TBG_CTL_TBG_MODE_1BYTE 0
+#define DSI_TBG_CTL_TBG_MODE_2BYTE 1
+#define DSI_TBG_CTL_TBG_MODE_BURST_COUNTER 2
+#define DSI_TBG_CTL_TBG_MODE_BURST 3
+#define DSI_TBG_CTL_TBG_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_MODE, DSI_TBG_CTL_TBG_MODE_##__x)
+#define DSI_TBG_CTL_TBG_MODE(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_MODE, __x)
+#define DSI_TBG_SETTING 0x000000E4
+#define DSI_TBG_SETTING_TBG_DATA_SHIFT 0
+#define DSI_TBG_SETTING_TBG_DATA_MASK 0x0000FFFF
+#define DSI_TBG_SETTING_TBG_DATA(__x) \
+ DSI_VAL2REG(DSI_TBG_SETTING, TBG_DATA, __x)
+#define DSI_TBG_SETTING_TBG_CPT_SHIFT 16
+#define DSI_TBG_SETTING_TBG_CPT_MASK 0x0FFF0000
+#define DSI_TBG_SETTING_TBG_CPT(__x) \
+ DSI_VAL2REG(DSI_TBG_SETTING, TBG_CPT, __x)
+#define DSI_TBG_STS 0x000000E8
+#define DSI_TBG_STS_TBG_STATUS_SHIFT 0
+#define DSI_TBG_STS_TBG_STATUS_MASK 0x00000001
+#define DSI_TBG_STS_TBG_STATUS(__x) \
+ DSI_VAL2REG(DSI_TBG_STS, TBG_STATUS, __x)
+#define DSI_MCTL_MAIN_STS_CTL 0x000000F0
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN_SHIFT 0
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, PLL_LOCK_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN_SHIFT 1
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CLKLANE_READY_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN_SHIFT 2
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT1_READY_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN_SHIFT 3
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT2_READY_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN_SHIFT 4
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, HSTX_TO_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN_SHIFT 5
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, LPRX_TO_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN_SHIFT 6
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CRS_UNTERM_PCK_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN_SHIFT 7
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, VRS_UNTERM_PCK_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE_SHIFT 16
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE_MASK 0x00010000
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, PLL_LOCK_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE_SHIFT 17
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE_MASK 0x00020000
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CLKLANE_READY_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE_SHIFT 18
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE_MASK 0x00040000
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT1_READY_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE_SHIFT 19
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE_MASK 0x00080000
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT2_READY_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE_SHIFT 20
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE_MASK 0x00100000
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, HSTX_TO_ERR_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE_SHIFT 21
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE_MASK 0x00200000
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, LPRX_TO_ERR_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE_SHIFT 22
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE_MASK 0x00400000
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CRS_UNTERM_PCK_ERR_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE_SHIFT 23
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE_MASK 0x00800000
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, VRS_UNTERM_PCK_ERR_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL 0x000000F4
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN_SHIFT 0
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN_MASK 0x00000001
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN_SHIFT 1
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN_MASK 0x00000002
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_TE_MISS_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN_SHIFT 2
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN_MASK 0x00000004
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI1_UNDERRUN_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN_SHIFT 3
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN_MASK 0x00000008
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI2_UNDERRUN_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN_SHIFT 4
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN_MASK 0x00000010
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_UNWANTED_RD_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN_SHIFT 5
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN_MASK 0x00000020
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, CSM_RUNNING_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE_SHIFT 16
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE_MASK 0x00010000
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE_SHIFT 17
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE_MASK 0x00020000
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_TE_MISS_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE_SHIFT 18
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE_MASK 0x00040000
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI1_UNDERRUN_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE_SHIFT 19
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE_MASK 0x00080000
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI2_UNDERRUN_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE_SHIFT 20
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE_MASK 0x00100000
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_UNWANTED_RD_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE_SHIFT 21
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE_MASK 0x00200000
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, CSM_RUNNING_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL 0x000000F8
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN_SHIFT 0
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, CMD_TRANSMISSION_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN_SHIFT 1
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, WRITE_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN_SHIFT 2
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN_SHIFT 3
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN_SHIFT 4
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_RECEIVED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN_SHIFT 5
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_WITH_ERR_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN_SHIFT 6
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_RECEIVED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN_SHIFT 7
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN_SHIFT 8
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN_SHIFT 9
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_FINISHED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN_SHIFT 10
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_WITH_ERR_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE_SHIFT 16
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE_MASK 0x00010000
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, CMD_TRANSMISSION_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE_SHIFT 17
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE_MASK 0x00020000
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, WRITE_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE_SHIFT 18
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE_MASK 0x00040000
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE_SHIFT 19
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE_MASK 0x00080000
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE_SHIFT 20
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE_MASK 0x00100000
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_RECEIVED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE_SHIFT 21
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE_MASK 0x00200000
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_WITH_ERR_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE_SHIFT 22
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE_MASK 0x00400000
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_RECEIVED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE_SHIFT 23
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE_MASK 0x00800000
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE_SHIFT 24
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE_MASK 0x01000000
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE_SHIFT 25
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE_MASK 0x02000000
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_FINISHED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE_SHIFT 26
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE_MASK 0x04000000
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_WITH_ERR_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL 0x000000FC
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_FIXED_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNCORRECTABLE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_CHECKSUM_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNDECODABLE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_RECEIVE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_OVERSIZE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_WRONG_LENGTH_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_MISSING_EOT_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_EOT_WITH_ERR_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE_SHIFT 16
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE_MASK 0x00010000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_FIXED_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE_SHIFT 17
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE_MASK 0x00020000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNCORRECTABLE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE_SHIFT 18
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE_MASK 0x00040000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_CHECKSUM_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE_SHIFT 19
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE_MASK 0x00080000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNDECODABLE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE_SHIFT 20
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE_MASK 0x00100000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_RECEIVE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE_SHIFT 21
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE_MASK 0x00200000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_OVERSIZE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE_SHIFT 22
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE_MASK 0x00400000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_WRONG_LENGTH_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE_SHIFT 23
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE_MASK 0x00800000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_MISSING_EOT_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE_SHIFT 24
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE_MASK 0x01000000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_EOT_WITH_ERR_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL 0x00000100
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN_SHIFT 0
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN_MASK 0x00000001
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RUNNING_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN_SHIFT 1
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN_MASK 0x00000002
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_DATA_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN_SHIFT 2
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN_MASK 0x00000004
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_HSYNC_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN_SHIFT 3
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN_MASK 0x00000008
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_VSYNC_EN, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN_SHIFT 4
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN_MASK 0x00000010
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_LENGTH_EN, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN_SHIFT 5
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN_MASK 0x00000020
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_HEIGHT_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN_SHIFT 6
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN_MASK 0x00000040
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_BURSTWRITE_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN_SHIFT 7
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN_MASK 0x00000080
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGWRITE_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN_SHIFT 8
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN_MASK 0x00000100
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGREAD_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN_SHIFT 9
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN_MASK 0x00000200
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_VRS_WRONG_LENGTH_EN, __x)
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE_SHIFT 16
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE_MASK 0x00010000
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RUNNING_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE_SHIFT 17
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE_MASK 0x00020000
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_DATA_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE_SHIFT 18
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE_MASK 0x00040000
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_HSYNC_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE_SHIFT 19
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE_MASK 0x00080000
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_VSYNC_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE_SHIFT 20
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE_MASK 0x00100000
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_LENGTH_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE_SHIFT 21
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE_MASK 0x00200000
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_HEIGHT_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE_SHIFT 22
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE_MASK 0x00400000
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_BURSTWRITE_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE_SHIFT 23
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE_MASK 0x00800000
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGWRITE_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE_SHIFT 24
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE_MASK 0x01000000
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGREAD_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE_SHIFT 25
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE_MASK 0x02000000
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_VRS_WRONG_LENGTH_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE_SHIFT 26
+#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE_MASK 0x04000000
+#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RECOVERY_EDGE, __x)
+#define DSI_TG_STS_CTL 0x00000104
+#define DSI_TG_STS_CTL_TVG_STS_EN_SHIFT 0
+#define DSI_TG_STS_CTL_TVG_STS_EN_MASK 0x00000001
+#define DSI_TG_STS_CTL_TVG_STS_EN(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TVG_STS_EN, __x)
+#define DSI_TG_STS_CTL_TBG_STS_EN_SHIFT 1
+#define DSI_TG_STS_CTL_TBG_STS_EN_MASK 0x00000002
+#define DSI_TG_STS_CTL_TBG_STS_EN(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TBG_STS_EN, __x)
+#define DSI_TG_STS_CTL_TVG_STS_EDGE_SHIFT 16
+#define DSI_TG_STS_CTL_TVG_STS_EDGE_MASK 0x00010000
+#define DSI_TG_STS_CTL_TVG_STS_EDGE(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TVG_STS_EDGE, __x)
+#define DSI_TG_STS_CTL_TBG_STS_EDGE_SHIFT 17
+#define DSI_TG_STS_CTL_TBG_STS_EDGE_MASK 0x00020000
+#define DSI_TG_STS_CTL_TBG_STS_EDGE(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TBG_STS_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL 0x00000108
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN_SHIFT 6
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN_MASK 0x00000040
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN_SHIFT 7
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN_MASK 0x00000080
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN_SHIFT 8
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN_MASK 0x00000100
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN_SHIFT 9
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN_MASK 0x00000200
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN_SHIFT 10
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN_MASK 0x00000400
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN_SHIFT 11
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN_MASK 0x00000800
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN_SHIFT 12
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN_MASK 0x00001000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN_SHIFT 13
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN_MASK 0x00002000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN_SHIFT 14
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN_MASK 0x00004000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN_SHIFT 15
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN_MASK 0x00008000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE_SHIFT 22
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE_MASK 0x00400000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE_SHIFT 23
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE_MASK 0x00800000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE_SHIFT 24
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE_MASK 0x01000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE_SHIFT 25
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE_MASK 0x02000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE_SHIFT 26
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE_MASK 0x04000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE_SHIFT 27
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE_MASK 0x08000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE_SHIFT 28
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE_MASK 0x10000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE_SHIFT 29
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE_MASK 0x20000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE_SHIFT 30
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE_MASK 0x40000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE_SHIFT 31
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE_MASK 0x80000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_2_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CLR 0x00000110
+#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR_SHIFT 0
+#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, PLL_LOCK_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR_SHIFT 1
+#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, CLKLANE_READY_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR_SHIFT 2
+#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, DAT1_READY_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR_SHIFT 3
+#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, DAT2_READY_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR_SHIFT 4
+#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, HSTX_TO_ERR_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR_SHIFT 5
+#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, LPRX_TO_ERR_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR_SHIFT 6
+#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, CRS_UNTERM_PCK_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR_SHIFT 7
+#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, VRS_UNTERM_PCK_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR 0x00000114
+#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR_SHIFT 0
+#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR_MASK 0x00000001
+#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_NO_TE_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR_SHIFT 1
+#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR_MASK 0x00000002
+#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_TE_MISS_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR_SHIFT 2
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR_MASK 0x00000004
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_SDI1_UNDERRUN_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR_SHIFT 3
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR_MASK 0x00000008
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_SDI2_UNDERRUN_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR_SHIFT 4
+#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR_MASK 0x00000010
+#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_UNWANTED_RD_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR_SHIFT 5
+#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR_MASK 0x00000020
+#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, CSM_RUNNING_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR 0x00000118
+#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR_SHIFT 0
+#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, CMD_TRANSMISSION_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR_SHIFT 1
+#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, WRITE_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR_SHIFT 2
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TRIGGER_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR_SHIFT 3
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, READ_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR_SHIFT 4
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, ACKNOWLEDGE_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR_SHIFT 5
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR_SHIFT 6
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TRIGGER_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR_SHIFT 7
+#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TE_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR_SHIFT 8
+#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, BTA_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR_SHIFT 9
+#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, BTA_FINISHED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR_SHIFT 10
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, READ_COMPLETED_WITH_ERR_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR 0x0000011C
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_FIXED_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_UNCORRECTABLE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_CHECKSUM_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_UNDECODABLE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_RECEIVE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_OVERSIZE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_WRONG_LENGTH_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_MISSING_EOT_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_EOT_WITH_ERR_CLR, __x)
+#define DSI_VID_MODE_STS_CLR 0x00000120
+#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR_SHIFT 0
+#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR_MASK 0x00000001
+#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, VSG_STS_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR_SHIFT 1
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR_MASK 0x00000002
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_DATA_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR_SHIFT 2
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR_MASK 0x00000004
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_HSYNC_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR_SHIFT 3
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR_MASK 0x00000008
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_VSYNC_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR_SHIFT 4
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR_MASK 0x00000010
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, REG_ERR_SMALL_LENGTH_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR_SHIFT 5
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR_MASK 0x00000020
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, REG_ERR_SMALL_HEIGHT_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR_SHIFT 6
+#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR_MASK 0x00000040
+#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_BURSTWRITE_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR_SHIFT 7
+#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR_MASK 0x00000080
+#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_LONGWRITE_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR_SHIFT 8
+#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR_MASK 0x00000100
+#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_LONGREAD_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR_SHIFT 9
+#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR_MASK 0x00000200
+#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_VRS_WRONG_LENGTH_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR_SHIFT 10
+#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR_MASK 0x00000400
+#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, VSG_RECOVERY_CLR, __x)
+#define DSI_TG_STS_CLR 0x00000124
+#define DSI_TG_STS_CLR_TVG_STS_CLR_SHIFT 0
+#define DSI_TG_STS_CLR_TVG_STS_CLR_MASK 0x00000001
+#define DSI_TG_STS_CLR_TVG_STS_CLR(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CLR, TVG_STS_CLR, __x)
+#define DSI_TG_STS_CLR_TBG_STS_CLR_SHIFT 1
+#define DSI_TG_STS_CLR_TBG_STS_CLR_MASK 0x00000002
+#define DSI_TG_STS_CLR_TBG_STS_CLR(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CLR, TBG_STS_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR 0x00000128
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR_SHIFT 6
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR_MASK 0x00000040
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_ESC_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR_SHIFT 7
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR_MASK 0x00000080
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_ESC_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR_SHIFT 8
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR_MASK 0x00000100
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_SYNCESC_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR_SHIFT 9
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR_MASK 0x00000200
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_SYNCESC_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR_SHIFT 10
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR_MASK 0x00000400
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONTROL_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR_SHIFT 11
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR_MASK 0x00000800
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONTROL_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR_SHIFT 12
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR_MASK 0x00001000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP0_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR_SHIFT 13
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR_MASK 0x00002000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP0_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR_SHIFT 14
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR_MASK 0x00004000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP1_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR_SHIFT 15
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR_MASK 0x00008000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP1_2_CLR, __x)
+#define DSI_MCTL_MAIN_STS_FLAG 0x00000130
+#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG_SHIFT 0
+#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, PLL_LOCK_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG_SHIFT 1
+#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, CLKLANE_READY_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG_SHIFT 2
+#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, DAT1_READY_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG_SHIFT 3
+#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, DAT2_READY_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG_SHIFT 4
+#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, HSTX_TO_ERR_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG_SHIFT 5
+#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, LPRX_TO_ERR_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG_SHIFT 6
+#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, CRS_UNTERM_PCK_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG_SHIFT 7
+#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, VRS_UNTERM_PCK_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG 0x00000134
+#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG_SHIFT 0
+#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG_MASK 0x00000001
+#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_NO_TE_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG_SHIFT 1
+#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG_MASK 0x00000002
+#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_TE_MISS_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG_SHIFT 2
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG_MASK 0x00000004
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_SDI1_UNDERRUN_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG_SHIFT 3
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG_MASK 0x00000008
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_SDI2_UNDERRUN_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG_SHIFT 4
+#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG_MASK 0x00000010
+#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_UNWANTED_RD_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG_SHIFT 5
+#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG_MASK 0x00000020
+#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, CSM_RUNNING_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG 0x00000138
+#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG_SHIFT 0
+#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, CMD_TRANSMISSION_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG_SHIFT 1
+#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, WRITE_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG_SHIFT 2
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TRIGGER_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG_SHIFT 3
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, READ_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG_SHIFT 4
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, ACKNOWLEDGE_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG_SHIFT 5
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG_SHIFT 6
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TRIGGER_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG_SHIFT 7
+#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TE_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG_SHIFT 8
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, BTA_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG_SHIFT 9
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, BTA_FINISHED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG_SHIFT 10
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, READ_COMPLETED_WITH_ERR_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG 0x0000013C
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_FIXED_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_UNCORRECTABLE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_CHECKSUM_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_UNDECODABLE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_RECEIVE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_OVERSIZE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_WRONG_LENGTH_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_MISSING_EOT_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_EOT_WITH_ERR_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG 0x00000140
+#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG_SHIFT 0
+#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG_MASK 0x00000001
+#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, VSG_STS_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG_SHIFT 1
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG_MASK 0x00000002
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_DATA_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG_SHIFT 2
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG_MASK 0x00000004
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_HSYNC_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG_SHIFT 3
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG_MASK 0x00000008
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_VSYNC_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG_SHIFT 4
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG_MASK 0x00000010
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, REG_ERR_SMALL_LENGTH_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG_SHIFT 5
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG_MASK 0x00000020
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, REG_ERR_SMALL_HEIGHT_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG_SHIFT 6
+#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG_MASK 0x00000040
+#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_BURSTWRITE_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG_SHIFT 7
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG_MASK 0x00000080
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_LONGWRITE_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG_SHIFT 8
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG_MASK 0x00000100
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_LONGREAD_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG_SHIFT 9
+#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG_MASK 0x00000200
+#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_VRS_WRONG_LENGTH_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG_SHIFT 10
+#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG_MASK 0x00000400
+#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, VSG_RECOVERY_FLAG, __x)
+#define DSI_TG_STS_FLAG 0x00000144
+#define DSI_TG_STS_FLAG_TVG_STS_FLAG_SHIFT 0
+#define DSI_TG_STS_FLAG_TVG_STS_FLAG_MASK 0x00000001
+#define DSI_TG_STS_FLAG_TVG_STS_FLAG(__x) \
+ DSI_VAL2REG(DSI_TG_STS_FLAG, TVG_STS_FLAG, __x)
+#define DSI_TG_STS_FLAG_TBG_STS_FLAG_SHIFT 1
+#define DSI_TG_STS_FLAG_TBG_STS_FLAG_MASK 0x00000002
+#define DSI_TG_STS_FLAG_TBG_STS_FLAG(__x) \
+ DSI_VAL2REG(DSI_TG_STS_FLAG, TBG_STS_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG 0x00000148
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG_SHIFT 6
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG_MASK 0x00000040
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_ESC_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG_SHIFT 7
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG_MASK 0x00000080
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_ESC_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG_SHIFT 8
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG_MASK 0x00000100
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_SYNCESC_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG_SHIFT 9
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG_MASK 0x00000200
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_SYNCESC_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG_SHIFT 10
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG_MASK 0x00000400
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONTROL_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG_SHIFT 11
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG_MASK 0x00000800
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONTROL_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG_SHIFT 12
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG_MASK 0x00001000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP0_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG_SHIFT 13
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG_MASK 0x00002000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP0_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG_SHIFT 14
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG_MASK 0x00004000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP1_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG_SHIFT 15
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG_MASK 0x00008000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP1_2_FLAG, __x)
+#define DSI_DPHY_LANES_TRIM 0x00000150
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1_SHIFT 0
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1_MASK 0x00000003
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1_SHIFT 2
+#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1_MASK 0x00000004
+#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_CD_OFF_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1_SHIFT 3
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1_MASK 0x00000008
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1_SHIFT 4
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1_MASK 0x00000010
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1_SHIFT 5
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1_MASK 0x00000020
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK_SHIFT 6
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK_MASK 0x000000C0
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK_SHIFT 8
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK_MASK 0x00000300
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_LP_RX_VIL_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK_SHIFT 10
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK_MASK 0x00000C00
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_LP_TX_SLEWRATE_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_SHIFT 12
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_MASK 0x00001000
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_81 0
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_90 1
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_ENUM(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SPECS_90_81B, \
+ DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_##__x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SPECS_90_81B, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK_SHIFT 13
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK_MASK 0x00002000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK_SHIFT 14
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK_MASK 0x00004000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK_SHIFT 15
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK_MASK 0x00008000
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2_SHIFT 16
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2_MASK 0x00030000
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_DAT2, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2_SHIFT 18
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2_MASK 0x00040000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_DAT2, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2_SHIFT 19
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2_MASK 0x00080000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_DAT2, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2_SHIFT 20
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2_MASK 0x00100000
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_DAT2, __x)
+#define DSI_ID_REG 0x00000FF0
+#define DSI_ID_REG_Y_SHIFT 0
+#define DSI_ID_REG_Y_MASK 0x0000000F
+#define DSI_ID_REG_Y(__x) \
+ DSI_VAL2REG(DSI_ID_REG, Y, __x)
+#define DSI_ID_REG_X_SHIFT 4
+#define DSI_ID_REG_X_MASK 0x000000F0
+#define DSI_ID_REG_X(__x) \
+ DSI_VAL2REG(DSI_ID_REG, X, __x)
+#define DSI_ID_REG_H_SHIFT 8
+#define DSI_ID_REG_H_MASK 0x00000300
+#define DSI_ID_REG_H(__x) \
+ DSI_VAL2REG(DSI_ID_REG, H, __x)
+#define DSI_ID_REG_PRODUCT_ID_SHIFT 10
+#define DSI_ID_REG_PRODUCT_ID_MASK 0x0003FC00
+#define DSI_ID_REG_PRODUCT_ID(__x) \
+ DSI_VAL2REG(DSI_ID_REG, PRODUCT_ID, __x)
+#define DSI_ID_REG_VENDOR_ID_SHIFT 18
+#define DSI_ID_REG_VENDOR_ID_MASK 0xFFFC0000
+#define DSI_ID_REG_VENDOR_ID(__x) \
+ DSI_VAL2REG(DSI_ID_REG, VENDOR_ID, __x)
+#define DSI_IP_CONF 0x00000FF4
+#define DSI_IP_CONF_FIFO_SIZE_SHIFT 0
+#define DSI_IP_CONF_FIFO_SIZE_MASK 0x0000003F
+#define DSI_IP_CONF_FIFO_SIZE(__x) \
+ DSI_VAL2REG(DSI_IP_CONF, FIFO_SIZE, __x)
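The header above defines every register field with the same SHIFT/MASK/VAL2REG triplet. As a minimal illustration (not part of the patch), the sketch below shows how such field macros are typically combined into a single register write. It assumes DSI_VAL2REG(), defined earlier in this header, shifts the value into the field and masks it; "dsi_base" is a hypothetical ioremapped base address and writel() is the standard MMIO accessor.

#include <linux/io.h>
#include <linux/types.h>

/* Acknowledge the write/read completion bits in DSI_DIRECT_CMD_STS_CLR. */
static inline void dsi_ack_direct_cmd(u8 __iomem *dsi_base)
{
	u32 val = DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR(1) |
		  DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR(1);

	writel(val, dsi_base + DSI_DIRECT_CMD_STS_CLR);
}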
diff --git a/drivers/video/mcde/mcde_bus.c b/drivers/video/mcde/mcde_bus.c
new file mode 100644
index 00000000000..bdcf65b0fb9
--- /dev/null
+++ b/drivers/video/mcde/mcde_bus.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE display bus driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/notifier.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_dss.h>
+
+#define to_mcde_display_driver(__drv) \
+ container_of((__drv), struct mcde_display_driver, driver)
+
+static BLOCKING_NOTIFIER_HEAD(bus_notifier_list);
+
+static int mcde_drv_suspend(struct device *_dev, pm_message_t state);
+static int mcde_drv_resume(struct device *_dev);
+struct bus_type mcde_bus_type;
+
+static int mcde_suspend_device(struct device *dev, void *data)
+{
+ pm_message_t *state = data;
+ if (dev->driver && dev->driver->suspend)
+ return dev->driver->suspend(dev, *state);
+ return 0;
+}
+
+static int mcde_resume_device(struct device *dev, void *data)
+{
+ if (dev->driver && dev->driver->resume)
+ return dev->driver->resume(dev);
+ return 0;
+}
+
+/* Bus driver */
+
+static int mcde_bus_match(struct device *_dev, struct device_driver *driver)
+{
+ pr_debug("Matching device %s with driver %s\n",
+ dev_name(_dev), driver->name);
+
+ return strncmp(dev_name(_dev), driver->name, strlen(driver->name)) == 0;
+}
+
+static int mcde_bus_suspend(struct device *_dev, pm_message_t state)
+{
+ int ret;
+ ret = bus_for_each_dev(&mcde_bus_type, NULL, &state,
+ mcde_suspend_device);
+ if (ret) {
+ /* TODO Resume all suspended devices */
+ /* mcde_bus_resume(dev); */
+ return ret;
+ }
+ return 0;
+}
+
+static int mcde_bus_resume(struct device *_dev)
+{
+ return bus_for_each_dev(&mcde_bus_type, NULL, NULL, mcde_resume_device);
+}
+
+struct bus_type mcde_bus_type = {
+ .name = "mcde_bus",
+ .match = mcde_bus_match,
+ .suspend = mcde_bus_suspend,
+ .resume = mcde_bus_resume,
+};
+
+static int mcde_drv_probe(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ return drv->probe(dev);
+}
+
+static int mcde_drv_remove(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ return drv->remove(dev);
+}
+
+static void mcde_drv_shutdown(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ drv->shutdown(dev);
+}
+
+static int mcde_drv_suspend(struct device *_dev, pm_message_t state)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ if (drv->suspend)
+ return drv->suspend(dev, state);
+ else
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ return dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+#else
+ return 0;
+#endif
+}
+
+static int mcde_drv_resume(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ if (drv->resume)
+ return drv->resume(dev);
+ else
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ return dev->set_power_mode(dev, MCDE_DISPLAY_PM_STANDBY);
+#else
+ return 0;
+#endif
+}
+
+/* Bus device */
+
+static void mcde_bus_release(struct device *dev)
+{
+}
+
+struct device mcde_bus = {
+ .init_name = "mcde_bus",
+ .release = mcde_bus_release
+};
+
+/* Public bus API */
+
+int mcde_display_driver_register(struct mcde_display_driver *drv)
+{
+ drv->driver.bus = &mcde_bus_type;
+ if (drv->probe)
+ drv->driver.probe = mcde_drv_probe;
+ if (drv->remove)
+ drv->driver.remove = mcde_drv_remove;
+ if (drv->shutdown)
+ drv->driver.shutdown = mcde_drv_shutdown;
+ drv->driver.suspend = mcde_drv_suspend;
+ drv->driver.resume = mcde_drv_resume;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(mcde_display_driver_register);
+
+void mcde_display_driver_unregister(struct mcde_display_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(mcde_display_driver_unregister);
+
+static void mcde_display_dev_release(struct device *dev)
+{
+ /* Do nothing */
+}
+
+int mcde_display_device_register(struct mcde_display_device *dev)
+{
+ /* Setup device */
+ if (!dev)
+ return -EINVAL;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ dev->dev.bus = &mcde_bus_type;
+ if (dev->dev.parent == NULL)
+ dev->dev.parent = &mcde_bus;
+ dev->dev.release = mcde_display_dev_release;
+ if (dev->id != -1)
+ dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id);
+ else
+ dev_set_name(&dev->dev, "%s", dev->name);
+
+ mcde_display_init_device(dev);
+
+ return device_register(&dev->dev);
+}
+EXPORT_SYMBOL(mcde_display_device_register);
+
+void mcde_display_device_unregister(struct mcde_display_device *dev)
+{
+ device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL(mcde_display_device_unregister);
+
+/* Notifications */
+int mcde_dss_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&bus_notifier_list, nb);
+}
+EXPORT_SYMBOL(mcde_dss_register_notifier);
+
+int mcde_dss_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&bus_notifier_list, nb);
+}
+EXPORT_SYMBOL(mcde_dss_unregister_notifier);
+
+static int bus_notify_callback(struct notifier_block *nb,
+ unsigned long event, void *dev)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+
+ if (event == BUS_NOTIFY_BOUND_DRIVER) {
+ ddev->initialized = true;
+ blocking_notifier_call_chain(&bus_notifier_list,
+ MCDE_DSS_EVENT_DISPLAY_REGISTERED, ddev);
+ } else if (event == BUS_NOTIFY_UNBIND_DRIVER) {
+ ddev->initialized = false;
+ blocking_notifier_call_chain(&bus_notifier_list,
+ MCDE_DSS_EVENT_DISPLAY_UNREGISTERED, ddev);
+ }
+ return 0;
+}
+
+struct notifier_block bus_nb = {
+ .notifier_call = bus_notify_callback,
+};
+
+/* Driver init/exit */
+
+int __init mcde_display_init(void)
+{
+ int ret;
+
+ ret = bus_register(&mcde_bus_type);
+ if (ret) {
+ pr_warning("Unable to register bus type\n");
+ goto no_bus_registration;
+ }
+ ret = device_register(&mcde_bus);
+ if (ret) {
+ pr_warning("Unable to register bus device\n");
+ goto no_device_registration;
+ }
+ ret = bus_register_notifier(&mcde_bus_type, &bus_nb);
+ if (ret) {
+ pr_warning("Unable to register bus notifier\n");
+ goto no_bus_notifier;
+ }
+
+ goto out;
+
+no_bus_notifier:
+ device_unregister(&mcde_bus);
+no_device_registration:
+ bus_unregister(&mcde_bus_type);
+no_bus_registration:
+out:
+ return ret;
+}
+
+void mcde_display_exit(void)
+{
+ bus_unregister_notifier(&mcde_bus_type, &bus_nb);
+ device_unregister(&mcde_bus);
+ bus_unregister(&mcde_bus_type);
+}
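For reference, a hedged sketch (not part of the patch) of how a panel driver would use the public bus API above. The "foo_panel" names are hypothetical; the struct mcde_display_driver fields follow their use in mcde_drv_probe()/mcde_drv_remove(), and mcde_bus_match() matches the device name against the driver name as a prefix.

#include <linux/module.h>
#include <video/mcde_display.h>

static int foo_panel_probe(struct mcde_display_device *ddev)
{
	/* panel-specific setup; see the sketch after mcde_display.c below */
	return 0;
}

static int foo_panel_remove(struct mcde_display_device *ddev)
{
	return 0;
}

static struct mcde_display_driver foo_panel_driver = {
	.probe	= foo_panel_probe,
	.remove	= foo_panel_remove,
	.driver	= {
		.name	= "foo_panel",	/* prefix-matched by mcde_bus_match() */
	},
};

static int __init foo_panel_init(void)
{
	return mcde_display_driver_register(&foo_panel_driver);
}
module_init(foo_panel_init);

static void __exit foo_panel_exit(void)
{
	mcde_display_driver_unregister(&foo_panel_driver);
}
module_exit(foo_panel_exit);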
diff --git a/drivers/video/mcde/mcde_debugfs.c b/drivers/video/mcde/mcde_debugfs.c
new file mode 100644
index 00000000000..586b1787d00
--- /dev/null
+++ b/drivers/video/mcde/mcde_debugfs.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+
+#include "mcde_debugfs.h"
+
+#define MAX_NUM_OVERLAYS 2
+#define MAX_NUM_CHANNELS 4
+#define DEFAULT_DMESG_FPS_LOG_INTERVAL 100
+
+struct fps_info {
+ u32 enable_dmesg;
+ u32 interval_ms;
+ struct timespec timestamp_last;
+ u32 frame_counter_last;
+ u32 frame_counter;
+ u32 fpks;
+};
+
+struct overlay_info {
+ u8 id;
+ struct dentry *dentry;
+ struct fps_info fps;
+};
+
+struct channel_info {
+ u8 id;
+ struct dentry *dentry;
+ struct mcde_chnl_state *chnl;
+ struct fps_info fps;
+ struct overlay_info overlays[MAX_NUM_OVERLAYS];
+};
+
+static struct mcde_info {
+ struct device *dev;
+ struct dentry *dentry;
+ struct channel_info channels[MAX_NUM_CHANNELS];
+} mcde;
+
+/* Requires: lhs > rhs */
+static inline u32 timespec_ms_diff(struct timespec lhs, struct timespec rhs)
+{
+ struct timespec tmp_ts = timespec_sub(lhs, rhs);
+ u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
+ do_div(tmp_ns, NSEC_PER_MSEC);
+ return (u32)tmp_ns;
+}
+
+/* Returns "frames per 1000 secs"; divide by 1000 to get fps with 3 decimals */
+static u32 update_fps(struct fps_info *fps)
+{
+ struct timespec now;
+ u32 fpks = 0, ms_since_last, num_frames;
+
+ getrawmonotonic(&now);
+ fps->frame_counter++;
+
+ ms_since_last = timespec_ms_diff(now, fps->timestamp_last);
+ num_frames = fps->frame_counter - fps->frame_counter_last;
+ if (num_frames > 1 && ms_since_last >= fps->interval_ms) {
+ fpks = (num_frames * 1000000) / ms_since_last;
+ fps->timestamp_last = now;
+ fps->frame_counter_last = fps->frame_counter;
+ fps->fpks = fpks;
+ }
+
+ return fpks;
+}
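/*
 * Illustrative worked example (not from the patch): with the default
 * interval_ms of 100, suppose 30 frames complete during the 500 ms since
 * timestamp_last.  update_fps() then computes
 *
 *	fpks = 30 * 1000000 / 500 = 60000	frames per 1000 seconds
 *
 * which the callers below print as "fps=60.000" via fpks / 1000 and
 * fpks % 1000.
 */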
+
+static void update_chnl_fps(struct channel_info *ci)
+{
+ u32 fpks = update_fps(&ci->fps);
+ if (fpks && ci->fps.enable_dmesg)
+ dev_info(mcde.dev, "FPS: chnl=%d fps=%d.%.3d\n", ci->id,
+ fpks / 1000, fpks % 1000);
+}
+
+static void update_ovly_fps(struct channel_info *ci, struct overlay_info *oi)
+{
+ u32 fpks = update_fps(&oi->fps);
+ if (fpks && oi->fps.enable_dmesg)
+ dev_info(mcde.dev, "FPS: ovly=%d.%d fps=%d.%.3d\n", ci->id,
+ oi->id, fpks / 1000, fpks % 1000);
+}
+
+int mcde_debugfs_create(struct device *dev)
+{
+ if (mcde.dev)
+ return -EBUSY;
+
+ mcde.dentry = debugfs_create_dir("mcde", NULL);
+ if (!mcde.dentry)
+ return -ENOMEM;
+ mcde.dev = dev;
+
+ return 0;
+}
+
+static struct channel_info *find_chnl(u8 chnl_id)
+{
+ if (chnl_id >= MAX_NUM_CHANNELS)
+ return NULL;
+ return &mcde.channels[chnl_id];
+}
+
+static struct overlay_info *find_ovly(struct channel_info *ci, u8 ovly_id)
+{
+ if (!ci || ovly_id >= MAX_NUM_OVERLAYS)
+ return NULL;
+ return &ci->overlays[ovly_id];
+}
+
+static void create_fps_files(struct dentry *dentry, struct fps_info *fps)
+{
+ debugfs_create_u32("frame_counter", S_IRUGO, dentry,
+ &fps->frame_counter);
+ debugfs_create_u32("frames_per_ksecs", S_IRUGO, dentry, &fps->fpks);
+ debugfs_create_u32("interval_ms", S_IRUGO|S_IWUGO, dentry,
+ &fps->interval_ms);
+ debugfs_create_u32("dmesg", S_IRUGO|S_IWUGO, dentry,
+ &fps->enable_dmesg);
+}
+
+int mcde_debugfs_channel_create(u8 chnl_id, struct mcde_chnl_state *chnl)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+ char name[10];
+
+ if (!chnl || !ci)
+ return -EINVAL;
+ if (ci->chnl)
+ return -EBUSY;
+
+ snprintf(name, sizeof(name), "chnl%d", chnl_id);
+ ci->dentry = debugfs_create_dir(name, mcde.dentry);
+ if (!ci->dentry)
+ return -ENOMEM;
+
+ create_fps_files(ci->dentry, &ci->fps);
+
+ ci->fps.interval_ms = DEFAULT_DMESG_FPS_LOG_INTERVAL;
+ ci->id = chnl_id;
+ ci->chnl = chnl;
+
+ return 0;
+}
+
+int mcde_debugfs_overlay_create(u8 chnl_id, u8 ovly_id)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+ struct overlay_info *oi = find_ovly(ci, ovly_id);
+ char name[10];
+
+ if (!oi || !ci || ovly_id >= MAX_NUM_OVERLAYS)
+ return -EINVAL;
+ if (oi->dentry)
+ return -EBUSY;
+
+ snprintf(name, sizeof(name), "ovly%d", ovly_id);
+ oi->dentry = debugfs_create_dir(name, ci->dentry);
+ if (!oi->dentry)
+ return -ENOMEM;
+
+ create_fps_files(oi->dentry, &oi->fps);
+
+ oi->fps.interval_ms = DEFAULT_DMESG_FPS_LOG_INTERVAL;
+ oi->id = ovly_id;
+
+ return 0;
+}
+
+void mcde_debugfs_channel_update(u8 chnl_id)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+
+ if (!ci || !ci->chnl)
+ return;
+
+ update_chnl_fps(ci);
+}
+
+void mcde_debugfs_overlay_update(u8 chnl_id, u8 ovly_id)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+ struct overlay_info *oi = find_ovly(ci, ovly_id);
+
+ if (!oi || !oi->dentry)
+ return;
+
+ update_ovly_fps(ci, oi);
+}
+
diff --git a/drivers/video/mcde/mcde_debugfs.h b/drivers/video/mcde/mcde_debugfs.h
new file mode 100644
index 00000000000..9f1e7f18ea5
--- /dev/null
+++ b/drivers/video/mcde/mcde_debugfs.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __MCDE_DEBUGFS__H__
+#define __MCDE_DEBUGFS__H__
+
+#include <video/mcde.h>
+
+int mcde_debugfs_create(struct device *dev);
+int mcde_debugfs_channel_create(u8 chnl_id, struct mcde_chnl_state *chnl);
+int mcde_debugfs_overlay_create(u8 chnl_id, u8 ovly_id);
+
+void mcde_debugfs_channel_update(u8 chnl_id);
+void mcde_debugfs_overlay_update(u8 chnl_id, u8 ovly_id);
+
+#endif /* __MCDE_DEBUGFS__H__ */
+
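A hedged sketch (not part of the patch) of how the MCDE core could wire up the debugfs helpers declared above. The probe/frame-done function names and the chnl0_state handle are hypothetical; only the mcde_debugfs_* calls come from this header.

#include <linux/platform_device.h>

#include "mcde_debugfs.h"

/* hypothetical channel handle obtained elsewhere in the core driver */
static struct mcde_chnl_state *chnl0_state;

static int example_mcde_probe(struct platform_device *pdev)
{
	int ret;

	ret = mcde_debugfs_create(&pdev->dev);	/* top-level "mcde" directory */
	if (ret)
		return ret;

	/* one directory per channel, one per overlay beneath it */
	ret = mcde_debugfs_channel_create(0, chnl0_state);
	if (!ret)
		ret = mcde_debugfs_overlay_create(0, 0);
	return ret;
}

/* Called on each completed frame: bumps the frame counters and, once
 * interval_ms has elapsed, refreshes frames_per_ksecs (fpks / 1000 = fps). */
static void example_frame_done(void)
{
	mcde_debugfs_channel_update(0);
	mcde_debugfs_overlay_update(0, 0);
}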
diff --git a/drivers/video/mcde/mcde_display.c b/drivers/video/mcde/mcde_display.c
new file mode 100644
index 00000000000..a461b36c9b6
--- /dev/null
+++ b/drivers/video/mcde/mcde_display.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include <video/mcde_display.h>
+
+/*temp*/
+#include <linux/delay.h>
+
+static void mcde_display_get_native_resolution_default(
+ struct mcde_display_device *ddev, u16 *x_res, u16 *y_res)
+{
+ if (x_res)
+ *x_res = ddev->native_x_res;
+ if (y_res)
+ *y_res = ddev->native_y_res;
+}
+
+static enum mcde_ovly_pix_fmt mcde_display_get_default_pixel_format_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->default_pixel_format;
+}
+
+static void mcde_display_get_physical_size_default(
+ struct mcde_display_device *ddev, u16 *width, u16 *height)
+{
+ if (width)
+ *width = ddev->physical_width;
+ if (height)
+ *height = ddev->physical_height;
+}
+
+static int mcde_display_set_power_mode_default(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ return ret;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ /* force register settings */
+ if (ddev->port->type == MCDE_PORTTYPE_DPI)
+ ddev->update_flags = UPDATE_FLAG_VIDEO_MODE | UPDATE_FLAG_PIXEL_FORMAT;
+ }
+
+ if (ddev->port->type == MCDE_PORTTYPE_DSI) {
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_EXIT_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_ON, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ } else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ /* ON -> STANDBY */
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_OFF, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_ENTER_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+ } else if (ddev->port->type == MCDE_PORTTYPE_DPI) {
+ ddev->power_mode = power_mode;
+ } else if (ddev->power_mode != power_mode) {
+ return -EINVAL;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ if (ret)
+ return ret;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+
+ return ret;
+}
+
+static inline enum mcde_display_power_mode mcde_display_get_power_mode_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->power_mode;
+}
+
+static inline int mcde_display_try_video_mode_default(
+ struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ /*
+ * DSI video mode:
+ * This function is intended for configuring supported video mode(s).
+ * Override it in the panel driver and set up the blanking
+ * intervals and pixel clock according to the recommendations below.
+ *
+ * vertical blanking parameters vbp, vfp, vsw are given in lines
+ * horizontal blanking parameters hbp, hfp, hsw are given in pixels
+ *
+ * video_mode->pixclock is the time between two pixels (in picoseconds)
+ * The pixel clock is sourced from the DSI PLL and must be set to
+ * meet the requirements below:
+ *
+ * non-burst mode:
+ * pixel clock (Hz) = (VACT+VBP+VFP+VSA) * (HACT+HBP+HFP+HSA) *
+ * framerate * bpp / num_data_lanes
+ *
+ * burst mode:
+ * pixel clock (Hz) > (VACT+VBP+VFP+VSA) * (HACT+HBP+HFP+HSA) *
+ * framerate * bpp / num_data_lanes * 1.1
+ * (1.1 is a 10% margin needed for burst mode calculations)
+ */
+ return 0;
+}
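/*
 * Illustrative numbers only (not from the patch): plugging a hypothetical
 * 480x800 panel on two data lanes, 60 fps, 24 bpp, vbp/vfp/vsw = 15/14/1
 * and hbp/hfp/hsw = 8/8/2 into the non-burst formula above:
 *
 *	(VACT+VBP+VFP+VSA) = 800 + 15 + 14 + 1 = 830 lines
 *	(HACT+HBP+HFP+HSA) = 480 + 8 + 8 + 2   = 498 pixels
 *	pixel clock        = 830 * 498 * 60 * 24 / 2 ~= 297.6 MHz
 *	video_mode->pixclock = 10^12 / 297604800 ~= 3360 ps
 */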
+
+static int mcde_display_set_video_mode_default(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ int ret;
+ struct mcde_video_mode channel_video_mode;
+
+ if (!video_mode)
+ return -EINVAL;
+
+ ddev->video_mode = *video_mode;
+ channel_video_mode = ddev->video_mode;
+ /* Depends on whether the display or the MCDE should rotate */
+ if (ddev->rotation == MCDE_DISPLAY_ROT_90_CCW ||
+ ddev->rotation == MCDE_DISPLAY_ROT_90_CW) {
+ channel_video_mode.xres = ddev->native_x_res;
+ channel_video_mode.yres = ddev->native_y_res;
+ }
+ ret = mcde_chnl_set_video_mode(ddev->chnl_state, &channel_video_mode);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode\n", __func__);
+ return ret;
+ }
+
+ ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+
+ return 0;
+}
+
+static inline void mcde_display_get_video_mode_default(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ if (video_mode)
+ *video_mode = ddev->video_mode;
+}
+
+static int mcde_display_set_pixel_format_default(
+ struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format)
+{
+ int ret;
+
+ ddev->pixel_format = format;
+ ret = mcde_chnl_set_pixel_format(ddev->chnl_state,
+ ddev->port->pixel_format);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set pixel format = %d\n",
+ __func__, format);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline enum mcde_ovly_pix_fmt mcde_display_get_pixel_format_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->pixel_format;
+}
+
+
+static int mcde_display_set_rotation_default(struct mcde_display_device *ddev,
+ enum mcde_display_rotation rotation)
+{
+ int ret;
+
+ ret = mcde_chnl_set_rotation(ddev->chnl_state, rotation,
+ ddev->rotbuf1, ddev->rotbuf2);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set rotation = %d\n",
+ __func__, rotation);
+ return ret;
+ }
+
+ if (rotation == MCDE_DISPLAY_ROT_180_CCW) {
+ u8 param = 0x40;
+ (void) mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_ADDRESS_MODE, &param, 1);
+ } else if (ddev->rotation == MCDE_DISPLAY_ROT_180_CCW &&
+ rotation != MCDE_DISPLAY_ROT_180_CCW) {
+ u8 param = 0;
+ (void) mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_ADDRESS_MODE, &param, 1);
+ }
+
+ ddev->rotation = rotation;
+ ddev->update_flags |= UPDATE_FLAG_ROTATION;
+
+ return 0;
+}
+
+static inline enum mcde_display_rotation mcde_display_get_rotation_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->rotation;
+}
+
+static int mcde_display_set_synchronized_update_default(
+ struct mcde_display_device *ddev, bool enable)
+{
+ if (ddev->port->type == MCDE_PORTTYPE_DSI && enable) {
+ int ret;
+ u8 m = 0;
+
+ if (ddev->port->sync_src == MCDE_SYNCSRC_OFF)
+ return -EINVAL;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_TEAR_ON, &m, 1);
+ if (ret < 0) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to set synchornized update = %d\n",
+ __func__, enable);
+ return ret;
+ }
+ }
+ ddev->synchronized_update = enable;
+ return 0;
+}
+
+static inline bool mcde_display_get_synchronized_update_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->synchronized_update;
+}
+
+static int mcde_display_apply_config_default(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ ret = mcde_chnl_enable_synchronized_update(ddev->chnl_state,
+ ddev->synchronized_update);
+
+ if (ret < 0) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to enable synchronized update\n",
+ __func__);
+ return ret;
+ }
+
+ if (!ddev->update_flags)
+ return 0;
+
+ if (ddev->update_flags & UPDATE_FLAG_VIDEO_MODE)
+ mcde_chnl_stop_flow(ddev->chnl_state);
+
+ ret = mcde_chnl_apply(ddev->chnl_state);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to apply to channel\n",
+ __func__);
+ return ret;
+ }
+ ddev->update_flags = 0;
+ ddev->first_update = true;
+
+ return 0;
+}
+
+static int mcde_display_invalidate_area_default(
+ struct mcde_display_device *ddev,
+ struct mcde_rectangle *area)
+{
+ dev_vdbg(&ddev->dev, "%s\n", __func__);
+ if (area) {
+ /* take union of rects */
+ u16 t;
+ t = min(ddev->update_area.x, area->x);
+ /* note: the resulting width should be > 0 */
+ ddev->update_area.w = max(ddev->update_area.x +
+ ddev->update_area.w,
+ area->x + area->w) - t;
+ ddev->update_area.x = t;
+ t = min(ddev->update_area.y, area->y);
+ ddev->update_area.h = max(ddev->update_area.y +
+ ddev->update_area.h,
+ area->y + area->h) - t;
+ ddev->update_area.y = t;
+ /* TODO: Implement real clipping when partial refresh is
+ * activated. */
+ ddev->update_area.w = min((u16) ddev->video_mode.xres,
+ (u16) ddev->update_area.w);
+ ddev->update_area.h = min((u16) ddev->video_mode.yres,
+ (u16) ddev->update_area.h);
+ } else {
+ ddev->update_area.x = 0;
+ ddev->update_area.y = 0;
+ ddev->update_area.w = ddev->video_mode.xres;
+ ddev->update_area.h = ddev->video_mode.yres;
+ /* invalidate_area(ddev, NULL) should really reset the area to
+ * an empty rectangle, which then grows again by taking the
+ * union of later dirty rectangles (above). The code should
+ * therefore look like below; the code above is a temporary
+ * workaround for rotation.
+ * TODO: fix
+ * ddev->update_area.x = ddev->video_mode.xres;
+ * ddev->update_area.y = ddev->video_mode.yres;
+ * ddev->update_area.w = 0;
+ * ddev->update_area.h = 0;
+ */
+ }
+
+ return 0;
+}
+
+static int mcde_display_update_default(struct mcde_display_device *ddev,
+ bool tripple_buffer)
+{
+ int ret = 0;
+
+ ret = mcde_chnl_update(ddev->chnl_state, &ddev->update_area,
+ tripple_buffer);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to update channel\n", __func__);
+ return ret;
+ }
+ if (ddev->first_update && ddev->on_first_update)
+ ddev->on_first_update(ddev);
+
+ if (ddev->power_mode != MCDE_DISPLAY_PM_ON && ddev->set_power_mode) {
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_ON);
+ if (ret < 0) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to set power mode to on\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ dev_vdbg(&ddev->dev, "Overlay updated, chnl=%d\n", ddev->chnl_id);
+
+ return 0;
+}
+
+static inline int mcde_display_on_first_update_default(
+ struct mcde_display_device *ddev)
+{
+ ddev->first_update = false;
+ return 0;
+}
+
+void mcde_display_init_device(struct mcde_display_device *ddev)
+{
+ /* Setup default callbacks */
+ ddev->get_native_resolution =
+ mcde_display_get_native_resolution_default;
+ ddev->get_default_pixel_format =
+ mcde_display_get_default_pixel_format_default;
+ ddev->get_physical_size = mcde_display_get_physical_size_default;
+ ddev->set_power_mode = mcde_display_set_power_mode_default;
+ ddev->get_power_mode = mcde_display_get_power_mode_default;
+ ddev->try_video_mode = mcde_display_try_video_mode_default;
+ ddev->set_video_mode = mcde_display_set_video_mode_default;
+ ddev->get_video_mode = mcde_display_get_video_mode_default;
+ ddev->set_pixel_format = mcde_display_set_pixel_format_default;
+ ddev->get_pixel_format = mcde_display_get_pixel_format_default;
+ ddev->set_rotation = mcde_display_set_rotation_default;
+ ddev->get_rotation = mcde_display_get_rotation_default;
+ ddev->set_synchronized_update =
+ mcde_display_set_synchronized_update_default;
+ ddev->get_synchronized_update =
+ mcde_display_get_synchronized_update_default;
+ ddev->apply_config = mcde_display_apply_config_default;
+ ddev->invalidate_area = mcde_display_invalidate_area_default;
+ ddev->update = mcde_display_update_default;
+ ddev->on_first_update = mcde_display_on_first_update_default;
+
+ mutex_init(&ddev->display_lock);
+}
+
diff --git a/drivers/video/mcde/mcde_dss.c b/drivers/video/mcde/mcde_dss.c
new file mode 100644
index 00000000000..044e39ad285
--- /dev/null
+++ b/drivers/video/mcde/mcde_dss.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE display sub system driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <video/mcde_dss.h>
+
+#define to_overlay(x) container_of(x, struct mcde_overlay, kobj)
+
+void overlay_release(struct kobject *kobj)
+{
+ struct mcde_overlay *ovly = to_overlay(kobj);
+
+ kfree(ovly);
+}
+
+struct kobj_type ovly_type = {
+ .release = overlay_release,
+};
+
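+/*
+ * Write staged overlay settings to the MCDE overlay state. The display's
+ * dirty area is invalidated first; individual overlay settings are only
+ * written when the corresponding field changed (or when force is set).
+ */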
+static int apply_overlay(struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info, bool force)
+{
+ int ret = 0;
+ if (ovly->ddev->invalidate_area) {
+ /* TODO: transform ovly coord to screen coords (vmode):
+ * add offset
+ */
+ struct mcde_rectangle dirty = info->dirty;
+ mutex_lock(&ovly->ddev->display_lock);
+ ret = ovly->ddev->invalidate_area(ovly->ddev, &dirty);
+ mutex_unlock(&ovly->ddev->display_lock);
+ }
+
+ if (ovly->info.paddr != info->paddr || force)
+ mcde_ovly_set_source_buf(ovly->state, info->paddr);
+
+ if (ovly->info.stride != info->stride || ovly->info.fmt != info->fmt ||
+ force)
+ mcde_ovly_set_source_info(ovly->state, info->stride, info->fmt);
+ if (ovly->info.src_x != info->src_x ||
+ ovly->info.src_y != info->src_y ||
+ ovly->info.w != info->w ||
+ ovly->info.h != info->h || force)
+ mcde_ovly_set_source_area(ovly->state,
+ info->src_x, info->src_y, info->w, info->h);
+ if (ovly->info.dst_x != info->dst_x || ovly->info.dst_y != info->dst_y
+ || ovly->info.dst_z != info->dst_z ||
+ force)
+ mcde_ovly_set_dest_pos(ovly->state,
+ info->dst_x, info->dst_y, info->dst_z);
+
+ mcde_ovly_apply(ovly->state);
+ ovly->info = *info;
+
+ return ret;
+}
+
+/* MCDE DSS operations */
+
+int mcde_dss_open_channel(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ struct mcde_chnl_state *chnl;
+
+ mutex_lock(&ddev->display_lock);
+ /* Acquire MCDE resources */
+ chnl = mcde_chnl_get(ddev->chnl_id, ddev->fifo, ddev->port);
+ if (IS_ERR(chnl)) {
+ ret = PTR_ERR(chnl);
+ dev_warn(&ddev->dev, "Failed to acquire MCDE channel\n");
+ goto chnl_get_failed;
+ }
+ ddev->chnl_state = chnl;
+chnl_get_failed:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_open_channel);
+
+void mcde_dss_close_channel(struct mcde_display_device *ddev)
+{
+ mutex_lock(&ddev->display_lock);
+ mcde_chnl_put(ddev->chnl_state);
+ ddev->chnl_state = NULL;
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_close_channel);
+
+int mcde_dss_enable_display(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ if (ddev->enabled)
+ return 0;
+
+ mutex_lock(&ddev->display_lock);
+ mcde_chnl_enable(ddev->chnl_state);
+
+ /* Initiate display communication */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "Failed to initialize display\n");
+ goto display_failed;
+ }
+
+ ret = ddev->set_synchronized_update(ddev,
+ ddev->get_synchronized_update(ddev));
+ if (ret < 0)
+ dev_warn(&ddev->dev, "Failed to set sync\n");
+
+ ret = mcde_chnl_enable_synchronized_update(ddev->chnl_state,
+ ddev->synchronized_update);
+ if (ret < 0) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to enable synchronized update\n",
+ __func__);
+ goto enable_sync_failed;
+ }
+ /* TODO: call driver for all defaults like sync_update above */
+
+ dev_dbg(&ddev->dev, "Display enabled, chnl=%d\n",
+ ddev->chnl_id);
+ ddev->enabled = true;
+ mutex_unlock(&ddev->display_lock);
+
+ return 0;
+
+enable_sync_failed:
+ ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+display_failed:
+ mcde_chnl_disable(ddev->chnl_state);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_enable_display);
+
+void mcde_dss_disable_display(struct mcde_display_device *ddev)
+{
+ if (!ddev->enabled)
+ return;
+
+ /* TODO: Disable overlays */
+ mutex_lock(&ddev->display_lock);
+
+ mcde_chnl_stop_flow(ddev->chnl_state);
+
+ (void)ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+
+ mcde_chnl_disable(ddev->chnl_state);
+
+ ddev->enabled = false;
+ mutex_unlock(&ddev->display_lock);
+
+ dev_dbg(&ddev->dev, "Display disabled, chnl=%d\n", ddev->chnl_id);
+}
+EXPORT_SYMBOL(mcde_dss_disable_display);
+
+int mcde_dss_apply_channel(struct mcde_display_device *ddev)
+{
+ int ret;
+ if (!ddev->apply_config)
+ return -EINVAL;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->apply_config(ddev);
+ mutex_unlock(&ddev->display_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_apply_channel);
+
+struct mcde_overlay *mcde_dss_create_overlay(struct mcde_display_device *ddev,
+ struct mcde_overlay_info *info)
+{
+ struct mcde_overlay *ovly;
+
+ ovly = kzalloc(sizeof(struct mcde_overlay), GFP_KERNEL);
+ if (!ovly)
+ return NULL;
+
+ kobject_init(&ovly->kobj, &ovly_type); /* Local ref */
+ kobject_get(&ovly->kobj); /* Creator ref */
+ INIT_LIST_HEAD(&ovly->list);
+ mutex_lock(&ddev->display_lock);
+ list_add(&ddev->ovlys, &ovly->list);
+ mutex_unlock(&ddev->display_lock);
+ ovly->info = *info;
+ ovly->ddev = ddev;
+
+ return ovly;
+}
+EXPORT_SYMBOL(mcde_dss_create_overlay);
+
+void mcde_dss_destroy_overlay(struct mcde_overlay *ovly)
+{
+ list_del(&ovly->list);
+ if (ovly->state)
+ mcde_dss_disable_overlay(ovly);
+ kobject_put(&ovly->kobj);
+}
+EXPORT_SYMBOL(mcde_dss_destroy_overlay);
+
+int mcde_dss_enable_overlay(struct mcde_overlay *ovly)
+{
+ int ret;
+
+ if (!ovly->ddev->chnl_state)
+ return -EINVAL;
+
+ if (!ovly->state) {
+ struct mcde_ovly_state *state;
+ state = mcde_ovly_get(ovly->ddev->chnl_state);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ dev_warn(&ovly->ddev->dev,
+ "Failed to acquire overlay\n");
+ return ret;
+ }
+ ovly->state = state;
+ }
+
+ apply_overlay(ovly, &ovly->info, true);
+
+ dev_vdbg(&ovly->ddev->dev, "Overlay enabled, chnl=%d\n",
+ ovly->ddev->chnl_id);
+ return 0;
+}
+EXPORT_SYMBOL(mcde_dss_enable_overlay);
+
+int mcde_dss_apply_overlay(struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info)
+{
+ if (info == NULL)
+ info = &ovly->info;
+ return apply_overlay(ovly, info, false);
+}
+EXPORT_SYMBOL(mcde_dss_apply_overlay);
+
+void mcde_dss_disable_overlay(struct mcde_overlay *ovly)
+{
+ if (!ovly->state)
+ return;
+
+ mcde_ovly_put(ovly->state);
+
+ dev_dbg(&ovly->ddev->dev, "Overlay disabled, chnl=%d\n",
+ ovly->ddev->chnl_id);
+
+ ovly->state = NULL;
+}
+EXPORT_SYMBOL(mcde_dss_disable_overlay);
+
+int mcde_dss_update_overlay(struct mcde_overlay *ovly, bool tripple_buffer)
+{
+ int ret;
+ dev_vdbg(&ovly->ddev->dev, "Overlay update, chnl=%d\n",
+ ovly->ddev->chnl_id);
+
+ if (!ovly->state || !ovly->ddev->update || !ovly->ddev->invalidate_area)
+ return -EINVAL;
+
+ mutex_lock(&ovly->ddev->display_lock);
+ /* Do not perform an update if power mode is off */
+ if (ovly->ddev->get_power_mode(ovly->ddev) == MCDE_DISPLAY_PM_OFF) {
+ ret = 0;
+ goto power_mode_off;
+ }
+
+ ret = ovly->ddev->update(ovly->ddev, tripple_buffer);
+ if (ret)
+ goto update_failed;
+
+ ret = ovly->ddev->invalidate_area(ovly->ddev, NULL);
+
+power_mode_off:
+update_failed:
+ mutex_unlock(&ovly->ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_update_overlay);
+
+void mcde_dss_get_overlay_info(struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info)
+{
+ if (info)
+ *info = ovly->info;
+}
+EXPORT_SYMBOL(mcde_dss_get_overlay_info);
+
+void mcde_dss_get_native_resolution(struct mcde_display_device *ddev,
+ u16 *x_res, u16 *y_res)
+{
+ mutex_lock(&ddev->display_lock);
+ ddev->get_native_resolution(ddev, x_res, y_res);
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_get_native_resolution);
+
+enum mcde_ovly_pix_fmt mcde_dss_get_default_pixel_format(
+ struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_default_pixel_format(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_default_pixel_format);
+
+void mcde_dss_get_physical_size(struct mcde_display_device *ddev,
+ u16 *physical_width, u16 *physical_height)
+{
+ mutex_lock(&ddev->display_lock);
+ ddev->get_physical_size(ddev, physical_width, physical_height);
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_get_physical_size);
+
+int mcde_dss_try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->try_video_mode(ddev, video_mode);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_try_video_mode);
+
+int mcde_dss_set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *vmode)
+{
+ int ret = 0;
+ struct mcde_video_mode old_vmode;
+
+ mutex_lock(&ddev->display_lock);
+ /* Do not perform set_video_mode if power mode is off */
+ if (ddev->get_power_mode(ddev) == MCDE_DISPLAY_PM_OFF)
+ goto power_mode_off;
+
+ ddev->get_video_mode(ddev, &old_vmode);
+ if (memcmp(vmode, &old_vmode, sizeof(old_vmode)) == 0)
+ goto same_video_mode;
+
+ ret = ddev->set_video_mode(ddev, vmode);
+ if (ret)
+ goto set_video_mode_failed;
+
+ if (ddev->invalidate_area)
+ ret = ddev->invalidate_area(ddev, NULL);
+power_mode_off:
+same_video_mode:
+set_video_mode_failed:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_video_mode);
+
+void mcde_dss_get_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ mutex_lock(&ddev->display_lock);
+ ddev->get_video_mode(ddev, video_mode);
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_get_video_mode);
+
+int mcde_dss_set_pixel_format(struct mcde_display_device *ddev,
+ enum mcde_ovly_pix_fmt pix_fmt)
+{
+ enum mcde_ovly_pix_fmt old_pix_fmt;
+ int ret;
+
+ mutex_lock(&ddev->display_lock);
+ old_pix_fmt = ddev->get_pixel_format(ddev);
+ if (old_pix_fmt == pix_fmt) {
+ ret = 0;
+ goto same_pixel_format;
+ }
+
+ ret = ddev->set_pixel_format(ddev, pix_fmt);
+
+same_pixel_format:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_pixel_format);
+
+int mcde_dss_get_pixel_format(struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_pixel_format(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_pixel_format);
+
+int mcde_dss_set_rotation(struct mcde_display_device *ddev,
+ enum mcde_display_rotation rotation)
+{
+ int ret;
+ enum mcde_display_rotation old_rotation;
+
+ mutex_lock(&ddev->display_lock);
+ old_rotation = ddev->get_rotation(ddev);
+ if (old_rotation == rotation) {
+ ret = 0;
+ goto same_rotation;
+ }
+
+ ret = ddev->set_rotation(ddev, rotation);
+same_rotation:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_rotation);
+
+enum mcde_display_rotation mcde_dss_get_rotation(
+ struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_rotation(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_rotation);
+
+int mcde_dss_set_synchronized_update(struct mcde_display_device *ddev,
+ bool enable)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->set_synchronized_update(ddev, enable);
+ if (ret)
+ goto sync_update_failed;
+
+ if (ddev->chnl_state)
+ mcde_chnl_enable_synchronized_update(ddev->chnl_state, enable);
+ mutex_unlock(&ddev->display_lock);
+ return 0;
+
+sync_update_failed:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_synchronized_update);
+
+bool mcde_dss_get_synchronized_update(struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_synchronized_update(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_synchronized_update);
+
+int __init mcde_dss_init(void)
+{
+ return 0;
+}
+
+void mcde_dss_exit(void)
+{
+}
+
diff --git a/drivers/video/mcde/mcde_fb.c b/drivers/video/mcde/mcde_fb.c
new file mode 100644
index 00000000000..7d877ec3e4f
--- /dev/null
+++ b/drivers/video/mcde/mcde_fb.c
@@ -0,0 +1,898 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE frame buffer driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/hwmem.h>
+#include <linux/io.h>
+
+#include <linux/console.h>
+
+#include <video/mcde_fb.h>
+
+#define MCDE_FB_BPP_MAX 16
+#define MCDE_FB_VXRES_MAX 1920
+#define MCDE_FB_VYRES_MAX 2160
+
+static struct fb_ops fb_ops;
+
+struct pix_fmt_info {
+ enum mcde_ovly_pix_fmt pix_fmt;
+
+ u32 bpp;
+ struct fb_bitfield r;
+ struct fb_bitfield g;
+ struct fb_bitfield b;
+ struct fb_bitfield a;
+ u32 nonstd;
+};
+
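+/* Mapping between MCDE overlay pixel formats and fbdev bitfield layouts */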
+struct pix_fmt_info pix_fmt_map[] = {
+ {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGB565,
+ .bpp = 16,
+ .r = { .offset = 11, .length = 5 },
+ .g = { .offset = 5, .length = 6 },
+ .b = { .offset = 0, .length = 5 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBA5551,
+ .bpp = 16,
+ .r = { .offset = 11, .length = 5 },
+ .g = { .offset = 6, .length = 5 },
+ .b = { .offset = 1, .length = 5 },
+ .a = { .offset = 0, .length = 1 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBA4444,
+ .bpp = 16,
+ .r = { .offset = 12, .length = 4 },
+ .g = { .offset = 8, .length = 4 },
+ .b = { .offset = 4, .length = 4 },
+ .a = { .offset = 0, .length = 4 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_YCbCr422,
+ .bpp = 16,
+ .nonstd = MCDE_OVLYPIXFMT_YCbCr422,
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGB888,
+ .bpp = 24,
+ .r = { .offset = 16, .length = 8 },
+ .g = { .offset = 8, .length = 8 },
+ .b = { .offset = 0, .length = 8 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBA8888,
+ .bpp = 32,
+ .r = { .offset = 16, .length = 8 },
+ .g = { .offset = 8, .length = 8 },
+ .b = { .offset = 0, .length = 8 },
+ .a = { .offset = 24, .length = 8 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBX8888,
+ .bpp = 32,
+ .r = { .offset = 16, .length = 8 },
+ .g = { .offset = 8, .length = 8 },
+ .b = { .offset = 0, .length = 8 },
+ }
+};
+
+static struct platform_device mcde_fb_device = {
+ .name = "mcde_fb",
+ .id = -1,
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
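+/*
+ * Android early suspend hooks: displays not marked stay_alive are disabled
+ * at early suspend and re-enabled at late resume, under console_lock.
+ */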
+static void early_suspend(struct early_suspend *data)
+{
+ int i;
+ struct mcde_fb *mfb =
+ container_of(data, struct mcde_fb, early_suspend);
+
+ console_lock();
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i] && mfb->ovlys[i]->ddev &&
+ (mfb->ovlys[i]->ddev->stay_alive == false))
+ mcde_dss_disable_display(mfb->ovlys[i]->ddev);
+ }
+ console_unlock();
+}
+
+static void late_resume(struct early_suspend *data)
+{
+ int i;
+ struct mcde_fb *mfb =
+ container_of(data, struct mcde_fb, early_suspend);
+
+ console_lock();
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i]) {
+ struct mcde_overlay *ovly = mfb->ovlys[i];
+ (void) mcde_dss_enable_display(ovly->ddev);
+ }
+ }
+ console_unlock();
+}
+#endif
+
+/* Helpers */
+
+static struct pix_fmt_info *find_pix_fmt_info(enum mcde_ovly_pix_fmt pix_fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) {
+ if (pix_fmt_map[i].pix_fmt == pix_fmt)
+ return &pix_fmt_map[i];
+ }
+ return NULL;
+}
+
+static bool bitfield_cmp(struct fb_bitfield *bf1, struct fb_bitfield *bf2)
+{
+ return bf1->offset == bf2->offset &&
+ bf1->length == bf2->length &&
+ bf1->msb_right == bf2->msb_right;
+}
+
+static struct pix_fmt_info *var_to_pix_fmt_info(struct fb_var_screeninfo *var)
+{
+ int i;
+ struct pix_fmt_info *info;
+
+ if (var->nonstd)
+ return find_pix_fmt_info(var->nonstd);
+
+ for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) {
+ info = &pix_fmt_map[i];
+ if (info->bpp == var->bits_per_pixel &&
+ bitfield_cmp(&info->r, &var->red) &&
+ bitfield_cmp(&info->g, &var->green) &&
+ bitfield_cmp(&info->b, &var->blue) &&
+ bitfield_cmp(&info->a, &var->transp))
+ return info;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) {
+ info = &pix_fmt_map[i];
+ if (var->bits_per_pixel == info->bpp)
+ return info;
+ }
+
+ return NULL;
+}
+
+static void pix_fmt_info_to_var(struct pix_fmt_info *pix_fmt_info,
+ struct fb_var_screeninfo *var)
+{
+ var->bits_per_pixel = pix_fmt_info->bpp;
+ var->nonstd = pix_fmt_info->nonstd;
+ var->red = pix_fmt_info->r;
+ var->green = pix_fmt_info->g;
+ var->blue = pix_fmt_info->b;
+ var->transp = pix_fmt_info->a;
+}
+
+static int init_var_fmt(struct fb_var_screeninfo *var,
+ u16 w, u16 h, u16 vw, u16 vh, enum mcde_ovly_pix_fmt pix_fmt,
+ u32 rotate)
+{
+ struct pix_fmt_info *info;
+
+ info = find_pix_fmt_info(pix_fmt);
+ if (!info)
+ return -EINVAL;
+
+ var->bits_per_pixel = info->bpp;
+ var->nonstd = info->nonstd;
+ var->red = info->r;
+ var->green = info->g;
+ var->blue = info->b;
+ var->transp = info->a;
+ var->grayscale = false;
+
+ var->xres = w;
+ var->yres = h;
+ var->xres_virtual = vw;
+ var->yres_virtual = vh;
+ var->xoffset = 0;
+ var->yoffset = 0;
+ var->activate = FB_ACTIVATE_NOW;
+ var->rotate = rotate;
+
+ return 0;
+}
+
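+/*
+ * (Re)allocate the frame buffer backing store from hwmem as a contiguous,
+ * uncached, write-combined buffer, pin it and map it into kernel space.
+ * With CONFIG_MCDE_FB_AVOID_REALLOC a maximum-size buffer is allocated once.
+ */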
+static int reallocate_fb_mem(struct fb_info *fbi, u32 size)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+ void *vaddr;
+ struct hwmem_alloc *alloc;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t num_mem_chunks = 1;
+ int name;
+
+ size = PAGE_ALIGN(size);
+
+ if (size == fbi->screen_size)
+ return 0;
+
+/* TODO: Remove once hwmem has support for defragmentation */
+#ifdef CONFIG_MCDE_FB_AVOID_REALLOC
+ if (!mfb->alloc) {
+ u32 old_size = size;
+
+ size = MCDE_FB_BPP_MAX / 8 * MCDE_FB_VXRES_MAX *
+ MCDE_FB_VYRES_MAX;
+#endif
+
+ alloc = hwmem_alloc(size, HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED,
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE |
+ HWMEM_ACCESS_IMPORT),
+ HWMEM_MEM_CONTIGUOUS_SYS);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ name = hwmem_get_name(alloc);
+ if (name < 0) {
+ hwmem_release(alloc);
+ return name;
+ }
+
+ if (mfb->alloc) {
+ hwmem_kunmap(mfb->alloc);
+ hwmem_unpin(mfb->alloc);
+ hwmem_release(mfb->alloc);
+ }
+
+ (void)hwmem_pin(alloc, &mem_chunk, &num_mem_chunks);
+
+ vaddr = hwmem_kmap(alloc);
+ if (vaddr == NULL) {
+ hwmem_unpin(alloc);
+ hwmem_release(alloc);
+ return -ENOMEM;
+ }
+
+ mfb->alloc = alloc;
+ mfb->alloc_name = name;
+
+ fbi->screen_base = vaddr;
+ fbi->fix.smem_start = mem_chunk.paddr;
+
+#ifdef CONFIG_MCDE_FB_AVOID_REALLOC
+ size = old_size;
+ }
+#endif
+
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+}
+
+static void free_fb_mem(struct fb_info *fbi)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ if (mfb->alloc) {
+ hwmem_kunmap(mfb->alloc);
+ hwmem_unpin(mfb->alloc);
+ hwmem_release(mfb->alloc);
+ mfb->alloc = NULL;
+ mfb->alloc_name = 0;
+
+ fbi->fix.smem_start = 0;
+ fbi->fix.smem_len = 0;
+ fbi->screen_base = 0;
+ fbi->screen_size = 0;
+ }
+}
+
+static void init_fb(struct fb_info *fbi)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ strlcpy(fbi->fix.id, "mcde_fb", sizeof(fbi->fix.id));
+ fbi->fix.type = FB_TYPE_PACKED_PIXELS;
+ fbi->fix.visual = FB_VISUAL_DIRECTCOLOR;
+ fbi->fix.xpanstep = 1;
+ fbi->fix.ypanstep = 1;
+ fbi->flags = FBINFO_HWACCEL_DISABLED;
+ fbi->fbops = &fb_ops;
+ fbi->pseudo_palette = &mfb->pseudo_palette[0];
+}
+
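+/*
+ * Build overlay info from the current fb var/fix settings; paddr/vaddr point
+ * at the buffer selected by yoffset (used for panning/multi-buffering).
+ */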
+static void get_ovly_info(struct fb_info *fbi, struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ memset(info, 0, sizeof(*info));
+ info->paddr = fbi->fix.smem_start +
+ fbi->fix.line_length * fbi->var.yoffset;
+ info->vaddr = (u32 *)(fbi->screen_base +
+ fbi->fix.line_length * fbi->var.yoffset);
+ /* TODO: move mem check to check_var/pan_display */
+ if (info->paddr + fbi->fix.line_length * fbi->var.yres >
+ fbi->fix.smem_start + fbi->fix.smem_len) {
+ info->paddr = fbi->fix.smem_start;
+ info->vaddr = (u32 *)fbi->screen_base;
+ }
+ info->fmt = mfb->pix_fmt;
+ info->stride = fbi->fix.line_length;
+ if (ovly) {
+ info->src_x = ovly->info.src_x;
+ info->src_y = ovly->info.src_y;
+ info->dst_x = ovly->info.dst_x;
+ info->dst_y = ovly->info.dst_y;
+ info->dst_z = 1;
+ } else {
+ info->src_x = 0;
+ info->src_y = 0;
+ info->dst_x = 0;
+ info->dst_y = 0;
+ info->dst_z = 1;
+ }
+ info->w = fbi->var.xres;
+ info->h = fbi->var.yres;
+ info->dirty.x = 0;
+ info->dirty.y = 0;
+ info->dirty.w = fbi->var.xres;
+ info->dirty.h = fbi->var.yres;
+}
+
+void vmode_to_var(struct mcde_video_mode *video_mode,
+ struct fb_var_screeninfo *var)
+{
+ /* TODO: use only 1 vbp and 1 vfp */
+ var->xres = video_mode->xres;
+ var->yres = video_mode->yres;
+ var->pixclock = video_mode->pixclock;
+ var->upper_margin = video_mode->vbp;
+ var->lower_margin = video_mode->vfp;
+ var->vsync_len = video_mode->vsw;
+ var->left_margin = video_mode->hbp;
+ var->right_margin = video_mode->hfp;
+ var->hsync_len = video_mode->hsw;
+ var->vmode &= ~FB_VMODE_INTERLACED;
+ var->vmode |= video_mode->interlaced ?
+ FB_VMODE_INTERLACED : FB_VMODE_NONINTERLACED;
+}
+
+void var_to_vmode(struct fb_var_screeninfo *var,
+ struct mcde_video_mode *video_mode)
+{
+ video_mode->xres = var->xres;
+ video_mode->yres = var->yres;
+ video_mode->pixclock = var->pixclock;
+ video_mode->vbp = var->upper_margin;
+ video_mode->vfp = var->lower_margin;
+ video_mode->vsw = var->vsync_len;
+ video_mode->hbp = var->left_margin;
+ video_mode->hfp = var->right_margin;
+ video_mode->hsw = var->hsync_len;
+ video_mode->interlaced = (var->vmode & FB_VMODE_INTERLACED) ==
+ FB_VMODE_INTERLACED;
+}
+
+enum mcde_display_rotation var_to_rotation(struct fb_var_screeninfo *var)
+{
+ enum mcde_display_rotation rot;
+
+ switch (var->rotate) {
+ case FB_ROTATE_UR:
+ rot = MCDE_DISPLAY_ROT_0;
+ break;
+ case FB_ROTATE_CW:
+ rot = MCDE_DISPLAY_ROT_90_CW;
+ break;
+ case FB_ROTATE_UD:
+ rot = MCDE_DISPLAY_ROT_180_CW;
+ break;
+ case FB_ROTATE_CCW:
+ rot = MCDE_DISPLAY_ROT_90_CCW;
+ break;
+ default:
+ rot = MCDE_DISPLAY_ROT_0;
+ break;
+ }
+ dev_vdbg(&mcde_fb_device.dev, "var_rot: %d -> mcde_rot: %d\n",
+ var->rotate, rot);
+ return rot;
+}
+
+static struct mcde_display_device *fb_to_display(struct fb_info *fbi)
+{
+ int i;
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i])
+ return mfb->ovlys[i]->ddev;
+ }
+ return NULL;
+}
+
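+/*
+ * Validate and adjust var against the display: physical size, rotation,
+ * video mode and pixel format are negotiated with the DSS layer.
+ */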
+static int check_var(struct fb_var_screeninfo *var, struct fb_info *fbi,
+ struct mcde_display_device *ddev)
+{
+ int ret;
+ u16 w = -1, h = -1;
+ struct mcde_video_mode vmode;
+ struct pix_fmt_info *fmtinfo;
+
+ /* TODO: check sizes/offsets/memory validity */
+
+ /* Device physical size */
+ mcde_dss_get_physical_size(ddev, &w, &h);
+ var->width = w;
+ var->height = h;
+
+ /* Rotation */
+ if (var->rotate > 3) {
+ dev_info(&(ddev->dev), "check_var failed var->rotate\n");
+ return -EINVAL;
+ }
+
+ /* Video mode */
+ var_to_vmode(var, &vmode);
+ ret = mcde_dss_try_video_mode(ddev, &vmode);
+ if (ret < 0) {
+ dev_vdbg(&(ddev->dev), "check_var failed "
+ "mcde_dss_try_video_mode, ret = %d\n", ret);
+ return ret;
+ }
+ vmode_to_var(&vmode, var);
+
+ /* Pixel format */
+ fmtinfo = var_to_pix_fmt_info(var);
+ if (!fmtinfo) {
+ dev_vdbg(&(ddev->dev), "check_var failed fmtinfo\n");
+ return -EINVAL;
+ }
+ pix_fmt_info_to_var(fmtinfo, var);
+
+ /* Not used */
+ var->grayscale = 0;
+ var->sync = 0;
+
+ return 0;
+}
+
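+/*
+ * Apply var to hardware: reallocate frame buffer memory and, for non-fictive
+ * displays, push pixel format, rotation and video mode to DSS and update the
+ * attached overlays.
+ */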
+static int apply_var(struct fb_info *fbi, struct mcde_display_device *ddev)
+{
+ int ret, i;
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+ struct fb_var_screeninfo *var;
+ struct mcde_video_mode vmode;
+ struct pix_fmt_info *fmt;
+ u32 line_len, size;
+
+ if (!ddev)
+ return -ENODEV;
+
+ dev_vdbg(&(ddev->dev), "%s\n", __func__);
+
+ var = &fbi->var;
+
+ ddev->check_transparency = 60;
+
+ /* Reallocate memory */
+ line_len = (fbi->var.bits_per_pixel * var->xres_virtual) / 8;
+ line_len = ALIGN(line_len, MCDE_BUF_LINE_ALIGMENT);
+ size = line_len * var->yres_virtual;
+ ret = reallocate_fb_mem(fbi, size);
+ if (ret) {
+ dev_vdbg(&(ddev->dev), "apply_var failed to "
+ "reallocate mem with size = %d\n", size);
+ return ret;
+ }
+ fbi->fix.line_length = line_len;
+
+ if (ddev->fictive)
+ goto apply_var_end;
+
+ /* Apply pixel format */
+ fmt = var_to_pix_fmt_info(var);
+ mfb->pix_fmt = fmt->pix_fmt;
+
+ /* Apply rotation */
+ mcde_dss_set_rotation(ddev, var_to_rotation(var));
+ /* Apply video mode */
+ memset(&vmode, 0, sizeof(struct mcde_video_mode));
+ var_to_vmode(var, &vmode);
+ ret = mcde_dss_set_video_mode(ddev, &vmode);
+ if (ret)
+ return ret;
+
+ mcde_dss_apply_channel(ddev);
+
+ /* Apply overlay info */
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ struct mcde_overlay *ovly = mfb->ovlys[i];
+ struct mcde_overlay_info info;
+ int num_buffers;
+
+ get_ovly_info(fbi, ovly, &info);
+ (void) mcde_dss_apply_overlay(ovly, &info);
+
+ num_buffers = var->yres_virtual / var->yres;
+ mcde_dss_update_overlay(ovly, num_buffers == 3);
+ }
+
+apply_var_end:
+ return 0;
+}
+
+/* FB ops */
+
+static int mcde_fb_open(struct fb_info *fbi, int user)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int mcde_fb_release(struct fb_info *fbi, int user)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int mcde_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
+{
+ struct mcde_display_device *ddev = fb_to_display(fbi);
+
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (!ddev) {
+ printk(KERN_ERR "mcde_fb_check_var failed !ddev\n");
+ return -ENODEV;
+ }
+
+ return check_var(var, fbi, ddev);
+}
+
+static int mcde_fb_set_par(struct fb_info *fbi)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+ struct mcde_display_device *ddev = fb_to_display(fbi);
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (mfb->ovlys[0]->state == NULL &&
+ ddev->fictive == false) {
+ printk(KERN_INFO "%s() - Enable fb %p\n",
+ __func__,
+ mfb->ovlys[0]);
+ mcde_dss_enable_overlay(mfb->ovlys[0]);
+ }
+
+ return apply_var(fbi, ddev);
+}
+
+static int mcde_fb_blank(int blank, struct fb_info *fbi)
+{
+ int ret = 0;
+ struct mcde_display_device *ddev = fb_to_display(fbi);
+
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (ddev->fictive)
+ goto mcde_fb_blank_end;
+
+ switch (blank) {
+ case FB_BLANK_NORMAL:
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ mcde_dss_disable_display(ddev);
+ break;
+ case FB_BLANK_UNBLANK:
+ ret = mcde_dss_enable_display(ddev);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+mcde_fb_blank_end:
+ return ret;
+}
+
+static int mcde_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (var->xoffset == fbi->var.xoffset &&
+ var->yoffset == fbi->var.yoffset)
+ return 0;
+
+ fbi->var.xoffset = var->xoffset;
+ fbi->var.yoffset = var->yoffset;
+ return apply_var(fbi, fb_to_display(fbi));
+}
+
+static void mcde_fb_rotate(struct fb_info *fbi, int rotate)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+}
+
+static int mcde_fb_ioctl(struct fb_info *fbi, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ if (cmd == MCDE_GET_BUFFER_NAME_IOC)
+ return mfb->alloc_name;
+
+ return -EINVAL;
+}
+
+static int mcde_fb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue, unsigned int transp,
+ struct fb_info *fbi)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (regno >= 256)
+ return 1;
+
+ if (regno < 17) {
+ u32 pseudo_val;
+ u32 r, g, b;
+
+ if (fbi->var.bits_per_pixel > 16) {
+ r = red >> 8;
+ g = green >> 8;
+ b = blue >> 8;
+ } else if (fbi->var.bits_per_pixel == 16) {
+ r = red >> (3 + 8);
+ g = green >> (2 + 8);
+ b = blue >> (3 + 8);
+ } else if (fbi->var.bits_per_pixel == 15) {
+ r = red >> (3 + 8);
+ g = green >> (3 + 8);
+ b = blue >> (3 + 8);
+ } else
+ r = b = g = (regno & 15);
+ pseudo_val = r << fbi->var.red.offset;
+ pseudo_val |= g << fbi->var.green.offset;
+ pseudo_val |= b << fbi->var.blue.offset;
+
+ ((u32 *)fbi->pseudo_palette)[regno] = pseudo_val;
+ }
+
+ return 0;
+}
+
+static int mcde_fb_setcmap(struct fb_cmap *cmap, struct fb_info *fbi)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ /* Nothing to see here, move along */
+ return 0;
+}
+
+static struct fb_ops fb_ops = {
+ /* creg, cmap */
+ .owner = THIS_MODULE,
+ .fb_open = mcde_fb_open,
+ .fb_release = mcde_fb_release,
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = mcde_fb_check_var,
+ .fb_set_par = mcde_fb_set_par,
+ .fb_blank = mcde_fb_blank,
+ .fb_pan_display = mcde_fb_pan_display,
+ .fb_rotate = mcde_fb_rotate,
+ .fb_ioctl = mcde_fb_ioctl,
+ .fb_setcolreg = mcde_fb_setcolreg,
+ .fb_setcmap = mcde_fb_setcmap,
+};
+
+/* FB driver */
+
+struct fb_info *mcde_fb_create(struct mcde_display_device *ddev,
+ u16 w, u16 h, u16 vw, u16 vh, enum mcde_ovly_pix_fmt pix_fmt,
+ u32 rotate)
+{
+ int ret = 0;
+ struct fb_info *fbi;
+ struct mcde_fb *mfb;
+ struct mcde_overlay *ovly = NULL;
+ struct mcde_overlay_info ovly_info;
+
+ dev_vdbg(&ddev->dev, "%s\n", __func__);
+ if (!ddev->initialized) {
+ dev_warn(&ddev->dev, "%s: Device not initialized\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Init fb */
+ fbi = framebuffer_alloc(sizeof(struct mcde_fb), &mcde_fb_device.dev);
+ if (fbi == NULL) {
+ ret = -ENOMEM;
+ goto fb_alloc_failed;
+ }
+ init_fb(fbi);
+ mfb = to_mcde_fb(fbi);
+
+ if (ddev->fictive == false) {
+ ret = mcde_dss_open_channel(ddev);
+ if (ret)
+ goto channel_open_failed;
+
+ ret = mcde_dss_enable_display(ddev);
+ if (ret)
+ goto display_enable_failed;
+ }
+
+ /* Prepare var and allocate frame buffer memory */
+ init_var_fmt(&fbi->var, w, h, vw, vh, pix_fmt, rotate);
+ check_var(&fbi->var, fbi, ddev);
+ ret = apply_var(fbi, ddev);
+ if (ret)
+ goto apply_var_failed;
+
+ if (ddev->fictive == false)
+ mcde_dss_set_pixel_format(ddev, ddev->port->pixel_format);
+
+ /* Setup overlay */
+ get_ovly_info(fbi, NULL, &ovly_info);
+ ovly = mcde_dss_create_overlay(ddev, &ovly_info);
+ if (!ovly) {
+ ret = -ENOMEM;
+ goto ovly_alloc_failed;
+ }
+ mfb->ovlys[0] = ovly;
+ mfb->num_ovlys = 1;
+
+ if (ddev->fictive == false) {
+ ret = mcde_dss_enable_overlay(ovly);
+ if (ret)
+ goto ovly_enable_failed;
+ }
+
+ mfb->id = ddev->id;
+
+ /* Register framebuffer */
+ ret = register_framebuffer(fbi);
+ if (ret)
+ goto fb_register_failed;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret)
+ dev_warn(&ddev->dev, "%s: Allocate color map memory failed!\n",
+ __func__);
+
+ ddev->fbi = fbi;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ if (ddev->fictive == false) {
+ mfb->early_suspend.level =
+ EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ mfb->early_suspend.suspend = early_suspend;
+ mfb->early_suspend.resume = late_resume;
+ register_early_suspend(&mfb->early_suspend);
+ }
+#endif
+
+ goto out;
+fb_register_failed:
+ mcde_dss_disable_overlay(ovly);
+ovly_enable_failed:
+ mcde_dss_destroy_overlay(ovly);
+ovly_alloc_failed:
+ free_fb_mem(fbi);
+apply_var_failed:
+ mcde_dss_disable_display(ddev);
+display_enable_failed:
+ mcde_dss_close_channel(ddev);
+channel_open_failed:
+ framebuffer_release(fbi);
+ fbi = NULL;
+fb_alloc_failed:
+out:
+ return ret ? ERR_PTR(ret) : fbi;
+}
+EXPORT_SYMBOL(mcde_fb_create);
+
+int mcde_fb_attach_overlay(struct fb_info *fb_info, struct mcde_overlay *ovl)
+{
+ /* TODO: Attach extra overlay targets */
+ return -EINVAL;
+}
+
+void mcde_fb_destroy(struct mcde_display_device *dev)
+{
+ struct mcde_fb *mfb;
+ int i;
+
+ dev_vdbg(&dev->dev, "%s\n", __func__);
+
+ if (dev->fictive == false) {
+ mcde_dss_disable_display(dev);
+ mcde_dss_close_channel(dev);
+ }
+
+ mfb = to_mcde_fb(dev->fbi);
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i])
+ mcde_dss_destroy_overlay(mfb->ovlys[i]);
+ }
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ if (dev->fictive == false)
+ unregister_early_suspend(&mfb->early_suspend);
+#endif
+ fb_dealloc_cmap(&dev->fbi->cmap);
+
+ unregister_framebuffer(dev->fbi);
+ free_fb_mem(dev->fbi);
+ framebuffer_release(dev->fbi);
+ dev->fbi = NULL;
+}
+
+/* Overlay fbs' platform device */
+static int mcde_fb_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int mcde_fb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver mcde_fb_driver = {
+ .probe = mcde_fb_probe,
+ .remove = mcde_fb_remove,
+ .driver = {
+ .name = "mcde_fb",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* MCDE fb init */
+
+int __init mcde_fb_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mcde_fb_driver);
+ if (ret)
+ goto fb_driver_failed;
+ ret = platform_device_register(&mcde_fb_device);
+ if (ret)
+ goto fb_device_failed;
+
+ goto out;
+fb_device_failed:
+ platform_driver_unregister(&mcde_fb_driver);
+fb_driver_failed:
+out:
+ return ret;
+}
+
+void mcde_fb_exit(void)
+{
+ platform_device_unregister(&mcde_fb_device);
+ platform_driver_unregister(&mcde_fb_driver);
+}
diff --git a/drivers/video/mcde/mcde_hw.c b/drivers/video/mcde/mcde_hw.c
new file mode 100644
index 00000000000..533b4d797d8
--- /dev/null
+++ b/drivers/video/mcde/mcde_hw.c
@@ -0,0 +1,3709 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <video/mcde.h>
+#include "dsilink_regs.h"
+#include "mcde_regs.h"
+#include "mcde_debugfs.h"
+
+
+/* MCDE channel states
+ *
+ * Allowed state transitions:
+ * IDLE <-> SUSPEND
+ * IDLE <-> DSI_READ
+ * IDLE <-> DSI_WRITE
+ * IDLE -> SETUP -> (WAIT_TE ->) RUNNING -> STOPPING1 -> STOPPING2 -> IDLE
+ * WAIT_TE -> STOPPED (for missing TE to allow re-enable)
+ */
+enum chnl_state {
+ CHNLSTATE_SUSPEND, /* HW in suspended mode, initial state */
+ CHNLSTATE_IDLE, /* Channel aquired, but not running, FLOEN==0 */
+ CHNLSTATE_DSI_READ, /* Executing DSI read */
+ CHNLSTATE_DSI_WRITE, /* Executing DSI write */
+ CHNLSTATE_SETUP, /* Channel register setup to prepare for running */
+ CHNLSTATE_WAIT_TE, /* Waiting for BTA or external TE */
+ CHNLSTATE_RUNNING, /* Update started, FLOEN=1, FLOEN==1 */
+ CHNLSTATE_STOPPING, /* Stopping, FLOEN=0, FLOEN==1, awaiting VCMP */
+ CHNLSTATE_STOPPED, /* Stopped, after VCMP, FLOEN==0|1 */
+};
+
+enum dsi_lane_status {
+ DSI_LANE_STATE_START = 0x00,
+ DSI_LANE_STATE_IDLE = 0x01,
+ DSI_LANE_STATE_WRITE = 0x02,
+ DSI_LANE_STATE_ULPM = 0x03,
+};
+
+static int set_channel_state_atomic(struct mcde_chnl_state *chnl,
+ enum chnl_state state);
+static int set_channel_state_sync(struct mcde_chnl_state *chnl,
+ enum chnl_state state);
+static void stop_channel(struct mcde_chnl_state *chnl);
+static int _mcde_chnl_enable(struct mcde_chnl_state *chnl);
+static int _mcde_chnl_apply(struct mcde_chnl_state *chnl);
+static void disable_flow(struct mcde_chnl_state *chnl);
+static void enable_flow(struct mcde_chnl_state *chnl);
+static void do_softwaretrig(struct mcde_chnl_state *chnl);
+static void dsi_te_poll_req(struct mcde_chnl_state *chnl);
+static void dsi_te_poll_set_timer(struct mcde_chnl_state *chnl,
+ unsigned int timeout);
+static void dsi_te_timer_function(unsigned long value);
+static int wait_for_vcmp(struct mcde_chnl_state *chnl);
+static int probe_hw(struct platform_device *pdev);
+static void wait_for_flow_disabled(struct mcde_chnl_state *chnl);
+
+#define OVLY_TIMEOUT 100
+#define CHNL_TIMEOUT 100
+#define FLOW_STOP_TIMEOUT 20
+#define SCREEN_PPL_HIGH 1280
+#define SCREEN_PPL_CEA2 720
+#define SCREEN_LPF_CEA2 480
+#define DSI_DELAY0_CEA2_ADD 10
+
+#define MCDE_SLEEP_WATCHDOG 500
+#define DSI_TE_NO_ANSWER_TIMEOUT_INIT 2500
+#define DSI_TE_NO_ANSWER_TIMEOUT 250
+#define DSI_WAIT_FOR_ULPM_STATE_MS 1
+#define DSI_ULPM_STATE_NBR_OF_RETRIES 10
+#define DSI_READ_TIMEOUT 200
+#define DSI_WRITE_CMD_TIMEOUT 1000
+#define DSI_READ_DELAY 5
+#define DSI_READ_NBR_OF_RETRIES 2
+#define MCDE_FLOWEN_MAX_TRIAL 60
+#define DSI_RESET_SW 0x7
+
+#define MCDE_VERSION_4_1_3 0x04010300
+#define MCDE_VERSION_4_0_4 0x04000400
+#define MCDE_VERSION_3_0_8 0x03000800
+#define MCDE_VERSION_3_0_5 0x03000500
+#define MCDE_VERSION_1_0_4 0x01000400
+
+#define CLK_MCDE "mcde"
+#define CLK_DPI "lcd"
+
+static u8 *mcdeio;
+static u8 **dsiio;
+static struct platform_device *mcde_dev;
+static u8 num_dsilinks;
+static u8 num_channels;
+static u8 num_overlays;
+static int mcde_irq;
+static u32 input_fifo_size;
+static u32 output_fifo_ab_size;
+static u32 output_fifo_c0c1_size;
+
+static struct regulator *regulator_vana;
+static struct regulator *regulator_mcde_epod;
+static struct regulator *regulator_esram_epod;
+static struct clk *clock_mcde;
+
+/* TODO remove when all platforms support dsilp and dsihs clocks */
+static struct clk *clock_dsi;
+static struct clk *clock_dsi_lp;
+
+static u8 mcde_is_enabled;
+static struct delayed_work hw_timeout_work;
+static u8 dsi_pll_is_enabled;
+static u8 dsi_ifc_is_supported;
+static u8 dsi_use_clk_framework;
+static u32 mcde_clk_rate; /* In Hz */
+
+static struct mutex mcde_hw_lock;
+static inline void mcde_lock(const char *func, int line)
+{
+ mutex_lock(&mcde_hw_lock);
+ dev_vdbg(&mcde_dev->dev, "Enter MCDE: %s:%d\n", func, line);
+}
+
+static inline void mcde_unlock(const char *func, int line)
+{
+ dev_vdbg(&mcde_dev->dev, "Exit MCDE: %s:%d\n", func, line);
+ mutex_unlock(&mcde_hw_lock);
+}
+
+static inline bool mcde_trylock(const char *func, int line)
+{
+ bool locked = mutex_trylock(&mcde_hw_lock) == 1;
+ if (locked)
+ dev_vdbg(&mcde_dev->dev, "Enter MCDE: %s:%d\n", func, line);
+ return locked;
+}
+
+static u8 mcde_dynamic_power_management = true;
+
+static inline u32 dsi_rreg(int i, u32 reg)
+{
+ return readl(dsiio[i] + reg);
+}
+static inline void dsi_wreg(int i, u32 reg, u32 val)
+{
+ writel(val, dsiio[i] + reg);
+}
+
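+/*
+ * Register field helpers: the <REG>_<FIELD>_MASK/_SHIFT constants from the
+ * register headers are used for field read and read-modify-write access.
+ */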
+#define dsi_rfld(__i, __reg, __fld) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ ((dsi_rreg(__i, __reg) & mask) >> shift); \
+})
+
+#define dsi_wfld(__i, __reg, __fld, __val) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ const u32 oldval = dsi_rreg(__i, __reg); \
+ const u32 newval = ((__val) << shift); \
+ dsi_wreg(__i, __reg, (oldval & ~mask) | (newval & mask)); \
+})
+
+static inline u32 mcde_rreg(u32 reg)
+{
+ return readl(mcdeio + reg);
+}
+static inline void mcde_wreg(u32 reg, u32 val)
+{
+ writel(val, mcdeio + reg);
+}
+
+
+#define mcde_rfld(__reg, __fld) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ ((mcde_rreg(__reg) & mask) >> shift); \
+})
+
+#define mcde_wfld(__reg, __fld, __val) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ const u32 oldval = mcde_rreg(__reg); \
+ const u32 newval = ((__val) << shift); \
+ mcde_wreg(__reg, (oldval & ~mask) | (newval & mask)); \
+})
+
+struct ovly_regs {
+ bool enabled;
+ bool dirty;
+ bool dirty_buf;
+
+ u8 ch_id;
+ u32 baseaddress0;
+ u32 baseaddress1;
+ u8 bits_per_pixel;
+ u8 bpp;
+ bool bgr;
+ bool bebo;
+ bool opq;
+ u8 col_conv;
+ u8 alpha_source;
+ u8 alpha_value;
+ u8 pixoff;
+ u16 ppl;
+ u16 lpf;
+ u16 cropx;
+ u16 cropy;
+ u16 xpos;
+ u16 ypos;
+ u8 z;
+};
+
+struct mcde_ovly_state {
+ bool inuse;
+ u8 idx; /* MCDE overlay index */
+ struct mcde_chnl_state *chnl; /* Owner channel */
+ bool dirty;
+ bool dirty_buf;
+
+ /* Staged settings */
+ u32 paddr;
+ u16 stride;
+ enum mcde_ovly_pix_fmt pix_fmt;
+
+ u16 src_x;
+ u16 src_y;
+ u16 dst_x;
+ u16 dst_y;
+ u16 dst_z;
+ u16 w;
+ u16 h;
+
+ u8 alpha_source;
+ u8 alpha_value;
+
+ /* Applied settings */
+ struct ovly_regs regs;
+};
+
+static struct mcde_ovly_state *overlays;
+
+struct chnl_regs {
+ bool dirty;
+
+ bool floen;
+ u16 x;
+ u16 y;
+ u16 ppl;
+ u16 lpf;
+ u8 bpp;
+ bool internal_clk; /* CLKTYPE field */
+ u16 pcd;
+ u8 clksel;
+ u8 cdwin;
+ u16 (*map_r)(u8);
+ u16 (*map_g)(u8);
+ u16 (*map_b)(u8);
+ bool palette_enable;
+ bool bcd;
+ bool synchronized_update;
+ bool roten;
+ u8 rotdir;
+ u32 rotbuf1;
+ u32 rotbuf2;
+
+ /* Blending */
+ u8 blend_ctrl;
+ bool blend_en;
+ u8 alpha_blend;
+
+ /* DSI */
+ u8 dsipacking;
+};
+
+struct col_regs {
+ bool dirty;
+
+ u16 y_red;
+ u16 y_green;
+ u16 y_blue;
+ u16 cb_red;
+ u16 cb_green;
+ u16 cb_blue;
+ u16 cr_red;
+ u16 cr_green;
+ u16 cr_blue;
+ u16 off_y;
+ u16 off_cb;
+ u16 off_cr;
+};
+
+struct tv_regs {
+ bool dirty;
+
+ u16 dho; /* TV mode: left border width; destination horizontal offset */
+ /* LCD MODE: horizontal back porch */
+ u16 alw; /* TV mode: right border width */
+ /* LCD mode: horizontal front porch */
+ u16 hsw; /* horizontal synch width */
+ u16 dvo; /* TV mode: top border width; destination vertical offset */
+ /* LCD MODE: vertical back porch */
+ u16 bsl; /* TV mode: bottom border width; blanking start line */
+ /* LCD MODE: vertical front porch */
+ /* field 1 */
+ u16 bel1; /* TV mode: field total vertical blanking lines */
+ /* LCD mode: vertical sync width */
+ u16 fsl1; /* field vbp */
+ /* field 2 */
+ u16 bel2;
+ u16 fsl2;
+ u8 tv_mode;
+ bool sel_mode_tv;
+ bool inv_clk;
+ bool interlaced_en;
+ u32 lcdtim1;
+};
+
+struct mcde_chnl_state {
+ bool enabled;
+ bool reserved;
+ enum mcde_chnl id;
+ enum mcde_fifo fifo;
+ struct mcde_port port;
+ struct mcde_ovly_state *ovly0;
+ struct mcde_ovly_state *ovly1;
+ enum chnl_state state;
+ wait_queue_head_t state_waitq;
+ wait_queue_head_t vcmp_waitq;
+ atomic_t vcmp_cnt;
+ struct timer_list dsi_te_timer;
+ struct clk *clk_dsi_lp;
+ struct clk *clk_dsi_hs;
+ struct clk *clk_dpi;
+
+ enum mcde_display_power_mode power_mode;
+
+ /* Staged settings */
+ u16 (*map_r)(u8);
+ u16 (*map_g)(u8);
+ u16 (*map_b)(u8);
+ bool palette_enable;
+ bool synchronized_update;
+ struct mcde_video_mode vmode;
+ enum mcde_display_rotation rotation;
+ u32 rotbuf1;
+ u32 rotbuf2;
+
+ struct mcde_col_transform rgb_2_ycbcr;
+ struct mcde_col_transform ycbcr_2_rgb;
+ struct mcde_col_transform *transform;
+
+ /* Blending */
+ u8 blend_ctrl;
+ bool blend_en;
+ u8 alpha_blend;
+
+ /* Applied settings */
+ struct chnl_regs regs;
+ struct col_regs col_regs;
+ struct tv_regs tv_regs;
+
+ /* an interlaced digital TV signal generates a VCMP per field */
+ bool vcmp_per_field;
+ bool even_vcmp;
+
+ bool formatter_updated;
+ bool esram_is_enabled;
+};
+
+static struct mcde_chnl_state *channels;
+/*
+ * Wait for CSM_RUNNING, all data sent for display
+ */
+static inline void wait_while_dsi_running(int lnk)
+{
+ u8 counter = DSI_READ_TIMEOUT;
+ while (dsi_rfld(lnk, DSI_CMD_MODE_STS, CSM_RUNNING) && --counter) {
+ dev_vdbg(&mcde_dev->dev,
+ "%s: DSI link %u read running state retry %u times\n"
+ , __func__, lnk, (DSI_READ_TIMEOUT - counter));
+ udelay(DSI_READ_DELAY);
+ }
+ WARN_ON(!counter);
+ if (!counter)
+ dev_warn(&mcde_dev->dev,
+ "%s: DSI link %u read timeout!\n", __func__, lnk);
+}
+
+static void enable_clocks_and_power(struct platform_device *pdev)
+{
+ struct mcde_platform_data *pdata = pdev->dev.platform_data;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ /* VANA should be enabled before a DSS hard reset */
+ if (regulator_vana)
+ WARN_ON_ONCE(regulator_enable(regulator_vana));
+
+ WARN_ON_ONCE(regulator_enable(regulator_mcde_epod));
+
+ if (!dsi_use_clk_framework)
+ pdata->platform_set_clocks();
+
+ WARN_ON_ONCE(clk_enable(clock_mcde));
+}
+
+static void disable_clocks_and_power(struct platform_device *pdev)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ clk_disable(clock_mcde);
+
+ WARN_ON_ONCE(regulator_disable(regulator_mcde_epod));
+
+ if (regulator_vana)
+ WARN_ON_ONCE(regulator_disable(regulator_vana));
+}
+
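+/*
+ * Base MCDE setup: output muxing, clearing and enabling the VCMP and error
+ * interrupts, and sync pulse filtering.
+ */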
+static void update_mcde_registers(void)
+{
+ struct mcde_platform_data *pdata = mcde_dev->dev.platform_data;
+
+ /* Setup output muxing */
+ mcde_wreg(MCDE_CONF0,
+ MCDE_CONF0_IFIFOCTRLWTRMRKLVL(7) |
+ MCDE_CONF0_OUTMUX0(pdata->outmux[0]) |
+ MCDE_CONF0_OUTMUX1(pdata->outmux[1]) |
+ MCDE_CONF0_OUTMUX2(pdata->outmux[2]) |
+ MCDE_CONF0_OUTMUX3(pdata->outmux[3]) |
+ MCDE_CONF0_OUTMUX4(pdata->outmux[4]) |
+ pdata->syncmux);
+
+ mcde_wfld(MCDE_RISPP, VCMPARIS, 1);
+ mcde_wfld(MCDE_RISPP, VCMPBRIS, 1);
+ mcde_wfld(MCDE_RISPP, VCMPC0RIS, 1);
+ mcde_wfld(MCDE_RISPP, VCMPC1RIS, 1);
+
+ /* Enable channel VCMP interrupts */
+ mcde_wreg(MCDE_IMSCPP,
+ MCDE_IMSCPP_VCMPAIM(true) |
+ MCDE_IMSCPP_VCMPBIM(true) |
+ MCDE_IMSCPP_VCMPC0IM(true) |
+ MCDE_IMSCPP_VCMPC1IM(true));
+
+ mcde_wreg(MCDE_IMSCCHNL, MCDE_IMSCCHNL_CHNLAIM(0xf));
+ mcde_wreg(MCDE_IMSCERR, 0xFFFF01FF);
+
+ /* Setup sync pulse length
+ * Setting VSPMAX=0 disables the filter and VSYNC
+ * is generated after VSPMIN mcde cycles
+ */
+ mcde_wreg(MCDE_VSCRC0,
+ MCDE_VSCRC0_VSPMIN(0) |
+ MCDE_VSCRC0_VSPMAX(0));
+ mcde_wreg(MCDE_VSCRC1,
+ MCDE_VSCRC1_VSPMIN(1) |
+ MCDE_VSCRC1_VSPMAX(0xff));
+}
+
+static void dsi_link_handle_ulpm(struct mcde_port *port, bool enter_ulpm)
+{
+ u8 link = port->link;
+ u8 num_data_lanes = port->phy.dsi.num_data_lanes;
+ u8 nbr_of_retries = 0;
+ u8 lane_state;
+
+ /*
+ * The D-PHY protocol specifies the time to leave the ULP mode
+ * in ms. It will at least take 1 ms to exit ULPM.
+ * The ULPOUT time value is using number of system clock ticks
+ * divided by 1000. The system clock for the DSI link is the MCDE
+ * clock.
+ */
+ dsi_wreg(link, DSI_MCTL_ULPOUT_TIME,
+ DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME(0x1FF) |
+ DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME(0x1FF));
+
+ dsi_wfld(link, DSI_MCTL_MAIN_EN, DAT1_ULPM_REQ, enter_ulpm);
+ dsi_wfld(link, DSI_MCTL_MAIN_EN, DAT2_ULPM_REQ,
+ enter_ulpm && num_data_lanes == 2);
+
+ if (enter_ulpm)
+ lane_state = DSI_LANE_STATE_ULPM;
+ else
+ lane_state = DSI_LANE_STATE_IDLE;
+
+ /* Wait for data lanes to enter ULPM */
+ while (dsi_rfld(link, DSI_MCTL_LANE_STS, DATLANE1_STATE)
+ != lane_state ||
+ (dsi_rfld(link, DSI_MCTL_LANE_STS, DATLANE2_STATE)
+ != lane_state &&
+ num_data_lanes > 1)) {
+ mdelay(DSI_WAIT_FOR_ULPM_STATE_MS);
+ if (nbr_of_retries++ == DSI_ULPM_STATE_NBR_OF_RETRIES) {
+ dev_warn(&mcde_dev->dev,
+ "Could not enter correct state=%d (link=%d)!\n",
+ lane_state, link);
+ break;
+ }
+ }
+
+ dsi_wfld(link, DSI_MCTL_MAIN_EN, CLKLANE_ULPM_REQ, enter_ulpm);
+ nbr_of_retries = 0;
+
+ /* Wait for clock lane to enter ULPM */
+ while (dsi_rfld(link, DSI_MCTL_LANE_STS, CLKLANE_STATE)
+ != lane_state) {
+ mdelay(DSI_WAIT_FOR_ULPM_STATE_MS);
+ if (nbr_of_retries++ == DSI_ULPM_STATE_NBR_OF_RETRIES) {
+ dev_warn(&mcde_dev->dev,
+ "Could not enter correct state=%d (link=%d)!\n",
+ lane_state, link);
+ break;
+ }
+ }
+}
+
+static int dsi_link_enable(struct mcde_chnl_state *chnl)
+{
+ int ret = 0;
+ u8 link = chnl->port.link;
+
+ if (dsi_use_clk_framework) {
+ prcmu_write(DB8500_PRCM_DSI_SW_RESET, DSI_RESET_SW);
+ WARN_ON_ONCE(clk_enable(chnl->clk_dsi_lp));
+ WARN_ON_ONCE(clk_enable(chnl->clk_dsi_hs));
+ } else {
+ WARN_ON_ONCE(clk_enable(clock_dsi));
+ WARN_ON_ONCE(clk_enable(clock_dsi_lp));
+
+ if (!dsi_pll_is_enabled) {
+ struct mcde_platform_data *pdata =
+ mcde_dev->dev.platform_data;
+ ret = pdata->platform_enable_dsipll();
+ if (ret < 0) {
+ dev_warn(&mcde_dev->dev, "%s: "
+ "enable_dsipll failed ret = %d\n",
+ __func__, ret);
+ goto enable_dsipll_err;
+ }
+ dev_dbg(&mcde_dev->dev, "%s enable dsipll\n",
+ __func__);
+ }
+ dsi_pll_is_enabled++;
+ }
+
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, LINK_EN, true);
+
+ if (dsi_rfld(link, DSI_MCTL_LANE_STS, CLKLANE_STATE) ==
+ DSI_LANE_STATE_ULPM ||
+ dsi_rfld(link, DSI_MCTL_LANE_STS, DATLANE1_STATE)
+ == DSI_LANE_STATE_ULPM ||
+ dsi_rfld(link, DSI_MCTL_LANE_STS, DATLANE2_STATE)
+ == DSI_LANE_STATE_ULPM) {
+ /* Switch hs clock to sys_clk */
+ dsi_wfld(link, DSI_MCTL_PLL_CTL, PLL_OUT_SEL, 0x1);
+ dsi_link_handle_ulpm(&chnl->port, false);
+ /* Switch hs clock to tx_byte_hs_clk */
+ dsi_wfld(link, DSI_MCTL_PLL_CTL, PLL_OUT_SEL, 0x0);
+ }
+
+ dev_dbg(&mcde_dev->dev, "DSI%d LINK_EN\n", link);
+
+ return 0;
+
+enable_dsipll_err:
+ clk_disable(clock_dsi_lp);
+ clk_disable(clock_dsi);
+ return ret;
+}
+
+static void dsi_link_disable(struct mcde_chnl_state *chnl, bool suspend)
+{
+ wait_while_dsi_running(chnl->port.link);
+ /* only enter ULPM when device is suspended */
+ if (suspend)
+ dsi_link_handle_ulpm(&chnl->port, true);
+ if (dsi_use_clk_framework) {
+ clk_disable(chnl->clk_dsi_lp);
+ clk_disable(chnl->clk_dsi_hs);
+ } else {
+ if (dsi_pll_is_enabled && (--dsi_pll_is_enabled == 0)) {
+ struct mcde_platform_data *pdata =
+ mcde_dev->dev.platform_data;
+ dev_dbg(&mcde_dev->dev, "%s disable dsipll\n",
+ __func__);
+ pdata->platform_disable_dsipll();
+ }
+ clk_disable(clock_dsi);
+ clk_disable(clock_dsi_lp);
+ }
+
+ dsi_wfld(chnl->port.link, DSI_MCTL_MAIN_DATA_CTL, LINK_EN, false);
+}
+
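+/*
+ * Stop and suspend channels that are enabled but not running (all channels
+ * when force_disable is set). Clocks, power and the IRQ are released only
+ * when no channel remains running.
+ */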
+static void disable_mcde_hw(bool force_disable, bool suspend)
+{
+ int i;
+ bool mcde_up = false;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!mcde_is_enabled)
+ return;
+
+ for (i = 0; i < num_channels; i++) {
+ struct mcde_chnl_state *chnl = &channels[i];
+ if (force_disable || (chnl->enabled &&
+ chnl->state != CHNLSTATE_RUNNING)) {
+ stop_channel(chnl);
+ set_channel_state_sync(chnl, CHNLSTATE_SUSPEND);
+
+ if (chnl->formatter_updated) {
+ if (chnl->port.type == MCDE_PORTTYPE_DSI)
+ dsi_link_disable(chnl, suspend);
+ else if (chnl->port.type == MCDE_PORTTYPE_DPI)
+ clk_disable(chnl->clk_dpi);
+ chnl->formatter_updated = false;
+ }
+ if (chnl->esram_is_enabled) {
+ WARN_ON_ONCE(regulator_disable(
+ regulator_esram_epod));
+ chnl->esram_is_enabled = false;
+ }
+ } else if (chnl->enabled && chnl->state == CHNLSTATE_RUNNING) {
+ mcde_up = true;
+ }
+ }
+
+ if (mcde_up)
+ return;
+
+ free_irq(mcde_irq, &mcde_dev->dev);
+
+ disable_clocks_and_power(mcde_dev);
+
+ mcde_is_enabled = false;
+}
+
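+/*
+ * Translate the channel video mode into DPI timing settings. In TV (656)
+ * mode the vertical blanking is split between the two fields; in LCD mode
+ * the porches map directly and sync polarities are taken from the port.
+ */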
+static void dpi_video_mode_apply(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ chnl->tv_regs.interlaced_en = chnl->vmode.interlaced;
+
+ chnl->tv_regs.sel_mode_tv = chnl->port.phy.dpi.tv_mode;
+ if (chnl->tv_regs.sel_mode_tv) {
+ /* TV mode */
+ u32 bel;
+ /* -4 since hsw is excluding SAV/EAV, 2 bytes each */
+ chnl->tv_regs.hsw = chnl->vmode.hbp + chnl->vmode.hfp - 4;
+ /* vbp_field2 = vbp_field1 + 1 */
+ chnl->tv_regs.fsl1 = chnl->vmode.vbp / 2;
+ chnl->tv_regs.fsl2 = chnl->vmode.vbp - chnl->tv_regs.fsl1;
+ /* +1 since vbp_field2 = vbp_field1 + 1 */
+ bel = chnl->vmode.vbp + chnl->vmode.vfp;
+ /* in TV mode: bel2 = bel1 + 1 */
+ chnl->tv_regs.bel1 = bel / 2;
+ chnl->tv_regs.bel2 = bel - chnl->tv_regs.bel1;
+ if (chnl->port.phy.dpi.bus_width == 4)
+ chnl->tv_regs.tv_mode = MCDE_TVCRA_TVMODE_SDTV_656P_BE;
+ else
+ chnl->tv_regs.tv_mode = MCDE_TVCRA_TVMODE_SDTV_656P;
+ chnl->tv_regs.inv_clk = true;
+ } else {
+ /* LCD mode */
+ u32 polarity;
+ chnl->tv_regs.hsw = chnl->vmode.hsw;
+ chnl->tv_regs.dho = chnl->vmode.hbp;
+ chnl->tv_regs.alw = chnl->vmode.hfp;
+ chnl->tv_regs.bel1 = chnl->vmode.vsw;
+ chnl->tv_regs.bel2 = chnl->tv_regs.bel1;
+ chnl->tv_regs.dvo = chnl->vmode.vbp;
+ chnl->tv_regs.bsl = chnl->vmode.vfp;
+ chnl->tv_regs.fsl1 = 0;
+ chnl->tv_regs.fsl2 = 0;
+ polarity = chnl->port.phy.dpi.polarity;
+ chnl->tv_regs.lcdtim1 = MCDE_LCDTIM1A_IHS(
+ (polarity & DPI_ACT_LOW_HSYNC) != 0);
+ chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IVS(
+ (polarity & DPI_ACT_LOW_VSYNC) != 0);
+ chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IOE(
+ (polarity & DPI_ACT_LOW_DATA_ENABLE) != 0);
+ chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IPC(
+ (polarity & DPI_ACT_ON_FALLING_EDGE) != 0);
+ }
+ chnl->tv_regs.dirty = true;
+}
+
+static void update_dpi_registers(enum mcde_chnl chnl_id, struct tv_regs *regs)
+{
+ u8 idx = chnl_id;
+
+ dev_dbg(&mcde_dev->dev, "%s\n", __func__);
+ mcde_wreg(MCDE_TVCRA + idx * MCDE_TVCRA_GROUPOFFSET,
+ MCDE_TVCRA_SEL_MOD(regs->sel_mode_tv) |
+ MCDE_TVCRA_INTEREN(regs->interlaced_en) |
+ MCDE_TVCRA_IFIELD(0) |
+ MCDE_TVCRA_TVMODE(regs->tv_mode) |
+ MCDE_TVCRA_SDTVMODE(MCDE_TVCRA_SDTVMODE_Y0CBY1CR) |
+ MCDE_TVCRA_CKINV(regs->inv_clk) |
+ MCDE_TVCRA_AVRGEN(0));
+ mcde_wreg(MCDE_TVBLUA + idx * MCDE_TVBLUA_GROUPOFFSET,
+ MCDE_TVBLUA_TVBLU(MCDE_CONFIG_TVOUT_BACKGROUND_LUMINANCE) |
+ MCDE_TVBLUA_TVBCB(MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CB)|
+ MCDE_TVBLUA_TVBCR(MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CR));
+
+ /* Vertical timing registers */
+ mcde_wreg(MCDE_TVDVOA + idx * MCDE_TVDVOA_GROUPOFFSET,
+ MCDE_TVDVOA_DVO1(regs->dvo) |
+ MCDE_TVDVOA_DVO2(regs->dvo));
+ mcde_wreg(MCDE_TVBL1A + idx * MCDE_TVBL1A_GROUPOFFSET,
+ MCDE_TVBL1A_BEL1(regs->bel1) |
+ MCDE_TVBL1A_BSL1(regs->bsl));
+ mcde_wreg(MCDE_TVBL2A + idx * MCDE_TVBL1A_GROUPOFFSET,
+ MCDE_TVBL2A_BEL2(regs->bel2) |
+ MCDE_TVBL2A_BSL2(regs->bsl));
+ mcde_wreg(MCDE_TVISLA + idx * MCDE_TVISLA_GROUPOFFSET,
+ MCDE_TVISLA_FSL1(regs->fsl1) |
+ MCDE_TVISLA_FSL2(regs->fsl2));
+
+ /* Horizontal timing registers */
+ mcde_wreg(MCDE_TVLBALWA + idx * MCDE_TVLBALWA_GROUPOFFSET,
+ MCDE_TVLBALWA_LBW(regs->hsw) |
+ MCDE_TVLBALWA_ALW(regs->alw));
+ mcde_wreg(MCDE_TVTIM1A + idx * MCDE_TVTIM1A_GROUPOFFSET,
+ MCDE_TVTIM1A_DHO(regs->dho));
+ if (!regs->sel_mode_tv)
+ mcde_wreg(MCDE_LCDTIM1A + idx * MCDE_LCDTIM1A_GROUPOFFSET,
+ regs->lcdtim1);
+ regs->dirty = false;
+}
+
+static void update_col_registers(enum mcde_chnl chnl_id, struct col_regs *regs)
+{
+ u8 idx = chnl_id;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ mcde_wreg(MCDE_RGBCONV1A + idx * MCDE_RGBCONV1A_GROUPOFFSET,
+ MCDE_RGBCONV1A_YR_RED(regs->y_red) |
+ MCDE_RGBCONV1A_YR_GREEN(regs->y_green));
+ mcde_wreg(MCDE_RGBCONV2A + idx * MCDE_RGBCONV2A_GROUPOFFSET,
+ MCDE_RGBCONV2A_YR_BLUE(regs->y_blue) |
+ MCDE_RGBCONV2A_CR_RED(regs->cr_red));
+ mcde_wreg(MCDE_RGBCONV3A + idx * MCDE_RGBCONV3A_GROUPOFFSET,
+ MCDE_RGBCONV3A_CR_GREEN(regs->cr_green) |
+ MCDE_RGBCONV3A_CR_BLUE(regs->cr_blue));
+ mcde_wreg(MCDE_RGBCONV4A + idx * MCDE_RGBCONV4A_GROUPOFFSET,
+ MCDE_RGBCONV4A_CB_RED(regs->cb_red) |
+ MCDE_RGBCONV4A_CB_GREEN(regs->cb_green));
+ mcde_wreg(MCDE_RGBCONV5A + idx * MCDE_RGBCONV5A_GROUPOFFSET,
+ MCDE_RGBCONV5A_CB_BLUE(regs->cb_blue) |
+ MCDE_RGBCONV5A_OFF_RED(regs->off_cr));
+ mcde_wreg(MCDE_RGBCONV6A + idx * MCDE_RGBCONV6A_GROUPOFFSET,
+ MCDE_RGBCONV6A_OFF_GREEN(regs->off_y) |
+ MCDE_RGBCONV6A_OFF_BLUE(regs->off_cb));
+ regs->dirty = false;
+}
+
+/* MCDE internal helpers */
+static u8 portfmt2dsipacking(enum mcde_port_pix_fmt pix_fmt)
+{
+ switch (pix_fmt) {
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ return MCDE_DSIVID0CONF0_PACKING_RGB565;
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ return MCDE_DSIVID0CONF0_PACKING_RGB666;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ default:
+ return MCDE_DSIVID0CONF0_PACKING_RGB888;
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ return MCDE_DSIVID0CONF0_PACKING_HDTV;
+ }
+}
+
+static u8 portfmt2bpp(enum mcde_port_pix_fmt pix_fmt)
+{
+ /* TODO: Check DPI spec. REVIEW: Remove or check */
+ switch (pix_fmt) {
+ case MCDE_PORTPIXFMT_DPI_16BPP_C1:
+ case MCDE_PORTPIXFMT_DPI_16BPP_C2:
+ case MCDE_PORTPIXFMT_DPI_16BPP_C3:
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ return 16;
+ case MCDE_PORTPIXFMT_DPI_18BPP_C1:
+ case MCDE_PORTPIXFMT_DPI_18BPP_C2:
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ return 18;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ case MCDE_PORTPIXFMT_DPI_24BPP:
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ return 24;
+ default:
+ return 1;
+ }
+}
+
+static u8 bpp2outbpp(u8 bpp)
+{
+ switch (bpp) {
+ case 16:
+ return MCDE_CRA1_OUTBPP_16BPP;
+ case 18:
+ return MCDE_CRA1_OUTBPP_18BPP;
+ case 24:
+ return MCDE_CRA1_OUTBPP_24BPP;
+ default:
+ return 0;
+ }
+}
+
+static u8 portfmt2cdwin(enum mcde_port_pix_fmt pix_fmt)
+{
+ switch (pix_fmt) {
+ case MCDE_PORTPIXFMT_DPI_16BPP_C1:
+ return MCDE_CRA1_CDWIN_16BPP_C1;
+ case MCDE_PORTPIXFMT_DPI_16BPP_C2:
+ return MCDE_CRA1_CDWIN_16BPP_C2;
+ case MCDE_PORTPIXFMT_DPI_16BPP_C3:
+ return MCDE_CRA1_CDWIN_16BPP_C3;
+ case MCDE_PORTPIXFMT_DPI_18BPP_C1:
+ return MCDE_CRA1_CDWIN_18BPP_C1;
+ case MCDE_PORTPIXFMT_DPI_18BPP_C2:
+ return MCDE_CRA1_CDWIN_18BPP_C2;
+ case MCDE_PORTPIXFMT_DPI_24BPP:
+ return MCDE_CRA1_CDWIN_24BPP;
+ default:
+ /* only DPI formats are relevant */
+ return 0;
+ }
+}
+
+static u32 get_output_fifo_size(enum mcde_fifo fifo)
+{
+ u32 ret = 1; /* Avoid div by zero */
+
+ switch (fifo) {
+ case MCDE_FIFO_A:
+ case MCDE_FIFO_B:
+ ret = output_fifo_ab_size;
+ break;
+ case MCDE_FIFO_C0:
+ case MCDE_FIFO_C1:
+ ret = output_fifo_c0c1_size;
+ break;
+ default:
+		dev_warn(&mcde_dev->dev, "Unsupported fifo\n");
+ break;
+ }
+ return ret;
+}
+
+static inline u8 get_dsi_formatter_id(const struct mcde_port *port)
+{
+ if (dsi_ifc_is_supported)
+ return 2 * port->link + port->ifc;
+ else
+ return port->link;
+}
+
+static struct mcde_chnl_state *find_channel_by_dsilink(int link)
+{
+ struct mcde_chnl_state *chnl = &channels[0];
+ for (; chnl < &channels[num_channels]; chnl++)
+ if (chnl->enabled && chnl->port.link == link &&
+ chnl->port.type == MCDE_PORTTYPE_DSI)
+ return chnl;
+ return NULL;
+}
+
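+/*
+ * Count a completed VCMP for the channel and wake up waiters. When VCMP
+ * fires once per field (interlaced TV-out), only every second VCMP marks
+ * a completed frame.
+ */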
+static inline void mcde_handle_vcmp(struct mcde_chnl_state *chnl)
+{
+	if (!chnl->vcmp_per_field || chnl->even_vcmp) {
+ atomic_inc(&chnl->vcmp_cnt);
+ if (chnl->state == CHNLSTATE_STOPPING)
+ set_channel_state_atomic(chnl, CHNLSTATE_STOPPED);
+ wake_up_all(&chnl->vcmp_waitq);
+ }
+ chnl->even_vcmp = !chnl->even_vcmp;
+}
+
+static void handle_dsi_irq(struct mcde_chnl_state *chnl, int i)
+{
+ u32 irq_status = dsi_rfld(i, DSI_DIRECT_CMD_STS_FLAG, TE_RECEIVED_FLAG);
+ if (irq_status) {
+ dsi_wreg(i, DSI_DIRECT_CMD_STS_CLR,
+ DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(true));
+ dev_vdbg(&mcde_dev->dev, "BTA TE DSI%d\n", i);
+ do_softwaretrig(chnl);
+ }
+
+ irq_status = dsi_rfld(i, DSI_CMD_MODE_STS_FLAG, ERR_NO_TE_FLAG);
+ if (irq_status) {
+ dsi_wreg(i, DSI_CMD_MODE_STS_CLR,
+ DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(true));
+ dev_warn(&mcde_dev->dev, "NO_TE DSI%d\n", i);
+ set_channel_state_atomic(chnl, CHNLSTATE_STOPPED);
+ }
+
+ irq_status = dsi_rfld(i, DSI_DIRECT_CMD_STS, TRIGGER_RECEIVED);
+ if (irq_status) {
+ /* DSI TE polling answer received */
+ dsi_wreg(i, DSI_DIRECT_CMD_STS_CLR,
+ DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR(true));
+
+ /* Reset TE watchdog timer */
+ if (chnl->port.sync_src == MCDE_SYNCSRC_TE_POLLING)
+ dsi_te_poll_set_timer(chnl, DSI_TE_NO_ANSWER_TIMEOUT);
+ }
+}
+
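+/*
+ * Top-level MCDE interrupt handler: report channel and error status,
+ * dispatch VCMP events per channel, then service the active DSI links.
+ */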
+static irqreturn_t mcde_irq_handler(int irq, void *dev)
+{
+ int i;
+ u32 irq_status;
+
+ irq_status = mcde_rreg(MCDE_MISCHNL);
+ if (irq_status) {
+ dev_err(&mcde_dev->dev, "chnl error=%.8x\n", irq_status);
+ mcde_wreg(MCDE_RISCHNL, irq_status);
+ }
+ irq_status = mcde_rreg(MCDE_MISERR);
+ if (irq_status) {
+ dev_err(&mcde_dev->dev, "error=%.8x\n", irq_status);
+ mcde_wreg(MCDE_RISERR, irq_status);
+ }
+
+ /* Handle channel irqs */
+ irq_status = mcde_rreg(MCDE_RISPP);
+ if (irq_status & MCDE_RISPP_VCMPARIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_A]);
+ if (irq_status & MCDE_RISPP_VCMPBRIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_B]);
+ if (irq_status & MCDE_RISPP_VCMPC0RIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_C0]);
+ if (irq_status & MCDE_RISPP_VCMPC1RIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_C1]);
+ mcde_wreg(MCDE_RISPP, irq_status);
+
+ for (i = 0; i < num_dsilinks; i++) {
+ struct mcde_chnl_state *chnl_from_dsi;
+
+ chnl_from_dsi = find_channel_by_dsilink(i);
+
+ if (chnl_from_dsi == NULL)
+ continue;
+
+ handle_dsi_irq(chnl_from_dsi, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Allowed transitions: SETUP -> WAIT_TE -> RUNNING -> STOPPING -> STOPPED,
+ * plus DSI_READ/DSI_WRITE/SUSPEND -> IDLE.
+ */
+static int set_channel_state_atomic(struct mcde_chnl_state *chnl,
+ enum chnl_state state)
+{
+ enum chnl_state chnl_state = chnl->state;
+
+ dev_dbg(&mcde_dev->dev, "Channel state change"
+ " (chnl=%d, old=%d, new=%d)\n", chnl->id, chnl_state, state);
+
+ if ((chnl_state == CHNLSTATE_SETUP && state == CHNLSTATE_WAIT_TE) ||
+ (chnl_state == CHNLSTATE_SETUP && state == CHNLSTATE_RUNNING) ||
+ (chnl_state == CHNLSTATE_WAIT_TE && state == CHNLSTATE_RUNNING) ||
+ (chnl_state == CHNLSTATE_RUNNING && state == CHNLSTATE_STOPPING)) {
+ /* Set wait TE, running, or stopping state */
+ chnl->state = state;
+ return 0;
+ } else if ((chnl_state == CHNLSTATE_STOPPING &&
+ state == CHNLSTATE_STOPPED) ||
+ (chnl_state == CHNLSTATE_WAIT_TE &&
+ state == CHNLSTATE_STOPPED)) {
+ /* Set stopped state */
+ chnl->state = state;
+ wake_up_all(&chnl->state_waitq);
+ return 0;
+ } else if (state == CHNLSTATE_IDLE) {
+ /* Set idle state */
+ WARN_ON_ONCE(chnl_state != CHNLSTATE_DSI_READ &&
+ chnl_state != CHNLSTATE_DSI_WRITE &&
+ chnl_state != CHNLSTATE_SUSPEND);
+ chnl->state = state;
+ wake_up_all(&chnl->state_waitq);
+ return 0;
+ } else {
+ /* Invalid atomic state transition */
+ dev_warn(&mcde_dev->dev, "Channel state change error (chnl=%d,"
+ " old=%d, new=%d)\n", chnl->id, chnl_state, state);
+ WARN_ON_ONCE(true);
+ return -EINVAL;
+ }
+}
+
+/* LOCKING: mcde_hw_lock */
+static int set_channel_state_sync(struct mcde_chnl_state *chnl,
+ enum chnl_state state)
+{
+ int ret = 0;
+ enum chnl_state chnl_state = chnl->state;
+
+ dev_dbg(&mcde_dev->dev, "Channel state change"
+ " (chnl=%d, old=%d, new=%d)\n", chnl->id, chnl->state, state);
+
+ /* No change */
+ if (chnl_state == state)
+ return 0;
+
+ /* Wait for IDLE before changing state */
+ if (chnl_state != CHNLSTATE_IDLE) {
+ ret = wait_event_timeout(chnl->state_waitq,
+ /* STOPPED -> IDLE is manual, so wait for both */
+ chnl->state == CHNLSTATE_STOPPED ||
+ chnl->state == CHNLSTATE_IDLE,
+ msecs_to_jiffies(CHNL_TIMEOUT));
+ if (WARN_ON_ONCE(!ret))
+ dev_warn(&mcde_dev->dev, "Wait for channel timeout "
+ "(chnl=%d, curr=%d, new=%d)\n",
+ chnl->id, chnl->state, state);
+ chnl_state = chnl->state;
+ }
+
+ /* Do manual transition from STOPPED to IDLE */
+ if (chnl_state == CHNLSTATE_STOPPED)
+ wait_for_flow_disabled(chnl);
+
+ /* State is IDLE, do transition to new state */
+ chnl->state = state;
+
+ return ret;
+}
+
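+/* Wait for the next VCMP on the channel (CHNL_TIMEOUT ms timeout). */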
+static int wait_for_vcmp(struct mcde_chnl_state *chnl)
+{
+ u64 vcmp = atomic_read(&chnl->vcmp_cnt) + 1;
+ int ret = wait_event_timeout(chnl->vcmp_waitq,
+ atomic_read(&chnl->vcmp_cnt) >= vcmp,
+ msecs_to_jiffies(CHNL_TIMEOUT));
+ return ret;
+}
+
+static void get_vid_operating_mode(const struct mcde_port *port,
+ bool *burst_mode, bool *sync_is_pulse, bool *tvg_enable)
+{
+ switch (port->phy.dsi.vid_mode) {
+ case NON_BURST_MODE_WITH_SYNC_EVENT:
+ *burst_mode = false;
+ *sync_is_pulse = false;
+ *tvg_enable = false;
+ break;
+ case NON_BURST_MODE_WITH_SYNC_EVENT_TVG_ENABLED:
+ *burst_mode = false;
+ *sync_is_pulse = false;
+ *tvg_enable = true;
+ break;
+ case BURST_MODE_WITH_SYNC_EVENT:
+ *burst_mode = true;
+ *sync_is_pulse = false;
+ *tvg_enable = false;
+ break;
+ case BURST_MODE_WITH_SYNC_PULSE:
+ *burst_mode = true;
+ *sync_is_pulse = true;
+ *tvg_enable = false;
+ break;
+ default:
+		dev_err(&mcde_dev->dev, "Unsupported video mode\n");
+ break;
+ }
+}
+
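+/*
+ * One-time DSI video mode setup: burst/sync behaviour, blanking handling,
+ * D-PHY wakeup time and interface/video enables.
+ */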
+static void update_vid_static_registers(const struct mcde_port *port)
+{
+ u8 link = port->link;
+ bool burst_mode, sync_is_pulse, tvg_enable;
+
+ get_vid_operating_mode(port, &burst_mode, &sync_is_pulse, &tvg_enable);
+
+ /* burst mode or non-burst mode */
+ dsi_wfld(link, DSI_VID_MAIN_CTL, BURST_MODE, burst_mode);
+
+ /* sync is pulse or event */
+ dsi_wfld(link, DSI_VID_MAIN_CTL, SYNC_PULSE_ACTIVE, sync_is_pulse);
+ dsi_wfld(link, DSI_VID_MAIN_CTL, SYNC_PULSE_HORIZONTAL, sync_is_pulse);
+
+ /* disable video stream when using TVG */
+ if (tvg_enable) {
+ dsi_wfld(link, DSI_MCTL_MAIN_EN, IF1_EN, false);
+ dsi_wfld(link, DSI_MCTL_MAIN_EN, IF2_EN, false);
+ }
+
+ /*
+ * behavior during blanking time
+ * 00: NULL packet 1x:LP 01:blanking-packet
+ */
+ dsi_wfld(link, DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, 1);
+
+ /*
+ * behavior during eol
+ * 00: NULL packet 1x:LP 01:blanking-packet
+ */
+ dsi_wfld(link, DSI_VID_MAIN_CTL, REG_BLKEOL_MODE, 2);
+
+ /* time to perform LP->HS on D-PHY */
+ dsi_wfld(link, DSI_VID_DPHY_TIME, REG_WAKEUP_TIME,
+ port->phy.dsi.vid_wakeup_time);
+
+ /*
+ * video stream starts on VSYNC packet
+ * and stops at the end of a frame
+ */
+ dsi_wfld(link, DSI_VID_MAIN_CTL, VID_ID, port->phy.dsi.virt_id);
+ dsi_wfld(link, DSI_VID_MAIN_CTL, START_MODE, 0);
+ dsi_wfld(link, DSI_VID_MAIN_CTL, STOP_MODE, 0);
+
+ /* 1: if1 in video mode, 0: if1 in command mode */
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, 1);
+
+ /* 1: enables the link, 0: disables the link */
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, VID_EN, 1);
+}
+
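+/*
+ * Static per-channel setup: FIFO muxing, output formatter selection and,
+ * for DSI, link bring-up and D-PHY configuration (DPI clock for DPI ports).
+ * Run once before the first update, while the formatter is not yet set up.
+ */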
+static int update_channel_static_registers(struct mcde_chnl_state *chnl)
+{
+ const struct mcde_port *port = &chnl->port;
+
+ switch (chnl->fifo) {
+ case MCDE_FIFO_A:
+ mcde_wreg(MCDE_CHNL0MUXING + chnl->id *
+ MCDE_CHNL0MUXING_GROUPOFFSET,
+ MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_A));
+ if (port->type == MCDE_PORTTYPE_DPI) {
+ mcde_wfld(MCDE_CTRLA, FORMTYPE,
+ MCDE_CTRLA_FORMTYPE_DPITV);
+ mcde_wfld(MCDE_CTRLA, FORMID, port->link);
+ } else if (port->type == MCDE_PORTTYPE_DSI) {
+ mcde_wfld(MCDE_CTRLA, FORMTYPE,
+ MCDE_CTRLA_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLA, FORMID,
+ get_dsi_formatter_id(port));
+ }
+ break;
+ case MCDE_FIFO_B:
+ mcde_wreg(MCDE_CHNL0MUXING + chnl->id *
+ MCDE_CHNL0MUXING_GROUPOFFSET,
+ MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_B));
+ if (port->type == MCDE_PORTTYPE_DPI) {
+ mcde_wfld(MCDE_CTRLB, FORMTYPE,
+ MCDE_CTRLB_FORMTYPE_DPITV);
+ mcde_wfld(MCDE_CTRLB, FORMID, port->link);
+ } else if (port->type == MCDE_PORTTYPE_DSI) {
+ mcde_wfld(MCDE_CTRLB, FORMTYPE,
+ MCDE_CTRLB_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLB, FORMID,
+ get_dsi_formatter_id(port));
+ }
+
+ break;
+ case MCDE_FIFO_C0:
+ mcde_wreg(MCDE_CHNL0MUXING + chnl->id *
+ MCDE_CHNL0MUXING_GROUPOFFSET,
+ MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_C0));
+ if (port->type == MCDE_PORTTYPE_DPI)
+ return -EINVAL;
+ mcde_wfld(MCDE_CTRLC0, FORMTYPE,
+ MCDE_CTRLC0_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLC0, FORMID, get_dsi_formatter_id(port));
+ break;
+ case MCDE_FIFO_C1:
+ mcde_wreg(MCDE_CHNL0MUXING + chnl->id *
+ MCDE_CHNL0MUXING_GROUPOFFSET,
+ MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_C1));
+ if (port->type == MCDE_PORTTYPE_DPI)
+ return -EINVAL;
+ mcde_wfld(MCDE_CTRLC1, FORMTYPE,
+ MCDE_CTRLC1_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLC1, FORMID, get_dsi_formatter_id(port));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Formatter */
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ int i = 0;
+ u8 idx;
+ u8 lnk = port->link;
+
+ idx = get_dsi_formatter_id(port);
+
+ if (dsi_link_enable(chnl))
+ goto failed_to_enable_link;
+
+ if (port->sync_src == MCDE_SYNCSRC_TE_POLLING) {
+ /* Enable DSI TE polling */
+ dsi_te_poll_req(chnl);
+
+ /* Set timer to detect non TE answer */
+ dsi_te_poll_set_timer(chnl,
+ DSI_TE_NO_ANSWER_TIMEOUT_INIT);
+ } else {
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, true);
+ }
+
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, HOST_EOT_GEN,
+ port->phy.dsi.host_eot_gen);
+
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, DLX_REMAP_EN,
+ port->phy.dsi.data_lanes_swap);
+
+ dsi_wreg(lnk, DSI_MCTL_DPHY_STATIC,
+ DSI_MCTL_DPHY_STATIC_UI_X4(port->phy.dsi.ui));
+ dsi_wreg(lnk, DSI_DPHY_LANES_TRIM,
+ DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_ENUM(0_90));
+ dsi_wreg(lnk, DSI_MCTL_DPHY_TIMEOUT,
+ DSI_MCTL_DPHY_TIMEOUT_CLK_DIV(0xf) |
+ DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL(0x3fff) |
+ DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL(0x3fff));
+ dsi_wreg(lnk, DSI_MCTL_MAIN_PHY_CTL,
+ DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME(0xf) |
+ DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN(true) |
+ DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN(true) |
+ DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN(true) |
+ DSI_MCTL_MAIN_PHY_CTL_LANE2_EN(
+ port->phy.dsi.num_data_lanes >= 2) |
+ DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS(
+ port->phy.dsi.clk_cont));
+ /* TODO: make enum */
+ dsi_wfld(lnk, DSI_CMD_MODE_CTL, ARB_MODE, false);
+ /* TODO: make enum */
+ dsi_wfld(lnk, DSI_CMD_MODE_CTL, ARB_PRI, port->ifc == 1);
+ dsi_wreg(lnk, DSI_MCTL_MAIN_EN,
+ DSI_MCTL_MAIN_EN_PLL_START(true) |
+ DSI_MCTL_MAIN_EN_CKLANE_EN(true) |
+ DSI_MCTL_MAIN_EN_DAT1_EN(true) |
+ DSI_MCTL_MAIN_EN_DAT2_EN(port->phy.dsi.num_data_lanes
+ == 2) |
+ DSI_MCTL_MAIN_EN_IF1_EN(port->ifc == 0) |
+ DSI_MCTL_MAIN_EN_IF2_EN(port->ifc == 1));
+ while (dsi_rfld(lnk, DSI_MCTL_MAIN_STS, CLKLANE_READY) == 0 ||
+ dsi_rfld(lnk, DSI_MCTL_MAIN_STS, DAT1_READY) == 0 ||
+ (dsi_rfld(lnk, DSI_MCTL_MAIN_STS, DAT2_READY) == 0 &&
+ port->phy.dsi.num_data_lanes > 1)) {
+ mdelay(1);
+ if (i++ == 10) {
+ dev_warn(&mcde_dev->dev,
+ "DSI lane not ready (link=%d)!\n", lnk);
+ goto dsi_link_error;
+ }
+ }
+
+ mcde_wreg(MCDE_DSIVID0CONF0 +
+ idx * MCDE_DSIVID0CONF0_GROUPOFFSET,
+ MCDE_DSIVID0CONF0_BLANKING(0) |
+ MCDE_DSIVID0CONF0_VID_MODE(
+ port->mode == MCDE_PORTMODE_VID) |
+ MCDE_DSIVID0CONF0_CMD8(true) |
+ MCDE_DSIVID0CONF0_BIT_SWAP(false) |
+ MCDE_DSIVID0CONF0_BYTE_SWAP(false) |
+ MCDE_DSIVID0CONF0_DCSVID_NOTGEN(true));
+
+ if (port->mode == MCDE_PORTMODE_VID) {
+ update_vid_static_registers(port);
+ } else {
+ if (port->ifc == 0)
+ dsi_wfld(port->link, DSI_CMD_MODE_CTL, IF1_ID,
+ port->phy.dsi.virt_id);
+ else if (port->ifc == 1)
+ dsi_wfld(port->link, DSI_CMD_MODE_CTL, IF2_ID,
+ port->phy.dsi.virt_id);
+ }
+ }
+
+ if (port->type == MCDE_PORTTYPE_DPI) {
+ if (port->phy.dpi.lcd_freq != clk_round_rate(chnl->clk_dpi,
+ port->phy.dpi.lcd_freq))
+ dev_warn(&mcde_dev->dev, "Could not set lcd freq"
+ " to %d\n", port->phy.dpi.lcd_freq);
+ WARN_ON_ONCE(clk_set_rate(chnl->clk_dpi,
+ port->phy.dpi.lcd_freq));
+ WARN_ON_ONCE(clk_enable(chnl->clk_dpi));
+ }
+
+ mcde_wfld(MCDE_CR, MCDEEN, true);
+ chnl->formatter_updated = true;
+
+ dev_vdbg(&mcde_dev->dev, "Static registers setup, chnl=%d\n", chnl->id);
+
+ return 0;
+dsi_link_error:
+ dsi_link_disable(chnl, true);
+failed_to_enable_link:
+ return -EINVAL;
+}
+
+void mcde_chnl_col_convert_apply(struct mcde_chnl_state *chnl,
+ struct mcde_col_transform *transform)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl->transform != transform) {
+
+ chnl->col_regs.y_red = transform->matrix[0][0];
+ chnl->col_regs.y_green = transform->matrix[0][1];
+ chnl->col_regs.y_blue = transform->matrix[0][2];
+ chnl->col_regs.cb_red = transform->matrix[1][0];
+ chnl->col_regs.cb_green = transform->matrix[1][1];
+ chnl->col_regs.cb_blue = transform->matrix[1][2];
+ chnl->col_regs.cr_red = transform->matrix[2][0];
+ chnl->col_regs.cr_green = transform->matrix[2][1];
+ chnl->col_regs.cr_blue = transform->matrix[2][2];
+ chnl->col_regs.off_y = transform->offset[0];
+ chnl->col_regs.off_cb = transform->offset[1];
+ chnl->col_regs.off_cr = transform->offset[2];
+ chnl->col_regs.dirty = true;
+
+ chnl->transform = transform;
+ }
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
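+/*
+ * Select the overlay color conversion and the channel color transform
+ * from the combination of overlay pixel format and port pixel format.
+ */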
+static void chnl_ovly_pixel_format_apply(struct mcde_chnl_state *chnl,
+ struct mcde_ovly_state *ovly)
+{
+ struct mcde_port *port = &chnl->port;
+ struct ovly_regs *regs = &ovly->regs;
+
+	/* Note: for YUV -> YUV, blending YUV overlays does not make sense. */
+ static struct mcde_col_transform crycb_2_ycbcr = {
+ /* Note that in MCDE YUV 422 pixels come as VYU pixels */
+ .matrix = {
+ {0x0000, 0x0100, 0x0000},
+ {0x0000, 0x0000, 0x0100},
+ {0x0100, 0x0000, 0x0000},
+ },
+ .offset = {0, 0, 0},
+ };
+
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ if (port->pixel_format != MCDE_PORTPIXFMT_DSI_YCBCR422) {
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422) {
+ /* standard case: DSI: RGB -> RGB */
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED;
+ } else {
+ /* DSI: YUV -> RGB */
+ /* TODO change matrix */
+ regs->col_conv =
+ MCDE_OVL0CR_COLCCTRL_ENABLED_SAT;
+ mcde_chnl_col_convert_apply(chnl,
+ &chnl->ycbcr_2_rgb);
+ }
+ } else {
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422)
+ /* DSI: RGB -> YUV */
+ mcde_chnl_col_convert_apply(chnl,
+ &chnl->rgb_2_ycbcr);
+ else
+ /* DSI: YUV -> YUV */
+ mcde_chnl_col_convert_apply(chnl,
+ &crycb_2_ycbcr);
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT;
+ }
+ } else if (port->type == MCDE_PORTTYPE_DPI && port->phy.dpi.tv_mode) {
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT;
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422)
+ mcde_chnl_col_convert_apply(chnl, &chnl->rgb_2_ycbcr);
+ else
+ mcde_chnl_col_convert_apply(chnl, &crycb_2_ycbcr);
+ } else if (port->type == MCDE_PORTTYPE_DPI) {
+		/* Note: YUV is not a supported port pixel format for DPI */
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422) {
+ /* standard case: DPI: RGB -> RGB */
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED;
+ } else {
+ /* DPI: YUV -> RGB */
+ regs->col_conv =
+ MCDE_OVL0CR_COLCCTRL_ENABLED_SAT;
+ mcde_chnl_col_convert_apply(chnl,
+ &chnl->ycbcr_2_rgb);
+ }
+ }
+}
+
+/* REVIEW: Make update_* an mcde_rectangle? */
+static void update_overlay_registers(u8 idx, struct ovly_regs *regs,
+ struct mcde_port *port, enum mcde_fifo fifo,
+ u16 update_x, u16 update_y, u16 update_w,
+ u16 update_h, s16 stride, bool interlaced,
+ enum mcde_display_rotation rotation)
+{
+ /* TODO: fix clipping for small overlay */
+ u32 lmrgn = (regs->cropx + update_x) * regs->bits_per_pixel;
+ u32 tmrgn = (regs->cropy + update_y) * stride;
+ u32 ppl = regs->ppl - update_x;
+ u32 lpf = regs->lpf - update_y;
+ s32 ljinc = stride;
+ u32 pixelfetchwtrmrklevel;
+ u8 nr_of_bufs = 1;
+ u32 sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL;
+
+ if (rotation == MCDE_DISPLAY_ROT_180_CCW) {
+ ljinc = -ljinc;
+ tmrgn += stride * (regs->lpf - 1) / 8;
+ }
+
+	/*
+	 * Preferably most of this would be done in an apply function instead
+	 * of on every update. However, lpf depends on update_y.
+	 */
+ if (interlaced && port->type == MCDE_PORTTYPE_DSI) {
+ nr_of_bufs = 2;
+ lpf = lpf / 2;
+ ljinc *= 2;
+ }
+
+ if ((fifo == MCDE_FIFO_A || fifo == MCDE_FIFO_B) &&
+ regs->ppl >= SCREEN_PPL_HIGH)
+ pixelfetchwtrmrklevel = input_fifo_size * 2;
+ else
+ pixelfetchwtrmrklevel = input_fifo_size / 2;
+
+ if (port->update_auto_trig && port->type == MCDE_PORTTYPE_DSI) {
+ switch (port->sync_src) {
+ case MCDE_SYNCSRC_OFF:
+ sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL;
+ break;
+ case MCDE_SYNCSRC_TE0:
+ case MCDE_SYNCSRC_TE1:
+ case MCDE_SYNCSRC_TE_POLLING:
+ default:
+ sel_mod = MCDE_EXTSRC0CR_SEL_MOD_AUTO_TOGGLE;
+ break;
+ }
+ } else if (port->type == MCDE_PORTTYPE_DPI)
+ sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL;
+
+ mcde_wreg(MCDE_EXTSRC0CONF + idx * MCDE_EXTSRC0CONF_GROUPOFFSET,
+ MCDE_EXTSRC0CONF_BUF_ID(0) |
+ MCDE_EXTSRC0CONF_BUF_NB(nr_of_bufs) |
+ MCDE_EXTSRC0CONF_PRI_OVLID(idx) |
+ MCDE_EXTSRC0CONF_BPP(regs->bpp) |
+ MCDE_EXTSRC0CONF_BGR(regs->bgr) |
+ MCDE_EXTSRC0CONF_BEBO(regs->bebo) |
+ MCDE_EXTSRC0CONF_BEPO(false));
+ mcde_wreg(MCDE_EXTSRC0CR + idx * MCDE_EXTSRC0CR_GROUPOFFSET,
+ MCDE_EXTSRC0CR_SEL_MOD(sel_mod) |
+ MCDE_EXTSRC0CR_MULTIOVL_CTRL_ENUM(PRIMARY) |
+ MCDE_EXTSRC0CR_FS_DIV_DISABLE(false) |
+ MCDE_EXTSRC0CR_FORCE_FS_DIV(false));
+ mcde_wreg(MCDE_OVL0CR + idx * MCDE_OVL0CR_GROUPOFFSET,
+ MCDE_OVL0CR_OVLEN(regs->enabled) |
+ MCDE_OVL0CR_COLCCTRL(regs->col_conv) |
+ MCDE_OVL0CR_CKEYGEN(false) |
+ MCDE_OVL0CR_ALPHAPMEN(false) |
+ MCDE_OVL0CR_OVLF(false) |
+ MCDE_OVL0CR_OVLR(false) |
+ MCDE_OVL0CR_OVLB(false) |
+ MCDE_OVL0CR_FETCH_ROPC(0) |
+ MCDE_OVL0CR_STBPRIO(0) |
+ MCDE_OVL0CR_BURSTSIZE_ENUM(HW_8W) |
+ /* TODO: enum, get from ovly */
+ MCDE_OVL0CR_MAXOUTSTANDING_ENUM(8_REQ) |
+ /* TODO: _HW_8W, calculate? */
+ MCDE_OVL0CR_ROTBURSTSIZE_ENUM(HW_8W));
+ mcde_wreg(MCDE_OVL0CONF + idx * MCDE_OVL0CONF_GROUPOFFSET,
+ MCDE_OVL0CONF_PPL(ppl) |
+ MCDE_OVL0CONF_EXTSRC_ID(idx) |
+ MCDE_OVL0CONF_LPF(lpf));
+ mcde_wreg(MCDE_OVL0CONF2 + idx * MCDE_OVL0CONF2_GROUPOFFSET,
+ MCDE_OVL0CONF2_BP(regs->alpha_source) |
+ MCDE_OVL0CONF2_ALPHAVALUE(regs->alpha_value) |
+ MCDE_OVL0CONF2_OPQ(regs->opq) |
+ MCDE_OVL0CONF2_PIXOFF(lmrgn & 63) |
+ MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL(
+ pixelfetchwtrmrklevel));
+ mcde_wreg(MCDE_OVL0LJINC + idx * MCDE_OVL0LJINC_GROUPOFFSET,
+ ljinc);
+ mcde_wreg(MCDE_OVL0CROP + idx * MCDE_OVL0CROP_GROUPOFFSET,
+ MCDE_OVL0CROP_TMRGN(tmrgn) |
+ MCDE_OVL0CROP_LMRGN(lmrgn >> 6));
+ regs->dirty = false;
+
+ dev_vdbg(&mcde_dev->dev, "Overlay registers setup, idx=%d\n", idx);
+}
+
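+/* Per-frame overlay updates: composition position/z and buffer addresses. */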
+static void update_overlay_registers_on_the_fly(u8 idx, struct ovly_regs *regs)
+{
+ mcde_wreg(MCDE_OVL0COMP + idx * MCDE_OVL0COMP_GROUPOFFSET,
+ MCDE_OVL0COMP_XPOS(regs->xpos) |
+ MCDE_OVL0COMP_CH_ID(regs->ch_id) |
+ MCDE_OVL0COMP_YPOS(regs->ypos) |
+ MCDE_OVL0COMP_Z(regs->z));
+
+ mcde_wreg(MCDE_EXTSRC0A0 + idx * MCDE_EXTSRC0A0_GROUPOFFSET,
+ regs->baseaddress0);
+ mcde_wreg(MCDE_EXTSRC0A1 + idx * MCDE_EXTSRC0A1_GROUPOFFSET,
+ regs->baseaddress1);
+ regs->dirty_buf = false;
+}
+
+static void do_softwaretrig(struct mcde_chnl_state *chnl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ enable_flow(chnl);
+ mcde_wreg(MCDE_CHNL0SYNCHSW +
+ chnl->id * MCDE_CHNL0SYNCHSW_GROUPOFFSET,
+ MCDE_CHNL0SYNCHSW_SW_TRIG(true));
+ disable_flow(chnl);
+
+ local_irq_restore(flags);
+
+ dev_vdbg(&mcde_dev->dev, "Software TRIG on channel %d\n", chnl->id);
+}
+
+static void disable_flow(struct mcde_chnl_state *chnl)
+{
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(chnl->state != CHNLSTATE_RUNNING))
+ return;
+
+ local_irq_save(flags);
+
+ switch (chnl->id) {
+ case MCDE_CHNL_A:
+ mcde_wfld(MCDE_CRA0, FLOEN, false);
+ break;
+ case MCDE_CHNL_B:
+ mcde_wfld(MCDE_CRB0, FLOEN, false);
+ break;
+ case MCDE_CHNL_C0:
+ mcde_wfld(MCDE_CRC, C1EN, false);
+ break;
+ case MCDE_CHNL_C1:
+ mcde_wfld(MCDE_CRC, C2EN, false);
+ break;
+ }
+
+ set_channel_state_atomic(chnl, CHNLSTATE_STOPPING);
+
+ local_irq_restore(flags);
+}
+
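+/*
+ * Stop a running channel: turn off DSI continuous clock and TE polling,
+ * then disable the flow.
+ */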
+static void stop_channel(struct mcde_chnl_state *chnl)
+{
+ const struct mcde_port *port = &chnl->port;
+ bool dpi_lcd_mode;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl->state != CHNLSTATE_RUNNING)
+ return;
+
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ dsi_wfld(port->link, DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS,
+ false);
+ if (port->sync_src == MCDE_SYNCSRC_TE_POLLING)
+ del_timer(&chnl->dsi_te_timer);
+ }
+
+ disable_flow(chnl);
+	/*
+	 * VCOMP needs to be triggered manually after the channel is
+	 * disabled, for all channels using video mode except DPI LCD.
+	 */
+ dpi_lcd_mode = (port->type == MCDE_PORTTYPE_DPI &&
+ !chnl->port.phy.dpi.tv_mode);
+
+ if (chnl->port.update_auto_trig && !dpi_lcd_mode)
+ mcde_wreg(MCDE_SISPP, 1 << chnl->id);
+}
+
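+/* Poll until hardware clears the flow enable bit, or report a timeout. */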
+static void wait_for_flow_disabled(struct mcde_chnl_state *chnl)
+{
+ int i = 0;
+
+ switch (chnl->id) {
+ case MCDE_CHNL_A:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRA0, FLOEN)) {
+ dev_vdbg(&mcde_dev->dev,
+					"Flow (A) disabled after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ case MCDE_CHNL_B:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRB0, FLOEN)) {
+ dev_vdbg(&mcde_dev->dev,
+					"Flow (B) disabled after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ case MCDE_CHNL_C0:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRC, C1EN)) {
+ dev_vdbg(&mcde_dev->dev,
+					"Flow (C1) disabled after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ case MCDE_CHNL_C1:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRC, C2EN)) {
+ dev_vdbg(&mcde_dev->dev,
+					"Flow (C2) disabled after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ }
+ if (i == MCDE_FLOWEN_MAX_TRIAL)
+ dev_err(&mcde_dev->dev, "%s: channel %d timeout\n",
+ __func__, chnl->id);
+}
+
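+/* Enable the channel flow (and rotation on A/B) and mark it RUNNING. */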
+static void enable_flow(struct mcde_chnl_state *chnl)
+{
+ const struct mcde_port *port = &chnl->port;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (port->type == MCDE_PORTTYPE_DSI)
+ dsi_wfld(port->link, DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS,
+ port->phy.dsi.clk_cont);
+
+ /*
+ * When ROTEN is set, the FLOEN bit will also be set but
+ * the flow has to be started anyway.
+ */
+ switch (chnl->id) {
+ case MCDE_CHNL_A:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRA0, FLOEN));
+ mcde_wfld(MCDE_CRA0, ROTEN, chnl->regs.roten);
+ mcde_wfld(MCDE_CRA0, FLOEN, true);
+ break;
+ case MCDE_CHNL_B:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRB0, FLOEN));
+ mcde_wfld(MCDE_CRB0, ROTEN, chnl->regs.roten);
+ mcde_wfld(MCDE_CRB0, FLOEN, true);
+ break;
+ case MCDE_CHNL_C0:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRC, C1EN));
+ mcde_wfld(MCDE_CRC, C1EN, true);
+ break;
+ case MCDE_CHNL_C1:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRC, C2EN));
+ mcde_wfld(MCDE_CRC, C2EN, true);
+ break;
+ }
+
+ set_channel_state_atomic(chnl, CHNLSTATE_RUNNING);
+}
+
+static void work_sleep_function(struct work_struct *ptr)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ if (mcde_trylock(__func__, __LINE__)) {
+ if (mcde_dynamic_power_management)
+ disable_mcde_hw(false, false);
+ mcde_unlock(__func__, __LINE__);
+ }
+}
+
+/* TODO get from register */
+#define MCDE_CLK_FREQ_MHZ 160
+static u32 get_pkt_div(u32 disp_ppl,
+ struct mcde_port *port,
+ enum mcde_fifo fifo)
+{
+ /*
+ * The lines can be split in several packets only on DSI CMD mode.
+ * In DSI VIDEO mode, 1 line = 1 packet.
+ * DPI is like DSI VIDEO (watermark = 1 line).
+ * DPI waits for fifo ready only for the first line of the first frame.
+ * If line is wider than fifo size, one can set watermark
+ * at fifo size, or set it to line size as watermark will be
+ * saturated at fifo size inside MCDE.
+ */
+ switch (port->type) {
+ case MCDE_PORTTYPE_DSI:
+ if (port->mode == MCDE_PORTMODE_CMD)
+ /* Equivalent of ceil(disp_ppl/fifo_size) */
+ return (disp_ppl - 1) / get_output_fifo_size(fifo) + 1;
+ else
+ return 1;
+ break;
+ case MCDE_PORTTYPE_DPI:
+ return 1;
+ break;
+ default:
+ break;
+ }
+ return 1;
+}
+
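+/*
+ * Convert horizontal blanking (hfp/hbp/hsw) from pixels to DSI byte counts,
+ * subtracting packet header and checksum overhead.
+ */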
+static void update_vid_horizontal_blanking(struct mcde_port *port,
+ struct mcde_video_mode *vmode, bool sync_is_pulse, u8 bpp)
+{
+ int hfp, hbp, hsa;
+ u8 link = port->link;
+
+ /*
+ * vmode->hfp, vmode->hbp and vmode->hsw are given in pixels
+ * and must be re-calculated into bytes
+ *
+ * 6 + 2 is HFP header + checksum
+ */
+ hfp = vmode->hfp * bpp - 6 - 2;
+ if (sync_is_pulse) {
+ /*
+ * 6 is HBP header + checksum
+ * 4 is RGB header + checksum
+ */
+ hbp = vmode->hbp * bpp - 4 - 6;
+ /*
+ * 6 is HBP header + checksum
+ * 4 is HSW packet bytes
+ * 4 is RGB header + checksum
+ */
+ hsa = vmode->hsw * bpp - 4 - 4 - 6;
+ } else {
+ /*
+ * 6 is HBP header + checksum
+ * 4 is HSW packet bytes
+ * 4 is RGB header + checksum
+ */
+ hbp = (vmode->hbp + vmode->hsw) * bpp - 4 - 4 - 6;
+ /* HSA is not considered in this mode and set to 0 */
+ hsa = 0;
+ }
+ if (hfp < 0) {
+ hfp = 0;
+ dev_warn(&mcde_dev->dev,
+ "%s: negative calc for hfp, set to 0\n", __func__);
+ }
+ if (hbp < 0) {
+ hbp = 0;
+ dev_warn(&mcde_dev->dev,
+ "%s: negative calc for hbp, set to 0\n", __func__);
+ }
+ if (hsa < 0) {
+ hsa = 0;
+ dev_warn(&mcde_dev->dev,
+ "%s: negative calc for hsa, set to 0\n", __func__);
+ }
+
+ dsi_wfld(link, DSI_VID_HSIZE1, HFP_LENGTH, hfp);
+ dsi_wfld(link, DSI_VID_HSIZE1, HBP_LENGTH, hbp);
+ dsi_wfld(link, DSI_VID_HSIZE1, HSA_LENGTH, hsa);
+}
+
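+/*
+ * Program DSI video mode frame geometry, pixel stream format, optional test
+ * video generator and blanking packet sizes.
+ */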
+static void update_vid_frame_parameters(struct mcde_port *port,
+ struct mcde_video_mode *vmode, u8 bpp)
+{
+ u8 link = port->link;
+ bool burst_mode, sync_is_pulse, tvg_enable;
+ u32 hs_byte_clk, pck_len, blkline_pck, line_duration;
+ u32 blkeol_pck, blkeol_duration;
+ u8 pixel_mode;
+ u8 rgb_header;
+
+ get_vid_operating_mode(port, &burst_mode, &sync_is_pulse, &tvg_enable);
+
+ dsi_wfld(link, DSI_VID_VSIZE, VFP_LENGTH, vmode->vfp);
+ dsi_wfld(link, DSI_VID_VSIZE, VBP_LENGTH, vmode->vbp);
+ dsi_wfld(link, DSI_VID_VSIZE, VSA_LENGTH, vmode->vsw);
+ update_vid_horizontal_blanking(port, vmode, sync_is_pulse, bpp);
+
+ dsi_wfld(link, DSI_VID_VSIZE, VACT_LENGTH, vmode->yres);
+ dsi_wfld(link, DSI_VID_HSIZE2, RGB_SIZE, vmode->xres * bpp);
+
+ /*
+ * The rgb_header identifies the pixel stream format,
+ * as described in the MIPI DSI Specification:
+ *
+ * 0x0E: Packed pixel stream, 16-bit RGB, 565 format
+ * 0x1E: Packed pixel stream, 18-bit RGB, 666 format
+ * 0x2E: Loosely Packed pixel stream, 18-bit RGB, 666 format
+ * 0x3E: Packed pixel stream, 24-bit RGB, 888 format
+ */
+ switch (port->pixel_format) {
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ pixel_mode = 0;
+ rgb_header = 0x0E;
+ break;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ pixel_mode = 2;
+ rgb_header = 0x2E;
+ break;
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ pixel_mode = 1;
+ rgb_header = 0x1E;
+ break;
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ pixel_mode = 3;
+ rgb_header = 0x3E;
+ break;
+ default:
+ pixel_mode = 3;
+ rgb_header = 0x3E;
+ dev_warn(&mcde_dev->dev,
+ "%s: invalid pixel format %d\n",
+ __func__, port->pixel_format);
+ break;
+ }
+
+ dsi_wfld(link, DSI_VID_MAIN_CTL, VID_PIXEL_MODE, pixel_mode);
+ dsi_wfld(link, DSI_VID_MAIN_CTL, HEADER, rgb_header);
+
+ if (tvg_enable) {
+ /*
+ * with these settings, expect to see 64 pixels wide
+ * red and green vertical stripes on the screen when
+ * tvg_enable = 1
+ */
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, TVG_SEL, 1);
+
+ dsi_wfld(link, DSI_TVG_CTL, TVG_STRIPE_SIZE, 6);
+ dsi_wfld(link, DSI_TVG_CTL, TVG_MODE, 2);
+ dsi_wfld(link, DSI_TVG_CTL, TVG_STOPMODE, 2);
+ dsi_wfld(link, DSI_TVG_CTL, TVG_RUN, 1);
+
+ dsi_wfld(link, DSI_TVG_IMG_SIZE, TVG_NBLINE, vmode->yres);
+ dsi_wfld(link, DSI_TVG_IMG_SIZE, TVG_LINE_SIZE,
+ vmode->xres * bpp);
+
+ dsi_wfld(link, DSI_TVG_COLOR1, COL1_BLUE, 0);
+ dsi_wfld(link, DSI_TVG_COLOR1, COL1_GREEN, 0);
+ dsi_wfld(link, DSI_TVG_COLOR1, COL1_RED, 0xFF);
+
+ dsi_wfld(link, DSI_TVG_COLOR2, COL2_BLUE, 0);
+ dsi_wfld(link, DSI_TVG_COLOR2, COL2_GREEN, 0xFF);
+ dsi_wfld(link, DSI_TVG_COLOR2, COL2_RED, 0);
+ }
+
+	/*
+	 * vmode->pixclock is the time between two pixels (in picoseconds).
+	 *
+	 * hs_byte_clk is the number of bytes transferred per lane and
+	 * second (in MHz).
+	 */
+ hs_byte_clk = 1000000 / vmode->pixclock / 8;
+ pck_len = 1000000 * hs_byte_clk / port->refresh_rate /
+ (vmode->vsw + vmode->vbp + vmode->yres + vmode->vfp) *
+ port->phy.dsi.num_data_lanes;
+
+ /*
+ * 6 is header + checksum, header = 4 bytes, checksum = 2 bytes
+ * 4 is short packet for vsync/hsync
+ */
+ if (sync_is_pulse)
+ blkline_pck = pck_len - vmode->hsw - 6;
+ else
+ blkline_pck = pck_len - 4 - 6;
+
+ line_duration = (blkline_pck + 6) / port->phy.dsi.num_data_lanes;
+ blkeol_pck = pck_len -
+ (vmode->hsw + vmode->hbp + vmode->xres + vmode->hfp) * bpp - 6;
+ blkeol_duration = (blkeol_pck + 6) / port->phy.dsi.num_data_lanes;
+
+ if (sync_is_pulse)
+ dsi_wfld(link, DSI_VID_BLKSIZE2, BLKLINE_PULSE_PCK,
+ blkline_pck);
+ else
+ dsi_wfld(link, DSI_VID_BLKSIZE1, BLKLINE_EVENT_PCK,
+ blkline_pck);
+ dsi_wfld(link, DSI_VID_DPHY_TIME, REG_LINE_DURATION, line_duration);
+ if (burst_mode) {
+ dsi_wfld(link, DSI_VID_BLKSIZE1, BLKEOL_PCK, blkeol_pck);
+ dsi_wfld(link, DSI_VID_PCK_TIME, BLKEOL_DURATION,
+ blkeol_duration);
+ dsi_wfld(link, DSI_VID_VCA_SETTING1, MAX_BURST_LIMIT,
+ blkeol_pck - 6);
+ dsi_wfld(link, DSI_VID_VCA_SETTING2, EXACT_BURST_LIMIT,
+ blkeol_pck);
+ }
+ if (sync_is_pulse)
+ dsi_wfld(link, DSI_VID_VCA_SETTING2, MAX_LINE_LIMIT,
+ blkline_pck - 6);
+}
+
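+/*
+ * Program the per-channel registers: FIFO watermark, sync sources, palette,
+ * DSI formatter packet sizes and delays, rotation buffers and blending.
+ */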
+void update_channel_registers(enum mcde_chnl chnl_id, struct chnl_regs *regs,
+ struct mcde_port *port, enum mcde_fifo fifo,
+ struct mcde_video_mode *video_mode)
+{
+ u8 idx = chnl_id;
+ u32 out_synch_src = MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER;
+ u32 src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE;
+ u32 fifo_wtrmrk = 0;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ /*
+ * Select appropriate fifo watermark.
+ * Watermark will be saturated at fifo size inside MCDE.
+ */
+ fifo_wtrmrk = video_mode->xres /
+ get_pkt_div(video_mode->xres, port, fifo);
+
+ dev_vdbg(&mcde_dev->dev, "%s fifo_watermark=%d for chnl_id=%d\n",
+ __func__, fifo_wtrmrk, chnl_id);
+
+ switch (chnl_id) {
+ case MCDE_CHNL_A:
+ mcde_wfld(MCDE_CTRLA, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ case MCDE_CHNL_B:
+ mcde_wfld(MCDE_CTRLB, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ case MCDE_CHNL_C0:
+ mcde_wfld(MCDE_CTRLC0, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ case MCDE_CHNL_C1:
+ mcde_wfld(MCDE_CTRLC1, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ default:
+ break;
+ }
+
+ /* Channel */
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ if (port->update_auto_trig) {
+ switch (port->sync_src) {
+ case MCDE_SYNCSRC_TE0:
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE0;
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ break;
+ case MCDE_SYNCSRC_OFF:
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE;
+ break;
+ case MCDE_SYNCSRC_TE1:
+ default:
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE1;
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ break;
+ case MCDE_SYNCSRC_TE_POLLING:
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ break;
+ case MCDE_SYNCSRC_FORMATTER:
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER;
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ break;
+ }
+ } else {
+ if (port->sync_src == MCDE_SYNCSRC_TE0) {
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE0;
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ }
+ }
+ } else if (port->type == MCDE_PORTTYPE_DPI) {
+ src_synch = port->update_auto_trig ?
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE :
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE;
+ }
+
+ mcde_wreg(MCDE_CHNL0CONF + idx * MCDE_CHNL0CONF_GROUPOFFSET,
+ MCDE_CHNL0CONF_PPL(regs->ppl-1) |
+ MCDE_CHNL0CONF_LPF(regs->lpf-1));
+ mcde_wreg(MCDE_CHNL0STAT + idx * MCDE_CHNL0STAT_GROUPOFFSET,
+ MCDE_CHNL0STAT_CHNLBLBCKGND_EN(false) |
+ MCDE_CHNL0STAT_CHNLRD(true));
+ mcde_wreg(MCDE_CHNL0SYNCHMOD +
+ idx * MCDE_CHNL0SYNCHMOD_GROUPOFFSET,
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH(src_synch) |
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC(out_synch_src));
+ mcde_wreg(MCDE_CHNL0BCKGNDCOL + idx * MCDE_CHNL0BCKGNDCOL_GROUPOFFSET,
+ MCDE_CHNL0BCKGNDCOL_B(0) |
+ MCDE_CHNL0BCKGNDCOL_G(0) |
+ MCDE_CHNL0BCKGNDCOL_R(0));
+
+ if (chnl_id == MCDE_CHNL_A || chnl_id == MCDE_CHNL_B) {
+ u32 mcde_crx1;
+ u32 mcde_pal0x;
+ u32 mcde_pal1x;
+ if (chnl_id == MCDE_CHNL_A) {
+ mcde_crx1 = MCDE_CRA1;
+ mcde_pal0x = MCDE_PAL0A;
+ mcde_pal1x = MCDE_PAL1A;
+ mcde_wfld(MCDE_CRA0, PALEN, regs->palette_enable);
+ } else {
+ mcde_crx1 = MCDE_CRB1;
+ mcde_pal0x = MCDE_PAL0B;
+ mcde_pal1x = MCDE_PAL1B;
+ mcde_wfld(MCDE_CRB0, PALEN, regs->palette_enable);
+ }
+ mcde_wreg(mcde_crx1,
+ MCDE_CRA1_PCD(regs->pcd) |
+ MCDE_CRA1_CLKSEL(regs->clksel) |
+ MCDE_CRA1_CDWIN(regs->cdwin) |
+ MCDE_CRA1_OUTBPP(bpp2outbpp(regs->bpp)) |
+ MCDE_CRA1_BCD(regs->bcd) |
+ MCDE_CRA1_CLKTYPE(regs->internal_clk));
+ if (regs->palette_enable) {
+ int i;
+ for (i = 0; i < 256; i++) {
+ mcde_wreg(mcde_pal0x,
+ MCDE_PAL0A_GREEN(regs->map_g(i)) |
+ MCDE_PAL0A_BLUE(regs->map_b(i)));
+ mcde_wreg(mcde_pal1x,
+ MCDE_PAL1A_RED(regs->map_r(i)));
+ }
+ }
+ }
+
+ /* Formatter */
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ u8 fidx;
+ u32 temp, packet;
+		/*
+		 * pkt_div is used to avoid underflow in the output fifo
+		 * for large packets.
+		 */
+ u32 pkt_div = 1;
+ u32 dsi_delay0 = 0;
+ u32 screen_ppl, screen_lpf;
+
+ fidx = get_dsi_formatter_id(port);
+
+ screen_ppl = video_mode->xres;
+ screen_lpf = video_mode->yres;
+
+ pkt_div = get_pkt_div(screen_ppl, port, fifo);
+
+ if (video_mode->interlaced)
+ screen_lpf /= 2;
+
+		/*
+		 * pkt_delay_progressive = pixelclock * htot /
+		 * (1E12 / 160E6) / pkt_div
+		 */
+ dsi_delay0 = (video_mode->pixclock) *
+ (video_mode->xres + video_mode->hbp +
+ video_mode->hfp) /
+ (100000000 / ((mcde_clk_rate / 10000))) / pkt_div;
+
+ if ((screen_ppl == SCREEN_PPL_CEA2) &&
+ (screen_lpf == SCREEN_LPF_CEA2))
+ dsi_delay0 += DSI_DELAY0_CEA2_ADD;
+
+ temp = mcde_rreg(MCDE_DSIVID0CONF0 +
+ fidx * MCDE_DSIVID0CONF0_GROUPOFFSET);
+ mcde_wreg(MCDE_DSIVID0CONF0 +
+ fidx * MCDE_DSIVID0CONF0_GROUPOFFSET,
+ (temp & ~MCDE_DSIVID0CONF0_PACKING_MASK) |
+ MCDE_DSIVID0CONF0_PACKING(regs->dsipacking));
+ /* no extra command byte in video mode */
+ if (port->mode == MCDE_PORTMODE_CMD)
+ packet = ((screen_ppl / pkt_div * regs->bpp) >> 3) + 1;
+ else
+ packet = ((screen_ppl / pkt_div * regs->bpp) >> 3);
+ mcde_wreg(MCDE_DSIVID0FRAME +
+ fidx * MCDE_DSIVID0FRAME_GROUPOFFSET,
+ MCDE_DSIVID0FRAME_FRAME(packet * pkt_div * screen_lpf));
+ mcde_wreg(MCDE_DSIVID0PKT + fidx * MCDE_DSIVID0PKT_GROUPOFFSET,
+ MCDE_DSIVID0PKT_PACKET(packet));
+ mcde_wreg(MCDE_DSIVID0SYNC +
+ fidx * MCDE_DSIVID0SYNC_GROUPOFFSET,
+ MCDE_DSIVID0SYNC_SW(0) |
+ MCDE_DSIVID0SYNC_DMA(0));
+ mcde_wreg(MCDE_DSIVID0CMDW +
+ fidx * MCDE_DSIVID0CMDW_GROUPOFFSET,
+ MCDE_DSIVID0CMDW_CMDW_START(DCS_CMD_WRITE_START) |
+ MCDE_DSIVID0CMDW_CMDW_CONTINUE(DCS_CMD_WRITE_CONTINUE));
+ mcde_wreg(MCDE_DSIVID0DELAY0 +
+ fidx * MCDE_DSIVID0DELAY0_GROUPOFFSET,
+ MCDE_DSIVID0DELAY0_INTPKTDEL(dsi_delay0));
+ mcde_wreg(MCDE_DSIVID0DELAY1 +
+ fidx * MCDE_DSIVID0DELAY1_GROUPOFFSET,
+ MCDE_DSIVID0DELAY1_TEREQDEL(0) |
+ MCDE_DSIVID0DELAY1_FRAMESTARTDEL(0));
+
+ if (port->mode == MCDE_PORTMODE_VID)
+ update_vid_frame_parameters(port, video_mode,
+ regs->bpp / 8);
+ } else if (port->type == MCDE_PORTTYPE_DPI &&
+ !port->phy.dpi.tv_mode) {
+ /* DPI LCD Mode */
+ if (chnl_id == MCDE_CHNL_A) {
+ mcde_wreg(MCDE_SYNCHCONFA,
+ MCDE_SYNCHCONFA_HWREQVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFA_HWREQVCNT(
+ video_mode->yres - 1) |
+ MCDE_SYNCHCONFA_SWINTVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFA_SWINTVCNT(
+ video_mode->yres - 1));
+ } else if (chnl_id == MCDE_CHNL_B) {
+ mcde_wreg(MCDE_SYNCHCONFB,
+ MCDE_SYNCHCONFB_HWREQVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFB_HWREQVCNT(
+ video_mode->yres - 1) |
+ MCDE_SYNCHCONFB_SWINTVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFB_SWINTVCNT(
+ video_mode->yres - 1));
+ }
+ }
+
+ if (regs->roten) {
+ mcde_wreg(MCDE_ROTADD0A + chnl_id * MCDE_ROTADD0A_GROUPOFFSET,
+ regs->rotbuf1);
+ mcde_wreg(MCDE_ROTADD1A + chnl_id * MCDE_ROTADD1A_GROUPOFFSET,
+ regs->rotbuf2);
+ mcde_wreg(MCDE_ROTACONF + chnl_id * MCDE_ROTACONF_GROUPOFFSET,
+ MCDE_ROTACONF_ROTBURSTSIZE_ENUM(HW_8W) |
+ MCDE_ROTACONF_ROTDIR(regs->rotdir) |
+ MCDE_ROTACONF_STRIP_WIDTH_ENUM(16PIX) |
+ MCDE_ROTACONF_RD_MAXOUT_ENUM(4_REQ) |
+ MCDE_ROTACONF_WR_MAXOUT_ENUM(8_REQ));
+ }
+
+ /* Blending */
+ if (chnl_id == MCDE_CHNL_A) {
+ mcde_wfld(MCDE_CRA0, BLENDEN, regs->blend_en);
+ mcde_wfld(MCDE_CRA0, BLENDCTRL, regs->blend_ctrl);
+ mcde_wfld(MCDE_CRA0, ALPHABLEND, regs->alpha_blend);
+ } else if (chnl_id == MCDE_CHNL_B) {
+ mcde_wfld(MCDE_CRB0, BLENDEN, regs->blend_en);
+ mcde_wfld(MCDE_CRB0, BLENDCTRL, regs->blend_ctrl);
+ mcde_wfld(MCDE_CRB0, ALPHABLEND, regs->alpha_blend);
+ }
+
+ dev_vdbg(&mcde_dev->dev, "Channel registers setup, chnl=%d\n", chnl_id);
+ regs->dirty = false;
+}
+
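+/*
+ * Power up MCDE if needed: rearm the sleep watchdog, mark the registers of
+ * suspended channels dirty, enable clocks/power and request the IRQ.
+ */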
+static int enable_mcde_hw(void)
+{
+ int ret;
+ int i;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ cancel_delayed_work(&hw_timeout_work);
+ schedule_delayed_work(&hw_timeout_work,
+ msecs_to_jiffies(MCDE_SLEEP_WATCHDOG));
+
+ for (i = 0; i < num_channels; i++) {
+ struct mcde_chnl_state *chnl = &channels[i];
+ if (chnl->state == CHNLSTATE_SUSPEND) {
+ /* Mark all registers as dirty */
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+ chnl->ovly0->regs.dirty = true;
+ chnl->ovly0->regs.dirty_buf = true;
+ if (chnl->ovly1) {
+ chnl->ovly1->regs.dirty = true;
+ chnl->ovly1->regs.dirty_buf = true;
+ }
+ chnl->regs.dirty = true;
+ chnl->col_regs.dirty = true;
+ chnl->tv_regs.dirty = true;
+ atomic_set(&chnl->vcmp_cnt, 0);
+ }
+ }
+
+ if (mcde_is_enabled) {
+ dev_vdbg(&mcde_dev->dev, "%s - already enabled\n", __func__);
+ return 0;
+ }
+
+ enable_clocks_and_power(mcde_dev);
+
+ ret = request_irq(mcde_irq, mcde_irq_handler, 0, "mcde",
+ &mcde_dev->dev);
+ if (ret) {
+ dev_dbg(&mcde_dev->dev, "Failed to request irq (irq=%d)\n",
+ mcde_irq);
+ cancel_delayed_work(&hw_timeout_work);
+ return -EINVAL;
+ }
+
+ update_mcde_registers();
+
+ dev_vdbg(&mcde_dev->dev, "%s - enable done\n", __func__);
+
+ mcde_is_enabled = true;
+ return 0;
+}
+
+/* DSI */
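+/*
+ * Send a DSI direct command write (DCS or generic) with up to
+ * MCDE_MAX_DSI_DIRECT_CMD_WRITE payload bytes and wait for completion.
+ */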
+static int mcde_dsi_direct_cmd_write(struct mcde_chnl_state *chnl,
+ bool dcs, u8 cmd, u8 *data, int len)
+{
+ int i, ret = 0;
+ u32 wrdat[4] = { 0, 0, 0, 0 };
+ u32 settings;
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+ u32 counter = DSI_WRITE_CMD_TIMEOUT;
+
+ if (len > MCDE_MAX_DSI_DIRECT_CMD_WRITE ||
+ chnl->port.type != MCDE_PORTTYPE_DSI)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+
+ _mcde_chnl_enable(chnl);
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!chnl->formatter_updated)
+ (void)update_channel_static_registers(chnl);
+
+ set_channel_state_sync(chnl, CHNLSTATE_DSI_WRITE);
+
+ if (dcs) {
+ wrdat[0] = cmd;
+ for (i = 1; i <= len; i++)
+ wrdat[i>>2] |= ((u32)data[i-1] << ((i & 3) * 8));
+ } else {
+ /* no explicit cmd byte for generic_write, only params */
+ for (i = 0; i < len; i++)
+ wrdat[i>>2] |= ((u32)data[i] << ((i & 3) * 8));
+ }
+
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(WRITE) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(len > 1) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(len+1) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true);
+ if (dcs) {
+ if (len == 0)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ DCS_SHORT_WRITE_0);
+ else if (len == 1)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ DCS_SHORT_WRITE_1);
+ else
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ DCS_LONG_WRITE);
+ } else {
+ if (len == 0)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_SHORT_WRITE_0);
+ else if (len == 1)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_SHORT_WRITE_1);
+ else if (len == 2)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_SHORT_WRITE_2);
+ else
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_LONG_WRITE);
+ }
+
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, wrdat[0]);
+ if (len > 3)
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT1, wrdat[1]);
+ if (len > 7)
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT2, wrdat[2]);
+ if (len > 11)
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT3, wrdat[3]);
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0);
+ dsi_wreg(link, DSI_CMD_MODE_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+
+ /* loop will normally run zero or one time until WRITE_COMPLETED */
+ while (!dsi_rfld(link, DSI_DIRECT_CMD_STS, WRITE_COMPLETED)
+ && --counter)
+ cpu_relax();
+
+ if (!counter) {
+ dev_err(&mcde_dev->dev,
+ "%s: DSI write cmd 0x%x timeout on DSI link %u!\n",
+ __func__, cmd, link);
+ ret = -ETIME;
+ } else {
+ /* inform if >100 loops before command completion */
+ if (counter < (DSI_WRITE_CMD_TIMEOUT-DSI_WRITE_CMD_TIMEOUT/10))
+ dev_vdbg(&mcde_dev->dev,
+ "%s: %u loops for DSI command %x completion\n",
+ __func__, (DSI_WRITE_CMD_TIMEOUT - counter),
+ cmd);
+
+ dev_vdbg(&mcde_dev->dev, "DSI Write ok %x error %x\n",
+ dsi_rreg(link, DSI_DIRECT_CMD_STS_FLAG),
+ dsi_rreg(link, DSI_CMD_MODE_STS_FLAG));
+ }
+
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return ret;
+}
+
+int mcde_dsi_generic_write(struct mcde_chnl_state *chnl, u8 *para, int len)
+{
+ return mcde_dsi_direct_cmd_write(chnl, false, 0, para, len);
+}
+
+int mcde_dsi_dcs_write(struct mcde_chnl_state *chnl, u8 cmd, u8 *data, int len)
+{
+ return mcde_dsi_direct_cmd_write(chnl, true, cmd, data, len);
+}
+
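+/* DCS read of up to MCDE_MAX_DCS_READ bytes, retried on acknowledge errors. */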
+int mcde_dsi_dcs_read(struct mcde_chnl_state *chnl,
+ u8 cmd, u32 *data, int *len)
+{
+ int ret = 0;
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+ u32 settings;
+ bool ok = false;
+ bool error, ack_with_err;
+ u8 nbr_of_retries = DSI_READ_NBR_OF_RETRIES;
+
+ if (*len > MCDE_MAX_DCS_READ || chnl->port.type != MCDE_PORTTYPE_DSI)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+
+ _mcde_chnl_enable(chnl);
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!chnl->formatter_updated)
+ (void)update_channel_static_registers(chnl);
+
+ set_channel_state_sync(chnl, CHNLSTATE_DSI_READ);
+
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true);
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(READ) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(1) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(DCS_READ);
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, cmd);
+
+ do {
+ u8 wait = DSI_READ_TIMEOUT;
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_RD_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+
+ while (wait-- && !(error = dsi_rfld(link, DSI_DIRECT_CMD_STS,
+ READ_COMPLETED_WITH_ERR)) &&
+ !(ok = dsi_rfld(link, DSI_DIRECT_CMD_STS,
+ READ_COMPLETED)))
+ udelay(DSI_READ_DELAY);
+
+ ack_with_err = dsi_rfld(link, DSI_DIRECT_CMD_STS,
+ ACKNOWLEDGE_WITH_ERR_RECEIVED);
+ if (ack_with_err)
+ dev_warn(&mcde_dev->dev,
+ "DCS Acknowledge Error Report %.4X\n",
+ dsi_rfld(link, DSI_DIRECT_CMD_STS, ACK_VAL));
+ } while (--nbr_of_retries && ack_with_err);
+
+ if (ok) {
+ int rdsize;
+ u32 rddat;
+
+ rdsize = dsi_rfld(link, DSI_DIRECT_CMD_RD_PROPERTY, RD_SIZE);
+ rddat = dsi_rreg(link, DSI_DIRECT_CMD_RDDAT);
+ if (rdsize < *len)
+ dev_warn(&mcde_dev->dev, "DCS incomplete read %d<%d"
+ " (%.8X)\n", rdsize, *len, rddat);
+ *len = min(*len, rdsize);
+ memcpy(data, &rddat, *len);
+ } else {
+ dev_err(&mcde_dev->dev, "DCS read failed, err=%d, sts=%X\n",
+ error, dsi_rreg(link, DSI_DIRECT_CMD_STS));
+ ret = -EIO;
+ }
+
+ dsi_wreg(link, DSI_CMD_MODE_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0);
+
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return ret;
+}
+
+/*
+ * Set Maximum Return Packet size is a command that specifies the
+ * maximum size of the payload transmitted from peripheral back to
+ * the host processor.
+ *
+ * During power-on or reset sequence, the Maximum Return Packet Size
+ * is set to a default value of one. In order to be able to use
+ * mcde_dsi_dcs_read for reading more than 1 byte at a time, this
+ * parameter should be set by the host processor to the desired value
+ * in the initialization routine before commencing normal operation.
+ */
+int mcde_dsi_set_max_pkt_size(struct mcde_chnl_state *chnl)
+{
+ u32 settings;
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+
+ if (chnl->port.type != MCDE_PORTTYPE_DSI)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EIO;
+ }
+
+ set_channel_state_sync(chnl, CHNLSTATE_DSI_WRITE);
+
+ /*
+ * Set Maximum Return Packet Size is a two-byte command packet
+ * that specifies the maximum size of the payload as u16 value.
+ * The order of bytes is: MaxSize LSB, MaxSize MSB
+ */
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(WRITE) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(2) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ SET_MAX_PKT_SIZE);
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, MCDE_MAX_DCS_READ);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return 0;
+}
+
+static void dsi_te_poll_req(struct mcde_chnl_state *chnl)
+{
+ u8 lnk = chnl->port.link;
+ const struct mcde_port *port = &chnl->port;
+
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, false);
+ if (port->ifc == 0)
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, IF1_TE_EN, true);
+ if (port->ifc == 1)
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, IF2_TE_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true);
+ dsi_wfld(lnk, DSI_CMD_MODE_CTL, TE_TIMEOUT, 0x3FF);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, TE_POLLING_EN, true);
+}
+
+static void dsi_te_poll_set_timer(struct mcde_chnl_state *chnl,
+ unsigned int timeout)
+{
+ mod_timer(&chnl->dsi_te_timer,
+ jiffies +
+ msecs_to_jiffies(timeout));
+}
+
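+/*
+ * TE polling watchdog: if no TE answer arrived in time, briefly force the
+ * DSI link into stop mode and rearm the timer.
+ */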
+static void dsi_te_timer_function(unsigned long arg)
+{
+ struct mcde_chnl_state *chnl;
+ u8 lnk;
+
+ if (arg >= num_channels) {
+ dev_err(&mcde_dev->dev, "%s invalid arg:%ld\n", __func__, arg);
+ return;
+ }
+
+ chnl = &channels[arg];
+
+ if (mcde_is_enabled && chnl->enabled && chnl->formatter_updated) {
+ lnk = chnl->port.link;
+ /* No TE answer; force stop */
+ dsi_wfld(lnk, DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, true);
+ udelay(20);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, false);
+ dev_info(&mcde_dev->dev, "DSI%d force stop\n", lnk);
+ dsi_te_poll_set_timer(chnl, DSI_TE_NO_ANSWER_TIMEOUT);
+ } else {
+		dev_info(&mcde_dev->dev, "DSI force stop skipped\n");
+ }
+}
+
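+/*
+ * Request a BTA TE from the panel and enable the TE-received and no-TE
+ * error interrupts.
+ */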
+static void dsi_te_request(struct mcde_chnl_state *chnl)
+{
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+ u32 settings;
+
+ dev_vdbg(&mcde_dev->dev, "Request BTA TE, chnl=%d\n",
+ chnl->id);
+
+ set_channel_state_atomic(chnl, CHNLSTATE_WAIT_TE);
+
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, true);
+ dsi_wfld(link, DSI_CMD_MODE_CTL, TE_TIMEOUT, 0x3FF);
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(TE_REQ) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(2) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(DCS_SHORT_WRITE_1);
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, DCS_CMD_SET_TEAR_ON);
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR,
+ DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(true));
+ dsi_wfld(link, DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EN, true);
+ dsi_wreg(link, DSI_CMD_MODE_STS_CLR,
+ DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(true));
+ dsi_wfld(link, DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EN, true);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+}
+
+/* MCDE channels */
+static struct mcde_chnl_state *_mcde_chnl_get(enum mcde_chnl chnl_id,
+ enum mcde_fifo fifo, const struct mcde_port *port)
+{
+ int i;
+ struct mcde_chnl_state *chnl = NULL;
+
+ static struct mcde_col_transform ycbcr_2_rgb = {
+ /* Note that in MCDE YUV 422 pixels come as VYU pixels */
+ .matrix = {
+ {0xff30, 0x012a, 0xff9c},
+ {0x0000, 0x012a, 0x0204},
+ {0x0199, 0x012a, 0x0000},
+ },
+ .offset = {0x0088, 0xfeeb, 0xff21},
+ };
+
+ static struct mcde_col_transform rgb_2_ycbcr = {
+ .matrix = {
+ {0x0042, 0x0081, 0x0019},
+ {0xffda, 0xffb6, 0x0070},
+ {0x0070, 0xffa2, 0xffee},
+ },
+ .offset = {0x0010, 0x0080, 0x0080},
+ };
+
+ /* Allocate channel */
+ for (i = 0; i < num_channels; i++) {
+ if (chnl_id == channels[i].id)
+ chnl = &channels[i];
+ }
+ if (!chnl) {
+ dev_dbg(&mcde_dev->dev, "Invalid channel, chnl=%d\n", chnl_id);
+ return ERR_PTR(-EINVAL);
+ }
+ if (chnl->reserved) {
+ dev_dbg(&mcde_dev->dev, "Channel in use, chnl=%d\n", chnl_id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ chnl->port = *port;
+ chnl->fifo = fifo;
+ chnl->formatter_updated = false;
+ chnl->ycbcr_2_rgb = ycbcr_2_rgb;
+ chnl->rgb_2_ycbcr = rgb_2_ycbcr;
+
+ chnl->blend_en = true;
+ chnl->blend_ctrl = MCDE_CRA0_BLENDCTRL_SOURCE;
+ chnl->alpha_blend = 0xFF;
+
+ _mcde_chnl_apply(chnl);
+ chnl->reserved = true;
+
+ if (chnl->port.type == MCDE_PORTTYPE_DPI) {
+ chnl->clk_dpi = clk_get(&mcde_dev->dev, CLK_DPI);
+ if (chnl->port.phy.dpi.tv_mode)
+ chnl->vcmp_per_field = true;
+ } else if (chnl->port.type == MCDE_PORTTYPE_DSI &&
+ dsi_use_clk_framework) {
+ char dsihs_name[10];
+ char dsilp_name[10];
+
+ sprintf(dsihs_name, "dsihs%d", port->link);
+ sprintf(dsilp_name, "dsilp%d", port->link);
+
+ chnl->clk_dsi_lp = clk_get(&mcde_dev->dev, dsilp_name);
+ chnl->clk_dsi_hs = clk_get(&mcde_dev->dev, dsihs_name);
+ if (port->phy.dsi.lp_freq != clk_round_rate(chnl->clk_dsi_lp,
+ port->phy.dsi.lp_freq))
+ dev_warn(&mcde_dev->dev, "Could not set dsi lp freq"
+ " to %d\n", port->phy.dsi.lp_freq);
+ WARN_ON_ONCE(clk_set_rate(chnl->clk_dsi_lp,
+ port->phy.dsi.lp_freq));
+ if (port->phy.dsi.hs_freq != clk_round_rate(chnl->clk_dsi_hs,
+ port->phy.dsi.hs_freq))
+ dev_warn(&mcde_dev->dev, "Could not set dsi hs freq"
+ " to %d\n", port->phy.dsi.hs_freq);
+ WARN_ON_ONCE(clk_set_rate(chnl->clk_dsi_hs,
+ port->phy.dsi.hs_freq));
+ }
+ return chnl;
+}
+
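+/*
+ * Translate the channel's logical settings (rotation, pixel format, clock
+ * selection, palette and blending) into shadow register values.
+ */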
+static int _mcde_chnl_apply(struct mcde_chnl_state *chnl)
+{
+ bool roten = false;
+ u8 rotdir = 0;
+
+ if (chnl->rotation == MCDE_DISPLAY_ROT_90_CCW) {
+ roten = true;
+ rotdir = MCDE_ROTACONF_ROTDIR_CCW;
+ } else if (chnl->rotation == MCDE_DISPLAY_ROT_90_CW) {
+ roten = true;
+ rotdir = MCDE_ROTACONF_ROTDIR_CW;
+ }
+ /* REVIEW: 180 deg? */
+
+ chnl->regs.bpp = portfmt2bpp(chnl->port.pixel_format);
+ chnl->regs.synchronized_update = chnl->synchronized_update;
+ chnl->regs.roten = roten;
+ chnl->regs.rotdir = rotdir;
+ chnl->regs.rotbuf1 = chnl->rotbuf1;
+ chnl->regs.rotbuf2 = chnl->rotbuf2;
+ chnl->regs.palette_enable = chnl->palette_enable;
+ chnl->regs.map_r = chnl->map_r;
+ chnl->regs.map_g = chnl->map_g;
+ chnl->regs.map_b = chnl->map_b;
+ if (chnl->port.type == MCDE_PORTTYPE_DSI) {
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_MCDECLK;
+ chnl->regs.dsipacking =
+ portfmt2dsipacking(chnl->port.pixel_format);
+ } else if (chnl->port.type == MCDE_PORTTYPE_DPI) {
+ if (chnl->port.phy.dpi.tv_mode) {
+ chnl->regs.internal_clk = false;
+ chnl->regs.bcd = true;
+ if (chnl->id == MCDE_CHNL_A)
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_TV1CLK;
+ else
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_TV2CLK;
+ } else {
+ chnl->regs.internal_clk = true;
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_CLKPLL72;
+ chnl->regs.cdwin =
+ portfmt2cdwin(chnl->port.pixel_format);
+ chnl->regs.bcd = (chnl->port.phy.dpi.clock_div < 2);
+ if (!chnl->regs.bcd)
+ chnl->regs.pcd =
+ chnl->port.phy.dpi.clock_div - 2;
+ }
+ dpi_video_mode_apply(chnl);
+ }
+
+ chnl->regs.blend_ctrl = chnl->blend_ctrl;
+ chnl->regs.blend_en = chnl->blend_en;
+ chnl->regs.alpha_blend = chnl->alpha_blend;
+
+ chnl->regs.dirty = true;
+
+ dev_vdbg(&mcde_dev->dev, "Channel applied, chnl=%d\n", chnl->id);
+ return 0;
+}
+
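+/* Flush any dirty TV, color conversion and channel registers to hardware. */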
+static void setup_channel(struct mcde_chnl_state *chnl)
+{
+ set_channel_state_sync(chnl, CHNLSTATE_SETUP);
+
+ if (chnl->port.type == MCDE_PORTTYPE_DPI && chnl->tv_regs.dirty)
+ update_dpi_registers(chnl->id, &chnl->tv_regs);
+ if ((chnl->id == MCDE_CHNL_A || chnl->id == MCDE_CHNL_B) &&
+ chnl->col_regs.dirty)
+ update_col_registers(chnl->id, &chnl->col_regs);
+ if (chnl->regs.dirty)
+ update_channel_registers(chnl->id, &chnl->regs, &chnl->port,
+ chnl->fifo, &chnl->vmode);
+}
+
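+/*
+ * Continuous (auto-trig) update: if already running, optionally wait for
+ * VCMP; otherwise set up the channel, enable the TE sync source and start
+ * the flow.
+ */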
+static void chnl_update_continous(struct mcde_chnl_state *chnl,
+ bool tripple_buffer)
+{
+ if (chnl->state == CHNLSTATE_RUNNING) {
+ if (!tripple_buffer)
+ wait_for_vcmp(chnl);
+ return;
+ }
+
+ setup_channel(chnl);
+ if (chnl->port.sync_src == MCDE_SYNCSRC_TE0) {
+ mcde_wfld(MCDE_CRC, SYCEN0, true);
+ } else if (chnl->port.sync_src == MCDE_SYNCSRC_TE1) {
+ mcde_wfld(MCDE_VSCRC1, VSSEL, 1);
+ mcde_wfld(MCDE_CRC, SYCEN1, true);
+ }
+
+ enable_flow(chnl);
+}
+
+static void chnl_update_non_continous(struct mcde_chnl_state *chnl)
+{
+ /* Commit settings to registers */
+ setup_channel(chnl);
+
+ if (chnl->regs.synchronized_update &&
+ chnl->power_mode == MCDE_DISPLAY_PM_ON) {
+ if (chnl->port.type == MCDE_PORTTYPE_DSI &&
+ chnl->port.sync_src == MCDE_SYNCSRC_BTA)
+ dsi_te_request(chnl);
+ } else {
+ do_softwaretrig(chnl);
+ dev_vdbg(&mcde_dev->dev, "Channel update (no sync), chnl=%d\n",
+ chnl->id);
+ }
+}
+
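+/* Flush dirty buffer addresses and settings for one overlay, if present. */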
+static void chnl_update_overlay(struct mcde_chnl_state *chnl,
+ struct mcde_ovly_state *ovly)
+{
+ if (!ovly)
+ return;
+
+ if (ovly->regs.dirty_buf) {
+ if (!chnl->port.update_auto_trig)
+ set_channel_state_sync(chnl, CHNLSTATE_SETUP);
+ update_overlay_registers_on_the_fly(ovly->idx, &ovly->regs);
+ mcde_debugfs_overlay_update(chnl->id, ovly != chnl->ovly0);
+ }
+ if (ovly->regs.dirty) {
+ if (!chnl->port.update_auto_trig)
+ set_channel_state_sync(chnl, CHNLSTATE_SETUP);
+ chnl_ovly_pixel_format_apply(chnl, ovly);
+ update_overlay_registers(ovly->idx, &ovly->regs, &chnl->port,
+ chnl->fifo, chnl->regs.x, chnl->regs.y,
+ chnl->regs.ppl, chnl->regs.lpf, ovly->stride,
+ chnl->vmode.interlaced, chnl->rotation);
+ if (chnl->id == MCDE_CHNL_A || chnl->id == MCDE_CHNL_B)
+ update_col_registers(chnl->id, &chnl->col_regs);
+ }
+}
+
+static int _mcde_chnl_update(struct mcde_chnl_state *chnl,
+ struct mcde_rectangle *update_area,
+ bool tripple_buffer)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ /* TODO: lock & make wait->trig async */
+ if (!chnl->enabled || !update_area
+ || (update_area->w == 0 && update_area->h == 0)) {
+ return -EINVAL;
+ }
+
+ if (chnl->port.update_auto_trig && tripple_buffer)
+ wait_for_vcmp(chnl);
+
+ chnl->regs.x = update_area->x;
+ chnl->regs.y = update_area->y;
+ /* TODO Crop against video_mode.xres and video_mode.yres */
+ chnl->regs.ppl = update_area->w;
+ chnl->regs.lpf = update_area->h;
+ if (chnl->port.type == MCDE_PORTTYPE_DPI &&
+ chnl->port.phy.dpi.tv_mode) {
+ /* subtract border */
+ chnl->regs.ppl -= chnl->tv_regs.dho + chnl->tv_regs.alw;
+ /* subtract double borders, i.e. for both fields */
+ chnl->regs.lpf -= 2 * (chnl->tv_regs.dvo + chnl->tv_regs.bsl);
+ } else if (chnl->port.type == MCDE_PORTTYPE_DSI &&
+ chnl->vmode.interlaced)
+ chnl->regs.lpf /= 2;
+
+ chnl_update_overlay(chnl, chnl->ovly0);
+ chnl_update_overlay(chnl, chnl->ovly1);
+
+ if (chnl->port.update_auto_trig)
+ chnl_update_continous(chnl, tripple_buffer);
+ else
+ chnl_update_non_continous(chnl);
+
+ dev_vdbg(&mcde_dev->dev, "Channel updated, chnl=%d\n", chnl->id);
+ mcde_debugfs_channel_update(chnl->id);
+ return 0;
+}
+
+static int _mcde_chnl_enable(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ chnl->enabled = true;
+ return 0;
+}
+
+/* API entry points */
+/* MCDE channels */
+struct mcde_chnl_state *mcde_chnl_get(enum mcde_chnl chnl_id,
+ enum mcde_fifo fifo, const struct mcde_port *port)
+{
+ struct mcde_chnl_state *chnl;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ chnl = _mcde_chnl_get(chnl_id, fifo, port);
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return chnl;
+}
+
+int mcde_chnl_set_pixel_format(struct mcde_chnl_state *chnl,
+ enum mcde_port_pix_fmt pix_fmt)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+ chnl->port.pixel_format = pix_fmt;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_set_palette(struct mcde_chnl_state *chnl,
+ struct mcde_palette_table *palette)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+ if (palette != NULL) {
+ chnl->map_r = palette->map_col_ch0;
+ chnl->map_g = palette->map_col_ch1;
+ chnl->map_b = palette->map_col_ch2;
+ chnl->palette_enable = true;
+ } else {
+ chnl->map_r = NULL;
+ chnl->map_g = NULL;
+ chnl->map_b = NULL;
+ chnl->palette_enable = false;
+ }
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+ return 0;
+}
+
+void mcde_chnl_set_col_convert(struct mcde_chnl_state *chnl,
+ struct mcde_col_transform *transform,
+ enum mcde_col_convert convert)
+{
+ switch (convert) {
+ case MCDE_CONVERT_RGB_2_YCBCR:
+ memcpy(&chnl->rgb_2_ycbcr, transform,
+ sizeof(struct mcde_col_transform));
+ /* force update: */
+ if (chnl->transform == &chnl->rgb_2_ycbcr) {
+ chnl->transform = NULL;
+ chnl->ovly0->dirty = true;
+ chnl->ovly1->dirty = true;
+ }
+ break;
+ case MCDE_CONVERT_YCBCR_2_RGB:
+ memcpy(&chnl->ycbcr_2_rgb, transform,
+ sizeof(struct mcde_col_transform));
+ /* force update: */
+ if (chnl->transform == &chnl->ycbcr_2_rgb) {
+ chnl->transform = NULL;
+ chnl->ovly0->dirty = true;
+ chnl->ovly1->dirty = true;
+ }
+ break;
+ default:
+ /* Trivial transforms are handled internally */
+ dev_warn(&mcde_dev->dev,
+ "%s: unsupported col convert\n", __func__);
+ break;
+ }
+}
+
+int mcde_chnl_set_video_mode(struct mcde_chnl_state *chnl,
+ struct mcde_video_mode *vmode)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl == NULL || vmode == NULL)
+ return -EINVAL;
+
+ chnl->vmode = *vmode;
+
+ chnl->ovly0->dirty = true;
+ if (chnl->ovly1)
+ chnl->ovly1->dirty = true;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+EXPORT_SYMBOL(mcde_chnl_set_video_mode);
+
+int mcde_chnl_set_rotation(struct mcde_chnl_state *chnl,
+ enum mcde_display_rotation rotation, u32 rotbuf1, u32 rotbuf2)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ if (chnl->id != MCDE_CHNL_A && chnl->id != MCDE_CHNL_B)
+ return -EINVAL;
+
+ chnl->rotation = rotation;
+ chnl->rotbuf1 = rotbuf1;
+ chnl->rotbuf2 = rotbuf2;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_enable_synchronized_update(struct mcde_chnl_state *chnl,
+ bool enable)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ chnl->synchronized_update = enable;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_set_power_mode(struct mcde_chnl_state *chnl,
+ enum mcde_display_power_mode power_mode)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ chnl->power_mode = power_mode;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_apply(struct mcde_chnl_state *chnl)
+{
+ int ret;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+ ret = _mcde_chnl_apply(chnl);
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit with ret %d\n", __func__, ret);
+
+ return ret;
+}
+
+int mcde_chnl_update(struct mcde_chnl_state *chnl,
+ struct mcde_rectangle *update_area,
+ bool tripple_buffer)
+{
+ int ret;
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+ enable_mcde_hw();
+ if (!chnl->formatter_updated)
+ (void)update_channel_static_registers(chnl);
+
+ if (chnl->regs.roten && !chnl->esram_is_enabled) {
+ WARN_ON_ONCE(regulator_enable(regulator_esram_epod));
+ chnl->esram_is_enabled = true;
+ }
+
+ ret = _mcde_chnl_update(chnl, update_area, tripple_buffer);
+
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit with ret %d\n", __func__, ret);
+
+ return ret;
+}
+
+void mcde_chnl_put(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl->enabled) {
+ stop_channel(chnl);
+ cancel_delayed_work(&hw_timeout_work);
+ disable_mcde_hw(false, true);
+ chnl->enabled = false;
+ }
+
+ chnl->reserved = false;
+ if (chnl->port.type == MCDE_PORTTYPE_DPI) {
+ clk_put(chnl->clk_dpi);
+ if (chnl->port.phy.dpi.tv_mode) {
+ chnl->vcmp_per_field = false;
+ chnl->even_vcmp = false;
+ }
+ } else if (chnl->port.type == MCDE_PORTTYPE_DSI) {
+ if (dsi_use_clk_framework) {
+ clk_put(chnl->clk_dsi_lp);
+ clk_put(chnl->clk_dsi_hs);
+ }
+ }
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+void mcde_chnl_stop_flow(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+ if (mcde_is_enabled && chnl->enabled)
+ stop_channel(chnl);
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+void mcde_chnl_enable(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+ _mcde_chnl_enable(chnl);
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+void mcde_chnl_disable(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+ cancel_delayed_work(&hw_timeout_work);
+ /* The channel must be stopped before it is disabled */
+ WARN_ON_ONCE(chnl->state == CHNLSTATE_RUNNING);
+ disable_mcde_hw(false, true);
+ chnl->enabled = false;
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
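
A minimal sketch of how the channel entry points above are typically combined (illustrative only, not part of this patch): the fifo/port/vmode arguments are assumed to come from the board's display configuration, and the helper name example_bring_up_channel is hypothetical.

	/*
	 * Sketch only: typical call order for the channel API above.
	 * fifo/port/vmode are assumed to come from the display configuration.
	 */
	static int example_bring_up_channel(enum mcde_fifo fifo,
			const struct mcde_port *port,
			struct mcde_video_mode *vmode, u16 w, u16 h)
	{
		struct mcde_chnl_state *chnl;
		struct mcde_rectangle area = { .x = 0, .y = 0, .w = w, .h = h };

		chnl = mcde_chnl_get(MCDE_CHNL_A, fifo, port);
		if (chnl == NULL || IS_ERR(chnl))
			return -EINVAL;

		mcde_chnl_set_pixel_format(chnl, port->pixel_format);
		mcde_chnl_set_video_mode(chnl, vmode);
		mcde_chnl_set_power_mode(chnl, MCDE_DISPLAY_PM_ON);
		mcde_chnl_enable(chnl);
		mcde_chnl_apply(chnl);			/* commit shadow state */
		/* push one frame, no triple buffering */
		return mcde_chnl_update(chnl, &area, false);
	}
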
+
+/* MCDE overlays */
+struct mcde_ovly_state *mcde_ovly_get(struct mcde_chnl_state *chnl)
+{
+ struct mcde_ovly_state *ovly;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return ERR_PTR(-EINVAL);
+
+ if (!chnl->ovly0->inuse)
+ ovly = chnl->ovly0;
+ else if (chnl->ovly1 && !chnl->ovly1->inuse)
+ ovly = chnl->ovly1;
+ else
+ ovly = ERR_PTR(-EBUSY);
+
+ if (!IS_ERR(ovly)) {
+ ovly->inuse = true;
+ ovly->paddr = 0;
+ ovly->stride = 0;
+ ovly->pix_fmt = MCDE_OVLYPIXFMT_RGB565;
+ ovly->src_x = 0;
+ ovly->src_y = 0;
+ ovly->dst_x = 0;
+ ovly->dst_y = 0;
+ ovly->dst_z = 0;
+ ovly->w = 0;
+ ovly->h = 0;
+ ovly->alpha_value = 0xFF;
+ ovly->alpha_source = MCDE_OVL1CONF2_BP_PER_PIXEL_ALPHA;
+ ovly->dirty = true;
+ mcde_ovly_apply(ovly);
+ }
+
+ return ovly;
+}
+
+void mcde_ovly_put(struct mcde_ovly_state *ovly)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!ovly->inuse)
+ return;
+ if (ovly->regs.enabled) {
+ ovly->paddr = 0;
+ ovly->dirty = true;
+ mcde_ovly_apply(ovly); /* REVIEW: API call calling API call! */
+ }
+ ovly->inuse = false;
+}
+
+void mcde_ovly_set_source_buf(struct mcde_ovly_state *ovly, u32 paddr)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->dirty = paddr == 0 || ovly->paddr == 0;
+ ovly->dirty_buf = true;
+
+ ovly->paddr = paddr;
+}
+
+void mcde_ovly_set_source_info(struct mcde_ovly_state *ovly,
+ u32 stride, enum mcde_ovly_pix_fmt pix_fmt)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->stride = stride;
+ ovly->pix_fmt = pix_fmt;
+ ovly->dirty = true;
+}
+
+void mcde_ovly_set_source_area(struct mcde_ovly_state *ovly,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->src_x = x;
+ ovly->src_y = y;
+ ovly->w = w;
+ ovly->h = h;
+ ovly->dirty = true;
+}
+
+void mcde_ovly_set_dest_pos(struct mcde_ovly_state *ovly, u16 x, u16 y, u8 z)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->dst_x = x;
+ ovly->dst_y = y;
+ ovly->dst_z = z;
+ ovly->dirty = true;
+}
+
+void mcde_ovly_apply(struct mcde_ovly_state *ovly)
+{
+ if (!ovly->inuse)
+ return;
+
+ mcde_lock(__func__, __LINE__);
+
+ if (ovly->dirty || ovly->dirty_buf) {
+ ovly->regs.ch_id = ovly->chnl->id;
+ ovly->regs.enabled = ovly->paddr != 0;
+ ovly->regs.baseaddress0 = ovly->paddr;
+ ovly->regs.baseaddress1 =
+ ovly->regs.baseaddress0 + ovly->stride;
+ ovly->regs.dirty_buf = true;
+ ovly->dirty_buf = false;
+ }
+ if (!ovly->dirty) {
+ mcde_unlock(__func__, __LINE__);
+ return;
+ }
+
+ switch (ovly->pix_fmt) { /* REVIEW: Extract to table */
+ case MCDE_OVLYPIXFMT_RGB565:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_RGB565;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = true;
+ break;
+ case MCDE_OVLYPIXFMT_RGBA5551:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_IRGB1555;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = false;
+ break;
+ case MCDE_OVLYPIXFMT_RGBA4444:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_ARGB4444;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = false;
+ break;
+ case MCDE_OVLYPIXFMT_RGB888:
+ ovly->regs.bits_per_pixel = 24;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_RGB888;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = true;
+ break;
+ case MCDE_OVLYPIXFMT_RGBX8888:
+ ovly->regs.bits_per_pixel = 32;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_XRGB8888;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = true;
+ ovly->regs.opq = true;
+ break;
+ case MCDE_OVLYPIXFMT_RGBA8888:
+ ovly->regs.bits_per_pixel = 32;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_ARGB8888;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = false;
+ break;
+ case MCDE_OVLYPIXFMT_YCbCr422:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_YCBCR422;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = true;
+ break;
+ default:
+ break;
+ }
+
+ ovly->regs.ppl = ovly->w;
+ ovly->regs.lpf = ovly->h;
+ ovly->regs.cropx = ovly->src_x;
+ ovly->regs.cropy = ovly->src_y;
+ ovly->regs.xpos = ovly->dst_x;
+ ovly->regs.ypos = ovly->dst_y;
+ ovly->regs.z = ovly->dst_z > 0; /* 0 or 1 */
+ ovly->regs.col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED;
+ ovly->regs.alpha_source = ovly->alpha_source;
+ ovly->regs.alpha_value = ovly->alpha_value;
+
+ ovly->regs.dirty = true;
+ ovly->dirty = false;
+
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "Overlay applied, idx=%d chnl=%d\n",
+ ovly->idx, ovly->chnl->id);
+}
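
For the overlay side, a corresponding sketch (illustrative only, not part of this patch): paddr is assumed to be the physical address of an already allocated w x h RGB565 frame buffer, and the helper name example_show_buffer is hypothetical.

	/*
	 * Sketch only: typical use of the overlay API above for an RGB565
	 * buffer of w x h pixels at physical address paddr.
	 */
	static int example_show_buffer(struct mcde_chnl_state *chnl,
			u32 paddr, u16 w, u16 h)
	{
		struct mcde_ovly_state *ovly = mcde_ovly_get(chnl);

		if (IS_ERR(ovly))
			return PTR_ERR(ovly);

		mcde_ovly_set_source_buf(ovly, paddr);
		mcde_ovly_set_source_info(ovly, w * 2, MCDE_OVLYPIXFMT_RGB565);
		mcde_ovly_set_source_area(ovly, 0, 0, w, h);
		mcde_ovly_set_dest_pos(ovly, 0, 0, 0);
		mcde_ovly_apply(ovly);	/* latch settings into ovly->regs */
		return 0;
	}
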
+
+static int init_clocks_and_power(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct mcde_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata->regulator_mcde_epod_id) {
+ regulator_mcde_epod = regulator_get(&pdev->dev,
+ pdata->regulator_mcde_epod_id);
+ if (IS_ERR(regulator_mcde_epod)) {
+ ret = PTR_ERR(regulator_mcde_epod);
+ dev_warn(&pdev->dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_mcde_epod_id);
+ regulator_mcde_epod = NULL;
+ return ret;
+ }
+ } else {
+ dev_warn(&pdev->dev, "%s: No mcde regulator id supplied\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (pdata->regulator_esram_epod_id) {
+ regulator_esram_epod = regulator_get(&pdev->dev,
+ pdata->regulator_esram_epod_id);
+ if (IS_ERR(regulator_esram_epod)) {
+ ret = PTR_ERR(regulator_esram_epod);
+ dev_warn(&pdev->dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_esram_epod_id);
+ regulator_esram_epod = NULL;
+ goto regulator_esram_err;
+ }
+ } else {
+ dev_warn(&pdev->dev, "%s: No esram regulator id supplied\n",
+ __func__);
+ }
+
+ if (pdata->regulator_vana_id) {
+ regulator_vana = regulator_get(&pdev->dev,
+ pdata->regulator_vana_id);
+ if (IS_ERR(regulator_vana)) {
+ ret = PTR_ERR(regulator_vana);
+ dev_warn(&pdev->dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_vana_id);
+ regulator_vana = NULL;
+ goto regulator_vana_err;
+ }
+ } else {
+ dev_dbg(&pdev->dev, "%s: No vana regulator id supplied\n",
+ __func__);
+ }
+
+ if (!dsi_use_clk_framework) {
+ clock_dsi = clk_get(&pdev->dev, pdata->clock_dsi_id);
+ if (IS_ERR(clock_dsi))
+ dev_dbg(&pdev->dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->clock_dsi_id);
+
+ clock_dsi_lp = clk_get(&pdev->dev, pdata->clock_dsi_lp_id);
+ if (IS_ERR(clock_dsi_lp))
+ dev_dbg(&pdev->dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->clock_dsi_lp_id);
+ }
+
+ clock_mcde = clk_get(&pdev->dev, CLK_MCDE);
+ if (IS_ERR(clock_mcde)) {
+ ret = PTR_ERR(clock_mcde);
+ dev_warn(&pdev->dev, "%s: Failed to get mcde_clk\n", __func__);
+ goto clk_mcde_err;
+ }
+
+ return ret;
+
+clk_mcde_err:
+ if (!dsi_use_clk_framework) {
+ clk_put(clock_dsi_lp);
+ clk_put(clock_dsi);
+ }
+
+ if (regulator_vana)
+ regulator_put(regulator_vana);
+regulator_vana_err:
+ if (regulator_esram_epod)
+ regulator_put(regulator_esram_epod);
+regulator_esram_err:
+ regulator_put(regulator_mcde_epod);
+ return ret;
+}
+
+static void remove_clocks_and_power(struct platform_device *pdev)
+{
+ /* REVIEW: Release only if exist */
+ /* REVIEW: Remove make sure MCDE is done */
+ if (!dsi_use_clk_framework) {
+ clk_put(clock_dsi_lp);
+ clk_put(clock_dsi);
+ }
+ clk_put(clock_mcde);
+ if (regulator_vana)
+ regulator_put(regulator_vana);
+ regulator_put(regulator_mcde_epod);
+ regulator_put(regulator_esram_epod);
+}
+
+static int probe_hw(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+ u32 pid;
+ struct resource *res;
+
+ dev_info(&mcde_dev->dev, "Probe HW\n");
+
+ /* Get MCDE HW version */
+ regulator_enable(regulator_mcde_epod);
+ clk_enable(clock_mcde);
+ pid = mcde_rreg(MCDE_PID);
+
+ dev_info(&mcde_dev->dev, "MCDE HW revision 0x%.8X\n", pid);
+
+ clk_disable(clock_mcde);
+ regulator_disable(regulator_mcde_epod);
+
+ switch (pid) {
+ case MCDE_VERSION_3_0_8:
+ num_dsilinks = 3;
+ num_channels = 4;
+ num_overlays = 6;
+ dsi_ifc_is_supported = true;
+ input_fifo_size = 128;
+ output_fifo_ab_size = 640;
+ output_fifo_c0c1_size = 160;
+ dsi_use_clk_framework = false;
+ dev_info(&mcde_dev->dev, "db8500 V2 HW\n");
+ break;
+ case MCDE_VERSION_4_0_4:
+ num_dsilinks = 2;
+ num_channels = 2;
+ num_overlays = 3;
+ input_fifo_size = 80;
+ output_fifo_ab_size = 320;
+ dsi_ifc_is_supported = false;
+ dsi_use_clk_framework = false;
+ dev_info(&mcde_dev->dev, "db5500 V2 HW\n");
+ break;
+ case MCDE_VERSION_4_1_3:
+ num_dsilinks = 3;
+ num_channels = 4;
+ num_overlays = 6;
+ dsi_ifc_is_supported = true;
+ input_fifo_size = 192;
+ output_fifo_ab_size = 640;
+ output_fifo_c0c1_size = 160;
+ dsi_use_clk_framework = false;
+ dev_info(&mcde_dev->dev, "db9540 V1 HW\n");
+ break;
+ case MCDE_VERSION_3_0_5:
+ /* Intentional */
+ case MCDE_VERSION_1_0_4:
+ /* Intentional */
+ default:
+ dev_err(&mcde_dev->dev, "Unsupported HW version\n");
+ ret = -ENOTSUPP;
+ goto unsupported_hw;
+ break;
+ }
+
+ channels = kzalloc(num_channels * sizeof(struct mcde_chnl_state),
+ GFP_KERNEL);
+ if (!channels) {
+ ret = -ENOMEM;
+ goto failed_channels_alloc;
+ }
+
+ overlays = kzalloc(num_overlays * sizeof(struct mcde_ovly_state),
+ GFP_KERNEL);
+ if (!overlays) {
+ ret = -ENOMEM;
+ goto failed_overlays_alloc;
+ }
+
+ dsiio = kzalloc(num_dsilinks * sizeof(*dsiio), GFP_KERNEL);
+ if (!dsiio) {
+ ret = -ENOMEM;
+ goto failed_dsi_alloc;
+ }
+
+ for (i = 0; i < num_dsilinks; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1+i);
+ if (!res) {
+ dev_dbg(&pdev->dev, "No DSI%d io defined\n", i);
+ ret = -EINVAL;
+ goto failed_get_dsi_io;
+ }
+ dsiio[i] = ioremap(res->start, res->end - res->start + 1);
+ if (!dsiio[i]) {
+ dev_dbg(&pdev->dev, "MCDE DSI%d iomap failed\n", i);
+ ret = -EINVAL;
+ goto failed_map_dsi_io;
+ }
+ dev_info(&pdev->dev, "MCDE DSI%d iomap: 0x%.8X->0x%.8X\n",
+ i, (u32)res->start, (u32)dsiio[i]);
+ }
+
+ /* Init MCDE */
+ for (i = 0; i < num_overlays; i++)
+ overlays[i].idx = i;
+
+ channels[0].ovly0 = &overlays[0];
+ channels[0].ovly1 = &overlays[1];
+ channels[1].ovly0 = &overlays[2];
+
+ if (pid == MCDE_VERSION_3_0_8) {
+ channels[1].ovly1 = &overlays[3];
+ channels[2].ovly0 = &overlays[4];
+ channels[3].ovly0 = &overlays[5];
+ }
+
+ mcde_debugfs_create(&mcde_dev->dev);
+ for (i = 0; i < num_channels; i++) {
+ channels[i].id = i;
+
+ channels[i].ovly0->chnl = &channels[i];
+ if (channels[i].ovly1)
+ channels[i].ovly1->chnl = &channels[i];
+
+ init_waitqueue_head(&channels[i].state_waitq);
+ init_waitqueue_head(&channels[i].vcmp_waitq);
+ init_timer(&channels[i].dsi_te_timer);
+ channels[i].dsi_te_timer.function =
+ dsi_te_timer_function;
+ channels[i].dsi_te_timer.data = i;
+
+ mcde_debugfs_channel_create(i, &channels[i]);
+ mcde_debugfs_overlay_create(i, 0);
+ if (channels[i].ovly1)
+ mcde_debugfs_overlay_create(i, 1);
+ }
+ (void) prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "mcde", 100);
+ mcde_clk_rate = clk_get_rate(clock_mcde);
+ dev_info(&mcde_dev->dev, "MCDE_CLK is %d Hz\n", mcde_clk_rate);
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "mcde");
+
+ return 0;
+
+failed_map_dsi_io:
+ for (i = 0; i < num_dsilinks; i++) {
+ if (dsiio[i])
+ iounmap(dsiio[i]);
+ }
+failed_get_dsi_io:
+ kfree(dsiio);
+ dsiio = NULL;
+failed_dsi_alloc:
+ kfree(overlays);
+ overlays = NULL;
+failed_overlays_alloc:
+ kfree(channels);
+ channels = NULL;
+unsupported_hw:
+failed_channels_alloc:
+ num_dsilinks = 0;
+ num_channels = 0;
+ num_overlays = 0;
+ return ret;
+}
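
As a side note, the raw PID read in probe_hw() can also be broken into its version fields with the MCDE_PID field macros that mcde_regs.h (added later in this patch) provides. A small sketch, not part of the patch; the helper name example_print_pid is hypothetical.

	/* Sketch only: decode MCDE_PID using the field macros from mcde_regs.h */
	static void example_print_pid(struct device *dev, u32 pid)
	{
		dev_info(dev, "MCDE HW version %d.%d.%d (metal fix %d)\n",
			 MCDE_REG2VAL(MCDE_PID, MAJOR_VERSION, pid),
			 MCDE_REG2VAL(MCDE_PID, MINOR_VERSION, pid),
			 MCDE_REG2VAL(MCDE_PID, DEVELOPMENT_VERSION, pid),
			 MCDE_REG2VAL(MCDE_PID, METALFIX_VERSION, pid));
	}
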
+
+static int __devinit mcde_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ struct mcde_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ mcde_dev = pdev;
+
+ /* Hook up irq */
+ mcde_irq = platform_get_irq(pdev, 0);
+ if (mcde_irq <= 0) {
+ dev_dbg(&pdev->dev, "No irq defined\n");
+ ret = -EINVAL;
+ goto failed_irq_get;
+ }
+
+ /* Map I/O */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(&pdev->dev, "No MCDE io defined\n");
+ ret = -EINVAL;
+ goto failed_get_mcde_io;
+ }
+ mcdeio = ioremap(res->start, res->end - res->start + 1);
+ if (!mcdeio) {
+ dev_dbg(&pdev->dev, "MCDE iomap failed\n");
+ ret = -EINVAL;
+ goto failed_map_mcde_io;
+ }
+ dev_info(&pdev->dev, "MCDE iomap: 0x%.8X->0x%.8X\n",
+ (u32)res->start, (u32)mcdeio);
+
+ ret = init_clocks_and_power(pdev);
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "%s: init_clocks_and_power failed\n"
+ , __func__);
+ goto failed_init_clocks;
+ }
+
+ INIT_DELAYED_WORK_DEFERRABLE(&hw_timeout_work, work_sleep_function);
+
+ ret = probe_hw(pdev);
+ if (ret)
+ goto failed_probe_hw;
+
+ ret = enable_mcde_hw();
+ if (ret)
+ goto failed_mcde_enable;
+
+ return 0;
+
+failed_mcde_enable:
+failed_probe_hw:
+ remove_clocks_and_power(pdev);
+failed_init_clocks:
+ iounmap(mcdeio);
+failed_map_mcde_io:
+failed_get_mcde_io:
+failed_irq_get:
+ return ret;
+}
+
+static int __devexit mcde_remove(struct platform_device *pdev)
+{
+ struct mcde_chnl_state *chnl = &channels[0];
+
+ for (; chnl < &channels[num_channels]; chnl++) {
+ if (del_timer(&chnl->dsi_te_timer))
+ dev_vdbg(&mcde_dev->dev,
+ "%s dsi timer could not be stopped\n"
+ , __func__);
+ }
+
+ remove_clocks_and_power(pdev);
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int mcde_resume(struct platform_device *pdev)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EINVAL;
+ }
+
+ mcde_unlock(__func__, __LINE__);
+
+ return 0;
+}
+
+static int mcde_suspend(struct platform_device *pdev, pm_message_t state)
+{
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+
+ cancel_delayed_work(&hw_timeout_work);
+
+ if (!mcde_is_enabled) {
+ mcde_unlock(__func__, __LINE__);
+ return 0;
+ }
+ disable_mcde_hw(true, true);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return 0;
+}
+#endif
+
+static struct platform_driver mcde_driver = {
+ .probe = mcde_probe,
+ .remove = mcde_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = mcde_suspend,
+ .resume = mcde_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "mcde",
+ },
+};
+
+int __init mcde_init(void)
+{
+ mutex_init(&mcde_hw_lock);
+ return platform_driver_register(&mcde_driver);
+}
+
+void mcde_exit(void)
+{
+ /* REVIEW: shutdown MCDE? */
+ platform_driver_unregister(&mcde_driver);
+}
diff --git a/drivers/video/mcde/mcde_mod.c b/drivers/video/mcde/mcde_mod.c
new file mode 100644
index 00000000000..60df0d4965f
--- /dev/null
+++ b/drivers/video/mcde/mcde_mod.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <video/mcde.h>
+#include <video/mcde_fb.h>
+#include <video/mcde_dss.h>
+#include <video/mcde_display.h>
+
+/* Module init */
+
+static int __init mcde_subsystem_init(void)
+{
+ int ret;
+ pr_info("MCDE subsystem init begin\n");
+
+ /* MCDE module init sequence */
+ ret = mcde_init();
+ if (ret)
+ goto mcde_failed;
+ ret = mcde_display_init();
+ if (ret)
+ goto mcde_display_failed;
+ ret = mcde_dss_init();
+ if (ret)
+ goto mcde_dss_failed;
+ ret = mcde_fb_init();
+ if (ret)
+ goto mcde_fb_failed;
+ pr_info("MCDE subsystem init done\n");
+
+ goto done;
+mcde_fb_failed:
+ mcde_dss_exit();
+mcde_dss_failed:
+ mcde_display_exit();
+mcde_display_failed:
+ mcde_exit();
+mcde_failed:
+done:
+ return ret;
+}
+#ifdef MODULE
+module_init(mcde_subsystem_init);
+#else
+fs_initcall(mcde_subsystem_init);
+#endif
+
+static void __exit mcde_module_exit(void)
+{
+ mcde_exit();
+ mcde_display_exit();
+ mcde_dss_exit();
+}
+module_exit(mcde_module_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE driver");
+
diff --git a/drivers/video/mcde/mcde_regs.h b/drivers/video/mcde/mcde_regs.h
new file mode 100644
index 00000000000..0eece3faea2
--- /dev/null
+++ b/drivers/video/mcde/mcde_regs.h
@@ -0,0 +1,5096 @@
+
+#define MCDE_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define MCDE_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
+
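
The whole register header is built on these two helpers: MCDE_VAL2REG shifts a field value into its register position and masks it, and MCDE_REG2VAL extracts it again. A small illustration (not part of the patch), using the MCDE_CONF0 OUTMUX1 field defined below:

	u32 conf0 = MCDE_CONF0_OUTMUX1(3);	/* (3 << 19) & 0x00380000 == 0x00180000 */
	u32 mux1 = MCDE_REG2VAL(MCDE_CONF0, OUTMUX1, conf0);	/* == 3 */
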
+#define MCDE_CR 0x00000000
+#define MCDE_CR_IFIFOCTRLEN_SHIFT 15
+#define MCDE_CR_IFIFOCTRLEN_MASK 0x00008000
+#define MCDE_CR_IFIFOCTRLEN(__x) \
+ MCDE_VAL2REG(MCDE_CR, IFIFOCTRLEN, __x)
+#define MCDE_CR_AUTOCLKG_EN_SHIFT 30
+#define MCDE_CR_AUTOCLKG_EN_MASK 0x40000000
+#define MCDE_CR_AUTOCLKG_EN(__x) \
+ MCDE_VAL2REG(MCDE_CR, AUTOCLKG_EN, __x)
+#define MCDE_CR_MCDEEN_SHIFT 31
+#define MCDE_CR_MCDEEN_MASK 0x80000000
+#define MCDE_CR_MCDEEN(__x) \
+ MCDE_VAL2REG(MCDE_CR, MCDEEN, __x)
+#define MCDE_CONF0 0x00000004
+#define MCDE_CONF0_SYNCMUX0_SHIFT 0
+#define MCDE_CONF0_SYNCMUX0_MASK 0x00000001
+#define MCDE_CONF0_SYNCMUX0(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX0, __x)
+#define MCDE_CONF0_SYNCMUX1_SHIFT 1
+#define MCDE_CONF0_SYNCMUX1_MASK 0x00000002
+#define MCDE_CONF0_SYNCMUX1(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX1, __x)
+#define MCDE_CONF0_SYNCMUX2_SHIFT 2
+#define MCDE_CONF0_SYNCMUX2_MASK 0x00000004
+#define MCDE_CONF0_SYNCMUX2(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX2, __x)
+#define MCDE_CONF0_SYNCMUX3_SHIFT 3
+#define MCDE_CONF0_SYNCMUX3_MASK 0x00000008
+#define MCDE_CONF0_SYNCMUX3(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX3, __x)
+#define MCDE_CONF0_SYNCMUX4_SHIFT 4
+#define MCDE_CONF0_SYNCMUX4_MASK 0x00000010
+#define MCDE_CONF0_SYNCMUX4(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX4, __x)
+#define MCDE_CONF0_SYNCMUX5_SHIFT 5
+#define MCDE_CONF0_SYNCMUX5_MASK 0x00000020
+#define MCDE_CONF0_SYNCMUX5(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX5, __x)
+#define MCDE_CONF0_SYNCMUX6_SHIFT 6
+#define MCDE_CONF0_SYNCMUX6_MASK 0x00000040
+#define MCDE_CONF0_SYNCMUX6(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX6, __x)
+#define MCDE_CONF0_SYNCMUX7_SHIFT 7
+#define MCDE_CONF0_SYNCMUX7_MASK 0x00000080
+#define MCDE_CONF0_SYNCMUX7(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX7, __x)
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT 12
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_MASK 0x00007000
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, IFIFOCTRLWTRMRKLVL, __x)
+#define MCDE_CONF0_OUTMUX0_SHIFT 16
+#define MCDE_CONF0_OUTMUX0_MASK 0x00070000
+#define MCDE_CONF0_OUTMUX0(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX0, __x)
+#define MCDE_CONF0_OUTMUX1_SHIFT 19
+#define MCDE_CONF0_OUTMUX1_MASK 0x00380000
+#define MCDE_CONF0_OUTMUX1(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX1, __x)
+#define MCDE_CONF0_OUTMUX2_SHIFT 22
+#define MCDE_CONF0_OUTMUX2_MASK 0x01C00000
+#define MCDE_CONF0_OUTMUX2(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX2, __x)
+#define MCDE_CONF0_OUTMUX3_SHIFT 25
+#define MCDE_CONF0_OUTMUX3_MASK 0x0E000000
+#define MCDE_CONF0_OUTMUX3(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX3, __x)
+#define MCDE_CONF0_OUTMUX4_SHIFT 28
+#define MCDE_CONF0_OUTMUX4_MASK 0x70000000
+#define MCDE_CONF0_OUTMUX4(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX4, __x)
+#define MCDE_SSP 0x00000008
+#define MCDE_SSP_SSPDATA_SHIFT 0
+#define MCDE_SSP_SSPDATA_MASK 0x000000FF
+#define MCDE_SSP_SSPDATA(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPDATA, __x)
+#define MCDE_SSP_SSPCMD_SHIFT 8
+#define MCDE_SSP_SSPCMD_MASK 0x00000100
+#define MCDE_SSP_SSPCMD_DATA 0
+#define MCDE_SSP_SSPCMD_COMMAND 1
+#define MCDE_SSP_SSPCMD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPCMD, MCDE_SSP_SSPCMD_##__x)
+#define MCDE_SSP_SSPCMD(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPCMD, __x)
+#define MCDE_SSP_SSPEN_SHIFT 16
+#define MCDE_SSP_SSPEN_MASK 0x00010000
+#define MCDE_SSP_SSPEN(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPEN, __x)
+#define MCDE_AIS 0x00000100
+#define MCDE_AIS_MCDEPPI_SHIFT 0
+#define MCDE_AIS_MCDEPPI_MASK 0x00000001
+#define MCDE_AIS_MCDEPPI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDEPPI, __x)
+#define MCDE_AIS_MCDEOVLI_SHIFT 1
+#define MCDE_AIS_MCDEOVLI_MASK 0x00000002
+#define MCDE_AIS_MCDEOVLI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDEOVLI, __x)
+#define MCDE_AIS_MCDECHNLI_SHIFT 2
+#define MCDE_AIS_MCDECHNLI_MASK 0x00000004
+#define MCDE_AIS_MCDECHNLI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDECHNLI, __x)
+#define MCDE_AIS_MCDEERRI_SHIFT 3
+#define MCDE_AIS_MCDEERRI_MASK 0x00000008
+#define MCDE_AIS_MCDEERRI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDEERRI, __x)
+#define MCDE_AIS_DSI0AI_SHIFT 4
+#define MCDE_AIS_DSI0AI_MASK 0x00000010
+#define MCDE_AIS_DSI0AI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, DSI0AI, __x)
+#define MCDE_AIS_DSI1AI_SHIFT 5
+#define MCDE_AIS_DSI1AI_MASK 0x00000020
+#define MCDE_AIS_DSI1AI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, DSI1AI, __x)
+#define MCDE_AIS_DSI2AI_SHIFT 6
+#define MCDE_AIS_DSI2AI_MASK 0x00000040
+#define MCDE_AIS_DSI2AI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, DSI2AI, __x)
+#define MCDE_IMSCPP 0x00000104
+#define MCDE_IMSCPP_VCMPAIM_SHIFT 0
+#define MCDE_IMSCPP_VCMPAIM_MASK 0x00000001
+#define MCDE_IMSCPP_VCMPAIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPAIM, __x)
+#define MCDE_IMSCPP_VCMPBIM_SHIFT 1
+#define MCDE_IMSCPP_VCMPBIM_MASK 0x00000002
+#define MCDE_IMSCPP_VCMPBIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPBIM, __x)
+#define MCDE_IMSCPP_VSCC0IM_SHIFT 2
+#define MCDE_IMSCPP_VSCC0IM_MASK 0x00000004
+#define MCDE_IMSCPP_VSCC0IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VSCC0IM, __x)
+#define MCDE_IMSCPP_VSCC1IM_SHIFT 3
+#define MCDE_IMSCPP_VSCC1IM_MASK 0x00000008
+#define MCDE_IMSCPP_VSCC1IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VSCC1IM, __x)
+#define MCDE_IMSCPP_VCMPC0IM_SHIFT 4
+#define MCDE_IMSCPP_VCMPC0IM_MASK 0x00000010
+#define MCDE_IMSCPP_VCMPC0IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPC0IM, __x)
+#define MCDE_IMSCPP_VCMPC1IM_SHIFT 5
+#define MCDE_IMSCPP_VCMPC1IM_MASK 0x00000020
+#define MCDE_IMSCPP_VCMPC1IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPC1IM, __x)
+#define MCDE_IMSCPP_ROTFDIM_B_SHIFT 6
+#define MCDE_IMSCPP_ROTFDIM_B_MASK 0x00000040
+#define MCDE_IMSCPP_ROTFDIM_B(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, ROTFDIM_B, __x)
+#define MCDE_IMSCPP_ROTFDIM_A_SHIFT 7
+#define MCDE_IMSCPP_ROTFDIM_A_MASK 0x00000080
+#define MCDE_IMSCPP_ROTFDIM_A(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, ROTFDIM_A, __x)
+#define MCDE_IMSCOVL 0x00000108
+#define MCDE_IMSCOVL_OVLRDIM_SHIFT 0
+#define MCDE_IMSCOVL_OVLRDIM_MASK 0x0000FFFF
+#define MCDE_IMSCOVL_OVLRDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCOVL, OVLRDIM, __x)
+#define MCDE_IMSCOVL_OVLFDIM_SHIFT 16
+#define MCDE_IMSCOVL_OVLFDIM_MASK 0xFFFF0000
+#define MCDE_IMSCOVL_OVLFDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCOVL, OVLFDIM, __x)
+#define MCDE_IMSCCHNL 0x0000010C
+#define MCDE_IMSCCHNL_CHNLRDIM_SHIFT 0
+#define MCDE_IMSCCHNL_CHNLRDIM_MASK 0x0000FFFF
+#define MCDE_IMSCCHNL_CHNLRDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCCHNL, CHNLRDIM, __x)
+#define MCDE_IMSCCHNL_CHNLAIM_SHIFT 16
+#define MCDE_IMSCCHNL_CHNLAIM_MASK 0xFFFF0000
+#define MCDE_IMSCCHNL_CHNLAIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCCHNL, CHNLAIM, __x)
+#define MCDE_IMSCERR 0x00000110
+#define MCDE_IMSCERR_FUAIM_SHIFT 0
+#define MCDE_IMSCERR_FUAIM_MASK 0x00000001
+#define MCDE_IMSCERR_FUAIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUAIM, __x)
+#define MCDE_IMSCERR_FUBIM_SHIFT 1
+#define MCDE_IMSCERR_FUBIM_MASK 0x00000002
+#define MCDE_IMSCERR_FUBIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUBIM, __x)
+#define MCDE_IMSCERR_SCHBLCKDIM_SHIFT 2
+#define MCDE_IMSCERR_SCHBLCKDIM_MASK 0x00000004
+#define MCDE_IMSCERR_SCHBLCKDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, SCHBLCKDIM, __x)
+#define MCDE_IMSCERR_ROTAFEIM_WRITE_SHIFT 3
+#define MCDE_IMSCERR_ROTAFEIM_WRITE_MASK 0x00000008
+#define MCDE_IMSCERR_ROTAFEIM_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTAFEIM_WRITE, __x)
+#define MCDE_IMSCERR_ROTAFEIM_READ_SHIFT 4
+#define MCDE_IMSCERR_ROTAFEIM_READ_MASK 0x00000010
+#define MCDE_IMSCERR_ROTAFEIM_READ(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTAFEIM_READ, __x)
+#define MCDE_IMSCERR_ROTBFEIM_WRITE_SHIFT 5
+#define MCDE_IMSCERR_ROTBFEIM_WRITE_MASK 0x00000020
+#define MCDE_IMSCERR_ROTBFEIM_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTBFEIM_WRITE, __x)
+#define MCDE_IMSCERR_ROTBFEIM_READ_SHIFT 6
+#define MCDE_IMSCERR_ROTBFEIM_READ_MASK 0x00000040
+#define MCDE_IMSCERR_ROTBFEIM_READ(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTBFEIM_READ, __x)
+#define MCDE_IMSCERR_FUC0IM_SHIFT 7
+#define MCDE_IMSCERR_FUC0IM_MASK 0x00000080
+#define MCDE_IMSCERR_FUC0IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUC0IM, __x)
+#define MCDE_IMSCERR_FUC1IM_SHIFT 8
+#define MCDE_IMSCERR_FUC1IM_MASK 0x00000100
+#define MCDE_IMSCERR_FUC1IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUC1IM, __x)
+#define MCDE_IMSCERR_OVLFERRIM_SHIFT 16
+#define MCDE_IMSCERR_OVLFERRIM_MASK 0xFFFF0000
+#define MCDE_IMSCERR_OVLFERRIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, OVLFERRIM, __x)
+#define MCDE_RISPP 0x00000114
+#define MCDE_RISPP_VCMPARIS_SHIFT 0
+#define MCDE_RISPP_VCMPARIS_MASK 0x00000001
+#define MCDE_RISPP_VCMPARIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPARIS, __x)
+#define MCDE_RISPP_VCMPBRIS_SHIFT 1
+#define MCDE_RISPP_VCMPBRIS_MASK 0x00000002
+#define MCDE_RISPP_VCMPBRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPBRIS, __x)
+#define MCDE_RISPP_VSCC0RIS_SHIFT 2
+#define MCDE_RISPP_VSCC0RIS_MASK 0x00000004
+#define MCDE_RISPP_VSCC0RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VSCC0RIS, __x)
+#define MCDE_RISPP_VSCC1RIS_SHIFT 3
+#define MCDE_RISPP_VSCC1RIS_MASK 0x00000008
+#define MCDE_RISPP_VSCC1RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VSCC1RIS, __x)
+#define MCDE_RISPP_VCMPC0RIS_SHIFT 4
+#define MCDE_RISPP_VCMPC0RIS_MASK 0x00000010
+#define MCDE_RISPP_VCMPC0RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPC0RIS, __x)
+#define MCDE_RISPP_VCMPC1RIS_SHIFT 5
+#define MCDE_RISPP_VCMPC1RIS_MASK 0x00000020
+#define MCDE_RISPP_VCMPC1RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPC1RIS, __x)
+#define MCDE_RISPP_ROTFDRIS_B_SHIFT 6
+#define MCDE_RISPP_ROTFDRIS_B_MASK 0x00000040
+#define MCDE_RISPP_ROTFDRIS_B(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, ROTFDRIS_B, __x)
+#define MCDE_RISPP_ROTFDRIS_A_SHIFT 7
+#define MCDE_RISPP_ROTFDRIS_A_MASK 0x00000080
+#define MCDE_RISPP_ROTFDRIS_A(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, ROTFDRIS_A, __x)
+#define MCDE_RISOVL 0x00000118
+#define MCDE_RISOVL_OVLRDRIS_SHIFT 0
+#define MCDE_RISOVL_OVLRDRIS_MASK 0x0000FFFF
+#define MCDE_RISOVL_OVLRDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISOVL, OVLRDRIS, __x)
+#define MCDE_RISOVL_OVLFDRIS_SHIFT 16
+#define MCDE_RISOVL_OVLFDRIS_MASK 0xFFFF0000
+#define MCDE_RISOVL_OVLFDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISOVL, OVLFDRIS, __x)
+#define MCDE_RISCHNL 0x0000011C
+#define MCDE_RISCHNL_CHNLRDRIS_SHIFT 0
+#define MCDE_RISCHNL_CHNLRDRIS_MASK 0x0000FFFF
+#define MCDE_RISCHNL_CHNLRDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISCHNL, CHNLRDRIS, __x)
+#define MCDE_RISCHNL_CHNLARIS_SHIFT 16
+#define MCDE_RISCHNL_CHNLARIS_MASK 0xFFFF0000
+#define MCDE_RISCHNL_CHNLARIS(__x) \
+ MCDE_VAL2REG(MCDE_RISCHNL, CHNLARIS, __x)
+#define MCDE_RISERR 0x00000120
+#define MCDE_RISERR_FUARIS_SHIFT 0
+#define MCDE_RISERR_FUARIS_MASK 0x00000001
+#define MCDE_RISERR_FUARIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUARIS, __x)
+#define MCDE_RISERR_FUBRIS_SHIFT 1
+#define MCDE_RISERR_FUBRIS_MASK 0x00000002
+#define MCDE_RISERR_FUBRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUBRIS, __x)
+#define MCDE_RISERR_SCHBLCKDRIS_SHIFT 2
+#define MCDE_RISERR_SCHBLCKDRIS_MASK 0x00000004
+#define MCDE_RISERR_SCHBLCKDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, SCHBLCKDRIS, __x)
+#define MCDE_RISERR_ROTAFERIS_WRITE_SHIFT 3
+#define MCDE_RISERR_ROTAFERIS_WRITE_MASK 0x00000008
+#define MCDE_RISERR_ROTAFERIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTAFERIS_WRITE, __x)
+#define MCDE_RISERR_ROTAFERIS_READ_SHIFT 4
+#define MCDE_RISERR_ROTAFERIS_READ_MASK 0x00000010
+#define MCDE_RISERR_ROTAFERIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTAFERIS_READ, __x)
+#define MCDE_RISERR_ROTBFERIS_WRITE_SHIFT 5
+#define MCDE_RISERR_ROTBFERIS_WRITE_MASK 0x00000020
+#define MCDE_RISERR_ROTBFERIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTBFERIS_WRITE, __x)
+#define MCDE_RISERR_ROTBFERIS_READ_SHIFT 6
+#define MCDE_RISERR_ROTBFERIS_READ_MASK 0x00000040
+#define MCDE_RISERR_ROTBFERIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTBFERIS_READ, __x)
+#define MCDE_RISERR_FUC0RIS_SHIFT 7
+#define MCDE_RISERR_FUC0RIS_MASK 0x00000080
+#define MCDE_RISERR_FUC0RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUC0RIS, __x)
+#define MCDE_RISERR_FUC1RIS_SHIFT 8
+#define MCDE_RISERR_FUC1RIS_MASK 0x00000100
+#define MCDE_RISERR_FUC1RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUC1RIS, __x)
+#define MCDE_RISERR_OVLFERRRIS_SHIFT 16
+#define MCDE_RISERR_OVLFERRRIS_MASK 0xFFFF0000
+#define MCDE_RISERR_OVLFERRRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, OVLFERRRIS, __x)
+#define MCDE_MISPP 0x00000124
+#define MCDE_MISPP_VCMPAMIS_SHIFT 0
+#define MCDE_MISPP_VCMPAMIS_MASK 0x00000001
+#define MCDE_MISPP_VCMPAMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPAMIS, __x)
+#define MCDE_MISPP_VCMPBMIS_SHIFT 1
+#define MCDE_MISPP_VCMPBMIS_MASK 0x00000002
+#define MCDE_MISPP_VCMPBMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPBMIS, __x)
+#define MCDE_MISPP_VSCC0MIS_SHIFT 2
+#define MCDE_MISPP_VSCC0MIS_MASK 0x00000004
+#define MCDE_MISPP_VSCC0MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VSCC0MIS, __x)
+#define MCDE_MISPP_VSCC1MIS_SHIFT 3
+#define MCDE_MISPP_VSCC1MIS_MASK 0x00000008
+#define MCDE_MISPP_VSCC1MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VSCC1MIS, __x)
+#define MCDE_MISPP_VCMPC0MIS_SHIFT 4
+#define MCDE_MISPP_VCMPC0MIS_MASK 0x00000010
+#define MCDE_MISPP_VCMPC0MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPC0MIS, __x)
+#define MCDE_MISPP_VCMPC1MIS_SHIFT 5
+#define MCDE_MISPP_VCMPC1MIS_MASK 0x00000020
+#define MCDE_MISPP_VCMPC1MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPC1MIS, __x)
+#define MCDE_MISPP_ROTFDMIS_A_SHIFT 6
+#define MCDE_MISPP_ROTFDMIS_A_MASK 0x00000040
+#define MCDE_MISPP_ROTFDMIS_A(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, ROTFDMIS_A, __x)
+#define MCDE_MISPP_ROTFDMIS_B_SHIFT 7
+#define MCDE_MISPP_ROTFDMIS_B_MASK 0x00000080
+#define MCDE_MISPP_ROTFDMIS_B(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, ROTFDMIS_B, __x)
+#define MCDE_MISOVL 0x00000128
+#define MCDE_MISOVL_OVLRDMIS_SHIFT 0
+#define MCDE_MISOVL_OVLRDMIS_MASK 0x0000FFFF
+#define MCDE_MISOVL_OVLRDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISOVL, OVLRDMIS, __x)
+#define MCDE_MISOVL_OVLFDMIS_SHIFT 16
+#define MCDE_MISOVL_OVLFDMIS_MASK 0xFFFF0000
+#define MCDE_MISOVL_OVLFDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISOVL, OVLFDMIS, __x)
+#define MCDE_MISCHNL 0x0000012C
+#define MCDE_MISCHNL_CHNLRDMIS_SHIFT 0
+#define MCDE_MISCHNL_CHNLRDMIS_MASK 0x0000FFFF
+#define MCDE_MISCHNL_CHNLRDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISCHNL, CHNLRDMIS, __x)
+#define MCDE_MISCHNL_CHNLAMIS_SHIFT 16
+#define MCDE_MISCHNL_CHNLAMIS_MASK 0xFFFF0000
+#define MCDE_MISCHNL_CHNLAMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISCHNL, CHNLAMIS, __x)
+#define MCDE_MISERR 0x00000130
+#define MCDE_MISERR_FUAMIS_SHIFT 0
+#define MCDE_MISERR_FUAMIS_MASK 0x00000001
+#define MCDE_MISERR_FUAMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUAMIS, __x)
+#define MCDE_MISERR_FUBMIS_SHIFT 1
+#define MCDE_MISERR_FUBMIS_MASK 0x00000002
+#define MCDE_MISERR_FUBMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUBMIS, __x)
+#define MCDE_MISERR_SCHBLCKDMIS_SHIFT 2
+#define MCDE_MISERR_SCHBLCKDMIS_MASK 0x00000004
+#define MCDE_MISERR_SCHBLCKDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, SCHBLCKDMIS, __x)
+#define MCDE_MISERR_ROTAFEMIS_WRITE_SHIFT 3
+#define MCDE_MISERR_ROTAFEMIS_WRITE_MASK 0x00000008
+#define MCDE_MISERR_ROTAFEMIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTAFEMIS_WRITE, __x)
+#define MCDE_MISERR_ROTAFEMIS_READ_SHIFT 4
+#define MCDE_MISERR_ROTAFEMIS_READ_MASK 0x00000010
+#define MCDE_MISERR_ROTAFEMIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTAFEMIS_READ, __x)
+#define MCDE_MISERR_ROTBFEMIS_WRITE_SHIFT 5
+#define MCDE_MISERR_ROTBFEMIS_WRITE_MASK 0x00000020
+#define MCDE_MISERR_ROTBFEMIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTBFEMIS_WRITE, __x)
+#define MCDE_MISERR_ROTBFEMIS_READ_SHIFT 6
+#define MCDE_MISERR_ROTBFEMIS_READ_MASK 0x00000040
+#define MCDE_MISERR_ROTBFEMIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTBFEMIS_READ, __x)
+#define MCDE_MISERR_FUC0MIS_SHIFT 7
+#define MCDE_MISERR_FUC0MIS_MASK 0x00000080
+#define MCDE_MISERR_FUC0MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUC0MIS, __x)
+#define MCDE_MISERR_FUC1MIS_SHIFT 8
+#define MCDE_MISERR_FUC1MIS_MASK 0x00000100
+#define MCDE_MISERR_FUC1MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUC1MIS, __x)
+#define MCDE_MISERR_OVLFERMIS_SHIFT 16
+#define MCDE_MISERR_OVLFERMIS_MASK 0xFFFF0000
+#define MCDE_MISERR_OVLFERMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, OVLFERMIS, __x)
+#define MCDE_SISPP 0x00000134
+#define MCDE_SISPP_VCMPASIS_SHIFT 0
+#define MCDE_SISPP_VCMPASIS_MASK 0x00000001
+#define MCDE_SISPP_VCMPASIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPASIS, __x)
+#define MCDE_SISPP_VCMPBSIS_SHIFT 1
+#define MCDE_SISPP_VCMPBSIS_MASK 0x00000002
+#define MCDE_SISPP_VCMPBSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPBSIS, __x)
+#define MCDE_SISPP_VSCC0SIS_SHIFT 2
+#define MCDE_SISPP_VSCC0SIS_MASK 0x00000004
+#define MCDE_SISPP_VSCC0SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VSCC0SIS, __x)
+#define MCDE_SISPP_VSCC1SIS_SHIFT 3
+#define MCDE_SISPP_VSCC1SIS_MASK 0x00000008
+#define MCDE_SISPP_VSCC1SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VSCC1SIS, __x)
+#define MCDE_SISPP_VCMPC0SIS_SHIFT 4
+#define MCDE_SISPP_VCMPC0SIS_MASK 0x00000010
+#define MCDE_SISPP_VCMPC0SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPC0SIS, __x)
+#define MCDE_SISPP_VCMPC1SIS_SHIFT 5
+#define MCDE_SISPP_VCMPC1SIS_MASK 0x00000020
+#define MCDE_SISPP_VCMPC1SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPC1SIS, __x)
+#define MCDE_SISPP_ROTFDSIS_A_SHIFT 6
+#define MCDE_SISPP_ROTFDSIS_A_MASK 0x00000040
+#define MCDE_SISPP_ROTFDSIS_A(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, ROTFDSIS_A, __x)
+#define MCDE_SISPP_ROTFDSIS_B_SHIFT 7
+#define MCDE_SISPP_ROTFDSIS_B_MASK 0x00000080
+#define MCDE_SISPP_ROTFDSIS_B(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, ROTFDSIS_B, __x)
+#define MCDE_SISOVL 0x00000138
+#define MCDE_SISOVL_OVLRDSIS_SHIFT 0
+#define MCDE_SISOVL_OVLRDSIS_MASK 0x0000FFFF
+#define MCDE_SISOVL_OVLRDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISOVL, OVLRDSIS, __x)
+#define MCDE_SISOVL_OVLFDSIS_SHIFT 16
+#define MCDE_SISOVL_OVLFDSIS_MASK 0xFFFF0000
+#define MCDE_SISOVL_OVLFDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISOVL, OVLFDSIS, __x)
+#define MCDE_SISCHNL 0x0000013C
+#define MCDE_SISCHNL_CHNLRDSIS_SHIFT 0
+#define MCDE_SISCHNL_CHNLRDSIS_MASK 0x0000FFFF
+#define MCDE_SISCHNL_CHNLRDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISCHNL, CHNLRDSIS, __x)
+#define MCDE_SISCHNL_CHNLASIS_SHIFT 16
+#define MCDE_SISCHNL_CHNLASIS_MASK 0xFFFF0000
+#define MCDE_SISCHNL_CHNLASIS(__x) \
+ MCDE_VAL2REG(MCDE_SISCHNL, CHNLASIS, __x)
+#define MCDE_SISERR 0x00000140
+#define MCDE_SISERR_FUASIS_SHIFT 0
+#define MCDE_SISERR_FUASIS_MASK 0x00000001
+#define MCDE_SISERR_FUASIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUASIS, __x)
+#define MCDE_SISERR_FUBSIS_SHIFT 1
+#define MCDE_SISERR_FUBSIS_MASK 0x00000002
+#define MCDE_SISERR_FUBSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUBSIS, __x)
+#define MCDE_SISERR_SCHBLCKDSIS_SHIFT 2
+#define MCDE_SISERR_SCHBLCKDSIS_MASK 0x00000004
+#define MCDE_SISERR_SCHBLCKDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, SCHBLCKDSIS, __x)
+#define MCDE_SISERR_ROTAFESIS_WRITE_SHIFT 3
+#define MCDE_SISERR_ROTAFESIS_WRITE_MASK 0x00000008
+#define MCDE_SISERR_ROTAFESIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTAFESIS_WRITE, __x)
+#define MCDE_SISERR_ROTAFESIS_READ_SHIFT 4
+#define MCDE_SISERR_ROTAFESIS_READ_MASK 0x00000010
+#define MCDE_SISERR_ROTAFESIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTAFESIS_READ, __x)
+#define MCDE_SISERR_ROTBFESIS_WRITE_SHIFT 5
+#define MCDE_SISERR_ROTBFESIS_WRITE_MASK 0x00000020
+#define MCDE_SISERR_ROTBFESIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTBFESIS_WRITE, __x)
+#define MCDE_SISERR_ROTBFESIS_READ_SHIFT 6
+#define MCDE_SISERR_ROTBFESIS_READ_MASK 0x00000040
+#define MCDE_SISERR_ROTBFESIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTBFESIS_READ, __x)
+#define MCDE_SISERR_FUC0SIS_SHIFT 7
+#define MCDE_SISERR_FUC0SIS_MASK 0x00000080
+#define MCDE_SISERR_FUC0SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUC0SIS, __x)
+#define MCDE_SISERR_FUC1SIS_SHIFT 8
+#define MCDE_SISERR_FUC1SIS_MASK 0x00000100
+#define MCDE_SISERR_FUC1SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUC1SIS, __x)
+#define MCDE_SISERR_OVLFERSIS_SHIFT 16
+#define MCDE_SISERR_OVLFERSIS_MASK 0xFFFF0000
+#define MCDE_SISERR_OVLFERSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, OVLFERSIS, __x)
+#define MCDE_PID 0x000001FC
+#define MCDE_PID_METALFIX_VERSION_SHIFT 0
+#define MCDE_PID_METALFIX_VERSION_MASK 0x000000FF
+#define MCDE_PID_METALFIX_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, METALFIX_VERSION, __x)
+#define MCDE_PID_DEVELOPMENT_VERSION_SHIFT 8
+#define MCDE_PID_DEVELOPMENT_VERSION_MASK 0x0000FF00
+#define MCDE_PID_DEVELOPMENT_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, DEVELOPMENT_VERSION, __x)
+#define MCDE_PID_MINOR_VERSION_SHIFT 16
+#define MCDE_PID_MINOR_VERSION_MASK 0x00FF0000
+#define MCDE_PID_MINOR_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, MINOR_VERSION, __x)
+#define MCDE_PID_MAJOR_VERSION_SHIFT 24
+#define MCDE_PID_MAJOR_VERSION_MASK 0xFF000000
+#define MCDE_PID_MAJOR_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, MAJOR_VERSION, __x)
+#define MCDE_EXTSRC0A0 0x00000200
+#define MCDE_EXTSRC0A0_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC0A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC0A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC1A0 0x00000220
+#define MCDE_EXTSRC1A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC1A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC1A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC2A0 0x00000240
+#define MCDE_EXTSRC2A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC2A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC2A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC3A0 0x00000260
+#define MCDE_EXTSRC3A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC3A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC3A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC4A0 0x00000280
+#define MCDE_EXTSRC4A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC4A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC4A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC5A0 0x000002A0
+#define MCDE_EXTSRC5A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC5A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC5A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC6A0 0x000002C0
+#define MCDE_EXTSRC6A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC6A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC6A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC7A0 0x000002E0
+#define MCDE_EXTSRC7A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC7A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC7A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC8A0 0x00000300
+#define MCDE_EXTSRC8A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC8A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC8A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC9A0 0x00000320
+#define MCDE_EXTSRC9A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC9A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC9A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC0A1 0x00000204
+#define MCDE_EXTSRC0A1_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC0A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC0A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC1A1 0x00000224
+#define MCDE_EXTSRC1A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC1A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC1A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC2A1 0x00000244
+#define MCDE_EXTSRC2A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC2A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC2A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC3A1 0x00000264
+#define MCDE_EXTSRC3A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC3A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC3A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC4A1 0x00000284
+#define MCDE_EXTSRC4A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC4A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC4A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC5A1 0x000002A4
+#define MCDE_EXTSRC5A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC5A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC5A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC6A1 0x000002C4
+#define MCDE_EXTSRC6A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC6A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC6A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC7A1 0x000002E4
+#define MCDE_EXTSRC7A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC7A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC7A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC8A1 0x00000304
+#define MCDE_EXTSRC8A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC8A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC8A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC9A1 0x00000324
+#define MCDE_EXTSRC9A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC9A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC9A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC6A2 0x000002C8
+#define MCDE_EXTSRC6A2_BASEADDRESS2_SHIFT 3
+#define MCDE_EXTSRC6A2_BASEADDRESS2_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC6A2_BASEADDRESS2(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6A2, BASEADDRESS2, __x)
+#define MCDE_EXTSRC0CONF 0x0000020C
+#define MCDE_EXTSRC0CONF_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC0CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC0CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BUF_ID, __x)
+#define MCDE_EXTSRC0CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC0CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC0CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BUF_NB, __x)
+#define MCDE_EXTSRC0CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC0CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC0CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC0CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC0CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC0CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC0CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC0CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC0CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC0CONF_BPP_RGB444 4
+#define MCDE_EXTSRC0CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC0CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC0CONF_BPP_RGB565 7
+#define MCDE_EXTSRC0CONF_BPP_RGB888 8
+#define MCDE_EXTSRC0CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC0CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC0CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC0CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BPP, MCDE_EXTSRC0CONF_BPP_##__x)
+#define MCDE_EXTSRC0CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BPP, __x)
+#define MCDE_EXTSRC0CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC0CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC0CONF_BGR_RGB 0
+#define MCDE_EXTSRC0CONF_BGR_BGR 1
+#define MCDE_EXTSRC0CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BGR, MCDE_EXTSRC0CONF_BGR_##__x)
+#define MCDE_EXTSRC0CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BGR, __x)
+#define MCDE_EXTSRC0CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC0CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC0CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC0CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC0CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEBO, MCDE_EXTSRC0CONF_BEBO_##__x)
+#define MCDE_EXTSRC0CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEBO, __x)
+#define MCDE_EXTSRC0CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC0CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC0CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC0CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC0CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEPO, MCDE_EXTSRC0CONF_BEPO_##__x)
+#define MCDE_EXTSRC0CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEPO, __x)
+#define MCDE_EXTSRC1CONF 0x0000022C
+#define MCDE_EXTSRC1CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC1CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC1CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BUF_ID, __x)
+#define MCDE_EXTSRC1CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC1CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC1CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BUF_NB, __x)
+#define MCDE_EXTSRC1CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC1CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC1CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC1CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC1CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC1CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC1CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC1CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC1CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC1CONF_BPP_RGB444 4
+#define MCDE_EXTSRC1CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC1CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC1CONF_BPP_RGB565 7
+#define MCDE_EXTSRC1CONF_BPP_RGB888 8
+#define MCDE_EXTSRC1CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC1CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC1CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC1CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BPP, MCDE_EXTSRC1CONF_BPP_##__x)
+#define MCDE_EXTSRC1CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BPP, __x)
+#define MCDE_EXTSRC1CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC1CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC1CONF_BGR_RGB 0
+#define MCDE_EXTSRC1CONF_BGR_BGR 1
+#define MCDE_EXTSRC1CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BGR, MCDE_EXTSRC1CONF_BGR_##__x)
+#define MCDE_EXTSRC1CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BGR, __x)
+#define MCDE_EXTSRC1CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC1CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC1CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC1CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC1CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEBO, MCDE_EXTSRC1CONF_BEBO_##__x)
+#define MCDE_EXTSRC1CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEBO, __x)
+#define MCDE_EXTSRC1CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC1CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC1CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC1CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC1CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEPO, MCDE_EXTSRC1CONF_BEPO_##__x)
+#define MCDE_EXTSRC1CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEPO, __x)
+#define MCDE_EXTSRC2CONF 0x0000024C
+#define MCDE_EXTSRC2CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC2CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC2CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BUF_ID, __x)
+#define MCDE_EXTSRC2CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC2CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC2CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BUF_NB, __x)
+#define MCDE_EXTSRC2CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC2CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC2CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC2CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC2CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC2CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC2CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC2CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC2CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC2CONF_BPP_RGB444 4
+#define MCDE_EXTSRC2CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC2CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC2CONF_BPP_RGB565 7
+#define MCDE_EXTSRC2CONF_BPP_RGB888 8
+#define MCDE_EXTSRC2CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC2CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC2CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC2CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BPP, MCDE_EXTSRC2CONF_BPP_##__x)
+#define MCDE_EXTSRC2CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BPP, __x)
+#define MCDE_EXTSRC2CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC2CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC2CONF_BGR_RGB 0
+#define MCDE_EXTSRC2CONF_BGR_BGR 1
+#define MCDE_EXTSRC2CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BGR, MCDE_EXTSRC2CONF_BGR_##__x)
+#define MCDE_EXTSRC2CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BGR, __x)
+#define MCDE_EXTSRC2CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC2CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC2CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC2CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC2CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEBO, MCDE_EXTSRC2CONF_BEBO_##__x)
+#define MCDE_EXTSRC2CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEBO, __x)
+#define MCDE_EXTSRC2CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC2CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC2CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC2CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC2CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEPO, MCDE_EXTSRC2CONF_BEPO_##__x)
+#define MCDE_EXTSRC2CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEPO, __x)
+#define MCDE_EXTSRC3CONF 0x0000026C
+#define MCDE_EXTSRC3CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC3CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC3CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BUF_ID, __x)
+#define MCDE_EXTSRC3CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC3CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC3CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BUF_NB, __x)
+#define MCDE_EXTSRC3CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC3CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC3CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC3CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC3CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC3CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC3CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC3CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC3CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC3CONF_BPP_RGB444 4
+#define MCDE_EXTSRC3CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC3CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC3CONF_BPP_RGB565 7
+#define MCDE_EXTSRC3CONF_BPP_RGB888 8
+#define MCDE_EXTSRC3CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC3CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC3CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC3CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BPP, MCDE_EXTSRC3CONF_BPP_##__x)
+#define MCDE_EXTSRC3CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BPP, __x)
+#define MCDE_EXTSRC3CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC3CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC3CONF_BGR_RGB 0
+#define MCDE_EXTSRC3CONF_BGR_BGR 1
+#define MCDE_EXTSRC3CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BGR, MCDE_EXTSRC3CONF_BGR_##__x)
+#define MCDE_EXTSRC3CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BGR, __x)
+#define MCDE_EXTSRC3CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC3CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC3CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC3CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC3CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEBO, MCDE_EXTSRC3CONF_BEBO_##__x)
+#define MCDE_EXTSRC3CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEBO, __x)
+#define MCDE_EXTSRC3CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC3CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC3CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC3CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC3CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEPO, MCDE_EXTSRC3CONF_BEPO_##__x)
+#define MCDE_EXTSRC3CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEPO, __x)
+#define MCDE_EXTSRC4CONF 0x0000028C
+#define MCDE_EXTSRC4CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC4CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC4CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BUF_ID, __x)
+#define MCDE_EXTSRC4CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC4CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC4CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BUF_NB, __x)
+#define MCDE_EXTSRC4CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC4CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC4CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC4CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC4CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC4CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC4CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC4CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC4CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC4CONF_BPP_RGB444 4
+#define MCDE_EXTSRC4CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC4CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC4CONF_BPP_RGB565 7
+#define MCDE_EXTSRC4CONF_BPP_RGB888 8
+#define MCDE_EXTSRC4CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC4CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC4CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC4CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BPP, MCDE_EXTSRC4CONF_BPP_##__x)
+#define MCDE_EXTSRC4CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BPP, __x)
+#define MCDE_EXTSRC4CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC4CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC4CONF_BGR_RGB 0
+#define MCDE_EXTSRC4CONF_BGR_BGR 1
+#define MCDE_EXTSRC4CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BGR, MCDE_EXTSRC4CONF_BGR_##__x)
+#define MCDE_EXTSRC4CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BGR, __x)
+#define MCDE_EXTSRC4CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC4CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC4CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC4CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC4CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEBO, MCDE_EXTSRC4CONF_BEBO_##__x)
+#define MCDE_EXTSRC4CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEBO, __x)
+#define MCDE_EXTSRC4CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC4CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC4CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC4CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC4CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEPO, MCDE_EXTSRC4CONF_BEPO_##__x)
+#define MCDE_EXTSRC4CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEPO, __x)
+#define MCDE_EXTSRC5CONF 0x000002AC
+#define MCDE_EXTSRC5CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC5CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC5CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BUF_ID, __x)
+#define MCDE_EXTSRC5CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC5CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC5CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BUF_NB, __x)
+#define MCDE_EXTSRC5CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC5CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC5CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC5CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC5CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC5CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC5CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC5CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC5CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC5CONF_BPP_RGB444 4
+#define MCDE_EXTSRC5CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC5CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC5CONF_BPP_RGB565 7
+#define MCDE_EXTSRC5CONF_BPP_RGB888 8
+#define MCDE_EXTSRC5CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC5CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC5CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC5CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BPP, MCDE_EXTSRC5CONF_BPP_##__x)
+#define MCDE_EXTSRC5CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BPP, __x)
+#define MCDE_EXTSRC5CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC5CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC5CONF_BGR_RGB 0
+#define MCDE_EXTSRC5CONF_BGR_BGR 1
+#define MCDE_EXTSRC5CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BGR, MCDE_EXTSRC5CONF_BGR_##__x)
+#define MCDE_EXTSRC5CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BGR, __x)
+#define MCDE_EXTSRC5CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC5CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC5CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC5CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC5CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEBO, MCDE_EXTSRC5CONF_BEBO_##__x)
+#define MCDE_EXTSRC5CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEBO, __x)
+#define MCDE_EXTSRC5CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC5CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC5CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC5CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC5CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEPO, MCDE_EXTSRC5CONF_BEPO_##__x)
+#define MCDE_EXTSRC5CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEPO, __x)
+#define MCDE_EXTSRC6CONF 0x000002CC
+#define MCDE_EXTSRC6CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC6CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC6CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BUF_ID, __x)
+#define MCDE_EXTSRC6CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC6CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC6CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BUF_NB, __x)
+#define MCDE_EXTSRC6CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC6CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC6CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC6CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC6CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC6CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC6CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC6CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC6CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC6CONF_BPP_RGB444 4
+#define MCDE_EXTSRC6CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC6CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC6CONF_BPP_RGB565 7
+#define MCDE_EXTSRC6CONF_BPP_RGB888 8
+#define MCDE_EXTSRC6CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC6CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC6CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC6CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BPP, MCDE_EXTSRC6CONF_BPP_##__x)
+#define MCDE_EXTSRC6CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BPP, __x)
+#define MCDE_EXTSRC6CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC6CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC6CONF_BGR_RGB 0
+#define MCDE_EXTSRC6CONF_BGR_BGR 1
+#define MCDE_EXTSRC6CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BGR, MCDE_EXTSRC6CONF_BGR_##__x)
+#define MCDE_EXTSRC6CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BGR, __x)
+#define MCDE_EXTSRC6CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC6CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC6CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC6CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC6CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEBO, MCDE_EXTSRC6CONF_BEBO_##__x)
+#define MCDE_EXTSRC6CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEBO, __x)
+#define MCDE_EXTSRC6CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC6CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC6CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC6CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC6CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEPO, MCDE_EXTSRC6CONF_BEPO_##__x)
+#define MCDE_EXTSRC6CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEPO, __x)
+#define MCDE_EXTSRC7CONF 0x000002EC
+#define MCDE_EXTSRC7CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC7CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC7CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BUF_ID, __x)
+#define MCDE_EXTSRC7CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC7CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC7CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BUF_NB, __x)
+#define MCDE_EXTSRC7CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC7CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC7CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC7CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC7CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC7CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC7CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC7CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC7CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC7CONF_BPP_RGB444 4
+#define MCDE_EXTSRC7CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC7CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC7CONF_BPP_RGB565 7
+#define MCDE_EXTSRC7CONF_BPP_RGB888 8
+#define MCDE_EXTSRC7CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC7CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC7CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC7CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BPP, MCDE_EXTSRC7CONF_BPP_##__x)
+#define MCDE_EXTSRC7CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BPP, __x)
+#define MCDE_EXTSRC7CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC7CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC7CONF_BGR_RGB 0
+#define MCDE_EXTSRC7CONF_BGR_BGR 1
+#define MCDE_EXTSRC7CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BGR, MCDE_EXTSRC7CONF_BGR_##__x)
+#define MCDE_EXTSRC7CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BGR, __x)
+#define MCDE_EXTSRC7CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC7CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC7CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC7CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC7CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEBO, MCDE_EXTSRC7CONF_BEBO_##__x)
+#define MCDE_EXTSRC7CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEBO, __x)
+#define MCDE_EXTSRC7CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC7CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC7CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC7CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC7CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEPO, MCDE_EXTSRC7CONF_BEPO_##__x)
+#define MCDE_EXTSRC7CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEPO, __x)
+#define MCDE_EXTSRC8CONF 0x0000030C
+#define MCDE_EXTSRC8CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC8CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC8CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BUF_ID, __x)
+#define MCDE_EXTSRC8CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC8CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC8CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BUF_NB, __x)
+#define MCDE_EXTSRC8CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC8CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC8CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC8CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC8CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC8CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC8CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC8CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC8CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC8CONF_BPP_RGB444 4
+#define MCDE_EXTSRC8CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC8CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC8CONF_BPP_RGB565 7
+#define MCDE_EXTSRC8CONF_BPP_RGB888 8
+#define MCDE_EXTSRC8CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC8CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC8CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC8CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BPP, MCDE_EXTSRC8CONF_BPP_##__x)
+#define MCDE_EXTSRC8CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BPP, __x)
+#define MCDE_EXTSRC8CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC8CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC8CONF_BGR_RGB 0
+#define MCDE_EXTSRC8CONF_BGR_BGR 1
+#define MCDE_EXTSRC8CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BGR, MCDE_EXTSRC8CONF_BGR_##__x)
+#define MCDE_EXTSRC8CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BGR, __x)
+#define MCDE_EXTSRC8CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC8CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC8CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC8CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC8CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEBO, MCDE_EXTSRC8CONF_BEBO_##__x)
+#define MCDE_EXTSRC8CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEBO, __x)
+#define MCDE_EXTSRC8CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC8CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC8CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC8CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC8CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEPO, MCDE_EXTSRC8CONF_BEPO_##__x)
+#define MCDE_EXTSRC8CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEPO, __x)
+#define MCDE_EXTSRC9CONF 0x0000032C
+#define MCDE_EXTSRC9CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC9CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC9CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BUF_ID, __x)
+#define MCDE_EXTSRC9CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC9CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC9CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BUF_NB, __x)
+#define MCDE_EXTSRC9CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC9CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC9CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC9CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC9CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC9CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC9CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC9CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC9CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC9CONF_BPP_RGB444 4
+#define MCDE_EXTSRC9CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC9CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC9CONF_BPP_RGB565 7
+#define MCDE_EXTSRC9CONF_BPP_RGB888 8
+#define MCDE_EXTSRC9CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC9CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC9CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC9CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BPP, MCDE_EXTSRC9CONF_BPP_##__x)
+#define MCDE_EXTSRC9CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BPP, __x)
+#define MCDE_EXTSRC9CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC9CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC9CONF_BGR_RGB 0
+#define MCDE_EXTSRC9CONF_BGR_BGR 1
+#define MCDE_EXTSRC9CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BGR, MCDE_EXTSRC9CONF_BGR_##__x)
+#define MCDE_EXTSRC9CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BGR, __x)
+#define MCDE_EXTSRC9CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC9CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC9CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC9CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC9CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEBO, MCDE_EXTSRC9CONF_BEBO_##__x)
+#define MCDE_EXTSRC9CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEBO, __x)
+#define MCDE_EXTSRC9CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC9CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC9CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC9CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC9CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEPO, MCDE_EXTSRC9CONF_BEPO_##__x)
+#define MCDE_EXTSRC9CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEPO, __x)
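+/*
+ * External source control registers (EXTSRCxCR, x = 0..9), spaced by the
+ * 0x20 group offset: SEL_MOD selects how the active buffer is chosen
+ * (external selection, automatic toggle or software selection),
+ * MULTIOVL_CTRL routes the source to all overlays or only the primary one,
+ * and FS_DIV_DISABLE/FORCE_FS_DIV control the frame-sync divider.
+ */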
+#define MCDE_EXTSRC0CR 0x00000210
+#define MCDE_EXTSRC0CR_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC0CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC0CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC0CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC0CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, SEL_MOD, MCDE_EXTSRC0CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC0CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, SEL_MOD, __x)
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC0CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC0CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC0CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC0CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC0CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC0CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC0CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC1CR 0x00000230
+#define MCDE_EXTSRC1CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC1CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC1CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC1CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC1CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC1CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, SEL_MOD, MCDE_EXTSRC1CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC1CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, SEL_MOD, __x)
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC1CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC1CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC1CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC1CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC1CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC1CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC1CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC2CR 0x00000250
+#define MCDE_EXTSRC2CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC2CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC2CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC2CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC2CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC2CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, SEL_MOD, MCDE_EXTSRC2CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC2CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, SEL_MOD, __x)
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC2CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC2CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC2CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC2CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC2CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC2CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC2CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC3CR 0x00000270
+#define MCDE_EXTSRC3CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC3CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC3CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC3CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC3CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC3CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, SEL_MOD, MCDE_EXTSRC3CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC3CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, SEL_MOD, __x)
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC3CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC3CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC3CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC3CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC3CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC3CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC3CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC4CR 0x00000290
+#define MCDE_EXTSRC4CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC4CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC4CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC4CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC4CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC4CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, SEL_MOD, MCDE_EXTSRC4CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC4CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, SEL_MOD, __x)
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC4CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC4CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC4CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC4CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC4CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC4CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC4CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC5CR 0x000002B0
+#define MCDE_EXTSRC5CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC5CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC5CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC5CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC5CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC5CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, SEL_MOD, MCDE_EXTSRC5CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC5CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, SEL_MOD, __x)
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC5CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC5CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC5CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC5CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC5CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC5CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC5CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC6CR 0x000002D0
+#define MCDE_EXTSRC6CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC6CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC6CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC6CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC6CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC6CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, SEL_MOD, MCDE_EXTSRC6CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC6CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, SEL_MOD, __x)
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC6CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC6CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC6CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC6CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC6CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC6CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC6CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC7CR 0x000002F0
+#define MCDE_EXTSRC7CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC7CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC7CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC7CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC7CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC7CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, SEL_MOD, MCDE_EXTSRC7CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC7CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, SEL_MOD, __x)
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC7CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC7CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC7CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC7CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC7CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC7CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC7CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC8CR 0x00000310
+#define MCDE_EXTSRC8CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC8CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC8CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC8CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC8CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC8CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, SEL_MOD, MCDE_EXTSRC8CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC8CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, SEL_MOD, __x)
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC8CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC8CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC8CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC8CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC8CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC8CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC8CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC9CR 0x00000330
+#define MCDE_EXTSRC9CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC9CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC9CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC9CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC9CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC9CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, SEL_MOD, MCDE_EXTSRC9CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC9CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, SEL_MOD, __x)
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC9CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC9CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC9CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC9CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC9CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC9CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC9CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, FORCE_FS_DIV, __x)
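+/*
+ * Overlay control registers (OVLxCR, x = 0..5), spaced by the 0x20 group
+ * offset: overlay enable (OVLEN), color conversion control (COLCCTRL),
+ * color key generation (CKEYGEN), premultiplied alpha (ALPHAPMEN), the
+ * OVLF/OVLR/OVLB bits, FETCH_ROPC, STBPRIO and the burst size /
+ * outstanding request / rotation burst size tuning fields.
+ */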
+#define MCDE_OVL0CR 0x00000400
+#define MCDE_OVL0CR_GROUPOFFSET 0x20
+#define MCDE_OVL0CR_OVLEN_SHIFT 0
+#define MCDE_OVL0CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL0CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLEN, __x)
+#define MCDE_OVL0CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL0CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL0CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL0CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL0CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, COLCCTRL, MCDE_OVL0CR_COLCCTRL_##__x)
+#define MCDE_OVL0CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, COLCCTRL, __x)
+#define MCDE_OVL0CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL0CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL0CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, CKEYGEN, __x)
+#define MCDE_OVL0CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL0CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL0CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, ALPHAPMEN, __x)
+#define MCDE_OVL0CR_OVLF_SHIFT 5
+#define MCDE_OVL0CR_OVLF_MASK 0x00000020
+#define MCDE_OVL0CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLF, __x)
+#define MCDE_OVL0CR_OVLR_SHIFT 6
+#define MCDE_OVL0CR_OVLR_MASK 0x00000040
+#define MCDE_OVL0CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLR, __x)
+#define MCDE_OVL0CR_OVLB_SHIFT 7
+#define MCDE_OVL0CR_OVLB_MASK 0x00000080
+#define MCDE_OVL0CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLB, __x)
+#define MCDE_OVL0CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL0CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL0CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, FETCH_ROPC, __x)
+#define MCDE_OVL0CR_STBPRIO_SHIFT 16
+#define MCDE_OVL0CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL0CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, STBPRIO, __x)
+#define MCDE_OVL0CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL0CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL0CR_BURSTSIZE_1W 0
+#define MCDE_OVL0CR_BURSTSIZE_2W 1
+#define MCDE_OVL0CR_BURSTSIZE_4W 2
+#define MCDE_OVL0CR_BURSTSIZE_8W 3
+#define MCDE_OVL0CR_BURSTSIZE_16W 4
+#define MCDE_OVL0CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL0CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL0CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL0CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL0CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL0CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, BURSTSIZE, MCDE_OVL0CR_BURSTSIZE_##__x)
+#define MCDE_OVL0CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, BURSTSIZE, __x)
+#define MCDE_OVL0CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL0CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL0CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL0CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL0CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL0CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL0CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL0CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, MAXOUTSTANDING, \
+ MCDE_OVL0CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL0CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL0CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL0CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL0CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL0CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL0CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL0CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL0CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL0CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, ROTBURSTSIZE, MCDE_OVL0CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL0CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, ROTBURSTSIZE, __x)
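+/*
+ * Illustrative use of the field macros above (a sketch only, not part of
+ * the driver code): register values are built by ORing the per-field
+ * helpers, assuming MCDE_VAL2REG(), defined earlier in this header, packs
+ * a value into its field via the matching _SHIFT/_MASK pair, e.g.
+ *
+ *   u32 cr = MCDE_OVL0CR_OVLEN(1) |
+ *            MCDE_OVL0CR_COLCCTRL_ENUM(DISABLED) |
+ *            MCDE_OVL0CR_BURSTSIZE_ENUM(HW_8W);
+ */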
+#define MCDE_OVL1CR 0x00000420
+#define MCDE_OVL1CR_OVLEN_SHIFT 0
+#define MCDE_OVL1CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL1CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLEN, __x)
+#define MCDE_OVL1CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL1CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL1CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL1CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL1CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL1CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, COLCCTRL, MCDE_OVL1CR_COLCCTRL_##__x)
+#define MCDE_OVL1CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, COLCCTRL, __x)
+#define MCDE_OVL1CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL1CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL1CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, CKEYGEN, __x)
+#define MCDE_OVL1CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL1CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL1CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, ALPHAPMEN, __x)
+#define MCDE_OVL1CR_OVLF_SHIFT 5
+#define MCDE_OVL1CR_OVLF_MASK 0x00000020
+#define MCDE_OVL1CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLF, __x)
+#define MCDE_OVL1CR_OVLR_SHIFT 6
+#define MCDE_OVL1CR_OVLR_MASK 0x00000040
+#define MCDE_OVL1CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLR, __x)
+#define MCDE_OVL1CR_OVLB_SHIFT 7
+#define MCDE_OVL1CR_OVLB_MASK 0x00000080
+#define MCDE_OVL1CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLB, __x)
+#define MCDE_OVL1CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL1CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL1CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, FETCH_ROPC, __x)
+#define MCDE_OVL1CR_STBPRIO_SHIFT 16
+#define MCDE_OVL1CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL1CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, STBPRIO, __x)
+#define MCDE_OVL1CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL1CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL1CR_BURSTSIZE_1W 0
+#define MCDE_OVL1CR_BURSTSIZE_2W 1
+#define MCDE_OVL1CR_BURSTSIZE_4W 2
+#define MCDE_OVL1CR_BURSTSIZE_8W 3
+#define MCDE_OVL1CR_BURSTSIZE_16W 4
+#define MCDE_OVL1CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL1CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL1CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL1CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL1CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL1CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, BURSTSIZE, MCDE_OVL1CR_BURSTSIZE_##__x)
+#define MCDE_OVL1CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, BURSTSIZE, __x)
+#define MCDE_OVL1CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL1CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL1CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL1CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL1CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL1CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL1CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL1CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, MAXOUTSTANDING, \
+ MCDE_OVL1CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL1CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL1CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL1CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL1CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL1CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL1CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL1CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL1CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL1CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, ROTBURSTSIZE, MCDE_OVL1CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL1CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL2CR 0x00000440
+#define MCDE_OVL2CR_OVLEN_SHIFT 0
+#define MCDE_OVL2CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL2CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLEN, __x)
+#define MCDE_OVL2CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL2CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL2CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL2CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL2CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL2CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, COLCCTRL, MCDE_OVL2CR_COLCCTRL_##__x)
+#define MCDE_OVL2CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, COLCCTRL, __x)
+#define MCDE_OVL2CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL2CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL2CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, CKEYGEN, __x)
+#define MCDE_OVL2CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL2CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL2CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, ALPHAPMEN, __x)
+#define MCDE_OVL2CR_OVLF_SHIFT 5
+#define MCDE_OVL2CR_OVLF_MASK 0x00000020
+#define MCDE_OVL2CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLF, __x)
+#define MCDE_OVL2CR_OVLR_SHIFT 6
+#define MCDE_OVL2CR_OVLR_MASK 0x00000040
+#define MCDE_OVL2CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLR, __x)
+#define MCDE_OVL2CR_OVLB_SHIFT 7
+#define MCDE_OVL2CR_OVLB_MASK 0x00000080
+#define MCDE_OVL2CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLB, __x)
+#define MCDE_OVL2CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL2CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL2CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, FETCH_ROPC, __x)
+#define MCDE_OVL2CR_STBPRIO_SHIFT 16
+#define MCDE_OVL2CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL2CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, STBPRIO, __x)
+#define MCDE_OVL2CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL2CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL2CR_BURSTSIZE_1W 0
+#define MCDE_OVL2CR_BURSTSIZE_2W 1
+#define MCDE_OVL2CR_BURSTSIZE_4W 2
+#define MCDE_OVL2CR_BURSTSIZE_8W 3
+#define MCDE_OVL2CR_BURSTSIZE_16W 4
+#define MCDE_OVL2CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL2CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL2CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL2CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL2CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL2CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, BURSTSIZE, MCDE_OVL2CR_BURSTSIZE_##__x)
+#define MCDE_OVL2CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, BURSTSIZE, __x)
+#define MCDE_OVL2CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL2CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL2CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL2CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL2CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL2CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL2CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL2CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, MAXOUTSTANDING, \
+ MCDE_OVL2CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL2CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL2CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL2CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL2CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL2CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL2CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL2CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL2CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL2CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, ROTBURSTSIZE, MCDE_OVL2CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL2CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL3CR 0x00000460
+#define MCDE_OVL3CR_OVLEN_SHIFT 0
+#define MCDE_OVL3CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL3CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLEN, __x)
+#define MCDE_OVL3CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL3CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL3CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL3CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL3CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL3CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, COLCCTRL, MCDE_OVL3CR_COLCCTRL_##__x)
+#define MCDE_OVL3CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, COLCCTRL, __x)
+#define MCDE_OVL3CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL3CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL3CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, CKEYGEN, __x)
+#define MCDE_OVL3CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL3CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL3CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, ALPHAPMEN, __x)
+#define MCDE_OVL3CR_OVLF_SHIFT 5
+#define MCDE_OVL3CR_OVLF_MASK 0x00000020
+#define MCDE_OVL3CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLF, __x)
+#define MCDE_OVL3CR_OVLR_SHIFT 6
+#define MCDE_OVL3CR_OVLR_MASK 0x00000040
+#define MCDE_OVL3CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLR, __x)
+#define MCDE_OVL3CR_OVLB_SHIFT 7
+#define MCDE_OVL3CR_OVLB_MASK 0x00000080
+#define MCDE_OVL3CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLB, __x)
+#define MCDE_OVL3CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL3CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL3CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, FETCH_ROPC, __x)
+#define MCDE_OVL3CR_STBPRIO_SHIFT 16
+#define MCDE_OVL3CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL3CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, STBPRIO, __x)
+#define MCDE_OVL3CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL3CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL3CR_BURSTSIZE_1W 0
+#define MCDE_OVL3CR_BURSTSIZE_2W 1
+#define MCDE_OVL3CR_BURSTSIZE_4W 2
+#define MCDE_OVL3CR_BURSTSIZE_8W 3
+#define MCDE_OVL3CR_BURSTSIZE_16W 4
+#define MCDE_OVL3CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL3CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL3CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL3CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL3CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL3CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, BURSTSIZE, MCDE_OVL3CR_BURSTSIZE_##__x)
+#define MCDE_OVL3CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, BURSTSIZE, __x)
+#define MCDE_OVL3CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL3CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL3CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL3CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL3CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL3CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL3CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL3CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, MAXOUTSTANDING, \
+ MCDE_OVL3CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL3CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL3CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL3CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL3CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL3CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL3CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL3CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL3CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL3CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, ROTBURSTSIZE, MCDE_OVL3CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL3CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL4CR 0x00000480
+#define MCDE_OVL4CR_OVLEN_SHIFT 0
+#define MCDE_OVL4CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL4CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLEN, __x)
+#define MCDE_OVL4CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL4CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL4CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL4CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL4CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL4CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, COLCCTRL, MCDE_OVL4CR_COLCCTRL_##__x)
+#define MCDE_OVL4CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, COLCCTRL, __x)
+#define MCDE_OVL4CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL4CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL4CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, CKEYGEN, __x)
+#define MCDE_OVL4CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL4CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL4CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, ALPHAPMEN, __x)
+#define MCDE_OVL4CR_OVLF_SHIFT 5
+#define MCDE_OVL4CR_OVLF_MASK 0x00000020
+#define MCDE_OVL4CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLF, __x)
+#define MCDE_OVL4CR_OVLR_SHIFT 6
+#define MCDE_OVL4CR_OVLR_MASK 0x00000040
+#define MCDE_OVL4CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLR, __x)
+#define MCDE_OVL4CR_OVLB_SHIFT 7
+#define MCDE_OVL4CR_OVLB_MASK 0x00000080
+#define MCDE_OVL4CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLB, __x)
+#define MCDE_OVL4CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL4CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL4CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, FETCH_ROPC, __x)
+#define MCDE_OVL4CR_STBPRIO_SHIFT 16
+#define MCDE_OVL4CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL4CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, STBPRIO, __x)
+#define MCDE_OVL4CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL4CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL4CR_BURSTSIZE_1W 0
+#define MCDE_OVL4CR_BURSTSIZE_2W 1
+#define MCDE_OVL4CR_BURSTSIZE_4W 2
+#define MCDE_OVL4CR_BURSTSIZE_8W 3
+#define MCDE_OVL4CR_BURSTSIZE_16W 4
+#define MCDE_OVL4CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL4CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL4CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL4CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL4CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL4CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, BURSTSIZE, MCDE_OVL4CR_BURSTSIZE_##__x)
+#define MCDE_OVL4CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, BURSTSIZE, __x)
+#define MCDE_OVL4CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL4CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL4CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL4CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL4CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL4CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL4CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL4CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, MAXOUTSTANDING, \
+ MCDE_OVL4CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL4CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL4CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL4CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL4CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL4CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL4CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL4CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL4CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL4CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, ROTBURSTSIZE, MCDE_OVL4CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL4CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL5CR 0x000004A0
+#define MCDE_OVL5CR_OVLEN_SHIFT 0
+#define MCDE_OVL5CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL5CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLEN, __x)
+#define MCDE_OVL5CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL5CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL5CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL5CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL5CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL5CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, COLCCTRL, MCDE_OVL5CR_COLCCTRL_##__x)
+#define MCDE_OVL5CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, COLCCTRL, __x)
+#define MCDE_OVL5CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL5CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL5CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, CKEYGEN, __x)
+#define MCDE_OVL5CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL5CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL5CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, ALPHAPMEN, __x)
+#define MCDE_OVL5CR_OVLF_SHIFT 5
+#define MCDE_OVL5CR_OVLF_MASK 0x00000020
+#define MCDE_OVL5CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLF, __x)
+#define MCDE_OVL5CR_OVLR_SHIFT 6
+#define MCDE_OVL5CR_OVLR_MASK 0x00000040
+#define MCDE_OVL5CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLR, __x)
+#define MCDE_OVL5CR_OVLB_SHIFT 7
+#define MCDE_OVL5CR_OVLB_MASK 0x00000080
+#define MCDE_OVL5CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLB, __x)
+#define MCDE_OVL5CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL5CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL5CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, FETCH_ROPC, __x)
+#define MCDE_OVL5CR_STBPRIO_SHIFT 16
+#define MCDE_OVL5CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL5CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, STBPRIO, __x)
+#define MCDE_OVL5CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL5CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL5CR_BURSTSIZE_1W 0
+#define MCDE_OVL5CR_BURSTSIZE_2W 1
+#define MCDE_OVL5CR_BURSTSIZE_4W 2
+#define MCDE_OVL5CR_BURSTSIZE_8W 3
+#define MCDE_OVL5CR_BURSTSIZE_16W 4
+#define MCDE_OVL5CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL5CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL5CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL5CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL5CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL5CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, BURSTSIZE, MCDE_OVL5CR_BURSTSIZE_##__x)
+#define MCDE_OVL5CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, BURSTSIZE, __x)
+#define MCDE_OVL5CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL5CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL5CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL5CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL5CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL5CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL5CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL5CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, MAXOUTSTANDING, \
+ MCDE_OVL5CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL5CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL5CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL5CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL5CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL5CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL5CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL5CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL5CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL5CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, ROTBURSTSIZE, MCDE_OVL5CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL5CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, ROTBURSTSIZE, __x)
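+/*
+ * Overlay configuration registers (OVLxCONF, x = 0..5): PPL and LPF give
+ * the overlay size (pixels per line and lines per frame, going by the
+ * field names), and EXTSRC_ID ties the overlay to one of the external
+ * sources above.
+ */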
+#define MCDE_OVL0CONF 0x00000404
+#define MCDE_OVL0CONF_GROUPOFFSET 0x20
+#define MCDE_OVL0CONF_PPL_SHIFT 0
+#define MCDE_OVL0CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL0CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF, PPL, __x)
+#define MCDE_OVL0CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL0CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL0CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF, EXTSRC_ID, __x)
+#define MCDE_OVL0CONF_LPF_SHIFT 16
+#define MCDE_OVL0CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL0CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF, LPF, __x)
+#define MCDE_OVL1CONF 0x00000424
+#define MCDE_OVL1CONF_PPL_SHIFT 0
+#define MCDE_OVL1CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL1CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF, PPL, __x)
+#define MCDE_OVL1CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL1CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL1CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF, EXTSRC_ID, __x)
+#define MCDE_OVL1CONF_LPF_SHIFT 16
+#define MCDE_OVL1CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL1CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF, LPF, __x)
+#define MCDE_OVL2CONF 0x00000444
+#define MCDE_OVL2CONF_PPL_SHIFT 0
+#define MCDE_OVL2CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL2CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF, PPL, __x)
+#define MCDE_OVL2CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL2CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL2CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF, EXTSRC_ID, __x)
+#define MCDE_OVL2CONF_LPF_SHIFT 16
+#define MCDE_OVL2CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL2CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF, LPF, __x)
+#define MCDE_OVL3CONF 0x00000464
+#define MCDE_OVL3CONF_PPL_SHIFT 0
+#define MCDE_OVL3CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL3CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF, PPL, __x)
+#define MCDE_OVL3CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL3CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL3CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF, EXTSRC_ID, __x)
+#define MCDE_OVL3CONF_LPF_SHIFT 16
+#define MCDE_OVL3CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL3CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF, LPF, __x)
+#define MCDE_OVL4CONF 0x00000484
+#define MCDE_OVL4CONF_PPL_SHIFT 0
+#define MCDE_OVL4CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL4CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF, PPL, __x)
+#define MCDE_OVL4CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL4CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL4CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF, EXTSRC_ID, __x)
+#define MCDE_OVL4CONF_LPF_SHIFT 16
+#define MCDE_OVL4CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL4CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF, LPF, __x)
+#define MCDE_OVL5CONF 0x000004A4
+#define MCDE_OVL5CONF_PPL_SHIFT 0
+#define MCDE_OVL5CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL5CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF, PPL, __x)
+#define MCDE_OVL5CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL5CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL5CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF, EXTSRC_ID, __x)
+#define MCDE_OVL5CONF_LPF_SHIFT 16
+#define MCDE_OVL5CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL5CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF, LPF, __x)
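+/*
+ * Second overlay configuration registers (OVLxCONF2, x = 0..5): blend mode
+ * (per-pixel vs. constant alpha), the constant ALPHAVALUE, the OPQ (opaque)
+ * bit, PIXOFF and the pixel fetcher watermark level.
+ */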
+#define MCDE_OVL0CONF2 0x00000408
+#define MCDE_OVL0CONF2_GROUPOFFSET 0x20
+#define MCDE_OVL0CONF2_BP_SHIFT 0
+#define MCDE_OVL0CONF2_BP_MASK 0x00000001
+#define MCDE_OVL0CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL0CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL0CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, BP, MCDE_OVL0CONF2_BP_##__x)
+#define MCDE_OVL0CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, BP, __x)
+#define MCDE_OVL0CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL0CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL0CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL0CONF2_OPQ_SHIFT 9
+#define MCDE_OVL0CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL0CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, OPQ, __x)
+#define MCDE_OVL0CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL0CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL0CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, PIXOFF, __x)
+#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL1CONF2 0x00000428
+#define MCDE_OVL1CONF2_BP_SHIFT 0
+#define MCDE_OVL1CONF2_BP_MASK 0x00000001
+#define MCDE_OVL1CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL1CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL1CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, BP, MCDE_OVL1CONF2_BP_##__x)
+#define MCDE_OVL1CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, BP, __x)
+#define MCDE_OVL1CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL1CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL1CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL1CONF2_OPQ_SHIFT 9
+#define MCDE_OVL1CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL1CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, OPQ, __x)
+#define MCDE_OVL1CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL1CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL1CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, PIXOFF, __x)
+#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL2CONF2 0x00000448
+#define MCDE_OVL2CONF2_BP_SHIFT 0
+#define MCDE_OVL2CONF2_BP_MASK 0x00000001
+#define MCDE_OVL2CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL2CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL2CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, BP, MCDE_OVL2CONF2_BP_##__x)
+#define MCDE_OVL2CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, BP, __x)
+#define MCDE_OVL2CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL2CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL2CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL2CONF2_OPQ_SHIFT 9
+#define MCDE_OVL2CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL2CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, OPQ, __x)
+#define MCDE_OVL2CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL2CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL2CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, PIXOFF, __x)
+#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL3CONF2 0x00000468
+#define MCDE_OVL3CONF2_BP_SHIFT 0
+#define MCDE_OVL3CONF2_BP_MASK 0x00000001
+#define MCDE_OVL3CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL3CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL3CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, BP, MCDE_OVL3CONF2_BP_##__x)
+#define MCDE_OVL3CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, BP, __x)
+#define MCDE_OVL3CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL3CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL3CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL3CONF2_OPQ_SHIFT 9
+#define MCDE_OVL3CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL3CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, OPQ, __x)
+#define MCDE_OVL3CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL3CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL3CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, PIXOFF, __x)
+#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL4CONF2 0x00000488
+#define MCDE_OVL4CONF2_BP_SHIFT 0
+#define MCDE_OVL4CONF2_BP_MASK 0x00000001
+#define MCDE_OVL4CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL4CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL4CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, BP, MCDE_OVL4CONF2_BP_##__x)
+#define MCDE_OVL4CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, BP, __x)
+#define MCDE_OVL4CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL4CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL4CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL4CONF2_OPQ_SHIFT 9
+#define MCDE_OVL4CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL4CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, OPQ, __x)
+#define MCDE_OVL4CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL4CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL4CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, PIXOFF, __x)
+#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL5CONF2 0x000004A8
+#define MCDE_OVL5CONF2_BP_SHIFT 0
+#define MCDE_OVL5CONF2_BP_MASK 0x00000001
+#define MCDE_OVL5CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL5CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL5CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, BP, MCDE_OVL5CONF2_BP_##__x)
+#define MCDE_OVL5CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, BP, __x)
+#define MCDE_OVL5CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL5CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL5CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL5CONF2_OPQ_SHIFT 9
+#define MCDE_OVL5CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL5CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, OPQ, __x)
+#define MCDE_OVL5CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL5CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL5CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, PIXOFF, __x)
+#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL0LJINC 0x0000040C
+#define MCDE_OVL0LJINC_GROUPOFFSET 0x20
+#define MCDE_OVL0LJINC_LJINC_SHIFT 0
+#define MCDE_OVL0LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL0LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL0LJINC, LJINC, __x)
+#define MCDE_OVL1LJINC 0x0000042C
+#define MCDE_OVL1LJINC_LJINC_SHIFT 0
+#define MCDE_OVL1LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL1LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL1LJINC, LJINC, __x)
+#define MCDE_OVL2LJINC 0x0000044C
+#define MCDE_OVL2LJINC_LJINC_SHIFT 0
+#define MCDE_OVL2LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL2LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL2LJINC, LJINC, __x)
+#define MCDE_OVL3LJINC 0x0000046C
+#define MCDE_OVL3LJINC_LJINC_SHIFT 0
+#define MCDE_OVL3LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL3LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL3LJINC, LJINC, __x)
+#define MCDE_OVL4LJINC 0x0000048C
+#define MCDE_OVL4LJINC_LJINC_SHIFT 0
+#define MCDE_OVL4LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL4LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL4LJINC, LJINC, __x)
+#define MCDE_OVL5LJINC 0x000004AC
+#define MCDE_OVL5LJINC_LJINC_SHIFT 0
+#define MCDE_OVL5LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL5LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL5LJINC, LJINC, __x)
+#define MCDE_OVL0CROP 0x00000410
+#define MCDE_OVL0CROP_GROUPOFFSET 0x20
+#define MCDE_OVL0CROP_TMRGN_SHIFT 0
+#define MCDE_OVL0CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL0CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CROP, TMRGN, __x)
+#define MCDE_OVL0CROP_LMRGN_SHIFT 22
+#define MCDE_OVL0CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL0CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CROP, LMRGN, __x)
+#define MCDE_OVL1CROP 0x00000430
+#define MCDE_OVL1CROP_TMRGN_SHIFT 0
+#define MCDE_OVL1CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL1CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CROP, TMRGN, __x)
+#define MCDE_OVL1CROP_LMRGN_SHIFT 22
+#define MCDE_OVL1CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL1CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CROP, LMRGN, __x)
+#define MCDE_OVL2CROP 0x00000450
+#define MCDE_OVL2CROP_TMRGN_SHIFT 0
+#define MCDE_OVL2CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL2CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CROP, TMRGN, __x)
+#define MCDE_OVL2CROP_LMRGN_SHIFT 22
+#define MCDE_OVL2CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL2CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CROP, LMRGN, __x)
+#define MCDE_OVL3CROP 0x00000470
+#define MCDE_OVL3CROP_TMRGN_SHIFT 0
+#define MCDE_OVL3CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL3CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CROP, TMRGN, __x)
+#define MCDE_OVL3CROP_LMRGN_SHIFT 22
+#define MCDE_OVL3CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL3CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CROP, LMRGN, __x)
+#define MCDE_OVL4CROP 0x00000490
+#define MCDE_OVL4CROP_TMRGN_SHIFT 0
+#define MCDE_OVL4CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL4CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CROP, TMRGN, __x)
+#define MCDE_OVL4CROP_LMRGN_SHIFT 22
+#define MCDE_OVL4CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL4CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CROP, LMRGN, __x)
+#define MCDE_OVL5CROP 0x000004B0
+#define MCDE_OVL5CROP_TMRGN_SHIFT 0
+#define MCDE_OVL5CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL5CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CROP, TMRGN, __x)
+#define MCDE_OVL5CROP_LMRGN_SHIFT 22
+#define MCDE_OVL5CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL5CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CROP, LMRGN, __x)
+#define MCDE_OVL0COMP 0x00000414
+#define MCDE_OVL0COMP_GROUPOFFSET 0x20
+#define MCDE_OVL0COMP_XPOS_SHIFT 0
+#define MCDE_OVL0COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL0COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, XPOS, __x)
+#define MCDE_OVL0COMP_CH_ID_SHIFT 11
+#define MCDE_OVL0COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL0COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, CH_ID, __x)
+#define MCDE_OVL0COMP_YPOS_SHIFT 16
+#define MCDE_OVL0COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL0COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, YPOS, __x)
+#define MCDE_OVL0COMP_Z_SHIFT 27
+#define MCDE_OVL0COMP_Z_MASK 0x78000000
+#define MCDE_OVL0COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, Z, __x)
+#define MCDE_OVL1COMP 0x00000434
+#define MCDE_OVL1COMP_XPOS_SHIFT 0
+#define MCDE_OVL1COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL1COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, XPOS, __x)
+#define MCDE_OVL1COMP_CH_ID_SHIFT 11
+#define MCDE_OVL1COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL1COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, CH_ID, __x)
+#define MCDE_OVL1COMP_YPOS_SHIFT 16
+#define MCDE_OVL1COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL1COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, YPOS, __x)
+#define MCDE_OVL1COMP_Z_SHIFT 27
+#define MCDE_OVL1COMP_Z_MASK 0x78000000
+#define MCDE_OVL1COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, Z, __x)
+#define MCDE_OVL2COMP 0x00000454
+#define MCDE_OVL2COMP_XPOS_SHIFT 0
+#define MCDE_OVL2COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL2COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, XPOS, __x)
+#define MCDE_OVL2COMP_CH_ID_SHIFT 11
+#define MCDE_OVL2COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL2COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, CH_ID, __x)
+#define MCDE_OVL2COMP_YPOS_SHIFT 16
+#define MCDE_OVL2COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL2COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, YPOS, __x)
+#define MCDE_OVL2COMP_Z_SHIFT 27
+#define MCDE_OVL2COMP_Z_MASK 0x78000000
+#define MCDE_OVL2COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, Z, __x)
+#define MCDE_OVL3COMP 0x00000474
+#define MCDE_OVL3COMP_XPOS_SHIFT 0
+#define MCDE_OVL3COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL3COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, XPOS, __x)
+#define MCDE_OVL3COMP_CH_ID_SHIFT 11
+#define MCDE_OVL3COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL3COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, CH_ID, __x)
+#define MCDE_OVL3COMP_YPOS_SHIFT 16
+#define MCDE_OVL3COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL3COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, YPOS, __x)
+#define MCDE_OVL3COMP_Z_SHIFT 27
+#define MCDE_OVL3COMP_Z_MASK 0x78000000
+#define MCDE_OVL3COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, Z, __x)
+#define MCDE_OVL4COMP 0x00000494
+#define MCDE_OVL4COMP_XPOS_SHIFT 0
+#define MCDE_OVL4COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL4COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, XPOS, __x)
+#define MCDE_OVL4COMP_CH_ID_SHIFT 11
+#define MCDE_OVL4COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL4COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, CH_ID, __x)
+#define MCDE_OVL4COMP_YPOS_SHIFT 16
+#define MCDE_OVL4COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL4COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, YPOS, __x)
+#define MCDE_OVL4COMP_Z_SHIFT 27
+#define MCDE_OVL4COMP_Z_MASK 0x78000000
+#define MCDE_OVL4COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, Z, __x)
+#define MCDE_OVL5COMP 0x000004B4
+#define MCDE_OVL5COMP_XPOS_SHIFT 0
+#define MCDE_OVL5COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL5COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, XPOS, __x)
+#define MCDE_OVL5COMP_CH_ID_SHIFT 11
+#define MCDE_OVL5COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL5COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, CH_ID, __x)
+#define MCDE_OVL5COMP_YPOS_SHIFT 16
+#define MCDE_OVL5COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL5COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, YPOS, __x)
+#define MCDE_OVL5COMP_Z_SHIFT 27
+#define MCDE_OVL5COMP_Z_MASK 0x78000000
+#define MCDE_OVL5COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, Z, __x)
+#define MCDE_CHNL0CONF 0x00000600
+#define MCDE_CHNL0CONF_GROUPOFFSET 0x20
+#define MCDE_CHNL0CONF_PPL_SHIFT 0
+#define MCDE_CHNL0CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL0CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0CONF, PPL, __x)
+#define MCDE_CHNL0CONF_LPF_SHIFT 16
+#define MCDE_CHNL0CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL0CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0CONF, LPF, __x)
+#define MCDE_CHNL1CONF 0x00000620
+#define MCDE_CHNL1CONF_PPL_SHIFT 0
+#define MCDE_CHNL1CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL1CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1CONF, PPL, __x)
+#define MCDE_CHNL1CONF_LPF_SHIFT 16
+#define MCDE_CHNL1CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL1CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1CONF, LPF, __x)
+#define MCDE_CHNL2CONF 0x00000640
+#define MCDE_CHNL2CONF_PPL_SHIFT 0
+#define MCDE_CHNL2CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL2CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2CONF, PPL, __x)
+#define MCDE_CHNL2CONF_LPF_SHIFT 16
+#define MCDE_CHNL2CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL2CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2CONF, LPF, __x)
+#define MCDE_CHNL3CONF 0x00000660
+#define MCDE_CHNL3CONF_PPL_SHIFT 0
+#define MCDE_CHNL3CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL3CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3CONF, PPL, __x)
+#define MCDE_CHNL3CONF_LPF_SHIFT 16
+#define MCDE_CHNL3CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL3CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3CONF, LPF, __x)
+#define MCDE_CHNL0STAT 0x00000604
+#define MCDE_CHNL0STAT_GROUPOFFSET 0x20
+#define MCDE_CHNL0STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL0STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL0STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLRD, __x)
+#define MCDE_CHNL0STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL0STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL0STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLA, __x)
+#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL1STAT 0x00000624
+#define MCDE_CHNL1STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL1STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL1STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLRD, __x)
+#define MCDE_CHNL1STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL1STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL1STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLA, __x)
+#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL2STAT 0x00000644
+#define MCDE_CHNL2STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL2STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL2STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLRD, __x)
+#define MCDE_CHNL2STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL2STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL2STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLA, __x)
+#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL3STAT 0x00000664
+#define MCDE_CHNL3STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL3STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL3STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLRD, __x)
+#define MCDE_CHNL3STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL3STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL3STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLA, __x)
+#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL0SYNCHMOD 0x00000608
+#define MCDE_CHNL0SYNCHMOD_GROUPOFFSET 0x20
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE 0
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_NO_SYNCH 1
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE0 1
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE1 2
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL1SYNCHMOD 0x00000628
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_HARDWARE 0
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_NO_SYNCH 1
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL1SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_TE0 1
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_TE1 2
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL2SYNCHMOD 0x00000648
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_HARDWARE 0
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_NO_SYNCH 1
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL2SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_TE0 1
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_TE1 2
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL3SYNCHMOD 0x00000668
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_HARDWARE 0
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_NO_SYNCH 1
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL3SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_TE0 1
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_TE1 2
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL0SYNCHSW 0x0000060C
+#define MCDE_CHNL0SYNCHSW_GROUPOFFSET 0x20
+#define MCDE_CHNL0SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL0SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL0SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL1SYNCHSW 0x0000062C
+#define MCDE_CHNL1SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL1SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL1SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL2SYNCHSW 0x0000064C
+#define MCDE_CHNL2SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL2SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL2SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL3SYNCHSW 0x0000066C
+#define MCDE_CHNL3SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL3SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL3SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL0BCKGNDCOL 0x00000610
+#define MCDE_CHNL0BCKGNDCOL_GROUPOFFSET 0x20
+#define MCDE_CHNL0BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL0BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL0BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, B, __x)
+#define MCDE_CHNL0BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL0BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL0BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, G, __x)
+#define MCDE_CHNL0BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL0BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL0BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, R, __x)
+#define MCDE_CHNL1BCKGNDCOL 0x00000630
+#define MCDE_CHNL1BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL1BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL1BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, B, __x)
+#define MCDE_CHNL1BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL1BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL1BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, G, __x)
+#define MCDE_CHNL1BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL1BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL1BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, R, __x)
+#define MCDE_CHNL2BCKGNDCOL 0x00000650
+#define MCDE_CHNL2BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL2BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL2BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, B, __x)
+#define MCDE_CHNL2BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL2BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL2BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, G, __x)
+#define MCDE_CHNL2BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL2BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL2BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, R, __x)
+#define MCDE_CHNL3BCKGNDCOL 0x00000670
+#define MCDE_CHNL3BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL3BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL3BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, B, __x)
+#define MCDE_CHNL3BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL3BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL3BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, G, __x)
+#define MCDE_CHNL3BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL3BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL3BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, R, __x)
+#define MCDE_CHNL0MUXING 0x00000614
+#define MCDE_CHNL0MUXING_GROUPOFFSET 0x20
+#define MCDE_CHNL0MUXING_FIFO_ID_SHIFT 0
+#define MCDE_CHNL0MUXING_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL0MUXING_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0MUXING, FIFO_ID, MCDE_CHNL0MUXING_FIFO_ID_##__x)
+#define MCDE_CHNL0MUXING_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0MUXING, FIFO_ID, __x)
+#define MCDE_CHNL1MUXING 0x00000634
+#define MCDE_CHNL1MUXING_FIFO_ID_SHIFT 0
+#define MCDE_CHNL1MUXING_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL1MUXING_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1MUXING, FIFO_ID, MCDE_CHNL1MUXING_FIFO_ID_##__x)
+#define MCDE_CHNL1MUXING_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1MUXING, FIFO_ID, __x)
+#define MCDE_CHNL2MUXING 0x00000654
+#define MCDE_CHNL2MUXING_FIFO_ID_SHIFT 0
+#define MCDE_CHNL2MUXING_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL2MUXING_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2MUXING, FIFO_ID, MCDE_CHNL2MUXING_FIFO_ID_##__x)
+#define MCDE_CHNL2MUXING_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2MUXING, FIFO_ID, __x)
+#define MCDE_CHNL3MUXING 0x00000674
+#define MCDE_CHNL3MUXING_FIFO_ID_SHIFT 0
+#define MCDE_CHNL3MUXING_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL3MUXING_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3MUXING, FIFO_ID, MCDE_CHNL3MUXING_FIFO_ID_##__x)
+#define MCDE_CHNL3MUXING_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3MUXING, FIFO_ID, __x)
+#define MCDE_CRA0 0x00000800
+#define MCDE_CRA0_GROUPOFFSET 0x200
+#define MCDE_CRA0_FLOEN_SHIFT 0
+#define MCDE_CRA0_FLOEN_MASK 0x00000001
+#define MCDE_CRA0_FLOEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLOEN, __x)
+#define MCDE_CRA0_BLENDEN_SHIFT 2
+#define MCDE_CRA0_BLENDEN_MASK 0x00000004
+#define MCDE_CRA0_BLENDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, BLENDEN, __x)
+#define MCDE_CRA0_AFLICKEN_SHIFT 3
+#define MCDE_CRA0_AFLICKEN_MASK 0x00000008
+#define MCDE_CRA0_AFLICKEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, AFLICKEN, __x)
+#define MCDE_CRA0_PALEN_SHIFT 4
+#define MCDE_CRA0_PALEN_MASK 0x00000010
+#define MCDE_CRA0_PALEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, PALEN, __x)
+#define MCDE_CRA0_DITHEN_SHIFT 5
+#define MCDE_CRA0_DITHEN_MASK 0x00000020
+#define MCDE_CRA0_DITHEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, DITHEN, __x)
+#define MCDE_CRA0_GAMEN_SHIFT 6
+#define MCDE_CRA0_GAMEN_MASK 0x00000040
+#define MCDE_CRA0_GAMEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, GAMEN, __x)
+#define MCDE_CRA0_KEYCTRL_SHIFT 7
+#define MCDE_CRA0_KEYCTRL_MASK 0x00000380
+#define MCDE_CRA0_KEYCTRL_OFF 0
+#define MCDE_CRA0_KEYCTRL_ALPHA_RGB 1
+#define MCDE_CRA0_KEYCTRL_RGB 2
+#define MCDE_CRA0_KEYCTRL_FALPHA_FRGB 4
+#define MCDE_CRA0_KEYCTRL_FRGB 5
+#define MCDE_CRA0_KEYCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, KEYCTRL, MCDE_CRA0_KEYCTRL_##__x)
+#define MCDE_CRA0_KEYCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, KEYCTRL, __x)
+#define MCDE_CRA0_BLENDCTRL_SHIFT 10
+#define MCDE_CRA0_BLENDCTRL_MASK 0x00000400
+#define MCDE_CRA0_BLENDCTRL_SOURCE 0
+#define MCDE_CRA0_BLENDCTRL_CONSTANT 1
+#define MCDE_CRA0_BLENDCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, BLENDCTRL, MCDE_CRA0_BLENDCTRL_##__x)
+#define MCDE_CRA0_BLENDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, BLENDCTRL, __x)
+#define MCDE_CRA0_FLICKMODE_SHIFT 11
+#define MCDE_CRA0_FLICKMODE_MASK 0x00001800
+#define MCDE_CRA0_FLICKMODE_FORCE_FILTER_0 0
+#define MCDE_CRA0_FLICKMODE_ADAPTIVE 1
+#define MCDE_CRA0_FLICKMODE_TEST_MODE 2
+#define MCDE_CRA0_FLICKMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLICKMODE, MCDE_CRA0_FLICKMODE_##__x)
+#define MCDE_CRA0_FLICKMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLICKMODE, __x)
+#define MCDE_CRA0_FLOCKFORMAT_SHIFT 13
+#define MCDE_CRA0_FLOCKFORMAT_MASK 0x00002000
+#define MCDE_CRA0_FLOCKFORMAT_YCBCR 0
+#define MCDE_CRA0_FLOCKFORMAT_RGB 1
+#define MCDE_CRA0_FLOCKFORMAT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLOCKFORMAT, MCDE_CRA0_FLOCKFORMAT_##__x)
+#define MCDE_CRA0_FLOCKFORMAT(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLOCKFORMAT, __x)
+#define MCDE_CRA0_PALMODE_SHIFT 14
+#define MCDE_CRA0_PALMODE_MASK 0x00004000
+#define MCDE_CRA0_PALMODE_PALETTE 0
+#define MCDE_CRA0_PALMODE_GAMMA 1
+#define MCDE_CRA0_PALMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, PALMODE, __x)
+#define MCDE_CRA0_OLEDEN_SHIFT 15
+#define MCDE_CRA0_OLEDEN_MASK 0x00008000
+#define MCDE_CRA0_OLEDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, OLEDEN, __x)
+#define MCDE_CRA0_ALPHABLEND_SHIFT 16
+#define MCDE_CRA0_ALPHABLEND_MASK 0x00FF0000
+#define MCDE_CRA0_ALPHABLEND(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, ALPHABLEND, __x)
+#define MCDE_CRA0_ROTEN_SHIFT 24
+#define MCDE_CRA0_ROTEN_MASK 0x01000000
+#define MCDE_CRA0_ROTEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, ROTEN, __x)
+#define MCDE_CRB0 0x00000A00
+#define MCDE_CRB0_FLOEN_SHIFT 0
+#define MCDE_CRB0_FLOEN_MASK 0x00000001
+#define MCDE_CRB0_FLOEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLOEN, __x)
+#define MCDE_CRB0_BLENDEN_SHIFT 2
+#define MCDE_CRB0_BLENDEN_MASK 0x00000004
+#define MCDE_CRB0_BLENDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, BLENDEN, __x)
+#define MCDE_CRB0_AFLICKEN_SHIFT 3
+#define MCDE_CRB0_AFLICKEN_MASK 0x00000008
+#define MCDE_CRB0_AFLICKEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, AFLICKEN, __x)
+#define MCDE_CRB0_PALEN_SHIFT 4
+#define MCDE_CRB0_PALEN_MASK 0x00000010
+#define MCDE_CRB0_PALEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, PALEN, __x)
+#define MCDE_CRB0_DITHEN_SHIFT 5
+#define MCDE_CRB0_DITHEN_MASK 0x00000020
+#define MCDE_CRB0_DITHEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, DITHEN, __x)
+#define MCDE_CRB0_GAMEN_SHIFT 6
+#define MCDE_CRB0_GAMEN_MASK 0x00000040
+#define MCDE_CRB0_GAMEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, GAMEN, __x)
+#define MCDE_CRB0_KEYCTRL_SHIFT 7
+#define MCDE_CRB0_KEYCTRL_MASK 0x00000380
+#define MCDE_CRB0_KEYCTRL_OFF 0
+#define MCDE_CRB0_KEYCTRL_ALPHA_RGB 1
+#define MCDE_CRB0_KEYCTRL_RGB 2
+#define MCDE_CRB0_KEYCTRL_FALPHA_FRGB 4
+#define MCDE_CRB0_KEYCTRL_FRGB 5
+#define MCDE_CRB0_KEYCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, KEYCTRL, MCDE_CRB0_KEYCTRL_##__x)
+#define MCDE_CRB0_KEYCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, KEYCTRL, __x)
+#define MCDE_CRB0_BLENDCTRL_SHIFT 10
+#define MCDE_CRB0_BLENDCTRL_MASK 0x00000400
+#define MCDE_CRB0_BLENDCTRL_SOURCE 0
+#define MCDE_CRB0_BLENDCTRL_CONSTANT 1
+#define MCDE_CRB0_BLENDCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, BLENDCTRL, MCDE_CRB0_BLENDCTRL_##__x)
+#define MCDE_CRB0_BLENDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, BLENDCTRL, __x)
+#define MCDE_CRB0_FLICKMODE_SHIFT 11
+#define MCDE_CRB0_FLICKMODE_MASK 0x00001800
+#define MCDE_CRB0_FLICKMODE_FORCE_FILTER_0 0
+#define MCDE_CRB0_FLICKMODE_ADAPTIVE 1
+#define MCDE_CRB0_FLICKMODE_TEST_MODE 2
+#define MCDE_CRB0_FLICKMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLICKMODE, MCDE_CRB0_FLICKMODE_##__x)
+#define MCDE_CRB0_FLICKMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLICKMODE, __x)
+#define MCDE_CRB0_FLOCKFORMAT_SHIFT 13
+#define MCDE_CRB0_FLOCKFORMAT_MASK 0x00002000
+#define MCDE_CRB0_FLOCKFORMAT_YCBCR 0
+#define MCDE_CRB0_FLOCKFORMAT_RGB 1
+#define MCDE_CRB0_FLOCKFORMAT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLOCKFORMAT, MCDE_CRB0_FLOCKFORMAT_##__x)
+#define MCDE_CRB0_FLOCKFORMAT(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLOCKFORMAT, __x)
+#define MCDE_CRB0_PALMODE_SHIFT 14
+#define MCDE_CRB0_PALMODE_MASK 0x00004000
+#define MCDE_CRB0_PALMODE_PALETTE 0
+#define MCDE_CRB0_PALMODE_GAMMA 1
+#define MCDE_CRB0_PALMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, PALMODE, __x)
+#define MCDE_CRB0_OLEDEN_SHIFT 15
+#define MCDE_CRB0_OLEDEN_MASK 0x00008000
+#define MCDE_CRB0_OLEDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, OLEDEN, __x)
+#define MCDE_CRB0_ALPHABLEND_SHIFT 16
+#define MCDE_CRB0_ALPHABLEND_MASK 0x00FF0000
+#define MCDE_CRB0_ALPHABLEND(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, ALPHABLEND, __x)
+#define MCDE_CRB0_ROTEN_SHIFT 24
+#define MCDE_CRB0_ROTEN_MASK 0x01000000
+#define MCDE_CRB0_ROTEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, ROTEN, __x)
+#define MCDE_CRA1 0x00000804
+#define MCDE_CRA1_GROUPOFFSET 0x200
+#define MCDE_CRA1_PCD_SHIFT 0
+#define MCDE_CRA1_PCD_MASK 0x000003FF
+#define MCDE_CRA1_PCD(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, PCD, __x)
+#define MCDE_CRA1_CLKSEL_SHIFT 10
+#define MCDE_CRA1_CLKSEL_MASK 0x00001C00
+#define MCDE_CRA1_CLKSEL_CLKPLL72 0
+#define MCDE_CRA1_CLKSEL_CLKPLL27 2
+#define MCDE_CRA1_CLKSEL_TV1CLK 3
+#define MCDE_CRA1_CLKSEL_TV2CLK 4
+#define MCDE_CRA1_CLKSEL_MCDECLK 5
+#define MCDE_CRA1_CLKSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKSEL, MCDE_CRA1_CLKSEL_##__x)
+#define MCDE_CRA1_CLKSEL(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKSEL, __x)
+#define MCDE_CRA1_CDWIN_SHIFT 13
+#define MCDE_CRA1_CDWIN_MASK 0x0001E000
+#define MCDE_CRA1_CDWIN_8BPP_C1 0
+#define MCDE_CRA1_CDWIN_12BPP_C1 1
+#define MCDE_CRA1_CDWIN_12BPP_C2 2
+#define MCDE_CRA1_CDWIN_16BPP_C1 3
+#define MCDE_CRA1_CDWIN_16BPP_C2 4
+#define MCDE_CRA1_CDWIN_16BPP_C3 5
+#define MCDE_CRA1_CDWIN_18BPP_C1 6
+#define MCDE_CRA1_CDWIN_18BPP_C2 7
+#define MCDE_CRA1_CDWIN_24BPP 8
+#define MCDE_CRA1_CDWIN_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CDWIN, MCDE_CRA1_CDWIN_##__x)
+#define MCDE_CRA1_CDWIN(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CDWIN, __x)
+#define MCDE_CRA1_OUTBPP_SHIFT 25
+#define MCDE_CRA1_OUTBPP_MASK 0x1E000000
+#define MCDE_CRA1_OUTBPP_MONO1 0
+#define MCDE_CRA1_OUTBPP_MONO2 1
+#define MCDE_CRA1_OUTBPP_MONO4 2
+#define MCDE_CRA1_OUTBPP_MONO8 3
+#define MCDE_CRA1_OUTBPP_8BPP 4
+#define MCDE_CRA1_OUTBPP_12BPP 5
+#define MCDE_CRA1_OUTBPP_15BPP 6
+#define MCDE_CRA1_OUTBPP_16BPP 7
+#define MCDE_CRA1_OUTBPP_18BPP 8
+#define MCDE_CRA1_OUTBPP_24BPP 9
+#define MCDE_CRA1_OUTBPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, OUTBPP, MCDE_CRA1_OUTBPP_##__x)
+#define MCDE_CRA1_OUTBPP(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, OUTBPP, __x)
+#define MCDE_CRA1_BCD_SHIFT 29
+#define MCDE_CRA1_BCD_MASK 0x20000000
+#define MCDE_CRA1_BCD(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, BCD, __x)
+#define MCDE_CRA1_CLKTYPE_SHIFT 30
+#define MCDE_CRA1_CLKTYPE_MASK 0x40000000
+#define MCDE_CRA1_CLKTYPE_TVXCLKSEL0 0
+#define MCDE_CRA1_CLKTYPE_TVXCLKSEL1 1
+#define MCDE_CRA1_CLKTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKTYPE, MCDE_CRA1_CLKTYPE_##__x)
+#define MCDE_CRA1_CLKTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKTYPE, __x)
+#define MCDE_CRB1 0x00000A04
+#define MCDE_CRB1_PCD_SHIFT 0
+#define MCDE_CRB1_PCD_MASK 0x000003FF
+#define MCDE_CRB1_PCD(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, PCD, __x)
+#define MCDE_CRB1_CLKSEL_SHIFT 10
+#define MCDE_CRB1_CLKSEL_MASK 0x00001C00
+#define MCDE_CRB1_CLKSEL_CLKPLL72 0
+#define MCDE_CRB1_CLKSEL_CLKPLL27 2
+#define MCDE_CRB1_CLKSEL_TV1CLK 3
+#define MCDE_CRB1_CLKSEL_TV2CLK 4
+#define MCDE_CRB1_CLKSEL_MCDECLK 5
+#define MCDE_CRB1_CLKSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKSEL, MCDE_CRB1_CLKSEL_##__x)
+#define MCDE_CRB1_CLKSEL(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKSEL, __x)
+#define MCDE_CRB1_CDWIN_SHIFT 13
+#define MCDE_CRB1_CDWIN_MASK 0x0001E000
+#define MCDE_CRB1_CDWIN_8BPP_C1 0
+#define MCDE_CRB1_CDWIN_12BPP_C1 1
+#define MCDE_CRB1_CDWIN_12BPP_C2 2
+#define MCDE_CRB1_CDWIN_16BPP_C1 3
+#define MCDE_CRB1_CDWIN_16BPP_C2 4
+#define MCDE_CRB1_CDWIN_16BPP_C3 5
+#define MCDE_CRB1_CDWIN_18BPP_C1 6
+#define MCDE_CRB1_CDWIN_18BPP_C2 7
+#define MCDE_CRB1_CDWIN_24BPP 8
+#define MCDE_CRB1_CDWIN_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CDWIN, MCDE_CRB1_CDWIN_##__x)
+#define MCDE_CRB1_CDWIN(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CDWIN, __x)
+#define MCDE_CRB1_OUTBPP_SHIFT 25
+#define MCDE_CRB1_OUTBPP_MASK 0x1E000000
+#define MCDE_CRB1_OUTBPP_MONO1 0
+#define MCDE_CRB1_OUTBPP_MONO2 1
+#define MCDE_CRB1_OUTBPP_MONO4 2
+#define MCDE_CRB1_OUTBPP_MONO8 3
+#define MCDE_CRB1_OUTBPP_8BPP 4
+#define MCDE_CRB1_OUTBPP_12BPP 5
+#define MCDE_CRB1_OUTBPP_15BPP 6
+#define MCDE_CRB1_OUTBPP_16BPP 7
+#define MCDE_CRB1_OUTBPP_18BPP 8
+#define MCDE_CRB1_OUTBPP_24BPP 9
+#define MCDE_CRB1_OUTBPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, OUTBPP, MCDE_CRB1_OUTBPP_##__x)
+#define MCDE_CRB1_OUTBPP(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, OUTBPP, __x)
+#define MCDE_CRB1_BCD_SHIFT 29
+#define MCDE_CRB1_BCD_MASK 0x20000000
+#define MCDE_CRB1_BCD(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, BCD, __x)
+#define MCDE_CRB1_CLKTYPE_SHIFT 30
+#define MCDE_CRB1_CLKTYPE_MASK 0x40000000
+#define MCDE_CRB1_CLKTYPE_TVXCLKSEL0 0
+#define MCDE_CRB1_CLKTYPE_TVXCLKSEL1 1
+#define MCDE_CRB1_CLKTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKTYPE, MCDE_CRB1_CLKTYPE_##__x)
+#define MCDE_CRB1_CLKTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKTYPE, __x)
+#define MCDE_COLKEYA 0x00000808
+#define MCDE_COLKEYA_GROUPOFFSET 0x200
+#define MCDE_COLKEYA_KEYB_SHIFT 0
+#define MCDE_COLKEYA_KEYB_MASK 0x000000FF
+#define MCDE_COLKEYA_KEYB(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYB, __x)
+#define MCDE_COLKEYA_KEYG_SHIFT 8
+#define MCDE_COLKEYA_KEYG_MASK 0x0000FF00
+#define MCDE_COLKEYA_KEYG(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYG, __x)
+#define MCDE_COLKEYA_KEYR_SHIFT 16
+#define MCDE_COLKEYA_KEYR_MASK 0x00FF0000
+#define MCDE_COLKEYA_KEYR(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYR, __x)
+#define MCDE_COLKEYA_KEYA_SHIFT 24
+#define MCDE_COLKEYA_KEYA_MASK 0xFF000000
+#define MCDE_COLKEYA_KEYA(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYA, __x)
+#define MCDE_COLKEYB 0x00000A08
+#define MCDE_COLKEYB_KEYB_SHIFT 0
+#define MCDE_COLKEYB_KEYB_MASK 0x000000FF
+#define MCDE_COLKEYB_KEYB(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYB, __x)
+#define MCDE_COLKEYB_KEYG_SHIFT 8
+#define MCDE_COLKEYB_KEYG_MASK 0x0000FF00
+#define MCDE_COLKEYB_KEYG(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYG, __x)
+#define MCDE_COLKEYB_KEYR_SHIFT 16
+#define MCDE_COLKEYB_KEYR_MASK 0x00FF0000
+#define MCDE_COLKEYB_KEYR(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYR, __x)
+#define MCDE_COLKEYB_KEYA_SHIFT 24
+#define MCDE_COLKEYB_KEYA_MASK 0xFF000000
+#define MCDE_COLKEYB_KEYA(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYA, __x)
+#define MCDE_FCOLKEYA 0x0000080C
+#define MCDE_FCOLKEYA_GROUPOFFSET 0x200
+#define MCDE_FCOLKEYA_FKEYB_SHIFT 0
+#define MCDE_FCOLKEYA_FKEYB_MASK 0x000000FF
+#define MCDE_FCOLKEYA_FKEYB(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYB, __x)
+#define MCDE_FCOLKEYA_FKEYG_SHIFT 8
+#define MCDE_FCOLKEYA_FKEYG_MASK 0x0000FF00
+#define MCDE_FCOLKEYA_FKEYG(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYG, __x)
+#define MCDE_FCOLKEYA_FKEYR_SHIFT 16
+#define MCDE_FCOLKEYA_FKEYR_MASK 0x00FF0000
+#define MCDE_FCOLKEYA_FKEYR(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYR, __x)
+#define MCDE_FCOLKEYA_FKEYA_SHIFT 24
+#define MCDE_FCOLKEYA_FKEYA_MASK 0xFF000000
+#define MCDE_FCOLKEYA_FKEYA(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYA, __x)
+#define MCDE_FCOLKEYB 0x00000A0C
+#define MCDE_FCOLKEYB_FKEYB_SHIFT 0
+#define MCDE_FCOLKEYB_FKEYB_MASK 0x000000FF
+#define MCDE_FCOLKEYB_FKEYB(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYB, __x)
+#define MCDE_FCOLKEYB_FKEYG_SHIFT 8
+#define MCDE_FCOLKEYB_FKEYG_MASK 0x0000FF00
+#define MCDE_FCOLKEYB_FKEYG(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYG, __x)
+#define MCDE_FCOLKEYB_FKEYR_SHIFT 16
+#define MCDE_FCOLKEYB_FKEYR_MASK 0x00FF0000
+#define MCDE_FCOLKEYB_FKEYR(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYR, __x)
+#define MCDE_FCOLKEYB_FKEYA_SHIFT 24
+#define MCDE_FCOLKEYB_FKEYA_MASK 0xFF000000
+#define MCDE_FCOLKEYB_FKEYA(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYA, __x)
+#define MCDE_RGBCONV1A 0x00000810
+#define MCDE_RGBCONV1A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV1A_YR_GREEN_SHIFT 0
+#define MCDE_RGBCONV1A_YR_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV1A_YR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1A, YR_GREEN, __x)
+#define MCDE_RGBCONV1A_YR_RED_SHIFT 16
+#define MCDE_RGBCONV1A_YR_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV1A_YR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1A, YR_RED, __x)
+#define MCDE_RGBCONV1B 0x00000A10
+#define MCDE_RGBCONV1B_YR_GREEN_SHIFT 0
+#define MCDE_RGBCONV1B_YR_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV1B_YR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1B, YR_GREEN, __x)
+#define MCDE_RGBCONV1B_YR_RED_SHIFT 16
+#define MCDE_RGBCONV1B_YR_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV1B_YR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1B, YR_RED, __x)
+#define MCDE_RGBCONV2A 0x00000814
+#define MCDE_RGBCONV2A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV2A_CR_RED_SHIFT 0
+#define MCDE_RGBCONV2A_CR_RED_MASK 0x000007FF
+#define MCDE_RGBCONV2A_CR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2A, CR_RED, __x)
+#define MCDE_RGBCONV2A_YR_BLUE_SHIFT 16
+#define MCDE_RGBCONV2A_YR_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV2A_YR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2A, YR_BLUE, __x)
+#define MCDE_RGBCONV2B 0x00000A14
+#define MCDE_RGBCONV2B_CR_RED_SHIFT 0
+#define MCDE_RGBCONV2B_CR_RED_MASK 0x000007FF
+#define MCDE_RGBCONV2B_CR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2B, CR_RED, __x)
+#define MCDE_RGBCONV2B_YR_BLUE_SHIFT 16
+#define MCDE_RGBCONV2B_YR_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV2B_YR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2B, YR_BLUE, __x)
+#define MCDE_RGBCONV3A 0x00000818
+#define MCDE_RGBCONV3A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV3A_CR_BLUE_SHIFT 0
+#define MCDE_RGBCONV3A_CR_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV3A_CR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3A, CR_BLUE, __x)
+#define MCDE_RGBCONV3A_CR_GREEN_SHIFT 16
+#define MCDE_RGBCONV3A_CR_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV3A_CR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3A, CR_GREEN, __x)
+#define MCDE_RGBCONV3B 0x00000A18
+#define MCDE_RGBCONV3B_CR_BLUE_SHIFT 0
+#define MCDE_RGBCONV3B_CR_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV3B_CR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3B, CR_BLUE, __x)
+#define MCDE_RGBCONV3B_CR_GREEN_SHIFT 16
+#define MCDE_RGBCONV3B_CR_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV3B_CR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3B, CR_GREEN, __x)
+#define MCDE_RGBCONV4A 0x0000081C
+#define MCDE_RGBCONV4A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV4A_CB_GREEN_SHIFT 0
+#define MCDE_RGBCONV4A_CB_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV4A_CB_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4A, CB_GREEN, __x)
+#define MCDE_RGBCONV4A_CB_RED_SHIFT 16
+#define MCDE_RGBCONV4A_CB_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV4A_CB_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4A, CB_RED, __x)
+#define MCDE_RGBCONV4B 0x00000A1C
+#define MCDE_RGBCONV4B_CB_GREEN_SHIFT 0
+#define MCDE_RGBCONV4B_CB_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV4B_CB_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4B, CB_GREEN, __x)
+#define MCDE_RGBCONV4B_CB_RED_SHIFT 16
+#define MCDE_RGBCONV4B_CB_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV4B_CB_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4B, CB_RED, __x)
+#define MCDE_RGBCONV5A 0x00000820
+#define MCDE_RGBCONV5A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV5A_OFF_RED_SHIFT 0
+#define MCDE_RGBCONV5A_OFF_RED_MASK 0x000007FF
+#define MCDE_RGBCONV5A_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5A, OFF_RED, __x)
+#define MCDE_RGBCONV5A_CB_BLUE_SHIFT 16
+#define MCDE_RGBCONV5A_CB_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV5A_CB_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5A, CB_BLUE, __x)
+#define MCDE_RGBCONV5B 0x00000A20
+#define MCDE_RGBCONV5B_OFF_RED_SHIFT 0
+#define MCDE_RGBCONV5B_OFF_RED_MASK 0x000007FF
+#define MCDE_RGBCONV5B_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5B, OFF_RED, __x)
+#define MCDE_RGBCONV5B_CB_BLUE_SHIFT 16
+#define MCDE_RGBCONV5B_CB_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV5B_CB_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5B, CB_BLUE, __x)
+#define MCDE_RGBCONV6A 0x00000824
+#define MCDE_RGBCONV6A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV6A_OFF_BLUE_SHIFT 0
+#define MCDE_RGBCONV6A_OFF_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV6A_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6A, OFF_BLUE, __x)
+#define MCDE_RGBCONV6A_OFF_GREEN_SHIFT 16
+#define MCDE_RGBCONV6A_OFF_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV6A_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6A, OFF_GREEN, __x)
+#define MCDE_RGBCONV6B 0x00000A24
+#define MCDE_RGBCONV6B_OFF_BLUE_SHIFT 0
+#define MCDE_RGBCONV6B_OFF_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV6B_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6B, OFF_BLUE, __x)
+#define MCDE_RGBCONV6B_OFF_GREEN_SHIFT 16
+#define MCDE_RGBCONV6B_OFF_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV6B_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6B, OFF_GREEN, __x)
+#define MCDE_FFCOEF0 0x00000828
+#define MCDE_FFCOEF0_COEFF0_N1_SHIFT 0
+#define MCDE_FFCOEF0_COEFF0_N1_MASK 0x000000FF
+#define MCDE_FFCOEF0_COEFF0_N1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N1, __x)
+#define MCDE_FFCOEF0_COEFF0_N2_SHIFT 8
+#define MCDE_FFCOEF0_COEFF0_N2_MASK 0x0000FF00
+#define MCDE_FFCOEF0_COEFF0_N2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N2, __x)
+#define MCDE_FFCOEF0_COEFF0_N3_SHIFT 16
+#define MCDE_FFCOEF0_COEFF0_N3_MASK 0x00FF0000
+#define MCDE_FFCOEF0_COEFF0_N3(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N3, __x)
+#define MCDE_FFCOEF0_T0_SHIFT 24
+#define MCDE_FFCOEF0_T0_MASK 0x0F000000
+#define MCDE_FFCOEF0_T0(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, T0, __x)
+#define MCDE_FFCOEF1 0x0000082C
+#define MCDE_FFCOEF1_COEFF1_N1_SHIFT 0
+#define MCDE_FFCOEF1_COEFF1_N1_MASK 0x000000FF
+#define MCDE_FFCOEF1_COEFF1_N1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N1, __x)
+#define MCDE_FFCOEF1_COEFF1_N2_SHIFT 8
+#define MCDE_FFCOEF1_COEFF1_N2_MASK 0x0000FF00
+#define MCDE_FFCOEF1_COEFF1_N2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N2, __x)
+#define MCDE_FFCOEF1_COEFF1_N3_SHIFT 16
+#define MCDE_FFCOEF1_COEFF1_N3_MASK 0x00FF0000
+#define MCDE_FFCOEF1_COEFF1_N3(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N3, __x)
+#define MCDE_FFCOEF1_T1_SHIFT 24
+#define MCDE_FFCOEF1_T1_MASK 0x0F000000
+#define MCDE_FFCOEF1_T1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, T1, __x)
+#define MCDE_FFCOEF2 0x00000830
+#define MCDE_FFCOEF2_COEFF2_N1_SHIFT 0
+#define MCDE_FFCOEF2_COEFF2_N1_MASK 0x000000FF
+#define MCDE_FFCOEF2_COEFF2_N1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N1, __x)
+#define MCDE_FFCOEF2_COEFF2_N2_SHIFT 8
+#define MCDE_FFCOEF2_COEFF2_N2_MASK 0x0000FF00
+#define MCDE_FFCOEF2_COEFF2_N2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N2, __x)
+#define MCDE_FFCOEF2_COEFF2_N3_SHIFT 16
+#define MCDE_FFCOEF2_COEFF2_N3_MASK 0x00FF0000
+#define MCDE_FFCOEF2_COEFF2_N3(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N3, __x)
+#define MCDE_FFCOEF2_T2_SHIFT 24
+#define MCDE_FFCOEF2_T2_MASK 0x0F000000
+#define MCDE_FFCOEF2_T2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, T2, __x)
+#define MCDE_MCDE_WDATAA 0x00000834
+#define MCDE_MCDE_WDATAA_GROUPOFFSET 0x200
+#define MCDE_MCDE_WDATAA_DC_SHIFT 24
+#define MCDE_MCDE_WDATAA_DC_MASK 0x01000000
+#define MCDE_MCDE_WDATAA_DC(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAA, DC, __x)
+#define MCDE_MCDE_WDATAA_DATAVALUE_SHIFT 0
+#define MCDE_MCDE_WDATAA_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_MCDE_WDATAA_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAA, DATAVALUE, __x)
+#define MCDE_MCDE_WDATAB 0x00000A34
+#define MCDE_MCDE_WDATAB_DC_SHIFT 24
+#define MCDE_MCDE_WDATAB_DC_MASK 0x01000000
+#define MCDE_MCDE_WDATAB_DC(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAB, DC, __x)
+#define MCDE_MCDE_WDATAB_DATAVALUE_SHIFT 0
+#define MCDE_MCDE_WDATAB_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_MCDE_WDATAB_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAB, DATAVALUE, __x)
+#define MCDE_TVCRA 0x00000838
+#define MCDE_TVCRA_GROUPOFFSET 0x200
+#define MCDE_TVCRA_SEL_MOD_SHIFT 0
+#define MCDE_TVCRA_SEL_MOD_MASK 0x00000001
+#define MCDE_TVCRA_SEL_MOD_LCD 0
+#define MCDE_TVCRA_SEL_MOD_TV 1
+#define MCDE_TVCRA_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SEL_MOD, MCDE_TVCRA_SEL_MOD_##__x)
+#define MCDE_TVCRA_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SEL_MOD, __x)
+#define MCDE_TVCRA_INTEREN_SHIFT 1
+#define MCDE_TVCRA_INTEREN_MASK 0x00000002
+#define MCDE_TVCRA_INTEREN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, INTEREN, __x)
+#define MCDE_TVCRA_IFIELD_SHIFT 2
+#define MCDE_TVCRA_IFIELD_MASK 0x00000004
+#define MCDE_TVCRA_IFIELD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, IFIELD, __x)
+#define MCDE_TVCRA_TVMODE_SHIFT 3
+#define MCDE_TVCRA_TVMODE_MASK 0x00000038
+#define MCDE_TVCRA_TVMODE_SDTV_656P 0
+#define MCDE_TVCRA_TVMODE_SDTV_656P_LE 3
+#define MCDE_TVCRA_TVMODE_SDTV_656P_BE 4
+#define MCDE_TVCRA_TVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, TVMODE, MCDE_TVCRA_TVMODE_##__x)
+#define MCDE_TVCRA_TVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, TVMODE, __x)
+#define MCDE_TVCRA_SDTVMODE_SHIFT 6
+#define MCDE_TVCRA_SDTVMODE_MASK 0x000000C0
+#define MCDE_TVCRA_SDTVMODE_Y0CBY1CR 0
+#define MCDE_TVCRA_SDTVMODE_CBY0CRY1 1
+#define MCDE_TVCRA_SDTVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SDTVMODE, MCDE_TVCRA_SDTVMODE_##__x)
+#define MCDE_TVCRA_SDTVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SDTVMODE, __x)
+#define MCDE_TVCRA_AVRGEN_SHIFT 8
+#define MCDE_TVCRA_AVRGEN_MASK 0x00000100
+#define MCDE_TVCRA_AVRGEN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, AVRGEN, __x)
+#define MCDE_TVCRA_CKINV_SHIFT 9
+#define MCDE_TVCRA_CKINV_MASK 0x00000200
+#define MCDE_TVCRA_CKINV(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, CKINV, __x)
+#define MCDE_TVCRB 0x00000A38
+#define MCDE_TVCRB_SEL_MOD_SHIFT 0
+#define MCDE_TVCRB_SEL_MOD_MASK 0x00000001
+#define MCDE_TVCRB_SEL_MOD_LCD 0
+#define MCDE_TVCRB_SEL_MOD_TV 1
+#define MCDE_TVCRB_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SEL_MOD, MCDE_TVCRB_SEL_MOD_##__x)
+#define MCDE_TVCRB_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SEL_MOD, __x)
+#define MCDE_TVCRB_INTEREN_SHIFT 1
+#define MCDE_TVCRB_INTEREN_MASK 0x00000002
+#define MCDE_TVCRB_INTEREN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, INTEREN, __x)
+#define MCDE_TVCRB_IFIELD_SHIFT 2
+#define MCDE_TVCRB_IFIELD_MASK 0x00000004
+#define MCDE_TVCRB_IFIELD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, IFIELD, __x)
+#define MCDE_TVCRB_TVMODE_SHIFT 3
+#define MCDE_TVCRB_TVMODE_MASK 0x00000038
+#define MCDE_TVCRB_TVMODE_SDTV_656P 0
+#define MCDE_TVCRB_TVMODE_SDTV_656P_LE 3
+#define MCDE_TVCRB_TVMODE_SDTV_656P_BE 4
+#define MCDE_TVCRB_TVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, TVMODE, MCDE_TVCRB_TVMODE_##__x)
+#define MCDE_TVCRB_TVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, TVMODE, __x)
+#define MCDE_TVCRB_SDTVMODE_SHIFT 6
+#define MCDE_TVCRB_SDTVMODE_MASK 0x000000C0
+#define MCDE_TVCRB_SDTVMODE_Y0CBY1CR 0
+#define MCDE_TVCRB_SDTVMODE_CBY0CRY1 1
+#define MCDE_TVCRB_SDTVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SDTVMODE, MCDE_TVCRB_SDTVMODE_##__x)
+#define MCDE_TVCRB_SDTVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SDTVMODE, __x)
+#define MCDE_TVCRB_AVRGEN_SHIFT 8
+#define MCDE_TVCRB_AVRGEN_MASK 0x00000100
+#define MCDE_TVCRB_AVRGEN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, AVRGEN, __x)
+#define MCDE_TVCRB_CKINV_SHIFT 9
+#define MCDE_TVCRB_CKINV_MASK 0x00000200
+#define MCDE_TVCRB_CKINV(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, CKINV, __x)
+#define MCDE_TVBL1A 0x0000083C
+#define MCDE_TVBL1A_GROUPOFFSET 0x200
+#define MCDE_TVBL1A_BEL1_SHIFT 0
+#define MCDE_TVBL1A_BEL1_MASK 0x000007FF
+#define MCDE_TVBL1A_BEL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1A, BEL1, __x)
+#define MCDE_TVBL1A_BSL1_SHIFT 16
+#define MCDE_TVBL1A_BSL1_MASK 0x07FF0000
+#define MCDE_TVBL1A_BSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1A, BSL1, __x)
+#define MCDE_TVBL1B 0x00000A3C
+#define MCDE_TVBL1B_BEL1_SHIFT 0
+#define MCDE_TVBL1B_BEL1_MASK 0x000007FF
+#define MCDE_TVBL1B_BEL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1B, BEL1, __x)
+#define MCDE_TVBL1B_BSL1_SHIFT 16
+#define MCDE_TVBL1B_BSL1_MASK 0x07FF0000
+#define MCDE_TVBL1B_BSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1B, BSL1, __x)
+#define MCDE_TVISLA 0x00000840
+#define MCDE_TVISLA_GROUPOFFSET 0x200
+#define MCDE_TVISLA_FSL1_SHIFT 0
+#define MCDE_TVISLA_FSL1_MASK 0x000007FF
+#define MCDE_TVISLA_FSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVISLA, FSL1, __x)
+#define MCDE_TVISLA_FSL2_SHIFT 16
+#define MCDE_TVISLA_FSL2_MASK 0x07FF0000
+#define MCDE_TVISLA_FSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVISLA, FSL2, __x)
+#define MCDE_TVISLB 0x00000A40
+#define MCDE_TVISLB_FSL1_SHIFT 0
+#define MCDE_TVISLB_FSL1_MASK 0x000007FF
+#define MCDE_TVISLB_FSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVISLB, FSL1, __x)
+#define MCDE_TVISLB_FSL2_SHIFT 16
+#define MCDE_TVISLB_FSL2_MASK 0x07FF0000
+#define MCDE_TVISLB_FSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVISLB, FSL2, __x)
+#define MCDE_TVDVOA 0x00000844
+#define MCDE_TVDVOA_GROUPOFFSET 0x200
+#define MCDE_TVDVOA_DVO1_SHIFT 0
+#define MCDE_TVDVOA_DVO1_MASK 0x000007FF
+#define MCDE_TVDVOA_DVO1(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOA, DVO1, __x)
+#define MCDE_TVDVOA_DVO2_SHIFT 16
+#define MCDE_TVDVOA_DVO2_MASK 0x07FF0000
+#define MCDE_TVDVOA_DVO2(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOA, DVO2, __x)
+#define MCDE_TVDVOB 0x00000A44
+#define MCDE_TVDVOB_DVO1_SHIFT 0
+#define MCDE_TVDVOB_DVO1_MASK 0x000007FF
+#define MCDE_TVDVOB_DVO1(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOB, DVO1, __x)
+#define MCDE_TVDVOB_DVO2_SHIFT 16
+#define MCDE_TVDVOB_DVO2_MASK 0x07FF0000
+#define MCDE_TVDVOB_DVO2(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOB, DVO2, __x)
+#define MCDE_TVTIM1A 0x0000084C
+#define MCDE_TVTIM1A_GROUPOFFSET 0x200
+#define MCDE_TVTIM1A_DHO_SHIFT 0
+#define MCDE_TVTIM1A_DHO_MASK 0x000007FF
+#define MCDE_TVTIM1A_DHO(__x) \
+ MCDE_VAL2REG(MCDE_TVTIM1A, DHO, __x)
+#define MCDE_TVTIM1B 0x00000A4C
+#define MCDE_TVTIM1B_DHO_SHIFT 0
+#define MCDE_TVTIM1B_DHO_MASK 0x000007FF
+#define MCDE_TVTIM1B_DHO(__x) \
+ MCDE_VAL2REG(MCDE_TVTIM1B, DHO, __x)
+#define MCDE_TVLBALWA 0x00000850
+#define MCDE_TVLBALWA_GROUPOFFSET 0x200
+#define MCDE_TVLBALWA_LBW_SHIFT 0
+#define MCDE_TVLBALWA_LBW_MASK 0x000007FF
+#define MCDE_TVLBALWA_LBW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWA, LBW, __x)
+#define MCDE_TVLBALWA_ALW_SHIFT 16
+#define MCDE_TVLBALWA_ALW_MASK 0x07FF0000
+#define MCDE_TVLBALWA_ALW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWA, ALW, __x)
+#define MCDE_TVLBALWB 0x00000A50
+#define MCDE_TVLBALWB_LBW_SHIFT 0
+#define MCDE_TVLBALWB_LBW_MASK 0x000007FF
+#define MCDE_TVLBALWB_LBW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWB, LBW, __x)
+#define MCDE_TVLBALWB_ALW_SHIFT 16
+#define MCDE_TVLBALWB_ALW_MASK 0x07FF0000
+#define MCDE_TVLBALWB_ALW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWB, ALW, __x)
+#define MCDE_TVBL2A 0x00000854
+#define MCDE_TVBL2A_GROUPOFFSET 0x200
+#define MCDE_TVBL2A_BEL2_SHIFT 0
+#define MCDE_TVBL2A_BEL2_MASK 0x000007FF
+#define MCDE_TVBL2A_BEL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2A, BEL2, __x)
+#define MCDE_TVBL2A_BSL2_SHIFT 16
+#define MCDE_TVBL2A_BSL2_MASK 0x07FF0000
+#define MCDE_TVBL2A_BSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2A, BSL2, __x)
+#define MCDE_TVBL2B 0x00000A54
+#define MCDE_TVBL2B_BEL2_SHIFT 0
+#define MCDE_TVBL2B_BEL2_MASK 0x000007FF
+#define MCDE_TVBL2B_BEL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2B, BEL2, __x)
+#define MCDE_TVBL2B_BSL2_SHIFT 16
+#define MCDE_TVBL2B_BSL2_MASK 0x07FF0000
+#define MCDE_TVBL2B_BSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2B, BSL2, __x)
+#define MCDE_TVBLUA 0x00000858
+#define MCDE_TVBLUA_GROUPOFFSET 0x200
+#define MCDE_TVBLUA_TVBLU_SHIFT 0
+#define MCDE_TVBLUA_TVBLU_MASK 0x000000FF
+#define MCDE_TVBLUA_TVBLU(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUA, TVBLU, __x)
+#define MCDE_TVBLUA_TVBCB_SHIFT 8
+#define MCDE_TVBLUA_TVBCB_MASK 0x0000FF00
+#define MCDE_TVBLUA_TVBCB(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUA, TVBCB, __x)
+#define MCDE_TVBLUA_TVBCR_SHIFT 16
+#define MCDE_TVBLUA_TVBCR_MASK 0x00FF0000
+#define MCDE_TVBLUA_TVBCR(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUA, TVBCR, __x)
+#define MCDE_TVBLUB 0x00000A58
+#define MCDE_TVBLUB_TVBLU_SHIFT 0
+#define MCDE_TVBLUB_TVBLU_MASK 0x000000FF
+#define MCDE_TVBLUB_TVBLU(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUB, TVBLU, __x)
+#define MCDE_TVBLUB_TVBCB_SHIFT 8
+#define MCDE_TVBLUB_TVBCB_MASK 0x0000FF00
+#define MCDE_TVBLUB_TVBCB(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUB, TVBCB, __x)
+#define MCDE_TVBLUB_TVBCR_SHIFT 16
+#define MCDE_TVBLUB_TVBCR_MASK 0x00FF0000
+#define MCDE_TVBLUB_TVBCR(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUB, TVBCR, __x)
+#define MCDE_LCDTIM1A 0x00000860
+#define MCDE_LCDTIM1A_GROUPOFFSET 0x200
+#define MCDE_LCDTIM1A_IVP_SHIFT 19
+#define MCDE_LCDTIM1A_IVP_MASK 0x00080000
+#define MCDE_LCDTIM1A_IVP(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IVP, __x)
+#define MCDE_LCDTIM1A_IVS_SHIFT 20
+#define MCDE_LCDTIM1A_IVS_MASK 0x00100000
+#define MCDE_LCDTIM1A_IVS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IVS, __x)
+#define MCDE_LCDTIM1A_IHS_SHIFT 21
+#define MCDE_LCDTIM1A_IHS_MASK 0x00200000
+#define MCDE_LCDTIM1A_IHS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IHS, __x)
+#define MCDE_LCDTIM1A_IPC_SHIFT 22
+#define MCDE_LCDTIM1A_IPC_MASK 0x00400000
+#define MCDE_LCDTIM1A_IPC(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IPC, __x)
+#define MCDE_LCDTIM1A_IOE_SHIFT 23
+#define MCDE_LCDTIM1A_IOE_MASK 0x00800000
+#define MCDE_LCDTIM1A_IOE(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IOE, __x)
+#define MCDE_LCDTIM1B 0x00000A60
+#define MCDE_LCDTIM1B_IVP_SHIFT 19
+#define MCDE_LCDTIM1B_IVP_MASK 0x00080000
+#define MCDE_LCDTIM1B_IVP(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IVP, __x)
+#define MCDE_LCDTIM1B_IVS_SHIFT 20
+#define MCDE_LCDTIM1B_IVS_MASK 0x00100000
+#define MCDE_LCDTIM1B_IVS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IVS, __x)
+#define MCDE_LCDTIM1B_IHS_SHIFT 21
+#define MCDE_LCDTIM1B_IHS_MASK 0x00200000
+#define MCDE_LCDTIM1B_IHS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IHS, __x)
+#define MCDE_LCDTIM1B_IPC_SHIFT 22
+#define MCDE_LCDTIM1B_IPC_MASK 0x00400000
+#define MCDE_LCDTIM1B_IPC(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IPC, __x)
+#define MCDE_LCDTIM1B_IOE_SHIFT 23
+#define MCDE_LCDTIM1B_IOE_MASK 0x00800000
+#define MCDE_LCDTIM1B_IOE(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IOE, __x)
+#define MCDE_DITCTRLA 0x00000864
+#define MCDE_DITCTRLA_GROUPOFFSET 0x200
+#define MCDE_DITCTRLA_TEMP_SHIFT 0
+#define MCDE_DITCTRLA_TEMP_MASK 0x00000001
+#define MCDE_DITCTRLA_TEMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, TEMP, __x)
+#define MCDE_DITCTRLA_COMP_SHIFT 1
+#define MCDE_DITCTRLA_COMP_MASK 0x00000002
+#define MCDE_DITCTRLA_COMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, COMP, __x)
+#define MCDE_DITCTRLA_MODE_SHIFT 2
+#define MCDE_DITCTRLA_MODE_MASK 0x0000000C
+#define MCDE_DITCTRLA_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, MODE, __x)
+#define MCDE_DITCTRLA_MASK_SHIFT 4
+#define MCDE_DITCTRLA_MASK_MASK 0x00000010
+#define MCDE_DITCTRLA_MASK(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, MASK, __x)
+#define MCDE_DITCTRLA_FOFFX_SHIFT 5
+#define MCDE_DITCTRLA_FOFFX_MASK 0x000003E0
+#define MCDE_DITCTRLA_FOFFX(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, FOFFX, __x)
+#define MCDE_DITCTRLA_FOFFY_SHIFT 10
+#define MCDE_DITCTRLA_FOFFY_MASK 0x00007C00
+#define MCDE_DITCTRLA_FOFFY(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, FOFFY, __x)
+#define MCDE_DITCTRLB 0x00000A64
+#define MCDE_DITCTRLB_TEMP_SHIFT 0
+#define MCDE_DITCTRLB_TEMP_MASK 0x00000001
+#define MCDE_DITCTRLB_TEMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, TEMP, __x)
+#define MCDE_DITCTRLB_COMP_SHIFT 1
+#define MCDE_DITCTRLB_COMP_MASK 0x00000002
+#define MCDE_DITCTRLB_COMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, COMP, __x)
+#define MCDE_DITCTRLB_MODE_SHIFT 2
+#define MCDE_DITCTRLB_MODE_MASK 0x0000000C
+#define MCDE_DITCTRLB_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, MODE, __x)
+#define MCDE_DITCTRLB_MASK_SHIFT 4
+#define MCDE_DITCTRLB_MASK_MASK 0x00000010
+#define MCDE_DITCTRLB_MASK(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, MASK, __x)
+#define MCDE_DITCTRLB_FOFFX_SHIFT 5
+#define MCDE_DITCTRLB_FOFFX_MASK 0x000003E0
+#define MCDE_DITCTRLB_FOFFX(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, FOFFX, __x)
+#define MCDE_DITCTRLB_FOFFY_SHIFT 10
+#define MCDE_DITCTRLB_FOFFY_MASK 0x00007C00
+#define MCDE_DITCTRLB_FOFFY(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, FOFFY, __x)
+#define MCDE_DITOFFA 0x00000868
+#define MCDE_DITOFFA_GROUPOFFSET 0x200
+#define MCDE_DITOFFA_XG_SHIFT 0
+#define MCDE_DITOFFA_XG_MASK 0x0000001F
+#define MCDE_DITOFFA_XG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, XG, __x)
+#define MCDE_DITOFFA_YG_SHIFT 8
+#define MCDE_DITOFFA_YG_MASK 0x00001F00
+#define MCDE_DITOFFA_YG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, YG, __x)
+#define MCDE_DITOFFA_XB_SHIFT 16
+#define MCDE_DITOFFA_XB_MASK 0x001F0000
+#define MCDE_DITOFFA_XB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, XB, __x)
+#define MCDE_DITOFFA_YB_SHIFT 24
+#define MCDE_DITOFFA_YB_MASK 0x1F000000
+#define MCDE_DITOFFA_YB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, YB, __x)
+#define MCDE_DITOFFB 0x00000A68
+#define MCDE_DITOFFB_XG_SHIFT 0
+#define MCDE_DITOFFB_XG_MASK 0x0000001F
+#define MCDE_DITOFFB_XG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, XG, __x)
+#define MCDE_DITOFFB_YG_SHIFT 8
+#define MCDE_DITOFFB_YG_MASK 0x00001F00
+#define MCDE_DITOFFB_YG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, YG, __x)
+#define MCDE_DITOFFB_XB_SHIFT 16
+#define MCDE_DITOFFB_XB_MASK 0x001F0000
+#define MCDE_DITOFFB_XB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, XB, __x)
+#define MCDE_DITOFFB_YB_SHIFT 24
+#define MCDE_DITOFFB_YB_MASK 0x1F000000
+#define MCDE_DITOFFB_YB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, YB, __x)
+#define MCDE_PAL0A 0x0000086C
+#define MCDE_PAL0A_GROUPOFFSET 0x200
+#define MCDE_PAL0A_BLUE_SHIFT 0
+#define MCDE_PAL0A_BLUE_MASK 0x00000FFF
+#define MCDE_PAL0A_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_PAL0A, BLUE, __x)
+#define MCDE_PAL0A_GREEN_SHIFT 16
+#define MCDE_PAL0A_GREEN_MASK 0x0FFF0000
+#define MCDE_PAL0A_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_PAL0A, GREEN, __x)
+#define MCDE_PAL0B 0x00000A6C
+#define MCDE_PAL0B_BLUE_SHIFT 0
+#define MCDE_PAL0B_BLUE_MASK 0x00000FFF
+#define MCDE_PAL0B_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_PAL0B, BLUE, __x)
+#define MCDE_PAL0B_GREEN_SHIFT 16
+#define MCDE_PAL0B_GREEN_MASK 0x0FFF0000
+#define MCDE_PAL0B_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_PAL0B, GREEN, __x)
+#define MCDE_PAL1A 0x00000870
+#define MCDE_PAL1A_GROUPOFFSET 0x200
+#define MCDE_PAL1A_RED_SHIFT 0
+#define MCDE_PAL1A_RED_MASK 0x00000FFF
+#define MCDE_PAL1A_RED(__x) \
+ MCDE_VAL2REG(MCDE_PAL1A, RED, __x)
+#define MCDE_PAL1B 0x00000A70
+#define MCDE_PAL1B_RED_SHIFT 0
+#define MCDE_PAL1B_RED_MASK 0x00000FFF
+#define MCDE_PAL1B_RED(__x) \
+ MCDE_VAL2REG(MCDE_PAL1B, RED, __x)
+#define MCDE_ROTADD0A 0x00000874
+#define MCDE_ROTADD0A_GROUPOFFSET 0x200
+#define MCDE_ROTADD0A_ROTADD0_SHIFT 3
+#define MCDE_ROTADD0A_ROTADD0_MASK 0xFFFFFFF8
+#define MCDE_ROTADD0A_ROTADD0(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD0A, ROTADD0, __x)
+#define MCDE_ROTADD0B 0x00000A74
+#define MCDE_ROTADD0B_ROTADD0_SHIFT 3
+#define MCDE_ROTADD0B_ROTADD0_MASK 0xFFFFFFF8
+#define MCDE_ROTADD0B_ROTADD0(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD0B, ROTADD0, __x)
+#define MCDE_ROTADD1A 0x00000878
+#define MCDE_ROTADD1A_GROUPOFFSET 0x200
+#define MCDE_ROTADD1A_ROTADD1_SHIFT 3
+#define MCDE_ROTADD1A_ROTADD1_MASK 0xFFFFFFF8
+#define MCDE_ROTADD1A_ROTADD1(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD1A, ROTADD1, __x)
+#define MCDE_ROTADD1B 0x00000A78
+#define MCDE_ROTADD1B_ROTADD1_SHIFT 3
+#define MCDE_ROTADD1B_ROTADD1_MASK 0xFFFFFFF8
+#define MCDE_ROTADD1B_ROTADD1(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD1B, ROTADD1, __x)
+#define MCDE_ROTACONF 0x0000087C
+#define MCDE_ROTACONF_GROUPOFFSET 0x200
+#define MCDE_ROTACONF_ROTBURSTSIZE_SHIFT 0
+#define MCDE_ROTACONF_ROTBURSTSIZE_MASK 0x00000007
+#define MCDE_ROTACONF_ROTBURSTSIZE_1W 0
+#define MCDE_ROTACONF_ROTBURSTSIZE_2W 1
+#define MCDE_ROTACONF_ROTBURSTSIZE_4W 2
+#define MCDE_ROTACONF_ROTBURSTSIZE_8W 3
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW_1W 4
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW_2W 5
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW_4W 6
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW_8W 7
+#define MCDE_ROTACONF_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTBURSTSIZE, \
+ MCDE_ROTACONF_ROTBURSTSIZE_##__x)
+#define MCDE_ROTACONF_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTBURSTSIZE, __x)
+#define MCDE_ROTACONF_ROTDIR_SHIFT 3
+#define MCDE_ROTACONF_ROTDIR_MASK 0x00000008
+#define MCDE_ROTACONF_ROTDIR_CCW 0
+#define MCDE_ROTACONF_ROTDIR_CW 1
+#define MCDE_ROTACONF_ROTDIR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTDIR, MCDE_ROTACONF_ROTDIR_##__x)
+#define MCDE_ROTACONF_ROTDIR(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTDIR, __x)
+#define MCDE_ROTACONF_WR_MAXOUT_SHIFT 4
+#define MCDE_ROTACONF_WR_MAXOUT_MASK 0x00000030
+#define MCDE_ROTACONF_WR_MAXOUT_1_REQ 0
+#define MCDE_ROTACONF_WR_MAXOUT_2_REQ 1
+#define MCDE_ROTACONF_WR_MAXOUT_4_REQ 2
+#define MCDE_ROTACONF_WR_MAXOUT_8_REQ 3
+#define MCDE_ROTACONF_WR_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, WR_MAXOUT, MCDE_ROTACONF_WR_MAXOUT_##__x)
+#define MCDE_ROTACONF_WR_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, WR_MAXOUT, __x)
+#define MCDE_ROTACONF_RD_MAXOUT_SHIFT 6
+#define MCDE_ROTACONF_RD_MAXOUT_MASK 0x000000C0
+#define MCDE_ROTACONF_RD_MAXOUT_1_REQ 0
+#define MCDE_ROTACONF_RD_MAXOUT_2_REQ 1
+#define MCDE_ROTACONF_RD_MAXOUT_4_REQ 2
+#define MCDE_ROTACONF_RD_MAXOUT_8_REQ 3
+#define MCDE_ROTACONF_RD_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, RD_MAXOUT, MCDE_ROTACONF_RD_MAXOUT_##__x)
+#define MCDE_ROTACONF_RD_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, RD_MAXOUT, __x)
+#define MCDE_ROTACONF_STRIP_WIDTH_SHIFT 8
+#define MCDE_ROTACONF_STRIP_WIDTH_MASK 0x00007F00
+#define MCDE_ROTACONF_STRIP_WIDTH_2PIX 0
+#define MCDE_ROTACONF_STRIP_WIDTH_4PIX 1
+#define MCDE_ROTACONF_STRIP_WIDTH_8PIX 2
+#define MCDE_ROTACONF_STRIP_WIDTH_16PIX 3
+#define MCDE_ROTACONF_STRIP_WIDTH_32PIX 4
+#define MCDE_ROTACONF_STRIP_WIDTH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, STRIP_WIDTH, \
+ MCDE_ROTACONF_STRIP_WIDTH_##__x)
+#define MCDE_ROTACONF_STRIP_WIDTH(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, STRIP_WIDTH, __x)
+#define MCDE_ROTACONF_SINGLE_BUF_SHIFT 15
+#define MCDE_ROTACONF_SINGLE_BUF_MASK 0x00008000
+#define MCDE_ROTACONF_SINGLE_BUF(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, SINGLE_BUF, __x)
+#define MCDE_ROTACONF_WR_ROPC_SHIFT 16
+#define MCDE_ROTACONF_WR_ROPC_MASK 0x00FF0000
+#define MCDE_ROTACONF_WR_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, WR_ROPC, __x)
+#define MCDE_ROTACONF_RD_ROPC_SHIFT 24
+#define MCDE_ROTACONF_RD_ROPC_MASK 0xFF000000
+#define MCDE_ROTACONF_RD_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, RD_ROPC, __x)
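Each register in this generated block follows the same pattern: a byte offset, per-field _SHIFT and _MASK constants, a raw setter macro, and for enumerated fields an _ENUM variant that token-pastes the named value before shifting. Below is a minimal sketch of how the MCDE_ROTACONF fields above might be combined; the local MCDE_VAL2REG definition is an assumption mirroring the usual shift-and-mask form (the real helper is defined earlier in this header), so treat it as an illustration rather than the driver's code.

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of the MCDE_VAL2REG() helper defined earlier in this
 * header: shift a field value into position and clip it to the field
 * mask. */
#ifndef MCDE_VAL2REG
#define MCDE_VAL2REG(__reg, __fld, __val) \
	(((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
#endif

int main(void)
{
	/* Compose a rotation configuration word: 8-word bursts,
	 * clockwise rotation, 32-pixel strips; unset fields stay 0. */
	uint32_t rotaconf = MCDE_ROTACONF_ROTBURSTSIZE_ENUM(8W) |
			    MCDE_ROTACONF_ROTDIR_ENUM(CW) |
			    MCDE_ROTACONF_STRIP_WIDTH_ENUM(32PIX);

	printf("MCDE_ROTACONF value: 0x%08x\n", rotaconf); /* 0x0000040b */
	return 0;
}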
+#define MCDE_ROTBCONF 0x00000A7C
+#define MCDE_ROTBCONF_ROTBURSTSIZE_SHIFT 0
+#define MCDE_ROTBCONF_ROTBURSTSIZE_MASK 0x00000007
+#define MCDE_ROTBCONF_ROTBURSTSIZE_1W 0
+#define MCDE_ROTBCONF_ROTBURSTSIZE_2W 1
+#define MCDE_ROTBCONF_ROTBURSTSIZE_4W 2
+#define MCDE_ROTBCONF_ROTBURSTSIZE_8W 3
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_1W 4
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_2W 5
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_4W 6
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_8W 7
+#define MCDE_ROTBCONF_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTBURSTSIZE, \
+ MCDE_ROTBCONF_ROTBURSTSIZE_##__x)
+#define MCDE_ROTBCONF_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTBURSTSIZE, __x)
+#define MCDE_ROTBCONF_ROTDIR_SHIFT 3
+#define MCDE_ROTBCONF_ROTDIR_MASK 0x00000008
+#define MCDE_ROTBCONF_ROTDIR_CCW 0
+#define MCDE_ROTBCONF_ROTDIR_CW 1
+#define MCDE_ROTBCONF_ROTDIR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTDIR, MCDE_ROTBCONF_ROTDIR_##__x)
+#define MCDE_ROTBCONF_ROTDIR(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTDIR, __x)
+#define MCDE_ROTBCONF_WR_MAXOUT_SHIFT 4
+#define MCDE_ROTBCONF_WR_MAXOUT_MASK 0x00000030
+#define MCDE_ROTBCONF_WR_MAXOUT_1_REQ 0
+#define MCDE_ROTBCONF_WR_MAXOUT_2_REQ 1
+#define MCDE_ROTBCONF_WR_MAXOUT_4_REQ 2
+#define MCDE_ROTBCONF_WR_MAXOUT_8_REQ 3
+#define MCDE_ROTBCONF_WR_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, WR_MAXOUT, MCDE_ROTBCONF_WR_MAXOUT_##__x)
+#define MCDE_ROTBCONF_WR_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, WR_MAXOUT, __x)
+#define MCDE_ROTBCONF_RD_MAXOUT_SHIFT 6
+#define MCDE_ROTBCONF_RD_MAXOUT_MASK 0x000000C0
+#define MCDE_ROTBCONF_RD_MAXOUT_1_REQ 0
+#define MCDE_ROTBCONF_RD_MAXOUT_2_REQ 1
+#define MCDE_ROTBCONF_RD_MAXOUT_4_REQ 2
+#define MCDE_ROTBCONF_RD_MAXOUT_8_REQ 3
+#define MCDE_ROTBCONF_RD_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, RD_MAXOUT, MCDE_ROTBCONF_RD_MAXOUT_##__x)
+#define MCDE_ROTBCONF_RD_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, RD_MAXOUT, __x)
+#define MCDE_ROTBCONF_STRIP_WIDTH_SHIFT 8
+#define MCDE_ROTBCONF_STRIP_WIDTH_MASK 0x00007F00
+#define MCDE_ROTBCONF_STRIP_WIDTH_2PIX 0
+#define MCDE_ROTBCONF_STRIP_WIDTH_4PIX 1
+#define MCDE_ROTBCONF_STRIP_WIDTH_8PIX 2
+#define MCDE_ROTBCONF_STRIP_WIDTH_16PIX 3
+#define MCDE_ROTBCONF_STRIP_WIDTH_32PIX 4
+#define MCDE_ROTBCONF_STRIP_WIDTH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, STRIP_WIDTH, \
+ MCDE_ROTBCONF_STRIP_WIDTH_##__x)
+#define MCDE_ROTBCONF_STRIP_WIDTH(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, STRIP_WIDTH, __x)
+#define MCDE_ROTBCONF_SINGLE_BUF_SHIFT 15
+#define MCDE_ROTBCONF_SINGLE_BUF_MASK 0x00008000
+#define MCDE_ROTBCONF_SINGLE_BUF(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, SINGLE_BUF, __x)
+#define MCDE_ROTBCONF_WR_ROPC_SHIFT 16
+#define MCDE_ROTBCONF_WR_ROPC_MASK 0x00FF0000
+#define MCDE_ROTBCONF_WR_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, WR_ROPC, __x)
+#define MCDE_ROTBCONF_RD_ROPC_SHIFT 24
+#define MCDE_ROTBCONF_RD_ROPC_MASK 0xFF000000
+#define MCDE_ROTBCONF_RD_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, RD_ROPC, __x)
+#define MCDE_SYNCHCONFA 0x00000880
+#define MCDE_SYNCHCONFA_GROUPOFFSET 0x200
+#define MCDE_SYNCHCONFA_HWREQVEVENT_SHIFT 0
+#define MCDE_SYNCHCONFA_HWREQVEVENT_MASK 0x00000003
+#define MCDE_SYNCHCONFA_HWREQVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFA_HWREQVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFA_HWREQVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFA_HWREQVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFA_HWREQVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVEVENT, \
+ MCDE_SYNCHCONFA_HWREQVEVENT_##__x)
+#define MCDE_SYNCHCONFA_HWREQVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVEVENT, __x)
+#define MCDE_SYNCHCONFA_HWREQVCNT_SHIFT 2
+#define MCDE_SYNCHCONFA_HWREQVCNT_MASK 0x0000FFFC
+#define MCDE_SYNCHCONFA_HWREQVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVCNT, __x)
+#define MCDE_SYNCHCONFA_SWINTVEVENT_SHIFT 16
+#define MCDE_SYNCHCONFA_SWINTVEVENT_MASK 0x00030000
+#define MCDE_SYNCHCONFA_SWINTVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFA_SWINTVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFA_SWINTVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFA_SWINTVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFA_SWINTVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVEVENT, \
+ MCDE_SYNCHCONFA_SWINTVEVENT_##__x)
+#define MCDE_SYNCHCONFA_SWINTVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVEVENT, __x)
+#define MCDE_SYNCHCONFA_SWINTVCNT_SHIFT 18
+#define MCDE_SYNCHCONFA_SWINTVCNT_MASK 0xFFFC0000
+#define MCDE_SYNCHCONFA_SWINTVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVCNT, __x)
+#define MCDE_SYNCHCONFB 0x00000A80
+#define MCDE_SYNCHCONFB_HWREQVEVENT_SHIFT 0
+#define MCDE_SYNCHCONFB_HWREQVEVENT_MASK 0x00000003
+#define MCDE_SYNCHCONFB_HWREQVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFB_HWREQVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFB_HWREQVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFB_HWREQVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFB_HWREQVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVEVENT, \
+ MCDE_SYNCHCONFB_HWREQVEVENT_##__x)
+#define MCDE_SYNCHCONFB_HWREQVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVEVENT, __x)
+#define MCDE_SYNCHCONFB_HWREQVCNT_SHIFT 2
+#define MCDE_SYNCHCONFB_HWREQVCNT_MASK 0x0000FFFC
+#define MCDE_SYNCHCONFB_HWREQVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVCNT, __x)
+#define MCDE_SYNCHCONFB_SWINTVEVENT_SHIFT 16
+#define MCDE_SYNCHCONFB_SWINTVEVENT_MASK 0x00030000
+#define MCDE_SYNCHCONFB_SWINTVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFB_SWINTVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFB_SWINTVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFB_SWINTVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFB_SWINTVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVEVENT, \
+ MCDE_SYNCHCONFB_SWINTVEVENT_##__x)
+#define MCDE_SYNCHCONFB_SWINTVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVEVENT, __x)
+#define MCDE_SYNCHCONFB_SWINTVCNT_SHIFT 18
+#define MCDE_SYNCHCONFB_SWINTVCNT_MASK 0xFFFC0000
+#define MCDE_SYNCHCONFB_SWINTVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVCNT, __x)
+#define MCDE_CTRLA 0x00000884
+#define MCDE_CTRLA_GROUPOFFSET 0x200
+#define MCDE_CTRLA_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLA_FIFOWTRMRK_MASK 0x000003FF
+#define MCDE_CTRLA_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FIFOWTRMRK, __x)
+#define MCDE_CTRLA_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLA_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLA_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FIFOEMPTY, __x)
+#define MCDE_CTRLA_FIFOFULL_SHIFT 13
+#define MCDE_CTRLA_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLA_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FIFOFULL, __x)
+#define MCDE_CTRLA_FORMID_SHIFT 16
+#define MCDE_CTRLA_FORMID_MASK 0x00070000
+#define MCDE_CTRLA_FORMID_DSI0VID 0
+#define MCDE_CTRLA_FORMID_DSI0CMD 1
+#define MCDE_CTRLA_FORMID_DSI1VID 2
+#define MCDE_CTRLA_FORMID_DSI1CMD 3
+#define MCDE_CTRLA_FORMID_DSI2VID 4
+#define MCDE_CTRLA_FORMID_DSI2CMD 5
+#define MCDE_CTRLA_FORMID_DPIA 0
+#define MCDE_CTRLA_FORMID_DPIB 1
+#define MCDE_CTRLA_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMID, MCDE_CTRLA_FORMID_##__x)
+#define MCDE_CTRLA_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMID, __x)
+#define MCDE_CTRLA_FORMTYPE_SHIFT 20
+#define MCDE_CTRLA_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLA_FORMTYPE_DPITV 0
+#define MCDE_CTRLA_FORMTYPE_DBI 1
+#define MCDE_CTRLA_FORMTYPE_DSI 2
+#define MCDE_CTRLA_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMTYPE, MCDE_CTRLA_FORMTYPE_##__x)
+#define MCDE_CTRLA_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMTYPE, __x)
+#define MCDE_CTRLB 0x00000A84
+#define MCDE_CTRLB_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLB_FIFOWTRMRK_MASK 0x000003FF
+#define MCDE_CTRLB_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FIFOWTRMRK, __x)
+#define MCDE_CTRLB_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLB_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLB_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FIFOEMPTY, __x)
+#define MCDE_CTRLB_FIFOFULL_SHIFT 13
+#define MCDE_CTRLB_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLB_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FIFOFULL, __x)
+#define MCDE_CTRLB_FORMID_SHIFT 16
+#define MCDE_CTRLB_FORMID_MASK 0x00070000
+#define MCDE_CTRLB_FORMID_DSI0VID 0
+#define MCDE_CTRLB_FORMID_DSI0CMD 1
+#define MCDE_CTRLB_FORMID_DSI1VID 2
+#define MCDE_CTRLB_FORMID_DSI1CMD 3
+#define MCDE_CTRLB_FORMID_DSI2VID 4
+#define MCDE_CTRLB_FORMID_DSI2CMD 5
+#define MCDE_CTRLB_FORMID_DPIA 0
+#define MCDE_CTRLB_FORMID_DPIB 1
+#define MCDE_CTRLB_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMID, MCDE_CTRLB_FORMID_##__x)
+#define MCDE_CTRLB_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMID, __x)
+#define MCDE_CTRLB_FORMTYPE_SHIFT 20
+#define MCDE_CTRLB_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLB_FORMTYPE_DPITV 0
+#define MCDE_CTRLB_FORMTYPE_DBI 1
+#define MCDE_CTRLB_FORMTYPE_DSI 2
+#define MCDE_CTRLB_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMTYPE, MCDE_CTRLB_FORMTYPE_##__x)
+#define MCDE_CTRLB_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMTYPE, __x)
+#define MCDE_GAM0A 0x00000888
+#define MCDE_GAM0A_GROUPOFFSET 0x200
+#define MCDE_GAM0A_BLUE_SHIFT 0
+#define MCDE_GAM0A_BLUE_MASK 0x00FFFFFF
+#define MCDE_GAM0A_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_GAM0A, BLUE, __x)
+#define MCDE_GAM0B 0x00000A88
+#define MCDE_GAM0B_BLUE_SHIFT 0
+#define MCDE_GAM0B_BLUE_MASK 0x00FFFFFF
+#define MCDE_GAM0B_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_GAM0B, BLUE, __x)
+#define MCDE_GAM1A 0x0000088C
+#define MCDE_GAM1A_GROUPOFFSET 0x200
+#define MCDE_GAM1A_GREEN_SHIFT 0
+#define MCDE_GAM1A_GREEN_MASK 0x00FFFFFF
+#define MCDE_GAM1A_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_GAM1A, GREEN, __x)
+#define MCDE_GAM1B 0x00000A8C
+#define MCDE_GAM1B_GREEN_SHIFT 0
+#define MCDE_GAM1B_GREEN_MASK 0x00FFFFFF
+#define MCDE_GAM1B_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_GAM1B, GREEN, __x)
+#define MCDE_GAM2A 0x00000890
+#define MCDE_GAM2A_GROUPOFFSET 0x200
+#define MCDE_GAM2A_RED_SHIFT 0
+#define MCDE_GAM2A_RED_MASK 0x00FFFFFF
+#define MCDE_GAM2A_RED(__x) \
+ MCDE_VAL2REG(MCDE_GAM2A, RED, __x)
+#define MCDE_GAM2B 0x00000A90
+#define MCDE_GAM2B_RED_SHIFT 0
+#define MCDE_GAM2B_RED_MASK 0x00FFFFFF
+#define MCDE_GAM2B_RED(__x) \
+ MCDE_VAL2REG(MCDE_GAM2B, RED, __x)
+#define MCDE_OLEDCONV1A 0x00000894
+#define MCDE_OLEDCONV1A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV1A_ALPHA_RED_SHIFT 0
+#define MCDE_OLEDCONV1A_ALPHA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV1A_ALPHA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1A, ALPHA_RED, __x)
+#define MCDE_OLEDCONV1A_ALPHA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV1A_ALPHA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV1A_ALPHA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1A, ALPHA_GREEN, __x)
+#define MCDE_OLEDCONV1B 0x00000A94
+#define MCDE_OLEDCONV1B_ALPHA_RED_SHIFT 0
+#define MCDE_OLEDCONV1B_ALPHA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV1B_ALPHA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1B, ALPHA_RED, __x)
+#define MCDE_OLEDCONV1B_ALPHA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV1B_ALPHA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV1B_ALPHA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1B, ALPHA_GREEN, __x)
+#define MCDE_OLEDCONV2A 0x00000898
+#define MCDE_OLEDCONV2A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV2A_ALPHA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV2A_ALPHA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV2A_ALPHA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2A, ALPHA_BLUE, __x)
+#define MCDE_OLEDCONV2A_BETA_RED_SHIFT 16
+#define MCDE_OLEDCONV2A_BETA_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV2A_BETA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2A, BETA_RED, __x)
+#define MCDE_OLEDCONV2B 0x00000A98
+#define MCDE_OLEDCONV2B_ALPHA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV2B_ALPHA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV2B_ALPHA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2B, ALPHA_BLUE, __x)
+#define MCDE_OLEDCONV2B_BETA_RED_SHIFT 16
+#define MCDE_OLEDCONV2B_BETA_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV2B_BETA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2B, BETA_RED, __x)
+#define MCDE_OLEDCONV3A 0x0000089C
+#define MCDE_OLEDCONV3A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV3A_BETA_GREEN_SHIFT 0
+#define MCDE_OLEDCONV3A_BETA_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV3A_BETA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3A, BETA_GREEN, __x)
+#define MCDE_OLEDCONV3A_BETA_BLUE_SHIFT 16
+#define MCDE_OLEDCONV3A_BETA_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV3A_BETA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3A, BETA_BLUE, __x)
+#define MCDE_OLEDCONV3B 0x00000A9C
+#define MCDE_OLEDCONV3B_BETA_GREEN_SHIFT 0
+#define MCDE_OLEDCONV3B_BETA_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV3B_BETA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3B, BETA_GREEN, __x)
+#define MCDE_OLEDCONV3B_BETA_BLUE_SHIFT 16
+#define MCDE_OLEDCONV3B_BETA_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV3B_BETA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3B, BETA_BLUE, __x)
+#define MCDE_OLEDCONV4A 0x000008A0
+#define MCDE_OLEDCONV4A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV4A_GAMMA_RED_SHIFT 0
+#define MCDE_OLEDCONV4A_GAMMA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV4A_GAMMA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4A, GAMMA_RED, __x)
+#define MCDE_OLEDCONV4A_GAMMA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV4A_GAMMA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV4A_GAMMA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4A, GAMMA_GREEN, __x)
+#define MCDE_OLEDCONV4B 0x00000AA0
+#define MCDE_OLEDCONV4B_GAMMA_RED_SHIFT 0
+#define MCDE_OLEDCONV4B_GAMMA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV4B_GAMMA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4B, GAMMA_RED, __x)
+#define MCDE_OLEDCONV4B_GAMMA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV4B_GAMMA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV4B_GAMMA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4B, GAMMA_GREEN, __x)
+#define MCDE_OLEDCONV5A 0x000008A4
+#define MCDE_OLEDCONV5A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV5A_GAMMA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV5A_GAMMA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV5A_GAMMA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5A, GAMMA_BLUE, __x)
+#define MCDE_OLEDCONV5A_OFF_RED_SHIFT 16
+#define MCDE_OLEDCONV5A_OFF_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV5A_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5A, OFF_RED, __x)
+#define MCDE_OLEDCONV5B 0x00000AA4
+#define MCDE_OLEDCONV5B_GAMMA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV5B_GAMMA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV5B_GAMMA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5B, GAMMA_BLUE, __x)
+#define MCDE_OLEDCONV5B_OFF_RED_SHIFT 16
+#define MCDE_OLEDCONV5B_OFF_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV5B_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5B, OFF_RED, __x)
+#define MCDE_OLEDCONV6A 0x000008A8
+#define MCDE_OLEDCONV6A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV6A_OFF_GREEN_SHIFT 0
+#define MCDE_OLEDCONV6A_OFF_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV6A_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6A, OFF_GREEN, __x)
+#define MCDE_OLEDCONV6A_OFF_BLUE_SHIFT 16
+#define MCDE_OLEDCONV6A_OFF_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV6A_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6A, OFF_BLUE, __x)
+#define MCDE_OLEDCONV6B 0x00000AA8
+#define MCDE_OLEDCONV6B_OFF_GREEN_SHIFT 0
+#define MCDE_OLEDCONV6B_OFF_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV6B_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6B, OFF_GREEN, __x)
+#define MCDE_OLEDCONV6B_OFF_BLUE_SHIFT 16
+#define MCDE_OLEDCONV6B_OFF_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV6B_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6B, OFF_BLUE, __x)
+#define MCDE_CRC 0x00000C00
+#define MCDE_CRC_C1EN_SHIFT 2
+#define MCDE_CRC_C1EN_MASK 0x00000004
+#define MCDE_CRC_C1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, C1EN, __x)
+#define MCDE_CRC_C2EN_SHIFT 3
+#define MCDE_CRC_C2EN_MASK 0x00000008
+#define MCDE_CRC_C2EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, C2EN, __x)
+#define MCDE_CRC_SYCEN0_SHIFT 7
+#define MCDE_CRC_SYCEN0_MASK 0x00000080
+#define MCDE_CRC_SYCEN0(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYCEN0, __x)
+#define MCDE_CRC_SYCEN1_SHIFT 8
+#define MCDE_CRC_SYCEN1_MASK 0x00000100
+#define MCDE_CRC_SYCEN1(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYCEN1, __x)
+#define MCDE_CRC_SIZE1_SHIFT 9
+#define MCDE_CRC_SIZE1_MASK 0x00000200
+#define MCDE_CRC_SIZE1(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SIZE1, __x)
+#define MCDE_CRC_SIZE2_SHIFT 10
+#define MCDE_CRC_SIZE2_MASK 0x00000400
+#define MCDE_CRC_SIZE2(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SIZE2, __x)
+#define MCDE_CRC_YUVCONVC1EN_SHIFT 15
+#define MCDE_CRC_YUVCONVC1EN_MASK 0x00008000
+#define MCDE_CRC_YUVCONVC1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, YUVCONVC1EN, __x)
+#define MCDE_CRC_CS1EN_SHIFT 16
+#define MCDE_CRC_CS1EN_MASK 0x00010000
+#define MCDE_CRC_CS1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS1EN, __x)
+#define MCDE_CRC_CS2EN_SHIFT 17
+#define MCDE_CRC_CS2EN_MASK 0x00020000
+#define MCDE_CRC_CS2EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS2EN, __x)
+#define MCDE_CRC_CS1POL_SHIFT 19
+#define MCDE_CRC_CS1POL_MASK 0x00080000
+#define MCDE_CRC_CS1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS1POL, __x)
+#define MCDE_CRC_CS2POL_SHIFT 20
+#define MCDE_CRC_CS2POL_MASK 0x00100000
+#define MCDE_CRC_CS2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS2POL, __x)
+#define MCDE_CRC_CD1POL_SHIFT 21
+#define MCDE_CRC_CD1POL_MASK 0x00200000
+#define MCDE_CRC_CD1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CD1POL, __x)
+#define MCDE_CRC_CD2POL_SHIFT 22
+#define MCDE_CRC_CD2POL_MASK 0x00400000
+#define MCDE_CRC_CD2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CD2POL, __x)
+#define MCDE_CRC_WR1POL_SHIFT 23
+#define MCDE_CRC_WR1POL_MASK 0x00800000
+#define MCDE_CRC_WR1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, WR1POL, __x)
+#define MCDE_CRC_WR2POL_SHIFT 24
+#define MCDE_CRC_WR2POL_MASK 0x01000000
+#define MCDE_CRC_WR2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, WR2POL, __x)
+#define MCDE_CRC_RD1POL_SHIFT 25
+#define MCDE_CRC_RD1POL_MASK 0x02000000
+#define MCDE_CRC_RD1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, RD1POL, __x)
+#define MCDE_CRC_RD2POL_SHIFT 26
+#define MCDE_CRC_RD2POL_MASK 0x04000000
+#define MCDE_CRC_RD2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, RD2POL, __x)
+#define MCDE_CRC_SYNCCTRL_SHIFT 29
+#define MCDE_CRC_SYNCCTRL_MASK 0x60000000
+#define MCDE_CRC_SYNCCTRL_NO_SYNC 0
+#define MCDE_CRC_SYNCCTRL_DBI0 1
+#define MCDE_CRC_SYNCCTRL_DBI1 2
+#define MCDE_CRC_SYNCCTRL_PING_PONG 3
+#define MCDE_CRC_SYNCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYNCCTRL, MCDE_CRC_SYNCCTRL_##__x)
+#define MCDE_CRC_SYNCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYNCCTRL, __x)
+#define MCDE_CRC_CLAMPC1EN_SHIFT 31
+#define MCDE_CRC_CLAMPC1EN_MASK 0x80000000
+#define MCDE_CRC_CLAMPC1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CLAMPC1EN, __x)
+#define MCDE_PBCCRC0 0x00000C04
+#define MCDE_PBCCRC0_GROUPOFFSET 0x4
+#define MCDE_PBCCRC0_BSCM_SHIFT 0
+#define MCDE_PBCCRC0_BSCM_MASK 0x00000007
+#define MCDE_PBCCRC0_BSCM_1_8BIT 0
+#define MCDE_PBCCRC0_BSCM_2_8BIT 1
+#define MCDE_PBCCRC0_BSCM_3_8BIT 2
+#define MCDE_PBCCRC0_BSCM_1_16BIT 3
+#define MCDE_PBCCRC0_BSCM_2_16BIT 4
+#define MCDE_PBCCRC0_BSCM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSCM, MCDE_PBCCRC0_BSCM_##__x)
+#define MCDE_PBCCRC0_BSCM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSCM, __x)
+#define MCDE_PBCCRC0_BSDM_SHIFT 3
+#define MCDE_PBCCRC0_BSDM_MASK 0x00000038
+#define MCDE_PBCCRC0_BSDM_1_8BIT 0
+#define MCDE_PBCCRC0_BSDM_2_8BIT 1
+#define MCDE_PBCCRC0_BSDM_3_8BIT 2
+#define MCDE_PBCCRC0_BSDM_1_16BIT 3
+#define MCDE_PBCCRC0_BSDM_2_16BIT 4
+#define MCDE_PBCCRC0_BSDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSDM, MCDE_PBCCRC0_BSDM_##__x)
+#define MCDE_PBCCRC0_BSDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSDM, __x)
+#define MCDE_PBCCRC0_PDM_SHIFT 6
+#define MCDE_PBCCRC0_PDM_MASK 0x000000C0
+#define MCDE_PBCCRC0_PDM_NORMAL 0
+#define MCDE_PBCCRC0_PDM_16_TO_32 1
+#define MCDE_PBCCRC0_PDM_24_TO_32_RIGHT 2
+#define MCDE_PBCCRC0_PDM_24_TO_32_LEFT 3
+#define MCDE_PBCCRC0_PDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, PDM, MCDE_PBCCRC0_PDM_##__x)
+#define MCDE_PBCCRC0_PDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, PDM, __x)
+#define MCDE_PBCCRC0_PDCTRL_SHIFT 12
+#define MCDE_PBCCRC0_PDCTRL_MASK 0x00001000
+#define MCDE_PBCCRC0_PDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, PDCTRL, __x)
+#define MCDE_PBCCRC0_BPP_SHIFT 13
+#define MCDE_PBCCRC0_BPP_MASK 0x0000E000
+#define MCDE_PBCCRC0_BPP_8BPP 0
+#define MCDE_PBCCRC0_BPP_12BPP 1
+#define MCDE_PBCCRC0_BPP_15BPP 2
+#define MCDE_PBCCRC0_BPP_16BPP 3
+#define MCDE_PBCCRC0_BPP_18BPP 4
+#define MCDE_PBCCRC0_BPP_24BPP 5
+#define MCDE_PBCCRC0_BPP(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BPP, __x)
+#define MCDE_PBCCRC1 0x00000C08
+#define MCDE_PBCCRC1_BSCM_SHIFT 0
+#define MCDE_PBCCRC1_BSCM_MASK 0x00000007
+#define MCDE_PBCCRC1_BSCM_1_8BIT 0
+#define MCDE_PBCCRC1_BSCM_2_8BIT 1
+#define MCDE_PBCCRC1_BSCM_3_8BIT 2
+#define MCDE_PBCCRC1_BSCM_1_16BIT 3
+#define MCDE_PBCCRC1_BSCM_2_16BIT 4
+#define MCDE_PBCCRC1_BSCM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSCM, MCDE_PBCCRC1_BSCM_##__x)
+#define MCDE_PBCCRC1_BSCM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSCM, __x)
+#define MCDE_PBCCRC1_BSDM_SHIFT 3
+#define MCDE_PBCCRC1_BSDM_MASK 0x00000038
+#define MCDE_PBCCRC1_BSDM_1_8BIT 0
+#define MCDE_PBCCRC1_BSDM_2_8BIT 1
+#define MCDE_PBCCRC1_BSDM_3_8BIT 2
+#define MCDE_PBCCRC1_BSDM_1_16BIT 3
+#define MCDE_PBCCRC1_BSDM_2_16BIT 4
+#define MCDE_PBCCRC1_BSDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSDM, MCDE_PBCCRC1_BSDM_##__x)
+#define MCDE_PBCCRC1_BSDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSDM, __x)
+#define MCDE_PBCCRC1_PDM_SHIFT 6
+#define MCDE_PBCCRC1_PDM_MASK 0x000000C0
+#define MCDE_PBCCRC1_PDM_NORMAL 0
+#define MCDE_PBCCRC1_PDM_16_TO_32 1
+#define MCDE_PBCCRC1_PDM_24_TO_32_RIGHT 2
+#define MCDE_PBCCRC1_PDM_24_TO_32_LEFT 3
+#define MCDE_PBCCRC1_PDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, PDM, MCDE_PBCCRC1_PDM_##__x)
+#define MCDE_PBCCRC1_PDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, PDM, __x)
+#define MCDE_PBCCRC1_PDCTRL_SHIFT 12
+#define MCDE_PBCCRC1_PDCTRL_MASK 0x00001000
+#define MCDE_PBCCRC1_PDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, PDCTRL, __x)
+#define MCDE_PBCCRC1_BPP_SHIFT 13
+#define MCDE_PBCCRC1_BPP_MASK 0x0000E000
+#define MCDE_PBCCRC1_BPP_8BPP 0
+#define MCDE_PBCCRC1_BPP_12BPP 1
+#define MCDE_PBCCRC1_BPP_15BPP 2
+#define MCDE_PBCCRC1_BPP_16BPP 3
+#define MCDE_PBCCRC1_BPP_18BPP 4
+#define MCDE_PBCCRC1_BPP_24BPP 5
+#define MCDE_PBCCRC1_BPP(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BPP, __x)
+#define MCDE_PBCBMRC00 0x00000C0C
+#define MCDE_PBCBMRC00_GROUPOFFSET 0x4
+#define MCDE_PBCBMRC00_MUXI_SHIFT 0
+#define MCDE_PBCBMRC00_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC00_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC00, MUXI, __x)
+#define MCDE_PBCBMRC01 0x00000C10
+#define MCDE_PBCBMRC01_MUXI_SHIFT 0
+#define MCDE_PBCBMRC01_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC01_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC01, MUXI, __x)
+#define MCDE_PBCBMRC02 0x00000C14
+#define MCDE_PBCBMRC02_MUXI_SHIFT 0
+#define MCDE_PBCBMRC02_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC02_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC02, MUXI, __x)
+#define MCDE_PBCBMRC03 0x00000C18
+#define MCDE_PBCBMRC03_MUXI_SHIFT 0
+#define MCDE_PBCBMRC03_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC03_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC03, MUXI, __x)
+#define MCDE_PBCBMRC04 0x00000C1C
+#define MCDE_PBCBMRC04_MUXI_SHIFT 0
+#define MCDE_PBCBMRC04_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC04_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC04, MUXI, __x)
+#define MCDE_PBCBMRC10 0x00000C20
+#define MCDE_PBCBMRC10_MUXI_SHIFT 0
+#define MCDE_PBCBMRC10_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC10_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC10, MUXI, __x)
+#define MCDE_PBCBMRC11 0x00000C24
+#define MCDE_PBCBMRC11_MUXI_SHIFT 0
+#define MCDE_PBCBMRC11_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC11_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC11, MUXI, __x)
+#define MCDE_PBCBMRC12 0x00000C28
+#define MCDE_PBCBMRC12_MUXI_SHIFT 0
+#define MCDE_PBCBMRC12_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC12_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC12, MUXI, __x)
+#define MCDE_PBCBMRC13 0x00000C2C
+#define MCDE_PBCBMRC13_MUXI_SHIFT 0
+#define MCDE_PBCBMRC13_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC13_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC13, MUXI, __x)
+#define MCDE_PBCBMRC14 0x00000C30
+#define MCDE_PBCBMRC14_MUXI_SHIFT 0
+#define MCDE_PBCBMRC14_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC14_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC14, MUXI, __x)
+#define MCDE_PBCBCRC00 0x00000C34
+#define MCDE_PBCBCRC00_GROUPOFFSET 0x4
+#define MCDE_PBCBCRC00_CTLI_SHIFT 0
+#define MCDE_PBCBCRC00_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC00_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC00, CTLI, __x)
+#define MCDE_PBCBCRC10 0x00000C38
+#define MCDE_PBCBCRC10_CTLI_SHIFT 0
+#define MCDE_PBCBCRC10_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC10_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC10, CTLI, __x)
+#define MCDE_PBCBCRC01 0x00000C48
+#define MCDE_PBCBCRC01_GROUPOFFSET 0x4
+#define MCDE_PBCBCRC01_CTLI_SHIFT 0
+#define MCDE_PBCBCRC01_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC01_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC01, CTLI, __x)
+#define MCDE_PBCBCRC11 0x00000C4C
+#define MCDE_PBCBCRC11_CTLI_SHIFT 0
+#define MCDE_PBCBCRC11_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC11_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC11, CTLI, __x)
+#define MCDE_VSCRC0 0x00000C5C
+#define MCDE_VSCRC0_GROUPOFFSET 0x4
+#define MCDE_VSCRC0_VSPMIN_SHIFT 0
+#define MCDE_VSCRC0_VSPMIN_MASK 0x00000FFF
+#define MCDE_VSCRC0_VSPMIN(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPMIN, __x)
+#define MCDE_VSCRC0_VSPMAX_SHIFT 12
+#define MCDE_VSCRC0_VSPMAX_MASK 0x00FFF000
+#define MCDE_VSCRC0_VSPMAX(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPMAX, __x)
+#define MCDE_VSCRC0_VSPDIV_SHIFT 24
+#define MCDE_VSCRC0_VSPDIV_MASK 0x07000000
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_1 0
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_2 1
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_4 2
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_8 3
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_16 4
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_32 5
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_64 6
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_128 7
+#define MCDE_VSCRC0_VSPDIV_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPDIV, MCDE_VSCRC0_VSPDIV_##__x)
+#define MCDE_VSCRC0_VSPDIV(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPDIV, __x)
+#define MCDE_VSCRC0_VSPOL_SHIFT 27
+#define MCDE_VSCRC0_VSPOL_MASK 0x08000000
+#define MCDE_VSCRC0_VSPOL_ACTIVE_HIGH 0
+#define MCDE_VSCRC0_VSPOL_ACTIVE_LOW 1
+#define MCDE_VSCRC0_VSPOL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPOL, MCDE_VSCRC0_VSPOL_##__x)
+#define MCDE_VSCRC0_VSPOL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPOL, __x)
+#define MCDE_VSCRC0_VSSEL_SHIFT 28
+#define MCDE_VSCRC0_VSSEL_MASK 0x10000000
+#define MCDE_VSCRC0_VSSEL_VSYNC0 0
+#define MCDE_VSCRC0_VSSEL_VSYNC1 1
+#define MCDE_VSCRC0_VSSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSSEL, MCDE_VSCRC0_VSSEL_##__x)
+#define MCDE_VSCRC0_VSSEL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSSEL, __x)
+#define MCDE_VSCRC0_VSDBL_SHIFT 29
+#define MCDE_VSCRC0_VSDBL_MASK 0xE0000000
+#define MCDE_VSCRC0_VSDBL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSDBL, __x)
+#define MCDE_VSCRC1 0x00000C60
+#define MCDE_VSCRC1_VSPMIN_SHIFT 0
+#define MCDE_VSCRC1_VSPMIN_MASK 0x00000FFF
+#define MCDE_VSCRC1_VSPMIN(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPMIN, __x)
+#define MCDE_VSCRC1_VSPMAX_SHIFT 12
+#define MCDE_VSCRC1_VSPMAX_MASK 0x00FFF000
+#define MCDE_VSCRC1_VSPMAX(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPMAX, __x)
+#define MCDE_VSCRC1_VSPDIV_SHIFT 24
+#define MCDE_VSCRC1_VSPDIV_MASK 0x07000000
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_1 0
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_2 1
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_4 2
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_8 3
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_16 4
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_32 5
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_64 6
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_128 7
+#define MCDE_VSCRC1_VSPDIV_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPDIV, MCDE_VSCRC1_VSPDIV_##__x)
+#define MCDE_VSCRC1_VSPDIV(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPDIV, __x)
+#define MCDE_VSCRC1_VSPOL_SHIFT 27
+#define MCDE_VSCRC1_VSPOL_MASK 0x08000000
+#define MCDE_VSCRC1_VSPOL_ACTIVE_HIGH 0
+#define MCDE_VSCRC1_VSPOL_ACTIVE_LOW 1
+#define MCDE_VSCRC1_VSPOL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPOL, MCDE_VSCRC1_VSPOL_##__x)
+#define MCDE_VSCRC1_VSPOL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPOL, __x)
+#define MCDE_VSCRC1_VSSEL_SHIFT 28
+#define MCDE_VSCRC1_VSSEL_MASK 0x10000000
+#define MCDE_VSCRC1_VSSEL_VSYNC0 0
+#define MCDE_VSCRC1_VSSEL_VSYNC1 1
+#define MCDE_VSCRC1_VSSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSSEL, MCDE_VSCRC1_VSSEL_##__x)
+#define MCDE_VSCRC1_VSSEL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSSEL, __x)
+#define MCDE_VSCRC1_VSDBL_SHIFT 29
+#define MCDE_VSCRC1_VSDBL_MASK 0xE0000000
+#define MCDE_VSCRC1_VSDBL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSDBL, __x)
+#define MCDE_SCTRC 0x00000C64
+#define MCDE_SCTRC_SYNCDELC0_SHIFT 0
+#define MCDE_SCTRC_SYNCDELC0_MASK 0x000000FF
+#define MCDE_SCTRC_SYNCDELC0(__x) \
+ MCDE_VAL2REG(MCDE_SCTRC, SYNCDELC0, __x)
+#define MCDE_SCTRC_SYNCDELC1_SHIFT 8
+#define MCDE_SCTRC_SYNCDELC1_MASK 0x0000FF00
+#define MCDE_SCTRC_SYNCDELC1(__x) \
+ MCDE_VAL2REG(MCDE_SCTRC, SYNCDELC1, __x)
+#define MCDE_SCTRC_TRDELC_SHIFT 16
+#define MCDE_SCTRC_TRDELC_MASK 0x0FFF0000
+#define MCDE_SCTRC_TRDELC(__x) \
+ MCDE_VAL2REG(MCDE_SCTRC, TRDELC, __x)
+#define MCDE_SCSRC 0x00000C68
+#define MCDE_SCSRC_VSTAC0_SHIFT 0
+#define MCDE_SCSRC_VSTAC0_MASK 0x00000001
+#define MCDE_SCSRC_VSTAC0(__x) \
+ MCDE_VAL2REG(MCDE_SCSRC, VSTAC0, __x)
+#define MCDE_SCSRC_VSTAC1_SHIFT 1
+#define MCDE_SCSRC_VSTAC1_MASK 0x00000002
+#define MCDE_SCSRC_VSTAC1(__x) \
+ MCDE_VAL2REG(MCDE_SCSRC, VSTAC1, __x)
+#define MCDE_BCNR0 0x00000C6C
+#define MCDE_BCNR0_GROUPOFFSET 0x4
+#define MCDE_BCNR0_BCN_SHIFT 0
+#define MCDE_BCNR0_BCN_MASK 0x000000FF
+#define MCDE_BCNR0_BCN(__x) \
+ MCDE_VAL2REG(MCDE_BCNR0, BCN, __x)
+#define MCDE_BCNR1 0x00000C70
+#define MCDE_BCNR1_BCN_SHIFT 0
+#define MCDE_BCNR1_BCN_MASK 0x000000FF
+#define MCDE_BCNR1_BCN(__x) \
+ MCDE_VAL2REG(MCDE_BCNR1, BCN, __x)
+#define MCDE_CSCDTR0 0x00000C74
+#define MCDE_CSCDTR0_GROUPOFFSET 0x4
+#define MCDE_CSCDTR0_CSACT_SHIFT 0
+#define MCDE_CSCDTR0_CSACT_MASK 0x000000FF
+#define MCDE_CSCDTR0_CSACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CSACT, __x)
+#define MCDE_CSCDTR0_CSDEACT_SHIFT 8
+#define MCDE_CSCDTR0_CSDEACT_MASK 0x0000FF00
+#define MCDE_CSCDTR0_CSDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CSDEACT, __x)
+#define MCDE_CSCDTR0_CDACT_SHIFT 16
+#define MCDE_CSCDTR0_CDACT_MASK 0x00FF0000
+#define MCDE_CSCDTR0_CDACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CDACT, __x)
+#define MCDE_CSCDTR0_CDDEACT_SHIFT 24
+#define MCDE_CSCDTR0_CDDEACT_MASK 0xFF000000
+#define MCDE_CSCDTR0_CDDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CDDEACT, __x)
+#define MCDE_CSCDTR1 0x00000C78
+#define MCDE_CSCDTR1_CSACT_SHIFT 0
+#define MCDE_CSCDTR1_CSACT_MASK 0x000000FF
+#define MCDE_CSCDTR1_CSACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CSACT, __x)
+#define MCDE_CSCDTR1_CSDEACT_SHIFT 8
+#define MCDE_CSCDTR1_CSDEACT_MASK 0x0000FF00
+#define MCDE_CSCDTR1_CSDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CSDEACT, __x)
+#define MCDE_CSCDTR1_CDACT_SHIFT 16
+#define MCDE_CSCDTR1_CDACT_MASK 0x00FF0000
+#define MCDE_CSCDTR1_CDACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CDACT, __x)
+#define MCDE_CSCDTR1_CDDEACT_SHIFT 24
+#define MCDE_CSCDTR1_CDDEACT_MASK 0xFF000000
+#define MCDE_CSCDTR1_CDDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CDDEACT, __x)
+#define MCDE_RDWRTR0 0x00000C7C
+#define MCDE_RDWRTR0_GROUPOFFSET 0x4
+#define MCDE_RDWRTR0_RWACT_SHIFT 0
+#define MCDE_RDWRTR0_RWACT_MASK 0x000000FF
+#define MCDE_RDWRTR0_RWACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR0, RWACT, __x)
+#define MCDE_RDWRTR0_RWDEACT_SHIFT 8
+#define MCDE_RDWRTR0_RWDEACT_MASK 0x0000FF00
+#define MCDE_RDWRTR0_RWDEACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR0, RWDEACT, __x)
+#define MCDE_RDWRTR0_MOTINT_SHIFT 16
+#define MCDE_RDWRTR0_MOTINT_MASK 0x00010000
+#define MCDE_RDWRTR0_MOTINT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR0, MOTINT, __x)
+#define MCDE_RDWRTR1 0x00000C80
+#define MCDE_RDWRTR1_RWACT_SHIFT 0
+#define MCDE_RDWRTR1_RWACT_MASK 0x000000FF
+#define MCDE_RDWRTR1_RWACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR1, RWACT, __x)
+#define MCDE_RDWRTR1_RWDEACT_SHIFT 8
+#define MCDE_RDWRTR1_RWDEACT_MASK 0x0000FF00
+#define MCDE_RDWRTR1_RWDEACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR1, RWDEACT, __x)
+#define MCDE_RDWRTR1_MOTINT_SHIFT 16
+#define MCDE_RDWRTR1_MOTINT_MASK 0x00010000
+#define MCDE_RDWRTR1_MOTINT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR1, MOTINT, __x)
+#define MCDE_DOTR0 0x00000C84
+#define MCDE_DOTR0_GROUPOFFSET 0x4
+#define MCDE_DOTR0_DOACT_SHIFT 0
+#define MCDE_DOTR0_DOACT_MASK 0x000000FF
+#define MCDE_DOTR0_DOACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR0, DOACT, __x)
+#define MCDE_DOTR0_DODEACT_SHIFT 8
+#define MCDE_DOTR0_DODEACT_MASK 0x0000FF00
+#define MCDE_DOTR0_DODEACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR0, DODEACT, __x)
+#define MCDE_DOTR1 0x00000C88
+#define MCDE_DOTR1_DOACT_SHIFT 0
+#define MCDE_DOTR1_DOACT_MASK 0x000000FF
+#define MCDE_DOTR1_DOACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR1, DOACT, __x)
+#define MCDE_DOTR1_DODEACT_SHIFT 8
+#define MCDE_DOTR1_DODEACT_MASK 0x0000FF00
+#define MCDE_DOTR1_DODEACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR1, DODEACT, __x)
+#define MCDE_WDATADC0 0x00000C94
+#define MCDE_WDATADC0_GROUPOFFSET 0x4
+#define MCDE_WDATADC0_DATAVALUE_SHIFT 0
+#define MCDE_WDATADC0_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_WDATADC0_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_WDATADC0, DATAVALUE, __x)
+#define MCDE_WDATADC0_DC_SHIFT 24
+#define MCDE_WDATADC0_DC_MASK 0x01000000
+#define MCDE_WDATADC0_DC(__x) \
+ MCDE_VAL2REG(MCDE_WDATADC0, DC, __x)
+#define MCDE_WDATADC1 0x00000C98
+#define MCDE_WDATADC1_DATAVALUE_SHIFT 0
+#define MCDE_WDATADC1_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_WDATADC1_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_WDATADC1, DATAVALUE, __x)
+#define MCDE_WDATADC1_DC_SHIFT 24
+#define MCDE_WDATADC1_DC_MASK 0x01000000
+#define MCDE_WDATADC1_DC(__x) \
+ MCDE_VAL2REG(MCDE_WDATADC1, DC, __x)
+#define MCDE_RDATADC0 0x00000C9C
+#define MCDE_RDATADC0_GROUPOFFSET 0x4
+#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE_SHIFT 0
+#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE_MASK 0x0000FFFF
+#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC0, DATAREADFROMDISPLAYMODULE, __x)
+#define MCDE_RDATADC0_STARTREAD_SHIFT 16
+#define MCDE_RDATADC0_STARTREAD_MASK 0x00010000
+#define MCDE_RDATADC0_STARTREAD(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC0, STARTREAD, __x)
+#define MCDE_RDATADC1 0x00000CA0
+#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE_SHIFT 0
+#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE_MASK 0x0000FFFF
+#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC1, DATAREADFROMDISPLAYMODULE, __x)
+#define MCDE_RDATADC1_STARTREAD_SHIFT 16
+#define MCDE_RDATADC1_STARTREAD_MASK 0x00010000
+#define MCDE_RDATADC1_STARTREAD(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC1, STARTREAD, __x)
+#define MCDE_STATC 0x00000CA4
+#define MCDE_STATC_STATBUSY0_SHIFT 0
+#define MCDE_STATC_STATBUSY0_MASK 0x00000001
+#define MCDE_STATC_STATBUSY0(__x) \
+ MCDE_VAL2REG(MCDE_STATC, STATBUSY0, __x)
+#define MCDE_STATC_STATBUSY1_SHIFT 5
+#define MCDE_STATC_STATBUSY1_MASK 0x00000020
+#define MCDE_STATC_STATBUSY1(__x) \
+ MCDE_VAL2REG(MCDE_STATC, STATBUSY1, __x)
+#define MCDE_CTRLC0 0x00000CA8
+#define MCDE_CTRLC0_GROUPOFFSET 0x4
+#define MCDE_CTRLC0_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLC0_FIFOWTRMRK_MASK 0x000000FF
+#define MCDE_CTRLC0_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FIFOWTRMRK, __x)
+#define MCDE_CTRLC0_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLC0_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLC0_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FIFOEMPTY, __x)
+#define MCDE_CTRLC0_FIFOFULL_SHIFT 13
+#define MCDE_CTRLC0_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLC0_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FIFOFULL, __x)
+#define MCDE_CTRLC0_FORMID_SHIFT 16
+#define MCDE_CTRLC0_FORMID_MASK 0x00070000
+#define MCDE_CTRLC0_FORMID_DSI0VID 0
+#define MCDE_CTRLC0_FORMID_DSI0CMD 1
+#define MCDE_CTRLC0_FORMID_DSI1VID 2
+#define MCDE_CTRLC0_FORMID_DSI1CMD 3
+#define MCDE_CTRLC0_FORMID_DSI2VID 4
+#define MCDE_CTRLC0_FORMID_DSI2CMD 5
+#define MCDE_CTRLC0_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMID, MCDE_CTRLC0_FORMID_##__x)
+#define MCDE_CTRLC0_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMID, __x)
+#define MCDE_CTRLC0_FORMTYPE_SHIFT 20
+#define MCDE_CTRLC0_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLC0_FORMTYPE_DPITV 0
+#define MCDE_CTRLC0_FORMTYPE_DBI 1
+#define MCDE_CTRLC0_FORMTYPE_DSI 2
+#define MCDE_CTRLC0_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMTYPE, MCDE_CTRLC0_FORMTYPE_##__x)
+#define MCDE_CTRLC0_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMTYPE, __x)
+#define MCDE_CTRLC1 0x00000CAC
+#define MCDE_CTRLC1_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLC1_FIFOWTRMRK_MASK 0x000000FF
+#define MCDE_CTRLC1_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FIFOWTRMRK, __x)
+#define MCDE_CTRLC1_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLC1_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLC1_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FIFOEMPTY, __x)
+#define MCDE_CTRLC1_FIFOFULL_SHIFT 13
+#define MCDE_CTRLC1_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLC1_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FIFOFULL, __x)
+#define MCDE_CTRLC1_FORMID_SHIFT 16
+#define MCDE_CTRLC1_FORMID_MASK 0x00070000
+#define MCDE_CTRLC1_FORMID_DSI0VID 0
+#define MCDE_CTRLC1_FORMID_DSI0CMD 1
+#define MCDE_CTRLC1_FORMID_DSI1VID 2
+#define MCDE_CTRLC1_FORMID_DSI1CMD 3
+#define MCDE_CTRLC1_FORMID_DSI2VID 4
+#define MCDE_CTRLC1_FORMID_DSI2CMD 5
+#define MCDE_CTRLC1_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMID, MCDE_CTRLC1_FORMID_##__x)
+#define MCDE_CTRLC1_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMID, __x)
+#define MCDE_CTRLC1_FORMTYPE_SHIFT 20
+#define MCDE_CTRLC1_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLC1_FORMTYPE_DPITV 0
+#define MCDE_CTRLC1_FORMTYPE_DBI 1
+#define MCDE_CTRLC1_FORMTYPE_DSI 2
+#define MCDE_CTRLC1_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMTYPE, MCDE_CTRLC1_FORMTYPE_##__x)
+#define MCDE_CTRLC1_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMTYPE, __x)
+#define MCDE_DSIVID0CONF0 0x00000E00
+#define MCDE_DSIVID0CONF0_GROUPOFFSET 0x20
+#define MCDE_DSIVID0CONF0_BLANKING_SHIFT 0
+#define MCDE_DSIVID0CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSIVID0CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, BLANKING, __x)
+#define MCDE_DSIVID0CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSIVID0CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSIVID0CONF0_VID_MODE_CMD 0
+#define MCDE_DSIVID0CONF0_VID_MODE_VID 1
+#define MCDE_DSIVID0CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, VID_MODE, \
+ MCDE_DSIVID0CONF0_VID_MODE_##__x)
+#define MCDE_DSIVID0CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, VID_MODE, __x)
+#define MCDE_DSIVID0CONF0_CMD8_SHIFT 13
+#define MCDE_DSIVID0CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSIVID0CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, CMD8, __x)
+#define MCDE_DSIVID0CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSIVID0CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSIVID0CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, BIT_SWAP, __x)
+#define MCDE_DSIVID0CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSIVID0CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSIVID0CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, BYTE_SWAP, __x)
+#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSIVID0CONF0_PACKING_SHIFT 20
+#define MCDE_DSIVID0CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSIVID0CONF0_PACKING_RGB565 0
+#define MCDE_DSIVID0CONF0_PACKING_RGB666 1
+#define MCDE_DSIVID0CONF0_PACKING_RGB888 2
+#define MCDE_DSIVID0CONF0_PACKING_BGR888 3
+#define MCDE_DSIVID0CONF0_PACKING_HDTV 4
+#define MCDE_DSIVID0CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, PACKING, \
+ MCDE_DSIVID0CONF0_PACKING_##__x)
+#define MCDE_DSIVID0CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, PACKING, __x)
+#define MCDE_DSICMD0CONF0 0x00000E20
+#define MCDE_DSICMD0CONF0_BLANKING_SHIFT 0
+#define MCDE_DSICMD0CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSICMD0CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, BLANKING, __x)
+#define MCDE_DSICMD0CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSICMD0CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSICMD0CONF0_VID_MODE_CMD 0
+#define MCDE_DSICMD0CONF0_VID_MODE_VID 1
+#define MCDE_DSICMD0CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, VID_MODE, \
+ MCDE_DSICMD0CONF0_VID_MODE_##__x)
+#define MCDE_DSICMD0CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, VID_MODE, __x)
+#define MCDE_DSICMD0CONF0_CMD8_SHIFT 13
+#define MCDE_DSICMD0CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSICMD0CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, CMD8, __x)
+#define MCDE_DSICMD0CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSICMD0CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSICMD0CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, BIT_SWAP, __x)
+#define MCDE_DSICMD0CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSICMD0CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSICMD0CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, BYTE_SWAP, __x)
+#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSICMD0CONF0_PACKING_SHIFT 20
+#define MCDE_DSICMD0CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSICMD0CONF0_PACKING_RGB565 0
+#define MCDE_DSICMD0CONF0_PACKING_RGB666 1
+#define MCDE_DSICMD0CONF0_PACKING_RGB888 2
+#define MCDE_DSICMD0CONF0_PACKING_BGR888 3
+#define MCDE_DSICMD0CONF0_PACKING_HDTV 4
+#define MCDE_DSICMD0CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, PACKING, \
+ MCDE_DSICMD0CONF0_PACKING_##__x)
+#define MCDE_DSICMD0CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, PACKING, __x)
+#define MCDE_DSIVID1CONF0 0x00000E40
+#define MCDE_DSIVID1CONF0_BLANKING_SHIFT 0
+#define MCDE_DSIVID1CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSIVID1CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, BLANKING, __x)
+#define MCDE_DSIVID1CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSIVID1CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSIVID1CONF0_VID_MODE_CMD 0
+#define MCDE_DSIVID1CONF0_VID_MODE_VID 1
+#define MCDE_DSIVID1CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, VID_MODE, \
+ MCDE_DSIVID1CONF0_VID_MODE_##__x)
+#define MCDE_DSIVID1CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, VID_MODE, __x)
+#define MCDE_DSIVID1CONF0_CMD8_SHIFT 13
+#define MCDE_DSIVID1CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSIVID1CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, CMD8, __x)
+#define MCDE_DSIVID1CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSIVID1CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSIVID1CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, BIT_SWAP, __x)
+#define MCDE_DSIVID1CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSIVID1CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSIVID1CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, BYTE_SWAP, __x)
+#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSIVID1CONF0_PACKING_SHIFT 20
+#define MCDE_DSIVID1CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSIVID1CONF0_PACKING_RGB565 0
+#define MCDE_DSIVID1CONF0_PACKING_RGB666 1
+#define MCDE_DSIVID1CONF0_PACKING_RGB888 2
+#define MCDE_DSIVID1CONF0_PACKING_BGR888 3
+#define MCDE_DSIVID1CONF0_PACKING_HDTV 4
+#define MCDE_DSIVID1CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, PACKING, \
+ MCDE_DSIVID1CONF0_PACKING_##__x)
+#define MCDE_DSIVID1CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, PACKING, __x)
+#define MCDE_DSICMD1CONF0 0x00000E60
+#define MCDE_DSICMD1CONF0_BLANKING_SHIFT 0
+#define MCDE_DSICMD1CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSICMD1CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, BLANKING, __x)
+#define MCDE_DSICMD1CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSICMD1CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSICMD1CONF0_VID_MODE_CMD 0
+#define MCDE_DSICMD1CONF0_VID_MODE_VID 1
+#define MCDE_DSICMD1CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, VID_MODE, \
+ MCDE_DSICMD1CONF0_VID_MODE_##__x)
+#define MCDE_DSICMD1CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, VID_MODE, __x)
+#define MCDE_DSICMD1CONF0_CMD8_SHIFT 13
+#define MCDE_DSICMD1CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSICMD1CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, CMD8, __x)
+#define MCDE_DSICMD1CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSICMD1CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSICMD1CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, BIT_SWAP, __x)
+#define MCDE_DSICMD1CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSICMD1CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSICMD1CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, BYTE_SWAP, __x)
+#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSICMD1CONF0_PACKING_SHIFT 20
+#define MCDE_DSICMD1CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSICMD1CONF0_PACKING_RGB565 0
+#define MCDE_DSICMD1CONF0_PACKING_RGB666 1
+#define MCDE_DSICMD1CONF0_PACKING_RGB888 2
+#define MCDE_DSICMD1CONF0_PACKING_BGR888 3
+#define MCDE_DSICMD1CONF0_PACKING_HDTV 4
+#define MCDE_DSICMD1CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, PACKING, \
+ MCDE_DSICMD1CONF0_PACKING_##__x)
+#define MCDE_DSICMD1CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, PACKING, __x)
+#define MCDE_DSIVID2CONF0 0x00000E80
+#define MCDE_DSIVID2CONF0_BLANKING_SHIFT 0
+#define MCDE_DSIVID2CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSIVID2CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, BLANKING, __x)
+#define MCDE_DSIVID2CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSIVID2CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSIVID2CONF0_VID_MODE_CMD 0
+#define MCDE_DSIVID2CONF0_VID_MODE_VID 1
+#define MCDE_DSIVID2CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, VID_MODE, \
+ MCDE_DSIVID2CONF0_VID_MODE_##__x)
+#define MCDE_DSIVID2CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, VID_MODE, __x)
+#define MCDE_DSIVID2CONF0_CMD8_SHIFT 13
+#define MCDE_DSIVID2CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSIVID2CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, CMD8, __x)
+#define MCDE_DSIVID2CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSIVID2CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSIVID2CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, BIT_SWAP, __x)
+#define MCDE_DSIVID2CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSIVID2CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSIVID2CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, BYTE_SWAP, __x)
+#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSIVID2CONF0_PACKING_SHIFT 20
+#define MCDE_DSIVID2CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSIVID2CONF0_PACKING_RGB565 0
+#define MCDE_DSIVID2CONF0_PACKING_RGB666 1
+#define MCDE_DSIVID2CONF0_PACKING_RGB888 2
+#define MCDE_DSIVID2CONF0_PACKING_BGR888 3
+#define MCDE_DSIVID2CONF0_PACKING_HDTV 4
+#define MCDE_DSIVID2CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, PACKING, \
+ MCDE_DSIVID2CONF0_PACKING_##__x)
+#define MCDE_DSIVID2CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, PACKING, __x)
+#define MCDE_DSICMD2CONF0 0x00000EA0
+#define MCDE_DSICMD2CONF0_BLANKING_SHIFT 0
+#define MCDE_DSICMD2CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSICMD2CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, BLANKING, __x)
+#define MCDE_DSICMD2CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSICMD2CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSICMD2CONF0_VID_MODE_CMD 0
+#define MCDE_DSICMD2CONF0_VID_MODE_VID 1
+#define MCDE_DSICMD2CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, VID_MODE, \
+ MCDE_DSICMD2CONF0_VID_MODE_##__x)
+#define MCDE_DSICMD2CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, VID_MODE, __x)
+#define MCDE_DSICMD2CONF0_CMD8_SHIFT 13
+#define MCDE_DSICMD2CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSICMD2CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, CMD8, __x)
+#define MCDE_DSICMD2CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSICMD2CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSICMD2CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, BIT_SWAP, __x)
+#define MCDE_DSICMD2CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSICMD2CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSICMD2CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, BYTE_SWAP, __x)
+#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSICMD2CONF0_PACKING_SHIFT 20
+#define MCDE_DSICMD2CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSICMD2CONF0_PACKING_RGB565 0
+#define MCDE_DSICMD2CONF0_PACKING_RGB666 1
+#define MCDE_DSICMD2CONF0_PACKING_RGB888 2
+#define MCDE_DSICMD2CONF0_PACKING_BGR888 3
+#define MCDE_DSICMD2CONF0_PACKING_HDTV 4
+#define MCDE_DSICMD2CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, PACKING, \
+ MCDE_DSICMD2CONF0_PACKING_##__x)
+#define MCDE_DSICMD2CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, PACKING, __x)
+#define MCDE_DSIVID0FRAME 0x00000E04
+#define MCDE_DSIVID0FRAME_GROUPOFFSET 0x20
+#define MCDE_DSIVID0FRAME_FRAME_SHIFT 0
+#define MCDE_DSIVID0FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSIVID0FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0FRAME, FRAME, __x)
+#define MCDE_DSICMD0FRAME 0x00000E24
+#define MCDE_DSICMD0FRAME_FRAME_SHIFT 0
+#define MCDE_DSICMD0FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSICMD0FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0FRAME, FRAME, __x)
+#define MCDE_DSIVID1FRAME 0x00000E44
+#define MCDE_DSIVID1FRAME_FRAME_SHIFT 0
+#define MCDE_DSIVID1FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSIVID1FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1FRAME, FRAME, __x)
+#define MCDE_DSICMD1FRAME 0x00000E64
+#define MCDE_DSICMD1FRAME_FRAME_SHIFT 0
+#define MCDE_DSICMD1FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSICMD1FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1FRAME, FRAME, __x)
+#define MCDE_DSIVID2FRAME 0x00000E84
+#define MCDE_DSIVID2FRAME_FRAME_SHIFT 0
+#define MCDE_DSIVID2FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSIVID2FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2FRAME, FRAME, __x)
+#define MCDE_DSICMD2FRAME 0x00000EA4
+#define MCDE_DSICMD2FRAME_FRAME_SHIFT 0
+#define MCDE_DSICMD2FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSICMD2FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2FRAME, FRAME, __x)
+#define MCDE_DSIVID0PKT 0x00000E08
+#define MCDE_DSIVID0PKT_GROUPOFFSET 0x20
+#define MCDE_DSIVID0PKT_PACKET_SHIFT 0
+#define MCDE_DSIVID0PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSIVID0PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0PKT, PACKET, __x)
+#define MCDE_DSICMD0PKT 0x00000E28
+#define MCDE_DSICMD0PKT_PACKET_SHIFT 0
+#define MCDE_DSICMD0PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSICMD0PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0PKT, PACKET, __x)
+#define MCDE_DSIVID1PKT 0x00000E48
+#define MCDE_DSIVID1PKT_PACKET_SHIFT 0
+#define MCDE_DSIVID1PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSIVID1PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1PKT, PACKET, __x)
+#define MCDE_DSICMD1PKT 0x00000E68
+#define MCDE_DSICMD1PKT_PACKET_SHIFT 0
+#define MCDE_DSICMD1PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSICMD1PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1PKT, PACKET, __x)
+#define MCDE_DSIVID2PKT 0x00000E88
+#define MCDE_DSIVID2PKT_PACKET_SHIFT 0
+#define MCDE_DSIVID2PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSIVID2PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2PKT, PACKET, __x)
+#define MCDE_DSICMD2PKT 0x00000EA8
+#define MCDE_DSICMD2PKT_PACKET_SHIFT 0
+#define MCDE_DSICMD2PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSICMD2PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2PKT, PACKET, __x)
+#define MCDE_DSIVID0SYNC 0x00000E0C
+#define MCDE_DSIVID0SYNC_GROUPOFFSET 0x20
+#define MCDE_DSIVID0SYNC_DMA_SHIFT 0
+#define MCDE_DSIVID0SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSIVID0SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0SYNC, DMA, __x)
+#define MCDE_DSIVID0SYNC_SW_SHIFT 16
+#define MCDE_DSIVID0SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSIVID0SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0SYNC, SW, __x)
+#define MCDE_DSICMD0SYNC 0x00000E2C
+#define MCDE_DSICMD0SYNC_DMA_SHIFT 0
+#define MCDE_DSICMD0SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSICMD0SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0SYNC, DMA, __x)
+#define MCDE_DSICMD0SYNC_SW_SHIFT 16
+#define MCDE_DSICMD0SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSICMD0SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0SYNC, SW, __x)
+#define MCDE_DSIVID1SYNC 0x00000E4C
+#define MCDE_DSIVID1SYNC_DMA_SHIFT 0
+#define MCDE_DSIVID1SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSIVID1SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1SYNC, DMA, __x)
+#define MCDE_DSIVID1SYNC_SW_SHIFT 16
+#define MCDE_DSIVID1SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSIVID1SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1SYNC, SW, __x)
+#define MCDE_DSICMD1SYNC 0x00000E6C
+#define MCDE_DSICMD1SYNC_DMA_SHIFT 0
+#define MCDE_DSICMD1SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSICMD1SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1SYNC, DMA, __x)
+#define MCDE_DSICMD1SYNC_SW_SHIFT 16
+#define MCDE_DSICMD1SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSICMD1SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1SYNC, SW, __x)
+#define MCDE_DSIVID2SYNC 0x00000E8C
+#define MCDE_DSIVID2SYNC_DMA_SHIFT 0
+#define MCDE_DSIVID2SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSIVID2SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2SYNC, DMA, __x)
+#define MCDE_DSIVID2SYNC_SW_SHIFT 16
+#define MCDE_DSIVID2SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSIVID2SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2SYNC, SW, __x)
+#define MCDE_DSICMD2SYNC 0x00000EAC
+#define MCDE_DSICMD2SYNC_DMA_SHIFT 0
+#define MCDE_DSICMD2SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSICMD2SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2SYNC, DMA, __x)
+#define MCDE_DSICMD2SYNC_SW_SHIFT 16
+#define MCDE_DSICMD2SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSICMD2SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2SYNC, SW, __x)
+#define MCDE_DSIVID0CMDW 0x00000E10
+#define MCDE_DSIVID0CMDW_GROUPOFFSET 0x20
+#define MCDE_DSIVID0CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSIVID0CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSIVID0CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSIVID0CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSIVID0CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSIVID0CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CMDW, CMDW_START, __x)
+#define MCDE_DSICMD0CMDW 0x00000E30
+#define MCDE_DSICMD0CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSICMD0CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSICMD0CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSICMD0CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSICMD0CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSICMD0CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CMDW, CMDW_START, __x)
+#define MCDE_DSIVID1CMDW 0x00000E50
+#define MCDE_DSIVID1CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSIVID1CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSIVID1CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSIVID1CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSIVID1CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSIVID1CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CMDW, CMDW_START, __x)
+#define MCDE_DSICMD1CMDW 0x00000E70
+#define MCDE_DSICMD1CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSICMD1CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSICMD1CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSICMD1CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSICMD1CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSICMD1CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CMDW, CMDW_START, __x)
+#define MCDE_DSIVID2CMDW 0x00000E90
+#define MCDE_DSIVID2CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSIVID2CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSIVID2CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSIVID2CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSIVID2CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSIVID2CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CMDW, CMDW_START, __x)
+#define MCDE_DSICMD2CMDW 0x00000EB0
+#define MCDE_DSICMD2CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSICMD2CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSICMD2CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSICMD2CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSICMD2CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSICMD2CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CMDW, CMDW_START, __x)
+#define MCDE_DSIVID0DELAY0 0x00000E14
+#define MCDE_DSIVID0DELAY0_GROUPOFFSET 0x20
+#define MCDE_DSIVID0DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSIVID0DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSIVID0DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0DELAY0, INTPKTDEL, __x)
+#define MCDE_DSICMD0DELAY0 0x00000E34
+#define MCDE_DSICMD0DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSICMD0DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSICMD0DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0DELAY0, INTPKTDEL, __x)
+#define MCDE_DSIVID1DELAY0 0x00000E54
+#define MCDE_DSIVID1DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSIVID1DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSIVID1DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1DELAY0, INTPKTDEL, __x)
+#define MCDE_DSICMD1DELAY0 0x00000E74
+#define MCDE_DSICMD1DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSICMD1DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSICMD1DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1DELAY0, INTPKTDEL, __x)
+#define MCDE_DSIVID2DELAY0 0x00000E94
+#define MCDE_DSIVID2DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSIVID2DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSIVID2DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2DELAY0, INTPKTDEL, __x)
+#define MCDE_DSICMD2DELAY0 0x00000EB4
+#define MCDE_DSICMD2DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSICMD2DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSICMD2DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2DELAY0, INTPKTDEL, __x)
+#define MCDE_DSIVID0DELAY1 0x00000E18
+#define MCDE_DSIVID0DELAY1_GROUPOFFSET 0x20
+#define MCDE_DSIVID0DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSIVID0DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSIVID0DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0DELAY1, TEREQDEL, __x)
+#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSICMD0DELAY1 0x00000E38
+#define MCDE_DSICMD0DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSICMD0DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSICMD0DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0DELAY1, TEREQDEL, __x)
+#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSIVID1DELAY1 0x00000E58
+#define MCDE_DSIVID1DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSIVID1DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSIVID1DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1DELAY1, TEREQDEL, __x)
+#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSICMD1DELAY1 0x00000E78
+#define MCDE_DSICMD1DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSICMD1DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSICMD1DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1DELAY1, TEREQDEL, __x)
+#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSIVID2DELAY1 0x00000E98
+#define MCDE_DSIVID2DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSIVID2DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSIVID2DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2DELAY1, TEREQDEL, __x)
+#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSICMD2DELAY1 0x00000EB8
+#define MCDE_DSICMD2DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSICMD2DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSICMD2DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2DELAY1, TEREQDEL, __x)
+#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2DELAY1, FRAMESTARTDEL, __x)
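The SHIFT/MASK/ENUM triplets above all follow one pattern: MCDE_VAL2REG() places a field value at its _SHIFT and clips it to its _MASK. As a minimal illustrative sketch (not part of the patch, and assuming MCDE_VAL2REG is the usual shift-and-mask helper defined earlier in this header), a DSICMD2CONF0 value for RGB888 command-mode output could be composed as:

/*
 * Sketch only. MCDE_VAL2REG() is assumed to expand roughly to
 *   (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
 * as the SHIFT/MASK pairs above suggest.
 */
u32 conf0 = MCDE_DSICMD2CONF0_PACKING_ENUM(RGB888) |	/* 24-bit RGB packing */
	    MCDE_DSICMD2CONF0_VID_MODE_ENUM(CMD) |	/* DSI command mode   */
	    MCDE_DSICMD2CONF0_BLANKING(0);		/* no extra blanking  */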
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index df9e8f0e327..f3284a90ee3 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -343,6 +343,22 @@ config IMX2_WDT
To compile this driver as a module, choose M here: the
module will be called imx2_wdt.
+config UX500_WATCHDOG
+ bool "ST-Ericsson Ux500 watchdog"
+ depends on UX500_SOC_DB8500 || UX500_SOC_DB5500
+ default y
+ help
+ Say Y here to include Watchdog timer support for the
+ watchdog in the PRCMU of ST-Ericsson Ux500 series platforms.
+ This watchdog is used to reset the system and thus cannot be
+ compiled as a module.
+
+config UX500_WATCHDOG_DEBUG
+ bool "ST-Ericsson Ux500 watchdog DEBUG"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && DEBUG_FS
+ help
+ Say Y here to add various debugfs entries in wdog/
+
# AVR32 Architecture
config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index e8f479a1640..738a0f3ad21 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
+obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 82ccd36e2c9..018c1ffc7dc 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -32,11 +32,13 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/kexec.h>
#include <asm/smp_twd.h>
struct mpcore_wdt {
- unsigned long timer_alive;
+ cpumask_t timer_alive;
struct device *dev;
void __iomem *base;
int irq;
@@ -47,6 +49,8 @@ struct mpcore_wdt {
static struct platform_device *mpcore_wdt_dev;
static DEFINE_SPINLOCK(wdt_lock);
+static DEFINE_PER_CPU(unsigned long, mpcore_wdt_rate);
+
#define TIMER_MARGIN 60
static int mpcore_margin = TIMER_MARGIN;
module_param(mpcore_margin, int, 0);
@@ -67,6 +71,8 @@ MODULE_PARM_DESC(mpcore_noboot, "MPcore watchdog action, "
"set to 1 to ignore reboots, 0 to reboot (default="
__MODULE_STRING(ONLY_TESTING) ")");
+#define MPCORE_WDT_PERIPHCLK_PRESCALER 2
+
/*
* This is the interrupt handler. Note that we only use this
* in testing mode, so don't actually do a reboot here.
@@ -99,9 +105,8 @@ static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt)
spin_lock(&wdt_lock);
/* Assume prescale is set to 256 */
- count = __raw_readl(wdt->base + TWD_WDOG_COUNTER);
- count = (0xFFFFFFFFU - count) * (HZ / 5);
- count = (count / 256) * mpcore_margin;
+ count = per_cpu(mpcore_wdt_rate, smp_processor_id()) / 256;
+ count = count * mpcore_margin;
/* Reload the counter */
writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
@@ -109,6 +114,56 @@ static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt)
spin_unlock(&wdt_lock);
}
+static void mpcore_wdt_set_rate(unsigned long new_rate)
+{
+ unsigned long count;
+ unsigned long long rate_tmp;
+ unsigned long old_rate;
+
+ spin_lock(&wdt_lock);
+ old_rate = per_cpu(mpcore_wdt_rate, smp_processor_id());
+ per_cpu(mpcore_wdt_rate, smp_processor_id()) = new_rate;
+
+ if (mpcore_wdt_dev) {
+ struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_dev);
+ count = readl(wdt->base + TWD_WDOG_COUNTER);
+ /* The goal: count = count * (new_rate/old_rate); */
+ rate_tmp = (unsigned long long)count * new_rate;
+ do_div(rate_tmp, old_rate);
+ count = rate_tmp;
+ writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
+ wdt->perturb = wdt->perturb ? 0 : 1;
+ }
+ spin_unlock(&wdt_lock);
+}
+
+static void mpcore_wdt_update_cpu_frequency_on_cpu(void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ mpcore_wdt_set_rate((freq->new * 1000) /
+ MPCORE_WDT_PERIPHCLK_PRESCALER);
+}
+
+static int mpcore_wdt_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+
+ if (event == CPUFREQ_RESUMECHANGE ||
+ (event == CPUFREQ_PRECHANGE && freq->new > freq->old) ||
+ (event == CPUFREQ_POSTCHANGE && freq->new < freq->old))
+ smp_call_function_single(freq->cpu,
+ mpcore_wdt_update_cpu_frequency_on_cpu,
+ freq, 1);
+
+ return 0;
+}
+
+static struct notifier_block mpcore_wdt_cpufreq_notifier_block = {
+ .notifier_call = mpcore_wdt_cpufreq_notifier,
+};
+
+
static void mpcore_wdt_stop(struct mpcore_wdt *wdt)
{
spin_lock(&wdt_lock);
@@ -143,6 +198,20 @@ static int mpcore_wdt_set_heartbeat(int t)
return 0;
}
+static int mpcore_wdt_stop_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_dev);
+ printk(KERN_INFO "Stopping watchdog on non-crashing core %u\n",
+ smp_processor_id());
+ mpcore_wdt_stop(wdt);
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block mpcore_wdt_stop_block = {
+ .notifier_call = mpcore_wdt_stop_notifier,
+};
+
/*
* /dev/watchdog handling
*/
@@ -150,7 +219,7 @@ static int mpcore_wdt_open(struct inode *inode, struct file *file)
{
struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_dev);
- if (test_and_set_bit(0, &wdt->timer_alive))
+ if (cpumask_test_and_set_cpu(smp_processor_id(), &wdt->timer_alive))
return -EBUSY;
if (nowayout)
@@ -158,6 +227,9 @@ static int mpcore_wdt_open(struct inode *inode, struct file *file)
file->private_data = wdt;
+ atomic_notifier_chain_register(&crash_percpu_notifier_list,
+ &mpcore_wdt_stop_block);
+
/*
* Activate timer
*/
@@ -181,7 +253,7 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
"unexpected close, not stopping watchdog!\n");
mpcore_wdt_keepalive(wdt);
}
- clear_bit(0, &wdt->timer_alive);
+ cpumask_clear_cpu(smp_processor_id(), &wdt->timer_alive);
wdt->expect_close = 0;
return 0;
}
@@ -447,16 +519,31 @@ static char banner[] __initdata = KERN_INFO "MPcore Watchdog Timer: 0.1. "
static int __init mpcore_wdt_init(void)
{
+ int i;
+
/*
* Check that the margin value is within it's range;
* if not reset to the default
*/
if (mpcore_wdt_set_heartbeat(mpcore_margin)) {
mpcore_wdt_set_heartbeat(TIMER_MARGIN);
- printk(KERN_INFO "mpcore_margin value must be 0 < mpcore_margin < 65536, using %d\n",
+ printk(KERN_INFO "mpcore_wdt: mpcore_margin value must be 0 < mpcore_margin < 65536, using %d\n",
TIMER_MARGIN);
}
+ cpufreq_register_notifier(&mpcore_wdt_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ for_each_online_cpu(i)
+ per_cpu(mpcore_wdt_rate, i) =
+ (cpufreq_get(i) * 1000) / MPCORE_WDT_PERIPHCLK_PRESCALER;
+
+ for_each_online_cpu(i)
+ printk(KERN_INFO
+ "mpcore_wdt: rate for core %d is %lu.%02luMHz.\n", i,
+ per_cpu(mpcore_wdt_rate, i) / 1000000,
+ (per_cpu(mpcore_wdt_rate, i) / 10000) % 100);
+
printk(banner, mpcore_noboot, mpcore_margin, nowayout);
return platform_driver_register(&mpcore_wdt_driver);
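The mpcore_wdt change above keeps a per-CPU TWD clock rate and, on a cpufreq transition, rescales the live counter by new_rate/old_rate using a 64-bit intermediate and do_div() so the multiplication cannot overflow 32 bits. A stand-alone sketch of that arithmetic (plain C with uint64_t, for illustration only):

#include <stdint.h>

/*
 * Rescale a remaining watchdog count after a clock-rate change.
 * count can be close to 2^32 and rates are in the hundreds of MHz,
 * so the product is widened to 64 bits before dividing -- the same
 * reason the patch uses an unsigned long long and do_div().
 */
static uint32_t rescale_count(uint32_t count, uint32_t old_rate, uint32_t new_rate)
{
	uint64_t tmp = (uint64_t)count * new_rate;

	return (uint32_t)(tmp / old_rate);
}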
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
new file mode 100644
index 00000000000..52747e59172
--- /dev/null
+++ b/drivers/watchdog/ux500_wdt.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * Heavily based upon geodewdt.c
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#define WATCHDOG_TIMEOUT 600 /* 10 minutes */
+
+#define WDT_FLAGS_OPEN 1
+#define WDT_FLAGS_ORPHAN 2
+
+static unsigned long wdt_flags;
+
+static int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+static u8 wdog_id;
+static bool wdt_en;
+static bool wdt_auto_off = false;
+static bool safe_close;
+
+static int ux500_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags))
+ return -EBUSY;
+
+ if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags))
+ __module_get(THIS_MODULE);
+
+ prcmu_enable_a9wdog(wdog_id);
+ wdt_en = true;
+
+ return nonseekable_open(inode, file);
+}
+
+static int ux500_wdt_release(struct inode *inode, struct file *file)
+{
+ if (safe_close) {
+ prcmu_disable_a9wdog(wdog_id);
+ module_put(THIS_MODULE);
+ } else {
+ pr_crit("Unexpected close - watchdog is not stopping.\n");
+ prcmu_kick_a9wdog(wdog_id);
+
+ set_bit(WDT_FLAGS_ORPHAN, &wdt_flags);
+ }
+
+ clear_bit(WDT_FLAGS_OPEN, &wdt_flags);
+ safe_close = false;
+ return 0;
+}
+
+static ssize_t ux500_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ if (!len)
+ return len;
+
+ if (!nowayout) {
+ size_t i;
+ safe_close = false;
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+
+ if (c == 'V')
+ safe_close = true;
+ }
+ }
+
+ prcmu_kick_a9wdog(wdog_id);
+
+ return len;
+}
+
+static long ux500_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int interval;
+
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .firmware_version = 1,
+ .identity = "Ux500 WDT",
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_SETOPTIONS:
+ {
+ int options;
+ int ret = -EINVAL;
+
+ if (get_user(options, p))
+ return -EFAULT;
+
+ if (options & WDIOS_DISABLECARD) {
+ prcmu_disable_a9wdog(wdog_id);
+ wdt_en = false;
+ ret = 0;
+ }
+
+ if (options & WDIOS_ENABLECARD) {
+ prcmu_enable_a9wdog(wdog_id);
+ wdt_en = true;
+ ret = 0;
+ }
+
+ return ret;
+ }
+ case WDIOC_KEEPALIVE:
+ return prcmu_kick_a9wdog(wdog_id);
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(interval, p))
+ return -EFAULT;
+
+ if (cpu_is_u8500()) {
+ /* 28 bit resolution in ms, becomes 268435.455 s */
+ if (interval > 268435 || interval < 0)
+ return -EINVAL;
+ } else if (cpu_is_u5500()) {
+ /* 32 bit resolution in ms, becomes 4294967.295 s */
+ if (interval > 4294967 || interval < 0)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ timeout = interval;
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+
+ /* Fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(timeout, p);
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static const struct file_operations ux500_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = ux500_wdt_write,
+ .unlocked_ioctl = ux500_wdt_ioctl,
+ .open = ux500_wdt_open,
+ .release = ux500_wdt_release,
+};
+
+static struct miscdevice ux500_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &ux500_wdt_fops,
+};
+
+#ifdef CONFIG_UX500_WATCHDOG_DEBUG
+enum wdog_dbg {
+ WDOG_DBG_CONFIG,
+ WDOG_DBG_LOAD,
+ WDOG_DBG_KICK,
+ WDOG_DBG_EN,
+ WDOG_DBG_DIS,
+};
+
+static ssize_t wdog_dbg_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long val;
+ int err;
+ enum wdog_dbg v = (enum wdog_dbg)((struct seq_file *)
+ (file->private_data))->private;
+
+ switch (v) {
+ case WDOG_DBG_CONFIG:
+ err = kstrtoul_from_user(user_buf, count, 0, &val);
+
+ if (!err) {
+ wdt_auto_off = val != 0;
+ (void) prcmu_config_a9wdog(1,
+ wdt_auto_off);
+ }
+ else {
+ pr_err("ux500_wdt:dbg: unknown value\n");
+ }
+ break;
+ case WDOG_DBG_LOAD:
+ err = kstrtoul_from_user(user_buf, count, 0, &val);
+
+ if (!err) {
+ timeout = val;
+ /* Convert seconds to ms */
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ }
+ else {
+ pr_err("ux500_wdt:dbg: unknown value\n");
+ }
+ break;
+ case WDOG_DBG_KICK:
+ (void) prcmu_kick_a9wdog(wdog_id);
+ break;
+ case WDOG_DBG_EN:
+ wdt_en = true;
+ (void) prcmu_enable_a9wdog(wdog_id);
+ break;
+ case WDOG_DBG_DIS:
+ wdt_en = false;
+ (void) prcmu_disable_a9wdog(wdog_id);
+ break;
+ }
+
+ return count;
+}
+
+static int wdog_dbg_read(struct seq_file *s, void *p)
+{
+ enum wdog_dbg v = (enum wdog_dbg)s->private;
+
+ switch (v) {
+ case WDOG_DBG_CONFIG:
+ seq_printf(s,"wdog is on id %d, auto off on sleep: %s\n",
+ (int)wdog_id,
+ wdt_auto_off ? "enabled": "disabled");
+ break;
+ case WDOG_DBG_LOAD:
+ /* In 1s */
+ seq_printf(s, "wdog load is: %d s\n",
+ timeout);
+ break;
+ case WDOG_DBG_KICK:
+ break;
+ case WDOG_DBG_EN:
+ case WDOG_DBG_DIS:
+ seq_printf(s, "wdog is %sabled\n",
+ wdt_en ? "en" : "dis");
+ break;
+ }
+ return 0;
+}
+
+static int wdog_dbg_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, wdog_dbg_read, inode->i_private);
+}
+
+static const struct file_operations wdog_dbg_fops = {
+ .open = wdog_dbg_open,
+ .write = wdog_dbg_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int __init wdog_dbg_init(void)
+{
+ struct dentry *wdog_dir;
+
+ wdog_dir = debugfs_create_dir("wdog", NULL);
+ if (IS_ERR_OR_NULL(wdog_dir))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_u8("id",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ &wdog_id)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("config",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_CONFIG,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("load",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_LOAD,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("kick",
+ S_IWUGO, wdog_dir,
+ (void *)WDOG_DBG_KICK,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("enable",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_EN,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("disable",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_DIS,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ return 0;
+fail:
+ pr_err("ux500:wdog: Failed to initialize wdog dbg\n");
+ debugfs_remove_recursive(wdog_dir);
+
+ return -EFAULT;
+}
+
+#else
+static inline int __init wdog_dbg_init(void)
+{
+ return 0;
+}
+#endif
+
+static int __init ux500_wdt_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ /* Number of watchdogs */
+ prcmu_config_a9wdog(1, wdt_auto_off);
+ /* convert to ms */
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+
+ ret = misc_register(&ux500_wdt_miscdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register misc\n");
+ return ret;
+ }
+
+ ret = wdog_dbg_init();
+ if (ret < 0)
+ goto fail;
+
+ dev_info(&pdev->dev, "initialized\n");
+
+ return 0;
+fail:
+ misc_deregister(&ux500_wdt_miscdev);
+ return ret;
+}
+
+static int __exit ux500_wdt_remove(struct platform_device *dev)
+{
+ prcmu_disable_a9wdog(wdog_id);
+ wdt_en = false;
+ misc_deregister(&ux500_wdt_miscdev);
+ return 0;
+}
+#ifdef CONFIG_PM
+static int ux500_wdt_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ if (wdt_en && cpu_is_u5500()) {
+ prcmu_disable_a9wdog(wdog_id);
+ return 0;
+ }
+
+ if (wdt_en && !wdt_auto_off) {
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_config_a9wdog(1, true);
+
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ }
+ return 0;
+}
+
+static int ux500_wdt_resume(struct platform_device *pdev)
+{
+ if (wdt_en && cpu_is_u5500()) {
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ return 0;
+ }
+
+ if (wdt_en && !wdt_auto_off) {
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_config_a9wdog(1, wdt_auto_off);
+
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ }
+ return 0;
+}
+
+#else
+#define ux500_wdt_suspend NULL
+#define ux500_wdt_resume NULL
+#endif
+static struct platform_driver ux500_wdt_driver = {
+ .remove = __exit_p(ux500_wdt_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ux500_wdt",
+ },
+ .suspend = ux500_wdt_suspend,
+ .resume = ux500_wdt_resume,
+};
+
+static int __init ux500_wdt_init(void)
+{
+ return platform_driver_probe(&ux500_wdt_driver, ux500_wdt_probe);
+}
+module_init(ux500_wdt_init);
+
+MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>");
+MODULE_DESCRIPTION("Ux500 Watchdog Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
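The driver above exposes the standard /dev/watchdog misc interface: any write kicks the PRCMU watchdog, a write containing 'V' arms the magic close, and the WDIOC_* ioctls map onto the prcmu_*_a9wdog() calls. A minimal userspace sketch against that standard interface (illustration only, not part of the patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 60;			/* seconds */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* driver reloads via prcmu_load_a9wdog() */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* driver calls prcmu_kick_a9wdog() */

	write(fd, "V", 1);			/* magic close: sets safe_close */
	close(fd);				/* release then disables the watchdog */
	return 0;
}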
diff --git a/firmware/Makefile b/firmware/Makefile
index 0d15a3d113a..b74752ef53f 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -32,6 +32,7 @@ fw-shipped-$(CONFIG_ADAPTEC_STARFIRE) += adaptec/starfire_rx.bin \
adaptec/starfire_tx.bin
fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin
fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw
+fw-shipped-$(CONFIG_AV8100) += av8100.fw
fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.2.9.0.fw \
bnx2x/bnx2x-e1h-6.2.9.0.fw \
bnx2x/bnx2x-e2-6.2.9.0.fw
diff --git a/firmware/av8100.fw.ihex b/firmware/av8100.fw.ihex
new file mode 100644
index 00000000000..384a432ea43
--- /dev/null
+++ b/firmware/av8100.fw.ihex
@@ -0,0 +1,1281 @@
+:1000000080FECBFEA3EE5AF05AF0F8F159EFFDF165
+:1000100045F307F51CF52DF53FF552F557F55CF561
+:1000200061F566F57BF5B5F54CF69DEE9DEE9EEE21
+:10003000000000000000000000000081EAADECAD0F
+:1000400038201B7280814D514D514D514D514D51B4
+:100050004D51B420F42A5A9082FECD8000AF7293A5
+:10006000A3205F90808097908D200FAB01A6888998
+:100070009096AD97909AAD8B939EAD9790A2AD80E0
+:10008000B7A6AD81B7AAAD82B7AEAD263120087252
+:1000900082B721A681B7C2A680B701A6B420F52AEF
+:1000A0005A908000A77293CCAD492797905D90D469
+:1000B000AD82B7D8AD81B7DCAD80B7E0ADFFAE9013
+:1000C000815929EA265AFB382007724D38201A72C6
+:1000D000490038200572BCFFCD08AEF320792449D1
+:1000E0004424492324490BAD82FECD9E82FECD9F40
+:1000F0005B82FECD80B682FECD81B682FECD82B619
+:1001000082AD9F9086AD859084428C209F9001ABFC
+:100110000FA62A31200872FB382007724D38201AAA
+:1001200072BCFFCDFB382007723020107231201EC8
+:100130007230201C729505A09E94F6CC5F905F4FA4
+:1001400007382005720C39200572814129382018A2
+:1001500072D5265AFB382007729D9D4D38201A72A1
+:100160004DAEFFCD4DAEFFCD38201B724D4D4D4DE8
+:100170004D382019720425493820187208AE3420F1
+:1001800000000000000000191817161514131211B2
+:10019000100F0E0D0C0B0A090807060504030201D7
+:1001A000000000000000000000000000000000004F
+:1001B000000000000000000000000000000000003F
+:1001C000000000000000000000000000000000002F
+:1001D000000000000000000000000000000000001F
+:1001E000000000000000000000000000000000000F
+:1001F00000000000000000000000000000000000FF
+:1002000000000000000000000000000000000000EE
+:1002100000000000000000000000000000000000DE
+:1002200000000000000000000000000000000000CE
+:1002300000000000000000000000000000000000BE
+:1002400000000000000000000000000000000000AE
+:10025000000000000000000000000000000000009E
+:10026000000000000000000000000000000000008E
+:10027000000000000000000000000000000000007E
+:10028000000000000000000000000000000000006E
+:10029000000000000000000000000000000000005E
+:1002A000000000000000000000000000000000004E
+:1002B000000000000000000000000000000000003E
+:1002C000000000000000000000000000000000002E
+:1002D000000000000000000000000000000000001E
+:1002E000000000000000000000000000000000000E
+:1002F00000000000000000000000000000000000FE
+:1003000000000000000000000000000000000000ED
+:1003100000000000000000000000000000000000DD
+:1003200000000000000000000000000000000000CD
+:1003300000000000000000000000000000000000BD
+:1003400000000000000000000000000000000000AD
+:10035000000000000000000000000000000000009D
+:10036000000000000000000000000000000000008D
+:10037000000000000000000000000000000000007D
+:10038000000000000000000000000000000000006D
+:10039000000000000000000000000000000000005D
+:1003A000000000000000000000000000000000004D
+:1003B00000000000000000008101A60227FBD69289
+:1003C000FC3C0726FBD692FC3C0E26FBD692FC3C5E
+:1003D0001726FBD692FC3F81FAB7FBD8925CFAB69F
+:1003E000F9B7FBD8925CF9B6F8B7FBD89201AEF832
+:1003F000B6F7B7FBC892F7B6FCBF81FCBE84FAB76C
+:10040000FBD6925CF9B7FBD6925CF8B7FBD69201AB
+:10041000AEF7B7FBC69288FCBF81FAB7FBDA925CF5
+:10042000FAB6F9B7FBDA925CF9B6F8B7FBDA9201E3
+:10043000AEF8B6F7B7FBCA92F7B6FCBFBAFACCC4AF
+:10044000FBCD81FCBEE9264AFB003972FB006972D4
+:100450005AFB0069725AFB00687203AEFCBF1B278F
+:100460004DBAFACC0FFBCD81F12001ABFBD6920740
+:100470002B5A0A24FBD792FBDB9203AEFCBF81010F
+:10048000A60220FFA604240827FBD192FAB6FC3C62
+:100490000926FBD192F9B6FC3C1226FBD192F8B6A4
+:1004A000FC3C2326FBD192F7B6FC3F81FAB7FBD484
+:1004B000925CFAB6F9B7FBD4925CF9B6F8B7FBD404
+:1004C0009201AEF8B6F7B7FBC492F7B6FCBF81F75E
+:1004D000B7FBC992F7B6F8B7FBD9925AF8B6F9B795
+:1004E000FBD9925AF9B6FAB7FBDB92FAB603AEFC27
+:1004F000BF81F73C0224F8B7F8B99FF9B7F9BB42BE
+:10050000930F274D8400FBCDFFB60527FEBEFAB73B
+:10051000F9BF4293FFB6F8B7F7BF42FEB689FFB7FF
+:1005200081F73FF83FF9BFFAB781FCBE84FBD79251
+:10053000FAB65CFBD792F9B65CFBD792F8B601AE7F
+:10054000FBC792F7B688FCBF81FCBEFBD792FAB618
+:100550005CFBD792F9B65CFBD792F8B601AEFBC74D
+:1005600092F7B6FCBF8101A60227036D0426026D37
+:100570000826016D0E267D81F5264AFA36F936F8F1
+:1005800036F7340B274D8184FAB703E6F9B702E654
+:10059000F8B701E6F7B7F6888101A60227FA3D040D
+:1005A00026F93D0826F83D0E26F73D81F5264AF747
+:1005B00039F839F939FA380B274D81F7F7B9F601CF
+:1005C000E7F8B901E602E7F9B902E603E7FABB0387
+:1005D000E681F73FF83F81F7B7F8B7FFA6072AF99A
+:1005E000BFFAB78141FFBB41529384FFB7FFBB52B3
+:1005F000017BFEBEFFB7529F908881FCBEFAB7FB1D
+:10060000D6925CF9B7FBD6925CF8B7FBD69201AEF6
+:10061000F7B7FBC692FCBF81F73FF9B7F8BF5C01A3
+:1006200024F9BB4293F7B6FAB7F9BF4293F7BFE09C
+:10063000208FE326017B1A15CD0326B203CAB3032C
+:10064000C6DAEDCD032420A11227016B6AF6CDBBDB
+:10065000F6CD888155FACDF73FF83FF9B781F9B665
+:10066000FA3FE7FBCDFBB700A94101AB818484844D
+:100670008484062008359A1A2010721520C7EEA42B
+:100680001520C61420C77CA41420C61320C708A4B4
+:100690001320C609C0CD84848484848484846EDB62
+:1006A000CD47033B48033B49033B4A033B014B8097
+:1006B0004B854B804B1F2729FCCD47AEFB000335F4
+:1006C000B0840035B1840035B2840035B38413356D
+:1006D000A4840035A5840035A6840035A7840035A0
+:1006E000A0840035A1840035A2840035A3840035A0
+:1006F0000C8C00350D8C00350E8C00350F8C0035C0
+:10070000188C0035198C00351A8C00351B8C03357C
+:10071000048C0035058C0035068C0035078C0A35B5
+:10072000008C0035018C0035028C0035038C0035BF
+:100730001C8C00351D8C00351E8C00351F8C04353B
+:100740001FC1CDACC3CD248300352583003526835E
+:1007500002352783003584840035858400358684FE
+:10076000003587842035B4840035B5842035B684BF
+:100770000035B784A0350C20B5840035B68400352B
+:10078000B78480350E2420A1C803C618840035190B
+:100790008400351A8400351B84003578840035794F
+:1007A0008400357A8400357B8400353F2001353D57
+:1007B00020C7FFA63C20C706A6470301354803868D
+:1007C000354903A0354A03C70F20470302354803C4
+:1007D00049354903F0354A03C71C2048039B354976
+:1007E00003FC354A03C0352A2048038C354903BA37
+:1007F000354A0380353820480324354903F8354A03
+:1008000003C7492047035F7248035F7249035F7261
+:100810004A035F724C2708A03F274A34274A2927FA
+:100820004A1F270FA40820C6BE035F72BF03C7017B
+:10083000A63E20FE35C80310350A2003A63E202020
+:1008400035C803C721A6113F042023A61100033594
+:100850000827A3F9CD04A494F9CD5B172022A61193
+:100860000001350827A3F9CD02A494F9CD5B2A2015
+:1008700011B730A60627A3F9CD08A494F9CD5B01E2
+:100880006B9484C6026B9584C6036B9684C6046B16
+:100890009784C65A2510A10320C6F22510A1056B26
+:1008A0004CA2034F7205EE72056B4FAD025F72AE44
+:1008B000025F72B8035F7297025F7299025F72986B
+:1008C000025F72B3035F72B2035F7276030A357917
+:1008D000035F727A035F7283035F727B035F727CD4
+:1008E000035F727D035F7261035F72BD035F72C25B
+:1008F000035F724C035F724D035F7286025F724F3B
+:10090000035F7262035F7250035F729B025F729CAF
+:10091000025F729D025F724B035F72113F103F4E88
+:10092000035F72A0035F72A1035F72C3035F72C4AF
+:1009300003FF35C7035F72BC035F729B06205F72C3
+:1009400088888888888184E3ADF42540A1016B4CB8
+:10095000126F01EE72016B4F888105201072152015
+:100960001172101EADF9CC8BFFA606AE8185017BFE
+:100970009AA003C70FA4A003C6A2034F72A0035CF2
+:1009800072016BA203D6A003CE1827A003C1A10356
+:10099000C69B016B4F8881A103C70FA4A103C6A208
+:1009A00003D7A1035C72A103CE808602C75120C683
+:1009B00050201172808590FD0032FC0032FB003225
+:1009C000000132FF0032FE00328484848457F6CD69
+:1009D00063A60520B3035A72B2035A720426B30306
+:1009E000C60F27B203CAB303C67D035F72B2035FAB
+:1009F00072B3031835082500A2B203C619A0B303C9
+:100A0000C618207D030135062710A5047B016B1055
+:100A100040C6026B1140C6036B1240C6046B134004
+:100A2000C609C0CD4620C74620C641205F720A4293
+:100A3000200D7288888888FE003BFF003B00013B48
+:100A4000FB003BFC003BFD003B8990808590FD0056
+:100A500032FC0032FB0032000132FF0032FE003275
+:100A600057F6CD152010720727092B10B61220015A
+:100A700035FE003BFF003B00013BFB003BFC003B25
+:100A8000FD003B8990800C4000350D4000350E4044
+:100A900000350F400435112080358011204035800D
+:100AA0001120203580112010358011200835801745
+:100AB000201472801720157205172005721120046A
+:100AC0003580BB03C74A05201A7206201872112010
+:100AD000C702A680BB03C705201A7206201972112F
+:100AE00020C701A6800488003505883035068800B7
+:100AF00035078800351020803581B8FACDBCAEFBB3
+:100B000000843581FBB700A94101AB808590FD00D1
+:100B100032FC0032FB0032000132FF0032FE0032B4
+:100B2000FA0032F90032F80032F70032848484840B
+:100B30008484B8FACDA0AEFB008435C4FBCD33ADC0
+:100B40005BF73FF83FF91FD7FACDC903CECA03C6FA
+:100B5000036B80A4037B046B4F016BA084C6026B04
+:100B6000A184C6036BA284C6046BA384C68484C913
+:100B700003CFCA03C76EC0CD9802CE9902C6CB037D
+:100B80003BCC033B0F2052C0CD9802CE9902C6CB7E
+:100B9000033BCC033B112405E272CB03C606E07293
+:100BA000CC03C61F2405E2729802C606E0729902C1
+:100BB000C61C25CB03C2057BCC03C0067B0C249846
+:100BC00002C2057B9902C0067B5C20C903CF980254
+:100BD000CECA039902550D264C03C60D20CB03CE79
+:100BE000CA03C7CC03C60B274A4B03C607200135EF
+:100BF00007200335082604A105201D7205201C725C
+:100C0000082502A19702C61B269802C1057B222651
+:100C10009902C1067B29264A4B03C6CB035F72CCDF
+:100C200003C706A6FDF4CDF9B718AAF9B6E7FBCDC0
+:100C3000F5F4CD5B12200AA6FDF4CDF918E7FBCD43
+:100C4000F5F4CD5B102400A2057B0AA0067B1A25D3
+:100C500000A2057B06A0067B036BC7A4037B016B88
+:100C6000BC84C6026BBD84C6036BBE84C6046BBF66
+:100C700084C657274C03C6066BA783C6056BA6839D
+:100C8000C6CA035F72C9035F72CC035F72CB035F96
+:100C90007210204035888888888888F7003BF800E3
+:100CA0003BF9003BFA003BFE003BFF003B00013BF1
+:100CB000FB003BFC003BFD003B89908148830035F5
+:100CC000498300354A8300354B830F35808590FD7D
+:100CD0000032FC0032FB0032000132FF0032FE0025
+:100CE0003284848484BC035F725DC7CD20AD052649
+:100CF0004ABC03C60B2603A14B03C6C4035C7204A3
+:100D0000270FA1C403C60B20BB0301351126C40362
+:100D10005A721727C403C6112603EA72047B0727F9
+:100D20004C037B05264C047B1127037B04264A04D5
+:100D30007B7DDECD03EE729F036B04EF7203E272E4
+:100D40004104E0725F01A6036B01E272037B046B56
+:100D500002E072047B0E2403E2729F04E0724654A8
+:100D600001EF72026B46542903CE2A03C6082029DC
+:100D700003CE2A03C608264A2203C6E32603E172ED
+:100D8000A683C6EB2604E172A783C6046BA783C6BD
+:100D9000036BA683C605F3CC848484848B0235352B
+:100DA0008C0286358D0237358E02BD35C4030F3572
+:100DB000C703013534F3CD8AC0CD8F023B90023B8F
+:100DC00091023B92023B3126C703C62BC7CD0327B6
+:100DD000C403C61BF3CC032602A1BA03C605F3CC99
+:100DE000032702A14B03C61020203588888888FE7F
+:100DF000003BFF003B00013BFB003BFC003BFD00D8
+:100E00003B8990801020083581E7FBCDFBB700A916
+:100E10004102AB808590FD0032FC0032FB003200C5
+:100E20000132FF0032FE0032FA0032F90032F800DF
+:100E300032F700328484848484B8FACDFBBF84AE58
+:100E4000FA1A2FAD5B092004AEFB004435F73F3C96
+:100E5000AD5B026B0884C6036B0984C6046B0A840D
+:100E6000C6056B0B84C6B8FACDFBBF84AEFA1B5C1B
+:100E7000AD5B2D274D03C632279D02C6026B8484CD
+:100E8000C6036B8584C6046B8684C6056B8784C6DF
+:100E9000848484E8C8CD4102A941AFAB5F5221AE42
+:100EA000AD02C6024B104B004BAD025C720420AD8C
+:100EB000025F720626FFB1FFBF900D26FEB3FE3F14
+:100EC000AD02CE904A5A01264D5F3A2502A1AE02EC
+:100ED000C6EBC6CDBB035F7207264ABB03C6062618
+:100EE00004A14603C607264A4B03C6CFD5CDBD0392
+:100EF000C706264ABD03C661035C7204274C610322
+:100F0000C60A2061035F720627C403C6C5035C726C
+:100F10000424FFA1C503C6BC030135B8FACDA0AEB9
+:100F2000FB008435F71FEDF1CD5B026BA084C60397
+:100F30006BA184C6046BA284C6056BA384C684849B
+:100F400084848AC0CD8F023B90023B91023B920287
+:100F50003BC7035F72412504A1C503C665274A4B01
+:100F600003C6B8FACD9CAEFB008335F7B7F8B7F9E6
+:100F7000B74FFAB7016B9F83C60520147210200685
+:100F8000358888888888F7003BF8003BF9003BFAF1
+:100F9000003BFE003BFF003B00013BFB003BFC0035
+:100FA0003BFD003B899081FB008C35F7B7F8B7F922
+:100FB000B74FFAB7808590FD0032FC0032FB00325B
+:100FC000000132FF0032FE0032FA0032F90032F83E
+:100FD0000032F7003284B8FACD0CAE27ADF0A40190
+:100FE0007B62035F72052016720620C705AA0620E1
+:100FF000C61B2704A5017BB8FACD0CAE48ADF0A402
+:101000000520167206201472112062035A7247EFEF
+:10101000CD02A605264A05204F032502A16203C67C
+:10102000B8FACD10AE71AD6303D622276203CE5954
+:10103000270FA5017BB8FACD0CAE4CF0CD0FA4055F
+:10104000201672062012724F030135182720A501C1
+:101050007B98AD4F052780A5017B5103D7178CC620
+:1010600050035C725003CE0F2780A507205003CF9A
+:1010700006201372092710A520264F03CE016B0FFF
+:101080008CC61020103588F7003BF8003BF9003B78
+:10109000FA003BFE003BFF003B00013BFB003BFC3A
+:1010A000003BFD003B8990B8FACC0CAEFB008C35C0
+:1010B000F7B7F8B7F9B74FFAB7808590FD0032FC63
+:1010C0000032FB0032000132FF0032FE0032848425
+:1010D0008484A084C7017BA184C7027BA284C70348
+:1010E0007BA384C7047BBC030135016B7FA4017B18
+:1010F0000A20016B80AA017B122702A1BA03C60F46
+:101100002603A1042702A14B03C61A274C03C605D8
+:10111000264D03C6016BA084C6026BA184C6036B77
+:10112000A284C6046BA384C6C5035F72BA03C70357
+:10113000A48384C61FC1CD03264AB903C605201265
+:10114000721020013588888888FE003BFF003B0034
+:10115000013BFB003BFC003BFD003B8990801220E3
+:101160001035808113000135042712B7C084C6A74B
+:10117000F6CD11B71EEDCDA7F6CC11B701A6072409
+:1011800020A1C803C609206CEBCDD6206CEBCD05A1
+:101190002604A112B6062702A112B6112602A11139
+:1011A000B6B32057EBCD2A209CEACD2F2017EACDED
+:1011B000C22011B717EACD072602A111B68112008D
+:1011C000184D551300194D5514001A4D5515001B97
+:1011D0004D556E2520A1C803C6A7F6CD11B7C9E7A6
+:1011E000CD65202BE7CD6A2049E4CD9CF6CC8CE37D
+:1011F000CD7520ADE3CD7A20B4E2CD84EECC5EE2B5
+:10120000CD84EECC1EE2CD84EECC71E1CD84EECC6B
+:1012100047E0CD8188BCEDD688BDEDD658979CEED1
+:10122000CC03250FA14A7BEE5FEE5AEE55EE43EE5E
+:101230001FEE1AEE15EE0FEE0AEE05EEFFEDF9EDDC
+:10124000F3EDEDED81E7FBCDFBB700A94102ABF675
+:101250002001A6818585858585062611BE04204F3F
+:1012600026C0CD0AA69A7884C7027B7984C7037BFF
+:101270007A84C7047B7B84C7057BB8FACD78AEFB44
+:10128000008435F71836AD5B026B7884C6036B7942
+:1012900084C6046B7A84C6056B7B84C656C8CD01B0
+:1012A000A656C8CD4F9B492701E4724F01204C033D
+:1012B000264A11B6016B4F012001A6042655FACD2E
+:1012C000F83FF93FFA3F77AD5B026BC084C6036B12
+:1012D000C184C6046BC284C6056BC384C688888873
+:1012E000888881B8FACDB4AEFB00843581E7FBCDA8
+:1012F000FBB700A94101AB818585858585057BB854
+:10130000030135042702A111B60A264A13B6056B5C
+:1013100001A6042012009803550920D2C8CD052744
+:1013200085854D2F1ECD13BE14B6014B004B1E20DC
+:10133000850A1FCD13BE14B6014B9803C715B621FD
+:101340002033274D721FCD39209A5EADF91857AD65
+:101350005B9B016BB484C6026BB584C6036BB68419
+:10136000C6046BB784C626C0CDFAA69A1884003589
+:10137000198403351A8483351B84003514450035E0
+:1013800015450035164500351745023514EDCDF9E4
+:1013900016FA1209EDCD5B9B016BB484C6026BB5E6
+:1013A00084C6036BB684C6046BB784C68484848405
+:1013B000848484846EDBCD47033B48033B49033B75
+:1013C0004A033B044BC44BB44B004BEEECCCD2EC89
+:1013D000CC03264AC1ECCC03264AB9ECCC03264AFE
+:1013E00015274A12B6EEECCC032702A111B6F2EC97
+:1013F000CCBC03C74C9AB8FACD18AEFB008435F7C5
+:10140000B7F8B7F9B74FFAB712B69BF2ECCC03278F
+:1014100003A14B03C6056B01A6C2035F724B035FBA
+:10142000720C274A0F274A12274A15274AC203C6B9
+:10143000C0035F72C10301352320C0035F72C10383
+:10144000C702A602200AA6262006274A05274A0C1C
+:10145000274AC203C61D2420A1C803C6C203C77FF2
+:10146000A412B64B03033579264A11B6B8035F724E
+:101470009A18840035198400351A8400351B8400BD
+:10148000359B4B035F721A2702A111B6056B4F887B
+:1014900088888888814FD2C8CC032785854D2F1E28
+:1014A000CD11BE12B6804B004B8185858585854F59
+:1014B000848484E8C8CD00AE11A688047B88047BB0
+:1014C00088017BAE025C72042403A10826AE02C12F
+:1014D000047BE22205E17213B6056B4C9F90B202C9
+:1014E000D714E69005E6729705EB725221AE047BA5
+:1014F00015204FB102D713B6B002D712B6AF02D73C
+:1015000011B6975221AE432403A1047BE82503A121
+:10151000056B4C057B046B057B042611B1AF02D62D
+:10152000975221AE056B4F046BAE02C6036B10A63B
+:10153000026B02A67120026B03EF7206A94100AB99
+:10154000594859485948594859484A5A01264D5FFF
+:10155000016B0FA411B6222581A111B6016B4F8832
+:101560008888888881858585854F790301357A0348
+:10157000C74C082017B70C267B03C616B7047B019F
+:101580006B026B036B4F046B7E03C615B7017B14B4
+:10159000B7027B13B7037B12B7047B016B7F03C6D3
+:1015A000026B8003C6036B8103C6046B8203C61102
+:1015B000B74F01204C03267A03CE502079035F7287
+:1015C0007A03C74C5A2079035F727A035F725C27F3
+:1015D0004A17274A10274A11B678030835042712FC
+:1015E0003D78035F728888888881B8FACDB4AEFBF5
+:1015F00000843581E7FBCDFBB700A94101AB811029
+:101600004D0035114D0035124D0035819ACBCD017D
+:10161000A61CCACD044800350548003506480035EB
+:101620000748003531ADFA1A81858585854F9A7C4A
+:1016300003187288031A005589031B00558A031C7E
+:1016400000558B031D005584031E005585031F00A4
+:10165000558603200055870321005590031200553D
+:101660009103130055920314005593031500558CF4
+:10167000031600558D031700558E031800558F0370
+:10168000190055542628A111B65A207C03C710A46E
+:101690007C03C664207C031072062627A111B69B2A
+:1016A000FACDFBB748A94144AB5208AE11B668FA6F
+:1016B000CD12AE9BFACDFBB748A94140AB5208AE64
+:1016C00011B668FACD16AE1448880355154889033B
+:1016D0005516488A035517488B03551048840355FF
+:1016E000114885035512488603551348870355D87A
+:1016F000E9CDF81B02EACD5B016BB484C6026BB581
+:1017000084C6036BB684C6046BB784C64526113DF8
+:1017100077E9CC032528A111B6D1E9CC0DEACDF8A3
+:101720001B02EACD5B26C0CD0AA6016BB484C602BB
+:101730006BB584C6036BB684C6046BB784C67C03E2
+:1017400010726DE9CC032791A111B6F5E9CD134DC7
+:101750000335F5E9CD134D43350C4D1E00550D4DA8
+:101760001F00550E4D2000550F4D210055084D1AF4
+:101770000055094D1B00550A4D1C00550B4D1D0011
+:1017800055044D160055054D170055064D180055CA
+:10179000074D190055004D120055014D130055021B
+:1017A0004D140055034D150055C42755FACDF73F8C
+:1017B000F8B706A4F8B6F93FFA3F02EACD5B016B31
+:1017C000144DC6026B154DC6036B164DC6046B1740
+:1017D0004DC626C0CD0FA6F5E9CD134D0335F5E96D
+:1017E000CD134DC3352520026B036B046B4FD8E935
+:1017F000CDF81A02EACD5B016BB484C6026BB584E6
+:10180000C6036BB684C6046BB784C6622680A1117A
+:10181000B6F72492A111B6E7E8CC032480A111B653
+:101820009BD3E9CC01A6052420A1C803C60C2580C2
+:10183000A111B68888888881484200354942003520
+:101840004A42003581858585854F9A9DC5CD4D03DA
+:1018500001356E14CD03279D02C616AD4B420535EA
+:1018600004204B42013506277703C60CC4CDB8FAD5
+:10187000CDB4AEFB008435F916E7FBCDFBB700A96C
+:101880004101AB5B016BB484C6026BB584C6036BCC
+:10189000B684C6046BB784C6F4DACD502059AD4B7C
+:1018A00042053504204B42013506277703C613262F
+:1018B0004A4D03C66920CCC4CDACC3CD0826113D2A
+:1018C0009B77030135042077035F72062602A1117E
+:1018D000B68888888881B8FACD78AEFB00843581D7
+:1018E00018800035198000351A8000358108800085
+:1018F00035098000350A80003581585812BE81E7CD
+:10190000FBCDFBB700A94107AB81848B4100A94106
+:1019100009AB5B0A6B4F9ACCC4CD4C035F72BC849D
+:101920000035BD840035BE841835BF84003542AD16
+:101930001B808035148000351580003516800035F9
+:10194000178000354BAD0B800035048000350580D5
+:1019500000350680003507800035008000350180A5
+:101960000035028000350380003521E7CDFAB7F855
+:10197000A4FAB66AAD5B076B7884C6086B7984C637
+:10198000096B7A84C60A6B7B84C673209DC5CD4CD7
+:101990000301359084C7037B9184C7047B9284C77D
+:1019A000057B9384C7067BBEFBCDFBB700A9410333
+:1019B000AB5BF71EF7E6CD5B036BBFA4037B056B48
+:1019C000066B4F0F20F814F7E6CD5B036BBFA40343
+:1019D0007B046BC3A4047B056B066B4F192622A105
+:1019E000C803C69FFBCD10A6FBB700A94103AB5BA4
+:1019F000036B9484C6046B9584C6056B9684C606F7
+:101A00006B9784C682FBCD01A6FBB700A94107AB4B
+:101A10005B076B086B4F076B9884C6086B9984C68D
+:101A2000096B9A84C60A6B9B84C66DE6CC0325308D
+:101A3000A1C803C6CDD9CDBC840035BD840035BE58
+:101A4000841B35BF8440350420BF844135062614ED
+:101A50003D14E7CD1B808135B8FACD14AEFB008074
+:101A6000351DFACD5280AE30E4D602E7CD07E7CD82
+:101A70000B8065353080003531800035328000358F
+:101A800033808B352C8000352D8000352E8000353D
+:101A90002F80093528800035298000352A800035BF
+:101AA0002B802A3530264A30E4D602E7CD44810027
+:101AB000354581003546810035478132350C80003F
+:101AC000350D8000350E8000350F80083510800000
+:101AD00035118000351280003513800835B8FACDF5
+:101AE0005FFB0080351DFACD06AA4101EA72410272
+:101AF000EA7228AA5240AE2DE4D602E7CD01EF7279
+:101B0000026B5210AE15B6B8FACD04AEFB008035AC
+:101B10001DFACD4101EA724102EA7280AA5220AE5A
+:101B20002FE4D602E7CD01EF72026B5204AE2EE431
+:101B3000D602E7CD21E7CDFA10FA12FA14F7E6CD76
+:101B40005B076B7884C6086B7984C6096B7A84C698
+:101B50000A6B7B84C6B8FACDB4AEFB008435F916A7
+:101B6000F7E6CD5B076BB484C6086BB584C6096B1A
+:101B7000B684C60A6BB784C676E6CC03274A13B68A
+:101B80009BEBE6CC01A6052755FACDF73FF83FF9C8
+:101B9000B740A4F9B6FA3FF7E6CD5B076B9484C66D
+:101BA000086B9584C6096B9684C60A6B9784C68BAE
+:101BB0004100A2410AA05B000200010103010200F2
+:101BC0000200020002010300020102000301000002
+:101BD00003000081854F62035A729A0C8C00350D08
+:101BE0008C00350E8C00350F8C013504200F8C03D2
+:101BF0003506264A6203C6B8FACD10AEFB008C3516
+:101C0000F7B7F8B7F9B74FFA00630355B8FACD1C28
+:101C1000AEFB008C351DFACD5F0FA44E6303C69B4F
+:101C20000620C7FAA40620C65227E6256203C6622C
+:101C300003C1016B4C9F906303D712E69001E672DB
+:101C40009701E072102001A66303C712B66203C7B2
+:101C500011B68881854F4F035F72EF255003C10194
+:101C60006B4C9F14E75103D601EE720A204F13000C
+:101C700050035588814F9AA884290055A9842A00C9
+:101C800055AA842B0055AB842C0055B8FACD48AE2C
+:101C9000FB0084351DFACD27BE28B6B8FACD44AE78
+:101CA000FB0084351DFACD25BE26B6B8FACD40AE70
+:101CB000FB0084351DFACD23BE24B6B8FACD3CAE68
+:101CC000FB0084351DFACD21BE22B6B8FACD38AE60
+:101CD000FB0084351DFACD1FBE20B6B8FACD34AE58
+:101CE000FB0084351DFACD1DBE1EB6B8FACD30AE50
+:101CF000FB0084351DFACD1BBE1CB6B8FACD2CAE48
+:101D0000FB0084351DFACD19BE1AB6B8FACD28AE3F
+:101D1000FB0084351DFACD17BE18B6B8FACD24AE37
+:101D2000FB0084351DFACD15BE16B6B8FACD20AE2F
+:101D3000FB0084351DFACD13BE14B6B8FACD1CAE27
+:101D4000FB0084351DFACD11BE12B69B814FBD0339
+:101D500001352003CF1FBE2103C720B61E03CF1DB0
+:101D6000BE1F03C71EB61C03CF1BBE1D03C71CB678
+:101D70001A03CF19BE1B03C71AB61803CF17BE1913
+:101D800003C718B61603CF15BE1703C716B614033C
+:101D9000CF13BE1503C714B61203CF11BE1303C76A
+:101DA00012B6814F9A30420035314200353242003E
+:101DB0003533420035CFD5CD2BD7CD848484848470
+:101DC0008484846EDBCD47033B48033B49033B4A95
+:101DD000033B39033B3A033B3B033B3C033BFED213
+:101DE000CD9B8185858585854F9D0201359A8484AB
+:101DF000C7027B8584C7037B8684C7047B8784C72F
+:101E00009B056BFBA4057B042004AA057B06270128
+:101E10007B026B8484C6036B8584C6046B8684C690
+:101E2000056B8784C69A6E14CD9B05274D03C600AB
+:101E300010CD5A14CD9B02C712B6022004A6042668
+:101E400002A111B69AB8FACDFBBF84AEFA1BE7FB2C
+:101E5000CDFBB700A94102AB5B9B056B04AA057BD8
+:101E6000026B8484C6036B8584C6046B8684C605B6
+:101E70006B8784C616000135016B16B69C025F7233
+:101E800004209C0201350627173D888888888881B0
+:101E90008585858585017BBC0301359AB8FACDB46B
+:101EA000AEFB008435FA1FE7FBCDFBB700A941026A
+:101EB000AB5BB484C7027BB584C7037BB684C7041D
+:101EC0007BB784C7057BBEFBCDFBB700A94102AB46
+:101ED0005BFA1EF73FF83FF9B703A4F9B6FA3FE003
+:101EE000FACD5F90FE0001355F12B6046BFCA404CE
+:101EF0007B026BB484C6036BB584C6046BB684C620
+:101F0000056BB784C6CC840035CD840035CE844AB9
+:101F100035CF840035102520A1C803C6172603A19C
+:101F20004603C665C1CD1CBE20A69B4703220055B3
+:101F3000480323005549032400554A03250055143E
+:101F40002789FACD22AE4603C712B6C603C726B606
+:101F500026B69702C721B69802CF1FBE9902C720A6
+:101F6000B64003CF19BE4103C71AB64403CF17BE0C
+:101F70004503C718B63E03CF15BE3F03C716B6428A
+:101F800003CF13BE4303C714B669E1CCB903C74CF2
+:101F900069E1CC016B01A647274A11274A0D274D5D
+:101FA00097025F7299025F7298025F724B03C711CA
+:101FB000B6016B4F88888888880000BA420000B854
+:101FC000410AD7233C0000803F818B4100A9411486
+:101FD000AB5B84848484AFC0CD88127B88127B88FD
+:101FE000127B88127B0F208AC0CD88127B88127BDF
+:101FF00088127B88127B1127C403C6C7035F720453
+:102000002099FBCDFBB700A9410FAB5BE7FBCD8F60
+:10201000AEFB000235162BFFA20D7BFDA00E7B0A46
+:102020002B00A20D7B04A00E6B107B0D6B0F7B0FA2
+:102030006B6020C6106B6120C6116B6220C6126BEC
+:102040006320C66820263568202435642087025521
+:102050006520880255662089025567208A025568E6
+:102060002021356420C7057B6520C7067B6620C715
+:10207000077B6720C7087B60208B025561208C029C
+:102080005562208D025563208E02558702602055CF
+:10209000880261205589026220558A0263205568B2
+:1020A0002021356420C7017B6520C7027B6620C7DD
+:1020B000037B6720C7047B8B026420558C0265205C
+:1020C000558D026620558E02672055682064356460
+:1020D000208B025565208C025566208D02556720A5
+:1020E0008E0255682021356420C7097B6520C70A08
+:1020F0007B6620C70B7B6720C70C7B68202235B826
+:10210000FACD60AEFB0020351DFACD13EE72147BC4
+:1021100005E0CC032613EA72147BB8FACDFBB700B6
+:10212000A9410FAB5BF73FE7FBCD5FFB008435B800
+:10213000FACDFBB700A94109AB5BE7FBCD37AEFB9E
+:1021400000E03506203BAEFB00E03508264C61037D
+:10215000C6056B3FE0C6066B40E0C6076B41E0C6B4
+:10216000086B42E0C6016B43E0C6026B44E0C60365
+:102170006B45E0C6046B46E0C68B4100A24112A04D
+:102180005B898881F7B7F8B7F9B74FFAB781FBB722
+:1021900000A94101ABE2DDCC03A6E2DDCC02A681C1
+:1021A000204C8184848484B8FACD10AEFB00843541
+:1021B000F81EE7FBCD20AD5B1084C7017B1184C7FF
+:1021C000027B1284C7037B1384C7047B026B7FA44A
+:1021D000027BB8FACD40AD5BC4FBCDFBB700A94193
+:1021E00007AB5BF7B703A4F7B6F83FF93FFA3FE751
+:1021F000FBCD5DAD5B016B1084C6026B1184C60321
+:102200006B1284C6046B1384C6BEFBCDFBB700A95A
+:102210004107AB5B46FACD14A673DECD0B6B04A66B
+:1022200066DECC032608A061DECC032604A05EDEB9
+:10223000CC032602A0172702A01B204C1E2006A6B6
+:10224000222002A6262007A60C2710A00C2708A0F3
+:102250000C2704A0342702A03A2702A023240B7BDA
+:1022600020A1C803C6076B6020C6086B6120C609A1
+:102270006B6220C60A6B6320C66820263568202161
+:102280003568205235642000356520003566208091
+:1022900035672000359D9D682027356020930255C5
+:1022A000612094025562209502556320960255687C
+:1022B00020613568205235B8FACD64AEFB00203578
+:1022C00073DECD0B7B682022356020C7077B612041
+:1022D000C7087B6220C7097B6320C70A7B88888880
+:1022E00088816420003565200035662000358184B2
+:1022F000B8FACD10AEFB008435F9B755AAF9B6FA95
+:10230000B755AAFAB6F8B755AAF8B646FACD18A640
+:10231000F7B7F8B7F9B74FFAB7017BB8FACD14AEF3
+:10232000FB008435F7B73FA4F7B6F83FF93FFA3F13
+:1023300046FACD18A6E7FBCDFBB700A94104AB5B7D
+:1023400025240FA1077B26C0CD64A6848484848ABB
+:10235000C0CD8F023B90023B91023B92023B0F208B
+:10236000AFC0CD8F023B90023B91023B92023B11EA
+:10237000264A4B03C614840F351584003516840095
+:102380003517840035046B6020C6056B6120C606D6
+:102390006B6220C6076B6320C66820C74A9D9D6894
+:1023A00020C727A668205235642005356520F535FD
+:1023B0006620E13567200035602093025561209446
+:1023C0000255622095025563209602558F026020C7
+:1023D000559002612055910262205592026320556A
+:1023E0006820263568202135682052356420003564
+:1023F0006520003566208035672000356820273548
+:1024000093026020559402612055950262205596F2
+:1024100002632055682052356420C7047B6520C7BD
+:10242000057B6620C7067B6720C7077B6820C74AF5
+:102430006820C722A66020C7087B6120C7097B628D
+:1024400020C70A7B6320C70B7B68205235108402AB
+:1024500035118455351284553513845535016B0279
+:10246000A612DDCD67202835092001A612DDCD6733
+:102470002014350B2502A2087BDDA0097B1E204F0E
+:1024800012DDCD67200A350A2504A2087BD9A009F0
+:102490007B88812D03CE2E03C6B8FACD818484B803
+:1024A000FACD0CAEFB004135D7FACD412503C94129
+:1024B0002603CB17AD08AEFB004135D7FACD5C0142
+:1024C000264C26AD04AEFB004135D7FACD4123039F
+:1024D000C9412403CB4101E2724102E0722B03CED9
+:1024E0002C03C6B8FACD5FFB004135D7FACD5C01AD
+:1024F000264C4101E2724102E0722B03CE2C03C64E
+:10250000026B3003C6016B2F03C68888813303CE6C
+:102510003403C6B8FACD81412F03C9413003CB46FD
+:10252000542703CE2803C6812F03CE3003C6B8FA42
+:10253000CD8146FACD10A6D7FACD81FBB700A941CF
+:1025400001AB818484848484847C8300357D830012
+:10255000357E830F357F83FF35B8FACD6CAEFB0037
+:102560008335C4FBCD23AD5B2EAD43ADB8FACD2D85
+:10257000AD5BD7FACD4EAD0D2048AD39AD5BD7FA86
+:10258000CD2F03CE3003C610264A2203C6B8FACD9B
+:1025900068AEFB008335C4FBCD57AD5B62AD6DAD5E
+:1025A0005EAD5BD7FACD75AD64AEFB008335C4FB81
+:1025B000CD6FAD5B7AAD413503C9413603CB05EE36
+:1025C00072067BB8FACDBEDACD5BD7FACD05EE72D6
+:1025D000067BB8FACD60AEFB008335C4FBCDBEDA16
+:1025E000CD5BC6DACD413503C9413603CBEADACD3E
+:1025F000BEDACD5BD7FACDEADACD5CAEFB0083352F
+:10260000D7FACD46542703CE2803C605EF72066BD2
+:102610004A5A01264D3303CE3403C61020066B34CC
+:1026200003C6056B3303C60C264A2203C688888876
+:10263000888888812B03CE2C03C6B8FACD8133035A
+:10264000CE3403C6B8FACD814D412503C9412603D6
+:10265000CB81412F03C9413003CB46542703CE28F9
+:1026600003C681D7FACD41BE03C241BF03C031AD1D
+:10267000814D41BE03C241BF03C0412303C9412470
+:1026800003CB45AD812F03CE3003C6B8FACD8146CA
+:10269000FACD10A6D7FACD81FBB700A94101AB81D5
+:1026A0008B4100A9410EAB5B9883003599830035BF
+:1026B0009A8300359B831835B8FACD3CAEFB008376
+:1026C00035C4FBCD25AD5B30AD67ADB8FACD2FADD0
+:1026D0005BD7FACD72AD0D204AAD3BAD5BD7FACDDD
+:1026E0002F03CE3003C610264A2203C6B8FACD38CF
+:1026F000AEFB008335C4FBCD59AD5B64AD6FAD60FF
+:10270000AD5BD7FACD77AD34AEFB008335C4FBCDDE
+:1027100071AD5B7CAD413503C9413603CB07EE7229
+:10272000087BB8FACD61D9CD5BD7FACD07EE720838
+:102730007BB8FACD30AEFB008335C4FBCD61D9CD7B
+:102740005B69D9CD413503C9413603CBB9D9CD61D8
+:10275000D9CD5BD7FACDB9D9CD2CAEFB008335C42A
+:10276000FBCD61D9CD5B69D9CD413103C94132037C
+:10277000CB72D9CD61D9CD5BD7FACD72D9CD10AEA0
+:10278000FB008335C4FBCD61D9CD5B69D9CD4A5AF5
+:102790000126AFD9CD0DEE720E7BB8FACD61D9CD41
+:1027A0005BD7FACD4A5A01267CD9CD0CAEFB00830B
+:1027B00035C4FBCD61D9CD5B69D9CD0DEE720E7BF1
+:1027C000B8FACD61D9CD5B90D9CD08AEFB00833589
+:1027D000C4FBCD61D9CD5B69D9CD4A5A0126AFD9A9
+:1027E000CD0DEE720E7BB8FACD61D9CD5BD7FACDA7
+:1027F0004A5A01267CD9CD04AEFB008335C4FBCDFB
+:1028000061D9CD5B69D9CD0DEE720E7BB8FACD6181
+:10281000D9CD5B90D9CD40AEFB008335F73FF83F73
+:10282000F9B70FA4F9B6D7FACD05EE72067BB8FA60
+:10283000CD28AEFB008335F73FF83FF9B71FA4F969
+:10284000B6D7FACD2703CE2803C67884C7097B798B
+:1028500084C70A7B7A84C70B7B7B84C70C7B07EF1A
+:1028600072086B4A5A01264D3303CE3403C6066BF9
+:102870002A03C6056B2903C60A6B05AA0A7B20201A
+:10288000086B3403C6076B3303C605EF72066B593A
+:10289000482903CE2A03C60A6B01AA0A7B1F264ACF
+:1028A0002203C6096B80AA097B062604A14603C63B
+:1028B000096B20A60A6B0B6B0C6B4F0D6B0EEF7246
+:1028C0003303C9413403CB2D03CE2E03C68B410005
+:1028D000A2410EA05B8184848484BB030135B8FAD5
+:1028E000CDB4AEFB008435F716E7FBCDFBB700A9EE
+:1028F0004101AB5B016BE3A4017B016BB484C602B5
+:102900006BB584C6036BB684C6046BB784C630252A
+:1029100020A1C803C6B8FACD78AEFB008435C4FB4D
+:10292000CDFBB700A94101AB5BF7B703A4F7B6F83D
+:10293000B7C0A4F8B6F93FFA3F46FACD16A6F7B7E6
+:10294000F8B7F9B74FFA003D0355016BFCA4017BC2
+:10295000026B3FA4027B016B7884C6026B7984C64C
+:10296000036B7A84C6046B7B84C6B8FACD74AEFB65
+:10297000008435D7FACD2003CE2103C6B8FACD7036
+:10298000AEFB008435D7FACD1E03CE1F03C6B8FABE
+:10299000CD6CAEFB008435D7FACD1C03CE1D03C62B
+:1029A000B8FACD68AEFB008435D7FACD1A03CE1B3A
+:1029B00003C6B8FACD64AEFB008435D7FACD180350
+:1029C000CE1903C612206484003565840035668400
+:1029D00002356784CB35122502A21803C6CBA01995
+:1029E00003C61E2602A1C203C6252603A14B03C6A9
+:1029F0002C2420A1C803C6B8FACD60AEFB008435F4
+:102A0000D7FACD1603CE1703C6B8FACD5CAEFB00DD
+:102A10008435D7FACD1403CE1503C6B8FACD58AE17
+:102A2000FB008435D7FACD1203CE1303C6888888FD
+:102A30008881FBC692FCBFFBB781FBD692FC3FFBB3
+:102A4000B781842003CF1803CE2103C71903C6180A
+:102A500003CF1903C74A5A01264D59484003CE41B6
+:102A600003C60B200E264D4003CE4103C60B264A5B
+:102A70002203C61E03CF1603CE1F03C71703C617B4
+:102A8000035F7216035F721C03CF1403CE1D03C7CE
+:102A90001503C61403CF1503C74A5A01264DFA265B
+:102AA0005A90465406273D03CE903E03CE3F03C6C0
+:102AB0001A03CF1203CE1B03C71303C613035F729F
+:102AC00012035F723D035F72042604A1C203C60BAA
+:102AD0002603A10F202D035F722E03233519260331
+:102AE000A1C203C6202603A115244B03C620A1C8FA
+:102AF00003C6400325035541032603554403290318
+:102B00005545032A03553E032303553F032403552C
+:102B10004203270355430328035597D3CCC7D5CD8C
+:102B2000D0A94109AB521CAE017BB8FACD39AEFB3E
+:102B3000000335E9F9CDFBB7D0A94105AB521CAE76
+:102B4000017B3803C7C7D5CDD0A94104AB521CAE19
+:102B5000017B3703C7C7D5CDD0A94103AB521CAE0B
+:102B6000017B3603C7FBD692FC3C3503C7BFD5CDEE
+:102B7000D0A94101AB521CAE017B33035F72340319
+:102B800001353203C7FBD692FC3C3103C7BFD5CD1C
+:102B9000CFA941FDAB521CAE017B3003C7FBD692DF
+:102BA000FC3C2F03C7BFD5CDCFA941FBAB521CAE18
+:102BB000017B2E03C7FBD692FC3C2D03C7BFD5CDAE
+:102BC000CFA941F9AB521CAE017B2C03C7FBD692B7
+:102BD000FC3C2B03C7BFD5CDCFA941F7AB521CAEF0
+:102BE000017B2A03C7FBD692FC3C2903C7BFD5CD86
+:102BF000CFA941F5AB521CAE017B2803C7FBD6928F
+:102C0000FC3C2703C7BFD5CDCFA941F3AB521CAEC7
+:102C1000017B2603C7FBD692FC3C2503C7BFD5CD5D
+:102C2000CFA941F1AB521CAE017B2403C7FBD69266
+:102C3000FC3C2303C7BFD5CDCFA941EFAB521CAE9F
+:102C4000017B2203C7C7D5CDCFA941EEAB521CAE45
+:102C5000017BE6D4CC0ED5CC032702A1E6D4CC036D
+:102C60002603A14B03C63D03C729B639032A0055E5
+:102C70003A032B00553B032C00553C032D005537E0
+:102C800003C713B613B63503CF22BE3603C723B628
+:102C90003303CF20BE3403C721B63103CF1EBE326B
+:102CA00003C71FB62F03CF1CBE3003C71DB62D03AD
+:102CB000CF26BE2E03C727B62B03CF24BE2C03C7B7
+:102CC00025B62903CF18BE2A03C719B62703CF1488
+:102CD000BE2803C715B62503CF1ABE2603C71BB6E9
+:102CE0002303CF16BE2403C717B62203C728B6AEE8
+:102CF000D3CC03241DA111B60927113D016B4A1144
+:102D0000B68800F0701C050101030003008F004627
+:102D1000001B00AA011E030007000350050100600C
+:102D2000A01805010106000300700040001800B063
+:102D3000011B030007000350050100E01BFA04011A
+:102D40000006000300800048001C0090013F039033
+:102D500006200300050100C05F3B040001060003DC
+:102D600000200030001400A0003703A0052003005D
+:102D7000050100E012BD04010007000300800040CF
+:102D8000001B0080011E0380060003000501009067
+:102D9000691104000107000300200030001300A0A7
+:102DA000001603A00500030005010040D2DF030068
+:102DB000000600030088001800230040012603409D
+:102DC00005000300040100F0FB02020101080006F7
+:102DD00000700010001F00F00005024004E00150E8
+:102DE000030100005A620201010400010080002872
+:102DF000001B00000174022004580220030100108F
+:102E0000F76C040101050001002800E0061900E448
+:102E100007EE02E40CD0020005010010F76C04017B
+:102E20000105000100280074091900780AEE0278F3
+:102E30000FD00200050100405F8A03010105000177
+:102E4000002800E0061900E407EE02E40CD00200BE
+:102E500005010010F76C040101050001002C005869
+:102E60000029001801650498083804800701001043
+:102E7000F76C040101050001002C0010022900D0AC
+:102E8000026504500A38048007010010F76C040141
+:102E900001050001002C007E0229003E036504BEEE
+:102EA0000A38048007010020EED90801010500015D
+:102EB000002C0010022900D0026504500A3804805A
+:102EC00007010080F93703000005000100800018A9
+:102ED000002C0020017102C0064002A0050101C0C3
+:102EE000FC9B010000030001007E0018001600207A
+:102EF000017102C0062001A005000010F76C04015A
+:102F000001050001002C0010021400D002650450DD
+:102F10000A1C028007000010F76C04010105000183
+:102F2000002800B8011900BC02EE02BC07D0020064
+:102F3000050100C0FC9B0100000500010040000CE1
+:102F4000002C009000710260034002D002010020BA
+:102F5000EED9080101050001002C005800290018D5
+:102F6000016504980838048007010080F9370300E0
+:102F700000060007007C002000240014010D02B4AC
+:102F800006E001A0050101C0FC9B01000003000454
+:102F9000007C002600120014010D02B406F000A00F
+:102FA00005000010F76C040101050001002C005819
+:102FB0000014001801650498081C02800700001026
+:102FC000F76C0401010500010028006E0019007271
+:102FD00001EE027206D00200050100C0FC9B010058
+:102FE00000060007003E00100024008A000D025A6F
+:102FF00003E001D002010080858001000002000191
+:1030000000600010002300A0000D022003E00180FA
+:10301000020181848B4100A94108AB5B096B016B04
+:103020004F012001A604269803C1057B0B2604E16D
+:10303000729903C608279803C6B72655FBCDFBB780
+:1030400000A94102AB5BE7FBCDFBB700A94106AB92
+:103050005B066B3048C6076B3148C6086B3248C602
+:10306000096B3348C684842F1ECD74AE08A6024B6C
+:10307000004B026B067B036B077B046B087B056BC5
+:10308000097B066B3048C6076B3148C6086B32486F
+:10309000C6096B3348C6016B4F8B4100A24109A0A2
+:1030A0005B8118E7FEC692FFBF9019CECC17B6170A
+:1030B000B780AA027BC62505A10D6B4C0D7BD225DE
+:1030C00004A10C6B4C0C7B18CECC0327A0E118E6B6
+:1030D0009097900CEB720BEB7248480D7B9701EBCD
+:1030E0007248480D7B016B0CE07203A60C6B4F0D10
+:1030F0006B4F84842F1ECD74AE20A6144B8807AB73
+:103100000B7B761DCD84B417CD00AE18A6880AAB14
+:103110000B7BFD16CDAE2504A10D6B4C0D7B74AD64
+:10312000FE3C02249790FFBB0DE07203A693FFBF05
+:10313000FEB700A94107AB5B93909706AB0DEB720E
+:103140000B7B56CFCDFE3C02249790FFBB0DE07267
+:1031500003A693FFBFFEB700A94103AB5B93905C4E
+:103160005C970DEB720B7B0D6B4F076B2C48C60801
+:103170006B2D48C6096B2E48C60A6B2F48C6036BD9
+:103180002848C6046B2948C6056B2A48C6066B2B1F
+:1031900048C6F02505A10D6B4C9F18E79803D60D86
+:1031A000EE720D6B12264A027B84842F1ECD74AE04
+:1031B00043A6880C7B074B19E79903C618E79803C9
+:1031C000C60BEE724DCFCC0326027B0B6B5205AEC5
+:1031D000E82414A1026B7FA49803C681848B41006C
+:1031E000A9410CAB5B0D6B4F0D99030772052002D3
+:1031F00098030F7284842F1ECD74AE41A6024B003B
+:103200004B192660A160A49803C684842F1ECD7438
+:10321000AE40A6014B004B8B4100A2410DA05B814B
+:103220007B03C67B0301350498030D7284842F1E33
+:10323000CD74AE40A6014B004B7B035F7281859A33
+:10324000B8FACD14AEFB004835F7B7F8B7F9B74F69
+:10325000FA007E035510487F03551148800355122C
+:103260004881035513488203559BCD2505A1016B69
+:103270004C017BBEFBCD7FAEFB00033546FACD484B
+:1032800048489FF7B7F8B7F9B74FFAB79803D60190
+:10329000EE721F207E039C0355072604A1016B4F8D
+:1032A00084847F035F7280035F7281035F72820395
+:1032B0005F722F1ECD74AE4F054B004B8881848BFF
+:1032C0004100A94108AB5B096B016B850A1FCD74F6
+:1032D000AE10A6054B9C03C7057B026B9003C6038B
+:1032E0006B9103C6046B9203C6056B9303C69B03E5
+:1032F000C7027B9A03C7037B9903C7047B9803C764
+:10330000057B026B8C03C6036B8D03C6046B8E03B7
+:10331000C6056B8F03C6016B850A1FCD74AE18A658
+:10332000084B9F03C7067B9E03C7077B9D03C70807
+:103330007B9C03C7097B066B1C48C6076B1D48C6F0
+:10334000086B1E48C6096B1F48C69B03C7067B9ABD
+:1033500003C7077B9903C7087B9803C7097B066BE4
+:103360001848C6076B1948C6086B1A48C6096B1B74
+:1033700048C6016B4F8B4100A24109A05B81858546
+:10338000858585017B016B01A6042614A1027B0AB9
+:103390002614A1037BD12505A1056B4C057BDD25FA
+:1033A00008A1046B4C047B98034472026B4C027BB3
+:1033B0000520036B4C037B072601A49803D605EE7A
+:1033C00072046B4F056B4F44204F032785854D2FAB
+:1033D0001ECD74AE054B004B9A144890035515480A
+:1033E0009103551648920355174893035510488C7E
+:1033F000035511488D035512488E035513488F030A
+:10340000559B036B026B016B4F888888888881FB12
+:10341000004835F7B7F8B7F9B74FFAB78185858512
+:1034200085017B9AB8FACD0CAE0DAD037B9B016B89
+:1034300001A60426027B04264A037BEE26027B06B5
+:1034400026037B026B4A027B036B0F48C60A209A55
+:10345000B8FACD5F38AD047B9B026B32A6016B03DB
+:103460006B4F8888888881E7FBCDFBB700A94105B1
+:10347000AB8184848484848484849AB8FACD04AE35
+:10348000FB004835FA15F7B7F8B7F9B74FFAB703A5
+:10349000AA3FA4087B9B056B0448C6066B0548C67B
+:1034A000076B0648C6086B0748C69AB8FACD5FFB9B
+:1034B000004035C4FBCDFBB700A94101AB5BFA145A
+:1034C0004FAD5BB8FACDFBB700A94101AB5BF7B7D5
+:1034D000F8B7F9B74FFA007803559B056B0040C663
+:1034E000066B0140C6076B0240C6086B0340C69AD4
+:1034F000B8FACDB4AEFB008435FA1AFA198FCBCDE9
+:103500005B9B056BB484C6066BB584C6076BB6843B
+:10351000C6086BB784C6888888888888888881E7C9
+:10352000FBCDFBB700A94101AB81848484849AB8A8
+:10353000FACD04AEFB004835FA1411AD5B9B016B6C
+:103540000448C6026B0548C6036B0648C6046B07F1
+:1035500048C69AB8FACD5FFB004035FA1534AD5B2A
+:103560009B016B0040C6026B0140C6036B0240C664
+:10357000046B0340C68888888881858585854F9A35
+:10358000B8FACD04AEFB004835F73FF83FF93FFAF3
+:10359000B730A4FAB6F7B7F8B7F9B74FFAB7047B64
+:1035A0009B016B0448C6026B0548C6036B0648C600
+:1035B000046B0748C69AB8FACD5FFB004035FA1590
+:1035C000E7FBCDFBB700A94101AB5B9B016B004062
+:1035D000C6026B0140C6036B0240C6046B0340C6C3
+:1035E00088888888819A9BFACDFBB740A94100ABB7
+:1035F00081FBB700A94107AB818B4100A9410DAB0D
+:103600005B9AB8FACD30AEFB004235C4FBCDFBB7B8
+:1036100000A94101AB5B46FACD4848127BF73FF861
+:103620003FF93FFA0003359B016B3042C6026B3114
+:1036300042C6036B3242C6046B3342C639C9CC035F
+:103640002506E172FBD69202AEFCB70D7BFBB70CF0
+:103650007B066B04AB067B67AD5C012406EB721046
+:10366000EE72117BE7FBCD6FAD5B9BAF2504A10B29
+:103670006B4C0B7BFBC792FCBF85FB0032FBD692E9
+:1036800003AEFCBFFBB70EE972410FEB725F4A0756
+:103690006B4C077BFB003B89FB3C022497FCBB0B7C
+:1036A000E07203A6FCBF08CACD5B362505E172FBBC
+:1036B000D69202AEFCB70D7BFBB70C7B0B6B076B96
+:1036C000086B096B0A6B4F7420056B4F106B00A9D8
+:1036D000107B116B04AB117B10CACD10EE72117B05
+:1036E000E7FBCD08CACD5B9B076B4F086BFBD692FF
+:1036F0005C096BFBD69201AEFCB70D7BFBB70C7B74
+:103700000A6BFBC692FCB70D7BFBBF0CEE728B41C4
+:1037100000A2410BA05B89888101A61500DE35144B
+:1037200000C0351300AD351200DE35A7F6CD81841B
+:103730008484848493C7CDAC2510A1046B4C047B96
+:10374000C22508A1056B4C057BDBC7CD03275D03B4
+:10375000264D4101E4724102E472FA265A9059481A
+:10376000062705E67201A601EF72026B5F037B007C
+:10377000C8CD03209AAD0426057B0826047B056B83
+:103780004F036B12E69704E0720FA6046B4FF5250A
+:1037900010A1046B4C047B97AD046B4FDBC7CD933A
+:1037A000C7CDBDC7CD888888888881B3840035B0EF
+:1037B000840035B184003581B0840035B184003592
+:1037C000B28400350DADB284033513ADB284023539
+:1037D00019ADB2840035B384003581B0840035B1B1
+:1037E000840035B2840035B38400358101ADB084E6
+:1037F0000035B1840035B2840135B384003513AD92
+:103800008164A6B0840035B184003526C0CC03ADF8
+:10381000B2840035B384003526C0CD10ADB2840427
+:1038200035B384003581AD840035AE840035AF8476
+:10383000003581AC84003505AD81AC8480350CAD9C
+:1038400007274D26C0CC64A6B0840035B18400356E
+:10385000B2840035B384003526C0CD0AA6B08400FA
+:1038600035B1840035B2841035B384003581848449
+:103870008484BB030135B8FACD5FFB008435F71AA9
+:10388000E7FBCDFBB700A94101AB5B016B0084C630
+:10389000026B0184C6036B0284C6046B0384C68872
+:1038A0008888888184848484B8FACD5FFB0084355D
+:1038B000F71BE7FBCDFBB700A94101AB5B016B0038
+:1038C00084C6026B0184C6036B0284C6046B038446
+:1038D000C68888888881B8FACD78AEFB00843581A7
+:1038E000E7FBCDFBB700A94101AB81848484841040
+:1038F000ADFA1D09AD5B17ADFA1C10AD5B016B781D
+:1039000084C6026B7984C6036B7A84C6046B7B849D
+:10391000C68888888881D7FACD9802CE9902C68158
+:1039200084848484A084C7017BA184C7027BA28491
+:10393000C7037BA384C7047B036B80AA037B046B50
+:1039400006AA047B0C20BEFBCDFBB700A94101AB4E
+:103950005BF91E34AD112400A29802C606A099029C
+:10396000C60C274C03C6222502A19702C629264A67
+:103970004B03C6036B80A4037B046B4F016BA084D5
+:10398000C6026BA184C6036BA284C6046BA384C663
+:103990006C2502A19702C673264A4B03C60D274A1F
+:1039A0004C03C6B8FACD78AEFB008435F71CE7FBB4
+:1039B000CDFBB700A94101AB5B016B7884C6026BFC
+:1039C0007984C6036B7A84C6046B7B84C648830003
+:1039D00035498300354A8300354B830F3510264A1D
+:1039E0002483003525830035268300352783003561
+:1039F0004D274B03C6B8FACDA4AEFB008435F91EA3
+:103A0000F73FF83FF91FE1C6CD142603A1042701B3
+:103A1000A19702C61F264A4B03C6B8FACDA0AEFB3B
+:103A2000008435F71FF93FFA3F46FACD10A6D7FAC2
+:103A3000CD5C012402AB3303CE3403C60B20C0039C
+:103A4000CEC103C62A264A0B2702A04B03C6C703D2
+:103A50005F725DC6CC03274D03C608274C03C6889A
+:103A60008888888184848484A084C7017BA184C7DA
+:103A7000027BA284C7037BA384C7047BBEFBCDFB70
+:103A8000B700A94101AB5BF91ED7FACD9802CE99D8
+:103A900002C6036B80A4037B046B4F1F2502A19712
+:103AA00002C626264A4B03C6036B7FA4037B016B29
+:103AB000A084C6026BA184C6036BA284C6046BA358
+:103AC00084C65A264C03C65F202483003525830014
+:103AD000352683023527830035A0840035A1840074
+:103AE00035A2840035A3840035A4840035A5840064
+:103AF00035A6840035A7840035B8FACD78AEFB0032
+:103B00008435F71DE7FBCDFBB700A94101AB5B0195
+:103B10006B7884C6026B7984C6036B7A84C6046BA7
+:103B20007B84C6C703C75F264C03CE64264D03C6FD
+:103B30008888888881E7FBCDFBB700A94101AB816C
+:103B4000848484840040C7017B0140C7027B02401B
+:103B5000C7037B0340C7047BB8FACD5FFB00403549
+:103B6000F71E23AD5BB8FACD1CAEFB004035F7B7AE
+:103B7000F8B7F9B74FFAB713B614201C4000351D3B
+:103B80004000351E4000351F400035046B02AA047A
+:103B90007B1826123D046B40AA047B06263803C618
+:103BA000046B10AA047B06263703C6016B026B0365
+:103BB0006B4F046B01A6B8FACDB4AEFB008435FAA6
+:103BC00018F918C1C4CD5B016BB484C6026BB5840F
+:103BD000C6036BB684C6046BB784C626C0CDFAA6EE
+:103BE0001445003515450035164500351745023595
+:103BF00088888888810140003502400035034000F4
+:103C00003581848484844D035F72B8FACDB4AEFBF1
+:103C1000008435F919E7FBCDFBB700A94101AB5B87
+:103C2000016BB484C6026BB584C6036BB684C6044C
+:103C30006BB784C61445003515450035164500356B
+:103C4000174501350040003547AD004080354DAD8A
+:103C50008888888881B8FACDB4AEFB008435F91E17
+:103C600081AC840035AD840035AE84003581FBB76E
+:103C700000A94105AB81FBB700A94101AB81FBB7AE
+:103C800000A94109AB810088003501880035028810
+:103C9000003581F914FA14F73FF83FF9B701A4F998
+:103CA000B6FAB7F8A4FAB6D1F9CD08AE905FC6035C
+:103CB000C6818B4100A9410EAB5B28AD038801355D
+:103CC00008880035098830350A8800350B880035AA
+:103CD000B8FACD0CAEFB008835FA1EF7B7F8B7F985
+:103CE000B74FFAB707A40D7B18880035198800353F
+:103CF0001A88FF351B88FF35148800351588003574
+:103D00001688FF351788FF351088003511885F3514
+:103D10001288FF351388FF356EC3CD03880035A0A8
+:103D2000C3CDF71EE7FBCD83C3CD5B0C2621A110CD
+:103D3000209084C7057B9184C7067B9284C7077B4C
+:103D40009384C7087BB8FACD8BC3CD5BC4FBCD830E
+:103D5000C3CD5BC4FBCD7BC3CD5BF71EE7FBCD8B37
+:103D6000C3CD5B056BBFA4057B066BC3A4067B07B5
+:103D70006B086B4F9FFBCD10A68BC3CD5B9FFBCD1C
+:103D800010A67BC3CD5B096B0A6B0B6B4F0C6B3CB6
+:103D9000A60B2082FBCD04A67BC3CD5B0B2455FB79
+:103DA000CD61AEFB00C135E7FBCD7BC3CD5B096BBD
+:103DB0000A6B0B6B4F0C6B3CA4087B3E20096B4FCE
+:103DC0000A6B04A60B6B0C6B4F0E2430A1C803C604
+:103DD000056B9484C6066B9584C6076B9684C608EB
+:103DE0006B9784C6016B026B4F016B9884C6026BA4
+:103DF0009984C6036B9A84C6046B9B84C6A0C3CD0A
+:103E0000F7B760AAF7B6E7FBCD83C3CD5BD1C2CCD1
+:103E1000032422A1C803C6016BB484C6026BB58417
+:103E2000C6036BB684C6046BB784C693C3CDAF8498
+:103E30002035B8FACD7CAEFB008435F714F81AF8BB
+:103E4000104FC3CD0920F81AF8104FC3CD1220F837
+:103E5000104FC3CD19204FC3CD272017274A112754
+:103E60004A0D274A0B274A0D7B93C3CDAF840035FB
+:103E70007C8400357D8400357E8400357F84003568
+:103E80006EC3CD03880035D8C0CD212520A1C8033D
+:103E9000C68B4100A2410CA05B89883C00000081D8
+:103EA000AC840035AD840035AE840035AF84003578
+:103EB0007C8400357D8400357E8400357F84003528
+:103EC00000880035018800350288003503880035F8
+:103ED000A8AD022520A1C803C64B035F72B9035FDA
+:103EE000728184848484B484C7017BB584C7027BD7
+:103EF000B684C7037BB784C7047BB8FACDB4AEFBE6
+:103F0000008435F81CE7FBCDFBB700A94101AB5B92
+:103F1000016BB484C6026BB584C6036BB684C60459
+:103F20006BB784C68888888881E7FBCDFBB700A97A
+:103F30004103ABB8FACC5FFB008435F70030350C99
+:103F4000AD5BB8FACD5FFB008435F71AF73F1BADC8
+:103F50005B81F73FE7FBCDFBB700A94103ABB8FAA4
+:103F6000CC5FFB008435F7180AAD5BB8FACD5FFB78
+:103F700000843515AD5B818590859001EE72027BE2
+:103F8000052005EE72067B072405E2729F06E072AB
+:103F90008988818590859005EE72067B052001EE6B
+:103FA00072027B072405E2729F06E0728988818491
+:103FB00084E926FBB6ED265D016B00A2FBB7017B11
+:103FC000026B01A097027B9D012001EF72026B063C
+:103FD000FACD03AE90FE3F5F88888141204035488E
+:103FE000205F7246205F724520C701A60220D8A636
+:103FF000042420A1C803C6819AB203CFB303C79B90
+:1040000000000000000000000000000000000000B0
+:1040100000000000000000000000000000000000A0
+:104020000000000000000000000000000000000090
+:104030000000000000000000000000000000000080
+:1040400000000000814F13B75320C6502012728128
+:1040500001A603274DB61DCD88A612B75320C65022
+:104060002015728101A603274DB61DCD88A68101BA
+:10407000A603274D131ECD41A68101A603274DFEA1
+:104080001DCD86025F725020203552200935814DAA
+:10409000B61DCD88A6502020355320C7C5204F50CF
+:1040A000201272EC2506E172016B4C017BD42613C1
+:1040B000AD9803D601EE720D204F172760A1027B49
+:1040C00026C0CD05A6EC262BAD037BF2264DB61DF2
+:1040D000CD88A68185858501A606274D131ECD02B4
+:1040E0007B08264DFE1DCD86025F72502020355282
+:1040F000200935888988814DB61DCD88A64F1ECCF4
+:104100004F26C0CD05A6952506E172017B9803D701
+:104110005320C6016B4C9F01EE720D2011E7532016
+:10412000C69707EB724A016B4C017B112509A1066A
+:104130007B502022350426FFB1FFBF900B26FEB333
+:10414000FE3F01E6724A5A01264D5F067B5020155C
+:10415000720426FFB1FFBF900B26FEB3FE3F01E6BF
+:10416000725A012402A05F1A2502A1067BBA2672A8
+:10417000AD5020157204264A067BC7264D90AD012E
+:10418000AA027BD0264D84AD26C0CD05A6DA260333
+:104190001FCD502020355320C7037B02205280AE14
+:1041A000037B072680A1067BF526031FCD8185852D
+:1041B0008501A606274DC9AD027B07264DBBAD86FE
+:1041C000025F72016B4F50202035522009358889DB
+:1041D00088814F5020243526C0CD05A68101A60335
+:1041E000274DB7AD82A6502021355320C7814F26D9
+:1041F000C0CD05A68101A603274DCFAD81A65020D5
+:10420000293581854F86025F72818501A686025F0E
+:1042100072502022350C86020D72052601E17201D2
+:10422000E472FA278602C68F01208881854F818536
+:1042300001A650200535082701E17201E4725120E2
+:10424000C6FA51200F729D0120880218CCF5B545A1
+:10425000F4B445F33FF23FF13FF03FEF3FEE3FF65E
+:104260002538A3F6BEB66FF63C04200218CDF4251F
+:1042700040A1F6B6B66FF63CF6BE0820172539A168
+:10428000F6B6B6E780A6F63CF6BE8106FCCDFBB7D7
+:1042900000A941C6AB5204AE8106FCCDFBB700A914
+:1042A00041CEAB5204AE8106FCCDFBB700A941FA6A
+:1042B000AB5204AE81E9F9CDFBB700A941E6AB52A0
+:1042C00004AE81FBB700A94101AB81F7B7F8B7F99C
+:1042D000B74FFAB7B6E681B8FACDFBB700A9410DE2
+:1042E000AB810FFBCDFBB700A94111AB81FBB70040
+:1042F000A94119AB81FBB700A94105AB81FBB70010
+:10430000A94115AB81E7FBCDFBB700A9411DAB81EE
+:10431000FBB701A94106AB5204AE81FBB700A9412E
+:1043200009AB81E7FBCDFBB700A94121AB818B41F4
+:1043300000A94125AB5BF63F2FFACDB0AEE7FBCD30
+:10434000FBB700A94111AB5B2FFACDACAEE7FBCDBB
+:104350005CAD5B2FFACDA8AEE7FBCD57AD5B2FFA76
+:10436000CDA4AE36AD5B2FFACDA0AE5CAD5BC61B67
+:10437000CC032450A1256B4C257B1D6B0D7B1E6B44
+:104380000E7B1F6B0F7B206B107B216B1D7B226BC9
+:104390001E7B236B1F7B246B207BB8FACDFC1CCDCE
+:1043A0005BC4FBCDDE1CCD5B46FACD1EA6D31CCD77
+:1043B0005BB8FACDDE1CCD5B7AFACD02A6D31CCD5C
+:1043C0005B196B157B1A6B167B1B6B177B1C6B18B1
+:1043D0007B116B197B126B1A7B136B1B7B146B1C91
+:1043E0007B1F1DCD5B0FFBCDFEAEFB0017350FFB1A
+:1043F000CDE61CCD257B141DCD5B0FFBCDDE1CCD8A
+:104400005BC4FBCD041DCD5B46FACD05A6F11CCDEA
+:104410005BB8FACD041DCD5B7AFACD1BA6F11CCD9D
+:104420005BB8FACDDE1CCD5B06FCCD0C1DCD5B066A
+:10443000FCCDFC1CCD5BD31CCD5B256B3CA6D31AFD
+:10444000CC03243CA1256B4C257B1D6B0D7B1E6B87
+:104450000E7B1F6B0F7B206B107B216B1D7B226BF8
+:104460001E7B236B1F7B246B207BB8FACDFC1CCDFD
+:104470005BC4FBCDDE1CCD5B46FACD1EA6D31CCDA6
+:104480005BB8FACDDE1CCD5B7AFACD02A6D31CCD8B
+:104490005B196B157B1A6B167B1B6B177B1C6B18E0
+:1044A0007B116B197B126B1A7B136B1B7B146B1CC0
+:1044B0007B1F1DCD5B0FFBCDFAAEFB0017350FFB4D
+:1044C000CDE61CCD257B141DCD5B0FFBCD041DCD92
+:1044D0005BC4FBCD361DCD5B46FACD05A6F11CCDE8
+:1044E0005BB8FACD361DCD5B7AFACD1BA6F11CCD9B
+:1044F0005BB8FACD041DCD5BC4FBCDDE1CCD5B32B9
+:10450000FBCDFBB700A94121AB5BC4FBCD0C1DCD9E
+:104510005BE7FBCDFC1CCD5BB8FACDDE1CCD5B327E
+:10452000FBCD0C1DCD5BE7FBCDFC1CCD5B256B28CB
+:10453000A6031ACC032428A1256B4C257B1D6B0DEB
+:104540007B1E6B0E7B1F6B0F7B206B107B216B1D0B
+:104550007B226B1E7B236B1F7B246B207BB8FACDE9
+:10456000FC1CCD5BC4FBCDDE1CCD5B46FACD1EA68C
+:10457000D31CCD5BB8FACDDE1CCD5B7AFACD02A69A
+:10458000D31CCD5B196B157B1A6B167B1B6B177BD2
+:104590001C6B187B116B197B126B1A7B136B1B7BCB
+:1045A000146B1C7B1F1DCD5B0FFBCDF6AEFB001704
+:1045B000350FFBCDE61CCD257B141DCD5B0FFBCD50
+:1045C000DE1CCD5BC4FBCD041DCD5B46FACD05A63C
+:1045D000F11CCD5BB8FACD041DCD5B7AFACD1BA6DC
+:1045E000F11CCD5BB8FACDDE1CCD5B06FCCD0C1DFD
+:1045F000CD5B06FCCDFC1CCD5BD31CCD5B256B14C9
+:10460000A61919CC032414A1256B4C257B1D6B0D19
+:104610007B1E6B0E7B1F6B0F7B206B107B216B1D3A
+:104620007B226B1E7B236B1F7B246B207BB8FACD18
+:10463000FC1CCD5BC4FBCDDE1CCD5B46FACD1EA6BB
+:10464000D31CCD5BB8FACDDE1CCD5B7AFACD02A6C9
+:10465000D31CCD5B196B157B1A6B167B1B6B177B01
+:104660001C6B187B116B197B126B1A7B136B1B7BFA
+:10467000146B1C7B1F1DCD5B0FFBCDF2AEFB001737
+:10468000350FFBCDE61CCD257B141DCD5B0FFBCD7F
+:10469000041DCD5BC4FBCD361DCD5B46FACD05A612
+:1046A000F11CCD5BB8FACD361DCD5B7AFACD1BA6D9
+:1046B000F11CCD5BB8FACD041DCD5BC4FBCDDE1C77
+:1046C000CD5B32FBCDFC1CCD5BD31CCD5BB8FACDF2
+:1046D000DE1CCD5B32FBCD0C1DCD5BF733F833F91F
+:1046E00033FA33D31CCD5B256B4F116BB0B6126B15
+:1046F000B1B6136BB2B6146BB3B6196BACB61A6B1A
+:10470000ADB61B6BAEB61C6BAFB6156BA8B6166B11
+:10471000A9B6176BAAB6186BABB6216BA4B6226B01
+:10472000A5B6236BA6B6246BA7B61D6BA0B61E6BF1
+:10473000A1B61F6BA2B6206BA3B6AE2550A1256B08
+:104740004C257B9BFACDE61CCD257BC4FBCDDE1C26
+:10475000CD5BF739F839F939FA38681DCD257B5A20
+:104760001DCD257B4C1DCD257B3E1DCD257BB8FA6F
+:10477000CDDE1CCD5B7AFACD1FA6681DCD257B5AF8
+:104780001DCD257B4C1DCD257B3E1DCD256B10A65B
+:104790009D2510A1256B4C257BBEFBCDE61CCD25B0
+:1047A0007B2A1DCD9703AB9F585825EE72BEFBCDDB
+:1047B000E61CCD257BE0FACD5F90FE0001355FB6AB
+:1047C000E65C5C585825EE72BEFBCDE61CCD257B21
+:1047D00046FACD10A62A1DCD5C585825EE729BFADC
+:1047E000CDE61CCD257B46FACD18A62A1DCD5858FE
+:1047F00025EE72256B4F8B4100A24125A05BD6C1EF
+:1048000062CADCBC1B8FA1EBD96E9979825A818474
+:1048100084C9264C056B4A057B016B00A9017B020C
+:104820006B01AB027B27AD022640A1F6B6B43C0279
+:1048300024B5B708ABB5B6B6E7F63CF6BEFBC692F4
+:10484000FCB7027BFBB7017B2F208988818484849D
+:10485000B4B7B4B99FB5B7B5BB5208AEF6B7037BD2
+:10486000EF2503E172016B4C9FB6E74602D601EEDD
+:10487000720A204F76ADB4000235B53FF6004035E0
+:10488000F12540A1016B4C9FB6E701EE72027B015E
+:104890006B19A6ED2519A1016B4C9FB6E702E872D2
+:1048A00060FED601EE72016B4F88898881B000C32B
+:1048B00035B100D235B200E135B300F035AC0010AF
+:1048C00035AD003235AE005435AF007635A80098CE
+:1048D00035A900BA35AA00DC35AB00FE35A400EFDF
+:1048E00035A500CD35A600AB35A7008935A00067FA
+:1048F00035A1004535A2002335A3000135F63FB5AB
+:104900003FB43F818484761DCD5DAD5CAE14A60CB2
+:10491000ADDE2505A1026B4C027BEA2504A1016BEB
+:104920004C017B4602D7A0E69701EB724848027B18
+:10493000016B4F026B4F761DCD5417CD36AE05017E
+:10494000C63EAD05013235F02532A1026B4C9F46C3
+:1049500002D7CDA602EE72026B4F88883A15CC289A
+:104960004B1CCACD3E15CCC1A6004B084B87CACD07
+:104970003E15CCE0A6014BE04BBF15CC03267503DA
+:104980005A7281848475030235DF14CDE0A6014B91
+:10499000E04B10274D5FCFCD28277703C57903C69D
+:1049A0003E15CCC1A6004B284B3C15CC014B904B7F
+:1049B0001CCACD0A277703C57903C6C520284B3EFC
+:1049C00015CCE0A6014B904BE2CACD0C277703C56E
+:1049D0007903C6DD20084B3E15CC82A6014B904BD7
+:1049E00076035A72CE277603C612264DE1CDCD3E10
+:1049F00015CCC0A6004B504BB7274D5FCFCDE82656
+:104A00004D9ACBCD03A6262076030A35CB274D5FE2
+:104A1000CFCDD320604B04274D9ACBCD83A618274A
+:104A20004DC3CDCD3E15CC81A6004B604BEC264D41
+:104A30009ACBCD02A643CDCD3E15CCA0A6004B0807
+:104A40004B09274D83CCCD06264D9ACBCD4C8C20DF
+:104A500080A688884F932040A6004BA04B08264D87
+:104A6000F2CBCD9C16CC6016CC03261FA04516CCED
+:104A700003264A2D16CC032620A0E0271EA01116DF
+:104A8000CC03264A60274A392720A0C32720A02C20
+:104A90002720A0302720A09C16CC03268303C6DD48
+:104AA0002060A6004B504B1CCACD032561A1830397
+:104AB000C6122711A17C03C607277A03C681848406
+:104AC0009FAD20A6004B504B0B264D03C6052783F8
+:104AD00003C610204F004BA04B1CCACD03278303F5
+:104AE000C60F267D03C68184848400C0CD06EE7285
+:104AF000077B8303C7037B05201872042701E1723B
+:104B0000F0A48303C6016BF0A4037B0620C7026BED
+:104B100002EA72E0A4037B026B1FA40620C6888809
+:104B200088814442003545420035464200354742BF
+:104B30000035004200350142003502420135034292
+:104B4000013510200042083501420035024201358E
+:104B50000342003512264A9B02C620400035214000
+:104B60000035224000352340013504449E0255059E
+:104B7000449F02550644A002550744A10255004433
+:104B8000A202550144A302550244A402550344A5C0
+:104B9000025581854FF3259B02C1016B4C017B7649
+:104BA00012CD06204F88819A9BFACDF73FF83FF946
+:104BB0003F81F7B7F8B7F9B74FFAB7819A9BFACDAB
+:104BC000FBB701AA4181FBC692FCBFFBB7811DADBB
+:104BD000FA3FFBB701AA3313CC4F29ADFA003D359C
+:104BE00003EE72FBB7027B9B37ADFA000835FBB7CB
+:104BF00002AA4124AA02EE72037B9BD32604E1722F
+:104C000033AD02AA4103AA02EE72037B3213CC0336
+:104C100026016B4A017B9A9BFACDFBB702AA027B65
+:104C200003EE7268AD047B9B1C20016BFFA6046B36
+:104C300080AA047B062602A111B6046B03A60220FB
+:104C400001A6062002A60427153D0C279C02C67962
+:104C5000AD411CAA02EE72037B9B2A14CD4118AA17
+:104C600002EE72037B9B2A14CD4114AA02EE72035A
+:104C70007B9BD72604E1723314CD01AA4113AA020B
+:104C8000EE72037BB627016B4A017B3B14CD10AA61
+:104C900002EE72037B4514CD047B9B1720016BFF52
+:104CA000A6046BCFA602200FA606200CA60820069D
+:104CB000274A052714B6CC2604E1723314CD01AA85
+:104CC000410DAA02EE72037B81858585854C0626FF
+:104CD000016B4A017B3B14CD0CAA02EE72037B46AA
+:104CE000FACD10A64514CD047B9B2220016BFFA6B4
+:104CF000046BFCA60220F0A60620CCA60A20C0A6C3
+:104D00000E200CA612200FA616200CA618274A1754
+:104D1000274A16274A15274A14274A29274A1627B9
+:104D200013B6C92604E1723314CD01AA410BAA02BD
+:104D3000EE72037B6627016B4A017B3B14CD08AA08
+:104D400002EE72037BF7B703AAF7B6F8B7C3AAF768
+:104D5000B7F9B74FFAB7047B9B2520016BFFA60478
+:104D60006B4F01200CA60427153D2A14CD4104AA3F
+:104D700002EE72037B9B2A14CD9B026B03EF7250F1
+:104D8000AA414F5858978888888881E7FBCDA2AE02
+:104D9000FB00023581FBB700A94101AB81F7B7F8F1
+:104DA000B7F9B74FFAB781F7B703A4F7B6F83FF9E9
+:104DB0003FFA3F46FACD18A61DFACD4A81FBB7004F
+:104DC000A94107AB81A2020035A3020035A4021855
+:104DD00035812CAD18A44848484A12B6B8FACD43DC
+:104DE000AD818B4100A9410AAB5BB8FACD0CAEFB9B
+:104DF000008435FA36F936F836F73467AD0A20F70D
+:104E000039F839F939FA3873AD0C2602A111B68494
+:104E1000848484841FDDCD880B7B880B7B880B7B8F
+:104E2000880B7B88067B066B4444067B9FFBCD0288
+:104E3000A66DAD5B0E2602A111B6066B08A6076B28
+:104E40004F086BBBA6096B80A60A6BA2020035A3B4
+:104E5000020035A4026035A50200351820ACA60971
+:104E60006B44A60A6BA2020035A3020035A40262BD
+:104E700035A5020035392010A6076B4F086B5DA6DB
+:104E8000096BC0A60A6BA2020035A3020035A4027A
+:104E90003035A5020035182056A6096B22A60A6BEC
+:104EA000A2020035A3020035A4023135A502003567
+:104EB000742020A6076B4F086B2EA6096BE0A60A8C
+:104EC0006B2F12CDA5020035C411CC20A6076B4F65
+:104ED000086B2BA6096B11A60A6B2F12CDA50280B9
+:104EE00035C411CC20A6076B4F086B1FA6096B4079
+:104EF000A60A6BA2020035A3020035A4021035A554
+:104F0000020035C411CC20A6076B4F086B2EA609F2
+:104F10006BE0A60A6B4F2F12CDA5020035A511CC70
+:104F200003264A8B11CC03264A6A11CC03264A5029
+:104F300011CC03264A6D274A57274A382713B69AB9
+:104F4000B8FACDFBBF84AEFA1AE7FBCD3C12CD5BBD
+:104F50009BBEFBCD3C12CD5BC4FBCD6412CD5B1F71
+:104F600012CD5B4412CD5A01264D5F12B61520FAC0
+:104F700010C4FBCD6412CD5B1F12CD5B4412CD5A21
+:104F800001264D5F12B619264A11B63A20086B4029
+:104F9000AA087B0A6B02AA0A7B0E2602A111B60A96
+:104FA0006B04AA0A7B0627163DB8FACD3C12CD5BEE
+:104FB000F91EF7BFF8B73EA4F8B6F9BFFABF46FA34
+:104FC000CD11A65A12CD1420F714F73FF8B73EA41E
+:104FD000F8B6F93FFA3F46FACD11A65A12CD182776
+:104FE000B803CE056B18A6022014A6062010A6084A
+:104FF0002006274A052714B68B4100A2410AA05B70
+:00000001FF
diff --git a/include/Kbuild b/include/Kbuild
index 8d226bfa269..506f6d7dba7 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -10,3 +10,4 @@ header-y += video/
header-y += drm/
header-y += xen/
header-y += scsi/
+header-y += trace/
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index c94e71781b7..36c560e08b3 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -3,6 +3,7 @@ header-y += can/
header-y += caif/
header-y += dvb/
header-y += hdlc/
+header-y += hsi/
header-y += isdn/
header-y += mmc/
header-y += nfsd/
@@ -151,6 +152,7 @@ header-y += hid.h
header-y += hiddev.h
header-y += hidraw.h
header-y += hpet.h
+header-y += hwmem.h
header-y += hysdn_if.h
header-y += i2c-dev.h
header-y += i2c.h
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 0101e9c17fa..32a89cf5ec4 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -6,6 +6,19 @@
#include <linux/mmc/host.h>
+
+/*
+ * These defines are placed here because they need to be accessed from
+ * machine configuration files. The ST Micro version does not have ROD
+ * and reuses the voltage registers for direction settings.
+ */
+#define MCI_ST_DATA2DIREN (1 << 2)
+#define MCI_ST_CMDDIREN (1 << 3)
+#define MCI_ST_DATA0DIREN (1 << 4)
+#define MCI_ST_DATA31DIREN (1 << 5)
+#define MCI_ST_FBCLKEN (1 << 7)
+#define MCI_ST_DATA74DIREN (1 << 8)
+
/* Just some dummy forwarding */
struct dma_chan;
@@ -18,7 +31,8 @@ struct dma_chan;
* @ocr_mask: available voltages on the 4 pins from the block, this
* is ignored if a regulator is used, see the MMC_VDD_* masks in
* mmc/host.h
- * @vdd_handler: a callback function to translate a MMC_VDD_*
+ * @ios_handler: a callback function to act on specific ios changes,
+ * used for example to control a level shifter
* mask into a value to be binary (or set some other custom bits
* in MMCIPWR) or:ed and written into the MMCIPWR register of the
* block. May also control external power based on the power_mode.
@@ -31,6 +45,8 @@ struct dma_chan;
* @capabilities: the capabilities of the block as implemented in
* this platform, signify anything MMC_CAP_* from mmc/host.h
* @capabilities2: more capabilities, MMC_CAP2_* from mmc/host.h
+ * @sigdir: a bit field indicating for which signals on the MMC bus the
+ * host should enable signal direction indication.
* @dma_filter: function used to select an appropriate RX and TX
* DMA channel to be used for DMA, if and only if you're deploying the
* generic DMA engine
@@ -46,14 +62,14 @@ struct dma_chan;
struct mmci_platform_data {
unsigned int f_max;
unsigned int ocr_mask;
- u32 (*vdd_handler)(struct device *, unsigned int vdd,
- unsigned char power_mode);
+ int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
int gpio_wp;
int gpio_cd;
bool cd_invert;
unsigned long capabilities;
unsigned long capabilities2;
+ u32 sigdir;
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
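
The vdd_handler to ios_handler change gives board code the whole struct mmc_ios
instead of just a VDD mask, and the new sigdir field lets the platform state
which signals need direction control. A minimal board-file sketch of how this
might be wired up follows; the GPIO number, symbol names and chosen sigdir bits
are assumptions for illustration, not part of this patch.

#include <linux/amba/mmci.h>
#include <linux/gpio.h>
#include <linux/mmc/host.h>

#define BOARD_LEVELSHIFTER_EN_GPIO  78  /* assumption, board specific */

/* Enable the level shifter only while the card is powered. */
static int board_mmci_ios_handler(struct device *dev, struct mmc_ios *ios)
{
        gpio_set_value(BOARD_LEVELSHIFTER_EN_GPIO,
                       ios->power_mode == MMC_POWER_ON);
        return 0;
}

static struct mmci_platform_data board_sdi0_data = {
        .ios_handler    = board_mmci_ios_handler,
        .ocr_mask       = MMC_VDD_29_30,
        .sigdir         = MCI_ST_CMDDIREN | MCI_ST_DATA0DIREN,
};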
diff --git a/include/linux/boottime.h b/include/linux/boottime.h
new file mode 100644
index 00000000000..9836c5b3175
--- /dev/null
+++ b/include/linux/boottime.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2009-2010
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * boottime is a tool for collecting start-up timing
+ * information and, together with boot loader support, can
+ * display the total system start-up time.
+ *
+ */
+
+#ifndef LINUX_BOOTTIME_H
+#define LINUX_BOOTTIME_H
+
+#ifdef CONFIG_BOOTTIME
+#include <linux/kernel.h>
+
+/**
+ * struct boottime_timer - Callbacks for generic timer.
+ * @init: Function to call at boottime initialization
+ * @get_time: Returns the number of us since start-up
+ * Preferably this is based upon a free-running timer.
+ * This is the only required entry.
+ * @finalize: Called before init is executed and boottime is done.
+ */
+struct boottime_timer {
+ int (*init)(void);
+ unsigned long (*get_time)(void);
+ void (*finalize)(void);
+};
+
+/**
+ * boottime_mark_wtime()
+ * Add a sample point with a given time. Useful for adding data collected
+ * by, for example, a boot loader.
+ * @name: The name of the sample point
+ * @time: The time in us when this point was reached
+ */
+void __init boottime_mark_wtime(char *name, unsigned long time);
+
+/**
+ * boottime_mark()
+ * Add a sample point with the current time.
+ * @name: The name of this sample point
+ */
+void __init boottime_mark(char *name);
+
+/**
+ * boottime_mark_symbolic()
+ * Add a sample point where the name is a symbolic function
+ * and %pF is needed to get the correct function name.
+ * @name: function name.
+ */
+void __init boottime_mark_symbolic(void *name);
+
+/**
+ * boottime_activate()
+ * Activates boottime and registers callbacks.
+ * @bt: struct with callbacks.
+ */
+void __ref boottime_activate(struct boottime_timer *bt);
+
+/**
+ * boottime_deactivate()
+ * This function is called when the kernel boot is done.
+ * (before "free init memory" is called)
+ */
+void __init boottime_deactivate(void);
+
+/**
+ * boottime_system_up()
+ * Called when the basics of the kernel
+ * are up and running.
+ */
+void __init boottime_system_up(void);
+
+#else
+
+#define boottime_mark_wtime(name, time)
+#define boottime_mark(name)
+#define boottime_mark_symbolic(name)
+#define boottime_activate(bt)
+#define boottime_deactivate()
+#define boottime_system_up()
+#endif
+
+#endif /* LINUX_BOOTTIME_H */
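
A rough sketch of how a machine timer driver might hook into the API above:
register a get_time callback backed by a free-running counter, then drop
sample points during early init. The memory-mapped counter and its base
pointer are hypothetical.

#include <linux/boottime.h>
#include <linux/init.h>
#include <linux/io.h>

/* Hypothetical free-running microsecond counter, board specific. */
static void __iomem *board_timer_base;

static unsigned long board_boottime_get_time(void)
{
        return readl(board_timer_base);
}

static struct boottime_timer board_boottime_timer = {
        .get_time = board_boottime_get_time,
};

void __init board_timer_init(void)
{
        /* board_timer_base is assumed to be ioremap()ed elsewhere. */
        boottime_activate(&board_boottime_timer);
        boottime_mark("timer_init_done");
}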
diff --git a/include/linux/clksrc-db5500-mtimer.h b/include/linux/clksrc-db5500-mtimer.h
new file mode 100644
index 00000000000..3112c7f2709
--- /dev/null
+++ b/include/linux/clksrc-db5500-mtimer.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+#ifndef __CLKSRC_DB5500_MTIMER_H
+#define __CLKSRC_DB5500_MTIMER_H
+
+#include <linux/io.h>
+
+#ifdef CONFIG_CLKSRC_DB5500_MTIMER
+void db5500_mtimer_init(void __iomem *base);
+#else
+static inline void db5500_mtimer_init(void __iomem *base) {}
+#endif
+
+#endif
diff --git a/include/linux/compdev.h b/include/linux/compdev.h
new file mode 100644
index 00000000000..9e707c7b770
--- /dev/null
+++ b/include/linux/compdev.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * ST-Ericsson Display overlay compositor device driver
+ *
+ * Author: Anders Bauer <anders.bauer@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * Modified: Per-Daniel Olsson <per-daniel.olsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _COMPDEV_H_
+#define _COMPDEV_H_
+
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#else
+#include <linux/types.h>
+#include <video/mcde.h>
+#endif
+
+#if defined(__KERNEL__) || defined(_KERNEL)
+#include <linux/mm_types.h>
+#include <linux/bitops.h>
+#else
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#define COMPDEV_DEFAULT_DEVICE_PREFIX "comp"
+#define NUM_COMPDEV_BUFS 2
+
+enum compdev_fmt {
+ COMPDEV_FMT_RGB565,
+ COMPDEV_FMT_RGB888,
+ COMPDEV_FMT_RGBX8888,
+ COMPDEV_FMT_RGBA8888,
+ COMPDEV_FMT_YUV422,
+};
+
+struct compdev_size {
+ uint16_t width;
+ uint16_t height;
+};
+
+/* Display rotation */
+enum compdev_rotation {
+ COMPDEV_ROT_0 = 0,
+ COMPDEV_ROT_90_CCW = 90,
+ COMPDEV_ROT_180_CCW = 180,
+ COMPDEV_ROT_270_CCW = 270,
+ COMPDEV_ROT_90_CW = COMPDEV_ROT_270_CCW,
+ COMPDEV_ROT_180_CW = COMPDEV_ROT_180_CCW,
+ COMPDEV_ROT_270_CW = COMPDEV_ROT_90_CCW,
+};
+
+enum compdev_ptr_type {
+ COMPDEV_PTR_PHYSICAL,
+ COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET,
+};
+
+struct compdev_rect {
+ __s32 x;
+ __s32 y;
+ __s32 width;
+ __s32 height;
+};
+
+struct compdev_buf {
+ enum compdev_ptr_type type;
+ __s32 hwmem_buf_name;
+ __s32 fd;
+ __u32 offset;
+ __u32 len;
+};
+
+struct compdev_img {
+ enum compdev_fmt fmt;
+ struct compdev_buf buf;
+ __s32 width;
+ __s32 height;
+ __u32 pitch;
+ struct compdev_rect dst_rect;
+};
+
+struct compdev_post_buffers_req {
+ enum compdev_rotation rotation;
+ struct compdev_img img_buffers[NUM_COMPDEV_BUFS];
+ __u8 buffer_count;
+};
+
+#define COMPDEV_GET_SIZE_IOC _IOR('D', 1, struct compdev_size)
+#define COMPDEV_POST_BUFFERS_IOC _IOW('D', 2, struct compdev_post_buffers_req)
+
+#ifdef __KERNEL__
+
+int compdev_create(struct mcde_display_device *ddev,
+ struct mcde_overlay *parent_ovly);
+void compdev_destroy(struct mcde_display_device *ddev);
+
+#endif /* __KERNEL__ */
+
+#endif /* _COMPDEV_H_ */
+
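
For reference, a small user-space sketch of the new compdev ioctl interface,
querying the composition size from the first node. The device path is derived
from COMPDEV_DEFAULT_DEVICE_PREFIX, but the exact node name is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/compdev.h>

int main(void)
{
        struct compdev_size size;
        int fd = open("/dev/comp0", O_RDWR);    /* node name is an assumption */

        if (fd < 0)
                return 1;
        if (ioctl(fd, COMPDEV_GET_SIZE_IOC, &size) == 0)
                printf("display: %ux%u\n", size.width, size.height);
        close(fd);
        return 0;
}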
diff --git a/include/linux/cpufreq-dbx500.h b/include/linux/cpufreq-dbx500.h
new file mode 100644
index 00000000000..15c59c3b1c8
--- /dev/null
+++ b/include/linux/cpufreq-dbx500.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+#ifndef __CPUFREQ_DBX500_H
+#define __CPUFREQ_DBX500_H
+
+#include <linux/cpufreq.h>
+
+int dbx500_cpufreq_get_limits(int cpu, int r,
+ unsigned int *min, unsigned int *max);
+
+int dbx500_cpufreq_percent2freq(int percent);
+
+#endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 6216115c778..1f8e97c2a08 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -199,6 +199,7 @@ extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max);
/*********************************************************************
* CPUFREQ DRIVER INTERFACE *
@@ -336,6 +337,7 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
}
#endif
+int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max);
/*********************************************************************
* CPUFREQ DEFAULT GOVERNOR *
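
cpufreq_update_freq() is exported so platform code can adjust a CPU's frequency
range at run time, and the dbx500 helper above is one producer of such limits.
A hedged sketch of combining the two follows; the requirement id r passed to
dbx500_cpufreq_get_limits() is platform defined and 0 is only a placeholder.

#include <linux/cpufreq.h>
#include <linux/cpufreq-dbx500.h>

static int board_apply_cpu_limits(int cpu)
{
        unsigned int min, max;
        int ret;

        ret = dbx500_cpufreq_get_limits(cpu, 0 /* placeholder id */, &min, &max);
        if (ret)
                return ret;

        return cpufreq_update_freq(cpu, min, max);
}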
diff --git a/include/linux/cyttsp.h b/include/linux/cyttsp.h
new file mode 100755
index 00000000000..38ef1236d7a
--- /dev/null
+++ b/include/linux/cyttsp.h
@@ -0,0 +1,114 @@
+/* Header file for:
+ * Cypress TrueTouch(TM) Standard Product I2C touchscreen driver.
+ * include/linux/cyttsp.h
+ *
+ * Copyright (C) 2009-2011 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein. Cypress does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com
+ *
+ */
+#include <linux/input.h>
+
+#ifndef _CYTTSP_H_
+#define _CYTTSP_H_
+
+#include <linux/input.h>
+
+#define CY_SPI_NAME "cyttsp-spi"
+#define CY_I2C_NAME "cyttsp-i2c"
+/* Scan Type selection for finger and/or stylus activation */
+#define CY_SCN_TYP_DFLT 0x01 /* finger only mutual scan */
+/* Active Power state scanning/processing refresh interval */
+#define CY_ACT_INTRVL_DFLT 0x00 /* ms */
+/* touch timeout for the Active power */
+#define CY_TCH_TMOUT_DFLT 0x64 /* ms */
+/* Low Power state scanning/processing refresh interval */
+#define CY_LP_INTRVL_DFLT 0x32 /* ms */
+/*
+ * defines for Gen2 (Txx2xx); Gen3 (Txx3xx)
+ * use these defines to set cyttsp_platform_data.gen in the board config file
+ */
+enum cyttsp_gen {
+ CY_GEN2,
+ CY_GEN3,
+};
+/*
+ * Active distance in pixels for a gesture to be reported.
+ * If set to 0, all gesture movements are reported.
+ * Valid range is 0 - 15.
+ */
+#define CY_ACT_DIST_DFLT 8
+#define CY_ACT_DIST CY_ACT_DIST_DFLT
+#define CY_ACT_DIST_BITS 0x0F
+/* max retries for read/write ops */
+#define CY_NUM_RETRY 6
+
+enum cyttsp_gest {
+ CY_GEST_GRP_NONE = 0,
+ CY_GEST_GRP1 = 0x10,
+ CY_GEST_GRP2 = 0x20,
+ CY_GEST_GRP3 = 0x40,
+ CY_GEST_GRP4 = 0x80,
+};
+
+enum cyttsp_powerstate {
+ CY_IDLE_STATE, /* IC cannot be reached */
+ CY_READY_STATE, /* pre-operational; ready to go to ACTIVE */
+ CY_ACTIVE_STATE, /* app is running, IC is scanning */
+ CY_LOW_PWR_STATE, /* not currently used */
+ CY_SLEEP_STATE, /* app is running, IC is idle */
+ CY_BL_STATE, /* bootloader is running */
+ CY_LDR_STATE, /* loader is running */
+ CY_SYSINFO_STATE, /* Switching to SysInfo mode */
+ CY_INVALID_STATE /* always last in the list */
+};
+
+struct cyttsp_platform_data {
+ u32 maxx;
+ u32 maxy;
+ u32 flags;
+ enum cyttsp_gen gen;
+ unsigned use_st:1;
+ unsigned use_mt:1;
+ unsigned use_trk_id:1;
+ unsigned use_hndshk:1;
+ unsigned use_timer:1;
+ unsigned use_sleep:1;
+ unsigned use_gestures:1;
+ unsigned use_load_file:1;
+ unsigned use_force_fw_update:1;
+ unsigned use_virtual_keys:1;
+ enum cyttsp_powerstate power_state;
+ u8 gest_set;
+ u8 scn_typ; /* finger and/or stylus scanning */
+ u8 act_intrvl; /* Active refresh interval; ms */
+ u8 tch_tmout; /* Active touch timeout; ms */
+ u8 lp_intrvl; /* Low power refresh interval; ms */
+ int (*wakeup)(void);
+ int (*init)(int on_off);
+ void (*mt_sync)(struct input_dev *);
+ char *name;
+ s16 irq_gpio;
+ s16 rst_gpio;
+ bool invert;
+};
+
+#endif /* _CYTTSP_H_ */
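
A board-configuration sketch showing how cyttsp_platform_data and the matching
I2C device might be declared using the defaults from this header; the panel
resolution, GPIO numbers and I2C address are made-up, board-specific
assumptions.

#include <linux/cyttsp.h>
#include <linux/i2c.h>
#include <linux/init.h>

static struct cyttsp_platform_data board_cyttsp_data = {
        .maxx           = 480,                  /* assumption */
        .maxy           = 854,                  /* assumption */
        .gen            = CY_GEN3,
        .use_mt         = 1,
        .use_trk_id     = 1,
        .scn_typ        = CY_SCN_TYP_DFLT,
        .act_intrvl     = CY_ACT_INTRVL_DFLT,
        .tch_tmout      = CY_TCH_TMOUT_DFLT,
        .lp_intrvl      = CY_LP_INTRVL_DFLT,
        .name           = CY_I2C_NAME,
        .irq_gpio       = 84,                   /* assumption */
        .rst_gpio       = 85,                   /* assumption */
};

/* Registered with i2c_register_board_info() from the board init code. */
static struct i2c_board_info __initdata board_i2c_touch[] = {
        {
                I2C_BOARD_INFO(CY_I2C_NAME, 0x1a),      /* address is an assumption */
                .platform_data = &board_cyttsp_data,
        },
};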
diff --git a/include/linux/db8500-modem-trace.h b/include/linux/db8500-modem-trace.h
new file mode 100644
index 00000000000..4863e1a0b03
--- /dev/null
+++ b/include/linux/db8500-modem-trace.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors: Michel JAOUEN <michel.jaouen@stericsson.com>
+ * Maxime COQUELIN <maxime.coquelin-nonst@stericsson.com>
+ * for ST-Ericsson
+ * License terms: GNU General Public License (GPL), version 2
+ */
+/* macro for requesting a trace read */
+
+struct modem_trace_req {
+ __u32 phys_addr;
+ __u8 filler;
+ __u8 *buff;
+ __u32 size;
+};
+
+#define TM_IO_NUMBER 0xfc
+#define TM_GET_DUMPINFO _IOR(TM_IO_NUMBER, 1, unsigned long)
+#define TM_TRACE_REQ _IOWR(TM_IO_NUMBER, 2, unsigned long)
+
+struct db8500_trace_platform_data {
+ unsigned long ape_base;
+ unsigned long modem_base;
+};
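
A heavily hedged user-space sketch of the trace ioctls above; the device node
name and the exact contract of the request are assumptions, only the types
come from this header.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/db8500-modem-trace.h>

int main(void)
{
        struct modem_trace_req req = { 0 };
        int fd = open("/dev/db8500-modem-trace", O_RDWR);       /* assumption */

        if (fd < 0)
                return 1;

        req.size = 4096;
        req.buff = malloc(req.size);
        if (req.buff && ioctl(fd, TM_TRACE_REQ, &req) == 0)
                printf("read %u trace bytes from 0x%x\n", req.size, req.phys_addr);

        free(req.buff);
        close(fd);
        return 0;
}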
diff --git a/include/linux/dispdev.h b/include/linux/dispdev.h
new file mode 100644
index 00000000000..cbcf6705150
--- /dev/null
+++ b/include/linux/dispdev.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * ST-Ericsson Display device driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _DISPDEV_H_
+#define _DISPDEV_H_
+
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#else
+#include <linux/types.h>
+#include <video/mcde.h>
+#endif
+
+#define DISPDEV_DEFAULT_DEVICE_PREFIX "disp"
+
+enum dispdev_fmt {
+ DISPDEV_FMT_RGB565,
+ DISPDEV_FMT_RGB888,
+ DISPDEV_FMT_RGBX8888,
+ DISPDEV_FMT_RGBA8888,
+ DISPDEV_FMT_YUV422,
+};
+
+struct dispdev_config {
+ uint16_t format;
+ uint16_t stride;
+ uint16_t x;
+ uint16_t y;
+ uint16_t z;
+ uint16_t width;
+ uint16_t height;
+
+ uint32_t user_flags;
+};
+
+struct dispdev_buffer_info {
+ uint16_t buf_idx;
+ uint16_t display_update;
+ struct dispdev_config buf_cfg;
+};
+
+#define DISPDEV_SET_CONFIG_IOC _IOW('D', 1, struct dispdev_config)
+#define DISPDEV_GET_CONFIG_IOC _IOR('D', 2, struct dispdev_config)
+#define DISPDEV_REGISTER_BUFFER_IOC _IO('D', 3)
+#define DISPDEV_UNREGISTER_BUFFER_IOC _IO('D', 4)
+#define DISPDEV_QUEUE_BUFFER_IOC _IOW('D', 5, struct dispdev_buffer_info)
+#define DISPDEV_DEQUEUE_BUFFER_IOC _IO('D', 6)
+
+#ifdef __KERNEL__
+
+int dispdev_create(struct mcde_display_device *ddev, bool overlay,
+ struct mcde_overlay *parent_ovly);
+void dispdev_destroy(struct mcde_display_device *ddev);
+
+#endif /* __KERNEL__ */
+
+#endif /* _DISPDEV_H_ */
+
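
A user-space sketch of the dispdev configuration path using the ioctls above;
the device node name (derived from DISPDEV_DEFAULT_DEVICE_PREFIX) and the
chosen resolution are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dispdev.h>

int main(void)
{
        struct dispdev_config cfg;
        int fd = open("/dev/disp0", O_RDWR);    /* node name is an assumption */

        if (fd < 0)
                return 1;

        memset(&cfg, 0, sizeof(cfg));
        cfg.format = DISPDEV_FMT_RGB565;
        cfg.width  = 864;                       /* assumption */
        cfg.height = 480;                       /* assumption */
        cfg.stride = cfg.width * 2;             /* 2 bytes per RGB565 pixel */

        if (ioctl(fd, DISPDEV_SET_CONFIG_IOC, &cfg))
                perror("DISPDEV_SET_CONFIG_IOC");
        close(fd);
        return 0;
}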
diff --git a/arch/arm/plat-nomadik/include/plat/gpio-nomadik.h b/include/linux/gpio/nomadik.h
index 9605bf227df..3e8b7f16fb7 100644
--- a/arch/arm/plat-nomadik/include/plat/gpio-nomadik.h
+++ b/include/linux/gpio/nomadik.h
@@ -29,6 +29,7 @@
#define NMK_GPIO_SLPC 0x1c
#define NMK_GPIO_AFSLA 0x20
#define NMK_GPIO_AFSLB 0x24
+#define NMK_GPIO_LOWEMI 0x28
#define NMK_GPIO_RIMSC 0x40
#define NMK_GPIO_FIMSC 0x44
diff --git a/include/linux/hsi/Kbuild b/include/linux/hsi/Kbuild
new file mode 100644
index 00000000000..271a770b478
--- /dev/null
+++ b/include/linux/hsi/Kbuild
@@ -0,0 +1 @@
+header-y += hsi_char.h
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
new file mode 100644
index 00000000000..455120c1fb3
--- /dev/null
+++ b/include/linux/hsi/hsi.h
@@ -0,0 +1,391 @@
+/*
+ * hsi.h
+ *
+ * HSI core header file.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_HSI_H__
+#define __LINUX_HSI_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+/* HSI message ttype */
+#define HSI_MSG_READ 0
+#define HSI_MSG_WRITE 1
+
+/* HSI configuration values */
+enum {
+ HSI_MODE_STREAM = 1,
+ HSI_MODE_FRAME,
+};
+
+enum {
+ HSI_FLOW_SYNC, /* Synchronized flow */
+ HSI_FLOW_PIPE, /* Pipelined flow */
+};
+
+enum {
+ HSI_ARB_RR, /* Round-robin arbitration */
+ HSI_ARB_PRIO, /* Channel priority arbitration */
+};
+
+#define HSI_MAX_CHANNELS 16
+
+/* HSI message status codes */
+enum {
+ HSI_STATUS_COMPLETED, /* Message transfer is completed */
+ HSI_STATUS_PENDING, /* Message pending to be read/written (POLL) */
+ HSI_STATUS_PROCEEDING, /* Message transfer is ongoing */
+ HSI_STATUS_QUEUED, /* Message waiting to be served */
+ HSI_STATUS_ERROR, /* Error when message transfer was ongoing */
+};
+
+/* HSI port event codes */
+enum {
+ HSI_EVENT_START_RX,
+ HSI_EVENT_STOP_RX,
+};
+
+/**
+ * struct hsi_config - Configuration for RX/TX HSI modules
+ * @mode: Bit transmission mode (STREAM or FRAME)
+ * @channels: Number of channels to use [1..16]
+ * @speed: Max bit transmission speed (Kbit/s)
+ * @flow: RX flow type (SYNCHRONIZED or PIPELINE)
+ * @arb_mode: Arbitration mode for TX frame (Round robin, priority)
+ */
+struct hsi_config {
+ unsigned int mode;
+ unsigned int channels;
+ unsigned int speed;
+ /* ch_prio for TX only, Valid if arb_mode == HSI_ARB_PRIO */
+ unsigned char ch_prio[HSI_MAX_CHANNELS];
+ union {
+ unsigned int flow; /* RX only */
+ unsigned int arb_mode; /* TX only */
+ };
+};
+
+/**
+ * struct hsi_board_info - HSI client board info
+ * @name: Name for the HSI device
+ * @hsi_id: HSI controller id where the client sits
+ * @port: Port number in the controller where the client sits
+ * @tx_cfg: HSI TX configuration
+ * @rx_cfg: HSI RX configuration
+ * @platform_data: Platform related data
+ * @archdata: Architecture-dependent device data
+ */
+struct hsi_board_info {
+ const char *name;
+ int hsi_id;
+ unsigned int port;
+ struct hsi_config tx_cfg;
+ struct hsi_config rx_cfg;
+ void *platform_data;
+ struct dev_archdata *archdata;
+};
+
+#ifdef CONFIG_HSI_BOARDINFO
+extern int hsi_register_board_info(struct hsi_board_info const *info,
+ unsigned int len);
+#else
+static inline int hsi_register_board_info(struct hsi_board_info const *info,
+ unsigned int len)
+{
+ return 0;
+}
+#endif /* CONFIG_HSI_BOARDINFO */
+
+/**
+ * struct hsi_client - HSI client attached to an HSI port
+ * @device: Driver model representation of the device
+ * @tx_cfg: HSI TX configuration
+ * @rx_cfg: HSI RX configuration
+ * @hsi_start_rx: Called after incoming wake line goes high
+ * @hsi_stop_rx: Called after incoming wake line goes low
+ */
+struct hsi_client {
+ struct device device;
+ struct hsi_config tx_cfg;
+ struct hsi_config rx_cfg;
+ void (*hsi_start_rx)(struct hsi_client *cl);
+ void (*hsi_stop_rx)(struct hsi_client *cl);
+ /* private: */
+ unsigned int pclaimed:1;
+ struct list_head link;
+};
+
+#define to_hsi_client(dev) container_of(dev, struct hsi_client, device)
+
+static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data)
+{
+ dev_set_drvdata(&cl->device, data);
+}
+
+static inline void *hsi_client_drvdata(struct hsi_client *cl)
+{
+ return dev_get_drvdata(&cl->device);
+}
+
+/**
+ * struct hsi_client_driver - Driver associated to an HSI client
+ * @driver: Driver model representation of the driver
+ */
+struct hsi_client_driver {
+ struct device_driver driver;
+};
+
+#define to_hsi_client_driver(drv) container_of(drv, struct hsi_client_driver,\
+ driver)
+
+int hsi_register_client_driver(struct hsi_client_driver *drv);
+
+static inline void hsi_unregister_client_driver(struct hsi_client_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+
+/**
+ * struct hsi_msg - HSI message descriptor
+ * @link: Free to use by the current descriptor owner
+ * @cl: HSI device client that issues the transfer
+ * @sgt: Head of the scatterlist array
+ * @context: Client context data associated to the transfer
+ * @complete: Transfer completion callback
+ * @destructor: Destructor to free resources when flushing
+ * @status: Status of the transfer when completed
+ * @actual_len: Actual length of data transferred on completion
+ * @channel: Channel where to TX/RX the message
+ * @ttype: Transfer type (TX if set, RX otherwise)
+ * @break_frame: if true HSI will send/receive a break frame (FRAME MODE)
+ */
+struct hsi_msg {
+ struct list_head link;
+ struct hsi_client *cl;
+ struct sg_table sgt;
+ void *context;
+
+ void (*complete)(struct hsi_msg *msg);
+ void (*destructor)(struct hsi_msg *msg);
+
+ int status;
+ unsigned int actual_len;
+ unsigned int channel;
+ unsigned int ttype:1;
+ unsigned int break_frame:1;
+};
+
+struct hsi_msg *hsi_alloc_msg(unsigned int n_frag, gfp_t flags);
+void hsi_free_msg(struct hsi_msg *msg);
+
+/**
+ * struct hsi_port - HSI port device
+ * @device: Driver model representation of the device
+ * @tx_cfg: Current TX path configuration
+ * @rx_cfg: Current RX path configuration
+ * @num: Port number
+ * @shared: Set when port can be shared by different clients
+ * @claimed: Reference count of clients which claimed the port
+ * @lock: Serialize port claim
+ * @async: Asynchronous transfer callback
+ * @setup: Callback to set the HSI client configuration
+ * @flush: Callback to clean the HW state and destroy all pending transfers
+ * @start_tx: Callback to inform that a client wants to TX data
+ * @stop_tx: Callback to inform that a client no longer wishes to TX data
+ * @release: Callback to inform that a client no longer uses the port
+ * @clients: List of hsi_clients using the port.
+ * @clock: Lock to serialize access to the clients list.
+ */
+struct hsi_port {
+ struct device device;
+ struct hsi_config tx_cfg;
+ struct hsi_config rx_cfg;
+ unsigned int num;
+ unsigned int shared:1;
+ int claimed;
+ struct mutex lock;
+ int (*async)(struct hsi_msg *msg);
+ int (*setup)(struct hsi_client *cl);
+ int (*flush)(struct hsi_client *cl);
+ int (*start_tx)(struct hsi_client *cl);
+ int (*stop_tx)(struct hsi_client *cl);
+ int (*release)(struct hsi_client *cl);
+ struct list_head clients;
+ spinlock_t clock;
+};
+
+#define to_hsi_port(dev) container_of(dev, struct hsi_port, device)
+#define hsi_get_port(cl) to_hsi_port((cl)->device.parent)
+
+void hsi_event(struct hsi_port *port, unsigned int event);
+int hsi_claim_port(struct hsi_client *cl, unsigned int share);
+void hsi_release_port(struct hsi_client *cl);
+
+static inline int hsi_port_claimed(struct hsi_client *cl)
+{
+ return cl->pclaimed;
+}
+
+static inline void hsi_port_set_drvdata(struct hsi_port *port, void *data)
+{
+ dev_set_drvdata(&port->device, data);
+}
+
+static inline void *hsi_port_drvdata(struct hsi_port *port)
+{
+ return dev_get_drvdata(&port->device);
+}
+
+/**
+ * struct hsi_controller - HSI controller device
+ * @device: Driver model representation of the device
+ * @owner: Pointer to the module owning the controller
+ * @id: HSI controller ID
+ * @num_ports: Number of ports in the HSI controller
+ * @port: Array of HSI ports
+ */
+struct hsi_controller {
+ struct device device;
+ struct module *owner;
+ int id;
+ unsigned int num_ports;
+ struct hsi_port *port;
+};
+
+#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device)
+
+struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags);
+void hsi_free_controller(struct hsi_controller *hsi);
+int hsi_register_controller(struct hsi_controller *hsi);
+void hsi_unregister_controller(struct hsi_controller *hsi);
+
+static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi,
+ void *data)
+{
+ dev_set_drvdata(&hsi->device, data);
+}
+
+static inline void *hsi_controller_drvdata(struct hsi_controller *hsi)
+{
+ return dev_get_drvdata(&hsi->device);
+}
+
+static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi,
+ unsigned int num)
+{
+ return (num < hsi->num_ports) ? &hsi->port[num] : NULL;
+}
+
+/*
+ * API for HSI clients
+ */
+int hsi_async(struct hsi_client *cl, struct hsi_msg *msg);
+
+/**
+ * hsi_setup - Configure the client's port
+ * @cl: Pointer to the HSI client
+ *
+ * When sharing ports, clients should either rely on a single
+ * client setup or have the same setup for all of them.
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_setup(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->setup(cl);
+}
+
+/**
+ * hsi_flush - Flush all pending transactions on the client's port
+ * @cl: Pointer to the HSI client
+ *
+ * This function will destroy all pending hsi_msg in the port and reset
+ * the HW port so it is ready to receive and transmit from a clean state.
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_flush(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->flush(cl);
+}
+
+/**
+ * hsi_async_read - Submit a read transfer
+ * @cl: Pointer to the HSI client
+ * @msg: HSI message descriptor of the transfer
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg)
+{
+ msg->ttype = HSI_MSG_READ;
+ return hsi_async(cl, msg);
+}
+
+/**
+ * hsi_async_write - Submit a write transfer
+ * @cl: Pointer to the HSI client
+ * @msg: HSI message descriptor of the transfer
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg)
+{
+ msg->ttype = HSI_MSG_WRITE;
+ return hsi_async(cl, msg);
+}
+
+/**
+ * hsi_start_tx - Signal the port that the client wants to start a TX
+ * @cl: Pointer to the HSI client
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_start_tx(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->start_tx(cl);
+}
+
+/**
+ * hsi_stop_tx - Signal the port that the client no longer wants to transmit
+ * @cl: Pointer to the HSI client
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_stop_tx(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->stop_tx(cl);
+}
+#endif /* __LINUX_HSI_H__ */
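A minimal sketch of how a client driver could use the port wrappers above; only functions declared in this header are used, the struct hsi_client is assumed to come from the HSI client framework earlier in this file, and error handling is reduced to the essentials.

/* Usage sketch, not part of the patch: claim exclusively, configure, raise TX. */
static int example_client_start(struct hsi_client *cl)
{
	int err;

	err = hsi_claim_port(cl, 0);	/* 0: do not share the port */
	if (err < 0)
		return err;

	err = hsi_setup(cl);		/* apply the client's port configuration */
	if (err < 0)
		goto out_release;

	err = hsi_start_tx(cl);		/* tell the port we want to transmit */
	if (err < 0)
		goto out_release;

	return 0;

out_release:
	hsi_release_port(cl);
	return err;
}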
diff --git a/include/linux/hsi/hsi_char.h b/include/linux/hsi/hsi_char.h
new file mode 100644
index 00000000000..4ffe33f7376
--- /dev/null
+++ b/include/linux/hsi/hsi_char.h
@@ -0,0 +1,66 @@
+/*
+ * hsi_char.h
+ *
+ * Part of the HSI character device driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Andras Domokos <andras.domokos at nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+
+#ifndef __HSI_CHAR_H
+#define __HSI_CHAR_H
+
+#define HSI_CHAR_MAGIC 'k'
+#define HSC_IOW(num, dtype) _IOW(HSI_CHAR_MAGIC, num, dtype)
+#define HSC_IOR(num, dtype) _IOR(HSI_CHAR_MAGIC, num, dtype)
+#define HSC_IOWR(num, dtype) _IOWR(HSI_CHAR_MAGIC, num, dtype)
+#define HSC_IO(num) _IO(HSI_CHAR_MAGIC, num)
+
+#define HSC_RESET HSC_IO(16)
+#define HSC_SET_PM HSC_IO(17)
+#define HSC_SEND_BREAK HSC_IO(18)
+#define HSC_SET_RX HSC_IOW(19, struct hsc_rx_config)
+#define HSC_GET_RX HSC_IOW(20, struct hsc_rx_config)
+#define HSC_SET_TX HSC_IOW(21, struct hsc_tx_config)
+#define HSC_GET_TX HSC_IOW(22, struct hsc_tx_config)
+
+#define HSC_PM_DISABLE 0
+#define HSC_PM_ENABLE 1
+
+#define HSC_MODE_STREAM 1
+#define HSC_MODE_FRAME 2
+#define HSC_FLOW_SYNC 0
+#define HSC_ARB_RR 0
+#define HSC_ARB_PRIO 1
+
+struct hsc_rx_config {
+ uint32_t mode;
+ uint32_t flow;
+ uint32_t channels;
+};
+
+struct hsc_tx_config {
+ uint32_t mode;
+ uint32_t channels;
+ uint32_t speed;
+ uint32_t arb_mode;
+ uint32_t priority;
+};
+
+#endif /* __HSI_CHAR_H */
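As a rough illustration of the ioctl interface above (the device node path below is a placeholder; the header does not define it), a user-space program could configure the RX side like this:

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/hsi/hsi_char.h>

int main(void)
{
	struct hsc_rx_config rx_cfg = {
		.mode     = HSC_MODE_FRAME,	/* frame transmission mode */
		.flow     = HSC_FLOW_SYNC,	/* synchronized data flow */
		.channels = 1,
	};
	int fd = open("/dev/hsi_char", O_RDWR);	/* placeholder node name */

	if (fd < 0 || ioctl(fd, HSC_SET_RX, &rx_cfg) < 0) {
		perror("hsi_char");
		return 1;
	}
	return 0;
}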
diff --git a/include/linux/hwmem.h b/include/linux/hwmem.h
new file mode 100644
index 00000000000..cb9e0dc9aaa
--- /dev/null
+++ b/include/linux/hwmem.h
@@ -0,0 +1,597 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _HWMEM_H_
+#define _HWMEM_H_
+
+#include <linux/types.h>
+
+#if !defined(__KERNEL__)
+#include <sys/types.h>
+#else
+#include <linux/mm_types.h>
+#endif
+
+#define HWMEM_DEFAULT_DEVICE_NAME "hwmem"
+
+/**
+ * @brief Flags defining behavior of allocation
+ */
+enum hwmem_alloc_flags {
+ /**
+ * @brief Buffered
+ */
+ HWMEM_ALLOC_HINT_WRITE_COMBINE = (1 << 0),
+ /**
+ * @brief Non-buffered
+ */
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE = (1 << 1),
+ /**
+ * @brief Cached
+ */
+ HWMEM_ALLOC_HINT_CACHED = (1 << 2),
+ /**
+ * @brief Uncached
+ */
+ HWMEM_ALLOC_HINT_UNCACHED = (1 << 3),
+ /**
+ * @brief Write back
+ */
+ HWMEM_ALLOC_HINT_CACHE_WB = (1 << 4),
+ /**
+ * @brief Write through
+ */
+ HWMEM_ALLOC_HINT_CACHE_WT = (1 << 5),
+ /**
+ * @brief No alloc on write
+ */
+ HWMEM_ALLOC_HINT_CACHE_NAOW = (1 << 6),
+ /**
+ * @brief Alloc on write
+ */
+ HWMEM_ALLOC_HINT_CACHE_AOW = (1 << 7),
+ /**
+ * @brief Inner and outer cache
+ */
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE = (1 << 8),
+ /**
+ * @brief Inner cache only
+ */
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY = (1 << 9),
+ /**
+ * @brief Reserved for use by the cache handler integration
+ */
+ HWMEM_ALLOC_RESERVED_CHI = (1 << 31),
+};
+
+/**
+ * @brief Flags defining buffer access mode.
+ */
+enum hwmem_access {
+ /**
+ * @brief Buffer will be read from.
+ */
+ HWMEM_ACCESS_READ = (1 << 0),
+ /**
+ * @brief Buffer will be written to.
+ */
+ HWMEM_ACCESS_WRITE = (1 << 1),
+ /**
+ * @brief Buffer will be imported.
+ */
+ HWMEM_ACCESS_IMPORT = (1 << 2),
+};
+
+/**
+ * @brief Values defining memory types.
+ */
+enum hwmem_mem_type {
+ /**
+ * @brief Scattered system memory.
+ */
+ HWMEM_MEM_SCATTERED_SYS,
+ /**
+ * @brief Contiguous system memory.
+ */
+ HWMEM_MEM_CONTIGUOUS_SYS,
+ /**
+ * @brief Protected system memory.
+ */
+ HWMEM_MEM_PROTECTED_SYS,
+};
+
+/* User space API */
+
+/**
+ * @see struct hwmem_region.
+ */
+struct hwmem_region_us {
+ __u32 offset;
+ __u32 count;
+ __u32 start;
+ __u32 end;
+ __u32 size;
+};
+
+/**
+ * @brief Alloc request data.
+ */
+struct hwmem_alloc_request {
+ /**
+ * @brief [in] Size of requested allocation in bytes. Size will be
+ * aligned to PAGE_SIZE bytes.
+ */
+ __u32 size;
+ /**
+ * @brief [in] Flags describing requested allocation options.
+ */
+ __u32 flags; /* enum hwmem_alloc_flags */
+ /**
+ * @brief [in] Default access rights for buffer.
+ */
+ __u32 default_access; /* enum hwmem_access */
+ /**
+ * @brief [in] Memory type of the buffer.
+ */
+ __u32 mem_type; /* enum hwmem_mem_type */
+};
+
+/**
+ * @brief Set domain request data.
+ */
+struct hwmem_set_domain_request {
+ /**
+ * @brief [in] Identifier of buffer to be prepared. If 0 is specified
+ * the buffer associated with the current file instance will be used.
+ */
+ __s32 id;
+ /**
+ * @brief [in] Flags specifying access mode of the operation.
+ *
+ * One of HWMEM_ACCESS_READ and HWMEM_ACCESS_WRITE is required.
+ * For details, @see enum hwmem_access.
+ */
+ __u32 access; /* enum hwmem_access */
+ /**
+ * @brief [in] The region of bytes to be prepared.
+ *
+ * For details, @see struct hwmem_region.
+ */
+ struct hwmem_region_us region;
+};
+
+/**
+ * @brief Pin request data.
+ */
+struct hwmem_pin_request {
+ /**
+ * @brief [in] Identifier of buffer to be pinned. If 0 is specified,
+ * the buffer associated with the current file instance will be used.
+ */
+ __s32 id;
+ /**
+ * @brief [out] Physical address of first word in buffer.
+ */
+ __u32 phys_addr;
+};
+
+/**
+ * @brief Set access rights request data.
+ */
+struct hwmem_set_access_request {
+ /**
+ * @brief [in] Identifier of buffer to set access rights for. If 0 is
+ * specified, the buffer associated with the current file instance will
+ * be used.
+ */
+ __s32 id;
+ /**
+ * @brief [in] Access value indicating what is allowed.
+ */
+ __u32 access; /* enum hwmem_access */
+ /**
+ * @brief [in] Process ID to set rights for.
+ */
+ pid_t pid;
+};
+
+/**
+ * @brief Get info request data.
+ */
+struct hwmem_get_info_request {
+ /**
+ * @brief [in] Identifier of buffer to get info about. If 0 is specified,
+ * the buffer associated with the current file instance will be used.
+ */
+ __s32 id;
+ /**
+ * @brief [out] Size in bytes of buffer.
+ */
+ __u32 size;
+ /**
+ * @brief [out] Memory type of buffer.
+ */
+ __u32 mem_type; /* enum hwmem_mem_type */
+ /**
+ * @brief [out] Access rights for buffer.
+ */
+ __u32 access; /* enum hwmem_access */
+};
+
+/**
+ * @brief Allocates <size> number of bytes and returns a buffer identifier.
+ *
+ * Input is a pointer to a hwmem_alloc_request struct.
+ *
+ * @return A buffer identifier on success, or a negative error code.
+ */
+#define HWMEM_ALLOC_IOC _IOW('W', 1, struct hwmem_alloc_request)
+
+/**
+ * @brief Allocates <size> number of bytes and associates the created buffer
+ * with the current file instance.
+ *
+ * If the current file instance is already associated with a buffer the call
+ * will fail. Buffers referenced through file instances shall not be released
+ * with HWMEM_RELEASE_IOC, instead the file instance shall be closed.
+ *
+ * Input is a pointer to a hwmem_alloc_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_ALLOC_FD_IOC _IOW('W', 2, struct hwmem_alloc_request)
+
+/**
+ * @brief Releases buffer.
+ *
+ * Buffers are reference counted and will not be destroyed until the last
+ * reference is released. Buffers allocated with ALLOC_FD_IOC shall not be
+ * released with this IOC, @see HWMEM_ALLOC_FD_IOC.
+ *
+ * Input is the buffer identifier.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_RELEASE_IOC _IO('W', 3)
+
+/**
+ * Memory Mapping
+ *
+ * To map a hwmem buffer mmap the hwmem fd and supply the buffer identifier as
+ * the offset. If the buffer is linked to the fd and thus has no buffer
+ * identifier, supply 0 as the offset. Note that the offset feature of mmap is
+ * disabled in both cases; you can only mmap starting at position 0.
+ */
+
+/**
+ * @brief Prepares the buffer for CPU access.
+ *
+ * Input is a pointer to a hwmem_set_domain_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_SET_CPU_DOMAIN_IOC _IOW('W', 4, struct hwmem_set_domain_request)
+
+/**
+ * DEPRECATED: Set sync domain from driver instead!
+ *
+ * @brief Prepares the buffer for access by any DMA hardware.
+ *
+ * Input is a pointer to a hwmem_set_domain_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_SET_SYNC_DOMAIN_IOC _IOW('W', 5, struct hwmem_set_domain_request)
+
+/**
+ * DEPRECATED: Pin from driver instead!
+ *
+ * @brief Pins the buffer.
+ *
+ * Input is a pointer to a hwmem_pin_request struct. Only contiguous buffers
+ * can be pinned from user space.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_PIN_IOC _IOWR('W', 6, struct hwmem_pin_request)
+
+/**
+ * DEPRECATED: Unpin from driver instead!
+ *
+ * @brief Unpins the buffer.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_UNPIN_IOC _IO('W', 7)
+
+/**
+ * @brief Set access rights for buffer.
+ *
+ * Input is a pointer to a hwmem_set_access_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_SET_ACCESS_IOC _IOW('W', 8, struct hwmem_set_access_request)
+
+/**
+ * @brief Get buffer information.
+ *
+ * Input is a pointer to a hwmem_get_info_request struct.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_GET_INFO_IOC _IOWR('W', 9, struct hwmem_get_info_request)
+
+/**
+ * @brief Export the buffer identifier for use in another process.
+ *
+ * The global name will not increase the buffer's reference count and will
+ * therefore not keep the buffer alive.
+ *
+ * Input is the buffer identifier. If 0 is specified the buffer associated with
+ * the current file instance will be exported.
+ *
+ * @return A global buffer name on success, or a negative error code.
+ */
+#define HWMEM_EXPORT_IOC _IO('W', 10)
+
+/**
+ * @brief Import a buffer to allow local access to the buffer.
+ *
+ * Input is the buffer's global name.
+ *
+ * @return The imported buffer's identifier on success, or a negative error
+ * code.
+ */
+#define HWMEM_IMPORT_IOC _IO('W', 11)
+
+/**
+ * @brief Import a buffer to allow local access to the buffer using the current
+ * fd.
+ *
+ * Input is the buffer's global name.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+#define HWMEM_IMPORT_FD_IOC _IO('W', 12)
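A rough user-space sketch of the fd-bound path described above: allocate with HWMEM_ALLOC_FD_IOC, then mmap() the fd at offset 0 since the buffer has no identifier of its own. The "/dev/hwmem" node name is an assumption based on HWMEM_DEFAULT_DEVICE_NAME, not something this header guarantees.

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/hwmem.h>

int main(void)
{
	struct hwmem_alloc_request req = {
		.size           = 4096,
		.flags          = HWMEM_ALLOC_HINT_WRITE_COMBINE,
		.default_access = HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
		.mem_type       = HWMEM_MEM_CONTIGUOUS_SYS,
	};
	void *buf;
	int fd = open("/dev/hwmem", O_RDWR);	/* assumed device node */

	if (fd < 0 || ioctl(fd, HWMEM_ALLOC_FD_IOC, &req) < 0) {
		perror("hwmem alloc");
		return 1;
	}
	/* The buffer is tied to the fd, so the mmap offset must be 0. */
	buf = mmap(NULL, req.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		perror("hwmem mmap");
		return 1;
	}
	return 0;
}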
+
+#ifdef __KERNEL__
+
+/* Kernel API */
+
+/**
+ * @brief Values defining memory domain.
+ */
+enum hwmem_domain {
+ /**
+ * @brief This value specifies the neutral memory domain. Setting this
+ * domain will synchronize all supported memory domains.
+ */
+ HWMEM_DOMAIN_SYNC = 0,
+ /**
+ * @brief This value specifies the CPU memory domain.
+ */
+ HWMEM_DOMAIN_CPU,
+};
+
+struct hwmem_alloc;
+
+/**
+ * @brief Structure defining a region of a memory buffer.
+ *
+ * A buffer is defined to contain a number of equally sized blocks. Each block
+ * has a part of it included in the region [<start>-<end>). That is
+ * <end>-<start> bytes. Each block is <size> bytes long. Total number of bytes
+ * in the region is (<end> - <start>) * <count>. First byte of the region is
+ * <offset> + <start> bytes into the buffer.
+ *
+ * Here's an example of a region in a graphics buffer (X = buffer, R = region):
+ *
+ * XXXXXXXXXXXXXXXXXXXX \
+ * XXXXXXXXXXXXXXXXXXXX |-- offset = 60
+ * XXXXXXXXXXXXXXXXXXXX /
+ * XXRRRRRRRRXXXXXXXXXX \
+ * XXRRRRRRRRXXXXXXXXXX |-- count = 4
+ * XXRRRRRRRRXXXXXXXXXX |
+ * XXRRRRRRRRXXXXXXXXXX /
+ * XXXXXXXXXXXXXXXXXXXX
+ * --| start = 2
+ * ----------| end = 10
+ * --------------------| size = 20
+ */
+struct hwmem_region {
+ /**
+ * @brief The first block's offset from beginning of buffer.
+ */
+ size_t offset;
+ /**
+ * @brief The number of blocks included in this region.
+ */
+ size_t count;
+ /**
+ * @brief The index of the first byte included in this block.
+ */
+ size_t start;
+ /**
+ * @brief The index of the last byte included in this block plus one.
+ */
+ size_t end;
+ /**
+ * @brief The size in bytes of each block.
+ */
+ size_t size;
+};
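To make the figure above concrete, the pictured region translates to the initializer below; it covers (end - start) * count = (10 - 2) * 4 = 32 bytes, and its first byte lies offset + start = 62 bytes into the buffer.

/* The example region from the comment above, expressed as an initializer. */
static const struct hwmem_region example_region = {
	.offset = 60,	/* three full 20-byte rows precede the region */
	.count  = 4,	/* four blocks (rows) contain region bytes */
	.start  = 2,	/* index of the first region byte in each block */
	.end    = 10,	/* one past the last region byte in each block */
	.size   = 20,	/* each block (row) is 20 bytes long */
};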
+
+struct hwmem_mem_chunk {
+ phys_addr_t paddr;
+ size_t size;
+};
+
+/**
+ * @brief Allocates <size> number of bytes.
+ *
+ * @param size Number of bytes to allocate. All allocations are page aligned.
+ * @param flags Allocation options.
+ * @param def_access Default buffer access rights.
+ * @param mem_type Memory type.
+ *
+ * @return Pointer to allocation, or a negative error code.
+ */
+struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags,
+ enum hwmem_access def_access, enum hwmem_mem_type mem_type);
+
+/**
+ * @brief Release a previously allocated buffer.
+ * When last reference is released, the buffer will be freed.
+ *
+ * @param alloc Buffer to be released.
+ */
+void hwmem_release(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Set the buffer domain and prepare it for access.
+ *
+ * @param alloc Buffer to be prepared.
+ * @param access Flags defining memory access mode of the call.
+ * @param domain Value specifying the memory domain.
+ * @param region Structure defining the minimum area of the buffer to be
+ * prepared.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region);
+
+/**
+ * @brief Pins the buffer.
+ *
+ * Notice that the number of mem chunks a buffer consists of can change at any
+ * time if the buffer is not pinned. Because of this, one cannot assume that
+ * pin will succeed just because <mem_chunks> has the length reported by a
+ * previous call to pin; the buffer layout may have changed between the calls.
+ * There are two ways of handling this: keep retrying the pin procedure until
+ * it succeeds, or allocate enough mem chunks for the worst case ("buffer
+ * size" / "page size" mem chunks). Contiguous buffers always require only one
+ * mem chunk.
+ *
+ * @param alloc Buffer to be pinned.
+ * @param mem_chunks Pointer to array of mem chunks.
+ * @param mem_chunks_length Pointer to variable that contains the length of
+ * <mem_chunks> array. On success the number of written mem chunks will be
+ * stored in this variable. If the call fails with -ENOSPC the required length
+ * of <mem_chunks> will be stored in this variable.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks,
+ size_t *mem_chunks_length);
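A sketch of the -ENOSPC retry pattern described above; the helper name and its allocation policy are illustrative, not part of the hwmem API.

#include <linux/hwmem.h>
#include <linux/slab.h>

/* Hypothetical helper: pin a buffer, growing the chunk array on demand. */
static int example_pin(struct hwmem_alloc *alloc,
		       struct hwmem_mem_chunk **chunks, size_t *n_chunks)
{
	size_t len = 1;	/* a contiguous buffer needs only one chunk */
	struct hwmem_mem_chunk *mc;
	int ret;

	for (;;) {
		mc = kcalloc(len, sizeof(*mc), GFP_KERNEL);
		if (!mc)
			return -ENOMEM;

		ret = hwmem_pin(alloc, mc, &len);
		if (ret != -ENOSPC)
			break;
		/* Scattered buffer or changed layout: len now holds the
		 * required number of chunks, so retry with that size. */
		kfree(mc);
	}
	if (ret < 0) {
		kfree(mc);
		return ret;
	}
	*chunks = mc;
	*n_chunks = len;
	return 0;
}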
+
+/**
+ * @brief Unpins the buffer.
+ *
+ * @param alloc Buffer to be unpinned.
+ */
+void hwmem_unpin(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Map the buffer to user space.
+ *
+ * @param alloc Buffer to be mapped.
+ *
+ * @return Zero on success, or a negative error code.
+ */
+int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma);
+
+/**
+ * @brief Map the buffer for use in the kernel.
+ *
+ * This function implicitly pins the buffer.
+ *
+ * @param alloc Buffer to be mapped.
+ *
+ * @return Pointer to buffer, or a negative error code.
+ */
+void *hwmem_kmap(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Un-map a buffer previously mapped with hwmem_kmap.
+ *
+ * This function implicitly unpins the buffer.
+ *
+ * @param alloc Buffer to be un-mapped.
+ */
+void hwmem_kunmap(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Set access rights for buffer.
+ *
+ * @param alloc Buffer to set rights for.
+ * @param access Access value indicating what is allowed.
+ * @param pid Process ID to set rights for.
+ */
+int hwmem_set_access(struct hwmem_alloc *alloc, enum hwmem_access access,
+ pid_t pid);
+
+/**
+ * @brief Get buffer information.
+ *
+ * @param alloc Buffer to get information about.
+ * @param size Pointer to size output variable. Can be NULL.
+ * @param mem_type Pointer to memory type output variable. Can be NULL.
+ * @param access Pointer to access rights output variable. Can be NULL.
+ */
+void hwmem_get_info(struct hwmem_alloc *alloc, size_t *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access);
+
+/**
+ * @brief Allocate a global buffer name.
+ * Generated buffer name is valid in all processes. Consecutive calls will get
+ * the same name for the same buffer.
+ *
+ * @param alloc Buffer to be made public.
+ *
+ * @return Positive global name on success, or a negative error code.
+ */
+s32 hwmem_get_name(struct hwmem_alloc *alloc);
+
+/**
+ * @brief Import the global buffer name to allow local access to the buffer.
+ * This call will add a buffer reference. Resulting buffer should be
+ * released with a call to hwmem_release.
+ *
+ * @param name A valid global buffer name.
+ *
+ * @return Pointer to allocation, or a negative error code.
+ */
+struct hwmem_alloc *hwmem_resolve_by_name(s32 name);
+
+/* Integration */
+
+struct hwmem_allocator_api {
+ void *(*alloc)(void *instance, size_t size);
+ void (*free)(void *instance, void *alloc);
+ phys_addr_t (*get_alloc_paddr)(void *alloc);
+ void *(*get_alloc_kaddr)(void *instance, void *alloc);
+ size_t (*get_alloc_size)(void *alloc);
+};
+
+struct hwmem_mem_type_struct {
+ enum hwmem_mem_type id;
+ struct hwmem_allocator_api allocator_api;
+ void *allocator_instance;
+};
+
+extern struct hwmem_mem_type_struct *hwmem_mem_types;
+extern unsigned int hwmem_num_mem_types;
+
+#endif
+
+#endif /* _HWMEM_H_ */
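Putting the kernel-side calls above together, a driver could allocate, map and release a buffer roughly as follows; the error-pointer handling assumes the usual kernel convention implied by the "negative error code" return descriptions.

#include <linux/err.h>
#include <linux/hwmem.h>
#include <linux/string.h>

/* Hypothetical usage sketch, not part of the patch. */
static int example_use_hwmem(void)
{
	struct hwmem_alloc *alloc;
	void *vaddr;

	alloc = hwmem_alloc(4096, HWMEM_ALLOC_HINT_UNCACHED,
			    HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
			    HWMEM_MEM_CONTIGUOUS_SYS);
	if (IS_ERR(alloc))
		return PTR_ERR(alloc);

	vaddr = hwmem_kmap(alloc);	/* implicitly pins the buffer */
	if (IS_ERR(vaddr)) {
		hwmem_release(alloc);
		return PTR_ERR(vaddr);
	}

	memset(vaddr, 0, 4096);		/* CPU access through the mapping */

	hwmem_kunmap(alloc);		/* implicitly unpins */
	hwmem_release(alloc);
	return 0;
}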
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 6b6ee702b00..8e891b5a777 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -15,11 +15,16 @@
#define _HWMON_H_
#include <linux/device.h>
+#include <linux/notifier.h>
struct device *hwmon_device_register(struct device *dev);
void hwmon_device_unregister(struct device *dev);
+int hwmon_notifier_register(struct notifier_block *nb);
+int hwmon_notifier_unregister(struct notifier_block *nb);
+void hwmon_notify(unsigned long val, void *v);
+
/* Scale user input to sensible values */
static inline int SENSORS_LIMIT(long value, long low, long high)
{
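The new notifier hooks follow the standard struct notifier_block pattern; a consumer could register for hwmon events roughly as below (the meaning of the val/v arguments passed through hwmon_notify() is defined by the producer, not by this header).

#include <linux/hwmon.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Hypothetical consumer callback, not part of the patch. */
static int example_hwmon_event(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	pr_info("hwmon event %lu\n", val);
	return NOTIFY_OK;
}

static struct notifier_block example_hwmon_nb = {
	.notifier_call = example_hwmon_event,
};

static int __init example_hwmon_init(void)
{
	return hwmon_notifier_register(&example_hwmon_nb);
}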
diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h
index 05e03284b92..143f433b9ee 100644
--- a/include/linux/input/bu21013.h
+++ b/include/linux/input/bu21013.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST-Ericsson SA 2009
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
* License terms:GNU General Public License (GPL) version 2
*/
@@ -9,32 +9,36 @@
/**
* struct bu21013_platform_device - Handle the platform data
- * @cs_en: pointer to the cs enable function
- * @cs_dis: pointer to the cs disable function
- * @irq_read_val: pointer to read the pen irq value function
+ * @cs_en: pointer to the cs enable function
+ * @cs_dis: pointer to the cs disable function
+ * @irq_read_val: pointer to read the pen irq value function
+ * @x_max_res: xmax resolution
+ * @y_max_res: ymax resolution
* @touch_x_max: touch x max
* @touch_y_max: touch y max
* @cs_pin: chip select pin
* @irq: irq pin
- * @ext_clk: external clock flag
+ * @has_ext_clk: has external clock
+ * @enable_ext_clk: enable external clock
+ * @portrait: portrait mode flag
* @x_flip: x flip flag
* @y_flip: y flip flag
- * @wakeup: wakeup flag
- *
* This is used to handle the platform data
- */
+ */
struct bu21013_platform_device {
int (*cs_en)(int reset_pin);
int (*cs_dis)(int reset_pin);
int (*irq_read_val)(void);
+ int x_max_res;
+ int y_max_res;
int touch_x_max;
int touch_y_max;
unsigned int cs_pin;
unsigned int irq;
- bool ext_clk;
+ bool has_ext_clk;
+ bool enable_ext_clk;
+ bool portrait;
bool x_flip;
bool y_flip;
- bool wakeup;
};
-
#endif
diff --git a/include/linux/input/lps001wp.h b/include/linux/input/lps001wp.h
new file mode 100644
index 00000000000..d83cf924048
--- /dev/null
+++ b/include/linux/input/lps001wp.h
@@ -0,0 +1,87 @@
+/******************** (C) COPYRIGHT 2010 STMicroelectronics ********************
+*
+* File Name : lps001wp.h
+* Authors : MSH - Motion Mems BU - Application Team
+* : Matteo Dameno (matteo.dameno@st.com)*
+* : Carmine Iascone (carmine.iascone@st.com)
+* Version : V 1.1.1
+* Date : 05/11/2010
+* Description : LPS001WP pressure temperature sensor driver
+*
+********************************************************************************
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THE PRESENT SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
+* OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, FOR THE SOLE
+* PURPOSE TO SUPPORT YOUR APPLICATION DEVELOPMENT.
+* AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
+* INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
+* CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
+* INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
+*
+*******************************************************************************/
+
+#ifndef __LPS001WP_H__
+#define __LPS001WP_H__
+
+
+#include <linux/input.h>
+
+#define SAD0L 0x00
+#define SAD0H 0x01
+#define LPS001WP_PRS_I2C_SADROOT 0x2E
+#define LPS001WP_PRS_I2C_SAD_L ((LPS001WP_PRS_I2C_SADROOT<<1)|SAD0L)
+#define LPS001WP_PRS_I2C_SAD_H ((LPS001WP_PRS_I2C_SADROOT<<1)|SAD0H)
+#define LPS001WP_PRS_DEV_NAME "lps001wp_prs"
+
+/* input define mappings */
+#define ABS_PR ABS_PRESSURE
+#define ABS_TEMP ABS_GAS
+#define ABS_DLTPR ABS_MISC
+
+
+
+/************************************************/
+/* Pressure section defines */
+/************************************************/
+
+/* Pressure Sensor Operating Mode */
+#define LPS001WP_PRS_ENABLE 0x01
+#define LPS001WP_PRS_DISABLE 0x00
+
+
+
+
+#define LPS001WP_PRS_PM_NORMAL 0x40
+#define LPS001WP_PRS_PM_OFF LPS001WP_PRS_DISABLE
+
+#define SENSITIVITY_T 64 /** = 64 LSB/degrC */
+#define SENSITIVITY_P 16 /** = 16 LSB/mbar */
+
+
+#ifdef __KERNEL__
+/**
+ * struct lps001wp_prs_platform_data - platform datastructure for lps001wp_prs
+ * @poll_interval: maximum polling interval
+ * @min_interval: minimum polling interval
+ * @init: pointer to init function
+ * @exit: pointer to deinitialisation function
+ */
+struct lps001wp_prs_platform_data {
+
+ int poll_interval;
+ int min_interval;
+
+ int (*init)(void);
+ void (*exit)(void);
+
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* __LPS001WP_H__ */
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 0d7d6a1b172..9943c5dd618 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -120,6 +120,7 @@ struct kimage {
/* kexec interface functions */
extern void machine_kexec(struct kimage *image);
+extern void machine_crash_swreset(void);
extern int machine_kexec_prepare(struct kimage *image);
extern void machine_kexec_cleanup(struct kimage *image);
extern asmlinkage long sys_kexec_load(unsigned long entry,
@@ -170,6 +171,7 @@ unsigned long paddr_vmcoreinfo_note(void);
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
+extern struct atomic_notifier_head crash_percpu_notifier_list;
#ifndef kexec_flush_icache_page
#define kexec_flush_icache_page(page)
diff --git a/include/linux/l3g4200d.h b/include/linux/l3g4200d.h
new file mode 100644
index 00000000000..28459601e4f
--- /dev/null
+++ b/include/linux/l3g4200d.h
@@ -0,0 +1,27 @@
+/*
+ * ST L3G4200D 3-Axis Gyroscope header file
+ *
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Chethan Krishna N <chethan.krishna@stericsson.com> for ST-Ericsson
+ * Licence terms: GNU General Public Licence (GPL) version 2
+ */
+
+#ifndef __L3G4200D_H__
+#define __L3G4200D_H__
+
+#ifdef __KERNEL__
+struct l3g4200d_gyr_platform_data {
+ const char *name_gyr;
+
+ u8 axis_map_x;
+ u8 axis_map_y;
+ u8 axis_map_z;
+
+ u8 negative_x;
+ u8 negative_y;
+ u8 negative_z;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* __L3G4200D_H__ */
diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h
index 8eb12357a11..2794e3b9e1d 100644
--- a/include/linux/led-lm3530.h
+++ b/include/linux/led-lm3530.h
@@ -58,6 +58,12 @@
#define LM3530_ALS_IMPD_700Ohm (0x0E)
#define LM3530_ALS_IMPD_667Ohm (0x0F)
+/*
+ * If the LM3530 does not use a GPIO for HWEN, set the hw_en_gpio
+ * member of struct lm3530_platform_data to LM3530_NO_HWEN_GPIO
+ */
+#define LM3530_NO_HWEN_GPIO -1
+
enum lm3530_mode {
LM3530_BL_MODE_MANUAL = 0, /* "man" */
LM3530_BL_MODE_ALS, /* "als" */
@@ -87,6 +93,7 @@ enum lm3530_als_mode {
* @als_vmin: als input voltage calibrated for max brightness in mV
* @als_vmax: als input voltage calibrated for min brightness in mV
* @brt_val: brightness value (0-255)
+ * @hw_en_gpio: GPIO line for LM3530 HWEN
*/
struct lm3530_platform_data {
enum lm3530_mode mode;
@@ -107,6 +114,8 @@ struct lm3530_platform_data {
u32 als_vmax;
u8 brt_val;
+
+ int hw_en_gpio;
};
#endif /* _LINUX_LED_LM3530_H__ */
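A board file wiring up the new hw_en_gpio member might look like the fragment below; only fields visible in this hunk are set, and the values are placeholders.

/* Hypothetical board data illustrating LM3530_NO_HWEN_GPIO. */
static struct lm3530_platform_data example_lm3530_pdata = {
	.mode       = LM3530_BL_MODE_MANUAL,
	.brt_val    = 0x7f,			/* placeholder brightness */
	.hw_en_gpio = LM3530_NO_HWEN_GPIO,	/* HWEN is not on a GPIO */
};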
diff --git a/include/linux/leds-ab5500.h b/include/linux/leds-ab5500.h
new file mode 100644
index 00000000000..9ba9ac61d90
--- /dev/null
+++ b/include/linux/leds-ab5500.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA.
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Simple driver for HVLED in ST-Ericsson AB5500 Analog baseband Controller
+ *
+ * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
+ */
+
+#define AB5500_HVLED0 0
+#define AB5500_HVLED1 1
+#define AB5500_HVLED2 2
+#define AB5500_HVLEDS_MAX 3
+
+enum ab5500_fade_delay {
+ AB5500_FADE_DELAY_BYPASS = 0,
+ AB5500_FADE_DELAY_HALFSEC,
+ AB5500_FADE_DELAY_ONESEC,
+ AB5500_FADE_DELAY_TWOSEC
+};
+
+struct ab5500_led_conf {
+ char *name;
+ u8 led_id;
+ u8 max_current;
+ u8 fade_hi;
+ u8 fade_lo;
+ bool led_on;
+};
+
+struct ab5500_hvleds_platform_data {
+ bool hw_fade;
+ struct ab5500_led_conf leds[AB5500_HVLEDS_MAX];
+};
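For example, a board could describe a single HVLED with hardware fading enabled as below; the name and current code are placeholders.

/* Hypothetical board configuration for the AB5500 HVLED driver. */
static struct ab5500_hvleds_platform_data example_hvleds_pdata = {
	.hw_fade = true,
	.leds = {
		[AB5500_HVLED0] = {
			.name        = "button-backlight",	/* placeholder */
			.led_id      = AB5500_HVLED0,
			.max_current = 10,			/* placeholder */
			.fade_hi     = AB5500_FADE_DELAY_ONESEC,
			.fade_lo     = AB5500_FADE_DELAY_HALFSEC,
			.led_on      = true,
		},
	},
};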
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
index 33a07116748..9c5eab6e086 100644
--- a/include/linux/leds_pwm.h
+++ b/include/linux/leds_pwm.h
@@ -11,6 +11,7 @@ struct led_pwm {
u8 active_low;
unsigned max_brightness;
unsigned pwm_period_ns;
+ unsigned int lth_brightness;
};
struct led_pwm_platform_data {
diff --git a/include/linux/lsm303dlh.h b/include/linux/lsm303dlh.h
new file mode 100644
index 00000000000..a565faa79ba
--- /dev/null
+++ b/include/linux/lsm303dlh.h
@@ -0,0 +1,63 @@
+/*
+ * lsm303dlh.h
+ * ST 3-Axis Accelerometer/Magnetometer header file
+ *
+ * Copyright (C) 2010 STMicroelectronics
+ * Author: Carmine Iascone (carmine.iascone@st.com)
+ * Author: Matteo Dameno (matteo.dameno@st.com)
+ *
+ * Copyright (C) 2010 STEricsson
+ * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * Updated:Preetham Rao Kaskurthi <preetham.rao@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LSM303DLH_H__
+#define __LSM303DLH_H__
+
+#include <linux/ioctl.h>
+
+#ifdef __KERNEL__
+/**
+ * struct lsm303dlh_platform_data - platform datastructure for lsm303dlh
+ * @name_a: accelerometer name
+ * @name_m: magnetometer name
+ * @irq_a1: interrupt line 1 of accelerometer
+ * @irq_a2: interrupt line 2 of accelerometer
+ * @irq_m: interrupt line of magnetometer
+ * @axis_map_x: x axis position on the hardware, 0 1 or 2
+ * @axis_map_y: y axis position on the hardware, 0 1 or 2
+ * @axis_map_z: z axis position on the hardware, 0 1 or 2
+ * @negative_x: x axis is orientation, 0 or 1
+ * @negative_y: y axis is orientation, 0 or 1
+ * @negative_z: z axis is orientation, 0 or 1
+ * @chip_id: to store ID of the LSM chip
+ */
+struct lsm303dlh_platform_data {
+ const char *name_a;
+ const char *name_m;
+ u32 irq_a1;
+ u32 irq_a2;
+ u32 irq_m;
+ u8 axis_map_x;
+ u8 axis_map_y;
+ u8 axis_map_z;
+ u8 negative_x;
+ u8 negative_y;
+ u8 negative_z;
+ u32 chip_id;
+};
+#endif /* __KERNEL__ */
+
+#endif /* __LSM303DLH_H__ */
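A minimal board-level instantiation of the platform data documented above could look like this; the device names, IRQ numbers and axis mapping are placeholders.

/* Hypothetical board data for an LSM303DLH mounted with identity axes. */
static struct lsm303dlh_platform_data example_lsm303dlh_pdata = {
	.name_a     = "lsm303dlh.0",	/* accelerometer, placeholder name */
	.name_m     = "lsm303dlh.1",	/* magnetometer, placeholder name */
	.irq_a1     = 0,		/* placeholder interrupt lines */
	.irq_a2     = 0,
	.irq_m      = 0,
	.axis_map_x = 0,
	.axis_map_y = 1,
	.axis_map_z = 2,
	.negative_x = 0,
	.negative_y = 0,
	.negative_z = 0,
};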
diff --git a/include/linux/mfd/ab8500/bm.h b/include/linux/mfd/ab8500/bm.h
new file mode 100644
index 00000000000..a4c4519ab11
--- /dev/null
+++ b/include/linux/mfd/ab8500/bm.h
@@ -0,0 +1,547 @@
+/*
+ * Copyright ST-Ericsson 2009.
+ *
+ * Author: Arun Murthy <arun.murthy@stericsson.com>
+ * Licensed under GPLv2.
+ */
+
+#ifndef _AB8500_BM_H
+#define _AB8500_BM_H
+
+#include <linux/kernel.h>
+
+/*
+ * System control 2 register offsets.
+ * bank = 0x02
+ */
+#define AB8500_MAIN_WDOG_CTRL_REG 0x01
+#define AB8500_LOW_BAT_REG 0x03
+#define AB8500_BATT_OK_REG 0x04
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB8500_USB_LINE_STAT_REG 0x80
+
+/*
+ * Charger / status register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_CH_STATUS1_REG 0x00
+#define AB8500_CH_STATUS2_REG 0x01
+#define AB8500_CH_USBCH_STAT1_REG 0x02
+#define AB8500_CH_USBCH_STAT2_REG 0x03
+#define AB8500_CH_FSM_STAT_REG 0x04
+#define AB8500_CH_STAT_REG 0x05
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_CH_VOLT_LVL_REG 0x40
+#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/
+#define AB8500_CH_OPT_CRNTLVL_REG 0x42
+#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/
+#define AB8500_CH_WD_TIMER_REG 0x50
+#define AB8500_CHARG_WD_CTRL 0x51
+#define AB8500_BTEMP_HIGH_TH 0x52
+#define AB8500_LED_INDICATOR_PWM_CTRL 0x53
+#define AB8500_LED_INDICATOR_PWM_DUTY 0x54
+#define AB8500_BATT_OVV 0x55
+#define AB8500_CHARGER_CTRL 0x56
+#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/
+
+/*
+ * Charger / main control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_MCH_CTRL1 0x80
+#define AB8500_MCH_CTRL2 0x81
+#define AB8500_MCH_IPT_CURLVL_REG 0x82
+#define AB8500_CH_WD_REG 0x83
+
+/*
+ * Charger / USB control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_USBCH_CTRL1_REG 0xC0
+#define AB8500_USBCH_CTRL2_REG 0xC1
+#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2
+
+/*
+ * Gas Gauge register offsets
+ * Bank : 0x0C
+ */
+#define AB8500_GASG_CC_CTRL_REG 0x00
+#define AB8500_GASG_CC_ACCU1_REG 0x01
+#define AB8500_GASG_CC_ACCU2_REG 0x02
+#define AB8500_GASG_CC_ACCU3_REG 0x03
+#define AB8500_GASG_CC_ACCU4_REG 0x04
+#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05
+#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06
+#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07
+#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08
+#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09
+#define AB8500_GASG_CC_OFFSET_REG 0x0A
+#define AB8500_GASG_CC_NCOV_ACCU 0x10
+#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11
+#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12
+#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13
+#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14
+
+/*
+ * Interrupt register offsets
+ * Bank : 0x0E
+ */
+#define AB8500_IT_SOURCE2_REG 0x01
+#define AB8500_IT_SOURCE21_REG 0x14
+
+/*
+ * RTC register offsets
+ * Bank: 0x0F
+ */
+#define AB8500_RTC_BACKUP_CHG_REG 0x0C
+#define AB8500_RTC_CC_CONF_REG 0x01
+#define AB8500_RTC_CTRL_REG 0x0B
+
+/*
+ * OTP register offsets
+ * Bank : 0x15
+ */
+#define AB8500_OTP_CONF_15 0x0E
+
+/* GPADC constants from AB8500 spec, UM0836 */
+#define ADC_RESOLUTION 1024
+#define ADC_CH_MAIN_MIN 0
+#define ADC_CH_MAIN_MAX 20030
+#define ADC_CH_VBUS_MIN 0
+#define ADC_CH_VBUS_MAX 20030
+#define ADC_CH_VBAT_MIN 2300
+#define ADC_CH_VBAT_MAX 4800
+#define ADC_CH_BKBAT_MIN 0
+#define ADC_CH_BKBAT_MAX 3200
+
+/* Main charge i/p current */
+#define MAIN_CH_IP_CUR_0P9A 0x80
+#define MAIN_CH_IP_CUR_1P0A 0x90
+#define MAIN_CH_IP_CUR_1P1A 0xA0
+#define MAIN_CH_IP_CUR_1P2A 0xB0
+#define MAIN_CH_IP_CUR_1P3A 0xC0
+#define MAIN_CH_IP_CUR_1P4A 0xD0
+#define MAIN_CH_IP_CUR_1P5A 0xE0
+
+/* ChVoltLevel */
+#define CH_VOL_LVL_3P5 0x00
+#define CH_VOL_LVL_4P0 0x14
+#define CH_VOL_LVL_4P05 0x16
+#define CH_VOL_LVL_4P1 0x1B
+#define CH_VOL_LVL_4P15 0x20
+#define CH_VOL_LVL_4P2 0x25
+#define CH_VOL_LVL_4P6 0x4D
+
+/* ChOutputCurrentLevel */
+#define CH_OP_CUR_LVL_0P1 0x00
+#define CH_OP_CUR_LVL_0P2 0x01
+#define CH_OP_CUR_LVL_0P3 0x02
+#define CH_OP_CUR_LVL_0P4 0x03
+#define CH_OP_CUR_LVL_0P5 0x04
+#define CH_OP_CUR_LVL_0P6 0x05
+#define CH_OP_CUR_LVL_0P7 0x06
+#define CH_OP_CUR_LVL_0P8 0x07
+#define CH_OP_CUR_LVL_0P9 0x08
+#define CH_OP_CUR_LVL_1P4 0x0D
+#define CH_OP_CUR_LVL_1P5 0x0E
+#define CH_OP_CUR_LVL_1P6 0x0F
+
+/* BTEMP High thermal limits */
+#define BTEMP_HIGH_TH_57_0 0x00
+#define BTEMP_HIGH_TH_52 0x01
+#define BTEMP_HIGH_TH_57_1 0x02
+#define BTEMP_HIGH_TH_62 0x03
+
+/* current is mA */
+#define USB_0P1A 100
+#define USB_0P2A 200
+#define USB_0P3A 300
+#define USB_0P4A 400
+#define USB_0P5A 500
+
+#define LOW_BAT_3P1V 0x20
+#define LOW_BAT_2P3V 0x00
+#define LOW_BAT_RESET 0x01
+#define LOW_BAT_ENABLE 0x01
+
+/* Backup battery constants */
+#define BUP_ICH_SEL_50UA 0x00
+#define BUP_ICH_SEL_150UA 0x04
+#define BUP_ICH_SEL_300UA 0x08
+#define BUP_ICH_SEL_700UA 0x0C
+
+#define BUP_VCH_SEL_2P5V 0x00
+#define BUP_VCH_SEL_2P6V 0x01
+#define BUP_VCH_SEL_2P8V 0x02
+#define BUP_VCH_SEL_3P1V 0x03
+
+/* Battery OVV constants */
+#define BATT_OVV_ENA 0x02
+#define BATT_OVV_TH_3P7 0x00
+#define BATT_OVV_TH_4P75 0x01
+
+/* VBUS OVV constants */
+#define VBUS_OVV_SELECT_MASK 0x78
+#define VBUS_OVV_SELECT_5P6V 0x00
+#define VBUS_OVV_SELECT_5P7V 0x08
+#define VBUS_OVV_SELECT_5P8V 0x10
+#define VBUS_OVV_SELECT_5P9V 0x18
+#define VBUS_OVV_SELECT_6P0V 0x20
+#define VBUS_OVV_SELECT_6P1V 0x28
+#define VBUS_OVV_SELECT_6P2V 0x30
+#define VBUS_OVV_SELECT_6P3V 0x38
+
+#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04
+
+/* Fuel Gauge constants */
+#define RESET_ACCU 0x02
+#define READ_REQ 0x01
+#define CC_DEEP_SLEEP_ENA 0x02
+#define CC_PWR_UP_ENA 0x01
+#define CC_SAMPLES_40 0x28
+#define RD_NCONV_ACCU_REQ 0x01
+#define CC_CALIB 0x08
+#define CC_INTAVGOFFSET_ENA 0x10
+#define CC_MUXOFFSET 0x80
+#define CC_INT_CAL_N_AVG_MASK 0x60
+#define CC_INT_CAL_SAMPLES_16 0x40
+#define CC_INT_CAL_SAMPLES_8 0x20
+#define CC_INT_CAL_SAMPLES_4 0x00
+
+/* RTC constants */
+#define RTC_BUP_CH_ENA 0x10
+
+/* BatCtrl Current Source Constants */
+#define BAT_CTRL_7U_ENA 0x01
+#define BAT_CTRL_20U_ENA 0x02
+#define BAT_CTRL_CMP_ENA 0x04
+#define FORCE_BAT_CTRL_CMP_HIGH 0x08
+#define BAT_CTRL_PULL_UP_ENA 0x10
+
+/* Battery type */
+#define BATTERY_UNKNOWN 00
+
+/*
+ * ADC for the battery thermistor.
+ * When using the ADC_THERM_BATCTRL the battery ID resistor is combined with
+ * a NTC resistor to both identify the battery and to measure its temperature.
+ * Different phone manufacturers use different techniques to both identify the
+ * battery and to read its temperature.
+ */
+enum adc_therm {
+ ADC_THERM_BATCTRL,
+ ADC_THERM_BATTEMP,
+};
+
+/**
+ * struct res_to_temp - defines one point in a temp to res curve. To
+ * be used in battery packs that combines the identification resistor with a
+ * NTC resistor.
+ * @temp: battery pack temperature in Celsius
+ * @resist: NTC resistor net total resistance
+ */
+struct res_to_temp {
+ int temp;
+ int resist;
+};
+
+/**
+ * struct batres_vs_temp - defines one point in a temp vs battery internal
+ * resistance curve.
+ * @temp: battery pack temperature in Celsius
+ * @resist: battery internal resistance in mOhm
+ */
+struct batres_vs_temp {
+ int temp;
+ int resist;
+};
+
+/**
+ * struct v_to_cap - Table for translating voltage to capacity
+ * @voltage: Voltage in mV
+ * @capacity: Capacity in percent
+ */
+struct v_to_cap {
+ int voltage;
+ int capacity;
+};
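To make the table's use concrete, a voltage-to-capacity lookup with linear interpolation between points could be sketched as below; the actual fuel gauge algorithm is not part of this header, and the table is assumed to be sorted by falling voltage.

/* Illustrative interpolation over a v_to_cap table (highest voltage first). */
static int example_volt_to_capacity(const struct v_to_cap *tbl, int n, int mv)
{
	int i;

	if (mv >= tbl[0].voltage)
		return tbl[0].capacity;

	for (i = 1; i < n; i++) {
		if (mv >= tbl[i].voltage)
			return tbl[i].capacity +
				(tbl[i - 1].capacity - tbl[i].capacity) *
				(mv - tbl[i].voltage) /
				(tbl[i - 1].voltage - tbl[i].voltage);
	}
	return tbl[n - 1].capacity;	/* below the lowest table point */
}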
+
+/* Forward declaration */
+struct ab8500_fg;
+
+/**
+ * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * if not specified
+ * @recovery_sleep_timer: Time between measurements while recovering
+ * @recovery_total_time: Total recovery time
+ * @init_timer: Measurement interval during startup
+ * @init_discard_time: Time we discard voltage measurement at startup
+ * @init_total_time: Total init time during startup
+ * @high_curr_time: Time current has to be high to go to recovery
+ * @accu_charging: FG accumulation time while charging
+ * @accu_high_curr: FG accumulation time in high current mode
+ * @high_curr_threshold: High current threshold, in mA
+ * @lowbat_threshold: Low battery threshold, in mV
+ * @battok_falling_th_sel0: Threshold in mV for the battOk sel0 signal,
+ * with a resolution of 50 mV per step.
+ * @battok_raising_th_sel1: Threshold in mV for the battOk sel1 signal,
+ * with a resolution of 50 mV per step.
+ * @user_cap_limit: Capacity reported from user space must be within this
+ * limit to be considered sane, in percentage points.
+ * @maint_thres: Threshold at which we stop reporting battery full
+ * while in maintenance charging, in percent
+ */
+struct ab8500_fg_parameters {
+ int recovery_sleep_timer;
+ int recovery_total_time;
+ int init_timer;
+ int init_discard_time;
+ int init_total_time;
+ int high_curr_time;
+ int accu_charging;
+ int accu_high_curr;
+ int high_curr_threshold;
+ int lowbat_threshold;
+ int battok_falling_th_sel0;
+ int battok_raising_th_sel1;
+ int user_cap_limit;
+ int maint_thres;
+};
+
+/**
+ * struct ab8500_maxim_parameters - maximization parameters from the board config
+ * @ena_maxi: Enable maximization for this battery type
+ * @chg_curr: Maximum charger current allowed
+ * @wait_cycles: cycles to wait before setting charger current
+ * @charger_curr_step: delta between two charger current settings (mA)
+ */
+struct ab8500_maxim_parameters {
+ bool ena_maxi;
+ int chg_curr;
+ int wait_cycles;
+ int charger_curr_step;
+};
+
+/**
+ * struct battery_type - different batteries supported
+ * @name: battery technology
+ * @resis_high: battery upper resistance limit
+ * @resis_low: battery lower resistance limit
+ * @charge_full_design: Maximum battery capacity in mAh
+ * @nominal_voltage: Nominal voltage of the battery in mV
+ * @termination_vol: max voltage up to which the battery can be charged
+ * @termination_curr: battery charging termination current in mA
+ * @recharge_vol: battery voltage limit that will trigger a new
+ * full charging cycle in the case where maintenance
+ * charging has been disabled
+ * @normal_cur_lvl: charger current in normal state in mA
+ * @normal_vol_lvl: charger voltage in normal state in mV
+ * @maint_a_cur_lvl: charger current in maintenance A state in mA
+ * @maint_a_vol_lvl: charger voltage in maintenance A state in mV
+ * @maint_a_chg_timer_h: charge time in maintenance A state
+ * @maint_b_cur_lvl: charger current in maintenance B state in mA
+ * @maint_b_vol_lvl: charger voltage in maintenance B state in mV
+ * @maint_b_chg_timer_h: charge time in maintenance B state
+ * @low_high_cur_lvl: charger current in temp low/high state in mA
+ * @low_high_vol_lvl: charger voltage in temp low/high state in mV
+ * @battery_resistance: battery inner resistance in mOhm.
+ * @n_temp_tbl_elements: number of elements in r_to_t_tbl
+ * @r_to_t_tbl: table containing resistance to temp points
+ * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
+ * @v_to_cap_tbl: Voltage to capacity (in %) table
+ * @n_batres_tbl_elements: number of elements in the batres_tbl
+ * @batres_tbl: battery internal resistance vs temperature table
+ */
+struct battery_type {
+ int name;
+ int resis_high;
+ int resis_low;
+ int charge_full_design;
+ int nominal_voltage;
+ int termination_vol;
+ int termination_curr;
+ int recharge_vol;
+ int normal_cur_lvl;
+ int normal_vol_lvl;
+ int maint_a_cur_lvl;
+ int maint_a_vol_lvl;
+ int maint_a_chg_timer_h;
+ int maint_b_cur_lvl;
+ int maint_b_vol_lvl;
+ int maint_b_chg_timer_h;
+ int low_high_cur_lvl;
+ int low_high_vol_lvl;
+ int battery_resistance;
+ int n_temp_tbl_elements;
+ struct res_to_temp *r_to_t_tbl;
+ int n_v_cap_tbl_elements;
+ struct v_to_cap *v_to_cap_tbl;
+ int n_batres_tbl_elements;
+ struct batres_vs_temp *batres_tbl;
+};
+
+/**
+ * struct ab8500_bm_capacity_levels - ab8500 capacity level data
+ * @critical: critical capacity level in percent
+ * @low: low capacity level in percent
+ * @normal: normal capacity level in percent
+ * @high: high capacity level in percent
+ * @full: full capacity level in percent
+ */
+struct ab8500_bm_capacity_levels {
+ int critical;
+ int low;
+ int normal;
+ int high;
+ int full;
+};
+
+/**
+ * struct ab8500_bm_charger_parameters - Charger specific parameters
+ * @usb_volt_max: maximum allowed USB charger voltage in mV
+ * @usb_curr_max: maximum allowed USB charger current in mA
+ * @ac_volt_max: maximum allowed AC charger voltage in mV
+ * @ac_curr_max: maximum allowed AC charger current in mA
+ */
+struct ab8500_bm_charger_parameters {
+ int usb_volt_max;
+ int usb_curr_max;
+ int ac_volt_max;
+ int ac_curr_max;
+};
+
+/**
+ * struct ab8500_bm_data - ab8500 battery management data
+ * @temp_under: under this temp, charging is stopped
+ * @temp_low: between this temp and temp_under charging is reduced
+ * @temp_high: between this temp and temp_over charging is reduced
+ * @temp_over: over this temp, charging is stopped
+ * @temp_interval_chg: temperature measurement interval in s when charging
+ * @temp_interval_nochg: temperature measurement interval in s when not charging
+ * @main_safety_tmr_h: safety timer for main charger
+ * @usb_safety_tmr_h: safety timer for usb charger
+ * @bkup_bat_v: voltage which we charge the backup battery with
+ * @bkup_bat_i: current which we charge the backup battery with
+ * @no_maintenance: indicates that maintenance charging is disabled
+ * @adc_therm: placement of thermistor, batctrl or battemp adc
+ * @chg_unknown_bat: flag to enable charging of unknown batteries
+ * @enable_overshoot: flag to enable VBAT overshoot control
+ * @fg_res: resistance of FG resistor in 0.1mOhm
+ * @n_btypes: number of elements in array bat_type
+ * @batt_id: index of the identified battery in array bat_type
+ * @interval_charging: charge alg cycle period time when charging (sec)
+ * @interval_not_charging: charge alg cycle period time when not charging (sec)
+ * @temp_hysteresis: temperature hysteresis
+ * @gnd_lift_resistance: Battery ground to phone ground resistance (mOhm)
+ * @maxi: maximization parameters
+ * @cap_levels: capacity in percent for the different capacity levels
+ * @bat_type: table of supported battery types
+ * @chg_params: charger parameters
+ * @fg_params: fuel gauge parameters
+ */
+struct ab8500_bm_data {
+ int temp_under;
+ int temp_low;
+ int temp_high;
+ int temp_over;
+ int temp_interval_chg;
+ int temp_interval_nochg;
+ int main_safety_tmr_h;
+ int usb_safety_tmr_h;
+ int bkup_bat_v;
+ int bkup_bat_i;
+ bool no_maintenance;
+ bool chg_unknown_bat;
+ bool enable_overshoot;
+ enum adc_therm adc_therm;
+ int fg_res;
+ int n_btypes;
+ int batt_id;
+ int interval_charging;
+ int interval_not_charging;
+ int temp_hysteresis;
+ int gnd_lift_resistance;
+ const struct ab8500_maxim_parameters *maxi;
+ const struct ab8500_bm_capacity_levels *cap_levels;
+ const struct battery_type *bat_type;
+ const struct ab8500_bm_charger_parameters *chg_params;
+ const struct ab8500_fg_parameters *fg_params;
+};
+
+struct ab8500_charger_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+ bool autopower_cfg;
+};
+
+struct ab8500_btemp_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct ab8500_fg_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct ab8500_chargalg_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+struct ab8500_btemp;
+struct ab8500_gpadc;
+struct ab8500_fg;
+#ifdef CONFIG_AB8500_BM
+void ab8500_fg_reinit(void);
+void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
+struct ab8500_btemp *ab8500_btemp_get(void);
+int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
+struct ab8500_fg *ab8500_fg_get(void);
+int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
+int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
+int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
+
+#else
+static inline void ab8500_fg_reinit(void)
+{
+}
+static inline void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA)
+{
+}
+static inline struct ab8500_btemp *ab8500_btemp_get(void)
+{
+ return NULL;
+}
+static inline int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
+{
+ return 0;
+}
+static inline struct ab8500_fg *ab8500_fg_get(void)
+{
+ return NULL;
+}
+static inline int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev)
+{
+ return -ENODEV;
+}
+
+static inline int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
+{
+ return -ENODEV;
+}
+
+static inline int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
+{
+ return -ENODEV;
+}
+
+#endif
+#endif /* _AB8500_BM_H */
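The split ab8500_fg_inst_curr_start()/ab8500_fg_inst_curr_finalize() pair suggests a non-blocking measurement pattern roughly like the one below; this is a usage sketch based only on the prototypes above, not on the driver implementation.

/* Hypothetical caller: kick off a current measurement, collect it later. */
static int example_read_inst_current(int *result)
{
	struct ab8500_fg *fg = ab8500_fg_get();
	int ret;

	if (!fg)
		return -ENODEV;

	ret = ab8500_fg_inst_curr_start(fg);
	if (ret)
		return ret;

	/* ... do other work while the measurement completes ... */

	return ab8500_fg_inst_curr_finalize(fg, result);
}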
diff --git a/include/linux/mfd/ab8500/denc-regs.h b/include/linux/mfd/ab8500/denc-regs.h
new file mode 100644
index 00000000000..a6683ca7470
--- /dev/null
+++ b/include/linux/mfd/ab8500/denc-regs.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson AB8500 DENC related registers
+ *
+ * Author: Marcus Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __AB8500_DENC_H
+#define __AB8500_DENC_H
+
+#define AB8500_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define AB8500_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
+
+#define AB8500_CTRL3 0x00000200
+#define AB8500_CTRL3_TH_SD_ENA_SHIFT 3
+#define AB8500_CTRL3_TH_SD_ENA_MASK 0x00000008
+#define AB8500_CTRL3_TH_SD_ENA(__x) \
+ AB8500_VAL2REG(AB8500_CTRL3, TH_SD_ENA, __x)
+#define AB8500_CTRL3_RESET_DENC_N_SHIFT 2
+#define AB8500_CTRL3_RESET_DENC_N_MASK 0x00000004
+#define AB8500_CTRL3_RESET_DENC_N(__x) \
+ AB8500_VAL2REG(AB8500_CTRL3, RESET_DENC_N, __x)
+#define AB8500_CTRL3_RESET_AUD_N_SHIFT 1
+#define AB8500_CTRL3_RESET_AUD_N_MASK 0x00000002
+#define AB8500_CTRL3_RESET_AUD_N(__x) \
+ AB8500_VAL2REG(AB8500_CTRL3, RESET_AUD_N, __x)
+#define AB8500_CTRL3_CLK_32K_OUT2_IS_SHIFT 0
+#define AB8500_CTRL3_CLK_32K_OUT2_IS_MASK 0x00000001
+#define AB8500_CTRL3_CLK_32K_OUT2_IS(__x) \
+ AB8500_VAL2REG(AB8500_CTRL3, CLK_32K_OUT2_IS, __x)
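The VAL2REG/REG2VAL helpers compose and extract register fields from the per-field SHIFT/MASK definitions; for instance, using the CTRL3 fields just defined (purely illustrative, no new register semantics implied):

/* Compose a CTRL3 value releasing both resets, then read one field back. */
static inline u8 example_ctrl3_release_resets(void)
{
	u8 ctrl3 = AB8500_CTRL3_RESET_DENC_N(1) |	/* de-assert DENC reset */
		   AB8500_CTRL3_RESET_AUD_N(1);		/* de-assert audio reset */

	/* AB8500_REG2VAL(AB8500_CTRL3, RESET_DENC_N, ctrl3) evaluates to 1. */
	return ctrl3;
}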
+#define AB8500_SYS_ULP_CLK_CONF 0x0000020A
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_PD_ENA_SHIFT 7
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_PD_ENA_MASK 0x00000080
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_PD_ENA(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_PD_ENA, __x)
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_BUF_ENA_SHIFT 6
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_BUF_ENA_MASK 0x00000040
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_BUF_ENA(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_BUF_ENA, __x)
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_STRE_SHIFT 5
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_STRE_MASK 0x00000020
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_STRE(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, ULP_CLK_STRE, __x)
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_INV_SHIFT 4
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_INV_MASK 0x00000010
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_INV(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_INV, __x)
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_DE_IN_SHIFT 3
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_DE_IN_MASK 0x00000008
+#define AB8500_SYS_ULP_CLK_CONF_TVOUT_CLK_DE_IN(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_DE_IN, __x)
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_STRE_SHIFT 2
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_STRE_MASK 0x00000004
+#define AB8500_SYS_ULP_CLK_CONF_CLK_27MHZ_STRE(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_STRE, __x)
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_SHIFT 0
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_MASK 0x00000003
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_NO_FUNC 0
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_AS_OUTPUT 1
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_AS_INPUT 2
+#define AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF(__x) \
+ AB8500_VAL2REG(AB8500_SYS_ULP_CLK_CONF, ULP_CLK_CONF, \
+ AB8500_SYS_ULP_CLK_CONF_ULP_CLK_CONF_##__x)
+#define AB8500_SYS_CLK_CTRL 0x0000020C
+#define AB8500_SYS_CLK_CTRL_USB_CLK_VALID_SHIFT 2
+#define AB8500_SYS_CLK_CTRL_USB_CLK_VALID_MASK 0x00000004
+#define AB8500_SYS_CLK_CTRL_USB_CLK_VALID(__x) \
+ AB8500_VAL2REG(AB8500_SYS_CLK_CTRL, USB_CLK_VALID, __x)
+#define AB8500_SYS_CLK_CTRL_TVOUT_CLK_VALID_SHIFT 1
+#define AB8500_SYS_CLK_CTRL_TVOUT_CLK_VALID_MASK 0x00000002
+#define AB8500_SYS_CLK_CTRL_TVOUT_CLK_VALID(__x) \
+ AB8500_VAL2REG(AB8500_SYS_CLK_CTRL, TVOUT_CLK_VALID, __x)
+#define AB8500_SYS_CLK_CTRL_TVOUT_PLL_ENA_SHIFT 0
+#define AB8500_SYS_CLK_CTRL_TVOUT_PLL_ENA_MASK 0x00000001
+#define AB8500_SYS_CLK_CTRL_TVOUT_PLL_ENA(__x) \
+ AB8500_VAL2REG(AB8500_SYS_CLK_CTRL, TVOUT_PLL_ENA, __x)
+#define AB8500_REGU_MISC1 0x00000380
+#define AB8500_REGU_MISC1_V_TVOUT_LP_SHIFT 7
+#define AB8500_REGU_MISC1_V_TVOUT_LP_MASK 0x00000080
+#define AB8500_REGU_MISC1_V_TVOUT_LP(__x) \
+ AB8500_VAL2REG(AB8500_REGU_MISC1, V_TVOUT_LP, __x)
+#define AB8500_REGU_MISC1_V_INT_CORE_12_LP_SHIFT 6
+#define AB8500_REGU_MISC1_V_INT_CORE_12_LP_MASK 0x00000040
+#define AB8500_REGU_MISC1_V_INT_CORE_12_LP(__x) \
+ AB8500_VAL2REG(AB8500_REGU_MISC1, V_INT_CORE_12_LP, __x)
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_SHIFT 3
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_MASK 0x00000038
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_2V 0
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_225V 1
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_25V 2
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_275V 3
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_3V 4
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_325V 5
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL_1_35V 6
+#define AB8500_REGU_MISC1_V_INT_CORE_12_SEL(__x) \
+ AB8500_VAL2REG(AB8500_REGU_MISC1, V_INT_CORE_12_SEL, \
+ AB8500_REGU_MISC1_V_INT_CORE_12_SEL_##__x)
+#define AB8500_REGU_MISC1_V_INT_CORE_12_ENA_SHIFT 2
+#define AB8500_REGU_MISC1_V_INT_CORE_12_ENA_MASK 0x00000004
+#define AB8500_REGU_MISC1_V_INT_CORE_12_ENA(__x) \
+ AB8500_VAL2REG(AB8500_REGU_MISC1, V_INT_CORE_12_ENA, __x)
+#define AB8500_REGU_MISC1_V_TVOUT_ENA_SHIFT 1
+#define AB8500_REGU_MISC1_V_TVOUT_ENA_MASK 0x00000002
+#define AB8500_REGU_MISC1_V_TVOUT_ENA(__x) \
+ AB8500_VAL2REG(AB8500_REGU_MISC1, V_TVOUT_ENA, __x)
+#define AB8500_VAUX12_REGU 0x00000409
+#define AB8500_VAUX12_REGU_VAUX_1_SHIFT 2
+#define AB8500_VAUX12_REGU_VAUX_1_MASK 0x0000000C
+#define AB8500_VAUX12_REGU_VAUX_1_DISABLE 0
+#define AB8500_VAUX12_REGU_VAUX_1_FORCE_HP 1
+#define AB8500_VAUX12_REGU_VAUX_1_BY_CTRL_REG 2
+#define AB8500_VAUX12_REGU_VAUX_1_FORCE_LP 3
+#define AB8500_VAUX12_REGU_VAUX_1(__x) \
+ AB8500_VAL2REG(AB8500_VAUX12_REGU, VAUX_1, \
+ AB8500_VAUX12_REGU_VAUX_1_##__x)
+#define AB8500_VAUX12_REGU_VAUX_2_SHIFT 0
+#define AB8500_VAUX12_REGU_VAUX_2_MASK 0x00000003
+#define AB8500_VAUX12_REGU_VAUX_2_DISABLE 0
+#define AB8500_VAUX12_REGU_VAUX_2_FORCE_HP 1
+#define AB8500_VAUX12_REGU_VAUX_2_BY_CTRL_REG 2
+#define AB8500_VAUX12_REGU_VAUX_2_FORCE_LP 3
+#define AB8500_VAUX12_REGU_VAUX_2(__x) \
+ AB8500_VAL2REG(AB8500_VAUX12_REGU, VAUX_2, \
+ AB8500_VAUX12_REGU_VAUX_2_##__x)
+#define AB8500_VAUX1_SEL 0x0000041F
+#define AB8500_VAUX1_SEL_VAL_SHIFT 0
+#define AB8500_VAUX1_SEL_VAL_MASK 0x0000000F
+#define AB8500_VAUX1_SEL_VAL_1_1V 0
+#define AB8500_VAUX1_SEL_VAL_1_2V 1
+#define AB8500_VAUX1_SEL_VAL_1_3V 2
+#define AB8500_VAUX1_SEL_VAL_1_4V 3
+#define AB8500_VAUX1_SEL_VAL_1_5V 4
+#define AB8500_VAUX1_SEL_VAL_1_8V 5
+#define AB8500_VAUX1_SEL_VAL_1_85V 6
+#define AB8500_VAUX1_SEL_VAL_1_9V 7
+#define AB8500_VAUX1_SEL_VAL_2_5V 8
+#define AB8500_VAUX1_SEL_VAL_2_65V 9
+#define AB8500_VAUX1_SEL_VAL_2_7V 10
+#define AB8500_VAUX1_SEL_VAL_2_75V 11
+#define AB8500_VAUX1_SEL_VAL_2_8V 12
+#define AB8500_VAUX1_SEL_VAL_2_9V 13
+#define AB8500_VAUX1_SEL_VAL_3_0V 14
+#define AB8500_VAUX1_SEL_VAL_3_3V 15
+#define AB8500_VAUX1_SEL_VAL(__x) \
+ AB8500_VAL2REG(AB8500_VAUX1_SEL, VAL, AB8500_VAUX1_SEL_VAL_##__x)
+#define AB8500_DENC_CONF0 0x00000600
+#define AB8500_DENC_CONF0_STD_SHIFT 6
+#define AB8500_DENC_CONF0_STD_MASK 0x000000C0
+#define AB8500_DENC_CONF0_STD_PAL_BDGHI 0
+#define AB8500_DENC_CONF0_STD_PAL_N 1
+#define AB8500_DENC_CONF0_STD_NTSC_M 2
+#define AB8500_DENC_CONF0_STD_PAL_M 3
+#define AB8500_DENC_CONF0_STD(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF0, STD, AB8500_DENC_CONF0_STD_##__x)
+#define AB8500_DENC_CONF0_SYNC_SHIFT 3
+#define AB8500_DENC_CONF0_SYNC_MASK 0x00000038
+#define AB8500_DENC_CONF0_SYNC_F_BASED_SLAVE 1
+#define AB8500_DENC_CONF0_SYNC_AUTO_TEST 7
+#define AB8500_DENC_CONF0_SYNC(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF0, SYNC, AB8500_DENC_CONF0_SYNC_##__x)
+#define AB8500_DENC_CONF1 0x00000601
+#define AB8500_DENC_CONF1_BLK_LI_SHIFT 7
+#define AB8500_DENC_CONF1_BLK_LI_MASK 0x00000080
+#define AB8500_DENC_CONF1_BLK_LI_PARTIAL 0
+#define AB8500_DENC_CONF1_BLK_LI_FULL 1
+#define AB8500_DENC_CONF1_BLK_LI(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF1, BLK_LI, \
+ AB8500_DENC_CONF1_BLK_LI_##__x)
+#define AB8500_DENC_CONF1_FLT_SHIFT 5
+#define AB8500_DENC_CONF1_FLT_MASK 0x00000060
+#define AB8500_DENC_CONF1_FLT_1_1MHZ 0
+#define AB8500_DENC_CONF1_FLT_1_3MHZ 1
+#define AB8500_DENC_CONF1_FLT_1_6MHZ 2
+#define AB8500_DENC_CONF1_FLT_1_9MHZ 3
+#define AB8500_DENC_CONF1_FLT(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF1, FLT, AB8500_DENC_CONF1_FLT_##__x)
+#define AB8500_DENC_CONF1_CO_KI_SHIFT 3
+#define AB8500_DENC_CONF1_CO_KI_MASK 0x00000008
+#define AB8500_DENC_CONF1_CO_KI(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF1, CO_KI, __x)
+#define AB8500_DENC_CONF1_SETUP_MAIN_SHIFT 2
+#define AB8500_DENC_CONF1_SETUP_MAIN_MASK 0x00000004
+#define AB8500_DENC_CONF1_SETUP_MAIN_BLACK_EQ_BLANK 0
+#define AB8500_DENC_CONF1_SETUP_MAIN_BLACK_GT_BLANK 1
+#define AB8500_DENC_CONF1_SETUP_MAIN(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF1, SETUP_MAIN, \
+ AB8500_DENC_CONF1_SETUP_MAIN_##__x)
+#define AB8500_DENC_CONF1_CC_SHIFT 0
+#define AB8500_DENC_CONF1_CC_MASK 0x00000003
+#define AB8500_DENC_CONF1_CC_NONE 0
+#define AB8500_DENC_CONF1_CC_FIELD_1 1
+#define AB8500_DENC_CONF1_CC_FIELD_2 2
+#define AB8500_DENC_CONF1_CC_ALL 3
+#define AB8500_DENC_CONF1_CC(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF1, CC, AB8500_DENC_CONF1_CC_##__x)
+#define AB8500_DENC_CONF2 0x00000602
+#define AB8500_DENC_CONF2_N_INTRL_SHIFT 7
+#define AB8500_DENC_CONF2_N_INTRL_MASK 0x00000080
+#define AB8500_DENC_CONF2_N_INTRL(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, N_INTRL, __x)
+#define AB8500_DENC_CONF2_EN_RST_SHIFT 6
+#define AB8500_DENC_CONF2_EN_RST_MASK 0x00000040
+#define AB8500_DENC_CONF2_EN_RST(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, EN_RST, __x)
+#define AB8500_DENC_CONF2_BURST_EN_SHIFT 5
+#define AB8500_DENC_CONF2_BURST_EN_MASK 0x00000020
+#define AB8500_DENC_CONF2_BURST_EN(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, BURST_EN, __x)
+#define AB8500_DENC_CONF2_SEL_RST_SHIFT 4
+#define AB8500_DENC_CONF2_SEL_RST_MASK 0x00000010
+#define AB8500_DENC_CONF2_SEL_RST_USE_HW_VAL 0
+#define AB8500_DENC_CONF2_SEL_RST_USE_PROG_VAL 1
+#define AB8500_DENC_CONF2_SEL_RST(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, SEL_RST, \
+ AB8500_DENC_CONF2_SEL_RST_##__x)
+#define AB8500_DENC_CONF2_RST_OSC_BUF_SHIFT 2
+#define AB8500_DENC_CONF2_RST_OSC_BUF_MASK 0x00000004
+#define AB8500_DENC_CONF2_RST_OSC_BUF(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, RST_OSC_BUF, __x)
+#define AB8500_DENC_CONF2_VAL_RST_SHIFT 0
+#define AB8500_DENC_CONF2_VAL_RST_MASK 0x00000003
+#define AB8500_DENC_CONF2_VAL_RST_ALL_LINES 0
+#define AB8500_DENC_CONF2_VAL_RST_EVERY_2ND_FIELD 1
+#define AB8500_DENC_CONF2_VAL_RST_EVERY_4TH_FIELD 2
+#define AB8500_DENC_CONF2_VAL_RST_EVERY_8TH_FIELD 3
+#define AB8500_DENC_CONF2_VAL_RST(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF2, VAL_RST, \
+ AB8500_DENC_CONF2_VAL_RST_##__x)
+#define AB8500_DENC_CONF6 0x00000606
+#define AB8500_DENC_CONF6_SOFT_RESET_SHIFT 7
+#define AB8500_DENC_CONF6_SOFT_RESET_MASK 0x00000080
+#define AB8500_DENC_CONF6_SOFT_RESET(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF6, SOFT_RESET, __x)
+#define AB8500_DENC_CONF6_JUMP_SHIFT 6
+#define AB8500_DENC_CONF6_JUMP_MASK 0x00000040
+#define AB8500_DENC_CONF6_JUMP(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF6, JUMP, __x)
+#define AB8500_DENC_CONF6_DEC_NINC_SHIFT 5
+#define AB8500_DENC_CONF6_DEC_NINC_MASK 0x00000020
+#define AB8500_DENC_CONF6_DEC_NINC(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF6, DEC_NINC, __x)
+#define AB8500_DENC_CONF6_FREE_JUMP_SHIFT 4
+#define AB8500_DENC_CONF6_FREE_JUMP_MASK 0x00000010
+#define AB8500_DENC_CONF6_FREE_JUMP(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF6, FREE_JUMP, __x)
+#define AB8500_DENC_CONF6_MAX_DYN_SHIFT 0
+#define AB8500_DENC_CONF6_MAX_DYN_MASK 0x00000001
+#define AB8500_DENC_CONF6_MAX_DYN(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF6, MAX_DYN, __x)
+#define AB8500_DENC_CONF8 0x00000608
+#define AB8500_DENC_CONF8_PH_RST_MODE_SHIFT 6
+#define AB8500_DENC_CONF8_PH_RST_MODE_MASK 0x000000C0
+#define AB8500_DENC_CONF8_PH_RST_MODE_DISABLED 0
+#define AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_PHASE_BUF 1
+#define AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_INC_DFS 2
+#define AB8500_DENC_CONF8_PH_RST_MODE_RESET 3
+#define AB8500_DENC_CONF8_PH_RST_MODE(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF8, PH_RST_MODE, \
+ AB8500_DENC_CONF8_PH_RST_MODE_##__x)
+#define AB8500_DENC_CONF8_VAL_422_MUX_SHIFT 4
+#define AB8500_DENC_CONF8_VAL_422_MUX_MASK 0x00000010
+#define AB8500_DENC_CONF8_VAL_422_MUX_TEST 0
+#define AB8500_DENC_CONF8_VAL_422_MUX_ACTIVE 1
+#define AB8500_DENC_CONF8_VAL_422_MUX(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF8, VAL_422_MUX, \
+ AB8500_DENC_CONF8_VAL_422_MUX_##__x)
+#define AB8500_DENC_CONF8_BLK_ALL_SHIFT 3
+#define AB8500_DENC_CONF8_BLK_ALL_MASK 0x00000008
+#define AB8500_DENC_CONF8_BLK_ALL(__x) \
+ AB8500_VAL2REG(AB8500_DENC_CONF8, BLK_ALL, __x)
+#define AB8500_TVOUT_CTRL 0x00000680
+#define AB8500_TVOUT_CTRL_TV_LOAD_RC_SHIFT 6
+#define AB8500_TVOUT_CTRL_TV_LOAD_RC_MASK 0x00000040
+#define AB8500_TVOUT_CTRL_TV_LOAD_RC(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL, TV_LOAD_RC, __x)
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_SHIFT 3
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_MASK 0x00000038
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_0_5S 0
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_1S 1
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_1_5S 2
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_2S 3
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_2_5S 4
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME_3S 5
+#define AB8500_TVOUT_CTRL_PLUG_TV_TIME(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL, PLUG_TV_TIME, \
+ AB8500_TVOUT_CTRL_PLUG_TV_TIME_##__x)
+#define AB8500_TVOUT_CTRL_TV_PLUG_ON_SHIFT 2
+#define AB8500_TVOUT_CTRL_TV_PLUG_ON_MASK 0x00000004
+#define AB8500_TVOUT_CTRL_TV_PLUG_ON(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL, TV_PLUG_ON, __x)
+#define AB8500_TVOUT_CTRL_DAC_CTRL0_SHIFT 1
+#define AB8500_TVOUT_CTRL_DAC_CTRL0_MASK 0x00000002
+#define AB8500_TVOUT_CTRL_DAC_CTRL0(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL, DAC_CTRL0, __x)
+#define AB8500_TVOUT_CTRL_DAC_CTRL1_SHIFT 0
+#define AB8500_TVOUT_CTRL_DAC_CTRL1_MASK 0x00000001
+#define AB8500_TVOUT_CTRL_DAC_CTRL1(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL, DAC_CTRL1, __x)
+#define AB8500_TVOUT_CTRL2 0x00000681
+#define AB8500_TVOUT_CTRL2_SWAP_DDR_DATA_IN_SHIFT 1
+#define AB8500_TVOUT_CTRL2_SWAP_DDR_DATA_IN_MASK 0x00000002
+#define AB8500_TVOUT_CTRL2_SWAP_DDR_DATA_IN(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL2, SWAP_DDR_DATA_IN, __x)
+#define AB8500_TVOUT_CTRL2_DENC_DDR_SHIFT 0
+#define AB8500_TVOUT_CTRL2_DENC_DDR_MASK 0x00000001
+#define AB8500_TVOUT_CTRL2_DENC_DDR(__x) \
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL2, DENC_DDR, __x)
+#define AB8500_IT_MASK1 0x00000E40
+#define AB8500_IT_MASK1_PON_KEY1_DBR_SHIFT 7
+#define AB8500_IT_MASK1_PON_KEY1_DBR_MASK 0x00000080
+#define AB8500_IT_MASK1_PON_KEY1_DBR(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, PON_KEY1_DBR, __x)
+#define AB8500_IT_MASK1_PON_KEY1_DBF_SHIFT 6
+#define AB8500_IT_MASK1_PON_KEY1_DBF_MASK 0x00000040
+#define AB8500_IT_MASK1_PON_KEY1_DBF(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, PON_KEY1_DBF, __x)
+#define AB8500_IT_MASK1_PON_KEY2_DBR_SHIFT 5
+#define AB8500_IT_MASK1_PON_KEY2_DBR_MASK 0x00000020
+#define AB8500_IT_MASK1_PON_KEY2_DBR(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, PON_KEY2_DBR, __x)
+#define AB8500_IT_MASK1_PON_KEY2_DBF_SHIFT 4
+#define AB8500_IT_MASK1_PON_KEY2_DBF_MASK 0x00000010
+#define AB8500_IT_MASK1_PON_KEY2_DBF(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, PON_KEY2_DBF, __x)
+#define AB8500_IT_MASK1_TEMP_WARN_SHIFT 3
+#define AB8500_IT_MASK1_TEMP_WARN_MASK 0x00000008
+#define AB8500_IT_MASK1_TEMP_WARN(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, TEMP_WARN, __x)
+#define AB8500_IT_MASK1_PLUG_TV_DET_SHIFT 2
+#define AB8500_IT_MASK1_PLUG_TV_DET_MASK 0x00000004
+#define AB8500_IT_MASK1_PLUG_TV_DET(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, PLUG_TV_DET, __x)
+#define AB8500_IT_MASK1_UNPLUG_TV_DET_SHIFT 1
+#define AB8500_IT_MASK1_UNPLUG_TV_DET_MASK 0x00000002
+#define AB8500_IT_MASK1_UNPLUG_TV_DET(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, UNPLUG_TV_DET, __x)
+#define AB8500_IT_MASK1_MAIN_EXT_CH_NOK_SHIFT 0
+#define AB8500_IT_MASK1_MAIN_EXT_CH_NOK_MASK 0x00000001
+#define AB8500_IT_MASK1_MAIN_EXT_CH_NOK(__x) \
+ AB8500_VAL2REG(AB8500_IT_MASK1, MAIN_EXT_CH_NOK, __x)
+#define AB8500_REV 0x00001080
+#define AB8500_REV_FULL_MASK_SHIFT 4
+#define AB8500_REV_FULL_MASK_MASK 0x000000F0
+#define AB8500_REV_FULL_MASK(__x) \
+ AB8500_VAL2REG(AB8500_REV, FULL_MASK, __x)
+#define AB8500_REV_METAL_FIX_SHIFT 0
+#define AB8500_REV_METAL_FIX_MASK 0x0000000F
+#define AB8500_REV_METAL_FIX(__x) \
+ AB8500_VAL2REG(AB8500_REV, METAL_FIX, __x)
+
+#endif /* __AB8500_DENC_H */
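As an aside, a minimal sketch of how the field macros above are meant to compose into a register value; the exact expansion of AB8500_VAL2REG() is defined earlier in this header and is assumed to shift and mask the field value, and denc_write() is a hypothetical accessor, not something introduced by this patch.

/* Sketch only: PAL B/G/D/H/I with field-based slave sync. */
static void denc_conf0_example(void)
{
	u8 conf0 = AB8500_DENC_CONF0_STD(PAL_BDGHI) |
		   AB8500_DENC_CONF0_SYNC(F_BASED_SLAVE);

	/* denc_write() is a hypothetical register write helper. */
	denc_write(AB8500_DENC_CONF0, conf0);
}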
diff --git a/include/linux/mfd/ab8500/denc.h b/include/linux/mfd/ab8500/denc.h
new file mode 100644
index 00000000000..25a09a2c2bd
--- /dev/null
+++ b/include/linux/mfd/ab8500/denc.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * AB8500 tvout driver interface
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __AB8500_DENC__H__
+#define __AB8500_DENC__H__
+
+#include <linux/platform_device.h>
+
+struct ab8500_denc_platform_data {
+ /* Platform info */
+ bool ddr_enable;
+ bool ddr_little_endian;
+};
+
+enum ab8500_denc_TV_std {
+ TV_STD_PAL_BDGHI,
+ TV_STD_PAL_N,
+ TV_STD_PAL_M,
+ TV_STD_NTSC_M,
+};
+
+enum ab8500_denc_cr_filter_bandwidth {
+ TV_CR_NTSC_LOW_DEF_FILTER,
+ TV_CR_PAL_LOW_DEF_FILTER,
+ TV_CR_NTSC_HIGH_DEF_FILTER,
+ TV_CR_PAL_HIGH_DEF_FILTER,
+};
+
+enum ab8500_denc_phase_reset_mode {
+ TV_PHASE_RST_MOD_DISABLE,
+ TV_PHASE_RST_MOD_FROM_PHASE_BUF,
+ TV_PHASE_RST_MOD_FROM_INC_DFS,
+ TV_PHASE_RST_MOD_RST,
+};
+
+enum ab8500_denc_plug_time {
+ TV_PLUG_TIME_0_5S,
+ TV_PLUG_TIME_1S,
+ TV_PLUG_TIME_1_5S,
+ TV_PLUG_TIME_2S,
+ TV_PLUG_TIME_2_5S,
+ TV_PLUG_TIME_3S,
+};
+
+struct ab8500_denc_conf {
+ /* register settings for DENC_configuration */
+ bool act_output;
+ enum ab8500_denc_TV_std TV_std;
+ bool progressive;
+ bool test_pattern;
+ bool partial_blanking;
+ bool blank_all;
+ bool black_level_setup;
+ enum ab8500_denc_cr_filter_bandwidth cr_filter;
+ bool suppress_col;
+ enum ab8500_denc_phase_reset_mode phase_reset_mode;
+ bool dac_enable;
+ bool act_dc_output;
+};
+
+struct platform_device *ab8500_denc_get_device(void);
+void ab8500_denc_put_device(struct platform_device *pdev);
+
+void ab8500_denc_reset(struct platform_device *pdev, bool hard);
+void ab8500_denc_power_up(struct platform_device *pdev);
+void ab8500_denc_power_down(struct platform_device *pdev);
+
+void ab8500_denc_conf(struct platform_device *pdev,
+ struct ab8500_denc_conf *conf);
+void ab8500_denc_conf_plug_detect(struct platform_device *pdev,
+ bool enable, bool load_RC,
+ enum ab8500_denc_plug_time time);
+void ab8500_denc_mask_int_plug_det(struct platform_device *pdev, bool plug,
+ bool unplug);
+#endif /* __AB8500_DENC__H__ */
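For orientation, a hedged sketch of how a display/TV-out client might drive the interface declared above; the configuration values are purely illustrative and not taken from the patch.

#include <linux/errno.h>
#include <linux/mfd/ab8500/denc.h>

/* Illustrative client sequence; field values are examples only. */
static int denc_client_example(void)
{
	struct ab8500_denc_conf conf = {
		.act_output = true,
		.TV_std     = TV_STD_PAL_BDGHI,
		.cr_filter  = TV_CR_PAL_HIGH_DEF_FILTER,
		.dac_enable = true,
	};
	struct platform_device *pdev = ab8500_denc_get_device();

	if (!pdev)
		return -ENODEV;

	ab8500_denc_power_up(pdev);
	ab8500_denc_conf(pdev, &conf);
	ab8500_denc_conf_plug_detect(pdev, true, false, TV_PLUG_TIME_1S);

	/* ... TV-out in use ... */

	ab8500_denc_power_down(pdev);
	ab8500_denc_put_device(pdev);
	return 0;
}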
diff --git a/include/linux/mfd/ab8500/ux500_chargalg.h b/include/linux/mfd/ab8500/ux500_chargalg.h
new file mode 100644
index 00000000000..f04e47ff56a
--- /dev/null
+++ b/include/linux/mfd/ab8500/ux500_chargalg.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _UX500_CHARGALG_H
+#define _UX500_CHARGALG_H
+
+#include <linux/power_supply.h>
+
+#define psy_to_ux500_charger(x) container_of((x), \
+ struct ux500_charger, psy)
+
+/* Forward declaration */
+struct ux500_charger;
+
+struct ux500_charger_ops {
+ int (*enable) (struct ux500_charger *, int, int, int);
+ int (*kick_wd) (struct ux500_charger *);
+ int (*update_curr) (struct ux500_charger *, int);
+};
+
+/**
+ * struct ux500_charger - power supply ux500 charger sub class
+ * @psy: power supply base class
+ * @ops: ux500 charger operations
+ * @max_out_volt: maximum output charger voltage in mV
+ * @max_out_curr: maximum output charger current in mA
+ */
+struct ux500_charger {
+ struct power_supply psy;
+ struct ux500_charger_ops ops;
+ int max_out_volt;
+ int max_out_curr;
+};
+
+#endif
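A hedged sketch of a charger driver embedding struct ux500_charger. The header does not name the three int parameters of enable(), so an on/off flag plus voltage (mV) and current (mA) is assumed here, and the limits filled in are illustrative only.

#include <linux/kernel.h>
#include <linux/mfd/ab8500/ux500_chargalg.h>

struct my_ac_charger {
	struct ux500_charger uc;
	/* driver-private state would follow */
};

static int my_ac_enable(struct ux500_charger *charger,
			int enable, int vset, int iset)
{
	struct my_ac_charger *di =
		container_of(charger, struct my_ac_charger, uc);

	/* program di's hardware with vset/iset here */
	return 0;
}

static int my_ac_kick_wd(struct ux500_charger *charger)
{
	return 0;	/* kick the charger watchdog */
}

static int my_ac_update_curr(struct ux500_charger *charger, int curr)
{
	return 0;	/* update the charging current (mA) */
}

static void my_ac_init(struct my_ac_charger *di)
{
	di->uc.ops.enable      = my_ac_enable;
	di->uc.ops.kick_wd     = my_ac_kick_wd;
	di->uc.ops.update_curr = my_ac_update_curr;
	di->uc.max_out_volt    = 4350;	/* illustrative */
	di->uc.max_out_curr    = 900;	/* illustrative */
}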
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 9970337ff04..36e61a96cc8 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -1,6 +1,6 @@
/*
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
+ * Copyright (C) ST-Ericsson SA 2010
+ * License terms: GNU General Public License v2
* AB3100 core access functions
* Author: Linus Walleij <linus.walleij@stericsson.com>
*
@@ -178,6 +178,12 @@ int abx500_get_chip_id(struct device *dev);
int abx500_event_registers_startup_state_get(struct device *dev, u8 *event);
int abx500_startup_irq_enabled(struct device *dev, unsigned int irq);
+#define abx500_get abx500_get_register_interruptible
+#define abx500_set abx500_set_register_interruptible
+#define abx500_get_page abx500_get_register_page_interruptible
+#define abx500_set_page abx500_set_register_page_interruptible
+#define abx500_mask_and_set abx500_mask_and_set_register_interruptible
+
struct abx500_ops {
int (*get_chip_id) (struct device *);
int (*get_register) (struct device *, u8, u8, u8 *);
@@ -189,6 +195,252 @@ struct abx500_ops {
int (*startup_irq_enabled) (struct device *, unsigned int);
};
-int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
+/* Battery driver related data */
+/*
+ * ADC for the battery thermistor.
+ * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
+ * with an NTC resistor to both identify the battery and to measure its
+ * temperature. Different phone manufacturers use different techniques to both
+ * identify the battery and to read its temperature.
+ */
+enum abx500_adc_therm {
+ ABx500_ADC_THERM_BATCTRL,
+ ABx500_ADC_THERM_BATTEMP,
+};
+
+/**
+ * struct abx500_res_to_temp - defines one point in a temp to res curve. To
+ * be used in battery packs that combine the identification resistor with an
+ * NTC resistor.
+ * @temp: battery pack temperature in Celsius
+ * @resist: NTC resistor net total resistance
+ */
+struct abx500_res_to_temp {
+ int temp;
+ int resist;
+};
+
+/**
+ * struct abx500_v_to_cap - Table for translating voltage to capacity
+ * @voltage: Voltage in mV
+ * @capacity: Capacity in percent
+ */
+struct abx500_v_to_cap {
+ int voltage;
+ int capacity;
+};
+
+/* Forward declaration */
+struct abx500_fg;
+
+/**
+ * struct abx500_fg_parameters - Fuel gauge algorithm parameters; times are
+ * in seconds unless otherwise specified
+ * @recovery_sleep_timer: Time between measurements while recovering
+ * @recovery_total_time: Total recovery time
+ * @init_timer: Measurement interval during startup
+ * @init_discard_time: Time we discard voltage measurement at startup
+ * @init_total_time: Total init time during startup
+ * @high_curr_time: Time current has to be high to go to recovery
+ * @accu_charging: FG accumulation time while charging
+ * @accu_high_curr: FG accumulation time in high current mode
+ * @high_curr_threshold: High current threshold, in mA
+ * @lowbat_threshold: Low battery threshold, in mV
+ * @overbat_threshold: Over battery threshold, in mV
+ */
+struct abx500_fg_parameters {
+ int recovery_sleep_timer;
+ int recovery_total_time;
+ int init_timer;
+ int init_discard_time;
+ int init_total_time;
+ int high_curr_time;
+ int accu_charging;
+ int accu_high_curr;
+ int high_curr_threshold;
+ int lowbat_threshold;
+ int overbat_threshold;
+};
+
+/**
+ * struct abx500_maxim_parameters - charger maximization parameters used by
+ * the board config.
+ * @ena_maxi: Enable maximization for this battery type
+ * @chg_curr: Maximum charger current allowed
+ * @wait_cycles: cycles to wait before setting charger current
+ * @charger_curr_step: delta between two charger current settings (mA)
+ */
+struct abx500_maxim_parameters {
+ bool ena_maxi;
+ int chg_curr;
+ int wait_cycles;
+ int charger_curr_step;
+};
+
+/**
+ * struct abx500_battery_type - different batteries supported
+ * @name: battery technology
+ * @resis_high: battery upper resistance limit
+ * @resis_low: battery lower resistance limit
+ * @charge_full_design: Maximum battery capacity in mAh
+ * @nominal_voltage: Nominal voltage of the battery in mV
+ * @termination_vol: max voltage up to which the battery can be charged
+ * @termination_curr: battery charging termination current in mA
+ * @recharge_vol: battery voltage limit that will trigger a new
+ * full charging cycle in the case where maintenance
+ * charging has been disabled
+ * @normal_cur_lvl: charger current in normal state in mA
+ * @normal_vol_lvl: charger voltage in normal state in mV
+ * @maint_a_cur_lvl: charger current in maintenance A state in mA
+ * @maint_a_vol_lvl: charger voltage in maintenance A state in mV
+ * @maint_a_chg_timer_h: charge time in maintenance A state
+ * @maint_b_cur_lvl: charger current in maintenance B state in mA
+ * @maint_b_vol_lvl: charger voltage in maintenance B state in mV
+ * @maint_b_chg_timer_h: charge time in maintenance B state
+ * @low_high_cur_lvl: charger current in temp low/high state in mA
+ * @low_high_vol_lvl: charger voltage in temp low/high state in mV
+ * @battery_resistance: battery inner resistance in mOhm
+ * @n_temp_tbl_elements: number of elements in r_to_t_tbl
+ * @r_to_t_tbl: table containing resistance to temp points
+ * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
+ * @v_to_cap_tbl: Voltage to capacity (in %) table
+ */
+struct abx500_battery_type {
+ int name;
+ int resis_high;
+ int resis_low;
+ int charge_full_design;
+ int nominal_voltage;
+ int termination_vol;
+ int termination_curr;
+ int recharge_vol;
+ int normal_cur_lvl;
+ int normal_vol_lvl;
+ int maint_a_cur_lvl;
+ int maint_a_vol_lvl;
+ int maint_a_chg_timer_h;
+ int maint_b_cur_lvl;
+ int maint_b_vol_lvl;
+ int maint_b_chg_timer_h;
+ int low_high_cur_lvl;
+ int low_high_vol_lvl;
+ int battery_resistance;
+ int n_temp_tbl_elements;
+ struct abx500_res_to_temp *r_to_t_tbl;
+ int n_v_cap_tbl_elements;
+ struct abx500_v_to_cap *v_to_cap_tbl;
+};
+
+/**
+ * struct abx500_bm_capacity_levels - abx500 capacity level data
+ * @critical: critical capacity level in percent
+ * @low: low capacity level in percent
+ * @normal: normal capacity level in percent
+ * @high: high capacity level in percent
+ * @full: full capacity level in percent
+ */
+struct abx500_bm_capacity_levels {
+ int critical;
+ int low;
+ int normal;
+ int high;
+ int full;
+};
+
+/**
+ * struct abx500_bm_charger_parameters - Charger specific parameters
+ * @usb_volt_max: maximum allowed USB charger voltage in mV
+ * @usb_curr_max: maximum allowed USB charger current in mA
+ * @ac_volt_max: maximum allowed AC charger voltage in mV
+ * @ac_curr_max: maximum allowed AC charger current in mA
+ */
+struct abx500_bm_charger_parameters {
+ int usb_volt_max;
+ int usb_curr_max;
+ int ac_volt_max;
+ int ac_curr_max;
+};
+
+/**
+ * struct abx500_bm_data - abx500 battery management data
+ * @temp_under: under this temp, charging is stopped
+ * @temp_low: between this temp and temp_under charging is reduced
+ * @temp_high: between this temp and temp_over charging is reduced
+ * @temp_over: over this temp, charging is stopped
+ * @main_safety_tmr_h: safety timer for main charger
+ * @usb_safety_tmr_h: safety timer for usb charger
+ * @bkup_bat_v: voltage which we charge the backup battery with
+ * @bkup_bat_i: current which we charge the backup battery with
+ * @no_maintenance: indicates that maintenance charging is disabled
+ * @adc_therm: placement of thermistor, batctrl or battemp adc
+ * @chg_unknown_bat: flag to enable charging of unknown batteries
+ * @enable_overshoot: flag to enable VBAT overshoot control
+ * @fg_res: resistance of FG resistor in 0.1mOhm
+ * @n_btypes: number of elements in array bat_type
+ * @batt_id: index of the identified battery in array bat_type
+ * @interval_charging: charge alg cycle period time when charging (sec)
+ * @interval_not_charging: charge alg cycle period time when not charging (sec)
+ * @temp_hysteresis: temperature hysteresis
+ * @maxi: maximization parameters
+ * @cap_levels: capacity in percent for the different capacity levels
+ * @bat_type: table of supported battery types
+ * @chg_params: charger parameters
+ * @fg_params: fuel gauge parameters
+ */
+struct abx500_bm_data {
+ int temp_under;
+ int temp_low;
+ int temp_high;
+ int temp_over;
+ int temp_now;
+ int main_safety_tmr_h;
+ int usb_safety_tmr_h;
+ int bkup_bat_v;
+ int bkup_bat_i;
+ bool no_maintenance;
+ bool chg_unknown_bat;
+ bool enable_overshoot;
+ enum abx500_adc_therm adc_therm;
+ int fg_res;
+ int n_btypes;
+ int batt_id;
+ int interval_charging;
+ int interval_not_charging;
+ int temp_hysteresis;
+ const struct abx500_maxim_parameters *maxi;
+ const struct abx500_bm_capacity_levels *cap_levels;
+ const struct abx500_battery_type *bat_type;
+ const struct abx500_bm_charger_parameters *chg_params;
+ const struct abx500_fg_parameters *fg_params;
+};
+
+struct abx500_chargalg_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct abx500_charger_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct abx500_btemp_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct abx500_fg_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+struct abx500_bm_plat_data {
+ struct abx500_bm_data *battery;
+ struct abx500_charger_platform_data *charger;
+ struct abx500_btemp_platform_data *btemp;
+ struct abx500_fg_platform_data *fg;
+ struct abx500_chargalg_platform_data *chargalg;
+};
+
+int abx500_register_ops(struct device *dev, struct abx500_ops *ops);
void abx500_remove_ops(struct device *dev);
#endif
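To make the new battery-management types concrete, a board-side sketch of the lookup tables they describe; every number below is illustrative and is not real board or battery data.

#include <linux/mfd/abx500.h>

/* Illustrative voltage (mV) to capacity (%) curve. */
static const struct abx500_v_to_cap example_v_to_cap[] = {
	{ 4180, 100 },
	{ 3950,  75 },
	{ 3720,  50 },
	{ 3600,  25 },
	{ 3300,   0 },
};

/* Illustrative NTC resistance (Ohm) to temperature (Celsius) curve. */
static const struct abx500_res_to_temp example_r_to_t[] = {
	{ -10, 158000 },
	{   0,  97000 },
	{  20,  47000 },
	{  40,  26000 },
	{  60,  13000 },
};

/* Illustrative capacity level thresholds in percent. */
static const struct abx500_bm_capacity_levels example_cap_levels = {
	.critical = 2,
	.low      = 10,
	.normal   = 70,
	.high     = 95,
	.full     = 100,
};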
diff --git a/include/linux/mfd/abx500/ab5500-bm.h b/include/linux/mfd/abx500/ab5500-bm.h
new file mode 100644
index 00000000000..05ebc8c3840
--- /dev/null
+++ b/include/linux/mfd/abx500/ab5500-bm.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright ST-Ericsson 2011.
+ *
+ * Author: Arun Murthy <arun.murthy@stericsson.com>
+ * Licensed under GPLv2.
+ */
+
+#ifndef _AB5500_BM_H
+#define _AB5500_BM_H
+
+#define AB5500_MCB 0x2F
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB5500_USB_LINE_STATUS 0x80
+#define AB5500_USB_PHY_STATUS 0x89
+#define AB5500_CHGFSM_CHARGER_DETECT 0xBF
+#define AB5500_CHGFSM_USB_BTEMP_CURR_LIM 0xAD
+#define AB5500_USB_LINE_CTRL2 0x82
+#define AB5500_USB_OTG_CTRL 0x87
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB5500_CVBUSM 0x11
+#define AB5500_LEDT 0x12
+#define AB5500_VSRC 0x13
+#define AB5500_ICSR 0x14
+#define AB5500_OCSRV 0x15
+#define AB5500_CVREC 0x16
+#define AB5500_CREVS 0x17
+#define AB5500_CCTRL 0x18
+#define AB5500_TBDATA 0x19
+#define AB5500_CPWM 0x1A
+#define AB5500_DCIOCURRENT 0x1B
+#define AB5500_USB_HS_CURR_LIM 0x1C
+#define AB5500_WALL_HS_CURR_LIM 0x1D
+
+/*
+ * FG, Battcom and ACC register offsets
+ * Bank : 0x0C
+ */
+#define AB5500_FG_CH0 0x00
+#define AB5500_FG_CH1 0x01
+#define AB5500_FG_CH2 0x02
+#define AB5500_FG_DIS_CH0 0x03
+#define AB5500_FG_DIS_CH1 0x04
+#define AB5500_FG_DIS_CH2 0x05
+#define AB5500_FGDIS_COUNT0 0x06
+#define AB5500_FGDIS_COUNT1 0x07
+#define AB5500_FG_VAL_COUNT0 0x08
+#define AB5500_FG_VAL_COUNT1 0x09
+#define AB5500_FGDIR_READ0 0x0A
+#define AB5500_FGDIR_READ1 0x0B
+#define AB5500_FG_CONTROL_A 0x0C
+#define AB5500_FG_CONTROL_B 0x0F
+#define AB5500_FG_CONTROL_C 0x10
+#define AB5500_FG_DIS 0x0D
+#define AB5500_FG_EOC 0x0E
+#define AB5500_FG_CB 0x0F
+#define AB5500_FG_CC 0x10
+#define AB5500_UIOR 0x1A
+#define AB5500_UART 0x1B
+#define AB5500_URI 0x1C
+#define AB5500_UART_RQ 0x1D
+#define AB5500_ACC_DETECT1 0x20
+#define AB5500_ACC_DETECT2 0x21
+#define AB5500_ACC_DETECTCTRL 0x23
+#define AB5500_ACC_AVCTRL 0x24
+#define AB5500_ACC_DETECT3_DEG_LITCH_TIME 0x30
+#define AB5500_ACC_DETECT3_KEY_PRESS_TIME 0x31
+#define AB5500_ACC_DETECT3_LONG_KEY_TIME 0x32
+#define AB5500_ACC_DETECT3_TIME_READ_MS 0x33
+#define AB5500_ACC_DETECT3_TIME_READ_LS 0x34
+#define AB5500_ACC_DETECT3_CONTROL 0x35
+#define AB5500_ACC_DETECT3_LEVEL 0x36
+#define AB5500_ACC_DETECT3_TIMER_READ_CTL 0x37
+
+/*
+ * Interrupt register offsets
+ * Bank : 0x0E
+ */
+#define AB5500_IT_SOURCE8 0x28
+#define AB5500_IT_SOURCE9 0x29
+
+/* BatCtrl Current Source Constants */
+#define BAT_CTRL_7U_ENA (0x01 << 0)
+#define BAT_CTRL_15U_ENA (0x01 << 1)
+#define BAT_CTRL_30U_ENA (0x01 << 2)
+#define BAT_CTRL_60U_ENA (0x01 << 3)
+#define BAT_CTRL_120U_ENA (0x01 << 4)
+#define BAT_CTRL_CMP_ENA 0x04
+#define FORCE_BAT_CTRL_CMP_HIGH 0x08
+#define BAT_CTRL_PULL_UP_ENA 0x10
+
+/* Battery type */
+#define BATTERY_UNKNOWN 0
+
+#ifdef CONFIG_AB5500_BM
+struct ab5500_btemp *ab5500_btemp_get(void);
+int ab5500_btemp_get_batctrl_temp(struct ab5500_btemp *btemp);
+void ab5500_fg_reinit(void);
+#else
+static inline struct ab5500_btemp *ab5500_btemp_get(void)
+{
+ return NULL;
+}
+static inline int ab5500_btemp_get_batctrl_temp(struct ab5500_btemp *btemp)
+{
+ return 0;
+}
+static inline void ab5500_fg_reinit(void) {}
+#endif
+#endif /* _AB5500_BM_H */
diff --git a/include/linux/mfd/abx500/ab5500-gpadc.h b/include/linux/mfd/abx500/ab5500-gpadc.h
new file mode 100644
index 00000000000..67dc3cc9034
--- /dev/null
+++ b/include/linux/mfd/abx500/ab5500-gpadc.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010 ST-Ericsson SA
+ * Licensed under GPLv2.
+ *
+ * Author: Vijaya Kumar K <vijay.kilari@stericsson.com>
+ */
+
+#ifndef _AB5500_GPADC_H
+#define _AB5500_GPADC_H
+
+/*
+ * GPADC source:
+ * The BTEMP_BALL and PCB_TEMP are same. They differ if the
+ * battery supports internal NTC resistor connected to BDATA
+ * line. In this case, the BTEMP_BALL correspondss to BDATA
+ * of GPADC as per AB5500 product spec.
+ */
+
+#define BTEMP_BALL 0
+#define ACC_DETECT2 1
+#define ACC_DETECT3 2
+#define MAIN_BAT_V 3
+#define MAIN_BAT_V_TXON 4
+#define VBUS_V 5
+#define USB_CHARGER_C 6
+#define BK_BAT_V 7
+#define DIE_TEMP 8
+#define PCB_TEMP 9
+#define XTAL_TEMP 10
+#define USB_ID 11
+#define BAT_CTRL 12
+/* VBAT with TXON only min trigger */
+#define MAIN_BAT_V_TXON_TRIG_MIN 13
+/* VBAT with TX off only min trigger */
+#define MAIN_BAT_V_TRIG_MIN 14
+#define GPADC0_V 15
+
+/*
+ * Frequency of auto adc conversion
+ */
+#define MS1000 0x0
+#define MS500 0x1
+#define MS200 0x2
+#define MS100 0x3
+#define MS10 0x4
+
+struct ab5500_gpadc;
+
+/**
+ * struct adc_auto_input - AB5500 GPADC auto trigger
+ * @mux: Mux input
+ * @freq: frequency of conversion
+ * @min: min value for trigger
+ * @max: max value for trigger
+ * @auto_adc_callback: notification callback
+ */
+struct adc_auto_input {
+ u8 mux;
+ u8 freq;
+ int min;
+ int max;
+ int (*auto_adc_callback)(int mux);
+};
+
+struct ab5500_gpadc *ab5500_gpadc_get(const char *name);
+int ab5500_gpadc_convert(struct ab5500_gpadc *gpadc, u8 input);
+int ab5500_gpadc_convert_auto(struct ab5500_gpadc *gpadc,
+ struct adc_auto_input *auto_input);
+
+#endif /* _AB5500_GPADC_H */
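A hedged sketch of the auto-conversion interface above: request a periodic VBUS measurement and get a callback when the value leaves a window. The device name string passed to ab5500_gpadc_get() and the thresholds are placeholders, not values defined by this patch.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/mfd/abx500/ab5500-gpadc.h>

static int example_vbus_trigger(int mux)
{
	pr_info("GPADC auto trigger on mux %d\n", mux);
	return 0;
}

static int example_setup_auto_adc(void)
{
	/* static: the GPADC core may keep referring to this descriptor. */
	static struct adc_auto_input input = {
		.mux  = VBUS_V,
		.freq = MS500,
		.min  = 4400,	/* illustrative threshold */
		.max  = 5400,	/* illustrative threshold */
		.auto_adc_callback = example_vbus_trigger,
	};
	struct ab5500_gpadc *gpadc = ab5500_gpadc_get("ab5500-adc");

	if (IS_ERR_OR_NULL(gpadc))
		return -ENODEV;

	return ab5500_gpadc_convert_auto(gpadc, &input);
}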
diff --git a/include/linux/mfd/abx500/ab5500.h b/include/linux/mfd/abx500/ab5500.h
index a720051ae93..3dd9f9f7e68 100644
--- a/include/linux/mfd/abx500/ab5500.h
+++ b/include/linux/mfd/abx500/ab5500.h
@@ -24,6 +24,10 @@ enum ab5500_devid {
AB5500_DEVID_VIDEO,
AB5500_DEVID_DBIECI,
AB5500_DEVID_ONSWA,
+ AB5500_DEVID_CHARGALG,
+ AB5500_DEVID_BTEMP,
+ AB5500_DEVID_TEMPMON,
+ AB5500_DEVID_ACCDET,
AB5500_NUM_DEVICES,
};
@@ -92,8 +96,9 @@ enum ab5500_banks_addr {
#define AB5500_IT_SOURCE21_REG 0x35
#define AB5500_IT_SOURCE22_REG 0x36
#define AB5500_IT_SOURCE23_REG 0x37
+#define AB5500_IT_SOURCE24_REG 0x38
-#define AB5500_NUM_IRQ_REGS 23
+#define AB5500_NUM_IRQ_REGS 25
/**
* struct ab5500
@@ -118,6 +123,7 @@ struct ab5500 {
char chip_name[32];
u8 chip_id;
struct mutex irq_lock;
+ u32 num_event_reg;
u32 abb_events;
u8 mask[AB5500_NUM_IRQ_REGS];
u8 oldmask[AB5500_NUM_IRQ_REGS];
@@ -129,12 +135,31 @@ struct ab5500 {
#endif
};
+#ifndef CONFIG_AB5500_CORE
+static inline int ab5500_clock_rtc_enable(int num, bool enable)
+{
+ return -ENOSYS;
+}
+#else
+extern int ab5500_clock_rtc_enable(int num, bool enable);
+#endif
+
+/* Forward Declaration */
+struct ab5500_regulator_platform_data;
+
struct ab5500_platform_data {
struct {unsigned int base; unsigned int count; } irq;
void *dev_data[AB5500_NUM_DEVICES];
+ size_t dev_data_sz[AB5500_NUM_DEVICES];
struct abx500_init_settings *init_settings;
unsigned int init_settings_sz;
bool pm_power_off;
+ struct ab5500_regulator_platform_data *regulator;
+ struct ab5500_usbgpio_platform_data *usb;
+ struct abx500_accdet_platform_data *accdet;
};
+struct ab5500_ponkey_platform_data {
+ u8 shutdown_secs;
+};
#endif /* MFD_AB5500_H */
diff --git a/include/linux/mfd/abx500/ab8500-gpadc.h b/include/linux/mfd/abx500/ab8500-gpadc.h
index 252966769d9..fa706c5a04a 100644
--- a/include/linux/mfd/abx500/ab8500-gpadc.h
+++ b/include/linux/mfd/abx500/ab8500-gpadc.h
@@ -26,7 +26,7 @@
struct ab8500_gpadc;
-struct ab8500_gpadc *ab8500_gpadc_get(char *name);
+struct ab8500_gpadc *ab8500_gpadc_get(void);
int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel);
int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel);
int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
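The hunk above changes ab8500_gpadc_get() from taking a device name to taking no argument. A minimal sketch of a caller under that new signature; the channel id is whatever constant the unchanged part of this header defines, so it is left as a parameter here.

#include <linux/err.h>
#include <linux/mfd/abx500/ab8500-gpadc.h>

static int example_read_ab8500_gpadc(u8 channel)
{
	struct ab8500_gpadc *gpadc = ab8500_gpadc_get();

	if (IS_ERR_OR_NULL(gpadc))
		return -ENODEV;

	/* Returns the converted value for the given channel, or an error. */
	return ab8500_gpadc_convert(gpadc, channel);
}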
diff --git a/include/linux/mfd/abx500/ab8500-gpio.h b/include/linux/mfd/abx500/ab8500-gpio.h
index 488a8c920a2..d88e3025317 100644
--- a/include/linux/mfd/abx500/ab8500-gpio.h
+++ b/include/linux/mfd/abx500/ab8500-gpio.h
@@ -8,6 +8,8 @@
#ifndef _AB8500_GPIO_H
#define _AB8500_GPIO_H
+#include <mach/gpio.h>
+
/*
* Platform data to register a block: only the initial gpio/irq number.
*/
@@ -16,6 +18,62 @@ struct ab8500_gpio_platform_data {
int gpio_base;
u32 irq_base;
u8 config_reg[7];
+ u8 config_direction[6];
+ u8 config_pullups[6];
+};
+
+enum ab8500_pin {
+ AB8500_PIN_GPIO1 = AB8500_GPIO_BASE,
+ AB8500_PIN_GPIO2,
+ AB8500_PIN_GPIO3,
+ AB8500_PIN_GPIO4,
+ AB8500_PIN_GPIO5,
+ AB8500_PIN_GPIO6,
+ AB8500_PIN_GPIO7,
+ AB8500_PIN_GPIO8,
+ AB8500_PIN_GPIO9,
+ AB8500_PIN_GPIO10,
+ AB8500_PIN_GPIO11,
+ AB8500_PIN_GPIO12,
+ AB8500_PIN_GPIO13,
+ AB8500_PIN_GPIO14,
+ AB8500_PIN_GPIO15,
+ AB8500_PIN_GPIO16,
+ AB8500_PIN_GPIO17,
+ AB8500_PIN_GPIO18,
+ AB8500_PIN_GPIO19,
+ AB8500_PIN_GPIO20,
+ AB8500_PIN_GPIO21,
+ AB8500_PIN_GPIO22,
+ AB8500_PIN_GPIO23,
+ AB8500_PIN_GPIO24,
+ AB8500_PIN_GPIO25,
+ AB8500_PIN_GPIO26,
+ AB8500_PIN_GPIO27,
+ AB8500_PIN_GPIO28,
+ AB8500_PIN_GPIO29,
+ AB8500_PIN_GPIO30,
+ AB8500_PIN_GPIO31,
+ AB8500_PIN_GPIO32,
+ AB8500_PIN_GPIO33,
+ AB8500_PIN_GPIO34,
+ AB8500_PIN_GPIO35,
+ AB8500_PIN_GPIO36,
+ AB8500_PIN_GPIO37,
+ AB8500_PIN_GPIO38,
+ AB8500_PIN_GPIO39,
+ AB8500_PIN_GPIO40,
+ AB8500_PIN_GPIO41,
+ AB8500_PIN_GPIO42,
};
+int ab8500_config_pulldown(struct device *dev,
+ enum ab8500_pin gpio, bool enable);
+
+int ab8500_gpio_config_select(struct device *dev,
+ enum ab8500_pin gpio, bool gpio_select);
+
+int ab8500_gpio_config_get_select(struct device *dev,
+ enum ab8500_pin gpio, bool *gpio_select);
+
#endif /* _AB8500_GPIO_H */
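A sketch of how the new pin helpers might be used: disable the pull-down on AB8500 GPIO26 and select the GPIO function for that pin. Which struct device has to be passed is not spelled out in the header; the consumer's own device is assumed here.

#include <linux/mfd/abx500/ab8500-gpio.h>

static int example_setup_ab8500_pin(struct device *dev)
{
	int ret;

	ret = ab8500_config_pulldown(dev, AB8500_PIN_GPIO26, false);
	if (ret)
		return ret;

	/* true: use the pin as GPIO rather than its alternate function. */
	return ab8500_gpio_config_select(dev, AB8500_PIN_GPIO26, true);
}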
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
index 10da0291f8f..504725bebfe 100644
--- a/include/linux/mfd/abx500/ab8500-sysctrl.h
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -37,6 +37,11 @@ static inline int ab8500_sysctrl_clear(u16 reg, u8 bits)
return ab8500_sysctrl_write(reg, bits, 0);
}
+/* Configuration data for SysClkReq1RfClkBuf - SysClkReq8RfClkBuf */
+struct ab8500_sysctrl_platform_data {
+ u8 initial_req_buf_config[8];
+};
+
/* Registers */
#define AB8500_TURNONSTATUS 0x100
#define AB8500_RESETSTATUS 0x101
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 838c6b487cc..d3a50db5003 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -157,7 +157,6 @@ struct ab8500 {
struct device *dev;
struct mutex lock;
struct mutex irq_lock;
-
int irq_base;
int irq;
u8 chip_id;
@@ -172,27 +171,46 @@ struct ab8500 {
u8 oldmask[AB8500_NUM_IRQ_REGS];
};
-struct regulator_reg_init;
-struct regulator_init_data;
+struct ab8500_regulator_platform_data;
+struct ab8500_accdet_platform_data;
+struct ab8500_denc_platform_data;
+struct ab8500_audio_platform_data;
struct ab8500_gpio_platform_data;
+struct ab8500_sysctrl_platform_data;
/**
* struct ab8500_platform_data - AB8500 platform data
+ * @pm_power_off: Should machine pm power off hook be registered or not
+ * @thermal_power_off_pending: Set if there was a thermal alarm
+ * @thermal_set_time_sec: Time of the thermal alarm
+ * @thermal_time_out: Time out before the thermal alarm should be ignored
* @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
* @init: board-specific initialization after detection of ab8500
- * @num_regulator_reg_init: number of regulator init registers
- * @regulator_reg_init: regulator init registers
- * @num_regulator: number of regulators
* @regulator: machine-specific constraints for regulators
+ * @accdet: machine-specific Accessory detection data
+ * @battery: machine-specific battery management data
+ * @charger: machine-specific charger data
+ * @btemp: machine-specific battery temp data
*/
struct ab8500_platform_data {
int irq_base;
+ bool pm_power_off;
+ bool thermal_power_off_pending;
+ long thermal_set_time_sec;
+ long thermal_time_out;
void (*init) (struct ab8500 *);
- int num_regulator_reg_init;
- struct ab8500_regulator_reg_init *regulator_reg_init;
- int num_regulator;
- struct regulator_init_data *regulator;
+ struct ab8500_regulator_platform_data *regulator;
+ struct abx500_accdet_platform_data *accdet;
+ struct ab8500_bm_data *battery;
+ struct ab8500_denc_platform_data *denc;
+ struct ab8500_audio_platform_data *audio;
+ struct ab8500_charger_platform_data *charger;
+ struct ab8500_btemp_platform_data *btemp;
+ struct ab8500_fg_platform_data *fg;
+ struct ab8500_chargalg_platform_data *chargalg;
struct ab8500_gpio_platform_data *gpio;
+ struct ab8500_sysctrl_platform_data *sysctrl;
+ struct abx500_usbgpio_platform_data *usb;
};
extern int __devinit ab8500_init(struct ab8500 *ab8500);
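To show how the widened platform data is meant to be consumed, a board-file sketch using only the members that are complete types in this header; in real board code the remaining pointers (.regulator, .gpio, .sysctrl, ...) would be wired to board-specific data, and all names below are hypothetical.

#include <linux/mfd/abx500/ab8500.h>

static void example_ab8500_init(struct ab8500 *ab8500)
{
	/* board-specific setup after the AB8500 has been detected */
}

static struct ab8500_platform_data example_ab8500_pdata = {
	.irq_base     = 0,	/* board-specific IRQ base goes here */
	.pm_power_off = true,
	.init         = example_ab8500_init,
	/* .regulator, .battery, .gpio, .sysctrl, ... set from board data */
};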
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
new file mode 100644
index 00000000000..f04e47ff56a
--- /dev/null
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _UX500_CHARGALG_H
+#define _UX500_CHARGALG_H
+
+#include <linux/power_supply.h>
+
+#define psy_to_ux500_charger(x) container_of((x), \
+ struct ux500_charger, psy)
+
+/* Forward declaration */
+struct ux500_charger;
+
+struct ux500_charger_ops {
+ int (*enable) (struct ux500_charger *, int, int, int);
+ int (*kick_wd) (struct ux500_charger *);
+ int (*update_curr) (struct ux500_charger *, int);
+};
+
+/**
+ * struct ux500_charger - power supply ux500 charger sub class
+ * @psy: power supply base class
+ * @ops: ux500 charger operations
+ * @max_out_volt: maximum output charger voltage in mV
+ * @max_out_curr: maximum output charger current in mA
+ */
+struct ux500_charger {
+ struct power_supply psy;
+ struct ux500_charger_ops ops;
+ int max_out_volt;
+ int max_out_curr;
+};
+
+#endif
diff --git a/include/linux/mfd/db5500-prcmu.h b/include/linux/mfd/db5500-prcmu.h
index 9890687f582..681c8f99bf1 100644
--- a/include/linux/mfd/db5500-prcmu.h
+++ b/include/linux/mfd/db5500-prcmu.h
@@ -24,12 +24,53 @@ void db5500_prcmu_get_abb_event_buffer(void __iomem **buf);
int prcmu_resetout(u8 resoutn, u8 state);
int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk,
bool keep_ap_pll);
+u8 db5500_prcmu_get_power_state_result(void);
int db5500_prcmu_config_esram0_deep_sleep(u8 state);
void db5500_prcmu_system_reset(u16 reset_code);
u16 db5500_prcmu_get_reset_code(void);
-bool db5500_prcmu_is_ac_wake_requested(void);
+#ifdef CONFIG_UX500_SOC_DB5500
+void prcmu_modem_req(void);
+void prcmu_modem_rel(void);
+void prcmu_ape_ack(void);
+#endif
+bool db5500_prcmu_is_modem_requested(void);
+void db5500_prcmu_modem_reset(void);
int db5500_prcmu_set_arm_opp(u8 opp);
int db5500_prcmu_get_arm_opp(void);
+int db5500_prcmu_set_ape_opp(u8 opp);
+int db5500_prcmu_get_ape_opp(void);
+int db5500_prcmu_set_ddr_opp(u8 opp);
+int db5500_prcmu_get_ddr_opp(void);
+
+u32 db5500_prcmu_read(unsigned int reg);
+void db5500_prcmu_write(unsigned int reg, u32 value);
+void db5500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value);
+
+static inline unsigned long prcmu_clock_rate(u8 clock)
+{
+ return 0;
+}
+
+static inline long prcmu_round_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int prcmu_set_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+int db5500_prcmu_get_hotdog(void);
+int db5500_prcmu_config_hotdog(u8 threshold);
+int db5500_prcmu_config_hotmon(u8 low, u8 high);
+int db5500_prcmu_start_temp_sense(u16 cycles32k);
+int db5500_prcmu_stop_temp_sense(void);
+
+int db5500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off);
+int db5500_prcmu_enable_a9wdog(u8 id);
+int db5500_prcmu_disable_a9wdog(u8 id);
+int db5500_prcmu_kick_a9wdog(u8 id);
+int db5500_prcmu_load_a9wdog(u8 id, u32 timeout);
#else /* !CONFIG_UX500_SOC_DB5500 */
@@ -50,6 +91,11 @@ static inline int db5500_prcmu_request_clock(u8 clock, bool enable)
return 0;
}
+static inline unsigned long db5500_prcmu_clock_rate(u8 clock)
+{
+ return 0;
+}
+
static inline int db5500_prcmu_set_display_clocks(void)
{
return 0;
@@ -72,6 +118,16 @@ static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state)
static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {}
+static inline long db5500_prcmu_round_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_set_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
static inline int prcmu_resetout(u8 resoutn, u8 state)
{
return 0;
@@ -91,6 +147,11 @@ static inline int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk,
return 0;
}
+static inline u8 db5500_prcmu_get_power_state_result(void)
+{
+ return 0;
+}
+
static inline void db5500_prcmu_system_reset(u16 reset_code) {}
static inline u16 db5500_prcmu_get_reset_code(void)
@@ -98,11 +159,18 @@ static inline u16 db5500_prcmu_get_reset_code(void)
return 0;
}
-static inline bool db5500_prcmu_is_ac_wake_requested(void)
+static inline void db5500_prcmu_modem_reset(void) {}
+static inline bool db5500_prcmu_is_modem_requested(void)
{
return 0;
}
+#ifdef CONFIG_UX500_SOC_DB5500
+static inline void prcmu_ape_ack(void) {}
+static inline void prcmu_modem_req(void) {}
+static inline void prcmu_modem_rel(void) {}
+#endif
+
static inline int db5500_prcmu_set_arm_opp(u8 opp)
{
return 0;
@@ -113,6 +181,83 @@ static inline int db5500_prcmu_get_arm_opp(void)
return 0;
}
+static inline int db5500_prcmu_set_ape_opp(u8 opp)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_get_ape_opp(void)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_set_ddr_opp(u8 opp)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_get_ddr_opp(void)
+{
+ return 0;
+}
+
+static inline u32 db5500_prcmu_read(unsigned int reg)
+{
+ return 0;
+}
+
+static inline void db5500_prcmu_write(unsigned int reg, u32 value) {}
+
+static inline void db5500_prcmu_write_masked(unsigned int reg, u32 mask,
+ u32 value) {}
+
+static inline int db5500_prcmu_get_hotdog(void)
+{
+ return -ENOSYS;
+}
+static inline int db5500_prcmu_config_hotdog(u8 threshold)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_config_hotmon(u8 low, u8 high)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_start_temp_sense(u16 cycles32k)
+{
+ return 0;
+}
+static inline int db5500_prcmu_stop_temp_sense(void)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_enable_a9wdog(u8 id)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_disable_a9wdog(u8 id)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_kick_a9wdog(u8 id)
+{
+ return 0;
+}
+
+static inline int db5500_prcmu_load_a9wdog(u8 id, u32 timeout)
+{
+ return 0;
+}
#endif /* CONFIG_MFD_DB5500_PRCMU */
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 60d27f7bfc1..260537fbc54 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -11,6 +11,24 @@
#define __MFD_DB8500_PRCMU_H
#include <linux/interrupt.h>
+#include <linux/bitops.h>
+
+/*
+ * Registers
+ */
+#define DB8500_PRCM_GPIOCR 0x138
+#define DB8500_PRCM_GPIOCR_DBG_UARTMOD_CMD0 BIT(0)
+#define DB8500_PRCM_GPIOCR_DBG_STM_APE_CMD BIT(9)
+#define DB8500_PRCM_GPIOCR_DBG_STM_MOD_CMD1 BIT(11)
+#define DB8500_PRCM_GPIOCR_SPI2_SELECT BIT(23)
+
+#define DB8500_PRCM_LINE_VALUE 0x170
+#define DB8500_PRCM_LINE_VALUE_HSI_CAWAKE0 BIT(3)
+
+#define DB8500_PRCM_DSI_SW_RESET 0x324
+#define DB8500_PRCM_DSI_SW_RESET_DSI0_SW_RESETN BIT(0)
+#define DB8500_PRCM_DSI_SW_RESET_DSI1_SW_RESETN BIT(1)
+#define DB8500_PRCM_DSI_SW_RESET_DSI2_SW_RESETN BIT(2)
/* This portion previously known as <mach/prcmu-fw-defs_v1.h> */
@@ -163,84 +181,6 @@ enum hw_acc_state {
};
/**
- * enum mbox_2_arm_stat - Status messages definition for mbox_arm
- * @BOOT_TO_EXECUTEOK: The apBoot to apExecute state transition has been
- * completed
- * @DEEPSLEEPOK: The apExecute to apDeepSleep state transition has been
- * completed
- * @SLEEPOK: The apExecute to apSleep state transition has been completed
- * @IDLEOK: The apExecute to apIdle state transition has been completed
- * @SOFTRESETOK: The A9 watchdog/ SoftReset state has been completed
- * @SOFTRESETGO : The A9 watchdog/SoftReset state is on going
- * @BOOT_TO_EXECUTE: The apBoot to apExecute state transition is on going
- * @EXECUTE_TO_DEEPSLEEP: The apExecute to apDeepSleep state transition is on
- * going
- * @DEEPSLEEP_TO_EXECUTE: The apDeepSleep to apExecute state transition is on
- * going
- * @DEEPSLEEP_TO_EXECUTEOK: The apDeepSleep to apExecute state transition has
- * been completed
- * @EXECUTE_TO_SLEEP: The apExecute to apSleep state transition is on going
- * @SLEEP_TO_EXECUTE: The apSleep to apExecute state transition is on going
- * @SLEEP_TO_EXECUTEOK: The apSleep to apExecute state transition has been
- * completed
- * @EXECUTE_TO_IDLE: The apExecute to apIdle state transition is on going
- * @IDLE_TO_EXECUTE: The apIdle to apExecute state transition is on going
- * @IDLE_TO_EXECUTEOK: The apIdle to apExecute state transition has been
- * completed
- * @INIT_STATUS: Status init
- */
-enum ap_pwrsttr_status {
- BOOT_TO_EXECUTEOK = 0xFF,
- DEEPSLEEPOK = 0xFE,
- SLEEPOK = 0xFD,
- IDLEOK = 0xFC,
- SOFTRESETOK = 0xFB,
- SOFTRESETGO = 0xFA,
- BOOT_TO_EXECUTE = 0xF9,
- EXECUTE_TO_DEEPSLEEP = 0xF8,
- DEEPSLEEP_TO_EXECUTE = 0xF7,
- DEEPSLEEP_TO_EXECUTEOK = 0xF6,
- EXECUTE_TO_SLEEP = 0xF5,
- SLEEP_TO_EXECUTE = 0xF4,
- SLEEP_TO_EXECUTEOK = 0xF3,
- EXECUTE_TO_IDLE = 0xF2,
- IDLE_TO_EXECUTE = 0xF1,
- IDLE_TO_EXECUTEOK = 0xF0,
- RDYTODS_RETURNTOEXE = 0xEF,
- NORDYTODS_RETURNTOEXE = 0xEE,
- EXETOSLEEP_RETURNTOEXE = 0xED,
- EXETOIDLE_RETURNTOEXE = 0xEC,
- INIT_STATUS = 0xEB,
-
- /*error messages */
- INITERROR = 0x00,
- PLLARMLOCKP_ER = 0x01,
- PLLDDRLOCKP_ER = 0x02,
- PLLSOCLOCKP_ER = 0x03,
- PLLSOCK1LOCKP_ER = 0x04,
- ARMWFI_ER = 0x05,
- SYSCLKOK_ER = 0x06,
- I2C_NACK_DATA_ER = 0x07,
- BOOT_ER = 0x08,
- I2C_STATUS_ALWAYS_1 = 0x0A,
- I2C_NACK_REG_ADDR_ER = 0x0B,
- I2C_NACK_DATA0123_ER = 0x1B,
- I2C_NACK_ADDR_ER = 0x1F,
- CURAPPWRSTISNOT_BOOT = 0x20,
- CURAPPWRSTISNOT_EXECUTE = 0x21,
- CURAPPWRSTISNOT_SLEEPMODE = 0x22,
- CURAPPWRSTISNOT_CORRECTFORIT10 = 0x23,
- FIFO4500WUISNOT_WUPEVENT = 0x24,
- PLL32KLOCKP_ER = 0x29,
- DDRDEEPSLEEPOK_ER = 0x2A,
- ROMCODEREADY_ER = 0x50,
- WUPBEFOREDS = 0x51,
- DDRCONFIG_ER = 0x52,
- WUPBEFORESLEEP = 0x53,
- WUPBEFOREIDLE = 0x54
-}; /* earlier called as mbox_2_arm_stat */
-
-/**
* enum dvfs_stat - DVFS status messages definition
* @DVFS_GO: A state transition DVFS is on going
* @DVFS_ARM100OPPOK: The state transition DVFS has been completed for 100OPP
@@ -457,6 +397,25 @@ enum hw_acc_dev {
NUM_HW_ACC
};
+/**
+ * enum prcmu_power_status - results from set_power_state
+ * @PRCMU_SLEEP_OK: Sleep went ok
+ * @PRCMU_DEEP_SLEEP_OK: DeepSleep went ok
+ * @PRCMU_IDLE_OK: Idle went ok
+ * @PRCMU_DEEPIDLE_OK: DeepIdle went ok
+ * @PRCMU_PRCMU2ARMPENDINGIT_ER: Pending interrupt detected
+ * @PRCMU_ARMPENDINGIT_ER: Pending interrupt detected
+ *
+ */
+enum prcmu_power_status {
+ PRCMU_SLEEP_OK = 0xf3,
+ PRCMU_DEEP_SLEEP_OK = 0xf6,
+ PRCMU_IDLE_OK = 0xf0,
+ PRCMU_DEEPIDLE_OK = 0xe3,
+ PRCMU_PRCMU2ARMPENDINGIT_ER = 0x91,
+ PRCMU_ARMPENDINGIT_ER = 0x93,
+};
+
/*
* Definitions for autonomous power management configuration.
*/
@@ -493,6 +452,18 @@ struct prcmu_auto_pm_config {
u8 sva_policy;
};
+#define PRCMU_FW_PROJECT_U8500 2
+#define PRCMU_FW_PROJECT_U9500 4
+#define PRCMU_FW_PROJECT_U8500_C2 7
+#define PRCMU_FW_PROJECT_U9500_C2 11
+
+struct prcmu_fw_version {
+ u8 project;
+ u8 api_version;
+ u8 func_version;
+ u8 errata;
+};
+
#ifdef CONFIG_MFD_DB8500_PRCMU
void db8500_prcmu_early_init(void);
@@ -500,42 +471,36 @@ int prcmu_set_rc_a2p(enum romcode_write);
enum romcode_read prcmu_get_rc_p2a(void);
enum ap_pwrst prcmu_get_xp70_current_state(void);
bool prcmu_has_arm_maxopp(void);
-bool prcmu_is_u8400(void);
-int prcmu_set_ape_opp(u8 opp);
-int prcmu_get_ape_opp(void);
+struct prcmu_fw_version *prcmu_get_fw_version(void);
int prcmu_request_ape_opp_100_voltage(bool enable);
int prcmu_release_usb_wakeup_state(void);
-int prcmu_set_ddr_opp(u8 opp);
-int prcmu_get_ddr_opp(void);
/* NOTE! Use regulator framework instead */
int prcmu_set_hwacc(u16 hw_acc_dev, u8 state);
void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
struct prcmu_auto_pm_config *idle);
bool prcmu_is_auto_pm_enabled(void);
-int prcmu_config_clkout(u8 clkout, u8 source, u8 div);
-int prcmu_set_clock_divider(u8 clock, u8 divider);
-int prcmu_config_hotdog(u8 threshold);
-int prcmu_config_hotmon(u8 low, u8 high);
-int prcmu_start_temp_sense(u16 cycles32k);
-int prcmu_stop_temp_sense(void);
+
+int db8500_prcmu_config_hotdog(u8 threshold);
+int db8500_prcmu_config_hotmon(u8 low, u8 high);
+int db8500_prcmu_start_temp_sense(u16 cycles32k);
+int db8500_prcmu_stop_temp_sense(void);
int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
void prcmu_ac_wake_req(void);
void prcmu_ac_sleep_req(void);
-void prcmu_modem_reset(void);
-void prcmu_enable_spi2(void);
-void prcmu_disable_spi2(void);
+void db8500_prcmu_modem_reset(void);
-int prcmu_config_a9wdog(u8 num, bool sleep_auto_off);
-int prcmu_enable_a9wdog(u8 id);
-int prcmu_disable_a9wdog(u8 id);
-int prcmu_kick_a9wdog(u8 id);
-int prcmu_load_a9wdog(u8 id, u32 val);
+int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off);
+int db8500_prcmu_enable_a9wdog(u8 id);
+int db8500_prcmu_disable_a9wdog(u8 id);
+int db8500_prcmu_kick_a9wdog(u8 id);
+int db8500_prcmu_load_a9wdog(u8 id, u32 val);
void db8500_prcmu_system_reset(u16 reset_code);
int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll);
+u8 db8500_prcmu_get_power_state_result(void);
void db8500_prcmu_enable_wakeups(u32 wakeups);
int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state);
int db8500_prcmu_request_clock(u8 clock, bool enable);
@@ -549,6 +514,14 @@ u16 db8500_prcmu_get_reset_code(void);
bool db8500_prcmu_is_ac_wake_requested(void);
int db8500_prcmu_set_arm_opp(u8 opp);
int db8500_prcmu_get_arm_opp(void);
+int db8500_prcmu_set_ape_opp(u8 opp);
+int db8500_prcmu_get_ape_opp(void);
+int db8500_prcmu_set_ddr_opp(u8 opp);
+int db8500_prcmu_get_ddr_opp(void);
+
+u32 db8500_prcmu_read(unsigned int reg);
+void db8500_prcmu_write(unsigned int reg, u32 value);
+void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value);
#else /* !CONFIG_MFD_DB8500_PRCMU */
@@ -574,17 +547,17 @@ static inline bool prcmu_has_arm_maxopp(void)
return false;
}
-static inline bool prcmu_is_u8400(void)
+static inline struct prcmu_fw_version *prcmu_get_fw_version(void)
{
- return false;
+ return NULL;
}
-static inline int prcmu_set_ape_opp(u8 opp)
+static inline int db8500_prcmu_set_ape_opp(u8 opp)
{
return 0;
}
-static inline int prcmu_get_ape_opp(void)
+static inline int db8500_prcmu_get_ape_opp(void)
{
return APE_100_OPP;
}
@@ -599,12 +572,12 @@ static inline int prcmu_release_usb_wakeup_state(void)
return 0;
}
-static inline int prcmu_set_ddr_opp(u8 opp)
+static inline int db8500_prcmu_set_ddr_opp(u8 opp)
{
return 0;
}
-static inline int prcmu_get_ddr_opp(void)
+static inline int db8500_prcmu_get_ddr_opp(void)
{
return DDR_100_OPP;
}
@@ -613,7 +586,6 @@ static inline int prcmu_set_hwacc(u16 hw_acc_dev, u8 state)
{
return 0;
}
-
static inline void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
struct prcmu_auto_pm_config *idle)
{
@@ -624,32 +596,22 @@ static inline bool prcmu_is_auto_pm_enabled(void)
return false;
}
-static inline int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
-{
- return 0;
-}
-
-static inline int prcmu_set_clock_divider(u8 clock, u8 divider)
-{
- return 0;
-}
-
-static inline int prcmu_config_hotdog(u8 threshold)
+static inline int db8500_prcmu_config_hotdog(u8 threshold)
{
return 0;
}
-static inline int prcmu_config_hotmon(u8 low, u8 high)
+static inline int db8500_prcmu_config_hotmon(u8 low, u8 high)
{
return 0;
}
-static inline int prcmu_start_temp_sense(u16 cycles32k)
+static inline int db8500_prcmu_start_temp_sense(u16 cycles32k)
{
return 0;
}
-static inline int prcmu_stop_temp_sense(void)
+static inline int db8500_prcmu_stop_temp_sense(void)
{
return 0;
}
@@ -668,22 +630,17 @@ static inline void prcmu_ac_wake_req(void) {}
static inline void prcmu_ac_sleep_req(void) {}
-static inline void prcmu_modem_reset(void) {}
+static inline void db8500_prcmu_modem_reset(void) {}
-static inline int prcmu_enable_spi2(void)
-{
- return 0;
-}
+static inline void db8500_prcmu_system_reset(u16 reset_code) {}
-static inline int prcmu_disable_spi2(void)
+static inline int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk,
+ bool keep_ap_pll)
{
return 0;
}
-static inline void db8500_prcmu_system_reset(u16 reset_code) {}
-
-static inline int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk,
- bool keep_ap_pll)
+static inline u8 db8500_prcmu_get_power_state_result(void)
{
return 0;
}
@@ -729,27 +686,27 @@ static inline u16 db8500_prcmu_get_reset_code(void)
return 0;
}
-static inline int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
+static inline int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
{
return 0;
}
-static inline int prcmu_enable_a9wdog(u8 id)
+static inline int db8500_prcmu_enable_a9wdog(u8 id)
{
return 0;
}
-static inline int prcmu_disable_a9wdog(u8 id)
+static inline int db8500_prcmu_disable_a9wdog(u8 id)
{
return 0;
}
-static inline int prcmu_kick_a9wdog(u8 id)
+static inline int db8500_prcmu_kick_a9wdog(u8 id)
{
return 0;
}
-static inline int prcmu_load_a9wdog(u8 id, u32 val)
+static inline int db8500_prcmu_load_a9wdog(u8 id, u32 val)
{
return 0;
}
@@ -769,6 +726,16 @@ static inline int db8500_prcmu_get_arm_opp(void)
return 0;
}
+static inline u32 db8500_prcmu_read(unsigned int reg)
+{
+ return 0;
+}
+
+static inline void db8500_prcmu_write(unsigned int reg, u32 value) {}
+
+static inline void db8500_prcmu_write_masked(unsigned int reg, u32 mask,
+ u32 value) {}
+
#endif /* !CONFIG_MFD_DB8500_PRCMU */
#endif /* __MFD_DB8500_PRCMU_H */
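Several of the watchdog calls above are renamed with a db8500_ prefix and dispatched through generic wrappers in dbx500-prcmu.h further below. A hedged sketch of arming and kicking an A9 watchdog through those wrappers; the timeout units depend on the PRCMU firmware and the value here is illustrative only.

#include <linux/mfd/dbx500-prcmu.h>

static int example_arm_a9wdog(void)
{
	int ret;

	/* one watchdog, do not auto-disable it in sleep */
	ret = prcmu_config_a9wdog(1, false);
	if (ret)
		return ret;

	ret = prcmu_load_a9wdog(0, 131072);	/* illustrative timeout */
	if (ret)
		return ret;

	return prcmu_enable_a9wdog(0);
}
/* Later, kick watchdog 0 periodically with prcmu_kick_a9wdog(0). */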
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index bac942f959c..e99fecda5f6 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -10,7 +10,7 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
-#include <asm/mach-types.h>
+#include <linux/err.h>
/* PRCMU Wakeup defines */
enum prcmu_wakeup_index {
@@ -80,6 +80,29 @@ enum prcmu_wakeup_index {
#define EPOD_STATE_ON_CLK_OFF 0x03
#define EPOD_STATE_ON 0x04
+/* DB5500 CLKOUT IDs */
+enum {
+ DB5500_CLKOUT0 = 0,
+ DB5500_CLKOUT1,
+};
+
+/* DB5500 CLKOUTx sources */
+enum {
+ DB5500_CLKOUT_REF_CLK_SEL0,
+ DB5500_CLKOUT_RTC_CLK0_SEL0,
+ DB5500_CLKOUT_ULP_CLK_SEL0,
+ DB5500_CLKOUT_STATIC0,
+ DB5500_CLKOUT_REFCLK,
+ DB5500_CLKOUT_ULPCLK,
+ DB5500_CLKOUT_ARMCLK,
+ DB5500_CLKOUT_SYSACC0CLK,
+ DB5500_CLKOUT_SOC0PLLCLK,
+ DB5500_CLKOUT_SOC1PLLCLK,
+ DB5500_CLKOUT_DDRPLLCLK,
+ DB5500_CLKOUT_TVCLK,
+ DB5500_CLKOUT_IRDACLK,
+};
+
/*
* CLKOUT sources
*/
@@ -111,6 +134,7 @@ enum prcmu_clock {
PRCMU_MSP1CLK,
PRCMU_I2CCLK,
PRCMU_SDMMCCLK,
+ PRCMU_SPARE1CLK,
PRCMU_SLIMCLK,
PRCMU_PER1CLK,
PRCMU_PER2CLK,
@@ -139,12 +163,20 @@ enum prcmu_clock {
PRCMU_IRRCCLK,
PRCMU_SIACLK,
PRCMU_SVACLK,
+ PRCMU_ACLK,
PRCMU_NUM_REG_CLOCKS,
PRCMU_SYSCLK = PRCMU_NUM_REG_CLOCKS,
+ PRCMU_CDCLK,
PRCMU_TIMCLK,
PRCMU_PLLSOC0,
PRCMU_PLLSOC1,
PRCMU_PLLDDR,
+ PRCMU_PLLDSI,
+ PRCMU_DSI0CLK,
+ PRCMU_DSI1CLK,
+ PRCMU_DSI0ESCCLK,
+ PRCMU_DSI1ESCCLK,
+ PRCMU_DSI2ESCCLK,
};
/**
@@ -153,12 +185,14 @@ enum prcmu_clock {
* @APE_NO_CHANGE: The APE operating point is unchanged
* @APE_100_OPP: The new APE operating point is ape100opp
* @APE_50_OPP: 50%
+ * @APE_50_PARTLY_25_OPP: 50%, except some clocks at 25%.
*/
enum ape_opp {
APE_OPP_INIT = 0x00,
APE_NO_CHANGE = 0x01,
APE_100_OPP = 0x02,
- APE_50_OPP = 0x03
+ APE_50_OPP = 0x03,
+ APE_50_PARTLY_25_OPP = 0xFF,
};
/**
@@ -218,9 +252,11 @@ enum ddr_pwrst {
#if defined(CONFIG_UX500_SOC_DB8500) || defined(CONFIG_UX500_SOC_DB5500)
+#include <mach/id.h>
+
static inline void __init prcmu_early_init(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_early_init();
else
return db8500_prcmu_early_init();
@@ -229,7 +265,7 @@ static inline void __init prcmu_early_init(void)
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
bool keep_ap_pll)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_set_power_state(state, keep_ulp_clk,
keep_ap_pll);
else
@@ -237,17 +273,25 @@ static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
keep_ap_pll);
}
+static inline u8 prcmu_get_power_state_result(void)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_get_power_state_result();
+ else
+ return db8500_prcmu_get_power_state_result();
+}
+
static inline int prcmu_set_epod(u16 epod_id, u8 epod_state)
{
- if (machine_is_u5500())
- return -EINVAL;
+ if (cpu_is_u5500())
+ return db5500_prcmu_set_epod(epod_id, epod_state);
else
return db8500_prcmu_set_epod(epod_id, epod_state);
}
static inline void prcmu_enable_wakeups(u32 wakeups)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
db5500_prcmu_enable_wakeups(wakeups);
else
db8500_prcmu_enable_wakeups(wakeups);
@@ -260,7 +304,7 @@ static inline void prcmu_disable_wakeups(void)
static inline void prcmu_config_abb_event_readout(u32 abb_events)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
db5500_prcmu_config_abb_event_readout(abb_events);
else
db8500_prcmu_config_abb_event_readout(abb_events);
@@ -268,7 +312,7 @@ static inline void prcmu_config_abb_event_readout(u32 abb_events)
static inline void prcmu_get_abb_event_buffer(void __iomem **buf)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
db5500_prcmu_get_abb_event_buffer(buf);
else
db8500_prcmu_get_abb_event_buffer(buf);
@@ -281,36 +325,66 @@ int prcmu_config_clkout(u8 clkout, u8 source, u8 div);
static inline int prcmu_request_clock(u8 clock, bool enable)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_request_clock(clock, enable);
else
return db8500_prcmu_request_clock(clock, enable);
}
-int prcmu_set_ape_opp(u8 opp);
-int prcmu_get_ape_opp(void);
-int prcmu_set_ddr_opp(u8 opp);
-int prcmu_get_ddr_opp(void);
+unsigned long prcmu_clock_rate(u8 clock);
+long prcmu_round_clock_rate(u8 clock, unsigned long rate);
+int prcmu_set_clock_rate(u8 clock, unsigned long rate);
+
+static inline int prcmu_set_ddr_opp(u8 opp)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_set_ddr_opp(opp);
+ else
+ return db8500_prcmu_set_ddr_opp(opp);
+}
+static inline int prcmu_get_ddr_opp(void)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_get_ddr_opp();
+ else
+ return db8500_prcmu_get_ddr_opp();
+}
static inline int prcmu_set_arm_opp(u8 opp)
{
- if (machine_is_u5500())
- return -EINVAL;
+ if (cpu_is_u5500())
+ return db5500_prcmu_set_arm_opp(opp);
else
return db8500_prcmu_set_arm_opp(opp);
}
static inline int prcmu_get_arm_opp(void)
{
- if (machine_is_u5500())
- return -EINVAL;
+ if (cpu_is_u5500())
+ return db5500_prcmu_get_arm_opp();
else
return db8500_prcmu_get_arm_opp();
}
+static inline int prcmu_set_ape_opp(u8 opp)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_set_ape_opp(opp);
+ else
+ return db8500_prcmu_set_ape_opp(opp);
+}
+
+static inline int prcmu_get_ape_opp(void)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_get_ape_opp();
+ else
+ return db8500_prcmu_get_ape_opp();
+}
+
static inline void prcmu_system_reset(u16 reset_code)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_system_reset(reset_code);
else
return db8500_prcmu_system_reset(reset_code);
@@ -318,7 +392,7 @@ static inline void prcmu_system_reset(u16 reset_code)
static inline u16 prcmu_get_reset_code(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_get_reset_code();
else
return db8500_prcmu_get_reset_code();
@@ -326,18 +400,25 @@ static inline u16 prcmu_get_reset_code(void)
void prcmu_ac_wake_req(void);
void prcmu_ac_sleep_req(void);
-void prcmu_modem_reset(void);
+static inline void prcmu_modem_reset(void)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_modem_reset();
+ else
+ return db8500_prcmu_modem_reset();
+}
+
static inline bool prcmu_is_ac_wake_requested(void)
{
- if (machine_is_u5500())
- return db5500_prcmu_is_ac_wake_requested();
+ if (cpu_is_u5500())
+ return db5500_prcmu_is_modem_requested();
else
return db8500_prcmu_is_ac_wake_requested();
}
static inline int prcmu_set_display_clocks(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_set_display_clocks();
else
return db8500_prcmu_set_display_clocks();
@@ -345,7 +426,7 @@ static inline int prcmu_set_display_clocks(void)
static inline int prcmu_disable_dsipll(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_disable_dsipll();
else
return db8500_prcmu_disable_dsipll();
@@ -353,7 +434,7 @@ static inline int prcmu_disable_dsipll(void)
static inline int prcmu_enable_dsipll(void)
{
- if (machine_is_u5500())
+ if (cpu_is_u5500())
return db5500_prcmu_enable_dsipll();
else
return db8500_prcmu_enable_dsipll();
@@ -361,11 +442,107 @@ static inline int prcmu_enable_dsipll(void)
static inline int prcmu_config_esram0_deep_sleep(u8 state)
{
- if (machine_is_u5500())
- return -EINVAL;
+ if (cpu_is_u5500())
+ return db5500_prcmu_config_esram0_deep_sleep(state);
else
return db8500_prcmu_config_esram0_deep_sleep(state);
}
+
+static inline int prcmu_config_hotdog(u8 threshold)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_config_hotdog(threshold);
+ else
+ return db8500_prcmu_config_hotdog(threshold);
+}
+
+static inline int prcmu_config_hotmon(u8 low, u8 high)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_config_hotmon(low, high);
+ else
+ return db8500_prcmu_config_hotmon(low, high);
+}
+
+static inline int prcmu_start_temp_sense(u16 cycles32k)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_start_temp_sense(cycles32k);
+ else
+ return db8500_prcmu_start_temp_sense(cycles32k);
+}
+
+static inline int prcmu_stop_temp_sense(void)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_stop_temp_sense();
+ else
+ return db8500_prcmu_stop_temp_sense();
+}
+
+static inline u32 prcmu_read(unsigned int reg)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_read(reg);
+ else
+ return db8500_prcmu_read(reg);
+}
+
+static inline void prcmu_write(unsigned int reg, u32 value)
+{
+ if (cpu_is_u5500())
+ db5500_prcmu_write(reg, value);
+ else
+ db8500_prcmu_write(reg, value);
+}
+
+static inline void prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
+{
+ if (cpu_is_u5500())
+ db5500_prcmu_write_masked(reg, mask, value);
+ else
+ db8500_prcmu_write_masked(reg, mask, value);
+}
+
+static inline int prcmu_enable_a9wdog(u8 id)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_enable_a9wdog(id);
+ else
+ return db8500_prcmu_enable_a9wdog(id);
+}
+
+static inline int prcmu_disable_a9wdog(u8 id)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_disable_a9wdog(id);
+ else
+ return db8500_prcmu_disable_a9wdog(id);
+}
+
+static inline int prcmu_kick_a9wdog(u8 id)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_kick_a9wdog(id);
+ else
+ return db8500_prcmu_kick_a9wdog(id);
+}
+
+static inline int prcmu_load_a9wdog(u8 id, u32 timeout)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_load_a9wdog(id, timeout);
+ else
+ return db8500_prcmu_load_a9wdog(id, timeout);
+}
+
+static inline int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
+{
+ if (cpu_is_u5500())
+ return db5500_prcmu_config_a9wdog(num, sleep_auto_off);
+ else
+ return db8500_prcmu_config_a9wdog(num, sleep_auto_off);
+}
#else
static inline void __init prcmu_early_init(void) {}
@@ -405,6 +582,21 @@ static inline int prcmu_request_clock(u8 clock, bool enable)
return 0;
}
+static inline long prcmu_round_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int prcmu_set_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
+static inline unsigned long prcmu_clock_rate(u8 clock)
+{
+ return 0;
+}
+
static inline int prcmu_set_ape_opp(u8 opp)
{
return 0;
@@ -480,14 +672,133 @@ static inline void prcmu_get_abb_event_buffer(void __iomem **buf)
*buf = NULL;
}
+static inline int prcmu_config_hotdog(u8 threshold)
+{
+ return 0;
+}
+
+static inline int prcmu_config_hotmon(u8 low, u8 high)
+{
+ return 0;
+}
+
+static inline int prcmu_start_temp_sense(u16 cycles32k)
+{
+ return 0;
+}
+
+static inline int prcmu_stop_temp_sense(void)
+{
+ return 0;
+}
+
+static inline u32 prcmu_read(unsigned int reg)
+{
+ return 0;
+}
+
+static inline void prcmu_write(unsigned int reg, u32 value) {}
+
+static inline void prcmu_write_masked(unsigned int reg, u32 mask, u32 value) {}
+
+#endif
+
+static inline void prcmu_set(unsigned int reg, u32 bits)
+{
+ prcmu_write_masked(reg, bits, bits);
+}
+
+static inline void prcmu_clear(unsigned int reg, u32 bits)
+{
+ prcmu_write_masked(reg, bits, 0);
+}
+
+#if defined(CONFIG_UX500_SOC_DB8500) || defined(CONFIG_UX500_SOC_DB5500)
+
+/**
+ * prcmu_enable_spi2 - Enables pin muxing for SPI2 on OtherAlternateC1.
+ */
+static inline void prcmu_enable_spi2(void)
+{
+ if (cpu_is_u8500())
+ prcmu_set(DB8500_PRCM_GPIOCR, DB8500_PRCM_GPIOCR_SPI2_SELECT);
+}
+
+/**
+ * prcmu_disable_spi2 - Disables pin muxing for SPI2 on OtherAlternateC1.
+ */
+static inline void prcmu_disable_spi2(void)
+{
+ if (cpu_is_u8500())
+ prcmu_clear(DB8500_PRCM_GPIOCR, DB8500_PRCM_GPIOCR_SPI2_SELECT);
+}
+
+/**
+ * prcmu_enable_stm_mod_uart - Enables pin muxing for STMMOD
+ * and UARTMOD on OtherAlternateC3.
+ */
+static inline void prcmu_enable_stm_mod_uart(void)
+{
+ if (cpu_is_u8500()) {
+ prcmu_set(DB8500_PRCM_GPIOCR,
+ (DB8500_PRCM_GPIOCR_DBG_STM_MOD_CMD1 |
+ DB8500_PRCM_GPIOCR_DBG_UARTMOD_CMD0));
+ }
+}
+
+/**
+ * prcmu_disable_stm_mod_uart - Disables pin muxing for STMMOD
+ * and UARTMOD on OtherAlternateC3.
+ */
+static inline void prcmu_disable_stm_mod_uart(void)
+{
+ if (cpu_is_u8500()) {
+ prcmu_clear(DB8500_PRCM_GPIOCR,
+ (DB8500_PRCM_GPIOCR_DBG_STM_MOD_CMD1 |
+ DB8500_PRCM_GPIOCR_DBG_UARTMOD_CMD0));
+ }
+}
+
+/**
+ * prcmu_enable_stm_ape - Enables pin muxing for STM APE on OtherAlternateC1.
+ */
+static inline void prcmu_enable_stm_ape(void)
+{
+ if (cpu_is_u8500()) {
+ prcmu_set(DB8500_PRCM_GPIOCR,
+ DB8500_PRCM_GPIOCR_DBG_STM_APE_CMD);
+ }
+}
+
+/**
+ * prcmu_disable_stm_ape - Disables pin muxing for STM APE on OtherAlternateC1.
+ */
+static inline void prcmu_disable_stm_ape(void)
+{
+ if (cpu_is_u8500()) {
+ prcmu_clear(DB8500_PRCM_GPIOCR,
+ DB8500_PRCM_GPIOCR_DBG_STM_APE_CMD);
+ }
+}
+
+#else
+
+static inline void prcmu_enable_spi2(void) {}
+static inline void prcmu_disable_spi2(void) {}
+static inline void prcmu_enable_stm_mod_uart(void) {}
+static inline void prcmu_disable_stm_mod_uart(void) {}
+static inline void prcmu_enable_stm_ape(void) {}
+static inline void prcmu_disable_stm_ape(void) {}
+
#endif
/* PRCMU QoS APE OPP class */
#define PRCMU_QOS_APE_OPP 1
#define PRCMU_QOS_DDR_OPP 2
+#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-#ifdef CONFIG_UX500_PRCMU_QOS_POWER
+#ifdef CONFIG_DBX500_PRCMU_QOS_POWER
unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
@@ -500,6 +811,7 @@ int prcmu_qos_add_notifier(int prcmu_qos_class,
struct notifier_block *notifier);
int prcmu_qos_remove_notifier(int prcmu_qos_class,
struct notifier_block *notifier);
+void prcmu_qos_voice_call_override(bool enable);
#else
@@ -543,7 +855,7 @@ static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
{
return 0;
}
-
+static inline void prcmu_qos_voice_call_override(bool enable) {}
#endif
#endif /* __MACH_PRCMU_H */
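For reference, a minimal sketch of how a caller might use the dispatching wrappers and watchdog helpers added to this header; the include path, function name, watchdog id and timeout below are assumptions, not part of the patch.

#include <mach/prcmu.h>		/* assumed include path for this header */

static int example_setup_trace_and_wdog(void)
{
	int ret;

	/* No-op on DB5500; sets the DB8500_PRCM_GPIOCR mux bits on DB8500. */
	prcmu_enable_stm_ape();

	/* Each wrapper picks the db5500_/db8500_ backend at run time. */
	ret = prcmu_load_a9wdog(0, 1000);	/* id and timeout are placeholders */
	if (ret)
		return ret;

	return prcmu_enable_a9wdog(0);
}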
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index ca1d7a34760..3e86d53f3d5 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -114,7 +114,7 @@ struct matrix_keymap_data;
* @no_autorepeat: disable key autorepeat
*/
struct stmpe_keypad_platform_data {
- struct matrix_keymap_data *keymap_data;
+ const struct matrix_keymap_data *keymap_data;
unsigned int debounce_ms;
unsigned int scan_count;
bool no_autorepeat;
diff --git a/include/linux/mfd/tc35892.h b/include/linux/mfd/tc35892.h
new file mode 100644
index 00000000000..8c5385c2191
--- /dev/null
+++ b/include/linux/mfd/tc35892.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ */
+
+#ifndef __LINUX_MFD_TC35892_H
+#define __LINUX_MFD_TC35892_H
+
+#include <linux/device.h>
+
+#define TC35892_RSTCTRL_IRQRST (1 << 4)
+#define TC35892_RSTCTRL_TIMRST (1 << 3)
+#define TC35892_RSTCTRL_ROTRST (1 << 2)
+#define TC35892_RSTCTRL_KBDRST (1 << 1)
+#define TC35892_RSTCTRL_GPIRST (1 << 0)
+
+
+#define TC35892_MANFCODE 0x80
+#define TC35892_MANFCODE_MAGIC 0x03
+#define TC35892_VERSION 0x81
+#define TC35892_RSTCTRL 0x82
+#define TC35892_EXTRSTN 0x83
+#define TC35892_RSTINTCLR 0x84
+#define TC35892_CLKMODE 0x88
+#define TC35892_CLKCFG 0x89
+#define TC35892_CLKEN 0x8A
+#define TC35892_IRQST 0x91
+
+#define TC35892_DRIVE0_L 0xA0
+#define TC35892_DRIVE0_H 0xA1
+#define TC35892_DRIVE1_L 0xA2
+#define TC35892_DRIVE1_H 0xA3
+#define TC35892_DRIVE2_L 0xA4
+#define TC35892_DRIVE2_H 0XA5
+#define TC35892_DRIVE3 0xA6
+#define TC35892_IOCFG 0xA7
+
+#define TC35892_IOPC0_L 0xAA
+#define TC35892_IOPC0_H 0xAB
+#define TC35892_IOPC1_L 0xAC
+#define TC35892_IOPC1_H 0xAD
+#define TC35892_IOPC2_L 0xAE
+#define TC35892_IOPC2_H 0xAF
+
+#define TC35892_GPIODATA0 0xC0
+#define TC35892_GPIOMASK0 0xC1
+#define TC35892_GPIODATA1 0xC2
+#define TC35892_GPIOMASK1 0xC3
+#define TC35892_GPIODATA2 0xC4
+#define TC35892_GPIOMASK2 0xC5
+#define TC35892_GPIODIR0 0xC6
+#define TC35892_GPIODIR1 0xC7
+#define TC35892_GPIODIR2 0xC8
+#define TC35892_GPIOIS0 0xC9
+#define TC35892_GPIOIS1 0xCA
+#define TC35892_GPIOIS2 0xCB
+#define TC35892_GPIOIBE0 0xCC
+#define TC35892_GPIOIBE1 0xCD
+#define TC35892_GPIOIBE2 0xCE
+#define TC35892_GPIOIEV0 0xCF
+#define TC35892_GPIOIEV1 0xD0
+#define TC35892_GPIOIEV2 0xD1
+#define TC35892_GPIOIE0 0xD2
+#define TC35892_GPIOIE1 0xD3
+#define TC35892_GPIOIE2 0xD4
+#define TC35892_GPIORIS0 0xD6
+#define TC35892_GPIORIS1 0xD7
+#define TC35892_GPIORIS2 0xD8
+#define TC35892_GPIOMIS0 0xD9
+#define TC35892_GPIOMIS1 0xDA
+#define TC35892_GPIOMIS2 0xDB
+#define TC35892_GPIOIC0 0xDC
+#define TC35892_GPIOIC1 0xDD
+#define TC35892_GPIOIC2 0xDE
+#define TC35892_GPIOODM0 0xE0
+#define TC35892_GPIOODE0 0xE1
+#define TC35892_GPIOODM1 0xE2
+#define TC35892_GPIOODE1 0xE3
+#define TC35892_GPIOODM2 0xE4
+#define TC35892_GPIOODE2 0xE5
+
+#define TC35892_GPIOSYNC0 0xE6
+#define TC35892_GPIOSYNC1 0xE7
+#define TC35892_GPIOSYNC2 0xE8
+
+#define TC35892_GPIOWAKE0 0xE9
+#define TC35892_GPIOWAKE1 0xEA
+#define TC35892_GPIOWAKE2 0xEB
+
+#define TC35892_INT_GPIIRQ 0
+#define TC35892_INT_TI0IRQ 1
+#define TC35892_INT_TI1IRQ 2
+#define TC35892_INT_TI2IRQ 3
+#define TC35892_INT_ROTIRQ 5
+#define TC35892_INT_KBDIRQ 6
+#define TC35892_INT_PORIRQ 7
+
+#define TC35892_NR_INTERNAL_IRQS 8
+#define TC35892_INT_GPIO(x) (TC35892_NR_INTERNAL_IRQS + (x))
+
+struct tc35892 {
+ struct mutex lock;
+ struct device *dev;
+ struct i2c_client *i2c;
+
+ int irq_base;
+ int num_gpio;
+ struct tc35892_platform_data *pdata;
+};
+
+extern int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data);
+extern int tc35892_reg_read(struct tc35892 *tc35892, u8 reg);
+extern int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length,
+ u8 *values);
+extern int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
+ const u8 *values);
+extern int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val);
+
+/**
+ * struct tc35892_gpio_platform_data - TC35892 GPIO platform data
+ * @gpio_base: first gpio number assigned to TC35892. A maximum of
+ * %TC35892_NR_GPIOS GPIOs will be allocated.
+ * @setup: callback for board-specific initialization
+ * @remove: callback for board-specific teardown
+ */
+struct tc35892_gpio_platform_data {
+ int gpio_base;
+ void (*setup)(struct tc35892 *tc35892, unsigned gpio_base);
+ void (*remove)(struct tc35892 *tc35892, unsigned gpio_base);
+};
+
+/**
+ * struct tc35892_platform_data - TC35892 platform data
+ * @irq_base: base IRQ number. %TC35892_NR_IRQS irqs will be used.
+ * @gpio: GPIO-specific platform data
+ */
+struct tc35892_platform_data {
+ int irq_base;
+ struct tc35892_gpio_platform_data *gpio;
+};
+
+#define TC35892_NR_GPIOS 24
+#define TC35892_NR_IRQS TC35892_INT_GPIO(TC35892_NR_GPIOS)
+
+#endif
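A hedged board-level sketch of wiring up the new tc35892 header through I2C platform data; the GPIO base, IRQ numbers and I2C address below are illustrative only.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/mfd/tc35892.h>

#define EX_TC35892_GPIO_BASE	268	/* assumption */
#define EX_TC35892_IRQ_BASE	352	/* assumption */

static struct tc35892_gpio_platform_data ex_tc35892_gpio_data = {
	.gpio_base = EX_TC35892_GPIO_BASE,
};

static struct tc35892_platform_data ex_tc35892_data = {
	.irq_base = EX_TC35892_IRQ_BASE,
	.gpio = &ex_tc35892_gpio_data,
};

static struct i2c_board_info __initdata ex_i2c0_devices[] = {
	{
		I2C_BOARD_INFO("tc35892", 0x42),	/* address is a placeholder */
		.irq = 217,				/* placeholder */
		.platform_data = &ex_tc35892_data,
	},
};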
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index 16c76e124f9..b8c6a941071 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -31,20 +31,43 @@ enum tx3589x_block {
#define TC3589x_EVTCODE_FIFO 0x10
#define TC3589x_KBDMFS 0x8F
-#define TC3589x_IRQST 0x91
-
-#define TC3589x_MANFCODE_MAGIC 0x03
#define TC3589x_MANFCODE 0x80
+#define TC3589x_MANFCODE_MAGIC 0x03
#define TC3589x_VERSION 0x81
-#define TC3589x_IOCFG 0xA7
+#define TC3589x_RSTCTRL 0x82
+#define TC3589x_EXTRSTN 0x83
+#define TC3589x_RSTINTCLR 0x84
#define TC3589x_CLKMODE 0x88
#define TC3589x_CLKCFG 0x89
#define TC3589x_CLKEN 0x8A
+#define TC3589x_IRQST 0x91
-#define TC3589x_RSTCTRL 0x82
-#define TC3589x_EXTRSTN 0x83
-#define TC3589x_RSTINTCLR 0x84
+#define TC3589x_DRIVE0_L 0xA0
+#define TC3589x_DRIVE0_H 0xA1
+#define TC3589x_DRIVE1_L 0xA2
+#define TC3589x_DRIVE1_H 0xA3
+#define TC3589x_DRIVE2_L 0xA4
+#define TC3589x_DRIVE2_H 0XA5
+#define TC3589x_DRIVE3 0xA6
+#define TC3589x_IOCFG 0xA7
+
+#define TC3589x_IOPC0_L 0xAA
+#define TC3589x_IOPC0_H 0xAB
+#define TC3589x_IOPC1_L 0xAC
+#define TC3589x_IOPC1_H 0xAD
+#define TC3589x_IOPC2_L 0xAE
+#define TC3589x_IOPC2_H 0xAF
+
+#define TC3589x_GPIODATA0 0xC0
+#define TC3589x_GPIOMASK0 0xC1
+#define TC3589x_GPIODATA1 0xC2
+#define TC3589x_GPIOMASK1 0xC3
+#define TC3589x_GPIODATA2 0xC4
+#define TC3589x_GPIOMASK2 0xC5
+#define TC3589x_GPIODIR0 0xC6
+#define TC3589x_GPIODIR1 0xC7
+#define TC3589x_GPIODIR2 0xC8
/* Pull up/down configuration registers */
#define TC3589x_IOCFG 0xA7
@@ -75,17 +98,12 @@ enum tx3589x_block {
#define TC3589x_GPIOIC0 0xDC
#define TC3589x_GPIOIC1 0xDD
#define TC3589x_GPIOIC2 0xDE
-
-#define TC3589x_GPIODATA0 0xC0
-#define TC3589x_GPIOMASK0 0xc1
-#define TC3589x_GPIODATA1 0xC2
-#define TC3589x_GPIOMASK1 0xc3
-#define TC3589x_GPIODATA2 0xC4
-#define TC3589x_GPIOMASK2 0xC5
-
-#define TC3589x_GPIODIR0 0xC6
-#define TC3589x_GPIODIR1 0xC7
-#define TC3589x_GPIODIR2 0xC8
+#define TC3589x_GPIOODM0 0xE0
+#define TC3589x_GPIOODE0 0xE1
+#define TC3589x_GPIOODM1 0xE2
+#define TC3589x_GPIOODE1 0xE3
+#define TC3589x_GPIOODM2 0xE4
+#define TC3589x_GPIOODE2 0xE5
#define TC3589x_GPIOSYNC0 0xE6
#define TC3589x_GPIOSYNC1 0xE7
@@ -95,13 +113,6 @@ enum tx3589x_block {
#define TC3589x_GPIOWAKE1 0xEA
#define TC3589x_GPIOWAKE2 0xEB
-#define TC3589x_GPIOODM0 0xE0
-#define TC3589x_GPIOODE0 0xE1
-#define TC3589x_GPIOODM1 0xE2
-#define TC3589x_GPIOODE1 0xE3
-#define TC3589x_GPIOODM2 0xE4
-#define TC3589x_GPIOODE2 0xE5
-
#define TC3589x_INT_GPIIRQ 0
#define TC3589x_INT_TI0IRQ 1
#define TC3589x_INT_TI1IRQ 2
diff --git a/include/linux/mloader.h b/include/linux/mloader.h
new file mode 100644
index 00000000000..ceca3245856
--- /dev/null
+++ b/include/linux/mloader.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Ludovic Barre <ludovic.barre@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _MLOADER_H_
+#define _MLOADER_H_
+
+/* not used in ioctl-number.txt */
+#define ML_IO_NUMBER 0xFE
+
+#define ML_UPLOAD _IO(ML_IO_NUMBER, 1)
+#define ML_GET_NBIMAGES _IOR(ML_IO_NUMBER, 2, int)
+#define ML_GET_DUMPINFO _IOR(ML_IO_NUMBER, 3, struct dump_image*)
+
+#define MAX_NAME 16
+
+struct dump_image {
+ char name[MAX_NAME];
+ unsigned int offset;
+ unsigned int size;
+};
+
+#endif /* _MLOADER_H_ */
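A small user-space sketch of the mloader ioctl interface declared above; the device node path is an assumption.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mloader.h>

int main(void)
{
	int nb = 0;
	int fd = open("/dev/dump_mloader", O_RDONLY);	/* path is assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, ML_GET_NBIMAGES, &nb) == 0)
		printf("%d dump images available\n", nb);
	close(fd);
	return 0;
}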
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index ee2b0363c04..37f995176bb 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -318,6 +318,11 @@ struct mmc_host {
int detect_change; /* card detect flag */
struct mmc_hotplug hotplug;
+ struct delayed_work resume; /* deferred resume work */
+ unsigned int pm_state; /* used for deferred resume */
+#define MMC_HOST_DEFERRED_RESUME (1 << 0)
+#define MMC_HOST_NEEDS_RESUME (1 << 1)
+
const struct mmc_bus_ops *bus_ops; /* current bus driver */
unsigned int bus_refs; /* reference counter */
@@ -366,6 +371,7 @@ static inline void *mmc_priv(struct mmc_host *host)
extern int mmc_suspend_host(struct mmc_host *);
extern int mmc_resume_host(struct mmc_host *);
+extern void mmc_resume_host_sync(struct mmc_host *);
extern int mmc_power_save_host(struct mmc_host *host);
extern int mmc_power_restore_host(struct mmc_host *host);
@@ -464,4 +470,15 @@ static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
return host->ios.clock;
}
#endif
+
+static inline int mmc_host_deferred_resume(struct mmc_host *host)
+{
+ return host->pm_state & MMC_HOST_DEFERRED_RESUME;
+}
+
+static inline int mmc_host_needs_resume(struct mmc_host *host)
+{
+ return host->pm_state & MMC_HOST_NEEDS_RESUME;
+}
+
#endif /* LINUX_MMC_HOST_H */
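A minimal sketch, not taken from the patch, of how a host driver's resume path might honour the new deferred-resume flags; the function name is a placeholder.

#include <linux/mmc/host.h>

static int example_host_resume(struct mmc_host *mmc)
{
	/*
	 * With deferred resume enabled the core postpones the real
	 * resume until the card is next accessed, so skip it here.
	 */
	if (mmc_host_deferred_resume(mmc))
		return 0;

	return mmc_resume_host(mmc);
}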
diff --git a/include/linux/modem/m6718_spi/modem_char.h b/include/linux/modem/m6718_spi/modem_char.h
new file mode 100644
index 00000000000..04d82eaa03c
--- /dev/null
+++ b/include/linux/modem/m6718_spi/modem_char.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ * based on shrm_driver.h
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Modem IPC driver char interface header.
+ */
+#ifndef _MODEM_CHAR_H_
+#define _MODEM_CHAR_H_
+
+#include <linux/modem/m6718_spi/modem_driver.h>
+
+int modem_isa_init(struct modem_spi_dev *modem_spi_dev);
+void modem_isa_exit(struct modem_spi_dev *modem_spi_dev);
+
+int modem_isa_queue_msg(struct message_queue *q, u32 size);
+int modem_isa_msg_size(struct message_queue *q);
+int modem_isa_unqueue_msg(struct message_queue *q);
+void modem_isa_reset(struct modem_spi_dev *modem_spi_dev);
+int modem_get_cdev_index(u8 l2_header);
+int modem_get_cdev_l2header(u8 idx);
+
+#endif /* _MODEM_CHAR_H_ */
diff --git a/include/linux/modem/m6718_spi/modem_driver.h b/include/linux/modem/m6718_spi/modem_driver.h
new file mode 100644
index 00000000000..f3aae4a7116
--- /dev/null
+++ b/include/linux/modem/m6718_spi/modem_driver.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ * based on shrm_driver.h
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Modem IPC driver interface header.
+ */
+#ifndef _MODEM_DRIVER_H_
+#define _MODEM_DRIVER_H_
+
+#include <linux/device.h>
+#include <linux/modem/modem.h>
+#include <linux/cdev.h>
+#include <linux/spi/spi.h>
+
+
+/* driver L2 mux channels */
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_LOOPBACK
+#define MODEM_M6718_SPI_MAX_CHANNELS (9)
+#else
+#define MODEM_M6718_SPI_MAX_CHANNELS (3)
+#endif
+
+#define MODEM_M6718_SPI_CHN_ISI (0)
+/*#define MODEM_M6718_SPI_CHN_RPC (1) not supported */
+#define MODEM_M6718_SPI_CHN_AUDIO (2)
+/*#define MODEM_M6718_SPI_CHN_SECURITY (3) not supported */
+/* (4) not supported */
+#ifdef CONFIG_MODEM_M6718_SPI_ENABLE_FEATURE_LOOPBACK
+#define MODEM_M6718_SPI_CHN_MASTER_LOOPBACK0 (5)
+#define MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK0 (6)
+#define MODEM_M6718_SPI_CHN_MASTER_LOOPBACK1 (7)
+#define MODEM_M6718_SPI_CHN_SLAVE_LOOPBACK1 (8)
+#endif
+
+/**
+ * struct queue_element - information to add an element to queue
+ * @entry: list entry
+ * @offset: message offset
+ * @size: message size
+ * @no: total number of messages
+ */
+struct queue_element {
+ struct list_head entry;
+ u32 offset;
+ u32 size;
+ u32 no;
+};
+
+/**
+ * struct message_queue - ISI, RPC, AUDIO, SECURITY message queue information
+ * @channel: L2 mux channel served by this queue
+ * @fifo_base: pointer to the respective fifo base
+ * @size: size of the data to be read
+ * @free: free space in the queue
+ * @readptr: fifo read pointer
+ * @writeptr: fifo write pointer
+ * @no: total number of messages
+ * @update_lock: spinlock for protecting the queue read operation
+ * @q_rp: queue read pointer is valid
+ * @wq_readable: wait queue head
+ * @msg_list: message list
+ * @modem_spi_dev: pointer to modem device information structure
+ */
+struct message_queue {
+ u8 channel;
+ u8 *fifo_base;
+ u32 size;
+ u32 free;
+ u32 readptr;
+ u32 writeptr;
+ u32 no;
+ spinlock_t update_lock;
+ atomic_t q_rp;
+ wait_queue_head_t wq_readable;
+ struct list_head msg_list;
+ struct modem_spi_dev *modem_spi_dev;
+};
+
+/**
+ * struct isa_device_context - modem char interface device information
+ * @dl_queue: structure to store the queue related info
+ * @device_id: channel id (ISI, AUDIO, RPC, ...)
+ * @addr: device address
+ */
+struct isa_device_context {
+ struct message_queue dl_queue;
+ u8 device_id;
+ void *addr;
+};
+
+/**
+ * struct isa_driver_context - modem char interface driver information
+ * @is_open: flag to check the usage of queue
+ * @isadev: pointer to struct t_isadev_context
+ * @common_tx_lock: spinlock for protecting common channel
+ * @audio_tx_mutex: mutex for protecting audio channel
+ * @cdev: character device structure
+ * @modem_class: pointer to the class structure
+ */
+struct isa_driver_context {
+ atomic_t is_open[MODEM_M6718_SPI_MAX_CHANNELS];
+ struct isa_device_context *isadev;
+ spinlock_t common_tx_lock;
+ struct mutex audio_tx_mutex;
+ struct cdev cdev;
+ struct class *modem_class;
+};
+
+/**
+ * struct modem_spi_dev - modem device information
+ * @dev: pointer to device
+ * @ndev: pointer to net_device interface
+ * @modem: pointer to registered modem structure
+ * @isa_context: pointer to char device interface
+ * @netdev_flag_up: flag to indicate up/down of network device
+ * @msr_flag: flag to indicate modem-silent-reset is in progress
+ */
+struct modem_spi_dev {
+ struct device *dev;
+ struct net_device *ndev;
+ struct modem *modem;
+ struct isa_driver_context *isa_context;
+ int netdev_flag_up;
+ bool msr_flag;
+};
+
+/**
+ * struct modem_m6718_spi_link_gpio - gpio configuration for an IPC link
+ * @ss_pin: pins to use for slave-select
+ * @ss_active: active level for slave-select pin
+ * @int_pin: pin to use for slave-int (ready)
+ * @int_active: active level for slave-int
+ */
+struct modem_m6718_spi_link_gpio {
+ int ss_pin;
+ int ss_active;
+ int int_pin;
+ int int_active;
+};
+
+/**
+ * struct modem_m6718_spi_link_platform_data - IPC link data
+ * @id: link id
+ * @gpio: link gpio configuration
+ * @name: link name (to appear in debugfs)
+ */
+struct modem_m6718_spi_link_platform_data {
+ int id;
+ struct modem_m6718_spi_link_gpio gpio;
+#ifdef CONFIG_DEBUG_FS
+ const char *name;
+#endif
+};
+
+int modem_m6718_spi_receive(struct spi_device *sdev, u8 channel,
+ u32 len, void *data);
+int modem_m6718_spi_send(struct modem_spi_dev *modem_spi_dev, u8 channel,
+ u32 len, void *data);
+bool modem_m6718_spi_is_boot_done(void);
+
+#endif /* _MODEM_DRIVER_H_ */
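A hedged sketch of a caller using the M6718 SPI IPC API declared above; the buffer handling and error code choice are illustrative.

#include <linux/errno.h>
#include <linux/modem/m6718_spi/modem_driver.h>

static int example_send_audio(struct modem_spi_dev *msd, void *frame, u32 len)
{
	/* The link must have completed its boot handshake first. */
	if (!modem_m6718_spi_is_boot_done())
		return -EAGAIN;

	return modem_m6718_spi_send(msd, MODEM_M6718_SPI_CHN_AUDIO, len, frame);
}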
diff --git a/include/linux/modem/m6718_spi/modem_net.h b/include/linux/modem/m6718_spi/modem_net.h
new file mode 100644
index 00000000000..521103bf006
--- /dev/null
+++ b/include/linux/modem/m6718_spi/modem_net.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Chris Blair <chris.blair@stericsson.com> for ST-Ericsson
+ * based on shrm_net.h
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Modem IPC net device interface header.
+ */
+#ifndef _MODEM_NET_H_
+#define _MODEM_NET_H_
+
+#include <linux/modem/m6718_spi/modem_driver.h>
+
+#define MODEM_HLEN (1)
+#define PHONET_ALEN (1)
+
+#define PN_PIPE (0xD9)
+#define PN_DEV_HOST (0x00)
+#define PN_LINK_ADDR (0x26)
+#define PN_TX_QUEUE_LEN (3)
+
+#define RESOURCE_ID_INDEX (3)
+#define SRC_OBJ_INDEX (7)
+#define MSG_ID_INDEX (9)
+#define PIPE_HDL_INDEX (10)
+#define NETLINK_MODEM (20)
+
+/**
+ * struct modem_spi_net_dev - modem net interface device information
+ * @modem_spi_dev: pointer to the modem spi device information structure
+ * @iface_num: flag used to indicate the up/down of netdev
+ */
+struct modem_spi_net_dev {
+ struct modem_spi_dev *modem_spi_dev;
+ unsigned int iface_num;
+};
+
+int modem_net_init(struct modem_spi_dev *modem_spi_dev);
+void modem_net_exit(struct modem_spi_dev *modem_spi_dev);
+
+int modem_net_receive(struct net_device *dev);
+int modem_net_suspend(struct net_device *dev);
+int modem_net_resume(struct net_device *dev);
+int modem_net_start(struct net_device *dev);
+int modem_net_restart(struct net_device *dev);
+int modem_net_stop(struct net_device *dev);
+
+#endif /* _MODEM_NET_H_ */
diff --git a/include/linux/modem/modem.h b/include/linux/modem/modem.h
new file mode 100644
index 00000000000..c9614a9b061
--- /dev/null
+++ b/include/linux/modem/modem.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
+ *
+ * Heavily adapted from Regulator framework
+ */
+#ifndef __MODEM_H__
+#define __MODEM_H__
+
+#include <linux/device.h>
+
+struct modem_dev;
+
+struct modem_ops {
+ void (*request)(struct modem_dev *);
+ void (*release)(struct modem_dev *);
+ int (*is_requested)(struct modem_dev *);
+};
+
+struct modem_desc {
+ const char *name;
+ int id;
+ struct modem_ops *ops;
+ struct module *owner;
+};
+
+struct modem_dev {
+ struct modem_desc *desc;
+ int use_count;
+ int open_count;
+ int exclusive;
+
+ struct list_head modem_list;
+
+ struct list_head client_list;
+
+ struct blocking_notifier_head notifier;
+ struct mutex mutex;
+ struct module *owner;
+ struct device dev;
+ void *modem_data;
+};
+
+#ifdef CONFIG_MODEM
+struct modem_dev *modem_register(struct modem_desc *modem_desc,
+ struct device *dev,
+ void *driver_data);
+void modem_unregister(struct modem_dev *mdev);
+
+#else
+static inline struct modem_dev *modem_register(struct modem_desc *modem_desc,
+ struct device *dev, void *driver_data)
+{
+ return NULL;
+}
+
+static inline void modem_unregister(struct modem_dev *mdev)
+{
+}
+#endif
+#endif /* __MODEM_H__ */
diff --git a/include/linux/modem/modem_client.h b/include/linux/modem/modem_client.h
new file mode 100644
index 00000000000..21f04798490
--- /dev/null
+++ b/include/linux/modem/modem_client.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
+ *
+ * Heavily adapted from Regulator framework
+ */
+#ifndef __MODEM_CLIENT_H__
+#define __MODEM_CLIENT_H__
+
+#include <linux/device.h>
+
+struct modem;
+
+#ifdef CONFIG_MODEM
+struct modem *modem_get(struct device *dev, const char *id);
+void modem_put(struct modem *modem);
+void modem_request(struct modem *modem);
+void modem_release(struct modem *modem);
+int modem_is_requested(struct modem *modem);
+int modem_get_usage(struct modem *modem);
+
+#else
+
+static inline struct modem *modem_get(struct device *dev, const char *id)
+{
+ return NULL;
+}
+
+static inline void modem_put(struct modem *modem)
+{
+}
+
+static inline void modem_request(struct modem *modem)
+{
+}
+
+static inline void modem_release(struct modem *modem)
+{
+}
+
+static inline int modem_is_requested(struct modem *modem)
+{
+ return 0;
+}
+
+static inline int modem_get_usage(struct modem *modem)
+{
+ return 0;
+}
+#endif
+#endif /* __MODEM_CLIENT_H__ */
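A minimal sketch of the client API above: take a reference to a named modem and request it around the accesses that need it. The "m6718" id string is an assumption.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/modem/modem_client.h>

static struct modem *ex_modem;

static int example_claim_modem(struct device *dev)
{
	ex_modem = modem_get(dev, "m6718");	/* id string assumed */
	if (IS_ERR_OR_NULL(ex_modem))
		return -ENODEV;

	/* Usage is counted; pair every request with a release. */
	modem_request(ex_modem);
	return 0;
}

static void example_release_modem(void)
{
	modem_release(ex_modem);
	modem_put(ex_modem);
}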
diff --git a/include/linux/modem/shrm/shrm.h b/include/linux/modem/shrm/shrm.h
new file mode 100644
index 00000000000..6deeeb16ba8
--- /dev/null
+++ b/include/linux/modem/shrm/shrm.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson
+ * Author: Kumar Sanghavi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __SHM_DRIVER_IF_H__
+#define __SHM_DRIVER_IF_H__
+
+#include <linux/device.h>
+
+/* forward declaration */
+struct shrm_dev;
+
+typedef void (*rx_cb)(void *data, unsigned int length);
+typedef void (*received_msg_handler)(unsigned char l2_header,
+ void *msg_ptr, unsigned int length,
+ struct shrm_dev *shrm);
+
+#endif
diff --git a/include/linux/modem/shrm/shrm_config.h b/include/linux/modem/shrm/shrm_config.h
new file mode 100644
index 00000000000..a82b35ef77b
--- /dev/null
+++ b/include/linux/modem/shrm/shrm_config.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson
+ * Author: Kumar Sanghavi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __SHRM_CONFIG_H
+#define __SHRM_CONFIG_H
+
+
+/*
+Note: the modem needs to define IPC as a non-cacheable (NC) area.
+On Cortex-R4 the MPU requires that the base address of an NC area is
+aligned on a region-sized boundary. On the modem side only one NC area
+can be defined, hence the whole IPC area must be defined as NC (at least).
+
+*/
+
+/* cache line size = 32 bytes */
+#define SHM_CACHE_LINE 32
+#define SHM_PTR_SIZE 4
+
+/* FIFO 0 address configuration */
+/* ---------------------------- */
+/* 128KB */
+#define SHM_FIFO_0_SIZE (128*1024)
+
+
+/* == APE addresses == */
+#ifdef CONFIG_SHRM_V1_UPDATES_VERSION
+#define SHM_IPC_BASE_AMCU 0x06F80000
+#else
+#define SHM_IPC_BASE_AMCU 0x06000000
+#endif
+
+/* offset pointers */
+#define SHM_ACFIFO_0_WRITE_AMCU SHM_IPC_BASE_AMCU
+#define SHM_ACFIFO_0_READ_AMCU (SHM_ACFIFO_0_WRITE_AMCU + SHM_PTR_SIZE)
+#define SHM_CAFIFO_0_WRITE_AMCU (SHM_ACFIFO_0_WRITE_AMCU + SHM_CACHE_LINE)
+#define SHM_CAFIFO_0_READ_AMCU (SHM_CAFIFO_0_WRITE_AMCU + SHM_PTR_SIZE)
+/* FIFO start */
+#define SHM_ACFIFO_0_START_AMCU (SHM_CAFIFO_0_WRITE_AMCU + SHM_CACHE_LINE)
+#define SHM_CAFIFO_0_START_AMCU (SHM_ACFIFO_0_START_AMCU + SHM_FIFO_0_SIZE)
+
+
+/* == CMT addresses ==*/
+#define SHM_IPC_BASE_CMCU (SHM_IPC_BASE_AMCU+0x08000000)
+/* offset pointers */
+#define SHM_ACFIFO_0_WRITE_CMCU SHM_IPC_BASE_CMCU
+#define SHM_ACFIFO_0_READ_CMCU (SHM_ACFIFO_0_WRITE_CMCU + SHM_PTR_SIZE)
+#define SHM_CAFIFO_0_WRITE_CMCU (SHM_ACFIFO_0_WRITE_CMCU + SHM_CACHE_LINE)
+#define SHM_CAFIFO_0_READ_CMCU (SHM_CAFIFO_0_WRITE_CMCU + SHM_PTR_SIZE)
+/* FIFO*/
+#define SHM_ACFIFO_0_START_CMCU (SHM_CAFIFO_0_WRITE_CMCU + SHM_CACHE_LINE)
+#define SHM_CAFIFO_0_START_CMCU (SHM_ACFIFO_0_START_CMCU + SHM_FIFO_0_SIZE)
+
+
+/* ADSP addresses*/
+#define SHM_ACFIFO_0_START_ADSP 0x0
+#define SHM_CAFIFO_0_START_ADSP 0x0
+#define SHM_ACFIFO_0_WRITE_ADSP 0x0
+#define SHM_ACFIFO_0_READ_ADSP 0x0
+#define SHM_CAFIFO_0_WRITE_ADSP 0x0
+#define SHM_CAFIFO_0_READ_ADSP 0x0
+
+/* FIFO 1 address configuration */
+/* ---------------------------- */
+
+
+/* FIFO 1 - 4K */
+#define SHM_FIFO_1_SIZE (4*1024)
+
+
+/* == APE addresses == */
+#define SHM_ACFIFO_1_WRITE_AMCU (SHM_CAFIFO_0_START_AMCU + SHM_FIFO_0_SIZE)
+#define SHM_ACFIFO_1_READ_AMCU (SHM_ACFIFO_1_WRITE_AMCU + SHM_PTR_SIZE)
+#define SHM_CAFIFO_1_WRITE_AMCU (SHM_ACFIFO_1_WRITE_AMCU + SHM_CACHE_LINE)
+#define SHM_CAFIFO_1_READ_AMCU (SHM_CAFIFO_1_WRITE_AMCU + SHM_PTR_SIZE)
+/* FIFO*/
+#define SHM_ACFIFO_1_START_AMCU (SHM_CAFIFO_1_WRITE_AMCU + SHM_CACHE_LINE)
+#define SHM_CAFIFO_1_START_AMCU (SHM_ACFIFO_1_START_AMCU + SHM_FIFO_1_SIZE)
+
+
+/* == CMT addresses ==*/
+#define SHM_ACFIFO_1_WRITE_CMCU (SHM_CAFIFO_0_START_CMCU + SHM_FIFO_0_SIZE)
+#define SHM_ACFIFO_1_READ_CMCU (SHM_ACFIFO_1_WRITE_CMCU + SHM_PTR_SIZE)
+#define SHM_CAFIFO_1_WRITE_CMCU (SHM_ACFIFO_1_WRITE_CMCU + SHM_CACHE_LINE)
+#define SHM_CAFIFO_1_READ_CMCU (SHM_CAFIFO_1_WRITE_CMCU + SHM_PTR_SIZE)
+/* FIFO1 start */
+#define SHM_ACFIFO_1_START_CMCU (SHM_CAFIFO_1_WRITE_CMCU + SHM_CACHE_LINE)
+#define SHM_CAFIFO_1_START_CMCU (SHM_ACFIFO_1_START_CMCU + SHM_FIFO_1_SIZE)
+
+
+/* ADSP addresses*/
+#define SHM_ACFIFO_1_START_ADSP 0x0
+#define SHM_CAFIFO_1_START_ADSP 0x0
+#define SHM_ACFIFO_1_WRITE_ADSP 0x0
+#define SHM_ACFIFO_1_READ_ADSP 0x0
+#define SHM_CAFIFO_1_WRITE_ADSP 0x0
+#define SHM_CAFIFO_1_READ_ADSP 0x0
+
+
+#define U8500_SHM_FIFO_APE_COMMON_BASE (SHM_ACFIFO_0_START_AMCU)
+#define U8500_SHM_FIFO_CMT_COMMON_BASE (SHM_CAFIFO_0_START_AMCU)
+#define U8500_SHM_FIFO_APE_AUDIO_BASE (SHM_ACFIFO_1_START_AMCU)
+#define U8500_SHM_FIFO_CMT_AUDIO_BASE (SHM_CAFIFO_1_START_AMCU)
+
+#endif /* __SHRM_CONFIG_H */
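For orientation, a small sketch (not part of the patch) that prints where the derived FIFO base macros end up; this can help when checking that the layout above matches the modem side.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/modem/shrm/shrm_config.h>

static int __init example_dump_shrm_layout(void)
{
	pr_info("APE common FIFO 0x%08x, APE audio FIFO 0x%08x\n",
		U8500_SHM_FIFO_APE_COMMON_BASE,
		U8500_SHM_FIFO_APE_AUDIO_BASE);
	return 0;
}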
diff --git a/include/linux/modem/shrm/shrm_driver.h b/include/linux/modem/shrm/shrm_driver.h
new file mode 100644
index 00000000000..96b5c594d34
--- /dev/null
+++ b/include/linux/modem/shrm/shrm_driver.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson
+ * Author: Kumar Sanghavi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __SHRM_DRIVER_H__
+#define __SHRM_DRIVER_H__
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/modem/modem_client.h>
+#include <linux/modem/shrm/shrm.h>
+#include <linux/cdev.h>
+#include <linux/kthread.h>
+
+#define ISA_DEVICES 8
+
+#define BOOT_INIT (0)
+#define BOOT_INFO_SYNC (1)
+#define BOOT_DONE (2)
+#define BOOT_UNKNOWN (3)
+
+/**
+ * struct shrm_dev - shrm device information
+ * @ca_wake_irq: CMT wake interrupt number
+ * @ac_read_notif_0_irq: ape-cmt common channel read notify interrupt
+ * @ac_read_notif_1_irq: ape-cmt audio channel read notify interrupt
+ * @ca_msg_pending_notif_0_irq: cmt-ape common channel msg pending interrupt
+ * @ca_msg_pending_notif_1_irq: cmt-ape audio channel msg pending interrupt
+ * @intr_base: interrupt base register address
+ * @ape_common_fifo_base: ape side common channel fifo base addr
+ * @ape_audio_fifo_base: ape side audio channel fifo base addr
+ * @cmt_common_fifo_base: cmt side common channel fifo base addr
+ * @cmt_audio_fifo_base: cmt side audio channel fifo base addr
+ * @ape_common_fifo_base_phy: physical addr of ape common fifo
+ * @ape_audio_fifo_base_phy: physical addr of ape audio fifo
+ * @cmt_common_fifo_base_phy: physical addr of cmt common fifo
+ * @cmt_audio_fifo_base_phy: physical addr of cmt audio fifo
+ * @ape_common_fifo_size: ape side common channel fifo size
+ * @ape_audio_fifo_size: ape side audio channel fifo size
+ * @cmt_common_fifo_size: cmt side common channel fifo size
+ * @cmt_audio_fifo_size: cmt side audio channel fifo size
+ * @netdev_flag_up: flag to indicate up/down of network device
+ * @msr_flag: flag to check ongoing MSR sequence
+ * @ac_common_shared_wptr: ape-cmt common channel write pointer
+ * @ac_common_shared_rptr: ape-cmt common channel read pointer
+ * @ca_common_shared_wptr: cmt-ape common channel write pointer
+ * @ca_common_shared_rptr: cmt-ape common channel read pointer
+ * @ac_audio_shared_wptr: ape-cmt audio channel write pointer
+ * @ac_audio_shared_rptr: ape-cmt audio channel read pointer
+ * @ca_audio_shared_wptr: cmt-ape audio channel write pointer
+ * @ca_audio_shared_rptr: cmt-ape audio channel read pointer
+ * @dev: pointer to the driver device
+ * @ndev: pointer to the network device structure
+ * @modem: pointer to struct modem
+ * @isa_context: pointer to the isa_driver_context structure
+ * @shm_common_ch_wr_kw: kthread worker for writing to common channel
+ * @shm_common_ch_wr_kw_task: task for writing to common channel
+ * @shm_audio_ch_wr_kw: kthread worker for writing to audio channel
+ * @shm_audio_ch_wr_kw_task: task for writing to audio channel
+ * @shm_ac_wake_kw: kthread worker for receiving ape-cmt wake requests
+ * @shm_ac_wake_kw_task: task for receiving ape-cmt wake requests
+ * @shm_ca_wake_kw: kthread worker for receiving cmt-ape wake requests
+ * @shm_ca_wake_kw_task: task for receiving cmt-ape wake requests
+ * @shm_ac_sleep_kw: kthread worker for receiving ape-cmt sleep requests
+ * @shm_ac_sleep_kw_task: task for receiving ape-cmt sleep requests
+ * @send_ac_msg_pend_notify_0: work for handling pending message on common
+ * channel
+ * @send_ac_msg_pend_notify_1: work for handling pending message on audio
+ * channel
+ * @shm_ac_wake_req: work to send ape-cmt wake request
+ * @shm_ca_wake_req: work to send cmt-ape wake request
+ * @shm_ca_sleep_req: work to send cmt-ape sleep request
+ * @shm_ac_sleep_req: work to send ape-cmt sleep request
+ * @shm_mod_reset_req: work to send a reset request to modem
+ */
+struct shrm_dev {
+ u8 ca_wake_irq;
+ u8 ac_read_notif_0_irq;
+ u8 ac_read_notif_1_irq;
+ u8 ca_msg_pending_notif_0_irq;
+ u8 ca_msg_pending_notif_1_irq;
+ void __iomem *intr_base;
+ void __iomem *ape_common_fifo_base;
+ void __iomem *ape_audio_fifo_base;
+ void __iomem *cmt_common_fifo_base;
+ void __iomem *cmt_audio_fifo_base;
+
+ u32 *ape_common_fifo_base_phy;
+ u32 *ape_audio_fifo_base_phy;
+ u32 *cmt_common_fifo_base_phy;
+ u32 *cmt_audio_fifo_base_phy;
+
+ int ape_common_fifo_size;
+ int ape_audio_fifo_size;
+ int cmt_common_fifo_size;
+ int cmt_audio_fifo_size;
+ int netdev_flag_up;
+ int msr_flag;
+
+ void __iomem *ac_common_shared_wptr;
+ void __iomem *ac_common_shared_rptr;
+ void __iomem *ca_common_shared_wptr;
+ void __iomem *ca_common_shared_rptr;
+
+ void __iomem *ac_audio_shared_wptr;
+ void __iomem *ac_audio_shared_rptr;
+ void __iomem *ca_audio_shared_wptr;
+ void __iomem *ca_audio_shared_rptr;
+
+ struct device *dev;
+ struct net_device *ndev;
+ struct modem *modem;
+ struct isa_driver_context *isa_context;
+ struct kthread_worker shm_common_ch_wr_kw;
+ struct task_struct *shm_common_ch_wr_kw_task;
+ struct kthread_worker shm_audio_ch_wr_kw;
+ struct task_struct *shm_audio_ch_wr_kw_task;
+ struct kthread_worker shm_ac_wake_kw;
+ struct task_struct *shm_ac_wake_kw_task;
+ struct kthread_worker shm_ca_wake_kw;
+ struct task_struct *shm_ca_wake_kw_task;
+ struct kthread_worker shm_ac_sleep_kw;
+ struct task_struct *shm_ac_sleep_kw_task;
+ struct kthread_work send_ac_msg_pend_notify_0;
+ struct kthread_work send_ac_msg_pend_notify_1;
+ struct kthread_work shm_ac_wake_req;
+ struct kthread_work shm_ca_wake_req;
+ struct kthread_work shm_ca_sleep_req;
+ struct kthread_work shm_ac_sleep_req;
+ struct kthread_work shm_mod_reset_req;
+};
+
+/**
+ * struct queue_element - information to add an element to queue
+ * @entry: list entry
+ * @offset: message offset
+ * @size: message size
+ * @no: total number of messages
+ */
+struct queue_element {
+ struct list_head entry;
+ u32 offset;
+ u32 size;
+ u32 no;
+};
+
+/**
+ * struct message_queue - ISI, RPC, AUDIO, SECURITY message queue information
+ * @fifo_base: pointer to the respective fifo base
+ * @size: size of the data to be read
+ * @readptr: fifo read pointer
+ * @writeptr: fifo write pointer
+ * @no: total number of messages
+ * @update_lock: spinlock for protecting the queue read operation
+ * @q_rp: queue write pointer
+ * @wq_readable: wait queue head
+ * @msg_list: message list
+ * @shrm: pointer to shrm device information structure
+ */
+struct message_queue {
+ u8 *fifo_base;
+ u32 size;
+ u32 readptr;
+ u32 writeptr;
+ u32 no;
+ spinlock_t update_lock;
+ atomic_t q_rp;
+ wait_queue_head_t wq_readable;
+ struct list_head msg_list;
+ struct shrm_dev *shrm;
+};
+
+/**
+ * struct isadev_context - shrm char interface context
+ * @dl_queue: structure to store the queue related info
+ * @device_id: message id (ISI, RPC, AUDIO, SECURITY)
+ * @addr: device addresses.
+ */
+struct isadev_context {
+ struct message_queue dl_queue;
+ u8 device_id;
+ void *addr;
+};
+
+/**
+ * struct isa_driver_context - shrm char interface device information
+ * @is_open: flag to check the usage of queue
+ * @isadev: pointer to struct t_isadev_context
+ * @common_tx: spinlock for protecting common channel
+ * @tx_audio_mutex: mutex for protecting audio channel
+ * @cdev: character device structure
+ * @shm_class: pointer to the class structure
+ */
+struct isa_driver_context {
+ atomic_t is_open[ISA_DEVICES];
+ struct isadev_context *isadev;
+ spinlock_t common_tx;
+ struct mutex tx_audio_mutex;
+ struct cdev cdev;
+ struct class *shm_class;
+};
+
+#endif
diff --git a/include/linux/modem/shrm/shrm_net.h b/include/linux/modem/shrm/shrm_net.h
new file mode 100644
index 00000000000..a97b276ee15
--- /dev/null
+++ b/include/linux/modem/shrm/shrm_net.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2009
+ *
+ * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __SHRM_NET_H
+#define __SHRM_NET_H
+
+#define SHRM_HLEN 1
+#define PHONET_ALEN 1
+
+#define PN_PIPE 0xD9
+#define PN_DEV_HOST 0x00
+#define PN_LINK_ADDR 0x26
+#define PN_TX_QUEUE_LEN 3
+
+#define RESOURCE_ID_INDEX 3
+#define SRC_OBJ_INDEX 7
+#define MSG_ID_INDEX 9
+#define PIPE_HDL_INDEX 10
+#define NETLINK_SHRM 20
+
+/**
+ * struct shrm_net_iface_priv - shrm net interface device information
+ * @shrm_device: pointer to the shrm device information structure
+ * @iface_num: flag used to indicate the up/down of netdev
+ */
+struct shrm_net_iface_priv {
+ struct shrm_dev *shrm_device;
+ unsigned int iface_num;
+};
+
+int shrm_register_netdev(struct shrm_dev *shrm_dev_data);
+int shrm_net_receive(struct net_device *dev);
+int shrm_suspend_netdev(struct net_device *dev);
+int shrm_resume_netdev(struct net_device *dev);
+int shrm_stop_netdev(struct net_device *dev);
+int shrm_restart_netdev(struct net_device *dev);
+int shrm_start_netdev(struct net_device *dev);
+void shrm_unregister_netdev(struct shrm_dev *shrm_dev_data);
+
+#endif /* __SHRM_NET_H */
diff --git a/include/linux/modem/shrm/shrm_private.h b/include/linux/modem/shrm/shrm_private.h
new file mode 100644
index 00000000000..23caabf5a06
--- /dev/null
+++ b/include/linux/modem/shrm/shrm_private.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson
+ * Author: Kumar Sanghavi <kumar.sanghvi@stericsson.com> for ST-Ericsson
+ * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __SHRM_PRIVATE_INCLUDED
+#define __SHRM_PRIVATE_INCLUDED
+
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/modem/shrm/shrm.h>
+
+#define GOP_OUTPUT_REGISTER_BASE (0x0)
+#define GOP_SET_REGISTER_BASE (0x4)
+#define GOP_CLEAR_REGISTER_BASE (0x8)
+#define GOP_TOGGLE_REGISTER_BASE (0xc)
+
+
+#define GOP_AUDIO_AC_READ_NOTIFICATION_BIT (0)
+#define GOP_AUDIO_CA_MSG_PENDING_NOTIFICATION_BIT (1)
+#define GOP_COMMON_AC_READ_NOTIFICATION_BIT (2)
+#define GOP_COMMON_CA_MSG_PENDING_NOTIFICATION_BIT (3)
+#define GOP_CA_WAKE_REQ_BIT (7)
+#define GOP_AUDIO_CA_READ_NOTIFICATION_BIT (23)
+#define GOP_AUDIO_AC_MSG_PENDING_NOTIFICATION_BIT (24)
+#define GOP_COMMON_CA_READ_NOTIFICATION_BIT (25)
+#define GOP_COMMON_AC_MSG_PENDING_NOTIFICATION_BIT (26)
+#define GOP_CA_WAKE_ACK_BIT (27)
+
+#define L2_MSG_MAPID_OFFSET (24)
+#define L1_MSG_MAPID_OFFSET (28)
+
+#define SHRM_SLEEP_STATE (0)
+#define SHRM_PTR_FREE (1)
+#define SHRM_PTR_BUSY (2)
+#define SHRM_IDLE (3)
+
+#define ISI_MESSAGING (0)
+#define RPC_MESSAGING (1)
+#define AUDIO_MESSAGING (2)
+#define SECURITY_MESSAGING (3)
+#define COMMON_LOOPBACK_MESSAGING (0xC0)
+#define AUDIO_LOOPBACK_MESSAGING (0x80)
+#define CIQ_MESSAGING (0xC3)
+#define RTC_CAL_MESSAGING (0xC8)
+
+#define COMMON_CHANNEL 0
+#define AUDIO_CHANNEL 1
+
+typedef void (*MSG_PENDING_NOTIF)(const u32 Wptr);
+
+/**
+ * struct fifo_write_params - parameters used for FIFO write operation.
+ * @writer_local_rptr: pointer to local read buffer
+ * @writer_local_wptr: pointer to local write buffer
+ * @shared_wptr: write pointer shared by cmt and ape
+ * @shared_rptr: read pointer shared by cmt and ape
+ * @availablesize: available memory in fifo
+ * @end_addr_fifo: fifo end addr
+ * @fifo_virtual_addr: fifo virtual addr
+ * @fifo_update_lock: spin lock to update fifo.
+ *
+ * On writing a message to the FIFO, the message has to be read by the
+ * modem before the next message is written to the FIFO. In order to
+ * overcome this, local write and read pointers are used internally.
+ */
+struct fifo_write_params {
+ u32 writer_local_rptr;
+ u32 writer_local_wptr;
+ u32 shared_wptr;
+ u32 shared_rptr;
+ u32 availablesize;
+ u32 end_addr_fifo;
+ u32 *fifo_virtual_addr;
+ spinlock_t fifo_update_lock;
+};
+
+/**
+ * struct fifo_read_params - parameters used for FIFO read operation
+ * @reader_local_rptr: pointer to local read buffer
+ * @reader_local_wptr: pointer to local write buffer
+ * @shared_wptr: write pointer shared by cmt and ape
+ * @shared_rptr: read pointer shared by cmt and ape
+ * @availablesize: available memory in fifo
+ * @end_addr_fifo: fifo end addr
+ * @fifo_virtual_addr: fifo virtual addr
+ */
+struct fifo_read_params {
+ u32 reader_local_rptr;
+ u32 reader_local_wptr;
+ u32 shared_wptr;
+ u32 shared_rptr;
+ u32 availablesize;
+ u32 end_addr_fifo;
+ u32 *fifo_virtual_addr;
+
+};
+
+int shrm_protocol_init(struct shrm_dev *shrm,
+ received_msg_handler common_rx_handler,
+ received_msg_handler audio_rx_handler);
+void shrm_protocol_deinit(struct shrm_dev *shrm);
+void shm_fifo_init(struct shrm_dev *shrm);
+int shm_write_msg_to_fifo(struct shrm_dev *shrm, u8 channel,
+ u8 l2header, void *addr, u32 length);
+int shm_write_msg(struct shrm_dev *shrm,
+ u8 l2_header, void *addr, u32 length);
+
+u8 is_the_only_one_unread_message(struct shrm_dev *shrm,
+ u8 channel, u32 length);
+u8 read_remaining_messages_common(void);
+u8 read_remaining_messages_audio(void);
+u8 read_one_l2msg_audio(struct shrm_dev *shrm,
+ u8 *p_l2_msg, u32 *p_len);
+u8 read_one_l2msg_common(struct shrm_dev *shrm,
+ u8 *p_l2_msg, u32 *p_len);
+void receive_messages_common(struct shrm_dev *shrm);
+void receive_messages_audio(struct shrm_dev *shrm);
+
+void update_ac_common_local_rptr(struct shrm_dev *shrm);
+void update_ac_audio_local_rptr(struct shrm_dev *shrm);
+void update_ca_common_local_wptr(struct shrm_dev *shrm);
+void update_ca_audio_local_wptr(struct shrm_dev *shrm);
+void update_ac_common_shared_wptr(struct shrm_dev *shrm);
+void update_ac_audio_shared_wptr(struct shrm_dev *shrm);
+void update_ca_common_shared_rptr(struct shrm_dev *shrm);
+void update_ca_audio_shared_rptr(struct shrm_dev *shrm);
+
+
+void get_writer_pointers(u8 msg_type, u32 *WriterLocalRptr, \
+ u32 *WriterLocalWptr, u32 *SharedWptr);
+void get_reader_pointers(u8 msg_type, u32 *ReaderLocalRptr, \
+ u32 *ReaderLocalWptr, u32 *SharedRptr);
+u8 read_boot_info_req(struct shrm_dev *shrm,
+ u32 *pConfig,
+ u32 *pVersion);
+void write_boot_info_resp(struct shrm_dev *shrm, u32 Config,
+ u32 Version);
+
+void send_ac_msg_pending_notification_0(struct shrm_dev *shrm);
+void send_ac_msg_pending_notification_1(struct shrm_dev *shrm);
+void ca_msg_read_notification_0(struct shrm_dev *shrm);
+void ca_msg_read_notification_1(struct shrm_dev *shrm);
+
+void set_ca_msg_0_read_notif_send(u8 val);
+u8 get_ca_msg_0_read_notif_send(void);
+void set_ca_msg_1_read_notif_send(u8 val);
+u8 get_ca_msg_1_read_notif_send(void);
+
+irqreturn_t ca_wake_irq_handler(int irq, void *ctrlr);
+irqreturn_t ac_read_notif_0_irq_handler(int irq, void *ctrlr);
+irqreturn_t ac_read_notif_1_irq_handler(int irq, void *ctrlr);
+irqreturn_t ca_msg_pending_notif_0_irq_handler(int irq, void *ctrlr);
+irqreturn_t ca_msg_pending_notif_1_irq_handler(int irq, void *ctrlr);
+
+void shm_ca_msgpending_0_tasklet(unsigned long);
+void shm_ca_msgpending_1_tasklet(unsigned long);
+void shm_ac_read_notif_0_tasklet(unsigned long);
+void shm_ac_read_notif_1_tasklet(unsigned long);
+void shm_ca_wake_req_tasklet(unsigned long);
+
+u8 get_boot_state(void);
+
+int get_ca_wake_req_state(void);
+
+/* shrm character interface */
+int isa_init(struct shrm_dev *shrm);
+void isa_exit(struct shrm_dev *shrm);
+int add_msg_to_queue(struct message_queue *q, u32 size);
+ssize_t isa_read(struct file *filp, char __user *buf, size_t len,
+ loff_t *ppos);
+int get_size_of_new_msg(struct message_queue *q);
+int remove_msg_from_queue(struct message_queue *q);
+void shrm_char_reset_queues(struct shrm_dev *shrm);
+int shrm_get_cdev_index(u8 l2_header);
+int shrm_get_cdev_l2header(u8 idx);
+
+#endif
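A hedged sketch of sending an ISI frame through the SHRM protocol helper declared above; buffer ownership and error handling are left out.

#include <linux/modem/shrm/shrm_driver.h>
#include <linux/modem/shrm/shrm_private.h>

static int example_send_isi(struct shrm_dev *shrm, void *msg, u32 len)
{
	/* The L2 header selects the queue; ISI goes to the common channel. */
	return shm_write_msg(shrm, ISI_MESSAGING, msg, len);
}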
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index c47f4d60db0..e05b8f5a766 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -169,7 +169,7 @@ struct kparam_array
/* We don't get oldget: it's often a new-style param_get_uint, etc. */
static inline int
-__check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
+__check_old_set_param(int (*oldset)(const char *, const struct kernel_param *))
{
return 0;
}
diff --git a/include/linux/regulator/ab5500.h b/include/linux/regulator/ab5500.h
new file mode 100644
index 00000000000..b5a8dec0be6
--- /dev/null
+++ b/include/linux/regulator/ab5500.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef __LINUX_REGULATOR_AB5500_H
+#define __LINUX_REGULATOR_AB5500_H
+
+enum ab5500_regulator_id {
+ AB5500_LDO_G,
+ AB5500_LDO_H,
+ AB5500_LDO_K,
+ AB5500_LDO_L,
+ AB5500_LDO_VDIGMIC,
+ AB5500_LDO_SIM,
+ AB5500_BIAS2,
+ AB5500_NUM_REGULATORS,
+};
+
+struct regulator_init_data;
+
+struct ab5500_regulator_data {
+ bool off_is_lowpower;
+};
+
+struct ab5500_regulator_platform_data {
+ struct regulator_init_data *regulator;
+ struct ab5500_regulator_data *data;
+ int num_regulator;
+};
+
+#endif
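A hypothetical board wiring of the AB5500 regulator platform data; the constraints shown are placeholders rather than real limits.

#include <linux/kernel.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/ab5500.h>

static struct regulator_init_data ex_ab5500_regulators[AB5500_NUM_REGULATORS] = {
	[AB5500_LDO_G] = {
		.constraints = {
			.min_uV = 1200000,	/* placeholder */
			.max_uV = 2910000,	/* placeholder */
			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
		},
	},
};

static struct ab5500_regulator_platform_data ex_ab5500_regulator_data = {
	.regulator	= ex_ab5500_regulators,
	.num_regulator	= ARRAY_SIZE(ex_ab5500_regulators),
};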
diff --git a/include/linux/regulator/ab8500-debug.h b/include/linux/regulator/ab8500-debug.h
new file mode 100644
index 00000000000..01655fc7fc1
--- /dev/null
+++ b/include/linux/regulator/ab8500-debug.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ */
+
+#ifndef __LINUX_MFD_AB8500_REGULATOR_DEBUG_H
+#define __LINUX_MFD_AB8500_REGULATOR_DEBUG_H
+
+#ifdef CONFIG_REGULATOR_AB8500_DEBUG
+/* AB8500 debug force/restore functions */
+void ab8500_regulator_debug_force(void);
+void ab8500_regulator_debug_restore(void);
+#else
+static inline void ab8500_regulator_debug_force(void) {}
+static inline void ab8500_regulator_debug_restore(void) {}
+#endif
+
+#endif
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index 76579f964a2..1c721867919 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -10,6 +10,8 @@
#ifndef __LINUX_MFD_AB8500_REGULATOR_H
#define __LINUX_MFD_AB8500_REGULATOR_H
+#include <linux/platform_device.h>
+
/* AB8500 regulators */
enum ab8500_regulator_id {
AB8500_LDO_AUX1,
@@ -17,25 +19,28 @@ enum ab8500_regulator_id {
AB8500_LDO_AUX3,
AB8500_LDO_INTCORE,
AB8500_LDO_TVOUT,
- AB8500_LDO_USB,
AB8500_LDO_AUDIO,
AB8500_LDO_ANAMIC1,
AB8500_LDO_ANAMIC2,
AB8500_LDO_DMIC,
AB8500_LDO_ANA,
+ AB8500_SYSCLKREQ_2,
+ AB8500_SYSCLKREQ_4,
AB8500_NUM_REGULATORS,
};
/* AB8500 register initialization */
struct ab8500_regulator_reg_init {
int id;
+ u8 mask;
u8 value;
};
-#define INIT_REGULATOR_REGISTER(_id, _value) \
- { \
- .id = _id, \
- .value = _value, \
+#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \
+ { \
+ .id = _id, \
+ .mask = _mask, \
+ .value = _value, \
}
/* AB8500 registers */
@@ -67,8 +72,38 @@ enum ab8500_regulator_reg {
AB8500_REGUCTRL2SPARE,
AB8500_REGUCTRLDISCH,
AB8500_REGUCTRLDISCH2,
- AB8500_VSMPS1SEL1,
AB8500_NUM_REGULATOR_REGISTERS,
};
+/* AB8500 external regulators */
+struct ab8500_ext_regulator_cfg {
+ bool hwreq; /* requires hw mode or high power mode */
+};
+
+enum ab8500_ext_regulator_id {
+ AB8500_EXT_SUPPLY1,
+ AB8500_EXT_SUPPLY2,
+ AB8500_EXT_SUPPLY3,
+ AB8500_NUM_EXT_REGULATORS,
+};
+
+/* AB8500 regulator platform data */
+struct ab8500_regulator_platform_data {
+ int num_reg_init;
+ struct ab8500_regulator_reg_init *reg_init;
+ int num_regulator;
+ struct regulator_init_data *regulator;
+ int num_ext_regulator;
+ struct regulator_init_data *ext_regulator;
+};
+
+/* AB8500 external regulator functions (internal) */
+#ifdef CONFIG_REGULATOR_AB8500_EXT
+__devinit int ab8500_ext_regulator_init(struct platform_device *pdev);
+__devexit int ab8500_ext_regulator_exit(struct platform_device *pdev);
+#else
+static inline int ab8500_ext_regulator_init(struct platform_device *pdev) { return 0; }
+static inline int ab8500_ext_regulator_exit(struct platform_device *pdev) { return 0; }
+#endif
+
#endif
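A sketch of board-side register initialization using the extended INIT_REGULATOR_REGISTER() macro, which now carries a mask so that only selected bits are written; the mask and value shown are placeholders.

#include <linux/kernel.h>
#include <linux/regulator/ab8500.h>

static struct ab8500_regulator_reg_init ex_ab8500_reg_init[] = {
	/* Only the bits set in the mask are touched in the register. */
	INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x03, 0x01),
};

static struct ab8500_regulator_platform_data ex_ab8500_regulator_data = {
	.reg_init	= ex_ab8500_reg_init,
	.num_reg_init	= ARRAY_SIZE(ex_ab8500_reg_init),
};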
diff --git a/include/linux/regulator/db5500-prcmu.h b/include/linux/regulator/db5500-prcmu.h
new file mode 100644
index 00000000000..fee68795867
--- /dev/null
+++ b/include/linux/regulator/db5500-prcmu.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * Interface to power domain regulators on DB5500
+ */
+
+#ifndef __DB5500_REGULATOR_H__
+#define __DB5500_REGULATOR_H__
+
+#include <linux/regulator/dbx500-prcmu.h>
+
+/* Number of DB5500 regulators and regulator enumeration */
+enum db5500_regulator_id {
+ DB5500_REGULATOR_VAPE,
+ DB5500_REGULATOR_SWITCH_SGA,
+ DB5500_REGULATOR_SWITCH_HVA,
+ DB5500_REGULATOR_SWITCH_SIA,
+ DB5500_REGULATOR_SWITCH_DISP,
+ DB5500_REGULATOR_SWITCH_ESRAM12,
+ DB5500_NUM_REGULATORS
+};
+
+#endif
diff --git a/include/linux/regulator/dbx500-prcmu.h b/include/linux/regulator/dbx500-prcmu.h
new file mode 100644
index 00000000000..2ecb34c56aa
--- /dev/null
+++ b/include/linux/regulator/dbx500-prcmu.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) ST Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ */
+#ifndef __LINUX_REGULATOR_DBX500_H
+#define __LINUX_REGULATOR_DBX500_H
+
+struct ux500_regulator;
+
+#ifdef CONFIG_REGULATOR
+/*
+ * NOTE! The device will be connected to the correct regulator by this
+ * new framework. A list of connections will match up dev_name(dev)
+ * to the specific regulator. This follows the same principle as the
+ * normal regulator framework.
+ *
+ * This framework shall only be used in special cases when a regulator
+ * has to be enabled/disabled in atomic context.
+ */
+
+/**
+ * ux500_regulator_get()
+ *
+ * @dev: Drivers device struct
+ *
+ * Returns a ux500_regulator struct to be used as the argument for the
+ * ux500_regulator_atomic_enable/disable calls.
+ * Returns ERR_PTR(-EINVAL) if no matching regulator is found.
+ */
+struct ux500_regulator *__must_check ux500_regulator_get(struct device *dev);
+
+/**
+ * ux500_regulator_atomic_enable()
+ *
+ * @regulator: Regulator handle, provided from ux500_regulator_get.
+ *
+ * The enable/disable functions keep an internal counter, so every
+ * enable must be paired with a disable in order to turn off the regulator.
+ */
+int ux500_regulator_atomic_enable(struct ux500_regulator *regulator);
+
+/**
+ * ux500_regulator_atomic_disable()
+ *
+ * @regulator: Regulator handle, provided from ux500_regulator_get.
+ *
+ */
+int ux500_regulator_atomic_disable(struct ux500_regulator *regulator);
+
+/**
+ * ux500_regulator_put()
+ *
+ * @regulator: Regulator handle, provided from ux500_regulator_get.
+ */
+void ux500_regulator_put(struct ux500_regulator *regulator);
+
+#else
+
+static inline struct ux500_regulator *__must_check
+ux500_regulator_get(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int
+ux500_regulator_atomic_enable(struct ux500_regulator *regulator)
+{
+ return -EINVAL;
+}
+
+static inline int
+ux500_regulator_atomic_disable(struct ux500_regulator *regulator)
+{
+ return -EINVAL;
+}
+
+static inline void ux500_regulator_put(struct ux500_regulator *regulator)
+{
+}
+#endif /* CONFIG_REGULATOR */
+
+#ifdef CONFIG_REGULATOR_DEBUG
+void ux500_regulator_suspend_debug(void);
+void ux500_regulator_resume_debug(void);
+#else
+static inline void ux500_regulator_suspend_debug(void) { }
+static inline void ux500_regulator_resume_debug(void) { }
+#endif
+
+#endif
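A hedged sketch of the atomic regulator interface documented above; the consumer device is assumed to be matched by the platform's connection list.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/dbx500-prcmu.h>

static struct ux500_regulator *ex_reg;

static int example_get_power(struct device *dev)
{
	ex_reg = ux500_regulator_get(dev);
	if (IS_ERR(ex_reg))
		return PTR_ERR(ex_reg);
	return 0;
}

static void example_burst_access(void)
{
	/* Safe in atomic context, unlike regulator_enable(). */
	ux500_regulator_atomic_enable(ex_reg);
	/* ... time-critical work ... */
	ux500_regulator_atomic_disable(ex_reg);
}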
diff --git a/include/linux/spi/stm_msp.h b/include/linux/spi/stm_msp.h
new file mode 100644
index 00000000000..501023105cb
--- /dev/null
+++ b/include/linux/spi/stm_msp.h
@@ -0,0 +1,126 @@
+/*
+ * include/linux/spi/stm_msp.h
+ *
+ * Copyright (C) 2010 STMicroelectronics Pvt. Ltd.
+ *
+ * Author: Sachin Verma <sachin.verma@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _STM_MSP_H
+#define _STM_MSP_H
+
+#include <linux/device.h>
+
+/* CHIP select/deselect commands */
+enum spi_chip_select {
+ SPI_CHIP_SELECT,
+ SPI_CHIP_DESELECT
+};
+
+/* Common configuration for different SPI controllers */
+enum spi_loopback {
+ SPI_LOOPBACK_DISABLED,
+ SPI_LOOPBACK_ENABLED
+};
+
+enum spi_hierarchy {
+ SPI_MASTER,
+ SPI_SLAVE
+};
+
+/* Endianness of FIFO data */
+enum spi_fifo_endian {
+ SPI_FIFO_MSB,
+ SPI_FIFO_LSB
+};
+
+/* SPI mode of operation (Communication modes) */
+enum spi_mode {
+ SPI_INTERRUPT_TRANSFER,
+ SPI_POLLING_TRANSFER,
+};
+
+enum msp_data_size {
+ MSP_DATA_BITS_DEFAULT = -1,
+ MSP_DATA_BITS_8 = 0x00,
+ MSP_DATA_BITS_10,
+ MSP_DATA_BITS_12,
+ MSP_DATA_BITS_14,
+ MSP_DATA_BITS_16,
+ MSP_DATA_BITS_20,
+ MSP_DATA_BITS_24,
+ MSP_DATA_BITS_32,
+};
+
+enum msp_clk_src {
+ MSP_INTERNAL_CLK = 0x0,
+ MSP_EXTERNAL_CLK,
+};
+
+struct msp_clock_params {
+ enum msp_clk_src clk_src;
+ /* value from 0 to 1023 */
+ u16 sckdiv;
+ /* Used only when MSPSCK clocks the sample rate
+ * generator (SCKSEL = 1Xb):
+ * 0b: The rising edge of MSPSCK clocks the sample rate generator
+ * 1b: The falling edge of MSPSCK clocks the sample rate generator */
+ int sckpol;
+};
+
+/* Motorola SPI protocol specific definitions */
+enum spi_clk_phase {
+ SPI_CLK_ZERO_CYCLE_DELAY = 0x0, /* Receive data on rising edge. */
+ SPI_CLK_HALF_CYCLE_DELAY /* Receive data on falling edge. */
+};
+
+/* SPI Clock Polarity */
+enum spi_clk_pol {
+ SPI_CLK_POL_IDLE_LOW, /* Low inactive level */
+ SPI_CLK_POL_IDLE_HIGH /* High inactive level */
+};
+
+struct motorola_spi_proto_params {
+ enum spi_clk_phase clk_phase;
+ enum spi_clk_pol clk_pol;
+};
+
+struct stm_msp_config_chip {
+ struct device *dev;
+ enum spi_loopback lbm;
+ enum spi_hierarchy hierarchy;
+ enum spi_fifo_endian endian_rx;
+ enum spi_fifo_endian endian_tx;
+ enum spi_mode com_mode;
+ enum msp_data_size data_size;
+ struct msp_clock_params clk_freq;
+ int spi_burst_mode_enable;
+ struct motorola_spi_proto_params proto_params;
+ u32 freq;
+ void (*cs_control)(u32 control);
+};
+
+/**
+ * struct stm_msp_controller - device.platform_data for SPI controller devices.
+ *
+ * @num_chipselect: chipselects are used to distinguish individual
+ * SPI slaves, and are numbered from zero to num_chipselect - 1.
+ * Each slave has a chipselect signal, but it's common that not
+ * every chipselect is connected to a slave.
+ */
+struct stm_msp_controller {
+ u8 num_chipselect;
+ u32 id;
+ u32 base_addr;
+ char *device_name;
+};
+#endif /* _STM_MSP_H */
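
Since the header only declares data types, here is a sketch (not taken from this patch) of how a board file might instantiate them for one MSP block used as an SPI master; the base address, divider, frequency and names are placeholder values:

/* Hypothetical board-level configuration for one MSP block as SPI master. */
static void example_spi_cs_control(u32 control)
{
	/* Drive the chip-select line per SPI_CHIP_SELECT/SPI_CHIP_DESELECT. */
}

static struct stm_msp_config_chip example_spi_chip_config = {
	.lbm		= SPI_LOOPBACK_DISABLED,
	.hierarchy	= SPI_MASTER,
	.endian_rx	= SPI_FIFO_MSB,
	.endian_tx	= SPI_FIFO_MSB,
	.com_mode	= SPI_INTERRUPT_TRANSFER,
	.data_size	= MSP_DATA_BITS_8,
	.clk_freq	= {
		.clk_src	= MSP_INTERNAL_CLK,
		.sckdiv		= 10,		/* allowed range 0..1023 */
	},
	.proto_params	= {
		.clk_phase	= SPI_CLK_ZERO_CYCLE_DELAY,
		.clk_pol	= SPI_CLK_POL_IDLE_LOW,
	},
	.freq		= 1000000,		/* 1 MHz, example only */
	.cs_control	= example_spi_cs_control,
};

static struct stm_msp_controller example_spi_plat_data = {
	.num_chipselect	= 1,
	.id		= 2,			/* example instance number */
	.base_addr	= 0x80123000,		/* placeholder address */
	.device_name	= "stm-msp-spi",	/* placeholder name */
};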
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
new file mode 100644
index 00000000000..05e5529a6aa
--- /dev/null
+++ b/include/linux/sys_soc.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Maxime Coquelin <maxime.coquelin-nonst@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#ifndef __SYS_SOC_H
+#define __SYS_SOC_H
+
+#include <linux/kobject.h>
+
+/**
+ * struct sysfs_soc_info - SoC information exported through sysfs
+ * @info: pointer to the value to export
+ * @get_info: callback used to retrieve the value if the info field is NULL
+ * @attr: sysfs attribute used for the export
+ */
+struct sysfs_soc_info {
+ const char *info;
+ ssize_t (*get_info)(char *buf, struct sysfs_soc_info *);
+ struct kobj_attribute attr;
+};
+
+ssize_t show_soc_info(struct kobject *, struct kobj_attribute *, char *);
+
+#define SYSFS_SOC_ATTR_VALUE(_name, _value) { \
+ .attr.attr.name = _name, \
+ .attr.attr.mode = S_IRUGO, \
+ .attr.show = show_soc_info, \
+ .info = _value, \
+}
+
+#define SYSFS_SOC_ATTR_CALLBACK(_name, _callback) { \
+ .attr.attr.name = _name, \
+ .attr.attr.mode = S_IRUGO, \
+ .attr.show = show_soc_info, \
+ .get_info = _callback, \
+}
+
+/**
+ * register_sysfs_soc - register the SoC information
+ * @info: pointer to the table of entries to export
+ * @num: number of entries to export
+ *
+ * NOTE: This function must only be called once.
+ */
+int register_sysfs_soc(struct sysfs_soc_info *info, size_t num);
+
+#endif /* __SYS_SOC_H */
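
A sketch of how the two macros and register_sysfs_soc() combine; the machine name, the "process" entry and its callback are invented for illustration:

/* Hypothetical SoC information table (sketch, not part of this patch). */
static ssize_t show_soc_process(char *buf, struct sysfs_soc_info *si)
{
	/* Value produced at read time instead of being a fixed string. */
	return sprintf(buf, "%s\n", "40nm");
}

static struct sysfs_soc_info example_soc_info[] = {
	SYSFS_SOC_ATTR_VALUE("machine", "U8500"),
	SYSFS_SOC_ATTR_CALLBACK("process", show_soc_process),
};

static int __init example_soc_init(void)
{
	return register_sysfs_soc(example_soc_info, ARRAY_SIZE(example_soc_info));
}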
diff --git a/include/linux/tee.h b/include/linux/tee.h
new file mode 100644
index 00000000000..c0f8f11d58d
--- /dev/null
+++ b/include/linux/tee.h
@@ -0,0 +1,314 @@
+/*
+ * Trusted Execution Environment (TEE) interface for TrustZone enabled ARM CPUs.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com>
+ * Author: Martin Hovang <martin.xm.hovang@stericsson.com>
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef TEE_H
+#define TEE_H
+
+/* tee_cmd id values */
+#define TEED_OPEN_SESSION 0x00000000U
+#define TEED_CLOSE_SESSION 0x00000001U
+#define TEED_INVOKE 0x00000002U
+
+/* tee_retval id values */
+#define TEED_SUCCESS 0x00000000U
+#define TEED_ERROR_GENERIC 0xFFFF0000U
+#define TEED_ERROR_ACCESS_DENIED 0xFFFF0001U
+#define TEED_ERROR_CANCEL 0xFFFF0002U
+#define TEED_ERROR_ACCESS_CONFLICT 0xFFFF0003U
+#define TEED_ERROR_EXCESS_DATA 0xFFFF0004U
+#define TEED_ERROR_BAD_FORMAT 0xFFFF0005U
+#define TEED_ERROR_BAD_PARAMETERS 0xFFFF0006U
+#define TEED_ERROR_BAD_STATE 0xFFFF0007U
+#define TEED_ERROR_ITEM_NOT_FOUND 0xFFFF0008U
+#define TEED_ERROR_NOT_IMPLEMENTED 0xFFFF0009U
+#define TEED_ERROR_NOT_SUPPORTED 0xFFFF000AU
+#define TEED_ERROR_NO_DATA 0xFFFF000BU
+#define TEED_ERROR_OUT_OF_MEMORY 0xFFFF000CU
+#define TEED_ERROR_BUSY 0xFFFF000DU
+#define TEED_ERROR_COMMUNICATION 0xFFFF000EU
+#define TEED_ERROR_SECURITY 0xFFFF000FU
+#define TEED_ERROR_SHORT_BUFFER 0xFFFF0010U
+
+/* TEE origin codes */
+#define TEED_ORIGIN_DRIVER 0x00000002U
+#define TEED_ORIGIN_TEE 0x00000003U
+#define TEED_ORIGIN_TEE_APPLICATION 0x00000004U
+
+#define TEE_UUID_CLOCK_SIZE 8
+
+#define TEEC_CONFIG_PAYLOAD_REF_COUNT 4
+
+/*
+ * Flag constants indicating which of the memory references in an open session
+ * or invoke command operation payload (TEEC_Operation) that are used.
+ */
+#define TEEC_MEMREF_0_USED 0x00000001
+#define TEEC_MEMREF_1_USED 0x00000002
+#define TEEC_MEMREF_2_USED 0x00000004
+#define TEEC_MEMREF_3_USED 0x00000008
+
+/*
+ * Flag constants indicating the data transfer direction of memory in
+ * TEEC_SharedMemory and TEEC_MemoryReference. TEEC_MEM_INPUT signifies data
+ * transfer direction from the client application to the TEE. TEEC_MEM_OUTPUT
+ * signifies data transfer direction from the TEE to the client application.
+ */
+#define TEEC_MEM_INPUT 0x00000001
+#define TEEC_MEM_OUTPUT 0x00000002
+
+/*
+ * Session login methods, for use in TEEC_OpenSession() as parameter
+ * connectionMethod. Type is t_uint32.
+ *
+ * TEEC_LOGIN_PUBLIC No login data is provided.
+ */
+#define TEEC_LOGIN_PUBLIC 0x0
+
+/*
+ * Exposed functions (command_id) in the static TA
+ */
+#define TEE_STA_GET_PRODUCT_CONFIG 10
+#define TEE_STA_SET_L2CC_PREFETCH_CTRL_REGISTER 11
+
+/* Flags indicating run-time environment */
+#define TEE_RT_FLAGS_NORMAL 0x00000000
+#define TEE_RT_FLAGS_MASK_ITP_PROD 0x00000001
+#define TEE_RT_FLAGS_MODEM_DEBUG 0x00000002
+#define TEE_RT_FLAGS_RNG_REG_PUBLIC 0x00000004
+#define TEE_RT_FLAGS_JTAG_ENABLED 0x00000008
+
+/*
+ * Product id numbers
+ */
+#define TEE_PRODUCT_ID_UNKNOWN 0
+#define TEE_PRODUCT_ID_8400 1
+#define TEE_PRODUCT_ID_8500 2
+#define TEE_PRODUCT_ID_9500 3
+#define TEE_PRODUCT_ID_5500 4
+#define TEE_PRODUCT_ID_7400 5
+#define TEE_PRODUCT_ID_8500C 6
+
+/* Flags indicating fuses */
+#define TEE_FUSE_FLAGS_MODEM_DISABLE 0x00000001
+
+/**
+ * struct tee_product_config - System configuration structure.
+ *
+ * @product_id: Product identification.
+ * @rt_flags: Runtime configuration flags.
+ * @fuse_flags: Fuse flags.
+ *
+ */
+struct tee_product_config {
+ uint32_t product_id;
+ uint32_t rt_flags;
+ uint32_t fuse_flags;
+};
+
+/**
+ * struct tee_uuid - Structure that represents a UUID.
+ * @timeLow: The low field of the time stamp.
+ * @timeMid: The middle field of the time stamp.
+ * @timeHiAndVersion: The high field of the timestamp multiplexed
+ * with the version number.
+ * @clockSeqAndNode: The clock sequence and the node.
+ *
+ * This structure uses different naming (camel case) to comply with the
+ * GlobalPlatform TEE Client API specification. The type is defined in RFC 4122.
+ */
+struct tee_uuid {
+ uint32_t timeLow;
+ uint16_t timeMid;
+ uint16_t timeHiAndVersion;
+ uint8_t clockSeqAndNode[TEE_UUID_CLOCK_SIZE];
+};
+
+/**
+ * struct tee_sharedmemory - Shared memory block for TEE.
+ * @buffer: The in/out data to TEE.
+ * @size: The size of the data.
+ * @flags: Flags telling whether it is an in, out or in/out parameter.
+ */
+struct tee_sharedmemory {
+ void *buffer;
+ size_t size;
+ uint32_t flags;
+};
+
+/**
+ * struct tee_operation - Payload for sessions or invoke operation.
+ * @shm: Array containing the shared memory buffers.
+ * @flags: Tells which of the memory buffers are in use.
+ */
+struct tee_operation {
+ struct tee_sharedmemory shm[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+ uint32_t flags;
+};
+
+struct tee_context {};
+
+/**
+ * struct tee_session - The session of an open tee device.
+ * @state: The current state in the linux kernel.
+ * @err: Error code (as in Global Platform TEE Client API spec)
+ * @origin: Origin for the error code (also from spec).
+ * @id: Implementation defined type, 0 if not used.
+ * @vaddr: Virtual address for the memrefs.
+ * @ta: The trusted application.
+ * @uuid: The uuid for the trusted application.
+ * @cmd: The command to be executed in the trusted application.
+ * @driver_cmd: The command type in the driver. This is used by a client (user
+ *              space) to tell the Linux kernel whether it is an open-session,
+ *              close-session or invoke command.
+ * @ta_size: The size of the trusted application.
+ * @op: The payload for the trusted application.
+ * @sync: Mutex to handle use by multiple clients.
+ *
+ * This structure is mainly used in the Linux kernel as a session context for
+ * ongoing operations. It is also used in the communication with user space.
+ */
+struct tee_session {
+ uint32_t state;
+ uint32_t err;
+ uint32_t origin;
+ uint32_t id;
+ uint32_t *vaddr[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+ void *ta;
+ struct tee_uuid *uuid;
+ unsigned int cmd;
+ unsigned int driver_cmd;
+ unsigned int ta_size;
+ struct tee_operation *op;
+ struct mutex *sync;
+};
+
+/**
+ * struct tee_read - Contains the error message and the origin.
+ * @err: Error code (as in Global Platform TEE Client API spec)
+ * @origin: Origin for the error code (also from spec).
+ *
+ * This is used by user space when a user space application wants to get more
+ * information about an error.
+ */
+struct tee_read {
+ unsigned int err; /* return value */
+ unsigned int origin; /* error origin */
+};
+
+/**
+ * call_sec_world() - Handles the calls to trusted applications.
+ * @param ts: The session of an operation to be executed.
+ * @param sec_cmd: The type of command to be executed: open-session,
+ *                 close-session or invoke command.
+ */
+int call_sec_world(struct tee_session *ts, int sec_cmd);
+
+
+/**
+ * teec_initialize_context() - Initializes a context holding connection
+ * information on the specific TEE.
+ * @param name: A zero-terminated string identifying the TEE to connect to.
+ * If name is set to NULL, the default TEE is connected to.
+ * NULL is the only supported value in this version of the
+ * API implementation.
+ * @param context: The context structure which is to be initialized.
+ *
+ * Initializes a context holding connection information between the calling
+ * client application and the TEE designated by the name string.
+ */
+int teec_initialize_context(const char *name, struct tee_context *context);
+
+/**
+ * teec_finalize_context() - Destroys a context holding connection information
+ * on the specific TEE.
+ * @param context: The context to be destroyed.
+ *
+ * This function destroys an initialized TEE context, closing the connection
+ * between the client application and the TEE. This function must only be
+ * called when all sessions related to this TEE context have been closed and
+ * all shared memory blocks have been released.
+ */
+int teec_finalize_context(struct tee_context *context);
+
+/**
+ * teec_open_session() - Opens a new session with the specified trusted
+ * application.
+ * @param context: The initialized TEE context structure in which scope to
+ * open the session.
+ * @param session: The session to initialize.
+ * @param destination: A structure identifying the trusted application with
+ * which to open a session. If this is set to NULL the
+ * operation TEEC_MEMREF_0 is expected to contain the blob
+ * which holds the Trusted Application.
+ * @param connection_method: The connection method to use.
+ * @param connection_data: Any data necessary to connect with the chosen
+ *                         connection method. Not supported; should be set to
+ *                         NULL.
+ * @param operation: An operation structure to use in the session. May be
+ * set to NULL to signify no operation structure needed.
+ * If destination is set to NULL, TEEC_MEMREF_0 is
+ * expected to hold the TA binary as described above.
+ * @param error_origin: A parameter which will hold the error origin if this
+ * function returns any value other than TEEC_SUCCESS.
+ *
+ * Opens a new session with the specified trusted application. Only
+ * connectionMethod == TEEC_LOGIN_PUBLIC is supported. connectionData and
+ * operation shall be set to NULL.
+ */
+int teec_open_session(struct tee_context *context, struct tee_session *session,
+ const struct tee_uuid *destination,
+ unsigned int connection_method,
+ void *connection_data, struct tee_operation *operation,
+ unsigned int *error_origin);
+
+/**
+ * teec_close_session() - Closes the session which has been opened with the
+ * specific trusted application.
+ * @param session: The opened session to close.
+ *
+ * Closes the session which has been opened with the specific trusted
+ * application.
+ */
+int teec_close_session(struct tee_session *session);
+
+/**
+ * teec_invoke_command() - Executes a command in the specified trusted
+ * application.
+ * @param session: The opened session in which the command shall be invoked.
+ * @param command_id: Identifier of the command in the trusted application to
+ * invoke.
+ * @param operation: An operation structure to use in the invoke command. May
+ * be set to NULL to signify no operation structure needed.
+ * @param error_origin: A parameter which will hold the error origin if this
+ * function returns any value other than TEEC_SUCCESS.
+ *
+ * Executes a command in the specified trusted application.
+ */
+int teec_invoke_command(struct tee_session *session, unsigned int command_id,
+ struct tee_operation *operation,
+ unsigned int *error_origin);
+
+/**
+ * teec_allocate_shared_memory() - Allocate shared memory for TEE.
+ * @param context: The initialized TEE context structure in whose scope the
+ *                 shared memory is allocated.
+ * @param shared_memory: Pointer to the allocated shared memory.
+ */
+int teec_allocate_shared_memory(struct tee_context *context,
+ struct tee_sharedmemory *shared_memory);
+
+/**
+ * teec_release_shared_memory() - Free the shared memory.
+ * @param shared_memory: Pointer to the shared memory to be freed.
+ */
+void teec_release_shared_memory(struct tee_sharedmemory *shared_memory);
+
+#endif
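
The declarations above imply a context/session/invoke lifecycle; the following in-kernel sketch assumes that sequence (includes omitted, error handling reduced to returning -EIO, and the trusted-application UUID and command id supplied by the caller):

/* Hypothetical in-kernel TEE client sequence (sketch). */
static int example_invoke_ta(const struct tee_uuid *ta_uuid, unsigned int cmd)
{
	struct tee_context ctx;
	struct tee_session sess;
	unsigned int origin;
	int ret;

	memset(&sess, 0, sizeof(sess));

	ret = teec_initialize_context(NULL, &ctx);
	if (ret != TEED_SUCCESS)
		return -EIO;

	/* Connection data and operation shall be NULL, as documented above. */
	ret = teec_open_session(&ctx, &sess, ta_uuid, TEEC_LOGIN_PUBLIC,
				NULL, NULL, &origin);
	if (ret != TEED_SUCCESS)
		goto out_finalize;

	/* No operation payload needed for this example command. */
	ret = teec_invoke_command(&sess, cmd, NULL, &origin);

	teec_close_session(&sess);
out_finalize:
	teec_finalize_context(&ctx);
	return ret == TEED_SUCCESS ? 0 : -EIO;
}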
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 69d845739bc..25c1cbf75d8 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -372,7 +372,15 @@ struct usb_bus {
* limit. Because the arrays need to add a bit for hub status data, we
* do 31, so plus one evens out to four bytes.
*/
+
+#ifdef CONFIG_ARCH_U8500
+/*
+ * On the U8500 platform only 16 ports are supported.
+ */
+#define USB_MAXCHILDREN (16)
+#else
#define USB_MAXCHILDREN (31)
+#endif
struct usb_tt;
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index da653b5c713..e5c900a3d94 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -476,6 +476,8 @@ struct usb_gadget_ops {
int (*start)(struct usb_gadget_driver *,
int (*bind)(struct usb_gadget *));
int (*stop)(struct usb_gadget_driver *);
+ struct usb_ep* (*configure_ep)(struct usb_gadget *, u8 type,
+ struct usb_endpoint_descriptor *);
};
/**
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index d87f44f5b04..010b40d1b52 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -41,6 +41,9 @@ enum usb_xceiv_events {
USB_EVENT_ID, /* id was grounded */
USB_EVENT_CHARGER, /* usb dedicated charger */
USB_EVENT_ENUMERATED, /* gadget driver enumerated */
+ USB_EVENT_RIDA,
+ USB_EVENT_RIDB,
+ USB_EVENT_RIDC,
};
struct otg_transceiver;
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 5e11f8a1f86..1db39e15a1a 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1629,6 +1629,64 @@ enum v4l2_mpeg_mfc51_video_force_frame_type {
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_MPEG_MFC51_BASE+53)
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_MPEG_MFC51_BASE+54)
+/* Private Base control IDs specific to the CG2900 FM driver as defined by V4L2 */
+#define V4L2_CID_CG2900_RADIO_PRIVATE_BASE (V4L2_CID_PRIVATE_BASE | 0x1000)
+#define V4L2_CID_CG2900_RADIO_BANDSCAN (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+1)
+enum v4l2_cg2900_radio_bandscan {
+ V4L2_CG2900_RADIO_BANDSCAN_START = 0,
+ V4L2_CG2900_RADIO_BANDSCAN_STOP = 1,
+};
+#define V4L2_CID_CG2900_RADIO_BANDSCAN_GET_RESULTS (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+2)
+#define V4L2_CID_CG2900_RADIO_BLOCKSCAN_START (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+3)
+#define V4L2_CID_CG2900_RADIO_BLOCKSCAN_GET_RESULTS (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+4)
+#define V4L2_CID_CG2900_RADIO_CHIP_STATE (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+5)
+enum v4l2_cg2900_radio_chip_state {
+ V4L2_CG2900_RADIO_STANDBY = 0,
+ V4L2_CG2900_RADIO_POWERUP = 1,
+};
+#define V4L2_CID_CG2900_RADIO_RSSI_THRESHOLD (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+6)
+#define V4L2_CID_CG2900_RADIO_SELECT_ANTENNA (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+7)
+enum v4l2_cg2900_radio_select_antenna {
+ V4L2_CG2900_RADIO_EMBEDDED_ANTENNA = 0,
+ V4L2_CG2900_RADIO_WIRED_ANTENNA = 1,
+};
+#define V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_START (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+8)
+#define V4L2_CID_CG2900_RADIO_RDS_AF_UPDATE_GET_RESULT (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+9)
+#define V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_START (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+10)
+#define V4L2_CID_CG2900_RADIO_RDS_AF_SWITCH_GET_RESULT (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+11)
+#define V4L2_CID_CG2900_RADIO_TEST_TONE_GENERATOR_SET_STATUS (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+12)
+enum v4l2_cg2900_radio_test_tone_generator_set_status {
+ V4L2_CG2900_RADIO_TEST_TONE_GEN_OFF = 0,
+ V4L2_CG2900_RADIO_TEST_TONE_GEN_ON_W_SRC = 1,
+ V4L2_CG2900_RADIO_TEST_TONE_GENERATOR_ON_WO_SRC = 2,
+};
+#define V4L2_CID_CG2900_RADIO_TEST_TONE_CONNECT (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+13)
+enum v4l2_cg2900_radio_test_tone_generator {
+ V4L2_CG2900_RADIO_TEST_TONE_NORMAL_AUDIO = 0,
+ V4L2_CG2900_RADIO_TEST_TONE_ZERO = 1,
+ V4L2_CG2900_RADIO_TEST_TONE_TONE_1 = 2,
+ V4L2_CG2900_RADIO_TEST_TONE_TONE_2 = 3,
+ V4L2_CG2900_RADIO_TEST_TONE_TONE_SUM = 4,
+};
+#define V4L2_CID_CG2900_RADIO_TEST_TONE_SET_PARAMS (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+14)
+#define V4L2_CID_CG2900_RADIO_TUNE_DEEMPHASIS (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+15)
+enum v4l2_cg2900_radio_deemphasis {
+ V4L2_CG2900_RADIO_DEEMPHASIS_DISABLED = 0,
+ V4L2_CG2900_RADIO_DEEMPHASIS_50_uS = 1,
+ V4L2_CG2900_RADIO_DEEMPHASIS_75_uS = 2,
+};
+#define V4L2_CID_CG2900_RADIO_GET_INTERRUPT (V4L2_CID_CG2900_RADIO_PRIVATE_BASE+16)
+enum v4l2_cg2900_radio_interrupt {
+ V4L2_CG2900_RADIO_INTERRUPT_UNKNOWN = 0,
+ V4L2_CG2900_RADIO_INTERRUPT_SEARCH_COMPLETED = 1,
+ V4L2_CG2900_RADIO_INTERRUPT_BAND_SCAN_COMPLETED = 2,
+ V4L2_CG2900_RADIO_INTERRUPT_BLOCK_SCAN_COMPLETED = 3,
+ V4L2_CG2900_RADIO_INTERRUPT_SCAN_CANCELLED = 4,
+ V4L2_CG2900_RADIO_INTERRUPT_MONO_STEREO_TRANSITION = 5,
+ V4L2_CG2900_RADIO_INTERRUPT_DEVICE_RESET = 6,
+ V4L2_CG2900_RADIO_INTERRUPT_RDS_RECEIVED = 7
+};
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
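
A user-space sketch of driving one of the new private controls through the standard VIDIOC_S_CTRL ioctl; the radio device node path is a placeholder:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hypothetical user-space band-scan start (sketch). */
static int start_bandscan(const char *dev)	/* e.g. "/dev/radio0" */
{
	struct v4l2_control ctrl;
	int ret, fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_CG2900_RADIO_BANDSCAN;
	ctrl.value = V4L2_CG2900_RADIO_BANDSCAN_START;

	ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	close(fd);
	return ret;
}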
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 4a82ca0bb0b..dda8bb18420 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -116,6 +116,18 @@ int bt_printk(const char *level, const char *fmt, ...);
#define BT_ERR(fmt, arg...) bt_printk(KERN_ERR, pr_fmt(fmt), ##arg)
#define BT_DBG(fmt, arg...) pr_debug(fmt "\n", ##arg)
+#define BT_SCO_PARAMETERS 8
+struct bt_sco_parameters {
+ __u32 tx_bandwidth;
+ __u32 rx_bandwidth;
+ __u16 max_latency;
+ __u16 voice_setting;
+ __u8 retrans_effort;
+ __u16 pkt_type;
+} __packed;
+
+#define BT_NO_AUTORETRY 9
+
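
Assuming BT_SCO_PARAMETERS behaves like the other BT_* options, i.e. it is applied with setsockopt() at the SOL_BLUETOOTH level on an SCO socket (this patch hunk does not show that usage, so it is an assumption), a user-space sketch could look as follows; the bandwidth, voice-setting and packet-type values are examples only:

#include <string.h>
#include <sys/socket.h>

/* Hypothetical user-space configuration of an SCO link (sketch).
 * The struct and constants come from the patched kernel headers;
 * applying them through setsockopt() is an assumption. */
static int set_sco_params(int sk)
{
	struct bt_sco_parameters p;

	memset(&p, 0, sizeof(p));
	p.tx_bandwidth   = 8000;	/* example: 64 kbit/s */
	p.rx_bandwidth   = 8000;
	p.max_latency    = HCI_SYNC_MAX_LATENCY_DONTCARE;
	p.voice_setting  = 0x0060;	/* example: CVSD, 16-bit input */
	p.retrans_effort = HCI_SYNC_RETRANS_EFFORT_DONTCARE;
	p.pkt_type       = ALL_ESCO_MASK;

	return setsockopt(sk, SOL_BLUETOOTH, BT_SCO_PARAMETERS, &p, sizeof(p));
}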
/* Connection and socket states */
enum {
BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 00596e816b4..9aa8301a71f 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -167,6 +167,8 @@ enum {
#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+#define ALL_ESCO_MASK (SCO_ESCO_MASK | ESCO_EV3 | ESCO_EV4 | ESCO_EV5 | \
+ EDR_ESCO_MASK)
/* ACL flags */
#define ACL_START_NO_FLUSH 0x00
@@ -430,6 +432,21 @@ struct hci_cp_setup_sync_conn {
__le16 pkt_type;
} __packed;
+/* Air coding format types */
+#define HCI_SYNC_AIR_CODING_CVSD 0x00
+#define HCI_SYNC_AIR_CODING_ULAW 0x01
+#define HCI_SYNC_AIR_CODING_ALAW 0x02
+#define HCI_SYNC_AIR_CODING_TRANSPARENT 0x03
+
+/* Max latency constants */
+#define HCI_SYNC_MAX_LATENCY_DONTCARE 0xffff
+
+/* Retransmission effort constants */
+#define HCI_SYNC_RETRANS_EFFORT_NO 0x00
+#define HCI_SYNC_RETRANS_EFFORT_POWER 0x01
+#define HCI_SYNC_RETRANS_EFFORT_QUALITY 0x02
+#define HCI_SYNC_RETRANS_EFFORT_DONTCARE 0xff
+
#define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429
struct hci_cp_accept_sync_conn_req {
bdaddr_t bdaddr;
@@ -1377,6 +1394,7 @@ struct hci_conn_info_req {
struct hci_auth_info_req {
bdaddr_t bdaddr;
__u8 type;
+ __u8 sec_level;
};
struct hci_inquiry_req {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 453893b3120..733d5bb6b1e 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -272,6 +272,7 @@ struct hci_conn {
__u8 type;
__u8 out;
__u8 attempt;
+ __u8 no_autoretry;
__u8 dev_class[3];
__u8 features[8];
__u8 ssp_mode;
@@ -311,6 +312,8 @@ struct hci_conn {
void *sco_data;
void *smp_conn;
+ struct bt_sco_parameters *sco_parameters;
+
struct hci_conn *link;
void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
@@ -515,7 +518,8 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+ bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
@@ -525,7 +529,8 @@ int hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u8 sec_level, __u8 auth_type);
+ __u8 sec_level, __u8 auth_type,
+ struct bt_sco_parameters *sco_parameters);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 1e35c43657c..e6b5a94f7e8 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -72,7 +72,9 @@ struct sco_conn {
struct sco_pinfo {
struct bt_sock bt;
- __u32 flags;
+ struct bt_sco_parameters param;
+ __u8 no_autoretry;
+
struct sco_conn *conn;
};
diff --git a/include/sound/ux500_ab8500.h b/include/sound/ux500_ab8500.h
new file mode 100644
index 00000000000..7858bfdb4fa
--- /dev/null
+++ b/include/sound/ux500_ab8500.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef UX500_AB8500_H
+#define UX500_AB8500_H
+
+extern struct snd_soc_ops ux500_ab8500_ops[];
+
+struct snd_soc_pcm_runtime;
+
+int ux500_ab8500_startup(struct snd_pcm_substream *substream);
+
+void ux500_ab8500_shutdown(struct snd_pcm_substream *substream);
+
+int ux500_ab8500_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+
+int ux500_ab8500_soc_machine_drv_init(void);
+
+void ux500_ab8500_soc_machine_drv_cleanup(void);
+
+int ux500_ab8500_machine_codec_init(struct snd_soc_pcm_runtime *runtime);
+
+extern void ux500_ab8500_jack_report(int);
+
+#endif
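
As a sketch of how these hooks are typically wired into an ASoC machine driver (the DAI-link fields mentioned below are illustrative, not taken from this patch), the callbacks can be collected into a snd_soc_ops instance:

#include <sound/soc.h>
#include <sound/ux500_ab8500.h>

/* Hypothetical ops table built from the callbacks declared above (sketch). */
static struct snd_soc_ops example_ab8500_ops = {
	.startup	= ux500_ab8500_startup,
	.shutdown	= ux500_ab8500_shutdown,
	.hw_params	= ux500_ab8500_hw_params,
};

/* A DAI link would then reference the ops and the codec init hook, e.g.:
 *	.init = ux500_ab8500_machine_codec_init,
 *	.ops  = &example_ab8500_ops,
 */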
diff --git a/include/sound/ux500_ab8500_ext.h b/include/sound/ux500_ab8500_ext.h
new file mode 100644
index 00000000000..1cc9a74585c
--- /dev/null
+++ b/include/sound/ux500_ab8500_ext.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef UX500_AB8500_EXT_H
+#define UX500_AB8500_EXT_H
+
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+
+int ux500_ab8500_audio_gpadc_measure(struct ab8500_gpadc *gpadc,
+ u8 channel, bool mode, int *value);
+
+#endif
diff --git a/include/trace/Kbuild b/include/trace/Kbuild
new file mode 100644
index 00000000000..7e8b704d610
--- /dev/null
+++ b/include/trace/Kbuild
@@ -0,0 +1 @@
+header-y += stm.h
diff --git a/include/trace/stm.h b/include/trace/stm.h
new file mode 100644
index 00000000000..de3ed1dc381
--- /dev/null
+++ b/include/trace/stm.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson STM Trace driver
+ *
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * Philippe Langlais <philippe.langlais@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef STM_H
+#define STM_H
+
+#include <linux/types.h>
+
+#define STM_DEV_NAME "stm"
+
+/* One single channel mapping */
+struct stm_channel {
+ union {
+ __u8 no_stamp8;
+ __u16 no_stamp16;
+ __u32 no_stamp32;
+ __u64 no_stamp64;
+ };
+ union {
+ __u8 stamp8;
+ __u16 stamp16;
+ __u32 stamp32;
+ __u64 stamp64;
+ };
+};
+
+/* Possible trace modes */
+#define STM_SW_LOSSLESS 0 /* Software mode: lossless data but intrusive */
+#define STM_HW_LOSSY 1 /* Hardware mode: lossy data but less intrusive */
+
+/* Possible clock setting */
+enum clock_div {
+ STM_CLOCK_DIV2 = 0,
+ STM_CLOCK_DIV4,
+ STM_CLOCK_DIV6,
+ STM_CLOCK_DIV8,
+ STM_CLOCK_DIV10,
+ STM_CLOCK_DIV12,
+ STM_CLOCK_DIV14,
+ STM_CLOCK_DIV16,
+};
+
+/* ioctl commands */
+#define STM_CONNECTION _IOW('t', 0, enum stm_connection_type)
+#define STM_DISABLE _IO('t', 1)
+#define STM_GET_NB_MAX_CHANNELS _IOR('t', 2, int)
+#define STM_GET_NB_FREE_CHANNELS _IOR('t', 3, int)
+#define STM_GET_CHANNEL_NO _IOR('t', 4, int)
+#define STM_SET_CLOCK_DIV _IOW('t', 5, enum clock_div)
+#define STM_GET_CTRL_REG _IOR('t', 6, int)
+#define STM_ENABLE_SRC _IOWR('t', 7, int)
+#define STM_GET_FREE_CHANNEL _IOW('t', 8, int)
+#define STM_RELEASE_CHANNEL _IOW('t', 9, int)
+#define STM_SET_MODE _IOWR('t', 10, int)
+#define STM_GET_MODE _IOR('t', 11, int)
+
+enum stm_connection_type {
+ STM_DISCONNECT = 0,
+ STM_DEFAULT_CONNECTION = 1,
+ STM_STE_MODEM_ON_MIPI34_NONE_ON_MIPI60 = 2,
+ STM_STE_APE_ON_MIPI34_NONE_ON_MIPI60 = 3,
+ STM_STE_MODEM_ON_MIPI34_APE_ON_MIPI60 = 4,
+ STM_STE_MODEM_ON_MICROSD = 5,
+ STM_STE_APE_ON_MICROSD = 6,
+ STM_STE_INVALID_CONNECTION = 0xff
+};
+
+#ifdef __KERNEL__
+
+struct stm_platform_data {
+ u32 regs_phys_base;
+ u32 channels_phys_base;
+ u32 id_mask;
+ u32 masters_enabled;
+ const s16 *channels_reserved;
+ int channels_reserved_sz;
+ int (*stm_connection)(enum stm_connection_type);
+};
+
+/* Channels base address */
+extern volatile struct stm_channel __iomem *stm_channels;
+
+/* Provides stm_trace_XX() and stm_tracet_XX() trace API */
+#define DEFLLTFUN(size) \
+static inline void stm_trace_##size(int channel, __u##size data) \
+{ \
+ stm_channels[channel].no_stamp##size = data; \
+} \
+static inline void stm_tracet_##size(int channel, __u##size data) \
+{ \
+ stm_channels[channel].stamp##size = data; \
+} \
+
+DEFLLTFUN(8);
+DEFLLTFUN(16);
+DEFLLTFUN(32);
+DEFLLTFUN(64);
+
+/*
+ * Trace a buffer on a given channel
+ * with auto time stamping on the last byte(s) only
+ */
+int stm_trace_buffer_onchannel(int channel, const void *data, size_t length);
+/*
+ * Trace a buffer on a dynamically allocated channel
+ * with auto time stamping on the last byte(s) only
+ * Dynamic channels are allocated from the 128 highest channels
+ */
+int stm_trace_buffer(const void *data, size_t length);
+
+/* printk equivalent for STM */
+int stm_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
+
+#if defined(CONFIG_STM_PRINTK)
+#define stm_dup_printk(buf, length) \
+ stm_trace_buffer_onchannel(CONFIG_STM_PRINTK_CHANNEL, buf, length)
+
+#else
+static inline int stm_dup_printk(char *buf, size_t size)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_STM_TRACE_PRINTK)
+static inline int stm_trace_printk_buf(
+ unsigned long ip, const char *buf, size_t size)
+{
+ stm_trace_32(CONFIG_STM_TRACE_PRINTK_CHANNEL, ip);
+ return stm_trace_buffer_onchannel(CONFIG_STM_TRACE_PRINTK_CHANNEL,
+ buf, size);
+}
+
+static inline int stm_trace_bprintk_buf(
+ unsigned long ip, const char *fmt, const void *buf, size_t size)
+{
+ stm_trace_64(CONFIG_STM_TRACE_BPRINTK_CHANNEL, ((u64)ip<<32)+(u32)fmt);
+ return stm_trace_buffer_onchannel(CONFIG_STM_TRACE_PRINTK_CHANNEL,
+ buf, size);
+}
+#else
+static inline int stm_trace_printk_buf(
+ unsigned long ip, const char *buf, size_t size)
+{
+ return 0;
+}
+
+static inline int stm_trace_bprintk_buf(
+ unsigned long ip, const char *fmt, const void *buf, size_t size)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_STM_FTRACE)
+static inline void stm_ftrace(unsigned long ip, unsigned long parent_ip)
+{
+ stm_tracet_64(CONFIG_STM_FTRACE_CHANNEL, (((__u64)ip)<<32) + parent_ip);
+}
+#else
+static inline void stm_ftrace(unsigned long ip, unsigned long parent_ip)
+{
+}
+#endif
+
+#if defined(CONFIG_STM_CTX_SWITCH)
+static inline void stm_sched_switch(u32 prev_pid, u8 prev_prio, u8 prev_state,
+ u32 next_pid, u8 next_prio, u8 next_state, u32 next_cpu)
+{
+ stm_trace_64(CONFIG_STM_CTX_SWITCH_CHANNEL,
+ (((__u64)prev_pid)<<32) + next_pid);
+ stm_tracet_64(CONFIG_STM_CTX_SWITCH_CHANNEL, (((__u64)next_cpu)<<32)
+ + (prev_prio<<24) + (prev_state<<16)
+ + (next_prio<<8) + next_state);
+}
+#else
+static inline void stm_sched_switch(u32 prev_pid, u8 prev_prio, u8 prev_state,
+ u32 next_pid, u8 next_prio, u8 next_state, u32 next_cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_STM_WAKEUP)
+static inline void stm_sched_wakeup(u32 prev_pid, u8 prev_prio, u8 prev_state,
+ u32 next_pid, u8 next_prio, u8 next_state, u32 next_cpu)
+{
+ stm_trace_64(CONFIG_STM_WAKEUP_CHANNEL,
+ (((__u64)prev_pid)<<32) + next_pid);
+ stm_tracet_64(CONFIG_STM_WAKEUP_CHANNEL, (((__u64)next_cpu)<<32)
+ + (prev_prio<<24) + (prev_state<<16)
+ + (next_prio<<8) + next_state);
+}
+#else
+static inline void stm_sched_wakeup(u32 prev_pid, u8 prev_prio, u8 prev_state,
+ u32 next_pid, u8 next_prio, u8 next_state, u32 next_cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_STM_STACK_TRACE)
+static inline void stm_stack_trace(unsigned long *callers)
+{
+ while (*(callers + 1) != ULONG_MAX) {
+ stm_trace_32(CONFIG_STM_STACK_TRACE_CHANNEL, *callers++);
+ }
+ /* Time stamp the latest */
+ stm_tracet_32(CONFIG_STM_STACK_TRACE_CHANNEL, *callers);
+}
+#else
+static inline void stm_stack_trace(unsigned long *callers)
+{
+}
+#endif
+
+/* Alloc/Free STM channel */
+int stm_alloc_channel(int offset);
+void stm_free_channel(int channel);
+
+#endif /* __KERNEL__ */
+
+#endif /* STM_H */
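
A sketch of kernel-side use of the channel allocation and trace helpers declared above; the traced values are placeholders and the offset argument to stm_alloc_channel() is simply passed as 0 here:

/* Hypothetical kernel-side use of the STM trace helpers (sketch). */
static int example_stm_trace(const void *msg, size_t len)
{
	int channel;

	/* The offset argument is passed as 0 here purely as a placeholder. */
	channel = stm_alloc_channel(0);
	if (channel < 0)
		return channel;

	stm_trace_32(channel, 0xcafe);		/* raw word, no time stamp */
	stm_tracet_32(channel, 0xbabe);		/* raw word, time stamped */

	/* Whole buffer; time stamp generated on the last byte(s) only. */
	stm_trace_buffer_onchannel(channel, msg, len);

	stm_free_channel(channel);
	return 0;
}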
diff --git a/include/video/Kbuild b/include/video/Kbuild
index ad3e622c533..d73b95df921 100644
--- a/include/video/Kbuild
+++ b/include/video/Kbuild
@@ -1,3 +1,4 @@
+header-y += b2r2_blt.h
header-y += edid.h
header-y += sisfb.h
header-y += uvesafb.h
diff --git a/include/video/av8100.h b/include/video/av8100.h
new file mode 100644
index 00000000000..23e96a0b871
--- /dev/null
+++ b/include/video/av8100.h
@@ -0,0 +1,549 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * AV8100 driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __AV8100__H__
+#define __AV8100__H__
+
+#define AV8100_CEC_MESSAGE_SIZE 16
+#define AV8100_HDCP_SEND_KEY_SIZE 16
+#define AV8100_INFOFRAME_SIZE 28
+#define AV8100_FUSE_KEY_SIZE 16
+#define AV8100_CHIPVER_1 1
+#define AV8100_CHIPVER_2 2
+
+struct av8100_platform_data {
+ unsigned gpio_base;
+ int irq;
+ int reset;
+ const char *inputclk_id;
+ const char *regulator_pwr_id;
+ bool alt_powerupseq;
+ unsigned char mclk_freq;
+};
+
+enum av8100_command_type {
+ AV8100_COMMAND_VIDEO_INPUT_FORMAT = 0x1,
+ AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ AV8100_COMMAND_VIDEO_SCALING_FORMAT,
+ AV8100_COMMAND_COLORSPACECONVERSION,
+ AV8100_COMMAND_CEC_MESSAGE_WRITE,
+ AV8100_COMMAND_CEC_MESSAGE_READ_BACK,
+ AV8100_COMMAND_DENC,
+ AV8100_COMMAND_HDMI,
+ AV8100_COMMAND_HDCP_SENDKEY,
+ AV8100_COMMAND_HDCP_MANAGEMENT,
+ AV8100_COMMAND_INFOFRAMES,
+ AV8100_COMMAND_EDID_SECTION_READBACK,
+ AV8100_COMMAND_PATTERNGENERATOR,
+ AV8100_COMMAND_FUSE_AES_KEY,
+};
+
+enum interface_type {
+ I2C_INTERFACE = 0x0,
+ DSI_INTERFACE = 0x1,
+};
+
+enum av8100_dsi_mode {
+ AV8100_HDMI_DSI_OFF,
+ AV8100_HDMI_DSI_COMMAND_MODE,
+ AV8100_HDMI_DSI_VIDEO_MODE
+};
+
+enum av8100_pixel_format {
+ AV8100_INPUT_PIX_RGB565,
+ AV8100_INPUT_PIX_RGB666,
+ AV8100_INPUT_PIX_RGB666P,
+ AV8100_INPUT_PIX_RGB888,
+ AV8100_INPUT_PIX_YCBCR422
+};
+
+enum av8100_video_mode {
+ AV8100_VIDEO_INTERLACE,
+ AV8100_VIDEO_PROGRESSIVE
+};
+
+enum av8100_dsi_nb_data_lane {
+ AV8100_DATA_LANES_USED_0,
+ AV8100_DATA_LANES_USED_1,
+ AV8100_DATA_LANES_USED_2,
+ AV8100_DATA_LANES_USED_3,
+ AV8100_DATA_LANES_USED_4
+};
+
+enum av8100_te_config {
+	AV8100_TE_OFF, /* No TE */
+	AV8100_TE_DSI_LANE, /* TE generated on DSI lane */
+	AV8100_TE_IT_LINE, /* TE generated on IT line (GPIO) */
+	AV8100_TE_DSI_IT, /* TE generated on both DSI lane & IT line */
+	AV8100_TE_GPIO_IT /* TE on GPIO I2S DAT3 and/or IT line */
+};
+
+enum av8100_audio_if_format {
+ AV8100_AUDIO_I2S_MODE,
+ AV8100_AUDIO_I2SDELAYED_MODE, /* I2S Mode by default*/
+ AV8100_AUDIO_TDM_MODE /* 8 Channels by default*/
+};
+
+enum av8100_sample_freq {
+ AV8100_AUDIO_FREQ_32KHZ,
+ AV8100_AUDIO_FREQ_44_1KHZ,
+ AV8100_AUDIO_FREQ_48KHZ,
+ AV8100_AUDIO_FREQ_64KHZ,
+ AV8100_AUDIO_FREQ_88_2KHZ,
+ AV8100_AUDIO_FREQ_96KHZ,
+ AV8100_AUDIO_FREQ_128KHZ,
+ AV8100_AUDIO_FREQ_176_1KHZ,
+ AV8100_AUDIO_FREQ_192KHZ
+};
+
+enum av8100_audio_word_length {
+ AV8100_AUDIO_16BITS,
+ AV8100_AUDIO_20BITS,
+ AV8100_AUDIO_24BITS
+};
+
+enum av8100_audio_format {
+ AV8100_AUDIO_LPCM_MODE,
+ AV8100_AUDIO_COMPRESS_MODE
+};
+
+enum av8100_audio_if_mode {
+ AV8100_AUDIO_SLAVE,
+ AV8100_AUDIO_MASTER
+};
+
+enum av8100_audio_mute {
+ AV8100_AUDIO_MUTE_DISABLE,
+ AV8100_AUDIO_MUTE_ENABLE
+};
+
+enum av8100_output_CEA_VESA {
+ AV8100_CUSTOM,
+ AV8100_CEA1_640X480P_59_94HZ,
+ AV8100_CEA2_3_720X480P_59_94HZ,
+ AV8100_CEA4_1280X720P_60HZ,
+ AV8100_CEA5_1920X1080I_60HZ,
+ AV8100_CEA6_7_NTSC_60HZ,
+ AV8100_CEA14_15_480p_60HZ,
+ AV8100_CEA16_1920X1080P_60HZ,
+ AV8100_CEA17_18_720X576P_50HZ,
+ AV8100_CEA19_1280X720P_50HZ,
+ AV8100_CEA20_1920X1080I_50HZ,
+ AV8100_CEA21_22_576I_PAL_50HZ,
+ AV8100_CEA29_30_576P_50HZ,
+ AV8100_CEA31_1920x1080P_50Hz,
+ AV8100_CEA32_1920X1080P_24HZ,
+ AV8100_CEA33_1920X1080P_25HZ,
+ AV8100_CEA34_1920X1080P_30HZ,
+ AV8100_CEA60_1280X720P_24HZ,
+ AV8100_CEA61_1280X720P_25HZ,
+ AV8100_CEA62_1280X720P_30HZ,
+ AV8100_VESA9_800X600P_60_32HZ,
+ AV8100_VESA14_848X480P_60HZ,
+ AV8100_VESA16_1024X768P_60HZ,
+ AV8100_VESA22_1280X768P_59_99HZ,
+ AV8100_VESA23_1280X768P_59_87HZ,
+ AV8100_VESA27_1280X800P_59_91HZ,
+ AV8100_VESA28_1280X800P_59_81HZ,
+ AV8100_VESA39_1360X768P_60_02HZ,
+ AV8100_VESA81_1366X768P_59_79HZ,
+ AV8100_VIDEO_OUTPUT_CEA_VESA_MAX
+};
+
+enum av8100_video_sync_pol {
+ AV8100_SYNC_POSITIVE,
+ AV8100_SYNC_NEGATIVE
+};
+
+enum av8100_hdmi_mode {
+ AV8100_HDMI_OFF,
+ AV8100_HDMI_ON,
+ AV8100_HDMI_AVMUTE
+};
+
+enum av8100_hdmi_format {
+ AV8100_HDMI,
+ AV8100_DVI
+};
+
+enum av8100_DVI_format {
+ AV8100_DVI_CTRL_CTL0,
+ AV8100_DVI_CTRL_CTL1,
+ AV8100_DVI_CTRL_CTL2
+};
+
+enum av8100_pattern_type {
+ AV8100_PATTERN_OFF,
+ AV8100_PATTERN_GENERATOR,
+ AV8100_PRODUCTION_TESTING
+};
+
+enum av8100_pattern_format {
+ AV8100_NO_PATTERN,
+ AV8100_PATTERN_VGA,
+ AV8100_PATTERN_720P,
+ AV8100_PATTERN_1080P
+};
+
+enum av8100_pattern_audio {
+ AV8100_PATTERN_AUDIO_OFF,
+ AV8100_PATTERN_AUDIO_ON,
+ AV8100_PATTERN_AUDIO_I2S_MEM
+};
+
+struct av8100_video_input_format_cmd {
+ enum av8100_dsi_mode dsi_input_mode;
+ enum av8100_pixel_format input_pixel_format;
+ unsigned short total_horizontal_pixel;
+ unsigned short total_horizontal_active_pixel;
+ unsigned short total_vertical_lines;
+ unsigned short total_vertical_active_lines;
+ enum av8100_video_mode video_mode;
+ enum av8100_dsi_nb_data_lane nb_data_lane;
+ unsigned char nb_virtual_ch_command_mode;
+ unsigned char nb_virtual_ch_video_mode;
+ unsigned short TE_line_nb;
+ enum av8100_te_config TE_config;
+ unsigned long master_clock_freq;
+ unsigned char ui_x4;
+};
+
+struct av8100_audio_input_format_cmd {
+ enum av8100_audio_if_format audio_input_if_format;
+ unsigned char i2s_input_nb;
+ enum av8100_sample_freq sample_audio_freq;
+ enum av8100_audio_word_length audio_word_lg;
+ enum av8100_audio_format audio_format;
+ enum av8100_audio_if_mode audio_if_mode;
+ enum av8100_audio_mute audio_mute;
+};
+
+struct av8100_video_output_format_cmd {
+ enum av8100_output_CEA_VESA video_output_cea_vesa;
+ enum av8100_video_sync_pol vsync_polarity;
+ enum av8100_video_sync_pol hsync_polarity;
+ unsigned short total_horizontal_pixel;
+ unsigned short total_horizontal_active_pixel;
+ unsigned short total_vertical_in_half_lines;
+ unsigned short total_vertical_active_in_half_lines;
+ unsigned short hsync_start_in_pixel;
+ unsigned short hsync_length_in_pixel;
+ unsigned short vsync_start_in_half_line;
+ unsigned short vsync_length_in_half_line;
+ unsigned short hor_video_start_pixel;
+ unsigned short vert_video_start_pixel;
+ enum av8100_video_mode video_type;
+ unsigned short pixel_repeat;
+ unsigned long pixel_clock_freq_Hz;
+};
+
+struct av8100_video_scaling_format_cmd {
+ unsigned short h_start_in_pixel;
+ unsigned short h_stop_in_pixel;
+ unsigned short v_start_in_line;
+ unsigned short v_stop_in_line;
+ unsigned short h_start_out_pixel;
+ unsigned short h_stop_out_pixel;
+ unsigned short v_start_out_line;
+ unsigned short v_stop_out_line;
+};
+
+enum av8100_color_transform {
+ AV8100_COLOR_TRANSFORM_INDENTITY,
+ AV8100_COLOR_TRANSFORM_INDENTITY_CLAMP_YUV,
+ AV8100_COLOR_TRANSFORM_YUV_TO_RGB,
+ AV8100_COLOR_TRANSFORM_YUV_TO_DENC,
+ AV8100_COLOR_TRANSFORM_RGB_TO_DENC,
+};
+
+struct av8100_cec_message_write_format_cmd {
+ unsigned char buffer_length;
+ unsigned char buffer[AV8100_CEC_MESSAGE_SIZE];
+};
+
+struct av8100_cec_message_read_back_format_cmd {
+};
+
+enum av8100_cvbs_video_format {
+ AV8100_CVBS_625,
+ AV8100_CVBS_525,
+};
+
+enum av8100_standard_selection {
+ AV8100_PAL_BDGHI,
+ AV8100_PAL_N,
+ AV8100_NTSC_M,
+ AV8100_PAL_M
+};
+
+struct av8100_denc_format_cmd {
+ enum av8100_cvbs_video_format cvbs_video_format;
+ enum av8100_standard_selection standard_selection;
+ unsigned char enable;
+ unsigned char macrovision_enable;
+ unsigned char internal_generator;
+};
+
+struct av8100_hdmi_cmd {
+ enum av8100_hdmi_mode hdmi_mode;
+ enum av8100_hdmi_format hdmi_format;
+ enum av8100_DVI_format dvi_format; /* used only if HDMI_format = DVI*/
+};
+
+struct av8100_hdcp_send_key_format_cmd {
+ unsigned char key_number;
+ unsigned char data_len;
+ unsigned char data[AV8100_HDCP_SEND_KEY_SIZE];
+};
+
+enum av8100_hdcp_auth_req_type {
+ AV8100_HDCP_AUTH_REQ_OFF = 0,
+ AV8100_HDCP_AUTH_REQ_ON = 1,
+ AV8100_HDCP_REV_LIST_REQ = 2,
+ AV8100_HDCP_AUTH_CONT = 3,
+};
+
+enum av8100_hdcp_encr_use {
+ AV8100_HDCP_ENCR_USE_OESS = 0,
+ AV8100_HDCP_ENCR_USE_EESS = 1,
+};
+
+struct av8100_hdcp_management_format_cmd {
+ unsigned char req_type;
+ unsigned char encr_use;
+};
+
+struct av8100_infoframes_format_cmd {
+ unsigned char type;
+ unsigned char version;
+ unsigned char length;
+ unsigned char crc;
+ unsigned char data[AV8100_INFOFRAME_SIZE];
+};
+
+struct av8100_edid_section_readback_format_cmd {
+ unsigned char address;
+ unsigned char block_number;
+};
+
+struct av8100_pattern_generator_format_cmd {
+ enum av8100_pattern_type pattern_type;
+ enum av8100_pattern_format pattern_video_format;
+ enum av8100_pattern_audio pattern_audio_mode;
+};
+
+enum av8100_fuse_operation {
+ AV8100_FUSE_READ = 0,
+ AV8100_FUSE_WRITE = 1,
+};
+
+struct av8100_fuse_aes_key_format_cmd {
+ unsigned char fuse_operation;
+ unsigned char key[AV8100_FUSE_KEY_SIZE];
+};
+
+union av8100_configuration {
+ struct av8100_video_input_format_cmd video_input_format;
+ struct av8100_audio_input_format_cmd audio_input_format;
+ struct av8100_video_output_format_cmd video_output_format;
+ struct av8100_video_scaling_format_cmd video_scaling_format;
+ enum av8100_color_transform color_transform;
+ struct av8100_cec_message_write_format_cmd
+ cec_message_write_format;
+ struct av8100_cec_message_read_back_format_cmd
+ cec_message_read_back_format;
+ struct av8100_denc_format_cmd denc_format;
+ struct av8100_hdmi_cmd hdmi_format;
+ struct av8100_hdcp_send_key_format_cmd hdcp_send_key_format;
+ struct av8100_hdcp_management_format_cmd hdcp_management_format;
+ struct av8100_infoframes_format_cmd infoframes_format;
+ struct av8100_edid_section_readback_format_cmd
+ edid_section_readback_format;
+ struct av8100_pattern_generator_format_cmd pattern_generator_format;
+ struct av8100_fuse_aes_key_format_cmd fuse_aes_key_format;
+};
+
+enum av8100_operating_mode {
+ AV8100_OPMODE_UNDEFINED = 0,
+ AV8100_OPMODE_SHUTDOWN,
+ AV8100_OPMODE_STANDBY,
+ AV8100_OPMODE_SCAN,
+ AV8100_OPMODE_INIT,
+ AV8100_OPMODE_IDLE,
+ AV8100_OPMODE_VIDEO,
+};
+
+enum av8100_plugin_status {
+ AV8100_PLUGIN_NONE = 0x0,
+ AV8100_HDMI_PLUGIN = 0x1,
+ AV8100_CVBS_PLUGIN = 0x2,
+};
+
+enum av8100_hdmi_event {
+ AV8100_HDMI_EVENT_NONE = 0x0,
+ AV8100_HDMI_EVENT_HDMI_PLUGIN = 0x1,
+ AV8100_HDMI_EVENT_HDMI_PLUGOUT = 0x2,
+ AV8100_HDMI_EVENT_CEC = 0x4,
+ AV8100_HDMI_EVENT_HDCP = 0x8,
+ AV8100_HDMI_EVENT_CECTXERR = 0x10,
+	AV8100_HDMI_EVENT_CECTX = 0x20, /* CEC transmission, no error */
+};
+
+struct av8100_status {
+ enum av8100_operating_mode av8100_state;
+ enum av8100_plugin_status av8100_plugin_status;
+ int hdmi_on;
+};
+
+
+int av8100_init(void);
+void av8100_exit(void);
+int av8100_powerscan(void);
+int av8100_powerup(void);
+int av8100_powerdown(void);
+int av8100_disable_interrupt(void);
+int av8100_enable_interrupt(void);
+int av8100_download_firmware(enum interface_type if_type);
+int av8100_reg_stby_w(
+ unsigned char cpd,
+ unsigned char stby,
+ unsigned char mclkrng);
+int av8100_reg_hdmi_5_volt_time_w(
+ unsigned char denc_off_time,
+ unsigned char hdmi_off_time,
+ unsigned char on_time);
+int av8100_reg_stby_int_mask_w(
+ unsigned char hpdm,
+ unsigned char cpdm,
+ unsigned char stbygpiocfg,
+ unsigned char ipol);
+int av8100_reg_stby_pend_int_w(
+ unsigned char hpdi,
+ unsigned char cpdi,
+ unsigned char oni,
+ unsigned char bpdig);
+int av8100_reg_gen_int_mask_w(
+ unsigned char eocm,
+ unsigned char vsim,
+ unsigned char vsom,
+ unsigned char cecm,
+ unsigned char hdcpm,
+ unsigned char uovbm,
+ unsigned char tem);
+int av8100_reg_gen_int_w(
+ unsigned char eoci,
+ unsigned char vsii,
+ unsigned char vsoi,
+ unsigned char ceci,
+ unsigned char hdcpi,
+ unsigned char uovbi);
+int av8100_reg_gpio_conf_w(
+ unsigned char dat3dir,
+ unsigned char dat3val,
+ unsigned char dat2dir,
+ unsigned char dat2val,
+ unsigned char dat1dir,
+ unsigned char dat1val,
+ unsigned char ucdbg);
+int av8100_reg_gen_ctrl_w(
+ unsigned char fdl,
+ unsigned char hld,
+ unsigned char wa,
+ unsigned char ra);
+int av8100_reg_fw_dl_entry_w(
+ unsigned char mbyte_code_entry);
+int av8100_reg_w(
+ unsigned char offset,
+ unsigned char value);
+int av8100_reg_stby_r(
+ unsigned char *cpd,
+ unsigned char *stby,
+ unsigned char *hpds,
+ unsigned char *cpds,
+ unsigned char *mclkrng);
+int av8100_reg_hdmi_5_volt_time_r(
+ unsigned char *denc_off_time,
+ unsigned char *hdmi_off_time,
+ unsigned char *on_time);
+int av8100_reg_stby_int_mask_r(
+ unsigned char *hpdm,
+ unsigned char *cpdm,
+ unsigned char *stbygpiocfg,
+ unsigned char *ipol);
+int av8100_reg_stby_pend_int_r(
+ unsigned char *hpdi,
+ unsigned char *cpdi,
+ unsigned char *oni,
+ unsigned char *sid);
+int av8100_reg_gen_int_mask_r(
+ unsigned char *eocm,
+ unsigned char *vsim,
+ unsigned char *vsom,
+ unsigned char *cecm,
+ unsigned char *hdcpm,
+ unsigned char *uovbm,
+ unsigned char *tem);
+int av8100_reg_gen_int_r(
+ unsigned char *eoci,
+ unsigned char *vsii,
+ unsigned char *vsoi,
+ unsigned char *ceci,
+ unsigned char *hdcpi,
+ unsigned char *uovbi,
+ unsigned char *tei);
+int av8100_reg_gen_status_r(
+ unsigned char *cectxerr,
+ unsigned char *cecrec,
+ unsigned char *cectrx,
+ unsigned char *uc,
+ unsigned char *onuvb,
+ unsigned char *hdcps);
+int av8100_reg_gpio_conf_r(
+ unsigned char *dat3dir,
+ unsigned char *dat3val,
+ unsigned char *dat2dir,
+ unsigned char *dat2val,
+ unsigned char *dat1dir,
+ unsigned char *dat1val,
+ unsigned char *ucdbg);
+int av8100_reg_gen_ctrl_r(
+ unsigned char *fdl,
+ unsigned char *hld,
+ unsigned char *wa,
+ unsigned char *ra);
+int av8100_reg_fw_dl_entry_r(
+ unsigned char *mbyte_code_entry);
+int av8100_reg_r(
+ unsigned char offset,
+ unsigned char *value);
+int av8100_conf_get(enum av8100_command_type command_type,
+ union av8100_configuration *config);
+int av8100_conf_prep(enum av8100_command_type command_type,
+ union av8100_configuration *config);
+int av8100_conf_w(enum av8100_command_type command_type,
+ unsigned char *return_buffer_length,
+ unsigned char *return_buffer, enum interface_type if_type);
+int av8100_conf_w_raw(enum av8100_command_type command_type,
+ unsigned char buffer_length,
+ unsigned char *buffer,
+ unsigned char *return_buffer_length,
+ unsigned char *return_buffer);
+struct av8100_status av8100_status_get(void);
+enum av8100_output_CEA_VESA av8100_video_output_format_get(int xres,
+ int yres,
+ int htot,
+ int vtot,
+ int pixelclk,
+ bool interlaced);
+void av8100_hdmi_event_cb_set(void (*event_callback)(enum av8100_hdmi_event));
+u8 av8100_ver_get(void);
+
+#endif /* __AV8100__H__ */
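
The declarations above suggest a prepare-then-write pattern (av8100_conf_prep() fills in the command, av8100_conf_w() sends it over the chosen interface). The sketch below assumes that ordering, assumes no return buffer is needed for the HDMI command, and treats the power-up sequencing as an assumption as well:

/* Hypothetical AV8100 HDMI-on sequence (sketch; ordering assumed). */
static int example_av8100_hdmi_on(void)
{
	union av8100_configuration conf;
	unsigned char ret_len;
	int ret;

	ret = av8100_powerup();
	if (ret)
		return ret;

	ret = av8100_download_firmware(I2C_INTERFACE);
	if (ret)
		goto out_powerdown;

	memset(&conf, 0, sizeof(conf));
	conf.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
	conf.hdmi_format.hdmi_format = AV8100_HDMI;

	ret = av8100_conf_prep(AV8100_COMMAND_HDMI, &conf);
	if (ret)
		goto out_powerdown;

	/* Return buffer assumed not to be needed for this command. */
	ret = av8100_conf_w(AV8100_COMMAND_HDMI, &ret_len, NULL, I2C_INTERFACE);
	if (ret)
		goto out_powerdown;

	return 0;

out_powerdown:
	av8100_powerdown();
	return ret;
}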
diff --git a/include/video/b2r2_blt.h b/include/video/b2r2_blt.h
new file mode 100644
index 00000000000..771d4f60fdb
--- /dev/null
+++ b/include/video/b2r2_blt.h
@@ -0,0 +1,638 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 user interface
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#ifndef _LINUX_VIDEO_B2R2_BLT_H
+#define _LINUX_VIDEO_B2R2_BLT_H
+
+#include <linux/types.h>
+
+#if defined(__KERNEL__)
+#include <linux/mm_types.h>
+#include <linux/bitops.h>
+#else
+#define BIT(nr) (1UL << (nr))
+#endif
+
+/**
+ * struct b2r2_blt_rect - Specifies a B2R2 rectangle
+ *
+ * @left: X-coordinate of top left corner
+ * @top: Y-coordinate of top left corner
+ * @width: Rectangle width. Must be >= 0.
+ * @height: Rectangle height. Must be >= 0.
+ */
+struct b2r2_blt_rect {
+ __s32 x;
+ __s32 y;
+ __s32 width;
+ __s32 height;
+};
+
+/**
+ * enum b2r2_blt_fmt - Defines the available B2R2 buffer formats
+ *
+ * Inspired by Khronos OpenMAX; please see the OpenMAX IL
+ * specification for detailed descriptions of the formats.
+ *
+ * @B2R2_BLT_FMT_UNUSED: Placeholder value when format is unknown,
+ * or specified using a vendor-specific means.
+ * @B2R2_BLT_FMT_16_BIT_ARGB4444: 16 bits per pixel ARGB format with colors
+ * stored as Alpha 15:12, Red 11:8, Green 7:4, and Blue 3:0.
+ * @B2R2_BLT_FMT_16_BIT_ARGB1555: 16 bits per pixel ARGB format with colors
+ * stored as Alpha 15, Red 14:10, Green 9:5, and Blue 4:0.
+ * @B2R2_BLT_FMT_16_BIT_RGB565: 16 bits per pixel RGB format with colors
+ * stored as Red 15:11, Green 10:5, and Blue 4:0.
+ * @B2R2_BLT_FMT_24_BIT_RGB888: 24 bits per pixel RGB format with colors
+ * stored as Red 23:16, Green 15:8, and Blue 7:0.
+ * @B2R2_BLT_FMT_32_BIT_ARGB8888: 32 bits per pixel ARGB format with colors
+ * stored as Alpha 31:24, Red 23:16, Green 15:8, and Blue 7:0.
+ * @B2R2_BLT_FMT_YUV420_PACKED_PLANAR: YUV planar format, organized with
+ * three separate planes for each color component, namely Y, U, and V.
+ * U and V pixels are sub-sampled by a factor of two both horizontally and
+ * vertically. The buffer shall contain a plane of Y, U, and V data in this
+ * order
+ * @B2R2_BLT_FMT_YUV422_PACKED_PLANAR: YUV planar format, organized with
+ * three separate planes for each color component, namely Y, U, and V.
+ * U and V pixels are subsampled by a factor of two horizontally.
+ * The buffer shall contain a plane of Y, U, and V data in this order.
+ * @B2R2_BLT_FMT_Y_CB_Y_CR: 16 bits per pixel YUV interleaved format organized
+ * as YUYV (i.e., YCbYCr).
+ * (Corresponds to YUV422 interleaved)
+ * @B2R2_BLT_FMT_CB_Y_CR_Y: 16 bits per pixel YUV interleaved format organized
+ * as UYVY (i.e., CbYCrY).
+ * (Corresponds to YUV422R)
+ * @B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: YUV planar format, organized with
+ * a first plane containing Y pixels, and a second plane containing U and V
+ * pixels interleaved with the first U value first. U and V pixels are
+ * sub-sampled by a factor of two both horizontally and vertically. The buffer
+ * shall contain a plane of Y, U and V data.
+ * (Same as B2R2 420 Raster 2 buffer - 420 R2B)
+ * @B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: YUV planar format, organized with
+ * a first plane containing Y pixels, and a second plane containing U and V
+ * pixels interleaved with the first U value first. U and V pixels are
+ * sub-sampled by a factor of two horizontally. The buffer shall contain a
+ * plane of Y, U and V data.
+ * (Same as B2R2 422 Raster 2 buffer - 422 R2B)
+ * @B2R2_BLT_FMT_32_BIT_ABGR8888: 32 bits per pixel ABGR format with colors
+ * stored as Alpha 31:24,Blue 23:16, Green 15:8, and Red 7:0.
+ * @B2R2_BLT_FMT_24_BIT_ARGB8565: 24 bits per pixel ARGB format with colors
+ * stored as Alpha 23:16, Red 15:11, Green 10:5, and Blue 4:0.
+ * @B2R2_BLT_FMT_24_BIT_YUV888: 24 bits per pixel YUV format with colors
+ * stored as Y 23:16, U 15:8, and V 7:0.
+ * @B2R2_BLT_FMT_32_BIT_AYUV8888: 32 bits per pixel AYUV format with colors
+ * stored as Alpha 31:24, Y 23:16, U 15:8, and V 7:0.
+ * @B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: Nomadik YUV 420 macro block
+ * format, see B2R2 spec for details
+ * @B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: Nomadik YUV 422 macro block
+ * format, see B2R2 spec for details
+ * @B2R2_BLT_FMT_1_BIT_A1: 1 bit per pixel A format, 1 bit alpha
+ * @B2R2_BLT_FMT_8_BIT_A8: 8 bit per pixel A format, 8 bit alpha
+ * @B2R2_BLT_FMT_YUV444_PACKED_PLANAR: YUV planar format, organized with
+ * three separate planes, one for each color component, namely Y, U, and V.
+ * All planes use full resolution, there is no subsampling.
+ * The buffer shall contain a plane of Y, U, and V data in this order.
+ * @B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: YVU planar format, organized with
+ * a first plane containing Y pixels, and a second plane containing V and U
+ * pixels interleaved with the first V value first. V and U pixels are
+ * sub-sampled by a factor of two both horizontally and vertically. The buffer
+ * shall contain two planes, one plane with Y, and one with V and U data.
+ * (Same as B2R2 420 Raster 2 buffer - 420 R2B except that chroma order is
+ * swapped.)
+ * @B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: YVU planar format, organized with
+ * a first plane containing Y pixels, and a second plane containing V and U
+ * pixels interleaved with the first V value first. V and U pixels are
+ * sub-sampled by a factor of two horizontally. The buffer shall contain
+ * two planes, one with Y, and one with V and U data.
+ * (Same as B2R2 422 Raster 2 buffer - 422 R2B except that chroma order is
+ * swapped.)
+ * @B2R2_BLT_FMT_YVU420_PACKED_PLANAR: YVU planar format, organized with
+ * three separate planes for each color component, namely Y, V, and U.
+ * V and U pixels are sub-sampled by a factor of two both horizontally and
+ * vertically. The buffer shall contain a plane of Y, V, and U data in this
+ * order. (Same as B2R2_BLT_FMT_YUV420_PACKED_PLANAR except that chroma
+ * order is swapped.)
+ * @B2R2_BLT_FMT_YVU422_PACKED_PLANAR: YVU planar format, organized with
+ * three separate planes for each color component, namely Y, V, and U.
+ * V and U pixels are subsampled by a factor of two horizontally.
+ * The buffer shall contain a plane of Y, V, and U data in this order.
+ * (Same as B2R2_BLT_FMT_YUV422_PACKED_PLANAR except that chroma
+ * order is swapped.)
+ * @B2R2_BLT_FMT_24_BIT_VUY888: 24 bits per pixel VUY format with colors
+ * stored as V 23:16, U 15:8, and Y 7:0.
+ * @B2R2_BLT_FMT_32_BIT_VUYA8888: 32 bits per pixel VUYA format with colors
+ * stored as V 31:24, U 23:16, Y 15:8, and Alpha 7:0.
+ */
+enum b2r2_blt_fmt {
+ B2R2_BLT_FMT_UNUSED = 0,
+ B2R2_BLT_FMT_16_BIT_ARGB4444 = 4,
+ B2R2_BLT_FMT_16_BIT_ARGB1555 = 5,
+ B2R2_BLT_FMT_16_BIT_RGB565 = 6,
+ B2R2_BLT_FMT_24_BIT_RGB888 = 11,
+ B2R2_BLT_FMT_32_BIT_ARGB8888 = 16,
+ B2R2_BLT_FMT_YUV420_PACKED_PLANAR = 20,
+ B2R2_BLT_FMT_YUV422_PACKED_PLANAR = 23,
+ B2R2_BLT_FMT_Y_CB_Y_CR = 25,
+ B2R2_BLT_FMT_CB_Y_CR_Y = 27,
+ B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR = 39,
+ B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR = 40,
+ /* Extensions, non OpenMAX formats */
+ B2R2_BLT_FMT_32_BIT_ABGR8888 = 0x7F000000, /* OpenMax vendor start */
+ B2R2_BLT_FMT_24_BIT_ARGB8565 = 0x7F000001,
+ B2R2_BLT_FMT_24_BIT_YUV888 = 0x7F000002,
+ B2R2_BLT_FMT_32_BIT_AYUV8888 = 0x7F000003,
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE = 0x7F000004,
+ B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE = 0x7F000005,
+ B2R2_BLT_FMT_1_BIT_A1 = 0x7F000006,
+ B2R2_BLT_FMT_8_BIT_A8 = 0x7F000007,
+ B2R2_BLT_FMT_YUV444_PACKED_PLANAR = 0x7F000008,
+ B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR = 0x7F000009,
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR = 0x7F00000A,
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR = 0x7F00000B,
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR = 0x7F00000C,
+ B2R2_BLT_FMT_24_BIT_VUY888 = 0x7F00000D,
+ B2R2_BLT_FMT_32_BIT_VUYA8888 = 0x7F00000E,
+};
+
+/**
+ * enum b2r2_blt_ptr_type - Specifies a B2R2 buffer pointer type
+ *
+ * @B2R2_BLT_PTR_NONE:
+ * No pointer (NULL). E.g. src fill.
+ * @B2R2_BLT_PTR_VIRTUAL:
+ * Use offset as a userspace virtual address
+ * @B2R2_BLT_PTR_PHYSICAL:
+ * Use offset as a physical address
+ * @B2R2_BLT_PTR_FD_OFFSET:
+ * Use fd + offset to determine buffer location.
+ * @B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET:
+ * Use hwmem_buf_name and offset to determine buffer location.
+ */
+enum b2r2_blt_ptr_type {
+ B2R2_BLT_PTR_NONE,
+ B2R2_BLT_PTR_VIRTUAL,
+ B2R2_BLT_PTR_PHYSICAL,
+ B2R2_BLT_PTR_FD_OFFSET,
+ B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET,
+};
+
+/**
+ * struct b2r2_blt_buf - Specifies a B2R2 buffer pointer
+ *
+ * @type: Buffer pointer type
+ * @hwmem_buf_name: Hwmem buffer name
+ * @fd: File descriptor (e.g. file handle to pmem or fb device)
+ * @offset: Offset where buffer can be found or address.
+ * @len: Size of buffer in bytes
+ * @bits: Pointer to the bitmap data. This field can be used to specify
+ * an alternative way to access the buffer. Whenever the 'bits' pointer
+ * is set to non-NULL, the underlying implementation is free to decide
+ * whether or not to use it in favor of other ways to locate the buffer.
+ */
+struct b2r2_blt_buf {
+ enum b2r2_blt_ptr_type type;
+ __s32 hwmem_buf_name;
+ __s32 fd;
+ __u32 offset;
+ __u32 len;
+ void *bits;
+};
+
+
+/**
+ * struct b2r2_blt_img - Specifies a B2R2 image
+ *
+ * @fmt: Pixel format of image
+ * @buf: Pixel buffer
+ * @width: Width in pixels
+ * @height: Height in pixels
+ * @pitch: Pitch in bytes (from start of one line to start of next)
+ */
+struct b2r2_blt_img {
+ enum b2r2_blt_fmt fmt;
+ struct b2r2_blt_buf buf;
+ __s32 width;
+ __s32 height;
+ __u32 pitch;
+};
+
+
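
A sketch of describing a 2D source image with the types above; the file descriptor, dimensions and offset are placeholders, and the pitch is simply width times bytes-per-pixel for a packed RGB565 buffer:

/* Hypothetical description of a 320x240 RGB565 source image (sketch). */
static void example_fill_img(struct b2r2_blt_img *img, __s32 pmem_fd)
{
	img->fmt = B2R2_BLT_FMT_16_BIT_RGB565;
	img->width = 320;
	img->height = 240;
	img->pitch = 320 * 2;			/* bytes from one line to the next */

	img->buf.type = B2R2_BLT_PTR_FD_OFFSET;
	img->buf.fd = pmem_fd;			/* e.g. a pmem or fb file handle */
	img->buf.offset = 0;			/* image starts at the buffer start */
	img->buf.len = img->pitch * img->height;
	img->buf.bits = NULL;			/* no alternative direct pointer */
}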
+/**
+ * enum b2r2_blt_transform - Specifies rotation and flipping, mutually exclusive
+ * @B2R2_BLT_TRANSFORM_NONE:
+ *	No rotation or flip
+ * @B2R2_BLT_TRANSFORM_FLIP_H:
+ *	Flip horizontally
+ * @B2R2_BLT_TRANSFORM_FLIP_V:
+ *	Flip vertically
+ * @B2R2_BLT_TRANSFORM_CCW_ROT_90:
+ *	Rotate 90 degrees counter clockwise
+ * @B2R2_BLT_TRANSFORM_CCW_ROT_180:
+ *	Rotate 180 degrees (same as flip horizontally together with
+ *	flip vertically)
+ * @B2R2_BLT_TRANSFORM_CCW_ROT_270:
+ *	Rotate 270 degrees counter clockwise
+ * @B2R2_BLT_TRANSFORM_FLIP_H_CCW_ROT_90:
+ *	Flip horizontally and then rotate 90 degrees counter clockwise
+ * @B2R2_BLT_TRANSFORM_FLIP_V_CCW_ROT_90:
+ *	Flip vertically and then rotate 90 degrees counter clockwise
+ */
+enum b2r2_blt_transform {
+ B2R2_BLT_TRANSFORM_NONE = 0,
+ B2R2_BLT_TRANSFORM_FLIP_H = 1,
+ B2R2_BLT_TRANSFORM_FLIP_V = 2,
+ B2R2_BLT_TRANSFORM_CCW_ROT_90 = 4,
+ B2R2_BLT_TRANSFORM_CCW_ROT_180 = 3,
+ B2R2_BLT_TRANSFORM_CCW_ROT_270 = 7,
+ B2R2_BLT_TRANSFORM_FLIP_H_CCW_ROT_90 = 5,
+ B2R2_BLT_TRANSFORM_FLIP_V_CCW_ROT_90 = 6,
+};
+
+
+/**
+ * enum b2r2_blt_flag - Flags that control the B2R2 request
+ *
+ * Can be combined.
+ *
+ * @B2R2_BLT_FLAG_ASYNCH:
+ * Asynchronous request. b2r2_blt will return when the request
+ * has been queued.
+ * @B2R2_BLT_FLAG_DRY_RUN:
+ * Dry run, just to check if request can be performed.
+ * @B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND:
+ * Enable per pixel alpha blend
+ * @B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND:
+ * Enable global alpha blend (alpha value in global_alpha)
+ * @B2R2_BLT_FLAG_SOURCE_COLOR_KEY:
+ * Enable source color key (color in src_color). Color should be in raw
+ * format.
+ * B2R2_BLT_FLAG_SOURCE_COLOR_KEY, B2R2_BLT_FLAG_SOURCE_FILL and
+ * B2R2_BLT_FLAG_SOURCE_FILL_RAW cannot be specified at the same time.
+ * B2R2_BLT_FLAG_SOURCE_COLOR_KEY and B2R2_BLT_FLAG_DEST_COLOR_KEY cannot be
+ * specified at the same time.
+ * @B2R2_BLT_FLAG_SOURCE_FILL:
+ * Enable ARGB/AYUV source fill (color in src_color). Which of ARGB and AYUV
+ * is determined by the destination format.
+ * B2R2_BLT_FLAG_SOURCE_COLOR_KEY, B2R2_BLT_FLAG_SOURCE_FILL and
+ * B2R2_BLT_FLAG_SOURCE_FILL_RAW cannot be specified at the same time
+ * @B2R2_BLT_FLAG_SOURCE_FILL_RAW:
+ * Enable raw color source fill (color in src_color)
+ * B2R2_BLT_FLAG_SOURCE_COLOR_KEY, B2R2_BLT_FLAG_SOURCE_FILL and
+ * B2R2_BLT_FLAG_SOURCE_FILL_RAW cannot be specified at the same time
+ * @B2R2_BLT_FLAG_DEST_COLOR_KEY:
+ * Enable dest color key (color in dst_color). Color in raw format.
+ * @B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT:
+ * Source color not premultiplied (Valid for alpha formats only).
+ * @B2R2_BLT_FLAG_DITHER:
+ * Enable dithering
+ * @B2R2_BLT_FLAG_BLUR:
+ * Enable blur
+ * @B2R2_BLT_FLAG_SOURCE_MASK:
+ * Enable source mask
+ * @B2R2_BLT_FLAG_DESTINATION_CLIP:
+ * Enable destination clip rectangle
+ * @B2R2_BLT_FLAG_INHERIT_PRIO:
+ * Inherit process priority
+ * @B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH:
+ * Skip cache flush of source image buffer
+ * @B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH:
+ * Skip cache flush of source mask buffer
+ * @B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH:
+ * Skip cache flush of destination image buffer
+ * @B2R2_BLT_FLAG_BG_BLEND:
+ * Indicate that a background buffer is supplied
+ * to the blit operation. B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND,
+ * B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT, and
+ * B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND will control the blend operation.
+ * The destination blending is in this case disabled and the destination
+ * buffer will be overwritten with the source and background blend result.
+ * @B2R2_BLT_FLAG_BG_NO_CACHE_FLUSH:
+ * Skip cache flush of background image buffer
+ * @B2R2_BLT_FLAG_REPORT_WHEN_DONE:
+ * Report through the b2r2_blt file when done. A b2r2_blt_report structure
+ * can then be read. Use poll() or select() to wait for data to read, i.e.
+ * to implement callback functionality in user space.
+ * @B2R2_BLT_FLAG_REPORT_PERFORMANCE:
+ * Include performance data in the report structure
+ * @B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION:
+ * Use color look-up table for color correction.
+ * Pointer to the table must be specified in *clut field of
+ * the b2r2_blt_req structure.
+ * The table must map all input color values
+ * for each channel to the desired output values.
+ * It is an array with the following format:
+ * R0 G0 B0 A0 R1 G1 B1 A1...R255 G255 B255 A255
+ * where R0 is the 8 bit output value for red channel whenever its input
+ * equals 0.
+ * Similarly, R1 through R255 are the red channel outputs whenever
+ * the channel's inputs equal 1 through 255 respectively.
+ * Gn, Bn, An denote green, blue and alpha channel.
+ * Whenever the input bitmap format lacks the alpha channel,
+ * all alpha values in the color correction table should be set to 255.
+ * Size of the array that specifies the color correction table
+ * must be 1024 bytes.
+ * A table that does not change anything has the form:
+ * 0 0 0 0 1 1 1 1 2 2 2 2 ... 254 254 254 254 255 255 255 255.
+ * CLUT color correction can be applied to YUV raster buffers as well,
+ * in which case the RGB color channels are mapped onto YUV-space
+ * as follows:
+ * R = red chrominance
+ * G = luminance
+ * B = blue chrominance
+ * A = alpha
+ * If any of the planar or semi-planar formats is used, luminance cannot
+ * be changed by the color correction table.
+ */
+enum b2r2_blt_flag {
+ B2R2_BLT_FLAG_ASYNCH = BIT(0),/*0x1*/
+ B2R2_BLT_FLAG_DRY_RUN = BIT(1),/*0x2*/
+ B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND = BIT(2),/*0x4*/
+ B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND = BIT(3),/*0x8*/
+ B2R2_BLT_FLAG_SOURCE_COLOR_KEY = BIT(4),/*0x10*/
+ B2R2_BLT_FLAG_SOURCE_FILL = BIT(5),/*0x20*/
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW = BIT(6),/*0x40*/
+ B2R2_BLT_FLAG_DEST_COLOR_KEY = BIT(7),/*0x80*/
+ B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT = BIT(8),/*0x100*/
+ B2R2_BLT_FLAG_DITHER = BIT(9),/*0x200*/
+ B2R2_BLT_FLAG_BLUR = BIT(10),/*0x400*/
+ B2R2_BLT_FLAG_SOURCE_MASK = BIT(11),/*0x800*/
+ B2R2_BLT_FLAG_DESTINATION_CLIP = BIT(12),/*0x1000*/
+ B2R2_BLT_FLAG_INHERIT_PRIO = BIT(13),/*0x2000*/
+ B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH = BIT(14),/*0x4000*/
+ B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH = BIT(15),/*0x8000*/
+ B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH = BIT(16),/*0x10000*/
+ B2R2_BLT_FLAG_BG_BLEND = BIT(17),/*0x20000*/
+ B2R2_BLT_FLAG_BG_NO_CACHE_FLUSH = BIT(18),/*0x40000*/
+ B2R2_BLT_FLAG_REPORT_WHEN_DONE = BIT(29),/*0x20000000*/
+ B2R2_BLT_FLAG_REPORT_PERFORMANCE = BIT(30),/*0x40000000*/
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION = BIT(31),/*0x80000000*/
+};
+
+
+/**
+ * struct b2r2_blt_req - Specifies a request to B2R2
+ *
+ * @size: Size of this structure. Used for versioning. MUST be specified.
+ * @flags: Flags that control the B2R2 request, ORed together
+ * @transform: How the source should be flipped and rotated when blitting
+ * @prio: Priority (-20 to 19). Inherits process prio
+ * if B2R2_BLT_FLAG_INHERIT_PRIO. Given priority is mapped onto B2R2.
+ * TBD: How?
+ * @clut: Pointer to the look-up table for color correction.
+ * @src_img: Source image. Not used if source fill.
+ * @src_mask: Source mask. Not used if source fill.
+ * @src_rect: Source area to be blitted.
+ * @src_color: Source fill color or color key
+ * @bg_img: Background image.
+ * @bg_rect: Background area to blend with.
+ * @dst_img: Destination image.
+ * @dst_rect: Destination area to be blitted to.
+ * @dst_color: Destination color key
+ * @dst_clip_rect: Destination clip rectangle.
+ * @global_alpha: Global alpha value (0 - 255)
+ * @report1: Data 1 to report back when request is done.
+ * See struct b2r2_blt_report.
+ * @report2: Data 2 to report back when request is done.
+ * See struct b2r2_blt_report.
+ *
+ */
+struct b2r2_blt_req {
+ __u32 size;
+ enum b2r2_blt_flag flags;
+ enum b2r2_blt_transform transform;
+ __s32 prio;
+ void *clut;
+ struct b2r2_blt_img src_img;
+ struct b2r2_blt_img src_mask;
+ struct b2r2_blt_rect src_rect;
+ __u32 src_color;
+ struct b2r2_blt_img bg_img;
+ struct b2r2_blt_rect bg_rect;
+ struct b2r2_blt_img dst_img;
+ struct b2r2_blt_rect dst_rect;
+ struct b2r2_blt_rect dst_clip_rect;
+ __u32 dst_color;
+ __u8 global_alpha;
+ __u32 report1;
+ __u32 report2;
+};
+
+/**
+ * enum b2r2_blt_cap - Capabilities that can be queried for.
+ *
+ * Capabilities can be queried for a specific format or for formats in
+ * general. To query for capabilities in general, specify BLT_FMT_UNUSED
+ * as format.
+ *
+ * @B2R2_BLT_CAP_UNUSED: Unused/unspecified capability
+ * @B2R2_BLT_CAP_FMT_SOURCE: Is format supported as source?
+ * @B2R2_BLT_CAP_FMT_SOURCE_MASK: Is format supported as source mask?
+ * @B2R2_BLT_CAP_FMT_DEST: Is format supported as dest?
+ * @B2R2_BLT_CAP_PER_PIXEL_ALPHA_BLEND: Is per pixel alpha blending supported
+ * with format as source
+ * @B2R2_BLT_CAP_GLOBAL_ALPHA_BLEND: Is global alpha blending supported
+ * with format as source
+ * @B2R2_BLT_CAP_SOURCE_COLOR_KEY: Is source color key supported with format as
+ * source
+ * @B2R2_BLT_CAP_SOURCE_FILL: Is source fill supported with format as source
+ * @B2R2_BLT_CAP_SOURCE_FILL_RAW: Is source fill raw supported with format as
+ * dest
+ * @B2R2_BLT_CAP_DEST_COLOR_KEY: Is dest color key supported with format as dest
+ * @B2R2_BLT_CAP_DITHER: Is dithering supported with format as source
+ * @B2R2_BLT_CAP_BLUR: Is blur supported with format as source
+ * @B2R2_BLT_CAP_MINIFICATION_LIMIT: Minification limit (copybit support)
+ * @B2R2_BLT_CAP_MAGNIFICATION_LIMIT: Magnification limit (copybit support)
+ * @B2R2_BLT_CAP_SCALING_FRAC_BITS: Number of scaling fractional bits (copybit
+ * support)
+ * @B2R2_BLT_CAP_ROTATION_STEP_DEG: Supported rotation step in degrees (copybit
+ * support)
+ */
+
+enum b2r2_blt_cap {
+ B2R2_BLT_CAP_UNUSED = 0,
+ /**
+ * @brief Is format supported as source.
+ */
+ B2R2_BLT_CAP_FMT_SOURCE,
+ /**
+ * @brief Is format supported as source mask
+ */
+ B2R2_BLT_CAP_FMT_SOURCE_MASK,
+ /**
+ * @brief Is format supported as destination
+ */
+ B2R2_BLT_CAP_FMT_DEST,
+ /**
+ * @brief Is per pixel alpha blending supported with format as source
+ */
+ B2R2_BLT_CAP_PER_PIXEL_ALPHA_BLEND,
+ /**
+ * @brief Is global alpha blending supported with format as source
+ */
+ B2R2_BLT_CAP_GLOBAL_ALPHA_BLEND,
+ /**
+ * @brief Is source color key supported with format as source
+ */
+ B2R2_BLT_CAP_SOURCE_COLOR_KEY,
+ /**
+ * @brief Is source fill supported with format as source
+ */
+ B2R2_BLT_CAP_SOURCE_FILL,
+ /**
+ * @brief Is source fill raw supported with format as dest
+ */
+ B2R2_BLT_CAP_SOURCE_FILL_RAW,
+ /**
+ * @brief Is dest color key supported with format as dest
+ */
+ B2R2_BLT_CAP_DEST_COLOR_KEY,
+ /**
+ * @brief Is dithering supported with format as source
+ */
+ B2R2_BLT_CAP_DITHER,
+ /**
+ * @brief Is blur supported with format as source
+ */
+ B2R2_BLT_CAP_BLUR,
+ /**
+ * @brief Minification limit (copybit support)
+ */
+ B2R2_BLT_CAP_MINIFICATION_LIMIT,
+ /**
+ * @brief Magnification limit (copybit support)
+ */
+ B2R2_BLT_CAP_MAGNIFICATION_LIMIT,
+ /**
+ * @brief Number of scaling fractional bits (copybit support)
+ */
+ B2R2_BLT_CAP_SCALING_FRAC_BITS,
+ /**
+ * @brief Supported rotation step in degrees (copybit support)
+ */
+ B2R2_BLT_CAP_ROTATION_STEP_DEG,
+};
+
+/**
+ * struct b2r2_blt_query_cap - Query B2R2 capabilities
+ *
+ * @fmt: Format to query capabilities for or BLT_FMT_UNUSED for all
+ * @cap: Capability to query for
+ * @result: Returned capability. Interpretation of this variable varies
+ * with the capability queried
+ */
+struct b2r2_blt_query_cap {
+ enum b2r2_blt_fmt fmt;
+ enum b2r2_blt_cap cap;
+ __u32 result;
+};
+
+/**
+ * struct b2r2_blt_report - Report from B2R2 driver back to user space
+ *
+ * This structure can be read from B2R2 driver if B2R2_BLT_FLAG_REPORT_WHEN_DONE
+ * flag was specified when the request was issued.
+ *
+ * @request_id: The id of the request, the same id that was returned when
+ * the request was issued (e.g. by the B2R2_BLT_IOC ioctl)
+ * @report1: Client data specified in struct b2r2_blt_req
+ * @report2: Client data specified in struct b2r2_blt_req
+ * @usec_elapsed: Number of microseconds needed to perform this blit
+ * if B2R2_BLT_FLAG_REPORT_PERFORMANCE was specified when the
+ * request was issued.
+ *
+ */
+struct b2r2_blt_report {
+ __u32 request_id;
+ __u32 report1;
+ __u32 report2;
+ __u32 usec_elapsed;
+};
+
+/**
+ * B2R2 BLT driver is used in the following way:
+ *
+ * Obtain a file descriptor to the driver:
+ * fd = open("/dev/b2r2_blt", O_RDWR);
+ *
+ * Issue requests:
+ * struct b2r2_blt_req blt_request;
+ * blt_request.size = sizeof(blt_request);
+ * ... Fill request with data...
+ *
+ * request_id = ioctl(fd, B2R2_BLT_IOC, (__u32) &blt_request);
+ *
+ * Wait for a request to finish
+ * ret = ioctl(fd, B2R2_BLT_SYNCH_IOC, (__u32) request_id);
+ *
+ * Wait for all requests from this context to finish
+ * ret = ioctl(fd, B2R2_BLT_SYNCH_IOC, (__u32) 0);
+ *
+ * Wait indefinitely for report data from driver:
+ * pollfd.fd = fd;
+ * pollfd.events = 0xFFFFFFFF;
+ * pollfd.revents = 0;
+ * ret = poll(&pollfd, 1, -1);
+ *
+ * Read report data from driver
+ * struct b2r2_blt_report blt_report;
+ *
+ * nread = read(fd, &blt_report, sizeof(blt_report));
+ *
+ * Close the driver
+ * close(fd);
+ */
+
+/* B2R2 BLT IOCTLS */
+
+/**
+ * B2R2_BLT_IOC_MAGIC is ioctl type group for B2R2 driver
+ */
+#define B2R2_BLT_IOC_MAGIC 0xb2
+
+/**
+ * The B2R2_BLT_IOC ioctl adds a blit request to B2R2.
+ *
+ * The ioctl returns when the blit has been performed, unless
+ * asynchronous execution has been specified. If asynchronous,
+ * control is returned as soon as the request has been queued.
+ *
+ * Supplied parameter shall be a pointer to a struct b2r2_blt_req.
+ *
+ * Returns a unique request id (>= 0) on success, else a negative error code.
+ * This request id can be waited for using B2R2_BLT_SYNCH_IOC.
+ * Return values: -ESOMERROR Description of an error
+ */
+#define B2R2_BLT_IOC _IOW(B2R2_BLT_IOC_MAGIC, 1, struct b2r2_blt_req)
+
+/**
+ * The B2R2_BLT_SYNCH_IOC ioctl waits for all requests, or a specified request, to finish.
+ *
+ * Supplied parameter shall be a request id previously returned by
+ * B2R2_BLT_IOC or 0 for all requests.
+ *
+ * Returns 0 if OK, else a negative error code
+ * Return value: -ESOMERROR Description of an error
+ */
+#define B2R2_BLT_SYNCH_IOC _IOW(B2R2_BLT_IOC_MAGIC, 2, int)
+
+/**
+ * The B2R2_BLT_QUERY_CAP_IOC ioctl returns capability information for all
+ * formats or for a certain format.
+ *
+ * Supplied parameter shall be a pointer to a struct b2r2_blt_query_cap.
+ *
+ * @return Returns 0 if OK, else a negative error code
+ * @retval -ESOMERROR Description of an error
+ */
+#define B2R2_BLT_QUERY_CAP_IOC _IOWR(B2R2_BLT_IOC_MAGIC, 3, \
+ struct b2r2_blt_query_cap)
+
+#endif /* _LINUX_VIDEO_B2R2_BLT_H */
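
For illustration only (outside the patch itself), the request flow and the identity CLUT described in the comments above can be exercised from userspace roughly as follows. The pixel format, the geometry handling and the 32-bit pointer cast are assumptions, and error handling is trimmed.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <video/b2r2_blt.h>	/* this header, if exported to userspace */

int blit_with_identity_clut(void *src_pix, void *dst_pix,
			    int width, int height, int pitch)
{
	static uint8_t clut[1024];	/* R0 G0 B0 A0 ... R255 G255 B255 A255 */
	struct b2r2_blt_req req;
	int fd, request_id, i;

	/* Identity table: every channel maps its input value to itself */
	for (i = 0; i < 256; i++) {
		clut[4 * i + 0] = i;	/* red */
		clut[4 * i + 1] = i;	/* green */
		clut[4 * i + 2] = i;	/* blue */
		clut[4 * i + 3] = i;	/* alpha */
	}

	fd = open("/dev/b2r2_blt", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.size = sizeof(req);
	req.flags = B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION;
	req.clut = clut;

	req.src_img.fmt = B2R2_BLT_FMT_32_BIT_ABGR8888;	/* assumed format */
	req.src_img.width = width;
	req.src_img.height = height;
	req.src_img.pitch = pitch;
	req.src_img.buf.type = B2R2_BLT_PTR_VIRTUAL;
	req.src_img.buf.offset = (uint32_t)(uintptr_t)src_pix;	/* 32-bit userspace assumed */
	req.src_img.buf.len = pitch * height;

	req.dst_img = req.src_img;
	req.dst_img.buf.offset = (uint32_t)(uintptr_t)dst_pix;

	/* src_rect/dst_rect (struct b2r2_blt_rect, declared earlier in this
	 * header) would normally be filled in with the area to blit. */

	request_id = ioctl(fd, B2R2_BLT_IOC, &req);
	if (request_id >= 0)
		ioctl(fd, B2R2_BLT_SYNCH_IOC, request_id);	/* wait for completion */

	close(fd);
	return request_id < 0 ? -1 : 0;
}
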
diff --git a/include/video/hdmi.h b/include/video/hdmi.h
new file mode 100644
index 00000000000..55dcd003fcd
--- /dev/null
+++ b/include/video/hdmi.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * HDMI driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __HDMI__H__
+#define __HDMI__H__
+
+#define HDMI_RESULT_OK 0
+#define HDMI_RESULT_NOT_OK 1
+#define HDMI_AES_NOT_FUSED 2
+#define HDMI_RESULT_CRC_MISMATCH 3
+
+#define HDMI_CEC_READ_MAXSIZE 16
+#define HDMI_CEC_WRITE_MAXSIZE 15
+#define HDMI_INFOFRAME_MAX_SIZE 27
+#define HDMI_HDCP_FUSEAES_KEYSIZE 16
+#define HDMI_HDCP_AES_BLOCK_START 128
+#define HDMI_HDCP_KSV_BLOCK 40
+#define HDMI_HDCP_AES_NR_OF_BLOCKS 18
+#define HDMI_HDCP_AES_KEYSIZE 16
+#define HDMI_HDCP_AES_KSVSIZE 5
+#define HDMI_HDCP_AES_KSVZEROESSIZE 3
+#define HDMI_EDID_DATA_SIZE 128
+#define HDMI_CEC_SIZE 15
+#define HDMI_INFOFR_SIZE 27
+#define HDMI_FUSE_KEYSIZE 16
+#define HDMI_AES_KSVSIZE 5
+#define HDMI_AES_KEYSIZE 288
+#define HDMI_CRC32_SIZE 4
+#define HDMI_HDCPAUTHRESP_SIZE 126
+
+#define HDMI_STOREASTEXT_TEXT_SIZE 2
+#define HDMI_STOREASTEXT_BIN_SIZE 1
+#define HDMI_PLUGDETEN_TEXT_SIZE 6
+#define HDMI_PLUGDETEN_BIN_SIZE 3
+#define HDMI_EDIDREAD_TEXT_SIZE 4
+#define HDMI_EDIDREAD_BIN_SIZE 2
+#define HDMI_CECEVEN_TEXT_SIZE 2
+#define HDMI_CECEVEN_BIN_SIZE 1
+#define HDMI_CECSEND_TEXT_SIZE_MAX 37
+#define HDMI_CECSEND_TEXT_SIZE_MIN 6
+#define HDMI_CECSEND_BIN_SIZE_MAX 18
+#define HDMI_CECSEND_BIN_SIZE_MIN 3
+#define HDMI_INFOFRSEND_TEXT_SIZE_MIN 8
+#define HDMI_INFOFRSEND_TEXT_SIZE_MAX 63
+#define HDMI_INFOFRSEND_BIN_SIZE_MIN 4
+#define HDMI_INFOFRSEND_BIN_SIZE_MAX 31
+#define HDMI_HDCPEVEN_TEXT_SIZE 2
+#define HDMI_HDCPEVEN_BIN_SIZE 1
+#define HDMI_HDCP_FUSEAES_TEXT_SIZE 34
+#define HDMI_HDCP_FUSEAES_BIN_SIZE 17
+#define HDMI_HDCP_LOADAES_TEXT_SIZE 594
+#define HDMI_HDCP_LOADAES_BIN_SIZE 297
+#define HDMI_HDCPAUTHENCR_TEXT_SIZE 4
+#define HDMI_HDCPAUTHENCR_BIN_SIZE 2
+#define HDMI_EVCLR_TEXT_SIZE 2
+#define HDMI_EVCLR_BIN_SIZE 1
+#define HDMI_AUDIOCFG_TEXT_SIZE 14
+#define HDMI_AUDIOCFG_BIN_SIZE 7
+#define HDMI_POWERONOFF_TEXT_SIZE 2
+#define HDMI_POWERONOFF_BIN_SIZE 1
+
+#define HDMI_IOC_MAGIC 0xcc
+
+/** IOCTL Operations */
+#define IOC_PLUG_DETECT_ENABLE _IOWR(HDMI_IOC_MAGIC, 1, int)
+#define IOC_EDID_READ _IOWR(HDMI_IOC_MAGIC, 2, int)
+#define IOC_CEC_EVENT_ENABLE _IOWR(HDMI_IOC_MAGIC, 3, int)
+#define IOC_CEC_READ _IOWR(HDMI_IOC_MAGIC, 4, int)
+#define IOC_CEC_SEND _IOWR(HDMI_IOC_MAGIC, 5, int)
+#define IOC_INFOFRAME_SEND _IOWR(HDMI_IOC_MAGIC, 6, int)
+#define IOC_HDCP_EVENT_ENABLE _IOWR(HDMI_IOC_MAGIC, 7, int)
+#define IOC_HDCP_CHKAESOTP _IOWR(HDMI_IOC_MAGIC, 8, int)
+#define IOC_HDCP_FUSEAES _IOWR(HDMI_IOC_MAGIC, 9, int)
+#define IOC_HDCP_LOADAES _IOWR(HDMI_IOC_MAGIC, 10, int)
+#define IOC_HDCP_AUTHENCR_REQ _IOWR(HDMI_IOC_MAGIC, 11, int)
+#define IOC_HDCP_STATE_GET _IOWR(HDMI_IOC_MAGIC, 12, int)
+#define IOC_EVENTS_READ _IOWR(HDMI_IOC_MAGIC, 13, int)
+#define IOC_EVENTS_CLEAR _IOWR(HDMI_IOC_MAGIC, 14, int)
+#define IOC_AUDIO_CFG _IOWR(HDMI_IOC_MAGIC, 15, int)
+#define IOC_PLUG_STATUS _IOWR(HDMI_IOC_MAGIC, 16, int)
+#define IOC_POWERONOFF _IOWR(HDMI_IOC_MAGIC, 17, int)
+#define IOC_EVENT_WAKEUP _IOWR(HDMI_IOC_MAGIC, 18, int)
+#define IOC_POWERSTATE _IOWR(HDMI_IOC_MAGIC, 19, int)
+
+
+/* HDMI driver */
+void hdmi_event(enum av8100_hdmi_event);
+int hdmi_init(void);
+void hdmi_exit(void);
+
+enum hdmi_event {
+ HDMI_EVENT_NONE = 0x0,
+ HDMI_EVENT_HDMI_PLUGIN = 0x1,
+ HDMI_EVENT_HDMI_PLUGOUT = 0x2,
+ HDMI_EVENT_CEC = 0x4,
+ HDMI_EVENT_HDCP = 0x8,
+ HDMI_EVENT_CECTXERR = 0x10,
+ HDMI_EVENT_WAKEUP = 0x20,
+ HDMI_EVENT_CECTX = 0x40,
+};
+
+enum hdmi_hdcp_auth_type {
+ HDMI_HDCP_AUTH_OFF = 0,
+ HDMI_HDCP_AUTH_START = 1,
+ HDMI_HDCP_AUTH_REV_LIST_REQ = 2,
+ HDMI_HDCP_AUTH_CONT = 3,
+};
+
+enum hdmi_hdcp_encr_type {
+ HDMI_HDCP_ENCR_OESS = 0,
+ HDMI_HDCP_ENCR_EESS = 1,
+};
+
+struct plug_detect {
+ __u8 hdmi_detect_enable;
+ __u8 on_time;
+ __u8 hdmi_off_time;
+};
+
+struct edid_read {
+ __u8 address;
+ __u8 block_nr;
+ __u8 data_length;
+ __u8 data[HDMI_EDID_DATA_SIZE];
+};
+
+struct cec_rw {
+ __u8 src;
+ __u8 dest;
+ __u8 length;
+ __u8 data[HDMI_CEC_SIZE];
+};
+
+struct info_fr {
+ __u8 type;
+ __u8 ver;
+ __u8 crc;
+ __u8 length;
+ __u8 data[HDMI_INFOFR_SIZE];
+};
+
+struct hdcp_fuseaes {
+ __u8 key[HDMI_FUSE_KEYSIZE];
+ __u8 crc;
+ __u8 result;
+};
+
+struct hdcp_loadaesall {
+ __u8 key[HDMI_AES_KEYSIZE];
+ __u8 ksv[HDMI_AES_KSVSIZE];
+ __u8 crc32[HDMI_CRC32_SIZE];
+ __u8 result;
+};
+
+
+/* hdcp_authencr resp coding
+ *
+ * When encr_type is 2 (request revocation list), the response is given by
+ * resp_size != 0 and resp containing the following:
+ *
+ * __u8[5] Bksv from sink (not belonging to revocation list)
+ * __u8 Device count
+ * Additional output if Nrofdevices > 0:
+ * __u8[5 * Nrofdevices] Bksv per connected equipment
+ * __u8[20] SHA signature
+ *
+ * Device count coding:
+ * 0 = a simple receiver is connected
+ * 0x80 = a repeater is connected without downstream equipment
+ * 0x81 = a repeater is connected with one downstream equipment
+ * up to 0x94 = (0x80 + 0x14) a repeater is connected with downstream
+ * equipment (thus up to 20 connected equipments)
+ * 1 = repeater without sink equipment connected
+ * >1 = number of connected equipment on the repeater
+ * Nrofdevices = Device count & 0x7F (max 20)
+ *
+ * Max resp_size is 5 + 1 + 5 * 20 + 20 = 126 bytes
+ *
+ */
+struct hdcp_authencr {
+ __u8 auth_type;
+ __u8 encr_type;
+ __u8 result;
+ __u8 resp_size;
+ __u8 resp[HDMI_HDCPAUTHRESP_SIZE];
+};
+
+struct audio_cfg {
+ __u8 if_format;
+ __u8 i2s_entries;
+ __u8 freq;
+ __u8 word_length;
+ __u8 format;
+ __u8 if_mode;
+ __u8 mute;
+};
+
+#endif /* __HDMI__H__ */
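
As a rough sketch of the ioctl interface above, an EDID block could be fetched from userspace along these lines. The device node name, the DDC address convention and the exact argument handling of IOC_EDID_READ are assumptions, not something this header guarantees.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <video/hdmi.h>	/* this header, if exported to userspace */

int main(void)
{
	struct edid_read er;
	int fd = open("/dev/hdmi", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	memset(&er, 0, sizeof(er));
	er.address = 0xA0;	/* assumed: DDC EDID slave address */
	er.block_nr = 0;	/* first 128-byte EDID block */

	if (ioctl(fd, IOC_EDID_READ, &er) == 0)
		printf("EDID block 0: %u bytes read\n", er.data_length);

	close(fd);
	return 0;
}
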
diff --git a/include/video/mcde.h b/include/video/mcde.h
new file mode 100644
index 00000000000..015350f4495
--- /dev/null
+++ b/include/video/mcde.h
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE__H__
+#define __MCDE__H__
+
+/* Physical interface types */
+enum mcde_port_type {
+ MCDE_PORTTYPE_DSI = 0,
+ MCDE_PORTTYPE_DPI = 1,
+};
+
+/* Interface mode */
+enum mcde_port_mode {
+ MCDE_PORTMODE_CMD = 0,
+ MCDE_PORTMODE_VID = 1,
+};
+
+/* MCDE fifos */
+enum mcde_fifo {
+ MCDE_FIFO_A = 0,
+ MCDE_FIFO_B = 1,
+ MCDE_FIFO_C0 = 2,
+ MCDE_FIFO_C1 = 3,
+};
+
+/* MCDE channels (pixel pipelines) */
+enum mcde_chnl {
+ MCDE_CHNL_A = 0,
+ MCDE_CHNL_B = 1,
+ MCDE_CHNL_C0 = 2,
+ MCDE_CHNL_C1 = 3,
+};
+
+/* Update sync mode */
+enum mcde_sync_src {
+ MCDE_SYNCSRC_OFF = 0, /* No sync */
+ MCDE_SYNCSRC_TE0 = 1, /* MCDE ext TE0 */
+ MCDE_SYNCSRC_TE1 = 2, /* MCDE ext TE1 */
+ MCDE_SYNCSRC_BTA = 3, /* DSI BTA */
+ MCDE_SYNCSRC_TE_POLLING = 4, /* DSI TE_POLLING */
+ MCDE_SYNCSRC_FORMATTER = 5, /* Sync from formatter */
+};
+
+/* Interface pixel formats (output) */
+/*
+ * REVIEW: Define formats
+ * Add explanatory comments on how the formats are ordered in memory
+ */
+enum mcde_port_pix_fmt {
+ /* MIPI standard formats */
+
+ MCDE_PORTPIXFMT_DPI_16BPP_C1 = 0x21,
+ MCDE_PORTPIXFMT_DPI_16BPP_C2 = 0x22,
+ MCDE_PORTPIXFMT_DPI_16BPP_C3 = 0x23,
+ MCDE_PORTPIXFMT_DPI_18BPP_C1 = 0x24,
+ MCDE_PORTPIXFMT_DPI_18BPP_C2 = 0x25,
+ MCDE_PORTPIXFMT_DPI_24BPP = 0x26,
+
+ MCDE_PORTPIXFMT_DSI_16BPP = 0x31,
+ MCDE_PORTPIXFMT_DSI_18BPP = 0x32,
+ MCDE_PORTPIXFMT_DSI_18BPP_PACKED = 0x33,
+ MCDE_PORTPIXFMT_DSI_24BPP = 0x34,
+
+ /* Custom formats */
+ MCDE_PORTPIXFMT_DSI_YCBCR422 = 0x40,
+};
+
+enum mcde_hdmi_sdtv_switch {
+ HDMI_SWITCH,
+ SDTV_SWITCH,
+ DVI_SWITCH
+};
+
+enum mcde_col_convert {
+ MCDE_CONVERT_RGB_2_RGB,
+ MCDE_CONVERT_RGB_2_YCBCR,
+ MCDE_CONVERT_YCBCR_2_RGB,
+ MCDE_CONVERT_YCBCR_2_YCBCR,
+};
+
+struct mcde_col_transform {
+ u16 matrix[3][3];
+ u16 offset[3];
+};
+
+/* DSI video mode */
+enum mcde_dsi_vid_mode {
+ NON_BURST_MODE_WITH_SYNC_EVENT = 0,
+ /* enables tvg, test video generator */
+ NON_BURST_MODE_WITH_SYNC_EVENT_TVG_ENABLED = 1,
+ BURST_MODE_WITH_SYNC_EVENT = 2,
+ BURST_MODE_WITH_SYNC_PULSE = 3,
+};
+
+#define MCDE_PORT_DPI_NO_CLOCK_DIV 0
+
+#define DPI_ACT_HIGH_ALL 0 /* all signals are active high */
+#define DPI_ACT_LOW_HSYNC 1 /* horizontal sync signal is active low */
+#define DPI_ACT_LOW_VSYNC 2 /* vertical sync signal is active low */
+#define DPI_ACT_LOW_DATA_ENABLE 4 /* data enable signal is active low */
+#define DPI_ACT_ON_FALLING_EDGE 8 /* drive data on the falling edge of the
+ * pixel clock
+ */
+
+struct mcde_port {
+ enum mcde_port_type type;
+ enum mcde_port_mode mode;
+ enum mcde_port_pix_fmt pixel_format;
+ u8 refresh_rate; /* display refresh rate given in Hz */
+ u8 ifc;
+ u8 link;
+ enum mcde_sync_src sync_src;
+ bool update_auto_trig;
+ enum mcde_hdmi_sdtv_switch hdmi_sdtv_switch;
+ union {
+ struct {
+ u8 virt_id;
+ u8 num_data_lanes;
+ u8 ui;
+ bool clk_cont;
+ bool host_eot_gen;
+
+ /* DSI video mode operating modes */
+ enum mcde_dsi_vid_mode vid_mode;
+
+ /*
+ * wakeup_time is the time to perform
+ * LP->HS on D-PHY. Given in clock
+ * cycles of byte clock frequency.
+ */
+ u32 vid_wakeup_time;
+
+ u32 hs_freq;
+ u32 lp_freq;
+
+ /* DSI data lanes are swapped if true */
+ bool data_lanes_swap;
+ } dsi;
+ struct {
+ u8 bus_width;
+ bool tv_mode;
+ u16 clock_div; /* use 0 or 1 for no clock divider */
+ u32 polarity; /* see DPI_ACT_LOW_* definitions */
+ u32 lcd_freq;
+ } dpi;
+ } phy;
+};
+
+/* Overlay pixel formats (input) *//* REVIEW: Define byte order */
+enum mcde_ovly_pix_fmt {
+ MCDE_OVLYPIXFMT_RGB565 = 1,
+ MCDE_OVLYPIXFMT_RGBA5551 = 2,
+ MCDE_OVLYPIXFMT_RGBA4444 = 3,
+ MCDE_OVLYPIXFMT_RGB888 = 4,
+ MCDE_OVLYPIXFMT_RGBX8888 = 5,
+ MCDE_OVLYPIXFMT_RGBA8888 = 6,
+ MCDE_OVLYPIXFMT_YCbCr422 = 7,
+};
+
+/* Display power modes */
+enum mcde_display_power_mode {
+ MCDE_DISPLAY_PM_OFF = 0, /* Power off */
+ MCDE_DISPLAY_PM_STANDBY = 1, /* DCS sleep mode */
+ MCDE_DISPLAY_PM_ON = 2, /* DCS normal mode, display on */
+};
+
+/* Display rotation */
+enum mcde_display_rotation {
+ MCDE_DISPLAY_ROT_0 = 0,
+ MCDE_DISPLAY_ROT_90_CCW = 90,
+ MCDE_DISPLAY_ROT_180_CCW = 180,
+ MCDE_DISPLAY_ROT_270_CCW = 270,
+ MCDE_DISPLAY_ROT_90_CW = MCDE_DISPLAY_ROT_270_CCW,
+ MCDE_DISPLAY_ROT_180_CW = MCDE_DISPLAY_ROT_180_CCW,
+ MCDE_DISPLAY_ROT_270_CW = MCDE_DISPLAY_ROT_90_CCW,
+};
+
+/* REVIEW: Verify */
+#define MCDE_MIN_WIDTH 16
+#define MCDE_MIN_HEIGHT 16
+#define MCDE_MAX_WIDTH 2048
+#define MCDE_MAX_HEIGHT 2048
+#define MCDE_BUF_START_ALIGMENT 8
+#define MCDE_BUF_LINE_ALIGMENT 8
+
+/* Tv-out defines */
+#define MCDE_CONFIG_TVOUT_BACKGROUND_LUMINANCE 0x83
+#define MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CB 0x9C
+#define MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CR 0x2C
+
+/* In seconds */
+#define MCDE_AUTO_SYNC_WATCHDOG 5
+
+/* DSI modes */
+#define DSI_VIDEO_MODE 0
+#define DSI_CMD_MODE 1
+
+/* Video mode descriptor */
+struct mcde_video_mode {
+ u32 xres;
+ u32 yres;
+ u32 pixclock; /* pixel clock in ps (pico seconds) */
+ u32 hbp; /* horizontal back porch: left margin (excl. hsync) */
+ u32 hfp; /* horizontal front porch: right margin (excl. hsync) */
+ u32 hsw; /* horizontal sync width */
+ u32 vbp; /* vertical back porch: upper margin (excl. vsync) */
+ u32 vfp; /* vertical front porch: lower margin (excl. vsync) */
+ u32 vsw; /* vertical sync width */
+ bool interlaced;
+ bool force_update; /* when switching between hdmi and sdtv */
+};
+
+struct mcde_rectangle {
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+};
+
+struct mcde_overlay_info {
+ u32 paddr;
+ u32 *vaddr;
+ u16 stride; /* buffer line len in bytes */
+ enum mcde_ovly_pix_fmt fmt;
+
+ u16 src_x;
+ u16 src_y;
+ u16 dst_x;
+ u16 dst_y;
+ u16 dst_z;
+ u16 w;
+ u16 h;
+ struct mcde_rectangle dirty;
+};
+
+struct mcde_overlay {
+ struct kobject kobj;
+ struct list_head list; /* mcde_display_device.ovlys */
+
+ struct mcde_display_device *ddev;
+ struct mcde_overlay_info info;
+ struct mcde_ovly_state *state;
+};
+
+/*
+ * Three functions for mapping 8 bits colour channels on 12 bits colour
+ * channels. The colour channels (ch0, ch1, ch2) can represent (r, g, b) or
+ * (Y, Cb, Cr) respectively.
+ */
+struct mcde_palette_table {
+ u16 (*map_col_ch0)(u8);
+ u16 (*map_col_ch1)(u8);
+ u16 (*map_col_ch2)(u8);
+};
+
+struct mcde_chnl_state;
+
+struct mcde_chnl_state *mcde_chnl_get(enum mcde_chnl chnl_id,
+ enum mcde_fifo fifo, const struct mcde_port *port);
+int mcde_chnl_set_pixel_format(struct mcde_chnl_state *chnl,
+ enum mcde_port_pix_fmt pix_fmt);
+int mcde_chnl_set_palette(struct mcde_chnl_state *chnl,
+ struct mcde_palette_table *palette);
+void mcde_chnl_set_col_convert(struct mcde_chnl_state *chnl,
+ struct mcde_col_transform *transform,
+ enum mcde_col_convert convert);
+int mcde_chnl_set_video_mode(struct mcde_chnl_state *chnl,
+ struct mcde_video_mode *vmode);
+/* TODO: Remove rotbuf* parameters when ESRAM allocator is implemented*/
+int mcde_chnl_set_rotation(struct mcde_chnl_state *chnl,
+ enum mcde_display_rotation rotation, u32 rotbuf1, u32 rotbuf2);
+int mcde_chnl_enable_synchronized_update(struct mcde_chnl_state *chnl,
+ bool enable);
+int mcde_chnl_set_power_mode(struct mcde_chnl_state *chnl,
+ enum mcde_display_power_mode power_mode);
+
+int mcde_chnl_apply(struct mcde_chnl_state *chnl);
+int mcde_chnl_update(struct mcde_chnl_state *chnl,
+ struct mcde_rectangle *update_area,
+ bool tripple_buffer);
+void mcde_chnl_put(struct mcde_chnl_state *chnl);
+
+void mcde_chnl_stop_flow(struct mcde_chnl_state *chnl);
+
+void mcde_chnl_enable(struct mcde_chnl_state *chnl);
+void mcde_chnl_disable(struct mcde_chnl_state *chnl);
+
+/* MCDE overlay */
+struct mcde_ovly_state;
+
+struct mcde_ovly_state *mcde_ovly_get(struct mcde_chnl_state *chnl);
+void mcde_ovly_set_source_buf(struct mcde_ovly_state *ovly,
+ u32 paddr);
+void mcde_ovly_set_source_info(struct mcde_ovly_state *ovly,
+ u32 stride, enum mcde_ovly_pix_fmt pix_fmt);
+void mcde_ovly_set_source_area(struct mcde_ovly_state *ovly,
+ u16 x, u16 y, u16 w, u16 h);
+void mcde_ovly_set_dest_pos(struct mcde_ovly_state *ovly,
+ u16 x, u16 y, u8 z);
+void mcde_ovly_apply(struct mcde_ovly_state *ovly);
+void mcde_ovly_put(struct mcde_ovly_state *ovly);
+
+/* MCDE dsi */
+
+#define DCS_CMD_ENTER_IDLE_MODE 0x39
+#define DCS_CMD_ENTER_INVERT_MODE 0x21
+#define DCS_CMD_ENTER_NORMAL_MODE 0x13
+#define DCS_CMD_ENTER_PARTIAL_MODE 0x12
+#define DCS_CMD_ENTER_SLEEP_MODE 0x10
+#define DCS_CMD_EXIT_IDLE_MODE 0x38
+#define DCS_CMD_EXIT_INVERT_MODE 0x20
+#define DCS_CMD_EXIT_SLEEP_MODE 0x11
+#define DCS_CMD_GET_ADDRESS_MODE 0x0B
+#define DCS_CMD_GET_BLUE_CHANNEL 0x08
+#define DCS_CMD_GET_DIAGNOSTIC_RESULT 0x0F
+#define DCS_CMD_GET_DISPLAY_MODE 0x0D
+#define DCS_CMD_GET_GREEN_CHANNEL 0x07
+#define DCS_CMD_GET_PIXEL_FORMAT 0x0C
+#define DCS_CMD_GET_POWER_MODE 0x0A
+#define DCS_CMD_GET_RED_CHANNEL 0x06
+#define DCS_CMD_GET_SCANLINE 0x45
+#define DCS_CMD_GET_SIGNAL_MODE 0x0E
+#define DCS_CMD_NOP 0x00
+#define DCS_CMD_READ_DDB_CONTINUE 0xA8
+#define DCS_CMD_READ_DDB_START 0xA1
+#define DCS_CMD_READ_MEMORY_CONTINE 0x3E
+#define DCS_CMD_READ_MEMORY_START 0x2E
+#define DCS_CMD_SET_ADDRESS_MODE 0x36
+#define DCS_CMD_SET_COLUMN_ADDRESS 0x2A
+#define DCS_CMD_SET_DISPLAY_OFF 0x28
+#define DCS_CMD_SET_DISPLAY_ON 0x29
+#define DCS_CMD_SET_GAMMA_CURVE 0x26
+#define DCS_CMD_SET_PAGE_ADDRESS 0x2B
+#define DCS_CMD_SET_PARTIAL_AREA 0x30
+#define DCS_CMD_SET_PIXEL_FORMAT 0x3A
+#define DCS_CMD_SET_SCROLL_AREA 0x33
+#define DCS_CMD_SET_SCROLL_START 0x37
+#define DCS_CMD_SET_TEAR_OFF 0x34
+#define DCS_CMD_SET_TEAR_ON 0x35
+#define DCS_CMD_SET_TEAR_SCANLINE 0x44
+#define DCS_CMD_SOFT_RESET 0x01
+#define DCS_CMD_WRITE_LUT 0x2D
+#define DCS_CMD_WRITE_CONTINUE 0x3C
+#define DCS_CMD_WRITE_START 0x2C
+
+#define MCDE_MAX_DCS_READ 4
+#define MCDE_MAX_DSI_DIRECT_CMD_WRITE 15
+
+int mcde_dsi_generic_write(struct mcde_chnl_state *chnl, u8 *para, int len);
+int mcde_dsi_dcs_write(struct mcde_chnl_state *chnl,
+ u8 cmd, u8 *data, int len);
+int mcde_dsi_dcs_read(struct mcde_chnl_state *chnl,
+ u8 cmd, u32 *data, int *len);
+int mcde_dsi_set_max_pkt_size(struct mcde_chnl_state *chnl);
+
+/* MCDE */
+
+/* Driver data */
+#define MCDE_IRQ "MCDE IRQ"
+#define MCDE_IO_AREA "MCDE I/O Area"
+
+struct mcde_platform_data {
+ /* DPI */
+ u8 outmux[5]; /* MCDE_CONF0.OUTMUXx */
+ u8 syncmux; /* MCDE_CONF0.SYNCMUXx */
+
+ const char *regulator_vana_id;
+ const char *regulator_mcde_epod_id;
+ const char *regulator_esram_epod_id;
+ const char *clock_dsi_id;
+ const char *clock_dsi_lp_id;
+ const char *clock_dpi_id;
+ const char *clock_mcde_id;
+
+ int (*platform_set_clocks)(void);
+ int (*platform_enable_dsipll)(void);
+ int (*platform_disable_dsipll)(void);
+};
+
+int mcde_init(void);
+void mcde_exit(void);
+
+#endif /* __MCDE__H__ */
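
A hedged sketch of how the channel and overlay API declared above fits together for a single frame update. The channel/fifo choice, the pixel format and the ERR_PTR-style error convention assumed for mcde_chnl_get() are illustrative, not mandated by this header.

#include <linux/err.h>
#include <video/mcde.h>

static int example_mcde_show_frame(struct mcde_port *port, u32 paddr,
				   u16 width, u16 height, u32 stride)
{
	struct mcde_chnl_state *chnl;
	struct mcde_ovly_state *ovly;
	struct mcde_video_mode vmode = { .xres = width, .yres = height };
	struct mcde_rectangle area = { .x = 0, .y = 0, .w = width, .h = height };
	int ret;

	chnl = mcde_chnl_get(MCDE_CHNL_A, MCDE_FIFO_A, port);
	if (IS_ERR(chnl))
		return PTR_ERR(chnl);

	ret = mcde_chnl_set_video_mode(chnl, &vmode);
	if (ret)
		goto out;

	ovly = mcde_ovly_get(chnl);	/* sketch: error check omitted */
	mcde_ovly_set_source_buf(ovly, paddr);
	mcde_ovly_set_source_info(ovly, stride, MCDE_OVLYPIXFMT_RGB565);
	mcde_ovly_set_source_area(ovly, 0, 0, width, height);
	mcde_ovly_set_dest_pos(ovly, 0, 0, 0);
	mcde_ovly_apply(ovly);

	ret = mcde_chnl_apply(chnl);
	if (!ret)
		ret = mcde_chnl_update(chnl, &area, false);
	mcde_ovly_put(ovly);
out:
	mcde_chnl_put(chnl);
	return ret;
}
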
diff --git a/include/video/mcde_display-ab8500.h b/include/video/mcde_display-ab8500.h
new file mode 100644
index 00000000000..ffebe62af92
--- /dev/null
+++ b/include/video/mcde_display-ab8500.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * AB8500 tvout driver interface
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __DISPLAY_AB8500__H__
+#define __DISPLAY_AB8500__H__
+
+#include <video/mcde.h>
+
+struct ab8500_display_platform_data {
+ /* Platform info */
+ struct mcde_col_transform *rgb_2_yCbCr_transform;
+ int nr_regulators;
+ const char *regulator_id[];
+};
+
+#endif /* __DISPLAY_AB8500__H__*/
+
diff --git a/include/video/mcde_display-av8100.h b/include/video/mcde_display-av8100.h
new file mode 100644
index 00000000000..7c13b49e58f
--- /dev/null
+++ b/include/video/mcde_display-av8100.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE HDMI display driver
+ *
+ * Author: Per Persson <per-xb-persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __DISPLAY_AV8100__H__
+#define __DISPLAY_AV8100__H__
+
+#include <linux/regulator/consumer.h>
+
+#include "mcde_display.h"
+
+#define GPIO_AV8100_RSTN 196
+#define NATIVE_XRES_HDMI 1280
+#define NATIVE_YRES_HDMI 720
+#define NATIVE_XRES_SDTV 720
+#define NATIVE_YRES_SDTV 576
+#define DISPONOFF_SIZE 6
+#define TIMING_SIZE 2
+#define STAYALIVE_SIZE 1
+
+struct mcde_display_hdmi_platform_data {
+ /* Platform info */
+ int reset_gpio;
+ bool reset_high;
+ const char *regulator_id;
+ const char *cvbs_regulator_id;
+ int reset_delay; /* ms */
+ u32 ddb_id;
+ struct mcde_col_transform *rgb_2_yCbCr_transform;
+
+ /* Driver data */ /* TODO: move to driver data instead */
+ bool hdmi_platform_enable;
+ struct regulator *regulator;
+};
+
+struct display_driver_data {
+ struct regulator *cvbs_regulator;
+ bool cvbs_regulator_enabled;
+ bool update_port_pixel_format;
+ const char *fbdevname;
+ struct mcde_video_mode *video_mode;
+};
+
+void hdmi_fb_onoff(struct mcde_display_device *ddev, bool enable,
+ u8 cea, u8 vesa_cea_nr);
+
+#endif /* __DISPLAY_AV8100__H__ */
diff --git a/include/video/mcde_display-generic_dsi.h b/include/video/mcde_display-generic_dsi.h
new file mode 100644
index 00000000000..87ef6baf67a
--- /dev/null
+++ b/include/video/mcde_display-generic_dsi.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE generic DCS display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE_DISPLAY_GENERIC__H__
+#define __MCDE_DISPLAY_GENERIC__H__
+
+#include <linux/regulator/consumer.h>
+
+#include "mcde_display.h"
+
+struct mcde_display_generic_platform_data {
+ /* Platform info */
+ int reset_gpio;
+ bool reset_high;
+ const char *regulator_id;
+ int reset_delay; /* ms */
+ int sleep_out_delay; /* ms */
+ u32 ddb_id;
+
+ /* Driver data */
+ bool generic_platform_enable;
+ struct regulator *regulator;
+ int max_supply_voltage;
+ int min_supply_voltage;
+};
+
+#endif /* __MCDE_DISPLAY_GENERIC__H__ */
+
diff --git a/include/video/mcde_display-sony_acx424akp_dsi.h b/include/video/mcde_display-sony_acx424akp_dsi.h
new file mode 100644
index 00000000000..29fb14a3fdb
--- /dev/null
+++ b/include/video/mcde_display-sony_acx424akp_dsi.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE Sony acx424akp DCS display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE_DISPLAY_SONY_ACX424AKP__H__
+#define __MCDE_DISPLAY_SONY_ACX424AKP__H__
+
+enum display_panel_type {
+ DISPLAY_NONE = 0,
+ DISPLAY_SONY_ACX424AKP = 0x1b81,
+ DISPLAY_SONY_ACX424AKP_ID2 = 0x1a81,
+};
+
+struct mcde_display_sony_acx424akp_platform_data {
+ /* Platform info */
+ int reset_gpio;
+ enum display_panel_type disp_panel; /* display panel types */
+};
+
+#endif /* __MCDE_DISPLAY_SONY_ACX424AKP__H__ */
+
diff --git a/include/video/mcde_display-vuib500-dpi.h b/include/video/mcde_display-vuib500-dpi.h
new file mode 100644
index 00000000000..94bad83bf97
--- /dev/null
+++ b/include/video/mcde_display-vuib500-dpi.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE DPI display driver
+ *
+ * Author: Torbjorn Svensson <torbjorn.x.svensson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __MCDE_DISPLAY_DPI__H__
+#define __MCDE_DISPLAY_DPI__H__
+
+#include <linux/regulator/consumer.h>
+
+#include "mcde_display.h"
+
+struct mcde_display_dpi_platform_data {
+ /* Platform info */
+ int reset_gpio;
+ bool reset_high;
+ const char *regulator_id;
+ int reset_delay;
+
+ /* Driver data */
+ struct regulator *regulator;
+ int max_supply_voltage;
+ int min_supply_voltage;
+};
+#endif /* __MCDE_DISPLAY_DPI__H__ */
diff --git a/include/video/mcde_display.h b/include/video/mcde_display.h
new file mode 100644
index 00000000000..97326cbc28c
--- /dev/null
+++ b/include/video/mcde_display.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE_DISPLAY__H__
+#define __MCDE_DISPLAY__H__
+
+#include <linux/device.h>
+#include <linux/pm.h>
+
+#include <video/mcde.h>
+
+#define UPDATE_FLAG_PIXEL_FORMAT 0x1
+#define UPDATE_FLAG_VIDEO_MODE 0x2
+#define UPDATE_FLAG_ROTATION 0x4
+
+struct mcde_display_dsi_platform_data {
+ int reset_gpio;
+ int link;
+};
+
+#define to_mcde_display_device(__dev) \
+ container_of((__dev), struct mcde_display_device, dev)
+
+struct mcde_display_device {
+ /* MCDE driver static */
+ struct device dev;
+ const char *name;
+ int id;
+ struct mcde_port *port;
+ struct fb_info *fbi;
+ bool fictive;
+
+ /* MCDE dss driver internal */
+ bool initialized;
+ enum mcde_chnl chnl_id;
+ enum mcde_fifo fifo;
+ bool first_update;
+ struct mutex display_lock;
+
+ bool enabled;
+ struct mcde_chnl_state *chnl_state;
+ struct list_head ovlys;
+ struct mcde_rectangle update_area;
+ /* TODO: Remove once ESRAM allocator is done */
+ u32 rotbuf1;
+ u32 rotbuf2;
+
+ /* Display driver internal */
+ u16 native_x_res;
+ u16 native_y_res;
+ u16 physical_width;
+ u16 physical_height;
+ enum mcde_display_power_mode power_mode;
+ enum mcde_ovly_pix_fmt default_pixel_format;
+ enum mcde_ovly_pix_fmt pixel_format;
+ enum mcde_display_rotation rotation;
+ bool synchronized_update;
+ struct mcde_video_mode video_mode;
+ int update_flags;
+ bool stay_alive;
+ int check_transparency;
+
+ /* Driver API */
+ void (*get_native_resolution)(struct mcde_display_device *dev,
+ u16 *x_res, u16 *y_res);
+ enum mcde_ovly_pix_fmt (*get_default_pixel_format)(
+ struct mcde_display_device *dev);
+ void (*get_physical_size)(struct mcde_display_device *dev,
+ u16 *x_size, u16 *y_size);
+
+ int (*set_power_mode)(struct mcde_display_device *dev,
+ enum mcde_display_power_mode power_mode);
+ enum mcde_display_power_mode (*get_power_mode)(
+ struct mcde_display_device *dev);
+
+ int (*try_video_mode)(struct mcde_display_device *dev,
+ struct mcde_video_mode *video_mode);
+ int (*set_video_mode)(struct mcde_display_device *dev,
+ struct mcde_video_mode *video_mode);
+ void (*get_video_mode)(struct mcde_display_device *dev,
+ struct mcde_video_mode *video_mode);
+ int (*set_pixel_format)(struct mcde_display_device *dev,
+ enum mcde_ovly_pix_fmt pix_fmt);
+ enum mcde_ovly_pix_fmt (*get_pixel_format)(
+ struct mcde_display_device *dev);
+ enum mcde_port_pix_fmt (*get_port_pixel_format)(
+ struct mcde_display_device *dev);
+
+ int (*set_rotation)(struct mcde_display_device *dev,
+ enum mcde_display_rotation rotation);
+ enum mcde_display_rotation (*get_rotation)(
+ struct mcde_display_device *dev);
+
+ int (*set_synchronized_update)(struct mcde_display_device *dev,
+ bool enable);
+ bool (*get_synchronized_update)(struct mcde_display_device *dev);
+
+ int (*apply_config)(struct mcde_display_device *dev);
+ int (*invalidate_area)(struct mcde_display_device *dev,
+ struct mcde_rectangle *area);
+ int (*update)(struct mcde_display_device *dev, bool tripple_buffer);
+ int (*on_first_update)(struct mcde_display_device *dev);
+ int (*platform_enable)(struct mcde_display_device *dev);
+ int (*platform_disable)(struct mcde_display_device *dev);
+ int (*ceanr_convert)(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr, int buffering,
+ u16 *w, u16 *h, u16 *vw, u16 *vh);
+};
+
+struct mcde_display_driver {
+ int (*probe)(struct mcde_display_device *dev);
+ int (*remove)(struct mcde_display_device *dev);
+ void (*shutdown)(struct mcde_display_device *dev);
+ int (*suspend)(struct mcde_display_device *dev,
+ pm_message_t state);
+ int (*resume)(struct mcde_display_device *dev);
+
+ struct device_driver driver;
+};
+
+/* MCDE dsi (Used by MCDE display drivers) */
+
+int mcde_display_dsi_dcs_write(struct mcde_display_device *dev,
+ u8 cmd, u8 *data, int len);
+int mcde_display_dsi_dcs_read(struct mcde_display_device *dev,
+ u8 cmd, u8 *data, int *len);
+int mcde_display_dsi_bta_sync(struct mcde_display_device *dev);
+
+/* MCDE display bus */
+
+int mcde_display_driver_register(struct mcde_display_driver *drv);
+void mcde_display_driver_unregister(struct mcde_display_driver *drv);
+int mcde_display_device_register(struct mcde_display_device *dev);
+void mcde_display_device_unregister(struct mcde_display_device *dev);
+
+void mcde_display_init_device(struct mcde_display_device *dev);
+
+int mcde_display_init(void);
+void mcde_display_exit(void);
+
+#endif /* __MCDE_DISPLAY__H__ */
+
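
A minimal skeleton of a panel driver sitting on top of the display bus API above might look as follows; everything named example_* is hypothetical, and the probe body only indicates where a real driver would fill in the mcde_display_device ops and resolution fields.

#include <linux/init.h>
#include <linux/module.h>
#include <video/mcde_display.h>

static int example_panel_probe(struct mcde_display_device *ddev)
{
	/* A real driver would set ddev->set_power_mode, native resolution,
	 * physical size, etc. here before the device is used. */
	return 0;
}

static int example_panel_remove(struct mcde_display_device *ddev)
{
	return 0;
}

static struct mcde_display_driver example_panel_driver = {
	.probe	= example_panel_probe,
	.remove	= example_panel_remove,
	.driver	= {
		.name = "example_panel",
	},
};

static int __init example_panel_init(void)
{
	return mcde_display_driver_register(&example_panel_driver);
}
module_init(example_panel_init);

static void __exit example_panel_exit(void)
{
	mcde_display_driver_unregister(&example_panel_driver);
}
module_exit(example_panel_exit);

MODULE_LICENSE("GPL");
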
diff --git a/include/video/mcde_dss.h b/include/video/mcde_dss.h
new file mode 100644
index 00000000000..efed79ad023
--- /dev/null
+++ b/include/video/mcde_dss.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE display sub system driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE_DSS__H__
+#define __MCDE_DSS__H__
+
+#include <linux/kobject.h>
+#include <linux/notifier.h>
+
+#include "mcde.h"
+#include "mcde_display.h"
+
+/* Public MCDE dss (Used by MCDE fb ioctl & MCDE display sysfs) */
+int mcde_dss_open_channel(struct mcde_display_device *ddev);
+void mcde_dss_close_channel(struct mcde_display_device *ddev);
+int mcde_dss_enable_display(struct mcde_display_device *ddev);
+void mcde_dss_disable_display(struct mcde_display_device *ddev);
+int mcde_dss_apply_channel(struct mcde_display_device *ddev);
+struct mcde_overlay *mcde_dss_create_overlay(struct mcde_display_device *ddev,
+ struct mcde_overlay_info *info);
+void mcde_dss_destroy_overlay(struct mcde_overlay *ovl);
+int mcde_dss_enable_overlay(struct mcde_overlay *ovl);
+void mcde_dss_disable_overlay(struct mcde_overlay *ovl);
+int mcde_dss_apply_overlay(struct mcde_overlay *ovl,
+ struct mcde_overlay_info *info);
+void mcde_dss_get_overlay_info(struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info);
+int mcde_dss_update_overlay(struct mcde_overlay *ovl, bool tripple_buffer);
+
+void mcde_dss_get_native_resolution(struct mcde_display_device *ddev,
+ u16 *x_res, u16 *y_res);
+enum mcde_ovl_pix_fmt mcde_dss_get_default_color_format(
+ struct mcde_display_device *ddev);
+void mcde_dss_get_physical_size(struct mcde_display_device *ddev,
+ u16 *x_size, u16 *y_size); /* mm */
+
+int mcde_dss_try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+int mcde_dss_set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+void mcde_dss_get_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+
+int mcde_dss_set_pixel_format(struct mcde_display_device *ddev,
+ enum mcde_ovly_pix_fmt pix_fmt);
+int mcde_dss_get_pixel_format(struct mcde_display_device *ddev);
+
+int mcde_dss_set_rotation(struct mcde_display_device *ddev,
+ enum mcde_display_rotation rotation);
+enum mcde_display_rotation mcde_dss_get_rotation(
+ struct mcde_display_device *ddev);
+
+int mcde_dss_set_synchronized_update(struct mcde_display_device *ddev,
+ bool enable);
+bool mcde_dss_get_synchronized_update(struct mcde_display_device *ddev);
+
+/* MCDE dss events */
+
+/* A display device and driver has been loaded, probed and bound */
+#define MCDE_DSS_EVENT_DISPLAY_REGISTERED 1
+/* A display device has been removed */
+#define MCDE_DSS_EVENT_DISPLAY_UNREGISTERED 2
+
+/* Note! Notifier callback will be called holding the dev sem */
+int mcde_dss_register_notifier(struct notifier_block *nb);
+int mcde_dss_unregister_notifier(struct notifier_block *nb);
+
+/* MCDE dss driver */
+
+int mcde_dss_init(void);
+void mcde_dss_exit(void);
+
+#endif /* __MCDE_DSS__H__ */
+
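
The typical DSS call sequence (open channel, enable display, create and update an overlay) could be sketched like this; the overlay info values are illustrative assumptions and cleanup is abbreviated.

#include <linux/errno.h>
#include <linux/string.h>
#include <video/mcde_dss.h>

static int example_dss_show_frame(struct mcde_display_device *ddev, u32 paddr)
{
	struct mcde_overlay_info info;
	struct mcde_overlay *ovl;
	u16 xres, yres;
	int ret;

	ret = mcde_dss_open_channel(ddev);
	if (ret)
		return ret;
	ret = mcde_dss_enable_display(ddev);
	if (ret)
		goto out_close;

	mcde_dss_get_native_resolution(ddev, &xres, &yres);

	memset(&info, 0, sizeof(info));
	info.paddr = paddr;
	info.fmt = MCDE_OVLYPIXFMT_RGB565;	/* assumed frame format */
	info.stride = xres * 2;			/* bytes per line for RGB565 */
	info.w = xres;
	info.h = yres;
	info.dirty.w = xres;			/* mark the whole frame dirty */
	info.dirty.h = yres;

	ovl = mcde_dss_create_overlay(ddev, &info);
	if (!ovl) {
		ret = -ENOMEM;
		goto out_disable;
	}
	ret = mcde_dss_enable_overlay(ovl);
	if (!ret)
		ret = mcde_dss_update_overlay(ovl, false);

	mcde_dss_disable_overlay(ovl);
	mcde_dss_destroy_overlay(ovl);
out_disable:
	mcde_dss_disable_display(ddev);
out_close:
	mcde_dss_close_channel(ddev);
	return ret;
}
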
diff --git a/include/video/mcde_fb.h b/include/video/mcde_fb.h
new file mode 100644
index 00000000000..17556414aa0
--- /dev/null
+++ b/include/video/mcde_fb.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE display sub system frame buffer driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __MCDE_FB__H__
+#define __MCDE_FB__H__
+
+#include <linux/fb.h>
+#include <linux/ioctl.h>
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#else
+#include <linux/types.h>
+#include <linux/hwmem.h>
+#endif
+
+#ifdef __KERNEL__
+#include "mcde_dss.h"
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#endif
+
+#define MCDE_GET_BUFFER_NAME_IOC _IO('M', 1)
+
+#ifdef __KERNEL__
+#define to_mcde_fb(x) ((struct mcde_fb *)(x)->par)
+
+#define MCDE_FB_MAX_NUM_OVERLAYS 3
+
+struct mcde_fb {
+ int num_ovlys;
+ struct mcde_overlay *ovlys[MCDE_FB_MAX_NUM_OVERLAYS];
+ u32 pseudo_palette[17];
+ enum mcde_ovly_pix_fmt pix_fmt;
+ int id;
+ struct hwmem_alloc *alloc;
+ int alloc_name;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
+};
+
+/* MCDE fbdev API */
+struct fb_info *mcde_fb_create(struct mcde_display_device *ddev,
+ uint16_t w, uint16_t h, uint16_t vw, uint16_t vh,
+ enum mcde_ovly_pix_fmt pix_fmt, uint32_t rotate);
+
+int mcde_fb_attach_overlay(struct fb_info *fb_info,
+ struct mcde_overlay *ovl);
+void mcde_fb_destroy(struct mcde_display_device *ddev);
+
+/* MCDE fb driver */
+int mcde_fb_init(void);
+void mcde_fb_exit(void);
+#endif
+
+#endif /* __MCDE_FB__H__ */
+
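
MCDE_GET_BUFFER_NAME_IOC can be exercised from userspace as sketched below, assuming the frame buffer is exposed as /dev/fb0 and that the ioctl returns the hwmem buffer name as its return value (both assumptions).

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define MCDE_GET_BUFFER_NAME_IOC _IO('M', 1)	/* mirrors mcde_fb.h */

int main(void)
{
	int fd = open("/dev/fb0", O_RDWR);
	int name;

	if (fd < 0)
		return 1;
	name = ioctl(fd, MCDE_GET_BUFFER_NAME_IOC);
	if (name >= 0)
		printf("hwmem buffer name: %d\n", name);
	close(fd);
	return 0;
}
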
diff --git a/init/Kconfig b/init/Kconfig
index 3f42cd66f0f..55d4dad7e46 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1320,6 +1320,15 @@ config PROFILING
Say Y here to enable the extended profiling support mechanisms used
by profilers such as OProfile.
+config BOOTTIME
+ bool "Boot time measurments"
+ default n
+ help
+ Adds sysfs entries (boottime/) with start-up timing information.
+ If CONFIG_DEBUG_FS is enabled, detailed information about the
+ boot time, including the system load during boot, can be extracted.
+ This information can be visualised with the help of the bootgraph script.
+
#
# Place an empty function call at each tracepoint site. Can be
# dynamically changed for a probe function.
diff --git a/init/Makefile b/init/Makefile
index 0bf677aa087..6b77be3855f 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -9,6 +9,7 @@ else
obj-$(CONFIG_BLK_DEV_INITRD) += initramfs.o
endif
obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
+obj-$(CONFIG_BOOTTIME) += boottime.o
mounts-y := do_mounts.o
mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o
diff --git a/init/boottime.c b/init/boottime.c
new file mode 100644
index 00000000000..5bdf291a627
--- /dev/null
+++ b/init/boottime.c
@@ -0,0 +1,475 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2009-2010
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * boottime is a tool for collecting start-up timing
+ * information and can together with boot loader support
+ * display a total system start-up time.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/boottime.h>
+#include <linux/kernel_stat.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+
+/*
+ * BOOTTIME_MAX_NAME_LEN is defined as 64 in arch/arm/include/asm/setup.h.
+ * It is not a problem if the two definitions don't match.
+ */
+#ifndef BOOTTIME_MAX_NAME_LEN
+#define BOOTTIME_MAX_NAME_LEN 64
+#endif
+
+/*
+ * We have a few static entries, since it is good to have measurement points
+ * before the system is up and running properly.
+ */
+#define NUM_STATIC_BOOTTIME_ENTRIES 16
+
+struct boottime_list {
+ struct list_head list;
+ char name[BOOTTIME_MAX_NAME_LEN];
+ /* Time in us since power on, possibly including the boot loader. */
+ unsigned long time;
+ bool cpu_load;
+ struct kernel_cpustat cpu_usage[NR_CPUS];
+};
+
+enum boottime_filter_type {
+ BOOTTIME_FILTER_OUT_ZERO,
+ BOOTTIME_FILTER_OUT_LESS_100,
+ BOOTTIME_FILTER_NOTHING,
+};
+
+enum boottime_symbolic_print {
+ BOOTTIME_SYMBOLIC_PRINT,
+ BOOTTIME_NORMAL_PRINT,
+};
+
+enum boottime_cpu_load {
+ BOOTTIME_CPU_LOAD,
+ BOOTTIME_NO_CPU_LOAD,
+};
+
+static LIST_HEAD(boottime_list);
+static __initdata DEFINE_SPINLOCK(boottime_list_lock);
+static __initdata struct boottime_timer boottime_timer;
+static __initdata int num_const_boottime_list;
+static struct boottime_list const_boottime_list[NUM_STATIC_BOOTTIME_ENTRIES];
+static unsigned long time_kernel_done;
+static unsigned long time_bootloader_done;
+static __initdata bool system_up;
+static bool boottime_done;
+
+int __attribute__((weak)) boottime_arch_startup(void)
+{
+ return 0;
+}
+
+int __attribute__((weak)) boottime_bootloader_idle(void)
+{
+ return 0;
+}
+
+static void __init boottime_mark_core(char *name,
+ unsigned long time,
+ enum boottime_symbolic_print symbolic,
+ enum boottime_cpu_load cpu_load)
+{
+ struct boottime_list *b;
+ unsigned long flags = 0;
+ int i;
+
+ if (system_up) {
+ b = kmalloc(sizeof(struct boottime_list), GFP_KERNEL);
+ if (!b) {
+ printk(KERN_ERR
+ "boottime: failed to allocate memory!\n");
+ return;
+ }
+
+ } else {
+ if (num_const_boottime_list < NUM_STATIC_BOOTTIME_ENTRIES) {
+ b = &const_boottime_list[num_const_boottime_list];
+ num_const_boottime_list++;
+ } else {
+ printk(KERN_ERR
+ "boottime: too many early measure points!\n");
+ return;
+ }
+ }
+
+ INIT_LIST_HEAD(&b->list);
+
+ if (symbolic == BOOTTIME_SYMBOLIC_PRINT)
+ snprintf(b->name, BOOTTIME_MAX_NAME_LEN, "%pF", name);
+ else
+ strncpy(b->name, name, BOOTTIME_MAX_NAME_LEN);
+
+ b->name[BOOTTIME_MAX_NAME_LEN - 1] = '\0';
+ b->time = time;
+ b->cpu_load = cpu_load;
+
+ if (cpu_load == BOOTTIME_CPU_LOAD && system_up)
+ for_each_possible_cpu(i) {
+ b->cpu_usage[i].cpustat[CPUTIME_SYSTEM] =
+ kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
+ b->cpu_usage[i].cpustat[CPUTIME_IDLE] =
+ kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
+ b->cpu_usage[i].cpustat[CPUTIME_IOWAIT] =
+ kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT];
+ b->cpu_usage[i].cpustat[CPUTIME_IRQ] =
+ kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+ /*
+ * TODO: Make sure that user, nice, softirq, steal
+ * and guest are not used during boot
+ */
+ }
+ else
+ b->cpu_load = BOOTTIME_NO_CPU_LOAD;
+
+ if (system_up) {
+ spin_lock_irqsave(&boottime_list_lock, flags);
+ list_add(&b->list, &boottime_list);
+ spin_unlock_irqrestore(&boottime_list_lock, flags);
+ } else {
+ list_add(&b->list, &boottime_list);
+ }
+}
+
+void __init boottime_mark_wtime(char *name, unsigned long time)
+{
+ boottime_mark_core(name, time,
+ BOOTTIME_NORMAL_PRINT,
+ BOOTTIME_NO_CPU_LOAD);
+}
+
+void __ref boottime_mark_symbolic(void *name)
+{
+
+ if (boottime_done)
+ return;
+
+ if (boottime_timer.get_time)
+ boottime_mark_core((char *) name,
+ boottime_timer.get_time(),
+ BOOTTIME_SYMBOLIC_PRINT,
+ BOOTTIME_CPU_LOAD);
+}
+
+void __init boottime_mark(char *name)
+{
+ if (boottime_timer.get_time)
+ boottime_mark_core(name,
+ boottime_timer.get_time(),
+ BOOTTIME_NORMAL_PRINT,
+ BOOTTIME_CPU_LOAD);
+}
+
+void __init boottime_activate(struct boottime_timer *bt)
+{
+ struct boottime_list *b;
+ int res = 0;
+ unsigned long flags;
+
+ if (bt == NULL) {
+ printk(KERN_ERR
+ "boottime: error: bad configured\n");
+ return;
+ }
+
+ if (bt->get_time == NULL) {
+ printk(KERN_ERR
+ "boottime: error: you must provide a get_time() function\n");
+ return;
+ }
+ memcpy(&boottime_timer, bt, sizeof(struct boottime_timer));
+
+ if (boottime_timer.init)
+ res = boottime_timer.init();
+
+ if (res) {
+ printk(KERN_ERR "boottime: initialization failed\n");
+ return;
+ }
+
+ if (boottime_arch_startup())
+ printk(KERN_ERR
+ "boottime: arch specfic initialization failed\n");
+
+ spin_lock_irqsave(&boottime_list_lock, flags);
+
+ if (!list_empty(&boottime_list)) {
+
+ b = list_first_entry(&boottime_list, struct boottime_list,
+ list);
+ if (b)
+ time_bootloader_done = b->time;
+ }
+
+ spin_unlock_irqrestore(&boottime_list_lock, flags);
+}
+
+void __init boottime_system_up(void)
+{
+ system_up = true;
+}
+
+void __init boottime_deactivate(void)
+{
+ struct boottime_list *b;
+ unsigned long flags;
+
+ boottime_mark("execute_init+0x0/0x0");
+
+ boottime_done = true;
+
+ spin_lock_irqsave(&boottime_list_lock, flags);
+ b = list_first_entry(&boottime_list, struct boottime_list, list);
+ spin_unlock_irqrestore(&boottime_list_lock, flags);
+
+ time_kernel_done = b->time;
+
+ if (boottime_timer.finalize)
+ boottime_timer.finalize();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void boottime_debugfs_load(struct seq_file *s,
+ struct boottime_list *b,
+ struct boottime_list *p)
+{
+ int i;
+ unsigned long total_p, total_b;
+ unsigned long system_total, idle_total, irq_total, iowait_total;
+ unsigned long system_load, idle_load, irq_load, iowait_load;
+
+ for_each_possible_cpu(i) {
+ total_b = (b->cpu_usage[i].cpustat[CPUTIME_SYSTEM] +
+ b->cpu_usage[i].cpustat[CPUTIME_IDLE] +
+ b->cpu_usage[i].cpustat[CPUTIME_IOWAIT] +
+ b->cpu_usage[i].cpustat[CPUTIME_IRQ]);
+
+ total_p = (p->cpu_usage[i].cpustat[CPUTIME_SYSTEM] +
+ p->cpu_usage[i].cpustat[CPUTIME_IDLE] +
+ p->cpu_usage[i].cpustat[CPUTIME_IOWAIT] +
+ p->cpu_usage[i].cpustat[CPUTIME_IRQ]);
+
+ if (total_b == total_p)
+ continue;
+
+ system_total = b->cpu_usage[i].cpustat[CPUTIME_SYSTEM]
+ - p->cpu_usage[i].cpustat[CPUTIME_SYSTEM];
+ idle_total = b->cpu_usage[i].cpustat[CPUTIME_IDLE]
+ - p->cpu_usage[i].cpustat[CPUTIME_IDLE];
+ irq_total = b->cpu_usage[i].cpustat[CPUTIME_IRQ]
+ - p->cpu_usage[i].cpustat[CPUTIME_IRQ];
+ iowait_total = b->cpu_usage[i].cpustat[CPUTIME_IOWAIT]
+ - p->cpu_usage[i].cpustat[CPUTIME_IOWAIT];
+
+ system_load = (100 * system_total / (total_b - total_p));
+ idle_load = (100 * idle_total / (total_b - total_p));
+ irq_load = (100 * irq_total / (total_b - total_p));
+ iowait_load = (100 * iowait_total / (total_b - total_p));
+
+ seq_printf(s,
+ " cpu%d system: %lu%% idle: %lu%% iowait: %lu%% irq: %lu%%",
+ i,
+ system_load,
+ idle_load,
+ iowait_load,
+ irq_load);
+ }
+ seq_printf(s, "\n");
+}
+
+static void boottime_debugfs_print(struct seq_file *s,
+ struct boottime_list *b,
+ struct boottime_list *p)
+{
+ seq_printf(s, "[%5lu.%06lu] calling %s\n",
+ p->time / 1000000,
+ (p->time % 1000000),
+ p->name);
+ seq_printf(s, "[%5lu.%06lu] initcall %s returned 0 after %ld msecs.",
+ b->time / 1000000,
+ (b->time % 1000000),
+ p->name, (b->time - p->time) / 1000);
+
+ if (p->cpu_load == BOOTTIME_NO_CPU_LOAD ||
+ b->cpu_load == BOOTTIME_NO_CPU_LOAD) {
+ seq_printf(s, "\n");
+ return;
+ }
+
+ boottime_debugfs_load(s, b, p);
+}
+
+static int boottime_debugfs_bootgraph_show(struct seq_file *s, void *iter)
+{
+ struct boottime_list *b, *p = NULL, *old_p = NULL;
+ enum boottime_filter_type filter = (int)s->private;
+
+ list_for_each_entry_reverse(b, &boottime_list, list) {
+ if (p) {
+ if (!(filter == BOOTTIME_FILTER_OUT_ZERO &&
+ (b->time - p->time) / 1000 == 0)
+ && !(filter == BOOTTIME_FILTER_OUT_LESS_100 &&
+ (b->time - p->time) < 100 * 1000))
+ boottime_debugfs_print(s, b, p);
+ old_p = p;
+ }
+ p = b;
+ }
+
+ if (filter == BOOTTIME_FILTER_NOTHING && p)
+ boottime_debugfs_print(s, p, p);
+
+ if (p)
+ seq_printf(s, "[%5lu.%06lu] Freeing init memory: 0K\n",
+ p->time / 1000000, p->time % 1000000);
+ return 0;
+}
+
+static int boottime_debugfs_summary_show(struct seq_file *s, void *data)
+{
+ struct boottime_list *b, b_zero;
+
+ if (time_bootloader_done)
+ seq_printf(s, "bootloader: %ld msecs\n",
+ time_bootloader_done / 1000);
+
+ seq_printf(s, "kernel: %ld msecs\ntotal: %ld msecs\n",
+ (time_kernel_done - time_bootloader_done) / 1000,
+ time_kernel_done / 1000);
+ seq_printf(s, "kernel:");
+ b = list_first_entry(&boottime_list,
+ struct boottime_list, list);
+ memset(&b_zero, 0, sizeof(struct boottime_list));
+ boottime_debugfs_load(s, b, &b_zero);
+
+ if (time_bootloader_done)
+ seq_printf(s,
+ "bootloader: cpu0 system: %d%% idle: %d%% iowait: 0%% irq: 0%%\n",
+ 100 - boottime_bootloader_idle(),
+ boottime_bootloader_idle());
+ return 0;
+}
+
+static int boottime_debugfs_bootgraph_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file,
+ boottime_debugfs_bootgraph_show,
+ inode->i_private);
+}
+
+static int boottime_debugfs_summary_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file,
+ boottime_debugfs_summary_show,
+ inode->i_private);
+}
+
+static const struct file_operations boottime_debugfs_bootgraph_operations = {
+ .open = boottime_debugfs_bootgraph_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations boottime_debugfs_summary_operations = {
+ .open = boottime_debugfs_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void boottime_debugfs_init(void)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("boottime", NULL);
+
+ (void) debugfs_create_file("bootgraph", S_IFREG | S_IRUGO,
+ dir, (void *)BOOTTIME_FILTER_NOTHING,
+ &boottime_debugfs_bootgraph_operations);
+ (void) debugfs_create_file("bootgraph_all_except0", S_IFREG | S_IRUGO,
+ dir, (void *)BOOTTIME_FILTER_OUT_ZERO,
+ &boottime_debugfs_bootgraph_operations);
+ (void) debugfs_create_file("bootgraph_larger100",
+ S_IFREG | S_IRUGO,
+ dir, (void *)BOOTTIME_FILTER_OUT_LESS_100,
+ &boottime_debugfs_bootgraph_operations);
+ (void) debugfs_create_file("summary", S_IFREG | S_IRUGO,
+ dir, NULL,
+ &boottime_debugfs_summary_operations);
+}
+#else
+#define boottime_debugfs_init()
+#endif
+
+static ssize_t show_bootloader(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%ld\n", time_bootloader_done / 1000);
+}
+
+static ssize_t show_kernel(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%ld\n",
+ (time_kernel_done - time_bootloader_done) / 1000);
+}
+
+DEVICE_ATTR(kernel, 0444, show_kernel, NULL);
+DEVICE_ATTR(bootloader, 0444, show_bootloader, NULL);
+
+static struct attribute *boottime_sysfs_entries[] = {
+ &dev_attr_kernel.attr,
+ &dev_attr_bootloader.attr,
+ NULL
+};
+
+static struct attribute_group boottime_attr_grp = {
+ .name = NULL,
+ .attrs = boottime_sysfs_entries,
+};
+
+static int __init boottime_init(void)
+{
+ struct kobject *boottime_kobj;
+
+ boottime_kobj = kobject_create_and_add("boottime", NULL);
+ if (!boottime_kobj) {
+ printk(KERN_ERR "boottime: out of memory!\n");
+ return -ENOMEM;
+ }
+
+ if (sysfs_create_group(boottime_kobj, &boottime_attr_grp) < 0) {
+ kobject_put(boottime_kobj);
+ printk(KERN_ERR "boottime: Failed creating sysfs group\n");
+ return -ENOMEM;
+ }
+
+ boottime_debugfs_init();
+
+ return 0;
+}
+
+late_initcall(boottime_init);
diff --git a/init/main.c b/init/main.c
index ff49a6dacfb..d349e227574 100644
--- a/init/main.c
+++ b/init/main.c
@@ -68,6 +68,7 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
+#include <linux/boottime.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -676,6 +677,8 @@ int __init_or_module do_one_initcall(initcall_t fn)
int count = preempt_count();
int ret;
+ boottime_mark_symbolic(fn);
+
if (initcall_debug)
ret = do_one_initcall_debug(fn);
else
@@ -752,6 +755,7 @@ static noinline int init_post(void)
{
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
+ boottime_deactivate();
free_initmem();
mark_rodata_ro();
system_state = SYSTEM_RUNNING;
@@ -807,6 +811,7 @@ static int __init kernel_init(void * unused)
do_pre_smp_initcalls();
lockup_detector_init();
+ boottime_system_up();
smp_init();
sched_init_smp();
@@ -829,6 +834,7 @@ static int __init kernel_init(void * unused)
if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
ramdisk_execute_command = NULL;
+ boottime_mark("mount+0x0/0x0");
prepare_namespace();
}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index fb7db75ee0c..9b571fabf66 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -265,7 +265,7 @@ void handle_nested_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
- irqreturn_t action_ret;
+ irqreturn_t action_ret = IRQ_NONE;
might_sleep();
@@ -280,7 +280,11 @@ void handle_nested_irq(unsigned int irq)
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock_irq(&desc->lock);
- action_ret = action->thread_fn(action->irq, action->dev_id);
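+	/* Walk the whole action chain so every threaded handler registered
+	 * on this (possibly shared) nested interrupt runs; OR the return
+	 * values together.
+	 */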
+ do {
+ action_ret |= action->thread_fn(action->irq, action->dev_id);
+ action = action->next;
+ } while (action);
+
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 7b088678670..a650adb2594 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -49,6 +49,8 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+ATOMIC_NOTIFIER_HEAD(crash_percpu_notifier_list);
+
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
.name = "Crash kernel",
@@ -1081,6 +1083,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
void crash_kexec(struct pt_regs *regs)
{
+ struct pt_regs fixed_regs;
/* Take the kexec_mutex here to prevent sys_kexec_load
* running on one cpu from replacing the crash kernel
* we are using after a panic on a different cpu.
@@ -1091,13 +1094,20 @@ void crash_kexec(struct pt_regs *regs)
*/
if (mutex_trylock(&kexec_mutex)) {
if (kexec_crash_image) {
- struct pt_regs fixed_regs;
crash_setup_regs(&fixed_regs, regs);
crash_save_vmcoreinfo();
machine_crash_shutdown(&fixed_regs);
machine_kexec(kexec_crash_image);
}
+#ifdef CONFIG_CRASH_SWRESET
+ else {
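+			/* No crash kernel is loaded: still save the register
+			 * state and vmcoreinfo, then perform a software reset
+			 * via machine_crash_swreset().
+			 */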
+ crash_setup_regs(&fixed_regs, regs);
+ crash_save_vmcoreinfo();
+ machine_crash_shutdown(&fixed_regs);
+ machine_crash_swreset();
+ }
+#endif
mutex_unlock(&kexec_mutex);
}
}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4fd51beed87..f40d3095747 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
+#include <linux/kthread.h>
#include <linux/syscore_ops.h>
#include <trace/events/power.h>
@@ -162,9 +163,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
if (suspend_test(TEST_PLATFORM))
goto Platform_wake;
+
error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS))
- goto Enable_cpus;
+ goto Platform_wake;
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
@@ -182,9 +184,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
- Enable_cpus:
- enable_nonboot_cpus();
-
Platform_wake:
if (suspend_ops->wake)
suspend_ops->wake();
@@ -264,6 +263,15 @@ static void suspend_finish(void)
pm_restore_console();
}
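+
+/*
+ * Bring the non-boot CPUs back online from a kthread once the suspend
+ * sequence has finished; skipped for the freezer/devices/platform test
+ * levels, whose paths never disable the CPUs.
+ */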
+static int plug_secondary_cpus(void *data)
+{
+ if (!(suspend_test(TEST_FREEZER) ||
+ suspend_test(TEST_DEVICES) ||
+ suspend_test(TEST_PLATFORM)))
+ enable_nonboot_cpus();
+ return 0;
+}
+
/**
* enter_state - Do common work of entering low-power state.
* @state: pm_state structure for state we're entering.
@@ -277,6 +285,7 @@ static void suspend_finish(void)
int enter_state(suspend_state_t state)
{
int error;
+ struct task_struct *cpu_task;
if (!valid_state(state))
return -ENODEV;
@@ -305,6 +314,11 @@ int enter_state(suspend_state_t state)
pr_debug("PM: Finishing wakeup.\n");
suspend_finish();
Unlock:
+
+ cpu_task = kthread_run(plug_secondary_cpus,
+ NULL, "cpu-plug");
+ BUG_ON(IS_ERR(cpu_task));
+
mutex_unlock(&pm_mutex);
return error;
}
diff --git a/kernel/printk.c b/kernel/printk.c
index 32690a0b7a1..7137480b21f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -41,6 +41,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/rculist.h>
+#include <trace/stm.h>
#include <asm/uaccess.h>
@@ -53,6 +54,10 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+#ifdef CONFIG_PRINTK_LL
+extern void printascii(char *);
+#endif
+
/* printk's without a loglevel use this.. */
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
@@ -879,6 +884,10 @@ asmlinkage int vprintk(const char *fmt, va_list args)
printed_len += vscnprintf(printk_buf + printed_len,
sizeof(printk_buf) - printed_len, fmt, args);
+#ifdef CONFIG_PRINTK_LL
+ printascii(printk_buf);
+#endif
+
p = printk_buf;
/* Read log level and handle special printk prefix */
@@ -900,6 +909,9 @@ asmlinkage int vprintk(const char *fmt, va_list args)
}
}
+	/* Send printk buffer to MIPI STM trace hardware too if enabled */
+ stm_dup_printk(printk_buf, printed_len);
+
/*
* Copy the output into log_buf. If the caller didn't provide
* the appropriate log prefix, we insert them here
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a3f1bc5d2a0..2d1c28efc8b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -37,6 +37,7 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/fs.h>
+#include <trace/stm.h>
#include "trace.h"
#include "trace_output.h"
@@ -911,7 +912,7 @@ void tracing_reset_current_online_cpus(void)
tracing_reset_online_cpus(&global_trace);
}
-#define SAVED_CMDLINES 128
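+/* Enlarged from 128 so more task command lines are retained for
+ * PID-to-comm resolution in trace output.
+ */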
+#define SAVED_CMDLINES 2048
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -1239,6 +1240,8 @@ trace_function(struct trace_array *tr,
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+
+ stm_ftrace(ip, parent_ip);
}
void
@@ -1333,6 +1336,8 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+ stm_stack_trace(trace.entries);
+
out:
/* Again, don't let gcc optimize things here */
barrier();
@@ -1503,6 +1508,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
ftrace_trace_stack(buffer, flags, 6, pc);
}
+ stm_trace_bprintk_buf(ip, fmt, trace_buf, sizeof(u32) * len);
+
out_unlock:
arch_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);
@@ -1579,6 +1586,8 @@ int trace_array_vprintk(struct trace_array *tr,
ftrace_trace_stack(buffer, irq_flags, 6, pc);
}
+ stm_trace_printk_buf(ip, trace_buf, len);
+
out_unlock:
arch_spin_unlock(&trace_buf_lock);
raw_local_irq_restore(irq_flags);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c212a7f934e..773cb84adc8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <asm/setup.h>
+#include <trace/stm.h>
#include "trace_output.h"
@@ -1703,6 +1704,8 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+ stm_ftrace(ip, parent_ip);
+
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
preempt_enable_notrace();
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 7e62c0a1845..a136fd86533 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>
+#include <trace/stm.h>
#include "trace.h"
@@ -47,6 +48,10 @@ tracing_sched_switch_trace(struct trace_array *tr,
if (!filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(buffer, event, flags, pc);
+
+ stm_sched_switch(entry->prev_pid, entry->prev_prio, entry->prev_state,
+ entry->next_pid, entry->next_prio, entry->next_state,
+ entry->next_cpu);
}
static void
@@ -103,6 +108,11 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+
+ stm_sched_wakeup(entry->prev_pid, entry->prev_prio, entry->prev_state,
+ entry->next_pid, entry->next_prio, entry->next_state,
+ entry->next_cpu);
+
ftrace_trace_stack(tr->buffer, flags, 6, pc);
ftrace_trace_userstack(tr->buffer, flags, pc);
}
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index e7ee5314f39..0545fe0493f 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -166,6 +166,8 @@ struct bnep_session {
struct socket *sock;
struct net_device *dev;
+
+ unsigned int setup_done;
};
void bnep_net_setup(struct net_device *dev);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index a779ec70332..fe3b9faeb68 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -117,7 +117,8 @@ static inline void bnep_set_default_proto_filter(struct bnep_session *s)
}
#endif
-static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len)
+static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data,
+ int len, int *pkt_size)
{
int n;
@@ -133,6 +134,8 @@ static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len
BT_DBG("filter len %d", n);
+ *pkt_size = 2 + n;
+
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
n /= 4;
if (n <= BNEP_MAX_PROTO_FILTERS) {
@@ -163,7 +166,8 @@ static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len
return 0;
}
-static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
+static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len,
+ int *pkt_size)
{
int n;
@@ -179,6 +183,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
BT_DBG("filter len %d", n);
+ *pkt_size = 2 + n;
+
#ifdef CONFIG_BT_BNEP_MC_FILTER
n /= (ETH_ALEN * 2);
@@ -224,7 +230,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
return 0;
}
-static int bnep_rx_control(struct bnep_session *s, void *data, int len)
+static int bnep_rx_control(struct bnep_session *s, void *data, int len,
+ int *pkt_size)
{
u8 cmd = *(u8 *)data;
int err = 0;
@@ -232,6 +239,8 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
data++;
len--;
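+	/* Report how many bytes this control command occupies so the caller
+	 * can skip past it when BNEP extension headers follow.
+	 */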
+ *pkt_size = 0;
+
switch (cmd) {
case BNEP_CMD_NOT_UNDERSTOOD:
case BNEP_SETUP_CONN_RSP:
@@ -241,15 +250,27 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
break;
case BNEP_FILTER_NET_TYPE_SET:
- err = bnep_ctrl_set_netfilter(s, data, len);
+ err = bnep_ctrl_set_netfilter(s, data, len, pkt_size);
break;
case BNEP_FILTER_MULTI_ADDR_SET:
- err = bnep_ctrl_set_mcfilter(s, data, len);
+ err = bnep_ctrl_set_mcfilter(s, data, len, pkt_size);
break;
- case BNEP_SETUP_CONN_REQ:
- err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, BNEP_CONN_NOT_ALLOWED);
+ case BNEP_SETUP_CONN_REQ: {
+ u8 uuid_size = *(u8 *)data;
+
+		/* The first setup connection request is silently discarded;
+		 * it was already handled when the connection was accepted.
+ */
+ if (s->setup_done)
+ err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
+ BNEP_CONN_NOT_ALLOWED);
+ else
+ s->setup_done = 1;
+
+ *pkt_size = 1 + 2 * uuid_size;
+ }
break;
default: {
@@ -262,6 +283,10 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
break;
}
+ if (*pkt_size > 0)
+ /* Add 1 byte for type field */
+ (*pkt_size)++;
+
return err;
}
@@ -269,6 +294,7 @@ static int bnep_rx_extension(struct bnep_session *s, struct sk_buff *skb)
{
struct bnep_ext_hdr *h;
int err = 0;
+ int pkt_size;
do {
h = (void *) skb->data;
@@ -281,7 +307,7 @@ static int bnep_rx_extension(struct bnep_session *s, struct sk_buff *skb)
switch (h->type & BNEP_TYPE_MASK) {
case BNEP_EXT_CONTROL:
- bnep_rx_control(s, skb->data, skb->len);
+ bnep_rx_control(s, skb->data, skb->len, &pkt_size);
break;
default:
@@ -321,7 +347,16 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
goto badframe;
if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
- bnep_rx_control(s, skb->data, skb->len);
+ int pkt_size = 0;
+
+ bnep_rx_control(s, skb->data, skb->len, &pkt_size);
+
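+		/* When the control payload length is known and the extension
+		 * header bit is set, strip the control data and parse the
+		 * trailing extension headers as well.
+		 */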
+ if (pkt_size > 0 && (type & BNEP_EXT_HEADER)) {
+ skb_pull(skb, pkt_size);
+ if (bnep_rx_extension(s, skb) < 0)
+ goto badframe;
+ }
+
kfree_skb(skb);
return 0;
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 07bc69ed949..265f0601c80 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -44,6 +44,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/sco.h>
static void hci_le_connect(struct hci_conn *conn)
{
@@ -147,16 +148,21 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_add_sco cp;
+ struct bt_sco_parameters *p = conn->sco_parameters;
+ __u16 pkt_type;
BT_DBG("%p", conn);
+ /* HCI_Add_SCO_Connection uses shifted bitmask for packet type */
+ pkt_type = (p->pkt_type << 5) & conn->pkt_type;
+
conn->state = BT_CONNECT;
conn->out = 1;
conn->attempt++;
cp.handle = cpu_to_le16(handle);
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
+ cp.pkt_type = cpu_to_le16(pkt_type);
hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
@@ -165,22 +171,35 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_setup_sync_conn cp;
+ struct bt_sco_parameters *p = conn->sco_parameters;
+ __u16 voice_setting;
+ __u16 pkt_type;
BT_DBG("%p", conn);
+ /*
+	 * Combine the voice setting from the device parameters with the
+	 * air coding format set by the user.
+ */
+ voice_setting = (hdev->voice_setting & 0xfffc) |
+ (p->voice_setting & 0x0003);
+
+ /* Bits for EDR packets have inverted logic in BT spec. */
+ pkt_type = (p->pkt_type & conn->pkt_type) ^ EDR_ESCO_MASK;
+
conn->state = BT_CONNECT;
conn->out = 1;
conn->attempt++;
cp.handle = cpu_to_le16(handle);
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
- cp.tx_bandwidth = cpu_to_le32(0x00001f40);
- cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0xffff);
- cp.voice_setting = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0xff;
+ cp.tx_bandwidth = cpu_to_le32(p->tx_bandwidth);
+ cp.rx_bandwidth = cpu_to_le32(p->rx_bandwidth);
+ cp.max_latency = cpu_to_le16(p->max_latency);
+ cp.voice_setting = cpu_to_le16(voice_setting);
+ cp.retrans_effort = p->retrans_effort;
+ cp.pkt_type = cpu_to_le16(pkt_type);
hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}
@@ -366,7 +385,8 @@ static void hci_conn_auto_accept(unsigned long arg)
&conn->dst);
}
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+ bdaddr_t *dst)
{
struct hci_conn *conn;
@@ -395,13 +415,12 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
break;
case SCO_LINK:
if (lmp_esco_capable(hdev))
- conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
- (hdev->esco_type & EDR_ESCO_MASK);
+ conn->pkt_type = hdev->esco_type & SCO_ESCO_MASK;
else
conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
break;
case ESCO_LINK:
- conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
+ conn->pkt_type = hdev->esco_type;
break;
}
@@ -519,7 +538,9 @@ EXPORT_SYMBOL(hci_get_route);
/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u8 sec_level, __u8 auth_type,
+ struct bt_sco_parameters *sco_parameters)
{
struct hci_conn *acl;
struct hci_conn *sco;
@@ -584,6 +605,8 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
hci_conn_hold(sco);
+ sco->sco_parameters = sco_parameters;
+
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
acl->power_save = 1;
@@ -933,8 +956,10 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
- if (conn)
+ if (conn) {
req.type = conn->auth_type;
+ req.sec_level = max(conn->sec_level, conn->pending_sec_level);
+ }
hci_dev_unlock(hdev);
if (!conn)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 001307f8105..8357436fb1a 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1645,6 +1645,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
+ /* pkt_type not yet used for incoming connections */
conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
BT_ERR("No memory for new connection");
@@ -2677,9 +2678,8 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
case 0x1f: /* Unspecified error */
- if (conn->out && conn->attempt < 2) {
- conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
- (hdev->esco_type & EDR_ESCO_MASK);
+ if (conn->out && !conn->no_autoretry && conn->attempt < 2) {
+ conn->pkt_type = hdev->esco_type & SCO_ESCO_MASK;
hci_setup_sync(conn, conn->link->handle);
goto unlock;
}
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 32d338c30e6..f68a1108d25 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1198,10 +1198,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
if (chan->dcid == L2CAP_CID_LE_DATA)
hcon = hci_connect(hdev, LE_LINK, dst,
- chan->sec_level, auth_type);
+ chan->sec_level, auth_type, NULL);
else
hcon = hci_connect(hdev, ACL_LINK, dst,
- chan->sec_level, auth_type);
+ chan->sec_level, auth_type, NULL);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 86a6bed229d..f9b2dde4c0a 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -136,6 +136,8 @@ int bt_to_errno(__u16 code)
return EPROTONOSUPPORT;
case 0x1b:
+ case 0x1c:
+ case 0x1d:
return ECONNREFUSED;
case 0x19:
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index bc8e59dda78..87389cab147 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1551,10 +1551,10 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
if (cp->addr.type == MGMT_ADDR_BREDR)
conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ auth_type, NULL);
else
conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ auth_type, NULL);
memset(&rp, 0, sizeof(rp));
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8bf26d1bc5c..ef816b0dda8 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -178,6 +178,7 @@ static int sco_connect(struct sock *sk)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
+ struct bt_sco_parameters *param = &sco_pi(sk)->param;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
@@ -196,12 +197,15 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;
- hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, dst,
+ BT_SECURITY_LOW, HCI_AT_NO_BONDING, param);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
+ hcon->no_autoretry = sco_pi(sk)->no_autoretry;
+
conn = sco_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
@@ -402,12 +406,26 @@ static void sco_sock_close(struct sock *sk)
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
+ struct sco_pinfo *pi = sco_pi(sk);
+
BT_DBG("sk %p", sk);
if (parent) {
sk->sk_type = parent->sk_type;
security_sk_clone(parent, sk);
}
+
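+	/* Default SCO parameters: 64 kbit/s (8000 bytes/s) in each direction
+	 * with no latency or retransmission constraints; userspace can
+	 * override these with the BT_SCO_PARAMETERS socket option.
+	 */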
+ pi->param.tx_bandwidth = 8000;
+ pi->param.rx_bandwidth = 8000;
+ pi->param.max_latency = HCI_SYNC_MAX_LATENCY_DONTCARE;
+
+	/* Only the air coding format matters here; the other fields are
+	 * overridden by device settings during connection setup.
+ */
+ pi->param.voice_setting = HCI_SYNC_AIR_CODING_CVSD;
+
+ pi->param.retrans_effort = HCI_SYNC_RETRANS_EFFORT_DONTCARE;
+ pi->param.pkt_type = ALL_ESCO_MASK;
}
static struct proto sco_proto = {
@@ -661,13 +679,45 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
+ int len;
int err = 0;
+ struct bt_sco_parameters *param;
+ u32 opt;
BT_DBG("sk %p", sk);
+ if (level != SOL_BLUETOOTH)
+ return -ENOPROTOOPT;
+
lock_sock(sk);
switch (optname) {
+ case BT_SCO_PARAMETERS:
+ /* We do not support changing SCO parameters during
+ * connection.
+ */
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+ err = -EBUSY;
+ break;
+ }
+
+ param = &sco_pi(sk)->param;
+
+ len = min_t(unsigned int, sizeof(*param), optlen);
+ if (copy_from_user((char *) param, optval, len))
+ err = -EFAULT;
+
+ break;
+
+ case BT_NO_AUTORETRY:
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ sco_pi(sk)->no_autoretry = opt;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -737,18 +787,36 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
{
struct sock *sk = sock->sk;
int len, err = 0;
+ struct bt_sco_parameters *params;
BT_DBG("sk %p", sk);
if (level == SOL_SCO)
return sco_sock_getsockopt_old(sock, optname, optval, optlen);
+ if (level != SOL_BLUETOOTH)
+ return -ENOPROTOOPT;
+
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
+ case BT_SCO_PARAMETERS:
+ params = &sco_pi(sk)->param;
+
+ len = min_t(unsigned int, len, sizeof(*params));
+ if (copy_to_user(optval, (char *) params, len))
+ err = -EFAULT;
+
+ break;
+
+ case BT_NO_AUTORETRY:
+ if (put_user(sco_pi(sk)->no_autoretry, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2f0642d9e15..e65ee7f6244 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -330,6 +330,7 @@ struct ieee80211_work {
u8 key_len, key_idx;
bool privacy;
bool synced;
+ struct cfg80211_bss *bss;
} probe_auth;
struct {
struct cfg80211_bss *bss;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 295be92f7c7..f24430b66ee 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2502,6 +2502,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
wk->probe_auth.algorithm = auth_alg;
wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
+ wk->probe_auth.bss = req->bss;
/* if we already have a probe, don't probe again */
if (req->bss->proberesp_ies)
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index c6dd01a0529..290ddbb4d1d 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -454,6 +454,30 @@ ieee80211_authenticate(struct ieee80211_work *wk)
struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_local *local = sdata->local;
+	/* HACK!!! The cw1200 device requires the SSID to be available at the
+	 * AUTH stage. The cfg80211 beacon cache is designed to handle
+	 * multi-SSID BSSes, so the bss struct returned by cfg80211_get_bss()
+	 * may carry a random SSID if the BSS changed its SSID just before
+	 * authentication (typical for P2P). This is a firmware design fault;
+	 * as a workaround the cfg80211 beacon cache is purged to make sure
+	 * the target BSS is searchable in the rb-tree at the AUTH stage.
+ */
+ struct cfg80211_bss *bss;
+ while (true) {
+ bss = cfg80211_get_bss(local->hw.wiphy,
+ wk->probe_auth.bss->channel,
+ wk->probe_auth.bss->bssid,
+ NULL, 0, 0, 0);
+ if (WARN_ON(!bss))
+ break;
+ if (bss == wk->probe_auth.bss) {
+ cfg80211_put_bss(bss);
+ break;
+ }
+ cfg80211_unlink_bss(local->hw.wiphy, bss);
+ }
+ /* End of the hack */
+
if (!wk->probe_auth.synced) {
int ret = drv_tx_sync(local, sdata, wk->filter_ta,
IEEE80211_TX_SYNC_AUTH);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 354760ebbbd..696e1fb1065 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -722,7 +722,7 @@ static struct device_attribute rfkill_dev_attrs[] = {
__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
__ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
- __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
+ __ATTR(state, S_IRUGO|S_IWUGO, rfkill_state_show, rfkill_state_store),
__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
__ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
__ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 4d403844e13..06c33adfe7f 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -10,23 +10,37 @@
#
usage() {
- echo "Usage: $0 [--save-scmversion] [srctree]" >&2
+ echo "Usage: $0 [--save-scmversion] [-s srctree] [-t ref_tag]" >&2
exit 1
}
scm_only=false
srctree=.
-if test "$1" = "--save-scmversion"; then
- scm_only=true
- shift
-fi
-if test $# -gt 0; then
- srctree=$1
+match_option=--exact-match
+
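+# -s <dir> selects the source tree and -t <tag> restricts the tag lookup
+# used when computing the local version string.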
+while [ $# -ne 0 ]; do
+ if test "$1" = "--save-scmversion"; then
+ scm_only=true
+ elif test "$1" = "-s"; then
+ shift
+ if test $# -ne 0 -a -d "$1"; then
+ srctree=$1
+ else
+ usage
+ fi
+ elif test "$1" = "-t"; then
+ shift
+ if [ $# -ne 0 ]; then
+ match=" --tags --match "$1
+ rev_refs="--refs refs/tags/"$1
+ else
+ usage
+ fi
+ else
+ usage
+ fi
shift
-fi
-if test $# -gt 0 -o ! -d "$srctree"; then
- usage
-fi
+done
scm_version()
{
@@ -47,8 +61,8 @@ scm_version()
# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
# it, because this version is defined in the top level Makefile.
- if [ -z "`git describe --exact-match 2>/dev/null`" ]; then
-
+ if git name-rev --tags $rev_refs HEAD | \
+ grep -E '^HEAD[[:space:]]+(.*~[0-9]*|undefined)$' > /dev/null; then
# If only the short version is requested, don't bother
# running further git commands
if $short; then
@@ -57,7 +71,7 @@ scm_version()
fi
# If we are past a tagged commit (like
# "v2.6.30-rc5-302-g72357d5"), we pretty print it.
- if atag="`git describe 2>/dev/null`"; then
+ if atag="`git describe $match 2>/dev/null`"; then
echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
# If we don't have a tag at all we print -g{commitish}.
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 885683a3b0b..86cd48bfd8d 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -39,5 +39,17 @@ config SND_PXA2XX_AC97
Say Y or M if you want to support any AC97 codec attached to
the PXA2xx AC97 interface.
+config SND_U8500_ALSA_AB8500
+ tristate "U8500 alsa support for AB8500"
+ depends on SND && STE_DMA40 && U8500_ACODEC && (U8500_AB8500_ED || U8500_AB8500_CUT10)
+ default y
+ select SND_PCM
+ help
+	  Say Y here if you have a U8500-based device and want to use ALSA
+	  for PCM playback and capture.
+
+ To compile this driver as a module, choose M here: the module
+ will be called u8500mod_alsa.
+
endif # SND_ARM
diff --git a/sound/arm/Makefile b/sound/arm/Makefile
index 8c0c851d464..e41f1f4db14 100644
--- a/sound/arm/Makefile
+++ b/sound/arm/Makefile
@@ -14,3 +14,7 @@ snd-pxa2xx-lib-$(CONFIG_SND_PXA2XX_LIB_AC97) += pxa2xx-ac97-lib.o
obj-$(CONFIG_SND_PXA2XX_AC97) += snd-pxa2xx-ac97.o
snd-pxa2xx-ac97-objs := pxa2xx-ac97.o
+obj-$(CONFIG_SND_U8500_ALSA_AB8500) += u8500mod_alsa.o
+ifneq ($(CONFIG_SND_U8500_ALSA_AB8500),n)
+u8500mod_alsa-objs := u8500_alsa_ab8500.o u8500_alsa_hdmi.o
+endif
diff --git a/sound/arm/u8500_alsa_ab8500.c b/sound/arm/u8500_alsa_ab8500.c
new file mode 100644
index 00000000000..39752388ab1
--- /dev/null
+++ b/sound/arm/u8500_alsa_ab8500.c
@@ -0,0 +1,2691 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Deepak Karda
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2.1 as published
+ * by the Free Software Foundation.
+ */
+
+/* This include must be defined at this point */
+//#include <sound/driver.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/ioctl.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <mach/hardware.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+/* alsa system */
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include "u8500_alsa_ab8500.h"
+#include <mach/msp.h>
+#include <mach/debug.h>
+
+#define ALSA_NAME "DRIVER ALSA"
+
+#define DRIVER_DEBUG CONFIG_STM_ALSA_DEBUG /* enables/disables debug msgs */
+#define DRIVER_DEBUG_PFX ALSA_NAME /* msg header represents this module */
+#define DRIVER_DBG KERN_ERR /* message level */
+
+static struct platform_device *device;
+static int active_user = 0;
+
+/*
+** External references
+*/
+#if DRIVER_DEBUG > 0
+t_ab8500_codec_error dump_acodec_registers(void);
+t_ab8500_codec_error dump_msp_registers(void);
+#endif
+
+extern int u8500_acodec_rates[MAX_NO_OF_RATES];
+extern char *lpbk_state_in_texts[NUMBER_LOOPBACK_STATE];
+extern char *switch_state_in_texts[NUMBER_SWITCH_STATE];
+extern char *power_state_in_texts[NUMBER_POWER_STATE];
+extern char *tdm_mode_state_in_texts[NUMBER_TDM_MODE_STATE];
+extern char *direct_rendering_state_in_texts[NUMBER_DIRECT_RENDERING_STATE];
+extern char *pcm_rendering_state_in_texts[NUMBER_PCM_RENDERING_STATE];
+extern char *codec_dest_texts[NUMBER_OUTPUT_DEVICE];
+extern char *codec_in_texts[NUMBER_INPUT_DEVICE];
+extern struct driver_debug_st DBG_ST;
+extern int second_config;
+extern int u8500_register_alsa_hdmi_controls(struct snd_card *card,
+ u8500_acodec_chip_t * u8500_chip);
+extern int snd_card_u8500_alsa_hdmi_new(u8500_acodec_chip_t * chip, int device);
+/*
+** Declaration for local functions
+*/
+static int u8500_analog_lpbk_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_analog_lpbk_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_analog_lpbk_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_digital_lpbk_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_digital_lpbk_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_digital_lpbk_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_playback_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_playback_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_playback_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_capture_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_capture_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_capture_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_playback_sink_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_playback_sink_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_playback_sink_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_capture_src_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_capture_src_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_capture_src_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_playback_switch_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_playback_switch_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_playback_switch_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_capture_switch_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_capture_switch_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_capture_switch_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_playback_power_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_playback_power_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_playback_power_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_capture_power_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_capture_power_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_capture_power_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_tdm_mode_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_tdm_mode_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_tdm_mode_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+static int u8500_direct_rendering_mode_ctrl_info(struct snd_kcontrol *kcontrol,
+						 struct snd_ctl_elem_info *uinfo);
+static int u8500_direct_rendering_mode_ctrl_get(struct snd_kcontrol *kcontrol,
+						struct snd_ctl_elem_value *uinfo);
+static int u8500_direct_rendering_mode_ctrl_put(struct snd_kcontrol *kcontrol,
+						struct snd_ctl_elem_value *uinfo);
+static int u8500_register_alsa_controls(struct snd_card *card,
+ u8500_acodec_chip_t * u8500_chip);
+
+static int u8500_pcm_rendering_mode_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_pcm_rendering_mode_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_pcm_rendering_mode_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+#if 0 /* DUMP REGISTER CONTROL */
+static int u8500_dump_register_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_dump_register_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_dump_register_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+#endif /* DUMP REGISTER CONTROL */
+
+static int configure_rate(struct snd_pcm_substream *,
+ t_u8500_acodec_config_need acodec_config_need);
+static void dma_eot_handler(void *data);
+/**
+* configure_rate
+* @substream - pointer to the playback/capture substream structure
+* @acodec_config_need - type of codec configuration required
+*
+* This function configures the audio codec to the stream sampling frequency.
+*/
+
+static int configure_rate(struct snd_pcm_substream *substream,
+ t_u8500_acodec_config_need acodec_config_need)
+{
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ t_codec_sample_frequency sampling_frequency = 0;
+ t_ab8500_codec_direction direction = 0;
+ struct acodec_configuration acodec_config;
+ int stream_id = substream->pstr->stream;
+
+ FUNC_ENTER();
+ switch (chip->freq) {
+ case 48000:
+ sampling_frequency = CODEC_SAMPLING_FREQ_48KHZ;
+ break;
+ default:
+		printk("unsupported frequency\n");
+		stm_error("unsupported frequency\n");
+ return -EINVAL;
+ }
+
+ switch (stream_id) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ direction = AB8500_CODEC_DIRECTION_OUT;
+ break;
+ case SNDRV_PCM_STREAM_CAPTURE:
+ direction = AB8500_CODEC_DIRECTION_IN;
+ break;
+ default:
+ stm_error(": wrong pcm stream\n");
+ return -EINVAL;
+ }
+
+ stm_dbg(DBG_ST.alsa, "enabling audiocodec audio mode\n");
+ acodec_config.direction = direction;
+ acodec_config.input_frequency = T_CODEC_SAMPLING_FREQ_48KHZ;
+ acodec_config.output_frequency = T_CODEC_SAMPLING_FREQ_48KHZ;
+ acodec_config.mspClockSel = CODEC_MSP_APB_CLOCK;
+ acodec_config.mspInClockFreq = CODEC_MSP_INPUT_FREQ_48MHZ;
+ acodec_config.channels = chip->channels;
+ acodec_config.user = 2;
+ acodec_config.acodec_config_need = acodec_config_need;
+ acodec_config.handler = dma_eot_handler;
+ acodec_config.tx_callback_data =
+ &chip->stream[ALSA_PCM_DEV][SNDRV_PCM_STREAM_PLAYBACK];
+ acodec_config.rx_callback_data =
+ &chip->stream[ALSA_PCM_DEV][SNDRV_PCM_STREAM_CAPTURE];
+ acodec_config.direct_rendering_mode = chip->direct_rendering_mode;
+ acodec_config.tdm8_ch_mode = chip->tdm8_ch_mode;
+ acodec_config.digital_loopback = DISABLE;
+ u8500_acodec_enable_audio_mode(&acodec_config);
+ FUNC_EXIT();
+
+ return 0;
+}
+
+/*
+****************************************************************************************
+* playback vol control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_playback_vol_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .subdevice = 0,
+ .name = "PCM Playback Volume",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_playback_vol_info,
+ .get = u8500_playback_vol_get,
+ .put = u8500_playback_vol_put
+};
+
+/**
+* u8500_playback_vol_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills playback volume info into the user structure.
+*/
+
+static int u8500_playback_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 2;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 100;
+ uinfo->value.integer.step = 10;
+ return 0;
+}
+
+/**
+* u8500_playback_vol_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function returns the current playback volume setting in the user structure.
+*/
+
+static int u8500_playback_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+
+ int *p_left_volume = NULL;
+ int *p_right_volume = NULL;
+
+ p_left_volume = (int *)&uinfo->value.integer.value[0];
+ p_right_volume = (int *)&uinfo->value.integer.value[1];
+
+ u8500_acodec_get_output_volume(chip->output_device, p_left_volume,
+ p_right_volume, USER_ALSA);
+ return 0;
+}
+
+/**
+* u8500_playback_vol_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure.
+*
+* This function sets the playback volume in the audio codec.
+*/
+
+static int u8500_playback_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0, error = 0;
+
+ if (chip->output_lvolume != uinfo->value.integer.value[0]
+ || chip->output_rvolume != uinfo->value.integer.value[1]) {
+ chip->output_lvolume = uinfo->value.integer.value[0];
+ chip->output_rvolume = uinfo->value.integer.value[1];
+
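+		/* Clamp both channels to the 0..100 range before programming
+		 * the codec.
+		 */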
+ if (chip->output_lvolume > 100)
+ chip->output_lvolume = 100;
+ else if (chip->output_lvolume < 0)
+ chip->output_lvolume = 0;
+
+ if (chip->output_rvolume > 100)
+ chip->output_rvolume = 100;
+ else if (chip->output_rvolume < 0)
+ chip->output_rvolume = 0;
+
+ error =
+ u8500_acodec_set_output_volume(chip->output_device,
+ chip->output_lvolume,
+ chip->output_rvolume,
+ USER_ALSA);
+
+ if (error) {
+ stm_error
+ (" : set volume for speaker/headphone failed\n");
+ return changed;
+ }
+ changed = 1;
+ }
+
+ return changed;
+}
+
+/*
+****************************************************************************************
+* capture vol control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_capture_vol_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .subdevice = 1,
+ .name = "PCM Capture Volume",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_capture_vol_info,
+ .get = u8500_capture_vol_get,
+ .put = u8500_capture_vol_put
+};
+
+/**
+* u8500_capture_vol_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills capture volume info into the user structure.
+*/
+static int u8500_capture_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 2;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 100;
+ uinfo->value.integer.step = 10;
+ return 0;
+}
+
+/**
+* u8500_capture_vol_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function returns the current capture volume setting in the user structure.
+*/
+
+static int u8500_capture_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+
+ int *p_left_volume = NULL;
+ int *p_right_volume = NULL;
+
+ p_left_volume = (int *)&uinfo->value.integer.value[0];
+ p_right_volume = (int *)&uinfo->value.integer.value[1];
+
+ u8500_acodec_get_input_volume(chip->input_device, p_left_volume,
+ p_right_volume, USER_ALSA);
+ return 0;
+}
+
+/**
+* u8500_capture_vol_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure.
+*
+* This function sets the capture volume in the audio codec with the values provided.
+*/
+
+static int u8500_capture_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0, error = 0;
+
+ if (chip->input_lvolume != uinfo->value.integer.value[0]
+ || chip->input_rvolume != uinfo->value.integer.value[1]) {
+ chip->input_lvolume = uinfo->value.integer.value[0];
+ chip->input_rvolume = uinfo->value.integer.value[1];
+
+ if (chip->input_lvolume > 100)
+ chip->input_lvolume = 100;
+ else if (chip->input_lvolume < 0)
+ chip->input_lvolume = 0;
+
+ if (chip->input_rvolume > 100)
+ chip->input_rvolume = 100;
+ else if (chip->input_rvolume < 0)
+ chip->input_rvolume = 0;
+
+ error = u8500_acodec_set_input_volume(chip->input_device,
+ chip->input_rvolume,
+ chip->input_lvolume,
+ USER_ALSA);
+ if (error) {
+ stm_error(" : set input volume failed\n");
+ return changed;
+ }
+ changed = 1;
+ }
+
+ return changed;
+}
+
+/*
+****************************************************************************************
+* playback sink control *
+****************************************************************************************
+*/
+
+static struct snd_kcontrol_new u8500_playback_sink_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .subdevice = 0,
+ .name = "PCM Playback Sink",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xffff,
+ .info = u8500_playback_sink_info,
+ .get = u8500_playback_sink_get,
+ .put = u8500_playback_sink_put
+};
+
+/**
+* u8500_playback_sink_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills playback sink (output device) info into the user structure.
+*/
+static int u8500_playback_sink_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_OUTPUT_DEVICE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_OUTPUT_DEVICE)
+ uinfo->value.enumerated.item = NUMBER_OUTPUT_DEVICE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ codec_dest_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_playback_sink_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function returns the currently selected playback device.
+*/
+static int u8500_playback_sink_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = chip->output_device;
+ return 0;
+}
+
+/**
+* u8500_playback_sink_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure.
+*
+* This function sets the playback device.
+*/
+static int u8500_playback_sink_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0, error;
+
+ if (chip->output_device != uinfo->value.enumerated.item[0]) {
+ chip->output_device = uinfo->value.enumerated.item[0];
+ error =
+ u8500_acodec_select_output(chip->output_device,
+ USER_ALSA, chip->tdm8_ch_mode);
+ if (error) {
+ stm_error(" : select output failed\n");
+ return changed;
+ }
+ changed = 1;
+ }
+ return changed;
+}
+
+/*
+****************************************************************************************
+* capture src control *
+****************************************************************************************
+*/
+
+static struct snd_kcontrol_new u8500_capture_src_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .subdevice = 1,
+ .name = "PCM Capture Source",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xffff,
+ .info = u8500_capture_src_ctrl_info,
+ .get = u8500_capture_src_ctrl_get,
+ .put = u8500_capture_src_ctrl_put
+};
+
+/**
+* u8500_capture_src_ctrl_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills capture device info into the user structure.
+*/
+static int u8500_capture_src_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_INPUT_DEVICE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_INPUT_DEVICE)
+ uinfo->value.enumerated.item = NUMBER_INPUT_DEVICE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ codec_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_capture_src_ctrl_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function returns the currently selected capture device.
+*/
+static int u8500_capture_src_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = chip->input_device;
+ return 0;
+}
+
+/**
+* u8500_capture_src_ctrl_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure.
+*
+* This function sets the capture device.
+*/
+static int u8500_capture_src_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0, error;
+
+ if (chip->input_device != uinfo->value.enumerated.item[0]) {
+ chip->input_device = uinfo->value.enumerated.item[0];
+ error =
+ u8500_acodec_select_input(chip->input_device, USER_ALSA,
+ chip->tdm8_ch_mode);
+ if (error) {
+ stm_error(" : select input failed\n");
+ return changed;
+ }
+ changed = 1;
+ }
+ return changed;
+}
+
+/*
+***************************************************************************************
+* analog lpbk control *
+***************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_analog_lpbk_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .name = "Analog Loopback",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_analog_lpbk_info,
+ .get = u8500_analog_lpbk_get,
+ .put = u8500_analog_lpbk_put
+};
+
+/**
+* u8500_analog_lpbk_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills analog loopback state info into the user structure.
+*/
+static int u8500_analog_lpbk_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_LOOPBACK_STATE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_LOOPBACK_STATE)
+ uinfo->value.enumerated.item = NUMBER_LOOPBACK_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ lpbk_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_analog_lpbk_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function returns the current analog loopback state.
+*/
+static int u8500_analog_lpbk_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = chip->analog_lpbk;
+ return 0;
+}
+
+/**
+* u8500_analog_lpbk_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure.
+*
+* This function sets the analog loopback state.
+*/
+static int u8500_analog_lpbk_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+
+ if (chip->analog_lpbk != uinfo->value.enumerated.item[0]) {
+ chip->analog_lpbk = uinfo->value.enumerated.item[0];
+
+ error =
+ u8500_acodec_toggle_analog_lpbk(chip->analog_lpbk,
+ USER_ALSA);
+
+ if (AB8500_CODEC_OK != error) {
+ stm_error
+ (" : select u8500_acodec_set_analog_lpbk_state failed\n");
+ return changed;
+ }
+ changed = 1;
+ }
+ return changed;
+}
+
+/*
+****************************************************************************************
+* digital lpbk control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_digital_lpbk_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .name = "Digital Loopback",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_digital_lpbk_info,
+ .get = u8500_digital_lpbk_get,
+ .put = u8500_digital_lpbk_put
+};
+
+/**
+* u8500_digital_lpbk_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills digital loopback state info into the user structure.
+*/
+static int u8500_digital_lpbk_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_LOOPBACK_STATE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_LOOPBACK_STATE)
+ uinfo->value.enumerated.item = NUMBER_LOOPBACK_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ lpbk_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_digital_lpbk_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function returns the current digital loopback state.
+*/
+static int u8500_digital_lpbk_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = chip->digital_lpbk;
+ return 0;
+}
+
+/**
+* u8500_digital_lpbk_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure.
+*
+* This function sets the digital loopback state.
+*/
+static int u8500_digital_lpbk_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+
+ if (chip->digital_lpbk != uinfo->value.enumerated.item[0]) {
+ chip->digital_lpbk = uinfo->value.enumerated.item[0];
+
+ error = u8500_acodec_toggle_digital_lpbk(chip->digital_lpbk,
+ chip->output_device,
+ chip->input_device,
+ USER_ALSA,
+ chip->tdm8_ch_mode);
+
+ /*if((error = u8500_acodec_set_output_volume(chip->output_device,50,50,USER_ALSA)))
+ {
+ stm_error(" : set output volume failed\n");
+ return error;
+ }
+
+ if ((error = u8500_acodec_set_input_volume(chip->input_device,50,50,USER_ALSA)))
+ {
+ stm_error(" : set input volume failed\n");
+ return error;
+ } */
+
+ if (AB8500_CODEC_OK != error) {
+ stm_error
+ (" : select u8500_acodec_set_digital_lpbk_state failed\n");
+ return changed;
+ }
+ changed = 1;
+ }
+ return changed;
+}
+
+/*
+****************************************************************************************
+* playback switch control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_playback_switch_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .subdevice = 0,
+ .name = "PCM Playback Mute",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_playback_switch_ctrl_info,
+ .get = u8500_playback_switch_ctrl_get,
+ .put = u8500_playback_switch_ctrl_put
+};
+
+/**
+* u8500_playback_switch_ctrl_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills the playback mute switch info into the user structure.
+*/
+static int u8500_playback_switch_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_SWITCH_STATE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_SWITCH_STATE)
+ uinfo->value.enumerated.item = NUMBER_SWITCH_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ switch_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_playback_switch_ctrl_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this is filled by the function
+*
+* This function returns the current playback mute switch state.
+*/
+static int u8500_playback_switch_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = chip->playback_switch;
+ return 0;
+}
+
+/**
+* u8500_playback_switch_ctrl_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this holds the value set by the user
+*
+* This function sets the playback mute switch.
+*/
+static int u8500_playback_switch_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+
+ if (chip->playback_switch != uinfo->value.enumerated.item[0]) {
+ chip->playback_switch = uinfo->value.enumerated.item[0];
+
+ error =
+ u8500_acodec_toggle_playback_mute_control(chip->
+ output_device,
+ chip->
+ playback_switch,
+ USER_ALSA);
+
+ if (AB8500_CODEC_OK != error) {
+ stm_error
+ (" : select u8500_playback_switch_ctrl_put failed\n");
+ return changed;
+ }
+ changed = 1;
+ }
+ return changed;
+}
+
+/*
+****************************************************************************************
+* Capture switch control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_capture_switch_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .subdevice = 1,
+ .name = "PCM Capture Mute",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_capture_switch_ctrl_info,
+ .get = u8500_capture_switch_ctrl_get,
+ .put = u8500_capture_switch_ctrl_put
+};
+
+/**
+* u8500_capture_switch_ctrl_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills the capture mute switch info into the user structure.
+*/
+static int u8500_capture_switch_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_SWITCH_STATE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_SWITCH_STATE)
+ uinfo->value.enumerated.item = NUMBER_SWITCH_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ switch_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_capture_switch_ctrl_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this is filled by the function
+*
+* This function returns the current capture mute switch state.
+*/
+static int u8500_capture_switch_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = chip->capture_switch;
+ return 0;
+}
+
+/**
+* u8500_capture_switch_ctrl_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this holds the value set by the user
+*
+* This function sets the capture mute switch.
+*/
+static int u8500_capture_switch_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+
+ if (chip->capture_switch != uinfo->value.enumerated.item[0]) {
+ chip->capture_switch = uinfo->value.enumerated.item[0];
+
+ error =
+ u8500_acodec_toggle_capture_mute_control(chip->input_device,
+ chip->
+ capture_switch,
+ USER_ALSA);
+
+ if (AB8500_CODEC_OK != error) {
+ stm_error
+ (" : select u8500_capture_switch_ctrl_put failed\n");
+ return changed;
+ }
+ changed = 1;
+ }
+ return changed;
+}
+
+/*
+****************************************************************************************
+* playback power control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_playback_power_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .subdevice = 0,
+ .name = "PCM Playback Power",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_playback_power_ctrl_info,
+ .get = u8500_playback_power_ctrl_get,
+ .put = u8500_playback_power_ctrl_put
+};
+
+/**
+* u8500_playback_power_ctrl_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills the playback power control info into the user structure.
+*/
+static int u8500_playback_power_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_POWER_STATE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_POWER_STATE)
+ uinfo->value.enumerated.item = NUMBER_POWER_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ power_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_playback_power_ctrl_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this is filled by the function
+*
+* This function returns the current power state of the output device.
+*/
+static int u8500_playback_power_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] =
+ u8500_acodec_get_dest_power_state(chip->output_device);
+ return 0;
+}
+
+/**
+* u8500_playback_power_ctrl_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this holds the value set by the user
+*
+* This function sets the power state of the output device.
+*/
+static int u8500_playback_power_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+ t_u8500_bool_state power_state;
+
+ power_state = u8500_acodec_get_dest_power_state(chip->output_device);
+
+ if (power_state != uinfo->value.enumerated.item[0]) {
+ power_state = uinfo->value.enumerated.item[0];
+
+ error =
+ u8500_acodec_set_dest_power_cntrl(chip->output_device,
+ power_state);
+
+ if (AB8500_CODEC_OK != error) {
+ stm_error
+ (" : select u8500_acodec_set_dest_power_cntrl failed\n");
+ return changed;
+ }
+
+ /* configure the volume settings for the acodec */
+ if ((error =
+ u8500_acodec_set_output_volume(chip->output_device,
+ chip->output_lvolume,
+ chip->output_rvolume,
+ USER_ALSA))) {
+ stm_error(" : set output volume failed\n");
+ return error;
+ }
+ changed = 1;
+ }
+ return changed;
+}
+
+/*
+****************************************************************************************
+* capture power control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_capture_power_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .subdevice = 0,
+ .name = "PCM Capture Power",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_capture_power_ctrl_info,
+ .get = u8500_capture_power_ctrl_get,
+ .put = u8500_capture_power_ctrl_put
+};
+
+/**
+* u8500_capture_power_ctrl_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills the capture power control info into the user structure.
+*/
+static int u8500_capture_power_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_POWER_STATE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_POWER_STATE)
+ uinfo->value.enumerated.item = NUMBER_POWER_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ power_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_capture_power_ctrl_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this is filled by the function
+*
+* This function returns the current power state of the input device.
+*/
+static int u8500_capture_power_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] =
+ u8500_acodec_get_src_power_state(chip->input_device);
+ return 0;
+}
+
+/**
+* u8500_capture_power_ctrl_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this holds the value set by the user
+*
+* This function sets the power state of the input device.
+*/
+static int u8500_capture_power_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+ t_u8500_bool_state power_state;
+
+ power_state = u8500_acodec_get_src_power_state(chip->input_device);
+
+ if (power_state != uinfo->value.enumerated.item[0]) {
+ power_state = uinfo->value.enumerated.item[0];
+
+ error =
+ u8500_acodec_set_src_power_cntrl(chip->input_device,
+ power_state);
+
+ if (AB8500_CODEC_OK != error) {
+ stm_error
+ (" : select u8500_acodec_set_src_power_cntrl failed\n");
+ return changed;
+ }
+ changed = 1;
+ }
+ return changed;
+}
+
+/*
+****************************************************************************************
+* TDM 8 channel mode control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_tdm_mode_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .subdevice = 0,
+ .name = "TDM 8 Channel Mode",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_tdm_mode_ctrl_info,
+ .get = u8500_tdm_mode_ctrl_get,
+ .put = u8500_tdm_mode_ctrl_put
+};
+
+/**
+* u8500_tdm_mode_ctrl_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills the TDM 8 channel mode control info into the user structure.
+*/
+static int u8500_tdm_mode_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_TDM_MODE_STATE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_TDM_MODE_STATE)
+ uinfo->value.enumerated.item = NUMBER_TDM_MODE_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ tdm_mode_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_tdm_mode_ctrl_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this is filled by the function
+*
+* This function returns the current TDM 8 channel mode state.
+*/
+static int u8500_tdm_mode_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = chip->tdm8_ch_mode;
+ return 0;
+}
+
+/**
+* u8500_tdm_mode_ctrl_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this holds the value set by the user
+*
+* This function enables or disables TDM 8 channel mode.
+*/
+static int u8500_tdm_mode_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+
+ chip->tdm8_ch_mode = uinfo->value.enumerated.item[0];
+
+ if (ENABLE == chip->tdm8_ch_mode)
+ printk("\n TDM 8 channel mode enabled\n");
+ else
+ printk("\n TDM 8 channel mode disabled\n");
+
+ changed = 1;
+
+ return changed;
+}
+
+/*
+****************************************************************************************
+* Direct Rendering Mode control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_direct_rendering_mode_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .name = "Direct Rendering Mode",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_direct_rendering_mode_ctrl_info,
+ .get = u8500_direct_rendering_mode_ctrl_get,
+ .put = u8500_direct_rendering_mode_ctrl_put
+};
+
+/**
+* u8500_direct_rendering_mode_ctrl_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills the direct rendering mode control info into the user structure.
+*/
+static int u8500_direct_rendering_mode_ctrl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info
+ *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_DIRECT_RENDERING_STATE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_DIRECT_RENDERING_STATE)
+ uinfo->value.enumerated.item =
+ NUMBER_DIRECT_RENDERING_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ direct_rendering_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_direct_rendering_mode_ctrl_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this is filled by the function
+*
+* This function returns the current direct rendering mode state.
+*/
+static int u8500_direct_rendering_mode_ctrl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value
+ *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = chip->direct_rendering_mode;
+ return 0;
+}
+
+/**
+* u8500_direct_rendering_mode_ctrl_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this holds the value set by the user
+*
+* This function enables or disables direct rendering mode.
+*/
+static int u8500_direct_rendering_mode_ctrl_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value
+ *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+
+ chip->direct_rendering_mode = uinfo->value.enumerated.item[0];
+ changed = 1;
+
+ return changed;
+}
+
+/*
+****************************************************************************************
+* PCM Rendering Mode control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_pcm_rendering_mode_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .name = "PCM Rendering Mode",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_pcm_rendering_mode_ctrl_info,
+ .get = u8500_pcm_rendering_mode_ctrl_get,
+ .put = u8500_pcm_rendering_mode_ctrl_put
+};
+
+/**
+* u8500_pcm_rendering_mode_ctrl_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills the PCM rendering mode control info into the user structure.
+*/
+static int u8500_pcm_rendering_mode_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_PCM_RENDERING_STATE;
+ uinfo->count = 3;
+ if (uinfo->value.enumerated.item >= NUMBER_PCM_RENDERING_STATE)
+ uinfo->value.enumerated.item = NUMBER_PCM_RENDERING_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ pcm_rendering_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_pcm_rendering_mode_ctrl_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this is filled by the function
+*
+* This function returns the current burst FIFO, FM playback and FM TX rendering states.
+*/
+static int u8500_pcm_rendering_mode_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = chip->burst_fifo_mode;
+ uinfo->value.enumerated.item[1] = chip->fm_playback_mode;
+ uinfo->value.enumerated.item[2] = chip->fm_tx_mode;
+ return 0;
+}
+
+/**
+* u8500_pcm_rendering_mode_ctrl_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this holds the values set by the user
+*
+* This function sets the burst FIFO, FM playback and FM TX rendering states.
+*/
+static int u8500_pcm_rendering_mode_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+
+ if (RENDERING_PENDING == uinfo->value.enumerated.item[0]) {
+ return changed;
+ }
+ if (chip->burst_fifo_mode != uinfo->value.enumerated.item[0]) {
+ chip->burst_fifo_mode = uinfo->value.enumerated.item[0];
+ u8500_acodec_set_burst_mode_fifo(chip->burst_fifo_mode);
+ }
+
+ chip->fm_playback_mode = uinfo->value.enumerated.item[1];
+ chip->fm_tx_mode = uinfo->value.enumerated.item[2];
+
+ changed = 1;
+
+ return changed;
+}
+
+#if 0 /* DUMP REGISTER CONTROL */
+/*
+****************************************************************************************
+* dump registers control *
+****************************************************************************************
+*/
+
+struct snd_kcontrol_new u8500_dump_register_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 0,
+ .name = "",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_dump_register_ctrl_info,
+ .get = u8500_dump_register_ctrl_get,
+ .put = u8500_dump_register_ctrl_put
+};
+
+/**
+* u8500_dump_register_ctrl_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills the dump register control info into the user structure.
+*/
+static int u8500_dump_register_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_PCM_RENDERING_STATE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_PCM_RENDERING_STATE)
+ uinfo->value.enumerated.item = NUMBER_PCM_RENDERING_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ pcm_rendering_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_dump_register_ctrl_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this is filled by the function
+*
+* This function returns the current dump register control state.
+*/
+static int u8500_dump_register_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = chip->burst_fifo_mode;
+ uinfo->value.enumerated.item[1] = chip->fm_playback_mode;
+ uinfo->value.enumerated.item[2] = chip->fm_tx_mode;
+ return 0;
+}
+
+/**
+* u8500_dump_register_ctrl_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this holds the values set by the user
+*
+* This function sets the dump register control state.
+*/
+static int u8500_dump_register_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+
+ if (RENDERING_PENDING == uinfo->value.enumerated.item[0]) {
+ return changed;
+ }
+ if (chip->burst_fifo_mode != uinfo->value.enumerated.item[0]) {
+ chip->burst_fifo_mode = uinfo->value.enumerated.item[0];
+ //u8500_acodec_set_burst_mode_fifo(chip->burst_fifo_mode);
+ }
+
+ chip->fm_playback_mode = uinfo->value.enumerated.item[1];
+ chip->fm_tx_mode = uinfo->value.enumerated.item[2];
+
+ changed = 1;
+
+ return changed;
+}
+
+#endif /* DUMP REGISTER CONTROL */
+
+/* Hardware description: this structure (struct snd_pcm_hardware)
+ * contains the definitions of the fundamental hardware configuration.
+ * This configuration will be applied to the runtime structure.
+ */
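+/* Both directions advertise the same sample formats; the usable rates are
+ * restricted at open time via the constraints_rate list built from
+ * u8500_acodec_rates (see snd_u8500_alsa_pcm_open).
+ */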
+static struct snd_pcm_hardware snd_u8500_playback_hw = {
+ .info =
+ (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_PAUSE),
+ .formats =
+ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_U16_BE |
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE |
+ SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_U24_BE |
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_U32_LE |
+ SNDRV_PCM_FMTBIT_S32_BE | SNDRV_PCM_FMTBIT_U32_BE,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .rate_min = MIN_RATE_PLAYBACK,
+ .rate_max = MAX_RATE_PLAYBACK,
+ .channels_min = 1,
+ .channels_max = 8,
+ .buffer_bytes_max = NMDK_BUFFER_SIZE,
+ .period_bytes_min = 128,
+ .period_bytes_max = PAGE_SIZE,
+ .periods_min = NMDK_BUFFER_SIZE / PAGE_SIZE,
+ .periods_max = NMDK_BUFFER_SIZE / 128
+};
+
+static struct snd_pcm_hardware snd_u8500_capture_hw = {
+ .info =
+ (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_PAUSE),
+ .formats =
+ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_U16_BE |
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE |
+ SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_U24_BE |
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_U32_LE |
+ SNDRV_PCM_FMTBIT_S32_BE | SNDRV_PCM_FMTBIT_U32_BE,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .rate_min = MIN_RATE_CAPTURE,
+ .rate_max = MAX_RATE_CAPTURE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .buffer_bytes_max = NMDK_BUFFER_SIZE,
+ .period_bytes_min = 128,
+ .period_bytes_max = PAGE_SIZE,
+ .periods_min = NMDK_BUFFER_SIZE / PAGE_SIZE,
+ .periods_max = NMDK_BUFFER_SIZE / 128
+};
+
+static struct snd_pcm_hw_constraint_list constraints_rate = {
+ .count = sizeof(u8500_acodec_rates) / sizeof(u8500_acodec_rates[0]),
+ .list = u8500_acodec_rates,
+ .mask = 0,
+};
+
+/**
+ * snd_u8500_alsa_pcm_close
+ * @substream - pointer to the playback/capture substream structure
+ *
+ * This routine is used by the alsa framework to close a pcm stream.
+ * Here the dma pipe is disabled and freed.
+ */
+static int snd_u8500_alsa_pcm_close(struct snd_pcm_substream *substream)
+{
+ int stream_id, error = 0;
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ audio_stream_t *ptr_audio_stream = NULL;
+
+ stream_id = substream->pstr->stream;
+ ptr_audio_stream = &chip->stream[ALSA_PCM_DEV][stream_id];
+
+ if (ENABLE == chip->direct_rendering_mode) {
+ ptr_audio_stream->substream = NULL;
+ return 0;
+ } else {
+ stm_close_alsa(chip, ALSA_PCM_DEV, stream_id);
+
+ /* reset the different variables to default */
+
+ ptr_audio_stream->active = 0;
+ ptr_audio_stream->period = 0;
+ ptr_audio_stream->periods = 0;
+ ptr_audio_stream->old_offset = 0;
+ ptr_audio_stream->substream = NULL;
+ if (!(--active_user)) {
+ /* Disable the MSP1 */
+ error = u8500_acodec_unsetuser(USER_ALSA);
+ u8500_acodec_close(I2S_CLIENT_MSP1, ACODEC_DISABLE_ALL);
+ } else {
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK)
+ u8500_acodec_close(I2S_CLIENT_MSP1,
+ ACODEC_DISABLE_TRANSMIT);
+ else if (stream_id == SNDRV_PCM_STREAM_CAPTURE)
+ u8500_acodec_close(I2S_CLIENT_MSP1,
+ ACODEC_DISABLE_RECEIVE);
+ }
+
+ stm_hw_free(substream);
+
+ return error;
+ }
+}
+
+void my_write(u32 address, u8 data)
+{
+ ab8500_write(AB8500_AUDIO, address, data);
+}
+
+void dsp_configure_audio_codec(void)
+{
+ //4500 config for both record DMIC1&2 and playback HS stereo
+ //data width is 16 bits
+
+ my_write(0x200, 0x02); // Start-up audio unreset
+ my_write(0x20B, 0x10); // Start-up audio clk audio enable
+ my_write(0x383, 0x06); // Start-up audio Vaudio supply
+
+ my_write(0xD00, 0x88); // General power up=0x88
+ my_write(0xD01, 0x00); // Software Reset=0x0
+ my_write(0xD02, 0xC0); // Digital AD Channels Enable=0xC0
+ my_write(0xD03, 0xC0); // Digital DA Channels Enable=0xC0
+ my_write(0xD04, 0x00); // Low Power and Conf=0x0
+ my_write(0xD05, 0x0F); // Line in Conf=0xF
+ my_write(0xD06, 0xC0); // Analog Inputs Enable=0xC0
+ my_write(0xD07, 0x30); // ADC Enable=0x30
+ my_write(0xD08, 0x30); // Analog Output Enable=0x30
+ my_write(0xD09, 0x30); // Digital Output Enable=0x30
+ my_write(0xD0A, 0x4F); // Mute Enable=0x4F
+ my_write(0xD0B, 0x7F); // Short Circuit Disable=0x7F
+ my_write(0xD0C, 0x80); // Power-up for Headset=0x80
+ my_write(0xD0D, 0x00); // Envelope Threshold=0x0
+ my_write(0xD0E, 0x00); // Envelope Decay Time=0x0
+ my_write(0xD0F, 0xF0); // Class-D Configuration=0xF0
+ my_write(0xD10, 0x32); // PWM VIBNL Configuration=0x32
+ my_write(0xD11, 0x32); // PWM VIBPL Configuration=0x32
+ my_write(0xD12, 0x32); // PWM VIBNR Configuration=0x32
+ my_write(0xD13, 0x32); // PWM VIBPR Configuration=0x32
+ my_write(0xD14, 0x00); // Microphone 1 Gain=0x0
+ my_write(0xD15, 0x00); // Microphone 2 Gain=0x0
+ my_write(0xD16, 0x00); // Left line-in and HS Analog Gain=0x0
+ my_write(0xD17, 0x00); // Right line-in and HS Analog Gain=0x0
+ my_write(0xD18, 0x1F); // Line-in to HSL Gain=0x1F
+ my_write(0xD19, 0x1F); // Line-in to HSR Gain=0x1F
+ my_write(0xD1A, 0xF0); // AD Channel Filters Configuration=0xF0
+ my_write(0xD1B, 0x85); // TDM Configuration 1=0x85
+ my_write(0xD1C, 0x94); // TDM Configuration 2=0x94
+ my_write(0xD1D, 0x02); // TDM loopback control=0x2
+ my_write(0xD1E, 0x00); // TDM format=0x0
+ my_write(0xD1F, 0x10); // AD Data allocation in Slot 0 to 1=0x10
+ my_write(0xD20, 0xCC); // AD Data allocation in Slot 2 to 3=0xCC
+ my_write(0xD21, 0xCC); // AD Data allocation in Slots 4 to 5=0xCC
+ my_write(0xD22, 0xCC); // AD Data allocation in Slots 6 to 7=0xCC
+ my_write(0xD23, 0xCC); // AD Data allocation in Slots 8 to 9=0xCC
+ my_write(0xD24, 0xCC); // AD Data allocation in Slots 10 to 11=0xCC
+ my_write(0xD25, 0xCC); // AD Data allocation in Slots 12 to 13=0xCC
+ my_write(0xD26, 0xCC); // AD Data allocation in Slots 14 to 15=0xCC
+ my_write(0xD27, 0xCC); // AD Data allocation in Slots 16 to 17=0xCC
+ my_write(0xD28, 0xCC); // AD Data allocation in Slots 18 to 19=0xCC
+ my_write(0xD29, 0xCC); // AD Data allocation in Slots 20 to 21=0xCC
+ my_write(0xD2A, 0xCC); // AD Data allocation in Slots 22 to 23=0xCC
+ my_write(0xD2B, 0xCC); // AD Data allocation in Slots 24 to 25=0xCC
+ my_write(0xD2C, 0xCC); // AD Data allocation in Slots 26 to 27=0xCC
+ my_write(0xD2D, 0xCC); // AD Data allocation in Slots 28 to 29=0xCC
+ my_write(0xD2E, 0xCC); // AD Data allocation in Slots 30 to 31=0xCC
+ my_write(0xD2F, 0x00); // AD slot 0/7 tristate=0x0
+ my_write(0xD30, 0x00); // AD slot 8/15 tristate=0x0
+ my_write(0xD31, 0x00); // AD slot 16/23 tristate=0x0
+ my_write(0xD32, 0x00); // AD slot 24/31 tristate=0x0
+ my_write(0xD33, 0x08); // Slots selection for DA path 1=0x8
+ my_write(0xD34, 0x09); // Slots selection for DA path 2=0x9
+ my_write(0xD35, 0x00); // Slots selection for DA path 3=0x0
+ my_write(0xD36, 0x00); // Slots selection for DA path 4=0x0
+ my_write(0xD37, 0x00); // Slots selection for DA path 5=0x0
+ my_write(0xD38, 0x00); // Slots selection for DA path 6=0x0
+ my_write(0xD39, 0x00); // IRQ mask lsb=0x0
+ my_write(0xD3A, 0x00); // IRQ status lsb=0x0
+ my_write(0xD3B, 0x00); // IRQ mask msb=0x0
+ my_write(0xD3C, 0x00); // IRQ status msb=0x0
+ my_write(0xD3D, 0x00); // Fade speed=0x0
+ my_write(0xD3E, 0x00); // DMIC decimator filter=0x0
+ my_write(0xD3F, 0xF0); // muxing lsb=0xF0
+ my_write(0xD40, 0x00); // muxing msb=0x0
+ my_write(0xD41, 0x1F); // AD1 Digital Gain=0x1F
+ my_write(0xD42, 0x1F); // AD2 Digital Gain=0x1F
+ my_write(0xD43, 0x1F); // AD3 Digital Gain=0x1F
+ my_write(0xD44, 0x1F); // AD4 Digital Gain=0x1F
+ my_write(0xD45, 0x1F); // AD5 Digital Gain=0x1F
+ my_write(0xD46, 0x1F); // AD6 Digital Gain=0x1F
+ my_write(0xD47, 0x00); // DA1 digital Gain=0x00
+ my_write(0xD48, 0x00); // DA2 digital Gain=0x00
+ my_write(0xD49, 0x3F); // DA3 digital Gain=0x3F
+ my_write(0xD4A, 0x3F); // DA4 digital Gain=0x3F
+ my_write(0xD4B, 0x3F); // DA5 digital Gain=0x3F
+ my_write(0xD4C, 0x3F); // DA6 digital Gain=0x3F
+ my_write(0xD4D, 0x3F); // AD1 loopback to HFL digital gain=0x3F
+ my_write(0xD4E, 0x3F); // AD2 loopback to HFR digital gain=0x3F
+ my_write(0xD4F, 0x08); // HSL and EAR digital gain=0x8
+ my_write(0xD50, 0x08); // HSR digital gain=0x8
+ my_write(0xD51, 0x1F); // Side tone FIR1 gain=0x1F
+ my_write(0xD52, 0x1F); // Side tone FIR2 gain=0x1F
+ my_write(0xD53, 0x00); // ANC filter control=0x0
+ my_write(0xD54, 0x00); // ANC Warped Delay Line Shift=0x0
+ my_write(0xD55, 0x00); // ANC FIR output Shift=0x0
+ my_write(0xD56, 0x00); // ANC IIR output Shift=0x0
+ my_write(0xD57, 0x00); // ANC FIR coefficients msb=0x0
+ my_write(0xD58, 0x00); // ANC FIR coefficients lsb=0x0
+ my_write(0xD59, 0x00); // ANC IIR coefficients msb=0x0
+ my_write(0xD5A, 0x00); // ANC IIR coefficients lsb=0x0
+ my_write(0xD5B, 0x00); // ANC Warp delay msb=0x0
+ my_write(0xD5C, 0x00); // ANC Warp delay lsb=0x0
+ my_write(0xD5D, 0x00); // ANC FIR peak register MSB=0x0
+ my_write(0xD5E, 0x00); // ANC FIR peak register LSB=0x0
+ my_write(0xD5F, 0x00); // ANC IIR peak register. MSB part=0x0
+ my_write(0xD60, 0x00); // ANC IIR peak register. LSB part=0x0
+ my_write(0xD61, 0x00); // Side tone FIR address=0x0
+ my_write(0xD62, 0x00); // Side tone FIR coefficient MSB=0x0
+ my_write(0xD63, 0x00); // Side tone FIR coefficient LSB=0x0
+ my_write(0xD64, 0x00); // Filters control=0x0
+ my_write(0xD65, 0x00); // Class D EMI Control=0x0
+ my_write(0xD66, 0x00); // Class D control path=0x0
+ my_write(0xD67, 0x00); // Class D control gain=0x0
+ my_write(0xD68, 0x00); // Burst FIFO int control=0x0
+ my_write(0xD69, 0x00); // Burst FIFO length=0x0
+ my_write(0xD6A, 0x00); // Burst FIFO control=0x0
+ my_write(0xD6B, 0x00); // Burst FIFO switch frame=0x0
+ my_write(0xD6C, 0x00); // Burst FIFO wake up delay=0x0
+ my_write(0xD6D, 0x00); // Burst FIFO samples number=0x0
+ my_write(0xD70, 0x00); // CR112=0x0
+ my_write(0xD71, 0x04); // CR113=0x4
+ my_write(0xD72, 0x00); // CR114=0x0
+ my_write(0xD73, 0x00); // CR115=0x0
+ my_write(0xD74, 0x00); // CR116=0x0
+ my_write(0xD75, 0x00); // CR117=0x0
+ my_write(0xD76, 0x00); // CR118=0x0
+ my_write(0xD77, 0x00); // CR119=0x0
+ my_write(0xD78, 0x00); // CR120=0x0
+ my_write(0xD79, 0x00); // CR121=0x0
+ my_write(0xD7A, 0x00); // CR122=0x0
+ my_write(0xD7B, 0x00); // CR123=0x0
+}
+
+static int configure_direct_rendering(struct snd_pcm_substream *substream)
+{
+ int error = 0, stream_id;
+ int status = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ audio_stream_t *ptr_audio_stream = NULL;
+
+ stream_id = substream->pstr->stream;
+
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) {
+ runtime->hw = snd_u8500_playback_hw;
+ } else {
+ runtime->hw = snd_u8500_capture_hw;
+ }
+
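+ /* Clear the MSP1 registers so that direct rendering starts from a
+  * known, disabled MSP configuration before the codec is programmed
+  * by dsp_configure_audio_codec() below.
+  */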
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x04))); //MSP_GCR
+
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x08))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x0C))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x10))); //MSP
+
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x30))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x34))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x38))); //MSP
+
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x40))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x44))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x48))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x4C))); //MSP
+
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x60))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x64))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x68))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x6C))); //MSP
+
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x18))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x20))); //MSP
+ writel(0x0, ((char *)(IO_ADDRESS(U8500_MSP1_BASE) + 0x2C))); //MSP
+
+ dsp_configure_audio_codec();
+
+#if DRIVER_DEBUG > 0
+ {
+ dump_msp_registers();
+ dump_acodec_registers();
+ }
+#endif
+
+ ptr_audio_stream = &chip->stream[ALSA_PCM_DEV][stream_id];
+
+ ptr_audio_stream->substream = substream;
+
+ FUNC_EXIT();
+ return 0;
+}
+
+/**
+ * snd_u8500_alsa_pcm_open
+ * @substream - pointer to the playback/capture substream structure
+ *
+ * This routine is used by the alsa framework to open a pcm stream.
+ * Here a dma pipe is requested and the device is given its default configuration.
+ */
+static int snd_u8500_alsa_pcm_open(struct snd_pcm_substream *substream)
+{
+ int error = 0, stream_id, status = 0;
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ audio_stream_t *ptr_audio_stream = NULL;
+
+ FUNC_ENTER();
+
+ if (ENABLE == chip->direct_rendering_mode) {
+ configure_direct_rendering(substream);
+ return 0;
+ } else {
+ stream_id = substream->pstr->stream;
+ status = u8500_acodec_open(I2S_CLIENT_MSP1, stream_id);
+
+ if (status) {
+ printk("failed in getting open\n");
+ return -1;
+ }
+
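+ /* active_user counts the open ALSA users of the codec: the USER_ALSA
+  * registration is done only for the first opener and is released again
+  * in snd_u8500_alsa_pcm_close once the count drops back to zero.
+  */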
+ if (!active_user)
+ error = u8500_acodec_setuser(USER_ALSA);
+ if (error)
+ return error;
+ else
+ active_user++;
+
+ error =
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rate);
+ if (error < 0) {
+ stm_error
+ (": error initializing hw sample rate constraint\n");
+ return error;
+ }
+
+ /* configure the default sampling rate for the acodec */
+ second_config = 0;
+
+ if ((error = configure_rate(substream, ACODEC_CONFIG_REQUIRED)))
+ return error;
+
+ /* Set the hardware configuration */
+ stream_id = substream->pstr->stream;
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) {
+ runtime->hw = snd_u8500_playback_hw;
+ /* configure the output sink for the acodec */
+ if ((error =
+ u8500_acodec_select_output(chip->output_device,
+ USER_ALSA,
+ chip->tdm8_ch_mode))) {
+ stm_error(" : select output failed\n");
+ return error;
+ }
+
+ /* configure the volume settings for the acodec */
+ if ((error =
+ u8500_acodec_set_output_volume(chip->output_device,
+ chip->
+ output_lvolume,
+ chip->
+ output_rvolume,
+ USER_ALSA))) {
+ stm_error(" : set output volume failed\n");
+ return error;
+ }
+ } else {
+ runtime->hw = snd_u8500_capture_hw;
+ /* configure the input source for the acodec */
+ if ((error =
+ u8500_acodec_select_input(chip->input_device,
+ USER_ALSA,
+ chip->tdm8_ch_mode))) {
+ stm_error(" : select input failed\n");
+ return error;
+ }
+ /*u8500_acodec_set_src_power_cntrl(AB8500_CODEC_SRC_D_MICROPHONE_1,ENABLE);
+ u8500_acodec_set_src_power_cntrl(AB8500_CODEC_SRC_D_MICROPHONE_2,ENABLE);
+
+ u8500_acodec_set_input_volume(AB8500_CODEC_SRC_D_MICROPHONE_1,
+ chip->input_lvolume,
+ chip->input_rvolume,
+ USER_ALSA);
+
+ u8500_acodec_set_input_volume(AB8500_CODEC_SRC_D_MICROPHONE_2,
+ chip->input_lvolume,
+ chip->input_rvolume,
+ USER_ALSA);
+ */
+
+ if ((error =
+ u8500_acodec_set_input_volume(chip->input_device,
+ chip->input_lvolume,
+ chip->input_rvolume,
+ USER_ALSA))) {
+ stm_error(" : set input volume failed\n");
+ return error;
+ }
+ }
+
+ u8500_acodec_set_burst_mode_fifo(chip->burst_fifo_mode);
+
+#if DRIVER_DEBUG > 0
+ {
+ dump_msp_registers();
+ dump_acodec_registers();
+ }
+#endif
+
+ ptr_audio_stream = &chip->stream[ALSA_PCM_DEV][stream_id];
+
+ ptr_audio_stream->substream = substream;
+
+ if (DISABLE == chip->direct_rendering_mode) {
+ stm_config_hw(chip, substream, ALSA_PCM_DEV, stream_id);
+ }
+ sema_init(&(ptr_audio_stream->alsa_sem), 1);
+ init_completion(&(ptr_audio_stream->alsa_com));
+ ptr_audio_stream->state = ALSA_STATE_UNPAUSE;
+
+ FUNC_EXIT();
+ return 0;
+ }
+}
+
+/**
+ * snd_u8500_alsa_pcm_hw_params
+ * @substream - pointer to the playback/capture substream structure
+ * @hw_params - specifies the hw parameters like format/no of channels etc
+ *
+ * This routine is used by the alsa framework to allocate a dma buffer
+ * used to transfer the data from user space to kernel space
+ *
+ */
+static int snd_u8500_alsa_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+}
+
+/**
+ * snd_u8500_alsa_pcm_hw_free
+ * @substream - pointer to the playback/capture substream structure
+ *
+ * This routine is used by the alsa framework to deallocate a dma buffer
+ * allocated before by snd_u8500_alsa_pcm_hw_params
+ */
+static int snd_u8500_alsa_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ stm_hw_free(substream);
+ return 0;
+}
+
+/**
+ * snd_u8500_alsa_pcm_prepare
+ * @substream - pointer to the playback/capture substream structure
+ *
+ * This callback is called whene the pcm is "prepared" Here is possible
+ * to set the format type ,sample rate ,etc.The callback is called as
+ * well everytime a recovery after an underrun happens.
+ */
+
+static int snd_u8500_alsa_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int error, stream_id;
+
+ FUNC_ENTER();
+
+ if (chip->freq != runtime->rate || chip->channels != runtime->channels) {
+ stm_dbg(DBG_ST.alsa, " freq not same, %d %d\n", chip->freq,
+ runtime->rate);
+ stm_dbg(DBG_ST.alsa, " channels not same, %d %d\n",
+ chip->channels, runtime->channels);
+ if (chip->channels != runtime->channels) {
+ chip->channels = runtime->channels;
+ if ((error =
+ stm_config_hw(chip, substream, ALSA_PCM_DEV,
+ -1))) {
+ stm_dbg(DBG_ST.alsa,
+ "In func %s, stm_config_hw fails",
+ __FUNCTION__);
+ return error;
+ }
+ }
+ chip->freq = runtime->rate;
+ second_config = 1;
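+ /* Rate or channel count changed: close and re-open the MSP for this
+  * direction, then reprogram the rate via configure_rate() with
+  * ACODEC_CONFIG_NOT_REQUIRED.
+  */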
+ stream_id = substream->pstr->stream;
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK)
+ u8500_acodec_close(I2S_CLIENT_MSP1,
+ ACODEC_DISABLE_TRANSMIT);
+ else if (stream_id == SNDRV_PCM_STREAM_CAPTURE)
+ u8500_acodec_close(I2S_CLIENT_MSP1,
+ ACODEC_DISABLE_RECEIVE);
+
+ error = u8500_acodec_open(I2S_CLIENT_MSP1, stream_id);
+ if (error) {
+ printk("failed in getting open\n");
+ return -1;
+ }
+ if ((error =
+ configure_rate(substream, ACODEC_CONFIG_NOT_REQUIRED))) {
+ stm_dbg(DBG_ST.alsa, "In func %s, configure_rate fails",
+ __FUNCTION__);
+ return error;
+ }
+ }
+
+ FUNC_EXIT();
+ return 0;
+}
+
+/**
+ * snd_u8500_alsa_pcm_trigger
+ * @substream - pointer to the playback/capture substream structure
+ * @cmd - specifies the command : start/stop/pause/resume
+ *
+ * This callback is called when the pcm is started, stopped or paused.
+ * The action is specified in the second argument, SNDRV_PCM_TRIGGER_XXX in
+ * <sound/pcm.h>.
+ * This callback is atomic and the interrupts are disabled, so you can't
+ * call other functions that need interrupts without possible risks.
+ */
+static int snd_u8500_alsa_pcm_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ int stream_id = substream->pstr->stream;
+ audio_stream_t *stream = NULL;
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ int error = 0;
+
+ FUNC_ENTER();
+
+ stream = &chip->stream[ALSA_PCM_DEV][stream_id];
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ /* Start the pcm engine */
+ stm_dbg(DBG_ST.alsa, " TRIGGER START\n");
+ if (stream->active == 0) {
+ stream->active = 1;
+ stm_trigger_alsa(stream);
+ break;
+ }
+ stm_error(": H/w is busy\n");
+ return -EINVAL;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ stm_dbg(DBG_ST.alsa, " SNDRV_PCM_TRIGGER_PAUSE_PUSH\n");
+ if (stream->active == 1) {
+ stm_pause_alsa(stream);
+ }
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ stm_dbg(DBG_ST.alsa, " SNDRV_PCM_TRIGGER_PAUSE_RELEASE\n");
+ if (stream->active == 1)
+ stm_unpause_alsa(stream);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* Stop the pcm engine */
+ stm_dbg(DBG_ST.alsa, " TRIGGER STOP\n");
+ if (stream->active == 1)
+ stm_stop_alsa(stream);
+ break;
+ default:
+ stm_error(": invalid command in pcm trigger\n");
+ return -EINVAL;
+ }
+
+ FUNC_EXIT();
+ return error;
+}
+
+/**
+ * snd_u8500_alsa_pcm_pointer
+ * @substream - pointer to the playback/capture substream structure
+ *
+ * This callback is called when the pcm middle layer inquires the current
+ * hardware position on the buffer. The position is returned in frames,
+ * ranging from 0 to buffer_size - 1.
+ */
+static snd_pcm_uframes_t snd_u8500_alsa_pcm_pointer(struct snd_pcm_substream
+ *substream)
+{
+ unsigned int offset;
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ audio_stream_t *stream =
+ &chip->stream[ALSA_PCM_DEV][substream->pstr->stream];
+ struct snd_pcm_runtime *runtime = stream->substream->runtime;
+
+ offset = bytes_to_frames(runtime, stream->old_offset);
+ if (offset < 0 || stream->old_offset < 0)
+ stm_dbg(DBG_ST.alsa, " Offset=%i %i\n", offset,
+ stream->old_offset);
+
+ return offset;
+}
+
+static struct snd_pcm_ops snd_u8500_alsa_playback_ops = {
+ .open = snd_u8500_alsa_pcm_open,
+ .close = snd_u8500_alsa_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_u8500_alsa_pcm_hw_params,
+ .hw_free = snd_u8500_alsa_pcm_hw_free,
+ .prepare = snd_u8500_alsa_pcm_prepare,
+ .trigger = snd_u8500_alsa_pcm_trigger,
+ .pointer = snd_u8500_alsa_pcm_pointer,
+};
+
+static struct snd_pcm_ops snd_u8500_alsa_capture_ops = {
+ .open = snd_u8500_alsa_pcm_open,
+ .close = snd_u8500_alsa_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_u8500_alsa_pcm_hw_params,
+ .hw_free = snd_u8500_alsa_pcm_hw_free,
+ .prepare = snd_u8500_alsa_pcm_prepare,
+ .trigger = snd_u8500_alsa_pcm_trigger,
+ .pointer = snd_u8500_alsa_pcm_pointer,
+};
+
+#ifdef CONFIG_U8500_ACODEC_POLL
+
+/**
+* u8500_alsa_pio_start
+* @stream - pointer to the playback/capture audio_stream_t structure
+*
+* This function sends/receives one chunk of stream data to/from the MSP
+*/
+static void u8500_alsa_pio_start(audio_stream_t * stream)
+{
+ unsigned int offset, dma_size, stream_id;
+ int ret_val;
+ struct snd_pcm_substream *substream = stream->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ stream_id = substream->pstr->stream;
+
+ FUNC_ENTER();
+ dma_size = frames_to_bytes(runtime, runtime->period_size);
+ offset = dma_size * stream->period;
+ stream->old_offset = offset;
+
+ stm_dbg(DBG_ST.alsa, " Transfer started\n");
+ stm_dbg(DBG_ST.alsa, " address = %x size=%d\n",
+ (runtime->dma_addr + offset), dma_size);
+
+ /* Send our stuff */
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK)
+#ifdef CONFIG_U8500_ACODEC_DMA
+ u8500_acodec_send_data(I2S_CLIENT_MSP1,
+ (void *)(runtime->dma_addr + offset),
+ dma_size, 1);
+#else
+ u8500_acodec_send_data(I2S_CLIENT_MSP1,
+ (void *)(runtime->dma_area + offset),
+ dma_size, 0);
+#endif
+ else
+#ifdef CONFIG_U8500_ACODEC_DMA
+ u8500_acodec_receive_data(I2S_CLIENT_MSP1,
+ (void *)(runtime->dma_addr + offset),
+ dma_size, 1);
+#else
+ u8500_acodec_receive_data(I2S_CLIENT_MSP1,
+ (void *)(runtime->dma_area + offset),
+ dma_size, 0);
+#endif
+
+ stream->period++;
+ stream->period %= runtime->periods;
+ stream->periods++;
+
+ FUNC_EXIT();
+
+}
+
+/**
+* acodec_feeding_thread
+* @data - void pointer to the playback/capture audio_stream_t structure
+*
+* This thread sends/receives data to/from the MSP while the stream is active
+*/
+static int acodec_feeding_thread(void *data)
+{
+ audio_stream_t *stream = (audio_stream_t *) data;
+
+ FUNC_ENTER();
+ daemonize("acodec_feeding_thread");
+ allow_signal(SIGKILL);
+ down(&stream->alsa_sem);
+
+ while ((!signal_pending(current)) && (stream->active)) {
+ if (stream->state == ALSA_STATE_PAUSE)
+ wait_for_completion(&(stream->alsa_com));
+
+ u8500_alsa_pio_start(stream);
+ if (stream->substream)
+ snd_pcm_period_elapsed(stream->substream);
+ }
+
+ up(&stream->alsa_sem);
+
+ FUNC_EXIT();
+ return 0;
+}
+
+/**
+* spawn_acodec_feeding_thread
+* @stream - pointer to the playback/capture audio_stream_t structure
+*
+* This function creates a kernel thread running acodec_feeding_thread.
+*/
+
+int spawn_acodec_feeding_thread(audio_stream_t * stream)
+{
+ pid_t pid;
+
+ FUNC_ENTER();
+
+ pid =
+ kernel_thread(acodec_feeding_thread, stream,
+ CLONE_FS | CLONE_SIGHAND);
+
+ FUNC_EXIT();
+ return 0;
+}
+#endif
+
+/**
+ * dma_eot_handler
+ * @data - pointer to structure set in the dma callback handler
+ *
+ * This is the PCM tasklet handler linked to a pipe; its role is to tell
+ * the PCM middle layer when the buffer position goes across the prescribed
+ * period size. To inform it of this, snd_pcm_period_elapsed is called.
+ *
+ * this callback will be called in case of DMA_EVENT_TC only
+ */
+static void dma_eot_handler(void *data)
+{
+ audio_stream_t *stream = data;
+
+ /* snd_pcm_period_elapsed() is _not_ to be protected
+ */
+ stm_dbg(DBG_ST.alsa,
+ "One transfer complete.. going to start the next one\n");
+
+ if (stream->substream)
+ snd_pcm_period_elapsed(stream->substream);
+ if (stream->state == ALSA_STATE_PAUSE)
+ return;
+ if (stream->active == 1) {
+ u8500_alsa_dma_start(stream);
+ }
+}
+
+/**
+ * u8500_alsa_dma_start - used to transmit or receive a dma chunk
+ * @stream - specifies the playback/record stream structure
+ */
+void u8500_alsa_dma_start(audio_stream_t * stream)
+{
+ unsigned int offset, dma_size, stream_id;
+
+ struct snd_pcm_substream *substream = stream->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ u8500_acodec_chip_t *u8500_chip = NULL;
+ stream_id = substream->pstr->stream;
+ u8500_chip = snd_pcm_substream_chip(substream);
+
+ dma_size = frames_to_bytes(runtime, runtime->period_size);
+ offset = dma_size * stream->period;
+ stream->old_offset = offset;
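+ /* Remember this offset so snd_u8500_alsa_pcm_pointer can report it */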
+
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK)
+#ifdef CONFIG_U8500_ACODEC_DMA
+ u8500_acodec_send_data(I2S_CLIENT_MSP1,
+ (void *)(runtime->dma_addr + offset),
+ dma_size, 1);
+#else
+ u8500_acodec_send_data(I2S_CLIENT_MSP1,
+ (void *)(runtime->dma_area + offset),
+ dma_size, 0);
+#endif
+ else
+#ifdef CONFIG_U8500_ACODEC_DMA
+ u8500_acodec_receive_data(I2S_CLIENT_MSP1,
+ (void *)(runtime->dma_addr + offset),
+ dma_size, 1);
+#else
+ u8500_acodec_receive_data(I2S_CLIENT_MSP1,
+ (void *)(runtime->dma_area + offset),
+ dma_size, 0);
+#endif
+
+ stm_dbg(DBG_ST.alsa, " DMA Transfer started\n");
+ stm_dbg(DBG_ST.alsa, " address = %x size=%d\n",
+ (runtime->dma_addr + offset), dma_size);
+
+ stream->period++;
+ stream->period %= runtime->periods;
+ stream->periods++;
+}
+
+/**
+* u8500_audio_init
+* @chip - pointer to u8500_acodec_chip_t structure.
+*
+* This function initialises the u8500 chip structure with default values
+*/
+static void u8500_audio_init(u8500_acodec_chip_t * chip)
+{
+ audio_stream_t *ptr_audio_stream = NULL;
+
+ ptr_audio_stream =
+ &chip->stream[ALSA_PCM_DEV][SNDRV_PCM_STREAM_PLAYBACK];
+ /* Setup DMA stuff */
+ strlcpy(ptr_audio_stream->id, "u8500 playback",
+ sizeof(ptr_audio_stream->id));
+ ptr_audio_stream->stream_id = SNDRV_PCM_STREAM_PLAYBACK;
+
+ /* default initialization for playback */
+ ptr_audio_stream->active = 0;
+ ptr_audio_stream->period = 0;
+ ptr_audio_stream->periods = 0;
+ ptr_audio_stream->old_offset = 0;
+
+ ptr_audio_stream =
+ &chip->stream[ALSA_PCM_DEV][SNDRV_PCM_STREAM_CAPTURE];
+ strlcpy(ptr_audio_stream->id, "u8500 capture",
+ sizeof(ptr_audio_stream->id));
+ ptr_audio_stream->stream_id = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* default initialization for capture */
+ ptr_audio_stream->active = 0;
+ ptr_audio_stream->period = 0;
+ ptr_audio_stream->periods = 0;
+ ptr_audio_stream->old_offset = 0;
+
+ chip->freq = DEFAULT_SAMPLE_RATE;
+ chip->channels = 1;
+ chip->input_lvolume = DEFAULT_GAIN;
+ chip->input_rvolume = DEFAULT_GAIN;
+ chip->output_lvolume = DEFAULT_VOLUME;
+ chip->output_rvolume = DEFAULT_VOLUME;
+ chip->output_device = DEFAULT_OUTPUT_DEVICE;
+ chip->input_device = DEFAULT_INPUT_DEVICE;
+ chip->analog_lpbk = DEFAULT_LOOPBACK_STATE;
+ chip->digital_lpbk = DEFAULT_LOOPBACK_STATE;
+ chip->playback_switch = DEFAULT_SWITCH_STATE;
+ chip->capture_switch = DEFAULT_SWITCH_STATE;
+ chip->tdm8_ch_mode = DEFAULT_TDM8_CH_MODE_STATE;
+ chip->direct_rendering_mode = DEFAULT_DIRECT_RENDERING_STATE;
+ chip->burst_fifo_mode = DEFAULT_BURST_FIFO_STATE;
+ chip->fm_playback_mode = DEFAULT_FM_PLAYBACK_STATE;
+ chip->fm_tx_mode = DEFAULT_FM_TX_STATE;
+
+ //HDMI Default params set
+ chip->hdmi_params.sampling_freq = 48000;
+ chip->hdmi_params.channel_count = 2;
+}
+
+/**
+ * snd_card_u8500_alsa_pcm_new - constructor for a new pcm component
+ * @chip - pointer to chip specific data
+ * @device - specifies the pcm device index
+ */
+static int snd_card_u8500_alsa_pcm_new(u8500_acodec_chip_t * chip, int device)
+{
+ struct snd_pcm *pcm;
+ int err;
+
+ if ((err = snd_pcm_new(chip->card, "u8500", device, 1, 1, &pcm)) < 0) {
+ stm_error(": error in snd_pcm_new\n");
+ return err;
+ }
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_u8500_alsa_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ &snd_u8500_alsa_capture_ops);
+
+ pcm->private_data = chip;
+ pcm->info_flags = 0;
+ chip->pcm = pcm;
+ strcpy(pcm->name, "u8500_alsa");
+
+ u8500_audio_init(pcm->private_data);
+ return 0;
+}
+
+static int u8500_register_alsa_controls(struct snd_card *card,
+ u8500_acodec_chip_t * u8500_chip)
+{
+ int error;
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_playback_vol_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_playback_vol_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_capture_vol_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_capture_vol_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_playback_sink_ctrl,
+ u8500_chip))) < 0) {
+ stm_error(": error initializing playback ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_capture_src_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_playback_sink_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_analog_lpbk_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_analog_lpbk_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_digital_lpbk_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_digital_lpbk_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_playback_switch_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_playback_switch_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_capture_switch_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_capture_switch_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_playback_power_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_playback_power_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_capture_power_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_capture_power_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_tdm_mode_ctrl, u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_tdm_mode_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_direct_rendering_mode_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_direct_rendering_mode_ctrl interface \n\n");
+ return (-1);
+ }
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_pcm_rendering_mode_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_pcm_rendering_mode_ctrl interface \n\n");
+ return (-1);
+ }
+
+ return 0;
+}
+
+static int __init u8500_alsa_probe(struct platform_device *devptr)
+{
+ //static int card_count=0;
+ int error;
+ struct snd_card *card, *hdmi_card;
+ u8500_acodec_chip_t *u8500_chip;
+
+ /*Set currently active users to 0 */
+ active_user = 0;
+
+ error = snd_card_create(0, NULL, THIS_MODULE, sizeof(u8500_acodec_chip_t), &card);
+ if (error < 0) {
+ stm_error(": error in snd_card_create\n");
+ return error;
+ }
+
+ u8500_chip = (u8500_acodec_chip_t *) card->private_data;
+ u8500_chip->card = card;
+
+ if ((error = snd_card_u8500_alsa_pcm_new(u8500_chip, 0)) < 0) {
+ stm_error(": pcm interface can't be initialized\n\n");
+ goto nodev;
+ }
+
+ if ((error = snd_card_u8500_alsa_hdmi_new(u8500_chip, 1)) < 0) {
+ stm_error(": alsa HDMI interface can't be initialized\n\n");
+ goto nodev;
+ }
+
+ if (u8500_register_alsa_controls(card, u8500_chip) < 0) {
+ goto nodev;
+ }
+
+ if (u8500_register_alsa_hdmi_controls(card, u8500_chip) < 0) {
+ goto nodev;
+ }
+#if 0
+ ///////////////////////////////$ H D M I $//////////////////////////////////////
+
+ if (card_count == 1) {
+ hdmi_card =
+ snd_card_new(1, NULL, THIS_MODULE,
+ sizeof(u8500_acodec_chip_t));
+ if (hdmi_card == NULL) {
+ stm_error(": error in hdmi - snd_card_new\n");
+ return -ENOMEM;
+ }
+
+ u8500_chip = (u8500_acodec_chip_t *) hdmi_card->private_data;
+ u8500_chip->card = hdmi_card;
+
+ if ((error = snd_card_u8500_alsa_hdmi_new(u8500_chip, 0)) < 0) {
+ stm_error
+ (": alsa HDMI interface can't be initialized\n\n");
+ goto nodev;
+ }
+
+ if (u8500_register_alsa_hdmi_controls(card, u8500_chip) < 0) {
+ goto nodev;
+ }
+ }
+#endif ////////////////////////////////////////////////////////
+
+ /*char driver[16]; driver name
+ char shortname[32]; short name of this soundcard
+ char longname[80]; name of this soundcard
+ char mixername[80]; mixer name
+ char components[80]; card components delimited with space */
+
+ strcpy(card->driver, "u8500 alsa");
+ strcpy(card->shortname, "u8500 alsa pcm hdmi driver");
+ strcpy(card->longname, "u8500 alsa pcm hdmi driver");
+
+ snd_card_set_dev(card, &devptr->dev);
+
+ if ((error = snd_card_register(card)) == 0) {
+ stm_info("u8500 audio <hdmi> support running..\n");
+ platform_set_drvdata(devptr, card);
+ return 0;
+ }
+
+ nodev:
+ snd_card_free(card);
+ return error;
+}
+
+static int __devexit u8500_alsa_remove(struct platform_device *devptr)
+{
+ snd_card_free(platform_get_drvdata(devptr));
+ platform_set_drvdata(devptr, NULL);
+ stm_info("u8500 audio support stopped\n");
+
+ /*Set currently active users to 0 */
+ active_user = 0;
+
+ return 0;
+}
+
+static struct platform_driver u8500_alsa_driver = {
+ .probe = u8500_alsa_probe,
+ .remove = __devexit_p(u8500_alsa_remove),
+ .driver = {
+ .name = U8500_ALSA_DRIVER,
+ },
+};
+
+/**
+* u8500_alsa_init - Entry function of the AB8500 alsa driver
+*
+* This function registers the u8500 alsa driver with the Linux platform framework
+*/
+static int __init u8500_alsa_init(void)
+{
+ int err;
+
+ if ((err = platform_driver_register(&u8500_alsa_driver)) < 0)
+ return err;
+ device =
+ platform_device_register_simple(U8500_ALSA_DRIVER, -1, NULL, 0);
+ if (IS_ERR(device)) {
+ platform_driver_unregister(&u8500_alsa_driver);
+ return PTR_ERR(device);
+ }
+ //DBG_ST.acodec = 1;
+ //DBG_ST.alsa = 1;
+
+ return 0;
+}
+
+static void __exit u8500_alsa_exit(void)
+{
+ platform_device_unregister(device);
+ platform_driver_unregister(&u8500_alsa_driver);
+}
+
+module_init(u8500_alsa_init);
+module_exit(u8500_alsa_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AB8500 ALSA driver");
diff --git a/sound/arm/u8500_alsa_ab8500.h b/sound/arm/u8500_alsa_ab8500.h
new file mode 100644
index 00000000000..dda802c2156
--- /dev/null
+++ b/sound/arm/u8500_alsa_ab8500.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Deepak Karda
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _U8500_ALSA_H_
+#define _U8500_ALSA_H_
+
+#ifdef CONFIG_U8500_AB8500_CUT10
+#include <mach/ab8500_codec_v1_0.h>
+//#include <mach/ab8500_codec_p_v1_0.h>
+#else
+//#include <mach/ab8500_codec_p.h>
+#include <mach/ab8500_codec.h>
+#endif
+#include <mach/u8500_acodec_ab8500.h>
+
+#define DEFAULT_SAMPLE_RATE 48000
+#define NMDK_BUFFER_SIZE (64*1024)
+#define U8500_ALSA_DRIVER "u8500_alsa"
+
+#define MAX_NUMBER_OF_DEVICES 3 /* ALSA_PCM, ALSA_BT, ALSA_HDMI */
+#define MAX_NUMBER_OF_STREAMS 2 /* PLAYBACK, CAPTURE */
+
+#define ALSA_PCM_DEV 0
+#define ALSA_BT_DEV 2
+#define ALSA_HDMI_DEV 1
+
+/* Debugging stuff */
+#ifndef CONFIG_DEBUG_USER
+#define DEBUG_LEVEL 0
+#else
+#define DEBUG_LEVEL 10
+#endif
+
+#if DEBUG_LEVEL > 0
+static int u8500_acodec_debug = DEBUG_LEVEL;
+#define DEBUG(n, args...) do { if (u8500_acodec_debug>(n)) printk(args); } while (0)
+#else
+#define DEBUG(n, args...) do { } while (0)
+#endif
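+/*
+ * Usage example (only compiled in when DEBUG_LEVEL > 0):
+ *	DEBUG(1, "stream %d started\n", stream_id);
+ * which prints only while u8500_acodec_debug is greater than 1.
+ */
+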
+enum alsa_state {
+ ALSA_STATE_PAUSE,
+ ALSA_STATE_UNPAUSE
+};
+
+/* audio stream definition */
+typedef struct audio_stream_s {
+ char id[64]; /* module identifier string */
+ int stream_id; /* stream identifier */
+ int status;
+ int active; /* we are using this stream for transfer now */
+ int period; /* current transfer period */
+	int periods;		/* current count of periods registered in the DMA engine */
+ enum alsa_state state;
+ unsigned int old_offset;
+ struct snd_pcm_substream *substream;
+ unsigned int exchId;
+ snd_pcm_uframes_t played_frame;
+ struct semaphore alsa_sem;
+ struct completion alsa_com;
+
+} audio_stream_t;
+
+typedef struct hdmi_params_s {
+ int sampling_freq;
+ int channel_count;
+} hdmi_params_t;
+
+/* chip structure definition */
+typedef struct u8500_acodec_s {
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ struct snd_pcm *pcm_hdmi;
+ struct snd_pcm *pcm_bt;
+ unsigned int freq;
+ unsigned int channels;
+ unsigned int input_lvolume;
+ unsigned int input_rvolume;
+ unsigned int output_lvolume;
+ unsigned int output_rvolume;
+ t_ab8500_codec_src input_device;
+ t_ab8500_codec_dest output_device;
+ t_u8500_bool_state analog_lpbk;
+ t_u8500_bool_state digital_lpbk;
+ t_u8500_bool_state playback_switch;
+ t_u8500_bool_state capture_switch;
+ t_u8500_bool_state tdm8_ch_mode;
+ t_u8500_bool_state direct_rendering_mode;
+ t_u8500_pmc_rendering_state burst_fifo_mode;
+ t_u8500_pmc_rendering_state fm_playback_mode;
+ t_u8500_pmc_rendering_state fm_tx_mode;
+ audio_stream_t stream[MAX_NUMBER_OF_DEVICES][MAX_NUMBER_OF_STREAMS];
+ hdmi_params_t hdmi_params;
+} u8500_acodec_chip_t;
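+/*
+ * chip->stream is indexed as stream[device][direction], where device is
+ * ALSA_PCM_DEV, ALSA_HDMI_DEV or ALSA_BT_DEV and direction is
+ * SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE.
+ */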
+
+void u8500_alsa_dma_start(audio_stream_t * stream);
+
+#if (defined(CONFIG_U8500_ACODEC_DMA) || defined(CONFIG_U8500_ACODEC_INTR))
+
+#define stm_trigger_alsa(x) u8500_alsa_dma_start(x)
+static inline void stm_pause_alsa(audio_stream_t * stream)
+{
+ if (stream->state == ALSA_STATE_UNPAUSE) {
+ stream->state = ALSA_STATE_PAUSE;
+ }
+
+}
+static inline void stm_unpause_alsa(audio_stream_t * stream)
+{
+ if (stream->state == ALSA_STATE_PAUSE) {
+ stream->state = ALSA_STATE_UNPAUSE;
+ stm_trigger_alsa(stream);
+ }
+}
+static inline void stm_stop_alsa(audio_stream_t * stream)
+{
+ stream->active = 0;
+ stream->period = 0;
+
+}
+static inline void stm_hw_free(struct snd_pcm_substream *substream)
+{
+ snd_pcm_lib_free_pages(substream);
+}
+
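+/*
+ * In DMA/interrupt builds the close and hw-config hooks are no-ops:
+ * stm_close_alsa() expands to nothing and stm_config_hw() always
+ * reports success.
+ */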
+#define stm_close_alsa(x, y, z)
+#define stm_config_hw(w, x, y, z) 0
+
+#else	/* CONFIG_U8500_ACODEC_POLL */
+
+int spawn_acodec_feeding_thread(audio_stream_t * stream);
+//static int configure_dmadev_acodec(struct snd_pcm_substream *substream);
+
+#define stm_trigger_alsa(x) spawn_acodec_feeding_thread(x)
+#define stm_close_alsa(x, y, z)
+#define stm_config_hw(w, x, y, z) 0
+#define stm_hw_free(x)
+static inline void stm_pause_alsa(audio_stream_t * stream)
+{
+ stream->state = ALSA_STATE_PAUSE;
+}
+static inline void stm_unpause_alsa(audio_stream_t * stream)
+{
+ stream->state = ALSA_STATE_UNPAUSE;
+ complete(&stream->alsa_com);
+}
+static inline void stm_stop_alsa(audio_stream_t * stream)
+{
+ stream->active = 0;
+ stream->period = 0;
+ if (stream->state == ALSA_STATE_PAUSE)
+ complete(&stream->alsa_com);
+}
+
+#endif
+#endif /* END OF HEADER FILE */
diff --git a/sound/arm/u8500_alsa_hdmi.c b/sound/arm/u8500_alsa_hdmi.c
new file mode 100644
index 00000000000..dcbc3583ba7
--- /dev/null
+++ b/sound/arm/u8500_alsa_hdmi.c
@@ -0,0 +1,936 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL),
+ * version 2.
+ */
+
+/* This include must be defined at this point */
+//#include <sound/driver.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/ioctl.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <mach/hardware.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+/* alsa system */
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include "u8500_alsa_ab8500.h"
+#include <mach/msp.h>
+#include <mach/debug.h>
+
+#define ALSA_NAME "DRIVER ALSA HDMI"
+
+/* enables/disables debug msgs */
+#define DRIVER_DEBUG CONFIG_STM_ALSA_DEBUG
+/* msg header represents this module */
+#define DRIVER_DEBUG_PFX ALSA_NAME
+/* message level */
+#define DRIVER_DBG KERN_ERR
+#define ELEMENT_SIZE 0
+
+extern char *power_state_in_texts[NUMBER_POWER_STATE];
+
+static int u8500_hdmi_power_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+static int u8500_hdmi_power_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+static int u8500_hdmi_power_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo);
+
+void dump_msp2_registers(void);
+
+#ifdef CONFIG_U8500_ACODEC_DMA
+
+static void u8500_alsa_hdmi_dma_start(audio_stream_t * stream);
+#define stm_trigger_hdmi(x) u8500_alsa_hdmi_dma_start(x)
+static inline void stm_pause_hdmi(audio_stream_t * stream)
+{
+ if (stream->state == ALSA_STATE_UNPAUSE) {
+ stream->state = ALSA_STATE_PAUSE;
+ }
+}
+static inline void stm_unpause_hdmi(audio_stream_t * stream)
+{
+ if (stream->state == ALSA_STATE_PAUSE) {
+ stream->state = ALSA_STATE_UNPAUSE;
+ stm_trigger_hdmi(stream);
+ }
+}
+static inline void stm_stop_hdmi(audio_stream_t * stream)
+{
+ stream->active = 0;
+ stream->period = 0;
+}
+#else /* Polling */
+
+static int spawn_hdmi_feeding_thread(audio_stream_t * stream);
+static int hdmi_feeding_thread(void *data);
+static void u8500_hdmi_pio_start(audio_stream_t * stream);
+
+#define stm_trigger_hdmi(x) spawn_hdmi_feeding_thread(x)
+
+static inline void stm_pause_hdmi(audio_stream_t * stream)
+{
+ stream->state = ALSA_STATE_PAUSE;
+}
+static inline void stm_unpause_hdmi(audio_stream_t * stream)
+{
+ stream->state = ALSA_STATE_UNPAUSE;
+ complete(&stream->alsa_com);
+}
+static inline void stm_stop_hdmi(audio_stream_t * stream)
+{
+ stream->active = 0;
+ stream->period = 0;
+ if (stream->state == ALSA_STATE_PAUSE)
+ complete(&stream->alsa_com);
+}
+
+#endif
+
+extern struct driver_debug_st DBG_ST;
+extern struct i2sdrv_data *i2sdrv[MAX_I2S_CLIENTS];
+
+static void u8500_audio_hdmi_init(u8500_acodec_chip_t * chip);
+int u8500_register_alsa_hdmi_controls(struct snd_card *card,
+ u8500_acodec_chip_t * u8500_chip);
+static int snd_u8500_alsa_hdmi_open(struct snd_pcm_substream *substream);
+static int snd_u8500_alsa_hdmi_close(struct snd_pcm_substream *substream);
+static int snd_u8500_alsa_hdmi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params);
+static int snd_u8500_alsa_hdmi_hw_free(struct snd_pcm_substream *substream);
+static int snd_u8500_alsa_hdmi_prepare(struct snd_pcm_substream *substream);
+static int snd_u8500_alsa_hdmi_trigger(struct snd_pcm_substream *substream,
+ int cmd);
+static snd_pcm_uframes_t snd_u8500_alsa_hdmi_pointer(struct snd_pcm_substream
+ *substream);
+static int configure_hdmi_rate(struct snd_pcm_substream *);
+static int configure_msp_hdmi(int sampling_freq, int channel_count);
+
+int u8500_hdmi_rates[] = { 32000, 44100, 48000, 64000, 88200,
+ 96000, 128000, 176100, 192000
+};
+
+typedef enum {
+ HDMI_SAMPLING_FREQ_32KHZ = 32,
+ HDMI_SAMPLING_FREQ_44_1KHZ = 44,
+ HDMI_SAMPLING_FREQ_48KHZ = 48,
+ HDMI_SAMPLING_FREQ_64KHZ = 64,
+ HDMI_SAMPLING_FREQ_88_2KHZ = 88,
+ HDMI_SAMPLING_FREQ_96KHZ = 96,
+ HDMI_SAMPLING_FREQ_128KHZ = 128,
+ HDMI_SAMPLING_FREQ_176_1KHZ = 176,
+ HDMI_SAMPLING_FREQ_192KHZ = 192
+} t_hdmi_sample_freq;
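+/*
+ * The enum values are the sampling rate in kHz, truncated to an integer
+ * (44.1 kHz -> 44, 88.2 kHz -> 88, ...); configure_hdmi_rate() maps the
+ * runtime rate in Hz to one of these before programming the MSP.
+ */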
+
+static struct snd_pcm_ops snd_u8500_alsa_hdmi_playback_ops = {
+ .open = snd_u8500_alsa_hdmi_open,
+ .close = snd_u8500_alsa_hdmi_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_u8500_alsa_hdmi_hw_params,
+ .hw_free = snd_u8500_alsa_hdmi_hw_free,
+ .prepare = snd_u8500_alsa_hdmi_prepare,
+ .trigger = snd_u8500_alsa_hdmi_trigger,
+ .pointer = snd_u8500_alsa_hdmi_pointer,
+};
+
+static struct snd_pcm_ops snd_u8500_alsa_hdmi_capture_ops = {
+ .open = snd_u8500_alsa_hdmi_open,
+ .close = snd_u8500_alsa_hdmi_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_u8500_alsa_hdmi_hw_params,
+ .hw_free = snd_u8500_alsa_hdmi_hw_free,
+ .prepare = snd_u8500_alsa_hdmi_prepare,
+ .trigger = snd_u8500_alsa_hdmi_trigger,
+ .pointer = snd_u8500_alsa_hdmi_pointer,
+};
+
+/* Hardware description , this structure (struct snd_pcm_hardware )
+ * contains the definitions of the fundamental hardware configuration.
+ * This configuration will be applied on the runtime structure
+ */
+static struct snd_pcm_hardware snd_u8500_hdmi_playback_hw = {
+ .info =
+ (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_PAUSE),
+ .formats =
+ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_U16_BE,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .rate_min = MIN_RATE_PLAYBACK,
+ .rate_max = MAX_RATE_PLAYBACK,
+ .channels_min = 1,
+ .channels_max = 2,
+ .buffer_bytes_max = NMDK_BUFFER_SIZE,
+ .period_bytes_min = 128,
+ .period_bytes_max = PAGE_SIZE,
+ .periods_min = NMDK_BUFFER_SIZE / PAGE_SIZE,
+ .periods_max = NMDK_BUFFER_SIZE / 128
+};
+
+static struct snd_pcm_hardware snd_u8500_hdmi_capture_hw = {
+ .info =
+ (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_PAUSE),
+ .formats =
+ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_U16_BE,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .rate_min = MIN_RATE_CAPTURE,
+ .rate_max = MAX_RATE_CAPTURE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .buffer_bytes_max = NMDK_BUFFER_SIZE,
+ .period_bytes_min = 128,
+ .period_bytes_max = PAGE_SIZE,
+ .periods_min = NMDK_BUFFER_SIZE / PAGE_SIZE,
+ .periods_max = NMDK_BUFFER_SIZE / 128
+};
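+/*
+ * Both directions share a 64 KiB intermediate buffer (NMDK_BUFFER_SIZE);
+ * with 4 KiB pages this allows anything from 16 periods of one page each
+ * down to 512 periods of 128 bytes each.
+ */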
+
+static struct snd_pcm_hw_constraint_list constraints_hdmi_rate = {
+ .count = sizeof(u8500_hdmi_rates) / sizeof(u8500_hdmi_rates[0]),
+ .list = u8500_hdmi_rates,
+ .mask = 0,
+};
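+/*
+ * Applied in snd_u8500_alsa_hdmi_open() via snd_pcm_hw_constraint_list()
+ * to restrict SNDRV_PCM_HW_PARAM_RATE to the discrete rates listed in
+ * u8500_hdmi_rates[].
+ */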
+
+/**
+ * snd_card_u8500_alsa_hdmi_new - constructor for a new pcm component
+ * @chip - pointer to chip specific data
+ * @device - specifies the pcm device index on the card
+ */
+int snd_card_u8500_alsa_hdmi_new(u8500_acodec_chip_t * chip, int device)
+{
+ struct snd_pcm *pcm;
+ int err;
+
+ if ((err =
+ snd_pcm_new(chip->card, "u8500_hdmi", device, 1, 1, &pcm)) < 0) {
+ stm_error(" : error in snd_pcm_new\n");
+ return err;
+ }
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_u8500_alsa_hdmi_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ &snd_u8500_alsa_hdmi_capture_ops);
+
+ pcm->private_data = chip;
+ pcm->info_flags = 0;
+ chip->pcm_hdmi = pcm;
+ strcpy(pcm->name, "u8500_alsa_hdmi");
+
+ u8500_audio_hdmi_init(pcm->private_data);
+ return 0;
+}
+
+/**
+* u8500_audio_hdmi_init
+* @chip - pointer to u8500_acodec_chip_t structure.
+*
+* This function initialises the u8500 chip structure with default values
+*/
+static void u8500_audio_hdmi_init(u8500_acodec_chip_t * chip)
+{
+ audio_stream_t *ptr_audio_stream = NULL;
+
+ ptr_audio_stream =
+ &chip->stream[ALSA_HDMI_DEV][SNDRV_PCM_STREAM_PLAYBACK];
+ /* Setup DMA stuff */
+
+ strlcpy(ptr_audio_stream->id, "u8500 hdmi playback",
+ sizeof(ptr_audio_stream->id));
+
+ ptr_audio_stream->stream_id = SNDRV_PCM_STREAM_PLAYBACK;
+
+ /* default initialization for playback */
+ ptr_audio_stream->active = 0;
+ ptr_audio_stream->period = 0;
+ ptr_audio_stream->periods = 0;
+ ptr_audio_stream->old_offset = 0;
+
+ ptr_audio_stream =
+ &chip->stream[ALSA_HDMI_DEV][SNDRV_PCM_STREAM_CAPTURE];
+
+ strlcpy(ptr_audio_stream->id, "u8500 hdmi capture",
+ sizeof(ptr_audio_stream->id));
+
+ ptr_audio_stream->stream_id = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* default initialization for capture */
+ ptr_audio_stream->active = 0;
+ ptr_audio_stream->period = 0;
+ ptr_audio_stream->periods = 0;
+ ptr_audio_stream->old_offset = 0;
+
+}
+
+/**
+ * snd_u8500_alsa_hdmi_open
+ * @substream - pointer to the playback/capture substream structure
+ *
+ * This routine is used by the alsa framework to open a pcm stream.
+ * Here a dma pipe is requested and the device is given its default configuration.
+ */
+static int snd_u8500_alsa_hdmi_open(struct snd_pcm_substream *substream)
+{
+ int error = 0, stream_id, status = 0;
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ audio_stream_t *ptr_audio_stream = NULL;
+
+ stream_id = substream->pstr->stream;
+ error = u8500_acodec_setuser(USER_ALSA);
+ status = u8500_acodec_open(I2S_CLIENT_MSP2, stream_id);
+ if (status) {
+		printk("u8500_acodec_open() failed\n");
+ return (-1);
+ }
+
+ error = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_hdmi_rate);
+ if (error < 0) {
+ stm_error
+ (": error initializing hdmi hw sample rate constraint\n");
+ return error;
+ }
+
+ if ((error = configure_hdmi_rate(substream)))
+ return error;
+
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK) {
+ runtime->hw = snd_u8500_hdmi_playback_hw;
+ } else {
+ runtime->hw = snd_u8500_hdmi_capture_hw;
+ }
+
+ ptr_audio_stream = &chip->stream[ALSA_HDMI_DEV][stream_id];
+
+ ptr_audio_stream->substream = substream;
+
+ stm_config_hw(chip, substream, ALSA_HDMI_DEV, stream_id);
+ sema_init(&(ptr_audio_stream->alsa_sem), 1);
+ init_completion(&(ptr_audio_stream->alsa_com));
+
+ ptr_audio_stream->state = ALSA_STATE_UNPAUSE;
+ return 0;
+}
+
+/**
+ * snd_u8500_alsa_hdmi_close
+ * @substream - pointer to the playback/capture substream structure
+ *
+ * This routine is used by the alsa framework to close a pcm stream.
+ * Here a dma pipe is disabled and freed.
+ */
+
+static int snd_u8500_alsa_hdmi_close(struct snd_pcm_substream *substream)
+{
+ int stream_id, error = 0;
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ audio_stream_t *ptr_audio_stream = NULL;
+
+ stream_id = substream->pstr->stream;
+ ptr_audio_stream = &chip->stream[ALSA_HDMI_DEV][stream_id];
+
+ stm_close_alsa(chip, ALSA_HDMI_DEV, stream_id);
+
+ /* reset the different variables to default */
+
+ ptr_audio_stream->active = 0;
+ ptr_audio_stream->period = 0;
+ ptr_audio_stream->periods = 0;
+ ptr_audio_stream->old_offset = 0;
+ ptr_audio_stream->substream = NULL;
+
+ /* Disable the MSP2 */
+ error = u8500_acodec_unsetuser(USER_ALSA);
+ u8500_acodec_close(I2S_CLIENT_MSP2, ACODEC_DISABLE_ALL);
+
+ return error;
+
+}
+
+/**
+ * snd_u8500_alsa_hdmi_hw_params
+ * @substream - pointer to the playback/capture substream structure
+ * @hw_params - specifies the hw parameters like format/no of channels etc
+ *
+ * This routine is used by alsa framework to allocate a dma buffer
+ * used to transfer the data from user space to kernel space
+ *
+ */
+static int snd_u8500_alsa_hdmi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+}
+
+/**
+ * snd_u8500_alsa_hdmi_hw_free
+ * @substream - pointer to the playback/capture substream structure
+ *
+ * This routine is used by alsa framework to deallocate a dma buffer
+ * allocated before by snd_u8500_alsa_hdmi_hw_params
+ */
+static int snd_u8500_alsa_hdmi_hw_free(struct snd_pcm_substream *substream)
+{
+ stm_hw_free(substream);
+ return 0;
+}
+
+/**
+ * snd_u8500_alsa_hdmi_pointer
+ * @substream - pointer to the playback/capture substream structure
+ *
+ * This callback is called when the pcm middle layer inquires the current
+ * hardware position in the buffer. The position is returned in frames,
+ * ranging from 0 to buffer_size - 1.
+ */
+static snd_pcm_uframes_t snd_u8500_alsa_hdmi_pointer(struct snd_pcm_substream
+ *substream)
+{
+ unsigned int offset;
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ audio_stream_t *stream =
+ &chip->stream[ALSA_HDMI_DEV][substream->pstr->stream];
+ struct snd_pcm_runtime *runtime = stream->substream->runtime;
+
+ offset = bytes_to_frames(runtime, stream->old_offset);
+ if (offset < 0 || stream->old_offset < 0)
+ stm_dbg(DBG_ST.alsa, " Offset=%i %i\n", offset,
+ stream->old_offset);
+
+ return offset;
+}
+
+/**
+ * snd_u8500_alsa_hdmi_prepare
+ * @substream - pointer to the playback/capture substream structure
+ *
+ * This callback is called when the pcm is "prepared". Here it is possible
+ * to set the format type, sample rate, etc. The callback is also called
+ * every time a recovery after an underrun happens.
+ */
+
+static int snd_u8500_alsa_hdmi_prepare(struct snd_pcm_substream *substream)
+{
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int error;
+
+ if (chip->hdmi_params.sampling_freq != runtime->rate
+ || chip->hdmi_params.channel_count != runtime->channels) {
+ stm_dbg(DBG_ST.alsa, " freq not same, %d %d\n",
+ chip->hdmi_params.sampling_freq, runtime->rate);
+ stm_dbg(DBG_ST.alsa, " channels not same, %d %d\n",
+ chip->hdmi_params.channel_count, runtime->channels);
+ if (chip->hdmi_params.channel_count != runtime->channels) {
+ chip->hdmi_params.channel_count = runtime->channels;
+ if ((error =
+ stm_config_hw(chip, substream, ALSA_HDMI_DEV,
+ -1))) {
+ stm_dbg(DBG_ST.alsa,
+ "In func %s, stm_config_hw fails",
+ __FUNCTION__);
+ return error;
+ }
+ }
+ chip->hdmi_params.sampling_freq = runtime->rate;
+ if ((error = configure_hdmi_rate(substream))) {
+ stm_dbg(DBG_ST.alsa, "In func %s, configure_rate fails",
+ __FUNCTION__);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * snd_u8500_alsa_hdmi_trigger
+ * @substream - pointer to the playback/capture substream structure
+ * @cmd - specifies the command : start/stop/pause/resume
+ *
+ * This callback is called when the pcm is started, stopped or paused.
+ * The action is specified in the second argument, SND_PCM_TRIGGER_XXX in
+ * <sound/pcm.h>.
+ * This callback is atomic and interrupts are disabled, so you can't
+ * call other functions that need interrupts without possible risks.
+ */
+static int snd_u8500_alsa_hdmi_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ int stream_id = substream->pstr->stream;
+ audio_stream_t *stream = NULL;
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+ int error = 0;
+
+ stream = &chip->stream[ALSA_HDMI_DEV][stream_id];
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ /* Start the pcm engine */
+ stm_dbg(DBG_ST.alsa, " TRIGGER START\n");
+ if (stream->active == 0) {
+ stream->active = 1;
+ stm_trigger_hdmi(stream);
+ break;
+ }
+ stm_error(": H/w is busy\n");
+ return -EINVAL;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ stm_dbg(DBG_ST.alsa, " SNDRV_PCM_TRIGGER_PAUSE_PUSH\n");
+ if (stream->active == 1) {
+ stm_pause_hdmi(stream);
+ }
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ stm_dbg(DBG_ST.alsa, " SNDRV_PCM_TRIGGER_PAUSE_RELEASE\n");
+ if (stream->active == 1)
+ stm_unpause_hdmi(stream);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* Stop the pcm engine */
+ stm_dbg(DBG_ST.alsa, " TRIGGER STOP\n");
+ if (stream->active == 1)
+ stm_stop_hdmi(stream);
+ break;
+ default:
+ stm_error(": invalid command in pcm trigger\n");
+ return -EINVAL;
+ }
+
+ return error;
+
+}
+
+struct snd_kcontrol_new u8500_hdmi_power_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .device = 1,
+ .subdevice = 0,
+ .name = "HDMI Power",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = 0xfff,
+ .info = u8500_hdmi_power_ctrl_info,
+ .get = u8500_hdmi_power_ctrl_get,
+ .put = u8500_hdmi_power_ctrl_put
+};
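+/*
+ * Exposed to user space as the "HDMI Power" mixer control bound to PCM
+ * device 1 of the card; it can be listed with, for example,
+ * "amixer -c <card> controls" (command given for illustration only).
+ */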
+
+/**
+* u8500_hdmi_power_ctrl_info
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_info structure, this is filled by the function
+*
+* This function fills the control element info into the user structure.
+*/
+static int u8500_hdmi_power_ctrl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->value.enumerated.items = NUMBER_POWER_STATE;
+ uinfo->count = 1;
+ if (uinfo->value.enumerated.item >= NUMBER_POWER_STATE)
+ uinfo->value.enumerated.item = NUMBER_POWER_STATE - 1;
+ strcpy(uinfo->value.enumerated.name,
+ power_state_in_texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+
+/**
+* u8500_hdmi_power_ctrl_get
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure, this is filled by the function
+*
+* This function returns the currently selected power state.
+*/
+static int u8500_hdmi_power_ctrl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ uinfo->value.enumerated.item[0] = 0;
+ return 0;
+}
+
+/**
+* u8500_hdmi_power_ctrl_put
+* @kcontrol - pointer to the snd_kcontrol structure
+* @uinfo - pointer to the snd_ctl_elem_value structure holding the new value
+*
+* This function sets the HDMI power state.
+*/
+static int u8500_hdmi_power_ctrl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uinfo)
+{
+ u8500_acodec_chip_t *chip =
+ (u8500_acodec_chip_t *) snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+ t_ab8500_codec_error error;
+ t_u8500_bool_state power_state;
+
+ power_state = uinfo->value.enumerated.item[0];
+
+ changed = 1;
+
+ return changed;
+}
+
+int u8500_register_alsa_hdmi_controls(struct snd_card *card,
+ u8500_acodec_chip_t * u8500_chip)
+{
+ int error;
+
+ if ((error =
+ snd_ctl_add(card,
+ snd_ctl_new1(&u8500_hdmi_power_ctrl,
+ u8500_chip))) < 0) {
+ stm_error
+ (": error initializing u8500_hdmi_power_ctrl interface \n\n");
+ return (-1);
+ }
+
+ return 0;
+}
+
+/**
+* configure_hdmi_rate
+* @substream - pointer to the playback/capture substream structure
+*
+* This function configures the audio codec to the stream sampling frequency
+*/
+static int configure_hdmi_rate(struct snd_pcm_substream *substream)
+{
+ t_hdmi_sample_freq hdmi_sampling_freq;
+
+ u8500_acodec_chip_t *chip = snd_pcm_substream_chip(substream);
+
+ switch (chip->hdmi_params.sampling_freq) {
+ case 32000:
+ hdmi_sampling_freq = HDMI_SAMPLING_FREQ_32KHZ;
+ break;
+ case 44100:
+ hdmi_sampling_freq = HDMI_SAMPLING_FREQ_44_1KHZ;
+ break;
+ case 48000:
+ hdmi_sampling_freq = HDMI_SAMPLING_FREQ_48KHZ;
+ break;
+ case 64000:
+ hdmi_sampling_freq = HDMI_SAMPLING_FREQ_64KHZ;
+ break;
+ case 88200:
+ hdmi_sampling_freq = HDMI_SAMPLING_FREQ_88_2KHZ;
+ break;
+ case 96000:
+ hdmi_sampling_freq = HDMI_SAMPLING_FREQ_96KHZ;
+ break;
+ case 128000:
+ hdmi_sampling_freq = HDMI_SAMPLING_FREQ_128KHZ;
+ break;
+ case 176100:
+ hdmi_sampling_freq = HDMI_SAMPLING_FREQ_176_1KHZ;
+ break;
+	case 192000:
+		hdmi_sampling_freq = HDMI_SAMPLING_FREQ_192KHZ;
+		break;
+	default:
+		stm_error("unsupported frequency\n");
+ return -EINVAL;
+ }
+
+ configure_msp_hdmi(hdmi_sampling_freq, chip->hdmi_params.channel_count);
+
+ return 0;
+
+}
+
+static int configure_msp_hdmi(int sampling_freq, int channel_count)
+{
+ struct i2s_device *i2s_dev = i2sdrv[I2S_CLIENT_MSP2]->i2s;
+ struct msp_config msp_config;
+ t_ab8500_codec_error error_status = AB8500_CODEC_OK;
+
+ memset(&msp_config, 0, sizeof(msp_config));
+
+
+ if (i2sdrv[I2S_CLIENT_MSP2]->flag) {
+ stm_dbg(DBG_ST.acodec, " I2S controller not available\n");
+ return -1;
+ }
+
+ /* MSP configuration */
+
+ msp_config.tx_clock_sel = 0;
+ msp_config.rx_clock_sel = 0;
+
+ msp_config.tx_frame_sync_sel = 0;
+ msp_config.rx_frame_sync_sel = 0;
+
+ msp_config.input_clock_freq = MSP_INPUT_FREQ_48MHZ;
+ msp_config.srg_clock_sel = 0;
+
+ msp_config.rx_frame_sync_pol = RX_FIFO_SYNC_HI;
+ msp_config.tx_frame_sync_pol = TX_FIFO_SYNC_HI;
+
+ msp_config.rx_fifo_config = 0;
+ msp_config.tx_fifo_config = TX_FIFO_ENABLE;
+
+ msp_config.spi_clk_mode = SPI_CLK_MODE_NORMAL;
+ msp_config.spi_burst_mode = 0;
+ msp_config.tx_data_enable = 0;
+ msp_config.loopback_enable = 0;
+ msp_config.default_protocol_desc = 1;
+ msp_config.direction = MSP_TRANSMIT_MODE;
+ msp_config.protocol = MSP_I2S_PROTOCOL;
+ msp_config.frame_size = ELEMENT_SIZE;
+ msp_config.frame_freq = sampling_freq;
+ msp_config.def_elem_len = 0;
+	/* Enable the msp for both tx and rx mode with dma data transfer.
+	   THIS IS NOW DONE SEPARATELY from SAA. */
+ msp_config.data_size = MSP_DATA_SIZE_16BIT;
+
+#ifdef CONFIG_U8500_ACODEC_DMA
+ msp_config.work_mode = MSP_DMA_MODE;
+#elif defined(CONFIG_U8500_ACODEC_POLL)
+ msp_config.work_mode = MSP_POLLING_MODE;
+#else
+ msp_config.work_mode = MSP_INTERRUPT_MODE;
+#endif
+ msp_config.default_protocol_desc = 0;
+
+ msp_config.protocol_desc.rx_phase_mode = MSP_DUAL_PHASE;
+ msp_config.protocol_desc.tx_phase_mode = MSP_DUAL_PHASE;
+ msp_config.protocol_desc.rx_phase2_start_mode =
+ MSP_PHASE2_START_MODE_FRAME_SYNC;
+ msp_config.protocol_desc.tx_phase2_start_mode =
+ MSP_PHASE2_START_MODE_FRAME_SYNC;
+ msp_config.protocol_desc.rx_bit_transfer_format = MSP_BTF_MS_BIT_FIRST;
+ msp_config.protocol_desc.tx_bit_transfer_format = MSP_BTF_MS_BIT_FIRST;
+ msp_config.protocol_desc.rx_frame_length_1 = MSP_FRAME_LENGTH_1;
+ msp_config.protocol_desc.rx_frame_length_2 = MSP_FRAME_LENGTH_1;
+ msp_config.protocol_desc.tx_frame_length_1 = MSP_FRAME_LENGTH_1;
+ msp_config.protocol_desc.tx_frame_length_2 = MSP_FRAME_LENGTH_1;
+ msp_config.protocol_desc.rx_element_length_1 = MSP_ELEM_LENGTH_16;
+ msp_config.protocol_desc.rx_element_length_2 = MSP_ELEM_LENGTH_16;
+ msp_config.protocol_desc.tx_element_length_1 = MSP_ELEM_LENGTH_16;
+ msp_config.protocol_desc.tx_element_length_2 = MSP_ELEM_LENGTH_16;
+ msp_config.protocol_desc.rx_data_delay = MSP_DELAY_1;
+ msp_config.protocol_desc.tx_data_delay = MSP_DELAY_1;
+ msp_config.protocol_desc.rx_clock_pol = MSP_RISING_EDGE;
+ msp_config.protocol_desc.tx_clock_pol = 0;
+ msp_config.protocol_desc.rx_frame_sync_pol =
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH;
+ msp_config.protocol_desc.tx_frame_sync_pol =
+ MSP_FRAME_SYNC_POL_ACTIVE_HIGH;
+ msp_config.protocol_desc.rx_half_word_swap = MSP_HWS_NO_SWAP;
+ msp_config.protocol_desc.tx_half_word_swap = MSP_HWS_NO_SWAP;
+ msp_config.protocol_desc.compression_mode = MSP_COMPRESS_MODE_LINEAR;
+ msp_config.protocol_desc.expansion_mode = MSP_EXPAND_MODE_LINEAR;
+ msp_config.protocol_desc.spi_clk_mode = MSP_SPI_CLOCK_MODE_NON_SPI;
+ msp_config.protocol_desc.spi_burst_mode = MSP_SPI_BURST_MODE_DISABLE;
+ msp_config.protocol_desc.frame_period = 63;
+ msp_config.protocol_desc.frame_width = 31;
+ msp_config.protocol_desc.total_clocks_for_one_frame = 64;
+ msp_config.multichannel_configured = 0;
+ msp_config.multichannel_config.tx_multichannel_enable = 0;
+ /* Channel 1 and channel 3 */
+ msp_config.multichannel_config.tx_channel_0_enable = 0x0000005;
+ msp_config.multichannel_config.tx_channel_1_enable = 0x0000000;
+ msp_config.multichannel_config.tx_channel_2_enable = 0x0000000;
+ msp_config.multichannel_config.tx_channel_3_enable = 0x0000000;
+ error_status = i2s_setup(i2s_dev->controller, &msp_config);
+
+#ifdef CONFIG_DEBUG
+ {
+ dump_msp2_registers();
+ }
+#endif
+
+ if (error_status < 0) {
+ printk("error in msp enable, error_status is %d\n",
+ error_status);
+ return error_status;
+ }
+
+ return 0;
+
+}
+
+#ifdef CONFIG_U8500_ACODEC_DMA
+/**
+ * u8500_alsa_hdmi_dma_start - used to transmit or receive a dma chunk
+ * @stream - specifies the playback/record stream structure
+ */
+static void u8500_alsa_hdmi_dma_start(audio_stream_t * stream)
+{
+ unsigned int offset, dma_size, stream_id;
+
+ struct snd_pcm_substream *substream = stream->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ stream_id = substream->pstr->stream;
+
+ dma_size = frames_to_bytes(runtime, runtime->period_size);
+ offset = dma_size * stream->period;
+ stream->old_offset = offset;
+
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK)
+#ifdef CONFIG_U8500_ACODEC_DMA
+ u8500_acodec_send_data(I2S_CLIENT_MSP2,
+ (void *)(runtime->dma_addr + offset),
+ dma_size, 1);
+#else
+ u8500_acodec_send_data(I2S_CLIENT_MSP2,
+ (void *)(runtime->dma_area + offset),
+ dma_size, 0);
+#endif
+ else
+#ifdef CONFIG_U8500_ACODEC_DMA
+ u8500_acodec_receive_data(I2S_CLIENT_MSP2,
+ (void *)(runtime->dma_addr + offset),
+ dma_size, 1);
+#else
+ u8500_acodec_receive_data(I2S_CLIENT_MSP2,
+ (void *)(runtime->dma_area + offset),
+ dma_size, 0);
+#endif
+
+ stm_dbg(DBG_ST.alsa, " DMA Transfer started\n");
+ stm_dbg(DBG_ST.alsa, " address = %x size=%d\n",
+ (runtime->dma_addr + offset), dma_size);
+
+ stream->period++;
+ stream->period %= runtime->periods;
+ stream->periods++;
+
+
+}
+
+#else
+
+/**
+* spawn_hdmi_feeding_thread
+* @stream - pointer to the playback/capture audio_stream_t structure
+*
+* This function creates a kernel thread that feeds the HDMI stream.
+*/
+
+static int spawn_hdmi_feeding_thread(audio_stream_t * stream)
+{
+ pid_t pid;
+
+ pid =
+ kernel_thread(hdmi_feeding_thread, stream,
+ CLONE_FS | CLONE_SIGHAND);
+
+ return 0;
+}
+
+/**
+* hdmi_feeding_thread
+* @data - void pointer to the playback/capture audio_stream_t structure
+*
+* This thread sends/receives data to/from the MSP while the stream is active
+*/
+static int hdmi_feeding_thread(void *data)
+{
+ audio_stream_t *stream = (audio_stream_t *) data;
+
+ daemonize("hdmi_feeding_thread");
+ allow_signal(SIGKILL);
+ down(&stream->alsa_sem);
+
+ while ((!signal_pending(current)) && (stream->active)) {
+ if (stream->state == ALSA_STATE_PAUSE)
+ wait_for_completion(&(stream->alsa_com));
+
+ u8500_hdmi_pio_start(stream);
+ if (stream->substream)
+ snd_pcm_period_elapsed(stream->substream);
+ }
+
+ up(&stream->alsa_sem);
+
+ return 0;
+}
+
+/**
+* u8500_hdmi_pio_start
+* @stream - pointer to the playback/capture audio_stream_t structure
+*
+* This function sends/receives one chunk of stream data to/from the MSP
+*/
+static void u8500_hdmi_pio_start(audio_stream_t * stream)
+{
+ unsigned int offset, dma_size, stream_id;
+ struct snd_pcm_substream *substream = stream->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ stream_id = substream->pstr->stream;
+
+ dma_size = frames_to_bytes(runtime, runtime->period_size);
+ offset = dma_size * stream->period;
+ stream->old_offset = offset;
+
+ stm_dbg(DBG_ST.alsa, " Transfer started\n");
+ stm_dbg(DBG_ST.alsa, " address = %x size=%d\n",
+ (runtime->dma_addr + offset), dma_size);
+
+ /* Send our stuff */
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK)
+#ifdef CONFIG_U8500_ACODEC_DMA
+ u8500_acodec_send_data(I2S_CLIENT_MSP2,
+ (void *)(runtime->dma_addr + offset),
+ dma_size, 1);
+#else
+ u8500_acodec_send_data(I2S_CLIENT_MSP2,
+ (void *)(runtime->dma_area + offset),
+ dma_size, 0);
+#endif
+ else
+#ifdef CONFIG_U8500_ACODEC_DMA
+ u8500_acodec_receive_data(I2S_CLIENT_MSP2,
+ (void *)(runtime->dma_addr + offset),
+ dma_size, 1);
+#else
+ u8500_acodec_receive_data(I2S_CLIENT_MSP2,
+ (void *)(runtime->dma_area + offset),
+ dma_size, 0);
+#endif
+
+ stream->period++;
+ stream->period %= runtime->periods;
+ stream->periods++;
+}
+#endif
+
+void dump_msp2_registers(void)
+{
+ int i;
+
+ stm_dbg(DBG_ST.acodec, "\nMSP_2 base add = 0x%x\n",
+ (unsigned int)U8500_MSP2_BASE);
+
+ for (i = 0; i < 0x40; i += 4)
+ stm_dbg(DBG_ST.acodec, "msp[0x%x]=0x%x\n", i,
+ readl((char *)(IO_ADDRESS(U8500_MSP2_BASE) + i)));
+}
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 3420bd3da5d..cfadbb6a879 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -649,6 +649,8 @@ int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
}
} else if (!i->openmin && !i->openmax && i->min == i->max)
i->integer = 1;
+ if (i->max < i->min)
+ i->max = i->min;
if (snd_interval_checkempty(i)) {
snd_interval_none(i);
return -EINVAL;
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 35e662d270e..e2054fa16e4 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -13,7 +13,7 @@ menuconfig SND_SOC
If you want ASoC support, you should say Y here and also to the
specific driver for your SoC platform below.
-
+
ASoC provides power efficient ALSA support for embedded battery powered
SoC based systems like PDA's, Phones and Personal Media Players.
@@ -45,6 +45,7 @@ source "sound/soc/s6000/Kconfig"
source "sound/soc/sh/Kconfig"
source "sound/soc/tegra/Kconfig"
source "sound/soc/txx9/Kconfig"
+source "sound/soc/ux500/Kconfig"
# Supported codecs
source "sound/soc/codecs/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 9ea8ac827ad..c5d3966cf03 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_SND_SOC) += s6000/
obj-$(CONFIG_SND_SOC) += sh/
obj-$(CONFIG_SND_SOC) += tegra/
obj-$(CONFIG_SND_SOC) += txx9/
+obj-$(CONFIG_SND_SOC) += ux500/
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 7c205e77d83..efc806a205e 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -105,7 +105,12 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM9705 if SND_SOC_AC97_BUS
select SND_SOC_WM9712 if SND_SOC_AC97_BUS
select SND_SOC_WM9713 if SND_SOC_AC97_BUS
- help
+ select SND_SOC_AB3550
+ select SND_SOC_AB5500
+ select SND_SOC_AB8500
+ select SND_SOC_CG29XX
+ select SND_SOC_AV8100
+ help
Normally ASoC codec drivers are only built if a machine driver which
uses them is also built since they are only usable with a machine
driver. Selecting this option will allow these drivers to be built
@@ -421,6 +426,21 @@ config SND_SOC_WM9712
config SND_SOC_WM9713
tristate
+config SND_SOC_AB3550
+ tristate
+
+config SND_SOC_AB5500
+ tristate
+
+config SND_SOC_AB8500
+ tristate
+
+config SND_SOC_CG29XX
+ tristate
+
+config SND_SOC_AV8100
+ tristate
+
# Amp
config SND_SOC_LM4857
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index de8078178f8..c8b8fd3f8c5 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -1,4 +1,7 @@
snd-soc-88pm860x-objs := 88pm860x-codec.o
+snd-soc-ab3550-objs := ab3550.o
+snd-soc-ab5500-objs := ab5500.o
+snd-soc-ab8500_audio-objs := ab8500_audio.o
snd-soc-ac97-objs := ac97.o
snd-soc-ad1836-objs := ad1836.o
snd-soc-ad193x-objs := ad193x.o
@@ -13,6 +16,8 @@ snd-soc-ak4535-objs := ak4535.o
snd-soc-ak4641-objs := ak4641.o
snd-soc-ak4642-objs := ak4642.o
snd-soc-ak4671-objs := ak4671.o
+snd-soc-av8100_audio-objs := av8100_audio.o
+snd-soc-cg29xx-objs := cg29xx.o
snd-soc-cq93vc-objs := cq93vc.o
snd-soc-cs42l51-objs := cs42l51.o
snd-soc-cs42l73-objs := cs42l73.o
@@ -100,6 +105,9 @@ snd-soc-wm-hubs-objs := wm_hubs.o
snd-soc-max9877-objs := max9877.o
snd-soc-tpa6130a2-objs := tpa6130a2.o
+obj-$(CONFIG_SND_SOC_AB3550) += snd-soc-ab3550.o
+obj-$(CONFIG_SND_SOC_AB5500) += snd-soc-ab5500.o
+obj-$(CONFIG_SND_SOC_AB8500) += snd-soc-ab8500_audio.o
obj-$(CONFIG_SND_SOC_88PM860X) += snd-soc-88pm860x.o
obj-$(CONFIG_SND_SOC_AC97_CODEC) += snd-soc-ac97.o
obj-$(CONFIG_SND_SOC_AD1836) += snd-soc-ad1836.o
@@ -117,6 +125,8 @@ obj-$(CONFIG_SND_SOC_AK4642) += snd-soc-ak4642.o
obj-$(CONFIG_SND_SOC_AK4671) += snd-soc-ak4671.o
obj-$(CONFIG_SND_SOC_ALC5623) += snd-soc-alc5623.o
obj-$(CONFIG_SND_SOC_ALC5632) += snd-soc-alc5632.o
+obj-$(CONFIG_SND_SOC_AV8100) += snd-soc-av8100_audio.o
+obj-$(CONFIG_SND_SOC_CG29XX) += snd-soc-cg29xx.o
obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
obj-$(CONFIG_SND_SOC_CS42L73) += snd-soc-cs42l73.o
@@ -201,3 +211,9 @@ obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
# Amp
obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o
obj-$(CONFIG_SND_SOC_TPA6130A2) += snd-soc-tpa6130a2.o
+ifdef CONFIG_SND_SOC_UX500_DEBUG
+CFLAGS_av8100_audio.o := -DDEBUG
+CFLAGS_ab3550.o := -DDEBUG
+CFLAGS_cg29xx.o := -DDEBUG
+CFLAGS_ab8500_audio.o := -DDEBUG
+endif
diff --git a/sound/soc/codecs/ab3550.c b/sound/soc/codecs/ab3550.c
new file mode 100644
index 00000000000..4a15ab64d32
--- /dev/null
+++ b/sound/soc/codecs/ab3550.c
@@ -0,0 +1,1429 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Xie Xiaolei <xie.xiaolei@etericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>,
+ * Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <linux/mfd/abx500.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <asm/atomic.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <stdarg.h>
+#include "ab3550.h"
+
+
+#define I2C_BANK 0
+
+/* codec private data */
+struct ab3550_codec_dai_data {
+};
+
+static struct device *ab3550_dev;
+
+static u8 virtual_regs[] = {
+ 0, 0
+};
+
+static void set_reg(u8 reg, u8 val)
+{
+ if (!ab3550_dev) {
+		pr_err("%s: The AB3550 codec driver is not initialized.\n",
+ __func__);
+ return;
+ }
+ if (reg < AB3550_FIRST_REG)
+ return;
+ else if (reg <= AB3550_LAST_REG) {
+ abx500_set_register_interruptible(
+ ab3550_dev, I2C_BANK, reg, val);
+ } else if (reg - AB3550_LAST_REG - 1 < ARRAY_SIZE(virtual_regs)) {
+ virtual_regs[reg - AB3550_LAST_REG - 1] = val;
+ }
+}
+
+static void mask_set_reg(u8 reg, u8 mask, u8 val)
+{
+ if (!ab3550_dev) {
+		pr_err("%s: The AB3550 codec driver is not initialized.\n",
+ __func__);
+ return;
+ }
+ if (reg < AB3550_FIRST_REG)
+ return;
+ else if (reg <= AB3550_LAST_REG) {
+ abx500_mask_and_set_register_interruptible(
+ ab3550_dev, I2C_BANK, reg, mask, val);
+ } else if (reg - AB3550_LAST_REG - 1 < ARRAY_SIZE(virtual_regs)) {
+ virtual_regs[reg - AB3550_LAST_REG - 1] &= ~mask;
+ virtual_regs[reg - AB3550_LAST_REG - 1] |= val & mask;
+ }
+}
+
+static u8 read_reg(u8 reg)
+{
+ if (!ab3550_dev) {
+		pr_err("%s: The AB3550 codec driver is not initialized.\n",
+ __func__);
+ return 0;
+ }
+ if (reg < AB3550_FIRST_REG)
+ return 0;
+ else if (reg <= AB3550_LAST_REG) {
+ u8 val;
+ abx500_get_register_interruptible(
+ ab3550_dev, I2C_BANK, reg, &val);
+ return val;
+ } else if (reg - AB3550_LAST_REG - 1 < ARRAY_SIZE(virtual_regs))
+ return virtual_regs[reg - AB3550_LAST_REG - 1];
+	dev_warn(ab3550_dev, "%s: out-of-scope register %u.\n",
+ __func__, reg);
+ return 0;
+}
+
+/* Components that can be powered up/down */
+enum enum_widget {
+ widget_ear = 0,
+ widget_auxo1,
+ widget_auxo2,
+
+ widget_spkr,
+ widget_line1,
+ widget_line2,
+
+ widget_dac1,
+ widget_dac2,
+ widget_dac3,
+
+ widget_rx1,
+ widget_rx2,
+ widget_rx3,
+
+ widget_mic1,
+ widget_mic2,
+
+ widget_micbias1,
+ widget_micbias2,
+
+ widget_apga1,
+ widget_apga2,
+
+ widget_tx1,
+ widget_tx2,
+
+ widget_adc1,
+ widget_adc2,
+
+ widget_if0_dld_l,
+ widget_if0_dld_r,
+ widget_if0_uld_l,
+ widget_if0_uld_r,
+ widget_if1_dld_l,
+ widget_if1_dld_r,
+ widget_if1_uld_l,
+ widget_if1_uld_r,
+
+ widget_mic1p1,
+ widget_mic1n1,
+ widget_mic1p2,
+ widget_mic1n2,
+
+ widget_mic2p1,
+ widget_mic2n1,
+ widget_mic2p2,
+ widget_mic2n2,
+
+ widget_clock,
+
+ number_of_widgets
+};
+
+/* This is only meant for debugging */
+static const char *widget_names[] = {
+ "EAR", "AUXO1", "AUXO2", "SPKR", "LINE1", "LINE2",
+ "DAC1", "DAC2", "DAC3",
+ "RX1", "RX2", "RX3",
+ "MIC1", "MIC2",
+ "MIC-BIAS1", "MIC-BIAS2",
+ "APGA1", "APGA2",
+ "TX1", "TX2",
+ "ADC1", "ADC2",
+ "IF0-DLD-L", "IF0-DLD-R", "IF0-ULD-L", "IF0-ULD-R",
+ "IF1-DLD-L", "IF1-DLD-R", "IF1-ULD-L", "IF1-ULD-R",
+ "MIC1P1", "MIC1N1", "MIC1P2", "MIC1N2",
+ "MIC2P1", "MIC2N1", "MIC2P2", "MIC2N2",
+ "CLOCK"
+};
+
+struct widget_pm {
+ enum enum_widget widget;
+ u8 reg;
+ u8 shift;
+
+ unsigned long source_list[BIT_WORD(number_of_widgets) + 1];
+ unsigned long sink_list[BIT_WORD(number_of_widgets) + 1];
+};
+
+static struct widget_pm widget_pm_array[] = {
+ {.widget = widget_ear, .reg = EAR, .shift = EAR_PWR_SHIFT},
+ {.widget = widget_auxo1, .reg = AUXO1, .shift = AUXOx_PWR_SHIFT},
+ {.widget = widget_auxo2, .reg = AUXO2, .shift = AUXOx_PWR_SHIFT},
+ {.widget = widget_spkr, .reg = SPKR, .shift = SPKR_PWR_SHIFT},
+ {.widget = widget_line1, .reg = LINE1, .shift = LINEx_PWR_SHIFT},
+ {.widget = widget_line2, .reg = LINE2, .shift = LINEx_PWR_SHIFT},
+
+ {.widget = widget_dac1, .reg = RX1, .shift = DACx_PWR_SHIFT},
+ {.widget = widget_dac2, .reg = RX2, .shift = DACx_PWR_SHIFT},
+ {.widget = widget_dac3, .reg = RX3, .shift = DACx_PWR_SHIFT},
+
+ {.widget = widget_rx1, .reg = RX1, .shift = RXx_PWR_SHIFT},
+ {.widget = widget_rx2, .reg = RX2, .shift = RXx_PWR_SHIFT},
+ {.widget = widget_rx3, .reg = RX3, .shift = RXx_PWR_SHIFT},
+
+ {.widget = widget_mic1, .reg = MIC1_GAIN, .shift = MICx_PWR_SHIFT},
+ {.widget = widget_mic2, .reg = MIC2_GAIN, .shift = MICx_PWR_SHIFT},
+
+ {.widget = widget_micbias1, .reg = MIC_BIAS1,
+ .shift = MBIAS_PWR_SHIFT},
+ {.widget = widget_micbias2, .reg = MIC_BIAS2,
+ .shift = MBIAS_PWR_SHIFT},
+
+ {.widget = widget_apga1, .reg = ANALOG_LOOP_PGA1,
+ .shift = APGAx_PWR_SHIFT},
+ {.widget = widget_apga2, .reg = ANALOG_LOOP_PGA2,
+ .shift = APGAx_PWR_SHIFT},
+
+ {.widget = widget_tx1, .reg = TX1, .shift = TXx_PWR_SHIFT},
+ {.widget = widget_tx2, .reg = TX2, .shift = TXx_PWR_SHIFT},
+
+ {.widget = widget_adc1, .reg = TX1, .shift = ADCx_PWR_SHIFT},
+ {.widget = widget_adc2, .reg = TX2, .shift = ADCx_PWR_SHIFT},
+
+ {.widget = widget_if0_dld_l, .reg = AB3550_VIRTUAL_REG1,
+ .shift = IF0_DLD_L_PW_SHIFT},
+ {.widget = widget_if0_dld_r, .reg = AB3550_VIRTUAL_REG1,
+ .shift = IF0_DLD_R_PW_SHIFT},
+ {.widget = widget_if0_uld_l, .reg = AB3550_VIRTUAL_REG1,
+ .shift = IF0_ULD_L_PW_SHIFT},
+ {.widget = widget_if0_uld_r, .reg = AB3550_VIRTUAL_REG1,
+ .shift = IF0_ULD_R_PW_SHIFT},
+
+ {.widget = widget_if1_dld_l, .reg = AB3550_VIRTUAL_REG1,
+ .shift = IF1_DLD_L_PW_SHIFT},
+ {.widget = widget_if1_dld_r, .reg = AB3550_VIRTUAL_REG1,
+ .shift = IF1_DLD_R_PW_SHIFT},
+ {.widget = widget_if1_uld_l, .reg = AB3550_VIRTUAL_REG1,
+ .shift = IF1_ULD_L_PW_SHIFT},
+ {.widget = widget_if1_uld_r, .reg = AB3550_VIRTUAL_REG1,
+ .shift = IF1_ULD_R_PW_SHIFT},
+
+ {.widget = widget_mic1p1, .reg = AB3550_VIRTUAL_REG2,
+ .shift = MIC1P1_PW_SHIFT},
+ {.widget = widget_mic1n1, .reg = AB3550_VIRTUAL_REG2,
+ .shift = MIC1N1_PW_SHIFT},
+ {.widget = widget_mic1p2, .reg = AB3550_VIRTUAL_REG2,
+ .shift = MIC1P2_PW_SHIFT},
+ {.widget = widget_mic1n2, .reg = AB3550_VIRTUAL_REG2,
+ .shift = MIC1N2_PW_SHIFT},
+
+ {.widget = widget_mic2p1, .reg = AB3550_VIRTUAL_REG2,
+ .shift = MIC2P1_PW_SHIFT},
+ {.widget = widget_mic2n1, .reg = AB3550_VIRTUAL_REG2,
+ .shift = MIC2N1_PW_SHIFT},
+ {.widget = widget_mic2p2, .reg = AB3550_VIRTUAL_REG2,
+ .shift = MIC2P2_PW_SHIFT},
+ {.widget = widget_mic2n2, .reg = AB3550_VIRTUAL_REG2,
+ .shift = MIC2N2_PW_SHIFT},
+
+ {.widget = widget_clock, .reg = CLOCK, .shift = CLOCK_ENABLE_SHIFT},
+};
+
+DEFINE_MUTEX(ab3550_pm_mutex);
+
+static struct {
+ enum enum_widget stack[number_of_widgets];
+ int p;
+} pm_stack;
+
+struct ab3550_dai_private {
+ unsigned int fmt;
+};
+
+#define pm_stack_as_bitmap ({ \
+ unsigned long bitmap[BIT_WORD(number_of_widgets) + 1]; \
+ int i; \
+ memset(bitmap, 0, sizeof(bitmap)); \
+ for (i = 0; i < pm_stack.p; i++) { \
+ set_bit(pm_stack.stack[i], bitmap); \
+ } \
+ bitmap; \
+ })
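+/*
+ * Snapshot of the widgets currently sitting on the power-management stack,
+ * expressed as a bitmap so the traversal code can test membership with
+ * test_bit()/bitmap_intersects().
+ */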
+
+/* These are only meant to meet the obligations of DAPM */
+static const struct snd_soc_dapm_widget ab3550_dapm_widgets[] = {
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+};
+
+
+static const char *enum_rx2_select[] = {"I2S0", "I2S1"};
+static const char *enum_i2s_input_select[] = {
+ "tri-state", "MIC1", "MIC2", "mute"
+};
+static const char *enum_apga1_source[] = {"LINEIN1", "MIC1", "MIC2"};
+static const char *enum_apga2_source[] = {"LINEIN2", "MIC1", "MIC2"};
+static const char *enum_dac_side_tone[] = {"TX1", "TX2"};
+static const char *enum_dac_power_mode[] = {"100%", "75%", "55%"};
+static const char *enum_ear_power_mode[] = {"100%", "70%"};
+static const char *enum_auxo_power_mode[] = {
+ "100%", "67%", "50%", "25%", "auto"
+};
+static const char *enum_onoff[] = {"Off", "On"};
+static const char *enum_mbias_hiz_option[] = {"GND", "HiZ"};
+static const char *enum_mbias2_output_voltage[] = {"2.0v", "2.2v"};
+static const char *enum_mic_input_impedance[] = {
+ "12.5 kohm", "25 kohm", "50 kohm"
+};
+static const char *enum_hp_filter[] = {"HP3", "HP1", "bypass"};
+static const char *enum_i2s_word_length[] = {"16 bits", "24 bits"};
+static const char *enum_i2s_mode[] = {"Master Mode", "Slave Mode"};
+static const char *enum_i2s_tristate[] = {"Normal", "Tri-state"};
+static const char *enum_optional_resistor[] = {"disconnected", "connected"};
+static const char *enum_i2s_sample_rate[] = {
+ "8 kHz", "16 kHz", "44.1 kHz", "48 kHz"
+};
+static const char *enum_signal_inversion[] = {"normal", "inverted"};
+
+/* RX2 Select */
+static struct soc_enum soc_enum_rx2_select =
+ SOC_ENUM_SINGLE(RX2, 4, ARRAY_SIZE(enum_rx2_select), enum_rx2_select);
+
+/* I2S0 Input Select */
+static struct soc_enum soc_enum_i2s0_input_select =
+ SOC_ENUM_DOUBLE(INTERFACE0_DATA, 0, 2,
+ ARRAY_SIZE(enum_i2s_input_select),
+ enum_i2s_input_select);
+/* I2S1 Input Select */
+static struct soc_enum soc_enum_i2s1_input_select =
+ SOC_ENUM_DOUBLE(INTERFACE1_DATA, 0, 2,
+ ARRAY_SIZE(enum_i2s_input_select),
+ enum_i2s_input_select);
+
+/* APGA1 Source */
+static struct soc_enum soc_enum_apga1_source =
+ SOC_ENUM_SINGLE(ANALOG_LOOP_PGA1, APGAx_MUX_SHIFT,
+ ARRAY_SIZE(enum_apga1_source), enum_apga1_source);
+
+/* APGA2 Source */
+static struct soc_enum soc_enum_apga2_source =
+ SOC_ENUM_SINGLE(ANALOG_LOOP_PGA2, APGAx_MUX_SHIFT,
+ ARRAY_SIZE(enum_apga2_source), enum_apga2_source);
+
+static struct soc_enum soc_enum_apga1_enable =
+ SOC_ENUM_SINGLE(ANALOG_LOOP_PGA1, APGAx_PWR_SHIFT,
+ ARRAY_SIZE(enum_onoff), enum_onoff);
+
+static struct soc_enum soc_enum_apga2_enable =
+ SOC_ENUM_SINGLE(ANALOG_LOOP_PGA2, APGAx_PWR_SHIFT,
+ ARRAY_SIZE(enum_onoff), enum_onoff);
+
+/* DAC1 Side Tone */
+static struct soc_enum soc_enum_dac1_side_tone =
+ SOC_ENUM_SINGLE(SIDETONE1_PGA, STx_MUX_SHIFT,
+ ARRAY_SIZE(enum_dac_side_tone), enum_dac_side_tone);
+
+/* DAC2 Side Tone */
+static struct soc_enum soc_enum_dac2_side_tone =
+ SOC_ENUM_SINGLE(SIDETONE2_PGA, STx_MUX_SHIFT,
+ ARRAY_SIZE(enum_dac_side_tone), enum_dac_side_tone);
+
+/* DAC1 Power Mode */
+static struct soc_enum soc_enum_dac1_power_mode =
+ SOC_ENUM_SINGLE(RX1, DACx_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_dac_power_mode), enum_dac_power_mode);
+
+/* DAC2 Power Mode */
+static struct soc_enum soc_enum_dac2_power_mode =
+ SOC_ENUM_SINGLE(RX2, DACx_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_dac_power_mode), enum_dac_power_mode);
+
+/* DAC3 Power Mode */
+static struct soc_enum soc_enum_dac3_power_mode =
+ SOC_ENUM_SINGLE(RX3, DACx_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_dac_power_mode), enum_dac_power_mode);
+
+/* EAR Power Mode */
+static struct soc_enum soc_enum_ear_power_mode =
+ SOC_ENUM_SINGLE(EAR, EAR_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_ear_power_mode), enum_ear_power_mode);
+
+/* AUXO Power Mode */
+static struct soc_enum soc_enum_auxo_power_mode =
+ SOC_ENUM_SINGLE(AUXO_PWR_MODE, AUXO_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_auxo_power_mode),
+ enum_auxo_power_mode);
+
+/* MBIAS1 HiZ Option */
+static struct soc_enum soc_enum_mbias1_hiz_option =
+ SOC_ENUM_SINGLE(MIC_BIAS1, MBIAS_PDN_IMP_SHIFT,
+ ARRAY_SIZE(enum_mbias_hiz_option),
+ enum_mbias_hiz_option);
+
+/* MBIAS1 HiZ Option */
+static struct soc_enum soc_enum_mbias2_hiz_option =
+ SOC_ENUM_SINGLE(MIC_BIAS2, MBIAS_PDN_IMP_SHIFT,
+ ARRAY_SIZE(enum_mbias_hiz_option),
+ enum_mbias_hiz_option);
+
+/* MBIAS2 Output voltage */
+static struct soc_enum soc_enum_mbias2_output_voltage =
+ SOC_ENUM_SINGLE(MIC_BIAS2, MBIAS2_OUT_V_SHIFT,
+ ARRAY_SIZE(enum_mbias2_output_voltage),
+ enum_mbias2_output_voltage);
+
+static struct soc_enum soc_enum_mbias2_internal_resistor =
+ SOC_ENUM_SINGLE(MIC_BIAS2_VAD, MBIAS2_R_INT_SHIFT,
+ ARRAY_SIZE(enum_optional_resistor),
+ enum_optional_resistor);
+
+static struct soc_enum soc_enum_mic1_input_impedance =
+ SOC_ENUM_SINGLE(MIC1_GAIN, MICx_IN_IMP_SHIFT,
+ ARRAY_SIZE(enum_mic_input_impedance),
+ enum_mic_input_impedance);
+
+static struct soc_enum soc_enum_mic2_input_impedance =
+ SOC_ENUM_SINGLE(MIC2_GAIN, MICx_IN_IMP_SHIFT,
+ ARRAY_SIZE(enum_mic_input_impedance),
+ enum_mic_input_impedance);
+
+static struct soc_enum soc_enum_tx1_hp_filter =
+ SOC_ENUM_SINGLE(TX1, TXx_HP_FILTER_SHIFT,
+ ARRAY_SIZE(enum_hp_filter),
+ enum_hp_filter);
+
+static struct soc_enum soc_enum_tx2_hp_filter =
+ SOC_ENUM_SINGLE(TX2, TXx_HP_FILTER_SHIFT,
+ ARRAY_SIZE(enum_hp_filter),
+ enum_hp_filter);
+
+static struct soc_enum soc_enum_st1_hp_filter =
+ SOC_ENUM_SINGLE(SIDETONE1_PGA, STx_HP_FILTER_SHIFT,
+ ARRAY_SIZE(enum_hp_filter),
+ enum_hp_filter);
+
+static struct soc_enum soc_enum_st2_hp_filter =
+ SOC_ENUM_SINGLE(SIDETONE2_PGA, STx_HP_FILTER_SHIFT,
+ ARRAY_SIZE(enum_hp_filter),
+ enum_hp_filter);
+
+static struct soc_enum soc_enum_i2s0_word_length =
+ SOC_ENUM_SINGLE(INTERFACE0, I2Sx_WORDLENGTH_SHIFT,
+ ARRAY_SIZE(enum_i2s_word_length),
+ enum_i2s_word_length);
+
+static struct soc_enum soc_enum_i2s1_word_length =
+ SOC_ENUM_SINGLE(INTERFACE1, I2Sx_WORDLENGTH_SHIFT,
+ ARRAY_SIZE(enum_i2s_word_length),
+ enum_i2s_word_length);
+
+static struct soc_enum soc_enum_i2s0_mode =
+ SOC_ENUM_SINGLE(INTERFACE0, I2Sx_MODE_SHIFT,
+ ARRAY_SIZE(enum_i2s_mode),
+ enum_i2s_mode);
+
+static struct soc_enum soc_enum_i2s1_mode =
+ SOC_ENUM_SINGLE(INTERFACE1, I2Sx_MODE_SHIFT,
+ ARRAY_SIZE(enum_i2s_mode),
+ enum_i2s_mode);
+
+static struct soc_enum soc_enum_i2s0_tristate =
+ SOC_ENUM_SINGLE(INTERFACE0, I2Sx_TRISTATE_SHIFT,
+ ARRAY_SIZE(enum_i2s_tristate),
+ enum_i2s_tristate);
+
+static struct soc_enum soc_enum_i2s1_tristate =
+ SOC_ENUM_SINGLE(INTERFACE1, I2Sx_TRISTATE_SHIFT,
+ ARRAY_SIZE(enum_i2s_tristate),
+ enum_i2s_tristate);
+
+static struct soc_enum soc_enum_i2s0_pulldown_resistor =
+ SOC_ENUM_SINGLE(INTERFACE0, I2Sx_PULLDOWN_SHIFT,
+ ARRAY_SIZE(enum_optional_resistor),
+ enum_optional_resistor);
+
+static struct soc_enum soc_enum_i2s1_pulldown_resistor =
+ SOC_ENUM_SINGLE(INTERFACE1, I2Sx_PULLDOWN_SHIFT,
+ ARRAY_SIZE(enum_optional_resistor),
+ enum_optional_resistor);
+
+static struct soc_enum soc_enum_i2s0_sample_rate =
+ SOC_ENUM_SINGLE(INTERFACE0, I2Sx_SR_SHIFT,
+ ARRAY_SIZE(enum_i2s_sample_rate),
+ enum_i2s_sample_rate);
+
+static struct soc_enum soc_enum_i2s1_sample_rate =
+ SOC_ENUM_SINGLE(INTERFACE1, I2Sx_SR_SHIFT,
+ ARRAY_SIZE(enum_i2s_sample_rate),
+ enum_i2s_sample_rate);
+
+static struct soc_enum soc_enum_line1_inversion =
+ SOC_ENUM_SINGLE(LINE1, LINEx_INV_SHIFT,
+ ARRAY_SIZE(enum_signal_inversion),
+ enum_signal_inversion);
+
+static struct soc_enum soc_enum_line2_inversion =
+ SOC_ENUM_SINGLE(LINE2, LINEx_INV_SHIFT,
+ ARRAY_SIZE(enum_signal_inversion),
+ enum_signal_inversion);
+
+static struct soc_enum soc_enum_auxo1_inversion =
+ SOC_ENUM_SINGLE(AUXO1, AUXOx_INV_SHIFT,
+ ARRAY_SIZE(enum_signal_inversion),
+ enum_signal_inversion);
+
+static struct soc_enum soc_enum_auxo2_inversion =
+	SOC_ENUM_SINGLE(AUXO2, AUXOx_INV_SHIFT,
+ ARRAY_SIZE(enum_signal_inversion),
+ enum_signal_inversion);
+
+static struct soc_enum soc_enum_auxo1_pulldown_resistor =
+ SOC_ENUM_SINGLE(AUXO1, AUXOx_PULLDOWN_SHIFT,
+ ARRAY_SIZE(enum_optional_resistor),
+ enum_optional_resistor);
+
+static struct soc_enum soc_enum_auxo2_pulldown_resistor =
+	SOC_ENUM_SINGLE(AUXO2, AUXOx_PULLDOWN_SHIFT,
+ ARRAY_SIZE(enum_optional_resistor),
+ enum_optional_resistor);
+
+static struct snd_kcontrol_new ab3550_snd_controls[] = {
+ /* RX Routing */
+ SOC_ENUM("RX2 Select", soc_enum_rx2_select),
+ SOC_SINGLE("LINE1 Adder", LINE1_ADDER, 0, 0x07, 0),
+ SOC_SINGLE("LINE2 Adder", LINE2_ADDER, 0, 0x07, 0),
+ SOC_SINGLE("EAR Adder", EAR_ADDER, 0, 0x07, 0),
+ SOC_SINGLE("SPKR Adder", SPKR_ADDER, 0, 0x07, 0),
+ SOC_SINGLE("AUXO1 Adder", AUXO1_ADDER, 0, 0x07, 0),
+ SOC_SINGLE("AUXO2 Adder", AUXO2_ADDER, 0, 0x07, 0),
+ /* TX Routing */
+ SOC_SINGLE("MIC1 Input Select", MIC1_INPUT_SELECT, 0, 0xff, 0),
+	SOC_SINGLE("MIC2 Input Select", MIC2_INPUT_SELECT, 0, 0xff, 0),
+ SOC_SINGLE("MIC2 to MIC1", MIC2_TO_MIC1, 0, 0x03, 0),
+ SOC_ENUM("I2S0 Input Select", soc_enum_i2s0_input_select),
+ SOC_ENUM("I2S1 Input Select", soc_enum_i2s1_input_select),
+	/* Routing of Side Tone and Analog Loop */
+ SOC_ENUM("APGA1 Source", soc_enum_apga1_source),
+ SOC_ENUM("APGA2 Source", soc_enum_apga2_source),
+ SOC_ENUM("APGA1 Enable", soc_enum_apga1_enable),
+ SOC_ENUM("APGA2 Enable", soc_enum_apga2_enable),
+ SOC_SINGLE("APGA1 Destination", APGA1_ADDER, 0, 0x3f, 0),
+ SOC_SINGLE("APGA2 Destination", APGA2_ADDER, 0, 0x3f, 0),
+ SOC_ENUM("DAC1 Side Tone", soc_enum_dac1_side_tone),
+ SOC_ENUM("DAC2 Side Tone", soc_enum_dac2_side_tone),
+ /* RX Volume Control */
+ SOC_SINGLE("RX-DPGA1 Gain", RX1_DIGITAL_PGA, 0, 0x43, 0),
+	SOC_SINGLE("RX-DPGA2 Gain", RX2_DIGITAL_PGA, 0, 0x43, 0),
+ SOC_SINGLE("RX-DPGA3 Gain", RX3_DIGITAL_PGA, 0, 0x43, 0),
+ SOC_SINGLE("LINE1 Gain", LINE1, LINEx_GAIN_SHIFT, 0x0a, 0),
+ SOC_SINGLE("LINE2 Gain", LINE2, LINEx_GAIN_SHIFT, 0x0a, 0),
+ SOC_SINGLE("SPKR Gain", SPKR, SPKR_GAIN_SHIFT, 0x16, 0),
+ SOC_SINGLE("EAR Gain", EAR, EAR_GAIN_SHIFT, 0x0e, 0),
+ SOC_SINGLE("AUXO1 Gain", AUXO1, AUXOx_GAIN_SHIFT, 0x0c, 0),
+ SOC_SINGLE("AUXO2 Gain", AUXO2, AUXOx_GAIN_SHIFT, 0x0c, 0),
+ /* TX Volume Control */
+ SOC_SINGLE("MIC1 Gain", MIC1_GAIN, MICx_GAIN_SHIFT, 0x0a, 0),
+ SOC_SINGLE("MIC2 Gain", MIC2_GAIN, MICx_GAIN_SHIFT, 0x0a, 0),
+ SOC_SINGLE("TX-DPGA1 Gain", TX_DIGITAL_PGA1, TXDPGAx_SHIFT, 0x0f, 0),
+ SOC_SINGLE("TX-DPGA2 Gain", TX_DIGITAL_PGA2, TXDPGAx_SHIFT, 0x0f, 0),
+ /* Volume Control of Side Tone and Analog Loop */
+ SOC_SINGLE("ST-PGA1 Gain", SIDETONE1_PGA, STx_PGA_SHIFT, 0x0a, 0),
+ SOC_SINGLE("ST-PGA2 Gain", SIDETONE2_PGA, STx_PGA_SHIFT, 0x0a, 0),
+ SOC_SINGLE("APGA1 Gain", ANALOG_LOOP_PGA1, APGAx_GAIN_SHIFT, 0x1d, 0),
+ SOC_SINGLE("APGA2 Gain", ANALOG_LOOP_PGA2, APGAx_GAIN_SHIFT, 0x1d, 0),
+ /* RX Properties */
+ SOC_ENUM("DAC1 Power Mode", soc_enum_dac1_power_mode),
+ SOC_ENUM("DAC2 Power Mode", soc_enum_dac2_power_mode),
+ SOC_ENUM("DAC3 Power Mode", soc_enum_dac3_power_mode),
+ SOC_ENUM("EAR Power Mode", soc_enum_ear_power_mode),
+ SOC_ENUM("AUXO Power Mode", soc_enum_auxo_power_mode),
+ SOC_ENUM("LINE1 Inversion", soc_enum_line1_inversion),
+ SOC_ENUM("LINE2 Inversion", soc_enum_line2_inversion),
+ SOC_ENUM("AUXO1 Inversion", soc_enum_auxo1_inversion),
+ SOC_ENUM("AUXO2 Inversion", soc_enum_auxo2_inversion),
+ SOC_ENUM("AUXO1 Pulldown Resistor", soc_enum_auxo1_pulldown_resistor),
+ SOC_ENUM("AUXO2 Pulldown Resistor", soc_enum_auxo2_pulldown_resistor),
+ /* TX Properties */
+ SOC_SINGLE("MIC1 VMID", MIC1_VMID_SELECT, 0, 0xff, 0),
+ SOC_SINGLE("MIC2 VMID", MIC2_VMID_SELECT, 0, 0xff, 0),
+ SOC_ENUM("MBIAS1 HiZ Option", soc_enum_mbias1_hiz_option),
+ SOC_ENUM("MBIAS2 HiZ Option", soc_enum_mbias2_hiz_option),
+ SOC_ENUM("MBIAS2 Output Voltage", soc_enum_mbias2_output_voltage),
+ SOC_ENUM("MBIAS2 Internal Resistor", soc_enum_mbias2_internal_resistor),
+ SOC_ENUM("MIC1 Input Impedance", soc_enum_mic1_input_impedance),
+ SOC_ENUM("MIC2 Input Impedance", soc_enum_mic2_input_impedance),
+ SOC_ENUM("TX1 HP Filter", soc_enum_tx1_hp_filter),
+ SOC_ENUM("TX2 HP Filter", soc_enum_tx2_hp_filter),
+ /* Side Tone and Analog Loop Properties */
+ SOC_ENUM("ST1 HP Filter", soc_enum_st1_hp_filter),
+ SOC_ENUM("ST2 HP Filter", soc_enum_st2_hp_filter),
+ /* I2S Interface Properties */
+ SOC_ENUM("I2S0 Word Length", soc_enum_i2s0_word_length),
+ SOC_ENUM("I2S1 Word Length", soc_enum_i2s1_word_length),
+ SOC_ENUM("I2S0 Mode", soc_enum_i2s0_mode),
+ SOC_ENUM("I2S1 Mode", soc_enum_i2s1_mode),
+ SOC_ENUM("I2S0 tri-state", soc_enum_i2s0_tristate),
+ SOC_ENUM("I2S1 tri-state", soc_enum_i2s1_tristate),
+ SOC_ENUM("I2S0 Pulldown Resistor", soc_enum_i2s0_pulldown_resistor),
+ SOC_ENUM("I2S1 Pulldown Resistor", soc_enum_i2s1_pulldown_resistor),
+ SOC_ENUM("I2S0 Sample Rate", soc_enum_i2s0_sample_rate),
+ SOC_ENUM("I2S1 Sample Rate", soc_enum_i2s1_sample_rate),
+ SOC_SINGLE("Interface Loop", INTERFACE_LOOP, 0, 0x0f, 0),
+ SOC_SINGLE("Interface Swap", INTERFACE_SWAP, 0, 0x1f, 0),
+ /* Miscellaneous */
+ SOC_SINGLE("Negative Charge Pump", NEGATIVE_CHARGE_PUMP, 0, 0x03, 0)
+};
+
+/* count the number of set bits in x */
+#define count_ones(x) ({ \
+ int num; \
+ typeof(x) y = (x); \
+ for (num = 0; y; y &= y - 1, num++) \
+ ; \
+ num; \
+ })
+
+enum enum_power {
+ POWER_OFF = 0,
+ POWER_ON = 1
+};
+
+enum enum_link {
+ UNLINK = 0,
+ LINK = 1
+};
+
+static enum enum_power get_widget_power_status(enum enum_widget widget)
+{
+ u8 val;
+
+ if (widget >= number_of_widgets)
+ return POWER_OFF;
+ val = read_reg(widget_pm_array[widget].reg);
+ if (val & (1 << widget_pm_array[widget].shift))
+ return POWER_ON;
+ else
+ return POWER_OFF;
+}
+
+static int count_powered_neighbors(const unsigned long *neighbors)
+{
+ unsigned long i;
+ int n = 0;
+ for_each_set_bit(i, neighbors, number_of_widgets) {
+ if (get_widget_power_status(i) == POWER_ON)
+ n++;
+ }
+ return n;
+}
+
+static int has_powered_neighbors(const unsigned long *neighbors)
+{
+ unsigned int i;
+ for_each_set_bit(i, neighbors, number_of_widgets) {
+ if (get_widget_power_status(i) == POWER_ON)
+ return 1;
+ }
+ return 0;
+}
+
+
+static int has_stacked_neighbors(const unsigned long *neighbors)
+{
+ unsigned long *stack_map = pm_stack_as_bitmap;
+ return bitmap_intersects(stack_map, neighbors, number_of_widgets);
+}
+
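+/*
+ * Iterative power sequencing over the widget graph, driven by pm_stack:
+ * on power-up a widget's sources are powered before the widget itself,
+ * on power-down dependent sinks are taken down first (a sink is only
+ * powered off when this widget is its last powered source).
+ * The caller must hold ab3550_pm_mutex.
+ */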
+static void power_widget_unlocked(enum enum_power onoff,
+ enum enum_widget widget)
+{
+ enum enum_widget w;
+ int done;
+
+ if (widget >= number_of_widgets)
+ return;
+ if (get_widget_power_status(widget) == onoff)
+ return;
+
+ for (w = widget, done = 0; !done;) {
+ unsigned long i;
+ unsigned long *srcs = widget_pm_array[w].source_list;
+ unsigned long *sinks = widget_pm_array[w].sink_list;
+ dev_dbg(ab3550_dev, "%s: processing widget %s.\n",
+ __func__, widget_names[w]);
+
+ if (onoff == POWER_ON &&
+ !bitmap_empty(srcs, number_of_widgets) &&
+ !has_powered_neighbors(srcs)) {
+ pm_stack.stack[pm_stack.p++] = w;
+ for_each_set_bit(i, srcs, number_of_widgets) {
+ pm_stack.stack[pm_stack.p++] = i;
+ }
+ w = pm_stack.stack[--pm_stack.p];
+ continue;
+ } else if (onoff == POWER_OFF &&
+ has_powered_neighbors(sinks)) {
+ int n = 0;
+ pm_stack.stack[pm_stack.p++] = w;
+ for_each_set_bit(i, sinks, number_of_widgets) {
+ if (count_powered_neighbors(
+ widget_pm_array[i].source_list)
+ == 1 &&
+ get_widget_power_status(i) == POWER_ON) {
+ pm_stack.stack[pm_stack.p++] = i;
+ n++;
+ }
+ }
+ if (n) {
+ w = pm_stack.stack[--pm_stack.p];
+ continue;
+ } else
+ --pm_stack.p;
+ }
+ mask_set_reg(widget_pm_array[w].reg,
+ 1 << widget_pm_array[w].shift,
+ onoff == POWER_ON ? 0xff : 0);
+ dev_dbg(ab3550_dev, "%s: widget %s powered %s.\n",
+ __func__, widget_names[w],
+ onoff == POWER_ON ? "on" : "off");
+
+ if (onoff == POWER_ON &&
+ !bitmap_empty(sinks, number_of_widgets) &&
+ !has_powered_neighbors(sinks) &&
+ !has_stacked_neighbors(sinks)) {
+ for_each_set_bit(i, sinks, number_of_widgets) {
+ pm_stack.stack[pm_stack.p++] = i;
+ }
+ w = pm_stack.stack[--pm_stack.p];
+ continue;
+ } else if (onoff == POWER_OFF) {
+ for_each_set_bit(i, srcs, number_of_widgets) {
+ if (!has_powered_neighbors(
+ widget_pm_array[i].sink_list)
+ && get_widget_power_status(i) == POWER_ON
+ && !test_bit(i, pm_stack_as_bitmap)) {
+ pm_stack.stack[pm_stack.p++] = i;
+ }
+ }
+ }
+ if (pm_stack.p > 0)
+ w = pm_stack.stack[--pm_stack.p];
+ else
+ done = 1;
+ }
+}
+
+static void power_widget_locked(enum enum_power onoff,
+ enum enum_widget widget)
+{
+ if (mutex_lock_interruptible(&ab3550_pm_mutex)) {
+ dev_warn(ab3550_dev,
+ "%s: Signal received while waiting on the PM mutex.\n",
+ __func__);
+ return;
+ }
+ power_widget_unlocked(onoff, widget);
+ mutex_unlock(&ab3550_pm_mutex);
+}
+
+static void dump_registers(const char *where, ...)
+{
+ va_list ap;
+ va_start(ap, where);
+ do {
+ short reg = va_arg(ap, int);
+ if (reg < 0)
+ break;
+ dev_dbg(ab3550_dev, "%s from %s> 0x%02X : 0x%02X.\n",
+ __func__, where, reg, read_reg(reg));
+ } while (1);
+ va_end(ap);
+}
+
+/**
+ * update_widgets_link() - update the link between two widgets
+ * @op: LINK to connect, UNLINK to disconnect
+ * @src: source end of the connection
+ * @sink: sink end of the connection
+ * @reg: register that implements the connection in hardware
+ * @mask: bits of @reg that select this connection
+ * @newval: new value for the bits covered by @mask
+ */
+static int update_widgets_link(enum enum_link op, enum enum_widget src,
+ enum enum_widget sink,
+ u8 reg, u8 mask, u8 newval)
+{
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&ab3550_pm_mutex)) {
+ dev_warn(ab3550_dev, "%s: A signal is received while waiting on"
+ " the PM mutex.\n", __func__);
+ return -EINTR;
+ }
+
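+ /*
+ * The switch value encodes (op, sink already in src's sink_list,
+ * src already in sink's source_list) as a 3-bit number; only
+ * creating a link that does not yet exist (4) or removing one
+ * that does (3) is accepted.
+ */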
+ switch (op << 2 | test_bit(sink, widget_pm_array[src].sink_list) << 1 |
+ test_bit(src, widget_pm_array[sink].source_list)) {
+ case 3: /* UNLINK, sink in sink_list, src in source_list */
+ case 4: /* LINK, sink not in sink_list, src not in source_list */
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (((int)op) << 2 | get_widget_power_status(src) << 1 |
+ get_widget_power_status(sink)) {
+ case 3: /* op = 0, src on, sink on */
+ if (count_powered_neighbors(widget_pm_array[sink].source_list)
+ == 1)
+ power_widget_unlocked(POWER_OFF, sink);
+ mask_set_reg(reg, mask, newval);
+ break;
+ case 6: /* op = 1, src on, sink off */
+ mask_set_reg(reg, mask, newval);
+ power_widget_unlocked(POWER_ON, sink);
+ break;
+ default:
+ /* op = 0, src off, sink off */
+ /* op = 0, src off, sink on */
+ /* op = 0, src on, sink off */
+ /* op = 1, src off, sink off */
+ /* op = 1, src off, sink on */
+ /* op = 1, src on, sink on */
+ mask_set_reg(reg, mask, newval);
+ }
+ change_bit(sink, widget_pm_array[src].sink_list);
+ change_bit(src, widget_pm_array[sink].source_list);
+end:
+ mutex_unlock(&ab3550_pm_mutex);
+ return ret;
+}
+
+static enum enum_widget apga_source_translate(u8 reg_value)
+{
+ switch (reg_value) {
+ case 1:
+ return widget_mic1;
+ case 2:
+ return widget_mic2;
+ default:
+ return number_of_widgets;
+ }
+}
+
+static enum enum_widget adder_sink_translate(u8 reg)
+{
+ switch (reg) {
+ case EAR_ADDER:
+ return widget_ear;
+ case AUXO1_ADDER:
+ return widget_auxo1;
+ case AUXO2_ADDER:
+ return widget_auxo2;
+ case SPKR_ADDER:
+ return widget_spkr;
+ case LINE1_ADDER:
+ return widget_line1;
+ case LINE2_ADDER:
+ return widget_line2;
+ case APGA1_ADDER:
+ return widget_apga1;
+ case APGA2_ADDER:
+ return widget_apga2;
+ default:
+ return number_of_widgets;
+ }
+}
+
+static int ab3550_add_widgets(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_new_controls(&codec->dapm, ab3550_dapm_widgets,
+ ARRAY_SIZE(ab3550_dapm_widgets));
+
+ snd_soc_dapm_add_routes(&codec->dapm, intercon, ARRAY_SIZE(intercon));
+
+ snd_soc_dapm_new_widgets(&codec->dapm);
+ return 0;
+}
+
+static void power_for_playback(enum enum_power onoff, int ifsel)
+{
+ dev_dbg(ab3550_dev, "%s: interface %d power %s.\n", __func__,
+ ifsel, onoff == POWER_ON ? "on" : "off");
+
+ if (mutex_lock_interruptible(&ab3550_pm_mutex)) {
+ dev_warn(ab3550_dev,
+ "%s: Signal received while waiting on the PM mutex.\n",
+ __func__);
+ return;
+ }
+ power_widget_unlocked(onoff, ifsel == 0 ?
+ widget_if0_dld_l : widget_if1_dld_l);
+ power_widget_unlocked(onoff, ifsel == 0 ?
+ widget_if0_dld_r : widget_if1_dld_r);
+ mutex_unlock(&ab3550_pm_mutex);
+}
+
+static void power_for_capture(enum enum_power onoff, int ifsel)
+{
+ dev_dbg(ab3550_dev, "%s: interface %d power %s", __func__,
+ ifsel, onoff == POWER_ON ? "on" : "off");
+ if (mutex_lock_interruptible(&ab3550_pm_mutex)) {
+ dev_warn(ab3550_dev,
+ "%s: Signal received while waiting on the PM mutex.\n",
+ __func__);
+ return;
+ }
+ power_widget_unlocked(onoff, ifsel == 0 ?
+ widget_if0_uld_l : widget_if1_uld_l);
+ power_widget_unlocked(onoff, ifsel == 0 ?
+ widget_if0_uld_r : widget_if1_uld_r);
+ mutex_unlock(&ab3550_pm_mutex);
+}
+
+static int ab3550_add_controls(struct snd_soc_codec *codec)
+{
+ int err = 0, i, n = ARRAY_SIZE(ab3550_snd_controls);
+
+ pr_debug("%s: %s called.\n", __FILE__, __func__);
+ for (i = 0; i < n; i++) {
+ err = snd_ctl_add(codec->card->snd_card, snd_ctl_new1(
+ &ab3550_snd_controls[i], codec));
+ if (err < 0) {
+ pr_err("%s failed to add control No.%d of %d.\n",
+ __func__, i, n);
+ return err;
+ }
+ }
+ return err;
+}
+
+static int ab3550_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params,
+ struct snd_soc_dai *dai)
+{
+ u8 val;
+ u8 reg = dai->id == 0 ? INTERFACE0 : INTERFACE1;
+
+ if (!ab3550_dev) {
+ pr_err("%s: The AB3550 codec driver not initialized.\n",
+ __func__);
+ return -EAGAIN;
+ }
+ dev_info(ab3550_dev, "%s called.\n", __func__);
+ switch (params_rate(hw_params)) {
+ case 8000:
+ val = I2Sx_SR_8000Hz;
+ break;
+ case 16000:
+ val = I2Sx_SR_16000Hz;
+ break;
+ case 44100:
+ val = I2Sx_SR_44100Hz;
+ break;
+ case 48000:
+ val = I2Sx_SR_48000Hz;
+ break;
+ default:
+ return -EINVAL;
+ }
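+ /*
+ * Only set the sample rate when the opposite direction on this
+ * DAI is idle, and power the master clock generator when the
+ * interface is in master mode (I2Sx_MODE bit cleared).
+ */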
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ !dai->capture_active : !dai->playback_active) {
+
+ mask_set_reg(reg, I2Sx_SR_MASK, val << I2Sx_SR_SHIFT);
+ if ((read_reg(reg) & I2Sx_MODE_MASK) == 0) {
+ mask_set_reg(reg, MASTER_GENx_PWR_MASK,
+ 1 << MASTER_GENx_PWR_SHIFT);
+ }
+ }
+ return 0;
+}
+
+static int ab3550_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ dai->playback_active : dai->capture_active) {
+
+ dev_err(ab3550_dev, "%s: A %s stream is already active.\n",
+ __func__,
+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ "PLAYBACK" : "CAPTURE");
+ return -EBUSY;
+ }
+ return 0;
+}
+static int ab3550_pcm_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ dev_info(ab3550_dev, "%s called.\n", __func__);
+
+ /* Configure registers for either playback or capture */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ power_for_playback(POWER_ON, dai->id);
+ dump_registers(__func__,
+ dai->id == 0 ? INTERFACE0 : INTERFACE1,
+ RX1, RX2, SPKR, EAR, -1);
+ } else {
+ power_for_capture(POWER_ON, dai->id);
+ dump_registers(__func__, MIC_BIAS1, MIC_BIAS2, MIC1_GAIN, TX1,
+ dai->id == 0 ? INTERFACE0 : INTERFACE1, -1);
+ }
+ return 0;
+}
+
+static void ab3550_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ u8 iface = dai->id == 0 ? INTERFACE0 : INTERFACE1;
+ dev_info(ab3550_dev, "%s called.\n", __func__);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ power_for_playback(POWER_OFF, dai->id);
+ else
+ power_for_capture(POWER_OFF, dai->id);
+ if (!dai->playback_active && !dai->capture_active &&
+ (read_reg(iface) & I2Sx_MODE_MASK) == 0)
+ mask_set_reg(iface, MASTER_GENx_PWR_MASK, 0);
+}
+
+static int ab3550_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ return 0;
+}
+
+static int ab3550_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ u8 iface = (codec_dai->id == 0) ? INTERFACE0 : INTERFACE1;
+ u8 val = 0;
+ dev_info(ab3550_dev, "%s called.\n", __func__);
+
+ switch (fmt & (SND_SOC_DAIFMT_FORMAT_MASK |
+ SND_SOC_DAIFMT_MASTER_MASK)) {
+
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS:
+ val |= 1 << I2Sx_MODE_SHIFT;
+ break;
+
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM:
+ break;
+
+ default:
+ dev_warn(ab3550_dev, "AB3550_dai: unsupported DAI format "
+ "0x%x\n", fmt);
+ return -EINVAL;
+ }
+ if (codec_dai->playback_active && codec_dai->capture_active) {
+ if ((read_reg(iface) & I2Sx_MODE_MASK) == val)
+ return 0;
+ else {
+ dev_err(ab3550_dev,
+ "%s: DAI format set differently by an existing stream.\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+ mask_set_reg(iface, I2Sx_MODE_MASK, val);
+ return 0;
+}
+
+struct snd_soc_dai_driver ab3550_dai_drv[] = {
+ {
+ .name = "ab3550-codec-dai.0",
+ .id = 0,
+ .playback = {
+ .stream_name = "AB3550.0 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = AB3550_SUPPORTED_RATE,
+ .formats = AB3550_SUPPORTED_FMT,
+ },
+ .capture = {
+ .stream_name = "AB3550.0 Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = AB3550_SUPPORTED_RATE,
+ .formats = AB3550_SUPPORTED_FMT,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .startup = ab3550_pcm_startup,
+ .prepare = ab3550_pcm_prepare,
+ .hw_params = ab3550_pcm_hw_params,
+ .shutdown = ab3550_pcm_shutdown,
+ .set_sysclk = ab3550_set_dai_sysclk,
+ .set_fmt = ab3550_set_dai_fmt,
+ }
+ },
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "ab3550-codec-dai.1",
+ .id = 1,
+ .playback = {
+ .stream_name = "AB3550.1 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = AB3550_SUPPORTED_RATE,
+ .formats = AB3550_SUPPORTED_FMT,
+ },
+ .capture = {
+ .stream_name = "AB3550.0 Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = AB3550_SUPPORTED_RATE,
+ .formats = AB3550_SUPPORTED_FMT,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .startup = ab3550_pcm_startup,
+ .prepare = ab3550_pcm_prepare,
+ .hw_params = ab3550_pcm_hw_params,
+ .shutdown = ab3550_pcm_shutdown,
+ .set_sysclk = ab3550_set_dai_sysclk,
+ .set_fmt = ab3550_set_dai_fmt,
+ }
+ },
+ .symmetric_rates = 1,
+ }
+};
+EXPORT_SYMBOL_GPL(ab3550_dai_drv);
+
+static int ab3550_codec_probe(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ pr_info("%s: Enter.\n", __func__);
+
+ /* Add controls */
+ ret = ab3550_add_controls(codec);
+ if (ret < 0)
+ return ret;
+
+ /* Add widgets */
+ ab3550_add_widgets(codec);
+
+ return 0;
+}
+
+static int ab3550_codec_remove(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_free(&codec->dapm);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ab3550_codec_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+ mask_set_reg(CLOCK, CLOCK_ENABLE_MASK, 0);
+
+ return 0;
+}
+
+static int ab3550_codec_resume(struct snd_soc_codec *codec)
+{
+ mask_set_reg(CLOCK, CLOCK_ENABLE_MASK, 0xff);
+
+ return 0;
+}
+#else
+#define ab3550_codec_resume NULL
+#define ab3550_codec_suspend NULL
+#endif
+
+/*
+ * This function is only called by the SOC framework to
+ * set registers associated to the mixer controls.
+ */
+static int ab3550_codec_write_reg(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ if (reg < MIC_BIAS1 || reg > INTERFACE_SWAP)
+ return -EINVAL;
+ switch (reg) {
+ u8 diff, oldval;
+ case ANALOG_LOOP_PGA1:
+ case ANALOG_LOOP_PGA2: {
+ enum enum_widget apga = reg == ANALOG_LOOP_PGA1 ?
+ widget_apga1 : widget_apga2;
+
+ oldval = read_reg(reg);
+ diff = value ^ oldval;
+
+ /* The APGA is to be turned on/off.
+ * The power bit and the other bits in the
+ * same register won't be changed at the same time
+ * since they belong to different controls.
+ */
+ if (diff & (1 << APGAx_PWR_SHIFT)) {
+ power_widget_locked(value >> APGAx_PWR_SHIFT & 1,
+ apga);
+ } else if (diff & APGAx_MUX_MASK) {
+ enum enum_widget old_source =
+ apga_source_translate(oldval);
+ enum enum_widget new_source =
+ apga_source_translate(value);
+ update_widgets_link(UNLINK, old_source, apga,
+ reg, APGAx_MUX_MASK, 0);
+ update_widgets_link(LINK, new_source, apga,
+ reg, APGAx_MUX_MASK, value);
+ } else {
+ set_reg(reg, value);
+ }
+ break;
+ }
+
+ case APGA1_ADDER:
+ case APGA2_ADDER: {
+ int i;
+ enum enum_widget apga;
+ enum enum_widget apga_dst[] = {
+ widget_auxo2, widget_auxo1, widget_ear, widget_spkr,
+ widget_line2, widget_line1
+ };
+
+ apga = adder_sink_translate(reg);
+ oldval = read_reg(reg);
+ diff = value ^ oldval;
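+ /*
+ * For every bit that changed, (un)link the APGA from the
+ * corresponding output amplifier in apga_dst[].
+ */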
+ for (i = 0; diff; i++) {
+ if (!(diff & 1 << i))
+ continue;
+ diff ^= 1 << i;
+ update_widgets_link(value >> i & 1, apga, apga_dst[i],
+ reg, 1 << i, value);
+ }
+ break;
+ }
+
+ case EAR_ADDER:
+ case AUXO1_ADDER:
+ case AUXO2_ADDER:
+ case SPKR_ADDER:
+ case LINE1_ADDER:
+ case LINE2_ADDER: {
+ int i;
+ enum enum_widget widgets[] = {
+ widget_dac1, widget_dac2, widget_dac3,
+ };
+ oldval = read_reg(reg);
+ diff = value ^ oldval;
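+ /*
+ * For every bit that changed, (un)link the corresponding
+ * DAC from this output adder.
+ */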
+ for (i = 0; diff; i++) {
+ if (!(diff & 1 << i))
+ continue;
+ diff ^= 1 << i;
+ update_widgets_link(value >> i & 1, widgets[i],
+ adder_sink_translate(reg),
+ reg, 1 << i, value);
+ }
+ break;
+ }
+
+ default:
+ set_reg(reg, value);
+ }
+ return 0;
+}
+
+static unsigned int ab3550_codec_read_reg(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ return read_reg(reg);
+}
+
+static struct snd_soc_codec_driver ab3550_codec_drv = {
+ .probe = ab3550_codec_probe,
+ .remove = ab3550_codec_remove,
+ .suspend = ab3550_codec_suspend,
+ .resume = ab3550_codec_resume,
+ .read = ab3550_codec_read_reg,
+ .write = ab3550_codec_write_reg,
+};
+EXPORT_SYMBOL_GPL(ab3550_codec_drv);
+
+static inline void init_playback_route(void)
+{
+ update_widgets_link(LINK, widget_if0_dld_l, widget_rx1, 0, 0, 0);
+ update_widgets_link(LINK, widget_rx1, widget_dac1, 0, 0, 0);
+ update_widgets_link(LINK, widget_dac1, widget_spkr,
+ SPKR_ADDER, DAC1_TO_ADDER_MASK, 0xff);
+
+ update_widgets_link(LINK, widget_if0_dld_r, widget_rx2,
+ RX2, RX2_IF_SELECT_MASK, 0);
+ update_widgets_link(LINK, widget_rx2, widget_dac2, 0, 0, 0);
+ update_widgets_link(LINK, widget_dac2, widget_ear,
+ EAR_ADDER, DAC2_TO_ADDER_MASK, 0xff);
+}
+
+static inline void init_capture_route(void)
+{
+ update_widgets_link(LINK, widget_micbias2, widget_mic1p1,
+ 0, 0, 0);
+ update_widgets_link(LINK, widget_micbias2, widget_mic1n1,
+ 0, 0, 0);
+ update_widgets_link(LINK, widget_mic1p1, widget_mic1,
+ MIC1_INPUT_SELECT, MICxP1_SEL_MASK, 0xff);
+ update_widgets_link(LINK, widget_mic1n1, widget_mic1,
+ MIC1_INPUT_SELECT, MICxN1_SEL_MASK, 0xff);
+ update_widgets_link(LINK, widget_mic1, widget_adc1,
+ 0, 0, 0);
+ update_widgets_link(LINK, widget_adc1, widget_tx1,
+ 0, 0, 0);
+ update_widgets_link(LINK, widget_tx1, widget_if0_uld_l,
+ INTERFACE0_DATA, I2Sx_L_DATA_MASK,
+ I2Sx_L_DATA_TX1_MASK);
+ update_widgets_link(LINK, widget_tx1, widget_if0_uld_r,
+ INTERFACE0_DATA, I2Sx_R_DATA_MASK,
+ I2Sx_R_DATA_TX1_MASK);
+}
+
+static inline void init_playback_gain(void)
+{
+ mask_set_reg(RX1_DIGITAL_PGA, RXx_PGA_GAIN_MASK,
+ 0x40 << RXx_PGA_GAIN_SHIFT);
+ mask_set_reg(RX2_DIGITAL_PGA, RXx_PGA_GAIN_MASK,
+ 0x40 << RXx_PGA_GAIN_SHIFT);
+ mask_set_reg(EAR, EAR_GAIN_MASK, 0x06 << EAR_GAIN_SHIFT);
+ mask_set_reg(SPKR, SPKR_GAIN_MASK, 0x6 << SPKR_GAIN_SHIFT);
+}
+
+static inline void init_capture_gain(void)
+{
+ mask_set_reg(MIC1_GAIN, MICx_GAIN_MASK, 0x06 << MICx_GAIN_SHIFT);
+ mask_set_reg(TX_DIGITAL_PGA1, TXDPGAx_MASK, 0x0f << TXDPGAx_SHIFT);
+}
+
+static __devinit int ab3550_codec_drv_probe(struct platform_device *pdev)
+{
+ struct ab3550_codec_dai_data *codec_drvdata;
+ int ret = 0;
+ u8 reg;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ pr_info("%s: Init codec private data.\n", __func__);
+ codec_drvdata = kzalloc(sizeof(*codec_drvdata), GFP_KERNEL);
+ if (codec_drvdata == NULL)
+ return -ENOMEM;
+
+ /* TODO: Add private data to codec_drvdata */
+
+ platform_set_drvdata(pdev, codec_drvdata);
+
+ pr_info("%s: Register codec.\n", __func__);
+ ret = snd_soc_register_codec(&pdev->dev, &ab3550_codec_drv,
+ ab3550_dai_drv, ARRAY_SIZE(ab3550_dai_drv));
+ if (ret < 0) {
+ pr_debug("%s: Error: Failed to register codec (ret = %d).\n",
+ __func__,
+ ret);
+ snd_soc_unregister_codec(&pdev->dev);
+ kfree(platform_get_drvdata(pdev));
+ return ret;
+ }
+
+ ab3550_dev = &pdev->dev;
+ /* Initialize the codec registers */
+ for (reg = AB3550_FIRST_REG; reg <= AB3550_LAST_REG; reg++)
+ set_reg(reg, 0);
+
+ mask_set_reg(CLOCK, CLOCK_REF_SELECT_MASK | CLOCK_ENABLE_MASK,
+ 1 << CLOCK_REF_SELECT_SHIFT | 1 << CLOCK_ENABLE_SHIFT);
+ init_playback_route();
+ init_playback_gain();
+ init_capture_route();
+ init_capture_gain();
+ memset(&pm_stack, 0, sizeof(pm_stack));
+
+ return 0;
+}
+
+static int __devexit ab3550_codec_drv_remove(struct platform_device *pdev)
+{
+ mask_set_reg(CLOCK, CLOCK_ENABLE_MASK, 0);
+
+ ab3550_dev = NULL;
+
+ snd_soc_unregister_codec(&pdev->dev);
+ kfree(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static int ab3550_codec_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ return 0;
+}
+
+static int ab3550_codec_drv_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver ab3550_codec_platform_drv = {
+ .driver = {
+ .name = "ab3550-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab3550_codec_drv_probe,
+ .remove = __devexit_p(ab3550_codec_drv_remove),
+ .suspend = ab3550_codec_drv_suspend,
+ .resume = ab3550_codec_drv_resume,
+};
+
+
+static int __init ab3550_codec_platform_drv_init(void)
+{
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ ab3550_dev = NULL;
+
+ ret = platform_driver_register(&ab3550_codec_platform_drv);
+ if (ret != 0)
+ pr_err("Failed to register AB3550 platform driver (%d)!\n", ret);
+
+ return ret;
+}
+
+static void __exit ab3550_codec_platform_drv_exit(void)
+{
+ pr_debug("%s: Enter.\n", __func__);
+
+ platform_driver_unregister(&ab3550_codec_platform_drv);
+}
+
+
+module_init(ab3550_codec_platform_drv_init);
+module_exit(ab3550_codec_platform_drv_exit);
+
+MODULE_DESCRIPTION("AB3550 Codec driver");
+MODULE_AUTHOR("Xie Xiaolei <xie.xiaolei@stericsson.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/ab3550.h b/sound/soc/codecs/ab3550.h
new file mode 100644
index 00000000000..fe9c77b1a62
--- /dev/null
+++ b/sound/soc/codecs/ab3550.h
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Xie Xiaolei <xie.xiaolei@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+#ifndef AB3550_CODEC_REGISTERS_H
+#define AB3550_CODEC_REGISTERS_H
+
+#define AB3550_SUPPORTED_RATE (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+
+#define AB3550_SUPPORTED_FMT (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+/* MIC BIAS */
+
+#define MIC_BIAS1 0x31
+#define MIC_BIAS2 0x32
+#define MBIAS2_OUT_V_MASK 0x04
+#define MBIAS2_OUT_V_SHIFT 2
+#define MBIAS_PWR_MASK 0x02
+#define MBIAS_PWR_SHIFT 1
+#define MBIAS_PDN_IMP_MASK 0x01
+#define MBIAS_PDN_IMP_SHIFT 0
+
+#define MIC_BIAS2_VAD 0x33
+#define MBIAS2_R_INT_MASK 0x01
+#define MBIAS2_R_INT_SHIFT 0
+
+/* MIC */
+#define MIC1_GAIN 0x34
+#define MIC2_GAIN 0x35
+#define MICx_GAIN_MASK 0xF0
+#define MICx_GAIN_SHIFT 4
+#define MICx_IN_IMP_MASK 0x0C
+#define MICx_IN_IMP_SHIFT 2
+#define MICx_PWR_MASK 0x01
+#define MICx_PWR_SHIFT 0
+
+#define MIC1_INPUT_SELECT 0x36
+#define MIC2_INPUT_SELECT 0x37
+#define MICxP1_SEL_MASK 0x80
+#define MICxP1_SEL_SHIFT 7
+#define MICxN1_SEL_MASK 0x40
+#define MICxN1_SEL_SHIFT 6
+#define MICxP2_SEL_MASK 0x20
+#define MICxP2_SEL_SHIFT 5
+#define MICxN2_SEL_MASK 0x10
+#define MICxN2_SEL_SHIFT 4
+#define LINEIN_SEL_MASK 0x03
+#define LINEIN_SEL_SHIFT 0
+
+#define MIC1_VMID_SELECT 0x38
+#define MIC2_VMID_SELECT 0x39
+#define VMIDx_ENABLE_MASK 0xC0
+#define VMIDx_ENABLE_SHIFT 6
+#define VMIDx_LINEIN1_N_MASK 0x20
+#define VMIDx_LINEIN1_N_SHIFT 5
+#define VMIDx_LINEIN2_N_MASK 0x10
+#define VMIDx_LINEIN2_N_SHIFT 4
+#define VMIDx_MICxP1_MASK 0x08
+#define VMIDx_MICxP1_SHIFT 3
+#define VMIDx_MICxP2_MASK 0x04
+#define VMIDx_MICxP2_SHIFT 2
+#define VMIDx_MICxN1_MASK 0x02
+#define VMIDx_MICxN1_SHIFT 1
+#define VMIDx_MICxN2_MASK 0x01
+#define VMIDx_MICxN2_SHIFT 0
+
+#define MIC2_TO_MIC1 0x3A
+#define MIC2_TO_MIC1_MASK 0x03
+#define MIC2_TO_MIC1_SHIFT 0
+
+/* Analog Loop */
+#define ANALOG_LOOP_PGA1 0x3B
+#define ANALOG_LOOP_PGA2 0x3C
+#define APGAx_GAIN_MASK 0xF8
+#define APGAx_GAIN_SHIFT 3
+#define APGAx_PWR_MASK 0x04
+#define APGAx_PWR_SHIFT 2
+#define APGAx_MUX_MASK 0x03
+#define APGAx_MUX_SHIFT 0
+#define APGAx_MUX_MIC1_MASK 0x01
+#define APGAx_MUX_MIC1_SHIFT 0
+#define APGAx_MUX_MIC2_MASK 0x02
+#define APGAx_MUX_MIC2_SHIFT 1
+
+
+#define APGA_VMID_SELECT 0x3D
+#define VMID_APGA1_ENABLE_MASK 0xC0
+#define VMID_APGA1_ENABLE_SHIFT 6
+#define VMID_APGA1_LINEIN1_MASK 0x20
+#define VMID_APGA1_LINEIN1_SHIFT 5
+#define VMID_APGA2_ENABLE_MASK 0x0C
+#define VMID_APGA2_ENABLE_SHIFT 2
+#define VMID_APGA2_LINEIN2_MASK 0x02
+#define VMID_APGA2_LINEIN2_SHIFT 1
+
+/* Output Amplifiers */
+#define EAR 0x3E
+#define EAR_PWR_MODE_MASK 0x20
+#define EAR_PWR_MODE_SHIFT 5
+#define EAR_PWR_MASK 0x10
+#define EAR_PWR_SHIFT 4
+#define EAR_GAIN_MASK 0x0F
+#define EAR_GAIN_SHIFT 0
+
+#define AUXO1 0x3F
+#define AUXO2 0x40
+#define AUXOx_PWR_MASK 0x80
+#define AUXOx_PWR_SHIFT 7
+#define AUXOx_INV_MASK 0x40
+#define AUXOx_INV_SHIFT 6
+#define AUXOx_PULLDOWN_MASK 0x20
+#define AUXOx_PULLDOWN_SHIFT 5
+#define AUXOx_GAIN_MASK 0x0F
+#define AUXOx_GAIN_SHIFT 0
+
+#define AUXO_PWR_MODE 0x41
+#define AUT_PWR_MODE_MASK 0x04
+#define AUT_PWR_MODE_SHIFT 2
+#define AUXO_PWR_MODE_MASK 0x03
+#define AUXO_PWR_MODE_SHIFT 0
+
+#define OFFSET_CANCEL 0x42
+#define SPKR_OFF_CANC_MASK 0x04
+#define SPKR_OFF_CANC_SHIFT 2
+#define AUXO_OFF_CANC_MASK 0x02
+#define AUXO_OFF_CANC_SHIFT 1
+#define OFFSET_CLOCK_MASK 0x01
+#define OFFSET_CLOCK_SHIFT 0
+
+#define SPKR 0x43
+#define OVR_CURR_PROT_MASK 0x80
+#define OVR_CURR_PROT_SHIFT 7
+#define SPKR_PWR_MASK 0x40
+#define SPKR_PWR_SHIFT 6
+#define SPKR_GAIN_MASK 0x1F
+#define SPKR_GAIN_SHIFT 0
+
+#define LINE1 0x44
+#define LINE2 0x45
+#define LINEx_PWR_MASK 0x80
+#define LINEx_PWR_SHIFT 7
+#define LINEx_INV_MASK 0x40
+#define LINEx_INV_SHIFT 6
+#define VMID_BUFFx_MASK 0x10
+#define VMID_BUFFx_SHIFT 4
+#define LINEx_GAIN_MASK 0x0F
+#define LINEx_GAIN_SHIFT 0
+
+/* Analog loop Routing */
+
+#define APGA1_ADDER 0x46
+#define APGA2_ADDER 0x47
+#define APGAx_TO_LINE1_MASK 0x20
+#define APGAx_TO_LINE1_SHIFT 5
+#define APGAx_TO_LINE2_MASK 0x10
+#define APGAx_TO_LINE2_SHIFT 4
+#define APGAx_TO_SPKR_MASK 0x08
+#define APGAx_TO_SPKR_SHIFT 3
+#define APGAx_TO_EAR_MASK 0x04
+#define APGAx_TO_EAR_SHIFT 2
+#define APGAx_TO_AUXO1_MASK 0x02
+#define APGAx_TO_AUXO1_SHIFT 1
+#define APGAx_TO_AUXO2_MASK 0x01
+#define APGAx_TO_AUXO2_SHIFT 0
+#define APGAx_ADDER_VALID_BITS_MASK 0x3F
+
+/* Output Amplifiers Routing */
+
+#define EAR_ADDER 0x48
+#define AUXO1_ADDER 0x49
+#define AUXO2_ADDER 0x4A
+#define SPKR_ADDER 0x4B
+#define LINE1_ADDER 0x4C
+#define LINE2_ADDER 0x4D
+#define DAC3_TO_ADDER_MASK 0x04
+#define DAC3_TO_ADDER_SHIFT 2
+#define DAC2_TO_ADDER_MASK 0x02
+#define DAC2_TO_ADDER_SHIFT 1
+#define DAC1_TO_ADDER_MASK 0x01
+#define DAC1_TO_ADDER_SHIFT 0
+
+#define EAR_TO_MIC2 0x4E
+#define EAR_TO_MIC2_MASK 0x01
+#define EAR_TO_MIC2_SHIFT 0
+
+#define SPKR_TO_MIC2 0x4F
+#define SPKR_TO_MIC2_MASK 0x01
+#define SPKR_TO_MIC2_SHIFT 0
+
+#define NEGATIVE_CHARGE_PUMP 0x50
+#define NCP_MODE_MASK 0x02
+#define NCP_MODE_SHIFT 1
+#define NCP_PWR_MASK 0x01
+#define NCP_PWR_SHIFT 0
+
+#define TX1 0x51
+#define TX2 0x52
+#define TXx_HP_FILTER_MASK 0x0C
+#define TXx_HP_FILTER_SHIFT 2
+#define TXx_PWR_MASK 0x02
+#define TXx_PWR_SHIFT 1
+#define ADCx_PWR_MASK 0x01
+#define ADCx_PWR_SHIFT 0
+
+#define RX1 0x53
+#define RX2 0x54
+#define RX2_IF_SELECT_MASK 0x10
+#define RX2_IF_SELECT_SHIFT 4
+#define RX3 0x55
+#define RXx_PWR_MASK 0x08
+#define RXx_PWR_SHIFT 3
+#define DACx_PWR_MASK 0x04
+#define DACx_PWR_SHIFT 2
+#define DACx_PWR_MODE_MASK 0x03
+#define DACx_PWR_MODE_SHIFT 0
+
+#define TX_DIGITAL_PGA1 0x56
+#define TX_DIGITAL_PGA2 0x57
+#define TXDPGAx_MASK 0x0F
+#define TXDPGAx_SHIFT 0
+
+#define RX1_DIGITAL_PGA 0x58
+#define RX2_DIGITAL_PGA 0x59
+#define RX3_DIGITAL_PGA 0x5A
+#define RXx_PGA_GAIN_MASK 0x7F
+#define RXx_PGA_GAIN_SHIFT 0
+
+#define SIDETONE1_PGA 0x5B
+#define SIDETONE2_PGA 0x5C
+#define STx_HP_FILTER_MASK 0x60
+#define STx_HP_FILTER_SHIFT 5
+#define STx_MUX_MASK 0x10
+#define STx_MUX_SHIFT 4
+#define STx_PGA_MASK 0x0F
+#define STx_PGA_SHIFT 0
+
+/* clock */
+
+#define CLOCK 0x5D
+#define CLOCK_REF_SELECT_MASK 0x02
+#define CLOCK_REF_SELECT_SHIFT 1
+#define CLOCK_ENABLE_MASK 0x01
+#define CLOCK_ENABLE_SHIFT 0
+
+/* Interface */
+
+#define INTERFACE0 0x5E
+#define INTERFACE1 0x60
+#define I2Sx_WORDLENGTH_MASK 0x40
+#define I2Sx_WORDLENGTH_SHIFT 6
+#define MASTER_GENx_PWR_MASK 0x20
+#define MASTER_GENx_PWR_SHIFT 5
+#define I2Sx_MODE_MASK 0x10
+#define I2Sx_MODE_SHIFT 4
+#define I2Sx_TRISTATE_MASK 0x08
+#define I2Sx_TRISTATE_SHIFT 3
+#define I2Sx_PULLDOWN_MASK 0x04
+#define I2Sx_PULLDOWN_SHIFT 2
+#define I2Sx_SR_MASK 0x03
+#define I2Sx_SR_SHIFT 0
+#define I2Sx_SR_8000Hz 0
+#define I2Sx_SR_16000Hz 1
+#define I2Sx_SR_44100Hz 2
+#define I2Sx_SR_48000Hz 3
+
+#define INTERFACE0_DATA 0x5F
+#define INTERFACE1_DATA 0x61
+#define I2Sx_L_DATA_MASK 0x0C
+#define I2Sx_L_DATA_TX1_MASK 0x04
+#define I2Sx_L_DATA_TX2_MASK 0x08
+#define I2Sx_L_DATA_SHIFT 2
+#define I2Sx_R_DATA_MASK 0x03
+#define I2Sx_R_DATA_TX1_MASK 0x01
+#define I2Sx_R_DATA_TX2_MASK 0x02
+#define I2Sx_R_DATA_SHIFT 0
+
+#define INTERFACE_LOOP 0x62
+#define I2S0_INT_LOOP_MASK 0x08
+#define I2S0_INT_LOOP_SHIFT 3
+#define I2S0_EXT_LOOP_MASK 0x04
+#define I2S0_EXT_LOOP_SHIFT 2
+#define I2S1_INT_LOOP_MASK 0x02
+#define I2S1_INT_LOOP_SHIFT 1
+#define I2S1_EXT_LOOP_MASK 0x01
+#define I2S1_EXT_LOOP_SHIFT 0
+
+#define INTERFACE_SWAP 0x63
+#define RX_SWAP0_MASK 0x10
+#define RX_SWAP0_SHIFT 4
+#define RX_SWAP1_MASK 0x08
+#define RX_SWAP1_SHIFT 3
+#define IF_SWAP_MASK 0x04
+#define IF_SWAP_SHIFT 2
+#define IO_SWAP0_MASK 0x02
+#define IO_SWAP0_SHIFT 1
+#define IO_SWAP1_MASK 0x01
+#define IO_SWAP1_SHIFT 0
+
+#define AB3550_FIRST_REG MIC_BIAS1
+#define AB3550_LAST_REG INTERFACE_SWAP
+
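+/*
+ * Virtual registers: software-only registers placed after the last
+ * hardware register; they carry the power state of the I2S interface
+ * channels and of the microphone input pins.
+ */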
+#define AB3550_VIRTUAL_REG1 (AB3550_LAST_REG + 1)
+#define IF0_DLD_L_PW_SHIFT 0
+#define IF0_DLD_R_PW_SHIFT 1
+#define IF0_ULD_L_PW_SHIFT 2
+#define IF0_ULD_R_PW_SHIFT 3
+#define IF1_DLD_L_PW_SHIFT 4
+#define IF1_DLD_R_PW_SHIFT 5
+#define IF1_ULD_L_PW_SHIFT 6
+#define IF1_ULD_R_PW_SHIFT 7
+
+#define AB3550_VIRTUAL_REG2 (AB3550_LAST_REG + 2)
+#define MIC1P1_PW_SHIFT 0
+#define MIC1N1_PW_SHIFT 1
+#define MIC1P2_PW_SHIFT 2
+#define MIC1N2_PW_SHIFT 3
+#define MIC2P1_PW_SHIFT 4
+#define MIC2N1_PW_SHIFT 5
+#define MIC2P2_PW_SHIFT 6
+#define MIC2N2_PW_SHIFT 7
+
+
+#endif
diff --git a/sound/soc/codecs/ab5500.c b/sound/soc/codecs/ab5500.c
new file mode 100755
index 00000000000..49b9c1cc178
--- /dev/null
+++ b/sound/soc/codecs/ab5500.c
@@ -0,0 +1,1805 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Xie Xiaolei <xie.xiaolei@stericsson.com>,
+ * Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <asm/atomic.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <stdarg.h>
+#include "ab5500.h"
+
+/* No of digital interface on the Codec */
+#define NO_CODEC_DAI_IF 2
+
+/* codec private data */
+struct ab5500_codec_dai_data {
+ bool playback_active;
+ bool capture_active;
+};
+
+enum regulator_idx {
+ REGULATOR_DMIC,
+ REGULATOR_AMIC
+};
+
+static struct device *ab5500_dev;
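+/* Regulator supplies for the digital and the analog microphones */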
+static struct regulator_bulk_data reg_info[2] = {
+ { .supply = "vdigmic" },
+ { .supply = "v-amic" }
+};
+
+static bool reg_enabled[2] = {
+ false,
+ false
+};
+
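+/* Shadow storage for the AB5500_VIRTUAL_REG* software-only registers */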
+static u8 virtual_regs[] = {
+ 0, 0, 0, 0, 0
+};
+
+static int ab5500_clk_request;
+static DEFINE_MUTEX(ab5500_clk_mutex);
+
+#define set_reg(reg, val) mask_set_reg((reg), 0xff, (val))
+
+static void mask_set_reg(u8 reg, u8 mask, u8 val)
+{
+ u8 newval = mask & val;
+ u8 oldval, diff;
+
+ if (!ab5500_dev) {
+ pr_err("%s: The AB5500 codec driver not initialized.\n",
+ __func__);
+ return;
+ }
+ /*
+ * If the register falls within the range of real AB5500
+ * registers, update it through the abx500 interface.
+ */
+ if (reg < AB5500_FIRST_REG)
+ return;
+ if (reg <= AB5500_LAST_REG) {
+ abx500_mask_and_set_register_interruptible(
+ ab5500_dev, AB5500_BANK_AUDIO_HEADSETUSB,
+ reg, mask, val);
+ return;
+ }
+ if (reg - AB5500_LAST_REG - 1 >= ARRAY_SIZE(virtual_regs))
+ return;
+
+ /* treatment of virtual registers follows */
+ /*
+ * Compute the difference between the new value and the old value:
+ * 1. if there is no difference, do nothing;
+ * 2. if the difference is in a PWR bit, set the PWR masks
+ *    appropriately below.
+ */
+ oldval = virtual_regs[reg - AB5500_LAST_REG - 1];
+ diff = (val ^ oldval) & mask;
+ if (!diff)
+ return;
+
+ switch (reg) {
+ case AB5500_VIRTUAL_REG3:
+ if ((diff & (1 << SPKR1_PWR_SHIFT))) {
+ if ((val & (1 << SPKR1_PWR_SHIFT)) == 0) {
+ /*
+ * If the new value has PWR_SHIFT
+ * disabled, set the
+ * PWR_MASK to 0
+ */
+ mask_set_reg(SPKR1, SPKRx_PWR_MASK, 0);
+ } else {
+ /* Otherwise set the PWR_MASK value based on the mode
+ * stored in the old value. */
+ switch (oldval & SPKR1_MODE_MASK) {
+ case 0:
+ mask_set_reg(SPKR1, SPKRx_PWR_MASK,
+ SPKRx_PWR_VBR_VALUE);
+ break;
+ case 1:
+ mask_set_reg(SPKR1, SPKRx_PWR_MASK,
+ SPKRx_PWR_CLS_D_VALUE);
+ break;
+ case 2:
+ mask_set_reg(SPKR1, SPKRx_PWR_MASK,
+ SPKRx_PWR_CLS_AB_VALUE);
+ break;
+ }
+ }
+ }
+ if ((diff & (1 << SPKR2_PWR_SHIFT))) {
+ if ((val & (1 << SPKR2_PWR_SHIFT)) == 0) {
+ /*
+ * If the new value has PWR_SHIFT
+ * disabled, set the
+ * PWR_MASK to 0
+ */
+ mask_set_reg(SPKR2, SPKRx_PWR_MASK, 0);
+ } else {
+ /* Otherwise set the PWR_MASK value based on the mode
+ * stored in the old value. */
+ switch (oldval & SPKR2_MODE_MASK) {
+ case 0:
+ mask_set_reg(SPKR2, SPKRx_PWR_MASK,
+ SPKRx_PWR_VBR_VALUE);
+ break;
+ case 1:
+ mask_set_reg(SPKR2, SPKRx_PWR_MASK,
+ SPKRx_PWR_CLS_D_VALUE);
+ break;
+ }
+ }
+ }
+
+ break;
+ case AB5500_VIRTUAL_REG4:
+ /* configure PWMCTRL_SPKR1, PWMCTRL_SPKR2, etc. */
+ break;
+ }
+ virtual_regs[reg - AB5500_LAST_REG - 1] &= ~mask;
+ virtual_regs[reg - AB5500_LAST_REG - 1] |= newval;
+}
+
+static u8 read_reg(u8 reg)
+{
+ if (!ab5500_dev) {
+ pr_err("%s: The AB5500 codec driver not initialized.\n",
+ __func__);
+ return 0;
+ }
+ /* If the register falls within the range of real AB5500
+ * registers, read it through the abx500 interface. */
+ if (reg < AB5500_FIRST_REG)
+ return 0;
+ else if (reg <= AB5500_LAST_REG) {
+ u8 val;
+ abx500_get_register_interruptible(
+ ab5500_dev, AB5500_BANK_AUDIO_HEADSETUSB, reg, &val);
+ return val;
+ } else if (reg - AB5500_LAST_REG - 1 < ARRAY_SIZE(virtual_regs))
+ return virtual_regs[reg - AB5500_LAST_REG - 1];
+ dev_warn(ab5500_dev, "%s: out-of-scope reigster %u.\n",
+ __func__, reg);
+ return 0;
+}
+
+/* Components that can be powered up/down */
+enum enum_widget {
+ widget_ear = 0,
+ widget_auxo1,
+ widget_auxo2,
+ widget_auxo3,
+ widget_auxo4,
+ widget_spkr1,
+ widget_spkr2,
+ widget_spkr1_adder,
+ widget_spkr2_adder,
+ widget_pwm_spkr1,
+ widget_pwm_spkr2,
+ widget_pwm_spkr1n,
+ widget_pwm_spkr1p,
+ widget_pwm_spkr2n,
+ widget_pwm_spkr2p,
+ widget_line1,
+ widget_line2,
+ widget_dac1,
+ widget_dac2,
+ widget_dac3,
+ widget_rx1,
+ widget_rx2,
+ widget_rx3,
+ widget_mic1,
+ widget_mic2,
+ widget_micbias1,
+ widget_micbias2,
+ widget_apga1,
+ widget_apga2,
+ widget_tx1,
+ widget_tx2,
+ widget_adc1,
+ widget_adc2,
+ widget_if0_dld_l,
+ widget_if0_dld_r,
+ widget_if0_uld_l,
+ widget_if0_uld_r,
+ widget_if1_dld_l,
+ widget_if1_dld_r,
+ widget_if1_uld_l,
+ widget_if1_uld_r,
+ widget_mic1p1,
+ widget_mic1n1,
+ widget_mic1p2,
+ widget_mic1n2,
+ widget_mic2p1,
+ widget_mic2n1,
+ widget_mic2p2,
+ widget_mic2n2,
+ widget_clock,
+ number_of_widgets
+};
+
+/* This is only meant for debugging */
+static const char *widget_names[] = {
+ "EAR", "AUXO1", "AUXO2", "AUXO3", "AUXO4",
+ "SPKR1", "SPKR2", "SPKR1_ADDER", "SPKR2_ADDER",
+ "PWM_SPKR1", "PWM_SPKR2",
+ "PWM_SPKR1N", "PWM_SPKR1P",
+ "PWM_SPKR2N", "PWM_SPKR2P",
+ "LINE1", "LINE2",
+ "DAC1", "DAC2", "DAC3",
+ "RX1", "RX2", "RX3",
+ "MIC1", "MIC2",
+ "MIC-BIAS1", "MIC-BIAS2",
+ "APGA1", "APGA2",
+ "TX1", "TX2",
+ "ADC1", "ADC2",
+ "IF0-DLD-L", "IF0-DLD-R", "IF0-ULD-L", "IF0-ULD-R",
+ "IF1-DLD-L", "IF1-DLD-R", "IF1-ULD-L", "IF1-ULD-R",
+ "MIC1P1", "MIC1N1", "MIC1P2", "MIC1N2",
+ "MIC2P1", "MIC2N1", "MIC2P2", "MIC2N2",
+ "CLOCK"
+};
+
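+/*
+ * Power-management descriptor of one widget: the register bit that
+ * powers it, plus bitmaps of the widgets feeding it (source_list) and
+ * of the widgets it feeds (sink_list).
+ */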
+struct widget_pm {
+ enum enum_widget widget;
+ u8 reg;
+ u8 shift;
+
+ unsigned long source_list[BIT_WORD(number_of_widgets) + 1];
+ unsigned long sink_list[BIT_WORD(number_of_widgets) + 1];
+};
+
+static struct widget_pm widget_pm_array[] = {
+ {.widget = widget_ear, .reg = EAR_PWR, .shift = EAR_PWR_SHIFT},
+
+ {.widget = widget_auxo1, .reg = AUXO1, .shift = AUXOx_PWR_SHIFT},
+ {.widget = widget_auxo2, .reg = AUXO2, .shift = AUXOx_PWR_SHIFT},
+ {.widget = widget_auxo3, .reg = AUXO3, .shift = AUXOx_PWR_SHIFT},
+ {.widget = widget_auxo4, .reg = AUXO4, .shift = AUXOx_PWR_SHIFT},
+
+ {.widget = widget_spkr1, .reg = DUMMY_REG, .shift = 0},
+ {.widget = widget_spkr2, .reg = AB5500_VIRTUAL_REG3,
+ .shift = SPKR2_PWR_SHIFT},
+
+ {.widget = widget_spkr1_adder, .reg = AB5500_VIRTUAL_REG3,
+ .shift = SPKR1_ADDER_PWR_SHIFT},
+ {.widget = widget_spkr2_adder, .reg = AB5500_VIRTUAL_REG3,
+ .shift = SPKR2_ADDER_PWR_SHIFT},
+
+ {.widget = widget_pwm_spkr1, .reg = AB5500_VIRTUAL_REG4,
+ .shift = PWM_SPKR1_PWR_SHIFT},
+ {.widget = widget_pwm_spkr2, .reg = AB5500_VIRTUAL_REG4,
+ .shift = PWM_SPKR2_PWR_SHIFT},
+
+ {.widget = widget_pwm_spkr1n, .reg = AB5500_VIRTUAL_REG4,
+ .shift = PWM_SPKR1N_PWR_SHIFT},
+ {.widget = widget_pwm_spkr1p, .reg = AB5500_VIRTUAL_REG4,
+ .shift = PWM_SPKR1P_PWR_SHIFT},
+
+ {.widget = widget_pwm_spkr2n, .reg = AB5500_VIRTUAL_REG4,
+ .shift = PWM_SPKR2N_PWR_SHIFT},
+ {.widget = widget_pwm_spkr2p, .reg = AB5500_VIRTUAL_REG4,
+ .shift = PWM_SPKR2P_PWR_SHIFT},
+
+
+ {.widget = widget_line1, .reg = LINE1, .shift = LINEx_PWR_SHIFT},
+ {.widget = widget_line2, .reg = LINE2, .shift = LINEx_PWR_SHIFT},
+
+ {.widget = widget_dac1, .reg = RX1, .shift = DACx_PWR_SHIFT},
+ {.widget = widget_dac2, .reg = RX2, .shift = DACx_PWR_SHIFT},
+ {.widget = widget_dac3, .reg = RX3, .shift = DACx_PWR_SHIFT},
+
+ {.widget = widget_rx1, .reg = RX1, .shift = RXx_PWR_SHIFT},
+ {.widget = widget_rx2, .reg = RX2, .shift = RXx_PWR_SHIFT},
+ {.widget = widget_rx3, .reg = RX3, .shift = RXx_PWR_SHIFT},
+
+ {.widget = widget_mic1, .reg = MIC1_GAIN, .shift = MICx_PWR_SHIFT},
+ {.widget = widget_mic2, .reg = MIC2_GAIN, .shift = MICx_PWR_SHIFT},
+
+ {.widget = widget_micbias1, .reg = MIC_BIAS1,
+ .shift = MBIASx_PWR_SHIFT},
+ {.widget = widget_micbias2, .reg = MIC_BIAS2,
+ .shift = MBIASx_PWR_SHIFT},
+
+ {.widget = widget_apga1, .reg = ANALOG_LOOP_PGA1,
+ .shift = APGAx_PWR_SHIFT},
+ {.widget = widget_apga2, .reg = ANALOG_LOOP_PGA2,
+ .shift = APGAx_PWR_SHIFT},
+
+ {.widget = widget_tx1, .reg = TX1, .shift = TXx_PWR_SHIFT},
+ {.widget = widget_tx2, .reg = TX2, .shift = TXx_PWR_SHIFT},
+
+ {.widget = widget_adc1, .reg = TX1, .shift = ADCx_PWR_SHIFT},
+ {.widget = widget_adc2, .reg = TX2, .shift = ADCx_PWR_SHIFT},
+
+ {.widget = widget_if0_dld_l, .reg = AB5500_VIRTUAL_REG1,
+ .shift = IF0_DLD_L_PW_SHIFT},
+ {.widget = widget_if0_dld_r, .reg = AB5500_VIRTUAL_REG1,
+ .shift = IF0_DLD_R_PW_SHIFT},
+ {.widget = widget_if0_uld_l, .reg = AB5500_VIRTUAL_REG1,
+ .shift = IF0_ULD_L_PW_SHIFT},
+ {.widget = widget_if0_uld_r, .reg = AB5500_VIRTUAL_REG1,
+ .shift = IF0_ULD_R_PW_SHIFT},
+
+ {.widget = widget_if1_dld_l, .reg = AB5500_VIRTUAL_REG1,
+ .shift = IF1_DLD_L_PW_SHIFT},
+ {.widget = widget_if1_dld_r, .reg = AB5500_VIRTUAL_REG1,
+ .shift = IF1_DLD_R_PW_SHIFT},
+ {.widget = widget_if1_uld_l, .reg = AB5500_VIRTUAL_REG1,
+ .shift = IF1_ULD_L_PW_SHIFT},
+ {.widget = widget_if1_uld_r, .reg = AB5500_VIRTUAL_REG1,
+ .shift = IF1_ULD_R_PW_SHIFT},
+
+ {.widget = widget_mic1p1, .reg = AB5500_VIRTUAL_REG2,
+ .shift = MIC1P1_PW_SHIFT},
+ {.widget = widget_mic1n1, .reg = AB5500_VIRTUAL_REG2,
+ .shift = MIC1N1_PW_SHIFT},
+ {.widget = widget_mic1p2, .reg = AB5500_VIRTUAL_REG2,
+ .shift = MIC1P2_PW_SHIFT},
+ {.widget = widget_mic1n2, .reg = AB5500_VIRTUAL_REG2,
+ .shift = MIC1N2_PW_SHIFT},
+
+ {.widget = widget_mic2p1, .reg = AB5500_VIRTUAL_REG2,
+ .shift = MIC2P1_PW_SHIFT},
+ {.widget = widget_mic2n1, .reg = AB5500_VIRTUAL_REG2,
+ .shift = MIC2N1_PW_SHIFT},
+ {.widget = widget_mic2p2, .reg = AB5500_VIRTUAL_REG2,
+ .shift = MIC2P2_PW_SHIFT},
+ {.widget = widget_mic2n2, .reg = AB5500_VIRTUAL_REG2,
+ .shift = MIC2N2_PW_SHIFT},
+
+ {.widget = widget_clock, .reg = CLOCK, .shift = CLOCK_ENABLE_SHIFT},
+};
+
+DEFINE_MUTEX(ab5500_pm_mutex);
+
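+/* Work stack used by power_widget_unlocked() to walk the widget graph */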
+static struct {
+ enum enum_widget stack[number_of_widgets];
+ int p;
+} pm_stack;
+
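+/*
+ * Expands to a bitmap with one bit set for every widget currently on
+ * pm_stack; used to check whether a neighbour is already queued.
+ */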
+#define pm_stack_as_bitmap ({ \
+ unsigned long bitmap[BIT_WORD(number_of_widgets) + 1]; \
+ int i; \
+ memset(bitmap, 0, sizeof(bitmap)); \
+ for (i = 0; i < pm_stack.p; i++) { \
+ set_bit(pm_stack.stack[i], bitmap); \
+ } \
+ bitmap; \
+ })
+
+/* These are only defined to satisfy the DAPM framework */
+static const struct snd_soc_dapm_widget ab5500_dapm_widgets[] = {
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+};
+
+
+struct ab5500_codec_dai_data ab5500_codec_privates[NO_CODEC_DAI_IF] = {
+ {
+ .playback_active = false,
+ .capture_active = false,
+ },
+ {
+ .playback_active = false,
+ .capture_active = false,
+ }
+};
+
+static const char *enum_rx_input_select[] = {
+ "Mute", "TX1", "TX2", "I2S0_DLD_L",
+ "I2S0_DLD_R", "I2S1_DLD_L", "I2S1_DLD_R"
+};
+
+static const char *enum_i2s_uld_select[] = {
+ "Mute", "TX1", "TX2", "I2S0_DLD_L",
+ "I2S0_DLD_R", "I2S1_DLD_L", "I2S1_DLD_R", "tri-state"
+};
+static const char *enum_apga1_source[] = {"LINEIN1", "MIC1", "MIC2", "None"};
+static const char *enum_apga2_source[] = {"LINEIN2", "MIC1", "MIC2", "None"};
+static const char *enum_rx_side_tone[] = {"TX1", "TX2"};
+static const char *enum_dac_power_mode[] = {"100%", "75%", "55%"};
+static const char *enum_ear_power_mode[] = {"100%", "70%", "50%"};
+static const char *enum_auxo_power_mode[] = {
+ "100%", "67%", "50%", "25%", "auto"
+};
+static const char *enum_onoff[] = {"Off", "On"};
+static const char *enum_mbias_pdn_imp[] = {"GND", "HiZ"};
+static const char *enum_mbias2_out_v[] = {"2.0v", "2.2v"};
+static const char *enum_mic_in_imp[] = {
+ "12.5 kohm", "25 kohm", "50 kohm"
+};
+static const char *enum_hp_filter[] = {"HP3", "HP1", "bypass"};
+static const char *enum_i2s_word_length[] = {"16 bits", "24 bits"};
+static const char *enum_i2s_mode[] = {"Master Mode", "Slave Mode"};
+static const char *enum_i2s_tristate[] = {"Normal", "Tri-state"};
+static const char *enum_optional_resistor[] = {"disconnected", "connected"};
+static const char *enum_i2s_sample_rate[] = {
+ "8 kHz", "16 kHz", "44.1 kHz", "48 kHz"
+};
+static const char *enum_tx1_input_select[] = {
+ "ADC1", "DIGMIC1", "DIGMIC2"
+};
+static const char *enum_tx2_input_select[] = {
+ "ADC2", "DIGMIC1", "DIGMIC2"
+};
+static const char *enum_signal_inversion[] = {"normal", "inverted"};
+static const char *enum_spkr1_mode[] = {
+ "SPKR1 power down", "Vibra PWM", "class D amplifier", "class AB amplifier"
+};
+static const char *enum_spkr2_mode[] = {
+ "Vibra PWM", "class D amplifier",
+};
+static const char *enum_pwm_pol[] = {
+ "GND", "VDD"
+};
+/* RX1 Input Select */
+static struct soc_enum soc_enum_rx1_in_sel =
+ SOC_ENUM_SINGLE(RX1, RXx_DATA_SHIFT,
+ ARRAY_SIZE(enum_rx_input_select),
+ enum_rx_input_select);
+
+/* RX2 Input Select */
+static struct soc_enum soc_enum_rx2_in_sel =
+ SOC_ENUM_SINGLE(RX2, RXx_DATA_SHIFT,
+ ARRAY_SIZE(enum_rx_input_select),
+ enum_rx_input_select);
+/* RX3 Input Select */
+static struct soc_enum soc_enum_rx3_in_sel =
+ SOC_ENUM_SINGLE(RX3, RXx_DATA_SHIFT,
+ ARRAY_SIZE(enum_rx_input_select),
+ enum_rx_input_select);
+/* TX1 Input Select */
+static struct soc_enum soc_enum_tx1_in_sel =
+ SOC_ENUM_SINGLE(TX1, TXx_MUX_SHIFT,
+ ARRAY_SIZE(enum_tx1_input_select),
+ enum_tx1_input_select);
+/* TX2 Input Select */
+static struct soc_enum soc_enum_tx2_in_sel =
+ SOC_ENUM_SINGLE(TX2, TXx_MUX_SHIFT,
+ ARRAY_SIZE(enum_tx2_input_select),
+ enum_tx2_input_select);
+
+/* I2S0 ULD Select */
+static struct soc_enum soc_enum_i2s0_input_select =
+ SOC_ENUM_DOUBLE(INTERFACE0_ULD, 0, 4,
+ ARRAY_SIZE(enum_i2s_uld_select),
+ enum_i2s_uld_select);
+/* I2S1 ULD Select */
+static struct soc_enum soc_enum_i2s1_input_select =
+ SOC_ENUM_DOUBLE(INTERFACE1_ULD, 0, 4,
+ ARRAY_SIZE(enum_i2s_uld_select),
+ enum_i2s_uld_select);
+
+/* APGA1 Source */
+static struct soc_enum soc_enum_apga1_source =
+ SOC_ENUM_SINGLE(ANALOG_LOOP_PGA1, APGAx_MUX_SHIFT,
+ ARRAY_SIZE(enum_apga1_source),
+ enum_apga1_source);
+
+/* APGA2 Source */
+static struct soc_enum soc_enum_apga2_source =
+ SOC_ENUM_SINGLE(ANALOG_LOOP_PGA2, APGAx_MUX_SHIFT,
+ ARRAY_SIZE(enum_apga2_source),
+ enum_apga2_source);
+
+static struct soc_enum soc_enum_apga1_enable =
+ SOC_ENUM_SINGLE(ANALOG_LOOP_PGA1, APGAx_PWR_SHIFT,
+ ARRAY_SIZE(enum_onoff), enum_onoff);
+
+static struct soc_enum soc_enum_apga2_enable =
+ SOC_ENUM_SINGLE(ANALOG_LOOP_PGA2, APGAx_PWR_SHIFT,
+ ARRAY_SIZE(enum_onoff), enum_onoff);
+
+/* RX1 Side Tone */
+static struct soc_enum soc_enum_dac1_side_tone =
+ SOC_ENUM_SINGLE(ST1_PGA, STx_MUX_SHIFT,
+ ARRAY_SIZE(enum_rx_side_tone),
+ enum_rx_side_tone);
+
+/* RX2 Side Tone */
+static struct soc_enum soc_enum_dac2_side_tone =
+ SOC_ENUM_SINGLE(ST2_PGA, STx_MUX_SHIFT,
+ ARRAY_SIZE(enum_rx_side_tone),
+ enum_rx_side_tone);
+
+/* DAC1 Power Mode */
+static struct soc_enum soc_enum_dac1_power_mode =
+ SOC_ENUM_SINGLE(RX1, DACx_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_dac_power_mode),
+ enum_dac_power_mode);
+
+/* DAC2 Power Mode */
+static struct soc_enum soc_enum_dac2_power_mode =
+ SOC_ENUM_SINGLE(RX2, DACx_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_dac_power_mode),
+ enum_dac_power_mode);
+
+/* DAC3 Power Mode */
+static struct soc_enum soc_enum_dac3_power_mode =
+ SOC_ENUM_SINGLE(RX3, DACx_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_dac_power_mode),
+ enum_dac_power_mode);
+
+/* EAR Power Mode */
+static struct soc_enum soc_enum_ear_power_mode =
+ SOC_ENUM_SINGLE(EAR_PWR, EAR_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_ear_power_mode),
+ enum_ear_power_mode);
+
+/* AUXO12 Power Mode */
+static struct soc_enum soc_enum_auxo12_power_mode =
+ SOC_ENUM_SINGLE(AUXO12_PWR_MODE, AUXOxy_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_auxo_power_mode),
+ enum_auxo_power_mode);
+
+/* AUXO34 Power Mode */
+static struct soc_enum soc_enum_auxo34_power_mode =
+ SOC_ENUM_SINGLE(AUXO34_PWR_MODE, AUXOxy_PWR_MODE_SHIFT,
+ ARRAY_SIZE(enum_auxo_power_mode),
+ enum_auxo_power_mode);
+
+/* MBIAS1 PDN Impedance */
+static struct soc_enum soc_enum_mbias1_pdn_imp =
+ SOC_ENUM_SINGLE(MIC_BIAS1, MBIASx_PDN_IMP_SHIFT,
+ ARRAY_SIZE(enum_mbias_pdn_imp),
+ enum_mbias_pdn_imp);
+
+/* MBIAS2 PDN Impedance */
+static struct soc_enum soc_enum_mbias2_pdn_imp =
+ SOC_ENUM_SINGLE(MIC_BIAS2, MBIASx_PDN_IMP_SHIFT,
+ ARRAY_SIZE(enum_mbias_pdn_imp),
+ enum_mbias_pdn_imp);
+
+/* MBIAS2 Output voltage */
+static struct soc_enum soc_enum_mbias2_out_v =
+ SOC_ENUM_SINGLE(MIC_BIAS2, MBIAS2_OUT_V_SHIFT,
+ ARRAY_SIZE(enum_mbias2_out_v),
+ enum_mbias2_out_v);
+
+static struct soc_enum soc_enum_mbias2_int_r =
+ SOC_ENUM_SINGLE(MIC_BIAS2_VAD, MBIAS2_R_INT_SHIFT,
+ ARRAY_SIZE(enum_optional_resistor),
+ enum_optional_resistor);
+
+static struct soc_enum soc_enum_mic1_in_imp =
+ SOC_ENUM_SINGLE(MIC1_GAIN, MICx_IN_IMP_SHIFT,
+ ARRAY_SIZE(enum_mic_in_imp),
+ enum_mic_in_imp);
+
+static struct soc_enum soc_enum_mic2_in_imp =
+ SOC_ENUM_SINGLE(MIC2_GAIN, MICx_IN_IMP_SHIFT,
+ ARRAY_SIZE(enum_mic_in_imp),
+ enum_mic_in_imp);
+
+static struct soc_enum soc_enum_tx1_hp_filter =
+ SOC_ENUM_SINGLE(TX1, TXx_HP_FILTER_SHIFT,
+ ARRAY_SIZE(enum_hp_filter),
+ enum_hp_filter);
+
+static struct soc_enum soc_enum_tx2_hp_filter =
+ SOC_ENUM_SINGLE(TX2, TXx_HP_FILTER_SHIFT,
+ ARRAY_SIZE(enum_hp_filter),
+ enum_hp_filter);
+
+static struct soc_enum soc_enum_st1_hp_filter =
+ SOC_ENUM_SINGLE(ST1_PGA, STx_HP_FILTER_SHIFT,
+ ARRAY_SIZE(enum_hp_filter),
+ enum_hp_filter);
+
+static struct soc_enum soc_enum_st2_hp_filter =
+ SOC_ENUM_SINGLE(ST2_PGA, STx_HP_FILTER_SHIFT,
+ ARRAY_SIZE(enum_hp_filter),
+ enum_hp_filter);
+
+static struct soc_enum soc_enum_i2s0_word_length =
+ SOC_ENUM_SINGLE(INTERFACE0, I2Sx_WORDLENGTH_SHIFT,
+ ARRAY_SIZE(enum_i2s_word_length),
+ enum_i2s_word_length);
+
+static struct soc_enum soc_enum_i2s1_word_length =
+ SOC_ENUM_SINGLE(INTERFACE1, I2Sx_WORDLENGTH_SHIFT,
+ ARRAY_SIZE(enum_i2s_word_length),
+ enum_i2s_word_length);
+
+static struct soc_enum soc_enum_i2s0_mode =
+ SOC_ENUM_SINGLE(INTERFACE0, I2Sx_MODE_SHIFT,
+ ARRAY_SIZE(enum_i2s_mode),
+ enum_i2s_mode);
+
+static struct soc_enum soc_enum_i2s1_mode =
+ SOC_ENUM_SINGLE(INTERFACE1, I2Sx_MODE_SHIFT,
+ ARRAY_SIZE(enum_i2s_mode),
+ enum_i2s_mode);
+
+static struct soc_enum soc_enum_i2s0_tristate =
+ SOC_ENUM_SINGLE(INTERFACE0, I2Sx_TRISTATE_SHIFT,
+ ARRAY_SIZE(enum_i2s_tristate),
+ enum_i2s_tristate);
+
+static struct soc_enum soc_enum_i2s1_tristate =
+ SOC_ENUM_SINGLE(INTERFACE1, I2Sx_TRISTATE_SHIFT,
+ ARRAY_SIZE(enum_i2s_tristate),
+ enum_i2s_tristate);
+
+static struct soc_enum soc_enum_i2s0_pulldown_resistor =
+ SOC_ENUM_SINGLE(INTERFACE0, I2Sx_PULLDOWN_SHIFT,
+ ARRAY_SIZE(enum_optional_resistor),
+ enum_optional_resistor);
+
+static struct soc_enum soc_enum_i2s1_pulldown_resistor =
+ SOC_ENUM_SINGLE(INTERFACE1, I2Sx_PULLDOWN_SHIFT,
+ ARRAY_SIZE(enum_optional_resistor),
+ enum_optional_resistor);
+
+static struct soc_enum soc_enum_i2s0_sample_rate =
+ SOC_ENUM_SINGLE(INTERFACE0, I2Sx_SR_SHIFT,
+ ARRAY_SIZE(enum_i2s_sample_rate),
+ enum_i2s_sample_rate);
+
+static struct soc_enum soc_enum_i2s1_sample_rate =
+ SOC_ENUM_SINGLE(INTERFACE1, I2Sx_SR_SHIFT,
+ ARRAY_SIZE(enum_i2s_sample_rate),
+ enum_i2s_sample_rate);
+
+static struct soc_enum soc_enum_line1_inversion =
+ SOC_ENUM_SINGLE(LINE1, LINEx_INV_SHIFT,
+ ARRAY_SIZE(enum_signal_inversion),
+ enum_signal_inversion);
+
+static struct soc_enum soc_enum_line2_inversion =
+ SOC_ENUM_SINGLE(LINE2, LINEx_INV_SHIFT,
+ ARRAY_SIZE(enum_signal_inversion),
+ enum_signal_inversion);
+
+static struct soc_enum soc_enum_auxo1_inversion =
+ SOC_ENUM_SINGLE(AUXO1, AUXOx_INV_SHIFT,
+ ARRAY_SIZE(enum_signal_inversion),
+ enum_signal_inversion);
+
+static struct soc_enum soc_enum_auxo2_inversion =
+ SOC_ENUM_SINGLE(AUXO2, AUXOx_INV_SHIFT,
+ ARRAY_SIZE(enum_signal_inversion),
+ enum_signal_inversion);
+
+static struct soc_enum soc_enum_auxo3_inversion =
+ SOC_ENUM_SINGLE(AUXO3, AUXOx_INV_SHIFT,
+ ARRAY_SIZE(enum_signal_inversion),
+ enum_signal_inversion);
+
+static struct soc_enum soc_enum_auxo4_inversion =
+ SOC_ENUM_SINGLE(AUXO4, AUXOx_INV_SHIFT,
+ ARRAY_SIZE(enum_signal_inversion),
+ enum_signal_inversion);
+
+static struct soc_enum soc_enum_auxo1_pulldown_resistor =
+ SOC_ENUM_SINGLE(AUXO1, AUXOx_PULLDOWN_SHIFT,
+ ARRAY_SIZE(enum_optional_resistor),
+ enum_optional_resistor);
+
+static struct soc_enum soc_enum_auxo2_pulldown_resistor =
+ SOC_ENUM_SINGLE(AUXO2, AUXOx_PULLDOWN_SHIFT,
+ ARRAY_SIZE(enum_optional_resistor),
+ enum_optional_resistor);
+
+static struct soc_enum soc_enum_spkr1_mode =
+ SOC_ENUM_SINGLE(SPKR1, SPKRx_PWR_SHIFT,
+ ARRAY_SIZE(enum_spkr1_mode),
+ enum_spkr1_mode);
+
+static struct soc_enum soc_enum_spkr2_mode =
+ SOC_ENUM_SINGLE(AB5500_VIRTUAL_REG3, SPKR2_MODE_SHIFT,
+ ARRAY_SIZE(enum_spkr2_mode),
+ enum_spkr2_mode);
+
+static struct soc_enum soc_enum_pwm_spkr1n_pol =
+ SOC_ENUM_SINGLE(PWMCTRL_SPKR1, PWMCTRL_SPKRx_N1_POL_SHIFT,
+ ARRAY_SIZE(enum_pwm_pol), enum_pwm_pol);
+
+static struct soc_enum soc_enum_pwm_spkr1p_pol =
+ SOC_ENUM_SINGLE(PWMCTRL_SPKR1, PWMCTRL_SPKRx_P1_POL_SHIFT,
+ ARRAY_SIZE(enum_pwm_pol), enum_pwm_pol);
+
+static struct soc_enum soc_enum_pwm_spkr2n_pol =
+ SOC_ENUM_SINGLE(PWMCTRL_SPKR2, PWMCTRL_SPKRx_N1_POL_SHIFT,
+ ARRAY_SIZE(enum_pwm_pol), enum_pwm_pol);
+
+static struct soc_enum soc_enum_pwm_spkr2p_pol =
+ SOC_ENUM_SINGLE(PWMCTRL_SPKR2, PWMCTRL_SPKRx_P1_POL_SHIFT,
+ ARRAY_SIZE(enum_pwm_pol), enum_pwm_pol);
+
+static struct snd_kcontrol_new ab5500_snd_controls[] = {
+ /* RX Routing */
+ SOC_ENUM("RX1 Input Select", soc_enum_rx1_in_sel),
+ SOC_ENUM("RX2 Input Select", soc_enum_rx2_in_sel),
+ SOC_ENUM("RX3 Input Select", soc_enum_rx3_in_sel),
+ SOC_SINGLE("LINE1 Adder", LINE1_ADDER, 0, 0x1F, 0),
+ SOC_SINGLE("LINE2 Adder", LINE2_ADDER, 0, 0x1F, 0),
+ SOC_SINGLE("EAR Adder", EAR_ADDER, 0, 0x1F, 0),
+ SOC_SINGLE("SPKR1 Adder", SPKR1_ADDER, 0, 0x1F, 0),
+ SOC_SINGLE("SPKR2 Adder", SPKR2_ADDER, 0, 0x1F, 0),
+ SOC_SINGLE("AUXO1 Adder", AUXO1_ADDER, 0, 0x1F, 0),
+ SOC_SINGLE("AUXO2 Adder", AUXO2_ADDER, 0, 0x1F, 0),
+ SOC_SINGLE("AUXO3 Adder", AUXO3_ADDER, 0, 0x1F, 0),
+ SOC_SINGLE("AUXO4 Adder", AUXO4_ADDER, 0, 0x1F, 0),
+ SOC_SINGLE("SPKR1 PWM Select", AB5500_VIRTUAL_REG5, 0, 0x03, 0),
+ SOC_SINGLE("SPKR2 PWM Select", AB5500_VIRTUAL_REG5, 2, 0x0C, 0),
+ /* TX Routing */
+ SOC_ENUM("TX1 Input Select", soc_enum_tx1_in_sel),
+ SOC_ENUM("TX2 Input Select", soc_enum_tx2_in_sel),
+ SOC_SINGLE("MIC1 Input Select", MIC1_INPUT_SELECT, 0, 0xff, 0),
+ SOC_SINGLE("MIC2 Input Select", MIC2_INPUT_SELECT, 0, 0xff, 0),
+ SOC_SINGLE("MIC2 to MIC1", MIC2_TO_MIC1, 0, 0x03, 0),
+ SOC_ENUM("I2S0 Input Select", soc_enum_i2s0_input_select),
+ SOC_ENUM("I2S1 Input Select", soc_enum_i2s1_input_select),
+ /* Routing of Side Tone and Analog Loop */
+ SOC_ENUM("APGA1 Source", soc_enum_apga1_source),
+ SOC_ENUM("APGA2 Source", soc_enum_apga2_source),
+ SOC_ENUM("APGA1 Enable", soc_enum_apga1_enable),
+ SOC_ENUM("APGA2 Enable", soc_enum_apga2_enable),
+ SOC_ENUM("DAC1 Side Tone", soc_enum_dac1_side_tone),
+ SOC_ENUM("DAC2 Side Tone", soc_enum_dac2_side_tone),
+ /* RX Volume Control */
+ SOC_SINGLE("RX-DPGA1 Gain", RX1_DPGA, 0, 0x43, 0),
+ SOC_SINGLE("RX-DPGA2 Gain", RX2_DPGA, 0, 0x43, 0),
+ SOC_SINGLE("RX-DPGA3 Gain", RX3_DPGA, 0, 0x43, 0),
+ SOC_SINGLE("LINE1 Gain", LINE1, LINEx_GAIN_SHIFT, 0x0a, 0),
+ SOC_SINGLE("LINE2 Gain", LINE2, LINEx_GAIN_SHIFT, 0x0a, 0),
+ SOC_SINGLE("SPKR1 Gain", SPKR1, SPKRx_GAIN_SHIFT, 0x16, 0),
+ SOC_SINGLE("SPKR2 Gain", SPKR2, SPKRx_GAIN_SHIFT, 0x16, 0),
+ SOC_SINGLE("EAR Gain", EAR_GAIN, EAR_GAIN_SHIFT, 0x12, 0),
+ SOC_SINGLE("AUXO1 Gain", AUXO1, AUXOx_GAIN_SHIFT, 0x0c, 0),
+ SOC_SINGLE("AUXO2 Gain", AUXO2, AUXOx_GAIN_SHIFT, 0x0c, 0),
+ SOC_SINGLE("AUXO3 Gain", AUXO3, AUXOx_GAIN_SHIFT, 0x0c, 0),
+ SOC_SINGLE("AUXO4 Gain", AUXO4, AUXOx_GAIN_SHIFT, 0x0c, 0),
+ /* TX Volume Control */
+ SOC_SINGLE("MIC1 Gain", MIC1_GAIN, MICx_GAIN_SHIFT, 0x0a, 0),
+ SOC_SINGLE("MIC2 Gain", MIC2_GAIN, MICx_GAIN_SHIFT, 0x0a, 0),
+ SOC_SINGLE("TX-DPGA1 Gain", TX_DPGA1, TX_DPGAx_SHIFT, 0x0f, 0),
+ SOC_SINGLE("TX-DPGA2 Gain", TX_DPGA2, TX_DPGAx_SHIFT, 0x0f, 0),
+ /* Volume Control of Side Tone and Analog Loop */
+ SOC_SINGLE("ST-PGA1 Gain", ST1_PGA, STx_PGA_SHIFT, 0x0a, 0),
+ SOC_SINGLE("ST-PGA2 Gain", ST2_PGA, STx_PGA_SHIFT, 0x0a, 0),
+ SOC_SINGLE("APGA1 Gain", ANALOG_LOOP_PGA1, APGAx_GAIN_SHIFT, 0x1d, 0),
+ SOC_SINGLE("APGA2 Gain", ANALOG_LOOP_PGA2, APGAx_GAIN_SHIFT, 0x1d, 0),
+ /* RX Properties */
+ SOC_ENUM("DAC1 Power Mode", soc_enum_dac1_power_mode),
+ SOC_ENUM("DAC2 Power Mode", soc_enum_dac2_power_mode),
+ SOC_ENUM("DAC3 Power Mode", soc_enum_dac3_power_mode),
+ SOC_ENUM("EAR Power Mode", soc_enum_ear_power_mode),
+ SOC_ENUM("AUXO12 Power Mode", soc_enum_auxo12_power_mode),
+ SOC_ENUM("AUXO34 Power Mode", soc_enum_auxo34_power_mode),
+ SOC_ENUM("LINE1 Inversion", soc_enum_line1_inversion),
+ SOC_ENUM("LINE2 Inversion", soc_enum_line2_inversion),
+ SOC_ENUM("AUXO1 Inversion", soc_enum_auxo1_inversion),
+ SOC_ENUM("AUXO2 Inversion", soc_enum_auxo2_inversion),
+ SOC_ENUM("AUXO3 Inversion", soc_enum_auxo3_inversion),
+ SOC_ENUM("AUXO4 Inversion", soc_enum_auxo4_inversion),
+ SOC_ENUM("AUXO1 Pulldown Resistor", soc_enum_auxo1_pulldown_resistor),
+ SOC_ENUM("AUXO2 Pulldown Resistor", soc_enum_auxo2_pulldown_resistor),
+ SOC_ENUM("SPKR1 Mode", soc_enum_spkr1_mode),
+ SOC_ENUM("SPKR2 Mode", soc_enum_spkr2_mode),
+ SOC_ENUM("PWM SPKR1N POL", soc_enum_pwm_spkr1n_pol),
+ SOC_ENUM("PWM SPKR1P POL", soc_enum_pwm_spkr1p_pol),
+ SOC_ENUM("PWM SPKR2N POL", soc_enum_pwm_spkr2n_pol),
+ SOC_ENUM("PWM SPKR2P POL", soc_enum_pwm_spkr2p_pol),
+ /* TX Properties */
+ SOC_SINGLE("MIC1 VMID", MIC1_VMID_SELECT, 0, 0x3f, 0),
+ SOC_SINGLE("MIC2 VMID", MIC2_VMID_SELECT, 0, 0x3f, 0),
+ SOC_ENUM("MBIAS1 PDN Impedance", soc_enum_mbias1_pdn_imp),
+ SOC_ENUM("MBIAS2 PDN Impedance", soc_enum_mbias2_pdn_imp),
+ SOC_ENUM("MBIAS2 Output Voltage", soc_enum_mbias2_out_v),
+ SOC_ENUM("MBIAS2 Internal Resistor", soc_enum_mbias2_int_r),
+ SOC_ENUM("MIC1 Input Impedance", soc_enum_mic1_in_imp),
+ SOC_ENUM("MIC2 Input Impedance", soc_enum_mic2_in_imp),
+ SOC_ENUM("TX1 HP Filter", soc_enum_tx1_hp_filter),
+ SOC_ENUM("TX2 HP Filter", soc_enum_tx2_hp_filter),
+ /* Side Tone and Analog Loop Properties */
+ SOC_ENUM("ST1 HP Filter", soc_enum_st1_hp_filter),
+ SOC_ENUM("ST2 HP Filter", soc_enum_st2_hp_filter),
+ /* I2S Interface Properties */
+ SOC_ENUM("I2S0 Word Length", soc_enum_i2s0_word_length),
+ SOC_ENUM("I2S1 Word Length", soc_enum_i2s1_word_length),
+ SOC_ENUM("I2S0 Mode", soc_enum_i2s0_mode),
+ SOC_ENUM("I2S1 Mode", soc_enum_i2s1_mode),
+ SOC_ENUM("I2S0 tri-state", soc_enum_i2s0_tristate),
+ SOC_ENUM("I2S1 tri-state", soc_enum_i2s1_tristate),
+ SOC_ENUM("I2S0 Pulldown Resistor", soc_enum_i2s0_pulldown_resistor),
+ SOC_ENUM("I2S1 Pulldown Resistor", soc_enum_i2s1_pulldown_resistor),
+ SOC_ENUM("I2S0 Sample Rate", soc_enum_i2s0_sample_rate),
+ SOC_ENUM("I2S1 Sample Rate", soc_enum_i2s1_sample_rate),
+ SOC_SINGLE("Interface Swap", INTERFACE_SWAP, 0, 0x03, 0),
+ /* Miscellaneous */
+ SOC_SINGLE("Negative Charge Pump", NEG_CHARGE_PUMP, 0, 0x03, 0)
+};
+
+/* Count the number of set bits in x (population count). */
+#define count_ones(x) ({ \
+ int num; \
+ typeof(x) y = x; \
+ for (num = 0; y; y &= y - 1, num++) \
+ ; \
+ num; \
+ })
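+
+/*
+ * Note: each iteration of "y &= y - 1" clears the lowest set bit, so the
+ * loop above runs once per set bit (e.g. count_ones(0x03) == 2).  The
+ * generic hweight_long() helper from <linux/bitops.h> could be used
+ * instead for unsigned long operands.
+ */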
+
+enum enum_power {
+ POWER_OFF = 0,
+ POWER_ON = 1
+};
+
+enum enum_link {
+ UNLINK = 0,
+ LINK = 1
+};
+
+static enum enum_power get_widget_power_status(enum enum_widget widget)
+{
+ u8 val;
+
+ if (widget >= number_of_widgets)
+ return POWER_OFF;
+ val = read_reg(widget_pm_array[widget].reg);
+ if (val & (1 << widget_pm_array[widget].shift))
+ return POWER_ON;
+ else
+ return POWER_OFF;
+}
+
+static int count_powered_neighbors(const unsigned long *neighbors)
+{
+ unsigned long i;
+ int n = 0;
+ for_each_set_bit(i, neighbors, number_of_widgets) {
+ if (get_widget_power_status(i) == POWER_ON)
+ n++;
+ }
+ return n;
+}
+
+static int has_powered_neighbors(const unsigned long *neighbors)
+{
+ unsigned int i;
+ for_each_set_bit(i, neighbors, number_of_widgets) {
+ if (get_widget_power_status(i) == POWER_ON)
+ return 1;
+ }
+ return 0;
+}
+
+static int has_stacked_neighbors(const unsigned long *neighbors)
+{
+ unsigned long *stack_map = pm_stack_as_bitmap;
+ return bitmap_intersects(stack_map, neighbors, number_of_widgets);
+}
+
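+/*
+ * Power a widget and its dependencies on or off.  Widgets form a graph
+ * whose edges are the source_list/sink_list bitmaps; a small explicit
+ * stack (pm_stack) replaces recursion.  When powering on, unpowered
+ * sources of a widget are brought up first; when powering off, sinks that
+ * would lose their last powered source are taken down, and sources left
+ * without any powered sink are released afterwards.
+ */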
+static void power_widget_unlocked(enum enum_power onoff, enum enum_widget widget)
+{
+ enum enum_widget w;
+ int done;
+
+ if (widget >= number_of_widgets)
+ return;
+ if (get_widget_power_status(widget) == onoff)
+ return;
+
+ for (w = widget, done = 0; !done;) {
+ unsigned long i;
+ unsigned long *srcs = widget_pm_array[w].source_list;
+ unsigned long *sinks = widget_pm_array[w].sink_list;
+ dev_dbg(ab5500_dev, "%s: processing widget %s.\n",
+ __func__, widget_names[w]);
+
+ if (onoff == POWER_ON &&
+ !bitmap_empty(srcs, number_of_widgets) &&
+ !has_powered_neighbors(srcs)) {
+ pm_stack.stack[pm_stack.p++] = w;
+ for_each_set_bit(i, srcs, number_of_widgets) {
+ pm_stack.stack[pm_stack.p++] = i;
+ }
+ w = pm_stack.stack[--pm_stack.p];
+ continue;
+ } else if (onoff == POWER_OFF &&
+ has_powered_neighbors(sinks)) {
+ int n = 0;
+ pm_stack.stack[pm_stack.p++] = w;
+ for_each_set_bit(i, sinks, number_of_widgets) {
+ if (count_powered_neighbors(
+ widget_pm_array[i].source_list)
+ == 1 &&
+ get_widget_power_status(i) == POWER_ON) {
+ pm_stack.stack[pm_stack.p++] = i;
+ n++;
+ }
+ }
+ if (n) {
+ w = pm_stack.stack[--pm_stack.p];
+ continue;
+ } else
+ --pm_stack.p;
+ }
+ mask_set_reg(widget_pm_array[w].reg,
+ 1 << widget_pm_array[w].shift,
+ onoff == POWER_ON ? 0xff : 0);
+ dev_dbg(ab5500_dev, "%s: widget %s powered %s.\n",
+ __func__, widget_names[w],
+ onoff == POWER_ON ? "on" : "off");
+ if (onoff == POWER_ON &&
+ !bitmap_empty(sinks, number_of_widgets) &&
+ !has_powered_neighbors(sinks) &&
+ !has_stacked_neighbors(sinks)) {
+ for_each_set_bit(i, sinks, number_of_widgets) {
+ pm_stack.stack[pm_stack.p++] = i;
+ }
+ w = pm_stack.stack[--pm_stack.p];
+ continue;
+ } else if (onoff == POWER_OFF) {
+ for_each_set_bit(i, srcs, number_of_widgets) {
+ if (!has_powered_neighbors(
+ widget_pm_array[i].sink_list)
+ && get_widget_power_status(i) == POWER_ON
+ && !test_bit(i, pm_stack_as_bitmap)) {
+ pm_stack.stack[pm_stack.p++] = i;
+ }
+ }
+ }
+ if (pm_stack.p > 0)
+ w = pm_stack.stack[--pm_stack.p];
+ else
+ done = 1;
+ }
+}
+
+static void power_widget_locked(enum enum_power onoff,
+ enum enum_widget widget)
+{
+ if (mutex_lock_interruptible(&ab5500_pm_mutex)) {
+ dev_warn(ab5500_dev,
+ "%s: Signal received while waiting on the PM mutex.\n",
+ __func__);
+ return;
+ }
+ power_widget_unlocked(onoff, widget);
+ mutex_unlock(&ab5500_pm_mutex);
+}
+
+static void dump_registers(const char *where, ...)
+{
+ va_list ap;
+ va_start(ap, where);
+ do {
+ short reg = va_arg(ap, int);
+ if (reg < 0)
+ break;
+ dev_dbg(ab5500_dev, "%s from %s> 0x%02X : 0x%02X.\n",
+ __func__, where, reg, read_reg(reg));
+ } while (1);
+ va_end(ap);
+}
+
+/**
+ * update_widgets_link() - update the link between two widgets.
+ * @op: LINK to connect, UNLINK to disconnect
+ * @src: source of the connection
+ * @sink: sink of the connection
+ * @reg: register implementing the connection
+ * @mask: bits of @reg to update
+ * @newval: new value for the masked bits
+ */
+static int update_widgets_link(enum enum_link op, enum enum_widget src,
+ enum enum_widget sink,
+ u8 reg, u8 mask, u8 newval)
+{
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&ab5500_pm_mutex)) {
+		dev_warn(ab5500_dev,
+			 "%s: Signal received while waiting on the PM mutex.\n",
+			 __func__);
+ return -EINTR;
+ }
+
+ switch (op << 2 | test_bit(sink, widget_pm_array[src].sink_list) << 1 |
+ test_bit(src, widget_pm_array[sink].source_list)) {
+ case 3: /* UNLINK, sink in sink_list, src in source_list */
+ case 4: /* LINK, sink not in sink_list, src not in source_list */
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (((int)op) << 2 | get_widget_power_status(src) << 1 |
+ get_widget_power_status(sink)) {
+ case 3: /* op = 0, src on, sink on */
+ if (count_powered_neighbors(widget_pm_array[sink].source_list)
+ == 1)
+ power_widget_unlocked(POWER_OFF, sink);
+ mask_set_reg(reg, mask, newval);
+ break;
+ case 6: /* op = 1, src on, sink off */
+ mask_set_reg(reg, mask, newval);
+ power_widget_unlocked(POWER_ON, sink);
+ break;
+ default:
+ /* op = 0, src off, sink off */
+ /* op = 0, src off, sink on */
+ /* op = 0, src on, sink off */
+ /* op = 1, src off, sink off */
+ /* op = 1, src off, sink on */
+ /* op = 1, src on, sink on */
+ mask_set_reg(reg, mask, newval);
+ }
+ change_bit(sink, widget_pm_array[src].sink_list);
+ change_bit(src, widget_pm_array[sink].source_list);
+end:
+ mutex_unlock(&ab5500_pm_mutex);
+ return ret;
+}
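+
+/*
+ * Example (sketch, not an actual call site): routing DAC1 into the EAR
+ * adder could be expressed as
+ *
+ *	update_widgets_link(LINK, widget_dac1, widget_ear,
+ *			    EAR_ADDER, DAC1_TO_X_MASK, DAC1_TO_X_MASK);
+ *
+ * which writes the adder bit, powers the EAR widget on demand if DAC1 is
+ * already powered, and records the edge in both widgets' bitmaps.
+ */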
+
+static enum enum_widget apga_source_translate(u8 reg_value)
+{
+ switch (reg_value) {
+ case 1:
+ return widget_mic1;
+ case 2:
+ return widget_mic2;
+ default:
+ return number_of_widgets;
+ }
+}
+
+static enum enum_widget adder_sink_translate(u8 reg)
+{
+ switch (reg) {
+ case EAR_ADDER:
+ return widget_ear;
+ case AUXO1_ADDER:
+ return widget_auxo1;
+ case AUXO2_ADDER:
+ return widget_auxo2;
+ case AUXO3_ADDER:
+ return widget_auxo3;
+ case AUXO4_ADDER:
+ return widget_auxo4;
+ case SPKR1_ADDER:
+ return widget_spkr1;
+ case SPKR2_ADDER:
+ return widget_spkr2;
+ case LINE1_ADDER:
+ return widget_line1;
+ case LINE2_ADDER:
+ return widget_line2;
+ default:
+ return number_of_widgets;
+ }
+}
+
+static int ab5500_add_widgets(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_new_controls(&codec->dapm, ab5500_dapm_widgets,
+ ARRAY_SIZE(ab5500_dapm_widgets));
+
+ snd_soc_dapm_add_routes(&codec->dapm, intercon, ARRAY_SIZE(intercon));
+
+ snd_soc_dapm_new_widgets(&codec->dapm);
+ return 0;
+}
+
+static void power_for_playback(enum enum_power onoff, int ifsel)
+{
+ dev_dbg(ab5500_dev, "%s: interface %d power %s.\n", __func__,
+ ifsel, onoff == POWER_ON ? "on" : "off");
+ if (mutex_lock_interruptible(&ab5500_pm_mutex)) {
+ dev_warn(ab5500_dev,
+ "%s: Signal received while waiting on the PM mutex.\n",
+ __func__);
+ return;
+ }
+ mask_set_reg(ENV_THR, ENV_THR_HIGH_MASK, 0x0F << ENV_THR_HIGH_SHIFT);
+ mask_set_reg(ENV_THR, ENV_THR_LOW_MASK, 0x00 << ENV_THR_LOW_SHIFT);
+ mask_set_reg(DC_CANCEL, DC_CANCEL_AUXO12_MASK,
+ 0x01 << DC_CANCEL_AUXO12_SHIFT);
+
+ power_widget_unlocked(onoff, ifsel == 0 ?
+ widget_if0_dld_l : widget_if1_dld_l);
+ power_widget_unlocked(onoff, ifsel == 0 ?
+ widget_if0_dld_r : widget_if1_dld_r);
+
+ mutex_unlock(&ab5500_pm_mutex);
+}
+
+static int enable_regulator(enum regulator_idx idx)
+{
+ int ret;
+
+ if (reg_enabled[idx])
+ return 0;
+
+ ret = regulator_enable(reg_info[idx].consumer);
+ if (ret != 0) {
+ pr_err("%s: Failure to enable regulator '%s' (ret = %d)\n",
+ __func__, reg_info[idx].supply, ret);
+ return ret;
+	}
+
+ reg_enabled[idx] = true;
+ pr_debug("%s: Enabled regulator '%s', status: %d, %d\n",
+ __func__,
+ reg_info[idx].supply,
+ (int)reg_enabled[0],
+ (int)reg_enabled[1]);
+ return 0;
+}
+
+static void disable_regulator(enum regulator_idx idx)
+{
+ if (!reg_enabled[idx])
+ return;
+
+ regulator_disable(reg_info[idx].consumer);
+
+ reg_enabled[idx] = false;
+ pr_debug("%s: Disabled regulator '%s', status: %d, %d\n",
+ __func__,
+ reg_info[idx].supply,
+ (int)reg_enabled[0],
+ (int)reg_enabled[1]);
+}
+
+static void power_for_capture(enum enum_power onoff, int ifsel)
+{
+ int err;
+ int mask;
+
+	dev_info(ab5500_dev, "%s: interface %d power %s.\n", __func__,
+ ifsel, onoff == POWER_ON ? "on" : "off");
+ if (mutex_lock_interruptible(&ab5500_pm_mutex)) {
+ dev_warn(ab5500_dev,
+ "%s: Signal received while waiting on the PM mutex.\n",
+ __func__);
+ return;
+ }
+ power_widget_unlocked(onoff, ifsel == 0 ?
+ widget_if0_uld_l : widget_if1_uld_l);
+ power_widget_unlocked(onoff, ifsel == 0 ?
+ widget_if0_uld_r : widget_if1_uld_r);
+
+ mask = (read_reg(TX2) & TXx_MUX_MASK) >> TXx_MUX_SHIFT;
+
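+	/*
+	 * The switch key combines the requested power state (bit 2) with
+	 * the TX2 input mux setting (bits 1:0): mux 0 selects the analog
+	 * microphone regulator, 1 and 2 the digital microphone regulator,
+	 * and 3 is treated as invalid.
+	 */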
+ switch (onoff << 2 | mask) {
+ case 0: /* Power off : Amic */
+ disable_regulator(REGULATOR_AMIC);
+ break;
+ case 1: /* Power off : Dmic */
+ case 2:
+ disable_regulator(REGULATOR_DMIC);
+ break;
+ case 4: /* Power on : Amic */
+ err = enable_regulator(REGULATOR_AMIC);
+ if (err < 0)
+ goto unlock;
+ break;
+ case 5: /* Power on : Dmic */
+ case 6:
+ err = enable_regulator(REGULATOR_DMIC);
+ if (err < 0)
+ goto unlock;
+ break;
+ default:
+ pr_debug("%s : Not a valid regulator combination\n",
+ __func__);
+ break;
+ }
+unlock:
+ mutex_unlock(&ab5500_pm_mutex);
+}
+
+static int ab5500_add_controls(struct snd_soc_codec *codec)
+{
+ int err = 0, i, n = ARRAY_SIZE(ab5500_snd_controls);
+
+ pr_info("%s: %s called.\n", __FILE__, __func__);
+ for (i = 0; i < n; i++) {
+ err = snd_ctl_add(codec->card->snd_card, snd_ctl_new1(
+ &ab5500_snd_controls[i], codec));
+ if (err < 0) {
+ pr_err("%s failed to add control No.%d of %d.\n",
+ __func__, i, n);
+ return err;
+ }
+ }
+ return err;
+}
+
+static int ab5500_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ dai->playback_active : dai->capture_active) {
+ dev_err(dai->dev, "A %s stream is already active.\n",
+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ "playback" : "capture");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int ab5500_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params,
+ struct snd_soc_dai *dai)
+{
+ u8 val;
+ u8 reg = dai->id == 0 ? INTERFACE0 : INTERFACE1;
+
+ if (!ab5500_dev) {
+		pr_err("%s: The AB5500 codec driver is not initialized.\n",
+ __func__);
+ return -EAGAIN;
+ }
+ dev_info(ab5500_dev, "%s called.\n", __func__);
+ switch (params_rate(hw_params)) {
+ case 8000:
+ val = I2Sx_SR_8000Hz;
+ break;
+ case 16000:
+ val = I2Sx_SR_16000Hz;
+ break;
+ case 44100:
+ val = I2Sx_SR_44100Hz;
+ break;
+ case 48000:
+ val = I2Sx_SR_48000Hz;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ !dai->capture_active : !dai->playback_active) {
+
+ mask_set_reg(reg, I2Sx_SR_MASK, val << I2Sx_SR_SHIFT);
+ if ((read_reg(reg) & I2Sx_MODE_MASK) == 0) {
+ mask_set_reg(reg, MASTER_GENx_PWR_MASK,
+ 1 << MASTER_GENx_PWR_SHIFT);
+ }
+ }
+ return 0;
+}
+
+static int ab5500_pcm_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+	u8 iface = (dai->id == 1) ? INTERFACE1 : INTERFACE0;
+
+	dev_info(ab5500_dev, "%s called.\n", __func__);
+
+	/* Configure registers for either playback or capture */
+	if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) &&
+	    !ab5500_codec_privates[dai->id].playback_active) {
+		power_for_playback(POWER_ON, dai->id);
+		ab5500_codec_privates[dai->id].playback_active = true;
+		mask_set_reg(iface, I2Sx_TRISTATE_MASK,
+			     0 << I2Sx_TRISTATE_SHIFT);
+	} else if ((substream->stream == SNDRV_PCM_STREAM_CAPTURE) &&
+		   !ab5500_codec_privates[dai->id].capture_active) {
+		power_for_capture(POWER_ON, dai->id);
+		ab5500_codec_privates[dai->id].capture_active = true;
+		mask_set_reg(iface, I2Sx_TRISTATE_MASK,
+			     0 << I2Sx_TRISTATE_SHIFT);
+
+ }
+ mutex_lock(&ab5500_clk_mutex);
+ ab5500_clk_request++;
+ if (ab5500_clk_request == 1)
+ mask_set_reg(CLOCK, CLOCK_ENABLE_MASK, 1 << CLOCK_ENABLE_SHIFT);
+ mutex_unlock(&ab5500_clk_mutex);
+
+ dump_registers(__func__, RX1, AUXO1_ADDER, RX2,
+ AUXO2_ADDER, RX1_DPGA, RX2_DPGA, AUXO1, AUXO2, -1);
+
+ return 0;
+}
+
+static void ab5500_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ u8 iface = (dai->id == 0) ? INTERFACE0 : INTERFACE1;
+ dev_info(ab5500_dev, "%s called.\n", __func__);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ power_for_playback(POWER_OFF, dai->id);
+ ab5500_codec_privates[dai->id].playback_active = false;
+ } else {
+ power_for_capture(POWER_OFF, dai->id);
+ ab5500_codec_privates[dai->id].capture_active = false;
+ }
+ if (!dai->playback_active && !dai->capture_active &&
+ (read_reg(iface) & I2Sx_MODE_MASK) == 0) {
+ mask_set_reg(iface, I2Sx_TRISTATE_MASK,
+ 1 << I2Sx_TRISTATE_SHIFT);
+ mask_set_reg(iface, MASTER_GENx_PWR_MASK, 0);
+ }
+ mutex_lock(&ab5500_clk_mutex);
+ ab5500_clk_request--;
+ if (ab5500_clk_request == 0)
+ mask_set_reg(CLOCK, CLOCK_ENABLE_MASK, 0 << CLOCK_ENABLE_SHIFT);
+ mutex_unlock(&ab5500_clk_mutex);
+}
+
+static int ab5500_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ return 0;
+}
+
+static int ab5500_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ u8 iface = (codec_dai->id == 0) ? INTERFACE0 : INTERFACE1;
+ u8 val = 0;
+ dev_info(ab5500_dev, "%s called.\n", __func__);
+
+ switch (fmt & (SND_SOC_DAIFMT_FORMAT_MASK |
+ SND_SOC_DAIFMT_MASTER_MASK)) {
+
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS:
+ val |= 1 << I2Sx_MODE_SHIFT;
+ mask_set_reg(iface, I2Sx_MODE_MASK, val);
+ break;
+
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM:
+ break;
+
+ default:
+ dev_warn(ab5500_dev, "AB5500_dai: unsupported DAI format "
+ "0x%x\n", fmt);
+ return -EINVAL;
+ }
+ if (codec_dai->playback_active && codec_dai->capture_active) {
+ if ((read_reg(iface) & I2Sx_MODE_MASK) == val)
+ return 0;
+ else {
+ dev_err(ab5500_dev,
+ "%s: DAI format set differently "
+ "by an existing stream.\n", __func__);
+ return -EINVAL;
+ }
+ }
+ mask_set_reg(iface, I2Sx_MODE_MASK, val);
+ return 0;
+}
+
+struct snd_soc_dai_driver ab5500_dai_drv[] = {
+ {
+ .name = "ab5500-codec-dai.0",
+ .id = 0,
+ .playback = {
+ .stream_name = "ab5500.0 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = AB5500_SUPPORTED_RATE,
+ .formats = AB5500_SUPPORTED_FMT,
+ },
+ .capture = {
+ .stream_name = "ab5500.0 Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = AB5500_SUPPORTED_RATE,
+ .formats = AB5500_SUPPORTED_FMT,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .startup = ab5500_pcm_startup,
+ .prepare = ab5500_pcm_prepare,
+ .hw_params = ab5500_pcm_hw_params,
+ .shutdown = ab5500_pcm_shutdown,
+ .set_sysclk = ab5500_set_dai_sysclk,
+ .set_fmt = ab5500_set_dai_fmt,
+ }
+ },
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "ab5500-codec-dai.1",
+ .id = 1,
+ .playback = {
+ .stream_name = "ab5500.1 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = AB5500_SUPPORTED_RATE,
+ .formats = AB5500_SUPPORTED_FMT,
+ },
+ .capture = {
+ .stream_name = "ab5500.1 Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = AB5500_SUPPORTED_RATE,
+ .formats = AB5500_SUPPORTED_FMT,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .startup = ab5500_pcm_startup,
+ .prepare = ab5500_pcm_prepare,
+ .hw_params = ab5500_pcm_hw_params,
+ .shutdown = ab5500_pcm_shutdown,
+ .set_sysclk = ab5500_set_dai_sysclk,
+ .set_fmt = ab5500_set_dai_fmt,
+ }
+ },
+ .symmetric_rates = 1,
+ }
+};
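+
+/*
+ * Both DAIs share the same ops table.  .symmetric_rates asks ASoC to keep
+ * the playback and capture rates of an interface equal, which matches
+ * ab5500_pcm_hw_params() only programming I2Sx_SR while the opposite
+ * direction is idle.
+ */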
+
+static int ab5500_codec_probe(struct snd_soc_codec *codec)
+{
+ int ret = ab5500_add_controls(codec);
+ if (ret < 0)
+ return ret;
+ ab5500_add_widgets(codec);
+ return 0;
+}
+
+static int ab5500_codec_remove(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_free(&codec->dapm);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ab5500_codec_suspend(struct snd_soc_codec *codec,
+ pm_message_t state)
+{
+ if (!ab5500_clk_request)
+ mask_set_reg(CLOCK, CLOCK_ENABLE_MASK, 0);
+ return 0;
+}
+
+static int ab5500_codec_resume(struct snd_soc_codec *codec)
+{
+ if (ab5500_clk_request)
+ mask_set_reg(CLOCK, CLOCK_ENABLE_MASK, 0xff);
+ return 0;
+}
+#else
+#define ab5500_codec_resume NULL
+#define ab5500_codec_suspend NULL
+#endif
+
+/*
+ * This function is only called by the ASoC framework to set the registers
+ * associated with the mixer controls.
+ */
+static int ab5500_codec_write_reg(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int value)
+{
+	/* Accept the hardware registers and the driver's virtual registers */
+	if (reg > AB5500_VIRTUAL_REG5)
+		return -EINVAL;
+ switch (reg) {
+ u8 diff, oldval;
+ case ANALOG_LOOP_PGA1:
+ case ANALOG_LOOP_PGA2: {
+ enum enum_widget apga = reg == ANALOG_LOOP_PGA1 ?
+ widget_apga1 : widget_apga2;
+
+ oldval = read_reg(reg);
+ diff = value ^ oldval;
+
+ /*
+ * The APGA is to be turned on/off. The power bit and the
+ * other bits in the same register won't be changed at the
+ * same time since they belong to different controls.
+ */
+ if (diff & (1 << APGAx_PWR_SHIFT)) {
+ power_widget_locked(value >> APGAx_PWR_SHIFT & 1,
+ apga);
+ } else if (diff & APGAx_MUX_MASK) {
+ enum enum_widget old_source =
+ apga_source_translate(oldval);
+ enum enum_widget new_source =
+ apga_source_translate(value);
+ update_widgets_link(UNLINK, old_source, apga,
+ reg, APGAx_MUX_MASK, 0);
+ update_widgets_link(LINK, new_source, apga,
+ reg, APGAx_MUX_MASK, value);
+ } else {
+ set_reg(reg, value);
+ }
+ break;
+ }
+
+ case AUXO3_ADDER:
+ case AUXO4_ADDER:
+ case SPKR2_ADDER:
+ case LINE1_ADDER:
+ case LINE2_ADDER: {
+ int i;
+ enum enum_widget widgets[] = {
+ widget_dac1, widget_dac2, widget_dac3,
+ widget_apga1, widget_apga2
+ };
+ oldval = read_reg(reg);
+ diff = value ^ oldval;
+ for (i = 0; diff; i++) {
+ if (!(diff & 1 << i))
+ continue;
+ diff ^= 1 << i;
+ update_widgets_link(value >> i & 1, widgets[i],
+ adder_sink_translate(reg),
+ reg, 1 << i, value);
+ }
+ break;
+ }
+ case AB5500_VIRTUAL_REG3:
+ oldval = read_reg(reg);
+ diff = value ^ oldval;
+ /*
+ * The following changes won't take place in the same call,
+ * since they are arranged into different mixer controls.
+ */
+
+		/*
+		 * Both mode bits changed: a switch between the two
+		 * amplifier modes, so the widget routing is unchanged.
+		 */
+ if (count_ones(diff & SPKR1_MODE_MASK) == 2) {
+ set_reg(reg, value);
+ break;
+ }
+
+ if (diff & SPKR1_MODE_MASK) {
+ update_widgets_link(
+ UNLINK,
+ (oldval & SPKR1_MODE_MASK) == 0 ?
+ widget_pwm_spkr1 : widget_spkr1_adder,
+ widget_spkr1,
+ reg, SPKR1_MODE_MASK, value);
+ update_widgets_link(
+ LINK,
+ (value & SPKR1_MODE_MASK) == 0 ?
+ widget_pwm_spkr1 : widget_spkr1_adder,
+ widget_spkr1,
+ DUMMY_REG, 0, 0);
+
+ }
+ if (diff & SPKR2_MODE_MASK) {
+ update_widgets_link(
+ UNLINK,
+ (oldval & SPKR2_MODE_MASK) == 0 ?
+ widget_pwm_spkr2 : widget_spkr2_adder,
+ widget_spkr2,
+ reg, SPKR2_MODE_MASK, value);
+ update_widgets_link(
+ LINK,
+ (value & SPKR2_MODE_MASK) == 0 ?
+ widget_pwm_spkr2 : widget_spkr2_adder,
+ widget_spkr2,
+ DUMMY_REG, 0, 0);
+
+ }
+ break;
+
+ case AB5500_VIRTUAL_REG4:
+ /* configure PWMCTRL_SPKR1, PWMCTRL_SPKR2, etc. */
+ break;
+ default:
+ set_reg(reg, value);
+ }
+ return 0;
+}
+
+static unsigned int ab5500_codec_read_reg(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ return read_reg(reg);
+}
+
+static struct snd_soc_codec_driver ab5500_codec_drv = {
+ .probe = ab5500_codec_probe,
+ .remove = ab5500_codec_remove,
+ .suspend = ab5500_codec_suspend,
+ .resume = ab5500_codec_resume,
+ .read = ab5500_codec_read_reg,
+ .write = ab5500_codec_write_reg,
+};
+
+static inline void init_playback_route(void)
+{
+ /* if0_dld_l -> rx1 -> dac1 -> auxo1 */
+ update_widgets_link(LINK, widget_if0_dld_l, widget_rx1, 0, 0, 0);
+ update_widgets_link(LINK, widget_rx1, widget_dac1, 0, 0, 0);
+ update_widgets_link(LINK, widget_dac1, widget_auxo1, 0, 0, 0);
+
+ /* if0_dld_r -> rx2 -> dac2 -> auxo2 */
+ update_widgets_link(LINK, widget_if0_dld_r, widget_rx2, 0, 0, 0);
+ update_widgets_link(LINK, widget_rx2, widget_dac2, 0, 0, 0);
+ update_widgets_link(LINK, widget_dac2, widget_auxo2, 0, 0, 0);
+
+ /* Earpiece */
+ update_widgets_link(LINK, widget_dac1, widget_ear, 0, 0, 0);
+
+ /* if1_dld_l -> rx3 -> dac3 -> spkr1 */
+ update_widgets_link(LINK, widget_if1_dld_l, widget_rx3, 0, 0, 0);
+ update_widgets_link(LINK, widget_rx3, widget_dac3, 0, 0, 0);
+ update_widgets_link(LINK, widget_dac3, widget_spkr1, 0, 0, 0);
+
+}
+
+static inline void init_capture_route(void)
+{
+ /* mic bias - > mic2 inputs */
+ update_widgets_link(LINK, widget_micbias1, widget_mic2p2, 0, 0, 0);
+ update_widgets_link(LINK, widget_micbias1, widget_mic2n2, 0, 0, 0);
+
+ /* mic2 inputs -> mic2 */
+ update_widgets_link(LINK, widget_mic2p2, widget_mic2, 0, 0, 0);
+ update_widgets_link(LINK, widget_mic2n2, widget_mic2, 0, 0, 0);
+
+ /* mic2 -> adc2 -> tx2 */
+ update_widgets_link(LINK, widget_mic2, widget_adc2, 0, 0, 0);
+ update_widgets_link(LINK, widget_adc2, widget_tx2, 0, 0, 0);
+
+ /* tx2 -> if0_uld_l & if0_uld_r */
+ update_widgets_link(LINK, widget_tx2, widget_if0_uld_l, 0, 0, 0);
+ update_widgets_link(LINK, widget_tx2, widget_if0_uld_r, 0, 0, 0);
+}
+
+static int create_regulators(void)
+{
+ int i, status = 0;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(reg_info); ++i)
+ reg_info[i].consumer = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(reg_info); ++i) {
+ reg_info[i].consumer = regulator_get(ab5500_dev,
+ reg_info[i].supply);
+ if (IS_ERR(reg_info[i].consumer)) {
+ status = PTR_ERR(reg_info[i].consumer);
+ pr_err("%s: ERROR: Failed to get regulator '%s' (ret = %d)!\n",
+ __func__, reg_info[i].supply, status);
+ reg_info[i].consumer = NULL;
+ goto err_get;
+ }
+ }
+
+ return 0;
+
+err_get:
+
+ for (i = 0; i < ARRAY_SIZE(reg_info); ++i) {
+ if (reg_info[i].consumer) {
+ regulator_put(reg_info[i].consumer);
+ reg_info[i].consumer = NULL;
+ }
+ }
+
+ return status;
+}
+
+static int __devinit ab5500_platform_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ u8 reg;
+ struct ab5500_codec_dai_data *codec_drvdata;
+ int status;
+
+ pr_info("%s invoked with pdev = %p.\n", __func__, pdev);
+ ab5500_dev = &pdev->dev;
+
+ status = create_regulators();
+ if (status < 0) {
+ pr_err("%s: ERROR: Failed to instantiate regulators (ret = %d)!\n",
+ __func__, status);
+ return status;
+ }
+ codec_drvdata = kzalloc(sizeof(struct ab5500_codec_dai_data),
+ GFP_KERNEL);
+ if (codec_drvdata == NULL)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, codec_drvdata);
+ ret = snd_soc_register_codec(ab5500_dev, &ab5500_codec_drv,
+ ab5500_dai_drv,
+ ARRAY_SIZE(ab5500_dai_drv));
+	if (ret < 0) {
+		dev_err(ab5500_dev, "%s: Failed to register codec. Error %d.\n",
+			__func__, ret);
+		kfree(codec_drvdata);
+		regulator_bulk_free(ARRAY_SIZE(reg_info), reg_info);
+		return ret;
+	}
+ /* Initialize the codec registers */
+ for (reg = AB5500_FIRST_REG; reg <= AB5500_LAST_REG; reg++)
+ set_reg(reg, 0);
+
+ mask_set_reg(INTERFACE0, I2Sx_TRISTATE_MASK, 1 << I2Sx_TRISTATE_SHIFT);
+ mask_set_reg(INTERFACE1, I2Sx_TRISTATE_MASK, 1 << I2Sx_TRISTATE_SHIFT);
+
+	dev_dbg(ab5500_dev, "Clock Setting ab5500\n");
+ init_playback_route();
+ init_capture_route();
+ memset(&pm_stack, 0, sizeof(pm_stack));
+ return ret;
+}
+
+static int __devexit ab5500_platform_remove(struct platform_device *pdev)
+{
+ pr_info("%s called.\n", __func__);
+ regulator_bulk_free(ARRAY_SIZE(reg_info), reg_info);
+ mask_set_reg(CLOCK, CLOCK_ENABLE_MASK, 0);
+ snd_soc_unregister_codec(ab5500_dev);
+ kfree(platform_get_drvdata(pdev));
+ ab5500_dev = NULL;
+ return 0;
+}
+
+static int ab5500_platform_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ return 0;
+}
+
+static int ab5500_platform_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver ab5500_platform_driver = {
+ .driver = {
+ .name = "ab5500-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_platform_probe,
+	.remove = __devexit_p(ab5500_platform_remove),
+ .suspend = ab5500_platform_suspend,
+ .resume = ab5500_platform_resume,
+};
+
+static int __init ab5500_init(void)
+{
+ int ret;
+
+ pr_info("%s called.\n", __func__);
+
+ /* Register codec platform driver. */
+ ret = platform_driver_register(&ab5500_platform_driver);
+ if (ret) {
+ pr_err("%s: Error %d: Failed to register codec platform "
+ "driver.\n", __func__, ret);
+ }
+ return ret;
+}
+
+static void __exit ab5500_exit(void)
+{
+ pr_info("%s called.\n", __func__);
+ platform_driver_unregister(&ab5500_platform_driver);
+}
+
+module_init(ab5500_init);
+module_exit(ab5500_exit);
+
+MODULE_DESCRIPTION("AB5500 Codec driver");
+MODULE_AUTHOR("Xie Xiaolei <xie.xiaolei@stericsson.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/ab5500.h b/sound/soc/codecs/ab5500.h
new file mode 100644
index 00000000000..bb37798b8c7
--- /dev/null
+++ b/sound/soc/codecs/ab5500.h
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Register definitions for AB5500 codec
+ * Author: Xie Xiaolei <xie.xiaolei@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+#ifndef AB5500_CODEC_REGISTERS_H
+#define AB5500_CODEC_REGISTERS_H
+
+#define AB5500_SUPPORTED_RATE (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+
+#define AB5500_SUPPORTED_FMT (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+
+/* MIC BIAS */
+
+#define MIC_BIAS1 0x00
+#define MIC_BIAS2 0x01
+#define MBIAS2_OUT_V_MASK 0x04
+#define MBIAS2_OUT_V_SHIFT 2
+#define MBIASx_PWR_MASK 0x02
+#define MBIASx_PWR_SHIFT 1
+#define MBIASx_PDN_IMP_MASK 0x01
+#define MBIASx_PDN_IMP_SHIFT 0
+
+#define MIC_BIAS2_VAD 0x02
+#define MBIAS2_R_INT_MASK 0x01
+#define MBIAS2_R_INT_SHIFT 0
+
+/* MIC */
+#define MIC1_GAIN 0x03
+#define MIC2_GAIN 0x04
+#define MICx_GAIN_MASK 0xF0
+#define MICx_GAIN_SHIFT 4
+#define MICx_IN_IMP_MASK 0x0C
+#define MICx_IN_IMP_SHIFT 2
+#define MICx_PWR_MASK 0x01
+#define MICx_PWR_SHIFT 0
+
+#define MIC1_INPUT_SELECT 0x05
+#define MIC2_INPUT_SELECT 0x06
+#define MICxP1_SEL_MASK 0x80
+#define MICxP1_SEL_SHIFT 7
+#define MICxN1_SEL_MASK 0x40
+#define MICxN1_SEL_SHIFT 6
+#define MICxP2_SEL_MASK 0x20
+#define MICxP2_SEL_SHIFT 5
+#define MICxN2_SEL_MASK 0x10
+#define MICxN2_SEL_SHIFT 4
+#define LINEIN_SEL_MASK 0x03
+#define LINEIN_SEL_SHIFT 0
+
+#define MIC1_VMID_SELECT 0x07
+#define MIC2_VMID_SELECT 0x08
+#define VMIDx_ENABLE_MASK 0xC0
+#define VMIDx_ENABLE_SHIFT 6
+#define VMIDx_LINEIN1_N_MASK 0x20
+#define VMIDx_LINEIN1_N_SHIFT 5
+#define VMIDx_LINEIN2_N_MASK 0x10
+#define VMIDx_LINEIN2_N_SHIFT 4
+#define VMIDx_MICxP1_MASK 0x08
+#define VMIDx_MICxP1_SHIFT 3
+#define VMIDx_MICxP2_MASK 0x04
+#define VMIDx_MICxP2_SHIFT 2
+#define VMIDx_MICxN1_MASK 0x02
+#define VMIDx_MICxN1_SHIFT 1
+#define VMIDx_MICxN2_MASK 0x01
+#define VMIDx_MICxN2_SHIFT 0
+
+#define MIC2_TO_MIC1 0x09
+#define MIC2_TO_MIC1_MASK 0x03
+#define MIC2_TO_MIC1_SHIFT 0
+
+/* Analog Loop */
+#define ANALOG_LOOP_PGA1 0x0A
+#define ANALOG_LOOP_PGA2 0x0B
+#define APGAx_GAIN_MASK 0xF8
+#define APGAx_GAIN_SHIFT 3
+#define APGAx_PWR_MASK 0x04
+#define APGAx_PWR_SHIFT 2
+#define APGAx_MUX_MASK 0x03
+#define APGAx_MUX_SHIFT 0
+#define APGAx_MUX_MIC1_MASK 0x01
+#define APGAx_MUX_MIC1_SHIFT 0
+#define APGAx_MUX_MIC2_MASK 0x02
+#define APGAx_MUX_MIC2_SHIFT 1
+
+#define APGA_VMID_SELECT 0x0C
+#define VMID_APGA1_ENABLE_MASK 0xC0
+#define VMID_APGA1_ENABLE_SHIFT 6
+#define VMID_APGA1_LINEIN1_MASK 0x20
+#define VMID_APGA1_LINEIN1_SHIFT 5
+#define VMID_APGA2_ENABLE_MASK 0x0C
+#define VMID_APGA2_ENABLE_SHIFT 2
+#define VMID_APGA2_LINEIN2_MASK 0x02
+#define VMID_APGA2_LINEIN2_SHIFT 1
+
+/* Output Amplifiers */
+#define EAR_PWR 0x0D
+#define EAR_PWR_MODE_MASK 0xC0
+#define EAR_PWR_MODE_SHIFT 6
+#define EAR_PWR_VMID_MASK 0x30
+#define EAR_PWR_VMID_SHIFT 4
+#define EAR_PWR_MASK 0x01
+#define EAR_PWR_SHIFT 0
+
+#define EAR_GAIN 0x0E
+#define EAR_GAIN_MASK 0x1F
+#define EAR_GAIN_SHIFT 0
+
+#define AUXO1 0x0F
+#define AUXO2 0x10
+#define AUXO3 0x11
+#define AUXO4 0x12
+#define AUXOx_PWR_MASK 0x80
+#define AUXOx_PWR_SHIFT 7
+#define AUXOx_INV_MASK 0x40
+#define AUXOx_INV_SHIFT 6
+#define AUXOx_PULLDOWN_MASK 0x20
+#define AUXOx_PULLDOWN_SHIFT 5
+#define AUXOx_GAIN_MASK 0x0F
+#define AUXOx_GAIN_SHIFT 0
+
+#define AUXO12_PWR_MODE 0x13
+#define AUXO34_PWR_MODE 0x14
+#define AUXOxy_PWR_MODE_MASK 0x07
+#define AUXOxy_PWR_MODE_SHIFT 0
+
+#define NEG_CHARGE_PUMP 0x15
+#define NEG_CHARGE_PUMP_MODE_MASK 0x02
+#define NEG_CHARGE_PUMP_MODE_SHIFT 1
+#define NEG_CHARGE_PUMP_PWR_MASK 0x01
+#define NEG_CHARGE_PUMP_PWR_SHIFT 0
+
+#define ENV_THR 0x16
+#define ENV_THR_HIGH_MASK 0xF0
+#define ENV_THR_HIGH_SHIFT 4
+#define ENV_THR_LOW_MASK 0x0F
+#define ENV_THR_LOW_SHIFT 0
+
+#define ENV_DECAY_TIME 0x17
+#define ENV_DECAY_TIME_CP_LV_MASK 0x20
+#define ENV_DECAY_TIME_CP_LV_SHIFT 5
+#define ENV_DECAY_TIME_DET_CP_MASK 0x10
+#define ENV_DECAY_TIME_DET_CP_SHIFT 4
+#define ENV_DECAY_TIME_MASK 0x0F
+#define ENV_DECAY_TIME_SHIFT 0
+
+#define DC_CANCEL 0x18
+#define DC_CANCEL_SPKR2_MASK 0x10
+#define DC_CANCEL_SPKR2_SHIFT 4
+#define DC_CANCEL_SPKR1_MASK 0x08
+#define DC_CANCEL_SPKR1_SHIFT 3
+#define DC_CANCEL_AUXO34_MASK 0x04
+#define DC_CANCEL_AUXO34_SHIFT 2
+#define DC_CANCEL_AUXO12_MASK 0x02
+#define DC_CANCEL_AUXO12_SHIFT 1
+#define DC_CANCEL_OFFSET_CLOCK_MASK 0x01
+#define DC_CANCEL_OFFSET_CLOCK_SHIFT 0
+
+#define SPKR1 0x19
+#define SPKR2 0x1A
+#define SPKRx_PWR_MASK 0xC0
+#define SPKRx_PWR_SHIFT 6
+#define SPKRx_PWR_VBR_VALUE 0x40
+#define SPKRx_PWR_CLS_D_VALUE 0x80
+#define SPKRx_PWR_CLS_AB_VALUE 0xC0
+#define SPKR1_VMID_MASK 0x20
+#define SPKR1_VMID_SHIFT 5
+#define SPKRx_GAIN_MASK 0x1F
+#define SPKRx_GAIN_SHIFT 0
+
+#define SPKR_OVCR 0x1B
+#define SPKR_OVCR_PROT2_MASK 0x80
+#define SPKR_OVCR_PROT2_SHIFT 7
+#define SPKR_OVCR_TRIM2_MASK 0x70
+#define SPKR_OVCR_TRIM2_SHIFT 4
+#define SPKR_OVCR_PROT1_MASK 0x08
+#define SPKR_OVCR_PROT1_SHIFT 3
+#define SPKR_OVCR_TRIM1_MASK 0x07
+#define SPKR_OVCR_TRIM1_SHIFT 0
+
+#define PWMCTRL_SPKR1 0x1C
+#define PWMCTRL_SPKR2 0x1F
+#define PWMCTRL_SPKRx_N1_POL_MASK 0x80
+#define PWMCTRL_SPKRx_N1_POL_SHIFT 7
+#define PWMCTRL_SPKRx_P1_POL_MASK 0x40
+#define PWMCTRL_SPKRx_P1_POL_SHIFT 6
+#define PWMCTRL_SPKRx_MASK 0x04
+#define PWMCTRL_SPKRx_SHIFT 2
+#define PWMCTRL_SPKRxN_MASK 0x02
+#define PWMCTRL_SPKRxN_SHIFT 1
+#define PWMCTRL_SPKRxP_MASK 0x01
+#define PWMCTRL_SPKRxP_SHIFT 0
+
+#define PWM_SPKR1N 0x1D
+#define PWM_SPKR2N 0x20
+#define PWM_SPKR1P 0x1E
+#define PWM_SPKR2P 0x21
+#define PWM_SPKRxy_DUT_CYC_MASK 0xFF
+#define PWM_SPKRxy_DUT_CYC_SHIFT 0
+
+#define SPKR1_CLK_DIV 0x22
+#define SPKR2_CLK_DIV 0x23
+#define SPKRx_CLK_DIV_MASK 0x3F
+#define SPKRx_CLK_DIV_SHIFT 0
+
+#define LINE1 0x24
+#define LINE2 0x25
+#define LINEx_PWR_MASK 0x80
+#define LINEx_PWR_SHIFT 7
+#define LINEx_INV_MASK 0x40
+#define LINEx_INV_SHIFT 6
+#define LINEx_TO_USB_MASK 0x20
+#define LINEx_TO_USB_SHIFT 5
+#define LINEx_VMID_BUFF_MASK 0x10
+#define LINEx_VMID_BUFF_SHIFT 4
+#define LINEx_GAIN_MASK 0x0F
+#define LINEx_GAIN_SHIFT 0
+
+#define USB_AUDIO 0x26
+#define USB_AUDIO_MIC_MUX_MASK 0x03
+#define USB_AUDIO_MIC_MUX_SHIFT 0
+
+#define EAR_ADDER 0x28
+#define AUXO1_ADDER 0x29
+#define AUXO2_ADDER 0x2A
+#define AUXO3_ADDER 0x2B
+#define AUXO4_ADDER 0x2C
+#define SPKR1_ADDER 0x2D
+#define SPKR2_ADDER 0x2E
+#define LINE1_ADDER 0x2F
+#define LINE2_ADDER 0x30
+#define APGA2_TO_X_MASK 0x10
+#define APGA2_TO_X_SHIFT 4
+#define APGA1_TO_X_MASK 0x08
+#define APGA1_TO_X_SHIFT 3
+#define DAC3_TO_X_MASK 0x04
+#define DAC3_TO_X_SHIFT 2
+#define DAC2_TO_X_MASK 0x02
+#define DAC2_TO_X_SHIFT 1
+#define DAC1_TO_X_MASK 0x01
+#define DAC1_TO_X_SHIFT 0
+
+#define EAR_TO_MIC2 0x31
+#define SPKR1_TO_MIC2 0x32
+#define SPKR2_TO_MIC2 0x33
+#define EAR_TO_MIC2_MASK 0x01
+#define EAR_TO_MIC2_SHIFT 0
+
+#define ADC_LOW_PWR 0x35
+#define ADC_LOW_PWR_MASK 0x01
+#define ADC_LOW_PWR_SHIFT 0
+
+#define TX1 0x36
+#define TX2 0x37
+#define TXx_MUX_MASK 0x60
+#define TXx_MUX_SHIFT 5
+#define TXx_FS_MASK 0x10
+#define TXx_FS_SHIFT 4
+#define TXx_HP_FILTER_MASK 0x0C
+#define TXx_HP_FILTER_SHIFT 2
+#define TXx_PWR_MASK 0x02
+#define TXx_PWR_SHIFT 1
+#define ADCx_PWR_MASK 0x01
+#define ADCx_PWR_SHIFT 0
+
+#define RX1 0x38
+#define RX2 0x39
+#define RX3 0x3A
+#define RXx_DATA_MASK 0x70
+#define RXx_DATA_SHIFT 4
+#define RXx_PWR_MASK 0x08
+#define RXx_PWR_SHIFT 3
+#define DACx_PWR_MASK 0x04
+#define DACx_PWR_SHIFT 2
+#define DACx_PWR_MODE_MASK 0x03
+#define DACx_PWR_MODE_SHIFT 0
+
+#define TX_DPGA1 0x3B
+#define TX_DPGA2 0x3C
+#define TX_DPGAx_MASK 0x0F
+#define TX_DPGAx_SHIFT 0
+
+#define RX1_DPGA 0x3D
+#define RX2_DPGA 0x3E
+#define RX3_DPGA 0x3F
+#define RXx_DPGA_MASK 0x7F
+#define RXx_DPGA_SHIFT 0
+
+#define ST1_PGA 0x40
+#define ST2_PGA 0x41
+#define STx_HP_FILTER_MASK 0x60
+#define STx_HP_FILTER_SHIFT 6
+#define STx_MUX_MASK 0x10
+#define STx_MUX_SHIFT 4
+#define STx_PGA_MASK 0x0F
+#define STx_PGA_SHIFT 0
+
+#define CLOCK 0x42
+#define CLOCK_REF_SELECT_MASK 0x02
+#define CLOCK_REF_SELECT_SHIFT 1
+#define CLOCK_ENABLE_MASK 0x01
+#define CLOCK_ENABLE_SHIFT 0
+
+#define INTERFACE0 0x43
+#define INTERFACE1 0x45
+#define I2Sx_WORDLENGTH_MASK 0x40
+#define I2Sx_WORDLENGTH_SHIFT 6
+#define MASTER_GENx_PWR_MASK 0x20
+#define MASTER_GENx_PWR_SHIFT 5
+#define I2Sx_MODE_MASK 0x10
+#define I2Sx_MODE_SHIFT 4
+#define I2Sx_TRISTATE_MASK 0x08
+#define I2Sx_TRISTATE_SHIFT 3
+#define I2Sx_PULLDOWN_MASK 0x04
+#define I2Sx_PULLDOWN_SHIFT 2
+#define I2Sx_SR_MASK 0x03
+#define I2Sx_SR_SHIFT 0
+#define I2Sx_SR_8000Hz 0
+#define I2Sx_SR_16000Hz 1
+#define I2Sx_SR_44100Hz 2
+#define I2Sx_SR_48000Hz 3
+
+#define INTERFACE0_ULD 0x44
+#define INTERFACE1_ULD 0x46
+#define I2Sx_ULD_R_MASK 0x70
+#define I2Sx_ULD_R_SHIFT 4
+#define I2Sx_ULD_L_MASK 0x07
+#define I2Sx_ULD_L_SHIFT 0
+
+#define INTERFACE_SWAP 0x47
+#define IO_SWAP0_MASK 0x02
+#define IO_SWAP0_SHIFT 1
+#define IO_SWAP1_MASK 0x01
+#define IO_SWAP1_SHIFT 0
+
+#define AB5500_FIRST_REG MIC_BIAS1
+#define AB5500_LAST_REG INTERFACE_SWAP
+
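+/*
+ * Virtual registers: driver-internal bookkeeping slots placed after the
+ * last hardware register.  The codec driver uses them to back mixer
+ * controls and widget power state that have no single hardware register,
+ * e.g. the interface/mic power bits and the speaker mode and PWM power
+ * settings below.
+ */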
+#define AB5500_VIRTUAL_REG1 (AB5500_LAST_REG + 1)
+#define IF0_DLD_L_PW_SHIFT 0
+#define IF0_DLD_R_PW_SHIFT 1
+#define IF0_ULD_L_PW_SHIFT 2
+#define IF0_ULD_R_PW_SHIFT 3
+#define IF1_DLD_L_PW_SHIFT 4
+#define IF1_DLD_R_PW_SHIFT 5
+#define IF1_ULD_L_PW_SHIFT 6
+#define IF1_ULD_R_PW_SHIFT 7
+
+#define AB5500_VIRTUAL_REG2 (AB5500_LAST_REG + 2)
+#define MIC1P1_PW_SHIFT 0
+#define MIC1N1_PW_SHIFT 1
+#define MIC1P2_PW_SHIFT 2
+#define MIC1N2_PW_SHIFT 3
+#define MIC2P1_PW_SHIFT 4
+#define MIC2N1_PW_SHIFT 5
+#define MIC2P2_PW_SHIFT 6
+#define MIC2N2_PW_SHIFT 7
+
+#define AB5500_VIRTUAL_REG3 (AB5500_LAST_REG + 3)
+#define SPKR1_MODE_MASK 0x03
+#define SPKR1_MODE_SHIFT 0
+#define SPKR1_MODE_VBR_VALUE 0
+#define SPKR1_MODE_CLS_D_VALUE 1
+#define SPKR1_MODE_CLS_AB_VALUE 2
+#define SPKR1_ADDER_PWR_SHIFT 2
+#define SPKR1_PWR_SHIFT 3
+#define SPKR2_MODE_MASK 0x10
+#define SPKR2_MODE_SHIFT 4
+#define SPKR2_MODE_VBR_VALUE 0
+#define SPKR2_MODE_CLS_D_VALUE 1
+#define SPKR2_ADDER_PWR_SHIFT 5
+#define SPKR2_PWR_SHIFT 6
+
+#define AB5500_VIRTUAL_REG4 (AB5500_LAST_REG + 4)
+#define PWM_SPKR1_PWR_SHIFT 0
+#define PWM_SPKR2_PWR_SHIFT 1
+#define PWM_SPKR1N_PWR_SHIFT 2
+#define PWM_SPKR1P_PWR_SHIFT 3
+#define PWM_SPKR2N_PWR_SHIFT 4
+#define PWM_SPKR2P_PWR_SHIFT 5
+
+#define AB5500_VIRTUAL_REG5 (AB5500_LAST_REG + 5)
+#define PWM_SPKR1N_SEL_SHIFT 0
+#define PWM_SPKR1P_SEL_SHIFT 1
+#define PWM_SPKR2N_SEL_SHIFT 2
+#define PWM_SPKR2P_SEL_SHIFT 3
+
+#define DUMMY_REG 0xff
+
+/* #define SPKR1_PWR_VBR_SHIFT 0 */
+/* #define SPKR1_PWR_CLS_D_SHIFT 1 */
+/* #define SPKR1_PWR_CLS_AB_SHIFT 2 */
+/* #define SPKR2_PWR_VBR_SHIFT 3 */
+/* #define SPKR2_PWR_CLS_D_SHIFT 4 */
+/* #define SPKR2_PWR_CLS_AB_SHIFT 5 */
+
+#endif
diff --git a/sound/soc/codecs/ab8500_audio.c b/sound/soc/codecs/ab8500_audio.c
new file mode 100644
index 00000000000..4764dd38089
--- /dev/null
+++ b/sound/soc/codecs/ab8500_audio.c
@@ -0,0 +1,2960 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Mikko J. Lehto <mikko.lehto@symbio.com>,
+ * Mikko Sarmanne <mikko.sarmanne@symbio.com>,
+ * Jarmo K. Kuronen <jarmo.kuronen@symbio.com>,
+ * Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Kristoffer Karlsson <kristoffer.karlsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab8500-sysctrl.h>
+#include "ab8500_audio.h"
+
+/* To convert register definition shifts to masks */
+#define BMASK(bsft) (1 << (bsft))
+
+/* Macrocell value definitions */
+#define CLK_32K_OUT2_DISABLE 0x01
+#define INACTIVE_RESET_AUDIO 0x02
+#define ENABLE_AUDIO_CLK_TO_AUDIO_BLK 0x10
+#define ENABLE_VINTCORE12_SUPPLY 0x04
+#define GPIO27_DIR_OUTPUT 0x04
+#define GPIO29_DIR_OUTPUT 0x10
+#define GPIO31_DIR_OUTPUT 0x40
+#define GPIO35_DIR_OUTPUT 0x04
+
+/* Macrocell register definitions */
+#define AB8500_CTRL3_REG 0x0200
+#define AB8500_GPIO_DIR4_REG 0x1013
+#define AB8500_GPIO_DIR5_REG 0x1014
+#define AB8500_GPIO_OUT5_REG 0x1024
+
+/* Nr of FIR/IIR-coeff banks in ANC-block */
+#define AB8500_NR_OF_ANC_COEFF_BANKS 2
+
+/*
+ * Macros to simplify implementation of register write sequences and
+ * error handling.
+ */
+#define AB8500_SET_BIT_LOCKED(xreg, xbit, xerr, xerr_hdl) { \
+ xerr = ab8500_codec_update_reg_audio_locked(ab8500_codec, \
+ xreg, REG_MASK_NONE, BMASK(xbit)); \
+ if (xerr < 0) \
+ goto xerr_hdl; }
+#define AB8500_CLEAR_BIT_LOCKED(xreg, xbit, xerr, xerr_hdl) { \
+ xerr = ab8500_codec_update_reg_audio_locked(ab8500_codec, \
+ xreg, BMASK(xbit), REG_MASK_NONE); \
+ if (xerr < 0) \
+ goto xerr_hdl; }
+#define AB8500_WRITE(xreg, xvalue, xerr, xerr_hdl) { \
+ xerr = ab8500_codec_write_reg_audio(ab8500_codec, xreg, xvalue); \
+ if (xerr < 0) \
+ goto xerr_hdl; }
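+
+/*
+ * Usage sketch: each macro performs one locked register update and jumps
+ * to the caller-supplied error label on failure, e.g.
+ *
+ *	AB8500_SET_BIT_LOCKED(REG_ANACONF4, REG_ANACONF4_ENHSL, err, cleanup);
+ *
+ * where "err" is the caller's int status and "cleanup" a local label.
+ */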
+
+/*
+ * AB8500 register cache & default register settings
+ */
+static const u8 ab8500_reg_cache[AB8500_CACHEREGNUM] = {
+ 0x00, /* REG_POWERUP (0x00) */
+ 0x00, /* REG_AUDSWRESET (0x01) */
+ 0x00, /* REG_ADPATHENA (0x02) */
+ 0x00, /* REG_DAPATHENA (0x03) */
+ 0x00, /* REG_ANACONF1 (0x04) */
+ 0x0F, /* REG_ANACONF2 (0x05) */
+ 0x00, /* REG_DIGMICCONF (0x06) */
+ 0x00, /* REG_ANACONF3 (0x07) */
+ 0x00, /* REG_ANACONF4 (0x08) */
+ 0x00, /* REG_DAPATHCONF (0x09) */
+ 0x40, /* REG_MUTECONF (0x0A) */
+ 0x00, /* REG_SHORTCIRCONF (0x0B) */
+ 0x01, /* REG_ANACONF5 (0x0C) */
+ 0x00, /* REG_ENVCPCONF (0x0D) */
+ 0x00, /* REG_SIGENVCONF (0x0E) */
+ 0x3F, /* REG_PWMGENCONF1 (0x0F) */
+ 0x32, /* REG_PWMGENCONF2 (0x10) */
+ 0x32, /* REG_PWMGENCONF3 (0x11) */
+ 0x32, /* REG_PWMGENCONF4 (0x12) */
+ 0x32, /* REG_PWMGENCONF5 (0x13) */
+ 0x0F, /* REG_ANAGAIN1 (0x14) */
+ 0x0F, /* REG_ANAGAIN2 (0x15) */
+ 0x22, /* REG_ANAGAIN3 (0x16) */
+ 0x55, /* REG_ANAGAIN4 (0x17) */
+ 0x13, /* REG_DIGLINHSLGAIN (0x18) */
+ 0x13, /* REG_DIGLINHSRGAIN (0x19) */
+ 0x00, /* REG_ADFILTCONF (0x1A) */
+ 0x00, /* REG_DIGIFCONF1 (0x1B) */
+ 0x02, /* REG_DIGIFCONF2 (0x1C) */
+ 0x00, /* REG_DIGIFCONF3 (0x1D) */
+ 0x02, /* REG_DIGIFCONF4 (0x1E) */
+	0xCC, /* REG_ADSLOTSEL1 (0x1F) */
+	0xCC, /* REG_ADSLOTSEL2 (0x20) */
+	0xCC, /* REG_ADSLOTSEL3 (0x21) */
+	0xCC, /* REG_ADSLOTSEL4 (0x22) */
+	0xCC, /* REG_ADSLOTSEL5 (0x23) */
+	0xCC, /* REG_ADSLOTSEL6 (0x24) */
+	0xCC, /* REG_ADSLOTSEL7 (0x25) */
+	0xCC, /* REG_ADSLOTSEL8 (0x26) */
+	0xCC, /* REG_ADSLOTSEL9 (0x27) */
+	0xCC, /* REG_ADSLOTSEL10 (0x28) */
+	0xCC, /* REG_ADSLOTSEL11 (0x29) */
+	0xCC, /* REG_ADSLOTSEL12 (0x2A) */
+	0xCC, /* REG_ADSLOTSEL13 (0x2B) */
+	0xCC, /* REG_ADSLOTSEL14 (0x2C) */
+	0xCC, /* REG_ADSLOTSEL15 (0x2D) */
+	0xCC, /* REG_ADSLOTSEL16 (0x2E) */
+ 0x00, /* REG_ADSLOTHIZCTRL1 (0x2F) */
+ 0x00, /* REG_ADSLOTHIZCTRL2 (0x30) */
+ 0x00, /* REG_ADSLOTHIZCTRL3 (0x31) */
+ 0x00, /* REG_ADSLOTHIZCTRL4 (0x32) */
+ 0x08, /* REG_DASLOTCONF1 (0x33) */
+ 0x08, /* REG_DASLOTCONF2 (0x34) */
+ 0x08, /* REG_DASLOTCONF3 (0x35) */
+ 0x08, /* REG_DASLOTCONF4 (0x36) */
+ 0x08, /* REG_DASLOTCONF5 (0x37) */
+ 0x08, /* REG_DASLOTCONF6 (0x38) */
+ 0x08, /* REG_DASLOTCONF7 (0x39) */
+ 0x08, /* REG_DASLOTCONF8 (0x3A) */
+ 0x00, /* REG_CLASSDCONF1 (0x3B) */
+ 0x00, /* REG_CLASSDCONF2 (0x3C) */
+ 0x84, /* REG_CLASSDCONF3 (0x3D) */
+ 0x00, /* REG_DMICFILTCONF (0x3E) */
+ 0xFE, /* REG_DIGMULTCONF1 (0x3F) */
+ 0xC0, /* REG_DIGMULTCONF2 (0x40) */
+ 0x3F, /* REG_ADDIGGAIN1 (0x41) */
+ 0x3F, /* REG_ADDIGGAIN2 (0x42) */
+ 0x1F, /* REG_ADDIGGAIN3 (0x43) */
+ 0x1F, /* REG_ADDIGGAIN4 (0x44) */
+ 0x3F, /* REG_ADDIGGAIN5 (0x45) */
+ 0x3F, /* REG_ADDIGGAIN6 (0x46) */
+ 0x1F, /* REG_DADIGGAIN1 (0x47) */
+ 0x1F, /* REG_DADIGGAIN2 (0x48) */
+ 0x3F, /* REG_DADIGGAIN3 (0x49) */
+ 0x3F, /* REG_DADIGGAIN4 (0x4A) */
+ 0x3F, /* REG_DADIGGAIN5 (0x4B) */
+ 0x3F, /* REG_DADIGGAIN6 (0x4C) */
+ 0x3F, /* REG_ADDIGLOOPGAIN1 (0x4D) */
+ 0x3F, /* REG_ADDIGLOOPGAIN2 (0x4E) */
+ 0x00, /* REG_HSLEARDIGGAIN (0x4F) */
+ 0x00, /* REG_HSRDIGGAIN (0x50) */
+ 0x1F, /* REG_SIDFIRGAIN1 (0x51) */
+ 0x1F, /* REG_SIDFIRGAIN2 (0x52) */
+ 0x00, /* REG_ANCCONF1 (0x53) */
+ 0x00, /* REG_ANCCONF2 (0x54) */
+ 0x00, /* REG_ANCCONF3 (0x55) */
+ 0x00, /* REG_ANCCONF4 (0x56) */
+ 0x00, /* REG_ANCCONF5 (0x57) */
+ 0x00, /* REG_ANCCONF6 (0x58) */
+ 0x00, /* REG_ANCCONF7 (0x59) */
+ 0x00, /* REG_ANCCONF8 (0x5A) */
+ 0x00, /* REG_ANCCONF9 (0x5B) */
+ 0x00, /* REG_ANCCONF10 (0x5C) */
+ 0x00, /* REG_ANCCONF11 (0x5D) - read only */
+ 0x00, /* REG_ANCCONF12 (0x5E) - read only */
+ 0x00, /* REG_ANCCONF13 (0x5F) - read only */
+ 0x00, /* REG_ANCCONF14 (0x60) - read only */
+ 0x00, /* REG_SIDFIRADR (0x61) */
+ 0x00, /* REG_SIDFIRCOEF1 (0x62) */
+ 0x00, /* REG_SIDFIRCOEF2 (0x63) */
+ 0x00, /* REG_SIDFIRCONF (0x64) */
+ 0x00, /* REG_AUDINTMASK1 (0x65) */
+ 0x00, /* REG_AUDINTSOURCE1 (0x66) - read only */
+ 0x00, /* REG_AUDINTMASK2 (0x67) */
+ 0x00, /* REG_AUDINTSOURCE2 (0x68) - read only */
+ 0x00, /* REG_FIFOCONF1 (0x69) */
+ 0x00, /* REG_FIFOCONF2 (0x6A) */
+ 0x00, /* REG_FIFOCONF3 (0x6B) */
+ 0x00, /* REG_FIFOCONF4 (0x6C) */
+ 0x00, /* REG_FIFOCONF5 (0x6D) */
+ 0x00, /* REG_FIFOCONF6 (0x6E) */
+ 0x02, /* REG_AUDREV (0x6F) - read only */
+};
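+
+/*
+ * The cache above is indexed directly by register address (see
+ * ab8500_codec_read_reg_audio()), so it must cover every register from
+ * REG_POWERUP (0x00) up to REG_AUDREV (0x6F).
+ */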
+
+static struct snd_soc_codec *ab8500_codec;
+
+/* ADCM */
+static const u8 ADCM_ANACONF5_MASK = BMASK(REG_ANACONF5_ENCPHS);
+static const u8 ADCM_MUTECONF_MASK = BMASK(REG_MUTECONF_MUTHSL) |
+ BMASK(REG_MUTECONF_MUTHSR);
+static const u8 ADCM_ANACONF4_MASK = BMASK(REG_ANACONF4_ENHSL) |
+ BMASK(REG_ANACONF4_ENHSR);
+static unsigned int adcm_anaconf5, adcm_muteconf, adcm_anaconf4;
+static int adcm = AB8500_AUDIO_ADCM_NORMAL;
+
+/* Signed multi register array controls. */
+struct soc_smra_control {
+ unsigned int *reg;
+ const unsigned int rcount, count, invert;
+ long min, max;
+ const char **texts;
+ long *values;
+};
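+
+/*
+ * A signed control value may span several 8-bit registers: the
+ * snd_soc_get_smr()/snd_soc_put_smr() helpers below assemble rcount
+ * registers MSB first, mask with abs(min) | abs(max) and sign-extend when
+ * the result exceeds max.  For example, rcount = 2 with min = -32768 and
+ * max = 32767 yields one 16-bit two's complement value.
+ */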
+
+/* ANC FIR- & IIR-coeff caches */
+static long anc_fir_cache[REG_ANC_FIR_COEFFS];
+static long anc_iir_cache[REG_ANC_IIR_COEFFS];
+
+/* ANC states */
+enum anc_states {
+ ANC_UNCONFIGURED = 0,
+ ANC_CONFIGURE_FIR_IIR = 1,
+ ANC_FIR_IIR_CONFIGURED = 2,
+ ANC_CONFIGURE_FIR = 3,
+ ANC_FIR_CONFIGURED = 4,
+ ANC_CONFIGURE_IIR = 5,
+ ANC_IIR_CONFIGURED = 6,
+ ANC_ERROR = 7
+};
+static int ab8500_anc_status = ANC_UNCONFIGURED;
+
+/* ANC configuration lock */
+static DEFINE_MUTEX(ab8500_anc_conf_lock);
+
+/* Reads an arbitrary register from the ab8500 chip.
+ */
+static int ab8500_codec_read_reg(struct snd_soc_codec *codec, unsigned int bank,
+ unsigned int reg)
+{
+ u8 value;
+ int status = abx500_get_register_interruptible(
+ codec->dev, bank, reg, &value);
+
+ if (status < 0) {
+ pr_err("%s: Register (%02x:%02x) read failed (%d).\n",
+ __func__, (u8)bank, (u8)reg, status);
+ } else {
+ pr_debug("Read 0x%02x from register %02x:%02x\n",
+ (u8)value, (u8)bank, (u8)reg);
+ status = value;
+ }
+
+ return status;
+}
+
+/* Writes an arbitrary register to the ab8500 chip.
+ */
+static int ab8500_codec_write_reg(struct snd_soc_codec *codec, unsigned int bank,
+ unsigned int reg, unsigned int value)
+{
+ int status = abx500_set_register_interruptible(
+ codec->dev, bank, reg, value);
+
+ if (status < 0) {
+ pr_err("%s: Register (%02x:%02x) write failed (%d).\n",
+ __func__, (u8)bank, (u8)reg, status);
+ } else {
+ pr_debug("Wrote 0x%02x into register %02x:%02x\n",
+ (u8)value, (u8)bank, (u8)reg);
+ }
+
+ return status;
+}
+
+/* Reads an audio register from the cache.
+ */
+static unsigned int ab8500_codec_read_reg_audio(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u8 *cache = codec->reg_cache;
+ return cache[reg];
+}
+
+/* Reads an audio register from the hardware.
+ */
+static int ab8500_codec_read_reg_audio_nocache(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u8 *cache = codec->reg_cache;
+ int value = ab8500_codec_read_reg(codec, AB8500_AUDIO, reg);
+
+ if (value >= 0)
+ cache[reg] = value;
+
+ return value;
+}
+
+/* Writes an audio register to the hardware and cache.
+ */
+static int ab8500_codec_write_reg_audio(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int value)
+{
+ u8 *cache = codec->reg_cache;
+ int status = ab8500_codec_write_reg(codec, AB8500_AUDIO, reg, value);
+
+ if (status >= 0)
+ cache[reg] = value;
+
+ return status;
+}
+
+/* Dumps all audio registers.
+ */
+static inline void ab8500_codec_dump_all_reg(struct snd_soc_codec *codec)
+{
+ int i;
+
+ pr_debug("%s Enter.\n", __func__);
+
+ for (i = AB8500_FIRST_REG; i <= AB8500_LAST_REG; i++)
+ ab8500_codec_read_reg_audio_nocache(codec, i);
+}
+
+/*
+ * Updates an audio register.
+ *
+ * Returns 1 for change, 0 for no change, or negative error code.
+ */
+static inline int ab8500_codec_update_reg_audio(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int clr, unsigned int ins)
+{
+ unsigned int new, old;
+ int ret;
+
+ old = ab8500_codec_read_reg_audio(codec, reg);
+ new = (old & ~clr) | ins;
+ if (old == new)
+ return 0;
+
+ ret = ab8500_codec_write_reg_audio(codec, reg, new);
+
+ return (ret < 0) ? ret : 1;
+}
+
+/*
+ * Updates an audio register, and takes the codec mutex.
+ *
+ * Returns 1 for change, 0 for no change, or negative error code.
+ */
+static int ab8500_codec_update_reg_audio_locked(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int clr, unsigned int ins)
+{
+ int ret;
+
+ mutex_lock(&codec->mutex);
+ ret = ab8500_codec_update_reg_audio(codec, reg, clr, ins);
+ mutex_unlock(&codec->mutex);
+
+ return ret;
+}
+
+/* Generic soc info for signed register controls. */
+int snd_soc_info_s(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_smra_control *smra =
+ (struct soc_smra_control *)kcontrol->private_value;
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = smra->count;
+ uinfo->value.integer.min = smra->min;
+ uinfo->value.integer.max = smra->max;
+
+ return 0;
+}
+
+/* Generic soc get for signed multi register controls. */
+int snd_soc_get_smr(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct soc_smra_control *smra =
+ (struct soc_smra_control *)kcontrol->private_value;
+ unsigned int *reg = smra->reg;
+ unsigned int rcount = smra->rcount;
+ long min = smra->min;
+ long max = smra->max;
+ unsigned int invert = smra->invert;
+ unsigned long mask = abs(min) | abs(max);
+ long value = 0;
+ int i, rvalue;
+
+ for (i = 0; i < rcount; i++) {
+ rvalue = snd_soc_read(codec, reg[i]) & REG_MASK_ALL;
+ value |= rvalue << (8 * (rcount - i - 1));
+ }
+ value &= mask;
+ if (min < 0 && value > max)
+ value |= ~mask;
+ if (invert)
+ value = ~value;
+ ucontrol->value.integer.value[0] = value;
+
+ return 0;
+}
+
+/* Generic soc put for signed multi register controls. */
+int snd_soc_put_smr(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct soc_smra_control *smra =
+ (struct soc_smra_control *)kcontrol->private_value;
+ unsigned int *reg = smra->reg;
+ unsigned int rcount = smra->rcount;
+ long min = smra->min;
+ long max = smra->max;
+ unsigned int invert = smra->invert;
+ unsigned long mask = abs(min) | abs(max);
+ long value = ucontrol->value.integer.value[0];
+ int i, rvalue, err;
+
+ if (invert)
+ value = ~value;
+ if (value > max)
+ value = max;
+ else if (value < min)
+ value = min;
+ value &= mask;
+ for (i = 0; i < rcount; i++) {
+ rvalue = (value >> (8 * (rcount - i - 1))) & REG_MASK_ALL;
+ err = snd_soc_write(codec, reg[i], rvalue);
+		if (err < 0)
+			return err;
+ }
+
+ return 1;
+}
+
+/* Generic soc get for signed array controls. */
+static int snd_soc_get_sa(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_smra_control *smra =
+ (struct soc_smra_control *)kcontrol->private_value;
+ long *values = smra->values;
+ unsigned int count = smra->count;
+ unsigned int idx;
+
+ for (idx = 0; idx < count; idx++)
+ ucontrol->value.integer.value[idx] = values[idx];
+
+ return 0;
+}
+
+/* Generic soc put for signed array controls. */
+static int snd_soc_put_sa(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_smra_control *smra =
+ (struct soc_smra_control *) kcontrol->private_value;
+ long *values = smra->values;
+ unsigned int count = smra->count;
+ long min = smra->min;
+ long max = smra->max;
+ unsigned int idx;
+ long value;
+
+ for (idx = 0; idx < count; idx++) {
+ value = ucontrol->value.integer.value[idx];
+ if (value > max)
+ value = max;
+ else if (value < min)
+ value = min;
+ values[idx] = value;
+ }
+
+ return 0;
+}
+
+/* Generic soc get for enum strobe controls. */
+int snd_soc_get_enum_strobe(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int reg = e->reg;
+ unsigned int bit = e->shift_l;
+ unsigned int invert = e->shift_r != 0;
+ unsigned int value = snd_soc_read(codec, reg) & BMASK(bit);
+
+ if (bit != 0 && value != 0)
+ value = value >> bit;
+ ucontrol->value.enumerated.item[0] = value ^ invert;
+
+ return 0;
+}
+
+/* Generic soc put for enum strobe controls. */
+int snd_soc_put_enum_strobe(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int reg = e->reg;
+ unsigned int bit = e->shift_l;
+ unsigned int invert = e->shift_r != 0;
+ unsigned int strobe = ucontrol->value.enumerated.item[0] != 0;
+ unsigned int clear_mask = (strobe ^ invert) ? REG_MASK_NONE : BMASK(bit);
+ unsigned int set_mask = (strobe ^ invert) ? BMASK(bit) : REG_MASK_NONE;
+
+ if (snd_soc_update_bits(codec, reg, clear_mask, set_mask) == 0)
+ return 0;
+ return snd_soc_update_bits(codec, reg, set_mask, clear_mask);
+}
+
+static const char * const enum_ena_dis[] = {"Enabled", "Disabled"};
+static const char * const enum_dis_ena[] = {"Disabled", "Enabled"};
+static const char * const enum_rdy_apl[] = {"Ready", "Apply"};
+
+/* Controls - DAPM */
+
+/* Inverted order - Ascending/Descending */
+enum control_inversion {
+ NORMAL = 0,
+ INVERT = 1
+};
+
+/* Headset */
+
+/* Headset Left - Enable/Disable */
+static const struct soc_enum enum_headset_left = SOC_ENUM_SINGLE(0, 0, 2, enum_ena_dis);
+static const struct snd_kcontrol_new dapm_headset_left_mux =
+ SOC_DAPM_ENUM_VIRT("Headset Left", enum_headset_left);
+
+/* Headset Right - Enable/Disable */
+static const struct soc_enum enum_headset_right = SOC_ENUM_SINGLE(0, 0, 2, enum_ena_dis);
+static const struct snd_kcontrol_new dapm_headset_right_mux =
+ SOC_DAPM_ENUM_VIRT("Headset Right", enum_headset_right);
+
+/* Earpiece */
+
+/* Earpiece - Mute */
+static const struct snd_kcontrol_new dapm_ear_mute[] = {
+ SOC_DAPM_SINGLE("Playback Switch", REG_MUTECONF, REG_MUTECONF_MUTEAR, 1, INVERT),
+};
+
+/* Earpiece source selector */
+static const char * const enum_ear_lineout_source[] = {"Headset Left", "IHF Left"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ear_lineout_source, REG_DMICFILTCONF,
+ REG_DMICFILTCONF_DA3TOEAR, enum_ear_lineout_source);
+static const struct snd_kcontrol_new dapm_ear_lineout_source[] = {
+ SOC_DAPM_ENUM("Earpiece or LineOut Mono Source", dapm_enum_ear_lineout_source),
+};
+
+/* LineOut */
+
+/* LineOut source selector */
+static const char * const enum_lineout_source[] = {"Mono Path", "Stereo Path"};
+static SOC_ENUM_DOUBLE_DECL(dapm_enum_lineout_source, REG_ANACONF5,
+ REG_ANACONF5_HSLDACTOLOL, REG_ANACONF5_HSRDACTOLOR, enum_lineout_source);
+static const struct snd_kcontrol_new dapm_lineout_source[] = {
+ SOC_DAPM_ENUM("LineOut Source", dapm_enum_lineout_source),
+};
+
+/* LineOut */
+
+/* LineOut Left - Enable/Disable */
+static const struct soc_enum enum_lineout_left = SOC_ENUM_SINGLE(0, 0, 2, enum_dis_ena);
+static const struct snd_kcontrol_new dapm_lineout_left_mux =
+ SOC_DAPM_ENUM_VIRT("LineOut Left", enum_lineout_left);
+
+/* LineOut Right - Enable/Disable */
+static const struct soc_enum enum_lineout_right = SOC_ENUM_SINGLE(0, 0, 2, enum_dis_ena);
+static const struct snd_kcontrol_new dapm_lineout_right_mux =
+ SOC_DAPM_ENUM_VIRT("LineOut Right", enum_lineout_right);
+
+/* LineOut/IHF - Select */
+static const char * const enum_ihf_or_lineout_select_sel[] = {"IHF", "LineOut"};
+static const struct soc_enum enum_ihf_or_lineout_select = SOC_ENUM_SINGLE(0, 0, 2, enum_ihf_or_lineout_select_sel);
+static const struct snd_kcontrol_new dapm_ihf_or_lineout_select_mux =
+ SOC_DAPM_ENUM_VIRT("IHF or LineOut Select", enum_ihf_or_lineout_select);
+
+/* IHF */
+
+/* IHF - Enable/Disable */
+static const struct soc_enum enum_ihf_left = SOC_ENUM_SINGLE(0, 0, 2, enum_dis_ena);
+static const struct snd_kcontrol_new dapm_ihf_left_mux =
+ SOC_DAPM_ENUM_VIRT("IHF Left", enum_ihf_left);
+
+static const struct soc_enum enum_ihf_right = SOC_ENUM_SINGLE(0, 0, 2, enum_dis_ena);
+static const struct snd_kcontrol_new dapm_ihf_right_mux =
+ SOC_DAPM_ENUM_VIRT("IHF Right", enum_ihf_right);
+
+/* IHF left - ANC selector */
+static const char * const enum_ihfx_sel[] = {"Audio Path", "ANC"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ihfl_sel, REG_DIGMULTCONF2,
+ REG_DIGMULTCONF2_HFLSEL, enum_ihfx_sel);
+static const struct snd_kcontrol_new dapm_ihfl_select[] = {
+ SOC_DAPM_ENUM("IHF Left Source", dapm_enum_ihfl_sel),
+};
+
+/* IHF right - ANC selector */
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ihfr_sel, REG_DIGMULTCONF2,
+ REG_DIGMULTCONF2_HFRSEL, enum_ihfx_sel);
+static const struct snd_kcontrol_new dapm_ihfr_select[] = {
+ SOC_DAPM_ENUM("IHF Right Source", dapm_enum_ihfr_sel),
+};
+
+/* Mic 1 */
+
+/* Mic 1 - Mute */
+static const struct snd_kcontrol_new dapm_mic1_mute[] = {
+ SOC_DAPM_SINGLE("Capture Switch", REG_ANACONF2, REG_ANACONF2_MUTMIC1, 1, INVERT),
+};
+
+/* Mic 1 - Mic 1A or 1B selector */
+static const char * const enum_mic1ab_sel[] = {"Mic 1A", "Mic 1B"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_mic1ab_sel, REG_ANACONF3,
+ REG_ANACONF3_MIC1SEL, enum_mic1ab_sel);
+static const struct snd_kcontrol_new dapm_mic1ab_select[] = {
+ SOC_DAPM_ENUM("Mic 1A or 1B Select", dapm_enum_mic1ab_sel),
+};
+
+/* Mic 1 - AD3 - Mic 1 or DMic 3 selector */
+static const char * const enum_ad3_sel[] = {"Mic 1", "DMic 3"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ad3_sel, REG_DIGMULTCONF1,
+ REG_DIGMULTCONF1_AD3SEL, enum_ad3_sel);
+static const struct snd_kcontrol_new dapm_ad3_select[] = {
+ SOC_DAPM_ENUM("AD 3 Select", dapm_enum_ad3_sel),
+};
+
+/* Mic 1 - AD6 - Mic 1 or DMic 6 selector */
+static const char * const enum_ad6_sel[] = {"Mic 1", "DMic 6"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ad6_sel, REG_DIGMULTCONF1,
+ REG_DIGMULTCONF1_AD6SEL, enum_ad6_sel);
+static const struct snd_kcontrol_new dapm_ad6_select[] = {
+ SOC_DAPM_ENUM("AD 6 Select", dapm_enum_ad6_sel),
+};
+
+/* Mic 2 */
+
+/* Mic 2 - Mute */
+static const struct snd_kcontrol_new dapm_mic2_mute[] = {
+ SOC_DAPM_SINGLE("Capture Switch", REG_ANACONF2, REG_ANACONF2_MUTMIC2, 1, INVERT),
+};
+
+/* Mic 2 - AD5 - Mic 2 or DMic 5 selector */
+static const char * const enum_ad5_sel[] = {"Mic 2", "DMic 5"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ad5_sel, REG_DIGMULTCONF1,
+ REG_DIGMULTCONF1_AD5SEL, enum_ad5_sel);
+static const struct snd_kcontrol_new dapm_ad5_select[] = {
+ SOC_DAPM_ENUM("AD 5 Select", dapm_enum_ad5_sel),
+};
+
+/* LineIn */
+
+/* LineIn left - Mute */
+static const struct snd_kcontrol_new dapm_linl_mute[] = {
+ SOC_DAPM_SINGLE("Capture Switch", REG_ANACONF2, REG_ANACONF2_MUTLINL, 1, INVERT),
+};
+
+/* LineIn left - AD1 - LineIn Left or DMic 1 selector */
+static const char * const enum_ad1_sel[] = {"LineIn Left", "DMic 1"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ad1_sel, REG_DIGMULTCONF1,
+ REG_DIGMULTCONF1_AD1SEL, enum_ad1_sel);
+static const struct snd_kcontrol_new dapm_ad1_select[] = {
+ SOC_DAPM_ENUM("AD 1 Select", dapm_enum_ad1_sel),
+};
+
+/* LineIn right - Mute */
+static const struct snd_kcontrol_new dapm_linr_mute[] = {
+ SOC_DAPM_SINGLE("Capture Switch", REG_ANACONF2, REG_ANACONF2_MUTLINR, 1, INVERT),
+};
+
+/* LineIn right - Mic 2 or LineIn Right selector */
+static const char * const enum_mic2lr_sel[] = {"Mic 2", "LineIn Right"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_mic2lr_sel, REG_ANACONF3,
+ REG_ANACONF3_LINRSEL, enum_mic2lr_sel);
+static const struct snd_kcontrol_new dapm_mic2lr_select[] = {
+ SOC_DAPM_ENUM("Mic 2 or LINR Select", dapm_enum_mic2lr_sel),
+};
+
+/* LineIn right - AD2 - LineIn Right or DMic2 selector */
+static const char * const enum_ad2_sel[] = {"LineIn Right", "DMic 2"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ad2_sel, REG_DIGMULTCONF1,
+ REG_DIGMULTCONF1_AD2SEL, enum_ad2_sel);
+static const struct snd_kcontrol_new dapm_ad2_select[] = {
+ SOC_DAPM_ENUM("AD 2 Select", dapm_enum_ad2_sel),
+};
+
+/* DMic */
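+/*
+ * These "mute" switches use NORMAL polarity on the ENDMICx enable bits,
+ * so switching one off powers the corresponding digital microphone down.
+ */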
+
+/* DMic 1 - Mute */
+static const struct snd_kcontrol_new dapm_dmic1_mute[] = {
+ SOC_DAPM_SINGLE("Capture Switch", REG_DIGMICCONF,
+ REG_DIGMICCONF_ENDMIC1, 1, NORMAL),
+};
+
+/* DMic 2 - Mute */
+static const struct snd_kcontrol_new dapm_dmic2_mute[] = {
+ SOC_DAPM_SINGLE("Capture Switch", REG_DIGMICCONF,
+ REG_DIGMICCONF_ENDMIC2, 1, NORMAL),
+};
+
+/* DMic 3 - Mute */
+static const struct snd_kcontrol_new dapm_dmic3_mute[] = {
+ SOC_DAPM_SINGLE("Capture Switch", REG_DIGMICCONF,
+ REG_DIGMICCONF_ENDMIC3, 1, NORMAL),
+};
+
+/* DMic 4 - Mute */
+static const struct snd_kcontrol_new dapm_dmic4_mute[] = {
+ SOC_DAPM_SINGLE("Capture Switch", REG_DIGMICCONF,
+ REG_DIGMICCONF_ENDMIC4, 1, NORMAL),
+};
+
+/* DMic 5 - Mute */
+static const struct snd_kcontrol_new dapm_dmic5_mute[] = {
+ SOC_DAPM_SINGLE("Capture Switch", REG_DIGMICCONF,
+ REG_DIGMICCONF_ENDMIC5, 1, NORMAL),
+};
+
+/* DMic 6 - Mute */
+static const struct snd_kcontrol_new dapm_dmic6_mute[] = {
+ SOC_DAPM_SINGLE("Capture Switch", REG_DIGMICCONF,
+ REG_DIGMICCONF_ENDMIC6, 1, NORMAL),
+};
+
+/* ANC */
+
+static const char * const enum_anc_in_sel[] = {"Mic 1 / DMic 6", "Mic 2 / DMic 5"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_anc_in_sel, REG_DMICFILTCONF,
+ REG_DMICFILTCONF_ANCINSEL, enum_anc_in_sel);
+static const struct snd_kcontrol_new dapm_anc_in_select[] = {
+ SOC_DAPM_ENUM("ANC Source", dapm_enum_anc_in_sel),
+};
+
+/* ANC - Enable/Disable */
+static SOC_ENUM_SINGLE_DECL(dapm_enum_anc_enable, REG_ANCCONF1,
+ REG_ANCCONF1_ENANC, enum_dis_ena);
+static const struct snd_kcontrol_new dapm_anc_enable[] = {
+ SOC_DAPM_ENUM("ANC", dapm_enum_anc_enable),
+};
+
+/* ANC to Earpiece - Mute */
+static const struct snd_kcontrol_new dapm_anc_ear_mute[] = {
+ SOC_DAPM_SINGLE("Playback Switch", REG_DIGMULTCONF1,
+ REG_DIGMULTCONF1_ANCSEL, 1, NORMAL),
+};
+
+/* Sidetone left */
+
+/* Sidetone left - Input selector */
+static const char * const enum_stfir1_in_sel[] = {
+ "LineIn Left", "LineIn Right", "Mic 1", "Headset Left"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_stfir1_in_sel, REG_DIGMULTCONF2,
+ REG_DIGMULTCONF2_FIRSID1SEL, enum_stfir1_in_sel);
+static const struct snd_kcontrol_new dapm_stfir1_in_select[] = {
+ SOC_DAPM_ENUM("Sidetone Left Source", dapm_enum_stfir1_in_sel),
+};
+
+/* Sidetone right path */
+
+/* Sidetone right - Input selector */
+static const char * const enum_stfir2_in_sel[] = {
+ "LineIn Right", "Mic 1", "DMic 4", "Headset Right"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_stfir2_in_sel, REG_DIGMULTCONF2,
+ REG_DIGMULTCONF2_FIRSID2SEL, enum_stfir2_in_sel);
+static const struct snd_kcontrol_new dapm_stfir2_in_select[] = {
+ SOC_DAPM_ENUM("Sidetone Right Source", dapm_enum_stfir2_in_sel),
+};
+
+/* Vibra */
+
+/* Vibra 1 - Enable/Disable */
+static const struct soc_enum enum_vibra1 = SOC_ENUM_SINGLE(0, 0, 2, enum_dis_ena);
+static const struct snd_kcontrol_new dapm_vibra1_mux =
+ SOC_DAPM_ENUM_VIRT("Vibra 1", enum_vibra1);
+
+/* Vibra 2 - Enable/Disable */
+static const struct soc_enum enum_vibra2 = SOC_ENUM_SINGLE(0, 0, 2, enum_dis_ena);
+static const struct snd_kcontrol_new dapm_vibra2_mux =
+ SOC_DAPM_ENUM_VIRT("Vibra 2", enum_vibra2);
+
+static const char * const enum_pwm2vibx[] = {"Audio Path", "PWM Generator"};
+
+static SOC_ENUM_SINGLE_DECL(dapm_enum_pwm2vib1, REG_PWMGENCONF1,
+ REG_PWMGENCONF1_PWMTOVIB1, enum_pwm2vibx);
+
+static const struct snd_kcontrol_new dapm_pwm2vib1[] = {
+ SOC_DAPM_ENUM("Vibra 1 Controller", dapm_enum_pwm2vib1),
+};
+
+static SOC_ENUM_SINGLE_DECL(dapm_enum_pwm2vib2, REG_PWMGENCONF1,
+ REG_PWMGENCONF1_PWMTOVIB2, enum_pwm2vibx);
+
+static const struct snd_kcontrol_new dapm_pwm2vib2[] = {
+ SOC_DAPM_ENUM("Vibra 2 Controller", dapm_enum_pwm2vib2),
+};
+
+static const struct snd_soc_dapm_widget ab8500_dapm_widgets[] = {
+
+ /* DA/AD */
+
+ SND_SOC_DAPM_INPUT("ADC Input"),
+ SND_SOC_DAPM_ADC("ADC", "ab8500_0c", SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_DAC("DAC", "ab8500_0p", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_OUTPUT("DAC Output"),
+
+ SND_SOC_DAPM_AIF_IN("DA_IN1", "ab8500_0p", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DA_IN2", "ab8500_0p", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DA_IN3", "ab8500_0p", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DA_IN4", "ab8500_0p", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DA_IN5", "ab8500_0p", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DA_IN6", "ab8500_0p", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT1", "ab8500_0c", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT2", "ab8500_0c", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT3", "ab8500_0c", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT4", "ab8500_0c", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT57", "ab8500_0c", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT68", "ab8500_0c", 0, SND_SOC_NOPM, 0, 0),
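+	/*
+	 * AD_OUT57 and AD_OUT68 appear to carry the shared AD5/AD7 and
+	 * AD6/AD8 capture slots; both are gated by ENAD5768 further down.
+	 */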
+
+ /* Headset path */
+
+ SND_SOC_DAPM_SUPPLY("Charge Pump", REG_ANACONF5, REG_ANACONF5_ENCPHS,
+ NORMAL, NULL, 0),
+
+ SND_SOC_DAPM_DAC("DA1 Enable", "ab8500_0p",
+ REG_DAPATHENA, REG_DAPATHENA_ENDA1, 0),
+ SND_SOC_DAPM_DAC("DA2 Enable", "ab8500_0p",
+ REG_DAPATHENA, REG_DAPATHENA_ENDA2, 0),
+
+ SND_SOC_DAPM_MUX("Headset Left",
+ SND_SOC_NOPM, 0, 0, &dapm_headset_left_mux),
+ SND_SOC_DAPM_MUX("Headset Right",
+ SND_SOC_NOPM, 0, 0, &dapm_headset_right_mux),
+
+ SND_SOC_DAPM_PGA("HSL Digital Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("HSR Digital Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_DAC("HSL DAC", "ab8500_0p",
+ REG_DAPATHCONF, REG_DAPATHCONF_ENDACHSL, 0),
+ SND_SOC_DAPM_DAC("HSR DAC", "ab8500_0p",
+ REG_DAPATHCONF, REG_DAPATHCONF_ENDACHSR, 0),
+ SND_SOC_DAPM_MIXER("HSL DAC Mute", REG_MUTECONF, REG_MUTECONF_MUTDACHSL,
+ INVERT, NULL, 0),
+ SND_SOC_DAPM_MIXER("HSR DAC Mute", REG_MUTECONF, REG_MUTECONF_MUTDACHSR,
+ INVERT, NULL, 0),
+ SND_SOC_DAPM_DAC("HSL DAC Driver", "ab8500_0p",
+ REG_ANACONF3, REG_ANACONF3_ENDRVHSL, 0),
+ SND_SOC_DAPM_DAC("HSR DAC Driver", "ab8500_0p",
+ REG_ANACONF3, REG_ANACONF3_ENDRVHSR, 0),
+
+ SND_SOC_DAPM_MIXER("HSL Mute", REG_MUTECONF, REG_MUTECONF_MUTHSL,
+ INVERT, NULL, 0),
+ SND_SOC_DAPM_MIXER("HSR Mute", REG_MUTECONF, REG_MUTECONF_MUTHSR,
+ INVERT, NULL, 0),
+ SND_SOC_DAPM_MIXER("HSL Enable", REG_ANACONF4, REG_ANACONF4_ENHSL,
+ NORMAL, NULL, 0),
+ SND_SOC_DAPM_MIXER("HSR Enable", REG_ANACONF4, REG_ANACONF4_ENHSR,
+ NORMAL, NULL, 0),
+ SND_SOC_DAPM_PGA("HSL Gain", SND_SOC_NOPM, 0,
+ 0, NULL, 0),
+ SND_SOC_DAPM_PGA("HSR Gain", SND_SOC_NOPM, 0,
+ 0, NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("HSL"),
+ SND_SOC_DAPM_OUTPUT("HSR"),
+
+ /* LineOut path */
+
+ SND_SOC_DAPM_MUX("LineOut Source Playback Route",
+ SND_SOC_NOPM, 0, 0, dapm_lineout_source),
+
+ SND_SOC_DAPM_MIXER("LOL Enable", REG_ANACONF5,
+ REG_ANACONF5_ENLOL, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("LOR Enable", REG_ANACONF5,
+ REG_ANACONF5_ENLOR, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX("LineOut Left",
+ SND_SOC_NOPM, 0, 0, &dapm_lineout_left_mux),
+
+ SND_SOC_DAPM_MUX("LineOut Right",
+ SND_SOC_NOPM, 0, 0, &dapm_lineout_right_mux),
+
+ /* Earpiece path */
+
+ SND_SOC_DAPM_MUX("Earpiece or LineOut Mono Source",
+ SND_SOC_NOPM, 0, 0, &dapm_ear_lineout_source),
+
+ SND_SOC_DAPM_MIXER("EAR DAC", REG_DAPATHCONF,
+ REG_DAPATHCONF_ENDACEAR, 0, NULL, 0),
+
+ SND_SOC_DAPM_SWITCH("Earpiece", SND_SOC_NOPM, 0, 0, dapm_ear_mute),
+
+ SND_SOC_DAPM_MIXER("EAR Enable", REG_ANACONF4,
+ REG_ANACONF4_ENEAR, 0, NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("EAR"),
+
+ /* Handsfree path */
+
+ SND_SOC_DAPM_MIXER("DA3 Channel Gain", REG_DAPATHENA,
+ REG_DAPATHENA_ENDA3, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("DA4 Channel Gain", REG_DAPATHENA,
+ REG_DAPATHENA_ENDA4, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX("IHF Left Source Playback Route",
+ SND_SOC_NOPM, 0, 0, dapm_ihfl_select),
+ SND_SOC_DAPM_MUX("IHF Right Source Playback Route",
+ SND_SOC_NOPM, 0, 0, dapm_ihfr_select),
+
+ SND_SOC_DAPM_MUX("IHF Left", SND_SOC_NOPM, 0, 0, &dapm_ihf_left_mux),
+ SND_SOC_DAPM_MUX("IHF Right", SND_SOC_NOPM, 0, 0, &dapm_ihf_right_mux),
+
+ SND_SOC_DAPM_MUX("IHF or LineOut Select", SND_SOC_NOPM,
+ 0, 0, &dapm_ihf_or_lineout_select_mux),
+
+ SND_SOC_DAPM_MIXER("IHFL DAC", REG_DAPATHCONF,
+ REG_DAPATHCONF_ENDACHFL, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("IHFR DAC", REG_DAPATHCONF,
+ REG_DAPATHCONF_ENDACHFR, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("DA4 or ANC path to HfR", REG_DIGMULTCONF2,
+ REG_DIGMULTCONF2_DATOHFREN, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("DA3 or ANC path to HfL", REG_DIGMULTCONF2,
+ REG_DIGMULTCONF2_DATOHFLEN, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("IHFL Enable", REG_ANACONF4,
+ REG_ANACONF4_ENHFL, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("IHFR Enable", REG_ANACONF4,
+ REG_ANACONF4_ENHFR, 0, NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("IHFL"),
+ SND_SOC_DAPM_OUTPUT("IHFR"),
+
+ /* Vibrator path */
+
+ SND_SOC_DAPM_MUX("Vibra 1", SND_SOC_NOPM, 0, 0, &dapm_vibra1_mux),
+ SND_SOC_DAPM_MUX("Vibra 2", SND_SOC_NOPM, 0, 0, &dapm_vibra2_mux),
+ SND_SOC_DAPM_MIXER("DA5 Channel Gain", REG_DAPATHENA,
+ REG_DAPATHENA_ENDA5, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("DA6 Channel Gain", REG_DAPATHENA,
+ REG_DAPATHENA_ENDA6, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("VIB1 DAC", REG_DAPATHCONF,
+ REG_DAPATHCONF_ENDACVIB1, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("VIB2 DAC", REG_DAPATHCONF,
+ REG_DAPATHCONF_ENDACVIB2, 0, NULL, 0),
+
+ SND_SOC_DAPM_INPUT("PWMGEN1"),
+ SND_SOC_DAPM_INPUT("PWMGEN2"),
+
+ SND_SOC_DAPM_MUX("Vibra 1 Controller Playback Route",
+ SND_SOC_NOPM, 0, 0, dapm_pwm2vib1),
+ SND_SOC_DAPM_MUX("Vibra 2 Controller Playback Route",
+ SND_SOC_NOPM, 0, 0, dapm_pwm2vib2),
+
+ SND_SOC_DAPM_MIXER("VIB1 Enable", REG_ANACONF4,
+ REG_ANACONF4_ENVIB1, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("VIB2 Enable", REG_ANACONF4,
+ REG_ANACONF4_ENVIB2, 0, NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("VIB1"),
+ SND_SOC_DAPM_OUTPUT("VIB2"),
+
+ /* LineIn & Microphone 2 path */
+
+ SND_SOC_DAPM_INPUT("LINL"),
+ SND_SOC_DAPM_INPUT("LINR"),
+ SND_SOC_DAPM_INPUT("MIC2 Input"),
+
+ SND_SOC_DAPM_SWITCH("LineIn Left", SND_SOC_NOPM, 0, 0, dapm_linl_mute),
+ SND_SOC_DAPM_SWITCH("LineIn Right", SND_SOC_NOPM, 0, 0, dapm_linr_mute),
+ SND_SOC_DAPM_SWITCH("Mic 2", SND_SOC_NOPM, 0, 0, dapm_mic2_mute),
+
+ SND_SOC_DAPM_MIXER("LINL Enable", REG_ANACONF2,
+ REG_ANACONF2_ENLINL, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("LINR Enable", REG_ANACONF2,
+ REG_ANACONF2_ENLINR, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("MIC2 Enable", REG_ANACONF2,
+ REG_ANACONF2_ENMIC2, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX("Mic 2 or LINR Select Capture Route",
+ SND_SOC_NOPM, 0, 0, dapm_mic2lr_select),
+
+ SND_SOC_DAPM_MIXER("LINL ADC", REG_ANACONF3,
+ REG_ANACONF3_ENADCLINL, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("LINR ADC", REG_ANACONF3,
+ REG_ANACONF3_ENADCLINR, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX("AD 1 Select Capture Route",
+ SND_SOC_NOPM, 0, 0, dapm_ad1_select),
+ SND_SOC_DAPM_MUX("AD 2 Select Capture Route",
+ SND_SOC_NOPM, 0, 0, dapm_ad2_select),
+
+ SND_SOC_DAPM_MIXER("AD1 Channel Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("AD2 Channel Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("AD12 Enable", REG_ADPATHENA,
+ REG_ADPATHENA_ENAD12, 0, NULL, 0),
+
+ /* Microphone 1 path */
+
+ SND_SOC_DAPM_INPUT("MIC1A Input"),
+ SND_SOC_DAPM_INPUT("MIC1B Input"),
+
+ SND_SOC_DAPM_MUX("Mic 1A or 1B Select Capture Route",
+ SND_SOC_NOPM, 0, 0, dapm_mic1ab_select),
+
+ SND_SOC_DAPM_SWITCH("Mic 1", SND_SOC_NOPM, 0, 0, dapm_mic1_mute),
+
+ SND_SOC_DAPM_MIXER("MIC1 Enable", REG_ANACONF2,
+ REG_ANACONF2_ENMIC1, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("MIC1 ADC", REG_ANACONF3,
+ REG_ANACONF3_ENADCMIC, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX("AD 3 Select Capture Route",
+ SND_SOC_NOPM, 0, 0, dapm_ad3_select),
+
+ SND_SOC_DAPM_MIXER("AD3 Channel Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("AD3 Enable", REG_ADPATHENA,
+ REG_ADPATHENA_ENAD34, 0, NULL, 0),
+
+ /* HD Capture path */
+
+ SND_SOC_DAPM_MUX("AD 5 Select Capture Route",
+ SND_SOC_NOPM, 0, 0, dapm_ad5_select),
+ SND_SOC_DAPM_MUX("AD 6 Select Capture Route",
+ SND_SOC_NOPM, 0, 0, dapm_ad6_select),
+
+ SND_SOC_DAPM_MIXER("AD5 Channel Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("AD6 Channel Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("AD57 Enable", REG_ADPATHENA,
+ REG_ADPATHENA_ENAD5768, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("AD68 Enable", REG_ADPATHENA,
+ REG_ADPATHENA_ENAD5768, 0, NULL, 0),
+
+ /* Digital Microphone path */
+
+ SND_SOC_DAPM_INPUT("DMIC Input"),
+
+ SND_SOC_DAPM_SWITCH("DMic 1", SND_SOC_NOPM, 0, 0, dapm_dmic1_mute),
+ SND_SOC_DAPM_SWITCH("DMic 2", SND_SOC_NOPM, 0, 0, dapm_dmic2_mute),
+ SND_SOC_DAPM_SWITCH("DMic 3", SND_SOC_NOPM, 0, 0, dapm_dmic3_mute),
+ SND_SOC_DAPM_SWITCH("DMic 4", SND_SOC_NOPM, 0, 0, dapm_dmic4_mute),
+ SND_SOC_DAPM_SWITCH("DMic 5", SND_SOC_NOPM, 0, 0, dapm_dmic5_mute),
+ SND_SOC_DAPM_SWITCH("DMic 6", SND_SOC_NOPM, 0, 0, dapm_dmic6_mute),
+
+ SND_SOC_DAPM_MIXER("AD4 Channel Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("AD4 Enable", REG_ADPATHENA,
+ REG_ADPATHENA_ENAD34, 0, NULL, 0),
+
+ /* LineIn Bypass path */
+
+ SND_SOC_DAPM_MIXER("LINL to HSL Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("LINR to HSR Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Acoustical Noise Cancellation path */
+
+ SND_SOC_DAPM_MUX("ANC Source Playback Route",
+ SND_SOC_NOPM, 0, 0, dapm_anc_in_select),
+
+ SND_SOC_DAPM_MUX("ANC Playback Switch",
+ SND_SOC_NOPM, 0, 0, dapm_anc_enable),
+
+ SND_SOC_DAPM_SWITCH("ANC to Earpiece",
+ SND_SOC_NOPM, 0, 0, dapm_anc_ear_mute),
+
+ /* Sidetone Filter path */
+
+ SND_SOC_DAPM_MUX("Sidetone Left Source Playback Route",
+ SND_SOC_NOPM, 0, 0, dapm_stfir1_in_select),
+ SND_SOC_DAPM_MUX("Sidetone Right Source Playback Route",
+ SND_SOC_NOPM, 0, 0, dapm_stfir2_in_select),
+
+ SND_SOC_DAPM_MIXER("STFIR1 Control", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("STFIR2 Control", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("STFIR1 Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("STFIR2 Gain", SND_SOC_NOPM, 0, 0, NULL, 0),
+};
+
+/* DAPM-routes */
+
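+/*
+ * Each route is {sink, control, source}: a NULL control is an
+ * unconditional connection, otherwise the named kcontrol on the sink
+ * widget gates the path.
+ */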
+static const struct snd_soc_dapm_route dapm_routes[] = {
+ /* AD/DA */
+ {"ADC", NULL, "ADC Input"},
+ {"DAC Output", NULL, "DAC"},
+
+ /* Powerup charge pump if DA1/2 is in use */
+ {"DA_IN1", NULL, "Charge Pump"},
+ {"DA_IN2", NULL, "Charge Pump"},
+
+ /* Headset path */
+
+ {"DA1 Enable", NULL, "DA_IN1"},
+ {"DA2 Enable", NULL, "DA_IN2"},
+
+ {"HSL Digital Gain", NULL, "DA1 Enable"},
+ {"HSR Digital Gain", NULL, "DA2 Enable"},
+
+ {"HSL DAC", NULL, "HSL Digital Gain"},
+ {"HSR DAC", NULL, "HSR Digital Gain"},
+
+ {"HSL DAC Mute", NULL, "HSL DAC"},
+ {"HSR DAC Mute", NULL, "HSR DAC"},
+
+ {"HSL DAC Driver", NULL, "HSL DAC Mute"},
+ {"HSR DAC Driver", NULL, "HSR DAC Mute"},
+
+ {"HSL Mute", NULL, "HSL DAC Driver"},
+ {"HSR Mute", NULL, "HSR DAC Driver"},
+
+ {"Headset Left", "Enabled", "HSL Mute"},
+ {"Headset Right", "Enabled", "HSR Mute"},
+
+ {"HSL Enable", NULL, "Headset Left"},
+ {"HSR Enable", NULL, "Headset Right"},
+
+ {"HSL Gain", NULL, "HSL Enable"},
+ {"HSR Gain", NULL, "HSR Enable"},
+
+ {"HSL", NULL, "HSL Gain"},
+ {"HSR", NULL, "HSR Gain"},
+
+ /* IHF or LineOut path */
+
+ {"DA3 Channel Gain", NULL, "DA_IN3"},
+ {"DA4 Channel Gain", NULL, "DA_IN4"},
+
+ {"IHF Left Source Playback Route", "Audio Path", "DA3 Channel Gain"},
+ {"IHF Right Source Playback Route", "Audio Path", "DA4 Channel Gain"},
+
+ {"DA3 or ANC path to HfL", NULL, "IHF Left Source Playback Route"},
+ {"DA4 or ANC path to HfR", NULL, "IHF Right Source Playback Route"},
+
+ /* IHF path */
+
+ {"IHF Left", "Enabled", "DA3 or ANC path to HfL"},
+ {"IHF Right", "Enabled", "DA4 or ANC path to HfR"},
+
+ {"IHFL DAC", NULL, "IHF Left"},
+ {"IHFR DAC", NULL, "IHF Right"},
+
+ {"IHFL Enable", NULL, "IHFL DAC"},
+ {"IHFR Enable", NULL, "IHFR DAC"},
+
+ {"IHF or LineOut Select", "IHF", "IHFL Enable"},
+ {"IHF or LineOut Select", "IHF", "IHFR Enable"},
+
+ /* Earpiece path */
+
+ {"Earpiece or LineOut Mono Source", "Headset Left", "HSL Digital Gain"},
+ {"Earpiece or LineOut Mono Source", "IHF Left", "DA3 or ANC path to HfL"},
+
+ {"EAR DAC", NULL, "Earpiece or LineOut Mono Source"},
+
+ {"Earpiece", "Playback Switch", "EAR DAC"},
+
+ {"EAR Enable", NULL, "Earpiece"},
+
+ {"EAR", NULL, "EAR Enable"},
+
+ /* LineOut path stereo */
+
+ {"LineOut Source Playback Route", "Stereo Path", "HSL DAC Driver"},
+ {"LineOut Source Playback Route", "Stereo Path", "HSR DAC Driver"},
+
+ /* LineOut path mono */
+
+ {"LineOut Source Playback Route", "Mono Path", "EAR DAC"},
+
+ /* LineOut path */
+
+ {"LineOut Left", "Enabled", "LineOut Source Playback Route"},
+ {"LineOut Right", "Enabled", "LineOut Source Playback Route"},
+
+ {"LOL Enable", NULL, "LineOut Left"},
+ {"LOR Enable", NULL, "LineOut Right"},
+
+ {"IHF or LineOut Select", "LineOut", "LOL Enable"},
+ {"IHF or LineOut Select", "LineOut", "LOR Enable"},
+
+ /* IHF path */
+
+ {"IHFL", NULL, "IHF or LineOut Select"},
+ {"IHFR", NULL, "IHF or LineOut Select"},
+
+ /* Vibrator path */
+
+ {"DA5 Channel Gain", NULL, "DA_IN5"},
+ {"DA6 Channel Gain", NULL, "DA_IN6"},
+
+ {"VIB1 DAC", NULL, "DA5 Channel Gain"},
+ {"VIB2 DAC", NULL, "DA6 Channel Gain"},
+
+ {"Vibra 1 Controller Playback Route", "Audio Path", "VIB1 DAC"},
+ {"Vibra 2 Controller Playback Route", "Audio Path", "VIB2 DAC"},
+ {"Vibra 1 Controller Playback Route", "PWM Generator", "PWMGEN1"},
+ {"Vibra 2 Controller Playback Route", "PWM Generator", "PWMGEN2"},
+
+ {"Vibra 1", "Enabled", "Vibra 1 Controller Playback Route"},
+ {"Vibra 2", "Enabled", "Vibra 2 Controller Playback Route"},
+
+ {"VIB1 Enable", NULL, "Vibra 1"},
+ {"VIB2 Enable", NULL, "Vibra 2"},
+
+ {"VIB1", NULL, "VIB1 Enable"},
+ {"VIB2", NULL, "VIB2 Enable"},
+
+ /* LineIn & Microphone 2 path */
+
+ {"LineIn Left", "Capture Switch", "LINL"},
+ {"LineIn Right", "Capture Switch", "LINR"},
+ {"Mic 2", "Capture Switch", "MIC2 Input"},
+
+ {"LINL Enable", NULL, "LineIn Left"},
+ {"LINR Enable", NULL, "LineIn Right"},
+ {"MIC2 Enable", NULL, "Mic 2"},
+
+ {"Mic 2 or LINR Select Capture Route", "LineIn Right", "LINR Enable"},
+ {"Mic 2 or LINR Select Capture Route", "Mic 2", "MIC2 Enable"},
+
+ {"LINL ADC", NULL, "LINL Enable"},
+ {"LINR ADC", NULL, "Mic 2 or LINR Select Capture Route"},
+
+ {"AD 1 Select Capture Route", "LineIn Left", "LINL ADC"},
+ {"AD 2 Select Capture Route", "LineIn Right", "LINR ADC"},
+
+ {"AD1 Channel Gain", NULL, "AD 1 Select Capture Route"},
+ {"AD2 Channel Gain", NULL, "AD 2 Select Capture Route"},
+
+ {"AD12 Enable", NULL, "AD1 Channel Gain"},
+ {"AD12 Enable", NULL, "AD2 Channel Gain"},
+
+ {"AD_OUT1", NULL, "AD12 Enable"},
+ {"AD_OUT2", NULL, "AD12 Enable"},
+
+ /* Microphone 1 path */
+
+ {"Mic 1A or 1B Select Capture Route", "Mic 1A", "MIC1A Input"},
+ {"Mic 1A or 1B Select Capture Route", "Mic 1B", "MIC1B Input"},
+
+ {"Mic 1", "Capture Switch", "Mic 1A or 1B Select Capture Route"},
+
+ {"MIC1 Enable", NULL, "Mic 1"},
+
+ {"MIC1 ADC", NULL, "MIC1 Enable"},
+
+ {"AD 3 Select Capture Route", "Mic 1", "MIC1 ADC"},
+
+ {"AD3 Channel Gain", NULL, "AD 3 Select Capture Route"},
+
+ {"AD3 Enable", NULL, "AD3 Channel Gain"},
+
+ {"AD_OUT3", NULL, "AD3 Enable"},
+
+ /* HD Capture path */
+
+ {"AD 5 Select Capture Route", "Mic 2", "LINR ADC"},
+ {"AD 6 Select Capture Route", "Mic 1", "MIC1 ADC"},
+
+ {"AD5 Channel Gain", NULL, "AD 5 Select Capture Route"},
+ {"AD6 Channel Gain", NULL, "AD 6 Select Capture Route"},
+
+ {"AD57 Enable", NULL, "AD5 Channel Gain"},
+ {"AD68 Enable", NULL, "AD6 Channel Gain"},
+
+ {"AD_OUT57", NULL, "AD57 Enable"},
+ {"AD_OUT68", NULL, "AD68 Enable"},
+
+ /* Digital Microphone path */
+
+ {"DMic 1", "Capture Switch", "DMIC Input"},
+ {"DMic 2", "Capture Switch", "DMIC Input"},
+ {"DMic 3", "Capture Switch", "DMIC Input"},
+ {"DMic 4", "Capture Switch", "DMIC Input"},
+ {"DMic 5", "Capture Switch", "DMIC Input"},
+ {"DMic 6", "Capture Switch", "DMIC Input"},
+
+ {"AD 1 Select Capture Route", "DMic 1", "DMic 1"},
+ {"AD 2 Select Capture Route", "DMic 2", "DMic 2"},
+ {"AD 3 Select Capture Route", "DMic 3", "DMic 3"},
+ {"AD 5 Select Capture Route", "DMic 5", "DMic 5"},
+ {"AD 6 Select Capture Route", "DMic 6", "DMic 6"},
+
+ {"AD4 Channel Gain", NULL, "DMic 4"},
+
+ {"AD4 Enable", NULL, "AD4 Channel Gain"},
+
+ {"AD_OUT4", NULL, "AD4 Enable"},
+
+ /* LineIn Bypass path */
+
+ {"LINL to HSL Gain", NULL, "LINL Enable"},
+ {"LINR to HSR Gain", NULL, "LINR Enable"},
+
+ {"HSL DAC Driver", NULL, "LINL to HSL Gain"},
+ {"HSR DAC Driver", NULL, "LINR to HSR Gain"},
+
+ /* Acoustical Noise Cancellation path */
+
+ {"ANC Source Playback Route", "Mic 2 / DMic 5", "AD5 Channel Gain"},
+ {"ANC Source Playback Route", "Mic 1 / DMic 6", "AD6 Channel Gain"},
+
+ {"ANC Playback Switch", "Enabled", "ANC Source Playback Route"},
+
+ {"IHF Left Source Playback Route", "ANC", "ANC Playback Switch"},
+ {"IHF Right Source Playback Route", "ANC", "ANC Playback Switch"},
+ {"ANC to Earpiece", "Playback Switch", "ANC Playback Switch"},
+
+ {"HSL Digital Gain", NULL, "ANC to Earpiece"},
+
+ /* Sidetone Filter path */
+
+ {"Sidetone Left Source Playback Route", "LineIn Left", "AD12 Enable"},
+ {"Sidetone Left Source Playback Route", "LineIn Right", "AD12 Enable"},
+ {"Sidetone Left Source Playback Route", "Mic 1", "AD3 Enable"},
+ {"Sidetone Left Source Playback Route", "Headset Left", "DA_IN1"},
+ {"Sidetone Right Source Playback Route", "LineIn Right", "AD12 Enable"},
+ {"Sidetone Right Source Playback Route", "Mic 1", "AD3 Enable"},
+ {"Sidetone Right Source Playback Route", "DMic 4", "AD4 Enable"},
+ {"Sidetone Right Source Playback Route", "Headset Right", "DA_IN2"},
+
+ {"STFIR1 Control", NULL, "Sidetone Left Source Playback Route"},
+ {"STFIR2 Control", NULL, "Sidetone Right Source Playback Route"},
+
+ {"STFIR1 Gain", NULL, "STFIR1 Control"},
+ {"STFIR2 Gain", NULL, "STFIR2 Control"},
+
+ {"DA1 Enable", NULL, "STFIR1 Gain"},
+ {"DA2 Enable", NULL, "STFIR2 Gain"},
+};
+
+/* Controls - Non-DAPM ASoC */
+
+/* from -31 to 31 dB in 1 dB steps (mute instead of -32 dB) */
+static DECLARE_TLV_DB_SCALE(adx_dig_gain_tlv, -3200, 100, 1);
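+/*
+ * DECLARE_TLV_DB_SCALE(name, min, step, mute) takes 0.01 dB units, so
+ * -3200/100/1 above means a scale starting at -32 dB in 1 dB steps with
+ * the lowest value reported as mute.
+ */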
+
+/* from -62 to 0 dB in 1 dB steps (mute instead of -63 dB) */
+static DECLARE_TLV_DB_SCALE(dax_dig_gain_tlv, -6300, 100, 1);
+
+/* from 0 to 8 dB in 1 dB steps (mute instead of -1 dB) */
+static DECLARE_TLV_DB_SCALE(hs_ear_dig_gain_tlv, -100, 100, 1);
+
+/* from -30 to 0 dB in 1 dB steps (mute instead of -31 dB) */
+static DECLARE_TLV_DB_SCALE(stfir_dig_gain_tlv, -3100, 100, 1);
+
+/* from -32 to -20 dB in 4 dB steps / from -18 to 2 dB in 2 dB steps */
+static const unsigned int hs_gain_tlv[] = {
+ TLV_DB_RANGE_HEAD(2),
+ 0, 3, TLV_DB_SCALE_ITEM(-3200, 400, 0),
+ 4, 15, TLV_DB_SCALE_ITEM(-1800, 200, 0),
+};
+
+/* from 0 to 31 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(mic_gain_tlv, 0, 100, 0);
+
+/* from -10 to 20 dB in 2 dB steps */
+static DECLARE_TLV_DB_SCALE(lin_gain_tlv, -1000, 200, 0);
+
+/* from -36 to 0 dB in 2 dB steps (mute instead of -38 dB) */
+static DECLARE_TLV_DB_SCALE(lin2hs_gain_tlv, -3800, 200, 1);
+
+static SOC_ENUM_SINGLE_DECL(soc_enum_hshpen,
+ REG_ANACONF1, REG_ANACONF1_HSHPEN, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_hslowpow,
+ REG_ANACONF1, REG_ANACONF1_HSLOWPOW, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_daclowpow1,
+ REG_ANACONF1, REG_ANACONF1_DACLOWPOW1, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_daclowpow0,
+ REG_ANACONF1, REG_ANACONF1_DACLOWPOW0, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_eardaclowpow,
+ REG_ANACONF1, REG_ANACONF1_EARDACLOWPOW, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_eardrvlowpow,
+ REG_ANACONF1, REG_ANACONF1_EARDRVLOWPOW, enum_dis_ena);
+
+static const char * const enum_earselcm[] = {"0.95V", "1.10V", "1.27V", "1.58V"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_earselcm,
+ REG_ANACONF1, REG_ANACONF1_EARSELCM, enum_earselcm);
+
+static const char * const enum_hsfadspeed[] = {"2ms", "0.5ms", "10.6ms", "5ms"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_hsfadspeed,
+ REG_DIGMICCONF, REG_DIGMICCONF_HSFADSPEED, enum_hsfadspeed);
+
+static const char * const enum_envdetthre[] = {
+ "250mV", "300mV", "350mV", "400mV",
+ "450mV", "500mV", "550mV", "600mV",
+ "650mV", "700mV", "750mV", "800mV",
+ "850mV", "900mV", "950mV", "1.00V" };
+static SOC_ENUM_SINGLE_DECL(soc_enum_envdetcpen,
+ REG_SIGENVCONF, REG_SIGENVCONF_ENVDETCPEN, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_envdeththre,
+ REG_ENVCPCONF, REG_ENVCPCONF_ENVDETHTHRE, enum_envdetthre);
+static SOC_ENUM_SINGLE_DECL(soc_enum_envdetlthre,
+ REG_ENVCPCONF, REG_ENVCPCONF_ENVDETLTHRE, enum_envdetthre);
+
+static const char * const enum_envdettime[] = {
+ "26.6us", "53.2us", "106us", "213us",
+ "426us", "851us", "1.70ms", "3.40ms",
+ "6.81ms", "13.6ms", "27.2ms", "54.5ms",
+ "109ms", "218ms", "436ms", "872ms" };
+static SOC_ENUM_SINGLE_DECL(soc_enum_envdettime,
+ REG_SIGENVCONF, REG_SIGENVCONF_ENVDETTIME, enum_envdettime);
+
+static const char * const enum_ensemicx[] = {"Differential", "Single Ended"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_ensemic1,
+ REG_ANAGAIN1, REG_ANAGAINX_ENSEMICX, enum_ensemicx);
+static SOC_ENUM_SINGLE_DECL(soc_enum_ensemic2,
+ REG_ANAGAIN2, REG_ANAGAINX_ENSEMICX, enum_ensemicx);
+static SOC_ENUM_SINGLE_DECL(soc_enum_lowpowmic1,
+ REG_ANAGAIN1, REG_ANAGAINX_LOWPOWMICX, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_lowpowmic2,
+ REG_ANAGAIN2, REG_ANAGAINX_LOWPOWMICX, enum_dis_ena);
+
+static SOC_ENUM_DOUBLE_DECL(soc_enum_ad12nh, REG_ADFILTCONF,
+ REG_ADFILTCONF_AD1NH, REG_ADFILTCONF_AD2NH, enum_ena_dis);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_ad34nh, REG_ADFILTCONF,
+ REG_ADFILTCONF_AD3NH, REG_ADFILTCONF_AD4NH, enum_ena_dis);
+
+static const char * const enum_av_mode[] = {"Audio", "Voice"};
+static SOC_ENUM_DOUBLE_DECL(soc_enum_ad12voice, REG_ADFILTCONF,
+ REG_ADFILTCONF_AD1VOICE, REG_ADFILTCONF_AD2VOICE, enum_av_mode);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_ad34voice, REG_ADFILTCONF,
+ REG_ADFILTCONF_AD3VOICE, REG_ADFILTCONF_AD4VOICE, enum_av_mode);
+
+static SOC_ENUM_SINGLE_DECL(soc_enum_da12voice,
+ REG_DASLOTCONF1, REG_DASLOTCONF1_DA12VOICE, enum_av_mode);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da34voice,
+ REG_DASLOTCONF3, REG_DASLOTCONF3_DA34VOICE, enum_av_mode);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da56voice,
+ REG_DASLOTCONF5, REG_DASLOTCONF5_DA56VOICE, enum_av_mode);
+
+static SOC_ENUM_SINGLE_DECL(soc_enum_swapda12_34,
+ REG_DASLOTCONF1, REG_DASLOTCONF1_SWAPDA12_34, enum_dis_ena);
+
+static SOC_ENUM_DOUBLE_DECL(soc_enum_vib12swap, REG_CLASSDCONF1,
+ REG_CLASSDCONF1_VIB1SWAPEN, REG_CLASSDCONF1_VIB2SWAPEN, enum_dis_ena);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_hflrswap, REG_CLASSDCONF1,
+ REG_CLASSDCONF1_HFLSWAPEN, REG_CLASSDCONF1_HFRSWAPEN, enum_dis_ena);
+
+static SOC_ENUM_DOUBLE_DECL(soc_enum_fir01byp, REG_CLASSDCONF2,
+ REG_CLASSDCONF2_FIRBYP0, REG_CLASSDCONF2_FIRBYP1, enum_dis_ena);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_fir23byp, REG_CLASSDCONF2,
+ REG_CLASSDCONF2_FIRBYP2, REG_CLASSDCONF2_FIRBYP3, enum_dis_ena);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_highvol01, REG_CLASSDCONF2,
+ REG_CLASSDCONF2_HIGHVOLEN0, REG_CLASSDCONF2_HIGHVOLEN1, enum_dis_ena);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_highvol23, REG_CLASSDCONF2,
+ REG_CLASSDCONF2_HIGHVOLEN2, REG_CLASSDCONF2_HIGHVOLEN3, enum_dis_ena);
+
+static const char * const enum_sinc53[] = {"Sinc 5", "Sinc 3"};
+static SOC_ENUM_DOUBLE_DECL(soc_enum_dmic12sinc, REG_DMICFILTCONF,
+ REG_DMICFILTCONF_DMIC1SINC3, REG_DMICFILTCONF_DMIC2SINC3, enum_sinc53);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_dmic34sinc, REG_DMICFILTCONF,
+ REG_DMICFILTCONF_DMIC3SINC3, REG_DMICFILTCONF_DMIC4SINC3, enum_sinc53);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_dmic56sinc, REG_DMICFILTCONF,
+ REG_DMICFILTCONF_DMIC5SINC3, REG_DMICFILTCONF_DMIC6SINC3, enum_sinc53);
+
+static const char * const enum_da2hslr[] = {"Sidetone", "Audio Path"};
+static SOC_ENUM_DOUBLE_DECL(soc_enum_da2hslr, REG_DIGMULTCONF1,
+ REG_DIGMULTCONF1_DATOHSLEN, REG_DIGMULTCONF1_DATOHSREN, enum_da2hslr);
+
+static const char * const enum_sinc31[] = {"Sinc 3", "Sinc 1"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_hsesinc,
+ REG_HSLEARDIGGAIN, REG_HSLEARDIGGAIN_HSSINC1, enum_sinc31);
+
+static const char * const enum_fadespeed[] = {"1ms", "4ms", "8ms", "16ms"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_fadespeed,
+ REG_HSRDIGGAIN, REG_HSRDIGGAIN_FADESPEED, enum_fadespeed);
+
+/* Digital interface - Clocks */
+static SOC_ENUM_SINGLE_DECL(soc_enum_mastgen,
+ REG_DIGIFCONF1, REG_DIGIFCONF1_ENMASTGEN, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_fsbitclk0,
+ REG_DIGIFCONF1, REG_DIGIFCONF1_ENFSBITCLK0, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_fsbitclk1,
+ REG_DIGIFCONF1, REG_DIGIFCONF1_ENFSBITCLK1, enum_dis_ena);
+
+/* Digital interface - DA from slot mapping */
+static const char * const enum_da_from_slot_map[] = {"SLOT0",
+ "SLOT1",
+ "SLOT2",
+ "SLOT3",
+ "SLOT4",
+ "SLOT5",
+ "SLOT6",
+ "SLOT7",
+ "SLOT8",
+ "SLOT9",
+ "SLOT10",
+ "SLOT11",
+ "SLOT12",
+ "SLOT13",
+ "SLOT14",
+ "SLOT15",
+ "SLOT16",
+ "SLOT17",
+ "SLOT18",
+ "SLOT19",
+ "SLOT20",
+ "SLOT21",
+ "SLOT22",
+ "SLOT23",
+ "SLOT24",
+ "SLOT25",
+ "SLOT26",
+ "SLOT27",
+ "SLOT28",
+ "SLOT29",
+ "SLOT30",
+ "SLOT31"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_da1slotmap,
+ REG_DASLOTCONF1, REG_DASLOTCONFX_SLTODAX_SHIFT, enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da2slotmap,
+ REG_DASLOTCONF2, REG_DASLOTCONFX_SLTODAX_SHIFT, enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da3slotmap,
+ REG_DASLOTCONF3, REG_DASLOTCONFX_SLTODAX_SHIFT, enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da4slotmap,
+ REG_DASLOTCONF4, REG_DASLOTCONFX_SLTODAX_SHIFT, enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da5slotmap,
+ REG_DASLOTCONF5, REG_DASLOTCONFX_SLTODAX_SHIFT, enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da6slotmap,
+ REG_DASLOTCONF6, REG_DASLOTCONFX_SLTODAX_SHIFT, enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da7slotmap,
+ REG_DASLOTCONF7, REG_DASLOTCONFX_SLTODAX_SHIFT, enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da8slotmap,
+ REG_DASLOTCONF8, REG_DASLOTCONFX_SLTODAX_SHIFT, enum_da_from_slot_map);
+
+/* Digital interface - AD to slot mapping */
+static const char * const enum_ad_to_slot_map[] = {"AD_OUT1",
+ "AD_OUT2",
+ "AD_OUT3",
+ "AD_OUT4",
+ "AD_OUT5",
+ "AD_OUT6",
+ "AD_OUT7",
+ "AD_OUT8",
+ "zeroes",
+ "tristate"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot0map,
+ REG_ADSLOTSEL1, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot1map,
+ REG_ADSLOTSEL1, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot2map,
+ REG_ADSLOTSEL2, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot3map,
+ REG_ADSLOTSEL2, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot4map,
+ REG_ADSLOTSEL3, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot5map,
+ REG_ADSLOTSEL3, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot6map,
+ REG_ADSLOTSEL4, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot7map,
+ REG_ADSLOTSEL4, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot8map,
+ REG_ADSLOTSEL5, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot9map,
+ REG_ADSLOTSEL5, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot10map,
+ REG_ADSLOTSEL6, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot11map,
+ REG_ADSLOTSEL6, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot12map,
+ REG_ADSLOTSEL7, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot13map,
+ REG_ADSLOTSEL7, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot14map,
+ REG_ADSLOTSEL8, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot15map,
+ REG_ADSLOTSEL8, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot16map,
+ REG_ADSLOTSEL9, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot17map,
+ REG_ADSLOTSEL9, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot18map,
+ REG_ADSLOTSEL10, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot19map,
+ REG_ADSLOTSEL10, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot20map,
+ REG_ADSLOTSEL11, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot21map,
+ REG_ADSLOTSEL11, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot22map,
+ REG_ADSLOTSEL12, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot23map,
+ REG_ADSLOTSEL12, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot24map,
+ REG_ADSLOTSEL13, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot25map,
+ REG_ADSLOTSEL13, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot26map,
+ REG_ADSLOTSEL14, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot27map,
+ REG_ADSLOTSEL14, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot28map,
+ REG_ADSLOTSEL15, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot29map,
+ REG_ADSLOTSEL15, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot30map,
+ REG_ADSLOTSEL16, REG_ADSLOTSELX_EVEN_SHIFT, enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot31map,
+ REG_ADSLOTSEL16, REG_ADSLOTSELX_ODD_SHIFT, enum_ad_to_slot_map);
+
+/* Digital interface - Digital loopback */
+static SOC_ENUM_SINGLE_DECL(soc_enum_ad1loop,
+ REG_DASLOTCONF1, REG_DASLOTCONF1_DAI7TOADO1, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_ad2loop,
+ REG_DASLOTCONF2, REG_DASLOTCONF2_DAI8TOADO2, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_ad3loop,
+ REG_DASLOTCONF3, REG_DASLOTCONF3_DAI7TOADO3, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_ad4loop,
+ REG_DASLOTCONF4, REG_DASLOTCONF4_DAI8TOADO4, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_ad5loop,
+ REG_DASLOTCONF5, REG_DASLOTCONF5_DAI7TOADO5, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_ad6loop,
+ REG_DASLOTCONF6, REG_DASLOTCONF6_DAI8TOADO6, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_ad7loop,
+ REG_DASLOTCONF7, REG_DASLOTCONF7_DAI8TOADO7, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_ad8loop,
+ REG_DASLOTCONF8, REG_DASLOTCONF8_DAI7TOADO8, enum_dis_ena);
+
+/* Digital interface - Burst mode */
+static SOC_ENUM_SINGLE_DECL(soc_enum_if0fifoen,
+ REG_DIGIFCONF3, REG_DIGIFCONF3_IF0BFIFOEN, enum_dis_ena);
+static const char * const enum_mask[] = {"Unmasked", "Masked"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_bfifomask,
+ REG_FIFOCONF1, REG_FIFOCONF1_BFIFOMASK, enum_mask);
+static const char * const enum_bitclk0[] = {"19_2_MHz", "38_4_MHz"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_bfifo19m2,
+ REG_FIFOCONF1, REG_FIFOCONF1_BFIFO19M2, enum_bitclk0);
+static const char * const enum_slavemaster[] = {"Slave", "Master"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_bfifomast,
+ REG_FIFOCONF3, REG_FIFOCONF3_BFIFOMAST_SHIFT, enum_slavemaster);
+static SOC_ENUM_SINGLE_DECL(soc_enum_bfifoint,
+ REG_FIFOCONF3, REG_FIFOCONF3_BFIFORUN_SHIFT, enum_dis_ena);
+
+/* TODO: move to DAPM */
+static SOC_ENUM_SINGLE_DECL(soc_enum_enfirsids,
+ REG_SIDFIRCONF, REG_SIDFIRCONF_ENFIRSIDS, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_parlhf,
+ REG_CLASSDCONF1, REG_CLASSDCONF1_PARLHF, enum_dis_ena);
+static SOC_ENUM_SINGLE_DECL(soc_enum_parlvib,
+ REG_CLASSDCONF1, REG_CLASSDCONF1_PARLVIB, enum_dis_ena);
+static SOC_ENUM_STROBE_DECL(soc_enum_applysidetone,
+ REG_SIDFIRADR, REG_SIDFIRADR_FIRSIDSET, NORMAL, enum_rdy_apl);
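+/*
+ * Strobe control: toggling FIRSIDSET commits the sidetone FIR coefficient
+ * staged through the "Sidetone FIR Coefficient Index/Value" controls below.
+ */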
+
+static struct snd_kcontrol_new ab8500_snd_controls[] = {
+ SOC_ENUM("Headset High Pass Playback Switch", soc_enum_hshpen),
+ SOC_ENUM("Headset Low Power Playback Switch", soc_enum_hslowpow),
+ SOC_ENUM("Headset DAC Low Power Playback Switch", soc_enum_daclowpow1),
+ SOC_ENUM("Headset DAC Drv Low Power Playback Switch",
+ soc_enum_daclowpow0),
+ SOC_ENUM("Earpiece DAC Low Power Playback Switch",
+ soc_enum_eardaclowpow),
+ SOC_ENUM("Earpiece DAC Drv Low Power Playback Switch",
+ soc_enum_eardrvlowpow),
+ SOC_ENUM("Earpiece Common Mode Playback Switch", soc_enum_earselcm),
+
+ SOC_ENUM("Headset Fade Speed Playback Switch", soc_enum_hsfadspeed),
+
+ SOC_ENUM("Charge Pump High Threshold For Low Voltage",
+ soc_enum_envdeththre),
+ SOC_ENUM("Charge Pump Low Threshold For Low Voltage",
+ soc_enum_envdetlthre),
+ SOC_ENUM("Charge Pump Envelope Detection", soc_enum_envdetcpen),
+ SOC_ENUM("Charge Pump Envelope Detection Decay Time",
+ soc_enum_envdettime),
+
+ SOC_ENUM("Mic 1 Type Capture Switch", soc_enum_ensemic1),
+ SOC_ENUM("Mic 2 Type Capture Switch", soc_enum_ensemic2),
+ SOC_ENUM("Mic 1 Low Power Capture Switch", soc_enum_lowpowmic1),
+ SOC_ENUM("Mic 2 Low Power Capture Switch", soc_enum_lowpowmic2),
+
+ SOC_ENUM("LineIn High Pass Capture Switch", soc_enum_ad12nh),
+ SOC_ENUM("Mic High Pass Capture Switch", soc_enum_ad34nh),
+ SOC_ENUM("LineIn Mode Capture Switch", soc_enum_ad12voice),
+ SOC_ENUM("Mic Mode Capture Switch", soc_enum_ad34voice),
+
+ SOC_ENUM("Headset Mode Playback Switch", soc_enum_da12voice),
+ SOC_ENUM("IHF Mode Playback Switch", soc_enum_da34voice),
+ SOC_ENUM("Vibra Mode Playback Switch", soc_enum_da56voice),
+
+ SOC_ENUM("IHF and Headset Swap Playback Switch", soc_enum_swapda12_34),
+
+ SOC_ENUM("IHF Low EMI Mode Playback Switch", soc_enum_hflrswap),
+ SOC_ENUM("Vibra Low EMI Mode Playback Switch", soc_enum_vib12swap),
+
+ SOC_ENUM("IHF FIR Bypass Playback Switch", soc_enum_fir01byp),
+ SOC_ENUM("Vibra FIR Bypass Playback Switch", soc_enum_fir23byp),
+
+ /* TODO: Cannot be changed on the fly with digital channel enabled. */
+ SOC_ENUM("IHF High Volume Playback Switch", soc_enum_highvol01),
+ SOC_ENUM("Vibra High Volume Playback Switch", soc_enum_highvol23),
+
+ SOC_SINGLE("ClassD High Pass Gain Playback Volume",
+ REG_CLASSDCONF3, REG_CLASSDCONF3_DITHHPGAIN,
+ REG_CLASSDCONF3_DITHHPGAIN_MAX, NORMAL),
+ SOC_SINGLE("ClassD White Gain Playback Volume",
+ REG_CLASSDCONF3, REG_CLASSDCONF3_DITHWGAIN,
+ REG_CLASSDCONF3_DITHWGAIN_MAX, NORMAL),
+
+ SOC_ENUM("LineIn Filter Capture Switch", soc_enum_dmic12sinc),
+ SOC_ENUM("Mic Filter Capture Switch", soc_enum_dmic34sinc),
+ SOC_ENUM("HD Mic Filter Capture Switch", soc_enum_dmic56sinc),
+
+ SOC_ENUM("Headset Source Playback Route", soc_enum_da2hslr),
+
+ /* TODO: Cannot be changed on the fly with digital channel enabled. */
+ SOC_ENUM("Headset Filter Playback Switch", soc_enum_hsesinc),
+
+ SOC_ENUM("Digital Gain Fade Speed Switch", soc_enum_fadespeed),
+
+ SOC_DOUBLE_R("Vibra PWM Duty Cycle N Playback Volume",
+ REG_PWMGENCONF3, REG_PWMGENCONF5,
+ REG_PWMGENCONFX_PWMVIBXDUTCYC,
+ REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX, NORMAL),
+ SOC_DOUBLE_R("Vibra PWM Duty Cycle P Playback Volume",
+ REG_PWMGENCONF2, REG_PWMGENCONF4,
+ REG_PWMGENCONFX_PWMVIBXDUTCYC,
+ REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX, NORMAL),
+
+ /* TODO: move to DAPM */
+ SOC_ENUM("Sidetone Playback Switch", soc_enum_enfirsids),
+ SOC_ENUM("IHF L and R Bridge Playback Route", soc_enum_parlhf),
+ SOC_ENUM("Vibra 1 and 2 Bridge Playback Route", soc_enum_parlvib),
+
+ /* Digital gains for AD side */
+
+ SOC_DOUBLE_R_TLV("LineIn Master Gain Capture Volume",
+ REG_ADDIGGAIN1, REG_ADDIGGAIN2,
+ 0, REG_ADDIGGAINX_ADXGAIN_MAX, INVERT, adx_dig_gain_tlv),
+ SOC_DOUBLE_R_TLV("Mic Master Gain Capture Volume",
+ REG_ADDIGGAIN3, REG_ADDIGGAIN4,
+ 0, REG_ADDIGGAINX_ADXGAIN_MAX, INVERT, adx_dig_gain_tlv),
+ SOC_DOUBLE_R_TLV("HD Mic Master Gain Capture Volume",
+ REG_ADDIGGAIN5, REG_ADDIGGAIN6,
+ 0, REG_ADDIGGAINX_ADXGAIN_MAX, INVERT, adx_dig_gain_tlv),
+
+ /* Digital gains for DA side */
+
+ SOC_DOUBLE_R_TLV("Headset Master Gain Playback Volume",
+ REG_DADIGGAIN1, REG_DADIGGAIN2,
+ 0, REG_DADIGGAINX_DAXGAIN_MAX, INVERT, dax_dig_gain_tlv),
+ SOC_DOUBLE_R_TLV("IHF Master Gain Playback Volume",
+ REG_DADIGGAIN3, REG_DADIGGAIN4,
+ 0, REG_DADIGGAINX_DAXGAIN_MAX, INVERT, dax_dig_gain_tlv),
+ SOC_DOUBLE_R_TLV("Vibra Master Gain Playback Volume",
+ REG_DADIGGAIN5, REG_DADIGGAIN6,
+ 0, REG_DADIGGAINX_DAXGAIN_MAX, INVERT, dax_dig_gain_tlv),
+ SOC_DOUBLE_R_TLV("Analog Loopback Gain Playback Volume",
+ REG_ADDIGLOOPGAIN1, REG_ADDIGLOOPGAIN2,
+ 0, REG_ADDIGLOOPGAINX_ADXLBGAIN_MAX, INVERT, dax_dig_gain_tlv),
+ SOC_DOUBLE_R_TLV("Headset Digital Gain Playback Volume",
+ REG_HSLEARDIGGAIN, REG_HSRDIGGAIN,
+ 0, REG_HSLEARDIGGAIN_HSLDGAIN_MAX, INVERT, hs_ear_dig_gain_tlv),
+ SOC_DOUBLE_R_TLV("Sidetone Digital Gain Playback Volume",
+ REG_SIDFIRGAIN1, REG_SIDFIRGAIN2,
+ 0, REG_SIDFIRGAINX_FIRSIDXGAIN_MAX, INVERT, stfir_dig_gain_tlv),
+
+ /* Analog gains */
+
+ SOC_DOUBLE_TLV("Headset Gain Playback Volume",
+ REG_ANAGAIN3,
+ REG_ANAGAIN3_HSLGAIN, REG_ANAGAIN3_HSRGAIN,
+ REG_ANAGAIN3_HSXGAIN_MAX, INVERT, hs_gain_tlv),
+ SOC_SINGLE_TLV("Mic 1 Capture Volume",
+ REG_ANAGAIN1,
+ REG_ANAGAINX_MICXGAIN,
+ REG_ANAGAINX_MICXGAIN_MAX, NORMAL, mic_gain_tlv),
+ SOC_SINGLE_TLV("Mic 2 Capture Volume",
+ REG_ANAGAIN2,
+ REG_ANAGAINX_MICXGAIN,
+ REG_ANAGAINX_MICXGAIN_MAX, NORMAL, mic_gain_tlv),
+ SOC_DOUBLE_TLV("LineIn Capture Volume",
+ REG_ANAGAIN4,
+ REG_ANAGAIN4_LINLGAIN, REG_ANAGAIN4_LINRGAIN,
+ REG_ANAGAIN4_LINXGAIN_MAX, NORMAL, lin_gain_tlv),
+ SOC_DOUBLE_R_TLV("LineIn to Headset Bypass Playback Volume",
+ REG_DIGLINHSLGAIN, REG_DIGLINHSRGAIN,
+ REG_DIGLINHSXGAIN_LINTOHSXGAIN,
+ REG_DIGLINHSXGAIN_LINTOHSXGAIN_MAX, INVERT, lin2hs_gain_tlv),
+
+ /* Digital interface - Clocks */
+ SOC_ENUM("Digital Interface Master Generator Switch", soc_enum_mastgen),
+ SOC_ENUM("Digital Interface 0 Bit-clock Switch", soc_enum_fsbitclk0),
+ SOC_ENUM("Digital Interface 1 Bit-clock Switch", soc_enum_fsbitclk1),
+
+ /* Digital interface - DA from slot mapping */
+ SOC_ENUM("Digital Interface DA 1 From Slot Map", soc_enum_da1slotmap),
+ SOC_ENUM("Digital Interface DA 2 From Slot Map", soc_enum_da2slotmap),
+ SOC_ENUM("Digital Interface DA 3 From Slot Map", soc_enum_da3slotmap),
+ SOC_ENUM("Digital Interface DA 4 From Slot Map", soc_enum_da4slotmap),
+ SOC_ENUM("Digital Interface DA 5 From Slot Map", soc_enum_da5slotmap),
+ SOC_ENUM("Digital Interface DA 6 From Slot Map", soc_enum_da6slotmap),
+ SOC_ENUM("Digital Interface DA 7 From Slot Map", soc_enum_da7slotmap),
+ SOC_ENUM("Digital Interface DA 8 From Slot Map", soc_enum_da8slotmap),
+
+ /* Digital interface - AD to slot mapping */
+ SOC_ENUM("Digital Interface AD To Slot 0 Map", soc_enum_adslot0map),
+ SOC_ENUM("Digital Interface AD To Slot 1 Map", soc_enum_adslot1map),
+ SOC_ENUM("Digital Interface AD To Slot 2 Map", soc_enum_adslot2map),
+ SOC_ENUM("Digital Interface AD To Slot 3 Map", soc_enum_adslot3map),
+ SOC_ENUM("Digital Interface AD To Slot 4 Map", soc_enum_adslot4map),
+ SOC_ENUM("Digital Interface AD To Slot 5 Map", soc_enum_adslot5map),
+ SOC_ENUM("Digital Interface AD To Slot 6 Map", soc_enum_adslot6map),
+ SOC_ENUM("Digital Interface AD To Slot 7 Map", soc_enum_adslot7map),
+ SOC_ENUM("Digital Interface AD To Slot 8 Map", soc_enum_adslot8map),
+ SOC_ENUM("Digital Interface AD To Slot 9 Map", soc_enum_adslot9map),
+ SOC_ENUM("Digital Interface AD To Slot 10 Map", soc_enum_adslot10map),
+ SOC_ENUM("Digital Interface AD To Slot 11 Map", soc_enum_adslot11map),
+ SOC_ENUM("Digital Interface AD To Slot 12 Map", soc_enum_adslot12map),
+ SOC_ENUM("Digital Interface AD To Slot 13 Map", soc_enum_adslot13map),
+ SOC_ENUM("Digital Interface AD To Slot 14 Map", soc_enum_adslot14map),
+ SOC_ENUM("Digital Interface AD To Slot 15 Map", soc_enum_adslot15map),
+ SOC_ENUM("Digital Interface AD To Slot 16 Map", soc_enum_adslot16map),
+ SOC_ENUM("Digital Interface AD To Slot 17 Map", soc_enum_adslot17map),
+ SOC_ENUM("Digital Interface AD To Slot 18 Map", soc_enum_adslot18map),
+ SOC_ENUM("Digital Interface AD To Slot 19 Map", soc_enum_adslot19map),
+ SOC_ENUM("Digital Interface AD To Slot 20 Map", soc_enum_adslot20map),
+ SOC_ENUM("Digital Interface AD To Slot 21 Map", soc_enum_adslot21map),
+ SOC_ENUM("Digital Interface AD To Slot 22 Map", soc_enum_adslot22map),
+ SOC_ENUM("Digital Interface AD To Slot 23 Map", soc_enum_adslot23map),
+ SOC_ENUM("Digital Interface AD To Slot 24 Map", soc_enum_adslot24map),
+ SOC_ENUM("Digital Interface AD To Slot 25 Map", soc_enum_adslot25map),
+ SOC_ENUM("Digital Interface AD To Slot 26 Map", soc_enum_adslot26map),
+ SOC_ENUM("Digital Interface AD To Slot 27 Map", soc_enum_adslot27map),
+ SOC_ENUM("Digital Interface AD To Slot 28 Map", soc_enum_adslot28map),
+ SOC_ENUM("Digital Interface AD To Slot 29 Map", soc_enum_adslot29map),
+ SOC_ENUM("Digital Interface AD To Slot 30 Map", soc_enum_adslot30map),
+ SOC_ENUM("Digital Interface AD To Slot 31 Map", soc_enum_adslot31map),
+
+ /* Digital interface - Loopback */
+ SOC_ENUM("Digital Interface AD 1 Loopback Switch", soc_enum_ad1loop),
+ SOC_ENUM("Digital Interface AD 2 Loopback Switch", soc_enum_ad2loop),
+ SOC_ENUM("Digital Interface AD 3 Loopback Switch", soc_enum_ad3loop),
+ SOC_ENUM("Digital Interface AD 4 Loopback Switch", soc_enum_ad4loop),
+ SOC_ENUM("Digital Interface AD 5 Loopback Switch", soc_enum_ad5loop),
+ SOC_ENUM("Digital Interface AD 6 Loopback Switch", soc_enum_ad6loop),
+ SOC_ENUM("Digital Interface AD 7 Loopback Switch", soc_enum_ad7loop),
+ SOC_ENUM("Digital Interface AD 8 Loopback Switch", soc_enum_ad8loop),
+
+ /* Digital interface - Burst FIFO */
+ SOC_ENUM("Digital Interface 0 FIFO Enable Switch", soc_enum_if0fifoen),
+ SOC_ENUM("Burst FIFO Mask", soc_enum_bfifomask),
+ SOC_ENUM("Burst FIFO Bit-clock Frequency", soc_enum_bfifo19m2),
+ SOC_SINGLE("Burst FIFO Threshold",
+ REG_FIFOCONF1,
+ REG_FIFOCONF1_BFIFOINT_SHIFT,
+ REG_FIFOCONF1_BFIFOINT_MAX,
+ NORMAL),
+ SOC_SINGLE("Burst FIFO Length",
+ REG_FIFOCONF2,
+ REG_FIFOCONF2_BFIFOTX_SHIFT,
+ REG_FIFOCONF2_BFIFOTX_MAX,
+ NORMAL),
+ SOC_SINGLE("Burst FIFO EOS Extra Slots",
+ REG_FIFOCONF3,
+ REG_FIFOCONF3_BFIFOEXSL_SHIFT,
+ REG_FIFOCONF3_BFIFOEXSL_MAX,
+ NORMAL),
+ SOC_SINGLE("Burst FIFO FS Extra Bit-clocks",
+ REG_FIFOCONF3,
+ REG_FIFOCONF3_PREBITCLK0_SHIFT,
+ REG_FIFOCONF3_PREBITCLK0_MAX,
+ NORMAL),
+ SOC_ENUM("Burst FIFO Interface Mode", soc_enum_bfifomast),
+ SOC_ENUM("Burst FIFO Interface Switch", soc_enum_bfifoint),
+ SOC_SINGLE("Burst FIFO Switch Frame Number",
+ REG_FIFOCONF4,
+ REG_FIFOCONF4_BFIFOFRAMSW_SHIFT,
+ REG_FIFOCONF4_BFIFOFRAMSW_MAX,
+ NORMAL),
+ SOC_SINGLE("Burst FIFO Wake Up Delay",
+ REG_FIFOCONF5,
+ REG_FIFOCONF5_BFIFOWAKEUP_SHIFT,
+ REG_FIFOCONF5_BFIFOWAKEUP_MAX,
+ NORMAL),
+ SOC_SINGLE("Burst FIFO Samples In FIFO",
+ REG_FIFOCONF6,
+ REG_FIFOCONF6_BFIFOSAMPLE_SHIFT,
+ REG_FIFOCONF6_BFIFOSAMPLE_MAX,
+ NORMAL),
+
+ /* Sidetone */
+ SOC_SINGLE("Sidetone FIR Coefficient Index",
+ REG_SIDFIRADR,
+ REG_SIDFIRADR_ADDRESS_SHIFT,
+ REG_SIDFIRADR_ADDRESS_MAX,
+ NORMAL),
+ SOC_SINGLE_S2R("Sidetone FIR Coefficient Value",
+ REG_SIDFIRCOEF1, REG_SIDFIRCOEF2,
+ REG_SIDFIRCOEFX_VALUE_SHIFT,
+ REG_SIDFIRCOEFX_VALUE_MAX,
+ NORMAL),
+ SOC_ENUM_STROBE("Sidetone FIR Apply Coefficients",
+ soc_enum_applysidetone),
+
+ /* ANC */
+ SOC_SINGLE_S1R("ANC Warp Delay Shift",
+ REG_ANCCONF2,
+ REG_ANCCONF2_VALUE_MIN,
+ REG_ANCCONF2_VALUE_MAX,
+ NORMAL),
+ SOC_SINGLE_S1R("ANC FIR Output Shift",
+ REG_ANCCONF3,
+ REG_ANCCONF3_VALUE_MIN,
+ REG_ANCCONF3_VALUE_MAX,
+ NORMAL),
+ SOC_SINGLE_S1R("ANC IIR Output Shift",
+ REG_ANCCONF4,
+ REG_ANCCONF4_VALUE_MIN,
+ REG_ANCCONF4_VALUE_MAX,
+ NORMAL),
+ SOC_SINGLE_S2R("ANC Warp Delay",
+ REG_ANCCONF9, REG_ANCCONF10,
+ REG_ANC_WARP_DELAY_MIN,
+ REG_ANC_WARP_DELAY_MAX,
+ NORMAL),
+ SOC_MULTIPLE_SA("ANC FIR Coefficients",
+ anc_fir_cache,
+ REG_ANC_FIR_COEFF_MIN,
+ REG_ANC_FIR_COEFF_MAX,
+ NORMAL),
+ SOC_MULTIPLE_SA("ANC IIR Coefficients",
+ anc_iir_cache,
+ REG_ANC_IIR_COEFF_MIN,
+ REG_ANC_IIR_COEFF_MAX,
+ NORMAL),
+};
+
+static int ab8500_codec_set_format_if1(struct snd_soc_codec *codec, unsigned int fmt)
+{
+ unsigned int clear_mask, set_mask;
+
+ /* Master or slave */
+
+ clear_mask = BMASK(REG_DIGIFCONF3_IF1MASTER);
+ set_mask = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM: /* codec clk & FRM master */
+ pr_debug("%s: IF1 Master-mode: AB8500 master.\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF3_IF1MASTER);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS: /* codec clk & FRM slave */
+ pr_debug("%s: IF1 Master-mode: AB8500 slave.\n", __func__);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM: /* codec clk slave & FRM master */
+ case SND_SOC_DAIFMT_CBM_CFS: /* codec clk master & frame slave */
+ pr_err("%s: ERROR: The device is either a master or a slave.\n",
+ __func__);
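+		/* Fall through to the default error return */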
+ default:
+ pr_err("%s: ERROR: Unsupported master mask 0x%x\n",
+ __func__,
+ fmt & SND_SOC_DAIFMT_MASTER_MASK);
+ return -EINVAL;
+ }
+
+	ab8500_codec_update_reg_audio(codec,
+			REG_DIGIFCONF3,
+			clear_mask,
+			set_mask);
+
+ /* I2S or TDM */
+
+ clear_mask = BMASK(REG_DIGIFCONF4_FSYNC1P) |
+ BMASK(REG_DIGIFCONF4_BITCLK1P) |
+ BMASK(REG_DIGIFCONF4_IF1FORMAT1) |
+ BMASK(REG_DIGIFCONF4_IF1FORMAT0);
+ set_mask = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S: /* I2S mode */
+ pr_debug("%s: IF1 Protocol: I2S\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF4_IF1FORMAT1);
+ break;
+ case SND_SOC_DAIFMT_DSP_B: /* L data MSB during FRM LRC */
+ pr_debug("%s: IF1 Protocol: DSP B (TDM)\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF4_IF1FORMAT0);
+ break;
+ default:
+ pr_err("%s: ERROR: Unsupported format (0x%x)!\n",
+ __func__,
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ return -EINVAL;
+ }
+
+ ab8500_codec_update_reg_audio(codec, REG_DIGIFCONF4, clear_mask, set_mask);
+
+ return 0;
+}
+
+static int ab8500_codec_set_word_length_if1(struct snd_soc_codec *codec, unsigned int wl)
+{
+ unsigned int clear_mask, set_mask;
+
+ clear_mask = BMASK(REG_DIGIFCONF4_IF1WL1) | BMASK(REG_DIGIFCONF4_IF1WL0);
+ set_mask = 0;
+
+ switch (wl) {
+ case 16:
+ break;
+ case 20:
+ set_mask |= BMASK(REG_DIGIFCONF4_IF1WL0);
+ break;
+ case 24:
+ set_mask |= BMASK(REG_DIGIFCONF4_IF1WL1);
+ break;
+ case 32:
+		set_mask |= BMASK(REG_DIGIFCONF4_IF1WL1) |
+			BMASK(REG_DIGIFCONF4_IF1WL0);
+ break;
+ default:
+		pr_err("%s: Unsupported word-length 0x%x\n", __func__, wl);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: Word-length: %d bits.\n", __func__, wl);
+ ab8500_codec_update_reg_audio(codec, REG_DIGIFCONF4, clear_mask, set_mask);
+
+ return 0;
+}
+
+static int ab8500_codec_set_bit_delay_if1(struct snd_soc_codec *codec, unsigned int delay)
+{
+ unsigned int clear_mask, set_mask;
+
+ clear_mask = BMASK(REG_DIGIFCONF4_IF1DEL);
+ set_mask = 0;
+
+ switch (delay) {
+ case 0:
+ break;
+ case 1:
+ set_mask |= BMASK(REG_DIGIFCONF4_IF1DEL);
+ break;
+ default:
+ pr_err("%s: ERROR: Unsupported bit-delay (0x%x)!\n", __func__, delay);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: IF1 Bit-delay: %d bits.\n", __func__, delay);
+ ab8500_codec_update_reg_audio(codec, REG_DIGIFCONF4, clear_mask, set_mask);
+
+ return 0;
+}
+
+/* Configure the audio macrocell in the AB8500 chip */
+static void ab8500_codec_configure_audio_macrocell(struct snd_soc_codec *codec)
+{
+ int data, ret;
+
+ ret = ab8500_sysctrl_write(AB8500_STW4500CTRL3,
+ AB8500_STW4500CTRL3_CLK32KOUT2DIS | AB8500_STW4500CTRL3_RESETAUDN,
+ AB8500_STW4500CTRL3_RESETAUDN);
+ if (ret < 0)
+ pr_err("%s: WARN: Unable to set reg STW4500CTRL3!\n", __func__);
+
+ data = ab8500_codec_read_reg(codec, AB8500_MISC, AB8500_GPIO_DIR4_REG);
+ data |= GPIO27_DIR_OUTPUT | GPIO29_DIR_OUTPUT | GPIO31_DIR_OUTPUT;
+ ab8500_codec_write_reg(codec, AB8500_MISC, AB8500_GPIO_DIR4_REG, data);
+}
+
+/* Extended interface for codec-driver */
+
+int ab8500_audio_power_control(bool power_on)
+{
+ int pwr_mask = BMASK(REG_POWERUP_POWERUP) | BMASK(REG_POWERUP_ENANA);
+
+ if (ab8500_codec == NULL) {
+ pr_err("%s: ERROR: AB8500 ASoC-driver not yet probed!\n", __func__);
+ return -EIO;
+ }
+
+	pr_debug("%s AB8500.\n", (power_on) ? "Enabling" : "Disabling");
+
+ return ab8500_codec_update_reg_audio(ab8500_codec, REG_POWERUP,
+ pwr_mask, (power_on) ? pwr_mask : REG_MASK_NONE);
+}
+
+void ab8500_audio_pwm_vibra(unsigned char speed_left_pos,
+ unsigned char speed_left_neg,
+ unsigned char speed_right_pos,
+ unsigned char speed_right_neg)
+{
+ unsigned int clear_mask, set_mask;
+ bool vibra_on;
+
+ if (ab8500_codec == NULL) {
+ pr_err("%s: ERROR: AB8500 ASoC-driver not yet probed!\n", __func__);
+ return;
+ }
+
+ vibra_on = speed_left_pos | speed_left_neg | speed_right_pos | speed_right_neg;
+ if (!vibra_on) {
+ speed_left_pos = 0;
+ speed_left_neg = 0;
+ speed_right_pos = 0;
+ speed_right_neg = 0;
+ }
+
+ pr_debug("%s: PWM-vibra (%d, %d, %d, %d).\n",
+ __func__,
+ speed_left_pos,
+ speed_left_neg,
+ speed_right_pos,
+ speed_right_neg);
+
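+	/*
+	 * Hand both vibra outputs to the PWM generators and take direct
+	 * control of the positive and negative duty cycles.
+	 */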
+ set_mask = BMASK(REG_PWMGENCONF1_PWMTOVIB1) |
+ BMASK(REG_PWMGENCONF1_PWMTOVIB2) |
+ BMASK(REG_PWMGENCONF1_PWM1CTRL) |
+ BMASK(REG_PWMGENCONF1_PWM2CTRL) |
+ BMASK(REG_PWMGENCONF1_PWM1NCTRL) |
+ BMASK(REG_PWMGENCONF1_PWM1PCTRL) |
+ BMASK(REG_PWMGENCONF1_PWM2NCTRL) |
+ BMASK(REG_PWMGENCONF1_PWM2PCTRL);
+ ab8500_codec_update_reg_audio(ab8500_codec, REG_PWMGENCONF1, 0x00, set_mask);
+
+ if (speed_left_pos > REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX)
+ speed_left_pos = REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX;
+ ab8500_codec_update_reg_audio(ab8500_codec, REG_PWMGENCONF3, REG_MASK_ALL, speed_left_pos);
+
+ if (speed_left_neg > REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX)
+ speed_left_neg = REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX;
+ ab8500_codec_update_reg_audio(ab8500_codec, REG_PWMGENCONF2, REG_MASK_ALL, speed_left_neg);
+
+ if (speed_right_pos > REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX)
+ speed_right_pos = REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX;
+ ab8500_codec_update_reg_audio(ab8500_codec, REG_PWMGENCONF5, REG_MASK_ALL, speed_right_pos);
+
+ if (speed_right_neg > REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX)
+ speed_right_neg = REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX;
+ ab8500_codec_update_reg_audio(ab8500_codec, REG_PWMGENCONF4, REG_MASK_ALL, speed_right_neg);
+
+ if (vibra_on) {
+ clear_mask = 0;
+ set_mask = BMASK(REG_ANACONF4_ENVIB1) | BMASK(REG_ANACONF4_ENVIB2);
+ } else {
+ clear_mask = BMASK(REG_ANACONF4_ENVIB1) | BMASK(REG_ANACONF4_ENVIB2);
+ set_mask = 0;
+	}
+ ab8500_codec_update_reg_audio(ab8500_codec, REG_ANACONF4, clear_mask, set_mask);
+}
+
+int ab8500_audio_set_word_length(struct snd_soc_dai *dai, unsigned int wl)
+{
+ unsigned int clear_mask, set_mask;
+ struct snd_soc_codec *codec = dai->codec;
+
+ clear_mask = BMASK(REG_DIGIFCONF2_IF0WL0) | BMASK(REG_DIGIFCONF2_IF0WL1);
+ set_mask = 0;
+
+ switch (wl) {
+ case 16:
+ break;
+ case 20:
+ set_mask |= BMASK(REG_DIGIFCONF2_IF0WL0);
+ break;
+ case 24:
+ set_mask |= BMASK(REG_DIGIFCONF2_IF0WL1);
+ break;
+ case 32:
+ set_mask |= BMASK(REG_DIGIFCONF2_IF0WL1) |
+ BMASK(REG_DIGIFCONF2_IF0WL0);
+ break;
+ default:
+ pr_err("%s: Unsupported word-length 0x%x\n", __func__, wl);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: IF0 Word-length: %d bits.\n", __func__, wl);
+ ab8500_codec_update_reg_audio(codec, REG_DIGIFCONF2, clear_mask, set_mask);
+
+ return 0;
+}
+
+int ab8500_audio_set_bit_delay(struct snd_soc_dai *dai, unsigned int delay)
+{
+ unsigned int clear_mask, set_mask;
+ struct snd_soc_codec *codec = dai->codec;
+
+ clear_mask = BMASK(REG_DIGIFCONF2_IF0DEL);
+ set_mask = 0;
+
+ switch (delay) {
+ case 0:
+ break;
+ case 1:
+ set_mask |= BMASK(REG_DIGIFCONF2_IF0DEL);
+ break;
+ default:
+ pr_err("%s: ERROR: Unsupported bit-delay (0x%x)!\n", __func__, delay);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: IF0 Bit-delay: %d bits.\n", __func__, delay);
+ ab8500_codec_update_reg_audio(codec, REG_DIGIFCONF2, clear_mask, set_mask);
+
+ return 0;
+}
+
+int ab8500_audio_setup_if1(struct snd_soc_codec *codec,
+ unsigned int fmt,
+ unsigned int wl,
+ unsigned int delay)
+{
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ ret = ab8500_codec_set_format_if1(codec, fmt);
+ if (ret)
+ return ret;
+
+ ret = ab8500_codec_set_bit_delay_if1(codec, delay);
+ if (ret)
+ return ret;
+
+ ret = ab8500_codec_set_word_length_if1(codec, wl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
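+
+/*
+ * Usage sketch (illustrative only, not part of this patch): the driver that
+ * owns audio interface 1 is expected to configure it in one call, e.g.
+ *
+ *	ret = ab8500_audio_setup_if1(codec, fmt, 16, 0);
+ *
+ * where fmt is an SND_SOC_DAIFMT_* value and the word length and bit delay
+ * are assumed to take the same values as the IF0 helpers above (16/20/24/32
+ * bits, delay 0 or 1).
+ */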
+
+/* ANC block current configuration status */
+unsigned int ab8500_audio_anc_status(void)
+{
+ return ab8500_anc_status;
+}
+
+/* ANC IIR-/FIR-coefficients configuration sequence */
+int ab8500_audio_anc_configure(unsigned int req_state)
+{
+ bool configure_fir = req_state == ANC_CONFIGURE_FIR ||
+ req_state == ANC_CONFIGURE_FIR_IIR;
+ bool configure_iir = req_state == ANC_CONFIGURE_IIR ||
+ req_state == ANC_CONFIGURE_FIR_IIR;
+ unsigned int bank, param;
+ int ret;
+
+ if (req_state == ANC_UNCONFIGURED ||
+ req_state == ANC_FIR_IIR_CONFIGURED ||
+ req_state == ANC_FIR_CONFIGURED ||
+ req_state == ANC_IIR_CONFIGURED ||
+ req_state == ANC_ERROR)
+ return -EINVAL;
+
+ mutex_lock(&ab8500_anc_conf_lock);
+
+ if (configure_fir)
+ AB8500_CLEAR_BIT_LOCKED(REG_ANCCONF1, REG_ANCCONF1_ENANC, ret, cleanup)
+
+ AB8500_SET_BIT_LOCKED(REG_ANCCONF1, REG_ANCCONF1_ENANC, ret, cleanup)
+
+ if (configure_fir) {
+ for (bank = 0; bank < AB8500_NR_OF_ANC_COEFF_BANKS; bank++) {
+ for (param = 0; param < REG_ANC_FIR_COEFFS; param++) {
+ if (param == 0 && bank == 0)
+ AB8500_SET_BIT_LOCKED(REG_ANCCONF1,
+ REG_ANCCONF1_ANCFIRUPDATE, ret, cleanup)
+
+ AB8500_WRITE(REG_ANCCONF5,
+ anc_fir_cache[param] >> 8 & REG_MASK_ALL,
+ ret, cleanup)
+ AB8500_WRITE(REG_ANCCONF6,
+ anc_fir_cache[param] & REG_MASK_ALL,
+ ret, cleanup)
+
+ if (param == REG_ANC_FIR_COEFFS - 1 && bank == 1)
+ AB8500_CLEAR_BIT_LOCKED(REG_ANCCONF1,
+ REG_ANCCONF1_ANCFIRUPDATE, ret, cleanup)
+ }
+ }
+ if (ab8500_anc_status == ANC_IIR_CONFIGURED)
+ ab8500_anc_status = ANC_FIR_IIR_CONFIGURED;
+ else if (ab8500_anc_status != ANC_FIR_IIR_CONFIGURED)
+ ab8500_anc_status = ANC_FIR_CONFIGURED;
+ }
+
+ if (configure_iir) {
+ for (bank = 0; bank < AB8500_NR_OF_ANC_COEFF_BANKS; bank++) {
+ for (param = 0; param < REG_ANC_IIR_COEFFS; param++) {
+ if (param == 0) {
+ if (bank == 0) {
+ AB8500_SET_BIT_LOCKED(REG_ANCCONF1,
+ REG_ANCCONF1_ANCIIRINIT,
+ ret, cleanup)
+ udelay(2000);
+ AB8500_CLEAR_BIT_LOCKED(REG_ANCCONF1,
+ REG_ANCCONF1_ANCIIRINIT,
+ ret, cleanup)
+ udelay(2000);
+ } else {
+ AB8500_SET_BIT_LOCKED(REG_ANCCONF1,
+ REG_ANCCONF1_ANCIIRUPDATE,
+ ret, cleanup)
+ }
+ } else if (param > 3) {
+ AB8500_WRITE(REG_ANCCONF7,
+ REG_MASK_NONE, ret, cleanup)
+ AB8500_WRITE(REG_ANCCONF8,
+ anc_iir_cache[param] >> 16 & REG_MASK_ALL,
+ ret, cleanup)
+ }
+
+ AB8500_WRITE(REG_ANCCONF7,
+ anc_iir_cache[param] >> 8 & REG_MASK_ALL,
+ ret, cleanup)
+ AB8500_WRITE(REG_ANCCONF8,
+ anc_iir_cache[param] & REG_MASK_ALL,
+ ret, cleanup)
+
+ if (param == REG_ANC_IIR_COEFFS - 1 && bank == 1)
+ AB8500_CLEAR_BIT_LOCKED(REG_ANCCONF1,
+ REG_ANCCONF1_ANCIIRUPDATE,
+ ret, cleanup)
+ }
+ }
+ if (ab8500_anc_status == ANC_FIR_CONFIGURED)
+ ab8500_anc_status = ANC_FIR_IIR_CONFIGURED;
+ else if (ab8500_anc_status != ANC_FIR_IIR_CONFIGURED)
+ ab8500_anc_status = ANC_IIR_CONFIGURED;
+ }
+
+ mutex_unlock(&ab8500_anc_conf_lock);
+
+ return 0;
+
+cleanup:
+ ret |= ab8500_codec_update_reg_audio_locked(ab8500_codec,
+ REG_ANCCONF1,
+ BMASK(REG_ANCCONF1_ENANC) |
+ BMASK(REG_ANCCONF1_ANCIIRINIT) |
+ BMASK(REG_ANCCONF1_ANCIIRUPDATE) |
+ BMASK(REG_ANCCONF1_ANCFIRUPDATE),
+ REG_MASK_NONE);
+
+ ab8500_anc_status = ANC_ERROR;
+
+ mutex_unlock(&ab8500_anc_conf_lock);
+
+ return ret;
+}
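+
+/*
+ * Usage sketch (illustrative only, not part of this patch): once the FIR/IIR
+ * coefficient caches have been filled (assumed to happen via the ANC
+ * controls), a configuration is requested and the result can be checked, e.g.
+ *
+ *	ret = ab8500_audio_anc_configure(ANC_CONFIGURE_FIR_IIR);
+ *	if (ret == 0 && ab8500_audio_anc_status() == ANC_FIR_IIR_CONFIGURED)
+ *		... ANC block is ready ...
+ *
+ * Requests other than ANC_CONFIGURE_FIR, ANC_CONFIGURE_IIR and
+ * ANC_CONFIGURE_FIR_IIR are rejected with -EINVAL.
+ */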
+
+bool ab8500_audio_dapm_path_active(enum ab8500_audio_dapm_path dapm_path)
+{
+ int reg, reg_mask;
+
+ switch (dapm_path) {
+ case AB8500_AUDIO_DAPM_PATH_DMIC:
+ reg = ab8500_codec_read_reg_audio(ab8500_codec, REG_DIGMICCONF);
+ reg_mask = BMASK(REG_DIGMICCONF_ENDMIC1) |
+ BMASK(REG_DIGMICCONF_ENDMIC2) |
+ BMASK(REG_DIGMICCONF_ENDMIC3) |
+ BMASK(REG_DIGMICCONF_ENDMIC4) |
+ BMASK(REG_DIGMICCONF_ENDMIC5) |
+ BMASK(REG_DIGMICCONF_ENDMIC6);
+ return reg & reg_mask;
+
+ case AB8500_AUDIO_DAPM_PATH_AMIC1:
+ reg = ab8500_codec_read_reg_audio(ab8500_codec, REG_ANACONF2);
+ reg_mask = BMASK(REG_ANACONF2_MUTMIC1);
+ return !(reg & reg_mask);
+
+ case AB8500_AUDIO_DAPM_PATH_AMIC2:
+ reg = ab8500_codec_read_reg_audio(ab8500_codec, REG_ANACONF2);
+ reg_mask = BMASK(REG_ANACONF2_MUTMIC2);
+ return !(reg & reg_mask);
+
+ default:
+ return false;
+ }
+}
+
+int ab8500_audio_set_adcm(enum ab8500_audio_adcm req_adcm)
+{
+ int ret = 0;
+
+ if (ab8500_codec == NULL) {
+ pr_err("%s: ERROR: AB8500 ASoC-driver not yet probed!\n", __func__);
+ return -EIO;
+ }
+
+ if (adcm == req_adcm)
+ return ret;
+
+ if (AB8500_AUDIO_ADCM_FORCE_UP == req_adcm ||
+ AB8500_AUDIO_ADCM_FORCE_DOWN == req_adcm) {
+
+ mutex_lock(&ab8500_codec->mutex);
+
+ adcm_anaconf5 = ab8500_codec_read_reg_audio(ab8500_codec, REG_ANACONF5);
+ adcm_muteconf = ab8500_codec_read_reg_audio(ab8500_codec, REG_MUTECONF);
+ adcm_anaconf4 = ab8500_codec_read_reg_audio(ab8500_codec, REG_ANACONF4);
+
+ if (AB8500_AUDIO_ADCM_FORCE_UP == req_adcm) {
+ ret |= ab8500_codec_update_reg_audio(ab8500_codec,
+ REG_ANACONF5, REG_MASK_NONE, ADCM_ANACONF5_MASK);
+ if (ret < 0)
+ goto cleanup;
+ ret |= ab8500_codec_update_reg_audio(ab8500_codec,
+ REG_MUTECONF, REG_MASK_NONE, ADCM_MUTECONF_MASK);
+ if (ret < 0)
+ goto cleanup;
+ ret |= ab8500_codec_update_reg_audio(ab8500_codec,
+ REG_ANACONF4, REG_MASK_NONE, ADCM_ANACONF4_MASK);
+ if (ret < 0)
+ goto cleanup;
+ } else {
+ ret |= ab8500_codec_update_reg_audio(ab8500_codec,
+ REG_ANACONF5, ADCM_ANACONF5_MASK, REG_MASK_NONE);
+ if (ret < 0)
+ goto cleanup;
+ }
+ } else if (AB8500_AUDIO_ADCM_NORMAL == req_adcm) {
+
+ if (AB8500_AUDIO_ADCM_FORCE_UP == adcm) {
+ ret |= ab8500_codec_update_reg_audio(ab8500_codec,
+ REG_ANACONF5, ~adcm_anaconf5 & ADCM_ANACONF5_MASK,
+ adcm_anaconf5 & ADCM_ANACONF5_MASK);
+ if (ret < 0)
+ goto cleanup;
+ ret |= ab8500_codec_update_reg_audio(ab8500_codec,
+ REG_MUTECONF, ~adcm_muteconf & ADCM_MUTECONF_MASK,
+ adcm_muteconf & ADCM_MUTECONF_MASK);
+ if (ret < 0)
+ goto cleanup;
+ ret |= ab8500_codec_update_reg_audio(ab8500_codec,
+ REG_ANACONF4, ~adcm_anaconf4 & ADCM_ANACONF4_MASK,
+ adcm_anaconf4 & ADCM_ANACONF4_MASK);
+ if (ret < 0)
+ goto cleanup;
+ } else {
+ ret |= ab8500_codec_update_reg_audio(ab8500_codec,
+ REG_ANACONF5, ~adcm_anaconf5 & ADCM_ANACONF5_MASK,
+ adcm_anaconf5 & ADCM_ANACONF5_MASK);
+ if (ret < 0)
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ adcm = (ret < 0) ? AB8500_AUDIO_ADCM_NORMAL : req_adcm;
+
+ if (AB8500_AUDIO_ADCM_NORMAL == adcm)
+ mutex_unlock(&ab8500_codec->mutex);
+
+ return ret;
+}
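+
+/*
+ * Usage sketch (illustrative only, not part of this patch): callers force the
+ * ADCM state around a reconfiguration and then restore it, e.g.
+ *
+ *	ab8500_audio_set_adcm(AB8500_AUDIO_ADCM_FORCE_UP);
+ *	... reconfigure analog paths ...
+ *	ab8500_audio_set_adcm(AB8500_AUDIO_ADCM_NORMAL);
+ *
+ * Note that the codec mutex is taken on FORCE_UP/FORCE_DOWN and only released
+ * when returning to NORMAL, so the two calls must always be paired.
+ */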
+
+static int ab8500_codec_add_widgets(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(&codec->dapm, ab8500_dapm_widgets,
+ ARRAY_SIZE(ab8500_dapm_widgets));
+ if (ret < 0) {
+ pr_err("%s: Failed to create DAPM controls (%d).\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&codec->dapm, dapm_routes, ARRAY_SIZE(dapm_routes));
+ if (ret < 0) {
+ pr_err("%s: Failed to add DAPM routes (%d).\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ab8500_codec_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *dai)
+{
+ pr_debug("%s Enter.\n", __func__);
+ return 0;
+}
+
+static int ab8500_codec_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ pr_debug("%s Enter.\n", __func__);
+
+ return 0;
+}
+
+static int ab8500_codec_pcm_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ pr_debug("%s Enter.\n", __func__);
+
+ /* Clear interrupt status registers by reading them. */
+ ab8500_codec_read_reg_audio(dai->codec, REG_AUDINTSOURCE1);
+ ab8500_codec_read_reg_audio(dai->codec, REG_AUDINTSOURCE2);
+
+ return 0;
+}
+
+static void ab8500_codec_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ pr_debug("%s Enter.\n", __func__);
+
+ ab8500_codec_dump_all_reg(dai->codec);
+}
+
+static int ab8500_codec_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ pr_debug("%s Enter.\n", __func__);
+
+ return 0;
+}
+
+/* Gates clocking according to the format mask */
+static int ab8500_codec_set_dai_clock_gate(struct snd_soc_codec *codec, unsigned int fmt)
+{
+ unsigned int clear_mask;
+ unsigned int set_mask;
+
+ clear_mask = BMASK(REG_DIGIFCONF1_ENMASTGEN) |
+ BMASK(REG_DIGIFCONF1_ENFSBITCLK0);
+
+ set_mask = BMASK(REG_DIGIFCONF1_ENMASTGEN);
+
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_MASK) {
+ case SND_SOC_DAIFMT_CONT: /* continuous clock */
+ pr_debug("%s: IF0 Clock is continuous.\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF1_ENFSBITCLK0);
+ break;
+ case SND_SOC_DAIFMT_GATED: /* clock is gated */
+ pr_debug("%s: IF0 Clock is gated.\n", __func__);
+ break;
+ default:
+ pr_err("%s: ERROR: Unsupported clock mask (0x%x)!\n",
+ __func__,
+ fmt & SND_SOC_DAIFMT_CLOCK_MASK);
+ return -EINVAL;
+ }
+
+ ab8500_codec_update_reg_audio(codec, REG_DIGIFCONF1, clear_mask, set_mask);
+
+ return 0;
+}
+
+static int ab8500_codec_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ unsigned int clear_mask;
+ unsigned int set_mask;
+ struct snd_soc_codec *codec = dai->codec;
+ int err;
+
+ pr_debug("%s: Enter (fmt = 0x%x)\n", __func__, fmt);
+
+ clear_mask = BMASK(REG_DIGIFCONF3_IF1DATOIF0AD) |
+ BMASK(REG_DIGIFCONF3_IF1CLKTOIF0CLK) |
+ BMASK(REG_DIGIFCONF3_IF0BFIFOEN) |
+ BMASK(REG_DIGIFCONF3_IF0MASTER);
+ set_mask = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM: /* codec clk & FRM master */
+ pr_debug("%s: IF0 Master-mode: AB8500 master.\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF3_IF0MASTER);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS: /* codec clk & FRM slave */
+ pr_debug("%s: IF0 Master-mode: AB8500 slave.\n", __func__);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM: /* codec clk slave & FRM master */
+ case SND_SOC_DAIFMT_CBM_CFS: /* codec clk master & frame slave */
+ pr_err("%s: ERROR: The device is either a master or a slave.\n", __func__);
+ /* Fall through */
+ default:
+ pr_err("%s: ERROR: Unsupported master mask 0x%x\n",
+ __func__,
+ (fmt & SND_SOC_DAIFMT_MASTER_MASK));
+ return -EINVAL;
+ }
+
+ ab8500_codec_update_reg_audio(codec, REG_DIGIFCONF3, clear_mask, set_mask);
+
+ /* Set clock gating */
+ err = ab8500_codec_set_dai_clock_gate(codec, fmt);
+ if (err) {
+ pr_err("%s: ERROR: Failed to set clock gate (%d).\n", __func__, err);
+ return err;
+ }
+
+ /* Setting data transfer format */
+
+ clear_mask = BMASK(REG_DIGIFCONF2_IF0FORMAT0) |
+ BMASK(REG_DIGIFCONF2_IF0FORMAT1) |
+ BMASK(REG_DIGIFCONF2_FSYNC0P) |
+ BMASK(REG_DIGIFCONF2_BITCLK0P);
+ set_mask = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S: /* I2S mode */
+ pr_debug("%s: IF0 Protocol: I2S\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF2_IF0FORMAT1);
+
+ /* 32 bit, 0 delay */
+ ab8500_audio_set_word_length(dai, 32);
+ ab8500_audio_set_bit_delay(dai, 0);
+
+ break;
+ case SND_SOC_DAIFMT_DSP_A: /* L data MSB after FRM LRC */
+ pr_debug("%s: IF0 Protocol: DSP A (TDM)\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF2_IF0FORMAT0);
+ break;
+ case SND_SOC_DAIFMT_DSP_B: /* L data MSB during FRM LRC */
+ pr_debug("%s: IF0 Protocol: DSP B (TDM)\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF2_IF0FORMAT0);
+ break;
+ default:
+ pr_err("%s: ERROR: Unsupported format (0x%x)!\n",
+ __func__,
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF: /* normal bit clock + frame */
+ pr_debug("%s: IF0: Normal bit clock, normal frame\n", __func__);
+ break;
+ case SND_SOC_DAIFMT_NB_IF: /* normal BCLK + inv FRM */
+ pr_debug("%s: IF0: Normal bit clock, inverted frame\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF2_FSYNC0P);
+ break;
+ case SND_SOC_DAIFMT_IB_NF: /* invert BCLK + nor FRM */
+ pr_debug("%s: IF0: Inverted bit clock, normal frame\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF2_BITCLK0P);
+ break;
+ case SND_SOC_DAIFMT_IB_IF: /* invert BCLK + FRM */
+ pr_debug("%s: IF0: Inverted bit clock, inverted frame\n", __func__);
+ set_mask |= BMASK(REG_DIGIFCONF2_FSYNC0P);
+ set_mask |= BMASK(REG_DIGIFCONF2_BITCLK0P);
+ break;
+ default:
+ pr_err("%s: ERROR: Unsupported INV mask 0x%x\n",
+ __func__,
+ (fmt & SND_SOC_DAIFMT_INV_MASK));
+ return -EINVAL;
+ }
+
+ ab8500_codec_update_reg_audio(codec, REG_DIGIFCONF2, clear_mask, set_mask);
+
+ return 0;
+}
+
+static int ab8500_codec_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int set_mask, clear_mask, slots_active;
+
+ /* Only 16 bit slot width is supported at the moment in TDM mode */
+ if (slot_width != 16) {
+ pr_err("%s: ERROR: Unsupported slot_width %d.\n",
+ __func__, slot_width);
+ return -EINVAL;
+ }
+
+ /* Setup TDM clocking according to slot count */
+ pr_debug("%s: Slots, total: %d\n", __func__, slots);
+ clear_mask = BMASK(REG_DIGIFCONF1_IF0BITCLKOS0) |
+ BMASK(REG_DIGIFCONF1_IF0BITCLKOS1);
+ switch (slots) {
+ case 2:
+ set_mask = REG_MASK_NONE;
+ break;
+ case 4:
+ set_mask = BMASK(REG_DIGIFCONF1_IF0BITCLKOS0);
+ break;
+ case 8:
+ set_mask = BMASK(REG_DIGIFCONF1_IF0BITCLKOS1);
+ break;
+ case 16:
+ set_mask = BMASK(REG_DIGIFCONF1_IF0BITCLKOS0) |
+ BMASK(REG_DIGIFCONF1_IF0BITCLKOS1);
+ break;
+ default:
+ pr_err("%s: ERROR: Unsupported number of slots (%d)!\n", __func__, slots);
+ return -EINVAL;
+ }
+ ab8500_codec_update_reg_audio(codec, REG_DIGIFCONF1, clear_mask, set_mask);
+
+ /* Setup TDM DA according to active tx slots */
+ clear_mask = REG_DASLOTCONFX_SLTODAX_MASK;
+ slots_active = hweight32(tx_mask);
+ pr_debug("%s: Slots, active, TX: %d\n", __func__, slots_active);
+ switch (slots_active) {
+ case 0:
+ break;
+ case 1:
+ /* Slot 9 -> DA_IN1 & DA_IN3 */
+ ab8500_codec_update_reg_audio(codec, REG_DASLOTCONF1, clear_mask, 9);
+ ab8500_codec_update_reg_audio(codec, REG_DASLOTCONF3, clear_mask, 9);
+ break;
+ case 2:
+ /* Slot 9 -> DA_IN1 & DA_IN3, Slot 11 -> DA_IN2 & DA_IN4 */
+ ab8500_codec_update_reg_audio(codec, REG_DASLOTCONF1, clear_mask, 9);
+ ab8500_codec_update_reg_audio(codec, REG_DASLOTCONF3, clear_mask, 9);
+ ab8500_codec_update_reg_audio(codec, REG_DASLOTCONF2, clear_mask, 11);
+ ab8500_codec_update_reg_audio(codec, REG_DASLOTCONF4, clear_mask, 11);
+
+ break;
+ case 8:
+ pr_debug("%s: In 8-channel mode DA-from-slot mapping is set manually.", __func__);
+ break;
+ default:
+ pr_err("%s: Unsupported number of active TX-slots (%d)!\n", __func__, slots_active);
+ return -EINVAL;
+ }
+
+ /* Setup TDM AD according to active RX-slots */
+ slots_active = hweight32(rx_mask);
+ pr_debug("%s: Slots, active, RX: %d\n", __func__, slots_active);
+ switch (slots_active) {
+ case 0:
+ break;
+ case 1:
+ /* AD_OUT3 -> slot 0 & 1 */
+ ab8500_codec_update_reg_audio(codec, REG_ADSLOTSEL1,
+ REG_MASK_ALL,
+ REG_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN |
+ REG_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD);
+ break;
+ case 2:
+ /* AD_OUT3 -> slot 0, AD_OUT2 -> slot 1 */
+ ab8500_codec_update_reg_audio(codec, REG_ADSLOTSEL1,
+ REG_MASK_ALL,
+ REG_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN |
+ REG_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD);
+ break;
+ case 8:
+ pr_debug("%s: In 8-channel mode AD-to-slot mapping is set manually.", __func__);
+ break;
+ default:
+ pr_err("%s: Unsupported number of active RX-slots (%d)!\n", __func__, slots_active);
+ return -EINVAL;
+ }
+
+ return 0;
+}
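+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a machine driver
+ * selects the TDM layout through the standard ASoC call, e.g.
+ *
+ *	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x03, 0x03, 8, 16);
+ *
+ * which with the switch above sets up an 8-slot, 16-bit frame with two active
+ * TX slots (slot 9 -> DA_IN1/DA_IN3, slot 11 -> DA_IN2/DA_IN4) and two active
+ * RX slots (AD_OUT3 to slot 0, AD_OUT2 to slot 1). The masks and slot count
+ * shown are only an example.
+ */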
+
+struct snd_soc_dai_driver ab8500_codec_dai[] = {
+ {
+ .name = "ab8500-codec-dai.0",
+ .id = 0,
+ .playback = {
+ .stream_name = "ab8500_0p",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = AB8500_SUPPORTED_RATE,
+ .formats = AB8500_SUPPORTED_FMT,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .startup = ab8500_codec_pcm_startup,
+ .prepare = ab8500_codec_pcm_prepare,
+ .hw_params = ab8500_codec_pcm_hw_params,
+ .shutdown = ab8500_codec_pcm_shutdown,
+ .set_sysclk = ab8500_codec_set_dai_sysclk,
+ .set_tdm_slot = ab8500_codec_set_dai_tdm_slot,
+ .set_fmt = ab8500_codec_set_dai_fmt,
+ }
+ },
+ .symmetric_rates = 1
+ },
+ {
+ .name = "ab8500-codec-dai.1",
+ .id = 1,
+ .capture = {
+ .stream_name = "ab8500_0c",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = AB8500_SUPPORTED_RATE,
+ .formats = AB8500_SUPPORTED_FMT,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .startup = ab8500_codec_pcm_startup,
+ .prepare = ab8500_codec_pcm_prepare,
+ .hw_params = ab8500_codec_pcm_hw_params,
+ .shutdown = ab8500_codec_pcm_shutdown,
+ .set_sysclk = ab8500_codec_set_dai_sysclk,
+ .set_tdm_slot = ab8500_codec_set_dai_tdm_slot,
+ .set_fmt = ab8500_codec_set_dai_fmt,
+ }
+ },
+ .symmetric_rates = 1
+ }
+};
+
+static int ab8500_codec_probe(struct snd_soc_codec *codec)
+{
+ int i, ret;
+ u8 *cache = codec->reg_cache;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ ab8500_codec_configure_audio_macrocell(codec);
+
+ for (i = REG_AUDREV; i >= REG_POWERUP; i--)
+ ab8500_codec_write_reg_audio(codec, i, cache[i]);
+
+ /* Add controls */
+ ret = snd_soc_add_controls(codec, ab8500_snd_controls,
+ ARRAY_SIZE(ab8500_snd_controls));
+ if (ret < 0) {
+ pr_err("%s: failed to add soc controls (%d).\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Add DAPM-widgets */
+ ret = ab8500_codec_add_widgets(codec);
+ if (ret < 0) {
+ pr_err("%s: Failed add widgets (%d).\n", __func__, ret);
+ return ret;
+ }
+
+ ab8500_codec = codec;
+
+ return ret;
+}
+
+static int ab8500_codec_remove(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_free(&codec->dapm);
+ ab8500_codec = NULL;
+
+ return 0;
+}
+
+static int ab8500_codec_suspend(struct snd_soc_codec *codec,
+ pm_message_t state)
+{
+ pr_debug("%s Enter.\n", __func__);
+
+ return 0;
+}
+
+static int ab8500_codec_resume(struct snd_soc_codec *codec)
+{
+ pr_debug("%s Enter.\n", __func__);
+
+ return 0;
+}
+
+struct snd_soc_codec_driver ab8500_codec_driver = {
+ .probe = ab8500_codec_probe,
+ .remove = ab8500_codec_remove,
+ .suspend = ab8500_codec_suspend,
+ .resume = ab8500_codec_resume,
+ .read = ab8500_codec_read_reg_audio,
+ .write = ab8500_codec_write_reg_audio,
+ .reg_cache_size = ARRAY_SIZE(ab8500_reg_cache),
+ .reg_word_size = sizeof(u8),
+ .reg_cache_default = ab8500_reg_cache,
+};
+
+static int __devinit ab8500_codec_driver_probe(struct platform_device *pdev)
+{
+ int err;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ pr_info("%s: Register codec.\n", __func__);
+ err = snd_soc_register_codec(&pdev->dev,
+ &ab8500_codec_driver,
+ ab8500_codec_dai,
+ ARRAY_SIZE(ab8500_codec_dai));
+
+ if (err < 0) {
+ pr_err("%s: Error: Failed to register codec (%d).\n",
+ __func__, err);
+ }
+
+ return err;
+}
+
+static int __devexit ab8500_codec_driver_remove(struct platform_device *pdev)
+{
+ pr_info("%s Enter.\n", __func__);
+
+ snd_soc_unregister_codec(&pdev->dev);
+
+ return 0;
+}
+
+static int ab8500_codec_driver_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ pr_debug("%s Enter.\n", __func__);
+
+ return 0;
+}
+
+static int ab8500_codec_driver_resume(struct platform_device *pdev)
+{
+ pr_debug("%s Enter.\n", __func__);
+
+ return 0;
+}
+
+static struct platform_driver ab8500_codec_platform_driver = {
+ .driver = {
+ .name = "ab8500-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_codec_driver_probe,
+ .remove = __devexit_p(ab8500_codec_driver_remove),
+ .suspend = ab8500_codec_driver_suspend,
+ .resume = ab8500_codec_driver_resume,
+};
+
+static int __init ab8500_codec_platform_driver_init(void)
+{
+ int ret;
+
+ pr_info("%s: Enter.\n", __func__);
+
+ ret = platform_driver_register(&ab8500_codec_platform_driver);
+ if (ret != 0) {
+ pr_err("%s: Failed to register AB8500 platform driver (%d)!\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+static void __exit ab8500_codec_platform_driver_exit(void)
+{
+ pr_info("%s: Enter.\n", __func__);
+
+ platform_driver_unregister(&ab8500_codec_platform_driver);
+}
+
+module_init(ab8500_codec_platform_driver_init);
+module_exit(ab8500_codec_platform_driver_exit);
+
+MODULE_DESCRIPTION("AB8500 Codec driver");
+MODULE_ALIAS("platform:ab8500-codec");
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/ab8500_audio.h b/sound/soc/codecs/ab8500_audio.h
new file mode 100644
index 00000000000..bfc1c01e5fc
--- /dev/null
+++ b/sound/soc/codecs/ab8500_audio.h
@@ -0,0 +1,676 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Mikko J. Lehto <mikko.lehto@symbio.com>,
+ * Mikko Sarmanne <mikko.sarmanne@symbio.com>,
+ * Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef AB8500_CODEC_REGISTERS_H
+#define AB8500_CODEC_REGISTERS_H
+
+#define AB8500_SUPPORTED_RATE (SNDRV_PCM_RATE_48000)
+#define AB8500_SUPPORTED_FMT (SNDRV_PCM_FMTBIT_S16_LE)
+
+extern struct snd_soc_dai_driver ab8500_codec_dai[];
+extern struct snd_soc_codec_driver ab8500_codec_driver;
+
+/* Extended interface for codec-driver */
+
+int ab8500_audio_power_control(bool power_on);
+int ab8500_audio_set_word_length(struct snd_soc_dai *dai, unsigned int wl);
+int ab8500_audio_set_bit_delay(struct snd_soc_dai *dai, unsigned int delay);
+int ab8500_audio_setup_if1(struct snd_soc_codec *codec,
+ unsigned int fmt,
+ unsigned int wl,
+ unsigned int delay);
+unsigned int ab8500_audio_anc_status(void);
+int ab8500_audio_anc_configure(unsigned int req_state);
+
+enum ab8500_audio_dapm_path {
+ AB8500_AUDIO_DAPM_PATH_DMIC,
+ AB8500_AUDIO_DAPM_PATH_AMIC1,
+ AB8500_AUDIO_DAPM_PATH_AMIC2
+};
+bool ab8500_audio_dapm_path_active(enum ab8500_audio_dapm_path dapm_path);
+
+enum ab8500_audio_adcm {
+ AB8500_AUDIO_ADCM_NORMAL,
+ AB8500_AUDIO_ADCM_FORCE_UP,
+ AB8500_AUDIO_ADCM_FORCE_DOWN
+};
+int ab8500_audio_set_adcm(enum ab8500_audio_adcm req_adcm);
+
+#define SOC_SINGLE_VALUE_S1R(xreg0, xcount, xmin, xmax, xinvert) \
+ ((unsigned long)&(struct soc_smra_control) \
+ { .reg = ((unsigned int[]){ xreg0 }), \
+ .rcount = 1, .count = xcount, \
+ .invert = xinvert, .min = xmin, .max = xmax})
+
+#define SOC_SINGLE_VALUE_S2R(xreg0, xreg1, xcount, xmin, xmax, xinvert) \
+ ((unsigned long)&(struct soc_smra_control) \
+ {.reg = ((unsigned int[]){ xreg0, xreg1 }), \
+ .rcount = 2, .count = xcount, \
+ .min = xmin, .max = xmax, .invert = xinvert})
+
+#define SOC_SINGLE_VALUE_S4R(xreg0, xreg1, xreg2, xreg3, \
+ xcount, xmin, xmax, xinvert) \
+ ((unsigned long)&(struct soc_smra_control) \
+ {.reg = ((unsigned int[]){ xreg0, xreg1, xreg2, xreg3 }), \
+ .rcount = 4, .count = xcount, \
+ .min = xmin, .max = xmax, .invert = xinvert})
+
+#define SOC_SINGLE_VALUE_S8R(xreg0, xreg1, xreg2, xreg3, \
+ xreg4, xreg5, xreg6, xreg7, xcount, xmin, xmax, xinvert) \
+ ((unsigned long)&(struct soc_smra_control) \
+ {.reg = ((unsigned int[]){ xreg0, xreg1, xreg2, xreg3, \
+ xreg4, xreg5, xreg6, xreg7 }), \
+ .rcount = 8, .count = xcount, \
+ .min = xmin, .max = xmax, .invert = xinvert})
+
+#define SOC_MULTIPLE_VALUE_SA(xvalues, xcount, xmin, xmax, xinvert) \
+ ((unsigned long)&(struct soc_smra_control) \
+ {.values = xvalues, .rcount = 1, .count = xcount, \
+ .min = xmin, .max = xmax, .invert = xinvert})
+
+#define SOC_ENUM_STROBE_DECL(name, xreg, xbit, xinvert, xtexts) \
+ struct soc_enum name = SOC_ENUM_DOUBLE(xreg, xbit, \
+ xinvert, 2, xtexts)
+
+/* Extended SOC macros */
+
+#define SOC_SINGLE_S1R(xname, reg0, min, max, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .info = snd_soc_info_s, .get = snd_soc_get_smr, .put = snd_soc_put_smr, \
+ .private_value = SOC_SINGLE_VALUE_S1R(reg0, 1, min, max, invert) }
+
+#define SOC_SINGLE_S2R(xname, reg0, reg1, min, max, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .info = snd_soc_info_s, .get = snd_soc_get_smr, .put = snd_soc_put_smr, \
+ .private_value = SOC_SINGLE_VALUE_S2R(reg0, reg1, 1, min, max, invert) }
+
+#define SOC_SINGLE_S4R(xname, reg0, reg1, reg2, reg3, min, max, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .info = snd_soc_info_s, .get = snd_soc_get_smr, .put = snd_soc_put_smr, \
+ .private_value = SOC_SINGLE_VALUE_S4R(reg0, reg1, reg2, reg3, \
+ 1, min, max, invert) }
+
+#define SOC_SINGLE_S8R(xname, reg0, reg1, reg2, reg3, \
+ reg4, reg5, reg6, reg7, min, max, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .info = snd_soc_info_s, .get = snd_soc_get_smr, .put = snd_soc_put_smr, \
+ .private_value = SOC_SINGLE_VALUE_S8R(reg0, reg1, reg2, reg3, \
+ reg4, reg5, reg6, reg7, \
+ 1, min, max, invert) }
+
+#define SOC_MULTIPLE_SA(xname, values, min, max, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .info = snd_soc_info_s, .get = snd_soc_get_sa, .put = snd_soc_put_sa, \
+ .private_value = SOC_MULTIPLE_VALUE_SA(values, ARRAY_SIZE(values), \
+ min, max, invert) }
+
+#define SOC_ENUM_STROBE(xname, xenum) \
+ SOC_ENUM_EXT(xname, xenum, \
+ snd_soc_get_enum_strobe, \
+ snd_soc_put_enum_strobe)
+
+/* AB8500 audio bank (0x0d) register definitions */
+
+#define REG_POWERUP 0x00
+#define REG_AUDSWRESET 0x01
+#define REG_ADPATHENA 0x02
+#define REG_DAPATHENA 0x03
+#define REG_ANACONF1 0x04
+#define REG_ANACONF2 0x05
+#define REG_DIGMICCONF 0x06
+#define REG_ANACONF3 0x07
+#define REG_ANACONF4 0x08
+#define REG_DAPATHCONF 0x09
+#define REG_MUTECONF 0x0A
+#define REG_SHORTCIRCONF 0x0B
+#define REG_ANACONF5 0x0C
+#define REG_ENVCPCONF 0x0D
+#define REG_SIGENVCONF 0x0E
+#define REG_PWMGENCONF1 0x0F
+#define REG_PWMGENCONF2 0x10
+#define REG_PWMGENCONF3 0x11
+#define REG_PWMGENCONF4 0x12
+#define REG_PWMGENCONF5 0x13
+#define REG_ANAGAIN1 0x14
+#define REG_ANAGAIN2 0x15
+#define REG_ANAGAIN3 0x16
+#define REG_ANAGAIN4 0x17
+#define REG_DIGLINHSLGAIN 0x18
+#define REG_DIGLINHSRGAIN 0x19
+#define REG_ADFILTCONF 0x1A
+#define REG_DIGIFCONF1 0x1B
+#define REG_DIGIFCONF2 0x1C
+#define REG_DIGIFCONF3 0x1D
+#define REG_DIGIFCONF4 0x1E
+#define REG_ADSLOTSEL1 0x1F
+#define REG_ADSLOTSEL2 0x20
+#define REG_ADSLOTSEL3 0x21
+#define REG_ADSLOTSEL4 0x22
+#define REG_ADSLOTSEL5 0x23
+#define REG_ADSLOTSEL6 0x24
+#define REG_ADSLOTSEL7 0x25
+#define REG_ADSLOTSEL8 0x26
+#define REG_ADSLOTSEL9 0x27
+#define REG_ADSLOTSEL10 0x28
+#define REG_ADSLOTSEL11 0x29
+#define REG_ADSLOTSEL12 0x2A
+#define REG_ADSLOTSEL13 0x2B
+#define REG_ADSLOTSEL14 0x2C
+#define REG_ADSLOTSEL15 0x2D
+#define REG_ADSLOTSEL16 0x2E
+#define REG_ADSLOTHIZCTRL1 0x2F
+#define REG_ADSLOTHIZCTRL2 0x30
+#define REG_ADSLOTHIZCTRL3 0x31
+#define REG_ADSLOTHIZCTRL4 0x32
+#define REG_DASLOTCONF1 0x33
+#define REG_DASLOTCONF2 0x34
+#define REG_DASLOTCONF3 0x35
+#define REG_DASLOTCONF4 0x36
+#define REG_DASLOTCONF5 0x37
+#define REG_DASLOTCONF6 0x38
+#define REG_DASLOTCONF7 0x39
+#define REG_DASLOTCONF8 0x3A
+#define REG_CLASSDCONF1 0x3B
+#define REG_CLASSDCONF2 0x3C
+#define REG_CLASSDCONF3 0x3D
+#define REG_DMICFILTCONF 0x3E
+#define REG_DIGMULTCONF1 0x3F
+#define REG_DIGMULTCONF2 0x40
+#define REG_ADDIGGAIN1 0x41
+#define REG_ADDIGGAIN2 0x42
+#define REG_ADDIGGAIN3 0x43
+#define REG_ADDIGGAIN4 0x44
+#define REG_ADDIGGAIN5 0x45
+#define REG_ADDIGGAIN6 0x46
+#define REG_DADIGGAIN1 0x47
+#define REG_DADIGGAIN2 0x48
+#define REG_DADIGGAIN3 0x49
+#define REG_DADIGGAIN4 0x4A
+#define REG_DADIGGAIN5 0x4B
+#define REG_DADIGGAIN6 0x4C
+#define REG_ADDIGLOOPGAIN1 0x4D
+#define REG_ADDIGLOOPGAIN2 0x4E
+#define REG_HSLEARDIGGAIN 0x4F
+#define REG_HSRDIGGAIN 0x50
+#define REG_SIDFIRGAIN1 0x51
+#define REG_SIDFIRGAIN2 0x52
+#define REG_ANCCONF1 0x53
+#define REG_ANCCONF2 0x54
+#define REG_ANCCONF3 0x55
+#define REG_ANCCONF4 0x56
+#define REG_ANCCONF5 0x57
+#define REG_ANCCONF6 0x58
+#define REG_ANCCONF7 0x59
+#define REG_ANCCONF8 0x5A
+#define REG_ANCCONF9 0x5B
+#define REG_ANCCONF10 0x5C
+#define REG_ANCCONF11 0x5D
+#define REG_ANCCONF12 0x5E
+#define REG_ANCCONF13 0x5F
+#define REG_ANCCONF14 0x60
+#define REG_SIDFIRADR 0x61
+#define REG_SIDFIRCOEF1 0x62
+#define REG_SIDFIRCOEF2 0x63
+#define REG_SIDFIRCONF 0x64
+#define REG_AUDINTMASK1 0x65
+#define REG_AUDINTSOURCE1 0x66
+#define REG_AUDINTMASK2 0x67
+#define REG_AUDINTSOURCE2 0x68
+#define REG_FIFOCONF1 0x69
+#define REG_FIFOCONF2 0x6A
+#define REG_FIFOCONF3 0x6B
+#define REG_FIFOCONF4 0x6C
+#define REG_FIFOCONF5 0x6D
+#define REG_FIFOCONF6 0x6E
+#define REG_AUDREV 0x6F
+
+#define AB8500_FIRST_REG REG_POWERUP
+#define AB8500_LAST_REG REG_AUDREV
+#define AB8500_CACHEREGNUM (AB8500_LAST_REG + 1)
+
+
+#define REG_MASK_ALL 0xFF
+#define REG_MASK_NONE 0x00
+
+/* REG_POWERUP */
+#define REG_POWERUP_POWERUP 7
+#define REG_POWERUP_ENANA 3
+
+/* REG_AUDSWRESET */
+#define REG_AUDSWRESET_SWRESET 7
+
+/* REG_ADPATHENA */
+#define REG_ADPATHENA_ENAD12 7
+#define REG_ADPATHENA_ENAD34 5
+#define REG_ADPATHENA_ENAD5768 3
+
+/* REG_DAPATHENA */
+#define REG_DAPATHENA_ENDA1 7
+#define REG_DAPATHENA_ENDA2 6
+#define REG_DAPATHENA_ENDA3 5
+#define REG_DAPATHENA_ENDA4 4
+#define REG_DAPATHENA_ENDA5 3
+#define REG_DAPATHENA_ENDA6 2
+
+/* REG_ANACONF1 */
+#define REG_ANACONF1_HSLOWPOW 7
+#define REG_ANACONF1_DACLOWPOW1 6
+#define REG_ANACONF1_DACLOWPOW0 5
+#define REG_ANACONF1_EARDACLOWPOW 4
+#define REG_ANACONF1_EARSELCM 2
+#define REG_ANACONF1_HSHPEN 1
+#define REG_ANACONF1_EARDRVLOWPOW 0
+
+/* REG_ANACONF2 */
+#define REG_ANACONF2_ENMIC1 7
+#define REG_ANACONF2_ENMIC2 6
+#define REG_ANACONF2_ENLINL 5
+#define REG_ANACONF2_ENLINR 4
+#define REG_ANACONF2_MUTMIC1 3
+#define REG_ANACONF2_MUTMIC2 2
+#define REG_ANACONF2_MUTLINL 1
+#define REG_ANACONF2_MUTLINR 0
+
+/* REG_DIGMICCONF */
+#define REG_DIGMICCONF_ENDMIC1 7
+#define REG_DIGMICCONF_ENDMIC2 6
+#define REG_DIGMICCONF_ENDMIC3 5
+#define REG_DIGMICCONF_ENDMIC4 4
+#define REG_DIGMICCONF_ENDMIC5 3
+#define REG_DIGMICCONF_ENDMIC6 2
+#define REG_DIGMICCONF_HSFADSPEED 0
+
+/* REG_ANACONF3 */
+#define REG_ANACONF3_MIC1SEL 7
+#define REG_ANACONF3_LINRSEL 6
+#define REG_ANACONF3_ENDRVHSL 5
+#define REG_ANACONF3_ENDRVHSR 4
+#define REG_ANACONF3_ENADCMIC 2
+#define REG_ANACONF3_ENADCLINL 1
+#define REG_ANACONF3_ENADCLINR 0
+
+/* REG_ANACONF4 */
+#define REG_ANACONF4_DISPDVSS 7
+#define REG_ANACONF4_ENEAR 6
+#define REG_ANACONF4_ENHSL 5
+#define REG_ANACONF4_ENHSR 4
+#define REG_ANACONF4_ENHFL 3
+#define REG_ANACONF4_ENHFR 2
+#define REG_ANACONF4_ENVIB1 1
+#define REG_ANACONF4_ENVIB2 0
+
+/* REG_DAPATHCONF */
+#define REG_DAPATHCONF_ENDACEAR 6
+#define REG_DAPATHCONF_ENDACHSL 5
+#define REG_DAPATHCONF_ENDACHSR 4
+#define REG_DAPATHCONF_ENDACHFL 3
+#define REG_DAPATHCONF_ENDACHFR 2
+#define REG_DAPATHCONF_ENDACVIB1 1
+#define REG_DAPATHCONF_ENDACVIB2 0
+
+/* REG_MUTECONF */
+#define REG_MUTECONF_MUTEAR 6
+#define REG_MUTECONF_MUTHSL 5
+#define REG_MUTECONF_MUTHSR 4
+#define REG_MUTECONF_MUTDACEAR 2
+#define REG_MUTECONF_MUTDACHSL 1
+#define REG_MUTECONF_MUTDACHSR 0
+
+
+/* REG_SHORTCIRCONF */
+
+/* REG_ANACONF5 */
+#define REG_ANACONF5_ENCPHS 7
+#define REG_ANACONF5_HSLDACTOLOL 5
+#define REG_ANACONF5_HSRDACTOLOR 4
+#define REG_ANACONF5_ENLOL 3
+#define REG_ANACONF5_ENLOR 2
+#define REG_ANACONF5_HSAUTOEN 0
+
+/* REG_ENVCPCONF */
+#define REG_ENVCPCONF_ENVDETHTHRE 4
+#define REG_ENVCPCONF_ENVDETLTHRE 0
+#define REG_ENVCPCONF_ENVDETHTHRE_MAX 0x0F
+#define REG_ENVCPCONF_ENVDETLTHRE_MAX 0x0F
+
+/* REG_SIGENVCONF */
+#define REG_SIGENVCONF_CPLVEN 5
+#define REG_SIGENVCONF_ENVDETCPEN 4
+#define REG_SIGENVCONF_ENVDETTIME 0
+#define REG_SIGENVCONF_ENVDETTIME_MAX 0x0F
+
+/* REG_PWMGENCONF1 */
+#define REG_PWMGENCONF1_PWMTOVIB1 7
+#define REG_PWMGENCONF1_PWMTOVIB2 6
+#define REG_PWMGENCONF1_PWM1CTRL 5
+#define REG_PWMGENCONF1_PWM2CTRL 4
+#define REG_PWMGENCONF1_PWM1NCTRL 3
+#define REG_PWMGENCONF1_PWM1PCTRL 2
+#define REG_PWMGENCONF1_PWM2NCTRL 1
+#define REG_PWMGENCONF1_PWM2PCTRL 0
+
+/* REG_PWMGENCONF2 */
+/* REG_PWMGENCONF3 */
+/* REG_PWMGENCONF4 */
+/* REG_PWMGENCONF5 */
+#define REG_PWMGENCONFX_PWMVIBXPOL 7
+#define REG_PWMGENCONFX_PWMVIBXDUTCYC 0
+#define REG_PWMGENCONFX_PWMVIBXDUTCYC_MAX 0x64
+
+/* REG_ANAGAIN1 */
+/* REG_ANAGAIN2 */
+#define REG_ANAGAINX_ENSEMICX 7
+#define REG_ANAGAINX_LOWPOWMICX 6
+#define REG_ANAGAINX_MICXGAIN 0
+#define REG_ANAGAINX_MICXGAIN_MAX 0x1F
+
+/* REG_ANAGAIN3 */
+#define REG_ANAGAIN3_HSLGAIN 4
+#define REG_ANAGAIN3_HSRGAIN 0
+#define REG_ANAGAIN3_HSXGAIN_MAX 0x0F
+
+/* REG_ANAGAIN4 */
+#define REG_ANAGAIN4_LINLGAIN 4
+#define REG_ANAGAIN4_LINRGAIN 0
+#define REG_ANAGAIN4_LINXGAIN_MAX 0x0F
+
+/* REG_DIGLINHSLGAIN */
+/* REG_DIGLINHSRGAIN */
+#define REG_DIGLINHSXGAIN_LINTOHSXGAIN 0
+#define REG_DIGLINHSXGAIN_LINTOHSXGAIN_MAX 0x13
+
+/* REG_ADFILTCONF */
+#define REG_ADFILTCONF_AD1NH 7
+#define REG_ADFILTCONF_AD2NH 6
+#define REG_ADFILTCONF_AD3NH 5
+#define REG_ADFILTCONF_AD4NH 4
+#define REG_ADFILTCONF_AD1VOICE 3
+#define REG_ADFILTCONF_AD2VOICE 2
+#define REG_ADFILTCONF_AD3VOICE 1
+#define REG_ADFILTCONF_AD4VOICE 0
+
+/* REG_DIGIFCONF1 */
+#define REG_DIGIFCONF1_ENMASTGEN 7
+#define REG_DIGIFCONF1_IF1BITCLKOS1 6
+#define REG_DIGIFCONF1_IF1BITCLKOS0 5
+#define REG_DIGIFCONF1_ENFSBITCLK1 4
+#define REG_DIGIFCONF1_IF0BITCLKOS1 2
+#define REG_DIGIFCONF1_IF0BITCLKOS0 1
+#define REG_DIGIFCONF1_ENFSBITCLK0 0
+
+/* REG_DIGIFCONF2 */
+#define REG_DIGIFCONF2_FSYNC0P 6
+#define REG_DIGIFCONF2_BITCLK0P 5
+#define REG_DIGIFCONF2_IF0DEL 4
+#define REG_DIGIFCONF2_IF0FORMAT1 3
+#define REG_DIGIFCONF2_IF0FORMAT0 2
+#define REG_DIGIFCONF2_IF0WL1 1
+#define REG_DIGIFCONF2_IF0WL0 0
+
+/* REG_DIGIFCONF3 */
+#define REG_DIGIFCONF3_IF0DATOIF1AD 7
+#define REG_DIGIFCONF3_IF0CLKTOIF1CLK 6
+#define REG_DIGIFCONF3_IF1MASTER 5
+#define REG_DIGIFCONF3_IF1DATOIF0AD 3
+#define REG_DIGIFCONF3_IF1CLKTOIF0CLK 2
+#define REG_DIGIFCONF3_IF0MASTER 1
+#define REG_DIGIFCONF3_IF0BFIFOEN 0
+
+/* REG_DIGIFCONF4 */
+#define REG_DIGIFCONF4_FSYNC1P 6
+#define REG_DIGIFCONF4_BITCLK1P 5
+#define REG_DIGIFCONF4_IF1DEL 4
+#define REG_DIGIFCONF4_IF1FORMAT1 3
+#define REG_DIGIFCONF4_IF1FORMAT0 2
+#define REG_DIGIFCONF4_IF1WL1 1
+#define REG_DIGIFCONF4_IF1WL0 0
+
+/* REG_ADSLOTSELX */
+#define REG_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD 0x00
+#define REG_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD 0x01
+#define REG_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD 0x02
+#define REG_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD 0x03
+#define REG_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD 0x04
+#define REG_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD 0x05
+#define REG_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD 0x06
+#define REG_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD 0x07
+#define REG_ADSLOTSELX_ZEROES_TO_SLOT_ODD 0x08
+#define REG_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0x0F
+#define REG_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN 0x00
+#define REG_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x10
+#define REG_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x20
+#define REG_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x30
+#define REG_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x40
+#define REG_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x50
+#define REG_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x60
+#define REG_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x70
+#define REG_ADSLOTSELX_ZEROES_TO_SLOT_EVEN 0x80
+#define REG_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN 0xF0
+#define REG_ADSLOTSELX_EVEN_SHIFT 0
+#define REG_ADSLOTSELX_ODD_SHIFT 4
+
+/* REG_ADSLOTHIZCTRL1 */
+/* REG_ADSLOTHIZCTRL2 */
+/* REG_ADSLOTHIZCTRL3 */
+/* REG_ADSLOTHIZCTRL4 */
+/* REG_DASLOTCONF1 */
+#define REG_DASLOTCONF1_DA12VOICE 7
+#define REG_DASLOTCONF1_SWAPDA12_34 6
+#define REG_DASLOTCONF1_DAI7TOADO1 5
+
+/* REG_DASLOTCONF2 */
+#define REG_DASLOTCONF2_DAI8TOADO2 5
+
+/* REG_DASLOTCONF3 */
+#define REG_DASLOTCONF3_DA34VOICE 7
+#define REG_DASLOTCONF3_DAI7TOADO3 5
+
+/* REG_DASLOTCONF4 */
+#define REG_DASLOTCONF4_DAI8TOADO4 5
+
+/* REG_DASLOTCONF5 */
+#define REG_DASLOTCONF5_DA56VOICE 7
+#define REG_DASLOTCONF5_DAI7TOADO5 5
+
+/* REG_DASLOTCONF6 */
+#define REG_DASLOTCONF6_DAI8TOADO6 5
+
+/* REG_DASLOTCONF7 */
+#define REG_DASLOTCONF7_DAI8TOADO7 5
+
+/* REG_DASLOTCONF8 */
+#define REG_DASLOTCONF8_DAI7TOADO8 5
+
+#define REG_DASLOTCONFX_SLTODAX_SHIFT 0
+#define REG_DASLOTCONFX_SLTODAX_MASK 0x1F
+
+/* REG_CLASSDCONF1 */
+#define REG_CLASSDCONF1_PARLHF 7
+#define REG_CLASSDCONF1_PARLVIB 6
+#define REG_CLASSDCONF1_VIB1SWAPEN 3
+#define REG_CLASSDCONF1_VIB2SWAPEN 2
+#define REG_CLASSDCONF1_HFLSWAPEN 1
+#define REG_CLASSDCONF1_HFRSWAPEN 0
+
+/* REG_CLASSDCONF2 */
+#define REG_CLASSDCONF2_FIRBYP3 7
+#define REG_CLASSDCONF2_FIRBYP2 6
+#define REG_CLASSDCONF2_FIRBYP1 5
+#define REG_CLASSDCONF2_FIRBYP0 4
+#define REG_CLASSDCONF2_HIGHVOLEN3 3
+#define REG_CLASSDCONF2_HIGHVOLEN2 2
+#define REG_CLASSDCONF2_HIGHVOLEN1 1
+#define REG_CLASSDCONF2_HIGHVOLEN0 0
+
+/* REG_CLASSDCONF3 */
+#define REG_CLASSDCONF3_DITHHPGAIN 4
+#define REG_CLASSDCONF3_DITHHPGAIN_MAX 0x0A
+#define REG_CLASSDCONF3_DITHWGAIN 0
+#define REG_CLASSDCONF3_DITHWGAIN_MAX 0x0A
+
+/* REG_DMICFILTCONF */
+#define REG_DMICFILTCONF_ANCINSEL 7
+#define REG_DMICFILTCONF_DA3TOEAR 6
+#define REG_DMICFILTCONF_DMIC1SINC3 5
+#define REG_DMICFILTCONF_DMIC2SINC3 4
+#define REG_DMICFILTCONF_DMIC3SINC3 3
+#define REG_DMICFILTCONF_DMIC4SINC3 2
+#define REG_DMICFILTCONF_DMIC5SINC3 1
+#define REG_DMICFILTCONF_DMIC6SINC3 0
+
+/* REG_DIGMULTCONF1 */
+#define REG_DIGMULTCONF1_DATOHSLEN 7
+#define REG_DIGMULTCONF1_DATOHSREN 6
+#define REG_DIGMULTCONF1_AD1SEL 5
+#define REG_DIGMULTCONF1_AD2SEL 4
+#define REG_DIGMULTCONF1_AD3SEL 3
+#define REG_DIGMULTCONF1_AD5SEL 2
+#define REG_DIGMULTCONF1_AD6SEL 1
+#define REG_DIGMULTCONF1_ANCSEL 0
+
+/* REG_DIGMULTCONF2 */
+#define REG_DIGMULTCONF2_DATOHFREN 7
+#define REG_DIGMULTCONF2_DATOHFLEN 6
+#define REG_DIGMULTCONF2_HFRSEL 5
+#define REG_DIGMULTCONF2_HFLSEL 4
+#define REG_DIGMULTCONF2_FIRSID1SEL 2
+#define REG_DIGMULTCONF2_FIRSID2SEL 0
+
+/* REG_ADDIGGAIN1 */
+/* REG_ADDIGGAIN2 */
+/* REG_ADDIGGAIN3 */
+/* REG_ADDIGGAIN4 */
+/* REG_ADDIGGAIN5 */
+/* REG_ADDIGGAIN6 */
+#define REG_ADDIGGAINX_FADEDISADX 6
+#define REG_ADDIGGAINX_ADXGAIN_MAX 0x3F
+
+/* REG_DADIGGAIN1 */
+/* REG_DADIGGAIN2 */
+/* REG_DADIGGAIN3 */
+/* REG_DADIGGAIN4 */
+/* REG_DADIGGAIN5 */
+/* REG_DADIGGAIN6 */
+#define REG_DADIGGAINX_FADEDISDAX 6
+#define REG_DADIGGAINX_DAXGAIN_MAX 0x3F
+
+/* REG_ADDIGLOOPGAIN1 */
+/* REG_ADDIGLOOPGAIN2 */
+#define REG_ADDIGLOOPGAINX_FADEDISADXL 6
+#define REG_ADDIGLOOPGAINX_ADXLBGAIN_MAX 0x3F
+
+/* REG_HSLEARDIGGAIN */
+#define REG_HSLEARDIGGAIN_HSSINC1 7
+#define REG_HSLEARDIGGAIN_FADEDISHSL 4
+#define REG_HSLEARDIGGAIN_HSLDGAIN_MAX 0x09
+
+/* REG_HSRDIGGAIN */
+#define REG_HSRDIGGAIN_FADESPEED 6
+#define REG_HSRDIGGAIN_FADEDISHSR 4
+#define REG_HSRDIGGAIN_HSRDGAIN_MAX 0x09
+
+/* REG_SIDFIRGAIN1 */
+/* REG_SIDFIRGAIN2 */
+#define REG_SIDFIRGAINX_FIRSIDXGAIN_MAX 0x1F
+
+/* REG_ANCCONF1 */
+#define REG_ANCCONF1_ANCIIRUPDATE 3
+#define REG_ANCCONF1_ENANC 2
+#define REG_ANCCONF1_ANCIIRINIT 1
+#define REG_ANCCONF1_ANCFIRUPDATE 0
+
+/* REG_ANCCONF2 */
+#define REG_ANCCONF2_VALUE_MIN -0x10
+#define REG_ANCCONF2_VALUE_MAX 0x0F
+/* REG_ANCCONF3 */
+#define REG_ANCCONF3_VALUE_MIN -0x10
+#define REG_ANCCONF3_VALUE_MAX 0x0F
+/* REG_ANCCONF4 */
+#define REG_ANCCONF4_VALUE_MIN -0x10
+#define REG_ANCCONF4_VALUE_MAX 0x0F
+/* REG_ANC_FIR_COEFFS */
+#define REG_ANC_FIR_COEFF_MIN -0x8000
+#define REG_ANC_FIR_COEFF_MAX 0x7FFF
+#define REG_ANC_FIR_COEFFS 0xF
+/* REG_ANC_IIR_COEFFS */
+#define REG_ANC_IIR_COEFF_MIN -0x800000
+#define REG_ANC_IIR_COEFF_MAX 0x7FFFFF
+#define REG_ANC_IIR_COEFFS 0x18
+/* REG_ANC_WARP_DELAY */
+#define REG_ANC_WARP_DELAY_MIN 0x0000
+#define REG_ANC_WARP_DELAY_MAX 0xFFFF
+/* REG_ANCCONF11 */
+/* REG_ANCCONF12 */
+/* REG_ANCCONF13 */
+/* REG_ANCCONF14 */
+
+/* REG_SIDFIRADR */
+#define REG_SIDFIRADR_FIRSIDSET 7
+#define REG_SIDFIRADR_ADDRESS_SHIFT 0
+#define REG_SIDFIRADR_ADDRESS_MAX 0x7F
+
+/* REG_SIDFIRCOEF1 */
+/* REG_SIDFIRCOEF2 */
+#define REG_SIDFIRCOEFX_VALUE_SHIFT 0
+#define REG_SIDFIRCOEFX_VALUE_MAX 0xFFFF
+
+/* REG_SIDFIRCONF */
+#define REG_SIDFIRCONF_ENFIRSIDS 2
+#define REG_SIDFIRCONF_FIRSIDSTOIF1 1
+#define REG_SIDFIRCONF_FIRSIDBUSY 0
+
+/* REG_AUDINTMASK1 */
+/* REG_AUDINTSOURCE1 */
+/* REG_AUDINTMASK2 */
+/* REG_AUDINTSOURCE2 */
+
+/* REG_FIFOCONF1 */
+#define REG_FIFOCONF1_BFIFOMASK 0x80
+#define REG_FIFOCONF1_BFIFO19M2 0x40
+#define REG_FIFOCONF1_BFIFOINT_SHIFT 0
+#define REG_FIFOCONF1_BFIFOINT_MAX 0x3F
+
+/* REG_FIFOCONF2 */
+#define REG_FIFOCONF2_BFIFOTX_SHIFT 0
+#define REG_FIFOCONF2_BFIFOTX_MAX 0xFF
+
+/* REG_FIFOCONF3 */
+#define REG_FIFOCONF3_BFIFOEXSL_SHIFT 5
+#define REG_FIFOCONF3_BFIFOEXSL_MAX 0x5
+#define REG_FIFOCONF3_PREBITCLK0_SHIFT 2
+#define REG_FIFOCONF3_PREBITCLK0_MAX 0x7
+#define REG_FIFOCONF3_BFIFOMAST_SHIFT 1
+#define REG_FIFOCONF3_BFIFORUN_SHIFT 0
+
+/* REG_FIFOCONF4 */
+#define REG_FIFOCONF4_BFIFOFRAMSW_SHIFT 0
+#define REG_FIFOCONF4_BFIFOFRAMSW_MAX 0xFF
+
+/* REG_FIFOCONF5 */
+#define REG_FIFOCONF5_BFIFOWAKEUP_SHIFT 0
+#define REG_FIFOCONF5_BFIFOWAKEUP_MAX 0xFF
+
+/* REG_FIFOCONF6 */
+#define REG_FIFOCONF6_BFIFOSAMPLE_SHIFT 0
+#define REG_FIFOCONF6_BFIFOSAMPLE_MAX 0xFF
+
+/* REG_AUDREV */
+
+#endif
diff --git a/sound/soc/codecs/av8100_audio.c b/sound/soc/codecs/av8100_audio.c
new file mode 100644
index 00000000000..8716827c17e
--- /dev/null
+++ b/sound/soc/codecs/av8100_audio.c
@@ -0,0 +1,526 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <video/av8100.h>
+#include <video/hdmi.h>
+
+#include "av8100_audio.h"
+
+/* codec private data */
+struct av8100_codec_dai_data {
+ struct hdmi_audio_settings as;
+};
+
+static struct av8100_codec_dai_data *get_dai_data_codec(struct snd_soc_codec *codec,
+ int dai_id)
+{
+ struct av8100_codec_dai_data *dai_data = snd_soc_codec_get_drvdata(codec);
+ return &dai_data[dai_id];
+}
+
+static struct av8100_codec_dai_data *get_dai_data(struct snd_soc_dai *codec_dai)
+{
+ return get_dai_data_codec(codec_dai->codec, codec_dai->id);
+}
+
+/* Controls - Non-DAPM Non-ASoC */
+
+/* Coding Type */
+
+static const char *hdmi_coding_type_str[] = {"AV8100_CODEC_CT_REFER",
+ "AV8100_CODEC_CT_IEC60958_PCM",
+ "AV8100_CODEC_CT_AC3",
+ "AV8100_CODEC_CT_MPEG1",
+ "AV8100_CODEC_CT_MP3",
+ "AV8100_CODEC_CT_MPEG2",
+ "AV8100_CODEC_CT_AAC",
+ "AV8100_CODEC_CT_DTS",
+ "AV8100_CODEC_CT_ATRAC",
+ "AV8100_CODEC_CT_ONE_BIT_AUDIO",
+ "AV8100_CODEC_CT_DOLBY_DIGITAL",
+ "AV8100_CODEC_CT_DTS_HD",
+ "AV8100_CODEC_CT_MAT",
+ "AV8100_CODEC_CT_DST",
+ "AV8100_CODEC_CT_WMA_PRO"};
+
+enum hdmi_audio_coding_type audio_coding_type;
+
+static int hdmi_coding_type_control_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ int items = ARRAY_SIZE(hdmi_coding_type_str);
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = items;
+
+ if (uinfo->value.enumerated.item > items - 1)
+ uinfo->value.enumerated.item = items - 1;
+
+ strcpy(uinfo->value.enumerated.name,
+ hdmi_coding_type_str[uinfo->value.enumerated.item]);
+
+ return 0;
+}
+
+static int hdmi_coding_type_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.enumerated.item[0] = audio_coding_type;
+
+ return 0;
+}
+
+static int hdmi_coding_type_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int items = ARRAY_SIZE(hdmi_coding_type_str);
+
+ if (ucontrol->value.enumerated.item[0] > items - 1)
+ ucontrol->value.enumerated.item[0] = items - 1;
+
+ audio_coding_type = ucontrol->value.enumerated.item[0];
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new hdmi_coding_type_control = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "HDMI Coding Type",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = hdmi_coding_type_control_info,
+ .get = hdmi_coding_type_control_get,
+ .put = hdmi_coding_type_control_put,
+};
+
+/* Extended interface for codec-driver */
+
+int av8100_audio_change_hdmi_audio_settings(struct snd_soc_dai *codec_dai,
+ struct hdmi_audio_settings *as)
+{
+ struct av8100_codec_dai_data *dai_data = get_dai_data(codec_dai);
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ dai_data->as.audio_channel_count = as->audio_channel_count;
+ dai_data->as.sampling_frequency = as->sampling_frequency;
+ dai_data->as.sample_size = as->sample_size;
+ dai_data->as.channel_allocation = as->channel_allocation;
+ dai_data->as.level_shift_value = as->level_shift_value;
+ dai_data->as.downmix_inhibit = as->downmix_inhibit;
+
+ return 0;
+}
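+
+/*
+ * Usage sketch (illustrative only, not part of this patch): the machine
+ * driver fills in a struct hdmi_audio_settings and hands it to the codec DAI
+ * before hw_params, so that the audio infoframe sent from hw_params reflects
+ * the stream, e.g.
+ *
+ *	struct hdmi_audio_settings as = {
+ *		.audio_channel_count = AV8100_CODEC_CC_2CH,
+ *		.sampling_frequency = AV8100_CODEC_SF_48KHZ,
+ *		.sample_size = AV8100_CODEC_SS_16BIT,
+ *		.channel_allocation = AV8100_CODEC_CA_FL_FR,
+ *		.level_shift_value = AV8100_CODEC_LSV_0DB,
+ *		.downmix_inhibit = false,
+ *	};
+ *	av8100_audio_change_hdmi_audio_settings(codec_dai, &as);
+ */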
+
+static int av8100_codec_powerup(void)
+{
+ struct av8100_status status;
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ pr_debug("%s: Powering up AV8100.", __func__);
+ ret = av8100_powerup();
+ if (ret != 0) {
+ pr_err("%s: Power up AV8100 failed "
+ "(av8100_powerup returned %d)!\n",
+ __func__,
+ ret);
+ return -EINVAL;
+ }
+ }
+ if (status.av8100_state < AV8100_OPMODE_INIT) {
+ ret = av8100_download_firmware(I2C_INTERFACE);
+ if (ret != 0) {
+ pr_err("%s: Download firmware failed "
+ "(av8100_download_firmware returned %d)!\n",
+ __func__,
+ ret);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int av8100_codec_setup_hdmi_format(void)
+{
+ union av8100_configuration config;
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ pr_debug("%s: hdmi_mode = AV8100_HDMI_ON.", __func__);
+ pr_debug("%s: hdmi_format = AV8100_HDMI.", __func__);
+ config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ config.hdmi_format.hdmi_format = AV8100_HDMI;
+ ret = av8100_conf_prep(AV8100_COMMAND_HDMI, &config);
+ if (ret != 0) {
+ pr_err("%s: Setting hdmi_format failed "
+ "(av8100_conf_prep returned %d)!\n",
+ __func__,
+ ret);
+ return -EINVAL;
+ }
+ ret = av8100_conf_w(AV8100_COMMAND_HDMI,
+ NULL,
+ NULL,
+ I2C_INTERFACE);
+ if (ret != 0) {
+ pr_err("%s: Setting hdmi_format failed "
+ "(av8100_conf_w returned %d)!\n",
+ __func__,
+ ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int av8100_codec_pcm_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ pr_debug("%s: Enter.\n", __func__);
+
+ return 0;
+}
+
+static int av8100_codec_send_audio_infoframe(struct hdmi_audio_settings *as)
+{
+ union av8100_configuration config;
+ struct av8100_infoframes_format_cmd info_fr;
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ pr_debug("%s: HDMI-settings:\n", __func__);
+ pr_debug("%s: audio_coding_type = %d\n", __func__, audio_coding_type);
+ pr_debug("%s: audio_channel_count = %d\n", __func__, as->audio_channel_count);
+ pr_debug("%s: sampling_frequency = %d\n", __func__, as->sampling_frequency);
+ pr_debug("%s: sample_size = %d\n", __func__, as->sample_size);
+ pr_debug("%s: channel_allocation = %d\n", __func__, as->channel_allocation);
+ pr_debug("%s: level_shift_value = %d\n", __func__, as->level_shift_value);
+ pr_debug("%s: downmix_inhibit = %d\n", __func__, as->downmix_inhibit);
+
+ /* Prepare the infoframe from the hdmi_audio_settings struct */
+ pr_info("%s: Preparing audio info-frame.", __func__);
+ info_fr.type = 0x84;
+ info_fr.version = 0x01;
+ info_fr.length = 0x0a;
+ info_fr.data[0] = (audio_coding_type << 4) | as->audio_channel_count;
+ info_fr.data[1] = (as->sampling_frequency << 2) | as->sample_size;
+ info_fr.data[2] = 0;
+ info_fr.data[3] = as->channel_allocation;
+ info_fr.data[4] = ((int)as->downmix_inhibit << 7) |
+ (as->level_shift_value << 3);
+ info_fr.data[5] = 0;
+ info_fr.data[6] = 0;
+ info_fr.data[7] = 0;
+ info_fr.data[8] = 0;
+ info_fr.data[9] = 0;
+ info_fr.crc = info_fr.version +
+ info_fr.length +
+ info_fr.data[0] +
+ info_fr.data[1] +
+ info_fr.data[3] +
+ info_fr.data[4];
+ config.infoframes_format.type = info_fr.type;
+ config.infoframes_format.version = info_fr.version;
+ config.infoframes_format.crc = info_fr.crc;
+ config.infoframes_format.length = info_fr.length;
+ memcpy(&config.infoframes_format.data, info_fr.data, info_fr.length);
+
+ /* Send audio info-frame */
+ pr_info("%s: Sending audio info-frame.", __func__);
+ ret = av8100_conf_prep(AV8100_COMMAND_INFOFRAMES, &config);
+ if (ret != 0) {
+ pr_err("%s: Sending audio info-frame failed "
+ "(av8100_conf_prep returned %d)!\n",
+ __func__,
+ ret);
+ return -EINVAL;
+ }
+ ret = av8100_conf_w(AV8100_COMMAND_INFOFRAMES,
+ NULL,
+ NULL,
+ I2C_INTERFACE);
+ if (ret != 0) {
+ pr_err("%s: Sending audio info-frame failed "
+ "(av8100_conf_w returned %d)!\n",
+ __func__,
+ ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int av8100_codec_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params,
+ struct snd_soc_dai *codec_dai)
+{
+ struct av8100_codec_dai_data *dai_data = get_dai_data(codec_dai);
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ av8100_codec_send_audio_infoframe(&dai_data->as);
+
+ return 0;
+}
+
+static int av8100_codec_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *codec_dai)
+{
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ /* Startup AV8100 if it is not already started */
+ ret = av8100_codec_powerup();
+ if (ret != 0) {
+ pr_err("%s: Startup of AV8100 failed "
+ "(av8100_codec_powerup returned %d)!\n",
+ __func__,
+ ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void av8100_codec_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *codec_dai)
+{
+ pr_debug("%s: Enter.\n", __func__);
+}
+
+static int av8100_codec_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id,
+ unsigned int freq, int dir)
+{
+ pr_debug("%s: Enter.\n", __func__);
+
+ return 0;
+}
+
+static int av8100_codec_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ union av8100_configuration config;
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ /* Set the HDMI format of AV8100 */
+ ret = av8100_codec_setup_hdmi_format();
+ if (ret != 0)
+ return ret;
+
+ /* Set the audio input format of AV8100 */
+ config.audio_input_format.audio_input_if_format =
+ ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_DSP_A) ?
+ AV8100_AUDIO_TDM_MODE : AV8100_AUDIO_I2SDELAYED_MODE;
+ config.audio_input_format.audio_if_mode =
+ ((fmt & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBM_CFM) ?
+ AV8100_AUDIO_MASTER : AV8100_AUDIO_SLAVE;
+ pr_info("%s: Setting audio_input_format "
+ "(if_format = %d, if_mode = %d).",
+ __func__,
+ config.audio_input_format.audio_input_if_format,
+ config.audio_input_format.audio_if_mode);
+ config.audio_input_format.i2s_input_nb = 1;
+ config.audio_input_format.sample_audio_freq = AV8100_AUDIO_FREQ_48KHZ;
+ config.audio_input_format.audio_word_lg = AV8100_AUDIO_16BITS;
+ config.audio_input_format.audio_format = AV8100_AUDIO_LPCM_MODE;
+ config.audio_input_format.audio_mute = AV8100_AUDIO_MUTE_DISABLE;
+ ret = av8100_conf_prep(AV8100_COMMAND_AUDIO_INPUT_FORMAT, &config);
+ if (ret != 0) {
+ pr_err("%s: Setting audio_input_format failed "
+ "(av8100_conf_prep returned %d)!\n",
+ __func__,
+ ret);
+ return -EINVAL;
+ }
+ ret = av8100_conf_w(AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ NULL,
+ NULL,
+ I2C_INTERFACE);
+ if (ret != 0) {
+ pr_err("%s: Setting audio_input_format failed "
+ "(av8100_conf_w returned %d)!\n",
+ __func__,
+ ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct snd_soc_dai_driver av8100_dai_driver = {
+ .name = "av8100-codec-dai",
+ .playback = {
+ .stream_name = "AV8100 Playback",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = AV8100_SUPPORTED_RATE,
+ .formats = AV8100_SUPPORTED_FMT,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .prepare = av8100_codec_pcm_prepare,
+ .hw_params = av8100_codec_pcm_hw_params,
+ .startup = av8100_codec_pcm_startup,
+ .shutdown = av8100_codec_pcm_shutdown,
+ .set_sysclk = av8100_codec_set_dai_sysclk,
+ .set_fmt = av8100_codec_set_dai_fmt,
+ }
+ },
+};
+EXPORT_SYMBOL_GPL(av8100_dai_driver);
+
+static int av8100_codec_probe(struct snd_soc_codec *codec)
+{
+ pr_debug("%s: Enter (codec->name = %s).\n", __func__, codec->name);
+
+ audio_coding_type = AV8100_CODEC_CT_IEC60958_PCM;
+
+ /* Add controls with events */
+ snd_ctl_add(codec->card->snd_card, snd_ctl_new1(&hdmi_coding_type_control, codec));
+
+ return 0;
+}
+
+static int av8100_codec_remove(struct snd_soc_codec *codec)
+{
+ pr_debug("%s: Enter (codec->name = %s).\n", __func__, codec->name);
+
+ return 0;
+}
+
+static int av8100_codec_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+ pr_debug("%s: Enter (codec->name = %s).\n", __func__, codec->name);
+
+ return 0;
+}
+
+static int av8100_codec_resume(struct snd_soc_codec *codec)
+{
+ pr_debug("%s: Enter (codec->name = %s).\n", __func__, codec->name);
+
+ return 0;
+}
+
+struct snd_soc_codec_driver av8100_codec_drv = {
+ .probe = av8100_codec_probe,
+ .remove = av8100_codec_remove,
+ .suspend = av8100_codec_suspend,
+ .resume = av8100_codec_resume
+};
+
+static __devinit int av8100_codec_drv_probe(struct platform_device *pdev)
+{
+ struct av8100_codec_dai_data *dai_data;
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ pr_info("%s: Init codec private data.\n", __func__);
+ dai_data = kzalloc(sizeof(struct av8100_codec_dai_data), GFP_KERNEL);
+ if (dai_data == NULL)
+ return -ENOMEM;
+
+ /* Setup hdmi_audio_settings default values */
+ dai_data[0].as.audio_channel_count = AV8100_CODEC_CC_2CH;
+ dai_data[0].as.sampling_frequency = AV8100_CODEC_SF_48KHZ;
+ dai_data[0].as.sample_size = AV8100_CODEC_SS_16BIT;
+ dai_data[0].as.channel_allocation = AV8100_CODEC_CA_FL_FR;
+ dai_data[0].as.level_shift_value = AV8100_CODEC_LSV_0DB;
+ dai_data[0].as.downmix_inhibit = false;
+
+ platform_set_drvdata(pdev, dai_data);
+
+ pr_info("%s: Register codec.\n", __func__);
+ ret = snd_soc_register_codec(&pdev->dev, &av8100_codec_drv, &av8100_dai_driver, 1);
+ if (ret < 0) {
+ pr_debug("%s: Error: Failed to register codec (ret = %d).\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devexit av8100_codec_drv_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ kfree(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static const struct platform_device_id av8100_codec_platform_id[] = {
+ { "av8100-codec", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, av8100_codec_platform_id);
+
+static struct platform_driver av8100_codec_platform_driver = {
+ .driver = {
+ .name = "av8100-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = av8100_codec_drv_probe,
+ .remove = __devexit_p(av8100_codec_drv_remove),
+ .id_table = av8100_codec_platform_id,
+};
+
+static int __init av8100_codec_platform_drv_init(void)
+{
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ ret = platform_driver_register(&av8100_codec_platform_driver);
+ if (ret != 0) {
+ pr_err("Failed to register AV8100 platform driver (%d)!\n", ret);
+ }
+
+ return ret;
+}
+
+static void __exit av8100_codec_platform_drv_exit(void)
+{
+ pr_debug("%s: Enter.\n", __func__);
+
+ platform_driver_unregister(&av8100_codec_platform_driver);
+}
+
+module_init(av8100_codec_platform_drv_init);
+module_exit(av8100_codec_platform_drv_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/av8100_audio.h b/sound/soc/codecs/av8100_audio.h
new file mode 100644
index 00000000000..4802e0d0242
--- /dev/null
+++ b/sound/soc/codecs/av8100_audio.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+#ifndef AV8100_AUDIO_CODEC_H
+#define AV8100_AUDIO_CODEC_H
+
+/* Supported sampling rates */
+#define AV8100_SUPPORTED_RATE (SNDRV_PCM_RATE_48000)
+
+/* Supported data formats */
+#define AV8100_SUPPORTED_FMT (SNDRV_PCM_FMTBIT_S16_LE)
+
+/* TDM-slot mask */
+#define AV8100_CODEC_MASK_MONO 0x0001
+#define AV8100_CODEC_MASK_STEREO 0x0005
+#define AV8100_CODEC_MASK_2DOT1 0x0015
+#define AV8100_CODEC_MASK_QUAD 0x0505
+#define AV8100_CODEC_MASK_5DOT0 0x0545
+#define AV8100_CODEC_MASK_5DOT1 0x0555
+#define AV8100_CODEC_MASK_7DOT0 0x5545
+#define AV8100_CODEC_MASK_7DOT1 0x5555
+
+enum hdmi_audio_coding_type {
+ AV8100_CODEC_CT_REFER,
+ AV8100_CODEC_CT_IEC60958_PCM,
+ AV8100_CODEC_CT_AC3,
+ AV8100_CODEC_CT_MPEG1,
+ AV8100_CODEC_CT_MP3,
+ AV8100_CODEC_CT_MPEG2,
+ AV8100_CODEC_CT_AAC,
+ AV8100_CODEC_CT_DTS,
+ AV8100_CODEC_CT_ATRAC,
+ AV8100_CODEC_CT_ONE_BIT_AUDIO,
+ AV8100_CODEC_CT_DOLBY_DIGITAL,
+ AV8100_CODEC_CT_DTS_HD,
+ AV8100_CODEC_CT_MAT,
+ AV8100_CODEC_CT_DST,
+ AV8100_CODEC_CT_WMA_PRO
+};
+
+enum hdmi_audio_channel_count {
+ AV8100_CODEC_CC_REFER,
+ AV8100_CODEC_CC_2CH,
+ AV8100_CODEC_CC_3CH,
+ AV8100_CODEC_CC_4CH,
+ AV8100_CODEC_CC_5CH,
+ AV8100_CODEC_CC_6CH,
+ AV8100_CODEC_CC_7CH,
+ AV8100_CODEC_CC_8CH
+};
+
+enum hdmi_sampling_frequency {
+ AV8100_CODEC_SF_REFER,
+ AV8100_CODEC_SF_32KHZ,
+ AV8100_CODEC_SF_44_1KHZ,
+ AV8100_CODEC_SF_48KHZ,
+ AV8100_CODEC_SF_88_2KHZ,
+ AV8100_CODEC_SF_96KHZ,
+ AV8100_CODEC_SF_176_4KHZ,
+ AV8100_CODEC_SF_192KHZ
+};
+
+enum hdmi_sample_size {
+ AV8100_CODEC_SS_REFER,
+ AV8100_CODEC_SS_16BIT,
+ AV8100_CODEC_SS_20BIT,
+ AV8100_CODEC_SS_24BIT
+};
+
+enum hdmi_speaker_placement {
+ AV8100_CODEC_SP_FL, /* Front Left */
+ AV8100_CODEC_SP_FC, /* Front Center */
+ AV8100_CODEC_SP_FR, /* Front Right */
+ AV8100_CODEC_SP_FLC, /* Front Left Center */
+ AV8100_CODEC_SP_FRC, /* Front Right Center */
+ AV8100_CODEC_SP_RL, /* Rear Left */
+ AV8100_CODEC_SP_RC, /* Rear Center */
+ AV8100_CODEC_SP_RR, /* Rear Right */
+ AV8100_CODEC_SP_RLC, /* Rear Left Center */
+ AV8100_CODEC_SP_RRC, /* Rear Right Center */
+ AV8100_CODEC_SP_LFE, /* Low Frequency Effect */
+};
+
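+/*
+ * Channel-allocation codes; the hex values noted below correspond to the
+ * Channel Allocation (CA) field of the CEA-861 Audio InfoFrame.
+ */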
+enum hdmi_channel_allocation {
+ AV8100_CODEC_CA_FL_FR, /* 0x00, Stereo */
+ AV8100_CODEC_CA_FL_FR_LFE, /* 0x01, 2.1 */
+ AV8100_CODEC_CA_FL_FR_FC, /* 0x02*/
+ AV8100_CODEC_CA_FL_FR_LFE_FC, /* 0x03*/
+ AV8100_CODEC_CA_FL_FR_RC, /* 0x04*/
+ AV8100_CODEC_CA_FL_FR_LFE_RC, /* 0x05*/
+ AV8100_CODEC_CA_FL_FR_FC_RC, /* 0x06*/
+ AV8100_CODEC_CA_FL_FR_LFE_FC_RC, /* 0x07*/
+ AV8100_CODEC_CA_FL_FR_RL_RR, /* 0x08, Quad */
+ AV8100_CODEC_CA_FL_FR_LFE_RL_RR, /* 0x09*/
+ AV8100_CODEC_CA_FL_FR_FC_RL_RR, /* 0x0a, 5.0*/
+ AV8100_CODEC_CA_FL_FR_LFE_FC_RL_RR, /* 0x0b, 5.1*/
+ AV8100_CODEC_CA_FL_FR_RL_RR_RC, /* 0x0c*/
+ AV8100_CODEC_CA_FL_FR_LFE_RL_RR_RC, /* 0x0d*/
+ AV8100_CODEC_CA_FL_FR_RC_RL_RR_RC, /* 0x0e*/
+ AV8100_CODEC_CA_FL_FR_LFE_RC_RL_RR_RC, /* 0x0f*/
+ AV8100_CODEC_CA_FL_FR_RL_RR_RLC_RRC, /* 0x10*/
+ AV8100_CODEC_CA_FL_FR_LFE_RL_RR_RLC_RRC, /* 0x11*/
+ AV8100_CODEC_CA_FL_FR_FC_RL_RR_RLC_RRC, /* 0x12*/
+ AV8100_CODEC_CA_FL_FR_LFE_FC_RL_RR_RLC_RRC, /* 0x13*/
+ AV8100_CODEC_CA_FL_FR_FLC_FRC, /* 0x14*/
+ AV8100_CODEC_CA_FL_FR_LFE_FLC_FRC, /* 0x15*/
+ AV8100_CODEC_CA_FL_FR_FC_FLC_FRC, /* 0x16*/
+ AV8100_CODEC_CA_FL_FR_LFE_FC_FLC_FRC, /* 0x17*/
+ AV8100_CODEC_CA_FL_FR_RC_FLC_FRC, /* 0x18*/
+ AV8100_CODEC_CA_FL_FR_LFE_RC_FLC_FRC, /* 0x19*/
+ AV8100_CODEC_CA_FL_FR_FC_RC_FLC_FRC, /* 0x1a*/
+ AV8100_CODEC_CA_FL_FR_LFE_FR_FC_RC_FLC_FRC, /* 0x1b*/
+ AV8100_CODEC_CA_FL_FR_RL_RR_FLC_FRC, /* 0x1c*/
+ AV8100_CODEC_CA_FL_FR_LFE_RL_RR_FLC_FRC, /* 0x1d*/
+ AV8100_CODEC_CA_FL_FR_FC_RL_RR_FLC_FRC, /* 0x1e*/
+ AV8100_CODEC_CA_FL_FR_LFE_FC_RL_RR_FLC_FRC /* 0x1f, 7.1 */
+};
+
+enum hdmi_level_shift_value {
+ AV8100_CODEC_LSV_0DB,
+ AV8100_CODEC_LSV_1DB,
+ AV8100_CODEC_LSV_2DB,
+ AV8100_CODEC_LSV_3DB,
+ AV8100_CODEC_LSV_4DB,
+ AV8100_CODEC_LSV_5DB,
+ AV8100_CODEC_LSV_6DB,
+ AV8100_CODEC_LSV_7DB,
+ AV8100_CODEC_LSV_8DB,
+ AV8100_CODEC_LSV_9DB,
+ AV8100_CODEC_LSV_10DB,
+ AV8100_CODEC_LSV_11DB,
+ AV8100_CODEC_LSV_12DB,
+ AV8100_CODEC_LSV_13DB,
+ AV8100_CODEC_LSV_14DB,
+ AV8100_CODEC_LSV_15DB
+};
+
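+/*
+ * Audio parameters handed to the codec-driver through
+ * av8100_audio_change_hdmi_audio_settings(); the fields mirror the
+ * enums defined above.
+ */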
+struct hdmi_audio_settings {
+ enum hdmi_audio_channel_count audio_channel_count;
+ enum hdmi_sampling_frequency sampling_frequency;
+ enum hdmi_sample_size sample_size;
+ enum hdmi_channel_allocation channel_allocation;
+ enum hdmi_level_shift_value level_shift_value;
+ bool downmix_inhibit;
+};
+
+/* Extended interface for codec-driver */
+int av8100_audio_change_hdmi_audio_settings(struct snd_soc_dai *dai,
+ struct hdmi_audio_settings *as);
+
+#endif /* AV8100_AUDIO_CODEC_H */
diff --git a/sound/soc/codecs/cg29xx.c b/sound/soc/codecs/cg29xx.c
new file mode 100644
index 00000000000..109387fa2f9
--- /dev/null
+++ b/sound/soc/codecs/cg29xx.c
@@ -0,0 +1,772 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Roger Nilsson <roger.xr.nilsson@stericsson.com>,
+ * Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <linux/bitops.h>
+#include <../../../drivers/staging/cg2900/include/cg2900_audio.h>
+
+#include "cg29xx.h"
+
+#define CG29XX_NBR_OF_DAI 2
+#define CG29XX_SUPPORTED_RATE_PCM (SNDRV_PCM_RATE_8000 | \
+ SNDRV_PCM_RATE_16000)
+
+#define CG29XX_SUPPORTED_RATE (SNDRV_PCM_RATE_8000 | \
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+
+#define CG29XX_SUPPORTED_FMT (SNDRV_PCM_FMTBIT_S16_LE)
+
+enum cg29xx_dai_direction {
+ CG29XX_DAI_DIRECTION_TX,
+ CG29XX_DAI_DIRECTION_RX
+};
+
+static int cg29xx_dai_startup(
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+
+static int cg29xx_dai_prepare(
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+
+static int cg29xx_dai_hw_params(
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params,
+ struct snd_soc_dai *dai);
+
+static void cg29xx_dai_shutdown(
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+
+static int cg29xx_set_dai_sysclk(
+ struct snd_soc_dai *codec_dai,
+ int clk_id,
+ unsigned int freq, int dir);
+
+static int cg29xx_set_dai_fmt(
+ struct snd_soc_dai *codec_dai,
+ unsigned int fmt);
+
+static int cg29xx_set_tdm_slot(
+ struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots,
+ int slot_width);
+
+static struct cg29xx_codec codec_private = {
+ .session = 0,
+};
+
+static struct snd_soc_dai_ops cg29xx_dai_driver_dai_ops = {
+ .startup = cg29xx_dai_startup,
+ .prepare = cg29xx_dai_prepare,
+ .hw_params = cg29xx_dai_hw_params,
+ .shutdown = cg29xx_dai_shutdown,
+ .set_sysclk = cg29xx_set_dai_sysclk,
+ .set_fmt = cg29xx_set_dai_fmt,
+ .set_tdm_slot = cg29xx_set_tdm_slot
+};
+
+struct snd_soc_dai_driver cg29xx_dai_driver[] = {
+ {
+ .name = "cg29xx-codec-dai.0",
+ .id = 0,
+ .playback = {
+ .stream_name = "CG29xx.0 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = CG29XX_SUPPORTED_RATE,
+ .formats = CG29XX_SUPPORTED_FMT,
+ },
+ .capture = {
+ .stream_name = "CG29xx.0 Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = CG29XX_SUPPORTED_RATE,
+ .formats = CG29XX_SUPPORTED_FMT,
+ },
+ .ops = &cg29xx_dai_driver_dai_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "cg29xx-codec-dai.1",
+ .id = 1,
+ .playback = {
+ .stream_name = "CG29xx.1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = CG29XX_SUPPORTED_RATE_PCM,
+ .formats = CG29XX_SUPPORTED_FMT,
+ },
+ .capture = {
+ .stream_name = "CG29xx.1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = CG29XX_SUPPORTED_RATE_PCM,
+ .formats = CG29XX_SUPPORTED_FMT,
+ },
+ .ops = &cg29xx_dai_driver_dai_ops,
+ .symmetric_rates = 1,
+ }
+};
+EXPORT_SYMBOL_GPL(cg29xx_dai_driver);
+
+static const char *enum_ifs_input_select[] = {
+ "BT_SCO", "FM_RX"
+};
+
+static const char *enum_ifs_output_select[] = {
+ "BT_SCO", "FM_TX"
+};
+
+/* If0 Input Select */
+static struct soc_enum if0_input_select =
+ SOC_ENUM_SINGLE(INTERFACE0_INPUT_SELECT, 0,
+ ARRAY_SIZE(enum_ifs_input_select),
+ enum_ifs_input_select);
+
+/* If1 Input Select */
+static struct soc_enum if1_input_select =
+ SOC_ENUM_SINGLE(INTERFACE1_INPUT_SELECT, 0,
+ ARRAY_SIZE(enum_ifs_input_select),
+ enum_ifs_input_select);
+
+/* If0 Output Select */
+static struct soc_enum if0_output_select =
+ SOC_ENUM_SINGLE(INTERFACE0_OUTPUT_SELECT, 0,
+ ARRAY_SIZE(enum_ifs_output_select),
+ enum_ifs_output_select);
+
+/* If1 Output Select */
+static struct soc_enum if1_output_select =
+ SOC_ENUM_SINGLE(INTERFACE1_OUTPUT_SELECT, 4,
+ ARRAY_SIZE(enum_ifs_output_select),
+ enum_ifs_output_select);
+
+static struct snd_kcontrol_new cg29xx_snd_controls[] = {
+ SOC_ENUM("If0 Input Select", if0_input_select),
+ SOC_ENUM("If1 Input Select", if1_input_select),
+ SOC_ENUM("If0 Output Select", if0_output_select),
+ SOC_ENUM("If1 Output Select", if1_output_select),
+};
+
+
+static struct cg29xx_codec_dai_data *get_dai_data_codec(struct snd_soc_codec *codec,
+ int dai_id)
+{
+ struct cg29xx_codec_dai_data *codec_drvdata = snd_soc_codec_get_drvdata(codec);
+ return &codec_drvdata[dai_id];
+}
+
+static struct cg29xx_codec_dai_data *get_dai_data(struct snd_soc_dai *codec_dai)
+{
+ return get_dai_data_codec(codec_dai->codec, codec_dai->id);
+}
+
+static int cg29xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id,
+ unsigned int freq, int dir)
+{
+ return 0;
+}
+
+static int cg29xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct cg29xx_codec_dai_data *dai_data = get_dai_data(codec_dai);
+ unsigned int prot;
+ unsigned int msel;
+ prot = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
+ msel = fmt & SND_SOC_DAIFMT_MASTER_MASK;
+
+ switch (prot) {
+ case SND_SOC_DAIFMT_I2S:
+ if (dai_data->config.port != PORT_0_I2S) {
+ pr_err("cg29xx_dai: unsupported DAI format 0x%x\n",
+ fmt);
+ return -EINVAL;
+ }
+
+ if (msel == SND_SOC_DAIFMT_CBM_CFM)
+ dai_data->config.conf.i2s.mode = DAI_MODE_MASTER;
+ else
+ dai_data->config.conf.i2s.mode = DAI_MODE_SLAVE;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_B:
+ if (dai_data->config.port != PORT_1_I2S_PCM ||
+ msel == SND_SOC_DAIFMT_CBM_CFM) {
+ pr_err("cg29xx_dai: unsupported DAI format 0x%x port=%d,msel=%d\n",
+ fmt, dai_data->config.port, msel);
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
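+/*
+ * Select which PCM slots carry data on the I2S/PCM port (port 1 only) and
+ * derive each slot's start position from the slot width.
+ */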
+static int cg29xx_set_tdm_slot(struct snd_soc_dai *codec_dai,
+ unsigned int tx_mask,
+ unsigned int rx_mask,
+ int slots,
+ int slot_width)
+{
+ struct cg29xx_codec_dai_data *dai_data = get_dai_data(codec_dai);
+
+ if (dai_data->config.port != PORT_1_I2S_PCM)
+ return -EINVAL;
+
+ dai_data->config.conf.i2s_pcm.slot_0_used =
+ (tx_mask | rx_mask) & (1<<CG29XX_DAI_SLOT0_SHIFT) ?
+ true : false;
+ dai_data->config.conf.i2s_pcm.slot_1_used =
+ (tx_mask | rx_mask) & (1<<CG29XX_DAI_SLOT1_SHIFT) ?
+ true : false;
+ dai_data->config.conf.i2s_pcm.slot_2_used =
+ (tx_mask | rx_mask) & (1<<CG29XX_DAI_SLOT2_SHIFT) ?
+ true : false;
+ dai_data->config.conf.i2s_pcm.slot_3_used =
+ (tx_mask | rx_mask) & (1<<CG29XX_DAI_SLOT3_SHIFT) ?
+ true : false;
+
+ dai_data->config.conf.i2s_pcm.slot_0_start = 0;
+ dai_data->config.conf.i2s_pcm.slot_1_start = slot_width;
+ dai_data->config.conf.i2s_pcm.slot_2_start = 2 * slot_width;
+ dai_data->config.conf.i2s_pcm.slot_3_start = 3 * slot_width;
+
+ return 0;
+}
+
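+/*
+ * Translate the DAI sample rate into the corresponding CG2900 endpoint
+ * sample rate and configure the selected endpoint (BT SCO or FM RX/TX).
+ */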
+static int cg29xx_configure_endp(struct cg29xx_codec_dai_data *dai_data,
+ enum cg2900_audio_endpoint_id endpid)
+{
+ struct cg2900_endpoint_config config;
+ int err;
+ enum cg2900_dai_sample_rate dai_sr;
+ enum cg2900_endpoint_sample_rate endp_sr;
+
+ switch (dai_data->config.port) {
+ default:
+ case PORT_0_I2S:
+ dai_sr = dai_data->config.conf.i2s.sample_rate;
+ break;
+
+ case PORT_1_I2S_PCM:
+ dai_sr = dai_data->config.conf.i2s_pcm.sample_rate;
+ break;
+ }
+
+ switch (dai_sr) {
+ default:
+ case SAMPLE_RATE_8:
+ endp_sr = ENDPOINT_SAMPLE_RATE_8_KHZ;
+ break;
+ case SAMPLE_RATE_16:
+ endp_sr = ENDPOINT_SAMPLE_RATE_16_KHZ;
+ break;
+ case SAMPLE_RATE_44_1:
+ endp_sr = ENDPOINT_SAMPLE_RATE_44_1_KHZ;
+ break;
+ case SAMPLE_RATE_48:
+ endp_sr = ENDPOINT_SAMPLE_RATE_48_KHZ;
+ break;
+ }
+
+ config.endpoint_id = endpid;
+
+ switch (endpid) {
+ default:
+ case ENDPOINT_BT_SCO_INOUT:
+ config.config.sco.sample_rate = endp_sr;
+ break;
+
+ case ENDPOINT_FM_TX:
+ case ENDPOINT_FM_RX:
+ config.config.fm.sample_rate = endp_sr;
+ break;
+ }
+
+ err = cg2900_audio_config_endpoint(codec_private.session, &config);
+
+ return err;
+}
+
+static int cg29xx_stop_if(struct cg29xx_codec_dai_data *dai_data,
+ enum cg29xx_dai_direction direction)
+{
+ int err = 0;
+ unsigned int *stream;
+
+ if (direction == CG29XX_DAI_DIRECTION_TX)
+ stream = &dai_data->tx_active;
+ else
+ stream = &dai_data->rx_active;
+
+ if (*stream) {
+ err = cg2900_audio_stop_stream(
+ codec_private.session,
+ *stream);
+ if (!err) {
+ *stream = 0;
+ } else {
+ pr_err("asoc cg29xx - %s - Failed to stop stream on interface %d.\n",
+ __func__,
+ dai_data->config.port);
+ }
+ }
+
+ return err;
+}
+
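+/*
+ * Start a CG2900 audio stream between the DAI port endpoint and the
+ * currently selected BT/FM endpoint. A stream that is already active,
+ * or the BT SCO endpoint, is left untouched.
+ */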
+static int cg29xx_start_if(struct cg29xx_codec_dai_data *dai_data,
+ enum cg29xx_dai_direction direction)
+{
+ enum cg2900_audio_endpoint_id if_endpid;
+ enum cg2900_audio_endpoint_id endpid;
+ unsigned int *stream;
+ int err;
+
+ if (dai_data->config.port == PORT_0_I2S)
+ if_endpid = ENDPOINT_PORT_0_I2S;
+ else
+ if_endpid = ENDPOINT_PORT_1_I2S_PCM;
+
+ if (direction == CG29XX_DAI_DIRECTION_RX) {
+ switch (dai_data->output_select) {
+ default:
+ case 0:
+ endpid = ENDPOINT_BT_SCO_INOUT;
+ break;
+ case 1:
+ endpid = ENDPOINT_FM_TX;
+ }
+ stream = &dai_data->rx_active;
+ } else {
+ switch (dai_data->input_select) {
+ default:
+ case 0:
+ endpid = ENDPOINT_BT_SCO_INOUT;
+ break;
+ case 1:
+ endpid = ENDPOINT_FM_RX;
+ }
+
+ stream = &dai_data->tx_active;
+ }
+
+ if (*stream || (endpid == ENDPOINT_BT_SCO_INOUT)) {
+ pr_debug("asoc cg29xx - %s - The interface has already been started.\n",
+ __func__);
+ return 0;
+ }
+
+ pr_debug("asoc cg29xx - %s - direction: %d, if_id: %d endpid: %d\n",
+ __func__,
+ direction,
+ if_endpid,
+ endpid);
+
+ err = cg29xx_configure_endp(dai_data, endpid);
+
+ if (err) {
+ pr_err("asoc cg29xx - %s - Configure endpoint id: %d failed.\n",
+ __func__,
+ endpid);
+
+ return err;
+ }
+
+ err = cg2900_audio_start_stream(codec_private.session,
+ if_endpid,
+ endpid,
+ stream);
+
+ return err;
+}
+
+static int cg29xx_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int err = 0;
+
+ if (!codec_private.session)
+ err = cg2900_audio_open(&codec_private.session, NULL);
+
+ return err;
+}
+
+static int cg29xx_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *codec_dai)
+{
+ struct cg29xx_codec_dai_data *dai_data = get_dai_data(codec_dai);
+ int err = 0;
+ enum cg29xx_dai_direction direction;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ direction = CG29XX_DAI_DIRECTION_RX;
+ else
+ direction = CG29XX_DAI_DIRECTION_TX;
+
+ err = cg29xx_start_if(dai_data, direction);
+
+ return err;
+}
+
+static void cg29xx_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *codec_dai)
+{
+ struct cg29xx_codec_dai_data *dai_data = get_dai_data(codec_dai);
+ enum cg29xx_dai_direction direction;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ direction = CG29XX_DAI_DIRECTION_RX;
+ else
+ direction = CG29XX_DAI_DIRECTION_TX;
+
+ (void) cg29xx_stop_if(dai_data, direction);
+}
+
+static int cg29xx_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params,
+ struct snd_soc_dai *codec_dai)
+{
+ struct cg29xx_codec_dai_data *dai_data = get_dai_data(codec_dai);
+ enum cg2900_dai_fs_duration duration = SYNC_DURATION_32;
+ enum cg2900_dai_bit_clk bclk = BIT_CLK_512;
+ int sr;
+ int err = 0;
+ enum cg2900_dai_stream_ratio ratio = STREAM_RATIO_FM48_VOICE16;
+
+ pr_debug("cg29xx asoc - %s called. Port: %d.\n",
+ __func__,
+ dai_data->config.port);
+
+ switch (params_rate(hw_params)) {
+ case 8000:
+ sr = SAMPLE_RATE_8;
+ bclk = BIT_CLK_512;
+ duration = SYNC_DURATION_32;
+ ratio = STREAM_RATIO_FM48_VOICE8;
+ break;
+ case 16000:
+ sr = SAMPLE_RATE_16;
+ bclk = BIT_CLK_512;
+ duration = SYNC_DURATION_32;
+ ratio = STREAM_RATIO_FM48_VOICE16;
+ break;
+ case 44100:
+ sr = SAMPLE_RATE_44_1;
+ break;
+ case 48000:
+ sr = SAMPLE_RATE_48;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dai_data->config.port == PORT_0_I2S) {
+ dai_data->config.conf.i2s.sample_rate = sr;
+ } else {
+ dai_data->config.conf.i2s_pcm.sample_rate = sr;
+ dai_data->config.conf.i2s_pcm.duration = duration;
+ dai_data->config.conf.i2s_pcm.clk = bclk;
+ dai_data->config.conf.i2s_pcm.ratio = ratio;
+ }
+
+ if (!(dai_data->tx_active | dai_data->rx_active) &&
+ dai_data->config.port != PORT_1_I2S_PCM) {
+ err = cg2900_audio_set_dai_config(
+ codec_private.session,
+ &dai_data->config);
+
+ pr_debug("asoc cg29xx: cg2900_audio_set_dai_config"
+ "on port %d completed with result: %d.\n",
+ dai_data->config.port,
+ err);
+ }
+
+ return err;
+}
+
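+/*
+ * The CG29xx codec exposes no hardware register map; reads and writes act on
+ * the virtual INTERFACEx_INPUT/OUTPUT_SELECT registers backed by the per-DAI
+ * routing state, restarting an active interface when the routing changes.
+ */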
+static unsigned int cg29xx_codec_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ struct cg29xx_codec_dai_data *dai_data;
+
+ switch (reg) {
+ case INTERFACE0_INPUT_SELECT:
+ dai_data = get_dai_data_codec(codec, 0);
+ return dai_data->input_select;
+
+ case INTERFACE1_INPUT_SELECT:
+ dai_data = get_dai_data_codec(codec, 1);
+ return dai_data->input_select;
+
+ case INTERFACE0_OUTPUT_SELECT:
+ dai_data = get_dai_data_codec(codec, 0);
+ return dai_data->output_select;
+
+ case INTERFACE1_OUTPUT_SELECT:
+ dai_data = get_dai_data_codec(codec, 1);
+ return dai_data->output_select;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int cg29xx_codec_write(struct snd_soc_codec *codec,
+ unsigned int reg,
+ unsigned int value)
+{
+ struct cg29xx_codec_dai_data *dai_data;
+ enum cg29xx_dai_direction direction;
+ bool restart_if = false;
+ int old_value;
+
+ switch (reg) {
+ case INTERFACE0_INPUT_SELECT:
+ dai_data = get_dai_data_codec(codec, 0);
+ direction = CG29XX_DAI_DIRECTION_TX;
+
+ old_value = dai_data->input_select;
+ dai_data->input_select = value;
+
+ if ((old_value ^ value) && dai_data->tx_active)
+ restart_if = true;
+ break;
+
+ case INTERFACE1_INPUT_SELECT:
+ dai_data = get_dai_data_codec(codec, 1);
+ direction = CG29XX_DAI_DIRECTION_TX;
+
+ old_value = dai_data->input_select;
+ dai_data->input_select = value;
+
+ if ((old_value ^ value) && dai_data->tx_active)
+ restart_if = true;
+ break;
+
+ case INTERFACE0_OUTPUT_SELECT:
+ dai_data = get_dai_data_codec(codec, 0);
+ direction = CG29XX_DAI_DIRECTION_RX;
+
+ old_value = dai_data->output_select;
+ dai_data->output_select = value;
+
+ if ((old_value ^ value) && dai_data->rx_active)
+ restart_if = true;
+ break;
+
+ case INTERFACE1_OUTPUT_SELECT:
+ dai_data = get_dai_data_codec(codec, 1);
+ direction = CG29XX_DAI_DIRECTION_RX;
+
+ old_value = dai_data->output_select;
+ dai_data->output_select = value;
+
+ if ((old_value ^ value) && dai_data->rx_active)
+ restart_if = true;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (restart_if) {
+ (void) cg29xx_stop_if(dai_data, direction);
+ (void) cg29xx_start_if(dai_data, direction);
+ }
+
+ return 0;
+}
+
+static int cg29xx_codec_probe(struct snd_soc_codec *codec)
+{
+ pr_debug("%s: Enter (codec->name = %s).\n", __func__, codec->name);
+
+ snd_soc_add_controls(
+ codec,
+ cg29xx_snd_controls,
+ ARRAY_SIZE(cg29xx_snd_controls));
+
+ return 0;
+}
+
+static int cg29xx_codec_remove(struct snd_soc_codec *codec)
+{
+ pr_debug("%s: Enter (codec->name = %s).\n", __func__, codec->name);
+
+ return 0;
+}
+
+static int cg29xx_codec_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+ pr_debug("%s: Enter (codec->name = %s).\n", __func__, codec->name);
+
+ return 0;
+}
+
+static int cg29xx_codec_resume(struct snd_soc_codec *codec)
+{
+ pr_debug("%s: Enter (codec->name = %s).\n", __func__, codec->name);
+
+ return 0;
+}
+
+struct snd_soc_codec_driver cg29xx_codec_driver = {
+ .probe = cg29xx_codec_probe,
+ .remove = cg29xx_codec_remove,
+ .suspend = cg29xx_codec_suspend,
+ .resume = cg29xx_codec_resume,
+ .read = cg29xx_codec_read,
+ .write = cg29xx_codec_write,
+};
+
+static int __devinit cg29xx_codec_driver_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct cg29xx_codec_dai_data *dai_data;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ pr_info("%s: Init codec private data..\n", __func__);
+ dai_data = kzalloc(CG29XX_NBR_OF_DAI * sizeof(struct cg29xx_codec_dai_data),
+ GFP_KERNEL);
+ if (dai_data == NULL)
+ return -ENOMEM;
+
+ dai_data[0].tx_active = 0;
+ dai_data[0].rx_active = 0;
+ dai_data[0].input_select = 1;
+ dai_data[0].output_select = 1;
+ dai_data[0].config.port = PORT_0_I2S;
+ dai_data[0].config.conf.i2s.mode = DAI_MODE_SLAVE;
+ dai_data[0].config.conf.i2s.half_period = HALF_PER_DUR_16;
+ dai_data[0].config.conf.i2s.channel_sel = CHANNEL_SELECTION_BOTH;
+ dai_data[0].config.conf.i2s.sample_rate = SAMPLE_RATE_48;
+ dai_data[0].config.conf.i2s.word_width = WORD_WIDTH_32;
+ dai_data[1].tx_active = 0;
+ dai_data[1].rx_active = 0;
+ dai_data[1].input_select = 0;
+ dai_data[1].output_select = 0;
+ dai_data[1].config.port = PORT_1_I2S_PCM;
+ dai_data[1].config.conf.i2s_pcm.mode = DAI_MODE_SLAVE;
+ dai_data[1].config.conf.i2s_pcm.slot_0_dir = DAI_DIR_B_RX_A_TX;
+ dai_data[1].config.conf.i2s_pcm.slot_1_dir = DAI_DIR_B_TX_A_RX;
+ dai_data[1].config.conf.i2s_pcm.slot_2_dir = DAI_DIR_B_RX_A_TX;
+ dai_data[1].config.conf.i2s_pcm.slot_3_dir = DAI_DIR_B_RX_A_TX;
+ dai_data[1].config.conf.i2s_pcm.slot_0_used = true;
+ dai_data[1].config.conf.i2s_pcm.slot_1_used = false;
+ dai_data[1].config.conf.i2s_pcm.slot_2_used = false;
+ dai_data[1].config.conf.i2s_pcm.slot_3_used = false;
+ dai_data[1].config.conf.i2s_pcm.slot_0_start = 0;
+ dai_data[1].config.conf.i2s_pcm.slot_1_start = 16;
+ dai_data[1].config.conf.i2s_pcm.slot_2_start = 32;
+ dai_data[1].config.conf.i2s_pcm.slot_3_start = 48;
+ dai_data[1].config.conf.i2s_pcm.protocol = PORT_PROTOCOL_PCM;
+ dai_data[1].config.conf.i2s_pcm.ratio = STREAM_RATIO_FM48_VOICE16;
+ dai_data[1].config.conf.i2s_pcm.duration = SYNC_DURATION_32;
+ dai_data[1].config.conf.i2s_pcm.clk = BIT_CLK_512;
+ dai_data[1].config.conf.i2s_pcm.sample_rate = SAMPLE_RATE_16;
+
+ platform_set_drvdata(pdev, dai_data);
+
+ pr_info("%s: Register codec.\n", __func__);
+ ret = snd_soc_register_codec(&pdev->dev, &cg29xx_codec_driver, &cg29xx_dai_driver[0], 2);
+ if (ret < 0) {
+ pr_debug("%s: Error: Failed to register codec (ret = %d).\n",
+ __func__,
+ ret);
+ snd_soc_unregister_codec(&pdev->dev);
+ kfree(platform_get_drvdata(pdev));
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devexit cg29xx_codec_driver_remove(struct platform_device *pdev)
+{
+ (void)cg2900_audio_close(&codec_private.session);
+
+ snd_soc_unregister_codec(&pdev->dev);
+ kfree(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static int cg29xx_codec_driver_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int cg29xx_codec_driver_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver cg29xx_codec_platform_driver = {
+ .driver = {
+ .name = "cg29xx-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = cg29xx_codec_driver_probe,
+ .remove = __devexit_p(cg29xx_codec_driver_remove),
+ .suspend = cg29xx_codec_driver_suspend,
+ .resume = cg29xx_codec_driver_resume,
+};
+
+
+static int __init cg29xx_codec_platform_driver_init(void)
+{
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ ret = platform_driver_register(&cg29xx_codec_platform_driver);
+ if (ret != 0)
+ pr_err("Failed to register CG29xx platform driver (%d)!\n", ret);
+
+ return ret;
+}
+
+static void __exit cg29xx_codec_platform_driver_exit(void)
+{
+ pr_debug("%s: Enter.\n", __func__);
+
+ platform_driver_unregister(&cg29xx_codec_platform_driver);
+}
+
+
+module_init(cg29xx_codec_platform_driver_init);
+module_exit(cg29xx_codec_platform_driver_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/cg29xx.h b/sound/soc/codecs/cg29xx.h
new file mode 100644
index 00000000000..fec52d7cdd7
--- /dev/null
+++ b/sound/soc/codecs/cg29xx.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Roger Nilsson roger.xr.nilsson@stericsson.com
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+#ifndef CG29XX_CODEC_H
+#define CG29XX_CODEC_H
+
+#include <../../../drivers/staging/cg2900/include/cg2900_audio.h>
+
+struct cg29xx_codec_dai_data {
+ struct mutex mutex;
+ unsigned int rx_active;
+ unsigned int tx_active;
+ int input_select;
+ int output_select;
+ struct cg2900_dai_config config;
+};
+
+struct cg29xx_codec {
+ unsigned int session;
+};
+
+#define CG29XX_DAI_SLOT0_SHIFT 0
+#define CG29XX_DAI_SLOT1_SHIFT 1
+#define CG29XX_DAI_SLOT2_SHIFT 2
+#define CG29XX_DAI_SLOT3_SHIFT 3
+
+#define INTERFACE0_INPUT_SELECT 0x00
+#define INTERFACE1_INPUT_SELECT 0x01
+#define INTERFACE0_OUTPUT_SELECT 0x02
+#define INTERFACE1_OUTPUT_SELECT 0x03
+
+#endif /* CG29XX_CODEC_H */
diff --git a/sound/soc/ux500/Kconfig b/sound/soc/ux500/Kconfig
new file mode 100644
index 00000000000..f412c89d8df
--- /dev/null
+++ b/sound/soc/ux500/Kconfig
@@ -0,0 +1,67 @@
+#
+# Ux500 SoC audio configuration
+#
+
+menuconfig SND_SOC_UX500
+ bool "SoC Audio support for Ux500 platform"
+ depends on SND_SOC && STM_MSP_SPI
+ default n
+ help
+ Say Y if you want to add support for the Ux500 platform.
+
+choice
+ prompt "Platform 5500/8500"
+ depends on SND_SOC_UX500
+ default SND_SOC_U8500
+ config SND_SOC_U8500
+ bool "Platform - U8500"
+ config SND_SOC_U5500
+ bool "Platform - U5500"
+endchoice
+
+config SND_SOC_UX500_AB3550
+ bool "Codec - AB3550"
+ depends on SND_SOC_UX500 && (UX500_SOC_DB8500 || UX500_SOC_DB5500) && AB3550_CORE
+ select SND_SOC_AB3550
+ default n
+ help
+ Say Y if you want to include the AB3550 codec.
+
+config SND_SOC_UX500_AB5500
+ bool "Codec - AB5500"
+ depends on SND_SOC_UX500 && (UX500_SOC_DB8500 || UX500_SOC_DB5500) && AB5500_CORE
+ select SND_SOC_AB5500
+ default n
+ help
+ Say Y if you want to include the AB5500 codec.
+
+config SND_SOC_UX500_AB8500
+ bool "Codec - AB8500"
+ depends on SND_SOC_UX500 && UX500_SOC_DB8500 && AB8500_CORE && AB8500_GPADC
+ select SND_SOC_AB8500
+ default n
+ help
+ Say Y if you want to include the AB8500 audio codec.
+
+config SND_SOC_UX500_CG29XX
+ bool "Codec - CG29xx"
+ depends on SND_SOC_UX500 && (UX500_SOC_DB8500 || UX500_SOC_DB5500) && CG2900_AUDIO
+ select SND_SOC_CG29XX
+ default n
+ help
+ Say Y if you want to include the CG29xx codec (combo chip).
+
+config SND_SOC_UX500_AV8100
+ bool "Codec - AV8100"
+ depends on SND_SOC_UX500 && (UX500_SOC_DB8500 || UX500_SOC_DB5500) && AV8100
+ select SND_SOC_AV8100
+ default n
+ help
+ Say Y if you want to include the AV8100 codec (HDMI chip).
+
+config SND_SOC_UX500_DEBUG
+ bool "Activate Ux500 platform debug-mode (pr_debug)"
+ depends on SND_SOC_UX500
+ default n
+ help
+ Say Y if you want to add debug-level prints for the Ux500 source files.
diff --git a/sound/soc/ux500/Makefile b/sound/soc/ux500/Makefile
new file mode 100644
index 00000000000..262e44a2812
--- /dev/null
+++ b/sound/soc/ux500/Makefile
@@ -0,0 +1,46 @@
+# Ux500 Platform Support
+
+ifdef CONFIG_SND_SOC_UX500_DEBUG
+CFLAGS_u8500.o := -DDEBUG
+CFLAGS_ux500_pcm.o := -DDEBUG
+CFLAGS_ux500_msp_dai.o := -DDEBUG
+CFLAGS_ux500_ab3550.o := -DDEBUG
+CFLAGS_ux500_ab8500.o := -DDEBUG
+CFLAGS_ux500_av8100.o := -DDEBUG
+CFLAGS_ux500_cg29xx.o := -DDEBUG
+CFLAGS_ux500_msp_i2s.o := -DDEBUG
+endif
+
+ifdef CONFIG_UX500_SOC_DBX500
+snd-soc-ux500-platform-objs := ux500_pcm.o ux500_msp_dai.o ux500_msp_i2s.o
+obj-y += snd-soc-ux500-platform.o
+endif
+
+ifdef CONFIG_SND_SOC_UX500_AB8500
+snd-soc-ux500-machine-objs += ux500_ab8500.o
+endif
+
+ifdef CONFIG_SND_SOC_UX500_AV8100
+snd-soc-ux500-machine-objs += ux500_av8100.o
+endif
+
+ifdef CONFIG_SND_SOC_UX500_CG29XX
+snd-soc-ux500-machine-objs += ux500_cg29xx.o
+endif
+
+ifdef CONFIG_SND_SOC_UX500_AB5500
+snd-soc-ux500-machine-objs += ux500_ab5500.o
+endif
+
+obj-y += snd-soc-ux500-machine.o
+
+ifdef CONFIG_UX500_SOC_DB8500
+snd-soc-u8500-objs := u8500.o
+obj-y += snd-soc-u8500.o
+endif
+
+ifdef CONFIG_UX500_SOC_DB5500
+snd-soc-u5500-objs := u5500.o
+obj-y += snd-soc-u5500.o
+endif
+
diff --git a/sound/soc/ux500/u5500.c b/sound/soc/ux500/u5500.c
new file mode 100755
index 00000000000..6787daa9de5
--- /dev/null
+++ b/sound/soc/ux500/u5500.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Xie Xiaolei (xie.xiaolei@stericsson.com)
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <sound/soc.h>
+#include <asm/mach-types.h>
+
+#include "ux500_pcm.h"
+#include "ux500_msp_dai.h"
+
+#include <linux/spi/spi.h>
+#include <sound/initval.h>
+
+#ifdef CONFIG_SND_SOC_UX500_AB5500
+#include "ux500_ab5500.h"
+#endif
+
+#ifdef CONFIG_SND_SOC_UX500_AV8100
+#include "ux500_av8100.h"
+#endif
+#ifdef CONFIG_SND_SOC_UX500_CG29XX
+#include "ux500_cg29xx.h"
+#endif
+
+static struct platform_device *u5500_platform_dev;
+
+/* Create dummy devices for platform drivers */
+
+static struct platform_device ux500_pcm = {
+ .name = "ux500-pcm",
+ .id = 0,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+
+#ifdef CONFIG_SND_SOC_UX500_AV8100
+static struct platform_device av8100_codec = {
+ .name = "av8100-codec",
+ .id = 0,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+#endif
+
+#ifdef CONFIG_SND_SOC_UX500_CG29XX
+static struct platform_device cg29xx_codec = {
+ .name = "cg29xx-codec",
+ .id = 0,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+#endif
+/* Define the whole U5500 soundcard, linking platform to the codec-drivers */
+struct snd_soc_dai_link u5500_dai_links[] = {
+ {
+ .name = "ab5500_0",
+ .stream_name = "ab5500_0",
+ .cpu_dai_name = "ux500-msp-i2s.0",
+ .codec_dai_name = "ab5500-codec-dai.0",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "ab5500-codec.0",
+ .init = ux500_ab5500_machine_codec_init,
+ .ops = (struct snd_soc_ops[]) {
+ {
+ .startup = ux500_ab5500_startup,
+ .shutdown = ux500_ab5500_shutdown,
+ .hw_params = ux500_ab5500_hw_params,
+ }
+ }
+ },
+ #ifdef CONFIG_SND_SOC_UX500_CG29XX
+ {
+ .name = "cg29xx_0",
+ .stream_name = "cg29xx_0",
+ .cpu_dai_name = "ux500-msp-i2s.1",
+ .codec_dai_name = "cg29xx-codec-dai.0",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "cg29xx-codec.0",
+ .init = NULL,
+ .ops = u5500_cg29xx_ops,
+ },
+ {
+ .name = "cg29xx_1",
+ .stream_name = "cg29xx_1",
+ .cpu_dai_name = "ux500-msp-i2s.0",
+ .codec_dai_name = "cg29xx-codec-dai.1",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "cg29xx-codec.0",
+ .init = NULL,
+ .ops = u5500_cg29xx_ops,
+ },
+ #endif
+ {
+ .name = "ab5500_1",
+ .stream_name = "ab5500_1",
+ .cpu_dai_name = "ux500-msp-i2s.1",
+ .codec_dai_name = "ab5500-codec-dai.1",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "ab5500-codec.0",
+ .init = ux500_ab5500_machine_codec_init,
+ .ops = (struct snd_soc_ops[]) {
+ {
+ .startup = ux500_ab5500_startup,
+ .shutdown = ux500_ab5500_shutdown,
+ .hw_params = ux500_ab5500_hw_params,
+ }
+ }
+ },
+ #ifdef CONFIG_SND_SOC_UX500_AV8100
+ {
+ .name = "hdmi",
+ .stream_name = "hdmi",
+ .cpu_dai_name = "ux500-msp-i2s.2",
+ .codec_dai_name = "av8100-codec-dai",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "av8100-codec.0",
+ .init = NULL,
+ .ops = ux500_av8100_ops,
+ },
+ #endif
+};
+
+static struct snd_soc_card u5500_drvdata = {
+ .name = "U5500-card",
+ .probe = NULL,
+ .dai_link = u5500_dai_links,
+ .num_links = ARRAY_SIZE(u5500_dai_links),
+};
+
+static int __init u5500_soc_init(void)
+{
+ int ret = 0;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ if (!machine_is_u5500())
+ return 0;
+
+ #ifdef CONFIG_SND_SOC_UX500_AV8100
+ pr_debug("%s: Register device to generate a probe for AV8100 codec.\n",
+ __func__);
+ platform_device_register(&av8100_codec);
+ #endif
+
+ #ifdef CONFIG_SND_SOC_UX500_CG29XX
+ pr_debug("%s: Register device to generate a probe for CG29xx codec.\n",
+ __func__);
+ platform_device_register(&cg29xx_codec);
+ #endif
+ pr_debug("%s: Register device to generate a probe for Ux500-pcm platform.\n",
+ __func__);
+ platform_device_register(&ux500_pcm);
+
+ u5500_platform_dev = platform_device_alloc("soc-audio", -1);
+ if (!u5500_platform_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(u5500_platform_dev, &u5500_drvdata);
+ u5500_drvdata.dev = &u5500_platform_dev->dev;
+
+ ret = platform_device_add(u5500_platform_dev);
+ if (ret) {
+ pr_err("%s: Error: Failed to add platform device (%s).\n",
+ __func__,
+ u5500_drvdata.name);
+ platform_device_put(u5500_platform_dev);
+ }
+
+ return ret;
+}
+
+static void __exit u5500_soc_exit(void)
+{
+ pr_debug("%s: Enter.\n", __func__);
+
+ platform_device_unregister(u5500_platform_dev);
+}
+
+module_init(u5500_soc_init);
+module_exit(u5500_soc_exit);
+
+MODULE_LICENSE("GPLv2");
diff --git a/sound/soc/ux500/u8500.c b/sound/soc/ux500/u8500.c
new file mode 100644
index 00000000000..516008fcc6a
--- /dev/null
+++ b/sound/soc/ux500/u8500.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja (ola.o.lilja@stericsson.com)
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <sound/soc.h>
+#include <asm/mach-types.h>
+
+#include "ux500_pcm.h"
+#include "ux500_msp_dai.h"
+
+#include <linux/spi/spi.h>
+#include <sound/initval.h>
+
+#ifdef CONFIG_SND_SOC_UX500_AB3550
+#include "ux500_ab3550.h"
+#endif
+
+#ifdef CONFIG_SND_SOC_UX500_AB8500
+#include <sound/ux500_ab8500.h>
+#endif
+
+#ifdef CONFIG_SND_SOC_UX500_AV8100
+#include "ux500_av8100.h"
+#endif
+
+#ifdef CONFIG_SND_SOC_UX500_CG29XX
+#include "ux500_cg29xx.h"
+#endif
+
+
+static struct platform_device *u8500_platform_dev;
+
+/* Create dummy devices for platform drivers */
+
+static struct platform_device ux500_pcm = {
+ .name = "ux500-pcm",
+ .id = 0,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+
+#ifdef CONFIG_SND_SOC_UX500_AV8100
+static struct platform_device av8100_codec = {
+ .name = "av8100-codec",
+ .id = 0,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+#endif
+
+#ifdef CONFIG_SND_SOC_UX500_CG29XX
+static struct platform_device cg29xx_codec = {
+ .name = "cg29xx-codec",
+ .id = 0,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+#endif
+
+/* Define the whole U8500 soundcard, linking platform to the codec-drivers */
+struct snd_soc_dai_link u8500_dai_links[] = {
+ #ifdef CONFIG_SND_SOC_UX500_AV8100
+ {
+ .name = "hdmi",
+ .stream_name = "hdmi",
+ .cpu_dai_name = "ux500-msp-i2s.2",
+ .codec_dai_name = "av8100-codec-dai",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "av8100-codec.0",
+ .init = NULL,
+ .ops = ux500_av8100_ops,
+ },
+ #endif
+ #ifdef CONFIG_SND_SOC_UX500_AB3550
+ {
+ .name = "ab3550_0",
+ .stream_name = "ab3550_0",
+ .cpu_dai_name = "ux500-msp-i2s.0",
+ .codec_dai_name = "ab3550-codec-dai.0",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "ab3550-codec.11",
+ .init = NULL,
+ .ops = ux500_ab3550_ops,
+ },
+ {
+ .name = "ab3550_1",
+ .stream_name = "ab3550_1",
+ .cpu_dai_name = "ux500-msp-i2s.1",
+ .codec_dai_name = "ab3550-codec-dai.1",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "ab3550-codec.11",
+ .init = NULL,
+ .ops = ux500_ab3550_ops,
+ },
+ #endif
+ #ifdef CONFIG_SND_SOC_UX500_AB8500
+ {
+ .name = "ab8500_0",
+ .stream_name = "ab8500_0",
+ .cpu_dai_name = "ux500-msp-i2s.1",
+ .codec_dai_name = "ab8500-codec-dai.0",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "ab8500-codec.0",
+ .init = ux500_ab8500_machine_codec_init,
+ .ops = ux500_ab8500_ops,
+ },
+ {
+ .name = "ab8500_1",
+ .stream_name = "ab8500_1",
+ .cpu_dai_name = "ux500-msp-i2s.3",
+ .codec_dai_name = "ab8500-codec-dai.1",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "ab8500-codec.0",
+ .init = NULL,
+ .ops = ux500_ab8500_ops,
+ },
+ #endif
+ #ifdef CONFIG_SND_SOC_UX500_CG29XX
+ {
+ .name = "cg29xx_0",
+ .stream_name = "cg29xx_0",
+ .cpu_dai_name = "ux500-msp-i2s.0",
+ .codec_dai_name = "cg29xx-codec-dai.1",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "cg29xx-codec.0",
+ .init = NULL,
+ .ops = ux500_cg29xx_ops,
+ },
+ #endif
+};
+
+static struct snd_soc_card u8500_drvdata = {
+ .name = "U8500-card",
+ .probe = NULL,
+ .dai_link = u8500_dai_links,
+ .num_links = ARRAY_SIZE(u8500_dai_links),
+};
+
+static int __init u8500_soc_init(void)
+{
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ if (machine_is_u5500())
+ return 0;
+
+ #ifdef CONFIG_SND_SOC_UX500_AV8100
+ pr_debug("%s: Register device to generate a probe for AV8100 codec.\n",
+ __func__);
+ platform_device_register(&av8100_codec);
+ #endif
+
+ #ifdef CONFIG_SND_SOC_UX500_CG29XX
+ pr_debug("%s: Register device to generate a probe for CG29xx codec.\n",
+ __func__);
+ platform_device_register(&cg29xx_codec);
+ #endif
+
+ #ifdef CONFIG_SND_SOC_UX500_AB8500
+ pr_debug("%s: Calling init-function for AB8500 machine driver.\n",
+ __func__);
+ ret = ux500_ab8500_soc_machine_drv_init();
+ if (ret)
+ pr_err("%s: ux500_ab8500_soc_machine_drv_init failed (%d).\n",
+ __func__, ret);
+ #endif
+
+ pr_debug("%s: Register device to generate a probe for Ux500-pcm platform.\n",
+ __func__);
+ platform_device_register(&ux500_pcm);
+
+ pr_debug("%s: Allocate platform device 'soc-audio'.\n",
+ __func__);
+ u8500_platform_dev = platform_device_alloc("soc-audio", -1);
+ if (!u8500_platform_dev)
+ return -ENOMEM;
+
+ pr_debug("%s: Card %s: num_links = %d\n",
+ __func__,
+ u8500_drvdata.name,
+ u8500_drvdata.num_links);
+ pr_debug("%s: Card %s: DAI-link 0: name = %s\n",
+ __func__,
+ u8500_drvdata.name,
+ u8500_drvdata.dai_link[0].name);
+ pr_debug("%s: Card %s: DAI-link 0: stream_name = %s\n",
+ __func__,
+ u8500_drvdata.name,
+ u8500_drvdata.dai_link[0].stream_name);
+
+ pr_debug("%s: Card %s: Set platform drvdata.\n",
+ __func__,
+ u8500_drvdata.name);
+ platform_set_drvdata(u8500_platform_dev, &u8500_drvdata);
+ u8500_drvdata.dev = &u8500_platform_dev->dev;
+
+ pr_debug("%s: Card %s: Add platform device.\n",
+ __func__,
+ u8500_drvdata.name);
+ ret = platform_device_add(u8500_platform_dev);
+ if (ret) {
+ pr_err("%s: Error: Failed to add platform device (%s).\n",
+ __func__,
+ u8500_drvdata.name);
+ platform_device_put(u8500_platform_dev);
+ }
+
+ return ret;
+}
+
+static void __exit u8500_soc_exit(void)
+{
+ pr_debug("%s: Enter.\n", __func__);
+
+ #ifdef CONFIG_SND_SOC_UX500_AB8500
+ pr_debug("%s: Calling exit-function for AB8500 machine driver.\n",
+ __func__);
+ ux500_ab8500_soc_machine_drv_cleanup();
+ #endif
+
+ pr_debug("%s: Unregister platform device (%s).\n",
+ __func__,
+ u8500_drvdata.name);
+ platform_device_unregister(u8500_platform_dev);
+}
+
+module_init(u8500_soc_init);
+module_exit(u8500_soc_exit);
+
+MODULE_LICENSE("GPLv2");
diff --git a/sound/soc/ux500/ux500_ab3550.c b/sound/soc/ux500/ux500_ab3550.c
new file mode 100644
index 00000000000..7e144c0e4d2
--- /dev/null
+++ b/sound/soc/ux500/ux500_ab3550.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja ola.o.lilja@stericsson.com,
+ * Roger Nilsson roger.xr.nilsson@stericsson.com
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <sound/soc.h>
+#include "../codecs/ab3550.h"
+
+static int ux500_ab3550_startup(struct snd_pcm_substream *substream)
+{
+ pr_debug("%s: Enter.\n", __func__);
+
+ return 0;
+}
+
+static void ux500_ab3550_shutdown(struct snd_pcm_substream *substream)
+{
+ pr_debug("%s: Enter.\n", __func__);
+}
+
+static int ux500_ab3550_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+
+ int channels = params_channels(params);
+
+ pr_debug("%s: Enter.\n", __func__);
+ pr_debug("%s: substream->pcm->name = %s.\n", __func__, substream->pcm->name);
+ pr_debug("%s: substream->pcm->id = %s.\n", __func__, substream->pcm->id);
+ pr_debug("%s: substream->name = %s.\n", __func__, substream->name);
+ pr_debug("%s: substream->number = %d.\n", __func__, substream->number);
+ pr_debug("%s: channels = %d.\n", __func__, channels);
+ pr_debug("%s: DAI-index (Codec): %d\n", __func__, codec_dai->id);
+ pr_debug("%s: DAI-index (Platform): %d\n", __func__, cpu_dai->id);
+
+ ret = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0) {
+ pr_debug("%s: snd_soc_dai_set_fmt failed with %d.\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0) {
+ pr_debug("%s: snd_soc_dai_set_fmt failed with %d.\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+struct snd_soc_ops ux500_ab3550_ops[] = {
+ {
+ .startup = ux500_ab3550_startup,
+ .shutdown = ux500_ab3550_shutdown,
+ .hw_params = ux500_ab3550_hw_params,
+ }
+};
diff --git a/sound/soc/ux500/ux500_ab3550.h b/sound/soc/ux500/ux500_ab3550.h
new file mode 100644
index 00000000000..53ea3902d36
--- /dev/null
+++ b/sound/soc/ux500/ux500_ab3550.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef UX500_AB3550_H
+#define UX500_AB3550_H
+
+extern struct snd_soc_ops ux500_ab3550_ops[];
+
+#endif
diff --git a/sound/soc/ux500/ux500_ab5500.c b/sound/soc/ux500/ux500_ab5500.c
new file mode 100755
index 00000000000..3a1dab0a990
--- /dev/null
+++ b/sound/soc/ux500/ux500_ab5500.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Ola Lilja ola.o.lilja@stericsson.com,
+ * Roger Nilsson roger.xr.nilsson@stericsson.com
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <sound/soc.h>
+#include <linux/clk.h>
+#include "../codecs/ab5500.h"
+#include "ux500_msp_dai.h"
+
+/*
+ * As a workaround, sysclk is enabled by default; this will be
+ * changed later.
+ */
+static unsigned int sysclk_state = 1; /* Enabled */
+static struct clk *ux500_ab5500_sysclk;
+
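+/*
+ * Boolean ALSA control ("Sysclk Input Select") that lets user space decide
+ * whether the machine driver gates sysclk around stream startup/shutdown.
+ */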
+static int sysclk_input_select_control_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+ return 0;
+}
+
+static int sysclk_input_select_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = sysclk_state;
+ return 0;
+}
+
+static int sysclk_input_select_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ sysclk_state = ucontrol->value.integer.value[0];
+ return 0;
+}
+
+static const struct snd_kcontrol_new sysclk_input_select_control = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Sysclk Input Select",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = sysclk_input_select_control_info,
+ .get = sysclk_input_select_control_get,
+ .put = sysclk_input_select_control_put
+};
+
+int ux500_ab5500_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ int ret = 0;
+
+ if (sysclk_state == 1) {
+ ret = clk_enable(ux500_ab5500_sysclk);
+ if (ret)
+ dev_err(codec->dev, "failed to enable clock %d\n", ret);
+ }
+
+ return ret;
+}
+
+void ux500_ab5500_shutdown(struct snd_pcm_substream *substream)
+{
+ pr_info("%s: Enter.\n", __func__);
+ if (sysclk_state == 1)
+ clk_disable(ux500_ab5500_sysclk);
+}
+
+int ux500_ab5500_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+
+ int channels = params_channels(params);
+
+ printk(KERN_DEBUG "%s: Enter.\n", __func__);
+ printk(KERN_DEBUG "%s: substream->pcm->name = %s.\n", __func__, substream->pcm->name);
+ printk(KERN_DEBUG "%s: substream->pcm->id = %s.\n", __func__, substream->pcm->id);
+ printk(KERN_DEBUG "%s: substream->name = %s.\n", __func__, substream->name);
+ printk(KERN_DEBUG "%s: substream->number = %d.\n", __func__, substream->number);
+ printk(KERN_DEBUG "%s: channels = %d.\n", __func__, channels);
+ printk(KERN_DEBUG "%s: DAI-index (Codec): %d\n", __func__, codec_dai->id);
+ printk(KERN_DEBUG "%s: DAI-index (Platform): %d\n", __func__, cpu_dai->id);
+
+ ret = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_NB_NF);
+ if (ret < 0)
+ return ret;
+ ux500_msp_dai_set_data_delay(cpu_dai, MSP_DELAY_1);
+
+ return ret;
+}
+
+int ux500_ab5500_machine_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ int ret = 0;
+
+ snd_ctl_add(codec->card->snd_card,
+ snd_ctl_new1(&sysclk_input_select_control, codec));
+
+ ux500_ab5500_sysclk = clk_get(codec->dev, "sysclk");
+ if (IS_ERR(ux500_ab5500_sysclk)) {
+ dev_err(codec->dev, "could not get sysclk %ld\n",
+ PTR_ERR(ux500_ab5500_sysclk));
+ ret = PTR_ERR(ux500_ab5500_sysclk);
+ }
+
+ return ret;
+}
diff --git a/sound/soc/ux500/ux500_ab5500.h b/sound/soc/ux500/ux500_ab5500.h
new file mode 100755
index 00000000000..8a9be4b98d0
--- /dev/null
+++ b/sound/soc/ux500/ux500_ab5500.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Xie Xiaolei (xie.xiaolei@stericsson.com)
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef UX500_AB5500_H
+#define UX500_AB5500_H
+
+struct snd_soc_pcm_runtime;
+
+int ux500_ab5500_startup(struct snd_pcm_substream *substream);
+
+void ux500_ab5500_shutdown(struct snd_pcm_substream *substream);
+
+int ux500_ab5500_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+
+int ux500_ab5500_machine_codec_init(struct snd_soc_pcm_runtime *runtime);
+
+#endif
diff --git a/sound/soc/ux500/ux500_ab8500.c b/sound/soc/ux500/ux500_ab8500.c
new file mode 100644
index 00000000000..452564f3e5c
--- /dev/null
+++ b/sound/soc/ux500/ux500_ab8500.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Mikko J. Lehto <mikko.lehto@symbio.com>,
+ * Mikko Sarmanne <mikko.sarmanne@symbio.com>,
+ * Jarmo K. Kuronen <jarmo.kuronen@symbio.com>.
+ * Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Kristoffer Karlsson <kristoffer.karlsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/jack.h>
+#include <sound/pcm_params.h>
+#include <sound/soc-dapm.h>
+#include <mach/hardware.h>
+#include "ux500_pcm.h"
+#include "ux500_msp_dai.h"
+#include "../codecs/ab8500_audio.h"
+
+#define TX_SLOT_MONO 0x0008
+#define TX_SLOT_STEREO 0x000a
+#define RX_SLOT_MONO 0x0001
+#define RX_SLOT_STEREO 0x0003
+#define TX_SLOT_8CH 0x00FF
+#define RX_SLOT_8CH 0x00FF
+
+#define DEF_TX_SLOTS TX_SLOT_STEREO
+#define DEF_RX_SLOTS RX_SLOT_MONO
+
+#define DRIVERMODE_NORMAL 0
+#define DRIVERMODE_CODEC_ONLY 1
+
+static struct snd_soc_jack jack;
+
+/* Power-control */
+static DEFINE_MUTEX(power_lock);
+static int ab8500_power_count;
+
+/* ADCM-control */
+static DEFINE_MUTEX(adcm_lock);
+#define GPADC_MIN_DELTA_DELAY 500
+#define GPADC_MAX_DELTA_DELAY 1000
+#define GPADC_MAX_VOLT_DIFF 20
+#define GPADC_MAX_ITERATIONS 4
+
+/* Clocks */
+/* audioclk -> intclk -> sysclk/ulpclk */
+static int master_clock_sel;
+static struct clk *clk_ptr_audioclk;
+static struct clk *clk_ptr_intclk;
+static struct clk *clk_ptr_sysclk;
+static struct clk *clk_ptr_ulpclk;
+static struct clk *clk_ptr_gpio1;
+
+static const char * const enum_mclk[] = {
+ "SYSCLK",
+ "ULPCLK"
+};
+static SOC_ENUM_SINGLE_EXT_DECL(soc_enum_mclk, enum_mclk);
+
+/* ANC States */
+static const char * const enum_anc_state[] = {
+ "Unconfigured",
+ "Configure FIR+IIR",
+ "FIR+IIR Configured",
+ "Configure FIR",
+ "FIR Configured",
+ "Configure IIR",
+ "IIR Configured",
+ "Error"
+};
+static SOC_ENUM_SINGLE_EXT_DECL(soc_enum_ancstate, enum_anc_state);
+
+/* Regulators */
+enum regulator_idx {
+ REGULATOR_AUDIO,
+ REGULATOR_DMIC,
+ REGULATOR_AMIC1,
+ REGULATOR_AMIC2
+};
+static struct regulator_bulk_data reg_info[4] = {
+ { .consumer = NULL, .supply = "v-audio" },
+ { .consumer = NULL, .supply = "v-dmic" },
+ { .consumer = NULL, .supply = "v-amic1" },
+ { .consumer = NULL, .supply = "v-amic2" }
+};
+static bool reg_enabled[4] = {
+ false,
+ false,
+ false,
+ false
+};
+static int reg_claim[4];
+enum amic_idx { AMIC_1A, AMIC_1B, AMIC_2 };
+struct amic_conf {
+ enum regulator_idx reg_id;
+ bool enabled;
+ char *name;
+};
+static struct amic_conf amic_info[3] = {
+ { REGULATOR_AMIC1, false, "amic1a" },
+ { REGULATOR_AMIC1, false, "amic1b" },
+ { REGULATOR_AMIC2, false, "amic2" }
+};
+static DEFINE_MUTEX(amic_conf_lock);
+
+static const char *enum_amic_reg_conf[2] = { "v-amic1", "v-amic2" };
+static SOC_ENUM_SINGLE_EXT_DECL(soc_enum_amicconf, enum_amic_reg_conf);
+
+/* Slot configuration */
+static unsigned int tx_slots = DEF_TX_SLOTS;
+static unsigned int rx_slots = DEF_RX_SLOTS;
+
+/* Regulators */
+
+static int enable_regulator(enum regulator_idx idx)
+{
+ int ret;
+
+ if (reg_info[idx].consumer == NULL) {
+ pr_err("%s: Failure to enable regulator '%s'\n",
+ __func__, reg_info[idx].supply);
+ return -EIO;
+ }
+
+ if (reg_enabled[idx])
+ return 0;
+
+ ret = regulator_enable(reg_info[idx].consumer);
+ if (ret != 0) {
+ pr_err("%s: Failure to enable regulator '%s' (ret = %d)\n",
+ __func__, reg_info[idx].supply, ret);
+ return -EIO;
+ }
+
+ reg_enabled[idx] = true;
+ pr_debug("%s: Enabled regulator '%s', status: %d, %d, %d, %d\n",
+ __func__,
+ reg_info[idx].supply,
+ (int)reg_enabled[0],
+ (int)reg_enabled[1],
+ (int)reg_enabled[2],
+ (int)reg_enabled[3]);
+ return 0;
+}
+
+static void disable_regulator(enum regulator_idx idx)
+{
+ if (reg_info[idx].consumer == NULL) {
+ pr_err("%s: Failure to disable regulator '%s'\n",
+ __func__, reg_info[idx].supply);
+ return;
+ }
+
+ if (!reg_enabled[idx])
+ return;
+
+ regulator_disable(reg_info[idx].consumer);
+
+ reg_enabled[idx] = false;
+ pr_debug("%s: Disabled regulator '%s', status: %d, %d, %d, %d\n",
+ __func__,
+ reg_info[idx].supply,
+ (int)reg_enabled[0],
+ (int)reg_enabled[1],
+ (int)reg_enabled[2],
+ (int)reg_enabled[3]);
+}
+
+static int create_regulators(void)
+{
+ int i, status = 0;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(reg_info); ++i)
+ reg_info[i].consumer = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(reg_info); ++i) {
+ reg_info[i].consumer = regulator_get(NULL, reg_info[i].supply);
+ if (IS_ERR(reg_info[i].consumer)) {
+ status = PTR_ERR(reg_info[i].consumer);
+ pr_err("%s: ERROR: Failed to get regulator '%s' (ret = %d)!\n",
+ __func__, reg_info[i].supply, status);
+ reg_info[i].consumer = NULL;
+ goto err_get;
+ }
+ }
+
+ return 0;
+
+err_get:
+
+ for (i = 0; i < ARRAY_SIZE(reg_info); ++i) {
+ if (reg_info[i].consumer) {
+ regulator_put(reg_info[i].consumer);
+ reg_info[i].consumer = NULL;
+ }
+ }
+
+ return status;
+}
+
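+/*
+ * The analog-mic supplies are shared between mics; a per-regulator claim
+ * counter keeps a supply enabled as long as any mic using it is active.
+ */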
+static int claim_amic_regulator(enum amic_idx amic_id)
+{
+ enum regulator_idx reg_id = amic_info[amic_id].reg_id;
+ int ret = 0;
+
+ reg_claim[reg_id]++;
+ if (reg_claim[reg_id] > 1)
+ goto cleanup;
+
+ ret = enable_regulator(reg_id);
+ if (ret < 0) {
+ pr_err("%s: Failed to claim %s for %s (ret = %d)!",
+ __func__, reg_info[reg_id].supply,
+ amic_info[amic_id].name, ret);
+ reg_claim[reg_id]--;
+ }
+
+cleanup:
+ amic_info[amic_id].enabled = (ret == 0);
+
+ return ret;
+}
+
+static void release_amic_regulator(enum amic_idx amic_id)
+{
+ enum regulator_idx reg_id = amic_info[amic_id].reg_id;
+
+ reg_claim[reg_id]--;
+ if (reg_claim[reg_id] <= 0) {
+ disable_regulator(reg_id);
+ reg_claim[reg_id] = 0;
+ }
+
+ amic_info[amic_id].enabled = false;
+}
+
+/* Power/clock control */
+
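+/*
+ * Reference-counted power handling: the first user enables the audio
+ * regulator, reparents intclk to the selected master clock and powers up
+ * the AB8500 audio parts; the last user reverses the sequence.
+ */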
+static int ux500_ab8500_power_control_inc(void)
+{
+ int ret = 0;
+
+ mutex_lock(&power_lock);
+
+ ab8500_power_count++;
+ pr_debug("%s: ab8500_power_count changed from %d to %d",
+ __func__,
+ ab8500_power_count-1,
+ ab8500_power_count);
+
+ if (ab8500_power_count == 1) {
+ /* Turn on audio-regulator */
+ ret = enable_regulator(REGULATOR_AUDIO);
+ if (ret < 0)
+ goto out;
+
+ /* Enable audio-clock */
+ ret = clk_set_parent(clk_ptr_intclk,
+ (master_clock_sel == 0) ? clk_ptr_sysclk : clk_ptr_ulpclk);
+ if (ret != 0) {
+ pr_err("%s: ERROR: Setting master-clock to %s failed (ret = %d)!",
+ __func__,
+ (master_clock_sel == 0) ? "SYSCLK" : "ULPCLK",
+ ret);
+ ret = -EIO;
+ goto clk_err;
+ }
+ pr_debug("%s: Enabling master-clock (%s).",
+ __func__,
+ (master_clock_sel == 0) ? "SYSCLK" : "ULPCLK");
+ ret = clk_enable(clk_ptr_audioclk);
+ if (ret != 0) {
+ pr_err("%s: ERROR: clk_enable failed (ret = %d)!", __func__, ret);
+ ret = -EIO;
+ ab8500_power_count = 0;
+ goto clk_err;
+ }
+
+ /* Power on audio-parts of AB8500 */
+ ret = ab8500_audio_power_control(true);
+ }
+
+ goto out;
+
+clk_err:
+ disable_regulator(REGULATOR_AUDIO);
+
+out:
+ mutex_unlock(&power_lock);
+
+ return ret;
+}
+
+static void ux500_ab8500_power_control_dec(void)
+{
+ mutex_lock(&power_lock);
+
+ ab8500_power_count--;
+
+ pr_debug("%s: ab8500_power_count changed from %d to %d",
+ __func__,
+ ab8500_power_count+1,
+ ab8500_power_count);
+
+ if (ab8500_power_count == 0) {
+ /* Power off audio-parts of AB8500 */
+ ab8500_audio_power_control(false);
+
+ /* Disable audio-clock */
+ pr_debug("%s: Disabling master-clock (%s).",
+ __func__,
+ (master_clock_sel == 0) ? "SYSCLK" : "ULPCLK");
+ clk_disable(clk_ptr_audioclk);
+
+ /* Turn off audio-regulator */
+ disable_regulator(REGULATOR_AUDIO);
+ }
+
+ mutex_unlock(&power_lock);
+}
+
+/* Controls - Non-DAPM Non-ASoC */
+
+static int mclk_input_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.enumerated.item[0] = master_clock_sel;
+
+ return 0;
+}
+
+static int mclk_input_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ unsigned int val;
+
+ val = (ucontrol->value.enumerated.item[0] != 0);
+ if (master_clock_sel == val)
+ return 0;
+
+ master_clock_sel = val;
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new mclk_input_control = \
+ SOC_ENUM_EXT("Master Clock Select", soc_enum_mclk,
+ mclk_input_control_get, mclk_input_control_put);
+
+static int anc_status_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = ab8500_audio_anc_status();
+
+ return 0;
+}
+
+static int anc_status_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int req_state = ucontrol->value.integer.value[0];
+
+ int ret = ux500_ab8500_power_control_inc();
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ab8500_audio_anc_configure(req_state);
+
+ ux500_ab8500_power_control_dec();
+
+cleanup:
+ if (ret < 0)
+ pr_err("%s: Unable to configure ANC! (ret = %d)\n",
+ __func__, ret);
+
+ return (ret < 0) ? 0 : 1;
+}
+
+static const struct snd_kcontrol_new anc_status_control = \
+ SOC_ENUM_EXT("ANC Status", soc_enum_ancstate,
+ anc_status_control_get, anc_status_control_put);
+
+static int amic_reg_control_get(struct snd_ctl_elem_value *ucontrol,
+ enum amic_idx amic_id)
+{
+ ucontrol->value.integer.value[0] =
+ (amic_info[amic_id].reg_id == REGULATOR_AMIC2);
+
+ return 0;
+}
+
+static int amic_reg_control_put(struct snd_ctl_elem_value *ucontrol,
+ enum amic_idx amic_id)
+{
+ enum regulator_idx old_reg_id, new_reg_id;
+ int ret = 0;
+
+ if (ucontrol->value.integer.value[0] == 0)
+ new_reg_id = REGULATOR_AMIC1;
+ else
+ new_reg_id = REGULATOR_AMIC2;
+
+ mutex_lock(&amic_conf_lock);
+
+ old_reg_id = amic_info[amic_id].reg_id;
+ if (old_reg_id == new_reg_id)
+ goto cleanup;
+
+ if (!amic_info[amic_id].enabled) {
+ amic_info[amic_id].reg_id = new_reg_id;
+ goto cleanup;
+ }
+
+ release_amic_regulator(amic_id);
+ amic_info[amic_id].reg_id = new_reg_id;
+ ret = claim_amic_regulator(amic_id);
+
+cleanup:
+ mutex_unlock(&amic_conf_lock);
+
+ return (ret < 0) ? 0 : 1;
+}
+
+static int amic1a_reg_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return amic_reg_control_get(ucontrol, AMIC_1A);
+}
+
+static int amic1a_reg_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return amic_reg_control_put(ucontrol, AMIC_1A);
+}
+
+static int amic1b_reg_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return amic_reg_control_get(ucontrol, AMIC_1B);
+}
+
+static int amic1b_reg_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return amic_reg_control_put(ucontrol, AMIC_1B);
+}
+
+static int amic2_reg_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return amic_reg_control_get(ucontrol, AMIC_2);
+}
+
+static int amic2_reg_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return amic_reg_control_put(ucontrol, AMIC_2);
+}
+
+static const struct snd_kcontrol_new mic1a_regulator_control = \
+ SOC_ENUM_EXT("Mic 1A Regulator", soc_enum_amicconf,
+ amic1a_reg_control_get, amic1a_reg_control_put);
+static const struct snd_kcontrol_new mic1b_regulator_control = \
+ SOC_ENUM_EXT("Mic 1B Regulator", soc_enum_amicconf,
+ amic1b_reg_control_get, amic1b_reg_control_put);
+static const struct snd_kcontrol_new mic2_regulator_control = \
+ SOC_ENUM_EXT("Mic 2 Regulator", soc_enum_amicconf,
+ amic2_reg_control_get, amic2_reg_control_put);
+
+/* DAPM-events */
+
+static int dapm_audioreg_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ ux500_ab8500_power_control_inc();
+ else
+ ux500_ab8500_power_control_dec();
+
+ return 0;
+}
+
+static int dapm_amicreg_event(enum amic_idx amic_id, int event)
+{
+ int ret = 0;
+
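+	/* Serialize with the "Mic xx Regulator" controls (amic_conf_lock) */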
+ mutex_lock(&amic_conf_lock);
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ ret = claim_amic_regulator(amic_id);
+ else if (amic_info[amic_id].enabled)
+ release_amic_regulator(amic_id);
+
+ mutex_unlock(&amic_conf_lock);
+
+ return ret;
+}
+
+static int dapm_amic1areg_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ return dapm_amicreg_event(AMIC_1A, event);
+}
+
+static int dapm_amic1breg_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ return dapm_amicreg_event(AMIC_1B, event);
+}
+
+static int dapm_amic2reg_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ return dapm_amicreg_event(AMIC_2, event);
+}
+
+static int dapm_dmicreg_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ int ret = 0;
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ ret = enable_regulator(REGULATOR_DMIC);
+ else
+ disable_regulator(REGULATOR_DMIC);
+	pr_debug("%s: Enter (MSP Index: %d, period-cnt: %d, period-len: %zu).\n",
+ return ret;
+}
+
+/* DAPM-widgets */
+
+static const struct snd_soc_dapm_widget ux500_ab8500_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("AUDIO Regulator",
+ SND_SOC_NOPM, 0, 0, dapm_audioreg_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("AMIC1A Regulator",
+ SND_SOC_NOPM, 0, 0, dapm_amic1areg_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("AMIC1B Regulator",
+ SND_SOC_NOPM, 0, 0, dapm_amic1breg_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("AMIC2 Regulator",
+ SND_SOC_NOPM, 0, 0, dapm_amic2reg_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("DMIC Regulator",
+ SND_SOC_NOPM, 0, 0, dapm_dmicreg_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+/* DAPM-routes */
+
+static const struct snd_soc_dapm_route ux500_ab8500_dapm_intercon[] = {
+
+ /* Power AB8500 audio-block when AD/DA is active */
+ {"DAC", NULL, "AUDIO Regulator"},
+ {"ADC", NULL, "AUDIO Regulator"},
+
+ /* Power configured regulator when an analog mic is enabled */
+ {"MIC1A Input", NULL, "AMIC1A Regulator"},
+ {"MIC1B Input", NULL, "AMIC1B Regulator"},
+ {"MIC2 Input", NULL, "AMIC2 Regulator"},
+
+ /* Power DMIC-regulator when any digital mic is enabled */
+ {"DMic 1", NULL, "DMIC Regulator"},
+ {"DMic 2", NULL, "DMIC Regulator"},
+ {"DMic 3", NULL, "DMIC Regulator"},
+ {"DMic 4", NULL, "DMIC Regulator"},
+ {"DMic 5", NULL, "DMIC Regulator"},
+ {"DMic 6", NULL, "DMIC Regulator"},
+};
+
+
+static int add_widgets(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(&codec->dapm,
+ ux500_ab8500_dapm_widgets,
+ ARRAY_SIZE(ux500_ab8500_dapm_widgets));
+ if (ret < 0) {
+ pr_err("%s: Failed to create DAPM controls (%d).\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&codec->dapm,
+ ux500_ab8500_dapm_intercon,
+ ARRAY_SIZE(ux500_ab8500_dapm_intercon));
+ if (ret < 0) {
+ pr_err("%s: Failed to add DAPM routes (%d).\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* ASoC */
+
+int ux500_ab8500_startup(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+
+ pr_debug("%s: Enter\n", __func__);
+
+ /* Enable gpio.1-clock (needed by DSP in burst mode) */
+ ret = clk_enable(clk_ptr_gpio1);
+ if (ret) {
+ pr_err("%s: ERROR: clk_enable(gpio.1) failed (ret = %d)!", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ux500_ab8500_shutdown(struct snd_pcm_substream *substream)
+{
+ pr_debug("%s: Enter\n", __func__);
+
+ /* Reset slots configuration to default(s) */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tx_slots = DEF_TX_SLOTS;
+ else
+ rx_slots = DEF_RX_SLOTS;
+
+ clk_disable(clk_ptr_gpio1);
+}
+
+int ux500_ab8500_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ unsigned int fmt, fmt_if1;
+ int channels, ret = 0, slots, slot_width, driver_mode;
+ bool streamIsPlayback;
+
+ pr_debug("%s: Enter\n", __func__);
+
+ pr_debug("%s: substream->pcm->name = %s\n"
+ "substream->pcm->id = %s.\n"
+ "substream->name = %s.\n"
+ "substream->number = %d.\n",
+ __func__,
+ substream->pcm->name,
+ substream->pcm->id,
+ substream->name,
+ substream->number);
+
+ channels = params_channels(params);
+
+ /* Setup codec depending on driver-mode */
+ driver_mode = (channels == 8) ?
+ DRIVERMODE_CODEC_ONLY : DRIVERMODE_NORMAL;
+ pr_debug("%s: Driver-mode: %s.\n",
+ __func__,
+ (driver_mode == DRIVERMODE_NORMAL) ? "NORMAL" : "CODEC_ONLY");
+
+ ab8500_audio_set_bit_delay(codec_dai, 1);
+
+ if (driver_mode == DRIVERMODE_NORMAL) {
+ ab8500_audio_set_word_length(codec_dai, 16);
+ fmt = SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CONT;
+ } else {
+ ab8500_audio_set_word_length(codec_dai, 20);
+ fmt = SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_GATED;
+ }
+
+ ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+ if (ret < 0) {
+ pr_err("%s: ERROR: snd_soc_dai_set_fmt failed for codec_dai (ret = %d)!\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+ if (ret < 0) {
+		pr_err("%s: ERROR: snd_soc_dai_set_fmt failed for cpu_dai (ret = %d)!\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ ux500_msp_dai_set_data_delay(cpu_dai, MSP_DELAY_1);
+
+ /* Setup TDM-slots */
+
+ streamIsPlayback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ switch (channels) {
+ case 1:
+ slots = 16;
+ slot_width = 16;
+ tx_slots = (streamIsPlayback) ? TX_SLOT_MONO : 0;
+ rx_slots = (streamIsPlayback) ? 0 : RX_SLOT_MONO;
+ break;
+ case 2:
+ slots = 16;
+ slot_width = 16;
+ tx_slots = (streamIsPlayback) ? TX_SLOT_STEREO : 0;
+ rx_slots = (streamIsPlayback) ? 0 : RX_SLOT_STEREO;
+ break;
+ case 8:
+ slots = 16;
+ slot_width = 16;
+ tx_slots = (streamIsPlayback) ? TX_SLOT_8CH : 0;
+ rx_slots = (streamIsPlayback) ? 0 : RX_SLOT_8CH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pr_debug("%s: CPU-DAI TDM: TX=0x%04X RX=0x%04x\n",
+ __func__, tx_slots, rx_slots);
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, tx_slots, rx_slots, slots, slot_width);
+ if (ret)
+ return ret;
+
+ pr_debug("%s: CODEC-DAI TDM: TX=0x%04X RX=0x%04x\n",
+ __func__, tx_slots, rx_slots);
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, tx_slots, rx_slots, slots, slot_width);
+ if (ret)
+ return ret;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ pr_debug("%s: Setup IF1 for FM-radio.\n", __func__);
+ fmt_if1 = SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_I2S;
+ ret = ab8500_audio_setup_if1(codec_dai->codec, fmt_if1, 16, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct snd_soc_ops ux500_ab8500_ops[] = {
+ {
+ .hw_params = ux500_ab8500_hw_params,
+ .startup = ux500_ab8500_startup,
+ .shutdown = ux500_ab8500_shutdown,
+ }
+};
+
+int ux500_ab8500_machine_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ int ret;
+
+ pr_debug("%s Enter.\n", __func__);
+
+ ret = snd_soc_jack_new(codec,
+ "AB8500 Hs Status",
+ SND_JACK_HEADPHONE |
+ SND_JACK_MICROPHONE |
+ SND_JACK_HEADSET |
+ SND_JACK_LINEOUT |
+ SND_JACK_MECHANICAL |
+ SND_JACK_VIDEOOUT,
+ &jack);
+ if (ret < 0) {
+ pr_err("%s: ERROR: Failed to create Jack (ret = %d)!\n", __func__, ret);
+ return ret;
+ }
+
+ /* Add controls */
+ snd_ctl_add(codec->card->snd_card, snd_ctl_new1(
+ &mclk_input_control, codec));
+ snd_ctl_add(codec->card->snd_card, snd_ctl_new1(
+ &anc_status_control, codec));
+ snd_ctl_add(codec->card->snd_card, snd_ctl_new1(
+ &mic1a_regulator_control, codec));
+ snd_ctl_add(codec->card->snd_card, snd_ctl_new1(
+ &mic1b_regulator_control, codec));
+ snd_ctl_add(codec->card->snd_card, snd_ctl_new1(
+ &mic2_regulator_control, codec));
+
+ /* Get references to clock-nodes */
+ clk_ptr_sysclk = NULL;
+ clk_ptr_ulpclk = NULL;
+ clk_ptr_intclk = NULL;
+ clk_ptr_audioclk = NULL;
+ clk_ptr_gpio1 = NULL;
+ clk_ptr_sysclk = clk_get(codec->dev, "sysclk");
+ if (IS_ERR(clk_ptr_sysclk)) {
+		pr_err("%s: ERROR: clk_get failed for sysclk!\n", __func__);
+ return -EFAULT;
+ }
+ clk_ptr_ulpclk = clk_get(codec->dev, "ulpclk");
+ if (IS_ERR(clk_ptr_ulpclk)) {
+		pr_err("%s: ERROR: clk_get failed for ulpclk!\n", __func__);
+ return -EFAULT;
+ }
+ clk_ptr_intclk = clk_get(codec->dev, "intclk");
+ if (IS_ERR(clk_ptr_intclk)) {
+		pr_err("%s: ERROR: clk_get failed for intclk!\n", __func__);
+ return -EFAULT;
+ }
+ clk_ptr_audioclk = clk_get(codec->dev, "audioclk");
+ if (IS_ERR(clk_ptr_audioclk)) {
+		pr_err("%s: ERROR: clk_get failed for audioclk!\n", __func__);
+ return -EFAULT;
+ }
+ clk_ptr_gpio1 = clk_get_sys("gpio.1", NULL);
+ if (IS_ERR(clk_ptr_gpio1)) {
+		pr_err("%s: ERROR: clk_get_sys(gpio.1) failed!\n", __func__);
+ return -EFAULT;
+ }
+
+ /* Set intclk default parent to ulpclk */
+ ret = clk_set_parent(clk_ptr_intclk, clk_ptr_ulpclk);
+ if (ret) {
+ pr_err("%s: ERROR: Setting intclk parent to ulpclk failed (ret = %d)!",
+ __func__,
+ ret);
+ return -EFAULT;
+ }
+
+ master_clock_sel = 1;
+
+ ab8500_power_count = 0;
+
+ reg_claim[REGULATOR_AMIC1] = 0;
+ reg_claim[REGULATOR_AMIC2] = 0;
+
+ /* Add DAPM-widgets */
+ ret = add_widgets(codec);
+ if (ret < 0) {
+ pr_err("%s: Failed add widgets (%d).\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ux500_ab8500_soc_machine_drv_init(void)
+{
+ int status = 0;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ status = create_regulators();
+ if (status < 0) {
+ pr_err("%s: ERROR: Failed to instantiate regulators (ret = %d)!\n",
+ __func__, status);
+ return status;
+ }
+
+ return 0;
+}
+
+void ux500_ab8500_soc_machine_drv_cleanup(void)
+{
+ pr_debug("%s: Enter.\n", __func__);
+
+ regulator_bulk_free(ARRAY_SIZE(reg_info), reg_info);
+
+ if (clk_ptr_sysclk != NULL)
+ clk_put(clk_ptr_sysclk);
+ if (clk_ptr_ulpclk != NULL)
+ clk_put(clk_ptr_ulpclk);
+ if (clk_ptr_intclk != NULL)
+ clk_put(clk_ptr_intclk);
+ if (clk_ptr_audioclk != NULL)
+ clk_put(clk_ptr_audioclk);
+ if (clk_ptr_gpio1 != NULL)
+ clk_put(clk_ptr_gpio1);
+}
+
+/*
+ * Measure a relatively stable voltage on the specified GPADC channel:
+ * keep sampling until two consecutive readings differ by no more than
+ * GPADC_MAX_VOLT_DIFF, or GPADC_MAX_ITERATIONS is reached.
+ */
+static int gpadc_convert_stable(struct ab8500_gpadc *gpadc,
+ u8 channel, int *value)
+{
+ int i = GPADC_MAX_ITERATIONS;
+ int mv1, mv2, dmv;
+
+ mv1 = ab8500_gpadc_convert(gpadc, channel);
+ do {
+ i--;
+ usleep_range(GPADC_MIN_DELTA_DELAY, GPADC_MAX_DELTA_DELAY);
+ mv2 = ab8500_gpadc_convert(gpadc, channel);
+ dmv = abs(mv2 - mv1);
+ mv1 = mv2;
+ } while (i > 0 && dmv > GPADC_MAX_VOLT_DIFF);
+
+ if (mv1 < 0 || dmv > GPADC_MAX_VOLT_DIFF)
+ return -EIO;
+
+ *value = mv1;
+
+ return 0;
+}
+
+/* Extended interface */
+
+int ux500_ab8500_audio_gpadc_measure(struct ab8500_gpadc *gpadc,
+ u8 channel, bool mode, int *value)
+{
+ int ret = 0;
+ int adcm = (mode) ?
+ AB8500_AUDIO_ADCM_FORCE_UP :
+ AB8500_AUDIO_ADCM_FORCE_DOWN;
+
+ mutex_lock(&adcm_lock);
+
+ ret = ux500_ab8500_power_control_inc();
+ if (ret < 0) {
+ pr_err("%s: ERROR: Failed to enable power (ret = %d)!\n",
+ __func__, ret);
+ goto power_failure;
+ }
+
+ ret = ab8500_audio_set_adcm(adcm);
+ if (ret < 0) {
+ pr_err("%s: ERROR: Failed to force adcm %s (ret = %d)!\n",
+ __func__, (mode) ? "UP" : "DOWN", ret);
+ goto adcm_failure;
+ }
+
+ ret = gpadc_convert_stable(gpadc, channel, value);
+ ret |= ab8500_audio_set_adcm(AB8500_AUDIO_ADCM_NORMAL);
+
+adcm_failure:
+ ux500_ab8500_power_control_dec();
+
+power_failure:
+ mutex_unlock(&adcm_lock);
+
+ return ret;
+}
+
+void ux500_ab8500_jack_report(int value)
+{
+ if (jack.jack)
+ snd_soc_jack_report(&jack, value, 0xFF);
+}
+EXPORT_SYMBOL_GPL(ux500_ab8500_jack_report);
+
diff --git a/sound/soc/ux500/ux500_av8100.c b/sound/soc/ux500/ux500_av8100.c
new file mode 100644
index 00000000000..3e8f29b14b3
--- /dev/null
+++ b/sound/soc/ux500/ux500_av8100.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <sound/soc.h>
+#include "../codecs/av8100_audio.h"
+#include "ux500_av8100.h"
+#include "ux500_msp_dai.h"
+
+static const char *stream_str(struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return "Playback";
+ else
+ return "Capture";
+}
+
+static int ux500_av8100_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int channels = params_channels(params);
+ unsigned int tx_mask, fmt;
+ enum hdmi_channel_allocation hdmi_ca;
+ enum hdmi_audio_channel_count hdmi_cc;
+ struct hdmi_audio_settings as;
+ int ret;
+
+ pr_debug("%s: Enter (%s).\n", __func__, stream_str(substream));
+ pr_debug("%s: substream->pcm->name = %s.\n", __func__, substream->pcm->name);
+ pr_debug("%s: substream->pcm->id = %s.\n", __func__, substream->pcm->id);
+ pr_debug("%s: substream->name = %s.\n", __func__, substream->name);
+ pr_debug("%s: substream->number = %d.\n", __func__, substream->number);
+ pr_debug("%s: channels = %d.\n", __func__, channels);
+ pr_debug("%s: DAI-index (Codec): %d\n", __func__, codec_dai->id);
+ pr_debug("%s: DAI-index (Platform): %d\n", __func__, cpu_dai->id);
+
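+	/* Map the channel count to an HDMI channel allocation and MSP TDM slot mask */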
+ switch (channels) {
+ case 1:
+ hdmi_cc = AV8100_CODEC_CC_2CH;
+ hdmi_ca = AV8100_CODEC_CA_FL_FR; /* Stereo-setup */
+ tx_mask = AV8100_CODEC_MASK_MONO;
+ break;
+ case 2:
+ hdmi_cc = AV8100_CODEC_CC_2CH;
+ hdmi_ca = AV8100_CODEC_CA_FL_FR; /* Stereo */
+ tx_mask = AV8100_CODEC_MASK_STEREO;
+ break;
+ case 3:
+ hdmi_cc = AV8100_CODEC_CC_6CH;
+ hdmi_ca = AV8100_CODEC_CA_FL_FR_LFE_FC_RL_RR; /* 5.1-setup */
+ tx_mask = AV8100_CODEC_MASK_2DOT1;
+ break;
+ case 4:
+ hdmi_cc = AV8100_CODEC_CC_6CH;
+ hdmi_ca = AV8100_CODEC_CA_FL_FR_LFE_FC_RL_RR; /* 5.1-setup */
+ tx_mask = AV8100_CODEC_MASK_QUAD;
+ break;
+ case 5:
+ hdmi_cc = AV8100_CODEC_CC_6CH;
+ hdmi_ca = AV8100_CODEC_CA_FL_FR_LFE_FC_RL_RR; /* 5.1-setup */
+ tx_mask = AV8100_CODEC_MASK_5DOT0;
+ break;
+ case 6:
+ hdmi_cc = AV8100_CODEC_CC_6CH;
+ hdmi_ca = AV8100_CODEC_CA_FL_FR_LFE_FC_RL_RR; /* 5.1 */
+ tx_mask = AV8100_CODEC_MASK_5DOT1;
+ break;
+ case 7:
+ hdmi_cc = AV8100_CODEC_CC_8CH;
+ hdmi_ca = AV8100_CODEC_CA_FL_FR_LFE_FC_RL_RR_RLC_RRC; /* 7.1 */
+ tx_mask = AV8100_CODEC_MASK_7DOT0;
+ break;
+ case 8:
+ hdmi_cc = AV8100_CODEC_CC_8CH;
+ hdmi_ca = AV8100_CODEC_CA_FL_FR_LFE_FC_RL_RR_RLC_RRC; /* 7.1 */
+ tx_mask = AV8100_CODEC_MASK_7DOT1;
+ break;
+ default:
+ pr_err("%s: Unsupported number of channels (channels = %d)!\n",
+ __func__,
+ channels);
+ return -EINVAL;
+ }
+
+ /* Change HDMI audio-settings for codec-DAI. */
+ pr_debug("%s: Change HDMI audio-settings for codec-DAI.\n", __func__);
+ as.audio_channel_count = hdmi_cc;
+ as.sampling_frequency = AV8100_CODEC_SF_48KHZ;
+ as.sample_size = AV8100_CODEC_SS_16BIT;
+ as.channel_allocation = hdmi_ca;
+ as.level_shift_value = AV8100_CODEC_LSV_0DB;
+ as.downmix_inhibit = false;
+ ret = av8100_audio_change_hdmi_audio_settings(codec_dai, &as);
+ if (ret < 0) {
+ pr_err("%s: Unable to change HDMI audio-settings for codec-DAI "
+			"(av8100_audio_change_hdmi_audio_settings returned %d)!\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ /* Set format for codec-DAI */
+ fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM;
+ pr_debug("%s: Setting format for codec-DAI (fmt = %d).\n",
+ __func__,
+ fmt);
+ ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+ if (ret < 0) {
+ pr_err("%s: Unable to set format for codec-DAI "
+			"(snd_soc_dai_set_fmt returned %d)!\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ /* Set TDM-slot for CPU-DAI */
+	pr_debug("%s: Setting TDM-slot for CPU-DAI (tx_mask = %d).\n",
+ __func__,
+ tx_mask);
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, tx_mask, 0, 16, 16);
+ if (ret < 0) {
+		pr_err("%s: Unable to set TDM-slot for CPU-DAI "
+ "(snd_soc_dai_set_tdm_slot returned %d)!\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ /* Set format for CPU-DAI */
+ fmt = SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_NB_IF;
+ pr_debug("%s: Setting DAI-format for Ux500-platform (fmt = %d).\n",
+ __func__,
+ fmt);
+ ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+ if (ret < 0) {
+ pr_err("%s: Unable to set DAI-format for Ux500-platform "
+ "(snd_soc_dai_set_fmt returned %d).\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ ux500_msp_dai_set_data_delay(cpu_dai, MSP_DELAY_1);
+
+ return ret;
+}
+
+struct snd_soc_ops ux500_av8100_ops[] = {
+ {
+ .hw_params = ux500_av8100_hw_params,
+ }
+};
+
diff --git a/sound/soc/ux500/ux500_av8100.h b/sound/soc/ux500/ux500_av8100.h
new file mode 100644
index 00000000000..b107b2e1be7
--- /dev/null
+++ b/sound/soc/ux500/ux500_av8100.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef UX500_AV8100_H
+#define UX500_AV8100_H
+
+extern struct snd_soc_ops ux500_av8100_ops[];
+
+#endif
diff --git a/sound/soc/ux500/ux500_cg29xx.c b/sound/soc/ux500/ux500_cg29xx.c
new file mode 100644
index 00000000000..456262c6c0b
--- /dev/null
+++ b/sound/soc/ux500/ux500_cg29xx.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <sound/soc.h>
+#include "../codecs/cg29xx.h"
+#include "ux500_msp_dai.h"
+
+#define UX500_CG29XX_MSP_CLOCK_FREQ 18900000
+#define U5500_CG29XX_MSP_CLOCK_FREQ 13000000
+#define UX500_CG29XX_DAI_SLOT_WIDTH 16
+#define UX500_CG29XX_DAI_SLOTS 2
+#define UX500_CG29XX_DAI_ACTIVE_SLOTS 0x02
+
+int ux500_cg29xx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int channels = params_channels(params);
+ int err;
+
+ pr_debug("%s: Enter.\n", __func__);
+ pr_debug("%s: substream->pcm->name = %s.\n", __func__, substream->pcm->name);
+ pr_debug("%s: substream->pcm->id = %s.\n", __func__, substream->pcm->id);
+ pr_debug("%s: substream->name = %s.\n", __func__, substream->name);
+ pr_debug("%s: substream->number = %d.\n", __func__, substream->number);
+ pr_debug("%s: channels = %d.\n", __func__, channels);
+ pr_debug("%s: DAI-index (Codec): %d\n", __func__, codec_dai->id);
+ pr_debug("%s: DAI-index (Platform): %d\n", __func__, cpu_dai->id);
+
+ err = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBS_CFS);
+
+ if (err) {
+ pr_err("%s: snd_soc_dai_set_fmt(codec) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+
+ err = snd_soc_dai_set_tdm_slot(codec_dai,
+ 1 << CG29XX_DAI_SLOT0_SHIFT,
+ 1 << CG29XX_DAI_SLOT0_SHIFT,
+ UX500_CG29XX_DAI_SLOTS,
+ UX500_CG29XX_DAI_SLOT_WIDTH);
+
+ if (err) {
+ pr_err("%s: cg29xx_set_tdm_slot(codec_dai) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+
+ err = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBS_CFS |
+ SND_SOC_DAIFMT_NB_NF);
+
+ if (err) {
+ pr_err("%s: snd_soc_dai_set_fmt(cpu_dai) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+
+ err = snd_soc_dai_set_sysclk(cpu_dai,
+ UX500_MSP_MASTER_CLOCK,
+ UX500_CG29XX_MSP_CLOCK_FREQ,
+ 0);
+
+ if (err) {
+ pr_err("%s: snd_soc_dai_set_sysclk(cpu_dai) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+
+ err = snd_soc_dai_set_tdm_slot(cpu_dai,
+ UX500_CG29XX_DAI_ACTIVE_SLOTS,
+ UX500_CG29XX_DAI_ACTIVE_SLOTS,
+ UX500_CG29XX_DAI_SLOTS,
+ UX500_CG29XX_DAI_SLOT_WIDTH);
+
+ if (err) {
+ pr_err("%s: cg29xx_set_tdm_slot(cpu_dai) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+
+out_err:
+ return err;
+}
+
+int u5500_cg29xx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int channels = params_channels(params);
+ int err;
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int dai_id = codec_dai->id;
+ struct cg29xx_codec_dai_data *codec_drvdata =
+ snd_soc_codec_get_drvdata(codec);
+ struct cg29xx_codec_dai_data *dai_data = &codec_drvdata[dai_id];
+
+ pr_debug("%s: Enter.\n", __func__);
+ pr_debug("%s: substream->pcm->name=%s\n",
+ __func__, substream->pcm->name);
+ pr_debug("%s: substream->pcm->id = %s\n", __func__, substream->pcm->id);
+ pr_debug("%s: substream->name = %s.\n", __func__, substream->name);
+ pr_debug("%s: substream->number = %d.\n", __func__, substream->number);
+ pr_debug("%s: channels = %d.\n", __func__, channels);
+ pr_debug("%s: DAI-index (Codec): %d\n", __func__, codec_dai->id);
+ pr_debug("%s: DAI-index (Platform): %d\n", __func__, cpu_dai->id);
+
+ if (dai_data->config.port == PORT_0_I2S) {
+ err = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS);
+ if (err) {
+ pr_err("%s: snd_soc_dai_set_fmt (codec) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+
+ err = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS);
+
+ if (err) {
+			pr_err("%s: snd_soc_dai_set_fmt(cpu_dai) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+ } else {
+ err = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBS_CFS);
+ if (err) {
+ pr_err("%s: snd_soc_dai_set_fmt(codec) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+
+ err = snd_soc_dai_set_tdm_slot(codec_dai,
+ 1 << CG29XX_DAI_SLOT0_SHIFT,
+ 1 << CG29XX_DAI_SLOT0_SHIFT,
+ UX500_CG29XX_DAI_SLOTS,
+ UX500_CG29XX_DAI_SLOT_WIDTH);
+
+ if (err) {
+ pr_err("%s: cg29xx_set_tdm_slot(codec_dai) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+
+ err = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_CBS_CFS |
+ SND_SOC_DAIFMT_NB_NF);
+
+ if (err) {
+ pr_err("%s: snd_soc_dai_set_fmt(cpu_dai) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+
+ err = snd_soc_dai_set_sysclk(cpu_dai,
+ UX500_MSP_MASTER_CLOCK,
+ U5500_CG29XX_MSP_CLOCK_FREQ,
+ 0);
+
+ if (err) {
+ pr_err("%s: snd_soc_dai_set_sysclk(cpu_dai) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+
+ err = snd_soc_dai_set_tdm_slot(cpu_dai,
+ UX500_CG29XX_DAI_ACTIVE_SLOTS,
+ UX500_CG29XX_DAI_ACTIVE_SLOTS,
+ UX500_CG29XX_DAI_SLOTS,
+ UX500_CG29XX_DAI_SLOT_WIDTH);
+
+ if (err) {
+ pr_err("%s: cg29xx_set_tdm_slot(cpu_dai) failed with %d.\n",
+ __func__,
+ err);
+ goto out_err;
+ }
+	}
+ ux500_msp_dai_set_data_delay(cpu_dai, MSP_DELAY_0);
+out_err:
+ return err;
+}
+
+struct snd_soc_ops ux500_cg29xx_ops[] = {
+ {
+ .hw_params = ux500_cg29xx_hw_params,
+ }
+};
+
+struct snd_soc_ops u5500_cg29xx_ops[] = {
+ {
+ .hw_params = u5500_cg29xx_hw_params,
+ }
+};
+
diff --git a/sound/soc/ux500/ux500_cg29xx.h b/sound/soc/ux500/ux500_cg29xx.h
new file mode 100644
index 00000000000..33736bca0cd
--- /dev/null
+++ b/sound/soc/ux500/ux500_cg29xx.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef UX500_CG29XX_H
+#define UX500_CG29XX_H
+
+extern struct snd_soc_ops ux500_cg29xx_ops[];
+extern struct snd_soc_ops u5500_cg29xx_ops[];
+
+#endif
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
new file mode 100644
index 00000000000..2af5bf14193
--- /dev/null
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -0,0 +1,1007 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+
+#include <mach/hardware.h>
+#include <mach/msp.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "ux500_msp_i2s.h"
+#include "ux500_msp_dai.h"
+#include "ux500_pcm.h"
+
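+/* Per-DAI runtime state, indexed by MSP/DAI id */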
+static struct ux500_platform_drvdata platform_drvdata[UX500_NBR_OF_DAI] = {
+ {
+ .msp_i2s_drvdata = NULL,
+ .fmt = 0,
+ .slots = 1,
+ .tx_mask = 0x01,
+ .rx_mask = 0x01,
+ .slot_width = 16,
+ .playback_active = false,
+ .capture_active = false,
+ .configured = 0,
+ .data_delay = MSP_DELAY_0,
+ .master_clk = UX500_MSP_INTERNAL_CLOCK_FREQ,
+ },
+ {
+ .msp_i2s_drvdata = NULL,
+ .fmt = 0,
+ .slots = 1,
+ .tx_mask = 0x01,
+ .rx_mask = 0x01,
+ .slot_width = 16,
+ .playback_active = false,
+ .capture_active = false,
+ .configured = 0,
+ .data_delay = MSP_DELAY_0,
+ .master_clk = UX500_MSP1_INTERNAL_CLOCK_FREQ,
+ },
+ {
+ .msp_i2s_drvdata = NULL,
+ .fmt = 0,
+ .slots = 1,
+ .tx_mask = 0x01,
+ .rx_mask = 0x01,
+ .slot_width = 16,
+ .playback_active = false,
+ .capture_active = false,
+ .configured = 0,
+ .data_delay = MSP_DELAY_0,
+ .master_clk = UX500_MSP_INTERNAL_CLOCK_FREQ,
+ },
+ {
+ .msp_i2s_drvdata = NULL,
+ .fmt = 0,
+ .slots = 1,
+ .tx_mask = 0x01,
+ .rx_mask = 0x01,
+ .slot_width = 16,
+ .playback_active = false,
+ .capture_active = false,
+ .configured = 0,
+ .data_delay = MSP_DELAY_0,
+ .master_clk = UX500_MSP1_INTERNAL_CLOCK_FREQ,
+ },
+};
+
+bool ux500_msp_dai_i2s_get_underrun_status(int dai_idx)
+{
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai_idx];
+ int status = ux500_msp_i2s_hw_status(drvdata->msp_i2s_drvdata);
+ return (bool)(status & TRANSMIT_UNDERRUN_ERR_INT);
+}
+
+dma_addr_t ux500_msp_dai_i2s_get_pointer(int dai_idx, int stream_id)
+{
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai_idx];
+ return ux500_msp_i2s_get_pointer(drvdata->msp_i2s_drvdata,
+ (stream_id == SNDRV_PCM_STREAM_PLAYBACK) ?
+ I2S_DIRECTION_TX :
+ I2S_DIRECTION_RX);
+}
+
+int ux500_msp_dai_i2s_configure_sg(dma_addr_t dma_addr,
+ int period_cnt,
+ size_t period_len,
+ int dai_idx,
+ int stream_id)
+{
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai_idx];
+ struct i2s_message message;
+ int ret = 0;
+ bool playback_req_valid =
+ (drvdata->playback_active &&
+ stream_id == SNDRV_PCM_STREAM_PLAYBACK);
+ bool capture_req_valid =
+ (drvdata->capture_active &&
+ stream_id == SNDRV_PCM_STREAM_CAPTURE);
+
+ pr_debug("%s: Enter (MSP Index: %u, period-cnt: %u, period-len: %u).\n",
+ __func__,
+ dai_idx,
+ period_cnt,
+ period_len);
+
+ if (!playback_req_valid && !capture_req_valid) {
+		pr_err("%s: The I2S controller is not available. "
+			"MSP index: %d\n",
+ __func__,
+ dai_idx);
+ return ret;
+ }
+
+ message.i2s_direction = (stream_id == SNDRV_PCM_STREAM_PLAYBACK) ?
+ I2S_DIRECTION_TX :
+ I2S_DIRECTION_RX;
+ message.buf_addr = dma_addr;
+ message.buf_len = period_cnt * period_len;
+ message.period_len = period_len;
+
+ ret = ux500_msp_i2s_transfer(drvdata->msp_i2s_drvdata, &message);
+ if (ret < 0) {
+ pr_err("%s: Error: i2s_transfer failed. MSP index: %d\n",
+ __func__,
+ dai_idx);
+ }
+
+ return ret;
+}
+
+static const char *stream_str(struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return "Playback";
+ else
+ return "Capture";
+}
+
+static int ux500_msp_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai->id];
+ bool mode_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+
+ pr_debug("%s: MSP %d (%s): Enter.\n", __func__, dai->id, stream_str(substream));
+
+ if ((mode_playback && drvdata->playback_active) ||
+ (!mode_playback && drvdata->capture_active)) {
+ pr_err("%s: Error: MSP %d (%s): Stream already active.\n",
+ __func__,
+ dai->id,
+ stream_str(substream));
+ return -EBUSY;
+ }
+
+ if (mode_playback)
+ drvdata->playback_active = true;
+ else
+ drvdata->capture_active = true;
+
+ return 0;
+}
+
+static void ux500_msp_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai->id];
+ bool mode_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+
+ pr_debug("%s: MSP %d (%s): Enter.\n", __func__, dai->id, stream_str(substream));
+
+ if (drvdata == NULL)
+ return;
+
+ if (mode_playback)
+ drvdata->playback_active = false;
+ else
+ drvdata->capture_active = false;
+
+ if (ux500_msp_i2s_close(drvdata->msp_i2s_drvdata,
+ mode_playback ? DISABLE_TRANSMIT : DISABLE_RECEIVE)) {
+ pr_err("%s: Error: MSP %d (%s): Unable to close i2s.\n",
+ __func__,
+ dai->id,
+ stream_str(substream));
+ }
+
+ if (mode_playback)
+ drvdata->configured &= ~PLAYBACK_CONFIGURED;
+ else
+ drvdata->configured &= ~CAPTURE_CONFIGURED;
+}
+
+static void ux500_msp_dai_setup_multichannel(struct ux500_platform_drvdata *private,
+ struct msp_config *msp_config)
+{
+ struct msp_multichannel_config *multi = &msp_config->multichannel_config;
+
+ if (private->slots > 1) {
+ msp_config->multichannel_configured = 1;
+
+ multi->tx_multichannel_enable = true;
+ multi->rx_multichannel_enable = true;
+ multi->rx_comparison_enable_mode = MSP_COMPARISON_DISABLED;
+
+ multi->tx_channel_0_enable = private->tx_mask;
+ multi->tx_channel_1_enable = 0;
+ multi->tx_channel_2_enable = 0;
+ multi->tx_channel_3_enable = 0;
+
+ multi->rx_channel_0_enable = private->rx_mask;
+ multi->rx_channel_1_enable = 0;
+ multi->rx_channel_2_enable = 0;
+ multi->rx_channel_3_enable = 0;
+
+		pr_debug("%s: Multichannel enabled. "
+			"Slots: %d TX: %u RX: %u\n",
+ __func__,
+ private->slots,
+ multi->tx_channel_0_enable,
+ multi->rx_channel_0_enable);
+ }
+}
+
+static void ux500_msp_dai_setup_frameper(struct ux500_platform_drvdata *private,
+ unsigned int rate,
+ struct msp_protocol_desc *prot_desc)
+{
+ switch (private->slots) {
+ default:
+ case 1:
+ switch (rate) {
+ case 8000:
+ prot_desc->frame_period =
+ FRAME_PER_SINGLE_SLOT_8_KHZ;
+ break;
+ case 16000:
+ prot_desc->frame_period =
+ FRAME_PER_SINGLE_SLOT_16_KHZ;
+ break;
+ case 44100:
+ prot_desc->frame_period =
+ FRAME_PER_SINGLE_SLOT_44_1_KHZ;
+ break;
+ case 48000:
+ default:
+ prot_desc->frame_period =
+ FRAME_PER_SINGLE_SLOT_48_KHZ;
+ break;
+ }
+ break;
+
+ case 2:
+ prot_desc->frame_period = FRAME_PER_2_SLOTS;
+ break;
+
+ case 8:
+ prot_desc->frame_period =
+ FRAME_PER_8_SLOTS;
+ break;
+
+ case 16:
+ prot_desc->frame_period =
+ FRAME_PER_16_SLOTS;
+ break;
+ }
+
+ prot_desc->total_clocks_for_one_frame =
+ prot_desc->frame_period+1;
+
+ pr_debug("%s: Total clocks per frame: %u\n",
+ __func__,
+ prot_desc->total_clocks_for_one_frame);
+}
+
+static void ux500_msp_dai_setup_framing_pcm(struct ux500_platform_drvdata *private,
+ unsigned int rate,
+ struct msp_protocol_desc *prot_desc)
+{
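+	/* Frame length tracks the number of TDM slots; element length is fixed at 16 bits */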
+ u32 frame_length = MSP_FRAME_LENGTH_1;
+ prot_desc->frame_width = 0;
+
+ switch (private->slots) {
+ default:
+ case 1:
+ frame_length = MSP_FRAME_LENGTH_1;
+ break;
+
+ case 2:
+ frame_length = MSP_FRAME_LENGTH_2;
+ break;
+
+ case 8:
+ frame_length = MSP_FRAME_LENGTH_8;
+ break;
+
+ case 16:
+ frame_length = MSP_FRAME_LENGTH_16;
+ break;
+ }
+
+ prot_desc->tx_frame_length_1 = frame_length;
+ prot_desc->rx_frame_length_1 = frame_length;
+ prot_desc->tx_frame_length_2 = frame_length;
+ prot_desc->rx_frame_length_2 = frame_length;
+
+ prot_desc->tx_element_length_1 = MSP_ELEM_LENGTH_16;
+ prot_desc->rx_element_length_1 = MSP_ELEM_LENGTH_16;
+ prot_desc->tx_element_length_2 = MSP_ELEM_LENGTH_16;
+ prot_desc->rx_element_length_2 = MSP_ELEM_LENGTH_16;
+
+ ux500_msp_dai_setup_frameper(private, rate, prot_desc);
+}
+
+static void ux500_msp_dai_setup_clocking(unsigned int fmt,
+ struct msp_config *msp_config)
+{
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ default:
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+
+ case SND_SOC_DAIFMT_NB_IF:
+ msp_config->tx_frame_sync_pol ^= 1 << TFSPOL_SHIFT;
+ msp_config->rx_frame_sync_pol ^= 1 << RFSPOL_SHIFT;
+ break;
+ }
+
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBM_CFM) {
+ pr_debug("%s: Codec is MASTER.\n",
+ __func__);
+ msp_config->iodelay = 0x20;
+ msp_config->rx_frame_sync_sel = 0;
+ msp_config->tx_frame_sync_sel = 1 << TFSSEL_SHIFT;
+ msp_config->tx_clock_sel = 0;
+ msp_config->rx_clock_sel = 0;
+ msp_config->srg_clock_sel = 0x2 << SCKSEL_SHIFT;
+
+ } else {
+ pr_debug("%s: Codec is SLAVE.\n",
+ __func__);
+
+ msp_config->tx_clock_sel = TX_CLK_SEL_SRG;
+ msp_config->tx_frame_sync_sel = TX_SYNC_SRG_PROG;
+ msp_config->rx_clock_sel = RX_CLK_SEL_SRG;
+ msp_config->rx_frame_sync_sel = RX_SYNC_SRG;
+ msp_config->srg_clock_sel = 1 << SCKSEL_SHIFT;
+ }
+}
+
+static void ux500_msp_dai_compile_prot_desc_pcm(unsigned int fmt,
+ struct msp_protocol_desc *prot_desc)
+{
+ prot_desc->rx_phase_mode = MSP_SINGLE_PHASE;
+ prot_desc->tx_phase_mode = MSP_SINGLE_PHASE;
+ prot_desc->rx_phase2_start_mode = MSP_PHASE2_START_MODE_IMEDIATE;
+ prot_desc->tx_phase2_start_mode = MSP_PHASE2_START_MODE_IMEDIATE;
+ prot_desc->rx_bit_transfer_format = MSP_BTF_MS_BIT_FIRST;
+ prot_desc->tx_bit_transfer_format = MSP_BTF_MS_BIT_FIRST;
+ prot_desc->tx_frame_sync_pol = MSP_FRAME_SYNC_POL(MSP_FRAME_SYNC_POL_ACTIVE_HIGH);
+ prot_desc->rx_frame_sync_pol = MSP_FRAME_SYNC_POL_ACTIVE_HIGH << RFSPOL_SHIFT;
+
+ if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_DSP_A) {
+ pr_debug("%s: DSP_A.\n",
+ __func__);
+ prot_desc->rx_clock_pol = MSP_RISING_EDGE;
+ prot_desc->tx_clock_pol = MSP_FALLING_EDGE;
+ } else {
+ pr_debug("%s: DSP_B.\n",
+ __func__);
+ prot_desc->rx_clock_pol = MSP_FALLING_EDGE;
+ prot_desc->tx_clock_pol = MSP_RISING_EDGE;
+ }
+
+ prot_desc->rx_half_word_swap = MSP_HWS_NO_SWAP;
+ prot_desc->tx_half_word_swap = MSP_HWS_NO_SWAP;
+ prot_desc->compression_mode = MSP_COMPRESS_MODE_LINEAR;
+ prot_desc->expansion_mode = MSP_EXPAND_MODE_LINEAR;
+ prot_desc->spi_clk_mode = MSP_SPI_CLOCK_MODE_NON_SPI;
+ prot_desc->spi_burst_mode = MSP_SPI_BURST_MODE_DISABLE;
+ prot_desc->frame_sync_ignore = MSP_FRAME_SYNC_IGNORE;
+}
+
+static void ux500_msp_dai_compile_prot_desc_i2s(struct msp_protocol_desc *prot_desc)
+{
+ prot_desc->rx_phase_mode = MSP_DUAL_PHASE;
+ prot_desc->tx_phase_mode = MSP_DUAL_PHASE;
+ prot_desc->rx_phase2_start_mode =
+ MSP_PHASE2_START_MODE_FRAME_SYNC;
+ prot_desc->tx_phase2_start_mode =
+ MSP_PHASE2_START_MODE_FRAME_SYNC;
+ prot_desc->rx_bit_transfer_format = MSP_BTF_MS_BIT_FIRST;
+ prot_desc->tx_bit_transfer_format = MSP_BTF_MS_BIT_FIRST;
+ prot_desc->tx_frame_sync_pol = MSP_FRAME_SYNC_POL(MSP_FRAME_SYNC_POL_ACTIVE_LOW);
+ prot_desc->rx_frame_sync_pol = MSP_FRAME_SYNC_POL_ACTIVE_LOW << RFSPOL_SHIFT;
+
+ prot_desc->rx_frame_length_1 = MSP_FRAME_LENGTH_1;
+ prot_desc->rx_frame_length_2 = MSP_FRAME_LENGTH_1;
+ prot_desc->tx_frame_length_1 = MSP_FRAME_LENGTH_1;
+ prot_desc->tx_frame_length_2 = MSP_FRAME_LENGTH_1;
+ prot_desc->rx_element_length_1 = MSP_ELEM_LENGTH_16;
+ prot_desc->rx_element_length_2 = MSP_ELEM_LENGTH_16;
+ prot_desc->tx_element_length_1 = MSP_ELEM_LENGTH_16;
+ prot_desc->tx_element_length_2 = MSP_ELEM_LENGTH_16;
+
+ prot_desc->rx_clock_pol = MSP_RISING_EDGE;
+ prot_desc->tx_clock_pol = MSP_FALLING_EDGE;
+
+ prot_desc->tx_half_word_swap = MSP_HWS_NO_SWAP;
+ prot_desc->rx_half_word_swap = MSP_HWS_NO_SWAP;
+ prot_desc->compression_mode = MSP_COMPRESS_MODE_LINEAR;
+ prot_desc->expansion_mode = MSP_EXPAND_MODE_LINEAR;
+ prot_desc->spi_clk_mode = MSP_SPI_CLOCK_MODE_NON_SPI;
+ prot_desc->spi_burst_mode = MSP_SPI_BURST_MODE_DISABLE;
+ prot_desc->frame_sync_ignore = MSP_FRAME_SYNC_IGNORE;
+}
+
+static void ux500_msp_dai_compile_msp_config(struct snd_pcm_substream *substream,
+ struct ux500_platform_drvdata *private,
+ unsigned int rate,
+ struct msp_config *msp_config)
+{
+ struct msp_protocol_desc *prot_desc = &msp_config->protocol_desc;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ unsigned int fmt = private->fmt;
+
+ memset(msp_config, 0, sizeof(*msp_config));
+
+ msp_config->input_clock_freq = private->master_clk;
+
+ msp_config->tx_fifo_config = TX_FIFO_ENABLE;
+ msp_config->rx_fifo_config = RX_FIFO_ENABLE;
+ msp_config->spi_clk_mode = SPI_CLK_MODE_NORMAL;
+ msp_config->spi_burst_mode = 0;
+ msp_config->handler = ux500_pcm_dma_eot_handler;
+ msp_config->tx_callback_data =
+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ substream : NULL;
+ msp_config->rx_callback_data =
+ substream->stream == SNDRV_PCM_STREAM_CAPTURE ?
+ substream : NULL;
+ msp_config->def_elem_len = 1;
+ msp_config->direction =
+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ MSP_TRANSMIT_MODE : MSP_RECEIVE_MODE;
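+	/* Default data size; most protocol cases below narrow this to 16 bits */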
+ msp_config->data_size = MSP_DATA_BITS_32;
+ msp_config->work_mode = MSP_DMA_MODE;
+ msp_config->frame_freq = rate;
+
+ pr_debug("%s: input_clock_freq = %u, frame_freq = %u.\n",
+ __func__, msp_config->input_clock_freq, msp_config->frame_freq);
+ /* To avoid division by zero in I2S-driver (i2s_setup) */
+ prot_desc->total_clocks_for_one_frame = 1;
+
+ prot_desc->rx_data_delay = private->data_delay;
+ prot_desc->tx_data_delay = private->data_delay;
+
+ pr_debug("%s: rate: %u channels: %d.\n",
+ __func__,
+ rate,
+ runtime->channels);
+ switch (fmt &
+ (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_MASTER_MASK)) {
+
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS:
+ pr_debug("%s: SND_SOC_DAIFMT_I2S.\n",
+ __func__);
+
+ msp_config->default_protocol_desc = 1;
+ msp_config->protocol = MSP_I2S_PROTOCOL;
+ break;
+
+ default:
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM:
+ pr_debug("%s: SND_SOC_DAIFMT_I2S.\n",
+ __func__);
+
+ msp_config->data_size = MSP_DATA_BITS_16;
+ msp_config->protocol = MSP_I2S_PROTOCOL;
+
+ ux500_msp_dai_compile_prot_desc_i2s(prot_desc);
+ break;
+
+ case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBM_CFM:
+ pr_debug("%s: PCM format.\n",
+ __func__);
+ msp_config->data_size = MSP_DATA_BITS_16;
+ msp_config->protocol = MSP_PCM_PROTOCOL;
+
+ ux500_msp_dai_compile_prot_desc_pcm(fmt, prot_desc);
+ ux500_msp_dai_setup_multichannel(private, msp_config);
+ ux500_msp_dai_setup_framing_pcm(private, rate, prot_desc);
+ break;
+ }
+
+ ux500_msp_dai_setup_clocking(fmt, msp_config);
+}
+
+static int ux500_msp_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int ret = 0;
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai->id];
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct msp_config msp_config;
+ bool mode_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+
+ pr_debug("%s: MSP %d (%s): Enter.\n", __func__, dai->id, stream_str(substream));
+
+	/* If the stream is already configured, no error is reported */
+ if (mode_playback) {
+ if ((drvdata->configured & PLAYBACK_CONFIGURED) &&
+ (drvdata->playback_active))
+ goto cleanup;
+ } else {
+ if ((drvdata->configured & CAPTURE_CONFIGURED) &&
+ (drvdata->capture_active))
+ goto cleanup;
+ }
+
+ pr_debug("%s: Setup dai (Rate: %u).\n", __func__, runtime->rate);
+ ux500_msp_dai_compile_msp_config(substream,
+ drvdata,
+ runtime->rate,
+ &msp_config);
+
+ ret = ux500_msp_i2s_open(drvdata->msp_i2s_drvdata, &msp_config);
+ if (ret < 0) {
+ pr_err("%s: Error: msp_setup failed (ret = %d)!\n", __func__, ret);
+ goto cleanup;
+ }
+
+ drvdata->configured |= mode_playback ?
+ PLAYBACK_CONFIGURED : CAPTURE_CONFIGURED;
+
+cleanup:
+ return ret;
+}
+
+static int ux500_msp_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ unsigned int mask, slots_active;
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai->id];
+
+ pr_debug("%s: MSP %d (%s): Enter.\n",
+ __func__,
+ dai->id,
+ stream_str(substream));
+
+ switch (drvdata->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ if (params_channels(params) != 2) {
+ pr_err("%s: Error: I2S requires channels = 2 "
+ "(channels = %d)!\n",
+ __func__,
+ params_channels(params));
+ return -EINVAL;
+ }
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ case SND_SOC_DAIFMT_DSP_A:
+
+ mask = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ drvdata->tx_mask :
+ drvdata->rx_mask;
+
+ slots_active = hweight32(mask);
+
+		pr_debug("%s: TDM slots active: %d\n", __func__, slots_active);
+
+ if (params_channels(params) != slots_active) {
+ pr_err("%s: Error: PCM TDM format requires channels "
+ "to match active slots "
+ "(channels = %d, active slots = %d)!\n",
+ __func__,
+ params_channels(params),
+ slots_active);
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int ux500_msp_dai_set_data_delay(struct snd_soc_dai *dai, int delay)
+{
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai->id];
+
+ pr_debug("%s: MSP %d: Enter.\n", __func__, dai->id);
+
+ switch (delay) {
+ case MSP_DELAY_0:
+ case MSP_DELAY_1:
+ case MSP_DELAY_2:
+ case MSP_DELAY_3:
+ break;
+ default:
+ goto unsupported_delay;
+ }
+
+ drvdata->data_delay = delay;
+ return 0;
+
+unsupported_delay:
+ pr_err("%s: MSP %d: Error: Unsupported DAI delay (%d)!\n",
+ __func__,
+ dai->id,
+ delay);
+ return -EINVAL;
+}
+
+static int ux500_msp_dai_set_dai_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+{
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai->id];
+
+ pr_debug("%s: MSP %d: Enter.\n", __func__, dai->id);
+
+ switch (fmt & (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_MASTER_MASK)) {
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM:
+ break;
+
+ default:
+ goto unsupported_format;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ case SND_SOC_DAIFMT_NB_IF:
+ case SND_SOC_DAIFMT_IB_IF:
+ break;
+
+ default:
+ goto unsupported_format;
+ }
+
+ drvdata->fmt = fmt;
+ return 0;
+
+unsupported_format:
+ pr_err("%s: MSP %d: Error: Unsupported DAI format (0x%x)!\n",
+ __func__,
+ dai->id,
+ fmt);
+ return -EINVAL;
+}
+
+static int ux500_msp_dai_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask,
+ unsigned int rx_mask,
+ int slots,
+ int slot_width)
+{
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai->id];
+ unsigned int cap;
+
+ if (!(slots == 1 || slots == 2 || slots == 8 || slots == 16)) {
+ pr_err("%s: Error: Unsupported slots (%d)! "
+ "Supported values are 1/2/8/16.\n",
+ __func__,
+ slots);
+ return -EINVAL;
+ }
+ drvdata->slots = slots;
+
+	if (slot_width != 16) {
+		pr_err("%s: Error: Unsupported slot_width (%d)! "
+ "Supported value is 16.\n",
+ __func__,
+ slot_width);
+ return -EINVAL;
+ }
+ drvdata->slot_width = slot_width;
+
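+	/* Restrict the supplied TDM masks to the configured number of slots */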
+ switch (slots) {
+ default:
+ case 1:
+ cap = 0x01;
+ break;
+ case 2:
+ cap = 0x03;
+ break;
+ case 8:
+ cap = 0xFF;
+ break;
+ case 16:
+ cap = 0xFFFF;
+ break;
+ }
+
+ drvdata->tx_mask = tx_mask & cap;
+ drvdata->rx_mask = rx_mask & cap;
+
+ return 0;
+}
+
+static int ux500_msp_dai_set_dai_sysclk(struct snd_soc_dai *dai,
+ int clk_id,
+ unsigned int freq,
+ int dir)
+{
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai->id];
+
+ pr_debug("%s: MSP %d: Enter. Clk id: %d, freq: %u.\n",
+ __func__,
+ dai->id,
+ clk_id,
+ freq);
+
+ switch (clk_id) {
+ case UX500_MSP_MASTER_CLOCK:
+ drvdata->master_clk = freq;
+ break;
+
+ default:
+ pr_err("%s: MSP %d: Invalid clkid: %d.\n",
+ __func__,
+ dai->id,
+ clk_id);
+ }
+
+ return 0;
+}
+
+static int ux500_msp_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd,
+ struct snd_soc_dai *dai)
+{
+ int ret = 0;
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[dai->id];
+
+ pr_debug("%s: MSP %d (%s): Enter (msp->id = %d, cmd = %d).\n",
+ __func__,
+ dai->id,
+ stream_str(substream),
+ (int)drvdata->msp_i2s_drvdata->id,
+ cmd);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ ret = 0;
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = 0;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ ret = 0;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct snd_soc_dai_driver ux500_msp_dai_drv[UX500_NBR_OF_DAI] = {
+ {
+ .name = "ux500-msp-i2s.0",
+ .id = 0,
+ .suspend = NULL,
+ .resume = NULL,
+ .playback = {
+ .channels_min = UX500_MSP_MIN_CHANNELS,
+ .channels_max = UX500_MSP_MAX_CHANNELS,
+ .rates = UX500_I2S_RATES,
+ .formats = UX500_I2S_FORMATS,
+ },
+ .capture = {
+ .channels_min = UX500_MSP_MIN_CHANNELS,
+ .channels_max = UX500_MSP_MAX_CHANNELS,
+ .rates = UX500_I2S_RATES,
+ .formats = UX500_I2S_FORMATS,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .set_sysclk = ux500_msp_dai_set_dai_sysclk,
+ .set_fmt = ux500_msp_dai_set_dai_fmt,
+ .set_tdm_slot = ux500_msp_dai_set_tdm_slot,
+ .startup = ux500_msp_dai_startup,
+ .shutdown = ux500_msp_dai_shutdown,
+ .prepare = ux500_msp_dai_prepare,
+ .trigger = ux500_msp_dai_trigger,
+ .hw_params = ux500_msp_dai_hw_params,
+ }
+ },
+ },
+ {
+ .name = "ux500-msp-i2s.1",
+ .id = 1,
+ .suspend = NULL,
+ .resume = NULL,
+ .playback = {
+ .channels_min = UX500_MSP_MIN_CHANNELS,
+ .channels_max = UX500_MSP_MAX_CHANNELS,
+ .rates = UX500_I2S_RATES,
+ .formats = UX500_I2S_FORMATS,
+ },
+ .capture = {
+ .channels_min = UX500_MSP_MIN_CHANNELS,
+ .channels_max = UX500_MSP_MAX_CHANNELS,
+ .rates = UX500_I2S_RATES,
+ .formats = UX500_I2S_FORMATS,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .set_sysclk = ux500_msp_dai_set_dai_sysclk,
+ .set_fmt = ux500_msp_dai_set_dai_fmt,
+ .set_tdm_slot = ux500_msp_dai_set_tdm_slot,
+ .startup = ux500_msp_dai_startup,
+ .shutdown = ux500_msp_dai_shutdown,
+ .prepare = ux500_msp_dai_prepare,
+ .trigger = ux500_msp_dai_trigger,
+ .hw_params = ux500_msp_dai_hw_params,
+ }
+ },
+ },
+ {
+ .name = "ux500-msp-i2s.2",
+ .id = 2,
+ .suspend = NULL,
+ .resume = NULL,
+ .playback = {
+ .channels_min = UX500_MSP_MIN_CHANNELS,
+ .channels_max = UX500_MSP_MAX_CHANNELS,
+ .rates = UX500_I2S_RATES,
+ .formats = UX500_I2S_FORMATS,
+ },
+ .capture = {
+ .channels_min = UX500_MSP_MIN_CHANNELS,
+ .channels_max = UX500_MSP_MAX_CHANNELS,
+ .rates = UX500_I2S_RATES,
+ .formats = UX500_I2S_FORMATS,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .set_sysclk = ux500_msp_dai_set_dai_sysclk,
+ .set_fmt = ux500_msp_dai_set_dai_fmt,
+ .set_tdm_slot = ux500_msp_dai_set_tdm_slot,
+ .startup = ux500_msp_dai_startup,
+ .shutdown = ux500_msp_dai_shutdown,
+ .prepare = ux500_msp_dai_prepare,
+ .trigger = ux500_msp_dai_trigger,
+ .hw_params = ux500_msp_dai_hw_params,
+ }
+ },
+ },
+ {
+ .name = "ux500-msp-i2s.3",
+ .id = 3,
+ .suspend = NULL,
+ .resume = NULL,
+ .playback = {
+ .channels_min = UX500_MSP_MIN_CHANNELS,
+ .channels_max = UX500_MSP_MAX_CHANNELS,
+ .rates = UX500_I2S_RATES,
+ .formats = UX500_I2S_FORMATS,
+ },
+ .capture = {
+ .channels_min = UX500_MSP_MIN_CHANNELS,
+ .channels_max = UX500_MSP_MAX_CHANNELS,
+ .rates = UX500_I2S_RATES,
+ .formats = UX500_I2S_FORMATS,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .set_sysclk = ux500_msp_dai_set_dai_sysclk,
+ .set_fmt = ux500_msp_dai_set_dai_fmt,
+ .set_tdm_slot = ux500_msp_dai_set_tdm_slot,
+ .startup = ux500_msp_dai_startup,
+ .shutdown = ux500_msp_dai_shutdown,
+ .prepare = ux500_msp_dai_prepare,
+ .trigger = ux500_msp_dai_trigger,
+ .hw_params = ux500_msp_dai_hw_params,
+ }
+ },
+ },
+};
+EXPORT_SYMBOL(ux500_msp_dai_drv);
+
+static int ux500_msp_drv_probe(struct platform_device *pdev)
+{
+ struct ux500_msp_i2s_drvdata *msp_i2s_drvdata;
+ struct ux500_platform_drvdata *drvdata;
+ struct msp_i2s_platform_data *platform_data;
+ int id;
+ int ret = 0;
+
+	pr_debug("%s: Enter (pdev->name = %s).\n", __func__, pdev->name);
+
+ platform_data = (struct msp_i2s_platform_data *)pdev->dev.platform_data;
+ msp_i2s_drvdata = ux500_msp_i2s_init(pdev, platform_data);
+ if (!msp_i2s_drvdata) {
+ pr_err("%s: ERROR: ux500_msp_i2s_init failed!", __func__);
+ return -EINVAL;
+ }
+
+ id = msp_i2s_drvdata->id;
+ drvdata = &platform_drvdata[id];
+ drvdata->msp_i2s_drvdata = msp_i2s_drvdata;
+
+ pr_info("%s: Registering ux500-msp-dai SoC CPU-DAI.\n", __func__);
+ ret = snd_soc_register_dai(&pdev->dev, &ux500_msp_dai_drv[id]);
+ if (ret < 0) {
+ pr_err("Error: %s: Failed to register MSP %d.\n", __func__, id);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ux500_msp_drv_remove(struct platform_device *pdev)
+{
+ struct ux500_msp_i2s_drvdata *msp_i2s_drvdata = dev_get_drvdata(&pdev->dev);
+ struct ux500_platform_drvdata *drvdata = &platform_drvdata[msp_i2s_drvdata->id];
+
+ pr_info("%s: Unregister ux500-msp-dai ASoC CPU-DAI.\n", __func__);
+ snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(ux500_msp_dai_drv));
+
+ ux500_msp_i2s_exit(msp_i2s_drvdata);
+ drvdata->msp_i2s_drvdata = NULL;
+
+ return 0;
+}
+
+int ux500_msp_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct ux500_msp_i2s_drvdata *msp_i2s_drvdata = dev_get_drvdata(&pdev->dev);
+
+ pr_debug("%s: Enter (pdev->name = %s).\n", __func__, pdev->name);
+
+ return ux500_msp_i2s_suspend(msp_i2s_drvdata);
+}
+
+int ux500_msp_drv_resume(struct platform_device *pdev)
+{
+ struct ux500_msp_i2s_drvdata *msp_i2s_drvdata = dev_get_drvdata(&pdev->dev);
+
+ pr_debug("%s: Enter (pdev->name = %s).\n", __func__, pdev->name);
+
+ return ux500_msp_i2s_resume(msp_i2s_drvdata);
+}
+
+static struct platform_driver msp_i2s_driver = {
+ .driver = {
+ .name = "ux500-msp-i2s",
+ .owner = THIS_MODULE,
+ },
+ .probe = ux500_msp_drv_probe,
+ .remove = ux500_msp_drv_remove,
+ .suspend = ux500_msp_drv_suspend,
+ .resume = ux500_msp_drv_resume,
+};
+
+static int __init ux500_msp_init(void)
+{
+ pr_info("%s: Register ux500-msp-dai platform driver.\n", __func__);
+ return platform_driver_register(&msp_i2s_driver);
+}
+
+static void __exit ux500_msp_exit(void)
+{
+ pr_info("%s: Unregister ux500-msp-dai platform driver.\n", __func__);
+ platform_driver_unregister(&msp_i2s_driver);
+}
+
+module_init(ux500_msp_init);
+module_exit(ux500_msp_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/ux500/ux500_msp_dai.h b/sound/soc/ux500/ux500_msp_dai.h
new file mode 100644
index 00000000000..c44894526f2
--- /dev/null
+++ b/sound/soc/ux500/ux500_msp_dai.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef UX500_MSP_DAI_H
+#define UX500_MSP_DAI_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <mach/msp.h>
+
+#define UX500_NBR_OF_DAI 4
+
+#define UX500_I2S_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+
+#define UX500_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
+
+#define FRAME_PER_SINGLE_SLOT_8_KHZ 31
+#define FRAME_PER_SINGLE_SLOT_16_KHZ 124
+#define FRAME_PER_SINGLE_SLOT_44_1_KHZ 63
+#define FRAME_PER_SINGLE_SLOT_48_KHZ 49
+#define FRAME_PER_2_SLOTS 31
+#define FRAME_PER_8_SLOTS 138
+#define FRAME_PER_16_SLOTS 277
+
+#ifndef CONFIG_SND_SOC_UX500_AB5500
+#define UX500_MSP_INTERNAL_CLOCK_FREQ 40000000
+#define UX500_MSP1_INTERNAL_CLOCK_FREQ UX500_MSP_INTERNAL_CLOCK_FREQ
+#else
+#define UX500_MSP_INTERNAL_CLOCK_FREQ 13000000
+#define UX500_MSP1_INTERNAL_CLOCK_FREQ (UX500_MSP_INTERNAL_CLOCK_FREQ * 2)
+#endif
+
+#define UX500_MSP_MIN_CHANNELS 1
+#define UX500_MSP_MAX_CHANNELS 8
+
+#define PLAYBACK_CONFIGURED 1
+#define CAPTURE_CONFIGURED 2
+
+enum ux500_msp_clock_id {
+ UX500_MSP_MASTER_CLOCK,
+};
+
+struct ux500_platform_drvdata {
+ struct ux500_msp_i2s_drvdata *msp_i2s_drvdata;
+ unsigned int fmt;
+ unsigned int tx_mask;
+ unsigned int rx_mask;
+ int slots;
+ int slot_width;
+ bool playback_active;
+ bool capture_active;
+ u8 configured;
+ int data_delay;
+ unsigned int master_clk;
+};
+
+extern struct snd_soc_dai_driver ux500_msp_dai_drv[UX500_NBR_OF_DAI];
+
+bool ux500_msp_dai_i2s_get_underrun_status(int dai_idx);
+dma_addr_t ux500_msp_dai_i2s_get_pointer(int dai_idx, int stream_id);
+int ux500_msp_dai_i2s_configure_sg(dma_addr_t dma_addr,
+				int period_cnt,
+ size_t period_len,
+ int dai_idx,
+ int stream_id);
+int ux500_msp_dai_i2s_send_data(void *data, size_t bytes, int dai_idx);
+int ux500_msp_dai_i2s_receive_data(void *data, size_t bytes, int dai_idx);
+
+int ux500_msp_dai_set_data_delay(struct snd_soc_dai *dai, int delay);
+
+#endif
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
new file mode 100644
index 00000000000..6df08c5d4d3
--- /dev/null
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Sandeep Kaushik <sandeep.kaushik@st.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <mach/hardware.h>
+#include <mach/msp.h>
+
+#include "ux500_msp_i2s.h"
+
+/* Protocol descriptors */
+static const struct msp_protocol_desc prot_descs[] = {
+ I2S_PROTOCOL_DESC,
+ PCM_PROTOCOL_DESC,
+ PCM_COMPAND_PROTOCOL_DESC,
+ AC97_PROTOCOL_DESC,
+ SPI_MASTER_PROTOCOL_DESC,
+ SPI_SLAVE_PROTOCOL_DESC,
+};
+
+static void ux500_msp_i2s_set_prot_desc_tx(struct msp *msp,
+ struct msp_protocol_desc *protocol_desc,
+ enum msp_data_size data_size)
+{
+ u32 temp_reg = 0;
+
+ temp_reg |= MSP_P2_ENABLE_BIT(protocol_desc->tx_phase_mode);
+ temp_reg |= MSP_P2_START_MODE_BIT(protocol_desc->tx_phase2_start_mode);
+ temp_reg |= MSP_P1_FRAME_LEN_BITS(protocol_desc->tx_frame_length_1);
+ temp_reg |= MSP_P2_FRAME_LEN_BITS(protocol_desc->tx_frame_length_2);
+ if (msp->def_elem_len) {
+ temp_reg |= MSP_P1_ELEM_LEN_BITS(protocol_desc->tx_element_length_1);
+ temp_reg |= MSP_P2_ELEM_LEN_BITS(protocol_desc->tx_element_length_2);
+ if (protocol_desc->tx_element_length_1 ==
+ protocol_desc->tx_element_length_2) {
+ msp->actual_data_size = protocol_desc->tx_element_length_1;
+ } else {
+ msp->actual_data_size = data_size;
+ }
+ } else {
+ temp_reg |= MSP_P1_ELEM_LEN_BITS(data_size);
+ temp_reg |= MSP_P2_ELEM_LEN_BITS(data_size);
+ msp->actual_data_size = data_size;
+ }
+ temp_reg |= MSP_DATA_DELAY_BITS(protocol_desc->tx_data_delay);
+ temp_reg |= MSP_SET_ENDIANNES_BIT(protocol_desc->tx_bit_transfer_format);
+ temp_reg |= MSP_FRAME_SYNC_POL(protocol_desc->tx_frame_sync_pol);
+ temp_reg |= MSP_DATA_WORD_SWAP(protocol_desc->tx_half_word_swap);
+ temp_reg |= MSP_SET_COMPANDING_MODE(protocol_desc->compression_mode);
+ temp_reg |= MSP_SET_FRAME_SYNC_IGNORE(protocol_desc->frame_sync_ignore);
+
+ writel(temp_reg, msp->registers + MSP_TCF);
+}
+
+static void ux500_msp_i2s_set_prot_desc_rx(struct msp *msp,
+ struct msp_protocol_desc *protocol_desc,
+ enum msp_data_size data_size)
+{
+ u32 temp_reg = 0;
+
+ temp_reg |= MSP_P2_ENABLE_BIT(protocol_desc->rx_phase_mode);
+ temp_reg |= MSP_P2_START_MODE_BIT(protocol_desc->rx_phase2_start_mode);
+ temp_reg |= MSP_P1_FRAME_LEN_BITS(protocol_desc->rx_frame_length_1);
+ temp_reg |= MSP_P2_FRAME_LEN_BITS(protocol_desc->rx_frame_length_2);
+ if (msp->def_elem_len) {
+ temp_reg |= MSP_P1_ELEM_LEN_BITS(protocol_desc->rx_element_length_1);
+ temp_reg |= MSP_P2_ELEM_LEN_BITS(protocol_desc->rx_element_length_2);
+ if (protocol_desc->rx_element_length_1 ==
+ protocol_desc->rx_element_length_2) {
+ msp->actual_data_size = protocol_desc->rx_element_length_1;
+ } else {
+ msp->actual_data_size = data_size;
+ }
+ } else {
+ temp_reg |= MSP_P1_ELEM_LEN_BITS(data_size);
+ temp_reg |= MSP_P2_ELEM_LEN_BITS(data_size);
+ msp->actual_data_size = data_size;
+ }
+
+ temp_reg |= MSP_DATA_DELAY_BITS(protocol_desc->rx_data_delay);
+ temp_reg |= MSP_SET_ENDIANNES_BIT(protocol_desc->rx_bit_transfer_format);
+ temp_reg |= MSP_FRAME_SYNC_POL(protocol_desc->rx_frame_sync_pol);
+ temp_reg |= MSP_DATA_WORD_SWAP(protocol_desc->rx_half_word_swap);
+ temp_reg |= MSP_SET_COMPANDING_MODE(protocol_desc->expansion_mode);
+ temp_reg |= MSP_SET_FRAME_SYNC_IGNORE(protocol_desc->frame_sync_ignore);
+
+ writel(temp_reg, msp->registers + MSP_RCF);
+
+}
+
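+/*
+ * Program the TX/RX configuration registers (TCF/RCF) and the clock-polarity
+ * bits in GCR, using either one of the default protocol descriptors above or
+ * the caller-supplied descriptor from the config.
+ */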
+static int ux500_msp_i2s_configure_protocol(struct msp *msp,
+ struct msp_config *config)
+{
+ int direction;
+ struct msp_protocol_desc *protocol_desc;
+ enum msp_data_size data_size;
+ u32 temp_reg = 0;
+
+ data_size = config->data_size;
+ msp->def_elem_len = config->def_elem_len;
+ direction = config->direction;
+ if (config->default_protocol_desc == 1) {
+ if (config->protocol >= MSP_INVALID_PROTOCOL) {
+ pr_err("%s: ERROR: Invalid protocol!\n", __func__);
+ return -EINVAL;
+ }
+ protocol_desc =
+ (struct msp_protocol_desc *)&prot_descs[config->protocol];
+ } else {
+ protocol_desc = (struct msp_protocol_desc *)&config->protocol_desc;
+ }
+
+ if (data_size < MSP_DATA_BITS_DEFAULT || data_size > MSP_DATA_BITS_32) {
+ pr_err("%s: ERROR: Invalid data-size requested (data_size = %d)!\n",
+ __func__, data_size);
+ return -EINVAL;
+ }
+
+ switch (direction) {
+ case MSP_TRANSMIT_MODE:
+ ux500_msp_i2s_set_prot_desc_tx(msp, protocol_desc, data_size);
+ break;
+ case MSP_RECEIVE_MODE:
+ ux500_msp_i2s_set_prot_desc_rx(msp, protocol_desc, data_size);
+ break;
+ case MSP_BOTH_T_R_MODE:
+ ux500_msp_i2s_set_prot_desc_tx(msp, protocol_desc, data_size);
+ ux500_msp_i2s_set_prot_desc_rx(msp, protocol_desc, data_size);
+ break;
+ default:
+ pr_err("%s: ERROR: Invalid direction requested (direction = %d)!\n",
+ __func__, direction);
+ return -EINVAL;
+ }
+
+ /* The following clock-polarity setup applies to both the RX and TX paths and cannot be split per direction. */
+ temp_reg = readl(msp->registers + MSP_GCR) & ~TX_CLK_POL_RISING;
+ temp_reg |= MSP_TX_CLKPOL_BIT(~protocol_desc->tx_clock_pol);
+ writel(temp_reg, msp->registers + MSP_GCR);
+ temp_reg = readl(msp->registers + MSP_GCR) & ~RX_CLK_POL_RISING;
+ temp_reg |= MSP_RX_CLKPOL_BIT(protocol_desc->rx_clock_pol);
+ writel(temp_reg, msp->registers + MSP_GCR);
+
+ return 0;
+}
+
+static int ux500_msp_i2s_configure_clock(struct msp *msp, struct msp_config *config)
+{
+ u32 reg_val_GCR;
+ u32 frame_per = 0;
+ u32 sck_div = 0;
+ u32 frame_width = 0;
+ u32 temp_reg = 0;
+ u32 bit_clock = 0;
+ struct msp_protocol_desc *protocol_desc = NULL;
+
+ reg_val_GCR = readl(msp->registers + MSP_GCR);
+ writel(reg_val_GCR & ~SRG_ENABLE, msp->registers + MSP_GCR);
+
+ if (config->default_protocol_desc)
+ protocol_desc =
+ (struct msp_protocol_desc *)&prot_descs[config->protocol];
+ else
+ protocol_desc = (struct msp_protocol_desc *)&config->protocol_desc;
+
+ switch (config->protocol) {
+ case MSP_PCM_PROTOCOL:
+ case MSP_PCM_COMPAND_PROTOCOL:
+ frame_width = protocol_desc->frame_width;
+ sck_div = config->input_clock_freq / (config->frame_freq *
+ (protocol_desc->total_clocks_for_one_frame));
+ frame_per = protocol_desc->frame_period;
+ break;
+ case MSP_I2S_PROTOCOL:
+ frame_width = protocol_desc->frame_width;
+ sck_div = config->input_clock_freq / (config->frame_freq *
+ (protocol_desc->total_clocks_for_one_frame));
+ frame_per = protocol_desc->frame_period;
+ break;
+ case MSP_AC97_PROTOCOL:
+ /* Not supported */
+ pr_err("%s: ERROR: AC97 protocol not supported!\n", __func__);
+ return -ENOSYS;
+ default:
+ pr_err("%s: ERROR: Unknown protocol (%d)!\n",
+ __func__,
+ config->protocol);
+ return -EINVAL;
+ }
+
+ temp_reg = (sck_div - 1) & SCK_DIV_MASK;
+ temp_reg |= FRAME_WIDTH_BITS(frame_width);
+ temp_reg |= FRAME_PERIOD_BITS(frame_per);
+ writel(temp_reg, msp->registers + MSP_SRG);
+
+ bit_clock = (config->input_clock_freq)/(sck_div + 1);
+ /* If the bit-clock is higher than 19.2 MHz, Vape must run at 100% OPP.
+ * Only request 100% OPP when the bit-clock is actually used, i.e. when
+ * the MSP is master.
+ */
+ if ((bit_clock > 19200000) && ((config->tx_clock_sel != 0) || (config->rx_clock_sel != 0))) {
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "ux500-msp-i2s", 100);
+ msp->vape_opp_constraint = 1;
+ } else {
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "ux500-msp-i2s", 50);
+ msp->vape_opp_constraint = 0;
+ }
+
+ /* Enable clock */
+ udelay(100);
+ reg_val_GCR = readl(msp->registers + MSP_GCR);
+ writel(reg_val_GCR | SRG_ENABLE, msp->registers + MSP_GCR);
+ udelay(100);
+
+ return 0;
+}
+
+static int ux500_msp_i2s_configure_multichannel(struct msp *msp, struct msp_config *config)
+{
+ struct msp_protocol_desc *protocol_desc;
+ struct msp_multichannel_config *mcfg;
+ u32 reg_val_MCR;
+
+ if (config->default_protocol_desc == 1) {
+ if (config->protocol >= MSP_INVALID_PROTOCOL) {
+ pr_err("%s: ERROR: Invalid protocol (%d)!\n",
+ __func__,
+ config->protocol);
+ return -EINVAL;
+ }
+ protocol_desc = (struct msp_protocol_desc *)
+ &prot_descs[config->protocol];
+ } else {
+ protocol_desc = (struct msp_protocol_desc *)&config->protocol_desc;
+ }
+
+ mcfg = &config->multichannel_config;
+ if (mcfg->tx_multichannel_enable) {
+ if (protocol_desc->tx_phase_mode == MSP_SINGLE_PHASE) {
+ reg_val_MCR = readl(msp->registers + MSP_MCR);
+ writel(reg_val_MCR |
+ (mcfg->tx_multichannel_enable ? 1 << TMCEN_BIT : 0),
+ msp->registers + MSP_MCR);
+ writel(mcfg->tx_channel_0_enable,
+ msp->registers + MSP_TCE0);
+ writel(mcfg->tx_channel_1_enable,
+ msp->registers + MSP_TCE1);
+ writel(mcfg->tx_channel_2_enable,
+ msp->registers + MSP_TCE2);
+ writel(mcfg->tx_channel_3_enable,
+ msp->registers + MSP_TCE3);
+ } else {
+ pr_err("%s: ERROR: Only single-phase supported (TX-mode: %d)!\n",
+ __func__, protocol_desc->tx_phase_mode);
+ return -EINVAL;
+ }
+ }
+ if (mcfg->rx_multichannel_enable) {
+ if (protocol_desc->rx_phase_mode == MSP_SINGLE_PHASE) {
+ reg_val_MCR = readl(msp->registers + MSP_MCR);
+ writel(reg_val_MCR |
+ (mcfg->rx_multichannel_enable ? 1 << RMCEN_BIT : 0),
+ msp->registers + MSP_MCR);
+ writel(mcfg->rx_channel_0_enable,
+ msp->registers + MSP_RCE0);
+ writel(mcfg->rx_channel_1_enable,
+ msp->registers + MSP_RCE1);
+ writel(mcfg->rx_channel_2_enable,
+ msp->registers + MSP_RCE2);
+ writel(mcfg->rx_channel_3_enable,
+ msp->registers + MSP_RCE3);
+ } else {
+ pr_err("%s: ERROR: Only single-phase supported (RX-mode: %d)!\n",
+ __func__, protocol_desc->rx_phase_mode);
+ return -EINVAL;
+ }
+ if (mcfg->rx_comparison_enable_mode) {
+ reg_val_MCR = readl(msp->registers + MSP_MCR);
+ writel(reg_val_MCR |
+ (mcfg->rx_comparison_enable_mode << RCMPM_BIT),
+ msp->registers + MSP_MCR);
+
+ writel(mcfg->comparison_mask,
+ msp->registers + MSP_RCM);
+ writel(mcfg->comparison_value,
+ msp->registers + MSP_RCV);
+
+ }
+ }
+
+ return 0;
+}
+
+void ux500_msp_i2s_configure_dma(struct msp *msp, struct msp_config *config)
+{
+ struct stedma40_chan_cfg *rx_dma_info = msp->dma_cfg_rx;
+ struct stedma40_chan_cfg *tx_dma_info = msp->dma_cfg_tx;
+ dma_cap_mask_t mask;
+ u16 word_width;
+ bool rx_active, tx_active;
+
+ if (msp->tx_pipeid != NULL) {
+ dma_release_channel(msp->tx_pipeid);
+ msp->tx_pipeid = NULL;
+ }
+
+ if (msp->rx_pipeid != NULL) {
+ dma_release_channel(msp->rx_pipeid);
+ msp->rx_pipeid = NULL;
+ }
+
+ switch (config->data_size) {
+ case MSP_DATA_BITS_32:
+ word_width = STEDMA40_WORD_WIDTH;
+ break;
+ case MSP_DATA_BITS_16:
+ word_width = STEDMA40_HALFWORD_WIDTH;
+ break;
+ case MSP_DATA_BITS_8:
+ word_width = STEDMA40_BYTE_WIDTH;
+ break;
+ default:
+ word_width = STEDMA40_WORD_WIDTH;
+ pr_warn("%s: Unknown data-size (%d)! Assuming 32 bits.\n",
+ __func__, config->data_size);
+ }
+
+ rx_active = (config->direction == MSP_RECEIVE_MODE ||
+ config->direction == MSP_BOTH_T_R_MODE);
+ tx_active = (config->direction == MSP_TRANSMIT_MODE ||
+ config->direction == MSP_BOTH_T_R_MODE);
+
+ if (rx_active) {
+ rx_dma_info->src_info.data_width = word_width;
+ rx_dma_info->dst_info.data_width = word_width;
+ }
+ if (tx_active) {
+ tx_dma_info->src_info.data_width = word_width;
+ tx_dma_info->dst_info.data_width = word_width;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ if (rx_active)
+ msp->rx_pipeid = dma_request_channel(mask, stedma40_filter, rx_dma_info);
+
+ if (tx_active)
+ msp->tx_pipeid = dma_request_channel(mask, stedma40_filter, tx_dma_info);
+}
+
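+/*
+ * Prepare and submit a cyclic DMA descriptor for the requested direction,
+ * attach the matching completion callback, and set the TX/RX enable bit in
+ * GCR so the MSP starts draining/filling its FIFO.
+ */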
+static int ux500_msp_i2s_dma_xfer(struct msp *msp, struct i2s_message *msg)
+{
+ dma_cookie_t status_submit;
+ int direction, enable_bit;
+ u32 reg_val_GCR;
+ struct dma_chan *pipeid;
+ struct dma_async_tx_descriptor *cdesc;
+
+ if (msg->i2s_direction == I2S_DIRECTION_TX) {
+ direction = DMA_TO_DEVICE;
+ pipeid = msp->tx_pipeid;
+ enable_bit = TX_ENABLE;
+ pr_debug("%s: Direction: TX\n", __func__);
+ } else {
+ direction = DMA_FROM_DEVICE;
+ pipeid = msp->rx_pipeid;
+ enable_bit = RX_ENABLE;
+ pr_debug("%s: Direction: RX\n", __func__);
+ }
+
+ pr_debug("%s: msg->buf_addr = %p\n", __func__, (void *)msg->buf_addr);
+ pr_debug("%s: buf_len = %d\n", __func__, msg->buf_len);
+ pr_debug("%s: perios_len = %d\n", __func__, msg->period_len);
+
+ /* Set up the cyclic DMA descriptor */
+ cdesc = pipeid->device->device_prep_dma_cyclic(pipeid,
+ msg->buf_addr,
+ msg->buf_len,
+ msg->period_len,
+ direction);
+ if (IS_ERR(cdesc)) {
+ pr_err("%s: ERROR: device_prep_dma_cyclic failed (%ld)!\n",
+ __func__,
+ PTR_ERR(cdesc));
+ return -EINVAL;
+ }
+
+ /* Submit to the dma */
+ if (msg->i2s_direction == I2S_DIRECTION_TX) {
+ cdesc->callback = msp->xfer_data.tx_handler;
+ cdesc->callback_param = msp->xfer_data.tx_callback_data;
+ } else {
+ cdesc->callback = msp->xfer_data.rx_handler;
+ cdesc->callback_param = msp->xfer_data.rx_callback_data;
+ }
+ status_submit = dmaengine_submit(cdesc);
+ if (dma_submit_error(status_submit)) {
+ pr_err("%s: ERROR: dmaengine_submit failed!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Start the dma */
+ dma_async_issue_pending(pipeid);
+ reg_val_GCR = readl(msp->registers + MSP_GCR);
+ writel(reg_val_GCR | enable_bit, msp->registers + MSP_GCR);
+
+ return 0;
+}
+
+static int ux500_msp_i2s_enable(struct msp *msp, struct msp_config *config)
+{
+ int status = 0;
+ u32 reg_val_DMACR, reg_val_GCR;
+
+ if (config->work_mode != MSP_DMA_MODE) {
+ pr_err("%s: ERROR: Only DMA-mode is supported (msp->work_mode = %d)\n",
+ __func__,
+ msp->work_mode);
+ return -EINVAL;
+ }
+ msp->work_mode = config->work_mode;
+
+ /* Run the platform init hook only when the MSP is still idle */
+ if (msp->msp_state == MSP_STATE_IDLE) {
+ if (msp->plat_init) {
+ status = msp->plat_init();
+ if (status) {
+ pr_err("%s: ERROR: Failed to init MSP (%d)!\n",
+ __func__,
+ status);
+ return status;
+ }
+ }
+ }
+
+ /* Configure msp with protocol dependent settings */
+ ux500_msp_i2s_configure_protocol(msp, config);
+ ux500_msp_i2s_configure_clock(msp, config);
+ if (config->multichannel_configured == 1) {
+ status = ux500_msp_i2s_configure_multichannel(msp, config);
+ if (status)
+ pr_warn("%s: WARN: ux500_msp_i2s_configure_multichannel failed (%d)!\n",
+ __func__, status);
+ }
+
+ /* Make sure the correct DMA-directions are configured */
+ if ((config->direction == MSP_RECEIVE_MODE) ||
+ (config->direction == MSP_BOTH_T_R_MODE))
+ if (!msp->dma_cfg_rx) {
+ pr_err("%s: ERROR: MSP RX-mode is not configured!", __func__);
+ return -EINVAL;
+ }
+ if ((config->direction == MSP_TRANSMIT_MODE) ||
+ (config->direction == MSP_BOTH_T_R_MODE))
+ if (!msp->dma_cfg_tx) {
+ pr_err("%s: ERROR: MSP TX-mode is not configured!", __func__);
+ return -EINVAL;
+ }
+
+ reg_val_DMACR = readl(msp->registers + MSP_DMACR);
+ switch (config->direction) {
+ case MSP_TRANSMIT_MODE:
+ writel(reg_val_DMACR | TX_DMA_ENABLE,
+ msp->registers + MSP_DMACR);
+
+ msp->xfer_data.tx_handler = config->handler;
+ msp->xfer_data.tx_callback_data = config->tx_callback_data;
+
+ break;
+ case MSP_RECEIVE_MODE:
+ writel(reg_val_DMACR | RX_DMA_ENABLE,
+ msp->registers + MSP_DMACR);
+
+ msp->xfer_data.rx_handler = config->handler;
+ msp->xfer_data.rx_callback_data = config->rx_callback_data;
+
+ break;
+ case MSP_BOTH_T_R_MODE:
+ writel(reg_val_DMACR | RX_DMA_ENABLE | TX_DMA_ENABLE,
+ msp->registers + MSP_DMACR);
+
+ msp->xfer_data.tx_handler = config->handler;
+ msp->xfer_data.rx_handler = config->handler;
+ msp->xfer_data.tx_callback_data = config->tx_callback_data;
+ msp->xfer_data.rx_callback_data = config->rx_callback_data;
+
+ break;
+ default:
+ pr_err("%s: ERROR: Illegal MSP direction (config->direction = %d)!",
+ __func__,
+ config->direction);
+ if (msp->plat_exit)
+ msp->plat_exit();
+ return -EINVAL;
+ }
+ ux500_msp_i2s_configure_dma(msp, config);
+
+ msp->transfer = ux500_msp_i2s_dma_xfer;
+
+ writel(config->iodelay, msp->registers + MSP_IODLY);
+
+ /* Enable frame generation logic */
+ reg_val_GCR = readl(msp->registers + MSP_GCR);
+ writel(reg_val_GCR | FRAME_GEN_ENABLE, msp->registers + MSP_GCR);
+
+ return status;
+}
+
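+/*
+ * Drain the RX FIFO by reading the data register until the FIFO-empty flag
+ * is set, bounded to 32 reads so a stuck FIFO cannot hang the driver.
+ */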
+static void flush_fifo_rx(struct msp *msp)
+{
+ u32 reg_val_DR, reg_val_GCR, reg_val_FLR;
+ u32 limit = 32;
+
+ reg_val_GCR = readl(msp->registers + MSP_GCR);
+ writel(reg_val_GCR | RX_ENABLE, msp->registers + MSP_GCR);
+
+ reg_val_FLR = readl(msp->registers + MSP_FLR);
+ while (!(reg_val_FLR & RX_FIFO_EMPTY) && limit--) {
+ reg_val_DR = readl(msp->registers + MSP_DR);
+ reg_val_FLR = readl(msp->registers + MSP_FLR);
+ }
+
+ writel(reg_val_GCR, msp->registers + MSP_GCR);
+}
+
+static void flush_fifo_tx(struct msp *msp)
+{
+ u32 reg_val_TSTDR, reg_val_GCR, reg_val_FLR;
+ u32 limit = 32;
+
+ reg_val_GCR = readl(msp->registers + MSP_GCR);
+ writel(reg_val_GCR | TX_ENABLE, msp->registers + MSP_GCR);
+ writel(MSP_ITCR_ITEN | MSP_ITCR_TESTFIFO, msp->registers + MSP_ITCR);
+
+ reg_val_FLR = readl(msp->registers + MSP_FLR);
+ while (!(reg_val_FLR & TX_FIFO_EMPTY) && limit--) {
+ reg_val_TSTDR = readl(msp->registers + MSP_TSTDR);
+ reg_val_FLR = readl(msp->registers + MSP_FLR);
+ }
+ writel(0x0, msp->registers + MSP_ITCR);
+ writel(reg_val_GCR, msp->registers + MSP_GCR);
+}
+
+int ux500_msp_i2s_open(struct ux500_msp_i2s_drvdata *drvdata, struct msp_config *msp_config)
+{
+ struct msp *msp = drvdata->msp;
+ u32 old_reg, new_reg, mask;
+ int res;
+
+ if (in_interrupt()) {
+ pr_err("%s: ERROR: Open called in interrupt context!\n", __func__);
+ return -EPERM;
+ }
+
+ /* Prevent two simultaneous configurations of the MSP */
+ down(&msp->lock);
+
+ /* Don't enable the regulator if this is MSP1 or MSP3 */
+ if (!(msp->reg_enabled) && msp->id != MSP_1_I2S_CONTROLLER
+ && msp->id != MSP_3_I2S_CONTROLLER) {
+ res = regulator_enable(drvdata->reg_vape);
+ if (res != 0) {
+ pr_err("%s: Failed to enable regulator!\n", __func__);
+ up(&msp->lock);
+ return res;
+ }
+ msp->reg_enabled = 1;
+ }
+
+ switch (msp->users) {
+ case 0:
+ clk_enable(msp->clk);
+ msp->direction = msp_config->direction;
+ break;
+ case 1:
+ if (msp->direction == MSP_BOTH_T_R_MODE ||
+ msp_config->direction == msp->direction ||
+ msp_config->direction == MSP_BOTH_T_R_MODE) {
+ pr_warn("%s: WARN: MSP is in use (direction = %d)!\n",
+ __func__, msp_config->direction);
+ up(&msp->lock);
+ return -EBUSY;
+ }
+ msp->direction = MSP_BOTH_T_R_MODE;
+ break;
+ default:
+ pr_warn("%s: MSP in use in (both directions)!\n", __func__);
+ up(&msp->lock);
+ return -EBUSY;
+ }
+ msp->users++;
+
+ /* First do the global config register */
+ mask =
+ RX_CLK_SEL_MASK | TX_CLK_SEL_MASK | RX_FRAME_SYNC_MASK |
+ TX_FRAME_SYNC_MASK | RX_SYNC_SEL_MASK | TX_SYNC_SEL_MASK |
+ RX_FIFO_ENABLE_MASK | TX_FIFO_ENABLE_MASK | SRG_CLK_SEL_MASK |
+ LOOPBACK_MASK | TX_EXTRA_DELAY_MASK;
+
+ new_reg = (msp_config->tx_clock_sel | msp_config->rx_clock_sel |
+ msp_config->rx_frame_sync_pol | msp_config->tx_frame_sync_pol |
+ msp_config->rx_frame_sync_sel | msp_config->tx_frame_sync_sel |
+ msp_config->rx_fifo_config | msp_config->tx_fifo_config |
+ msp_config->srg_clock_sel | msp_config->loopback_enable |
+ msp_config->tx_data_enable);
+
+ old_reg = readl(msp->registers + MSP_GCR);
+ old_reg &= ~mask;
+ new_reg |= old_reg;
+ writel(new_reg, msp->registers + MSP_GCR);
+
+ if (ux500_msp_i2s_enable(msp, msp_config) != 0) {
+ pr_err("%s: ERROR: ux500_msp_i2s_enable failed!\n", __func__);
+ up(&msp->lock);
+ return -EBUSY;
+ }
+ if (msp_config->loopback_enable & 0x80)
+ msp->loopback_enable = 1;
+
+ /* Flush FIFOs */
+ flush_fifo_tx(msp);
+ flush_fifo_rx(msp);
+
+ msp->msp_state = MSP_STATE_CONFIGURED;
+ up(&msp->lock);
+ return 0;
+}
+
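+/* Timeout handler for the notify timer: flag an I/O error if polling stalls. */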
+static void func_notify_timer(unsigned long data)
+{
+ struct msp *msp = (struct msp *)data;
+ if (msp->polling_flag) {
+ msp->msp_io_error = 1;
+ pr_err("%s: ERROR: Polling timeout!\n", __func__);
+ del_timer(&msp->notify_timer);
+ }
+}
+
+int ux500_msp_i2s_transfer(struct ux500_msp_i2s_drvdata *drvdata, struct i2s_message *message)
+{
+ struct msp *msp = drvdata->msp;
+ int status = 0;
+
+ if (!message) {
+ pr_err("%s: ERROR: i2s_message == NULL!\n", __func__);
+ return -EINVAL;
+ }
+ if (msp->msp_state == MSP_STATE_IDLE) {
+ pr_err("%s: ERROR: MSP in idle-state!\n", __func__);
+ return -EPERM;
+ }
+
+ msp->msp_state = MSP_STATE_RUN;
+ if (msp->transfer)
+ status = msp->transfer(msp, message);
+
+ if (msp->msp_state == MSP_STATE_RUN)
+ msp->msp_state = MSP_STATE_CONFIGURED;
+
+ return status;
+}
+
+static void ux500_msp_i2s_disable_rx(struct msp *msp)
+{
+ u32 reg_val_GCR, reg_val_DMACR, reg_val_IMSC;
+
+ reg_val_GCR = readl(msp->registers + MSP_GCR);
+ writel(reg_val_GCR & ~RX_ENABLE, msp->registers + MSP_GCR);
+ reg_val_DMACR = readl(msp->registers + MSP_DMACR);
+ writel(reg_val_DMACR & ~RX_DMA_ENABLE, msp->registers + MSP_DMACR);
+ reg_val_IMSC = readl(msp->registers + MSP_IMSC);
+ writel(reg_val_IMSC &
+ ~(RECEIVE_SERVICE_INT | RECEIVE_OVERRUN_ERROR_INT),
+ msp->registers + MSP_IMSC);
+ msp->xfer_data.message.rxbytes = 0;
+ msp->xfer_data.message.rx_offset = 0;
+ msp->xfer_data.message.rxdata = NULL;
+ msp->read = NULL;
+}
+
+static void ux500_msp_i2s_disable_tx(struct msp *msp)
+{
+ u32 reg_val_GCR, reg_val_DMACR, reg_val_IMSC;
+
+ reg_val_GCR = readl(msp->registers + MSP_GCR);
+ writel(reg_val_GCR & ~TX_ENABLE, msp->registers + MSP_GCR);
+ reg_val_DMACR = readl(msp->registers + MSP_DMACR);
+ writel(reg_val_DMACR & ~TX_DMA_ENABLE, msp->registers + MSP_DMACR);
+ reg_val_IMSC = readl(msp->registers + MSP_IMSC);
+ writel(reg_val_IMSC &
+ ~(TRANSMIT_SERVICE_INT | TRANSMIT_UNDERRUN_ERR_INT),
+ msp->registers + MSP_IMSC);
+ msp->xfer_data.message.txbytes = 0;
+ msp->xfer_data.message.tx_offset = 0;
+ msp->xfer_data.message.txdata = NULL;
+ msp->write = NULL;
+}
+
+static int ux500_msp_i2s_disable(struct msp *msp, int direction, enum i2s_flag flag)
+{
+ u32 reg_val_GCR;
+ int status = 0;
+
+ reg_val_GCR = readl(msp->registers + MSP_GCR);
+ if (!(reg_val_GCR & (TX_ENABLE | RX_ENABLE)))
+ return 0;
+
+ if (flag == DISABLE_ALL || flag == DISABLE_TRANSMIT) {
+ if (msp->tx_pipeid != NULL) {
+ dmaengine_terminate_all(msp->tx_pipeid);
+ dma_release_channel(msp->tx_pipeid);
+ msp->tx_pipeid = NULL;
+ }
+ }
+ if ((flag == DISABLE_ALL || flag == DISABLE_RECEIVE)) {
+ if (msp->rx_pipeid != NULL) {
+ dmaengine_terminate_all(msp->rx_pipeid);
+ dma_release_channel(msp->rx_pipeid);
+ msp->rx_pipeid = NULL;
+ }
+ }
+
+ if (flag == DISABLE_TRANSMIT)
+ ux500_msp_i2s_disable_tx(msp);
+ else if (flag == DISABLE_RECEIVE)
+ ux500_msp_i2s_disable_rx(msp);
+ else {
+ reg_val_GCR = readl(msp->registers + MSP_GCR);
+ writel(reg_val_GCR | LOOPBACK_MASK,
+ msp->registers + MSP_GCR);
+
+ /* Flush TX-FIFO */
+ flush_fifo_tx(msp);
+
+ /* Disable TX-channel */
+ writel((readl(msp->registers + MSP_GCR) &
+ (~TX_ENABLE)), msp->registers + MSP_GCR);
+
+ /* Flush RX-FIFO */
+ flush_fifo_rx(msp);
+
+ /* Disable Loopback and Receive channel */
+ writel((readl(msp->registers + MSP_GCR) &
+ (~(RX_ENABLE | LOOPBACK_MASK))),
+ msp->registers + MSP_GCR);
+
+ ux500_msp_i2s_disable_tx(msp);
+ ux500_msp_i2s_disable_rx(msp);
+
+ }
+
+ /* disable sample rate and frame generators */
+ if (flag == DISABLE_ALL) {
+ msp->msp_state = MSP_STATE_IDLE;
+ writel((readl(msp->registers + MSP_GCR) &
+ (~(FRAME_GEN_ENABLE | SRG_ENABLE))),
+ msp->registers + MSP_GCR);
+ memset(&msp->xfer_data, 0, sizeof(struct trans_data));
+ if (msp->plat_exit)
+ status = msp->plat_exit();
+ if (status)
+ pr_warn("%s: WARN: ux500_msp_i2s_exit failed (%d)!\n",
+ __func__, status);
+ msp->transfer = NULL;
+ writel(0, msp->registers + MSP_GCR);
+ writel(0, msp->registers + MSP_TCF);
+ writel(0, msp->registers + MSP_RCF);
+ writel(0, msp->registers + MSP_DMACR);
+ writel(0, msp->registers + MSP_SRG);
+ writel(0, msp->registers + MSP_MCR);
+ writel(0, msp->registers + MSP_RCM);
+ writel(0, msp->registers + MSP_RCV);
+ writel(0, msp->registers + MSP_TCE0);
+ writel(0, msp->registers + MSP_TCE1);
+ writel(0, msp->registers + MSP_TCE2);
+ writel(0, msp->registers + MSP_TCE3);
+ writel(0, msp->registers + MSP_RCE0);
+ writel(0, msp->registers + MSP_RCE1);
+ writel(0, msp->registers + MSP_RCE2);
+ writel(0, msp->registers + MSP_RCE3);
+ }
+
+ return status;
+}
+
+int ux500_msp_i2s_close(struct ux500_msp_i2s_drvdata *drvdata, enum i2s_flag flag)
+{
+ struct msp *msp = drvdata->msp;
+ int status = 0;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ down(&msp->lock);
+
+ if (msp->users == 0) {
+ pr_err("%s: ERROR: MSP already closed!\n", __func__);
+ status = -EINVAL;
+ goto end;
+ }
+ pr_debug("%s: msp->users = %d, flag = %d\n", __func__, msp->users, flag);
+
+ /* DISABLE_ALL releases every user at once; otherwise drop a single user */
+ msp->users = flag == DISABLE_ALL ? 0 : msp->users - 1;
+ if (msp->users)
+ status = ux500_msp_i2s_disable(msp, MSP_BOTH_T_R_MODE, flag);
+ else {
+ status = ux500_msp_i2s_disable(msp, MSP_BOTH_T_R_MODE, DISABLE_ALL);
+ clk_disable(msp->clk);
+ if (msp->reg_enabled) {
+ status = regulator_disable(drvdata->reg_vape);
+ msp->reg_enabled = 0;
+ }
+ if (status != 0) {
+ pr_err("%s: ERROR: Failed to disable regulator (%d)!\n",
+ __func__, status);
+ clk_enable(msp->clk);
+ goto end;
+ }
+ }
+ if (status)
+ goto end;
+ if (msp->users)
+ msp->direction = flag == DISABLE_TRANSMIT ?
+ MSP_RECEIVE_MODE : MSP_TRANSMIT_MODE;
+
+ if (msp->vape_opp_constraint == 1) {
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "ux500_msp_i2s", 50);
+ msp->vape_opp_constraint = 0;
+ }
+end:
+ up(&msp->lock);
+ return status;
+
+}
+
+int ux500_msp_i2s_hw_status(struct ux500_msp_i2s_drvdata *drvdata)
+{
+ struct msp *msp = drvdata->msp;
+ int status;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ status = readl(msp->registers + MSP_RIS) & 0xee;
+ if (status)
+ writel(status, msp->registers + MSP_ICR);
+
+ return status;
+}
+
+dma_addr_t ux500_msp_i2s_get_pointer(struct ux500_msp_i2s_drvdata *drvdata,
+ enum i2s_direction_t i2s_direction)
+{
+ struct msp *msp = drvdata->msp;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ return (i2s_direction == I2S_DIRECTION_TX) ?
+ stedma40_get_src_addr(msp->tx_pipeid) :
+ stedma40_get_dst_addr(msp->rx_pipeid);
+}
+
+struct ux500_msp_i2s_drvdata *ux500_msp_i2s_init(struct platform_device *pdev,
+ struct msp_i2s_platform_data *platform_data)
+{
+ struct ux500_msp_i2s_drvdata *msp_i2s_drvdata;
+ int irq;
+ struct resource *res = NULL;
+ struct i2s_controller *i2s_cont;
+ struct msp *msp;
+
+ pr_debug("%s: Enter (pdev->name = %s).\n", __func__, pdev->name);
+
+ msp_i2s_drvdata = kzalloc(sizeof(struct ux500_msp_i2s_drvdata), GFP_KERNEL);
+ msp_i2s_drvdata->msp = kzalloc(sizeof(struct msp), GFP_KERNEL);
+ msp = msp_i2s_drvdata->msp;
+
+ msp->id = platform_data->id;
+ msp_i2s_drvdata->id = msp->id;
+ pr_debug("msp_i2s_drvdata->id = %d\n", msp_i2s_drvdata->id);
+
+ msp->plat_init = platform_data->msp_i2s_init;
+ msp->plat_exit = platform_data->msp_i2s_exit;
+ msp->dma_cfg_rx = platform_data->msp_i2s_dma_rx;
+ msp->dma_cfg_tx = platform_data->msp_i2s_dma_tx;
+
+ sema_init(&msp->lock, 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ pr_err("%s: ERROR: Unable to get resource!\n", __func__);
+ goto free_msp;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ goto free_msp;
+ msp->irq = irq;
+
+ msp->registers = ioremap(res->start, resource_size(res));
+ if (msp->registers == NULL)
+ goto free_msp;
+
+ msp_i2s_drvdata->reg_vape = regulator_get(NULL, "v-ape");
+ if (IS_ERR(msp_i2s_drvdata->reg_vape)) {
+ pr_err("%s: ERROR: Failed to get Vape supply (%d)!\n",
+ __func__, (int)PTR_ERR(msp_i2s_drvdata->reg_vape));
+ goto free_irq;
+ }
+ dev_set_drvdata(&pdev->dev, msp_i2s_drvdata);
+
+ prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, (char *)pdev->name, 50);
+ msp->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(msp->clk)) {
+ pr_err("%s: ERROR: clk_get failed (%d)!\n",
+ __func__, (int)PTR_ERR(msp->clk));
+ goto free_irq;
+ }
+
+ init_timer(&msp->notify_timer);
+ msp->notify_timer.expires = jiffies + msecs_to_jiffies(1000);
+ msp->notify_timer.function = func_notify_timer;
+ msp->notify_timer.data = (unsigned long)msp;
+
+ msp->rx_pipeid = NULL;
+ msp->tx_pipeid = NULL;
+ msp->read = NULL;
+ msp->write = NULL;
+ msp->transfer = NULL;
+ msp->msp_state = MSP_STATE_IDLE;
+ msp->loopback_enable = 0;
+
+ /* Allocate the I2S controller and attach it to this MSP instance. */
+ i2s_cont = kzalloc(sizeof(*i2s_cont), GFP_KERNEL);
+ if (!i2s_cont) {
+ pr_err("%s: ERROR: Failed to allocate struct i2s_cont (kzalloc)!\n",
+ __func__);
+ goto del_timer;
+ }
+ i2s_cont->dev.parent = &pdev->dev;
+ i2s_cont->data = (void *)msp;
+ i2s_cont->id = (s16)msp->id;
+ snprintf(i2s_cont->name,
+ sizeof(i2s_cont->name),
+ "ux500-msp-i2s.%04x",
+ msp->id);
+ pr_debug("I2S device-name :%s\n", i2s_cont->name);
+ msp->i2s_cont = i2s_cont;
+
+ return msp_i2s_drvdata;
+
+del_timer:
+ del_timer_sync(&msp->notify_timer);
+ clk_put(msp->clk);
+free_irq:
+ iounmap(msp->registers);
+free_msp:
+ kfree(msp);
+ return NULL;
+}
+
+int ux500_msp_i2s_exit(struct ux500_msp_i2s_drvdata *drvdata)
+{
+ struct msp *msp = drvdata->msp;
+ int status = 0;
+
+ pr_debug("%s: Enter (drvdata->id = %d).\n", __func__, drvdata->id);
+
+ device_unregister(&msp->i2s_cont->dev);
+ del_timer_sync(&msp->notify_timer);
+ clk_put(msp->clk);
+ iounmap(msp->registers);
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "ux500_msp_i2s");
+ regulator_put(drvdata->reg_vape);
+ kfree(msp);
+
+ return status;
+}
+
+int ux500_msp_i2s_suspend(struct ux500_msp_i2s_drvdata *drvdata)
+{
+ struct msp *msp = drvdata->msp;
+
+ pr_debug("%s: Enter (drvdata->id = %d).\n", __func__, drvdata->id);
+
+ down(&msp->lock);
+ if (msp->users > 0) {
+ up(&msp->lock);
+ return -EBUSY;
+ }
+ up(&msp->lock);
+
+ return 0;
+}
+
+int ux500_msp_i2s_resume(struct ux500_msp_i2s_drvdata *drvdata)
+{
+ pr_debug("%s: Enter (drvdata->id = %d).\n", __func__, drvdata->id);
+
+ return 0;
+}
+
+MODULE_LICENSE("GPLv2");
diff --git a/sound/soc/ux500/ux500_msp_i2s.h b/sound/soc/ux500/ux500_msp_i2s.h
new file mode 100644
index 00000000000..db88d0ca5de
--- /dev/null
+++ b/sound/soc/ux500/ux500_msp_i2s.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+
+#ifndef UX500_MSP_I2S_H
+#define UX500_MSP_I2S_H
+
+#include <linux/platform_device.h>
+#include <mach/msp.h>
+
+struct ux500_msp_i2s_drvdata {
+ int id;
+ struct msp *msp;
+ struct regulator *reg_vape;
+};
+
+struct ux500_msp_i2s_drvdata *ux500_msp_i2s_init(struct platform_device *pdev,
+ struct msp_i2s_platform_data *platform_data);
+int ux500_msp_i2s_exit(struct ux500_msp_i2s_drvdata *drvdata);
+int ux500_msp_i2s_open(struct ux500_msp_i2s_drvdata *drvdata, struct msp_config *msp_config);
+int ux500_msp_i2s_close(struct ux500_msp_i2s_drvdata *drvdata, enum i2s_flag flag);
+int ux500_msp_i2s_transfer(struct ux500_msp_i2s_drvdata *drvdata, struct i2s_message *message);
+int ux500_msp_i2s_hw_status(struct ux500_msp_i2s_drvdata *drvdata);
+dma_addr_t ux500_msp_i2s_get_pointer(struct ux500_msp_i2s_drvdata *drvdata,
+ enum i2s_direction_t i2s_direction);
+
+int ux500_msp_i2s_suspend(struct ux500_msp_i2s_drvdata *drvdata);
+int ux500_msp_i2s_resume(struct ux500_msp_i2s_drvdata *drvdata);
+
+#endif
+
diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
new file mode 100644
index 00000000000..29b3f5e0ffb
--- /dev/null
+++ b/sound/soc/ux500/ux500_pcm.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <asm/page.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "ux500_pcm.h"
+#include "ux500_msp_dai.h"
+
+static struct snd_pcm_hardware ux500_pcm_hw_playback = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_PAUSE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_U16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE |
+ SNDRV_PCM_FMTBIT_U16_BE,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .rate_min = UX500_PLATFORM_MIN_RATE_PLAYBACK,
+ .rate_max = UX500_PLATFORM_MAX_RATE_PLAYBACK,
+ .channels_min = UX500_PLATFORM_MIN_CHANNELS,
+ .channels_max = UX500_PLATFORM_MAX_CHANNELS,
+ .buffer_bytes_max = UX500_PLATFORM_BUFFER_BYTES_MAX,
+ .period_bytes_min = UX500_PLATFORM_PERIODS_BYTES_MIN,
+ .period_bytes_max = UX500_PLATFORM_PERIODS_BYTES_MAX,
+ .periods_min = UX500_PLATFORM_PERIODS_MIN,
+ .periods_max = UX500_PLATFORM_PERIODS_MAX,
+};
+
+static struct snd_pcm_hardware ux500_pcm_hw_capture = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_PAUSE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_U16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE |
+ SNDRV_PCM_FMTBIT_U16_BE,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .rate_min = UX500_PLATFORM_MIN_RATE_CAPTURE,
+ .rate_max = UX500_PLATFORM_MAX_RATE_CAPTURE,
+ .channels_min = UX500_PLATFORM_MIN_CHANNELS,
+ .channels_max = UX500_PLATFORM_MAX_CHANNELS,
+ .buffer_bytes_max = UX500_PLATFORM_BUFFER_BYTES_MAX,
+ .period_bytes_min = UX500_PLATFORM_PERIODS_BYTES_MIN,
+ .period_bytes_max = UX500_PLATFORM_PERIODS_BYTES_MAX,
+ .periods_min = UX500_PLATFORM_PERIODS_MIN,
+ .periods_max = UX500_PLATFORM_PERIODS_MAX,
+};
+
+static const char *stream_str(struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return "Playback";
+ else
+ return "Capture";
+}
+
+static void ux500_pcm_dma_hw_free(struct device *dev,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_dma_buffer *buf = runtime->dma_buffer_p;
+
+ if (runtime->dma_area == NULL)
+ return;
+
+ if (buf != &substream->dma_buffer) {
+ dma_free_coherent(
+ buf->dev.dev,
+ buf->bytes,
+ buf->area,
+ buf->addr);
+ kfree(runtime->dma_buffer_p);
+ }
+
+ snd_pcm_set_runtime_buffer(substream, NULL);
+}
+
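+/*
+ * DMA end-of-transfer callback: count underruns, advance the offset within
+ * the cyclic buffer by one period and notify ALSA that a period has elapsed.
+ */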
+void ux500_pcm_dma_eot_handler(void *data)
+{
+ struct snd_pcm_substream *substream = data;
+ struct snd_pcm_runtime *runtime;
+ struct ux500_pcm_private *private;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+
+ pr_debug("%s: MSP %d (%s): Enter.\n", __func__, dai->id, stream_str(substream));
+
+ if (substream) {
+ runtime = substream->runtime;
+ private = substream->runtime->private_data;
+
+ if (ux500_msp_dai_i2s_get_underrun_status(private->msp_id)) {
+ private->no_of_underruns++;
+ pr_debug("%s: Nr of underruns (%d)\n", __func__,
+ private->no_of_underruns);
+ }
+
+ /* calc the offset in the circular buffer */
+ private->offset += frames_to_bytes(runtime,
+ runtime->period_size);
+ private->offset %= frames_to_bytes(runtime,
+ runtime->period_size) * runtime->periods;
+
+ snd_pcm_period_elapsed(substream);
+ }
+}
+EXPORT_SYMBOL(ux500_pcm_dma_eot_handler);
+
+static int ux500_pcm_open(struct snd_pcm_substream *substream)
+{
+ int stream_id = substream->pstr->stream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct ux500_pcm_private *private;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ int ret;
+
+ pr_debug("%s: MSP %d (%s): Enter.\n", __func__, dai->id, stream_str(substream));
+
+ pr_debug("%s: Set runtime hwparams.\n", __func__);
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK)
+ snd_soc_set_runtime_hwparams(substream, &ux500_pcm_hw_playback);
+ else
+ snd_soc_set_runtime_hwparams(substream, &ux500_pcm_hw_capture);
+
+ /* ensure that buffer size is a multiple of period size */
+ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0) {
+ pr_err("%s: Error: snd_pcm_hw_constraints failed (%d)\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ pr_debug("%s: Init runtime private data.\n", __func__);
+ private = kzalloc(sizeof(struct ux500_pcm_private), GFP_KERNEL);
+ if (private == NULL)
+ return -ENOMEM;
+ private->msp_id = dai->id;
+ runtime->private_data = private;
+
+ pr_debug("%s: Set hw-struct for %s.\n", __func__, stream_str(substream));
+ runtime->hw = (stream_id == SNDRV_PCM_STREAM_PLAYBACK) ?
+ ux500_pcm_hw_playback : ux500_pcm_hw_capture;
+
+ return 0;
+}
+
+static int ux500_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct ux500_pcm_private *private = substream->runtime->private_data;
+
+ pr_debug("%s: Enter\n", __func__);
+
+ kfree(private);
+
+ return 0;
+}
+
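+/*
+ * Allocate (or reuse) a coherent DMA buffer large enough for the requested
+ * hw_params and attach it to the substream runtime.
+ */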
+static int ux500_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_dma_buffer *buf = runtime->dma_buffer_p;
+ int ret = 0;
+ int size;
+
+ pr_debug("%s: Enter\n", __func__);
+
+ size = params_buffer_bytes(hw_params);
+
+ if (buf) {
+ if (buf->bytes >= size)
+ goto out;
+ ux500_pcm_dma_hw_free(NULL, substream);
+ }
+
+ if (substream->dma_buffer.area != NULL &&
+ substream->dma_buffer.bytes >= size) {
+ buf = &substream->dma_buffer;
+ } else {
+ buf = kmalloc(sizeof(struct snd_dma_buffer), GFP_KERNEL);
+ if (!buf)
+ goto nomem;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = NULL;
+ buf->area = dma_alloc_coherent(
+ NULL,
+ size,
+ &buf->addr,
+ GFP_KERNEL);
+ buf->bytes = size;
+ buf->private_data = NULL;
+
+ if (!buf->area)
+ goto free;
+ }
+ snd_pcm_set_runtime_buffer(substream, buf);
+ ret = 1;
+ out:
+ runtime->dma_bytes = size;
+ return ret;
+
+ free:
+ kfree(buf);
+ nomem:
+ return -ENOMEM;
+}
+
+static int ux500_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ pr_debug("%s: Enter\n", __func__);
+
+ ux500_pcm_dma_hw_free(NULL, substream);
+
+ return 0;
+}
+
+static int ux500_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ pr_debug("%s: Enter\n", __func__);
+ return 0;
+}
+
+static int ux500_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct ux500_pcm_private *private = runtime->private_data;
+ int stream_id = substream->pstr->stream;
+
+ pr_debug("%s: Enter\n", __func__);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ pr_debug("%s: START/PAUSE-RELEASE\n", __func__);
+ if (runtime->status->state == SNDRV_PCM_STATE_XRUN) {
+ pr_debug("XRUN occurred\n");
+ return 0;
+ }
+
+ private->no_of_underruns = 0;
+ private->offset = 0;
+ ret = ux500_msp_dai_i2s_configure_sg(runtime->dma_addr,
+ runtime->periods,
+ frames_to_bytes(runtime, runtime->period_size),
+ private->msp_id,
+ stream_id);
+ if (ret) {
+ pr_err("%s: Failed to configure I2S!\n", __func__);
+ return -EINVAL;
+ }
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ pr_debug("%s: SNDRV_PCM_TRIGGER_STOP\n", __func__);
+ pr_debug("%s: no_of_underruns = %u\n",
+ __func__,
+ private->no_of_underruns);
+ break;
+
+ default:
+ pr_err("%s: Invalid command in pcm trigger\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static snd_pcm_uframes_t ux500_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct ux500_pcm_private *private = runtime->private_data;
+
+ pr_debug("%s: dma_offset %d frame %ld\n", __func__, private->offset,
+ bytes_to_frames(substream->runtime, private->offset));
+
+ return bytes_to_frames(substream->runtime, private->offset);
+}
+
+static int ux500_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ pr_debug("%s: Enter.\n", __func__);
+
+ return dma_mmap_coherent(
+ NULL,
+ vma,
+ runtime->dma_area,
+ runtime->dma_addr,
+ runtime->dma_bytes);
+}
+
+static struct snd_pcm_ops ux500_pcm_ops = {
+ .open = ux500_pcm_open,
+ .close = ux500_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = ux500_pcm_hw_params,
+ .hw_free = ux500_pcm_hw_free,
+ .prepare = ux500_pcm_prepare,
+ .trigger = ux500_pcm_trigger,
+ .pointer = ux500_pcm_pointer,
+ .mmap = ux500_pcm_mmap
+};
+
+int ux500_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm *pcm = rtd->pcm;
+
+ pr_debug("%s: pcm = %d\n", __func__, (int)pcm);
+
+ pcm->info_flags = 0;
+ strcpy(pcm->name, "UX500_PCM");
+
+ pr_debug("%s: pcm->name = %s.\n", __func__, pcm->name);
+
+ return 0;
+}
+
+static void ux500_pcm_free(struct snd_pcm *pcm)
+{
+ pr_debug("%s: Enter\n", __func__);
+}
+
+static int ux500_pcm_suspend(struct snd_soc_dai *dai)
+{
+ pr_debug("%s: Enter\n", __func__);
+
+ return 0;
+}
+
+static int ux500_pcm_resume(struct snd_soc_dai *dai)
+{
+ pr_debug("%s: Enter\n", __func__);
+
+ return 0;
+}
+
+struct snd_soc_platform_driver ux500_pcm_soc_drv = {
+ .ops = &ux500_pcm_ops,
+ .pcm_new = ux500_pcm_new,
+ .pcm_free = ux500_pcm_free,
+ .suspend = ux500_pcm_suspend,
+ .resume = ux500_pcm_resume,
+};
+EXPORT_SYMBOL(ux500_pcm_soc_drv);
+
+static int __devinit ux500_pcm_drv_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pr_info("%s: Register ux500-pcm SoC platform driver.\n", __func__);
+ ret = snd_soc_register_platform(&pdev->dev, &ux500_pcm_soc_drv);
+ if (ret < 0) {
+ pr_err("%s: Error: Failed to register "
+ "ux500-pcm SoC platform driver (%d)!\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devexit ux500_pcm_drv_remove(struct platform_device *pdev)
+{
+ pr_info("%s: Unregister ux500-pcm SoC platform driver.\n", __func__);
+ snd_soc_unregister_platform(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver ux500_pcm_driver = {
+ .driver = {
+ .name = "ux500-pcm",
+ .owner = THIS_MODULE,
+ },
+
+ .probe = ux500_pcm_drv_probe,
+ .remove = __devexit_p(ux500_pcm_drv_remove),
+};
+
+static int __init ux500_pcm_drv_init(void)
+{
+ pr_debug("%s: Register ux500-pcm platform driver.\n", __func__);
+
+ return platform_driver_register(&ux500_pcm_driver);
+}
+
+static void __exit ux500_pcm_drv_exit(void)
+{
+ pr_debug("%s: Unregister ux500-pcm platform driver.\n", __func__);
+
+ platform_driver_unregister(&ux500_pcm_driver);
+}
+
+module_init(ux500_pcm_drv_init);
+module_exit(ux500_pcm_drv_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/ux500/ux500_pcm.h b/sound/soc/ux500/ux500_pcm.h
new file mode 100644
index 00000000000..50f46615275
--- /dev/null
+++ b/sound/soc/ux500/ux500_pcm.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+#ifndef UX500_PCM_H
+#define UX500_PCM_H
+
+#include <mach/msp.h>
+
+#define UX500_PLATFORM_MIN_RATE_PLAYBACK 8000
+#define UX500_PLATFORM_MAX_RATE_PLAYBACK 48000
+#define UX500_PLATFORM_MIN_RATE_CAPTURE 8000
+#define UX500_PLATFORM_MAX_RATE_CAPTURE 48000
+
+#define UX500_PLATFORM_MIN_CHANNELS 1
+#define UX500_PLATFORM_MAX_CHANNELS 8
+
+#define UX500_PLATFORM_PERIODS_BYTES_MIN 128
+#define UX500_PLATFORM_PERIODS_BYTES_MAX (64 * PAGE_SIZE)
+#define UX500_PLATFORM_PERIODS_MIN 2
+#define UX500_PLATFORM_PERIODS_MAX 48
+#define UX500_PLATFORM_BUFFER_BYTES_MAX (2048 * PAGE_SIZE)
+
+extern struct snd_soc_platform ux500_soc_platform;
+
+struct ux500_pcm_private {
+ int msp_id;
+ int stream_id;
+ unsigned int no_of_underruns;
+ unsigned int offset;
+};
+
+void ux500_pcm_dma_eot_handler(void *data);
+
+#endif